Recherche avancée

Médias (29)

Mot : - Tags -/Musique

Autres articles (92)

  • MediaSPIP version 0.1 Beta

    16 avril 2011, par

    MediaSPIP 0.1 beta est la première version de MediaSPIP décrétée comme "utilisable".
    Le fichier zip ici présent contient uniquement les sources de MediaSPIP en version standalone.
    Pour avoir une installation fonctionnelle, il est nécessaire d’installer manuellement l’ensemble des dépendances logicielles sur le serveur.
    Si vous souhaitez utiliser cette archive pour une installation en mode ferme, il vous faudra également procéder à d’autres modifications (...)

  • MediaSPIP 0.1 Beta version

    25 avril 2011, par

    MediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
    The zip file provided here only contains the sources of MediaSPIP in its standalone version.
    To get a working installation, you must manually install all software dependencies on the server.
    If you want to use this archive for an installation in "farm mode", you will also need to proceed to other manual (...)

  • Websites made with MediaSPIP

    2 mai 2011, par

    This page lists some websites based on MediaSPIP.

Sur d’autres sites (18264)

  • Encoding a screenshot into a video using FFMPEG

    2 juillet 2013, par mohM

    I'm trying to get the pixels from the screen, and encode the screenshot into a video using ffmpeg. I've seen a couple of examples but they either assume you already have the pixel data, or use image file input. It seems like whether I use sws_scale() or not (which is included in the examples I've seen), or whether I'm typecasting a HBITMAP or RGBQUAD* it's telling me that the image src data is bad and is encoding a blank image rather than the screenshot. Is there something I'm missing here ?

    AVCodec* codec;
    AVCodecContext* c = NULL;
    AVFrame* inpic;
    uint8_t* outbuf, *picture_buf;
    int i, out_size, size, outbuf_size;
    HBITMAP hBmp;
    //int x,y;

    avcodec_register_all();

    printf("Video encoding\n");

    // Find the mpeg1 video encoder
    codec = avcodec_find_encoder(CODEC_ID_H264);
    if (!codec) {
       fprintf(stderr, "Codec not found\n");
       exit(1);
    }
    else printf("H264 codec found\n");

    c = avcodec_alloc_context3(codec);
    inpic = avcodec_alloc_frame();

    c->bit_rate = 400000;
    c->width = screenWidth;                                     // resolution must be a multiple of two
    c->height = screenHeight;
    c->time_base.num = 1;
    c->time_base.den = 25;
    c->gop_size = 10;                                           // emit one intra frame every ten frames
    c->max_b_frames=1;
    c->pix_fmt = PIX_FMT_YUV420P;
    c->codec_id = CODEC_ID_H264;
    //c->codec_type = AVMEDIA_TYPE_VIDEO;

    //av_opt_set(c->priv_data, "preset", "slow", 0);
    //printf("Setting presets to slow for performance\n");

    // Open the encoder
    if (avcodec_open2(c, codec,NULL) < 0) {
       fprintf(stderr, "Could not open codec\n");
       exit(1);
    }
    else printf("H264 codec opened\n");

    outbuf_size = 100000 + 12*c->width*c->height;           // alloc image and output buffer
    //outbuf_size = 100000;
    outbuf = static_cast(malloc(outbuf_size));
    size = c->width * c->height;
    picture_buf = static_cast(malloc((size*3)/2));
    printf("Setting buffer size to: %d\n",outbuf_size);

    FILE* f = fopen("example.mpg","wb");
    if(!f) printf("x  -  Cannot open video file for writing\n");
    else printf("Opened video file for writing\n");

    /*inpic->data[0] = picture_buf;
    inpic->data[1] = inpic->data[0] + size;
    inpic->data[2] = inpic->data[1] + size / 4;
    inpic->linesize[0] = c->width;
    inpic->linesize[1] = c->width / 2;
    inpic->linesize[2] = c->width / 2;*/


    //int x,y;
    // encode 1 second of video
    for(i=0;itime_base.den;i++) {
       fflush(stdout);


       HWND hDesktopWnd = GetDesktopWindow();
       HDC hDesktopDC = GetDC(hDesktopWnd);
       HDC hCaptureDC = CreateCompatibleDC(hDesktopDC);
       hBmp = CreateCompatibleBitmap(GetDC(0), screenWidth, screenHeight);
       SelectObject(hCaptureDC, hBmp);
       BitBlt(hCaptureDC, 0, 0, screenWidth, screenHeight, hDesktopDC, 0, 0, SRCCOPY|CAPTUREBLT);
       BITMAPINFO bmi = {0};
       bmi.bmiHeader.biSize = sizeof(bmi.bmiHeader);
       bmi.bmiHeader.biWidth = screenWidth;
       bmi.bmiHeader.biHeight = screenHeight;
       bmi.bmiHeader.biPlanes = 1;
       bmi.bmiHeader.biBitCount = 32;
       bmi.bmiHeader.biCompression = BI_RGB;
       RGBQUAD *pPixels = new RGBQUAD[screenWidth*screenHeight];
       GetDIBits(hCaptureDC,hBmp,0,screenHeight,pPixels,&bmi,DIB_RGB_COLORS);

    inpic->pts = (float) i * (1000.0/(float)(c->time_base.den))*90;
       avpicture_fill((AVPicture*)inpic, (uint8_t*)pPixels, PIX_FMT_BGR32, c->width, c->height);                   // Fill picture with image
       av_image_alloc(inpic->data, inpic->linesize, c->width, c->height, c->pix_fmt, 1);
       //printf("Allocated frame\n");
       //SaveBMPFile(L"screenshot.bmp",hBmp,hDc,screenWidth,screenHeight);
       ReleaseDC(hDesktopWnd,hDesktopDC);
       DeleteDC(hCaptureDC);
       DeleteObject(hBmp);

       // encode the image
       out_size = avcodec_encode_video(c, outbuf, outbuf_size, inpic);
       printf("Encoding frame %3d (size=%5d)\n", i, out_size);
       fwrite(outbuf, 1, out_size, f);
    }

    // get the delayed frames
    for(; out_size; i++) {
       fflush(stdout);

       out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
       printf("Writing frame %3d (size=%5d)\n", i, out_size);
       fwrite(outbuf, 1, out_size, f);
    }

    // add sequence end code to have a real mpeg file
    outbuf[0] = 0x00;
    outbuf[1] = 0x00;
    outbuf[2] = 0x01;
    outbuf[3] = 0xb7;
    fwrite(outbuf, 1, 4, f);
    fclose(f);
    free(picture_buf);
    free(outbuf);

    avcodec_close(c);
    av_free(c);
    av_free(inpic);
    printf("Closed codec and Freed\n");
  • Green screen writing FLV file libavformat

    5 mai 2013, par YYZ

    I've written a piece of C++ code that can capture webcam video frame, decode them, convert them to YUV420P, encode them and then write them to a file. If I use the mpeg2 codec and write to a .mpg file, everything works perfectly. But, if I use flv, then the output produced is just a green screen. I'm not sure if there are different encoder settings I need to set for encoding flv video ? Here's my code(the relevant parts) :

    Encoder settings :

    // Encoder configuration for the output stream.
    c->codec_id = codec_id;
    c->bit_rate = 400000;
    // Resolution must be a multiple of two.
    c->width    = 352;
    c->height   = 288;
    // Frame timing: STREAM_FRAME_RATE frames per second (num/den = 1/rate).
    c->time_base.den = STREAM_FRAME_RATE;
    c->time_base.num = 1;
    //emit one intra frame every twelve frames at most
    c->gop_size      = 12;
    // NOTE(review): STREAM_PIX_FMT is defined elsewhere; presumably
    // PIX_FMT_YUV420P given the sws_scale conversion later — confirm.
    c->pix_fmt       = STREAM_PIX_FMT;

    Write the frames

    // Capture one webcam frame, convert it to STREAM_PIX_FMT, encode it, and
    // write it to the output container.  Quoted fragment from the question;
    // several identifiers (st, pCodecCtx, pFormatCtx, packet, frameYUV,
    // numBytes, frame, oc, ...) are declared outside this excerpt.
    int ret;
    uint8_t *buffer = NULL;
    static struct SwsContext *sws_ctx;

    //Setup the codec context, and set its width and height to be equal to the input video width and height
    AVCodecContext *c = st->codec;
    c->width = pCodecCtx->width;
    c->height = pCodecCtx->height;

    av_init_packet(&packet);
    frameYUV = avcodec_alloc_frame();

    //Determine how big the buffer will need to be to store the captured frame
    numBytes = avpicture_get_size(STREAM_PIX_FMT,pCodecCtx->width,pCodecCtx->height);

    //Allocate the needed buffer size
    buffer = new uint8_t[numBytes];
    // Converter from the camera's native pixel format to STREAM_PIX_FMT.
    sws_ctx = sws_getContext(pCodecCtx->width,pCodecCtx->height,pCodecCtx->pix_fmt,
                           pCodecCtx->width,pCodecCtx->height,
                           STREAM_PIX_FMT,SWS_BICUBIC,NULL,NULL,NULL);

    //Fill the output frame
    avpicture_fill((AVPicture *)frameYUV,buffer,STREAM_PIX_FMT,pCodecCtx->width,pCodecCtx->height);

    //Read a video frame in
    // NOTE(review): return values of av_read_frame and
    // avcodec_decode_video2 are not checked; frameFinished may be 0.
    av_read_frame(pFormatCtx,&packet);

    //Decode the contents of packet into pFrame
    avcodec_decode_video2(pCodecCtx,pFrame,&frameFinished,&packet);

    //Scale pFrame into frameYUV, and convert to PIXFMTYUV420P
    sws_scale
    (
       sws_ctx,
       (uint8_t const * const *)pFrame->data,
       pFrame->linesize,
       0,
       pCodecCtx->height,
       frameYUV->data,
       frameYUV->linesize
    );
    // NOTE(review): the frame is encoded twice — once into samsPacket here
    // and again into pkt below; samsPacket is never written or freed.
    av_init_packet(&samsPacket);
    //Encode frameYUV
    avcodec_encode_video2(c, &samsPacket, frameYUV, &gotSamsPacket);

    AVPacket pkt = { 0 };
    int got_packet;
    av_init_packet(&pkt);
    // encode the image
    // NOTE(review): this call encodes `frame`, not the converted frameYUV —
    // likely the source of the green-screen output if `frame` is empty.
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0){
       debugLogStreamSave->debug("Error encoding video frame");
       exit(1);
    }
    if (!ret && got_packet && pkt.size){
       pkt.stream_index = st->index;

       // Write the compressed frame to our output
       ret = av_interleaved_write_frame(oc, &pkt);
       // NOTE(review): the quoted snippet is truncated here — the closing
       // brace of this if-block is missing in the article.

    Any help would be appreciated !

  • How to mention real image instead of dummy image in ffmpeg api-example.c

    2 mars 2013, par Mohan

    I am using video_encode_example function from api-example.c of FFmpeg,
    which basically creates 25 dummy images and encodes into a one second video.
    How ever i am unable to mention real images instead of dummy ones.
    If any one know how to do this for xcode objective C, pl submit a reply.
    Below is the function

    /*
    * Video encoding example
    */
    static void video_encode_example(const char *filename)
    {
       AVCodec *codec;
       AVCodecContext *c= NULL;
       int i, out_size, size, x, y, outbuf_size;
       FILE *f;
       AVFrame *picture;
       uint8_t *outbuf, *picture_buf;

       printf("Video encoding\n");

       /* find the mpeg1 video encoder */
       codec = avcodec_find_encoder(CODEC_ID_MPEG1VIDEO);
       if (!codec) {
           fprintf(stderr, "codec not found\n");
           exit(1);
       }

       c= avcodec_alloc_context();
       picture= avcodec_alloc_frame();

       /* put sample parameters */
       c->bit_rate = 400000;
       /* resolution must be a multiple of two */
       c->width = 352;
       c->height = 288;
       /* frames per second */
       c->time_base= (AVRational){1,25};
       c->gop_size = 10; /* emit one intra frame every ten frames */
       c->max_b_frames=1;
       c->pix_fmt = PIX_FMT_YUV420P;

       /* open it */
       if (avcodec_open(c, codec) < 0) {
           fprintf(stderr, "could not open codec\n");
           exit(1);
       }

       f = fopen(filename, "wb");
       if (!f) {
           fprintf(stderr, "could not open %s\n", filename);
           exit(1);
       }

       /* alloc image and output buffer */
       outbuf_size = 100000;
       outbuf = malloc(outbuf_size);
       size = c->width * c->height;
       picture_buf = malloc((size * 3) / 2); /* size for YUV 420 */

       picture->data[0] = picture_buf;
       picture->data[1] = picture->data[0] + size;
       picture->data[2] = picture->data[1] + size / 4;
       picture->linesize[0] = c->width;
       picture->linesize[1] = c->width / 2;
       picture->linesize[2] = c->width / 2;

       /* encode 1 second of video */
       for(i=0;i<25;i++) {
           fflush(stdout);
           /* prepare a dummy image */
           /* Y */
           for(y=0;yheight;y++) {
               for(x=0;xwidth;x++) {
                   picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
               }
           }

           /* Cb and Cr */
           for(y=0;yheight/2;y++) {
               for(x=0;xwidth/2;x++) {
                   picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
                   picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
               }
           }

           /* encode the image */
           out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
           printf("encoding frame %3d (size=%5d)\n", i, out_size);
           fwrite(outbuf, 1, out_size, f);
       }

       /* get the delayed frames */
       for(; out_size; i++) {
           fflush(stdout);

           out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
           printf("write frame %3d (size=%5d)\n", i, out_size);
           fwrite(outbuf, 1, out_size, f);
       }

       /* add sequence end code to have a real mpeg file */
       outbuf[0] = 0x00;
       outbuf[1] = 0x00;
       outbuf[2] = 0x01;
       outbuf[3] = 0xb7;
       fwrite(outbuf, 1, 4, f);
       fclose(f);
       free(picture_buf);
       free(outbuf);

       avcodec_close(c);
       av_free(c);
       av_free(picture);
       printf("\n");
    }