Advanced search

Media (1)

Word: - Tags -/lev manovitch

Other articles (48)

  • Multilang: improving the interface for multilingual blocks

    18 February 2011, by

    Multilang is an additional plugin that is not enabled by default when MediaSPIP is initialized.
    Once it is activated, MediaSPIP init automatically puts a preconfiguration in place so that the new feature is immediately operational. No separate configuration step is therefore required.

  • Authorizations overridden by plugins

    27 April 2010, by

    MediaSPIP core
    autoriser_auteur_modifier(), so that visitors are able to edit their own information on the authors page

  • HTML5 audio and video support

    13 April 2011, by

    MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
    The player was created specifically for MediaSPIP and can easily be adapted to fit a specific theme.
    For older browsers, the Flowplayer Flash fallback is used.
    MediaSPIP allows for media playback on major mobile platforms with the above (...)

On other sites (9583)

  • ffmpeg record screen and save video file to disk as .mpg

    7 January 2015, by musimbate

    I want to record the screen of my PC (using gdigrab on my Windows machine) and store the captured video on my disk as an .mp4 or .mpg file. I have found an example piece of code that grabs the screen and shows it in an SDL window here: http://xwk.iteye.com/blog/2125720 (the code is at the bottom of the page and has an English version), and the ffmpeg muxing example https://ffmpeg.org/doxygen/trunk/muxing_8c-source.html seems able to help encode audio and video into a desired output video file.

    I have tried to combine the two by having one format context for grabbing the screen (AVFormatContext *pFormatCtx; in my code) and a separate format context for writing the desired video file (AVFormatContext *outFormatContextEncoded;). Within the loop that reads packets from the input (screen-grab) stream, I write the packets directly to the output file, as shown in my code. I have kept the SDL code so I can see what I am recording. Below is my code with my modified write_video_frame() function.

    The code builds OK, but the output video cannot be played by VLC. When I run the command

    ffmpeg -i filename.mpg

    I get this output:

    [mpeg @ 003fed20] probed stream 0 failed
    [mpeg @ 003fed20] Stream #0: not enough frames to estimate rate; consider increasing probesize
    [mpeg @ 003fed20] Could not find codec parameters for stream 0 (Video: none): unknown codec
    Consider increasing the value for the 'analyzeduration' and 'probesize' options
    karamage.mpg: could not find codec parameters
    Input #0, mpeg, from 'karamage.mpg':
     Duration: 19:30:09.25, start: 37545.438756, bitrate: 2 kb/s
       Stream #0:0[0x1e0]: Video: none, 90k tbr, 90k tbn
    At least one output file must be specified

    Am I doing something wrong here? I am new to ffmpeg, and any guidance on this is highly appreciated. Thank you for your time.

    int main(int argc, char* argv[])
    {

       AVFormatContext *pFormatCtx;

       int             i, videoindex;
       AVCodecContext  *pCodecCtx;
       AVCodec         *pCodec;

       av_register_all();
       avformat_network_init();

       //Locally defined structure.
       OutputStream outVideoStream = { 0 };

       const char *filename;
       AVOutputFormat *outFormatEncoded;
       AVFormatContext *outFormatContextEncoded;
       AVCodec *videoCodec;

       filename="karamage.mpg";

       int ret1;

       int have_video = 0, have_audio = 0;
       int encode_video = 0, encode_audio = 0;


       AVDictionary *opt = NULL;



       //ASSIGN STH TO THE FORMAT CONTEXT.
       pFormatCtx = avformat_alloc_context();

       //
       //Use this when opening a local file.
       //char filepath[]="src01_480x272_22.h265";
       //avformat_open_input(&pFormatCtx,filepath,NULL,NULL)

       //Register Device
       avdevice_register_all();

       //Use gdigrab
       AVDictionary* options = NULL;
       //Set some options
       //grabbing frame rate
       //av_dict_set(&options,"framerate","5",0);
       //The distance from the left edge of the screen or desktop
       //av_dict_set(&options,"offset_x","20",0);
       //The distance from the top edge of the screen or desktop
       //av_dict_set(&options,"offset_y","40",0);
       //Video frame size. The default is to capture the full screen
       //av_dict_set(&options,"video_size","640x480",0);
       AVInputFormat *ifmt=av_find_input_format("gdigrab");
       if(avformat_open_input(&pFormatCtx,"desktop",ifmt,&options)!=0){
           printf("Couldn't open input stream.\n");
           return -1;
       }

       if(avformat_find_stream_info(pFormatCtx,NULL)<0)
       {
           printf("Couldn't find stream information.\n");
           return -1;
       }
       videoindex=-1;
       for(i=0; i < pFormatCtx->nb_streams; i++)
           if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
           {
               videoindex=i;
               break;
           }
       if(videoindex==-1)
       {
           printf("Didn't find a video stream.\n");
           return -1;
       }
       pCodecCtx=pFormatCtx->streams[videoindex]->codec;
       pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
       if(pCodec==NULL)
       {
           printf("Codec not found.\n");
           return -1;
       }
       if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
       {
           printf("Could not open codec.\n");
           return -1;
       }


       AVFrame *pFrame,*pFrameYUV;
       pFrame=avcodec_alloc_frame();
       pFrameYUV=avcodec_alloc_frame();

       //PIX_FMT_YUV420P WHAT DOES THIS SAY ABOUT THE FORMAT??
       uint8_t *out_buffer=(uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));


       avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

       //<<<<<<<<<<<-------PREP WORK TO WRITE ENCODED VIDEO FILES-----

       avformat_alloc_output_context2(&outFormatContextEncoded, NULL, NULL, filename);
       if (!outFormatContextEncoded) {
           printf("Could not deduce output format from file extension: using MPEG.\n");
           avformat_alloc_output_context2(&outFormatContextEncoded, NULL, "mpeg", filename);
       }
       if (!outFormatContextEncoded)
           return 1;

       outFormatEncoded=outFormatContextEncoded->oformat;


        //THIS CREATES THE STREAMS(AUDIO AND VIDEO) ADDED TO OUR OUTPUT STREAM

       if (outFormatEncoded->video_codec != AV_CODEC_ID_NONE) {

           //YOUR VIDEO AND AUDIO PROPS ARE SET HERE.
           add_stream(&outVideoStream, outFormatContextEncoded, &videoCodec, outFormatEncoded->video_codec);
           have_video = 1;
           encode_video = 1;
       }


        // Now that all the parameters are set, we can open the audio and
        // video codecs and allocate the necessary encode buffers.
       if (have_video)
           open_video(outFormatContextEncoded, videoCodec, &outVideoStream, opt);

        av_dump_format(outFormatContextEncoded, 0, filename, 1);


         /* open the output file, if needed */
       if (!(outFormatEncoded->flags & AVFMT_NOFILE)) {
           ret1 = avio_open(&outFormatContextEncoded->pb, filename, AVIO_FLAG_WRITE);
           if (ret1 < 0) {
               //fprintf(stderr, "Could not open '%s': %s\n", filename,
               //        av_err2str(ret));
               fprintf(stderr, "Could not open your dumb file.\n");
               return 1;
           }
       }


       /* Write the stream header, if any. */
       ret1 = avformat_write_header(outFormatContextEncoded, &opt);
       if (ret1 < 0) {
           //fprintf(stderr, "Error occurred when opening output file: %s\n",
            //       av_err2str(ret));
           fprintf(stderr, "Error occurred when opening output file\n");
           return 1;
       }


       //<<<<<<<<<<<-------PREP WORK TO WRITE ENCODED VIDEO FILES-----

       //SDL----------------------------
       if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
           printf( "Could not initialize SDL - %s\n", SDL_GetError());
           return -1;
       }
       int screen_w=640,screen_h=360;
       const SDL_VideoInfo *vi = SDL_GetVideoInfo();
       //Half of the Desktop's width and height.
       screen_w = vi->current_w/2;
       screen_h = vi->current_h/2;
       SDL_Surface *screen;
       screen = SDL_SetVideoMode(screen_w, screen_h, 0,0);

       if(!screen) {  
           printf("SDL: could not set video mode - exiting:%s\n",SDL_GetError());  
           return -1;
       }
       SDL_Overlay *bmp;
       bmp = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height,SDL_YV12_OVERLAY, screen);
       SDL_Rect rect;
       //SDL End------------------------
       int ret, got_picture;

       AVPacket *packet=(AVPacket *)av_malloc(sizeof(AVPacket));

       //TRY TO INIT THE PACKET HERE
        av_init_packet(packet);


       //Output Information-----------------------------
       printf("File Information---------------------\n");
       av_dump_format(pFormatCtx,0,NULL,0);
       printf("-------------------------------------------------\n");

       struct SwsContext *img_convert_ctx;
       img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
       //------------------------------
       //
       while(av_read_frame(pFormatCtx, packet)>=0)
       {

           if(packet->stream_index==videoindex)
           {
               //HERE WE DECODE THE PACKET INTO THE FRAME
               ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
               if(ret < 0)
               {
                   printf("Decode Error.\n");
                   return -1;
               }
               if(got_picture)
               {

                   //THIS IS WHERE WE DO STH WITH THE FRAME WE JUST GOT FROM THE STREAM
                   //FREE AREA--START
                   //IN HERE YOU CAN WORK WITH THE FRAME OF THE PACKET.
                   write_video_frame(outFormatContextEncoded, &outVideoStream,packet);


                   //FREE AREA--END
                   sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);


                   SDL_LockYUVOverlay(bmp);
                   bmp->pixels[0]=pFrameYUV->data[0];
                   bmp->pixels[2]=pFrameYUV->data[1];
                   bmp->pixels[1]=pFrameYUV->data[2];    
                   bmp->pitches[0]=pFrameYUV->linesize[0];
                   bmp->pitches[2]=pFrameYUV->linesize[1];  
                   bmp->pitches[1]=pFrameYUV->linesize[2];
                   SDL_UnlockYUVOverlay(bmp);
                   rect.x = 0;    
                   rect.y = 0;    
                   rect.w = screen_w;    
                   rect.h = screen_h;  
                   SDL_DisplayYUVOverlay(bmp, &rect);
                   //Delay 40ms----WHY THIS DELAY????
                   SDL_Delay(40);
               }
           }
           av_free_packet(packet);
       }//THE LOOP TO PULL PACKETS FROM THE FORMAT CONTEXT ENDS HERE.

       //AFTER THE WHILE LOOP WE DO SOME CLEANING

       //av_read_pause(context);


        av_write_trailer(outFormatContextEncoded);
        close_stream(outFormatContextEncoded, &outVideoStream);
        if (!(outFormatContextEncoded->flags & AVFMT_NOFILE))
           /* Close the output file. */
           avio_close(outFormatContextEncoded->pb);

       /* free the stream */
       avformat_free_context(outFormatContextEncoded);



       //STOP DOING YOUR CLEANING
       sws_freeContext(img_convert_ctx);



       SDL_Quit();

       av_free(out_buffer);
       av_free(pFrameYUV);
       avcodec_close(pCodecCtx);
       avformat_close_input(&pFormatCtx);

       return 0;
    }



    /*
    * encode one video frame and send it to the muxer
    * return 1 when encoding is finished, 0 otherwise
    */
    static int write_video_frame(AVFormatContext *oc, OutputStream *ost,AVPacket * pkt11)
    {
       int ret;
       AVCodecContext *c;
       AVFrame *frame;
       int got_packet = 0;

       c = ost->st->codec;

       //DO NOT NEED THIS FRAME.
       //frame = get_video_frame(ost);

       if (oc->oformat->flags & AVFMT_RAWPICTURE) {

           //IGNORE THIS FOR A MOMENT
           /* a hack to avoid data copy with some raw video muxers */
           AVPacket pkt;
           av_init_packet(&pkt);

           if (!frame)
               return 1;

           pkt.flags        |= AV_PKT_FLAG_KEY;
           pkt.stream_index  = ost->st->index;
           pkt.data          = (uint8_t *)frame;
           pkt.size          = sizeof(AVPicture);

           pkt.pts = pkt.dts = frame->pts;
           av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);

           ret = av_interleaved_write_frame(oc, &pkt);
       } else {

               ret = write_frame(oc, &c->time_base, ost->st, pkt11);

       }

       if (ret < 0) {
           fprintf(stderr, "Error while writing video frame: %s\n");
           exit(1);
       }


       return 1;
    }
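
    For reference, the muxing example only ever hands the muxer packets produced by its own output encoder, whereas the packets read from gdigrab are raw captured frames. A minimal sketch of that encode-then-write step, assuming an encoder context enc_ctx, an output stream out_stream and a converted yuv_frame set up as in muxing.c (these names are placeholders, not taken from the code above):

    static int encode_and_write(AVFormatContext *oc, AVCodecContext *enc_ctx,
                                AVStream *out_stream, AVFrame *yuv_frame)
    {
       AVPacket pkt;
       int got_packet = 0;
       int ret;

       av_init_packet(&pkt);
       pkt.data = NULL;  /* let the encoder allocate the payload */
       pkt.size = 0;

       ret = avcodec_encode_video2(enc_ctx, &pkt, yuv_frame, &got_packet);
       if (ret < 0)
           return ret;
       if (!got_packet)  /* the encoder is still buffering frames */
           return 0;

       /* rescale timestamps from the codec time base to the stream time base */
       av_packet_rescale_ts(&pkt, enc_ctx->time_base, out_stream->time_base);
       pkt.stream_index = out_stream->index;
       return av_interleaved_write_frame(oc, &pkt);
    }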
  • Display filtered ffmpeg frame with GTK

    22 January 2015, by Fjotten

    I want to display cropped and scaled frames using the ffmpeg API, and I am using GTK+ 3 for the GUI components. By following this tutorial and the ffmpeg examples, I am able to display unfiltered frames, though with some instability. The filtered frames do not display correctly at all; the output is mostly completely black. I suspect sws_scale() is responsible, but I have not found out why this is happening.

    Using the "trivial" display from the ffmpeg examples, I can confirm that the frame is being cropped and scaled properly.

    Running the code below, I get a bunch of errors:

    [swscaler @ 0x7fb58b025400] bad src image pointers
    [swscaler @ 0x7fb58b025400] bad dst image pointers

    I also get this error:

    [swscaler @ 0x7fd05c025600] Warning: data is not aligned! This can lead to a speedloss

    I tried making a buffer that was 16-byte aligned, but it did not seem to have any effect on the result.
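
    For comparison, a sketch of an aligned allocation using av_image_alloc() from libavutil (dst_data and dst_linesize are placeholder names; 32 is the alignment in bytes):

    #include <libavutil/imgutils.h>

    /* Sketch: let libavutil choose aligned pointers and linesizes for the
     * RGB24 destination picture; free it later with av_freep(&dst_data[0]). */
    uint8_t *dst_data[4];
    int      dst_linesize[4];
    if (av_image_alloc(dst_data, dst_linesize,
                       dec_ctx->width, dec_ctx->height,
                       AV_PIX_FMT_RGB24, 32) < 0) {
        fprintf(stderr, "Could not allocate destination picture\n");
    }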

    This is how I decode the frames and apply the filters:

    void decode(gpointer args) {
       int ret;
       AVPacket packet;
       AVFrame *frame = av_frame_alloc();
       AVFrame *filt_frame = av_frame_alloc();
       int got_frame;

       if(!frame || !filt_frame) {
           perror("Could not allocate frame");
           exit(1);
       }

       /* read all packets */
       while (1) {
           if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
               break;
           if (packet.stream_index == video_stream_index) {
               got_frame = 0;
               ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
               if (ret < 0) {
                   av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
                   break;
               }
               if (got_frame) {
                   frame->pts = av_frame_get_best_effort_timestamp(frame);
                   /* push the decoded frame into the filtergraph */
                   if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                       av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                       break;
                   }
                   /* pull filtered frames from the filtergraph */
                   while (1) {
                       ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                       if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                           break;
                       if (ret < 0)
                           goto end;
                       display_frame2(filt_frame, buffersink_ctx->inputs[0]->time_base);
                       av_frame_unref(filt_frame);
                   }
                   av_frame_unref(frame);
               }
           }
           av_free_packet(&packet);
       }
    end:
       avfilter_graph_free(&filter_graph);
       avcodec_close(dec_ctx);
       avformat_close_input(&fmt_ctx);
       av_frame_free(&frame);
       av_frame_free(&filt_frame);
       if (ret < 0 && ret != AVERROR_EOF) {
           fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
           exit(1);
       }

    }

    And this is how I display the frames:

    void display_frame2(const AVFrame *frame, AVRational time_base) {
       GdkPixbuf *pixbuf;
       int64_t delay;
       AVFrame *filt;
       uint8_t *buffer;
       int num_bytes, i;
       buffer = NULL;


       filt = av_frame_alloc();
       num_bytes = avpicture_get_size(PIX_FMT_RGB24, dec_ctx->width, dec_ctx->height);
       buffer = (uint8_t *)av_malloc(num_bytes * sizeof(uint8_t));
       avpicture_fill((AVPicture *)filt, buffer, PIX_FMT_RGB24, dec_ctx->width, dec_ctx->height);

       if (frame->pts != AV_NOPTS_VALUE) {
           if (last_pts != AV_NOPTS_VALUE) {
               /* sleep roughly the right amount of time;
                * usleep is in microseconds, just like AV_TIME_BASE. */
               delay = av_rescale_q(frame->pts - last_pts,
                                    time_base, AV_TIME_BASE_Q);
               if (delay > 0 && delay < 1000000)
                   usleep(delay);
           }
           last_pts = frame->pts;
       }

       sws_scale(  sws_ctx,
                   (uint8_t const * const *)frame->data,
                   frame->linesize,
                   0,
                   frame->height,
                   filt->data,
                   filt->linesize);
       pixbuf = gdk_pixbuf_new_from_data(  filt->data[0], GDK_COLORSPACE_RGB,
                                           0, 8, dec_ctx->width, dec_ctx->height,
                                           filt->linesize[0], NULL, NULL);
       gtk_image_set_from_pixbuf((GtkImage *)image, pixbuf);
       free( filt );
       free( buffer );

    }

    EDIT:
    After some more thought and experimentation, I got the filtered frames to display, albeit in SDL rather than GTK+. I used the transcoding example from ffmpeg to see whether I could re-encode the video with the filters, and that does indeed work. With that example I basically changed the filter being fed to the filtergraph, and most of the work was already done. At this point all I am doing is displaying the video using SDL, as shown in dranger's tutorial. The cropping filter creates a lot of artifacts, but it at least shows something.

    I have to do some more work to see if it will work with GTK+. I have not taken a detailed look at the differences between the program above and the one in the transcoding example, so I have not yet figured out why my old code does not display filtered frames. Both sets of code use sws_scale(), but I get no errors with the new code, so something must be different. I will update this post once I make more progress.
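
    One difference worth checking (an assumption on my part): after crop and scale, the frame no longer matches dec_ctx->width, dec_ctx->height and dec_ctx->pix_fmt, so a scaler built from the decoder parameters would be handed mismatched planes. A sketch that derives the context from the frame actually passed in, using sws_getCachedContext():

    /* Sketch: size the scaler from the incoming frame rather than dec_ctx;
     * sws_getCachedContext() reuses the old context when nothing changed. */
    static struct SwsContext *display_sws = NULL;

    display_sws = sws_getCachedContext(display_sws,
            frame->width, frame->height, frame->format,
            frame->width, frame->height, AV_PIX_FMT_RGB24,
            SWS_BILINEAR, NULL, NULL, NULL);
    sws_scale(display_sws,
              (uint8_t const * const *)frame->data, frame->linesize,
              0, frame->height, filt->data, filt->linesize);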

    EDIT 2:
    Added a small compilable example that should work, as per @drahnr’s request. I have not had the chance to try out replacing GdkPixbuf.

     #define _XOPEN_SOURCE 600
     #include <libavformat/avformat.h>
     #include <libavformat/avio.h>
     #include <libavfilter/avfiltergraph.h>
     #include <libavfilter/avcodec.h>
     #include <libavfilter/buffersink.h>
     #include <libavfilter/buffersrc.h>
     #include <libavcodec/avcodec.h>
     #include <libswscale/swscale.h>
     #include <libavutil/avstring.h>
     #include <libavutil/time.h>
     #include <libavutil/opt.h>
     #include <unistd.h>   /* for usleep() */

     #include <gtk/gtk.h>
     #include <gdk/gdkx.h>


    GtkWidget *image;
    GtkWidget *window;

    struct SwsContext *sws_ctx;
    char *filter_descr = "crop=100:100,scale=640:360";
    static AVFormatContext *fmt_ctx;
    static AVCodecContext *dec_ctx;
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;
    static int video_stream_index = -1;

    static void open_input_file(const char *filename)
    {
       AVCodec *dec;
       avformat_open_input(&amp;fmt_ctx, filename, NULL, NULL);
       avformat_find_stream_info(fmt_ctx, NULL);
       video_stream_index = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &amp;dec, 0);
       dec_ctx = fmt_ctx->streams[video_stream_index]->codec;
       av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
       avcodec_open2(dec_ctx, dec, NULL);
    }


    static void init_filters(const char *filters_descr)
    {
       char args[512];
       AVFilter *buffersrc  = avfilter_get_by_name("buffer");
       AVFilter *buffersink = avfilter_get_by_name("buffersink");
       AVFilterInOut *outputs = avfilter_inout_alloc();
       AVFilterInOut *inputs  = avfilter_inout_alloc();
       AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
       enum AVPixelFormat pix_fmts[] = { PIX_FMT_RGB24, AV_PIX_FMT_NONE };
       filter_graph = avfilter_graph_alloc();
       snprintf(args, sizeof(args),
               "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
               dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
               time_base.num, time_base.den,
               dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
       avfilter_graph_create_filter(&amp;buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph);
       avfilter_graph_create_filter(&amp;buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph);
       av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
       outputs->name       = av_strdup("in");
       outputs->filter_ctx = buffersrc_ctx;
       outputs->pad_idx    = 0;
       outputs->next       = NULL;
       inputs->name       = av_strdup("out");
       inputs->filter_ctx = buffersink_ctx;
       inputs->pad_idx    = 0;
       inputs->next       = NULL;
       avfilter_graph_parse_ptr(filter_graph, filters_descr, &inputs, &outputs, NULL);
       avfilter_graph_config(filter_graph, NULL);
    }

    static void display_frame2(const AVFrame *frame, AVRational time_base) {
       GdkPixbuf *pixbuf;

       AVFrame *filt;
       uint8_t *buffer;
       int num_bytes;
       buffer = NULL;

       filt = av_frame_alloc();
       num_bytes = avpicture_get_size(PIX_FMT_RGB24, dec_ctx->width, dec_ctx->height);
       buffer = (uint8_t *)av_malloc(num_bytes * sizeof(uint8_t));
       avpicture_fill((AVPicture *)filt, buffer, PIX_FMT_RGB24, dec_ctx->width, dec_ctx->height);
       usleep(33670 / 4);
       sws_scale(  sws_ctx,
                   (uint8_t const * const *)frame->data,
                   frame->linesize,
                   0,
                   frame->height,
                   filt->data,
                   filt->linesize);
       pixbuf = gdk_pixbuf_new_from_data(  filt->data[0], GDK_COLORSPACE_RGB,
                                           0, 8, dec_ctx->width, dec_ctx->height,
                                           filt->linesize[0], NULL, NULL);
       gtk_image_set_from_pixbuf((GtkImage *)image, pixbuf);
       free( filt );
       free( buffer );

    }


    void decode(gpointer args) {
       int ret;
       AVPacket packet;
       AVFrame *frame      = av_frame_alloc();
       AVFrame *filt_frame = av_frame_alloc();
       int got_frame;

       while (1) {
           av_read_frame(fmt_ctx, &packet);
           if (packet.stream_index == video_stream_index) {
               got_frame = 0;
               avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
               if (got_frame) {
                   frame->pts = av_frame_get_best_effort_timestamp(frame);
                   if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                       av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                       break;
                   }
                   while (1) {
                       ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                       if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                           break;
                       // Display original frame
                       display_frame2(frame, buffersink_ctx->inputs[0]->time_base);
                       // Display filtered frame
                       // display_frame2(filt_frame, buffersink_ctx->inputs[0]->time_base);
                       av_frame_unref(filt_frame);
                   }
                   av_frame_unref(frame);
               }
           }
           av_free_packet(&amp;packet);
       }

    }

    static void realize_cb(GtkWidget *widget, gpointer data) {
       GThread *tid;
       tid = g_thread_new("video", decode, NULL);
    }

    static void destroy(GtkWidget *widget, gpointer data) {
           gtk_main_quit();
    }

    int main(int argc, char **argv)
    {
       av_register_all();
       avfilter_register_all();
       open_input_file(argv[1]);
       init_filters(filter_descr);
       sws_ctx = NULL;
       sws_ctx = sws_getContext(   dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, dec_ctx->width, dec_ctx->height,
                                       PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL );
       av_dump_format( fmt_ctx, 0, argv[1], 0);

       gtk_init(&amp;argc, &amp;argv);
       window = gtk_window_new( GTK_WINDOW_TOPLEVEL );
       g_signal_connect(window, "realize", G_CALLBACK(realize_cb), NULL);
       g_signal_connect(window, "destroy", G_CALLBACK(destroy), NULL);
       gtk_container_set_border_width(GTK_CONTAINER(window), 10);
       image = gtk_image_new();
       gtk_widget_show(image);
       gtk_container_add(GTK_CONTAINER(window), image);
       gtk_widget_show(window);
       gtk_main();
       return 0;
    }
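
    A side note on the threading (again an assumption, untested): decode() runs on a g_thread_new() worker, but GTK+ widgets are only safe to touch from the main loop, so the pixbuf update would normally be queued with g_idle_add() instead of calling gtk_image_set_from_pixbuf() directly:

    /* Sketch: marshal the widget update onto the GTK+ main loop. */
    static gboolean update_image_cb(gpointer data) {
       GdkPixbuf *pixbuf = data;
       gtk_image_set_from_pixbuf(GTK_IMAGE(image), pixbuf);
       g_object_unref(pixbuf);   /* release the reference taken below */
       return G_SOURCE_REMOVE;   /* one-shot callback */
    }

    /* in display_frame2(), instead of the direct call: */
    g_idle_add(update_image_cb, g_object_ref(pixbuf));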