Recherche avancée

Médias (1)

Mot : - Tags -/getid3

Autres articles (105)

  • Personnaliser en ajoutant son logo, sa bannière ou son image de fond

    5 septembre 2013, par

    Certains thèmes prennent en compte trois éléments de personnalisation : l’ajout d’un logo ; l’ajout d’une bannière ; l’ajout d’une image de fond.

  • Ecrire une actualité

    21 juin 2013, par

    Présentez les changements dans votre MédiaSPIP ou les actualités de vos projets sur votre MédiaSPIP grâce à la rubrique actualités.
    Dans le thème par défaut spipeo de MédiaSPIP, les actualités sont affichées en bas de la page principale sous les éditoriaux.
    Vous pouvez personnaliser le formulaire de création d’une actualité.
    Formulaire de création d’une actualité Dans le cas d’un document de type actualité, les champs proposés par défaut sont : Date de publication ( personnaliser la date de publication ) (...)

  • Publier sur MédiaSpip

    13 juin 2013

    Puis-je poster des contenus à partir d’une tablette Ipad ?
    Oui, si votre Médiaspip installé est à la version 0.2 ou supérieure. Contacter au besoin l’administrateur de votre MédiaSpip pour le savoir

Sur d’autres sites (11568)

  • Video encoding and keyframes

    24 février 2013, par Tishu

    I am transcoding a video frame by frame and using x264+ffmpeg to encode. The original video plays fine, but the first few frames of my transcoded vide show grey artefacts. I understand this is because of time compression and these artefacts disappear after a few frames.

    See these two pictures which are the first and second frames. The third frame is normal (i.e. no grey artefact and not blurry like the second one)
    First frame
    Second frame

    How can I force the first frame to be a key frame (ie fully encoded in my output video) so that these artefacts do not show ?

    Edit - more details

    Here is what I am doing in more detail. I used bits from different tutorials to read a video frame by frame and re-encode each frame to a new video. My encoding parameters are the following :

    /* Encoder configuration for the output stream.
     * Outer-scope names (not visible here): c (AVCodecContext*), st (AVStream*),
     * codec, codec_id, output_* settings, STREAM_PIX_FMT.
     * NOTE(review): st->r_frame_rate and me_method are deprecated in later
     * FFmpeg releases — this targets the old (ca. 2013) API. */
    avcodec_get_context_defaults3(c, *codec);
    c->codec_id = codec_id;
    c->bit_rate = output_bitrate;
    /* Resolution must be a multiple of two. */
    c->width    = output_width;
    c->height   = output_height;
    /* timebase: This is the fundamental unit of time (in seconds) in terms
    * of which frame timestamps are represented. For fixed-fps content,
    * timebase should be 1/framerate and timestamp increments should be
    * identical to 1. */
    st->r_frame_rate.num = output_framerate_num;
    st->r_frame_rate.den = output_framerate_den;
    c->time_base.den = output_timebase_den;
    c->time_base.num = output_timebase_num;
    c->gop_size      = 3; /* emit one intra frame at least every three frames */
    c->pix_fmt       = STREAM_PIX_FMT;
    if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
       /* just for testing, we also add B frames */
       c->max_b_frames = 2;
    }
    if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
       /* Needed to avoid using macroblocks in which some coeffs overflow.
        * This does not happen with normal video, it just happens here as
        * the motion of the chroma plane does not match the luma plane. */
       c->mb_decision = 2;
    }
    /* NOTE(review): this overrides the MPEG2-only setting above for every
     * codec; B-frames delay keyframe display — relevant to the question. */
    c->max_b_frames = 2;
    c->scenechange_threshold = 0;
    c->rc_buffer_size = 0;
    c->me_method = ME_ZERO; /* zero-motion search: fastest, lowest quality */

    Then I process each frame, probably doing something wrong there. The decoding bit :

    /* Demux/decode loop: pull packets, decode video ones, and hand each
     * finished frame plus its container pts to the muxer.
     * NOTE(review): no av_free_packet(&packet) is visible in this loop —
     * unless it is hidden in one of the [...] elisions, every packet's
     * data leaks. */
    while(av_read_frame(gFormatCtx, &packet)>=0) {
       // Is this a packet from the video stream?
       if(packet.stream_index==gVideoStreamIndex) {
           // Decode video frame
           avcodec_decode_video2(gVideoCodecCtx, pCurrentFrame, &frameFinished, &packet);
           // Did we get a video frame?
           if(frameFinished) {
               [...]
               if(firstPts == -999) /*Initial value*/
                   firstPts = packet.pts;
               deltaPts = packet.pts - firstPts;
               // seconds since first frame, in the input stream's time base
               double seconds = deltaPts*av_q2d(gFormatCtx->streams[gVideoStreamIndex]->time_base);
               [...]
               // NOTE(review): packet.pts is in the *input* stream time base;
               // the encoder expects pts in its own time base — confirm.
               muxing_writeVideoFrame(pCurrentFrame, packet.pts);
           }
       }
    }

    The actual writing :

    /* Stamp the decoded frame with the given pts and pass it to
     * write_video_frame() for encoding/muxing.
     * Outer-scope state: frameCount, video_st, audio_st, video_pts,
     * audio_pts, oc.  Always returns 0.
     * NOTE(review): after the increment, frameCount > 0 is always true
     * (assuming frameCount starts at 0 or above), so the guard never
     * skips a frame — presumably leftover scaffolding. */
    int muxing_writeVideoFrame(AVFrame *frame, int64_t pts)
    {
    frameCount = frameCount +1;
    if(frameCount > 0)
    {
       if (video_st)
           video_pts = (double)video_st->pts.val * video_st->time_base.num /
                       video_st->time_base.den;
       else
           video_pts = 0.0;

       /* Interleaving check: only write video when no audio stream exists
        * or audio is not behind video. */
       if (video_st && !(video_st && audio_st && audio_pts < video_pts))
       {
           /* NOTE(review): pts comes straight from the input packet; the
            * commented-out av_rescale_q shows the time-base conversion
            * that is being skipped — likely the source of timing issues. */
           frame->pts = pts;//av_rescale_q(frame_count, video_st->codec->time_base, video_st->time_base);
           write_video_frame(oc, video_st, frame);
       }
    }

    return 0;
    }

    /* Encode one frame with the stream's codec and write the resulting
     * packet (if any) to the muxer.
     *
     * oc    - output format context the packet is written to
     * st    - video stream whose codec context does the encoding
     * frame - frame to encode; frame->pts must already be set
     *
     * Returns got_output (1 if a packet was produced and written, 0 if the
     * encoder buffered the frame).  Exits the process on encode or write
     * failure, matching the surrounding code's error policy.
     * Side effect: increments the file-scope frame_count.
     * (Cleanup: removed an unused static SwsContext and commented-out
     * logging; normalized indentation.) */
    static int write_video_frame(AVFormatContext *oc, AVStream *st, AVFrame *frame)
    {
       int ret;
       AVCodecContext *c = st->codec;

       /* Encode the image; the encoder allocates pkt.data itself. */
       AVPacket pkt;
       int got_output;
       av_init_packet(&pkt);
       pkt.data = NULL;    // packet data will be allocated by the encoder
       pkt.size = 0;

       ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
       if (ret < 0) {
           fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
           exit(1);
       }

       /* If got_output is zero, the image was buffered inside the encoder. */
       if (got_output) {
           if (c->coded_frame->key_frame)
               pkt.flags |= AV_PKT_FLAG_KEY;
           pkt.stream_index = st->index;
           /* Write the compressed frame to the media file. */
           ret = av_interleaved_write_frame(oc, &pkt);
       } else {
           ret = 0;
       }

       if (ret != 0) {
           LOGI(10, av_err2str(ret));
           exit(1);
       }
       frame_count++;
       return got_output;
    }
  • Memory leak in ffmpeg or libav with Mac os x 10.8.2 ?

    26 mars 2013, par James491

    I've been developing a video player with FFmpeg and am experiencing some memory leakage. I am on a Mac OS X 10.8.2 and valgrind is unstable. A large memory leakage, about the size of the video itself, is leaked during av_read_frame. Also, memory leakage occurs with the simple code below, after avformat_find_stream_info. None of my programs, including the small one below, leak on my Windows computer. Any suggestions would be helpful. Should I report a bug to FFmpeg ? I am using the latest version of FFmpeg, 1.1.2. Memory leakage also occurs if I use ffplay, but not if I use VLC.

       #include <libavcodec/avcodec.h>
       #include <libavformat/avformat.h>
       #include <libswscale/swscale.h>
       #include <stdio.h>

       int main(int argc, char* argv[]) {

           char              *video_addr = "/Users/###/Desktop/rawmovie.mov";
           AVFormatContext   *pFormatCtx;

           av_register_all();

           if (avformat_open_input(&amp;pFormatCtx, video_addr, NULL, NULL) != 0) {
               fprintf(stderr, "Could not open the video file\n");
               return -1;
           }

           if (avformat_find_stream_info(pFormatCtx, NULL) &lt; 0) {
               fprintf(stderr, "Couldn&#39;t find stream information\n");
           }

           avformat_close_input(&amp;pFormatCtx);

           return 0;
       }
  • Adding a text on top of video using libavfilter API

    19 février 2013, par prajwal

    I am trying to add a visible text ‘sampletext’ on a video and store in another file called “myvideo.mp4”

     Below is my code (developed from the *doc/examples/filtering_video.c* example).

     #define _XOPEN_SOURCE 600 /* for usleep */
     #include <unistd.h>

     #include <libavcodec/avcodec.h>
     #include <libavformat/avformat.h>
     #include <libavfilter/avfiltergraph.h>
     #include <libavfilter/avcodec.h>
     #include <libavfilter/buffersink.h>
     #include <libavfilter/buffersrc.h>

    const char *filter_descr = "drawtext=fontcolor=white:fontfile=/usr/share/fonts/liberation/LiberationMono-Bold.ttf :rate=30000/1001:text=&#39;sampletext&#39;";

    static AVFormatContext *fmt_ctx;
    static AVCodecContext *dec_ctx;
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;
    static int video_stream_index = -1;
    static int64_t last_pts = AV_NOPTS_VALUE;

    static int open_input_file(const char *filename)
    {
       int ret;
       AVCodec *dec;

       if ((ret = avformat_open_input(&amp;fmt_ctx, filename, NULL, NULL)) &lt; 0) {
           av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
           return ret;
       }

       if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) &lt; 0) {
           av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
           return ret;
       }

       /* select the video stream */
       ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &amp;dec, 0);
       if (ret &lt; 0) {
           av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
           return ret;
       }
       video_stream_index = ret;
       dec_ctx = fmt_ctx->streams[video_stream_index]->codec;

       /* init the video decoder */
       if ((ret = avcodec_open2(dec_ctx, dec, NULL)) &lt; 0) {
           av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
           return ret;
       }

       return 0;
    }

    static int init_filters(const char *filters_descr)
    {
       char args[512];
       int ret;
       AVFilter *buffersrc  = avfilter_get_by_name("buffer");
       AVFilter *buffersink = avfilter_get_by_name("ffbuffersink");
       AVFilterInOut *outputs = avfilter_inout_alloc();
       AVFilterInOut *inputs  = avfilter_inout_alloc();
       enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
       AVBufferSinkParams *buffersink_params;

       filter_graph = avfilter_graph_alloc();

       /* buffer video source: the decoded frames from the decoder will be inserted here. */
       snprintf(args, sizeof(args),
               "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
               dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
               dec_ctx->time_base.num, dec_ctx->time_base.den,
               dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);

       ret = avfilter_graph_create_filter(&amp;buffersrc_ctx, buffersrc, "in",
                                          args, NULL, filter_graph);
       if (ret &lt; 0) {
           av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
           return ret;
       }

       /* buffer video sink: to terminate the filter chain. */
       buffersink_params = av_buffersink_params_alloc();
       buffersink_params->pixel_fmts = pix_fmts;
       ret = avfilter_graph_create_filter(&amp;buffersink_ctx, buffersink, "out",
                                          NULL, buffersink_params, filter_graph);
       av_free(buffersink_params);
       if (ret &lt; 0) {
           av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
           return ret;
       }

       /* Endpoints for the filter graph. */
       outputs->name       = av_strdup("in");
       outputs->filter_ctx = buffersrc_ctx;
       outputs->pad_idx    = 0;
       outputs->next       = NULL;

       inputs->name       = av_strdup("out");
       inputs->filter_ctx = buffersink_ctx;
       inputs->pad_idx    = 0;
       inputs->next       = NULL;

       if ((ret = avfilter_graph_parse(filter_graph, filters_descr,
                                       &amp;inputs, &amp;outputs, NULL)) &lt; 0)
           return ret;

       if ((ret = avfilter_graph_config(filter_graph, NULL)) &lt; 0)
           return ret;
       return 0;
    }

    static void save_picref(AVFilterBufferRef *picref, AVRational time_base)
    {
       int x, y;
       uint8_t *p0, *p;
       int64_t delay;
       FILE *pFile;
     char szFilename[32]="myvideo.mp4";

     // Open file
       pFile=fopen(szFilename, "wb");
     if(pFile==NULL)
       return;

       if (picref->pts != AV_NOPTS_VALUE) {
           if (last_pts != AV_NOPTS_VALUE) {
               /* sleep roughly the right amount of time;
                * usleep is in microseconds, just like AV_TIME_BASE. */
               delay = av_rescale_q(picref->pts - last_pts,
                                    time_base, AV_TIME_BASE_Q);
               if (delay > 0 &amp;&amp; delay &lt; 1000000)
                   usleep(delay);
           }
           last_pts = picref->pts;
       }
     fprintf(pFile, "P6\n%d %d\n255\n", picref->video->w, picref->video->h);
       // Write pixel data
       for (y = 0; y &lt; picref->video->h; y++) {
       p = p0;

       fwrite(picref->data[0], 1, picref->video->w, pFile);

       p0 += picref->linesize[0];
       }
     // Close file
     fclose(pFile);

    }

    int main(int argc, char **argv)
    {
       int ret;
       AVPacket packet;
       AVFrame frame;
       int got_frame;

       if (argc != 2) {
           fprintf(stderr, "Usage: %s file\n", argv[0]);
           exit(1);
       }

       avcodec_register_all();
       av_register_all();
       avfilter_register_all();

       if ((ret = open_input_file(argv[1])) &lt; 0)
           goto end;
       if ((ret = init_filters(filter_descr)) &lt; 0)
           goto end;

       /* read all packets */
       while (1) {
           AVFilterBufferRef *picref;
           if ((ret = av_read_frame(fmt_ctx, &amp;packet)) &lt; 0)
               break;

           if (packet.stream_index == video_stream_index) {
               avcodec_get_frame_defaults(&amp;frame);
               got_frame = 0;
               ret = avcodec_decode_video2(dec_ctx, &amp;frame, &amp;got_frame, &amp;packet);
               if (ret &lt; 0) {
                   av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
                   break;
               }

               if (got_frame) {
                   frame.pts = av_frame_get_best_effort_timestamp(&amp;frame);

                   /* push the decoded frame into the filtergraph */
                   if (av_buffersrc_add_frame(buffersrc_ctx, &amp;frame, 0) &lt; 0) {
                       av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                       break;
                   }

                   /* pull filtered pictures from the filtergraph */
                   while (1) {
                       ret = av_buffersink_get_buffer_ref(buffersink_ctx, &amp;picref, 0);
                       if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                           break;
                       if (ret &lt; 0)
                           goto end;

                       if (picref) {
                           save_picref(picref, buffersink_ctx->inputs[0]->time_base);
                           avfilter_unref_bufferp(&amp;picref);
                       }
                   }
               }
           }
           av_free_packet(&amp;packet);
       }
    end:
       avfilter_graph_free(&amp;filter_graph);
       if (dec_ctx)
           avcodec_close(dec_ctx);
       avformat_close_input(&amp;fmt_ctx);

       if (ret &lt; 0 &amp;&amp; ret != AVERROR_EOF) {
           char buf[1024];
           av_strerror(ret, buf, sizeof(buf));
           fprintf(stderr, "Error occurred: %s\n", buf);
           exit(1);
       }

       exit(0);
    }

    After compilation and execution, when I try to play the video using

     ffplay myvideo.mp4

    I am getting the following error.

     myvideo.mp4: Invalid data found when processing input

     I am unable to resolve the error; can anyone please help me solve it?