Recherche avancée

Médias (1)

Mot : - Tags -/ogg

Autres articles (34)

  • Des sites réalisés avec MediaSPIP

    2 mai 2011, par

    Cette page présente quelques-uns des sites fonctionnant sous MediaSPIP.
    Vous pouvez bien entendu ajouter le votre grâce au formulaire en bas de page.

  • Support audio et vidéo HTML5

    10 avril 2011

    MediaSPIP utilise les balises HTML5 video et audio pour la lecture de documents multimedia en profitant des dernières innovations du W3C supportées par les navigateurs modernes.
    Pour les navigateurs plus anciens, le lecteur flash Flowplayer est utilisé.
    Le lecteur HTML5 utilisé a été spécifiquement créé pour MediaSPIP : il est complètement modifiable graphiquement pour correspondre à un thème choisi.
    Ces technologies permettent de distribuer vidéo et son à la fois sur des ordinateurs conventionnels (...)

  • HTML5 audio and video support

    13 avril 2011, par

    MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
    The MediaSPIP player used has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
    For older browsers the Flowplayer flash fallback is used.
    MediaSPIP allows for media playback on major mobile platforms with the above (...)

Sur d’autres sites (3741)

  • How to generate a fixed duration and fps for a video using FFmpeg C++ libraries ? [closed]

    4 novembre 2024, par BlueSky Light Programmer

    I'm following the official muxing example to write a C++ class that generates a video with a fixed duration (5s) and a fixed fps (60). For some reason, the duration of the output video is 3-4 seconds, although I call the function to write frames 300 times and set the fps to 60.

    


    Can you take a look at the code below and spot what I'm doing wrong ?

    


    #include "ffmpeg.h"&#xA;&#xA;#include <iostream>&#xA;&#xA;static int writeFrame(AVFormatContext *fmt_ctx, AVCodecContext *c,&#xA;                      AVStream *st, AVFrame *frame, AVPacket *pkt);&#xA;&#xA;static void addStream(OutputStream *ost, AVFormatContext *formatContext,&#xA;                      const AVCodec **codec, enum AVCodecID codec_id,&#xA;                      int width, int height, int fps);&#xA;&#xA;static AVFrame *allocFrame(enum AVPixelFormat pix_fmt, int width, int height);&#xA;&#xA;static void openVideo(AVFormatContext *formatContext, const AVCodec *codec,&#xA;                      OutputStream *ost, AVDictionary *opt_arg);&#xA;&#xA;static AVFrame *getVideoFrame(OutputStream *ost,&#xA;                              const std::vector<glubyte>&amp; pixels,&#xA;                              int duration);&#xA;&#xA;static int writeVideoFrame(AVFormatContext *formatContext,&#xA;                           OutputStream *ost,&#xA;                           const std::vector<glubyte>&amp; pixels,&#xA;                           int duration);&#xA;&#xA;static void closeStream(AVFormatContext *formatContext, OutputStream *ost);&#xA;&#xA;static void fillRGBImage(AVFrame *frame, int width, int height,&#xA;                         const std::vector<glubyte>&amp; pixels);&#xA;&#xA;#ifdef av_err2str&#xA;#undef av_err2str&#xA;#include <string>&#xA;av_always_inline std::string av_err2string(int errnum) {&#xA;  char str[AV_ERROR_MAX_STRING_SIZE];&#xA;  return av_make_error_string(str, AV_ERROR_MAX_STRING_SIZE, errnum);&#xA;}&#xA;#define av_err2str(err) av_err2string(err).c_str()&#xA;#endif  // av_err2str&#xA;&#xA;FFmpeg::FFmpeg(int width, int height, int fps, const char *fileName)&#xA;: videoStream{ 0 }&#xA;, formatContext{ nullptr } {&#xA;  const AVOutputFormat *outputFormat;&#xA;  const AVCodec *videoCodec{ nullptr };&#xA;  AVDictionary *opt{ nullptr };&#xA;  int ret{ 0 };&#xA;&#xA;  av_dict_set(&amp;opt, "crf", "17", 0);&#xA;&#xA;  /* Allocate the output 
media context. */&#xA;  avformat_alloc_output_context2(&amp;this->formatContext, nullptr, nullptr, fileName);&#xA;  if (!this->formatContext) {&#xA;    std::cout &lt;&lt; "Could not deduce output format from file extension: using MPEG." &lt;&lt; std::endl;&#xA;    avformat_alloc_output_context2(&amp;this->formatContext, nullptr, "mpeg", fileName);&#xA;  &#xA;    if (!formatContext)&#xA;      exit(-14);&#xA;  }&#xA;&#xA;  outputFormat = this->formatContext->oformat;&#xA;&#xA;  /* Add the video stream using the default format codecs&#xA;   * and initialize the codecs. */&#xA;  if (outputFormat->video_codec == AV_CODEC_ID_NONE) {&#xA;    std::cout &lt;&lt; "The output format doesn&#x27;t have a default codec video." &lt;&lt; std::endl;&#xA;    exit(-15);&#xA;  }&#xA;&#xA;  addStream(&#xA;    &amp;this->videoStream,&#xA;    this->formatContext,&#xA;    &amp;videoCodec,&#xA;    outputFormat->video_codec,&#xA;    width,&#xA;    height,&#xA;    fps&#xA;  );&#xA;  openVideo(this->formatContext, videoCodec, &amp;this->videoStream, opt);&#xA;  av_dump_format(this->formatContext, 0, fileName, 1);&#xA;  &#xA;  /* open the output file, if needed */&#xA;  if (!(outputFormat->flags &amp; AVFMT_NOFILE)) {&#xA;    ret = avio_open(&amp;this->formatContext->pb, fileName, AVIO_FLAG_WRITE);&#xA;    if (ret &lt; 0) {&#xA;      std::cout &lt;&lt; "Could not open &#x27;" &lt;&lt; fileName &lt;&lt; "&#x27;: " &lt;&lt; std::string{ av_err2str(ret) } &lt;&lt; std::endl;&#xA;      exit(-16);&#xA;    }&#xA;  }&#xA;&#xA;  /* Write the stream header, if any. */&#xA;  ret = avformat_write_header(this->formatContext, &amp;opt);&#xA;  if (ret &lt; 0) {&#xA;    std::cout &lt;&lt; "Error occurred when opening output file: " &lt;&lt; av_err2str(ret) &lt;&lt; std::endl;&#xA;    exit(-17);&#xA;  }&#xA;&#xA;  av_dict_free(&amp;opt);&#xA;}&#xA;&#xA;FFmpeg::~FFmpeg() {&#xA;  if (this->formatContext) {&#xA;    /* Close codec. 
*/&#xA;    closeStream(this->formatContext, &amp;this->videoStream);&#xA;&#xA;    if (!(this->formatContext->oformat->flags &amp; AVFMT_NOFILE)) {&#xA;      /* Close the output file. */&#xA;      avio_closep(&amp;this->formatContext->pb);&#xA;    }&#xA;&#xA;    /* free the stream */&#xA;    avformat_free_context(this->formatContext);&#xA;  }&#xA;}&#xA;&#xA;void FFmpeg::Record(&#xA;  const std::vector<glubyte>&amp; pixels,&#xA;  unsigned frameIndex,&#xA;  int duration,&#xA;  bool isLastIndex&#xA;) {&#xA;  static bool encodeVideo{ true };&#xA;  if (encodeVideo)&#xA;    encodeVideo = !writeVideoFrame(this->formatContext,&#xA;                                   &amp;this->videoStream,&#xA;                                   pixels,&#xA;                                   duration);&#xA;&#xA;  if (isLastIndex) {&#xA;    av_write_trailer(this->formatContext);&#xA;    encodeVideo = false;&#xA;  }&#xA;}&#xA;&#xA;int writeFrame(AVFormatContext *fmt_ctx, AVCodecContext *c,&#xA;               AVStream *st, AVFrame *frame, AVPacket *pkt) {&#xA;  int ret;&#xA;&#xA;  // send the frame to the encoder&#xA;  ret = avcodec_send_frame(c, frame);&#xA;  if (ret &lt; 0) {&#xA;    std::cout &lt;&lt; "Error sending a frame to the encoder: " &lt;&lt; av_err2str(ret) &lt;&lt; std::endl;&#xA;    exit(-2);&#xA;  }&#xA;&#xA;  while (ret >= 0) {&#xA;    ret = avcodec_receive_packet(c, pkt);&#xA;    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)&#xA;      break;&#xA;    else if (ret &lt; 0) {&#xA;      std::cout &lt;&lt; "Error encoding a frame: " &lt;&lt; av_err2str(ret) &lt;&lt; std::endl;&#xA;      exit(-3);&#xA;    }&#xA;&#xA;    /* rescale output packet timestamp values from codec to stream timebase */&#xA;    av_packet_rescale_ts(pkt, c->time_base, st->time_base);&#xA;    pkt->stream_index = st->index;&#xA;&#xA;    /* Write the compressed frame to the media file. 
*/&#xA;    ret = av_interleaved_write_frame(fmt_ctx, pkt);&#xA;    /* pkt is now blank (av_interleaved_write_frame() takes ownership of&#xA;     * its contents and resets pkt), so that no unreferencing is necessary.&#xA;     * This would be different if one used av_write_frame(). */&#xA;    if (ret &lt; 0) {&#xA;      std::cout &lt;&lt; "Error while writing output packet: " &lt;&lt; av_err2str(ret) &lt;&lt; std::endl;&#xA;      exit(-4);&#xA;    }&#xA;  }&#xA;&#xA;  return ret == AVERROR_EOF ? 1 : 0;&#xA;}&#xA;&#xA;void addStream(OutputStream *ost, AVFormatContext *formatContext,&#xA;               const AVCodec **codec, enum AVCodecID codec_id,&#xA;               int width, int height, int fps) {&#xA;  AVCodecContext *c;&#xA;  int i;&#xA;&#xA;  /* find the encoder */&#xA;  *codec = avcodec_find_encoder(codec_id);&#xA;  if (!(*codec)) {&#xA;    std::cout &lt;&lt; "Could not find encoder for " &lt;&lt; avcodec_get_name(codec_id) &lt;&lt; "." &lt;&lt; std::endl;&#xA;    exit(-5);&#xA;  }&#xA;&#xA;  ost->tmpPkt = av_packet_alloc();&#xA;  if (!ost->tmpPkt) {&#xA;    std::cout &lt;&lt; "Could not allocate AVPacket." &lt;&lt; std::endl;&#xA;    exit(-6);&#xA;  }&#xA;&#xA;  ost->st = avformat_new_stream(formatContext, nullptr);&#xA;  if (!ost->st) {&#xA;    std::cout &lt;&lt; "Could not allocate stream." &lt;&lt; std::endl;&#xA;    exit(-7);&#xA;  }&#xA;&#xA;  ost->st->id = formatContext->nb_streams-1;&#xA;  c = avcodec_alloc_context3(*codec);&#xA;  if (!c) {&#xA;    std::cout &lt;&lt; "Could not alloc an encoding context." &lt;&lt; std::endl;&#xA;    exit(-8);&#xA;  }&#xA;  ost->enc = c;&#xA;&#xA;  switch ((*codec)->type) {&#xA;  case AVMEDIA_TYPE_VIDEO:&#xA;    c->codec_id = codec_id;&#xA;    c->bit_rate = 6000000;&#xA;    /* Resolution must be a multiple of two. */&#xA;    c->width    = width;&#xA;    c->height   = height;&#xA;    /* timebase: This is the fundamental unit of time (in seconds) in terms&#xA;      * of which frame timestamps are represented. 
For fixed-fps content,&#xA;      * timebase should be 1/framerate and timestamp increments should be&#xA;      * identical to 1. */&#xA;    ost->st->time_base = { 1, fps };&#xA;    c->time_base       = ost->st->time_base;&#xA;    c->framerate       = { fps, 1 };&#xA;&#xA;    c->gop_size      = 0; /* emit one intra frame every twelve frames at most */&#xA;    c->pix_fmt       = AV_PIX_FMT_YUV420P;&#xA;    if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {&#xA;      /* just for testing, we also add B-frames */&#xA;      c->max_b_frames = 2;&#xA;    }&#xA;    if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {&#xA;      /* Needed to avoid using macroblocks in which some coeffs overflow.&#xA;      *  This does not happen with normal video, it just happens here as&#xA;      *  the motion of the chroma plane does not match the luma plane.*/&#xA;      c->mb_decision = 2;&#xA;    }&#xA;    break;&#xA;&#xA;  default:&#xA;    break;&#xA;  }&#xA;&#xA;  /* Some formats want stream headers to be separate. */&#xA;  if (formatContext->oformat->flags &amp; AVFMT_GLOBALHEADER)&#xA;    c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;&#xA;}&#xA;&#xA;AVFrame *allocFrame(enum AVPixelFormat pix_fmt, int width, int height) {&#xA;  AVFrame *frame{ av_frame_alloc() };&#xA;  int ret;&#xA;&#xA;  if (!frame)&#xA;    return nullptr;&#xA;&#xA;  frame->format = pix_fmt;&#xA;  frame->width  = width;&#xA;  frame->height = height;&#xA;&#xA;  /* allocate the buffers for the frame data */&#xA;  ret = av_frame_get_buffer(frame, 0);&#xA;  if (ret &lt; 0) {&#xA;    std::cout &lt;&lt; "Could not allocate frame data." 
&lt;&lt; std::endl;&#xA;    exit(-8);&#xA;  }&#xA;&#xA;  return frame;&#xA;}&#xA;&#xA;void openVideo(AVFormatContext *formatContext, const AVCodec *codec,&#xA;               OutputStream *ost, AVDictionary *opt_arg) {&#xA;  int ret;&#xA;  AVCodecContext *c{ ost->enc };&#xA;  AVDictionary *opt{ nullptr };&#xA;&#xA;  av_dict_copy(&amp;opt, opt_arg, 0);&#xA;&#xA;  /* open the codec */&#xA;  ret = avcodec_open2(c, codec, &amp;opt);&#xA;  av_dict_free(&amp;opt);&#xA;  if (ret &lt; 0) {&#xA;    std::cout &lt;&lt; "Could not open video codec: " &lt;&lt; av_err2str(ret) &lt;&lt; std::endl;&#xA;    exit(-9);&#xA;  }&#xA;&#xA;  /* Allocate and init a re-usable frame. */&#xA;  ost->frame = allocFrame(c->pix_fmt, c->width, c->height);&#xA;  if (!ost->frame) {&#xA;    std::cout &lt;&lt; "Could not allocate video frame." &lt;&lt; std::endl;&#xA;    exit(-10);&#xA;  }&#xA;&#xA;  /* If the output format is not RGB24, then a temporary RGB24&#xA;   * picture is needed too. It is then converted to the required&#xA;   * output format. */&#xA;  ost->tmpFrame = nullptr;&#xA;  if (c->pix_fmt != AV_PIX_FMT_RGB24) {&#xA;    ost->tmpFrame = allocFrame(AV_PIX_FMT_RGB24, c->width, c->height);&#xA;    if (!ost->tmpFrame) {&#xA;      std::cout &lt;&lt; "Could not allocate temporary video frame." &lt;&lt; std::endl;&#xA;      exit(-11);&#xA;    }&#xA;  }&#xA;&#xA;  /* Copy the stream parameters to the muxer. */&#xA;  ret = avcodec_parameters_from_context(ost->st->codecpar, c);&#xA;  if (ret &lt; 0) {&#xA;    std::cout &lt;&lt; "Could not copy the stream parameters." 
&lt;&lt; std::endl;&#xA;    exit(-12);&#xA;  }&#xA;}&#xA;&#xA;AVFrame *getVideoFrame(OutputStream *ost,&#xA;                       const std::vector<glubyte>&amp; pixels,&#xA;                       int duration) {&#xA;  AVCodecContext *c{ ost->enc };&#xA;&#xA;  /* check if we want to generate more frames */&#xA;  if (av_compare_ts(ost->nextPts, c->time_base,&#xA;                    duration, { 1, 1 }) > 0) {&#xA;    return nullptr;&#xA;  }&#xA;&#xA;  /* when we pass a frame to the encoder, it may keep a reference to it&#xA;    * internally; make sure we do not overwrite it here */&#xA;  if (av_frame_make_writable(ost->frame) &lt; 0) {&#xA;    std::cout &lt;&lt; "It wasn&#x27;t possible to make frame writable." &lt;&lt; std::endl;&#xA;    exit(-12);&#xA;  }&#xA;&#xA;  if (c->pix_fmt != AV_PIX_FMT_RGB24) {&#xA;      /* as we only generate a YUV420P picture, we must convert it&#xA;        * to the codec pixel format if needed */&#xA;      if (!ost->swsContext) {&#xA;        ost->swsContext = sws_getContext(c->width, c->height,&#xA;                                         AV_PIX_FMT_RGB24,&#xA;                                         c->width, c->height,&#xA;                                         c->pix_fmt,&#xA;                                         SWS_BICUBIC, nullptr, nullptr, nullptr);&#xA;        if (!ost->swsContext) {&#xA;          std::cout &lt;&lt; "Could not initialize the conversion context." 
&lt;&lt; std::endl;&#xA;          exit(-13);&#xA;        }&#xA;      }&#xA;&#xA;      fillRGBImage(ost->tmpFrame, c->width, c->height, pixels);&#xA;      sws_scale(ost->swsContext, (const uint8_t * const *) ost->tmpFrame->data,&#xA;                ost->tmpFrame->linesize, 0, c->height, ost->frame->data,&#xA;                ost->frame->linesize);&#xA;  } else&#xA;    fillRGBImage(ost->frame, c->width, c->height, pixels);&#xA;&#xA;  ost->frame->pts = ost->nextPts&#x2B;&#x2B;;&#xA;&#xA;  return ost->frame;&#xA;}&#xA;&#xA;int writeVideoFrame(AVFormatContext *formatContext,&#xA;                    OutputStream *ost,&#xA;                    const std::vector<glubyte>&amp; pixels,&#xA;                    int duration) {&#xA;  return writeFrame(formatContext,&#xA;                    ost->enc,&#xA;                    ost->st,&#xA;                    getVideoFrame(ost, pixels, duration),&#xA;                    ost->tmpPkt);&#xA;}&#xA;&#xA;void closeStream(AVFormatContext *formatContext, OutputStream *ost) {&#xA;  avcodec_free_context(&amp;ost->enc);&#xA;  av_frame_free(&amp;ost->frame);&#xA;  av_frame_free(&amp;ost->tmpFrame);&#xA;  av_packet_free(&amp;ost->tmpPkt);&#xA;  sws_freeContext(ost->swsContext);&#xA;}&#xA;&#xA;static void fillRGBImage(AVFrame *frame, int width, int height,&#xA;                         const std::vector<glubyte>&amp; pixels) {&#xA;  // Copy pixel data into the frame&#xA;  int inputLineSize{ 3 * width };  // 3 bytes per pixel for RGB&#xA;  for (int y{ 0 }; y &lt; height; &#x2B;&#x2B;y) {&#xA;    memcpy(frame->data[0] &#x2B; y * frame->linesize[0],&#xA;           pixels.data() &#x2B; y * inputLineSize,&#xA;           inputLineSize);&#xA;  }&#xA;}&#xA;</glubyte></glubyte></glubyte></glubyte></string></glubyte></glubyte></glubyte></iostream>

    &#xA;

  • ffmpeg got stuck while trying to crossfade merge two videos

    30 juin 2017, par Jeflopo

    I’m trying to do a crossfade merge (1s) between two videos: an intro video (39 seconds long) and the main video. When I executed the command it started working without throwing errors, but at some frame ffmpeg gets stuck.

    I read a lot of q/a here in stackoverflow, and the official docs but I can’t solve this so :

    This is the command :

    ffmpeg -i "inputs/intro.mp4" -i "inputs/240p.mp4" -an -filter_complex \
       "[0:v]trim=start=0:end=38,setpts=PTS-STARTPTS[firstclip]; \
       [0:v]trim=start=38:end=39,setpts=PTS-STARTPTS[fadeoutsrc]; \
       [1:v]trim=start=1,setpts=PTS-STARTPTS[secondclip]; \
       [1:v]trim=start=0:end=1,setpts=PTS-STARTPTS[fadeinsrc]; \
       [fadeinsrc]format=pix_fmts=yuva420p, fade=t=in:st=0:d=1:alpha=1[fadein]; \
       [fadeoutsrc]format=pix_fmts=yuva420p, fade=t=out:st=0:d=1:alpha=1[fadeout]; \
       [fadein]fifo[fadeinfifo]; \
       [fadeout]fifo[fadeoutfifo]; \
       [fadeoutfifo][fadeinfifo]overlay[crossfade]; \
       [firstclip][crossfade][secondclip]concat=n=3[output]; \
       [0:a][1:a] acrossfade=d=1 [audio]" -vcodec libx264 -map "[output]" -map "[audio]" "outputs/240p.mp4"

    Here’s the raw command (the exact command I used) :

    ffmpeg -i "inputs/intro.mp4" -i "inputs/240p.mp4" -an -filter_complex "[0:v]trim=start=0:end=38,setpts=PTS-STARTPTS[firstclip]; [0:v]trim=start=38:end=39,setpts=PTS-STARTPTS[fadeoutsrc]; [1:v]trim=start=1,setpts=PTS-STARTPTS[secondclip]; [1:v]trim=start=0:end=1,setpts=PTS-STARTPTS[fadeinsrc]; [fadeinsrc]format=pix_fmts=yuva420p, fade=t=in:st=0:d=1:alpha=1[fadein]; [fadeoutsrc]format=pix_fmts=yuva420p, fade=t=out:st=0:d=1:alpha=1[fadeout]; [fadein]fifo[fadeinfifo]; [fadeout]fifo[fadeoutfifo]; [fadeoutfifo][fadeinfifo]overlay[crossfade]; [firstclip][crossfade][secondclip]concat=n=3[output]; [0:a][1:a] acrossfade=d=1 [audio]" -vcodec libx264 -map "[output]" -map "[audio]" "outputs/240p.mp4"

    The "error" is reproducible with and without the -an and the acrossfade filters.

    This is the output :

    PS C:\scripts\ffmpeg> ffmpeg -i "inputs/intro.mp4" -i "inputs/240p.mp4" -an -filter_complex "[0:v]trim=start=0:end=38,setpts=PTS-STARTPTS[firstclip]; [0:v]trim=start=38:end=39,setpts=PTS-STARTPTS[fadeoutsrc]; [1:v]trim=start=1,setpts=PTS-STARTPTS[secondclip]; [1:v]trim=start=0:end=1,setpts=PTS-STARTPTS[fadeinsrc]; [fadeinsrc]format=pix_fmts=yuva420p, fade=t=in:st=0:d=1:alpha=1[fadein]; [fadeoutsrc]format=pix_fmts=yuva420p, fade=t=out:st=0:d=1:alpha=1[fadeout]; [fadein]fifo[fadeinfifo]; [fadeout]fifo[fadeoutfifo]; [fadeoutfifo][fadeinfifo]overlay[crossfade]; [firstclip][crossfade][secondclip]concat=n=3[output]; [0:a][1:a] acrossfade=d=1 [audio]" -map "[output]" -map "[audio]" "outputs/240p.mp4"
    ffmpeg version N-86669-gc1d1274 Copyright (c) 2000-2017 the FFmpeg developers
     built with gcc 7.1.0 (GCC)
     configuration: --enable-gpl --enable-version3 --enable-cuda --enable-cuvid --enable-d3d11va --enable-dxva2 --enable-libmfx --enable-nvenc --enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme --enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenh264 --enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs --enable-libxvid --enable-libzimg --enable-lzma --enable-zlib
     libavutil      55. 67.100 / 55. 67.100
     libavcodec     57.100.102 / 57.100.102
     libavformat    57. 75.100 / 57. 75.100
     libavdevice    57.  7.100 / 57.  7.100
     libavfilter     6. 94.100 /  6. 94.100
     libswscale      4.  7.101 /  4.  7.101
     libswresample   2.  8.100 /  2.  8.100
     libpostproc    54.  6.100 / 54.  6.100
    Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'inputs/intro.mp4':
     Metadata:
       major_brand     : isom
       minor_version   : 512
       compatible_brands: isomiso2avc1mp41
       encoder         : Lavf57.72.101
     Duration: 00:06:24.45, start: 0.000000, bitrate: 491 kb/s
       Stream #0:0(und): Video: h264 (High) (avc1 / 0x31637661), yuv420p, 426x240 [SAR 1:1 DAR 71:40], 353 kb/s, 25 fps, 25 tbr, 12800 tbn, 50 tbc (default)
       Metadata:
         handler_name    : VideoHandler
       Stream #0:1(und): Audio: aac (LC) (mp4a / 0x6134706D), 44100 Hz, stereo, fltp, 130 kb/s (default)
       Metadata:
         handler_name    : SoundHandler
    Input #1, mov,mp4,m4a,3gp,3g2,mj2, from 'inputs/240p.mp4':
     Metadata:
       major_brand     : isom
       minor_version   : 512
       compatible_brands: isomiso2avc1mp41
       encoder         : Lavf56.40.101
     Duration: 00:06:24.43, start: 0.000000, bitrate: 375 kb/s
       Stream #1:0(und): Video: h264 (Main) (avc1 / 0x31637661), yuv420p, 426x240 [SAR 1:1 DAR 71:40], 243 kb/s, 25 fps, 25 tbr, 90k tbn, 50 tbc (default)
       Metadata:
         handler_name    : VideoHandler
       Stream #1:1(und): Audio: aac (LC) (mp4a / 0x6134706D), 44100 Hz, stereo, fltp, 125 kb/s (default)
       Metadata:
         handler_name    : SoundHandler
    Stream mapping:
     Stream #0:0 (h264) -> trim
     Stream #0:0 (h264) -> trim
     Stream #0:1 (aac) -> acrossfade:crossfade0
     Stream #1:0 (h264) -> trim
     Stream #1:0 (h264) -> trim
     Stream #1:1 (aac) -> acrossfade:crossfade1
     concat -> Stream #0:0 (libx264)
     acrossfade -> Stream #0:1 (aac)
    Press [q] to stop, [?] for help
    [libx264 @ 00000000026b2240] using SAR=1/1
    [libx264 @ 00000000026b2240] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2
    [libx264 @ 00000000026b2240] profile High, level 2.1
    [libx264 @ 00000000026b2240] 264 - core 152 r2851 ba24899 - H.264/MPEG-4 AVC codec - Copyleft 2003-2017 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=-2 threads=7 lookahead_threads=1 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=2 keyint=250 keyint_min=25 scenecut=40 intra_refresh=0 rc_lookahead=40 rc=crf mbtree=1 crf=23.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
    Output #0, mp4, to 'outputs/240p.mp4':
     Metadata:
       major_brand     : isom
       minor_version   : 512
       compatible_brands: isomiso2avc1mp41
       encoder         : Lavf57.75.100
       Stream #0:0: Video: h264 (libx264) ([33][0][0][0] / 0x0021), yuv420p, 426x240 [SAR 1:1 DAR 71:40], q=-1--1, 25 fps, 12800 tbn, 25 tbc (default)
       Metadata:
         encoder         : Lavc57.100.102 libx264
       Side data:
         cpb: bitrate max/min/avg: 0/0/0 buffer size: 0 vbv_delay: -1
       Stream #0:1: Audio: aac (LC) ([64][0][0][0] / 0x0040), 44100 Hz, stereo, fltp, 128 kb/s (default)
       Metadata:
         encoder         : Lavc57.100.102 aac
    frame=10369 fps=503 q=28.0 size=   24064kB time=00:06:55.68 bitrate= 474.2kbits/s speed=20.2x

    At around frame 10000 it gets stuck... I waited for 1 hour but it stays stuck.

    I’ve updated ffmpeg :

    ffmpeg -version
    ffmpeg version N-86669-gc1d1274 Copyright (c) 2000-2017 the FFmpeg developers
    built with gcc 7.1.0 (GCC)
    configuration: --enable-gpl --enable-version3 --enable-cuda --enable-cuvid --enable-d3d11va --enable-dxva2 --enable-libmfx --enable-nvenc --enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme --enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenh264 --enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs --enable-libxvid --enable-libzimg --enable-lzma --enable-zlib
    libavutil      55. 67.100 / 55. 67.100
    libavcodec     57.100.102 / 57.100.102
    libavformat    57. 75.100 / 57. 75.100
    libavdevice    57.  7.100 / 57.  7.100
    libavfilter     6. 94.100 /  6. 94.100
    libswscale      4.  7.101 /  4.  7.101
    libswresample   2.  8.100 /  2.  8.100
    libpostproc    54.  6.100 / 54.  6.100

    I used these references :

  • FFmpeg encoding produces slightly incompatible MKV/MP4 container

    11 juin 2018, par james2048

    I’ve been using the FFmpeg libraries to read and write media files using the C API.

    So far, reading seems to be pretty straightforward. I am able to read frames which I can then process, convert to RGB, process, and then convert back to YUV420 to be encoded.

    The encoded files play back with VLC media player fine, and Windows Media Player if I have a codec pack installed. However, they do behave strangely : the stock Windows 10 player won’t play them, same for Adobe Premiere. Also thumbnailers don’t work on it.
    Basically it seems like nothing other than VLC or FFmpeg itself can play/process the file. I have seen this with both MP4 and MKV, so it is not a format-specific issue.

    The problems go away once you remux the file with FFmpeg, for example "ffmpeg -i input.mkv -c copy output.mkv". Everything can play the file correctly.
    Also, the "remuxing.c" sample from the official samples works as well, with the same library version and compilers that I’m using (Visual Studio 2017, FFmpeg compiled with MinGW). It will fix the file and make it playable in all software.

    I’m not sure what could be causing this. I also don’t understand what the remuxing "fixed". It must be a container issue as the frames aren’t touched by remuxing.
    I have analysed the output MKVs with FFprobe -show_packets. It seems to have nudged the packet timestamps by a small constant factor, and the output stream now has
    is_avc=true and nal_length_size=4 instead of is_avc=false and nal_length_size=0, but apart from that the files are identical.

    Now here’s the output of FFprobe with the 3 last test packets, stream info and format info for both streams. As you can see, they are identical except for a couple of fields. But something in here must have been "fixed" during remuxing to make it work.

    [PACKET]
    codec_type=video
    stream_index=0
    pts=59050
    pts_time=59.050000
    dts=58890
    dts_time=58.890000
    duration=1
    duration_time=0.001000
    convergence_duration=N/A
    convergence_duration_time=N/A
    size=427
    pos=277358
    flags=__
    [/PACKET]
    [PACKET]
    codec_type=video
    stream_index=0
    pts=58970
    pts_time=58.970000
    dts=58970
    dts_time=58.970000
    duration=1
    duration_time=0.001000
    convergence_duration=N/A
    convergence_duration_time=N/A
    size=205
    pos=277792
    flags=__
    [/PACKET]
    [PACKET]
    codec_type=video
    stream_index=0
    pts=59130
    pts_time=59.130000
    dts=59050
    dts_time=59.050000
    duration=1
    duration_time=0.001000
    convergence_duration=N/A
    convergence_duration_time=N/A
    size=268
    pos=278004
    flags=__
    [/PACKET]
    [STREAM]
    index=0
    codec_name=h264
    codec_long_name=H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10
    profile=Main
    codec_type=video
    codec_time_base=1/2000
    codec_tag_string=[0][0][0][0]
    codec_tag=0x0000
    width=720
    height=576
    coded_width=720
    coded_height=576
    has_b_frames=2
    sample_aspect_ratio=N/A
    display_aspect_ratio=N/A
    pix_fmt=yuv420p
    level=50
    color_range=unknown
    color_space=unknown
    color_transfer=unknown
    color_primaries=unknown
    chroma_location=left
    field_order=progressive
    timecode=N/A
    refs=1
    is_avc=false
    nal_length_size=0
    id=N/A
    r_frame_rate=299/12
    avg_frame_rate=1000/1
    time_base=1/1000
    start_pts=0
    start_time=0.000000
    duration_ts=N/A
    duration=N/A
    bit_rate=N/A
    max_bit_rate=N/A
    bits_per_raw_sample=8
    nb_frames=N/A
    nb_read_frames=N/A
    nb_read_packets=737
    DISPOSITION:default=1
    DISPOSITION:dub=0
    DISPOSITION:original=0
    DISPOSITION:comment=0
    DISPOSITION:lyrics=0
    DISPOSITION:karaoke=0
    DISPOSITION:forced=0
    DISPOSITION:hearing_impaired=0
    DISPOSITION:visual_impaired=0
    DISPOSITION:clean_effects=0
    DISPOSITION:attached_pic=0
    DISPOSITION:timed_thumbnails=0
    TAG:DURATION=00:00:59.211000000
    [/STREAM]
    [FORMAT]
    filename=testEncLeft.mkv
    nb_streams=1
    nb_programs=0
    format_name=matroska,webm
    format_long_name=Matroska / WebM
    start_time=0.000000
    duration=59.211000
    size=278349
    bit_rate=37607
    probe_score=100
    TAG:COMMENT=Slickline Player Export
    TAG:ENCODER=Lavf57.83.100
    [/FORMAT]

    And the info after remuxing, which works :

    [PACKET]
    codec_type=video
    stream_index=0
    pts=59050
    pts_time=59.050000
    dts=58890
    dts_time=58.890000
    duration=1
    duration_time=0.001000
    convergence_duration=N/A
    convergence_duration_time=N/A
    size=427
    pos=277418
    flags=__
    [/PACKET]
    [PACKET]
    codec_type=video
    stream_index=0
    pts=58970
    pts_time=58.970000
    dts=58970
    dts_time=58.970000
    duration=1
    duration_time=0.001000
    convergence_duration=N/A
    convergence_duration_time=N/A
    size=205
    pos=277852
    flags=__
    [/PACKET]
    [PACKET]
    codec_type=video
    stream_index=0
    pts=59130
    pts_time=59.130000
    dts=59050
    dts_time=59.050000
    duration=1
    duration_time=0.001000
    convergence_duration=N/A
    convergence_duration_time=N/A
    size=268
    pos=278064
    flags=__
    [/PACKET]
    [STREAM]
    index=0
    codec_name=h264
    codec_long_name=H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10
    profile=Main
    codec_type=video
    codec_time_base=1/2000
    codec_tag_string=[0][0][0][0]
    codec_tag=0x0000
    width=720
    height=576
    coded_width=720
    coded_height=576
    has_b_frames=2
    sample_aspect_ratio=N/A
    display_aspect_ratio=N/A
    pix_fmt=yuv420p
    level=50
    color_range=unknown
    color_space=unknown
    color_transfer=unknown
    color_primaries=unknown
    chroma_location=left
    field_order=progressive
    timecode=N/A
    refs=1
    is_avc=true
    nal_length_size=4
    id=N/A
    r_frame_rate=299/12
    avg_frame_rate=1000/1
    time_base=1/1000
    start_pts=0
    start_time=0.000000
    duration_ts=N/A
    duration=N/A
    bit_rate=N/A
    max_bit_rate=N/A
    bits_per_raw_sample=8
    nb_frames=N/A
    nb_read_frames=N/A
    nb_read_packets=737
    DISPOSITION:default=1
    DISPOSITION:dub=0
    DISPOSITION:original=0
    DISPOSITION:comment=0
    DISPOSITION:lyrics=0
    DISPOSITION:karaoke=0
    DISPOSITION:forced=0
    DISPOSITION:hearing_impaired=0
    DISPOSITION:visual_impaired=0
    DISPOSITION:clean_effects=0
    DISPOSITION:attached_pic=0
    DISPOSITION:timed_thumbnails=0
    TAG:DURATION=00:00:59.212000000
    [/STREAM]
    [FORMAT]
    filename=fixedLeft.mkv
    nb_streams=1
    nb_programs=0
    format_name=matroska,webm
    format_long_name=Matroska / WebM
    start_time=0.000000
    duration=59.212000
    size=278409
    bit_rate=37615
    probe_score=100
    TAG:COMMENT=Slickline Player Export
    TAG:ENCODER=Lavf58.12.100
    [/FORMAT]

    Here is how I’m setting up the output context, for reference : it’s pretty standard, following the sample code.

    int ret;

    avformat_alloc_output_context2(&amp;outputFormatCtx, nullptr, nullptr, outFilePath.c_str());

    av_dict_set(&amp;outputFormatCtx->metadata, "comment", "FFmpeg Export", 0);

    if (!outputFormatCtx)
    {
       LOG_AND_THROW("Could not allocate output context");
    }

    outputVideoStream = avformat_new_stream(outputFormatCtx, nullptr);
    outputVideoStream->time_base = AVRational{ 1, AV_TIME_BASE }; // Stream timebase will be used by codec

    if (!outputVideoStream)
    {
       LOG_AND_THROW("Failed allocating output stream");
    }

    // defaults to "libx264"
    AVCodec *outCodec = avcodec_find_encoder_by_name(selectedCodecName.c_str());

    if (!outCodec)
    {
       LOG_AND_THROW("Failed finding output codec");
    }

    AVDictionary *opts = nullptr;

    if (selectedCodecName == "libx264")
    {
       opts = getX264CodecOptions();
    }

    encoderCtx = avcodec_alloc_context3(outCodec);

    if (!encoderCtx)
    {
       LOG_AND_THROW("Failed to allocate the encoder context");
    }

    encoderCtx->width = width;
    encoderCtx->height = height;
    encoderCtx->pix_fmt = AV_PIX_FMT_YUV420P;
    // time base for the frames we will provide to the encoder
    encoderCtx->time_base = AVRational{ 1, AV_TIME_BASE };
    // convert framerate from double to rational
    encoderCtx->framerate = AVRational{ (int)(frameRate * AV_TIME_BASE), AV_TIME_BASE};

    // Match encoderCtx time base for the stream
    outputVideoStream->time_base = encoderCtx->time_base;

    ret = avcodec_open2(encoderCtx, outCodec, &amp;opts);

    if (ret &lt; 0)
    {
       LOG_AND_THROW_PARAM("Cannot open video encoder for stream: %d", ret);
    }

    // Fill in some params for MP4 stream, details about encoder
    ret = avcodec_parameters_from_context(outputVideoStream->codecpar, encoderCtx);

    if (ret &lt; 0)
    {
       LOG_AND_THROW_PARAM("Failed to copy encoder parameters to output stream: %d", ret);
    }

    if (outputFormatCtx->oformat->flags &amp; AVFMT_GLOBALHEADER)
    {
       encoderCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }

    av_dump_format(outputFormatCtx, 0, filePath.c_str(), 1);

    // End of encoder settings, setting up MP4
    if (!(outputFormatCtx->oformat->flags &amp; AVFMT_NOFILE))
    {
       ret = avio_open(&amp;outputFormatCtx->pb, outFilePath.c_str(), AVIO_FLAG_WRITE);

       if (ret &lt; 0)
       {
           LOG_AND_THROW_PARAMSTR("Could not open output file '%s'", outFilePath.c_str());
       }
    }

    ret = avformat_write_header(outputFormatCtx, nullptr);

    if (ret &lt; 0)
    {
       LOG_AND_THROW_PARAM("Error occurred when opening output file for writing: %d", ret);
    }

    Can anyone help me figure out why the container is not playing properly ?

    Thanks in advance.

    -James