Advanced search

Media (91)

Other articles (51)

  • HTML5 audio and video support

    13 April 2011, by

    MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
    The MediaSPIP player used has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
    For older browsers, the Flowplayer Flash fallback is used.
    MediaSPIP allows for media playback on major mobile platforms with the above (...)
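    The detection behind such a fallback can be sketched in a few lines of plain JavaScript. This is an illustrative sketch only; the "player" element ID, the file names, and the flowplayer() call are assumptions, not MediaSPIP's actual code:

    // Prefer the native HTML5 <video> element; fall back to Flash otherwise.
    var probe = document.createElement('video');
    var hasHtml5Video = !!probe.canPlayType &&
        probe.canPlayType('video/webm; codecs="vp8, vorbis"') !== '';

    if (hasHtml5Video) {
        // Modern browser: native playback, stylable to match the site theme.
        document.getElementById('player').innerHTML =
            '<video src="clip.webm" controls></video>';
    } else {
        // Older browser: hand the same clip to the Flowplayer Flash player.
        flowplayer('player', 'flowplayer.swf', { clip: { url: 'clip.mp4' } });
    }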

  • HTML5 audio and video support

    10 April 2011

    MediaSPIP uses the HTML5 video and audio tags to play multimedia documents, taking advantage of the latest W3C innovations supported by modern browsers.
    For older browsers, the Flowplayer Flash player is used.
    The HTML5 player used was created specifically for MediaSPIP: its appearance is fully customizable to match a chosen theme.
    These technologies make it possible to deliver video and sound both to conventional computers (...)

  • Libraries and binaries specific to video and audio processing

    31 January 2010, by

    The following software and libraries are used by SPIPmotion in one way or another.
    Required binaries: FFmpeg: the main encoder, able to transcode almost every type of video and audio file into formats playable on the Internet (see this tutorial for its installation); Oggz-tools: inspection tools for Ogg files; MediaInfo: extracts information from most video and audio formats;
    Complementary, optional binaries: flvtool2: (...)

On other sites (7998)

  • Play video using MSE (Media Source Extensions) in Google Chrome

    23 August 2019, by liyuqihxc

    I’m working on a project that converts an RTSP stream (ffmpeg) and plays it on a web page (SignalR + MSE).

    So far it works pretty much as I expected on the latest versions of Edge and Firefox, but not Chrome.

    Here’s the code:

    public class WebmMediaStreamContext
    {
       private Process _ffProcess;
       private readonly string _cmd;
       private byte[] _initSegment;
       private Task _readMediaStreamTask;
       private CancellationTokenSource _cancellationTokenSource;

       // ffmpeg arguments: encode to VP8 with libvpx (-tile-columns and
       // -frame-parallel are libvpx speed/parallelism options), force a fixed
       // 90-frame keyframe interval so clusters can start on keyframes, mux
       // as WebM in DASH mode (-f webm -dash 1), and write to stdout (pipe:).
       private const string _CmdTemplate = "-i {0} -c:v libvpx -tile-columns 4 -frame-parallel 1 -keyint_min 90 -g 90 -f webm -dash 1 pipe:";

       // 0x1F 0x43 0xB6 0x75 is the Matroska/WebM Cluster element ID; a size
       // field starting with 0x01 is an 8-byte EBML length, so the four bytes
       // following this marker (read below) carry the low 32 bits of the
       // cluster size.
       public static readonly byte[] ClusterStart = { 0x1F, 0x43, 0xB6, 0x75, 0x01, 0x00, 0x00, 0x00 };

       public event EventHandler<ClusterReadyEventArgs> ClusterReadyEvent;

       public WebmMediaStreamContext(string rtspFeed)
       {
           _cmd = string.Format(_CmdTemplate, rtspFeed);
       }

       public async Task StartConverting()
       {
           if (_ffProcess != null)
               throw new InvalidOperationException();

           _ffProcess = new Process();
           _ffProcess.StartInfo = new ProcessStartInfo
           {
               FileName = "ffmpeg/ffmpeg.exe",
               Arguments = _cmd,
               UseShellExecute = false,
               CreateNoWindow = true,
               RedirectStandardOutput = true
           };
           _ffProcess.Start();

           _initSegment = await ParseInitSegmentAndStartReadMediaStream();
       }

       public byte[] GetInitSegment()
       {
           return _initSegment;
       }

       // Find the first cluster, and everything before it is the InitSegment
       private async Task ParseInitSegmentAndStartReadMediaStream()
       {
           Memory<byte> buffer = new byte[10 * 1024];
           int length = 0;
           while (length != buffer.Length)
           {
               length += await _ffProcess.StandardOutput.BaseStream.ReadAsync(buffer.Slice(length));
               int cluster = buffer.Span.IndexOf(ClusterStart);
               if (cluster >= 0)
               {
                   _cancellationTokenSource = new CancellationTokenSource();
                   _readMediaStreamTask = new Task(() => ReadMediaStreamProc(buffer.Slice(cluster, length - cluster).ToArray(), _cancellationTokenSource.Token), _cancellationTokenSource.Token, TaskCreationOptions.LongRunning);
                   _readMediaStreamTask.Start();
                   return buffer.Slice(0, cluster).ToArray();
               }
           }

           throw new InvalidOperationException();
       }

       private void ReadMoreBytes(Span<byte> buffer)
       {
           int size = buffer.Length;
           while (size > 0)
           {
               int len = _ffProcess.StandardOutput.BaseStream.Read(buffer.Slice(buffer.Length - size));
               size -= len;
           }
       }

       // Parse every single cluster and fire ClusterReadyEvent
       private void ReadMediaStreamProc(byte[] bytesRead, CancellationToken cancel)
       {
           Span<byte> buffer = new byte[5 * 1024 * 1024];
           bytesRead.CopyTo(buffer);
           int bufferEmptyIndex = bytesRead.Length;
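           // bufferEmptyIndex marks the end of the valid bytes in the buffer;
           // each loop iteration parses one cluster starting at offset 0 and
           // then resets the index.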

           do
           {
               if (bufferEmptyIndex < ClusterStart.Length + 4)
               {
                   ReadMoreBytes(buffer.Slice(bufferEmptyIndex, 1024));
                   bufferEmptyIndex += 1024;
               }

               int clusterDataSize = BitConverter.ToInt32(
                   buffer.Slice(ClusterStart.Length, 4)
                   .ToArray()
                   .Reverse()
                   .ToArray()
               );
               int clusterSize = ClusterStart.Length + 4 + clusterDataSize;
               if (clusterSize > buffer.Length)
               {
                   byte[] newBuffer = new byte[clusterSize];
                   buffer.Slice(0, bufferEmptyIndex).CopyTo(newBuffer);
                   buffer = newBuffer;
               }

               if (bufferEmptyIndex < clusterSize)
               {
                   ReadMoreBytes(buffer.Slice(bufferEmptyIndex, clusterSize - bufferEmptyIndex));
                   bufferEmptyIndex = clusterSize;
               }

               ClusterReadyEvent?.Invoke(this, new ClusterReadyEventArgs(buffer.Slice(0, bufferEmptyIndex).ToArray()));

               bufferEmptyIndex = 0;
           } while (!cancel.IsCancellationRequested);
       }
    }

    I use ffmpeg to convert the RTSP stream to a VP8 WebM byte stream and parse it into an "Init Segment" (EBML head, info, tracks, ...) and "Media Segments" (the clusters), then send them to the browser via SignalR.

    $(function () {

       var mediaSource = new MediaSource();
       var mimeCodec = 'video/webm; codecs="vp8"';

       var video = document.getElementById('video');

       mediaSource.addEventListener('sourceopen', callback, false);
       function callback(e) {
           var sourceBuffer = mediaSource.addSourceBuffer(mimeCodec);
           var queue = [];
           var blockindex = 0; // counter used by the console.log further down
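           // appendBuffer() throws an InvalidStateError while
           // sourceBuffer.updating is true, so segments that arrive mid-append
           // are queued and drained one per 'updateend' event.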

           sourceBuffer.addEventListener('updateend', function () {
               if (queue.length === 0) {
                   return;
               }

               var base64 = queue[0];
               if (base64.length === 0) {
                   mediaSource.endOfStream();
                   queue.shift();
                   return;
               } else {
                   var buffer = new Uint8Array(atob(base64).split("").map(function (c) {
                       return c.charCodeAt(0);
                   }));
                   sourceBuffer.appendBuffer(buffer);
                   queue.shift();
               }
           }, false);

           var connection = new signalR.HubConnectionBuilder()
               .withUrl("/signalr-video")
               .configureLogging(signalR.LogLevel.Information)
               .build();
           connection.start().then(function () {
               connection.stream("InitVideoReceive")
                   .subscribe({
                       next: function(item) {
                           if (queue.length === 0 && !sourceBuffer.updating) {
                               var buffer = new Uint8Array(atob(item).split("").map(function (c) {
                                   return c.charCodeAt(0);
                               }));
                               sourceBuffer.appendBuffer(buffer);
                               console.log(blockindex++ + " : " + buffer.byteLength);
                           } else {
                               queue.push(item);
                           }
                       },
                       complete: function () {
                           queue.push('');
                       },
                       error: function (err) {
                           console.error(err);
                       }
                   });
           });
       }
       video.src = window.URL.createObjectURL(mediaSource);
    })

    Chrome plays the video for only 3-5 seconds and then stops to buffer, even though plenty of clusters have been transferred and appended to the SourceBuffer.
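    A useful first check when Chrome reports BUFFERING_HAVE_NOTHING is whether the playhead has run past the end of the buffered range, for example because the cluster timestamps are not contiguous. A small diagnostic sketch, assuming the video and mediaSource variables from the code above:

    // Log the playhead position against the buffered ranges once per second.
    setInterval(function () {
        var ranges = [];
        for (var i = 0; i < video.buffered.length; i++) {
            ranges.push(video.buffered.start(i).toFixed(3) + '-' +
                        video.buffered.end(i).toFixed(3));
        }
        console.log('currentTime=' + video.currentTime.toFixed(3) +
                    ' buffered=[' + ranges.join(', ') + ']' +
                    ' mediaSource.readyState=' + mediaSource.readyState);
    }, 1000);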

    Here’s the information from chrome://media-internals/:

    Player Properties:

    render_id: 217
    player_id: 1
    origin_url: http://localhost:52531/
    frame_url: http://localhost:52531/
    frame_title: Home Page
    url: blob:http://localhost:52531/dcb25d89-9830-40a5-ba88-33c13b5c03eb
    info: Selected FFmpegVideoDecoder for video decoding, config: codec: vp8 format: 1 profile: vp8 coded size: [1280,720] visible rect: [0,0,1280,720] natural size: [1280,720] has extra data? false encryption scheme: Unencrypted rotation: 0°
    pipeline_state: kSuspended
    found_video_stream: true
    video_codec_name: vp8
    video_dds: false
    video_decoder: FFmpegVideoDecoder
    duration: unknown
    height: 720
    width: 1280
    video_buffering_state: BUFFERING_HAVE_NOTHING
    for_suspended_start: false
    pipeline_buffering_state: BUFFERING_HAVE_NOTHING
    event: PAUSE

    Log

    Timestamp       Property            Value
    00:00:00 00     origin_url          http://localhost:52531/
    00:00:00 00     frame_url           http://localhost:52531/
    00:00:00 00     frame_title         Home Page
    00:00:00 00     url                 blob:http://localhost:52531/dcb25d89-9830-40a5-ba88-33c13b5c03eb
    00:00:00 00     info                ChunkDemuxer: buffering by DTS
    00:00:00 35     pipeline_state      kStarting
    00:00:15 213    found_video_stream  true
    00:00:15 213    video_codec_name    vp8
    00:00:15 216    video_dds           false
    00:00:15 216    video_decoder       FFmpegVideoDecoder
    00:00:15 216    info                Selected FFmpegVideoDecoder for video decoding, config: codec: vp8 format: 1 profile: vp8 coded size: [1280,720] visible rect: [0,0,1280,720] natural size: [1280,720] has extra data? false encryption scheme: Unencrypted rotation: 0°
    00:00:15 216    pipeline_state      kPlaying
    00:00:15 213    duration            unknown
    00:00:16 661    height              720
    00:00:16 661    width               1280
    00:00:16 665    video_buffering_state       BUFFERING_HAVE_ENOUGH
    00:00:16 665    for_suspended_start         false
    00:00:16 665    pipeline_buffering_state    BUFFERING_HAVE_ENOUGH
    00:00:16 667    pipeline_state      kSuspending
    00:00:16 670    pipeline_state      kSuspended
    00:00:52 759    info                Effective playback rate changed from 0 to 1
    00:00:52 759    event               PLAY
    00:00:52 759    pipeline_state      kResuming
    00:00:52 760    video_dds           false
    00:00:52 760    video_decoder       FFmpegVideoDecoder
    00:00:52 760    info                Selected FFmpegVideoDecoder for video decoding, config: codec: vp8 format: 1 profile: vp8 coded size: [1280,720] visible rect: [0,0,1280,720] natural size: [1280,720] has extra data? false encryption scheme: Unencrypted rotation: 0°
    00:00:52 760    pipeline_state      kPlaying
    00:00:52 793    height              720
    00:00:52 793    width               1280
    00:00:52 798    video_buffering_state       BUFFERING_HAVE_ENOUGH
    00:00:52 798    for_suspended_start         false
    00:00:52 798    pipeline_buffering_state    BUFFERING_HAVE_ENOUGH
    00:00:56 278    video_buffering_state       BUFFERING_HAVE_NOTHING
    00:00:56 295    for_suspended_start         false
    00:00:56 295    pipeline_buffering_state    BUFFERING_HAVE_NOTHING
    00:01:20 717    event               PAUSE
    00:01:33 538    event               PLAY
    00:01:35 94     event               PAUSE
    00:01:55 561    pipeline_state      kSuspending
    00:01:55 563    pipeline_state      kSuspended

    Can someone tell me what’s wrong with my code, or does Chrome require some magic configuration to work?

    Thanks 

    Please excuse my English :)

  • "moov atom not found" when using av_interleaved_write_frame but not avio_write

    9 October 2017, by icStatic

    I am attempting to put together a class that can take arbitrary frames and construct a video from them using the ffmpeg 3.3.3 API. I’ve been struggling to find a good example of this, as the available examples still seem to use deprecated functions, so I’ve attempted to piece it together from the documentation in the headers and from a few GitHub repos that appear to use the new API.

    If I use av_interleaved_write_frame to write the encoded packets to the output, ffprobe reports the following:

    [mov,mp4,m4a,3gp,3g2,mj2 @ 0000000002760120] moov atom not found0
    X:\Diagnostics.mp4: Invalid data found when processing input

    ffplay is unable to play the file generated using this method.

    If I swap it out for a call to avio_write, ffprobe instead outputs:

    Input #0, h264, from 'X:\Diagnostics.mp4':
     Duration: N/A, bitrate: N/A
       Stream #0:0: Video: h264 (Main), yuv420p(progressive), 672x380 [SAR 1:1 DAR 168:95], 25 fps, 25 tbr, 1200k tbn, 50 tbc

    ffplay can mostly play this file until it gets towards the end, when it outputs:

    Input #0, h264, from 'X:\Diagnostics.mp4':    0KB sq=    0B f=0/0
     Duration: N/A, bitrate: N/A
       Stream #0:0: Video: h264 (Main), yuv420p(progressive), 672x380 [SAR 1:1 DAR 168:95], 25 fps, 25 tbr, 1200k tbn, 50 tbc
    [h264 @ 000000000254ef80] error while decoding MB 31 22, bytestream -65
    [h264 @ 000000000254ef80] concealing 102 DC, 102 AC, 102 MV errors in I frame
       nan M-V:    nan fd=   1 aq=    0KB vq=    0KB sq=    0B f=0/0

    VLC cannot play files from either method. The second method’s file displays a single black frame, then hides the video output. The first displays nothing. Neither gives a video duration.

    Does anyone have any idea what’s happening here? I assume my solution is close to working, as I’m getting a good chunk of valid frames through.

    Code:

    void main()
    {
       OutputStream Stream( "Output.mp4", 672, 380, 25, true );
       Stream.Initialize();

       int i = 100;
       while( i-- )
       {
           //... Generate a frame

           Stream.WriteFrame( Frame );
       }
       Stream.CloseFile();
    }

    OutputStream::OutputStream( const std::string& Path, unsigned int Width, unsigned int Height, int Framerate, bool IsBGR )
    : Stream()
    , FrameIndex( 0 )
    {
       auto& ID = *m_InternalData;

       ID.Path = Path;

       ID.Width = Width;
       ID.Height= Height;
       ID.Framerate.num = Framerate;
       ID.Framerate.den = 1;

       ID.PixelFormat = IsBGR ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_RGB24;
       ID.CodecID = AV_CODEC_ID_H264;
       ID.CodecTag = 0;

       ID.AspectRatio.num = 1;
       ID.AspectRatio.den = 1;
    }

    CameraStreamError OutputStream::Initialize()
    {
       av_log_set_callback( &InputStream::LogCallback );
       av_register_all();
       avformat_network_init();

       auto& ID = *m_InternalData;

       av_init_packet( &ID.Packet );

       int Result = avformat_alloc_output_context2( &ID.FormatContext, nullptr, nullptr, ID.Path.c_str() );
       if( Result < 0 || !ID.FormatContext )
       {
           STREAM_ERROR( UnknownError );
       }

       AVCodec* Encoder = avcodec_find_encoder( ID.CodecID );

       if( !Encoder )
       {
           STREAM_ERROR( NoH264Support );
       }

       AVStream* OutStream = avformat_new_stream( ID.FormatContext, Encoder );
       if( !OutStream )
       {
           STREAM_ERROR( UnknownError );
       }

       ID.CodecContext = avcodec_alloc_context3( Encoder );
       if( !ID.CodecContext )
       {
           STREAM_ERROR( NoH264Support );
       }

       ID.CodecContext->time_base = av_inv_q(ID.Framerate);

       {
           AVCodecParameters* CodecParams = OutStream->codecpar;

           CodecParams->width = ID.Width;
           CodecParams->height = ID.Height;
           CodecParams->format = AV_PIX_FMT_YUV420P;
           CodecParams->codec_id = ID.CodecID;
           CodecParams->codec_type = AVMEDIA_TYPE_VIDEO;
           CodecParams->profile = FF_PROFILE_H264_MAIN;
           CodecParams->level = 40;

           Result = avcodec_parameters_to_context( ID.CodecContext, CodecParams );
           if( Result < 0 )
           {
               STREAM_ERROR( EncoderCreationError );
           }
       }

       if( ID.IsVideo )
       {
           ID.CodecContext->width = ID.Width;
           ID.CodecContext->height = ID.Height;
           ID.CodecContext->sample_aspect_ratio = ID.AspectRatio;
           ID.CodecContext->time_base = av_inv_q(ID.Framerate);

           if( Encoder->pix_fmts )
           {
               ID.CodecContext->pix_fmt = Encoder->pix_fmts[0];
           }
           else
           {
               ID.CodecContext->pix_fmt = ID.PixelFormat;
           }
       }
       //Snip

       Result = avcodec_open2( ID.CodecContext, Encoder, nullptr );
       if( Result < 0 )
       {
           STREAM_ERROR( EncoderCreationError );
       }

       Result = avcodec_parameters_from_context( OutStream->codecpar, ID.CodecContext );
       if( Result < 0 )
       {
           STREAM_ERROR( EncoderCreationError );
       }

       if( ID.FormatContext->oformat->flags & AVFMT_GLOBALHEADER )
       {
           ID.CodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
       }

       OutStream->time_base = ID.CodecContext->time_base;
       OutStream->avg_frame_rate= av_inv_q(OutStream->time_base);

       if( !( ID.FormatContext->oformat->flags & AVFMT_NOFILE ) )
       {
           Result = avio_open( &ID.FormatContext->pb, ID.Path.c_str(), AVIO_FLAG_WRITE );
           if( Result < 0 )
           {
               STREAM_ERROR( FileNotWriteable );
           }
       }

       Result = avformat_write_header( ID.FormatContext, nullptr );
       if( Result < 0 )
       {
           STREAM_ERROR( WriteFailed );
       }

       ID.Output = std::make_unique( ID.CodecContext->width, ID.CodecContext->height, ID.CodecContext->pix_fmt );

       ID.ConversionContext = sws_getCachedContext(
           ID.ConversionContext,
           ID.Width,
           ID.Height,
           ID.PixelFormat,
           ID.CodecContext->width,
           ID.CodecContext->height,
           ID.CodecContext->pix_fmt,
           SWS_BICUBIC,
           NULL,
           NULL,
           NULL );

       return CameraStreamError::Success;
    }

    CameraStreamError OutputStream::WriteFrame( FFMPEG::Frame* Frame )
    {
       auto& ID = *m_InternalData;

       ID.Output->Prepare();

       int OutputSliceSize = sws_scale( m_InternalData->ConversionContext, Frame->GetFrame()->data, Frame->GetFrame()->linesize, 0, Frame->GetHeight(), ID.Output->GetFrame()->data, ID.Output->GetFrame()->linesize );

       // One pts tick per frame, since time_base was set to 1/framerate.
       ID.Output->GetFrame()->pts = ID.CodecContext->frame_number;

       int Result = avcodec_send_frame( GetData().CodecContext, ID.Output->GetFrame() );
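       // AVERROR(EAGAIN) from avcodec_send_frame() means the encoder's output
       // queue is full: drain it via SendAll() below, then resend the frame.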
       if( Result == AVERROR(EAGAIN) )
       {
           CameraStreamError ResultErr = SendAll();
           if( ResultErr != CameraStreamError::Success )
           {
               return ResultErr;
           }
           Result = avcodec_send_frame( GetData().CodecContext, ID.Output->GetFrame() );
       }

       if( Result == 0 )
       {
           CameraStreamError ResultErr = SendAll();
           if( ResultErr != CameraStreamError::Success )
           {
               return ResultErr;
           }
       }

       FrameIndex++;

       return CameraStreamError::Success;
    }

    CameraStreamError OutputStream::SendAll( void )
    {
       auto& ID = *m_InternalData;

       int Result;
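       // avcodec_receive_packet() returns 0 when an encoded packet is
       // available, AVERROR(EAGAIN) when the encoder needs more input before
       // it can emit another packet, and AVERROR_EOF once fully flushed.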
       do
       {
           AVPacket TempPacket = {};
           av_init_packet( &TempPacket );

           Result = avcodec_receive_packet( GetData().CodecContext, &TempPacket );
           if( Result == 0 )
           {
               // Rescale packet timestamps from the codec time_base to the
               // output stream's time_base before handing the packet to the muxer.
               av_packet_rescale_ts( &TempPacket, ID.CodecContext->time_base, ID.FormatContext->streams[0]->time_base );

               TempPacket.stream_index = ID.FormatContext->streams[0]->index;

               //avio_write( ID.FormatContext->pb, TempPacket.data, TempPacket.size );
               Result = av_interleaved_write_frame( ID.FormatContext, &TempPacket );
               if( Result < 0 )
               {
                   STREAM_ERROR( WriteFailed );
               }

               av_packet_unref( &TempPacket );
           }
           else if( Result != AVERROR(EAGAIN) )
           {
               continue;
           }
           else if( Result != AVERROR_EOF )
           {
               break;
           }
           else if( Result < 0 )
           {
               STREAM_ERROR( WriteFailed );
           }
       } while ( Result == 0);

       return CameraStreamError::Success;
    }

    CameraStreamError OutputStream::CloseFile()
    {
       auto& ID = *m_InternalData;

       while( true )
       {
           //Flush
           int Result = avcodec_send_frame( ID.CodecContext, nullptr );
           if( Result == 0 )
           {
               CameraStreamError StrError = SendAll();
               if( StrError != CameraStreamError::Success )
               {
                   return StrError;
               }
           }
           else if( Result == AVERROR_EOF )
           {
               break;
           }
           else
           {
               STREAM_ERROR( WriteFailed );
           }
       }

       int Result = av_write_trailer( ID.FormatContext );
       if( Result < 0 )
       {
           STREAM_ERROR( WriteFailed );
       }

       if( !(ID.FormatContext->oformat->flags & AVFMT_NOFILE) )
       {
           Result = avio_close( ID.FormatContext->pb );
           if( Result < 0 )
           {
               STREAM_ERROR( WriteFailed );
           }
       }

       return CameraStreamError::Success;
    }

    Note that I’ve simplified a few things and inlined a few bits that were elsewhere. I’ve also removed all the shutdown code, as anything that happens after the file is closed is irrelevant.

    Full repo here: https://github.com/IanNorris/Witness. If you clone it, the issue is with the ’Diagnostics’ output; the Output file is fine. There are two hardcoded paths to X:.
