
Recherche avancée
Médias (1)
-
Bug de détection d’ogg
22 mars 2013, par
Mis à jour : Avril 2013
Langue : français
Type : Video
Autres articles (93)
-
MediaSPIP 0.1 Beta version
25 avril 2011, par — MediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
The zip file provided here only contains the sources of MediaSPIP in its standalone version.
To get a working installation, you must manually install all software dependencies on the server.
If you want to use this archive for an installation in "farm mode", you will also need to proceed to other manual (...) -
Multilang : améliorer l’interface pour les blocs multilingues
18 février 2011, par — Multilang est un plugin supplémentaire qui n’est pas activé par défaut lors de l’initialisation de MediaSPIP.
Après son activation, une préconfiguration est mise en place automatiquement par MediaSPIP init permettant à la nouvelle fonctionnalité d’être automatiquement opérationnelle. Il n’est donc pas obligatoire de passer par une étape de configuration pour cela. -
Use, discuss, criticize
13 avril 2011, par — Talk to people directly involved in MediaSPIP’s development, or to people around you who could use MediaSPIP to share, enhance or develop their creative projects.
The bigger the community, the more MediaSPIP’s potential will be explored and the faster the software will evolve.
A discussion list is available for all exchanges between users.
Sur d’autres sites (7924)
-
ffmpeg avcodec_encode_video2 hangs when using Quick Sync h264_qsv encoder
9 juin 2020, par Mike Simpson — When I use the mpeg4 or h264 encoders, I am able to successfully encode images to make a valid AVI file using the API for ffmpeg 3.1.0. However, when I use the Quick Sync encoder (h264_qsv), avcodec_encode_video2 will hang some of the time. I found that when using images that are 1920x1080, it was rare that avcodec_encode_video2 would hang. When using 256x256 images, it was very likely that the function would hang.



I have created the test code below that demonstrates the hang of avcodec_encode_video2. The code will create a 1000 frame, 256x256 AVI with a bit rate of 400000. The frames are simply allocated, so the output video should just be green frames.



The problem was observed using Windows 7 and Windows 10, using the 32-bit or 64-bit test application.



If anyone has any idea on how I can avoid the avcodec_encode_video2 hang I would be very grateful ! Thanks in advance for any assistance.



// FFmpeg is a C library: wrap its headers in extern "C" so the C++ compiler
// does not apply name mangling to the declarations.
// NOTE(review): the scraped listing used bare header names (and one include
// with no operand at all); the canonical libav* paths are restored here.
extern "C"
{
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/avutil.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
}

#include <iostream>


// Globals
// State shared by the whole export pipeline below; set up by initialize() /
// startExport() and torn down by endExport() / cleanup().
AVCodec* m_pCodec = NULL;
AVStream *m_pStream = NULL;
AVOutputFormat* m_pFormat = NULL;
AVFormatContext* m_pFormatContext = NULL;
AVCodecContext* m_pCodecContext = NULL;
AVFrame* m_pFrame = NULL;
int m_frameIndex;

// Output format
// NV12 is the pixel format the Quick Sync (qsv) encoders consume natively.
AVPixelFormat m_pixType = AV_PIX_FMT_NV12;
// Use for mpeg4
//AVPixelFormat m_pixType = AV_PIX_FMT_YUV420P;

// Output frame rate
int m_frameRate = 30;
// Output image dimensions
int m_imageWidth = 256;
int m_imageHeight = 256;
// Number of frames to export
int m_frameCount = 1000;
// Output file name
const char* m_fileName = "c:/test/test.avi";
// Output file type
const char* m_fileType = "AVI";
// Codec name used to encode
const char* m_encoderName = "h264_qsv";
// use for mpeg4
//const char* m_encoderName = "mpeg4";
// Target bit rate
int m_targetBitRate = 400000;

// Create the single video stream on the output container and mirror the
// encoder settings onto it so the muxer can write a correct header.
// NOTE(review): AVStream::codec is the long-deprecated per-stream codec
// context; modern FFmpeg uses AVStream::codecpar instead — confirm against
// the FFmpeg version actually linked (the post mentions 3.1.0).
void addVideoStream()
{
 m_pStream = avformat_new_stream( m_pFormatContext, m_pCodec );
 // Stream index of the newly added stream (0 for the first one).
 m_pStream->id = m_pFormatContext->nb_streams - 1;
 m_pStream->time_base = m_pCodecContext->time_base;
 // Copy the encoder parameters field by field onto the stream.
 m_pStream->codec->pix_fmt = m_pixType;
 m_pStream->codec->flags = m_pCodecContext->flags;
 m_pStream->codec->width = m_pCodecContext->width;
 m_pStream->codec->height = m_pCodecContext->height;
 m_pStream->codec->time_base = m_pCodecContext->time_base;
 m_pStream->codec->bit_rate = m_pCodecContext->bit_rate;
}

/*
 * Allocate an AVFrame plus a pixel buffer for the given format/size.
 * The buffer is 32-byte aligned (required by SIMD and hardware paths).
 * Returns NULL on failure; the caller owns the returned frame.
 */
AVFrame* allocatePicture( enum AVPixelFormat pix_fmt, int width, int height )
{
    AVFrame *frame = av_frame_alloc();

    if ( !frame )
    {
        return NULL;
    }

    frame->format = pix_fmt;
    frame->width = width;
    frame->height = height;

    int checkImage = av_image_alloc( frame->data, frame->linesize, width, height, pix_fmt, 32 );

    if ( checkImage < 0 )
    {
        // BUG FIX: the original returned NULL here without releasing `frame`,
        // leaking one AVFrame per failed buffer allocation.
        av_frame_free( &frame );
        return NULL;
    }

    return frame;
}

/*
 * Look up the requested encoder and build a codec context from the globals.
 * Returns false when the encoder (e.g. h264_qsv) is missing from the linked
 * FFmpeg build or the context cannot be allocated.
 */
bool initialize()
{
    // time_base is the inverse of the frame rate: one tick = 1/30 s.
    AVRational frameRate;
    frameRate.den = m_frameRate;
    frameRate.num = 1;

    av_register_all();

    m_pCodec = avcodec_find_encoder_by_name( m_encoderName );

    if( !m_pCodec )
    {
        return false;
    }

    m_pCodecContext = avcodec_alloc_context3( m_pCodec );

    // BUG FIX: the original dereferenced the context without checking the
    // allocation result.
    if( !m_pCodecContext )
    {
        return false;
    }

    m_pCodecContext->width = m_imageWidth;
    m_pCodecContext->height = m_imageHeight;
    m_pCodecContext->time_base = frameRate;
    m_pCodecContext->gop_size = 0;          // intra-only stream
    m_pCodecContext->pix_fmt = m_pixType;
    m_pCodecContext->codec_id = m_pCodec->id;
    m_pCodecContext->bit_rate = m_targetBitRate;

    // NOTE(review): "+CBR" is not a documented option name for h264_qsv, so
    // this call most likely fails silently — confirm whether it has any
    // effect on the linked FFmpeg version.
    av_opt_set( m_pCodecContext->priv_data, "+CBR", "", 0 );

    return true;
}

/*
 * Open the output container, add the video stream, open the encoder,
 * allocate the reusable source frame, open the destination file and write
 * the container header. Returns false on any failure.
 */
bool startExport()
{
    m_frameIndex = 0;

    // BUG FIX: the original passed an UNINITIALIZED char buffer as the file
    // name — reading it is undefined behavior. The container format is named
    // explicitly via m_fileType, so an empty name is sufficient here.
    char fakeFileName[512] = { 0 };
    int checkAllocContext = avformat_alloc_output_context2( &m_pFormatContext, NULL, m_fileType, fakeFileName );

    if ( checkAllocContext < 0 || !m_pFormatContext )
    {
        return false;
    }

    m_pFormat = m_pFormatContext->oformat;

    if ( m_pFormat->video_codec != AV_CODEC_ID_NONE )
    {
        addVideoStream();

        int checkOpen = avcodec_open2( m_pCodecContext, m_pCodec, NULL );

        if ( checkOpen < 0 )
        {
            return false;
        }

        // Reusable source frame; its pts starts at 0 and is advanced by
        // processFrame() once per encoded packet.
        m_pFrame = allocatePicture( m_pCodecContext->pix_fmt, m_pCodecContext->width, m_pCodecContext->height );
        if( !m_pFrame )
        {
            return false;
        }
        m_pFrame->pts = 0;
    }

    // Open the real destination file for writing.
    int checkOpen = avio_open( &m_pFormatContext->pb, m_fileName, AVIO_FLAG_WRITE );
    if ( checkOpen < 0 )
    {
        return false;
    }

    av_dict_set( &(m_pFormatContext->metadata), "title", "QS Test", 0 );

    int checkHeader = avformat_write_header( m_pFormatContext, NULL );
    if ( checkHeader < 0 )
    {
        return false;
    }

    return true;
}

// Stamp the packet with pts/dts derived from the frame counter, advance the
// counter, and hand the packet to the muxer.
// Side effect: increments m_pFrame->pts by one per call.
// Returns av_interleaved_write_frame()'s status (0 on success).
int processFrame( AVPacket& avPacket )
{
    avPacket.stream_index = 0;

    // Rescale the frame counter from encoder ticks to stream ticks; dts is
    // given the same value as pts.
    const int64_t stamp = av_rescale_q( m_pFrame->pts, m_pStream->codec->time_base, m_pStream->time_base );
    avPacket.pts = stamp;
    avPacket.dts = stamp;
    m_pFrame->pts++;

    return av_interleaved_write_frame( m_pFormatContext, &avPacket );
}

bool exportFrame()
{
 int success = 1;
 int result = 0;

 AVPacket avPacket;

 av_init_packet( &avPacket );
 avPacket.data = NULL;
 avPacket.size = 0;

 fflush(stdout);

 std::cout << "Before avcodec_encode_video2 for frame: " << m_frameIndex << std::endl;
 success = avcodec_encode_video2( m_pCodecContext, &avPacket, m_pFrame, &result );
 std::cout << "After avcodec_encode_video2 for frame: " << m_frameIndex << std::endl;

 if( result )
 { 
 success = processFrame( avPacket );
 }

 av_packet_unref( &avPacket );

 m_frameIndex++;
 return ( success == 0 );
}

void endExport()
{
 int result = 0;
 int success = 0;

 if (m_pFrame)
 {
 while ( success == 0 )
 {
 AVPacket avPacket;
 av_init_packet( &avPacket );
 avPacket.data = NULL;
 avPacket.size = 0;

 fflush(stdout);
 success = avcodec_encode_video2( m_pCodecContext, &avPacket, NULL, &result );

 if( result )
 { 
 success = processFrame( avPacket );
 }
 av_packet_unref( &avPacket );

 if (!result)
 {
 break;
 }
 }
 }

 if (m_pFormatContext)
 {
 av_write_trailer( m_pFormatContext );

 if( m_pFrame )
 {
 av_frame_free( &m_pFrame );
 }

 avio_closep( &m_pFormatContext->pb );
 avformat_free_context( m_pFormatContext );
 m_pFormatContext = NULL;
 }
}

// Release whatever endExport() did not already free: the reusable frame
// (already NULL if endExport ran) and the encoder context.
void cleanup()
{
    // No-op when m_pFrame is already NULL.
    av_frame_free( &m_pFrame );

    if( m_pCodecContext )
    {
        avcodec_close( m_pCodecContext );
        av_free( m_pCodecContext );
    }
}

/*
 * Drive the export: initialize -> startExport -> m_frameCount x exportFrame
 * -> endExport -> cleanup.
 * BUG FIX: the original returned 1 unconditionally, which conventionally
 * signals failure to the shell even on success; return 0 on success instead.
 */
int main()
{
    bool success = true;
    if (initialize())
    {
        if (startExport())
        {
            for (int loop = 0; loop < m_frameCount; loop++)
            {
                if (!exportFrame())
                {
                    std::cout << "Failed to export frame\n";
                    success = false;
                    break;
                }
            }
            endExport();
        }
        else
        {
            std::cout << "Failed to start export\n";
            success = false;
        }

        cleanup();
    }
    else
    {
        std::cout << "Failed to initialize export\n";
        success = false;
    }

    if (success)
    {
        std::cout << "Successfully exported file\n";
    }
    return success ? 0 : 1;
}
</iostream>


-
avcodec_encode_video2 the memory usage is very high
9 juin 2020, par 陆骁剑 — code :
 ```
 #include "pch.h"
 #include



extern "C"
{
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include <libavcodec></libavcodec>avcodec.h>
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavutil/avutil.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include <libavutil></libavutil>rational.h>
//#include "libavutil/avassert.h"


//#include "libavutil/attributes.h"
//#include "libavutil/avassert.h"
//#include "libavutil/frame.h"
//#include "libavutil/imgutils.h"
//#include "internal1.h"
//#include "libavutil/samplefmt.h"
}
#include 
//#include "avcodec.h"
//#include "frame_thread_encoder.h"
//#include "internal.h"

// Globals
// State shared by the export pipeline; mirrors the first program but with the
// reporter's memory-usage repro parameters (256x1537, 20000 frames, raw H264
// bitstream output).
AVCodec* m_pCodec = NULL;
AVStream *m_pStream = NULL;
AVOutputFormat* m_pFormat = NULL;
AVFormatContext* m_pFormatContext = NULL;
AVCodecContext* m_pCodecContext = NULL;
AVFrame* m_pFrame = NULL;
int m_frameIndex;

// Output format
// NV12 is the pixel format the Quick Sync (qsv) encoders consume natively.
AVPixelFormat m_pixType = AV_PIX_FMT_NV12;
// Use for mpeg4
//AVPixelFormat m_pixType = AV_PIX_FMT_YUV420P;

// Output frame rate
int m_frameRate = 30;
// Output image dimensions
int m_imageWidth = 256;
int m_imageHeight = 1537;
// Number of frames to export
int m_frameCount = 20000;
// Output file name
const char* m_fileName = "test.h264";
// Output file type
const char* m_fileType = "H264";
// Codec name used to encode
const char* m_encoderName = "h264_qsv";
// use for mpeg4
//const char* m_encoderName = "mpeg4";
// Target bit rate
int m_targetBitRate = 400000;

// Create the single video stream on the output container and mirror the
// encoder settings onto it so the muxer can write a correct header.
// NOTE(review): AVStream::codec is the long-deprecated per-stream codec
// context; modern FFmpeg uses AVStream::codecpar instead — confirm against
// the FFmpeg version actually linked.
void addVideoStream()
{
 m_pStream = avformat_new_stream(m_pFormatContext, m_pCodec);
 // Stream index of the newly added stream (0 for the first one).
 m_pStream->id = m_pFormatContext->nb_streams - 1;
 m_pStream->time_base = m_pCodecContext->time_base;
 // Copy the encoder parameters field by field onto the stream.
 m_pStream->codec->pix_fmt = m_pixType;
 m_pStream->codec->flags = m_pCodecContext->flags;
 m_pStream->codec->width = m_pCodecContext->width;
 m_pStream->codec->height = m_pCodecContext->height;
 m_pStream->codec->time_base = m_pCodecContext->time_base;
 m_pStream->codec->bit_rate = m_pCodecContext->bit_rate;
}

/*
 * Allocate an AVFrame plus a pixel buffer for the given format/size.
 * The buffer is 32-byte aligned (required by SIMD and hardware paths).
 * Returns NULL on failure; the caller owns the returned frame.
 */
AVFrame* allocatePicture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *frame = av_frame_alloc();

    if (!frame)
    {
        return NULL;
    }

    frame->format = pix_fmt;
    frame->width = width;
    frame->height = height;

    int checkImage = av_image_alloc(frame->data, frame->linesize, width, height, pix_fmt, 32);

    if (checkImage < 0)
    {
        // BUG FIX: the original returned NULL here without releasing `frame`,
        // leaking one AVFrame per failed buffer allocation.
        av_frame_free(&frame);
        return NULL;
    }

    return frame;
}

/*
 * Look up the requested encoder and build a codec context from the globals.
 * Returns false when the encoder (e.g. h264_qsv) is missing from the linked
 * FFmpeg build or the context cannot be allocated.
 */
bool initialize()
{
    // time_base is the inverse of the frame rate: one tick = 1/30 s.
    AVRational frameRate;
    frameRate.den = m_frameRate;
    frameRate.num = 1;

    av_register_all();

    m_pCodec = avcodec_find_encoder_by_name(m_encoderName);

    if (!m_pCodec)
    {
        return false;
    }

    m_pCodecContext = avcodec_alloc_context3(m_pCodec);

    // BUG FIX: the original dereferenced the context without checking the
    // allocation result.
    if (!m_pCodecContext)
    {
        return false;
    }

    m_pCodecContext->width = m_imageWidth;
    m_pCodecContext->height = m_imageHeight;
    m_pCodecContext->time_base = frameRate;
    m_pCodecContext->gop_size = 0;          // intra-only stream
    m_pCodecContext->pix_fmt = m_pixType;
    m_pCodecContext->codec_id = m_pCodec->id;
    m_pCodecContext->bit_rate = m_targetBitRate;

    // NOTE(review): "+CBR" is not a documented option name for h264_qsv, so
    // this call most likely fails silently — confirm whether it has any
    // effect on the linked FFmpeg version.
    av_opt_set(m_pCodecContext->priv_data, "+CBR", "", 0);

    return true;
}

/*
 * Open the output container, add the video stream, open the encoder,
 * allocate the reusable source frame, open the destination file and write
 * the container header. Returns false on any failure.
 */
bool startExport()
{
    m_frameIndex = 0;

    // BUG FIX: the original passed an UNINITIALIZED char buffer as the file
    // name — reading it is undefined behavior. The container format is named
    // explicitly via m_fileType, so an empty name is sufficient here.
    char fakeFileName[512] = { 0 };
    int checkAllocContext = avformat_alloc_output_context2(&m_pFormatContext, NULL, m_fileType, fakeFileName);

    if (checkAllocContext < 0 || !m_pFormatContext)
    {
        return false;
    }

    m_pFormat = m_pFormatContext->oformat;

    if (m_pFormat->video_codec != AV_CODEC_ID_NONE)
    {
        addVideoStream();

        int checkOpen = avcodec_open2(m_pCodecContext, m_pCodec, NULL);

        if (checkOpen < 0)
        {
            return false;
        }

        // Reusable source frame; its pts starts at 0 and is advanced by
        // processFrame() once per encoded packet.
        m_pFrame = allocatePicture(m_pCodecContext->pix_fmt, m_pCodecContext->width, m_pCodecContext->height);
        if (!m_pFrame)
        {
            return false;
        }
        m_pFrame->pts = 0;
    }

    // Open the real destination file for writing.
    int checkOpen = avio_open(&m_pFormatContext->pb, m_fileName, AVIO_FLAG_WRITE);
    if (checkOpen < 0)
    {
        return false;
    }

    av_dict_set(&(m_pFormatContext->metadata), "title", "QS Test", 0);

    int checkHeader = avformat_write_header(m_pFormatContext, NULL);
    if (checkHeader < 0)
    {
        return false;
    }

    return true;
}

// Stamp the packet with pts/dts derived from the frame counter, advance the
// counter, and hand the packet to the muxer.
// Side effect: increments m_pFrame->pts by one per call.
// Returns av_interleaved_write_frame()'s status (0 on success).
int processFrame(AVPacket& avPacket)
{
    avPacket.stream_index = 0;

    // Rescale the frame counter from encoder ticks to stream ticks; dts is
    // given the same value as pts.
    const int64_t stamp = av_rescale_q(m_pFrame->pts, m_pStream->codec->time_base, m_pStream->time_base);
    avPacket.pts = stamp;
    avPacket.dts = stamp;
    m_pFrame->pts++;

    return av_interleaved_write_frame(m_pFormatContext, &avPacket);
}

/*
 * Encode the source frame once and mux any packet produced.
 * Returns true when everything succeeded.
 *
 * BUG FIXES vs. the original (these are the memory-growth culprits):
 *  - removed `avPacket1`, an AVPacket heap-allocated with `new` on EVERY call
 *    and never deleted — a leak of one AVPacket per exported frame;
 *  - removed the deprecated av_free_packet() call that released the packet a
 *    second time after av_packet_unref() had already done so.
 */
bool exportFrame()
{
    AVPacket avPacket;

    av_init_packet(&avPacket);
    avPacket.data = NULL;
    avPacket.size = 0;

    fflush(stdout);

    std::cout << "Before avcodec_encode_video2 for frame: " << m_frameIndex << std::endl;
    int result = 0;
    int success = avcodec_encode_video2(m_pCodecContext, &avPacket, m_pFrame, &result);
    std::cout << "After avcodec_encode_video2 for frame: " << m_frameIndex << std::endl;

    // The encoder may buffer frames; only mux when a packet came out.
    if (result)
    {
        success = processFrame(avPacket);
    }

    av_packet_unref(&avPacket);

    m_frameIndex++;
    return (success == 0);
}

void endExport()
{
 int result = 0;
 int success = 0;

 if (m_pFrame)
 {
 while (success == 0)
 {
 AVPacket avPacket;
 av_init_packet(&avPacket);
 avPacket.data = NULL;
 avPacket.size = 0;

 fflush(stdout);
 success = avcodec_encode_video2(m_pCodecContext, &avPacket, NULL, &result);

 if (result)
 {
 success = processFrame(avPacket);
 }
 av_packet_unref(&avPacket);

 if (!result)
 {
 break;
 }
 }
 }

 if (m_pFormatContext)
 {
 av_write_trailer(m_pFormatContext);

 if (m_pFrame)
 {
 av_frame_free(&m_pFrame);
 }

 avio_closep(&m_pFormatContext->pb);
 avformat_free_context(m_pFormatContext);
 m_pFormatContext = NULL;
 }
}

// Release whatever endExport() did not already free: the reusable frame
// (already NULL if endExport ran) and the encoder context.
void cleanup()
{
    // No-op when m_pFrame is already NULL.
    av_frame_free(&m_pFrame);

    if (m_pCodecContext)
    {
        avcodec_close(m_pCodecContext);
        av_free(m_pCodecContext);
    }
}

/*
 * Drive the export: initialize -> startExport -> m_frameCount x exportFrame
 * -> endExport -> cleanup.
 * BUG FIX: the original returned 1 unconditionally, which conventionally
 * signals failure to the shell even on success; return 0 on success instead.
 */
int main()
{
    bool success = true;
    if (initialize())
    {
        if (startExport())
        {
            for (int loop = 0; loop < m_frameCount; loop++)
            {
                if (!exportFrame())
                {
                    std::cout << "Failed to export frame\n";
                    success = false;
                    break;
                }
            }
            endExport();
        }
        else
        {
            std::cout << "Failed to start export\n";
            success = false;
        }

        cleanup();
    }
    else
    {
        std::cout << "Failed to initialize export\n";
        success = false;
    }

    if (success)
    {
        std::cout << "Successfully exported file\n";
    }
    return success ? 0 : 1;
}


 ```




When I set the m_imageHeight to 1536, the memory usage is very high. But when set to 1535 or 1537 or other values, the memory usage is normal, can you tell me why ?
I have navigated to avcodec_encode_video2
enter link description here
I am using the code from the link
I have updated to the latest Intel® Graphics Driver


-
FFMPEG API Mp4 H264 Encoding/Muxing - unspecified pixel format
28 juillet 2020, par Fabrice — I'm working on a c++ project using ffmpeg. I have to generate an mp4 file with h264 encoding.


My problem is that the file generate but when reading the file with VLC I've got no image, and analyzing it with ffprobe give me (log below) the error :




unspecified pixel format




ffprobe version N-93020-g3224d6691c Copyright (c) 2007-2019 the FFmpeg developers
 built with gcc 8.2.1 (GCC) 20181201
 configuration: --disable-static --enable-shared --enable-gpl --enable-version3 --enable-sdl2 --enable-fontconfig --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libtheora --enable-libtwolame --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libzimg --enable-lzma --enable-zlib --enable-gmp --enable-libvidstab --enable-libvorbis --enable-libvo-amrwbenc --enable-libmysofa --enable-libspeex --enable-libxvid --enable-libaom --enable-libmfx --enable-amf --enable-ffnvcodec --enable-cuvid --enable-d3d11va --enable-nvenc --enable-nvdec --enable-dxva2 --enable-avisynth --enable-libopenmpt
 libavutil 56. 26.100 / 56. 26.100
 libavcodec 58. 44.100 / 58. 44.100
 libavformat 58. 26.100 / 58. 26.100
 libavdevice 58. 6.101 / 58. 6.101
 libavfilter 7. 48.100 / 7. 48.100
 libswscale 5. 4.100 / 5. 4.100
 libswresample 3. 4.100 / 3. 4.100
 libpostproc 55. 4.100 / 55. 4.100
[h264 @ 02a46240] non-existing PPS 0 referenced
[h264 @ 02a46240] decode_slice_header error
[h264 @ 02a46240] no frame!
...
[h264 @ 02a46240] non-existing PPS 0 referenced
[h264 @ 02a46240] decode_slice_header error
[h264 @ 02a46240] no frame!
[mov,mp4,m4a,3gp,3g2,mj2 @ 02a35380] decoding for stream 0 failed
[mov,mp4,m4a,3gp,3g2,mj2 @ 02a35380] Could not find codec parameters for stream 0 (Video: h264 (avc1 / 0x31637661), none, 352x288, 320 kb/s): unspecified pixel format
Consider increasing the value for the 'analyzeduration' and 'probesize' options
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'C:\Users\Fabrice\Desktop\video\Test.mp4':
 Metadata:
 major_brand : isom
 minor_version : 512
 compatible_brands: isomiso2avc1mp41
 encoder : Lavf58.26.100
 Duration: 00:00:09.00, start: 0.000000, bitrate: 323 kb/s
 Stream #0:0(und): Video: h264 (avc1 / 0x31637661), none, 352x288, 320 kb/s, 25.11 fps, 25 tbr, 12800 tbn, 25600 tbc (default)
 Metadata:
 handler_name : VideoHandler



Here is the code I use to genererate my mp4 File, it's based on sample from ffmpeg (see : FFMPEG Muxing sample). I have tried to adapt it without using deprecated function. It works using webm/vp8 encoding, but not mp4/h264.


#include 
#include 
#include 
extern "C"
{
#include <libavcodec></libavcodec>avcodec.h>
#include <libswscale></libswscale>swscale.h>
#include <libavformat></libavformat>avformat.h>
#include <libavutil></libavutil>error.h> 
#include <libavutil></libavutil>opt.h>
#include <libavutil></libavutil>imgutils.h>
}

#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")

/* 10 seconds stream duration */
#define STREAM_DURATION 10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */

//#pragma warning(disable : 4996) // TODO: remove

/* Scaler quality used if a pixel-format conversion is ever needed. */
static int sws_flags = SWS_BICUBIC;

/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *formatContext, AVCodec **codec, enum AVCodecID codecId, AVCodecContext **codecCtx)
{
 AVStream *stream;

 // Get the encoder codec
 *codec = avcodec_find_encoder(codecId);
 if (!(*codec)) {
 fprintf(stderr, "Could not find encoder for '%s'\n",
 avcodec_get_name(codecId));
 exit(1);
 }

 // Get the stream for codec
 stream = avformat_new_stream(formatContext, *codec);
 if (!stream) {
 fprintf(stderr, "Could not allocate stream\n");
 exit(1);
 }
 stream->id = formatContext->nb_streams - 1;

 (*codecCtx) = avcodec_alloc_context3(*codec);

 switch ((*codec)->type) {
 case AVMEDIA_TYPE_VIDEO:
 stream->codecpar->codec_id = codecId;
 stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 stream->codecpar->bit_rate = 400000;
 stream->codecpar->width = 352;
 stream->codecpar->height = 288;
 stream->codecpar->format = STREAM_PIX_FMT;
 stream->time_base = { 1, STREAM_FRAME_RATE };

 avcodec_parameters_to_context((*codecCtx), stream->codecpar);
 (*codecCtx)->gop_size = 12; /* emit one intra frame every twelve frames at most */
 (*codecCtx)->max_b_frames = 2;
 (*codecCtx)->time_base = { 1, STREAM_FRAME_RATE };
 if ((*codecCtx)->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
 /* Needed to avoid using macroblocks in which some coeffs overflow.
 * This does not happen with normal video, it just happens here as
 * the motion of the chroma plane does not match the luma plane. */
 (*codecCtx)->mb_decision = 2;
 }
 break;

 default:
 break;
 }
 
 //if (stream->codecpar->codec_id == AV_CODEC_ID_H264) {
 // av_opt_set(codecCtx, "preset", "ultrafast", 0);
 //}
 //(*codecCtx)->flags |= AV_CODEC_FLAG_LOW_DELAY;

 /* Some formats want stream headers to be separate. */
 if (formatContext->oformat->flags & AVFMT_GLOBALHEADER)
 (*codecCtx)->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;


 int ret = avcodec_parameters_from_context(stream->codecpar, (*codecCtx));
 if (ret < 0) {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "avcodec_parameters_from_context returned (%d) - %s", ret, error);
 return false;
 }

 return stream;
}

/**************************************************************/
/* video output */

/* Reusable frame filled by fill_yuv_image() and sent to the encoder. */
static AVFrame *frame_video;
/* Index of the next frame to encode; also drives the test pattern. */
static int frame_count;

/*
 * Open the encoder and allocate the reusable source frame.
 * Exits the process on failure.
 */
static void open_video(AVCodec *codec, AVStream *stream, AVCodecContext *codecCtx)
{
    int ret;

    /* open the codec */
    ret = avcodec_open2(codecCtx, codec, NULL);
    if (ret < 0) {
        char error[255];
        av_strerror(ret, error, 255);
        fprintf(stderr, "Could not open video codec: %s\n", error);
        exit(1);
    }

    /* BUG FIX: copy the codec parameters to the stream AFTER avcodec_open2().
     * With AV_CODEC_FLAG_GLOBAL_HEADER the H.264 encoder only fills in its
     * extradata (SPS/PPS) during open; add_stream() copied codecpar before
     * that, so the mp4 muxer wrote an avc1 box without parameter sets and
     * players reported "unspecified pixel format" / "non-existing PPS 0".
     * This is the same ordering the official muxing.c example uses. */
    ret = avcodec_parameters_from_context(stream->codecpar, codecCtx);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }

    /* allocate and init a re-usable frame */
    frame_video = av_frame_alloc();
    if (!frame_video) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    frame_video->format = codecCtx->pix_fmt;
    frame_video->width = codecCtx->width;
    frame_video->height = codecCtx->height;

    /* 32-byte alignment; the buffer is refcounted and owned by the frame. */
    ret = av_frame_get_buffer(frame_video, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate the video frame data\n");
        exit(1);
    }
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
 int x, y, i;

 i = frame_index;

 /* Y */
 for (y = 0; y < height; y++)
 for (x = 0; x < width; x++)
 pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

 /* Cb and Cr */
 for (y = 0; y < height / 2; y++) {
 for (x = 0; x < width / 2; x++) {
 pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
 pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
 }
 }
}

// NOTE(review): `timestamp` is never used anywhere in this program.
int timestamp = 0;
/*
 * Generate one test frame, encode it (send_frame/receive_packet) and write
 * the resulting packet to the muxer. Exits the process on encode/mux errors.
 * NOTE(review): packets the encoder buffers (B-frame latency) are only
 * retrieved one per call and are never flushed at end of stream.
 */
static void write_video_frame(AVFormatContext *formatContext, AVStream *stream, AVCodecContext *codecCtx)
{
 int ret;
 static struct SwsContext *sws_ctx;

 if (frame_count >= STREAM_NB_FRAMES) {
 /* No more frames to compress. The codec has a latency of a few
 * frames if using B-frames, so we get the last frames by
 * passing the same picture again. */
 }
 else {
 if (codecCtx->pix_fmt != AV_PIX_FMT_YUV420P) {
 /* as we only generate a YUV420P picture, we must convert it
 * to the codec pixel format if needed */
 if (!sws_ctx) {
 sws_ctx = sws_getContext(codecCtx->width, codecCtx->height, AV_PIX_FMT_YUV420P,
 codecCtx->width, codecCtx->height, codecCtx->pix_fmt,
 sws_flags, NULL, NULL, NULL);
 if (!sws_ctx) {
 fprintf(stderr, "Could not initialize the conversion context\n");
 exit(1);
 }
 }
 fill_yuv_image(frame_video, frame_count, codecCtx->width, codecCtx->height);
 /* NOTE(review): source and destination here are the SAME buffers —
 * sws_scale does not support in-place conversion; a second frame
 * should be used if this path is ever taken. */
 sws_scale(sws_ctx, (const uint8_t * const *)frame_video->data, frame_video->linesize,
 0, codecCtx->height, frame_video->data, frame_video->linesize);
 }
 else {
 fill_yuv_image(frame_video, frame_count, codecCtx->width, codecCtx->height);
 }
 }

 frame_video->format = AV_PIX_FMT_YUV420P;
 frame_video->width = codecCtx->width;
 frame_video->height = codecCtx->height;

 /* NOTE(review): 0x0020 is the value of the removed AVFMT_RAWPICTURE flag —
 * presumably hard-coded because newer FFmpeg no longer defines it; verify. */
 if (formatContext->oformat->flags & 0x0020) {
 /* Raw video case - directly store the picture in the packet */
 AVPacket pkt;
 av_init_packet(&pkt);

 pkt.flags |= AV_PKT_FLAG_KEY;
 pkt.stream_index = stream->index;
 pkt.data = frame_video->data[0];
 /* NOTE(review): sizeof(AVPicture) is the size of the STRUCT, not of the
 * pixel data — this writes the wrong byte count. */
 pkt.size = sizeof(AVPicture);

 ret = av_interleaved_write_frame(formatContext, &pkt);
 }
 else {
 AVPacket pkt = { 0 };
 av_init_packet(&pkt);

 /* encode the image */
 fprintf(stderr, "\nFrame type : %c\n", av_get_picture_type_char(frame_video->pict_type));
 fprintf(stderr, "Frame pts: %lld, \n", frame_video->pts);
 fprintf(stderr, "Codec timebase: %d/%d\n", codecCtx->time_base.num, codecCtx->time_base.den);
 fprintf(stderr, "Stream timebase: %d/%d\n", stream->time_base.num, stream->time_base.den);
 fprintf(stderr, "Resacale: %lld, \n\n", av_rescale_q(1, codecCtx->time_base, stream->time_base));
 ret = avcodec_send_frame(codecCtx, frame_video);
 if (ret < 0) {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Error encoding video frame: %s\n", error);
 exit(1);
 }
 /* If size is zero, it means the image was buffered. */
 /* NOTE(review): AVERROR(EAGAIN) from receive_packet is treated as
 * success below (ret forced to 0), which is why buffered packets are
 * silently dropped rather than drained in a loop. */
 ret = avcodec_receive_packet(codecCtx, &pkt);
 if (!ret && pkt.size) {
 pkt.stream_index = stream->index;
 fprintf(stderr, "Packet flags : %d\n", pkt.flags);
 fprintf(stderr, "Packet pts: %lld\n", pkt.pts);
 fprintf(stderr, "Packet dts: %lld\n", pkt.dts);
 fprintf(stderr, "Packet duration: %lld\n", pkt.duration);
 fprintf(stderr, "Packet pos: %lld\n\n", pkt.pos);
 
 /* Write the compressed frame to the media file. */
 ret = av_interleaved_write_frame(formatContext, &pkt);
 }
 else {
 ret = 0;
 }
 }
 if (ret != 0) {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Error while writing video frame: %s\n", error);
 exit(1);
 }
 frame_count++;
}

/*
 * Free the reusable encode frame.
 * BUG FIX: the frame's buffer was allocated with av_frame_get_buffer(), so it
 * is a refcounted buffer owned by the frame; the original's
 * av_free(frame->data[0]) + av_free(frame) freed memory FFmpeg still owns and
 * leaked the AVBufferRefs. av_frame_free() releases both correctly.
 */
static void close_video(AVFormatContext *oc, AVStream *st)
{
    av_frame_free(&frame_video);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
 // The outputed media
 char filename[100];
 const char *mediaFormat = "mp4"; AVCodecID mediaVideoCodec = AV_CODEC_ID_H264;
 //const char *mediaFormat="webm"; AVCodecID mediaVideoCodec = AV_CODEC_ID_VP8;
 AVOutputFormat *formatOut;
 AVFormatContext *formatCtx;

 // The video stream
 AVStream *stream_video;
 AVCodec *codec_video = nullptr;
 AVCodecContext *codecCtx_video = nullptr;
 double time_video = 0;

 // Return code
 int ret;

 strcpy_s(filename, "C:\\Test.");
 strcat_s(filename, mediaFormat);

 // allocate the output media context
 avformat_alloc_output_context2(&formatCtx, NULL, NULL, filename);
 if (!formatCtx) {
 return 1;
 }
 formatOut = formatCtx->oformat;

 // Add the video stream using H264 codec
 stream_video = NULL;
 stream_video = add_stream(formatCtx, &codec_video, mediaVideoCodec, &codecCtx_video);

 // Open video codec and allocate the necessary encode buffers
 if (stream_video)
 open_video(codec_video, stream_video, codecCtx_video);

 av_dump_format(formatCtx, 0, filename, 1);

 // Open the output media file, if needed
 if (!(formatOut->flags & AVFMT_NOFILE)) {
 ret = avio_open(&formatCtx->pb, filename, AVIO_FLAG_WRITE);
 if (ret < 0) {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Could not open '%s': %s\n", filename, error);
 return 1;
 }
 }

 // Write media header
 ret = avformat_write_header(formatCtx, NULL);
 if (ret < 0) {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Error occurred when opening output file: %s\n", error);
 return 1;
 }

 if (frame_video)
 frame_video->pts = 0;
 for (;;) {
 // Compute video time from last added video frame
 time_video = ((double)frame_video->pts) * av_q2d(stream_video->time_base);

 // Stop media if enough time
 if (!stream_video || time_video >= STREAM_DURATION)
 break;

 // Add a video frame
 write_video_frame(formatCtx, stream_video, codecCtx_video);

 // Increase frame pts according to time base
 frame_video->pts += av_rescale_q(1, codecCtx_video->time_base, stream_video->time_base);
 }

 // Write media trailer
 av_write_trailer(formatCtx);

 /* Close each codec. */
 if (stream_video)
 close_video(formatCtx, stream_video);

 if (!(formatOut->flags & AVFMT_NOFILE))
 /* Close the output file. */
 avio_close(formatCtx->pb);

 /* free the stream */
 avformat_free_context(formatCtx);

 return 0;
}



What am I missing ? Which part give me this error ?