
Media (91)
-
Chuck D with Fine Arts Militia - No Meaning No
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Paul Westerberg - Looking Up in Heaven
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Le Tigre - Fake French
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Thievery Corporation - DC 3000
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Dan the Automator - Relaxation Spa Treatment
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Gilberto Gil - Oslodum
15 September 2011
Updated: September 2011
Language: English
Type: Audio
Other articles (56)
-
Sites built with MediaSPIP
2 May 2011. This page presents some of the sites running MediaSPIP.
You can of course add your own via the form at the bottom of the page.
-
Authorizations overridden by plugins
27 April 2010. MediaSPIP core:
autoriser_auteur_modifier() so that visitors are able to edit their information on the authors page
-
Publishing on MediaSPIP
13 June 2013. Can I post content from an iPad tablet?
Yes, if your MediaSPIP installation is at version 0.2 or later. If in doubt, contact the administrator of your MediaSPIP site to find out.
On other sites (9696)
-
Multiple FFmpeg h264_nvenc instances crash during release
13 August 2024, by yang zhao. Using FFmpeg, when multiple threads each run their own h264_nvenc instance (one instance per thread), a crash occurs during release (avcodec_free_context), and the final exception surfaces in libnvcuvid.so.
I don't know what the reason is. Please help, thanks.
The same problem exists with ffmpeg v5.0.1 + CUDA v11.6 and with ffmpeg v7.0.1 + CUDA v12.2.
Operating system: Ubuntu 22.04.4 LTS
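If the crash indeed comes from several threads creating and destroying NVENC encoders at the same time, one mitigation worth trying is to serialize encoder open/close behind a process-wide lock. A minimal sketch, assuming the fault is a race in the NVENC unload path; the guarded_* wrappers are illustrative, not FFmpeg API:

// Hedged mitigation sketch: serialize encoder setup/teardown across threads.
#include <mutex>
extern "C" {
#include <libavcodec/avcodec.h>
}

static std::mutex g_nvenc_lifecycle_mutex; // illustrative name

int guarded_avcodec_open2(AVCodecContext* ctx, const AVCodec* codec) {
    std::lock_guard<std::mutex> lock(g_nvenc_lifecycle_mutex);
    return avcodec_open2(ctx, codec, nullptr);
}

void guarded_avcodec_free(AVCodecContext** ctx) {
    std::lock_guard<std::mutex> lock(g_nvenc_lifecycle_mutex);
    avcodec_free_context(ctx); // the call that crashes when run concurrently
}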


The specific code is as follows:


#include <cstdio>
#include <cstring>
#include <thread>
#include <unistd.h>
#include <sys/time.h>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
}

class NvencEncoder {
public:
 NvencEncoder() {}
 ~NvencEncoder() { Close(); }
 
 bool Open() {
 auto encoder = avcodec_find_encoder_by_name("h264_nvenc");
 pCodecCtx_ = avcodec_alloc_context3(encoder);
 if (!pCodecCtx_)
 return false;

 int width = 1920;
 int height = 1080;
 int bitrate = 1000000;
 
 pCodecCtx_->codec_type = AVMEDIA_TYPE_VIDEO;
 pCodecCtx_->pix_fmt = AV_PIX_FMT_YUV420P;
 pCodecCtx_->width = width;
 pCodecCtx_->height = height;
 pCodecCtx_->bit_rate = bitrate;
 pCodecCtx_->rc_min_rate = bitrate;
 pCodecCtx_->rc_max_rate = bitrate;
 pCodecCtx_->bit_rate_tolerance = bitrate;
 pCodecCtx_->rc_buffer_size = bitrate / 2;
 pCodecCtx_->time_base = AVRational{ 1, 90000 };
 pCodecCtx_->framerate = AVRational{ 25, 1 };
 pCodecCtx_->gop_size = 50;
 pCodecCtx_->max_b_frames = 0;
 pCodecCtx_->delay = 0;
 pCodecCtx_->refs = 2;
 pCodecCtx_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

 av_opt_set_int(pCodecCtx_->priv_data, "gpu", 0, 0);
 av_opt_set(pCodecCtx_->priv_data, "preset", "llhp", 0);
 av_opt_set(pCodecCtx_->priv_data, "rc", "cbr", 0);
 av_opt_set(pCodecCtx_->priv_data, "profile", "main", 0);
 av_opt_set(pCodecCtx_->priv_data, "zerolatency", "1", 0);
 av_opt_set(pCodecCtx_->priv_data, "delay", "0", 0);
 av_opt_set(pCodecCtx_->priv_data, "preset", "medium", 0);

 int ret = avcodec_open2(pCodecCtx_, encoder, nullptr);
 if (ret < 0)
 return false;

 pkt_ = av_packet_alloc();
 if (!pkt_)
 return false;

 char output_mp4[] = "output.mp4";
 ret = avformat_alloc_output_context2(&avMp4Context_, NULL, "mp4", output_mp4);
 if (ret < 0)
 return false;
 
 mp4_stream_ = avformat_new_stream(avMp4Context_, nullptr);
 if (!mp4_stream_)
 return false;

 ret = avcodec_parameters_from_context(mp4_stream_->codecpar, pCodecCtx_);
 if (ret < 0)
 return false;
 
 mp4_stream_->codecpar->codec_tag = 0;

 if (!(avMp4Context_->oformat->flags & AVFMT_NOFILE)) {
 ret = avio_open(&avMp4Context_->pb, output_mp4, AVIO_FLAG_WRITE);
 if (ret < 0) {
 return false;
 }
 }
 return true;
 }

 void Close() {
 if (pCodecCtx_)
 avcodec_free_context(&pCodecCtx_); // Crash will occur in libnvcuvid.so

 if (avMp4Context_) {
 if (avMp4Context_->oformat && !(avMp4Context_->oformat->flags & AVFMT_NOFILE)) {
 avio_closep(&avMp4Context_->pb);
 }
 avformat_free_context(avMp4Context_);
 avMp4Context_ = nullptr;
 }
 
 if (pkt_)
 av_packet_free(&pkt_);
 }

 bool InputFrame(AVFrame* frame) {
 int ret = avcodec_send_frame(pCodecCtx_, frame);
 if (ret < 0)
 return false;
 
 while (ret >= 0) {
 ret = avcodec_receive_packet(pCodecCtx_, pkt_);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
 ret = 0; // no packet available yet; not an error
 break;
 }
 if (ret < 0)
 break;

 if (avNotHeadWrited_) {
 ret = avformat_write_header(avMp4Context_, nullptr);
 if (ret < 0) {
 av_packet_unref(pkt_);
 break;
 }
 avNotHeadWrited_ = false;
 }

 av_packet_rescale_ts(pkt_, pCodecCtx_->time_base, mp4_stream_->time_base);
 ret = av_write_frame(avMp4Context_, pkt_);
 if (ret < 0) {
 av_packet_unref(pkt_);
 break;
 }

 av_packet_unref(pkt_);
 }
 
 return (ret >= 0);
 }
private:
 AVPacket* pkt_ = nullptr;
 AVCodecContext* pCodecCtx_ = nullptr;
 AVFormatContext* avMp4Context_ = nullptr;
 AVStream* mp4_stream_ = nullptr;
 bool avNotHeadWrited_ = true;
};

uint8_t* data = nullptr; //a frame of yuv420 data
void Run(int idx);

int main() {
 //Fill a frame of yuv420 data here
 ...

 std::thread th[3];
 for (int i = 0; i < 3; i++) {
 th[i] = std::thread(Run, i);
 sleep(3);
 }

 sleep(35);

 for (int i = 0; i < 3; i++) {
 if (th[i].joinable()) {
 printf("thread %d join()\n", i);
 th[i].join();
 }
 }

 free(data);
 printf("Exit\n");
}

void Run(int idx) {
 printf("Run() thread(%d)\n", idx);
 //cudaSetDevice(0);

 auto nvenc = new NvencEncoder();
 if (!nvenc->Open()) {
 delete nvenc;
 return;
 }

 auto avframe_ = av_frame_alloc();
 avframe_->width = 1920;
 avframe_->height = 1080;
 avframe_->format = AV_PIX_FMT_YUV420P;

 int ret = av_frame_get_buffer(avframe_, 0);
 if (ret < 0) {
 printf("av_frame_get_buffer() is error %d\n", ret);
 delete nvenc;
 av_frame_free(&avframe_);
 return;
 }

 int frame_size = 1920 * 1080;
 double one_frame_us = 1000000.0 / 25.0;
 unsigned long frame_count = 0;
 struct timeval t1, t2;
 double timeuse;

 AVRational timebase = { 1, 90000 }; // matches the encoder's time_base above
 std::int64_t llCalcDuration = (double)AV_TIME_BASE / 25.0;
 double in_stream_timebase = av_q2d(timebase);
 std::int64_t duration = (double)llCalcDuration / (double)(in_stream_timebase * AV_TIME_BASE);
 avframe_->time_base = timebase;
 gettimeofday(&t1, NULL);

 while (frame_count < 25*30) { //30 seconds

 avframe_->pts = (double)(frame_count * llCalcDuration) / (double(in_stream_timebase * AV_TIME_BASE));
 //avframe_->duration = duration;
 frame_count++;

 ret = av_frame_make_writable(avframe_);
 if (ret < 0) {
 printf("av_frame_make_writable() is error %d\n", ret);
 break;
 }

 // copy YUV420
 memcpy(avframe_->data[0], data, frame_size);
 memcpy(avframe_->data[1], data + frame_size, frame_size / 4);
 memcpy(avframe_->data[2], data + frame_size * 5 / 4, frame_size / 4);

 ret = nvenc->InputFrame(avframe_);
 if (ret < 0) {
 printf("InputFrame() is error: %d\n", ret);
 break;
 }

 // frame rate
 gettimeofday(&t2, NULL);
 timeuse = (t2.tv_sec - t1.tv_sec) * 1000000 + (t2.tv_usec - t1.tv_usec); //us
 if (timeuse < one_frame_us) {
 usleep(one_frame_us - timeuse);
 }
 gettimeofday(&t1, NULL);
 }

 if (frame_count > 0) {
 nvenc->WriteTrailer(); // calls av_write_trailer(avMp4Context_); implementation not shown
 }

 printf("do Close() thread(%d)\n", idx);
 nvenc->Close(); // Crash will occur
 printf("Closed thread(%d)\n", idx);
 delete nvenc;
 av_frame_free(&avframe_);
}
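Worth noting: Close() frees the encoder without draining it first, and FFmpeg's send/receive API expects a NULL frame to switch the encoder into draining mode before teardown. A hedged sketch of the missing flush, written as a hypothetical extra member of the class above, to be called before av_write_trailer() and Close():

// Hedged sketch: drain the encoder before freeing it. Send a NULL frame
// to enter draining mode, then pull packets until AVERROR_EOF.
// Assumes the pCodecCtx_/pkt_/avMp4Context_/mp4_stream_ members above.
void NvencEncoder::FlushEncoder() {
    int ret = avcodec_send_frame(pCodecCtx_, nullptr); // enter draining mode
    while (ret >= 0) {
        ret = avcodec_receive_packet(pCodecCtx_, pkt_);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;
        av_packet_rescale_ts(pkt_, pCodecCtx_->time_base, mp4_stream_->time_base);
        ret = av_write_frame(avMp4Context_, pkt_);
        av_packet_unref(pkt_);
    }
}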



-
Need help using libavfilter for adding overlay to frames [closed]
30 July 2024, by Michael Werner. Hello gentlemen and ladies,


I am working with libavfilter and I am getting crazy.


On Windows 11, with the latest libav (full build), a C/C++ app reads YUV420P frames from a frame grabber card.


I want to draw a bitmap (BGR24) overlay image from a file onto every frame via libavfilter. First I convert the BGR24 overlay image to YUV420P via the format filter. Then I feed the YUV420P frame from the frame grabber and the YUV420P overlay into the overlay filter.


Everything seems to be fine, but when I try to get the frame out of the filter graph I always get a "Resource temporarily unavailable" (EAGAIN) return code, regardless of how many frames I put into the graph.


The frames from the frame grabber card are fine; I can encode them or write them to a .yuv file. The overlay frame looks fine too.


The initialization does not report any errors or warnings, but when I try to get the filtered frame out of the graph via av_buffersink_get_frame I always get an EAGAIN return code.

Here is my current initialization code:


extern "C" {
#include <libavfilter/avfilter.h>
}
#include <cerrno>
#include <cstdio>

int init_overlay_filter(AVFilterGraph** graph, AVFilterContext** src_ctx, AVFilterContext** overlay_src_ctx,
 AVFilterContext** sink_ctx)
{
 AVFilterGraph* filter_graph;
 AVFilterContext* buffersrc_ctx;
 AVFilterContext* overlay_buffersrc_ctx;
 AVFilterContext* buffersink_ctx;
 AVFilterContext* overlay_ctx;
 AVFilterContext* format_ctx;
 const AVFilter *buffersrc, *buffersink, *overlay_buffersrc, *overlay_filter, *format_filter;
 int ret;

 // Create the filter graph
 filter_graph = avfilter_graph_alloc();
 if (!filter_graph)
 {
 fprintf(stderr, "Unable to create filter graph.\n");
 return AVERROR(ENOMEM);
 }

 // Create buffer source filter for main video
 buffersrc = avfilter_get_by_name("buffer");
 if (!buffersrc)
 {
 fprintf(stderr, "Unable to find buffer filter.\n");
 return AVERROR_FILTER_NOT_FOUND;
 }

 // Create buffer source filter for overlay image
 overlay_buffersrc = avfilter_get_by_name("buffer");
 if (!overlay_buffersrc)
 {
 fprintf(stderr, "Unable to find buffer filter.\n");
 return AVERROR_FILTER_NOT_FOUND;
 }

 // Create buffer sink filter
 buffersink = avfilter_get_by_name("buffersink");
 if (!buffersink)
 {
 fprintf(stderr, "Unable to find buffersink filter.\n");
 return AVERROR_FILTER_NOT_FOUND;
 }

 // Create overlay filter
 overlay_filter = avfilter_get_by_name("overlay");
 if (!overlay_filter)
 {
 fprintf(stderr, "Unable to find overlay filter.\n");
 return AVERROR_FILTER_NOT_FOUND;
 }

 // Create format filter
 format_filter = avfilter_get_by_name("format");
 if (!format_filter) 
 {
 fprintf(stderr, "Unable to find format filter.\n");
 return AVERROR_FILTER_NOT_FOUND;
 }

 // Initialize the main video buffer source
 char args[512];
 snprintf(args, sizeof(args),
 "video_size=1920x1080:pix_fmt=yuv420p:time_base=1/25:pixel_aspect=1/1");
 ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph);
 if (ret < 0)
 {
 fprintf(stderr, "Unable to create buffer source filter for main video.\n");
 return ret;
 }

 // Initialize the overlay buffer source
 snprintf(args, sizeof(args),
 "video_size=165x165:pix_fmt=bgr24:time_base=1/25:pixel_aspect=1/1");
 ret = avfilter_graph_create_filter(&overlay_buffersrc_ctx, overlay_buffersrc, "overlay_in", args, NULL,
 filter_graph);
 if (ret < 0)
 {
 fprintf(stderr, "Unable to create buffer source filter for overlay.\n");
 return ret;
 }

 // Initialize the format filter to convert overlay image to yuv420p
 snprintf(args, sizeof(args), "pix_fmts=yuv420p");
 ret = avfilter_graph_create_filter(&format_ctx, format_filter, "format", args, NULL, filter_graph);

 if (ret < 0) 
 {
 fprintf(stderr, "Unable to create format filter.\n");
 return ret;
 }

 // Initialize the buffer sink
 ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph);
 if (ret < 0)
 {
 fprintf(stderr, "Unable to create buffer sink filter.\n");
 return ret;
 }

 // Initialize the overlay filter
 ret = avfilter_graph_create_filter(&overlay_ctx, overlay_filter, "overlay", "W-w:H-h:enable='between(t,0,20)':format=yuv420", NULL, filter_graph);
 if (ret < 0)
 {
 fprintf(stderr, "Unable to create overlay filter.\n");
 return ret;
 }

 // Connect the filters
 ret = avfilter_link(overlay_buffersrc_ctx, 0, format_ctx, 0);
 if (ret >= 0)
 ret = avfilter_link(buffersrc_ctx, 0, overlay_ctx, 0);
 if (ret >= 0)
 ret = avfilter_link(format_ctx, 0, overlay_ctx, 1);
 if (ret >= 0)
 ret = avfilter_link(overlay_ctx, 0, buffersink_ctx, 0);
 if (ret < 0)
 {
 fprintf(stderr, "Unable to link filter graph.\n");
 return ret;
 }

 // Configure the filter graph
 if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
 {
 fprintf(stderr, "Unable to configure filter graph.\n");
 return ret;
 }

 *graph = filter_graph;
 *src_ctx = buffersrc_ctx;
 *overlay_src_ctx = overlay_buffersrc_ctx;
 *sink_ctx = buffersink_ctx;

 return 0;
}
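For comparison, the same topology can be built from a single filtergraph description, which lets libavfilter do the linking and the automatic pixel-format insertion seen in the log below. A hedged alternative sketch; the [main]/[ovl] labels and the function name are chosen here, not fixed API names:

// Hedged alternative sketch: build the graph from one description string.
extern "C" {
#include <libavfilter/avfilter.h>
}
#include <cerrno>

int init_overlay_filter_from_string(AVFilterGraph** out_graph)
{
    const char* desc =
        "buffer=video_size=1920x1080:pix_fmt=yuv420p:time_base=1/25:pixel_aspect=1/1 [main];"
        "buffer=video_size=165x165:pix_fmt=bgr24:time_base=1/25:pixel_aspect=1/1 [ovl];"
        "[ovl] format=yuv420p [ovl_yuv];"
        "[main][ovl_yuv] overlay=W-w:H-h:enable='between(t,0,20)':format=yuv420 [out];"
        "[out] buffersink";

    AVFilterGraph* g = avfilter_graph_alloc();
    if (!g)
        return AVERROR(ENOMEM);

    AVFilterInOut* inputs = NULL;
    AVFilterInOut* outputs = NULL;
    int ret = avfilter_graph_parse2(g, desc, &inputs, &outputs);
    if (ret >= 0)
        ret = avfilter_graph_config(g, NULL);

    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    if (ret < 0) {
        avfilter_graph_free(&g);
        return ret;
    }
    *out_graph = g;
    return 0;
}

The parser auto-names the created instances (here presumably Parsed_buffer_0 and Parsed_buffer_1); their contexts should be retrievable with avfilter_graph_get_filter() for feeding frames.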



Feeding the filter graph is done this way:


av_buffersrc_add_frame_flags(buffersrc_ctx, pFrameGrabberFrame, AV_BUFFERSRC_FLAG_KEEP_REF);
av_buffersink_get_frame(buffersink_ctx, filtered_frame);



av_buffersink_get_frame always returns EAGAIN, no matter how many frames I feed into the graph. The frames themselves (from the frame grabber and the overlay) look fine.
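One detail stands out in the snippet above: only the main buffer source is ever fed. overlay is a two-input filter driven by framesync (visible at the end of the log below), so the sink keeps returning EAGAIN until both inputs have a frame covering the same timestamp. A hedged sketch of feeding both inputs with matching PTS before draining; push_and_pull is an illustrative name:

// Hedged sketch: overlay needs frames on BOTH of its inputs before the
// sink can produce output. Feed main + overlay with matching PTS.
extern "C" {
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
}

int push_and_pull(AVFilterContext* src_ctx, AVFilterContext* overlay_src_ctx,
                  AVFilterContext* sink_ctx, AVFrame* main_frame,
                  AVFrame* overlay_frame, AVFrame* filtered_frame, int64_t pts)
{
    main_frame->pts = pts;     // time_base 1/25, as configured above
    overlay_frame->pts = pts;  // same timestamp so framesync can pair them

    int ret = av_buffersrc_add_frame_flags(src_ctx, main_frame,
                                           AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret < 0)
        return ret;
    ret = av_buffersrc_add_frame_flags(overlay_src_ctx, overlay_frame,
                                       AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret < 0)
        return ret;

    // EAGAIN here means "feed more input", not a hard error.
    return av_buffersink_get_frame(sink_ctx, filtered_frame);
}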

I set the libav logging level to maximum, but I do not see any warnings, errors, or helpful related information in the log.


Here is the log output related to the filter configuration:


[in @ 00000288ee494f40] Setting 'video_size' to value '1920x1080'
[in @ 00000288ee494f40] Setting 'pix_fmt' to value 'yuv420p'
[in @ 00000288ee494f40] Setting 'time_base' to value '1/25'
[in @ 00000288ee494f40] Setting 'pixel_aspect' to value '1/1'
[in @ 00000288ee494f40] w:1920 h:1080 pixfmt:yuv420p tb:1/25 fr:0/1 sar:1/1 csp:unknown range:unknown
[overlay_in @ 00000288ff1013c0] Setting 'video_size' to value '165x165'
[overlay_in @ 00000288ff1013c0] Setting 'pix_fmt' to value 'bgr24'
[overlay_in @ 00000288ff1013c0] Setting 'time_base' to value '1/25'
[overlay_in @ 00000288ff1013c0] Setting 'pixel_aspect' to value '1/1'
[overlay_in @ 00000288ff1013c0] w:165 h:165 pixfmt:bgr24 tb:1/25 fr:0/1 sar:1/1 csp:unknown range:unknown
[format @ 00000288ff1015c0] Setting 'pix_fmts' to value 'yuv420p'
[overlay @ 00000288ff101880] Setting 'x' to value 'W-w'
[overlay @ 00000288ff101880] Setting 'y' to value 'H-h'
[overlay @ 00000288ff101880] Setting 'enable' to value 'between(t,0,20)'
[overlay @ 00000288ff101880] Setting 'format' to value 'yuv420'
[auto_scale_0 @ 00000288ff101ec0] w:iw h:ih flags:'' interl:0
[format @ 00000288ff1015c0] auto-inserting filter 'auto_scale_0' between the filter 'overlay_in' and the filter 'format'
[auto_scale_1 @ 00000288ee4a4cc0] w:iw h:ih flags:'' interl:0
[overlay @ 00000288ff101880] auto-inserting filter 'auto_scale_1' between the filter 'format' and the filter 'overlay'
[AVFilterGraph @ 00000288ee495c80] query_formats: 5 queried, 6 merged, 6 already done, 0 delayed
[auto_scale_0 @ 00000288ff101ec0] w:165 h:165 fmt:bgr24 csp:gbr range:pc sar:1/1 -> w:165 h:165 fmt:yuv420p csp:unknown range:unknown sar:1/1 flags:0x00000004
[auto_scale_1 @ 00000288ee4a4cc0] w:165 h:165 fmt:yuv420p csp:unknown range:unknown sar:1/1 -> w:165 h:165 fmt:yuva420p csp:unknown range:unknown sar:1/1 flags:0x00000004
[overlay @ 00000288ff101880] main w:1920 h:1080 fmt:yuv420p overlay w:165 h:165 fmt:yuva420p
[overlay @ 00000288ff101880] [framesync @ 00000288ff1019a8] Selected 1/25 time base
[overlay @ 00000288ff101880] [framesync @ 00000288ff1019a8] Sync level 2



-
FFmpeg.Autogen: Issue with Zero-Sized Atom Boxes in MP4 Output
16 June 2024, by Alexander Jansson. I just started learning ffmpeg using the FFmpeg.Autogen wrapper version 5.1 in C#, with ffmpeg shared libs version 5.1. I'm trying to build a class that screen-records using gdigrab and produces streamable MP4 to a buffer/event. Everything seems to work as expected, with no errors, except that the output stream produces atom boxes of size 0 (and hence a small file); no data seems to be produced in the boxes. The "debug test mp4 file" was analyzed with MP4Box, and the box info is provided in the thread.


To be more specific: why does this code produce empty atom boxes, and can someone edit my code so that the output actually contains frame data from gdigrab?


Code:


using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;
using FFmpeg.AutoGen;

public unsafe class ScreenStreamer : IDisposable
 {
 private readonly AVCodec* productionCodec;
 private readonly AVCodec* screenCaptureAVCodec;
 private readonly AVCodecContext* productionAVCodecContext;
 private readonly AVFormatContext* productionFormatContext;
 private readonly AVCodecContext* screenCaptureAVCodecContext;
 private readonly AVDictionary* productionAVCodecOptions;
 private readonly AVInputFormat* screenCaptureInputFormat;
 private readonly AVFormatContext* screenCaptureInputFormatContext;
 private readonly int gDIGrabVideoStreamIndex;
 private readonly System.Drawing.Size screenBounds;
 private readonly int _produceAtleastAmount;
 public EventHandler<byte[]> OnNewVideoDataProduced;
 private MemoryStream unsafeToManagedBridgeBuffer;
 private CancellationTokenSource cancellationTokenSource;
 private Task recorderTask;

 public ScreenStreamer(int fps, int bitrate, int screenIndex, int produceAtleastAmount = 1000)
 {
 ffmpeg.avdevice_register_all();
 ffmpeg.avformat_network_init();
 recorderTask = Task.CompletedTask;
 cancellationTokenSource = new CancellationTokenSource();
 unsafeToManagedBridgeBuffer = new MemoryStream();
 _produceAtleastAmount = produceAtleastAmount;

 // Allocate and initialize production codec and context
 productionCodec = ffmpeg.avcodec_find_encoder(AVCodecID.AV_CODEC_ID_H264);
 if (productionCodec == null) throw new ApplicationException("Could not find encoder for codec ID H264.");

 productionAVCodecContext = ffmpeg.avcodec_alloc_context3(productionCodec);
 if (productionAVCodecContext == null) throw new ApplicationException("Could not allocate video codec context.");

 // Set codec parameters
 screenBounds = RetrieveScreenBounds(screenIndex);
 productionAVCodecContext->width = screenBounds.Width;
 productionAVCodecContext->height = screenBounds.Height;
 productionAVCodecContext->time_base = new AVRational() { den = fps, num = 1 };
 productionAVCodecContext->pix_fmt = AVPixelFormat.AV_PIX_FMT_YUV420P;
 productionAVCodecContext->bit_rate = bitrate;

 int result = ffmpeg.av_opt_set(productionAVCodecContext->priv_data, "preset", "veryfast", 0);
 if (result != 0)
 {
 throw new ApplicationException($"Failed to set options with error code {result}.");
 }

 // Open codec
 fixed (AVDictionary** pm = &productionAVCodecOptions)
 {
 result = ffmpeg.av_dict_set(pm, "movflags", "frag_keyframe+empty_moov+default_base_moof", 0);
 if (result < 0)
 {
 throw new ApplicationException($"Failed to set dictionary with error code {result}.");
 }

 result = ffmpeg.avcodec_open2(productionAVCodecContext, productionCodec, pm);
 if (result < 0)
 {
 throw new ApplicationException($"Failed to open codec with error code {result}.");
 }
 }

 // Allocate and initialize screen capture codec and context
 screenCaptureInputFormat = ffmpeg.av_find_input_format("gdigrab");
 if (screenCaptureInputFormat == null) throw new ApplicationException("Could not find input format gdigrab.");

 fixed (AVFormatContext** ps = &screenCaptureInputFormatContext)
 {
 result = ffmpeg.avformat_open_input(ps, "desktop", screenCaptureInputFormat, null);
 if (result < 0)
 {
 throw new ApplicationException($"Failed to open input with error code {result}.");
 }

 result = ffmpeg.avformat_find_stream_info(screenCaptureInputFormatContext, null);
 if (result < 0)
 {
 throw new ApplicationException($"Failed to find stream info with error code {result}.");
 }
 }

 gDIGrabVideoStreamIndex = -1;
 for (int i = 0; i < screenCaptureInputFormatContext->nb_streams; i++)
 {
 if (screenCaptureInputFormatContext->streams[i]->codecpar->codec_type == AVMediaType.AVMEDIA_TYPE_VIDEO)
 {
 gDIGrabVideoStreamIndex = i;
 break;
 }
 }

 if (gDIGrabVideoStreamIndex < 0)
 {
 throw new ApplicationException("Failed to find video stream in input.");
 }

 AVCodecParameters* codecParameters = screenCaptureInputFormatContext->streams[gDIGrabVideoStreamIndex]->codecpar;
 screenCaptureAVCodec = ffmpeg.avcodec_find_decoder(codecParameters->codec_id);
 if (screenCaptureAVCodec == null)
 {
 throw new ApplicationException("Could not find decoder for screen capture.");
 }

 screenCaptureAVCodecContext = ffmpeg.avcodec_alloc_context3(screenCaptureAVCodec);
 if (screenCaptureAVCodecContext == null)
 {
 throw new ApplicationException("Could not allocate screen capture codec context.");
 }

 result = ffmpeg.avcodec_parameters_to_context(screenCaptureAVCodecContext, codecParameters);
 if (result < 0)
 {
 throw new ApplicationException($"Failed to copy codec parameters to context with error code {result}.");
 }

 result = ffmpeg.avcodec_open2(screenCaptureAVCodecContext, screenCaptureAVCodec, null);
 if (result < 0)
 {
 throw new ApplicationException($"Failed to open screen capture codec with error code {result}.");
 }
 }

 public void Start()
 {
 recorderTask = Task.Run(() =>
 {
 AVPacket* packet = ffmpeg.av_packet_alloc();
 AVFrame* rawFrame = ffmpeg.av_frame_alloc();
 AVFrame* compatibleFrame = null;
 byte* dstBuffer = null;

 try
 {
 while (!cancellationTokenSource.Token.IsCancellationRequested)
 {
 if (ffmpeg.av_read_frame(screenCaptureInputFormatContext, packet) >= 0)
 {
 if (packet->stream_index == gDIGrabVideoStreamIndex)
 {
 int response = ffmpeg.avcodec_send_packet(screenCaptureAVCodecContext, packet);
 if (response < 0)
 {
 throw new ApplicationException($"Error while sending a packet to the decoder: {response}");
 }

 response = ffmpeg.avcodec_receive_frame(screenCaptureAVCodecContext, rawFrame);
 if (response == ffmpeg.AVERROR(ffmpeg.EAGAIN) || response == ffmpeg.AVERROR_EOF)
 {
 continue;
 }
 else if (response < 0)
 {
 throw new ApplicationException($"Error while receiving a frame from the decoder: {response}");
 }

 compatibleFrame = ConvertToCompatiblePixelFormat(rawFrame, out dstBuffer);

 response = ffmpeg.avcodec_send_frame(productionAVCodecContext, compatibleFrame);
 if (response < 0)
 {
 throw new ApplicationException($"Error while sending a frame to the encoder: {response}");
 }

 while (response >= 0)
 {
 response = ffmpeg.avcodec_receive_packet(productionAVCodecContext, packet);
 if (response == ffmpeg.AVERROR(ffmpeg.EAGAIN) || response == ffmpeg.AVERROR_EOF)
 {
 break;
 }
 else if (response < 0)
 {
 throw new ApplicationException($"Error while receiving a packet from the encoder: {response}");
 }

 using var packetStream = new UnmanagedMemoryStream(packet->data, packet->size);
 packetStream.CopyTo(unsafeToManagedBridgeBuffer);
 byte[] managedBytes = unsafeToManagedBridgeBuffer.ToArray();
 OnNewVideoDataProduced?.Invoke(this, managedBytes);
 unsafeToManagedBridgeBuffer.SetLength(0);
 }
 }
 }
 ffmpeg.av_packet_unref(packet);
 ffmpeg.av_frame_unref(rawFrame);
 if (compatibleFrame != null)
 {
 ffmpeg.av_frame_unref(compatibleFrame);
 ffmpeg.av_free(dstBuffer);
 }
 }
 }
 finally
 {
 ffmpeg.av_packet_free(&packet);
 ffmpeg.av_frame_free(&rawFrame);
 if (compatibleFrame != null)
 {
 ffmpeg.av_frame_free(&compatibleFrame);
 }
 }
 });
 }

 public AVFrame* ConvertToCompatiblePixelFormat(AVFrame* srcFrame, out byte* dstBuffer)
 {
 AVFrame* dstFrame = ffmpeg.av_frame_alloc();
 int buffer_size = ffmpeg.av_image_get_buffer_size(productionAVCodecContext->pix_fmt, productionAVCodecContext->width, productionAVCodecContext->height, 1);
 byte_ptrArray4 dstData = new byte_ptrArray4();
 int_array4 dstLinesize = new int_array4();
 dstBuffer = (byte*)ffmpeg.av_malloc((ulong)buffer_size);
 ffmpeg.av_image_fill_arrays(ref dstData, ref dstLinesize, dstBuffer, productionAVCodecContext->pix_fmt, productionAVCodecContext->width, productionAVCodecContext->height, 1);

 dstFrame->format = (int)productionAVCodecContext->pix_fmt;
 dstFrame->width = productionAVCodecContext->width;
 dstFrame->height = productionAVCodecContext->height;
 dstFrame->data.UpdateFrom(dstData);
 dstFrame->linesize.UpdateFrom(dstLinesize);

 SwsContext* swsCtx = ffmpeg.sws_getContext(
 srcFrame->width, srcFrame->height, (AVPixelFormat)srcFrame->format,
 productionAVCodecContext->width, productionAVCodecContext->height, productionAVCodecContext->pix_fmt,
 ffmpeg.SWS_BILINEAR, null, null, null);

 if (swsCtx == null)
 {
 throw new ApplicationException("Could not initialize the conversion context.");
 }

 ffmpeg.sws_scale(swsCtx, srcFrame->data, srcFrame->linesize, 0, srcFrame->height, dstFrame->data, dstFrame->linesize);
 ffmpeg.sws_freeContext(swsCtx);
 return dstFrame;
 }

 private System.Drawing.Size RetrieveScreenBounds(int screenIndex)
 {
 return new System.Drawing.Size(1920, 1080);
 }

 public void Dispose()
 {
 cancellationTokenSource?.Cancel();
 recorderTask?.Wait();
 cancellationTokenSource?.Dispose();
 recorderTask?.Dispose();
 unsafeToManagedBridgeBuffer?.Dispose();

 fixed (AVCodecContext** p = &productionAVCodecContext)
 {
 if (*p != null)
 {
 ffmpeg.avcodec_free_context(p);
 }
 }
 fixed (AVCodecContext** p = &screenCaptureAVCodecContext)
 {
 if (*p != null)
 {
 ffmpeg.avcodec_free_context(p);
 }
 }

 if (productionFormatContext != null)
 {
 ffmpeg.avformat_free_context(productionFormatContext);
 }

 if (screenCaptureInputFormatContext != null)
 {
 ffmpeg.avformat_free_context(screenCaptureInputFormatContext);
 }

 if (productionAVCodecOptions != null)
 {
 fixed (AVDictionary** p = &productionAVCodecOptions)
 {
 ffmpeg.av_dict_free(p);
 }
 }
 }
 }



I call the Start method and wait 8 seconds; outside the scope shown I write the bytes to an mp4 file, without writing the trailer, just to debug the atom boxes. Here is the MP4 box debugging output I got:


(Full output)
https://pastebin.com/xkM4MfG7



(Partial output)




"
<boxes>
<uuidbox size="0" type="uuid" uuid="{00000000-00000000-00000000-00000000}" specification="unknown" container="unknown">
</uuidbox>
<trackreferencetypebox size="0" type="cdsc" specification="p12" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="hint" specification="p12" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="font" specification="p12" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="hind" specification="p12" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="vdep" specification="p12" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="vplx" specification="p12" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="subt" specification="p12" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="thmb" specification="p12" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="mpod" specification="p14" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="dpnd" specification="p14" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="sync" specification="p14" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="ipir" specification="p14" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="sbas" specification="p15" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="scal" specification="p15" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="tbas" specification="p15" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="sabt" specification="p15" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="oref" specification="p15" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="adda" specification="p12" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="adrc" specification="p12" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="iloc" specification="p12" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="avcp" specification="p15" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="swto" specification="p15" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="swfr" specification="p15" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="chap" specification="apple" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="tmcd" specification="apple" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="cdep" specification="apple" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="scpt" specification="apple" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="ssrc" specification="apple" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<trackreferencetypebox size="0" type="lyra" specification="apple" container="tref">
<trackreferenceentry trackid=""></trackreferenceentry>
</trackreferencetypebox>
<itemreferencebox size="0" type="tbas" specification="p12" container="iref">
<itemreferenceboxentry itemid=""></itemreferenceboxentry>
</itemreferencebox>
<itemreferencebox size="0" type="iloc" specification="p12" container="iref">
<itemreferenceboxentry itemid=""></itemreferenceboxentry>
</itemreferencebox>
<itemreferencebox size="0" type="fdel" specification="p12" container="iref">
<itemreferenceboxentry itemid=""></itemreferenceboxentry>
</itemreferencebox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p12" container="stbl traf">
<rollrecoveryentry></rollrecoveryentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p12" container="stbl traf">
<audioprerollentry></audioprerollentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p12" container="stbl traf">
<visualrandomaccessentry></visualrandomaccessentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<cencsampleencryptiongroupentry isencrypted="" kid=""></cencsampleencryptiongroupentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<operatingpointsinformation>
 <profiletierlevel></profiletierlevel>
<operatingpoint minpicwidth="" minpicheight="" maxpicwidth="" maxpicheight="" maxchromaformat="" maxbitdepth="" avgframerate="" constantframerate="" maxbitrate="" avgbitrate=""></operatingpoint>

</operatingpointsinformation>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<layerinformation>
<layerinfoitem></layerinfoitem>
</layerinformation>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<tileregiongroupentry tilegroup="" independent="" x="" y="" w="" h="">
<tileregiondependency tileid=""></tileregiondependency>
</tileregiongroupentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<nalumap rle="">
<nalumapentry groupid=""></nalumapentry>
</nalumap>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p12" container="stbl traf">
<temporallevelentry></temporallevelentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p12" container="stbl traf">
<defaultsamplegroupdescriptionentry size=""></defaultsamplegroupdescriptionentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p12" container="stbl traf">
<defaultsamplegroupdescriptionentry size=""></defaultsamplegroupdescriptionentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p12" container="stbl traf">
<sapentry></sapentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<defaultsamplegroupdescriptionentry size=""></defaultsamplegroupdescriptionentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<defaultsamplegroupdescriptionentry size=""></defaultsamplegroupdescriptionentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<defaultsamplegroupdescriptionentry size=""></defaultsamplegroupdescriptionentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<defaultsamplegroupdescriptionentry size=""></defaultsamplegroupdescriptionentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<defaultsamplegroupdescriptionentry size=""></defaultsamplegroupdescriptionentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<defaultsamplegroupdescriptionentry size=""></defaultsamplegroupdescriptionentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<defaultsamplegroupdescriptionentry size=""></defaultsamplegroupdescriptionentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<defaultsamplegroupdescriptionentry size=""></defaultsamplegroupdescriptionentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<syncsamplegroupentry></syncsamplegroupentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<defaultsamplegroupdescriptionentry size=""></defaultsamplegroupdescriptionentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<defaultsamplegroupdescriptionentry size=""></defaultsamplegroupdescriptionentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<defaultsamplegroupdescriptionentry size=""></defaultsamplegroupdescriptionentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="p15" container="stbl traf">
<subpictureorderentry refs=""></subpictureorderentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="3gpp" container="stbl traf">
<defaultsamplegroupdescriptionentry size=""></defaultsamplegroupdescriptionentry>
</samplegroupdescriptionbox>
<samplegroupdescriptionbox size="0" type="sgpd" version="0" flags="0" specification="3gpp" container="stbl traf">
<defaultsamplegroupdescriptionentry size=""></defaultsamplegroupdescriptionentry>
</samplegroupdescriptionbox>
<sampledescriptionentrybox size="0" type="GNRM" specification="unknown" container="stsd" extensiondatasize="0">
</sampledescriptionentrybox>
<visualsampledescriptionbox size="0" type="GNRV" specification="unknown" container="stsd" version="0" revision="0" vendor="0" temporalquality="0" spacialquality="0" width="0" height="0" horizontalresolution="4718592" verticalresolution="4718592" compressorname="" bitdepth="24">
</visualsampledescriptionbox>
<audiosampledescriptionbox size="0" type="GNRA" specification="unknown" container="stsd" version="0" revision="0" vendor="0" channelcount="2" bitspersample="16" samplerate="0">
</audiosampledescriptionbox>
<trackgrouptypebox size="0" type="msrc" version="0" flags="0" specification="p12" container="trgr">
</trackgrouptypebox>
<trackgrouptypebox size="0" type="ster" version="0" flags="0" specification="p12" container="trgr">
</trackgrouptypebox>
<trackgrouptypebox size="0" type="cstg" version="0" flags="0" specification="p15" container="trgr">
</trackgrouptypebox>
<freespacebox size="0" type="free" specification="p12" container="*">
</freespacebox>
<freespacebox size="0" type="free" specification="p12" container="*">
</freespacebox>
<mediadatabox size="0" type="mdat" specification="p12" container="file">
</mediadatabox>
<mediadatabox size="0" type="mdat" specification="p12" container="meta">
"
</mediadatabox></boxes>
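A note on the likely cause: the movflags dictionary above is passed to avcodec_open2(), but movflags is an option of the mp4/mov muxer, not of the encoder, and the encoded packets are copied into the MemoryStream without ever passing through a muxer, so no valid boxes can be written. A hedged sketch of the missing muxing step, shown with the C API (FFmpeg.Autogen mirrors these calls one-to-one); write_cb and open_fmp4_muxer are illustrative names:

// Hedged sketch: route encoded packets through an mp4 muxer with
// movflags set on the MUXER, using a custom AVIOContext so each
// fragment lands in a callback instead of a file.
extern "C" {
#include <libavformat/avformat.h>
}

static int write_cb(void* opaque, uint8_t* buf, int buf_size)
{
    // Forward buf[0..buf_size) to the managed buffer/event here.
    return buf_size;
}

int open_fmp4_muxer(AVFormatContext** out_fmt, AVCodecContext* enc)
{
    AVFormatContext* fmt = NULL;
    int ret = avformat_alloc_output_context2(&fmt, NULL, "mp4", NULL);
    if (ret < 0)
        return ret;

    unsigned char* iobuf = (unsigned char*)av_malloc(4096);
    fmt->pb = avio_alloc_context(iobuf, 4096, 1 /*write*/, NULL,
                                 NULL, write_cb, NULL);
    fmt->flags |= AVFMT_FLAG_CUSTOM_IO; // we own fmt->pb

    AVStream* st = avformat_new_stream(fmt, NULL);
    avcodec_parameters_from_context(st->codecpar, enc);
    st->time_base = enc->time_base;

    AVDictionary* opts = NULL;
    av_dict_set(&opts, "movflags",
                "frag_keyframe+empty_moov+default_base_moof", 0);
    ret = avformat_write_header(fmt, &opts); // movflags belongs here
    av_dict_free(&opts);
    if (ret < 0)
        return ret;

    *out_fmt = fmt; // then per packet: av_write_frame(fmt, pkt);
    return 0;       // and once at shutdown: av_write_trailer(fmt);
}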