
Recherche avancée
Autres articles (66)
-
Websites made with MediaSPIP
2 mai 2011, par — This page lists some websites based on MediaSPIP.
-
Encodage et transformation en formats lisibles sur Internet
10 avril 2011 — MediaSPIP transforme et ré-encode les documents mis en ligne afin de les rendre lisibles sur Internet et automatiquement utilisables sans intervention du créateur de contenu.
Les vidéos sont automatiquement encodées dans les formats supportés par HTML5 : MP4, Ogv et WebM. La version "MP4" est également utilisée pour le lecteur flash de secours nécessaire aux anciens navigateurs.
Les documents audios sont également ré-encodés dans les deux formats utilisables par HTML5 : MP3 et Ogg. La version "MP3" (...) -
Creating farms of unique websites
13 avril 2011, par — MediaSPIP platforms can be installed as a farm, with a single "core" hosted on a dedicated server and used by multiple websites.
This allows (among other things) : implementation costs to be shared between several different projects / individuals rapid deployment of multiple unique sites creation of groups of like-minded sites, making it possible to browse media in a more controlled and selective environment than the major "open" (...)
Sur d’autres sites (7247)
-
Banking Data Strategies – A Primer to Zero-party, First-party, Second-party and Third-party data
25 octobre 2024, par Daniel Crough — Banking and Financial Services, Privacy -
Use ffmpeg multiple h264_nvenc instances will crash occurs during release
13 août 2024, par yang zhao — Using FFmpeg, when multiple threads use multiple h264_nvenc instances (one instance per thread), an exception crash occurs during release (avcodec_free_context), and the final exception occurs in libnvcuvid.so.
I don't know what the reason is ? Please help, thanks.
The same problem exists : ffmpeg v5.0.1 + cuda v11.6 and ffmpeg v7.0.1 + cuda v12.2
Operating system: Ubuntu 22.04.4 LTS


The specific code is as follows :


class NvencEncoder {
public:
 NvencEncoder() {}
 ~NvencEncoder { Close(); }
 
 bool Open() {
 auto encoder = avcodec_find_encoder_by_name("h264_nvenc");
 pCodecCtx_ = avcodec_alloc_context3(encoder);
 if (!pCodecCtx_)
 return false;

 int width = 1920;
 int height = 1080;
 int bitrate = 1000000;
 
 pCodecCtx_->codec_type = AVMEDIA_TYPE_VIDEO;
 pCodecCtx_->pix_fmt = AV_PIX_FMT_YUV420P;
 pCodecCtx_->width = width;
 pCodecCtx_->height = height;
 pCodecCtx_->bit_rate = bitrate;
 pCodecCtx_->rc_min_rate = bitrate;
 pCodecCtx_->rc_max_rate = bitrate;
 pCodecCtx_->bit_rate_tolerance = bitrate;
 pCodecCtx_->rc_buffer_size = bitrate / 2;
 pCodecCtx_->time_base = AVRational{ 1, 90000 };
 pCodecCtx_->framerate = AVRational{ 25, 1 };
 pCodecCtx_->gop_size = 50;
 pCodecCtx_->max_b_frames = 0;
 pCodecCtx_->delay = 0;
 pCodecCtx_->refs = 2;
 pCodecCtx_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

 av_opt_set_int(pCodecCtx_->priv_data, "gpu", 0, 0);
 av_opt_set(pCodecCtx_->priv_data, "preset", "llhp", 0);
 av_opt_set(pCodecCtx_->priv_data, "rc", "cbr", 0);
 av_opt_set(pCodecCtx_->priv_data, "profile", "main", 0);
 av_opt_set(pCodecCtx_->priv_data, "zerolatency", "1", 0);
 av_opt_set(pCodecCtx_->priv_data, "delay", "0", 0);
 av_opt_set(pCodecCtx_->priv_data, "preset", "medium", 0);

 int ret = avcodec_open2(pCodecCtx_, encoder, nullptr);
 if (ret < 0)
 return false;

 pkt_ = av_packet_alloc();
 if (!pkt_)
 return false;

 char output_mp4[] = "output.mp4";
 ret = avformat_alloc_output_context2(&avMp4Context_, NULL, "mp4", output_mp4);
 if (ret < 0)
 return false;
 
 mp4_stream_ = avformat_new_stream(avMp4Context_, nullptr);
 if (!mp4_stream_)
 return false;

 ret = avcodec_parameters_copy(mp4_stream_->codecpar, out_stream_->codecpar);
 if (ret < 0)
 return false;
 
 mp4_stream_->codecpar->codec_tag = 0;

 if (!(avMp4Context_->oformat->flags & AVFMT_NOFILE)) {
 ret = avio_open(&avMp4Context_->pb, output_mp4_.c_str(), AVIO_FLAG_WRITE);
 if (ret < 0) {
 return false;
 }
 return true;
 }

 void Close() {
 if (pCodecCtx_)
 avcodec_free_context(&pCodecCtx_); // Crash will occur in libnvcuvid.so

 if (avMp4Context_) {
 if (avMp4Context_->oformat && !(avMp4Context_->oformat->flags & AVFMT_NOFILE)) {
 avio_closep(&avMp4Context_->pb);
 }
 avformat_free_context(avMp4Context_);
 avMp4Context_ = nullptr;
 }
 
 if (pkt_)
 av_packet_free(&pkt_);
 }

 bool InputFrame(AVFrame* frame) {
 int ret = avcodec_send_frame(pEncoderVideoCodecCtx_, frame);
 if (ret < 0)
 return false;
 
 while (ret >= 0) {
 ret = avcodec_receive_packet(pEncoderVideoCodecCtx_, pkt_);
 if (ret < 0)
 break;

 if (avNotHeadWrited_) {
 ret = avformat_write_header(avMp4Context_, &opts);
 if (ret < 0) {
 av_packet_unref(pkt_);
 break;
 }
 avNotHeadWrited_ = false;
 }

 av_packet_rescale_ts(pkt_, pCodecCtx_->time_base, mp4_stream_->time_base);
 ret = av_write_frame(avMp4Context_, pkt_);
 if (ret < 0) {
 av_packet_unref(pkt_);
 break;
 }

 av_packet_unref(pkt_);
 }
 
 return (ret >= 0);
 }
private:
 AVPacket* pkt_ = nullptr;
 AVCodecContext* pCodecCtx_ = nullptr;
 AVFormatContext* avMp4Context_ = nullptr;
 AVStream* mp4_stream_ = nullptr;
 avNotHeadWrited_ = true;
}

uint8_t* data = nullptr; // one frame of YUV420 data — filled in main(), freed there; read (memcpy'd) by every Run() thread
void Run(int idx); // per-thread encode loop, defined below main()

int main() {
 // Fill a frame of YUV420 data into `data` here (elided in the original post)
 ...

 // Launch three encoder threads, one h264_nvenc instance each,
 // staggered 3 s apart.
 std::thread th[3];
 for (int i = 0; i < 3; i++) {
 th[i] = std::thread(Run, i);
 sleep(3);
 }

 // Each Run() encodes ~30 s of video; wait roughly that long before joining.
 sleep(35);

 for (int i = 0; i < 3; i++) {
 if (th[i].joinable()) {
 printf("thread %d join()\n", i);
 th[i].join();
 }
 }

 free(data);
 printf("Exit\n");
}

// Per-thread worker: opens one NvencEncoder, pushes the shared YUV420 frame
// at ~25 fps for 30 seconds, then finalizes and tears the encoder down.
// The teardown (Close) is where the multi-thread crash is reported.
void Run(int idx) {
    printf("Run() thread(%d)\n", idx);
    //cudaSetDevice(0);

    // FIX: NvencEncoder declares only a default constructor; the original
    // called it with (ffpar, FFOutputCB), neither of which is declared.
    auto nvenc = new NvencEncoder();
    if (!nvenc->Open()) {
        delete nvenc;
        return;
    }

    auto avframe_ = av_frame_alloc();
    avframe_->width = 1920;
    avframe_->height = 1080;
    avframe_->format = AV_PIX_FMT_YUV420P;

    int ret = av_frame_get_buffer(avframe_, 0);
    if (ret < 0) {
        printf("av_frame_get_buffer() is error %d\n", ret);
        delete nvenc;
        av_frame_free(&avframe_);
        return;
    }

    const int frame_size = 1920 * 1080;
    const double one_frame_us = 1000000.0 / 25.0;
    unsigned long frame_count = 0;
    struct timeval t1, t2;
    double timeuse;

    // FIX: original read "ffpar.timebase_num/den" (undeclared). The encoder
    // is opened with a 1/90000 time base, so frames use the same one.
    AVRational timebase = { 1, 90000 };
    std::int64_t llCalcDuration = (double)AV_TIME_BASE / 25.0; // one frame in AV_TIME_BASE units
    double in_stream_timebase = av_q2d(timebase);
    avframe_->time_base = timebase;
    gettimeofday(&t1, NULL);

    while (frame_count < 25 * 30) { // 30 seconds at 25 fps
        // pts in 1/90000 units: 3600 ticks per frame at 25 fps.
        avframe_->pts = (double)(frame_count * llCalcDuration) / (double)(in_stream_timebase * AV_TIME_BASE);
        frame_count++;

        ret = av_frame_make_writable(avframe_);
        if (ret < 0) {
            printf("av_frame_make_writable() is error %d\n", ret);
            break;
        }

        // Copy one YUV420P frame: full-size Y plane, quarter-size U and V.
        memcpy(avframe_->data[0], data, frame_size);
        memcpy(avframe_->data[1], data + frame_size, frame_size / 4);
        memcpy(avframe_->data[2], data + frame_size * 5 / 4, frame_size / 4);

        // FIX: InputFrame() returns bool; the original stored it in an int
        // and tested "ret < 0", which can never be true (false converts
        // to 0), so encode errors were silently ignored.
        if (!nvenc->InputFrame(avframe_)) {
            printf("InputFrame() failed\n");
            break;
        }

        // Crude 25 fps pacing: sleep off the remainder of the frame period.
        gettimeofday(&t2, NULL);
        timeuse = (t2.tv_sec - t1.tv_sec) * 1000000 + (t2.tv_usec - t1.tv_usec); // us
        if (timeuse < one_frame_us) {
            usleep(one_frame_us - timeuse);
        }
        gettimeofday(&t1, NULL);
    }

    if (frame_count > 0) {
        nvenc->WriteTrailer();
    }

    printf("do Close() thread(%d)\n", idx);
    nvenc->Close(); // reported crash site — see NvencEncoder::Close
    printf("Closed thread(%d)\n", idx);
    delete nvenc;
    av_frame_free(&avframe_);
}



-
Need help using libavfilter for adding overlay to frames [closed]
30 juillet 2024, par Michael Werner — Hello gentlemen and ladies,


I am working with libavfilter and I am getting crazy.


On Windows 11 OS with latest libav (full build) a C/C++ app reads YUV420P frames from a frame grabber card.


I want to draw a bitmap (BGR24) overlay image from file on every frame via libavfilter. First I convert the BGR24 overlay image via format filter to YUV420P. Then I feed the YUV420P frame from frame grabber and the YUV420P overlay into the overlay filter.


Everything seems to be fine but when I try to get the frame out of the filter graph I always get an "Resource is temporary not available" (EAGAIN) return code, independent on how many frames I put into the graph.


The frames from the frame grabber card are fine, I could encode them or write them to a .yuv file. The overlay frame looks fine too.


My current initialization code looks like below. It does not report any errors or warnings, but when I try to get the filtered frame out of the graph via av_buffersink_get_frame I always get an EAGAIN return code.

Here is my current initialization code :


// Builds a filter graph that overlays a 165x165 BGR24 image (converted to
// YUV420P by a "format" filter) onto a 1920x1080 YUV420P main stream:
//
//   in (buffer) ------------------------------+
//                                             v
//   overlay_in (buffer) -> format -------> overlay -> out (buffersink)
//
// On success, returns 0 and hands back the graph plus the three endpoint
// contexts through the out-parameters. On failure, returns a negative
// AVERROR and frees the partially built graph (the original leaked it on
// every error path).
//
// NOTE(review): the EAGAIN reported from av_buffersink_get_frame() is not a
// construction failure — the overlay filter synchronizes its two inputs
// (see the framesync lines in the log), so the sink produces nothing until
// BOTH inputs have received frames with matching timestamps. Verify that
// frames are pushed into *overlay_src_ctx as well, not only into *src_ctx.
int init_overlay_filter(AVFilterGraph** graph, AVFilterContext** src_ctx, AVFilterContext** overlay_src_ctx,
                        AVFilterContext** sink_ctx)
{
    AVFilterGraph* filter_graph;
    AVFilterContext* buffersrc_ctx;
    AVFilterContext* overlay_buffersrc_ctx;
    AVFilterContext* buffersink_ctx;
    AVFilterContext* overlay_ctx;
    AVFilterContext* format_ctx;
    const AVFilter *buffersrc, *buffersink, *overlay_buffersrc, *overlay_filter, *format_filter;
    int ret;

    // Create the filter graph
    filter_graph = avfilter_graph_alloc();
    if (!filter_graph)
    {
        fprintf(stderr, "Unable to create filter graph.\n");
        return AVERROR(ENOMEM);
    }

    // FIX: every error path below used to "return ret" directly and leak
    // filter_graph; funnel them through this helper instead.
    auto fail = [&filter_graph](int err) {
        avfilter_graph_free(&filter_graph);
        return err;
    };

    // Create buffer source filter for main video
    buffersrc = avfilter_get_by_name("buffer");
    if (!buffersrc)
    {
        fprintf(stderr, "Unable to find buffer filter.\n");
        return fail(AVERROR_FILTER_NOT_FOUND);
    }

    // Create buffer source filter for overlay image
    overlay_buffersrc = avfilter_get_by_name("buffer");
    if (!overlay_buffersrc)
    {
        fprintf(stderr, "Unable to find buffer filter.\n");
        return fail(AVERROR_FILTER_NOT_FOUND);
    }

    // Create buffer sink filter
    buffersink = avfilter_get_by_name("buffersink");
    if (!buffersink)
    {
        fprintf(stderr, "Unable to find buffersink filter.\n");
        return fail(AVERROR_FILTER_NOT_FOUND);
    }

    // Create overlay filter
    overlay_filter = avfilter_get_by_name("overlay");
    if (!overlay_filter)
    {
        fprintf(stderr, "Unable to find overlay filter.\n");
        return fail(AVERROR_FILTER_NOT_FOUND);
    }

    // Create format filter
    format_filter = avfilter_get_by_name("format");
    if (!format_filter)
    {
        fprintf(stderr, "Unable to find format filter.\n");
        return fail(AVERROR_FILTER_NOT_FOUND);
    }

    // Initialize the main video buffer source: 1920x1080 YUV420P @ 1/25 tb
    char args[512];
    snprintf(args, sizeof(args),
             "video_size=1920x1080:pix_fmt=yuv420p:time_base=1/25:pixel_aspect=1/1");
    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph);
    if (ret < 0)
    {
        fprintf(stderr, "Unable to create buffer source filter for main video.\n");
        return fail(ret);
    }

    // Initialize the overlay buffer source: 165x165 BGR24 @ 1/25 tb
    snprintf(args, sizeof(args),
             "video_size=165x165:pix_fmt=bgr24:time_base=1/25:pixel_aspect=1/1");
    ret = avfilter_graph_create_filter(&overlay_buffersrc_ctx, overlay_buffersrc, "overlay_in", args, NULL,
                                       filter_graph);
    if (ret < 0)
    {
        fprintf(stderr, "Unable to create buffer source filter for overlay.\n");
        return fail(ret);
    }

    // Initialize the format filter to convert the overlay image to yuv420p
    snprintf(args, sizeof(args), "pix_fmts=yuv420p");
    ret = avfilter_graph_create_filter(&format_ctx, format_filter, "format", args, NULL, filter_graph);
    if (ret < 0)
    {
        fprintf(stderr, "Unable to create format filter.\n");
        return fail(ret);
    }

    // Initialize the buffer sink
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph);
    if (ret < 0)
    {
        fprintf(stderr, "Unable to create buffer sink filter.\n");
        return fail(ret);
    }

    // Initialize the overlay filter: bottom-right, enabled for t in [0, 20]
    ret = avfilter_graph_create_filter(&overlay_ctx, overlay_filter, "overlay",
                                       "W-w:H-h:enable='between(t,0,20)':format=yuv420", NULL, filter_graph);
    if (ret < 0)
    {
        fprintf(stderr, "Unable to create overlay filter.\n");
        return fail(ret);
    }

    // Connect the filters:
    //   overlay_in -> format -> overlay (input 1); in -> overlay (input 0);
    //   overlay -> out
    ret = avfilter_link(overlay_buffersrc_ctx, 0, format_ctx, 0);
    if (ret < 0)
    {
        fprintf(stderr, "Unable to configure filter graph.\n");
        return fail(ret);
    }

    ret = avfilter_link(buffersrc_ctx, 0, overlay_ctx, 0);
    if (ret < 0)
    {
        fprintf(stderr, "Unable to configure filter graph.\n");
        return fail(ret);
    }

    ret = avfilter_link(format_ctx, 0, overlay_ctx, 1);
    if (ret < 0)
    {
        fprintf(stderr, "Unable to configure filter graph.\n");
        return fail(ret);
    }

    ret = avfilter_link(overlay_ctx, 0, buffersink_ctx, 0);
    if (ret < 0)
    {
        fprintf(stderr, "Unable to link filter graph.\n");
        return fail(ret);
    }

    // Configure the filter graph
    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
    {
        fprintf(stderr, "Unable to configure filter graph.\n");
        return fail(ret);
    }

    *graph = filter_graph;
    *src_ctx = buffersrc_ctx;
    *overlay_src_ctx = overlay_buffersrc_ctx;
    *sink_ctx = buffersink_ctx;

    return 0;
}



Feeding the filter graph is done this way :


av_buffersrc_add_frame_flags(buffersrc_ctx, pFrameGrabberFrame, AV_BUFFERSRC_FLAG_KEEP_REF)
av_buffersink_get_frame(buffersink_ctx, filtered_frame)



av_buffersink_get_frame always returns EAGAIN, no matter how many frames I feed into the graph. The frames themselves (from the frame grabber and the overlay) look fine.

I did set libav logging level to maximum but I do not see any warnings or errors or helpful, related information in the log.


Here the log output related to the filter configuration :


[in @ 00000288ee494f40] Setting 'video_size' to value '1920x1080'
[in @ 00000288ee494f40] Setting 'pix_fmt' to value 'yuv420p'
[in @ 00000288ee494f40] Setting 'time_base' to value '1/25'
[in @ 00000288ee494f40] Setting 'pixel_aspect' to value '1/1'
[in @ 00000288ee494f40] w:1920 h:1080 pixfmt:yuv420p tb:1/25 fr:0/1 sar:1/1 csp:unknown range:unknown
[overlay_in @ 00000288ff1013c0] Setting 'video_size' to value '165x165'
[overlay_in @ 00000288ff1013c0] Setting 'pix_fmt' to value 'bgr24'
[overlay_in @ 00000288ff1013c0] Setting 'time_base' to value '1/25'
[overlay_in @ 00000288ff1013c0] Setting 'pixel_aspect' to value '1/1'
[overlay_in @ 00000288ff1013c0] w:165 h:165 pixfmt:bgr24 tb:1/25 fr:0/1 sar:1/1 csp:unknown range:unknown
[format @ 00000288ff1015c0] Setting 'pix_fmts' to value 'yuv420p'
[overlay @ 00000288ff101880] Setting 'x' to value 'W-w'
[overlay @ 00000288ff101880] Setting 'y' to value 'H-h'
[overlay @ 00000288ff101880] Setting 'enable' to value 'between(t,0,20)'
[overlay @ 00000288ff101880] Setting 'format' to value 'yuv420'
[auto_scale_0 @ 00000288ff101ec0] w:iw h:ih flags:'' interl:0
[format @ 00000288ff1015c0] auto-inserting filter 'auto_scale_0' between the filter 'overlay_in' and the filter 'format'
[auto_scale_1 @ 00000288ee4a4cc0] w:iw h:ih flags:'' interl:0
[overlay @ 00000288ff101880] auto-inserting filter 'auto_scale_1' between the filter 'format' and the filter 'overlay'
[AVFilterGraph @ 00000288ee495c80] query_formats: 5 queried, 6 merged, 6 already done, 0 delayed
[auto_scale_0 @ 00000288ff101ec0] w:165 h:165 fmt:bgr24 csp:gbr range:pc sar:1/1 -> w:165 h:165 fmt:yuv420p csp:unknown range:unknown sar:1/1 flags:0x00000004
[auto_scale_1 @ 00000288ee4a4cc0] w:165 h:165 fmt:yuv420p csp:unknown range:unknown sar:1/1 -> w:165 h:165 fmt:yuva420p csp:unknown range:unknown sar:1/1 flags:0x00000004
[overlay @ 00000288ff101880] main w:1920 h:1080 fmt:yuv420p overlay w:165 h:165 fmt:yuva420p
[overlay @ 00000288ff101880] [framesync @ 00000288ff1019a8] Selected 1/25 time base
[overlay @ 00000288ff101880] [framesync @ 00000288ff1019a8] Sync level 2