
Media (1)
-
Bee video in portrait
14 May 2011, by
Updated: February 2012
Language: French
Type: Video
Other articles (107)
-
Permissions overridden by plugins
27 April 2010, by Mediaspip core
autoriser_auteur_modifier() so that visitors are able to modify their information on the authors page
-
Adding notes and captions to images
7 February 2011, by
To add notes and captions to images, the first step is to install the "Légendes" plugin.
Once the plugin is activated, you can configure it in the configuration area to change the rights for creating, modifying and deleting notes. By default, only site administrators can add notes to images.
Changes when adding a media item
When adding a media item of type "image", a new button appears above the preview (...)
-
Configurable image and logo sizes
9 February 2011, by
In many places on the site, logos and images are resized to fit the slots defined by the themes. Since all of these sizes can change from one theme to another, they can be defined directly in the theme, sparing the user from having to configure them manually after changing the appearance of the site.
These image sizes are also available in the MediaSPIP Core specific configuration. The maximum size of the site logo in pixels, one can (...)
On other sites (11007)
-
FFmpeg error "avcodec_send_frame" returns "invalid argument"
17 October 2023, by Paulo Coutinho
I have a problem: the function avcodec_send_frame throws the error "Error sending frame for encoding: Invalid argument" (-22). I have already searched, checked and rechecked, and found nothing. The code closely follows the FFmpeg examples. Can anyone help me? Thanks.

This is my code:


static void callbackAddSubtitle(const Message &m, const Response r)
{
    try
    {
        av_log_set_level(AV_LOG_DEBUG);

        spdlog::debug("[Mapping :: callbackAddSubtitle] Adding subtitle...");

        auto inputOpt = m.get("input");
        auto outputOpt = m.get("output");

        if (!inputOpt.has_value() || !outputOpt.has_value())
        {
            r(std::string{"INVALID-PARAMS"});
            return;
        }

        const std::string &input = inputOpt.value();
        const std::string &output = outputOpt.value();

        // initialize input
        spdlog::debug("[Mapping :: callbackAddSubtitle] Initializing input video...");

        AVFormatContext *inputFormatCtx = avformat_alloc_context();
        if (avformat_open_input(&inputFormatCtx, input.c_str(), nullptr, nullptr) != 0)
        {
            spdlog::error("Failed to open input");
            r(std::string{"ERROR-OPEN-INPUT"});
            return;
        }

        if (avformat_find_stream_info(inputFormatCtx, nullptr) < 0)
        {
            spdlog::error("Failed to find stream information");
            avformat_close_input(&inputFormatCtx);
            r(std::string{"ERROR-FIND-STREAM"});
            return;
        }

        int videoStreamIndex = av_find_best_stream(inputFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
        if (videoStreamIndex < 0)
        {
            spdlog::error("Could not find a video stream");
            r(std::string{"ERROR-FIND-VIDEO-STREAM"});
            return;
        }

        AVRational timeBase = inputFormatCtx->streams[videoStreamIndex]->time_base;

        AVCodecParameters *inputCodecPar = inputFormatCtx->streams[videoStreamIndex]->codecpar;
        const AVCodec *inputCodec = avcodec_find_decoder(inputCodecPar->codec_id);
        AVCodecContext *inputCodecCtx = avcodec_alloc_context3(inputCodec);

        avcodec_parameters_to_context(inputCodecCtx, inputCodecPar);
        avcodec_open2(inputCodecCtx, inputCodec, nullptr);

        // initialize input audio
        spdlog::debug("[Mapping :: callbackAddSubtitle] Initializing input audio...");

        int audioStreamIndex = av_find_best_stream(inputFormatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0);
        if (audioStreamIndex < 0)
        {
            spdlog::error("Could not find an audio stream");
            r(std::string{"ERROR-FIND-AUDIO-STREAM"});
            return;
        }

        AVCodecParameters *inputAudioCodecPar = inputFormatCtx->streams[audioStreamIndex]->codecpar;
        const AVCodec *inputAudioCodec = avcodec_find_decoder(inputAudioCodecPar->codec_id);
        AVCodecContext *inputAudioCodecCtx = avcodec_alloc_context3(inputAudioCodec);

        avcodec_parameters_to_context(inputAudioCodecCtx, inputAudioCodecPar);
        avcodec_open2(inputAudioCodecCtx, inputAudioCodec, nullptr);

        // initialize output video
        spdlog::debug("[Mapping :: callbackAddSubtitle] Initializing output video...");

        AVFormatContext *outputFormatCtx = nullptr;
        avformat_alloc_output_context2(&outputFormatCtx, nullptr, nullptr, output.c_str());
        AVStream *outputStream = avformat_new_stream(outputFormatCtx, nullptr);

        AVCodecContext *outputCodecCtx = avcodec_alloc_context3(inputCodec);
        avcodec_parameters_to_context(outputCodecCtx, inputCodecPar);
        int retOutVideo = avcodec_open2(outputCodecCtx, inputCodec, nullptr);

        if (retOutVideo < 0)
        {
            char err[AV_ERROR_MAX_STRING_SIZE];
            av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, retOutVideo);
            spdlog::error("Failed to initialize output video: {}", err);
            r(std::string{"ERROR-INIT-OUTPUT-VIDEO"});
            return;
        }

        outputStream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
        outputStream->codecpar->codec_id = inputCodec->id;
        avcodec_parameters_from_context(outputStream->codecpar, outputCodecCtx);

        if (!(outputFormatCtx->oformat->flags & AVFMT_NOFILE))
        {
            avio_open(&outputFormatCtx->pb, output.c_str(), AVIO_FLAG_WRITE);
        }

        const char *pixelFormatName = getPixelFormatName(outputCodecCtx->pix_fmt);
        spdlog::debug("Pixel Format: {}", pixelFormatName);

        // initialize output audio
        spdlog::debug("[Mapping :: callbackAddSubtitle] Initializing output audio...");

        AVStream *outputAudioStream = avformat_new_stream(outputFormatCtx, nullptr);
        AVCodecContext *outputAudioCodecCtx = avcodec_alloc_context3(inputAudioCodec);
        avcodec_parameters_to_context(outputAudioCodecCtx, inputAudioCodecPar);
        int retOutAudio = avcodec_open2(outputAudioCodecCtx, inputAudioCodec, nullptr);

        if (retOutAudio < 0)
        {
            char err[AV_ERROR_MAX_STRING_SIZE];
            av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, retOutAudio);
            spdlog::error("Failed to initialize output audio: {}", err);
            r(std::string{"ERROR-INIT-OUTPUT-AUDIO"});
            return;
        }

        outputAudioStream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
        outputAudioStream->codecpar->codec_id = inputAudioCodec->id;
        avcodec_parameters_from_context(outputAudioStream->codecpar, outputAudioCodecCtx);

        // initialize filters
        spdlog::debug("[Mapping :: callbackAddSubtitle] Initializing filters...");

        AVFilterGraph *filterGraph = avfilter_graph_alloc();
        if (!filterGraph)
        {
            spdlog::error("Failed to allocate filter graph");
            r(std::string{"ERROR-FILTER-GRAPH"});
            return;
        }

        AVFilterContext *bufferSinkCtx;
        AVFilterContext *bufferSrcCtx;

        const AVFilter *bufferSink = avfilter_get_by_name("buffersink");
        const AVFilter *bufferSrc = avfilter_get_by_name("buffer");

        // input filter
        char filterInArgs[512];
        snprintf(filterInArgs, sizeof(filterInArgs), "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d", inputCodecPar->width, inputCodecPar->height, inputCodecCtx->pix_fmt, timeBase.num, timeBase.den, inputCodecCtx->sample_aspect_ratio.num, inputCodecCtx->sample_aspect_ratio.den);

        spdlog::debug("[Mapping :: callbackAddSubtitle] Buffer src args: {}", filterInArgs);

        int retFilterIn = avfilter_graph_create_filter(&bufferSrcCtx, bufferSrc, "in", filterInArgs, nullptr, filterGraph);
        if (retFilterIn < 0)
        {
            char err[AV_ERROR_MAX_STRING_SIZE];
            av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, retFilterIn);
            spdlog::error("Failed to create bufferSrcCtx: {}", err);
            r(std::string{"ERROR-CREATE-FILTER-SRC"});
            return;
        }

        // output filter
        int retFilterOut = avfilter_graph_create_filter(&bufferSinkCtx, bufferSink, "out", nullptr, nullptr, filterGraph);

        if (retFilterOut < 0)
        {
            char err[AV_ERROR_MAX_STRING_SIZE];
            av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, retFilterOut);
            spdlog::error("Failed to create bufferSinkCtx: {}", err);
            r(std::string{"ERROR-CREATE-FILTER-SINK"});
            return;
        }

        enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE};
        av_opt_set_int_list(bufferSinkCtx, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);

        // add filters to graph and link them
        const char *filterSpec = "drawtext=text='Legenda Adicionada Automaticamente Via FFMPEG e C++': fontcolor=yellow: bordercolor=black: fontfile='/Users/paulo/Downloads/roboto/Roboto-Black.ttf'";
        const AVFilter *filter = avfilter_get_by_name("drawtext");

        AVFilterInOut *outputs = avfilter_inout_alloc();
        AVFilterInOut *inputs = avfilter_inout_alloc();

        outputs->name = av_strdup("in");
        outputs->filter_ctx = bufferSrcCtx;
        outputs->pad_idx = 0;
        outputs->next = nullptr;
        inputs->name = av_strdup("out");
        inputs->filter_ctx = bufferSinkCtx;
        inputs->pad_idx = 0;
        inputs->next = nullptr;

        if (avfilter_graph_parse_ptr(filterGraph, filterSpec, &inputs, &outputs, nullptr) < 0)
        {
            spdlog::error("Failed to parse filter graph");
            r(std::string{"ERROR-PARSE-FILTER"});
            return;
        }

        if (avfilter_graph_config(filterGraph, nullptr) < 0)
        {
            spdlog::error("Failed to configure filter graph");
            r(std::string{"ERROR-CONFIG-FILTER"});
            return;
        }

        // header
        spdlog::debug("[Mapping :: callbackAddSubtitle] Writing header...");

        if (avformat_write_header(outputFormatCtx, nullptr) < 0)
        {
            spdlog::error("Error writing header");
            r(std::string{"ERROR-WRITE-HEADER"});
            return;
        }

        // read frames and write to output
        AVPacket *packet = av_packet_alloc();
        AVFrame *frame = av_frame_alloc();

        frame->format = inputCodecCtx->pix_fmt;
        frame->width = inputCodecCtx->width;
        frame->height = inputCodecCtx->height;

        AVFrame *filt_frame = av_frame_alloc();

        filt_frame->format = inputCodecCtx->pix_fmt;
        filt_frame->width = inputCodecCtx->width;
        filt_frame->height = inputCodecCtx->height;

        while (av_read_frame(inputFormatCtx, packet) >= 0)
        {
            if (packet->stream_index == videoStreamIndex)
            {
                if (avcodec_send_packet(inputCodecCtx, packet) < 0)
                {
                    spdlog::error("Error sending packet for decoding");
                    r(std::string{"ERROR-SEND-PACKET-DECODE"});
                    return;
                }

                while (avcodec_receive_frame(inputCodecCtx, frame) == 0)
                {
                    // Send the decoded frame to the filter graph
                    if (av_buffersrc_add_frame_flags(bufferSrcCtx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0)
                    {
                        spdlog::error("Error while feeding the filtergraph");
                        r(std::string{"ERROR-FEED-FILTERGRAPH"});
                        return;
                    }

                    // Receive a frame from the filter graph
                    if (av_buffersink_get_frame(bufferSinkCtx, filt_frame) < 0)
                    {
                        spdlog::error("Error while receiving the filtered frame");
                        r(std::string{"ERROR-RECEIVE-FILTERED-FRAME"});
                        return;
                    }

                    // Send the filtered frame for re-encoding
                    int retSendFrame = avcodec_send_frame(outputCodecCtx, filt_frame);
                    if (retSendFrame < 0)
                    {
                        char err[AV_ERROR_MAX_STRING_SIZE];
                        av_make_error_string(err, AV_ERROR_MAX_STRING_SIZE, retSendFrame);
                        spdlog::error("Error sending frame for encoding: {}", err);
                        r(std::string{"ERROR-SEND-FRAME-ENCODE"});
                        return;
                    }

                    AVPacket *output_packet = av_packet_alloc();
                    output_packet->data = nullptr;
                    output_packet->size = 0;

                    // Re-encode filt_frame into a packet
                    if (avcodec_receive_packet(outputCodecCtx, output_packet) == 0)
                    {
                        // Write the packet to the output stream
                        av_write_frame(outputFormatCtx, output_packet);
                        av_packet_unref(output_packet);
                    }

                    av_frame_unref(filt_frame);
                }

                // time
                packet->pts = av_rescale_q_rnd(packet->pts, inputFormatCtx->streams[videoStreamIndex]->time_base, outputFormatCtx->streams[videoStreamIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                packet->dts = av_rescale_q_rnd(packet->dts, inputFormatCtx->streams[videoStreamIndex]->time_base, outputFormatCtx->streams[videoStreamIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                packet->duration = av_rescale_q(packet->duration, inputFormatCtx->streams[videoStreamIndex]->time_base, outputFormatCtx->streams[videoStreamIndex]->time_base);
                packet->stream_index = videoStreamIndex;

                // write packet to output video stream
                av_interleaved_write_frame(outputFormatCtx, packet);
            }
            else if (packet->stream_index == audioStreamIndex)
            {
                // rescale timestamps
                packet->pts = av_rescale_q_rnd(packet->pts, inputFormatCtx->streams[audioStreamIndex]->time_base, outputFormatCtx->streams[audioStreamIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                packet->dts = av_rescale_q_rnd(packet->dts, inputFormatCtx->streams[audioStreamIndex]->time_base, outputFormatCtx->streams[audioStreamIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
                packet->duration = av_rescale_q(packet->duration, inputFormatCtx->streams[audioStreamIndex]->time_base, outputFormatCtx->streams[audioStreamIndex]->time_base);
                packet->stream_index = audioStreamIndex;

                // write packet to output audio stream
                av_interleaved_write_frame(outputFormatCtx, packet);
            }

            av_packet_unref(packet);
        }

        av_packet_free(&packet);
        av_frame_free(&frame);
        av_frame_free(&filt_frame);

        spdlog::debug("[Mapping :: callbackAddSubtitle] Writing trailer...");

        if (av_write_trailer(outputFormatCtx) < 0)
        {
            spdlog::error("Error writing trailer");
            r(std::string{"ERROR-WRITE-TRAILER"});
            return;
        }

        // cleanup
        spdlog::debug("[Mapping :: callbackAddSubtitle] Cleaning...");

        if (!(outputFormatCtx->oformat->flags & AVFMT_NOFILE))
        {
            avio_closep(&outputFormatCtx->pb);
        }

        avcodec_free_context(&inputCodecCtx);
        avcodec_free_context(&inputAudioCodecCtx);
        avcodec_free_context(&outputCodecCtx);
        avcodec_free_context(&outputAudioCodecCtx);

        avformat_free_context(inputFormatCtx);
        avformat_free_context(outputFormatCtx);

        r(std::string{"OK"});
    }
    catch (const std::exception &e)
    {
        spdlog::error("Error: {}", e.what());
        r(std::string{"ERROR"});
    }
}



The error is:


[2023-10-17 06:30:16.936] [debug] [Mapping :: callbackAddSubtitle] Adding subtitle...
[2023-10-17 06:30:16.936] [debug] [Mapping :: callbackAddSubtitle] Initializing input video...
[NULL @ 0x153604a60] Opening '/Users/paulo/Downloads/movie.mp4' for reading
[file @ 0x6000001fd170] Setting default whitelist 'file,crypto,data'
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] Format mov,mp4,m4a,3gp,3g2,mj2 probed with size=2048 and score=100
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] ISO: File Type Major Brand: isom
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] Unknown dref type 0x206c7275 size 12
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] Processing st: 0, edit list 0 - media time: 0, duration: 2669670
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] Unknown dref type 0x206c7275 size 12
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] Processing st: 1, edit list 0 - media time: 1024, duration: 4272096
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] drop a frame at curr_cts: 0 @ 0
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] Before avformat_find_stream_info() pos: 113542488 bytes read:110788 seeks:1 nb_streams:2
[h264 @ 0x153604cd0] nal_unit_type: 7(SPS), nal_ref_idc: 3
[h264 @ 0x153604cd0] Decoding VUI
[h264 @ 0x153604cd0] nal_unit_type: 8(PPS), nal_ref_idc: 3
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] demuxer injecting skip 1024 / discard 0
[aac @ 0x1536056f0] skip 1024 / discard 0 samples due to side data
[h264 @ 0x153604cd0] nal_unit_type: 7(SPS), nal_ref_idc: 3
[h264 @ 0x153604cd0] Decoding VUI
[h264 @ 0x153604cd0] nal_unit_type: 8(PPS), nal_ref_idc: 3
[h264 @ 0x153604cd0] nal_unit_type: 6(SEI), nal_ref_idc: 0
[h264 @ 0x153604cd0] nal_unit_type: 5(IDR), nal_ref_idc: 3
[h264 @ 0x153604cd0] Format yuv420p chosen by get_format().
[h264 @ 0x153604cd0] Reinit context to 1088x1920, pix_fmt: yuv420p
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] All info found
[mov,mp4,m4a,3gp,3g2,mj2 @ 0x153604a60] After avformat_find_stream_info() pos: 195211 bytes read:305951 seeks:2 frames:2
[2023-10-17 06:30:18.160] [debug] [Mapping :: callbackAddSubtitle] Initializing input audio...
[h264 @ 0x143604330] nal_unit_type: 7(SPS), nal_ref_idc: 3
[h264 @ 0x143604330] Decoding VUI
[h264 @ 0x143604330] nal_unit_type: 8(PPS), nal_ref_idc: 3
[2023-10-17 06:30:18.160] [debug] [Mapping :: callbackAddSubtitle] Initializing output video...
[h264 @ 0x143611ec0] nal_unit_type: 7(SPS), nal_ref_idc: 3
[h264 @ 0x143611ec0] Decoding VUI
[h264 @ 0x143611ec0] nal_unit_type: 8(PPS), nal_ref_idc: 3
[file @ 0x6000001f4000] Setting default whitelist 'file,crypto,data'
[2023-10-17 06:30:18.167] [debug] Pixel Format: YUV420P
[2023-10-17 06:30:18.167] [debug] [Mapping :: callbackAddSubtitle] Initializing output audio...
[2023-10-17 06:30:18.167] [debug] [Mapping :: callbackAddSubtitle] Initializing filters...
[2023-10-17 06:30:18.168] [debug] [Mapping :: callbackAddSubtitle] Buffer src args: video_size=1080x1920:pix_fmt=0:time_base=1/30000:pixel_aspect=1/1
detected 10 logical cores
[in @ 0x6000004ec0b0] Setting 'video_size' to value '1080x1920'
[in @ 0x6000004ec0b0] Setting 'pix_fmt' to value '0'
[in @ 0x6000004ec0b0] Setting 'time_base' to value '1/30000'
[in @ 0x6000004ec0b0] Setting 'pixel_aspect' to value '1/1'
[in @ 0x6000004ec0b0] w:1080 h:1920 pixfmt:yuv420p tb:1/30000 fr:0/1 sar:1/1
[AVFilterGraph @ 0x6000017e8000] Setting 'text' to value 'Legenda Adicionada Automaticamente Via FFMPEG e C++'
[AVFilterGraph @ 0x6000017e8000] Setting 'fontcolor' to value 'yellow'
[AVFilterGraph @ 0x6000017e8000] Setting 'bordercolor' to value 'black'
[AVFilterGraph @ 0x6000017e8000] Setting 'fontfile' to value '/Users/paulo/Downloads/roboto/Roboto-Black.ttf'
[AVFilterGraph @ 0x6000017e8000] query_formats: 3 queried, 2 merged, 0 already done, 0 delayed
[2023-10-17 06:30:18.172] [debug] [Mapping :: callbackAddSubtitle] Writing header...
[h264 @ 0x143604330] nal_unit_type: 6(SEI), nal_ref_idc: 0
[h264 @ 0x143604330] nal_unit_type: 5(IDR), nal_ref_idc: 3
[h264 @ 0x143604330] Format yuv420p chosen by get_format().
[h264 @ 0x143604330] Reinit context to 1088x1920, pix_fmt: yuv420p
[Parsed_drawtext_0 @ 0x6000004f4160] Copying data in avfilter.
[Parsed_drawtext_0 @ 0x6000004f4160] n:0 t:0.000000 text_w:424 text_h:16 x:0 y:0
[2023-10-17 06:30:18.182] [error] Error sending frame for encoding: Invalid argument
Returned Value: ERROR-SEND-FRAME-ENCODE
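
A likely cause, judging from the code above: outputCodecCtx is allocated from inputCodec, which was obtained with avcodec_find_decoder(), so avcodec_open2() opens it as a decoder, and calling avcodec_send_frame() on a decoder context fails with AVERROR(EINVAL) (-22). Below is a minimal sketch of an encoder-side setup, not a confirmed fix; the field values copied from the decoder context are assumptions, and error handling is omitted:

// Sketch: open a real encoder for the output stream instead of reusing the decoder.
const AVCodec *outputCodec = avcodec_find_encoder(inputCodecPar->codec_id);
AVCodecContext *outputCodecCtx = avcodec_alloc_context3(outputCodec);

outputCodecCtx->width = inputCodecCtx->width;                 // assumed: same geometry as input
outputCodecCtx->height = inputCodecCtx->height;
outputCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;                 // matches the buffersink pix_fmts list
outputCodecCtx->time_base = timeBase;                         // encoders require a valid time_base
outputCodecCtx->sample_aspect_ratio = inputCodecCtx->sample_aspect_ratio;
if (outputFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
    outputCodecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;     // some muxers need global headers

int retOutVideo = avcodec_open2(outputCodecCtx, outputCodec, nullptr); // now opens an encoder

The filtered frames may also need a valid pts in the encoder's time_base before avcodec_send_frame(); a missing or inconsistent pts is another common source of encoder complaints.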



-
Slideshow from .jpg
13 October 2023, by DJArty
I have a command to make a slideshow from *.jpg files:


ffmpeg -r 1/3 -f concat -safe 0 -i <(ls -v *.jpg | sed "s|^|file '$PWD/|") -vf "scale='min(1920,iw)':'min(1080,ih)':force_original_aspect_ratio=decrease,pad=1920:1080:(ow-iw)/2:(oh-ih)/2" -c:v libx264 -pix_fmt yuv420p -r 60 -preset slow -crf 14 slideshow.mp4


I need good transition effects from one jpg to the next, like morphing or at least a fade in/fade out, not just "one picture 3 sec, next picture 3 sec, etc."


In short: I need a nicer-looking slideshow.
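
For real transitions, the xfade filter (available in FFmpeg 4.3 and later) is a common route; the concat demuxer alone cannot cross-fade. Below is a sketch under stated assumptions: three hypothetical files img1.jpg, img2.jpg and img3.jpg, each shown for about 3 seconds with 1-second fades, and a scale/pad chain mirroring the command above. Each xfade offset is the accumulated output length minus the fade duration (2 = 3 - 1, then 4 = 5 - 1).

ffmpeg -loop 1 -t 3 -i img1.jpg -loop 1 -t 3 -i img2.jpg -loop 1 -t 3 -i img3.jpg \
 -filter_complex "[0:v]scale=1920:1080:force_original_aspect_ratio=decrease,pad=1920:1080:(ow-iw)/2:(oh-ih)/2,setsar=1,format=yuv420p[v0];[1:v]scale=1920:1080:force_original_aspect_ratio=decrease,pad=1920:1080:(ow-iw)/2:(oh-ih)/2,setsar=1,format=yuv420p[v1];[2:v]scale=1920:1080:force_original_aspect_ratio=decrease,pad=1920:1080:(ow-iw)/2:(oh-ih)/2,setsar=1,format=yuv420p[v2];[v0][v1]xfade=transition=fade:duration=1:offset=2[x1];[x1][v2]xfade=transition=fade:duration=1:offset=4[out]" \
 -map "[out]" -c:v libx264 -pix_fmt yuv420p -r 60 -preset slow -crf 14 slideshow.mp4

All inputs to xfade must share resolution, pixel format and frame rate, which is what the scale/pad/format chain ensures here; generating the command for an arbitrary image list would take a small shell loop.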


-
How to overlay multiple landscape regions from a single input onto a new portrait video? FFmpeg
27 August 2023, by 3V1LXD
I have an Electron program that selects multiple regions of a landscape video and lets you rearrange them on a portrait canvas. I'm having trouble building the proper FFmpeg command to create the video. I have this somewhat working: I can export 2 layers, but I can't export if I only have 1 layer, or if I have 3 or more layers selected.


2 regions of video selected


layers [
 { top: 0, left: 658, width: 576, height: 1080 },
 { top: 262, left: 0, width: 576, height: 324 }
]
newPositions [
 { top: 0, left: 0, width: 576, height: 1080 },
 { top: 0, left: 0, width: 576, height: 324 }
]
filtergraph [0]crop=576:1080:658:0,scale=576:1080[v0];[0]crop=576:324:0:262,scale=576:324[v1];[v0][v1]overlay=0:0:0:0[out]

No error, the export is successful.



1 region selected


layers [ { top: 0, left: 650, width: 576, height: 1080 } ]
newPositions [ { top: 0, left: 0, width: 576, height: 1080 } ]
filtergraph [0]crop=576:1080:650:0,scale=576:1080[v0];[v0]overlay=0:0[out]

FFmpeg error: [fc#0 @ 000001dd3b6db0c0] Cannot find a matching stream for unlabeled input pad overlay
Error initializing complex filters: Invalid argument



3 regions of video selected


layers [
 { top: 0, left: 641, width: 576, height: 1080 },
 { top: 250, left: 0, width: 576, height: 324 },
 { top: 756, left: 0, width: 576, height: 324 }
]
newPositions [
 { top: 0, left: 0, width: 576, height: 1080 },
 { top: 0, left: 0, width: 576, height: 324 },
 { top: 756, left: 0, width: 576, height: 324 }
]
filtergraph [0]crop=576:1080:641:0,scale=576:1080[v0];[0]crop=576:324:0:250,scale=576:324[v1];[0]crop=576:324:0:756,scale=576:324[v2];[v0][v1][v2]overlay=0:0:0:0:0:756[out]

FFmpeg error: [AVFilterGraph @ 0000018faf2189c0] More input link labels specified for filter 'overlay' than it has inputs: 3 > 2
[AVFilterGraph @ 0000018faf2189c0] Error linking filters

FFmpeg error: Failed to set value '[0]crop=576:1080:698:0,scale=576:1080[v0];[0]crop=576:324:0:264,scale=576:324[v1];[0]crop=576:324:0:756,scale=576:324[v2];[v0][v1][v2]overlay=0:0:0:0:0:0[out]' for option 'filter_complex': Invalid argument
Error parsing global options: Invalid argument
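
The 3 > 2 error is expected: overlay takes exactly two video inputs per instance, so a label list like [v0][v1][v2] cannot feed a single overlay. The usual fix is to chain one overlay per extra layer; the single-region case fails for the related reason that there is nothing to overlay at all (mapping [v0] directly would do). A sketch of a chained graph for the 3-region case above, reusing its crop coordinates and new positions:

[0]crop=576:1080:641:0,scale=576:1080[v0];
[0]crop=576:324:0:250,scale=576:324[v1];
[0]crop=576:324:0:756,scale=576:324[v2];
[v0][v1]overlay=0:0[o0];
[o0][v2]overlay=0:756[out]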



I can't figure out how to construct the proper overlay command. Here is the JS code I'm using in my Electron app.


ipcMain.handle('export-video', async (_event, args) => {
  const { videoFile, outputName, layers, newPositions } = args;
  const ffmpegPath = path.join(__dirname, 'bin', 'ffmpeg');
  const outputDir = checkOutputDir();

  // use same video for each layer as input
  // crop, scale, and position each layer
  // overlay each layer on top of each other

  // export video
  console.log('layers', layers);
  console.log('newPositions', newPositions);

  let filtergraph = '';

  for (let i = 0; i < layers.length; i++) {
    const { top, left, width, height } = layers[i];
    const { width: newWidth, height: newHeight } = newPositions[i];
    const filter = `[0]crop=${width}:${height}:${left}:${top},scale=${newWidth}:${newHeight}[v${i}];`;
    filtergraph += filter;
  }

  for (let i = 0; i < layers.length; i++) {
    const filter = `[v${i}]`;
    filtergraph += filter;
  }

  filtergraph += `overlay=`;
  for (let i = 0; i < layers.length; i++) {
    const { top: newTop, left: newLeft } = newPositions[i];
    const overlay = `${newLeft}:${newTop}:`;
    filtergraph += overlay;
  }

  filtergraph = filtergraph.slice(0, -1); // remove the trailing ':'
  filtergraph += `[out]`;

  console.log('filtergraph', filtergraph);

  const ffmpeg = spawn(ffmpegPath, [
    '-i', videoFile,
    '-filter_complex', filtergraph,
    '-map', '[out]',
    '-c:v', 'libx264',
    '-preset', 'ultrafast',
    '-crf', '18',
    '-y',
    path.join(outputDir, `${outputName}`)
  ]);

  ffmpeg.stdout.on('data', (data) => {
    console.log(`FFmpeg output: ${data}`);
  });

  ffmpeg.stderr.on('data', (data) => {
    console.error(`FFmpeg error: ${data}`);
  });

  ffmpeg.on('close', (code) => {
    console.log(`FFmpeg process exited with code ${code}`);
    // event.reply('ffmpeg-export-done'); // Notify the renderer process
  });
});



Any advice would be helpful; the docs are confusing. Thanks.


Edit:
I'm getting closer with this. Output:


layers [
 { top: 0, left: 677, width: 576, height: 1080 },
 { top: 240, left: 0, width: 576, height: 324 }
]
newPositions [
 { top: 0, left: 0, width: 576, height: 1080 },
 { top: 0, left: 0, width: 576, height: 324 }
]
filtergraph [0]crop=576:1080:677:0,scale=576:1080[v0];[0]crop=576:324:0:240,scale=576:324[v1];[0][v0]overlay=0:0[o0];[o0][v1]overlay=0:0[o1]



ipcMain.handle('export-video', async (_event, args) => {
  const { videoFile, outputName, layers, newPositions } = args;
  const ffmpegPath = path.join(__dirname, 'bin', 'ffmpeg');
  const outputDir = checkOutputDir();

  // use same video for each layer as input
  // crop, scale, and position each layer
  // overlay each layer on top of each other

  // export video
  console.log('layers', layers);
  console.log('newPositions', newPositions);

  let filtergraph = '';

  for (let i = 0; i < layers.length; i++) {
    const { top, left, width, height } = layers[i];
    const { width: newWidth, height: newHeight } = newPositions[i];
    const filter = `[0]crop=${width}:${height}:${left}:${top},scale=${newWidth}:${newHeight}[v${i}];`;
    filtergraph += filter;
  }

  for (let i = 0; i < layers.length; i++) {
    if (i === 0) {
      filtergraph += `[0][v${i}]overlay=`;
    } else {
      filtergraph += `[o${i - 1}][v${i}]overlay=`;
    }
    const { top: newTop, left: newLeft } = newPositions[i];
    let overlay = '';
    if (i !== layers.length - 1) {
      overlay = `${newLeft}:${newTop}[o${i}];`;
    } else {
      overlay = `${newLeft}:${newTop};`;
    }
    filtergraph += overlay;
  }

  filtergraph = filtergraph.slice(0, -1); // remove the trailing ';'
  filtergraph += `[o${layers.length - 1}]`;

  console.log('filtergraph', filtergraph);

  const ffmpeg = spawn(ffmpegPath, [
    '-i', videoFile,
    '-filter_complex', filtergraph,
    '-map', `[o${layers.length - 1}]`,
    '-c:v', 'libx264',
    '-preset', 'ultrafast',
    '-crf', '18',
    '-y',
    path.join(outputDir, `${outputName}`)
  ]);

  ffmpeg.stdout.on('data', (data) => {
    console.log(`FFmpeg output: ${data}`);
  });

  ffmpeg.stderr.on('data', (data) => {
    console.error(`FFmpeg error: ${data}`);
  });

  ffmpeg.on('close', (code) => {
    console.log(`FFmpeg process exited with code ${code}`);
    // event.reply('ffmpeg-export-done'); // Notify the renderer process
  });
});



The problem I'm having now is that it's overlaying the regions over the original input and keeping the landscape dimensions, instead of producing a portrait video.
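
One way out, sketched below on the assumption that the layout should sit on a 576x1080 portrait canvas as in the logs above: stack the layers on a portrait base instead of the landscape input [0], either the full-canvas first layer or a blank color source (the black fill, 30 fps rate and yuv420p format here are assumptions):

[0]crop=576:1080:677:0,scale=576:1080[v0];
[0]crop=576:324:0:240,scale=576:324[v1];
color=black:size=576x1080:rate=30,format=yuv420p[base];
[base][v0]overlay=0:0:shortest=1[o0];
[o0][v1]overlay=0:0[o1]

shortest=1 makes the otherwise endless color source stop with the video; when the first layer always covers the whole canvas, [v0] itself can serve as the base and the color source can be dropped. In the JS above, that means seeding the chain with the base label instead of [0] in the i === 0 branch.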