
Media (1)

Keyword: - Tags - /remix

Other articles (71)

  • Writing a news item

    21 June 2013, by

    Present changes to your MediaSPIP, or news about your projects, via the news section.
    In MediaSPIP's default theme, spipeo, news items are displayed at the bottom of the main page, below the editorials.
    You can customize the news creation form.
    News creation form: for a document of type "news", the fields proposed by default are: publication date (customize the publication date) (...)

  • Websites made with MediaSPIP

    2 May 2011, by

    This page lists some websites based on MediaSPIP.

  • Publishing on MediaSPIP

    13 June 2013

    Can I post content from an iPad tablet?
    Yes, if your installed MediaSPIP is at version 0.2 or higher. If necessary, contact your MediaSPIP administrator to find out.

On other sites (11867)

  • How to use the FFmpeg libraries to transform MP4 (h264 & aac) into m3u8 (HLS) in C code?

    1 July 2020, by itning

    I used the official transcoding.c example, but the console prints pkt->duration = 0, so the HLS segment duration may not be precise.

    I used this code to set the duration, but it has no effect:

    av_opt_set_int(ofmt_ctx->priv_data, "hls_time", 5, AV_OPT_SEARCH_CHILDREN);

    On the command line, this works:

    ffmpeg -i a.mp4 -codec copy -vbsf h264_mp4toannexb -map 0 -f segment -segment_list a.m3u8 -segment_time 10 a-%03d.ts

    How can I achieve this command in C code?

    This is my code:

    /**
     * @file
     * API example for demuxing, decoding, filtering, encoding and muxing
     * @example transcoding.c
     */

    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libavfilter/buffersink.h>
    #include <libavfilter/buffersrc.h>
    #include <libavutil/opt.h>
    #include <libavutil/pixdesc.h>

    static AVFormatContext *ifmt_ctx;
    static AVFormatContext *ofmt_ctx;
    typedef struct FilteringContext {
        AVFilterContext *buffersink_ctx;
        AVFilterContext *buffersrc_ctx;
        AVFilterGraph *filter_graph;
    } FilteringContext;
    static FilteringContext *filter_ctx;

    typedef struct StreamContext {
        AVCodecContext *dec_ctx;
        AVCodecContext *enc_ctx;
    } StreamContext;
    static StreamContext *stream_ctx;

    static int open_input_file(const char *filename) {
        int ret;
        unsigned int i;

        ifmt_ctx = NULL;
        if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
            return ret;
        }

        if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
            return ret;
        }

        stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
        if (!stream_ctx)
            return AVERROR(ENOMEM);

        for (i = 0; i < ifmt_ctx->nb_streams; i++) {
            AVStream *stream = ifmt_ctx->streams[i];
            AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
            AVCodecContext *codec_ctx;
            if (!dec) {
                av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
                return AVERROR_DECODER_NOT_FOUND;
            }
            codec_ctx = avcodec_alloc_context3(dec);
            if (!codec_ctx) {
                av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
                return AVERROR(ENOMEM);
            }
            ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
                                           "for stream #%u\n", i);
                return ret;
            }
            /* Reencode video & audio and remux subtitles etc. */
            if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
                || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
                if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
                    codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
                /* Open decoder */
                ret = avcodec_open2(codec_ctx, dec, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
                    return ret;
                }
            }
            stream_ctx[i].dec_ctx = codec_ctx;
        }

        av_dump_format(ifmt_ctx, 0, filename, 0);
        return 0;
    }

    static int open_output_file(const char *filename, enum AVCodecID videoCodecId, enum AVCodecID audioCodecId) {
        AVStream *out_stream;
        AVStream *in_stream;
        AVCodecContext *dec_ctx, *enc_ctx;
        AVCodec *encoder;
        int ret;
        unsigned int i;

        ofmt_ctx = NULL;
        avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
        if (!ofmt_ctx) {
            av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
            return AVERROR_UNKNOWN;
        }

        for (i = 0; i < ifmt_ctx->nb_streams; i++) {
            out_stream = avformat_new_stream(ofmt_ctx, NULL);
            if (!out_stream) {
                av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
                return AVERROR_UNKNOWN;
            }

            in_stream = ifmt_ctx->streams[i];
            dec_ctx = stream_ctx[i].dec_ctx;

            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
                || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {

                if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                    encoder = avcodec_find_encoder(videoCodecId);
                } else {
                    encoder = avcodec_find_encoder(audioCodecId);
                }
                //encoder = avcodec_find_encoder(dec_ctx->codec_id);
                if (!encoder) {
                    av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                    return AVERROR_INVALIDDATA;
                }
                enc_ctx = avcodec_alloc_context3(encoder);
                if (!enc_ctx) {
                    av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
                    return AVERROR(ENOMEM);
                }

                /* In this example, we transcode to same properties (picture size,
                 * sample rate etc.). These properties can be changed for output
                 * streams easily using filters */
                if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                    enc_ctx->height = dec_ctx->height;
                    enc_ctx->width = dec_ctx->width;
                    enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
                    /* take first format from list of supported formats */
                    if (encoder->pix_fmts)
                        enc_ctx->pix_fmt = encoder->pix_fmts[0];
                    else
                        enc_ctx->pix_fmt = dec_ctx->pix_fmt;
                    /* video time_base can be set to whatever is handy and supported by encoder */
                    enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
                } else {
                    enc_ctx->sample_rate = dec_ctx->sample_rate;
                    enc_ctx->channel_layout = dec_ctx->channel_layout;
                    enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
                    /* take first format from list of supported formats */
                    enc_ctx->sample_fmt = encoder->sample_fmts[0];
                    enc_ctx->time_base = (AVRational) {1, enc_ctx->sample_rate};
                }

                if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                    enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

                /* Third parameter can be used to pass settings to encoder */
                ret = avcodec_open2(enc_ctx, encoder, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                    return ret;
                }
                ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
                    return ret;
                }

                out_stream->time_base = enc_ctx->time_base;
                stream_ctx[i].enc_ctx = enc_ctx;
            } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
                av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
                return AVERROR_INVALIDDATA;
            } else {
                /* if this stream must be remuxed */
                ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
                    return ret;
                }
                out_stream->time_base = in_stream->time_base;
            }

        }
        av_dump_format(ofmt_ctx, 0, filename, 1);

        av_opt_set_int(ofmt_ctx->priv_data, "hls_time", 5, AV_OPT_SEARCH_CHILDREN);

        if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
            ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
                return ret;
            }
        }

        /* init muxer, write output file header */
        ret = avformat_write_header(ofmt_ctx, NULL);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
            return ret;
        }

        return 0;
    }

    static int init_filter(FilteringContext *fctx, AVCodecContext *dec_ctx,
                           AVCodecContext *enc_ctx, const char *filter_spec) {
        char args[512];
        int ret = 0;
        const AVFilter *buffersrc = NULL;
        const AVFilter *buffersink = NULL;
        AVFilterContext *buffersrc_ctx = NULL;
        AVFilterContext *buffersink_ctx = NULL;
        AVFilterInOut *outputs = avfilter_inout_alloc();
        AVFilterInOut *inputs = avfilter_inout_alloc();
        AVFilterGraph *filter_graph = avfilter_graph_alloc();

        if (!outputs || !inputs || !filter_graph) {
            ret = AVERROR(ENOMEM);
            goto end;
        }

        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
            buffersrc = avfilter_get_by_name("buffer");
            buffersink = avfilter_get_by_name("buffersink");
            if (!buffersrc || !buffersink) {
                av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }

            snprintf(args, sizeof(args),
                     "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                     dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                     dec_ctx->time_base.num, dec_ctx->time_base.den,
                     dec_ctx->sample_aspect_ratio.num,
                     dec_ctx->sample_aspect_ratio.den);

            ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                               args, NULL, filter_graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
                goto end;
            }

            ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                               NULL, NULL, filter_graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
                goto end;
            }

            ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                                 (uint8_t *) &enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
                                 AV_OPT_SEARCH_CHILDREN);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
                goto end;
            }
        } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            buffersrc = avfilter_get_by_name("abuffer");
            buffersink = avfilter_get_by_name("abuffersink");
            if (!buffersrc || !buffersink) {
                av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }

            if (!dec_ctx->channel_layout)
                dec_ctx->channel_layout =
                        av_get_default_channel_layout(dec_ctx->channels);
            snprintf(args, sizeof(args),
                     "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
                     dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
                     av_get_sample_fmt_name(dec_ctx->sample_fmt),
                     dec_ctx->channel_layout);
            ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                               args, NULL, filter_graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
                goto end;
            }

            ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                               NULL, NULL, filter_graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
                goto end;
            }

            ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
                                 (uint8_t *) &enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
                                 AV_OPT_SEARCH_CHILDREN);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
                goto end;
            }

            ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
                                 (uint8_t *) &enc_ctx->channel_layout,
                                 sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
                goto end;
            }

            ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
                                 (uint8_t *) &enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
                                 AV_OPT_SEARCH_CHILDREN);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
                goto end;
            }
        } else {
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        /* Endpoints for the filter graph. */
        outputs->name = av_strdup("in");
        outputs->filter_ctx = buffersrc_ctx;
        outputs->pad_idx = 0;
        outputs->next = NULL;

        inputs->name = av_strdup("out");
        inputs->filter_ctx = buffersink_ctx;
        inputs->pad_idx = 0;
        inputs->next = NULL;

        if (!outputs->name || !inputs->name) {
            ret = AVERROR(ENOMEM);
            goto end;
        }

        if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
                                            &inputs, &outputs, NULL)) < 0)
            goto end;

        if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
            goto end;

        /* Fill FilteringContext */
        fctx->buffersrc_ctx = buffersrc_ctx;
        fctx->buffersink_ctx = buffersink_ctx;
        fctx->filter_graph = filter_graph;

        end:
        avfilter_inout_free(&inputs);
        avfilter_inout_free(&outputs);

        return ret;
    }

    static int init_filters(void) {
        const char *filter_spec;
        unsigned int i;
        int ret;
        filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
        if (!filter_ctx)
            return AVERROR(ENOMEM);

        for (i = 0; i < ifmt_ctx->nb_streams; i++) {
            filter_ctx[i].buffersrc_ctx = NULL;
            filter_ctx[i].buffersink_ctx = NULL;
            filter_ctx[i].filter_graph = NULL;
            if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
                  || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
                continue;

            if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
                filter_spec = "null"; /* passthrough (dummy) filter for video */
            else
                filter_spec = "anull"; /* passthrough (dummy) filter for audio */
            ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
                              stream_ctx[i].enc_ctx, filter_spec);
            if (ret)
                return ret;
        }
        return 0;
    }

    static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
        int ret;
        int got_frame_local;
        AVPacket enc_pkt;
        int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
        (ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
         AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

        if (!got_frame)
            got_frame = &got_frame_local;

        av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
        /* encode filtered frame */
        enc_pkt.data = NULL;
        enc_pkt.size = 0;
        av_init_packet(&enc_pkt);
        ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
                       filt_frame, got_frame);
        av_frame_free(&filt_frame);
        if (ret < 0)
            return ret;
        if (!(*got_frame))
            return 0;

        /* prepare packet for muxing */
        enc_pkt.stream_index = stream_index;
        av_packet_rescale_ts(&enc_pkt,
                             stream_ctx[stream_index].enc_ctx->time_base,
                             ofmt_ctx->streams[stream_index]->time_base);

        av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
        /* mux encoded frame */
        ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
        return ret;
    }

    static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index) {
        int ret;
        AVFrame *filt_frame;

        av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
        /* push the decoded frame into the filtergraph */
        ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
                                           frame, 0);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
            return ret;
        }

        /* pull filtered frames from the filtergraph */
        while (1) {
            filt_frame = av_frame_alloc();
            if (!filt_frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
            av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
            ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
                                          filt_frame);
            if (ret < 0) {
                /* if no more frames for output - returns AVERROR(EAGAIN)
                 * if flushed and no more frames for output - returns AVERROR_EOF
                 * rewrite retcode to 0 to show it as normal procedure completion
                 */
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                    ret = 0;
                av_frame_free(&filt_frame);
                break;
            }

            filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
            ret = encode_write_frame(filt_frame, stream_index, NULL);
            if (ret < 0)
                break;
        }

        return ret;
    }

    static int flush_encoder(unsigned int stream_index) {
        int ret;
        int got_frame;

        if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
              AV_CODEC_CAP_DELAY))
            return 0;

        while (1) {
            av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
            ret = encode_write_frame(NULL, stream_index, &got_frame);
            if (ret < 0)
                break;
            if (!got_frame)
                return 0;
        }
        return ret;
    }

    int main() {
        char *inputFile = "D:/20200623_094923.mp4";
        char *outputFile = "D:/test/te.m3u8";
        enum AVCodecID videoCodec = AV_CODEC_ID_H264;
        enum AVCodecID audioCodec = AV_CODEC_ID_AAC;

        int ret;
        AVPacket packet = {.data = NULL, .size = 0};
        AVFrame *frame = NULL;
        enum AVMediaType type;
        unsigned int stream_index;
        unsigned int i;
        int got_frame;
        int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

        if ((ret = open_input_file(inputFile)) < 0)
            goto end;
        if ((ret = open_output_file(outputFile, videoCodec, audioCodec)) < 0)
            goto end;
        if ((ret = init_filters()) < 0)
            goto end;

        /* read all packets */
        while (1) {
            if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
                break;
            stream_index = packet.stream_index;
            type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
            av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
                   stream_index);

            if (filter_ctx[stream_index].filter_graph) {
                av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
                frame = av_frame_alloc();
                if (!frame) {
                    ret = AVERROR(ENOMEM);
                    break;
                }
                av_packet_rescale_ts(&packet,
                                     ifmt_ctx->streams[stream_index]->time_base,
                                     stream_ctx[stream_index].dec_ctx->time_base);
                dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                           avcodec_decode_audio4;
                ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
                               &got_frame, &packet);
                if (ret < 0) {
                    av_frame_free(&frame);
                    av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                    break;
                }

                if (got_frame) {
                    frame->pts = frame->best_effort_timestamp;
                    ret = filter_encode_write_frame(frame, stream_index);
                    av_frame_free(&frame);
                    if (ret < 0)
                        goto end;
                } else {
                    av_frame_free(&frame);
                }
            } else {
                /* remux this frame without reencoding */
                av_packet_rescale_ts(&packet,
                                     ifmt_ctx->streams[stream_index]->time_base,
                                     ofmt_ctx->streams[stream_index]->time_base);

                ret = av_interleaved_write_frame(ofmt_ctx, &packet);
                if (ret < 0)
                    goto end;
            }
            av_packet_unref(&packet);
        }

        /* flush filters and encoders */
        for (i = 0; i < ifmt_ctx->nb_streams; i++) {
            /* flush filter */
            if (!filter_ctx[i].filter_graph)
                continue;
            ret = filter_encode_write_frame(NULL, i);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
                goto end;
            }

            /* flush encoder */
            ret = flush_encoder(i);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
                goto end;
            }
        }

        av_write_trailer(ofmt_ctx);
        end:
        av_packet_unref(&packet);
        av_frame_free(&frame);
        for (i = 0; i < ifmt_ctx->nb_streams; i++) {
            avcodec_free_context(&stream_ctx[i].dec_ctx);
            if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
                avcodec_free_context(&stream_ctx[i].enc_ctx);
            if (filter_ctx && filter_ctx[i].filter_graph)
                avfilter_graph_free(&filter_ctx[i].filter_graph);
        }
        av_free(filter_ctx);
        av_free(stream_ctx);
        avformat_close_input(&ifmt_ctx);
        if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
            avio_closep(&ofmt_ctx->pb);
        avformat_free_context(ofmt_ctx);

        if (ret < 0)
            av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

        return ret ? 1 : 0;
    }

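    One approach I am considering (a sketch, assuming the hls muxer accepts these documented options; this would replace setting priv_data after the header is configured) is passing an AVDictionary to avformat_write_header, which is effectively what -hls_time does on the command line:

    #include <libavformat/avformat.h>
    #include <libavutil/dict.h>

    /* Sketch: write the header with HLS muxer options attached, mirroring
     * "-hls_time 5" on the command line. Entries left in opts afterwards
     * were not recognized by the muxer. */
    static int write_header_with_hls_opts(AVFormatContext *ofmt_ctx)
    {
        AVDictionary *opts = NULL;
        int ret;

        av_dict_set(&opts, "hls_time", "5", 0);      /* target segment duration, seconds */
        av_dict_set(&opts, "hls_list_size", "0", 0); /* keep every segment in the playlist */

        ret = avformat_write_header(ofmt_ctx, &opts);
        av_dict_free(&opts);
        return ret;
    }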

  • Muxing Android MediaCodec encoded H264 packets into RTMP

    31 December 2015, by Vadym

    I am coming from the thread Encoding H.264 from camera with Android MediaCodec. My setup is very similar. However, I attempt to mux the encoded frames with javacv and broadcast them via RTMP.

    RtmpClient.java

    ...
    private volatile BlockingQueue mFrameQueue = new LinkedBlockingQueue(MAXIMUM_VIDEO_FRAME_BACKLOG);
    ...
    private void startStream() throws FrameRecorder.Exception, IOException {
       if (TextUtils.isEmpty(mDestination)) {
           throw new IllegalStateException("Cannot start RtmpClient without destination");
       }

       if (mCamera == null) {
           throw new IllegalStateException("Cannot start RtmpClient without camera.");
       }

       Camera.Parameters cameraParams = mCamera.getParameters();

       mRecorder = new FFmpegFrameRecorder(
               mDestination,
               mVideoQuality.resX,
               mVideoQuality.resY,
               (mAudioQuality.channelType.equals(AudioQuality.CHANNEL_TYPE_STEREO) ? 2 : 1));

       mRecorder.setFormat("flv");

       mRecorder.setFrameRate(mVideoQuality.frameRate);
       mRecorder.setVideoBitrate(mVideoQuality.bitRate);
       mRecorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);

       mRecorder.setSampleRate(mAudioQuality.samplingRate);
       mRecorder.setAudioBitrate(mAudioQuality.bitRate);
       mRecorder.setAudioCodec(avcodec.AV_CODEC_ID_AAC);

       mVideoStream = new VideoStream(mRecorder, mVideoQuality, mFrameQueue, mCamera);
       mAudioStream = new AudioStream(mRecorder, mAudioQuality);

       mRecorder.start();

       // Setup a buffered preview callback
       setupCameraCallback(mCamera, mRtmpClient, DEFAULT_PREVIEW_CALLBACK_BUFFERS,
               mVideoQuality.resX * mVideoQuality.resY * ImageFormat.getBitsPerPixel(
                       cameraParams.getPreviewFormat())/8);

       try {
           mVideoStream.start();
           mAudioStream.start();
       }
       catch(Exception e) {
           e.printStackTrace();
           stopStream();
       }
    }
    ...
    @Override
    public void onPreviewFrame(byte[] data, Camera camera) {
       boolean frameQueued = false;

       if (mRecorder == null || data == null) {
           return;
       }

       frameQueued = mFrameQueue.offer(data);

       // return the buffer to be reused - done in videostream
       //camera.addCallbackBuffer(data);
    }
    ...

    VideoStream.java

    ...
    @Override
    public void run() {
       try {
           mMediaCodec = MediaCodec.createEncoderByType("video/avc");
           MediaFormat mediaFormat = MediaFormat.createVideoFormat("video/avc", mVideoQuality.resX, mVideoQuality.resY);
           mediaFormat.setInteger(MediaFormat.KEY_BIT_RATE, mVideoQuality.bitRate);
           mediaFormat.setInteger(MediaFormat.KEY_FRAME_RATE, mVideoQuality.frameRate);
           mediaFormat.setInteger(MediaFormat.KEY_COLOR_FORMAT, MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420SemiPlanar);
           mediaFormat.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 1);
           mMediaCodec.configure(mediaFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
           mMediaCodec.start();
       }
       catch(IOException e) {
           e.printStackTrace();
       }

       long startTimestamp = System.currentTimeMillis();
       long frameTimestamp = 0;
       byte[] rawFrame = null;

       try {
           while (!Thread.interrupted()) {
               rawFrame = mFrameQueue.take();

               frameTimestamp = 1000 * (System.currentTimeMillis() - startTimestamp);

               encodeFrame(rawFrame, frameTimestamp);

               // return the buffer to be reused
               mCamera.addCallbackBuffer(rawFrame);
           }
       }
       catch (InterruptedException ignore) {
           // ignore interrupt while waiting
       }

       // Clean up video stream allocations
       try {
           mMediaCodec.stop();
           mMediaCodec.release();
           mOutputStream.flush();
           mOutputStream.close();
       } catch (Exception e){
           e.printStackTrace();
       }
    }
    ...
    private void encodeFrame(byte[] input, long timestamp) {
       try {
           ByteBuffer[] inputBuffers = mMediaCodec.getInputBuffers();
           ByteBuffer[] outputBuffers = mMediaCodec.getOutputBuffers();

           int inputBufferIndex = mMediaCodec.dequeueInputBuffer(0);

           if (inputBufferIndex >= 0) {
               ByteBuffer inputBuffer = inputBuffers[inputBufferIndex];
               inputBuffer.clear();
               inputBuffer.put(input);
               mMediaCodec.queueInputBuffer(inputBufferIndex, 0, input.length, timestamp, 0);
           }

           MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();

           int outputBufferIndex = mMediaCodec.dequeueOutputBuffer(bufferInfo, 0);

           if (outputBufferIndex >= 0) {
               while (outputBufferIndex >= 0) {
                   ByteBuffer outputBuffer = outputBuffers[outputBufferIndex];

                   // Should this be a direct byte buffer?
                   byte[] outData = new byte[bufferInfo.size - bufferInfo.offset];
                   outputBuffer.get(outData);

                   mFrameRecorder.record(outData, bufferInfo.offset, outData.length, timestamp);

                   mMediaCodec.releaseOutputBuffer(outputBufferIndex, false);
                   outputBufferIndex = mMediaCodec.dequeueOutputBuffer(bufferInfo, 0);
               }
           }
           else if (outputBufferIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
               outputBuffers = mMediaCodec.getOutputBuffers();
           } else if (outputBufferIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
               // ignore for now
           }
       } catch (Throwable t) {
           t.printStackTrace();
       }

    }
    ...

    FFmpegFrameRecorder.java

    ...
    // Hackish codec copy frame recording function
    public boolean record(byte[] encodedData, int offset, int length, long frameCount) throws Exception {
       int ret;

       if (encodedData == null) {
           return false;
       }

       av_init_packet(video_pkt);

       // this is why i wondered whether I should get outputbuffer data into direct byte buffer
       video_outbuf.put(encodedData, 0, encodedData.length);

       video_pkt.data(video_outbuf);
       video_pkt.size(video_outbuf_size);

       video_pkt.pts(frameCount);
       video_pkt.dts(frameCount);

       video_pkt.stream_index(video_st.index());

       synchronized (oc) {
           /* write the compressed frame in the media file */
           if (interleaved && audio_st != null) {
               if ((ret = av_interleaved_write_frame(oc, video_pkt)) < 0) {
                   throw new Exception("av_interleaved_write_frame() error " + ret + " while writing interleaved video frame.");
               }
           } else {
               if ((ret = av_write_frame(oc, video_pkt)) < 0) {
                   throw new Exception("av_write_frame() error " + ret + " while writing video frame.");
               }
           }
       }
       return (video_pkt.flags() & AV_PKT_FLAG_KEY) == 1;
    }
    ...

    When I try to stream the video and run ffprobe on it, I get the following output:

    ffprobe version 2.5.3 Copyright (c) 2007-2015 the FFmpeg developers
     built on Jan 19 2015 12:56:57 with gcc 4.1.2 (GCC) 20080704 (Red Hat 4.1.2-55)
     configuration: --prefix=/usr --bindir=/usr/bin --datadir=/usr/share/ffmpeg --incdir=/usr/include/ffmpeg --libdir=/usr/lib64 --mandir=/usr/share/man --arch=x86_64 --optflags='-O2 -g -pipe -Wall -Wp,-D_FORTIFY_SOURCE=2 -fexceptions -fstack-protector --param=ssp-buffer-size=4 -m64 -mtune=generic' --enable-bzlib --disable-crystalhd --enable-libass --enable-libdc1394 --enable-libfaac --enable-nonfree --disable-indev=jack --enable-libfreetype --enable-libgsm --enable-libmp3lame --enable-openal --enable-libopencv --enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libtheora --enable-libvorbis --enable-libvpx --enable-libx264 --enable-libxvid --enable-x11grab --enable-avfilter --enable-avresample --enable-postproc --enable-pthreads --disable-static --enable-shared --enable-gpl --disable-debug --disable-stripping --enable-libcaca --shlibdir=/usr/lib64 --enable-runtime-cpudetect
     libavutil      54. 15.100 / 54. 15.100
     libavcodec     56. 13.100 / 56. 13.100
     libavformat    56. 15.102 / 56. 15.102
     libavdevice    56.  3.100 / 56.  3.100
     libavfilter     5.  2.103 /  5.  2.103
     libavresample   2.  1.  0 /  2.  1.  0
     libswscale      3.  1.101 /  3.  1.101
     libswresample   1.  1.100 /  1.  1.100
     libpostproc    53.  3.100 / 53.  3.100
    Metadata:
     Server                NGINX RTMP (github.com/arut/nginx-rtmp-module)
     width                 320.00
     height                240.00
     displayWidth          320.00
     displayHeight         240.00
     duration              0.00
     framerate             0.00
     fps                   0.00
     videodatarate         261.00
     videocodecid          7.00
     audiodatarate         62.00
     audiocodecid          10.00
     profile
     level
    [live_flv @ 0x1edb0820] Could not find codec parameters for stream 0 (Video: none, none, 267 kb/s): unknown codec
    Consider increasing the value for the 'analyzeduration' and 'probesize' options
    Input #0, live_flv, from 'rtmp://<server>/input/<stream>':
     Metadata:
       Server          : NGINX RTMP (github.com/arut/nginx-rtmp-module)
       displayWidth    : 320
       displayHeight   : 240
       fps             : 0
       profile         :
       level           :
     Duration: 00:00:00.00, start: 16.768000, bitrate: N/A
       Stream #0:0: Video: none, none, 267 kb/s, 1k tbr, 1k tbn, 1k tbc
       Stream #0:1: Audio: aac (LC), 16000 Hz, mono, fltp, 63 kb/s
    Unsupported codec with id 0 for input stream 0

    I am not, by any means, an expert in H264 or video encoding. I know that the encoded frames that come out of MediaCodec contain SPS NAL, PPS NAL, and frame NAL units. I've also written the MediaCodec output to a file and was able to play it back (I did have to specify the format and framerate, as otherwise it would play too fast).
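    For reference, a hypothetical invocation for playing back such a raw dump (the file name and rate are assumed) is:

    ffplay -f h264 -framerate 30 dump.h264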

    My assumption is that things should work (see how little I know :)). Knowing that SPS and PPS are written out, the decoder should know enough. Yet, ffprobe fails to recognize the codec, fps, and other video information. Do I need to pass packet flag information to the FFmpegFrameRecorder.java record() function? Or should I use a direct buffer? Any suggestion will be appreciated! I should be able to figure things out with a hint.
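    In case that is the issue, a sketch of what passing the flag could look like with the same JavaCV wrappers (isKeyFrame would be a new parameter to record(), not existing code):

    // In encodeFrame(): derive the keyframe flag from MediaCodec's buffer info
    boolean isKeyFrame = (bufferInfo.flags & MediaCodec.BUFFER_FLAG_SYNC_FRAME) != 0;
    mFrameRecorder.record(outData, bufferInfo.offset, outData.length, timestamp, isKeyFrame);

    // In record(): mark the AVPacket before muxing so the FLV muxer sees real keyframes
    if (isKeyFrame) {
        video_pkt.flags(video_pkt.flags() | AV_PKT_FLAG_KEY);
    }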

    PS: I know that some codecs use Planar and others SemiPlanar color formats. That distinction will come later if I get past this. Also, I didn't go the Surface-to-MediaCodec way because I need to support API 17, and it requires more changes than this route, which I think helps me understand the basic flow better. Again, I appreciate any suggestions. Please let me know if something needs to be clarified.

    Update #1

    So, having done more testing, I see that my encoder outputs the following frames:

    000000016742800DDA0507E806D0A1350000000168CE06E2
    0000000165B840A6F1E7F0EA24000AE73BEB5F51CC7000233A84240...
    0000000141E2031364E387FD4F9BB3D67F51CC7000279B9F9CFE811...
    0000000141E40304423FFFFF0B7867F89FAFFFFFFFFFFCBE8EF25E6...
    0000000141E602899A3512EF8AEAD1379F0650CC3F905131504F839...
    ...

    The very first frame contains SPS and PPS. From what I was able to see, these are transmitted only once. The rest are NAL types 1 and 5. So my assumption is that, for ffprobe to see stream info not only when the stream starts, I should capture the SPS and PPS frames and re-transmit them myself periodically, after a certain number of frames, or perhaps before every I-frame. What do you think? A sketch of that idea follows.
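    Against the encodeFrame() loop above, it could look roughly like this (mConfigFrame is an assumed new field, not existing code):

    // private byte[] mConfigFrame; // cached SPS/PPS, filled once

    if ((bufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
        // First output buffer: SPS/PPS only; cache it instead of muxing it
        mConfigFrame = new byte[bufferInfo.size];
        outputBuffer.get(mConfigFrame);
    } else if ((bufferInfo.flags & MediaCodec.BUFFER_FLAG_SYNC_FRAME) != 0 && mConfigFrame != null) {
        // Keyframe: re-send SPS/PPS in front of the IDR NAL unit
        byte[] frame = new byte[bufferInfo.size];
        outputBuffer.get(frame);
        byte[] withConfig = new byte[mConfigFrame.length + frame.length];
        System.arraycopy(mConfigFrame, 0, withConfig, 0, mConfigFrame.length);
        System.arraycopy(frame, 0, withConfig, mConfigFrame.length, frame.length);
        // hand withConfig to the recorder instead of the bare keyframe
    }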

    Update #2

    I am unable to validate that I'm writing frames successfully. After trying to read back the written packet, I cannot validate the written bytes. Strangely, even on a successful write of an IPL image and stream, I also cannot print out the bytes of the encoded packet after avcodec_encode_video2. I've hit a dead end.

  • Using FFmpeg with URL input causes SIGSEGV in AWS Lambda (Python runtime)

    26 March, by Dave94

    I'm trying to implement a video converting solution on AWS Lambda, following their article named Processing user-generated content using AWS Lambda and FFmpeg.
    However, when I run my command with subprocess.Popen() it returns -11, which translates to SIGSEGV (segmentation fault).
    I've tried to process the video with the newest (4.3.1) static build from John Van Sickle's site as well as with the "official" ffmpeg-lambda-layer, but it seems it doesn't matter which one I use; the result is the same.


    If I download the video to the Lambda's /tmp directory and add this downloaded file as an input to FFmpeg, it works correctly (with the same parameters). However, I'm trying to avoid this, as the /tmp directory's maximum size is only 512 MB, which is not quite enough for me.


    The relevant code, which returns SIGSEGV:


    ffmpeg_cmd = '/opt/bin/ffmpeg -stream_loop -1 -i "' + s3_source_signed_url + '" -i /opt/bin/audio.mp3 -i /opt/bin/watermark.png -shortest -y -deinterlace -vcodec libx264 -pix_fmt yuv420p -preset veryfast -r 30 -g 60 -b:v 4500k -c:a copy -map 0:v:0 -map 1:a:0 -filter_complex scale=1920:1080:force_original_aspect_ratio=decrease,pad=1920:1080:(ow-iw)/2:(oh-ih)/2,setsar=1,overlay=(W-w)/2:(H-h)/2,format=yuv420p -loglevel verbose -f flv -'
    command1 = shlex.split(ffmpeg_cmd)
    p1 = subprocess.Popen(command1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p1.communicate()
    print(p1.returncode) # prints -11


    The stderr of FFmpeg:


    ffmpeg version 4.1.3-static https://johnvansickle.com/ffmpeg/  Copyright (c) 2000-2019 the FFmpeg developers
      built with gcc 6.3.0 (Debian 6.3.0-18+deb9u1) 20170516
      configuration: --enable-gpl --enable-version3 --enable-static --disable-debug --disable-ffplay --disable-indev=sndio --disable-outdev=sndio --cc=gcc-6 --enable-fontconfig --enable-frei0r --enable-gnutls --enable-gmp --enable-gray --enable-libaom --enable-libfribidi --enable-libass --enable-libvmaf --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-librubberband --enable-libsoxr --enable-libspeex --enable-libvorbis --enable-libopus --enable-libtheora --enable-libvidstab --enable-libvo-amrwbenc --enable-libvpx --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzvbi --enable-libzimg
      libavutil      56. 22.100 / 56. 22.100
      libavcodec     58. 35.100 / 58. 35.100
      libavformat    58. 20.100 / 58. 20.100
      libavdevice    58.  5.100 / 58.  5.100
      libavfilter     7. 40.101 /  7. 40.101
      libswscale      5.  3.100 /  5.  3.100
      libswresample   3.  3.100 /  3.  3.100
      libpostproc    55.  3.100 / 55.  3.100
    [tcp @ 0x728cc00] Starting connection attempt to 52.219.74.177 port 443
    [tcp @ 0x728cc00] Successfully connected to 52.219.74.177 port 443
    [h264 @ 0x729b780] Reinit context to 1280x720, pix_fmt: yuv420p
    Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'https://bucket.s3.amazonaws.com --> presigned url with 15 min expiration time':
      Metadata:
        major_brand     : mp42
        minor_version   : 0
        compatible_brands: mp42mp41isomavc1
        creation_time   : 2015-09-02T07:42:42.000000Z
      Duration: 00:00:15.64, start: 0.000000, bitrate: 2640 kb/s
        Stream #0:0(und): Video: h264 (High), 1 reference frame (avc1 / 0x31637661), yuv420p(tv, bt709, left), 1280x720 [SAR 1:1 DAR 16:9], 2475 kb/s, 25 fps, 25 tbr, 25 tbn, 50 tbc (default)
        Metadata:
          creation_time   : 2015-09-02T07:42:42.000000Z
          handler_name    : L-SMASH Video Handler
          encoder         : AVC Coding
        Stream #0:1(und): Audio: aac (LC) (mp4a / 0x6134706D), 48000 Hz, stereo, fltp, 160 kb/s (default)
        Metadata:
          creation_time   : 2015-09-02T07:42:42.000000Z
          handler_name    : L-SMASH Audio Handler
    [mp3 @ 0x733f340] Skipping 0 bytes of junk at 1344.
    Input #1, mp3, from '/opt/bin/audio.mp3':
      Metadata:
        encoded_by      : Logic Pro X
        date            : 2021-01-03
        coding_history  :
        time_reference  : 158760000
        umid            : 0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004500F9E4
        encoder         : Lavf58.49.100
      Duration: 00:04:01.21, start: 0.025057, bitrate: 320 kb/s
        Stream #1:0: Audio: mp3, 44100 Hz, stereo, fltp, 320 kb/s
        Metadata:
          encoder         : Lavc58.97
    Input #2, png_pipe, from '/opt/bin/watermark.png':
      Duration: N/A, bitrate: N/A
        Stream #2:0: Video: png, 1 reference frame, rgba(pc), 701x190 [SAR 1521:1521 DAR 701:190], 25 tbr, 25 tbn, 25 tbc
    [Parsed_scale_0 @ 0x7341140] w:1920 h:1080 flags:'bilinear' interl:0
    Stream mapping:
      Stream #0:0 (h264) -> scale
      Stream #2:0 (png) -> overlay:overlay
      format -> Stream #0:0 (libx264)
      Stream #1:0 -> #0:1 (copy)
    Press [q] to stop, [?] for help
    [h264 @ 0x72d8600] Reinit context to 1280x720, pix_fmt: yuv420p
    [Parsed_scale_0 @ 0x733c1c0] w:1920 h:1080 flags:'bilinear' interl:0
    [graph 0 input from stream 0:0 @ 0x7669200] w:1280 h:720 pixfmt:yuv420p tb:1/25 fr:25/1 sar:1/1 sws_param:flags=2
    [graph 0 input from stream 2:0 @ 0x766a980] w:701 h:190 pixfmt:rgba tb:1/25 fr:25/1 sar:1521/1521 sws_param:flags=2
    [auto_scaler_0 @ 0x7670240] w:iw h:ih flags:'bilinear' interl:0
    [deinterlace_in_2_0 @ 0x766b680] auto-inserting filter 'auto_scaler_0' between the filter 'graph 0 input from stream 2:0' and the filter 'deinterlace_in_2_0'
    [Parsed_scale_0 @ 0x733c1c0] w:1280 h:720 fmt:yuv420p sar:1/1 -> w:1920 h:1080 fmt:yuv420p sar:1/1 flags:0x2
    [Parsed_pad_1 @ 0x733ce00] w:1920 h:1080 -> w:1920 h:1080 x:0 y:0 color:0x000000FF
    [Parsed_setsar_2 @ 0x733da00] w:1920 h:1080 sar:1/1 dar:16/9 -> sar:1/1 dar:16/9
    [auto_scaler_0 @ 0x7670240] w:701 h:190 fmt:rgba sar:1521/1521 -> w:701 h:190 fmt:yuva420p sar:1/1 flags:0x2
    [Parsed_overlay_3 @ 0x733e440] main w:1920 h:1080 fmt:yuv420p overlay w:701 h:190 fmt:yuva420p
    [Parsed_overlay_3 @ 0x733e440] [framesync @ 0x733e5a8] Selected 1/50 time base
    [Parsed_overlay_3 @ 0x733e440] [framesync @ 0x733e5a8] Sync level 2
    [libx264 @ 0x72c1c00] using SAR=1/1
    [libx264 @ 0x72c1c00] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2
    [libx264 @ 0x72c1c00] profile Progressive High, level 4.0, 4:2:0, 8-bit
    [libx264 @ 0x72c1c00] 264 - core 157 r2969 d4099dd - H.264/MPEG-4 AVC codec - Copyleft 2003-2019 - http://www.videolan.org/x264.html - options: cabac=1 ref=1 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=2 psy=1 psy_rd=1.00:0.00 mixed_ref=0 me_range=16 chroma_me=1 trellis=0 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=0 threads=9 lookahead_threads=3 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=1 keyint=60 keyint_min=6 scenecut=40 intra_refresh=0 rc_lookahead=10 rc=abr mbtree=1 bitrate=4500 ratetol=1.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
    Output #0, flv, to 'pipe:':
      Metadata:
        major_brand     : mp42
        minor_version   : 0
        compatible_brands: mp42mp41isomavc1
        encoder         : Lavf58.20.100
        Stream #0:0: Video: h264 (libx264), 1 reference frame ([7][0][0][0] / 0x0007), yuv420p, 1920x1080 [SAR 1:1 DAR 16:9], q=-1--1, 4500 kb/s, 30 fps, 1k tbn, 30 tbc (default)
        Metadata:
          encoder         : Lavc58.35.100 libx264
        Side data:
          cpb: bitrate max/min/avg: 0/0/4500000 buffer size: 0 vbv_delay: -1
        Stream #0:1: Audio: mp3 ([2][0][0][0] / 0x0002), 44100 Hz, stereo, fltp, 320 kb/s
        Metadata:
          encoder         : Lavc58.97
    frame=   27 fps=0.0 q=32.0 size=     247kB time=00:00:00.03 bitrate=59500.0kbits/s speed=0.0672x
    frame=   77 fps= 77 q=27.0 size=    1115kB time=00:00:02.03 bitrate=4478.0kbits/s speed=2.03x
    frame=  126 fps= 83 q=25.0 size=    2302kB time=00:00:04.00 bitrate=4712.4kbits/s speed=2.64x
    frame=  177 fps= 87 q=26.0 size=    3576kB time=00:00:06.03 bitrate=4854.4kbits/s speed=2.97x
    frame=  225 fps= 88 q=25.0 size=    4910kB time=00:00:07.96 bitrate=5047.8kbits/s speed=3.13x
    frame=  272 fps= 89 q=27.0 size=    6189kB time=00:00:09.84 bitrate=5147.9kbits/s speed=3.22x
    frame=  320 fps= 90 q=27.0 size=    7058kB time=00:00:11.78 bitrate=4907.5kbits/s speed=3.31x
    frame=  372 fps= 91 q=26.0 size=    8098kB time=00:00:13.84 bitrate=4791.0kbits/s speed=3.4x


    And that's the end of it. It should continue processing until 00:04:02, as that's my audio's length, but it stops here every time (this is approximately my video's length).


    The relevant code, which works correctly:


    ffmpeg_cmd = '/opt/bin/ffmpeg -stream_loop -1 -i "' + '/tmp/' + s3_source_key + '" -i /opt/bin/audio.mp3 -i /opt/bin/watermark.png -shortest -y -deinterlace -vcodec libx264 -pix_fmt yuv420p -preset veryfast -r 30 -g 60 -b:v 4500k -c:a copy -map 0:v:0 -map 1:a:0 -filter_complex scale=1920:1080:force_original_aspect_ratio=decrease,pad=1920:1080:(ow-iw)/2:(oh-ih)/2,setsar=1,overlay=(W-w)/2:(H-h)/2,format=yuv420p -loglevel verbose -f flv -'
    command1 = shlex.split(ffmpeg_cmd)
    p1 = subprocess.Popen(command1, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p1.communicate()
    print(p1.returncode) # prints 0


    With this code, the video is repeated as many times as needed to become as long as the audio.
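    Since the command writes the FLV to stdout, another pattern worth noting (a sketch of what I could try; the bucket and key names are placeholders) is consuming that pipe incrementally instead of buffering everything in communicate():

    import shlex
    import subprocess

    import boto3

    # Stream ffmpeg's stdout straight to S3 as a multipart upload, avoiding
    # both large in-memory buffers and the 512 MB /tmp limit.
    s3 = boto3.client("s3")
    p1 = subprocess.Popen(shlex.split(ffmpeg_cmd), stdout=subprocess.PIPE)
    s3.upload_fileobj(p1.stdout, "my-output-bucket", "converted/video.flv")
    p1.wait()
    print(p1.returncode)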


    Both versions work correctly on my computer.


    This question is almost the same, but in my case FFmpeg is able to access the signed URL.
