Advanced search

Media (0)


No media matching your criteria is available on the site.

Other articles (64)

  • Contribute to a better visual interface

    13 April 2011

    MediaSPIP is based on a system of themes and templates. Templates define the placement of information on the page, and can be adapted to a wide range of uses. Themes define the overall graphic appearance of the site.
    Anyone can submit a new graphic theme or template and make it available to the MediaSPIP community.

  • Support for all types of media

    10 April 2011

    Unlike many modern document-sharing applications and platforms, MediaSPIP aims to handle as many different document formats as possible, whether they are: images (png, gif, jpg, bmp and others...); audio (MP3, Ogg, Wav and others...); video (Avi, MP4, Ogv, mpg, mov, wmv and others...); or textual content, code and more (OpenOffice, Microsoft Office (spreadsheet, presentation), web (html, css), LaTeX, Google Earth) (...)

  • Notifications from the farm

    1 December 2010

    To manage the farm correctly, several notifications need to be sent when specific actions occur, both to the user concerned and to all of the farm's administrators.
    Status-change notifications
    When the status of an instance changes, all of the farm's administrators must be notified of the change, as well as the administrator of that instance.
    When a channel is requested
    Change to the "publie" (published) status
    Change to (...)

On other sites (6509)

  • How to fix the deprecation issue in ffmpeg?

    19 May 2022, by md612

    I am trying to compile ffmpeg's "transcoding" example code. However, I run into a deprecation issue. Even after adding inline=__inline and _CRT_SECURE_NO_DEPRECATE to the preprocessor definitions, the following error still exists. I don't know how to fix it. Please help.

    



    The compiler reports error C4996: 'avcodec_encode_video2': was declared deprecated for the following line:

    



int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
    (ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
     AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
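
    The call being flagged is the old encode API. Since FFmpeg 3.1 it has been superseded by avcodec_send_frame()/avcodec_receive_packet(), so one possible direction (only a rough sketch using the same headers as the full source below, not a tested drop-in fix for this project) is to replace the enc_func pointer with a helper along these lines:

static int encode_and_mux(AVCodecContext *enc_ctx, AVFrame *frame,
                          AVFormatContext *ofmt, unsigned int stream_index)
{
    AVPacket pkt;
    int ret;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    /* Sending NULL flushes the encoder. */
    ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, &pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;      /* needs more input, or fully flushed */
        if (ret < 0)
            return ret;    /* genuine encoding error */

        pkt.stream_index = stream_index;
        av_packet_rescale_ts(&pkt, enc_ctx->time_base,
                             ofmt->streams[stream_index]->time_base);
        /* av_interleaved_write_frame() takes ownership of the packet. */
        ret = av_interleaved_write_frame(ofmt, &pkt);
    }
    return ret;
}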

    



#include
#include
#include

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>

#pragma warning(disable 4996)

#ifdef _MSC_VER
#define snprintf _snprintf
#endif

static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;
typedef struct FilteringContext {
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx;

typedef struct StreamContext {
    AVCodecContext *dec_ctx;
    AVCodecContext *enc_ctx;
} StreamContext;
static StreamContext *stream_ctx;

static int open_input_file(const char *filename)
{
    int ret;
    unsigned int i;

    ifmt_ctx = NULL;
    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    stream_ctx = (StreamContext *)av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
    if (!stream_ctx)
        return AVERROR(ENOMEM);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *stream = ifmt_ctx->streams[i];
        AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
        AVCodecContext *codec_ctx;
        if (!dec) {
            av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
            return AVERROR_DECODER_NOT_FOUND;
        }
        codec_ctx = avcodec_alloc_context3(dec);
        if (!codec_ctx) {
            av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
            return AVERROR(ENOMEM);
        }
        ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
                "for stream #%u\n", i);
            return ret;
        }
        /* Reencode video & audio and remux subtitles etc. */
        if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
            || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
                codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
            /* Open decoder */
            ret = avcodec_open2(codec_ctx, dec, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
                return ret;
            }
        }
        stream_ctx[i].dec_ctx = codec_ctx;
    }

    av_dump_format(ifmt_ctx, 0, filename, 0);
    return 0;
}

static int open_output_file(const char *filename)
{
    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *dec_ctx, *enc_ctx;
    AVCodec *encoder;
    int ret;
    unsigned int i;

    ofmt_ctx = NULL;
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
    if (!ofmt_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
        return AVERROR_UNKNOWN;
    }

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
            return AVERROR_UNKNOWN;
        }

        in_stream = ifmt_ctx->streams[i];
        dec_ctx = stream_ctx[i].dec_ctx;

        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
            || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* in this example, we choose transcoding to same codec */
            encoder = avcodec_find_encoder(dec_ctx->codec_id);
            if (!encoder) {
                av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                return AVERROR_INVALIDDATA;
            }
            enc_ctx = avcodec_alloc_context3(encoder);
            if (!enc_ctx) {
                av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
                return AVERROR(ENOMEM);
            }

            /* In this example, we transcode to same properties (picture size,
             * sample rate etc.). These properties can be changed for output
             * streams easily using filters */
            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                enc_ctx->height = dec_ctx->height;
                enc_ctx->width = dec_ctx->width;
                enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
                /* take first format from list of supported formats */
                if (encoder->pix_fmts)
                    enc_ctx->pix_fmt = encoder->pix_fmts[0];
                else
                    enc_ctx->pix_fmt = dec_ctx->pix_fmt;
                /* video time_base can be set to whatever is handy and supported by encoder */
                enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
            }
            else {
                enc_ctx->sample_rate = dec_ctx->sample_rate;
                enc_ctx->channel_layout = dec_ctx->channel_layout;
                enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
                /* take first format from list of supported formats */
                enc_ctx->sample_fmt = encoder->sample_fmts[0];
                enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
            }

            /* Third parameter can be used to pass settings to encoder */
            ret = avcodec_open2(enc_ctx, encoder, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                return ret;
            }
            ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
                return ret;
            }
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

            out_stream->time_base = enc_ctx->time_base;
            stream_ctx[i].enc_ctx = enc_ctx;
        }
        else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
            return AVERROR_INVALIDDATA;
        }
        else {
            /* if this stream must be remuxed */
            ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
                return ret;
            }
            out_stream->time_base = in_stream->time_base;
        }

    }
    av_dump_format(ofmt_ctx, 0, filename, 1);

    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
            return ret;
        }
    }

    /* init muxer, write output file header */
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return ret;
    }

    return 0;
}

static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
    AVCodecContext *enc_ctx, const char *filter_spec)
{
    char args[512];
    int ret = 0;
    AVFilter *buffersrc = NULL;
    AVFilter *buffersink = NULL;
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    AVFilterGraph *filter_graph = avfilter_graph_alloc();

    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        buffersrc = avfilter_get_by_name("buffer");
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        snprintf(args, sizeof(args),
            "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
            dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
            dec_ctx->time_base.num, dec_ctx->time_base.den,
            dec_ctx->sample_aspect_ratio.num,
            dec_ctx->sample_aspect_ratio.den);

        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
            args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
            NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
            (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
            AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    }
    else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        buffersrc = avfilter_get_by_name("abuffer");
        buffersink = avfilter_get_by_name("abuffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        if (!dec_ctx->channel_layout)
            dec_ctx->channel_layout =
                av_get_default_channel_layout(dec_ctx->channels);
        snprintf(args, sizeof(args),
            "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
            dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
            av_get_sample_fmt_name(dec_ctx->sample_fmt),
            dec_ctx->channel_layout);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
            args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
            NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
            (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
            AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
            (uint8_t*)&enc_ctx->channel_layout,
            sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
            (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
            AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
    }
    else {
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
        &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}

static int init_filters(void)
{
    const char *filter_spec;
    unsigned int i;
    int ret;
    filter_ctx = (FilteringContext *)av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
    if (!filter_ctx)
        return AVERROR(ENOMEM);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        filter_ctx[i].buffersrc_ctx = NULL;
        filter_ctx[i].buffersink_ctx = NULL;
        filter_ctx[i].filter_graph = NULL;
        if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
            || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
            continue;

        if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
            filter_spec = "null"; /* passthrough (dummy) filter for video */
        else
            filter_spec = "anull"; /* passthrough (dummy) filter for audio */
        ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
            stream_ctx[i].enc_ctx, filter_spec);
        if (ret)
            return ret;
    }
    return 0;
}

static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
    int ret;
    int got_frame_local;
    AVPacket enc_pkt;
    int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
        (ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
        AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

    if (!got_frame)
        got_frame = &got_frame_local;

    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
    /* encode filtered frame */
    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    av_init_packet(&enc_pkt);
    ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
        filt_frame, got_frame);
    av_frame_free(&filt_frame);
    if (ret < 0)
        return ret;
    if (!(*got_frame))
        return 0;

    /* prepare packet for muxing */
    enc_pkt.stream_index = stream_index;
    av_packet_rescale_ts(&enc_pkt,
        stream_ctx[stream_index].enc_ctx->time_base,
        ofmt_ctx->streams[stream_index]->time_base);

    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
    /* mux encoded frame */
    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    return ret;
}

static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
    int ret;
    AVFrame *filt_frame;

    av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
    /* push the decoded frame into the filtergraph */
    ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
        frame, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
        return ret;
    }

    /* pull filtered frames from the filtergraph */
    while (1) {
        filt_frame = av_frame_alloc();
        if (!filt_frame) {
            ret = AVERROR(ENOMEM);
            break;
        }
        av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
        ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
            filt_frame);
        if (ret < 0) {
            /* if no more frames for output - returns AVERROR(EAGAIN)
             * if flushed and no more frames for output - returns AVERROR_EOF
             * rewrite retcode to 0 to show it as normal procedure completion
             */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            av_frame_free(&filt_frame);
            break;
        }

        filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
        ret = encode_write_frame(filt_frame, stream_index, NULL);
        if (ret < 0)
            break;
    }

    return ret;
}

static int flush_encoder(unsigned int stream_index)
{
    int ret;
    int got_frame;

    if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
        AV_CODEC_CAP_DELAY))
        return 0;

    while (1) {
        av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
        ret = encode_write_frame(NULL, stream_index, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame)
            return 0;
    }
    return ret;
}

int main(int argc, char **argv)
{
    int ret;
    AVPacket packet = { .data = NULL, .size = 0 };
    AVFrame *frame = NULL;
    enum AVMediaType type;
    unsigned int stream_index;
    unsigned int i;
    int got_frame;
    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

    if (argc != 3) {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = open_output_file(argv[2])) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
            break;
        stream_index = packet.stream_index;
        type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
            stream_index);

        if (filter_ctx[stream_index].filter_graph) {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
            frame = av_frame_alloc();
            if (!frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
            av_packet_rescale_ts(&packet,
                ifmt_ctx->streams[stream_index]->time_base,
                stream_ctx[stream_index].dec_ctx->time_base);
            dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                avcodec_decode_audio4;
            ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
                &got_frame, &packet);
            if (ret < 0) {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            if (got_frame) {
                frame->pts = frame->best_effort_timestamp;
                ret = filter_encode_write_frame(frame, stream_index);
                av_frame_free(&frame);
                if (ret < 0)
                    goto end;
            }
            else {
                av_frame_free(&frame);
            }
        }
        else {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(&packet,
                ifmt_ctx->streams[stream_index]->time_base,
                ofmt_ctx->streams[stream_index]->time_base);

            ret = av_interleaved_write_frame(ofmt_ctx, &packet);
            if (ret < 0)
                goto end;
        }
        av_packet_unref(&packet);
    }

    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }

        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }

    av_write_trailer(ofmt_ctx);
end:
    av_packet_unref(&packet);
    av_frame_free(&frame);
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        avcodec_free_context(&stream_ctx[i].dec_ctx);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
            avcodec_free_context(&stream_ctx[i].enc_ctx);
        if (filter_ctx && filter_ctx[i].filter_graph)
            avfilter_graph_free(&filter_ctx[i].filter_graph);
    }
    av_free(filter_ctx);
    av_free(stream_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

    return ret ? 1 : 0;
}
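
    Note also that the #pragma warning(disable 4996) line above is missing a colon; the form MSVC actually understands is:

#pragma warning(disable : 4996)   /* suppress C4996 deprecation diagnostics */

    Whether suppressing C4996 is preferable to moving off the deprecated calls is a separate question; the send/receive sketch shown earlier is the longer-term route.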


  • Translating Return To Ringworld

    17 August 2016, by Multimedia Mike — Game Hacking

    As indicated in my previous post, the Translator has expressed interest in applying his hobby towards another DOS adventure game from the mid-1990s: Return to Ringworld (henceforth R2RW) by Tsunami Media. This represents significantly more work than the previous outing, Phantasmagoria.


    Return to Ringworld Title Screen

    I have been largely successful thus far in crafting translation tools. I have pushed the fruits of these labors to a Github repository named improved-spoon (named using Github’s random name generator because I wanted something more interesting than ‘game-hacking-tools’).

    Further, I have recorded everything I have learned about the game’s resource format (named RLB) at the XentaxWiki.

    New Challenges
    The previous project mostly involved scribbling subtitle text on an endless series of video files by leveraging a separate software library which took care of rendering fonts. In contrast, R2RW has at least 30k words of English text contained in various blocks which require translation. Further, the game encodes its own fonts (9 of them) which stubbornly refuse to be useful for rendering text in nearly any other language.

    Thus, the two immediate challenges are:

    1. Translating volumes of text to Spanish
    2. Expanding the fonts to represent Spanish characters

    Normally, “figuring out the file format data structures involved” is on the list as well. Thankfully, understanding the formats is not a huge challenge since the folks at the ScummVM project already did all the heavy lifting of reverse engineering the file formats.

    The Pitch
    Here was the plan:

    • Create a tool that can dump out the interesting data from the game’s master resource file.
    • Create a tool that can perform the elaborate file copy described in the previous post. The new file should be bit for bit compatible with the original file.
    • Modify the rewriting tool to repack some modified strings into the new resource file.
    • Unpack the fonts and figure out a way to add new characters.
    • Repack the new fonts into the resource file.
    • Repack message strings with Spanish characters.

    Showing The Work: Modifying Strings
    First, I created the tool to unpack blocks of message string resources. I elected to dump the strings to disk as JSON data since it’s easy to write and read JSON using Python, and it’s quick to check if any mistakes have crept in.

    The next step is to find a string to focus on. So I started the game and looked for the first string I could trigger:


    Return to Ringworld: Original text

    This shows up in the JSON string dump as:

      
        "Spanish" : " !0205Your quarters on the Lance of Truth are spartan, in accord with your mercenary lifestyle.",
        "English" : " !0205Your quarters on the Lance of Truth are spartan, in accord with your mercenary lifestyle."
      ,
    

    As you can see, many of the strings are encoded with an ID key as part of the string which should probably be left unmodified. I changed the Spanish string:

      
        "Spanish" : " !0205Hey, is this thing on ?",
        "English" : " !0205Your quarters on the Lance of Truth are spartan, in accord with your mercenary lifestyle."
      ,
    

    And then I wrote the repacking tool to substitute this message block for the original one. Look! The engine liked it!


    Return to Ringworld: Modified text

    Little steps, little steps.

    Showing The Work: Modifying Fonts
    The next little step is to find a place to put the new characters. First, a problem definition: The immediate goal is to translate the game into Spanish. The current fonts encoded in the game resource only support 128 characters, corresponding to 7-bit ASCII. In order to properly express Spanish, 16 new characters are required: á, é, í, ó, ú, ü, ñ (each in upper and lower case for a total of 14 characters) as well as the inverted punctuation symbols: ¿, ¡.

    Again, ScummVM already documents (via code) the font coding format. So I quickly determined that each of the 9 fonts is comprised of 128 individual bitmaps with either 1 or 2 bits per pixel. I wrote a tool to unpack each character into an individual portable grey map (PGM) image. These can be edited with graphics editors or with text editors since they are just text files.
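
    For what it's worth, writing such a PGM takes only a few lines; the sketch below assumes a 1-bit-per-pixel glyph packed row by row, most significant bit first (the actual RLB font packing may well differ), and is not the improved-spoon tool itself:

#include <stdio.h>
#include <stdint.h>

/* Dump one glyph bitmap as an ASCII PGM (P2): header, then one grey value
 * per pixel. Ink pixels are written as black (0) on a white background. */
static int write_glyph_pgm(const char *path, const uint8_t *bits,
                           int width, int height)
{
    FILE *f = fopen(path, "w");
    if (!f)
        return -1;
    fprintf(f, "P2\n%d %d\n255\n", width, height);
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            int idx = y * width + x;               /* assumed packing order */
            int on  = (bits[idx / 8] >> (7 - idx % 8)) & 1;
            fprintf(f, "%d ", on ? 0 : 255);
        }
        fputc('\n', f);
    }
    fclose(f);
    return 0;
}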

    Where to put the 16 new Spanish characters? ASCII characters 1-31 are non-printable, so my first theory was that these characters would be empty and could be repurposed. However, after dumping and inspecting, I learned that they represent the same set of characters as seen in DOS Code Page 437. So that's a no-go (so I assumed; I didn't check if any existing strings leveraged those characters).

    My next plan was to hope that I could extend the font beyond index 127 and use positions 128-143. This worked superbly. This is the new example string:

      
        "Spanish" : " !0205¿Ves esto ? ¡La puntuacion se hace girar !",
        "English" : " !0205Your quarters on the Lance of Truth are spartan, in accord with your mercenary lifestyle."
      ,
    

    Fortunately, JSON understands UTF-8, and after mapping the 16 necessary characters down to the numeric range of 128-143, I repacked the new fonts and the new string:
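
    The character-to-slot mapping itself is trivial; a hypothetical version (the exact slot order used by the real tools may differ) looks like this:

/* Map a Unicode code point to a byte for the extended game font:
 * plain ASCII passes through, the 16 extra Spanish characters go to
 * slots 128-143, and anything else is rejected. */
static const unsigned int extra_cp[16] = {
    0x00E1, 0x00E9, 0x00ED, 0x00F3, 0x00FA, 0x00FC, 0x00F1,  /* lower-case accented vowels, u-umlaut, n-tilde */
    0x00C1, 0x00C9, 0x00CD, 0x00D3, 0x00DA, 0x00DC, 0x00D1,  /* upper-case versions */
    0x00BF, 0x00A1                                           /* inverted question and exclamation marks */
};

static int map_codepoint(unsigned int cp)
{
    if (cp < 128)
        return (int)cp;
    for (int i = 0; i < 16; i++)
        if (extra_cp[i] == cp)
            return 128 + i;
    return -1;
}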


    Return to Ringworld: Espanol
    Translation: “See this? The punctuation is rotated!”

    Another victory. Notice that there are no diacritics in this string. None are required for this translation (according to Google Translate). But adding the diacritics to the 14 characters isn’t my department. My tool does help by prepopulating [aeiounAEIOUN] into the right positions to make editing easier for the Translator. But the tool does make the effort to rotate the punctuation since that is easy to automate.

    Next Steps and Residual Weirdness
    There is another method for storing ASCII text inside the R2RW resource called strip resources. These store conversation scripts. There are plenty of fields in the data structures that I don’t fully understand. So, following the lessons I learned from my previous translation outing, I was determined to modify as little as possible. This means copying over most of the original data structures intact, but changing the field representing the relative offset that points to the corresponding string. This works well since the strings are invariably stored NULL-terminated in a concatenated manner.
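
    The bookkeeping for that last part can be illustrated roughly like this (the record layout and field names here are invented; only the relative-offset idea matches the description above):

#include <stdint.h>
#include <string.h>

/* Hypothetical strip record: the original bytes are copied through
 * untouched except for the field that points at the string. */
typedef struct StripRecord {
    uint8_t  raw[16];        /* copied verbatim from the original resource */
    uint16_t string_offset;  /* relative offset into the string block */
} StripRecord;

/* Append one translated, NUL-terminated string to the concatenated string
 * block, repoint the copied record at its new location, and return the new
 * end-of-block position. */
static size_t repack_string(StripRecord *rec, char *block, size_t used,
                            const char *translated)
{
    size_t len = strlen(translated) + 1;   /* keep the terminating NUL */
    rec->string_offset = (uint16_t)used;
    memcpy(block + used, translated, len);
    return used + len;
}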

    I wanted to document for the record that the format that R2RW uses has some weirdness in the way it handles residual bytes in a resource. The variant of the resource format that R2RW uses requires every block to be aligned on a 16-byte boundary. If there is space between the logical end of the resource and the start of the next resource, there are random bytes in that space. This leads me to believe that these bytes were originally recorded from stale/uninitialized memory. This frustrates me because when I write the initial file copy tool which unpacks and repacks each block, I want the new file to be identical to the original. However, these apparent nonsense bytes at the end thwart that effort.

    But leaving those bytes as 0 produces an acceptable resource file.

    Text On Static Images
    There is one last resource type we are working on translating. There are various bits of text that are rendered as images. For example, from the intro:


    Return to Ringworld: Static text

    It’s possible to locate and extract the exact image that is overlaid on this scene, though without the colors:


    Original static text

    The palettes are stored in a separate resource type. So it seems the challenge is to figure out the palette in use for these frames and render a transparent image that uses the same palette, then repack the new text-image into the new resource file.

  • How to use the ffmpeg lib to transform mp4 (h264 & aac) to m3u8 (HLS) in C code?

    1 July 2020, by itning

    I used the official transcoding.c example, but the console prints pkt->duration = 0, so the HLS segment duration may not be precise.

    I use this code to set the duration, but it has no effect.

    av_opt_set_int(ofmt_ctx->priv_data, "hls_time", 5, AV_OPT_SEARCH_CHILDREN);


    On the command line:

    ffmpeg -i a.mp4 -codec copy -vbsf h264_mp4toannexb -map 0 -f segment -segment_list a.m3u8 -segment_time 10 a-%03d.ts


    How can I achieve this command in C code?

    This is my code:

/**
 * @file
 * API example for demuxing, decoding, filtering, encoding and muxing
 * @example transcoding.c
 */

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>

static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;
typedef struct FilteringContext {
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx;

typedef struct StreamContext {
    AVCodecContext *dec_ctx;
    AVCodecContext *enc_ctx;
} StreamContext;
static StreamContext *stream_ctx;

static int open_input_file(const char *filename) {
    int ret;
    unsigned int i;

    ifmt_ctx = NULL;
    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
    if (!stream_ctx)
        return AVERROR(ENOMEM);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *stream = ifmt_ctx->streams[i];
        AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
        AVCodecContext *codec_ctx;
        if (!dec) {
            av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
            return AVERROR_DECODER_NOT_FOUND;
        }
        codec_ctx = avcodec_alloc_context3(dec);
        if (!codec_ctx) {
            av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
            return AVERROR(ENOMEM);
        }
        ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
                                       "for stream #%u\n", i);
            return ret;
        }
        /* Reencode video & audio and remux subtitles etc. */
        if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
            || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
                codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
            /* Open decoder */
            ret = avcodec_open2(codec_ctx, dec, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
                return ret;
            }
        }
        stream_ctx[i].dec_ctx = codec_ctx;
    }

    av_dump_format(ifmt_ctx, 0, filename, 0);
    return 0;
}

static int open_output_file(const char *filename, enum AVCodecID videoCodecId, enum AVCodecID audioCodecId) {
    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *dec_ctx, *enc_ctx;
    AVCodec *encoder;
    int ret;
    unsigned int i;

    ofmt_ctx = NULL;
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
    if (!ofmt_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
        return AVERROR_UNKNOWN;
    }

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
            return AVERROR_UNKNOWN;
        }

        in_stream = ifmt_ctx->streams[i];
        dec_ctx = stream_ctx[i].dec_ctx;

        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
            || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {

            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                encoder = avcodec_find_encoder(videoCodecId);
            } else {
                encoder = avcodec_find_encoder(audioCodecId);
            }
            //encoder = avcodec_find_encoder(dec_ctx->codec_id);
            if (!encoder) {
                av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                return AVERROR_INVALIDDATA;
            }
            enc_ctx = avcodec_alloc_context3(encoder);
            if (!enc_ctx) {
                av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
                return AVERROR(ENOMEM);
            }

            /* In this example, we transcode to same properties (picture size,
             * sample rate etc.). These properties can be changed for output
             * streams easily using filters */
            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                enc_ctx->height = dec_ctx->height;
                enc_ctx->width = dec_ctx->width;
                enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
                /* take first format from list of supported formats */
                if (encoder->pix_fmts)
                    enc_ctx->pix_fmt = encoder->pix_fmts[0];
                else
                    enc_ctx->pix_fmt = dec_ctx->pix_fmt;
                /* video time_base can be set to whatever is handy and supported by encoder */
                enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
            } else {
                enc_ctx->sample_rate = dec_ctx->sample_rate;
                enc_ctx->channel_layout = dec_ctx->channel_layout;
                enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
                /* take first format from list of supported formats */
                enc_ctx->sample_fmt = encoder->sample_fmts[0];
                enc_ctx->time_base = (AVRational) {1, enc_ctx->sample_rate};
            }

            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

            /* Third parameter can be used to pass settings to encoder */
            ret = avcodec_open2(enc_ctx, encoder, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                return ret;
            }
            ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
                return ret;
            }

            out_stream->time_base = enc_ctx->time_base;
            stream_ctx[i].enc_ctx = enc_ctx;
        } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
            return AVERROR_INVALIDDATA;
        } else {
            /* if this stream must be remuxed */
            ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
                return ret;
            }
            out_stream->time_base = in_stream->time_base;
        }

    }
    av_dump_format(ofmt_ctx, 0, filename, 1);

    av_opt_set_int(ofmt_ctx->priv_data, "hls_time", 5, AV_OPT_SEARCH_CHILDREN);

    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
            return ret;
        }
    }

    /* init muxer, write output file header */
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return ret;
    }

    return 0;
}

static int init_filter(FilteringContext *fctx, AVCodecContext *dec_ctx,
                       AVCodecContext *enc_ctx, const char *filter_spec) {
    char args[512];
    int ret = 0;
    const AVFilter *buffersrc = NULL;
    const AVFilter *buffersink = NULL;
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    AVFilterGraph *filter_graph = avfilter_graph_alloc();

    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        buffersrc = avfilter_get_by_name("buffer");
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        snprintf(args, sizeof(args),
                 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                 dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                 dec_ctx->time_base.num, dec_ctx->time_base.den,
                 dec_ctx->sample_aspect_ratio.num,
                 dec_ctx->sample_aspect_ratio.den);

        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                           args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                           NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                             (uint8_t *) &enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
                             AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        buffersrc = avfilter_get_by_name("abuffer");
        buffersink = avfilter_get_by_name("abuffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        if (!dec_ctx->channel_layout)
            dec_ctx->channel_layout =
                    av_get_default_channel_layout(dec_ctx->channels);
        snprintf(args, sizeof(args),
                 "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
                 dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
                 av_get_sample_fmt_name(dec_ctx->sample_fmt),
                 dec_ctx->channel_layout);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                           args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                           NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
                             (uint8_t *) &enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
                             AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
                             (uint8_t *) &enc_ctx->channel_layout,
                             sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
                             (uint8_t *) &enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
                             AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
    } else {
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
                                        &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;

    end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}

static int init_filters(void) {
    const char *filter_spec;
    unsigned int i;
    int ret;
    filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
    if (!filter_ctx)
        return AVERROR(ENOMEM);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        filter_ctx[i].buffersrc_ctx = NULL;
        filter_ctx[i].buffersink_ctx = NULL;
        filter_ctx[i].filter_graph = NULL;
        if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
              || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
            continue;

        if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
            filter_spec = "null"; /* passthrough (dummy) filter for video */
        else
            filter_spec = "anull"; /* passthrough (dummy) filter for audio */
        ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
                          stream_ctx[i].enc_ctx, filter_spec);
        if (ret)
            return ret;
    }
    return 0;
}

static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
    int ret;
    int got_frame_local;
    AVPacket enc_pkt;
    int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
    (ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
     AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

    if (!got_frame)
        got_frame = &got_frame_local;

    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
    /* encode filtered frame */
    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    av_init_packet(&enc_pkt);
    ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
                   filt_frame, got_frame);
    av_frame_free(&filt_frame);
    if (ret < 0)
        return ret;
    if (!(*got_frame))
        return 0;

    /* prepare packet for muxing */
    enc_pkt.stream_index = stream_index;
    av_packet_rescale_ts(&enc_pkt,
                         stream_ctx[stream_index].enc_ctx->time_base,
                         ofmt_ctx->streams[stream_index]->time_base);

    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
    /* mux encoded frame */
    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    return ret;
}

static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index) {
    int ret;
    AVFrame *filt_frame;

    av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
    /* push the decoded frame into the filtergraph */
    ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
                                       frame, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
        return ret;
    }

    /* pull filtered frames from the filtergraph */
    while (1) {
        filt_frame = av_frame_alloc();
        if (!filt_frame) {
            ret = AVERROR(ENOMEM);
            break;
        }
        av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
        ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
                                      filt_frame);
        if (ret < 0) {
            /* if no more frames for output - returns AVERROR(EAGAIN)
             * if flushed and no more frames for output - returns AVERROR_EOF
             * rewrite retcode to 0 to show it as normal procedure completion
             */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            av_frame_free(&filt_frame);
            break;
        }

        filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
        ret = encode_write_frame(filt_frame, stream_index, NULL);
        if (ret < 0)
            break;
    }

    return ret;
}

static int flush_encoder(unsigned int stream_index) {
    int ret;
    int got_frame;

    if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
          AV_CODEC_CAP_DELAY))
        return 0;

    while (1) {
        av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
        ret = encode_write_frame(NULL, stream_index, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame)
            return 0;
    }
    return ret;
}

int main() {
    char *inputFile = "D:/20200623_094923.mp4";
    char *outputFile = "D:/test/te.m3u8";
    enum AVCodecID videoCodec = AV_CODEC_ID_H264;
    enum AVCodecID audioCodec = AV_CODEC_ID_AAC;

    int ret;
    AVPacket packet = {.data = NULL, .size = 0};
    AVFrame *frame = NULL;
    enum AVMediaType type;
    unsigned int stream_index;
    unsigned int i;
    int got_frame;
    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

    if ((ret = open_input_file(inputFile)) < 0)
        goto end;
    if ((ret = open_output_file(outputFile, videoCodec, audioCodec)) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
            break;
        stream_index = packet.stream_index;
        type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
               stream_index);

        if (filter_ctx[stream_index].filter_graph) {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
            frame = av_frame_alloc();
            if (!frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 stream_ctx[stream_index].dec_ctx->time_base);
            dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                       avcodec_decode_audio4;
            ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
                           &got_frame, &packet);
            if (ret < 0) {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            if (got_frame) {
                frame->pts = frame->best_effort_timestamp;
                ret = filter_encode_write_frame(frame, stream_index);
                av_frame_free(&frame);
                if (ret < 0)
                    goto end;
            } else {
                av_frame_free(&frame);
            }
        } else {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ofmt_ctx->streams[stream_index]->time_base);

            ret = av_interleaved_write_frame(ofmt_ctx, &packet);
            if (ret < 0)
                goto end;
        }
        av_packet_unref(&packet);
    }

    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }

        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }

    av_write_trailer(ofmt_ctx);
    end:
    av_packet_unref(&packet);
    av_frame_free(&frame);
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        avcodec_free_context(&stream_ctx[i].dec_ctx);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
            avcodec_free_context(&stream_ctx[i].enc_ctx);
        if (filter_ctx && filter_ctx[i].filter_graph)
            avfilter_graph_free(&filter_ctx[i].filter_graph);
    }
    av_free(filter_ctx);
    av_free(stream_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

    return ret ? 1 : 0;
}
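
    One way the segmenting options could be passed from C (only a sketch, not verified against this exact code) is to bind the output context to the hls muxer explicitly and hand the hls_* options to avformat_write_header() as an AVDictionary, instead of setting priv_data options on a context whose muxer was guessed from the file name:

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

/* Rough sketch: force the hls muxer when creating the output context, then
 * pass its options as a dictionary when the header is written. Output
 * streams still have to be created in between, as in open_output_file above. */
static int hls_open_and_write_header(AVFormatContext **pofmt_ctx, const char *playlist)
{
    AVDictionary *opts = NULL;
    int ret = avformat_alloc_output_context2(pofmt_ctx, NULL, "hls", playlist);
    if (ret < 0 || !*pofmt_ctx)
        return ret < 0 ? ret : AVERROR_UNKNOWN;

    /* ... add and configure the output streams here ... */

    av_dict_set(&opts, "hls_time", "10", 0);                 /* target segment length, seconds */
    av_dict_set(&opts, "hls_segment_filename", "a-%03d.ts", 0);
    av_dict_set(&opts, "hls_list_size", "0", 0);             /* keep every entry in the playlist */

    ret = avformat_write_header(*pofmt_ctx, &opts);
    av_dict_free(&opts);
    return ret;
}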
