
Recherche avancée
Médias (1)
-
Richard Stallman et le logiciel libre
19 octobre 2011, par
Mis à jour : Mai 2013
Langue : français
Type : Texte
Autres articles (86)
-
MediaSPIP 0.1 Beta version
25 avril 2011, par
MediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
The zip file provided here only contains the sources of MediaSPIP in its standalone version.
To get a working installation, you must manually install all-software dependencies on the server.
If you want to use this archive for an installation in "farm mode", you will also need to proceed to other manual (...) -
Amélioration de la version de base
13 septembre 2013
Jolie sélection multiple
Le plugin Chosen permet d’améliorer l’ergonomie des champs de sélection multiple. Voir les deux images suivantes pour comparer.
Il suffit pour cela d’activer le plugin Chosen (Configuration générale du site > Gestion des plugins), puis de configurer le plugin (Les squelettes > Chosen) en activant l’utilisation de Chosen dans le site public et en spécifiant les éléments de formulaires à améliorer, par exemple select[multiple] pour les listes à sélection multiple (...) -
MediaSPIP Player : problèmes potentiels
22 février 2011, par
Le lecteur ne fonctionne pas sur Internet Explorer
Sur Internet Explorer (8 et 7 au moins), le plugin utilise le lecteur Flash flowplayer pour lire vidéos et son. Si le lecteur ne semble pas fonctionner, cela peut venir de la configuration du mod_deflate d’Apache.
Si dans la configuration de ce module Apache vous avez une ligne qui ressemble à la suivante, essayez de la supprimer ou de la commenter pour voir si le lecteur fonctionne correctement : /** * GeSHi (C) 2004 - 2007 Nigel McNie, (...)
Sur d’autres sites (10355)
-
How to fix the deprecated issue of ffmpeg ?
19 mai 2022, par md612
I try to compile the example code "transcoding" of ffmpeg. However, I encounter one deprecation issue with ffmpeg. After I added an inline=__inline entry and _CRT_SECURE_NO_DEPRECATE to the preprocessor definitions, the following error still exists. I don't know how to fix it. Please help.



The error shows that
error C4996: 'avcodec_encode_video2': was declared deprecated
for the line


int(*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
 (ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
 AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2 ;



#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>

/* MSVC pragma requires a colon: "disable : 4996" — without it the pragma is
 * ignored and the C4996 deprecation diagnostics are still emitted. */
#pragma warning(disable : 4996)

#ifdef _MSC_VER
#define snprintf _snprintf
#endif

/* Demuxer context for the input file (owned here, freed in main's cleanup). */
static AVFormatContext *ifmt_ctx;
/* Muxer context for the output file (owned here, freed in main's cleanup). */
static AVFormatContext *ofmt_ctx;
/* Per-stream filter graph: a buffer source feeding a (pass-through) graph
 * whose output is read back from the buffer sink. Entries are NULL for
 * streams that are remuxed without re-encoding. */
typedef struct FilteringContext {
 AVFilterContext *buffersink_ctx;
 AVFilterContext *buffersrc_ctx;
 AVFilterGraph *filter_graph;
} FilteringContext;
/* One FilteringContext per input stream, indexed by stream index. */
static FilteringContext *filter_ctx;

/* Per-stream decoder/encoder pair; enc_ctx is only set for audio/video. */
typedef struct StreamContext {
 AVCodecContext *dec_ctx;
 AVCodecContext *enc_ctx;
} StreamContext;
/* One StreamContext per input stream, indexed by stream index. */
static StreamContext *stream_ctx;

/*
 * Open the input container `filename`, probe its streams, and allocate and
 * open a decoder context for every audio/video stream (stored in the global
 * stream_ctx array, indexed by stream index). Non-A/V streams get a decoder
 * context with parameters copied but the decoder is not opened (they are
 * remuxed, not decoded).
 *
 * Returns 0 on success or a negative AVERROR code. On failure, contexts
 * allocated so far are released by main()'s cleanup path, not here.
 */
static int open_input_file(const char *filename)
{
 int ret;
 unsigned int i;

 ifmt_ctx = NULL;
 if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
 return ret;
 }

 if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
 return ret;
 }

 /* Zero-initialized so enc_ctx/dec_ctx are NULL until assigned. */
 stream_ctx = (StreamContext *)av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
 if (!stream_ctx)
 return AVERROR(ENOMEM);

 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 AVStream *stream = ifmt_ctx->streams[i];
 AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
 AVCodecContext *codec_ctx;
 if (!dec) {
 av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
 return AVERROR_DECODER_NOT_FOUND;
 }
 codec_ctx = avcodec_alloc_context3(dec);
 if (!codec_ctx) {
 av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
 return AVERROR(ENOMEM);
 }
 /* Seed the decoder context from the demuxed stream parameters. */
 ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
 "for stream #%u\n", i);
 return ret;
 }
 /* Reencode video & audio and remux subtitles etc. */
 if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
 || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
 /* framerate is not carried by codecpar; guess it from the stream so
 * open_output_file can derive the encoder time_base from it. */
 if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
 codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
 /* Open decoder */
 ret = avcodec_open2(codec_ctx, dec, NULL);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
 return ret;
 }
 }
 stream_ctx[i].dec_ctx = codec_ctx;
 }

 av_dump_format(ifmt_ctx, 0, filename, 0);
 return 0;
}

static int open_output_file(const char *filename)
{
 AVStream *out_stream;
 AVStream *in_stream;
 AVCodecContext *dec_ctx, *enc_ctx;
 AVCodec *encoder;
 int ret;
 unsigned int i;

 ofmt_ctx = NULL;
 avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
 if (!ofmt_ctx) {
 av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
 return AVERROR_UNKNOWN;
 }


 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 out_stream = avformat_new_stream(ofmt_ctx, NULL);
 if (!out_stream) {
 av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
 return AVERROR_UNKNOWN;
 }

 in_stream = ifmt_ctx->streams[i];
 dec_ctx = stream_ctx[i].dec_ctx;

 if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
 || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
 /* in this example, we choose transcoding to same codec */
 encoder = avcodec_find_encoder(dec_ctx->codec_id);
 if (!encoder) {
 av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
 return AVERROR_INVALIDDATA;
 }
 enc_ctx = avcodec_alloc_context3(encoder);
 if (!enc_ctx) {
 av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
 return AVERROR(ENOMEM);
 }

 /* In this example, we transcode to same properties (picture size,
 * sample rate etc.). These properties can be changed for output
 * streams easily using filters */
 if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
 enc_ctx->height = dec_ctx->height;
 enc_ctx->width = dec_ctx->width;
 enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
 /* take first format from list of supported formats */
 if (encoder->pix_fmts)
 enc_ctx->pix_fmt = encoder->pix_fmts[0];
 else
 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
 /* video time_base can be set to whatever is handy and supported by encoder */
 enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
 }
 else {
 enc_ctx->sample_rate = dec_ctx->sample_rate;
 enc_ctx->channel_layout = dec_ctx->channel_layout;
 enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
 /* take first format from list of supported formats */
 enc_ctx->sample_fmt = encoder->sample_fmts[0];
 enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
 }

 /* Third parameter can be used to pass settings to encoder */
 ret = avcodec_open2(enc_ctx, encoder, NULL);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
 return ret;
 }
 ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
 return ret;
 }
 if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
 enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

 out_stream->time_base = enc_ctx->time_base;
 stream_ctx[i].enc_ctx = enc_ctx;
 }
 else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
 av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
 return AVERROR_INVALIDDATA;
 }
 else {
 /* if this stream must be remuxed */
 ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
 return ret;
 }
 out_stream->time_base = in_stream->time_base;
 }

 }
 av_dump_format(ofmt_ctx, 0, filename, 1);

 if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
 ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
 return ret;
 }
 }

 /* init muxer, write output file header */
 ret = avformat_write_header(ofmt_ctx, NULL);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
 return ret;
 }

 return 0;
}

/*
 * Build one filter graph for a single stream: a buffer source fed with
 * decoded frames, the user-supplied `filter_spec` (here "null"/"anull"),
 * and a buffer sink constrained to the encoder's pixel/sample format so the
 * frames pulled out are directly encodable.
 *
 * On success fills fctx's buffersrc_ctx/buffersink_ctx/filter_graph.
 * Returns 0 or a negative AVERROR code. The inout descriptors are always
 * freed; note the filter_graph itself is NOT freed on failure here — it is
 * released by main()'s cleanup only if it was stored in fctx, so early
 * failures leak it (inherited from the upstream example).
 */
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
 AVCodecContext *enc_ctx, const char *filter_spec)
{
 char args[512];
 int ret = 0;
 AVFilter *buffersrc = NULL;
 AVFilter *buffersink = NULL;
 AVFilterContext *buffersrc_ctx = NULL;
 AVFilterContext *buffersink_ctx = NULL;
 AVFilterInOut *outputs = avfilter_inout_alloc();
 AVFilterInOut *inputs = avfilter_inout_alloc();
 AVFilterGraph *filter_graph = avfilter_graph_alloc();

 if (!outputs || !inputs || !filter_graph) {
 ret = AVERROR(ENOMEM);
 goto end;
 }

 if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
 /* Video endpoints: "buffer" (source) and "buffersink" (sink). */
 buffersrc = avfilter_get_by_name("buffer");
 buffersink = avfilter_get_by_name("buffersink");
 if (!buffersrc || !buffersink) {
 av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
 ret = AVERROR_UNKNOWN;
 goto end;
 }

 /* The buffer source must know the geometry/format/timing of the
 * frames it will receive; all taken from the decoder context. */
 snprintf(args, sizeof(args),
 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
 dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
 dec_ctx->time_base.num, dec_ctx->time_base.den,
 dec_ctx->sample_aspect_ratio.num,
 dec_ctx->sample_aspect_ratio.den);

 ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
 args, NULL, filter_graph);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
 goto end;
 }

 ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
 NULL, NULL, filter_graph);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
 goto end;
 }

 /* Constrain sink output to the encoder's pixel format so no extra
 * conversion is needed before encoding. */
 ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
 (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
 AV_OPT_SEARCH_CHILDREN);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
 goto end;
 }
 }
 else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
 /* Audio endpoints: "abuffer" (source) and "abuffersink" (sink). */
 buffersrc = avfilter_get_by_name("abuffer");
 buffersink = avfilter_get_by_name("abuffersink");
 if (!buffersrc || !buffersink) {
 av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
 ret = AVERROR_UNKNOWN;
 goto end;
 }

 /* Some demuxers/decoders leave channel_layout unset; derive a default
 * from the channel count so the abuffer args are valid. */
 if (!dec_ctx->channel_layout)
 dec_ctx->channel_layout =
 av_get_default_channel_layout(dec_ctx->channels);
 snprintf(args, sizeof(args),
 "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
 dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
 av_get_sample_fmt_name(dec_ctx->sample_fmt),
 dec_ctx->channel_layout);
 ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
 args, NULL, filter_graph);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
 goto end;
 }

 ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
 NULL, NULL, filter_graph);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
 goto end;
 }

 /* Constrain sink output to the encoder's sample format, layout and
 * rate — again so the sink yields directly encodable frames. */
 ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
 (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
 AV_OPT_SEARCH_CHILDREN);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
 goto end;
 }

 ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
 (uint8_t*)&enc_ctx->channel_layout,
 sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
 goto end;
 }

 ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
 (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
 AV_OPT_SEARCH_CHILDREN);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
 goto end;
 }
 }
 else {
 /* Only audio/video streams are filtered. */
 ret = AVERROR_UNKNOWN;
 goto end;
 }

 /* Endpoints for the filter graph. */
 outputs->name = av_strdup("in");
 outputs->filter_ctx = buffersrc_ctx;
 outputs->pad_idx = 0;
 outputs->next = NULL;

 inputs->name = av_strdup("out");
 inputs->filter_ctx = buffersink_ctx;
 inputs->pad_idx = 0;
 inputs->next = NULL;

 if (!outputs->name || !inputs->name) {
 ret = AVERROR(ENOMEM);
 goto end;
 }

 /* Parse the textual filter description and wire it between src and sink. */
 if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
 &inputs, &outputs, NULL)) < 0)
 goto end;

 if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
 goto end;

 /* Fill FilteringContext */
 fctx->buffersrc_ctx = buffersrc_ctx;
 fctx->buffersink_ctx = buffersink_ctx;
 fctx->filter_graph = filter_graph;

end:
 avfilter_inout_free(&inputs);
 avfilter_inout_free(&outputs);

 return ret;
}

static int init_filters(void)
{
 const char *filter_spec;
 unsigned int i;
 int ret;
 filter_ctx = (FilteringContext *)av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
 if (!filter_ctx)
 return AVERROR(ENOMEM);

 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 filter_ctx[i].buffersrc_ctx = NULL;
 filter_ctx[i].buffersink_ctx = NULL;
 filter_ctx[i].filter_graph = NULL;
 if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
 || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
 continue;


 if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
 filter_spec = "null"; /* passthrough (dummy) filter for video */
 else
 filter_spec = "anull"; /* passthrough (dummy) filter for audio */
 ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
 stream_ctx[i].enc_ctx, filter_spec);
 if (ret)
 return ret;
 }
 return 0;
}

/*
 * Encode one filtered frame and mux all resulting packets.
 *
 * This is the fix for the deprecation error the post asks about (MSVC
 * C4996 on avcodec_encode_video2): the deprecated
 * avcodec_encode_video2()/avcodec_encode_audio2() pair is replaced with the
 * send/receive API (avcodec_send_frame()/avcodec_receive_packet(),
 * available since FFmpeg 3.1, same release that introduced the codecpar
 * API this program already uses).
 *
 * filt_frame: frame to encode, or NULL to enter flush (drain) mode.
 *             Ownership is taken — the frame is freed here, as before.
 * got_frame:  optional out-param; set to 1 iff at least one packet was
 *             produced (flush_encoder() relies on this to detect the end
 *             of draining). May be NULL.
 *
 * Returns 0 on success (including "no packet available yet") or a negative
 * AVERROR code.
 */
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
 int ret;
 int got_frame_local;
 AVPacket enc_pkt;
 AVCodecContext *enc_ctx = stream_ctx[stream_index].enc_ctx;

 if (!got_frame)
 got_frame = &got_frame_local;
 *got_frame = 0;

 av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
 /* encode filtered frame: feed it to the encoder... */
 ret = avcodec_send_frame(enc_ctx, filt_frame);
 av_frame_free(&filt_frame);
 if (ret == AVERROR_EOF)
 return 0; /* encoder already fully flushed (repeated NULL send) */
 if (ret < 0)
 return ret;

 /* ...then drain every packet the encoder has ready. One frame may yield
 * zero, one or several packets, so loop until EAGAIN/EOF. */
 while (1) {
 enc_pkt.data = NULL;
 enc_pkt.size = 0;
 av_init_packet(&enc_pkt);

 ret = avcodec_receive_packet(enc_ctx, &enc_pkt);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 return 0; /* no (more) output right now — not an error */
 if (ret < 0)
 return ret;
 *got_frame = 1;

 /* prepare packet for muxing */
 enc_pkt.stream_index = stream_index;
 av_packet_rescale_ts(&enc_pkt,
 enc_ctx->time_base,
 ofmt_ctx->streams[stream_index]->time_base);

 av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
 /* mux encoded frame (takes ownership of enc_pkt's data) */
 ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
 if (ret < 0)
 return ret;
 }
}

/*
 * Push one decoded frame (or NULL to flush) into the stream's filter graph,
 * then pull every filtered frame that becomes available and hand each to
 * encode_write_frame() for encoding and muxing.
 *
 * The caller keeps ownership of `frame` (av_buffersrc_add_frame_flags with
 * flags=0 does not consume it); filtered frames pulled from the sink are
 * owned here and handed off to encode_write_frame(), which frees them.
 *
 * Returns 0 on success (EAGAIN/EOF from the sink are normal completion)
 * or a negative AVERROR code.
 */
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
 int ret;
 AVFrame *filt_frame;

 av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
 /* push the decoded frame into the filtergraph */
 ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
 frame, 0);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
 return ret;
 }

 /* pull filtered frames from the filtergraph */
 while (1) {
 filt_frame = av_frame_alloc();
 if (!filt_frame) {
 ret = AVERROR(ENOMEM);
 break;
 }
 av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
 ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
 filt_frame);
 if (ret < 0) {
 /* if no more frames for output - returns AVERROR(EAGAIN)
 * if flushed and no more frames for output - returns AVERROR_EOF
 * rewrite retcode to 0 to show it as normal procedure completion
 */
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 ret = 0;
 av_frame_free(&filt_frame);
 break;
 }

 /* let the encoder pick frame types itself */
 filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
 /* encode_write_frame takes ownership of filt_frame */
 ret = encode_write_frame(filt_frame, stream_index, NULL);
 if (ret < 0)
 break;
 }

 return ret;
}

/*
 * Drain the delayed packets buffered inside one stream's encoder by feeding
 * it NULL frames until it reports no further output. A no-op for encoders
 * without the AV_CODEC_CAP_DELAY capability.
 *
 * Returns 0 on normal completion or the first negative AVERROR code.
 */
static int flush_encoder(unsigned int stream_index)
{
 int err;
 int produced;

 if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
 AV_CODEC_CAP_DELAY))
 return 0;

 for (;;) {
 av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
 err = encode_write_frame(NULL, stream_index, &produced);
 if (err < 0)
 return err;
 if (!produced)
 return 0;
 }
}

/*
 * Entry point: transcode argv[1] to argv[2]. Demux -> decode -> filter ->
 * encode -> mux for audio/video streams, plain remux for everything else,
 * then flush filters and encoders and write the trailer.
 *
 * BUGFIX: the usage string had been mangled by HTML extraction
 * ("<input file=\"file\" /> <output file=\"file\">"); restored to the
 * intended "<input file> <output file>".
 *
 * NOTE(review): avcodec_decode_video2()/avcodec_decode_audio4() used below
 * are deprecated just like the encode_* pair; the modern replacement is
 * avcodec_send_packet()/avcodec_receive_frame().
 */
int main(int argc, char **argv)
{
 int ret;
 AVPacket packet = { .data = NULL, .size = 0 };
 AVFrame *frame = NULL;
 enum AVMediaType type;
 unsigned int stream_index;
 unsigned int i;
 int got_frame;
 int(*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

 if (argc != 3) {
 av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
 return 1;
 }

 av_register_all();
 avfilter_register_all();

 if ((ret = open_input_file(argv[1])) < 0)
 goto end;
 if ((ret = open_output_file(argv[2])) < 0)
 goto end;
 if ((ret = init_filters()) < 0)
 goto end;

 /* read all packets */
 while (1) {
 if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
 break;
 stream_index = packet.stream_index;
 type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
 av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
 stream_index);

 /* A non-NULL filter graph marks streams selected for transcoding. */
 if (filter_ctx[stream_index].filter_graph) {
 av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
 frame = av_frame_alloc();
 if (!frame) {
 ret = AVERROR(ENOMEM);
 break;
 }
 /* Bring packet timestamps into the decoder's time base. */
 av_packet_rescale_ts(&packet,
 ifmt_ctx->streams[stream_index]->time_base,
 stream_ctx[stream_index].dec_ctx->time_base);
 dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
 avcodec_decode_audio4;
 ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
 &got_frame, &packet);
 if (ret < 0) {
 av_frame_free(&frame);
 av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
 break;
 }

 if (got_frame) {
 frame->pts = frame->best_effort_timestamp;
 ret = filter_encode_write_frame(frame, stream_index);
 av_frame_free(&frame);
 if (ret < 0)
 goto end;
 }
 else {
 av_frame_free(&frame);
 }
 }
 else {
 /* remux this frame without reencoding */
 av_packet_rescale_ts(&packet,
 ifmt_ctx->streams[stream_index]->time_base,
 ofmt_ctx->streams[stream_index]->time_base);

 ret = av_interleaved_write_frame(ofmt_ctx, &packet);
 if (ret < 0)
 goto end;
 }
 av_packet_unref(&packet);
 }

 /* flush filters and encoders */
 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 /* flush filter */
 if (!filter_ctx[i].filter_graph)
 continue;
 ret = filter_encode_write_frame(NULL, i);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
 goto end;
 }

 /* flush encoder */
 ret = flush_encoder(i);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
 goto end;
 }
 }

 av_write_trailer(ofmt_ctx);
end:
 /* Release everything whether we got here via success or goto. */
 av_packet_unref(&packet);
 av_frame_free(&frame);
 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 avcodec_free_context(&stream_ctx[i].dec_ctx);
 if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
 avcodec_free_context(&stream_ctx[i].enc_ctx);
 if (filter_ctx && filter_ctx[i].filter_graph)
 avfilter_graph_free(&filter_ctx[i].filter_graph);
 }
 av_free(filter_ctx);
 av_free(stream_ctx);
 avformat_close_input(&ifmt_ctx);
 if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
 avio_closep(&ofmt_ctx->pb);
 avformat_free_context(ofmt_ctx);

 if (ret < 0)
 av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

 return ret ? 1 : 0;
}
</output>


-
Use libavcodec on MPEG-TS to transcode video and copy audio
16 août 2017, par Danny
I’m trying to use
libavcodec
to reduce the video bitrate of an MPEG transport stream while passing the audio streams through unchanged. The equivalent ffmpeg
command line would be:
ffmpeg -i SPTS_HD_2min.prog.ts -b:v 800k -s cif -map 0:0 -map 0:1 -map 0:2 -c:a copy ./transcodeCLI.ts
The input file contains :
Input #0, mpegts, from 'SPTS_HD_2min.program8.ts': Duration: 00:02:01.56 bitrate: 10579 kb/s
Program 1
Stream #0:0[0x21]: Video: h264 (High) ([27][0][0][0] / 0x001B), yuv420p(tv, bt709, top first), 1920x1080 [SAR 1:1 DAR 16:9], 25 fps, 25 tbr, 90k tbn, 50 tbc
Stream #0:1[0x61](eng): Audio: ac3 (AC-3 / 0x332D4341), 48000 Hz, 5.1(side), fltp, 384 kb/s
Stream #0:2[0x63](eng): Audio: mp2 ([3][0][0][0] / 0x0003), 48000 Hz, stereo, s16p, 192 kb/s
Using the transcoding.c example, the program does generate a valid transport stream. However, the file does not play. Also, analysis tools show the file duration as almost twice as long. Something is not right.
The program is simple : read, encode, write in a loop. The codec context for the video stream is set for a lower bit rate and width/height.
I’ve tinkered with it for a long time without success, but nothing I set gets the expected behavior...
The very summarized source is directly below. (I’ve removed variable defs, error checking and debug messages for brevity).
EDIT
And I’ve included the full, compilable, source code just after that.
EDIT II
The MPEG-TS test file can be downloaded from this sharing server
typedef struct StreamContext
{
AVCodecContext *decodeCtx;
AVCodecContext *encodeCtx;
} StreamContext;
static StreamContext *streamCtx;
static int
openInputFile(const char *filename)
{
inFormatCtx = NULL;
ret = avformat_open_input(&inFormatCtx, filename, NULL, NULL);
ret = avformat_find_stream_info(inFormatCtx, NULL)
streamCtx = av_mallocz_array(inFormatCtx->nb_streams, sizeof(*streamCtx));
for (i = 0; i < inFormatCtx->nb_streams; i++)
{
AVStream *stream = inFormatCtx->streams[i];
AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
AVCodecContext *pCodecCtx = avcodec_alloc_context3(dec);
ret = avcodec_parameters_to_context(pCodecCtx, stream->codecpar);
if (pCodecCtx->codec_type == AVMEDIA_TYPE_VIDEO || pCodecCtx->codec_type == AVMEDIA_TYPE_AUDIO)
{
if (pCodecCtx->codec_type == AVMEDIA_TYPE_VIDEO)
pCodecCtx->framerate = av_guess_frame_rate(inFormatCtx, stream, NULL);
/* Open decoder */
ret = avcodec_open2(pCodecCtx, dec, NULL);
}
streamCtx[i].decodeCtx = pCodecCtx;
}
return 0;
}
static int
openOutputFile(const char *filename)
{
outFormatCtx = NULL;
avformat_alloc_output_context2(&outFormatCtx, NULL, NULL, filename);
for (i = 0; i < inFormatCtx->nb_streams; i++)
{
out_stream = avformat_new_stream(outFormatCtx, NULL);
in_stream = inFormatCtx->streams[i];
decodeCtx = streamCtx[i].decodeCtx;
if (decodeCtx->codec_type == AVMEDIA_TYPE_VIDEO || decodeCtx->codec_type == AVMEDIA_TYPE_AUDIO)
{
encoder = avcodec_find_encoder(decodeCtx->codec_id);
pEncodeCtx = avcodec_alloc_context3(encoder);
if (decodeCtx->codec_type == AVMEDIA_TYPE_VIDEO)
{
// MAKE IT SMALLER!
pEncodeCtx->height = decodeCtx->height / 4;
pEncodeCtx->width = decodeCtx->width / 4;
pEncodeCtx->sample_aspect_ratio = decodeCtx->sample_aspect_ratio;
// perhaps set these too?
pEncodeCtx->bit_rate = 700000;
pEncodeCtx->bit_rate_tolerance = 0;
pEncodeCtx->framerate = decodeCtx->framerate;
pEncodeCtx->time_base = decodeCtx->time_base;
/* take first format from list of supported formats */
if (encoder->pix_fmts)
pEncodeCtx->pix_fmt = encoder->pix_fmts[0];
else
pEncodeCtx->pix_fmt = decodeCtx->pix_fmt;
/* video time_base can be set to whatever is handy and supported by encoder */
pEncodeCtx->time_base = av_inv_q(decodeCtx->framerate);
}
else
{
pEncodeCtx->sample_rate = decodeCtx->sample_rate;
pEncodeCtx->channel_layout = decodeCtx->channel_layout;
pEncodeCtx->channels = av_get_channel_layout_nb_channels(pEncodeCtx->channel_layout);
/* take first format from list of supported formats */
pEncodeCtx->sample_fmt = encoder->sample_fmts[0];
pEncodeCtx->time_base = (AVRational) { 1, pEncodeCtx->sample_rate };
}
ret = avcodec_open2(pEncodeCtx, encoder, NULL);
ret = avcodec_parameters_from_context(out_stream->codecpar, pEncodeCtx);
if (outFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
pEncodeCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
out_stream->time_base = pEncodeCtx->time_base;
streamCtx[i].encodeCtx = pEncodeCtx;
}
else if (decodeCtx->codec_type == AVMEDIA_TYPE_UNKNOWN)
return AVERROR_INVALIDDATA;
else
{
/* if this stream must be remuxed */
ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
out_stream->time_base = in_stream->time_base;
}
}
av_dump_format(outFormatCtx, 0, filename, 1);
if (!(outFormatCtx->oformat->flags & AVFMT_NOFILE))
ret = avio_open(&outFormatCtx->pb, filename, AVIO_FLAG_WRITE);
/* init muxer, write output file header */
ret = avformat_write_header(outFormatCtx, NULL);
return 0;
}
static int
encodeAndWriteFrame(AVFrame *inFrame, unsigned int streamIndex, int *pGotFrame)
{
encodedPkt.data = NULL;
encodedPkt.size = 0;
av_init_packet(&encodedPkt);
int codecType = inFormatCtx->streams[streamIndex]->codecpar->codec_type;
if (codecType == AVMEDIA_TYPE_VIDEO)
ret = avcodec_encode_video2(streamCtx[streamIndex].encodeCtx, &encodedPkt, inFrame, pGotFrame);
else
ret = avcodec_encode_audio2(streamCtx[streamIndex].encodeCtx, &encodedPkt, inFrame, pGotFrame);
if (*pGotFrame == 0)
return 0;
/* prepare packet for muxing */
encodedPkt.stream_index = streamIndex;
av_packet_rescale_ts(&encodedPkt, streamCtx[streamIndex].encodeCtx->time_base, outFormatCtx->streams[streamIndex]->time_base);
/* mux encoded frame */
ret = av_interleaved_write_frame(outFormatCtx, &encodedPkt);
return ret;
}
int
main(int argc, char **argv)
{
av_register_all();
avfilter_register_all();
if ((ret = openInputFile(argv[1])) < 0)
goto end;
if ((ret = openOutputFile(argv[2])) < 0)
goto end;
/* read all packets */
while (1)
{
if ((ret = av_read_frame(inFormatCtx, &packet)) < 0)
break;
readPktNum++;
streamIndex = packet.stream_index;
type = inFormatCtx->streams[packet.stream_index]->codecpar->codec_type;
av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", streamIndex);
if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO)
{
pDecFrame = av_frame_alloc();
av_packet_rescale_ts(&packet, inFormatCtx->streams[streamIndex]->time_base, streamCtx[streamIndex].decodeCtx->time_base);
if (type == AVMEDIA_TYPE_VIDEO)
ret = avcodec_decode_video2(streamCtx[streamIndex].decodeCtx, pDecFrame, &gotDecFrame, &packet);
else
ret = avcodec_decode_audio4(streamCtx[streamIndex].decodeCtx, pDecFrame, &gotDecFrame, &packet);
if (gotDecFrame)
{
pDecFrame->pts = av_frame_get_best_effort_timestamp(pDecFrame);
ret = encodeAndWriteFrame(pDecFrame, streamIndex, 0);
av_frame_free(&pDecFrame);
if (ret < 0)
goto end;
}
else
av_frame_free(&pDecFrame);
}
else
{
/* remux this frame without reencoding */
av_packet_rescale_ts(&packet, inFormatCtx->streams[streamIndex]->time_base, outFormatCtx->streams[streamIndex]->time_base);
ret = av_interleaved_write_frame(outFormatCtx, &packet);
if (ret < 0)
goto end;
}
av_packet_unref(&packet);
}
/* flush encoders */
for (i = 0; i < inFormatCtx->nb_streams; i++)
{
/* flush encoder */
ret = flushEncoder(i);
}
av_write_trailer(outFormatCtx);
end:
av_packet_unref(&packet);
av_frame_free(&pDecFrame);
for (i = 0; i < inFormatCtx->nb_streams; i++)
{
avcodec_free_context(&streamCtx[i].decodeCtx);
if (outFormatCtx && outFormatCtx->nb_streams > i && outFormatCtx->streams[i] && streamCtx[i].encodeCtx)
avcodec_free_context(&streamCtx[i].encodeCtx);
}
av_free(streamCtx);
avformat_close_input(&inFormatCtx);
if (outFormatCtx && !(outFormatCtx->oformat->flags & AVFMT_NOFILE))
avio_closep(&outFormatCtx->pb);
avformat_free_context(outFormatCtx);
return ret ? 1 : 0;
}
**EDIT - Full source**
/**
* @file
* API example for demuxing, decoding, filtering, encoding and muxing
* @example transcoding.c
*/
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
/* ANSI escape sequences for colored terminal log output. */
#define ANSI_COLOR_RED "\x1b[31m"
#define ANSI_COLOR_PINK "\x1b[31;1m"
#define ANSI_COLOR_GREEN "\x1b[32m"
#define ANSI_COLOR_LIME "\x1b[32;1m"
#define ANSI_COLOR_YELLOW "\x1b[33;1m"
#define ANSI_COLOR_BLUE "\x1b[34;1m"
#define ANSI_COLOR_MAGENTA "\x1b[35m"
#define ANSI_COLOR_CYAN "\x1b[36;1m"
#define ANSI_COLOR_RESET "\x1b[0m"
/* NOTE(review): prefer <stdbool.h> over hand-rolled true/false macros. */
#define true 1
#define false 0
/* Debug counters for packets read and frames decoded/encoded. */
static int readPktNum = 0;
static int decFrameNum = 0;
static int encFrameNum = 0;
/* Input demuxer / output muxer contexts (owned here, freed in main). */
static AVFormatContext *inFormatCtx;
static AVFormatContext *outFormatCtx;
/* Per-stream decoder/encoder pair; encodeCtx set only for audio/video. */
typedef struct StreamContext
{
AVCodecContext *decodeCtx;
AVCodecContext *encodeCtx;
} StreamContext;
/* One StreamContext per input stream, indexed by stream index. */
static StreamContext *streamCtx;
/*
 * Debug helper: dump one decoded frame to `filePath` as raw planar YUV,
 * plane by plane, honoring each plane's linesize (stride).
 *
 * Assumes a 3-plane format with chroma at half resolution both ways
 * (YUV420p-like) — TODO confirm before using with other pixel formats.
 *
 * Improvements over the original: the "Writing..." banner is no longer
 * printed before the fopen check, and an fopen failure is reported
 * instead of silently returning.
 */
void
writeAVFrameAsYUVFile(const char *filePath, AVFrame *pFrame)
{
    FILE *pFile = fopen(filePath, "wb");
    int y;

    if (!pFile) {
        fprintf(stderr, "Could not open %s for writing\n", filePath);
        return;
    }
    printf("Writing YUV file: %d x %d\n", pFrame->width, pFrame->height);

    // Writing Y (full resolution)
    for (y = 0; y < pFrame->height; y++)
        fwrite(&pFrame->data[0][pFrame->linesize[0] * y], pFrame->width, 1, pFile);
    // Writing U (half width, half height)
    for (y = 0; y < pFrame->height / 2; y++)
        fwrite(&pFrame->data[1][pFrame->linesize[1] * y], pFrame->width / 2, 1, pFile);
    // Writing V (half width, half height)
    for (y = 0; y < pFrame->height / 2; y++)
        fwrite(&pFrame->data[2][pFrame->linesize[2] * y], pFrame->width / 2, 1, pFile);

    fclose(pFile);
    printf("Wrote %s: %d x %d\n", filePath, pFrame->width, pFrame->height);
}
/*
 * Debug helper: print the encoder/decoder settings relevant to this
 * transcoder (bitrate, size, GOP, aspect ratio, framerate, time base).
 *
 * BUGFIX: fps derived from `framerate` was printed as den/num (the
 * inverse). An AVRational framerate of 25 fps is {25, 1}, so fps is
 * num/den. For `time_base` (e.g. {1, 25}) den/num IS the fps, so that
 * line was already correct and is unchanged.
 */
static void
dumpCodecContext(const AVCodecContext *pCodecContext)
{
    printf("Codec Context:\n");
    printf("   bit rate            %d\n", (int)pCodecContext->bit_rate);
    printf("   bit rate tolerance  %d\n", pCodecContext->bit_rate_tolerance);
    printf("   size                %d x %d\n", pCodecContext->width, pCodecContext->height);
    printf("   GOP Size            %d\n", pCodecContext->gop_size);
    printf("   Max B Frames        %d\n", pCodecContext->max_b_frames);
    printf("   Sample Aspect       %d:%d (%.3f)\n",
           pCodecContext->sample_aspect_ratio.num, pCodecContext->sample_aspect_ratio.den,
           1.0 * pCodecContext->sample_aspect_ratio.num / pCodecContext->sample_aspect_ratio.den);
    printf("   framerate           %d / %d (%.3f fps)\n",
           pCodecContext->framerate.num, pCodecContext->framerate.den,
           1.0 * pCodecContext->framerate.num / pCodecContext->framerate.den);
    printf("   time_base           %d / %d (%.3f fps)\n",
           pCodecContext->time_base.num, pCodecContext->time_base.den,
           1.0 * pCodecContext->time_base.den / pCodecContext->time_base.num);
}
/*
 * Open the input container, probe its streams, and allocate + open a
 * decoder for every audio/video stream, storing the contexts in the
 * global streamCtx array (indexed by stream index). Non-A/V streams get
 * parameters copied but no opened decoder (they will be remuxed).
 *
 * Returns 0 on success or a negative AVERROR code.
 */
static int
openInputFile(const char *filename)
{
    int err;
    unsigned int idx;

    inFormatCtx = NULL;
    err = avformat_open_input(&inFormatCtx, filename, NULL, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return err;
    }

    err = avformat_find_stream_info(inFormatCtx, NULL);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return err;
    }

    /* Zero-initialized so decodeCtx/encodeCtx start out NULL. */
    streamCtx = av_mallocz_array(inFormatCtx->nb_streams, sizeof(*streamCtx));
    if (!streamCtx)
        return AVERROR(ENOMEM);

    for (idx = 0; idx < inFormatCtx->nb_streams; idx++) {
        AVStream *stream = inFormatCtx->streams[idx];
        AVCodec *decoder = avcodec_find_decoder(stream->codecpar->codec_id);
        AVCodecContext *dctx;

        if (!decoder) {
            av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", idx);
            return AVERROR_DECODER_NOT_FOUND;
        }

        dctx = avcodec_alloc_context3(decoder);
        if (!dctx) {
            av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", idx);
            return AVERROR(ENOMEM);
        }

        err = avcodec_parameters_to_context(dctx, stream->codecpar);
        if (err < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context for stream #%u\n", idx);
            return err;
        }

        /* Reencode video & audio and remux subtitles etc. */
        if (dctx->codec_type == AVMEDIA_TYPE_VIDEO || dctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* framerate is not part of codecpar; recover it from the stream. */
            if (dctx->codec_type == AVMEDIA_TYPE_VIDEO)
                dctx->framerate = av_guess_frame_rate(inFormatCtx, stream, NULL);
            /* Open decoder */
            err = avcodec_open2(dctx, decoder, NULL);
            if (err < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", idx);
                return err;
            }
        }
        streamCtx[idx].decodeCtx = dctx;
    }

    av_dump_format(inFormatCtx, 0, filename, 0);
    return 0;
}
/*
 * Create the output container for 'filename'.  For every input stream:
 * video/audio gets a freshly configured encoder (video is transcoded at a
 * quarter of the input size with a fixed 700 kbit/s target), everything
 * else is set up for plain remuxing.  Writes the container header before
 * returning.  Returns 0 on success, a negative AVERROR otherwise.
 */
static int
openOutputFile(const char *filename)
{
    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *decodeCtx, *pEncodeCtx;
    AVCodec *encoder;
    int ret;
    unsigned int i;
    outFormatCtx = NULL;
    /* Container format is guessed from the output filename extension. */
    avformat_alloc_output_context2(&outFormatCtx, NULL, NULL, filename);
    if (!outFormatCtx)
    {
        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
        return AVERROR_UNKNOWN;
    }
    for (i = 0; i < inFormatCtx->nb_streams; i++)
    {
        out_stream = avformat_new_stream(outFormatCtx, NULL);
        if (!out_stream)
        {
            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
            return AVERROR_UNKNOWN;
        }
        in_stream = inFormatCtx->streams[i];
        decodeCtx = streamCtx[i].decodeCtx;
        if (decodeCtx->codec_type == AVMEDIA_TYPE_VIDEO || decodeCtx->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            /* in this example, we choose transcoding to same codec */
            encoder = avcodec_find_encoder(decodeCtx->codec_id);
            if (!encoder)
            {
                av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                return AVERROR_INVALIDDATA;
            }
            pEncodeCtx = avcodec_alloc_context3(encoder);
            if (!pEncodeCtx)
            {
                av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
                return AVERROR(ENOMEM);
            }
            /* In this example, we transcode to same properties (picture size,
             * sample rate etc.). These properties can be changed for output
             * streams easily using filters */
            if (decodeCtx->codec_type == AVMEDIA_TYPE_VIDEO)
            {
                printf("DECODE CONTEXT "); dumpCodecContext(decodeCtx);
                // MAKE IT SMALLER!
                pEncodeCtx->height = decodeCtx->height / 4;
                pEncodeCtx->width = decodeCtx->width / 4;
                pEncodeCtx->sample_aspect_ratio = decodeCtx->sample_aspect_ratio;
                // perhaps set these too?
                pEncodeCtx->bit_rate = 700000;
                pEncodeCtx->bit_rate_tolerance = 0;
                pEncodeCtx->framerate = decodeCtx->framerate;
                pEncodeCtx->time_base = decodeCtx->time_base;
                printf("ENCODE CONTEXT "); dumpCodecContext(pEncodeCtx);
                /* take first format from list of supported formats */
                if (encoder->pix_fmts)
                    pEncodeCtx->pix_fmt = encoder->pix_fmts[0];
                else
                    pEncodeCtx->pix_fmt = decodeCtx->pix_fmt;
                /* video time_base can be set to whatever is handy and supported by encoder */
                /* NOTE(review): this overwrites the time_base copied from the
                 * decoder a few lines above — presumably intentional, verify. */
                pEncodeCtx->time_base = av_inv_q(decodeCtx->framerate);
            }
            else
            {
                pEncodeCtx->sample_rate = decodeCtx->sample_rate;
                pEncodeCtx->channel_layout = decodeCtx->channel_layout;
                pEncodeCtx->channels = av_get_channel_layout_nb_channels(pEncodeCtx->channel_layout);
                /* take first format from list of supported formats */
                pEncodeCtx->sample_fmt = encoder->sample_fmts[0];
                pEncodeCtx->time_base = (AVRational) { 1, pEncodeCtx->sample_rate };
            }
            /* Third parameter can be used to pass settings to encoder */
            ret = avcodec_open2(pEncodeCtx, encoder, NULL);
            if (ret < 0)
            {
                av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                return ret;
            }
            ret = avcodec_parameters_from_context(out_stream->codecpar, pEncodeCtx);
            if (ret < 0)
            {
                av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
                return ret;
            }
            /* NOTE(review): upstream FFmpeg examples set this flag *before*
             * avcodec_open2(); setting it afterwards may be too late for some
             * codecs — verify against the FFmpeg version in use. */
            if (outFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
                pEncodeCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
            out_stream->time_base = pEncodeCtx->time_base;
            streamCtx[i].encodeCtx = pEncodeCtx;
        }
        else if (decodeCtx->codec_type == AVMEDIA_TYPE_UNKNOWN)
        {
            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
            return AVERROR_INVALIDDATA;
        }
        else
        {
            printf("STREAM %d is not video or audio\n", i);
            /* if this stream must be remuxed */
            ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
            if (ret < 0)
            {
                av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
                return ret;
            }
            out_stream->time_base = in_stream->time_base;
        }
    }
    av_dump_format(outFormatCtx, 0, filename, 1);
    /* Open the actual output file unless the format needs no file (e.g. some
     * network protocols). */
    if (!(outFormatCtx->oformat->flags & AVFMT_NOFILE))
    {
        ret = avio_open(&outFormatCtx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
            return ret;
        }
    }
    /* init muxer, write output file header */
    ret = avformat_write_header(outFormatCtx, NULL);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return ret;
    }
    return 0;
}
/*
 * Encode one frame for output stream 'streamIndex' and mux the resulting
 * packet, if any.  Passing inFrame == NULL drains (flushes) the encoder.
 * On return *pGotFrame is non-zero iff the encoder produced a packet;
 * callers may pass 0/NULL if they do not care.
 * Returns 0 on success or a negative AVERROR.
 *
 * NOTE(review): avcodec_encode_video2()/avcodec_encode_audio2() are
 * deprecated; the replacement API is the avcodec_send_frame() /
 * avcodec_receive_packet() pair.  This is the source of the compiler's
 * deprecation diagnostics.
 */
static int
encodeAndWriteFrame(AVFrame *inFrame, unsigned int streamIndex, int *pGotFrame)
{
    int ret;
    int got_frame_local;
    AVPacket encodedPkt;
    if (pGotFrame == 0)
        pGotFrame = &got_frame_local;
    encodedPkt.data = NULL;
    encodedPkt.size = 0;
    av_init_packet(&encodedPkt);
    int codecType = inFormatCtx->streams[streamIndex]->codecpar->codec_type;
    if (codecType == AVMEDIA_TYPE_VIDEO)
        ret = avcodec_encode_video2(streamCtx[streamIndex].encodeCtx, &encodedPkt, inFrame, pGotFrame);
    else
        ret = avcodec_encode_audio2(streamCtx[streamIndex].encodeCtx, &encodedPkt, inFrame, pGotFrame);
    if (ret < 0)
        return ret;
    if (*pGotFrame == 0)
        return 0;
    /* Progress trace: one line every 10 encoded frames. */
    if (encFrameNum++ % 10 == 0)
        printf("Encoded %s frame #%d\n", (codecType == AVMEDIA_TYPE_VIDEO) ? "Video" : "Audio", encFrameNum);
    /* prepare packet for muxing */
    encodedPkt.stream_index = streamIndex;
    /* Encoder time base -> output stream (muxer) time base. */
    av_packet_rescale_ts(&encodedPkt, streamCtx[streamIndex].encodeCtx->time_base, outFormatCtx->streams[streamIndex]->time_base);
    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
    /* mux encoded frame */
    ret = av_interleaved_write_frame(outFormatCtx, &encodedPkt);
    return ret;
}
/*
 * Drain any frames still buffered inside the encoder of the given stream by
 * repeatedly encoding a NULL frame until the encoder reports no more output.
 * Returns 0 on success (or immediately when the codec buffers nothing),
 * a negative AVERROR on failure.
 */
static int
flushEncoder(unsigned int streamIndex)
{
    int result;
    int producedFrame;

    /* Codecs without AV_CODEC_CAP_DELAY never hold frames back. */
    if ((streamCtx[streamIndex].encodeCtx->codec->capabilities & AV_CODEC_CAP_DELAY) == 0)
        return 0;

    for (;;)
    {
        av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", streamIndex);
        result = encodeAndWriteFrame(NULL, streamIndex, &producedFrame);
        if (result < 0)
            return result;
        if (producedFrame == 0)
            return 0;
    }
}
/*
 * Entry point: transcode argv[1] into argv[2] (video/audio re-encoded,
 * everything else remuxed), then flush the encoders and finalize the file.
 *
 * Fixes relative to the pasted original:
 *  - restored the usage string (HTML extraction residue had corrupted it);
 *  - av_read_frame() returning AVERROR_EOF is the normal end of input and
 *    is no longer turned into a non-zero exit status;
 *  - the flush loop no longer dereferences streamCtx[i].encodeCtx for
 *    streams that were remuxed (encodeCtx stays NULL for those);
 *  - the cleanup code no longer dereferences inFormatCtx/streamCtx when
 *    openInputFile() failed and left them NULL.
 */
int
main(int argc, char **argv)
{
    int ret;
    AVPacket packet = { .data = NULL, .size = 0 };
    AVFrame *pDecFrame = NULL;
    enum AVMediaType type;
    unsigned int streamIndex;
    unsigned int i;
    int gotDecFrame;
    if (argc != 3)
    {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input_file> <output_file>\n", argv[0]);
        return 1;
    }
    /* Global registration, required before FFmpeg 4.0 (deprecated since). */
    av_register_all();
    avfilter_register_all();
    if ((ret = openInputFile(argv[1])) < 0)
        goto end;
    if ((ret = openOutputFile(argv[2])) < 0)
        goto end;
    /* read all packets */
    while (1)
    {
        if ((ret = av_read_frame(inFormatCtx, &packet)) < 0)
        {
            printf(ANSI_COLOR_YELLOW "READ PACKET RETURNED %d\n" ANSI_COLOR_RESET, ret);
            break;
        }
        readPktNum++;
        streamIndex = packet.stream_index;
        type = inFormatCtx->streams[packet.stream_index]->codecpar->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", streamIndex);
        if (type == AVMEDIA_TYPE_VIDEO || type == AVMEDIA_TYPE_AUDIO)
        {
            pDecFrame = av_frame_alloc();
            if (!pDecFrame)
            {
                ret = AVERROR(ENOMEM);
                break;
            }
            /* Demuxer timestamps -> decoder time base. */
            av_packet_rescale_ts(&packet, inFormatCtx->streams[streamIndex]->time_base, streamCtx[streamIndex].decodeCtx->time_base);
            /* NOTE: deprecated decode API; newer FFmpeg uses
             * avcodec_send_packet()/avcodec_receive_frame(). */
            if (type == AVMEDIA_TYPE_VIDEO)
                ret = avcodec_decode_video2(streamCtx[streamIndex].decodeCtx, pDecFrame, &gotDecFrame, &packet);
            else
                ret = avcodec_decode_audio4(streamCtx[streamIndex].decodeCtx, pDecFrame, &gotDecFrame, &packet);
            if (ret < 0)
            {
                av_frame_free(&pDecFrame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }
            if (gotDecFrame)
            {
                if (decFrameNum++ % 10 == 0)
                    printf("Decoded %s frame #%d\n", (type == AVMEDIA_TYPE_VIDEO) ? "Video" : "Audio", decFrameNum);
                if (0 && type == AVMEDIA_TYPE_VIDEO)
                {
                    /* Disabled debug dump of the raw decoded picture. */
                    printf("VIDEO width %d height %d\n", pDecFrame->width, pDecFrame->height);
                    writeAVFrameAsYUVFile("/mnt/swdevel/DVStor/decodedYUV.yuv", pDecFrame);
                }
                pDecFrame->pts = av_frame_get_best_effort_timestamp(pDecFrame);
                ret = encodeAndWriteFrame(pDecFrame, streamIndex, 0);
                av_frame_free(&pDecFrame);
                if (ret < 0)
                    goto end;
            }
            else
                av_frame_free(&pDecFrame);
        }
        else
        {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(&packet, inFormatCtx->streams[streamIndex]->time_base, outFormatCtx->streams[streamIndex]->time_base);
            ret = av_interleaved_write_frame(outFormatCtx, &packet);
            if (ret < 0)
                goto end;
        }
        av_packet_unref(&packet);
    }
    printf(ANSI_COLOR_YELLOW "EXIT MAIN WHILE(1) - FLUSHING\n" ANSI_COLOR_RESET);
    if (ret == AVERROR_EOF)
        ret = 0; /* end of input is the normal way out of the loop */
    /* flush encoders */
    for (i = 0; i < inFormatCtx->nb_streams; i++)
    {
        if (!streamCtx[i].encodeCtx)
            continue; /* stream was remuxed, nothing to flush */
        /* flush encoder */
        ret = flushEncoder(i);
        if (ret < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }
    av_write_trailer(outFormatCtx);
end:
    av_packet_unref(&packet);
    av_frame_free(&pDecFrame);
    if (inFormatCtx && streamCtx)
    {
        for (i = 0; i < inFormatCtx->nb_streams; i++)
        {
            avcodec_free_context(&streamCtx[i].decodeCtx);
            if (outFormatCtx && outFormatCtx->nb_streams > i && outFormatCtx->streams[i] && streamCtx[i].encodeCtx)
                avcodec_free_context(&streamCtx[i].encodeCtx);
        }
    }
    av_free(streamCtx);
    avformat_close_input(&inFormatCtx);
    if (outFormatCtx && !(outFormatCtx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&outFormatCtx->pb);
    avformat_free_context(outFormatCtx);
    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
    return ret ? 1 : 0;
}
</output> -
Scale filter crashes with error when used from transcoding example
27 juin 2017, par Vali

I’ve modified it a bit (just so it compiles in C++) from this code example:
https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/transcoding.c

What works: as is (the null filter), and a number of other filters such as framerate, drawtext, …
What doesn’t work: the scale filter when scaling down.
I use the following syntax for scale (I’ve tried many others too, with the same effect):
"scale=w=iw/2:-1"

The error is: "Input picture width (240) is greater than stride (128)", where the values for width and stride depend on the input.
Misc environment info: Windows, VS 2017; input example: rtsp://wowzaec2demo.streamlock.net/vod/mp4:BigBuckBunny_115k.mov
Any clue as to what I’m doing wrong ?
Thanks !
EDITED to add working code sample
#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avfilter.lib")
/*
* Copyright (c) 2010 Nicolas George
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2014 Andrey Utkin
*
**** EDITED 2017 for testing (see original here: https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/transcoding.c)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* API example for demuxing, decoding, filtering, encoding and muxing
* @example transcoding.c
*/
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
}
static AVFormatContext *ifmt_ctx;   // demuxer context for the input file
static AVFormatContext *ofmt_ctx;   // muxer context for the output file
/* Per-stream filter graph together with its entry/exit points. */
typedef struct FilteringContext {
    AVFilterContext *buffersink_ctx;  // graph output: filtered frames are pulled here
    AVFilterContext *buffersrc_ctx;   // graph input: decoded frames are pushed here
    AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx;  // one entry per input stream (see init_filters)
/* Per-stream decoder/encoder pair. */
typedef struct StreamContext {
    AVCodecContext *dec_ctx;
    AVCodecContext *enc_ctx;
} StreamContext;
static StreamContext *stream_ctx;     // one entry per input stream
/*
 * Open 'filename' into the global ifmt_ctx, locate the first video stream
 * (returned through videoStreamIndex), and allocate + open a decoder
 * context for that stream only.  All other streams are skipped.
 * Returns 0 on success, a negative value otherwise (-1 when no video
 * stream exists).
 */
static int open_input_file(const char *filename, int& videoStreamIndex)
{
    int ret;
    unsigned int i;
    ifmt_ctx = NULL;
    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }
    // Just need video
    videoStreamIndex = -1;
    /* Note: this loop's 'i' deliberately shadows the outer one. */
    for (unsigned int i = 0; i < ifmt_ctx->nb_streams; i++)
    {
        if (ifmt_ctx->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
            continue;
        videoStreamIndex = i;
        break;
    }
    if (videoStreamIndex < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Cannot find video stream\n");
        return videoStreamIndex;
    }
    /* Zero-initialized slot per stream; only the video slot is filled below. */
    stream_ctx = (StreamContext*)av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
    if (!stream_ctx)
        return AVERROR(ENOMEM);
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        // Just need video
        if (i != videoStreamIndex)
            continue;
        AVStream *stream = ifmt_ctx->streams[i];
        AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
        AVCodecContext *codec_ctx;
        if (!dec) {
            av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
            return AVERROR_DECODER_NOT_FOUND;
        }
        codec_ctx = avcodec_alloc_context3(dec);
        if (!codec_ctx) {
            av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
            return AVERROR(ENOMEM);
        }
        /* Copy the demuxer-reported parameters into the codec context. */
        ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
                "for stream #%u\n", i);
            return ret;
        }
        /* Reencode video & audio and remux subtitles etc. */
        if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
            || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
                codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
            /* Open decoder */
            ret = avcodec_open2(codec_ctx, dec, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
                return ret;
            }
        }
        stream_ctx[i].dec_ctx = codec_ctx;
    }
    av_dump_format(ifmt_ctx, 0, filename, 0);
    return 0;
}
/*
 * Create the output container for 'filename' and add a single stream for
 * the selected input video stream, configured with an encoder of the same
 * codec and the same picture properties.  Streams other than
 * videoStreamIndex are skipped, so the output contains exactly one stream.
 * Writes the container header before returning.
 */
static int open_output_file(const char *filename, const int videoStreamIndex)
{
    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *dec_ctx, *enc_ctx;
    AVCodec *encoder;
    int ret;
    unsigned int i;
    ofmt_ctx = NULL;
    /* Container format is guessed from the output filename extension. */
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
    if (!ofmt_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
        return AVERROR_UNKNOWN;
    }
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        // Just need video
        if (i != videoStreamIndex)
            continue;
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
            return AVERROR_UNKNOWN;
        }
        in_stream = ifmt_ctx->streams[i];
        dec_ctx = stream_ctx[i].dec_ctx;
        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
            /* in this example, we choose transcoding to same codec */
            encoder = avcodec_find_encoder(dec_ctx->codec_id);
            if (!encoder) {
                av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                return AVERROR_INVALIDDATA;
            }
            enc_ctx = avcodec_alloc_context3(encoder);
            if (!enc_ctx) {
                av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
                return AVERROR(ENOMEM);
            }
            /* In this example, we transcode to same properties (picture size,
             * sample rate etc.). These properties can be changed for output
             * streams easily using filters */
            enc_ctx->height = dec_ctx->height;
            enc_ctx->width = dec_ctx->width;
            enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
            /* take first format from list of supported formats */
            if (encoder->pix_fmts)
                enc_ctx->pix_fmt = encoder->pix_fmts[0];
            else
                enc_ctx->pix_fmt = dec_ctx->pix_fmt;
            /* video time_base can be set to whatever is handy and supported by encoder */
            //enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
            enc_ctx->time_base = dec_ctx->time_base;
            /* Third parameter can be used to pass settings to encoder */
            ret = avcodec_open2(enc_ctx, encoder, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                return ret;
            }
            ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
                return ret;
            }
            /* NOTE(review): upstream FFmpeg examples set this flag *before*
             * avcodec_open2(); here it is set afterwards — verify. */
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
            out_stream->time_base = enc_ctx->time_base;
            stream_ctx[i].enc_ctx = enc_ctx;
        }
        else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
            return AVERROR_INVALIDDATA;
        }
        else {
            /* if this stream must be remuxed */
            ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
                return ret;
            }
            out_stream->time_base = in_stream->time_base;
        }
    }
    av_dump_format(ofmt_ctx, 0, filename, 1);
    /* Open the actual output file unless the format needs no file. */
    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
            return ret;
        }
    }
    /* init muxer, write output file header */
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return ret;
    }
    return 0;
}
/*
 * Build the filter graph described by 'filter_spec' for one video stream:
 * buffer source (fed with decoded frames) -> parsed filter chain ->
 * buffersink (converts to the encoder's pixel format).  Only video is
 * supported here; any other codec type yields AVERROR_UNKNOWN.
 * On success fills fctx with the source/sink/graph handles.
 */
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
    AVCodecContext *enc_ctx, const char *filter_spec)
{
    char args[512];
    int ret = 0;
    AVFilter *buffersrc = NULL;
    AVFilter *buffersink = NULL;
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    AVFilterGraph *filter_graph = avfilter_graph_alloc();
    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        buffersrc = avfilter_get_by_name("buffer");
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        /* Describe the frames the decoder will push into the graph. */
        snprintf(args, sizeof(args),
            "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
            dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
            dec_ctx->time_base.num, dec_ctx->time_base.den,
            dec_ctx->sample_aspect_ratio.num,
            dec_ctx->sample_aspect_ratio.den);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
            args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }
        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
            NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }
        /* Constrain the sink to the encoder's pixel format. */
        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
            (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
            AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    }
    else {
        /* Audio (and anything else) is intentionally unsupported here. */
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx = 0;
    outputs->next = NULL;
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx = 0;
    inputs->next = NULL;
    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
        &inputs, &outputs, NULL)) < 0)
        goto end;
    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;
    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;
end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret;
}
/*
 * Allocate filter_ctx (one slot per input stream) and build a filter graph
 * for the selected video stream only.  The active spec is the pass-through
 * "null" filter; the commented alternatives (scale, drawtext) are the specs
 * that were being experimented with.
 *
 * NOTE(review): av_malloc_array() leaves the slots of skipped streams
 * uninitialized, yet main()'s flush loop reads filter_ctx[i].filter_graph
 * for every stream — consider av_mallocz_array() instead.
 */
static int init_filters(const int videoStreamIndex)
{
    const char *filter_spec;
    unsigned int i;
    int ret;
    filter_ctx = (FilteringContext*)av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
    if (!filter_ctx)
        return AVERROR(ENOMEM);
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        // Just video
        if (i != videoStreamIndex)
            continue;
        filter_ctx[i].buffersrc_ctx = NULL;
        filter_ctx[i].buffersink_ctx = NULL;
        filter_ctx[i].filter_graph = NULL;
        if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
            || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
            continue;
        filter_spec = "null"; /* passthrough (dummy) filter for video */
        //filter_spec = "scale=w=iw/2:-1";
        // filter_spec = "drawtext=fontfile=FreeSerif.ttf: text='%{localtime}': x=w-text_w: y=0: fontsize=24: fontcolor=yellow@1.0: box=1: boxcolor=red@1.0";
        // filter_spec = "drawtext=fontfile=FreeSerif.ttf :text='test': x=w-text_w: y=text_h: fontsize=24: fontcolor=yellow@1.0: box=1: boxcolor=red@1.0";
        ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
            stream_ctx[i].enc_ctx, filter_spec);
        if (ret)
            return ret;
    }
    return 0;
}
/*
 * Encode one filtered frame (or flush with filt_frame == NULL) and mux the
 * resulting packet.  Non-video stream indices return 0 immediately.
 * Takes ownership of filt_frame: it is freed here in every path after the
 * encode call.  *got_frame (optional) reports whether a packet was emitted.
 *
 * NOTE(review): avcodec_encode_video2()/avcodec_encode_audio2() are
 * deprecated; newer FFmpeg uses avcodec_send_frame()/avcodec_receive_packet().
 */
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame, const int videoStreamIndex) {
    // Just video
    if (stream_index != videoStreamIndex)
        return 0;
    int ret;
    int got_frame_local;
    AVPacket enc_pkt;
    int(*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
        (ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
            AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
    if (!got_frame)
        got_frame = &got_frame_local;
    // av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
    /* encode filtered frame */
    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    av_init_packet(&enc_pkt);
    ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
        filt_frame, got_frame);
    av_frame_free(&filt_frame);
    if (ret < 0)
        return ret;
    if (!(*got_frame))
        return 0;
    /* prepare packet for muxing */
    /*enc_pkt.stream_index = stream_index;
    av_packet_rescale_ts(&enc_pkt, stream_ctx[stream_index].enc_ctx->time_base, ofmt_ctx->streams[stream_index]->time_base);*/
    /* The output file was created with a single (video) stream, hence the
     * hard-coded output index 0 (the input index may differ). */
    enc_pkt.stream_index = 0;
    av_packet_rescale_ts(&enc_pkt, stream_ctx[stream_index].enc_ctx->time_base, ofmt_ctx->streams[0]->time_base);
    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
    /* mux encoded frame */
    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    return ret;
}
/*
 * Push one decoded frame (or NULL to flush) into the stream's filter graph,
 * then pull every filtered frame the graph produces and hand each one to
 * encode_write_frame().  Non-video stream indices return 0 immediately.
 */
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index, const int videoStreamIndex)
{
    // Just video, all else crashes
    if (stream_index != videoStreamIndex)
        return 0;
    int ret;
    AVFrame *filt_frame;
    // av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
    /* push the decoded frame into the filtergraph */
    ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
        frame, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
        return ret;
    }
    /* pull filtered frames from the filtergraph */
    while (1) {
        filt_frame = av_frame_alloc();
        if (!filt_frame) {
            ret = AVERROR(ENOMEM);
            break;
        }
        // av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
        ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
            filt_frame);
        if (ret < 0) {
            /* if no more frames for output - returns AVERROR(EAGAIN)
             * if flushed and no more frames for output - returns AVERROR_EOF
             * rewrite retcode to 0 to show it as normal procedure completion
             */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            av_frame_free(&filt_frame);
            break;
        }
        /* Let the encoder pick frame types on its own. */
        filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
        /* encode_write_frame takes ownership of filt_frame. */
        ret = encode_write_frame(filt_frame, stream_index, NULL, videoStreamIndex);
        if (ret < 0)
            break;
    }
    return ret;
}
/*
 * Drain frames still buffered inside the encoder of the selected video
 * stream by encoding NULL frames until no more output is produced.
 * No-op for non-video streams and for codecs without AV_CODEC_CAP_DELAY.
 */
static int flush_encoder(unsigned int stream_index, const int videoStreamIndex)
{
    int ret;
    int got_frame;
    // Just video
    if (stream_index != videoStreamIndex)
        return 0;
    if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
        AV_CODEC_CAP_DELAY))
        return 0;
    while (1) {
        av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
        ret = encode_write_frame(NULL, stream_index, &got_frame, videoStreamIndex);
        if (ret < 0)
            break;
        if (!got_frame)
            return 0;
    }
    return ret;
}
#include <vector>
int main(int argc, char **argv)
{
int ret;
AVPacket packet;
packet.data = NULL;
packet.size = 0;
AVFrame *frame = NULL;
enum AVMediaType type;
unsigned int stream_index;
unsigned int i;
int got_frame;
int(*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
#ifdef _DEBUG
// Hardcoded arguments
std::vector varguments;
{
varguments.push_back(argv[0]);
// Source
varguments.push_back("./big_buck_bunny_short.mp4 ");
// Destination
varguments.push_back("./big_buck_bunny_short-processed.mp4");
}
char** arguments = new char*[varguments.size()];
for (unsigned int i = 0; i < varguments.size(); i++)
{
arguments[i] = varguments[i];
}
argc = varguments.size();
argv = arguments;
#endif // _DEBUG
if (argc != 3) {
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file="file" /> <output file="file">\n", argv[0]);
return 1;
}
av_register_all();
avfilter_register_all();
int videoStreamIndex = -1;
if ((ret = open_input_file(argv[1], videoStreamIndex)) < 0)
goto end;
if ((ret = open_output_file(argv[2], videoStreamIndex)) < 0)
goto end;
if ((ret = init_filters(videoStreamIndex)) < 0)
goto end;
// Stop after a couple of frames
int framesToGet = 100;
/* read all packets */
//while (framesToGet--)
while(1)
{
if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
break;
stream_index = packet.stream_index;
// I just need video
if (stream_index != videoStreamIndex) {
av_packet_unref(&packet);
continue;
}
type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
stream_index);
if (filter_ctx[stream_index].filter_graph) {
av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
frame = av_frame_alloc();
if (!frame) {
ret = AVERROR(ENOMEM);
break;
}
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
stream_ctx[stream_index].dec_ctx->time_base);
dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
avcodec_decode_audio4;
ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
&got_frame, &packet);
if (ret < 0) {
av_frame_free(&frame);
av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
break;
}
if (got_frame) {
frame->pts = frame->best_effort_timestamp;
ret = filter_encode_write_frame(frame, stream_index, videoStreamIndex);
av_frame_free(&frame);
if (ret < 0)
goto end;
}
else {
av_frame_free(&frame);
}
}
else {
/* remux this frame without reencoding */
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ofmt_ctx->streams[stream_index]->time_base);
ret = av_interleaved_write_frame(ofmt_ctx, &packet);
if (ret < 0)
goto end;
}
av_packet_unref(&packet);
}
/* flush filters and encoders */
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
/* flush filter */
if (!filter_ctx[i].filter_graph)
continue;
ret = filter_encode_write_frame(NULL, i, videoStreamIndex);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
goto end;
}
/* flush encoder */
ret = flush_encoder(i, videoStreamIndex);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
goto end;
}
}
av_write_trailer(ofmt_ctx);
end:
av_packet_unref(&packet);
av_frame_free(&frame);
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
// Just video
if (i != videoStreamIndex)
continue;
avcodec_free_context(&stream_ctx[i].dec_ctx);
if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
avcodec_free_context(&stream_ctx[i].enc_ctx);
if (filter_ctx && filter_ctx[i].filter_graph)
avfilter_graph_free(&filter_ctx[i].filter_graph);
}
av_free(filter_ctx);
av_free(stream_ctx);
avformat_close_input(&ifmt_ctx);
if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
avio_closep(&ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);
/*if (ret < 0)
av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));*/
return ret ? 1 : 0;
}
</output></vector>