
Media (91)
-
Chuck D with Fine Arts Militia - No Meaning No
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Paul Westerberg - Looking Up in Heaven
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Le Tigre - Fake French
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Thievery Corporation - DC 3000
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Dan the Automator - Relaxation Spa Treatment
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Gilberto Gil - Oslodum
15 September 2011
Updated: September 2011
Language: English
Type: Audio
Other articles (45)
-
Participating in its translation
10 April 2011
You can help us improve the wording used in the software, or translate it into any new language so that it can reach new linguistic communities.
To do this, we use the SPIP translation interface, where all of MediaSPIP's language modules are available. You just need to sign up to the translators' mailing list to ask for more information.
Currently, MediaSPIP is only available in French and (...) -
HTML5 audio and video support
10 April 2011
MediaSPIP uses the HTML5 video and audio tags to play multimedia documents, taking advantage of the latest W3C innovations supported by modern browsers.
For older browsers, the Flowplayer Flash player is used instead.
The HTML5 player was created specifically for MediaSPIP: its appearance is fully customizable to match a chosen theme.
These technologies make it possible to deliver video and sound both to conventional computers (...) -
Other interesting software
12 April 2011
We don't claim to be the only ones doing what we do... and we certainly don't claim to be the best at it either... We just try to do it well, and better and better...
The list below covers software that more or less tries to do what MediaSPIP does, or that MediaSPIP more or less tries to do the same as; either way...
We don't know them and we haven't tried them, but you may want to take a look.
Videopress
Website: (...)
On other sites (6468)
-
flush encoded packets to disk when muxing audio and video
23 June 2016, by chandu
I am using the muxing.c example (without any modifications) provided with ffmpeg 3.0 to create an MP4 file (H.264 & AAC) with VS 2013. The sample works fine with the default width and height for video. But after changing the width to 1920 and the height to 1080, the sample holds nearly 400 MB (per Task Manager, in Release mode) throughout the run and never writes the encoded packets to the output file. It writes to the output file (out.mp4) only when avcodec_close() is called at the end.
I have tried to
- free the encoded packet after calling write_frame(),
- use avio_flush(),
- use avcodec_flush_buffers(),
but with no success.
Could anybody please tell me how I can save every encoded packet to disk instead of keeping it in RAM, so that memory usage stays low?
Note: there is no issue with flushing the buffers after recording is over; I do that by calling av_interleaved_write_frame() with a NULL AVPacket.
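
One approach, sketched under the assumption that the unmodified muxing.c names are in scope: av_interleaved_write_frame() holds packets in an internal queue until it has enough from every stream to interleave them, so large 1080p packets can pile up in RAM. Writing with av_write_frame() bypasses that queue, and avio_flush() pushes the muxer's I/O buffer to disk after each packet:

/* Sketch: write one encoded packet straight to disk instead of queueing it.
 * fmt_ctx, codec_ctx, stream and pkt are assumed to come from muxing.c;
 * write_packet_unbuffered is a hypothetical helper name. */
static int write_packet_unbuffered(AVFormatContext *fmt_ctx, AVCodecContext *codec_ctx,
                                   AVStream *stream, AVPacket *pkt)
{
    int ret;

    /* rescale timestamps from the codec time base to the stream time base */
    av_packet_rescale_ts(pkt, codec_ctx->time_base, stream->time_base);
    pkt->stream_index = stream->index;

    /* av_write_frame() does not interleave, so nothing accumulates in RAM;
     * the caller is then responsible for feeding packets in dts order */
    ret = av_write_frame(fmt_ctx, pkt);
    if (ret < 0)
        return ret;

    avio_flush(fmt_ctx->pb);   /* force the AVIO buffer out to the file */
    av_packet_unref(pkt);
    return 0;
}

The trade-off is that interleaving becomes the caller's job: audio and video packets have to be submitted in roughly dts order, which muxing.c's main loop already approximates by comparing the two streams' next timestamps.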
-
How to use the ffmpeg lib to transform mp4 (h264 & aac) into m3u8 (HLS) with C code?
1 July 2020, by itning
I used the official transcoding.c example, but the console prints pkt->duration = 0, so the HLS segment duration will probably not be precise.

I tried this code to set the segment duration, but it has no effect:


av_opt_set_int(ofmt_ctx->priv_data, "hls_time", 5, AV_OPT_SEARCH_CHILDREN);


On the command line, this works:


ffmpeg -i a.mp4 -codec copy -vbsf h264_mp4toannexb -map 0 -f segment -segment_list a.m3u8 -segment_time 10 a-%03d.ts


How can I achieve the same with C code?


This is my code:


/**
 * @file
 * API example for demuxing, decoding, filtering, encoding and muxing
 * @example transcoding.c
 */

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>

static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;
typedef struct FilteringContext {
 AVFilterContext *buffersink_ctx;
 AVFilterContext *buffersrc_ctx;
 AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx;

typedef struct StreamContext {
 AVCodecContext *dec_ctx;
 AVCodecContext *enc_ctx;
} StreamContext;
static StreamContext *stream_ctx;

static int open_input_file(const char *filename) {
 int ret;
 unsigned int i;

 ifmt_ctx = NULL;
 if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
 return ret;
 }

 if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
 return ret;
 }

 stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
 if (!stream_ctx)
 return AVERROR(ENOMEM);

 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 AVStream *stream = ifmt_ctx->streams[i];
 AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
 AVCodecContext *codec_ctx;
 if (!dec) {
 av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
 return AVERROR_DECODER_NOT_FOUND;
 }
 codec_ctx = avcodec_alloc_context3(dec);
 if (!codec_ctx) {
 av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
 return AVERROR(ENOMEM);
 }
 ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
 "for stream #%u\n", i);
 return ret;
 }
 /* Reencode video & audio and remux subtitles etc. */
 if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
 || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
 if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
 codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
 /* Open decoder */
 ret = avcodec_open2(codec_ctx, dec, NULL);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
 return ret;
 }
 }
 stream_ctx[i].dec_ctx = codec_ctx;
 }

 av_dump_format(ifmt_ctx, 0, filename, 0);
 return 0;
}

static int open_output_file(const char *filename, enum AVCodecID videoCodecId, enum AVCodecID audioCodecId) {
 AVStream *out_stream;
 AVStream *in_stream;
 AVCodecContext *dec_ctx, *enc_ctx;
 AVCodec *encoder;
 int ret;
 unsigned int i;

 ofmt_ctx = NULL;
 avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
 if (!ofmt_ctx) {
 av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
 return AVERROR_UNKNOWN;
 }

 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 out_stream = avformat_new_stream(ofmt_ctx, NULL);
 if (!out_stream) {
 av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
 return AVERROR_UNKNOWN;
 }

 in_stream = ifmt_ctx->streams[i];
 dec_ctx = stream_ctx[i].dec_ctx;

 if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
 || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {

 if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
 encoder = avcodec_find_encoder(videoCodecId);
 } else {
 encoder = avcodec_find_encoder(audioCodecId);
 }
 //encoder = avcodec_find_encoder(dec_ctx->codec_id);
 if (!encoder) {
 av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
 return AVERROR_INVALIDDATA;
 }
 enc_ctx = avcodec_alloc_context3(encoder);
 if (!enc_ctx) {
 av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
 return AVERROR(ENOMEM);
 }

 /* In this example, we transcode to same properties (picture size,
 * sample rate etc.). These properties can be changed for output
 * streams easily using filters */
 if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
 enc_ctx->height = dec_ctx->height;
 enc_ctx->width = dec_ctx->width;
 enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
 /* take first format from list of supported formats */
 if (encoder->pix_fmts)
 enc_ctx->pix_fmt = encoder->pix_fmts[0];
 else
 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
 /* video time_base can be set to whatever is handy and supported by encoder */
 enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
 } else {
 enc_ctx->sample_rate = dec_ctx->sample_rate;
 enc_ctx->channel_layout = dec_ctx->channel_layout;
 enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
 /* take first format from list of supported formats */
 enc_ctx->sample_fmt = encoder->sample_fmts[0];
 enc_ctx->time_base = (AVRational) {1, enc_ctx->sample_rate};
 }

 if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
 enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

 /* Third parameter can be used to pass settings to encoder */
 ret = avcodec_open2(enc_ctx, encoder, NULL);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
 return ret;
 }
 ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
 return ret;
 }

 out_stream->time_base = enc_ctx->time_base;
 stream_ctx[i].enc_ctx = enc_ctx;
 } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
 av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
 return AVERROR_INVALIDDATA;
 } else {
 /* if this stream must be remuxed */
 ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
 return ret;
 }
 out_stream->time_base = in_stream->time_base;
 }

 }
 av_dump_format(ofmt_ctx, 0, filename, 1);

 av_opt_set_int(ofmt_ctx->priv_data, "hls_time", 5, AV_OPT_SEARCH_CHILDREN);

 if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
 ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
 return ret;
 }
 }

 /* init muxer, write output file header */
 ret = avformat_write_header(ofmt_ctx, NULL);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
 return ret;
 }

 return 0;
}

static int init_filter(FilteringContext *fctx, AVCodecContext *dec_ctx,
 AVCodecContext *enc_ctx, const char *filter_spec) {
 char args[512];
 int ret = 0;
 const AVFilter *buffersrc = NULL;
 const AVFilter *buffersink = NULL;
 AVFilterContext *buffersrc_ctx = NULL;
 AVFilterContext *buffersink_ctx = NULL;
 AVFilterInOut *outputs = avfilter_inout_alloc();
 AVFilterInOut *inputs = avfilter_inout_alloc();
 AVFilterGraph *filter_graph = avfilter_graph_alloc();

 if (!outputs || !inputs || !filter_graph) {
 ret = AVERROR(ENOMEM);
 goto end;
 }

 if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
 buffersrc = avfilter_get_by_name("buffer");
 buffersink = avfilter_get_by_name("buffersink");
 if (!buffersrc || !buffersink) {
 av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
 ret = AVERROR_UNKNOWN;
 goto end;
 }

 snprintf(args, sizeof(args),
 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
 dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
 dec_ctx->time_base.num, dec_ctx->time_base.den,
 dec_ctx->sample_aspect_ratio.num,
 dec_ctx->sample_aspect_ratio.den);

 ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
 args, NULL, filter_graph);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
 goto end;
 }

 ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
 NULL, NULL, filter_graph);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
 goto end;
 }

 ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
 (uint8_t *) &enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
 AV_OPT_SEARCH_CHILDREN);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
 goto end;
 }
 } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
 buffersrc = avfilter_get_by_name("abuffer");
 buffersink = avfilter_get_by_name("abuffersink");
 if (!buffersrc || !buffersink) {
 av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
 ret = AVERROR_UNKNOWN;
 goto end;
 }

 if (!dec_ctx->channel_layout)
 dec_ctx->channel_layout =
 av_get_default_channel_layout(dec_ctx->channels);
 snprintf(args, sizeof(args),
 "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
 dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
 av_get_sample_fmt_name(dec_ctx->sample_fmt),
 dec_ctx->channel_layout);
 ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
 args, NULL, filter_graph);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
 goto end;
 }

 ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
 NULL, NULL, filter_graph);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
 goto end;
 }

 ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
 (uint8_t *) &enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
 AV_OPT_SEARCH_CHILDREN);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
 goto end;
 }

 ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
 (uint8_t *) &enc_ctx->channel_layout,
 sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
 goto end;
 }

 ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
 (uint8_t *) &enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
 AV_OPT_SEARCH_CHILDREN);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
 goto end;
 }
 } else {
 ret = AVERROR_UNKNOWN;
 goto end;
 }

 /* Endpoints for the filter graph. */
 outputs->name = av_strdup("in");
 outputs->filter_ctx = buffersrc_ctx;
 outputs->pad_idx = 0;
 outputs->next = NULL;

 inputs->name = av_strdup("out");
 inputs->filter_ctx = buffersink_ctx;
 inputs->pad_idx = 0;
 inputs->next = NULL;

 if (!outputs->name || !inputs->name) {
 ret = AVERROR(ENOMEM);
 goto end;
 }

 if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
 &inputs, &outputs, NULL)) < 0)
 goto end;

 if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
 goto end;

 /* Fill FilteringContext */
 fctx->buffersrc_ctx = buffersrc_ctx;
 fctx->buffersink_ctx = buffersink_ctx;
 fctx->filter_graph = filter_graph;

 end:
 avfilter_inout_free(&inputs);
 avfilter_inout_free(&outputs);

 return ret;
}

static int init_filters(void) {
 const char *filter_spec;
 unsigned int i;
 int ret;
 filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
 if (!filter_ctx)
 return AVERROR(ENOMEM);

 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 filter_ctx[i].buffersrc_ctx = NULL;
 filter_ctx[i].buffersink_ctx = NULL;
 filter_ctx[i].filter_graph = NULL;
 if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
 || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
 continue;


 if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
 filter_spec = "null"; /* passthrough (dummy) filter for video */
 else
 filter_spec = "anull"; /* passthrough (dummy) filter for audio */
 ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
 stream_ctx[i].enc_ctx, filter_spec);
 if (ret)
 return ret;
 }
 return 0;
}

static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
 int ret;
 int got_frame_local;
 AVPacket enc_pkt;
 int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
 (ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
 AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

 if (!got_frame)
 got_frame = &got_frame_local;

 av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
 /* encode filtered frame */
 enc_pkt.data = NULL;
 enc_pkt.size = 0;
 av_init_packet(&enc_pkt);
 ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
 filt_frame, got_frame);
 av_frame_free(&filt_frame);
 if (ret < 0)
 return ret;
 if (!(*got_frame))
 return 0;

 /* prepare packet for muxing */
 enc_pkt.stream_index = stream_index;
 av_packet_rescale_ts(&enc_pkt,
 stream_ctx[stream_index].enc_ctx->time_base,
 ofmt_ctx->streams[stream_index]->time_base);

 av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
 /* mux encoded frame */
 ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
 return ret;
}

static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index) {
 int ret;
 AVFrame *filt_frame;

 av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
 /* push the decoded frame into the filtergraph */
 ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
 frame, 0);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
 return ret;
 }

 /* pull filtered frames from the filtergraph */
 while (1) {
 filt_frame = av_frame_alloc();
 if (!filt_frame) {
 ret = AVERROR(ENOMEM);
 break;
 }
 av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
 ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
 filt_frame);
 if (ret < 0) {
 /* if no more frames for output - returns AVERROR(EAGAIN)
 * if flushed and no more frames for output - returns AVERROR_EOF
 * rewrite retcode to 0 to show it as normal procedure completion
 */
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 ret = 0;
 av_frame_free(&filt_frame);
 break;
 }

 filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
 ret = encode_write_frame(filt_frame, stream_index, NULL);
 if (ret < 0)
 break;
 }

 return ret;
}

static int flush_encoder(unsigned int stream_index) {
 int ret;
 int got_frame;

 if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
 AV_CODEC_CAP_DELAY))
 return 0;

 while (1) {
 av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
 ret = encode_write_frame(NULL, stream_index, &got_frame);
 if (ret < 0)
 break;
 if (!got_frame)
 return 0;
 }
 return ret;
}

int main() {
 char *inputFile = "D:/20200623_094923.mp4";
 char *outputFile = "D:/test/te.m3u8";
 enum AVCodecID videoCodec = AV_CODEC_ID_H264;
 enum AVCodecID audioCodec = AV_CODEC_ID_AAC;

 int ret;
 AVPacket packet = {.data = NULL, .size = 0};
 AVFrame *frame = NULL;
 enum AVMediaType type;
 unsigned int stream_index;
 unsigned int i;
 int got_frame;
 int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

 if ((ret = open_input_file(inputFile)) < 0)
 goto end;
 if ((ret = open_output_file(outputFile, videoCodec, audioCodec)) < 0)
 goto end;
 if ((ret = init_filters()) < 0)
 goto end;

 /* read all packets */
 while (1) {
 if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
 break;
 stream_index = packet.stream_index;
 type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
 av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
 stream_index);

 if (filter_ctx[stream_index].filter_graph) {
 av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
 frame = av_frame_alloc();
 if (!frame) {
 ret = AVERROR(ENOMEM);
 break;
 }
 av_packet_rescale_ts(&packet,
 ifmt_ctx->streams[stream_index]->time_base,
 stream_ctx[stream_index].dec_ctx->time_base);
 dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
 avcodec_decode_audio4;
 ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
 &got_frame, &packet);
 if (ret < 0) {
 av_frame_free(&frame);
 av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
 break;
 }

 if (got_frame) {
 frame->pts = frame->best_effort_timestamp;
 ret = filter_encode_write_frame(frame, stream_index);
 av_frame_free(&frame);
 if (ret < 0)
 goto end;
 } else {
 av_frame_free(&frame);
 }
 } else {
 /* remux this frame without reencoding */
 av_packet_rescale_ts(&packet,
 ifmt_ctx->streams[stream_index]->time_base,
 ofmt_ctx->streams[stream_index]->time_base);

 ret = av_interleaved_write_frame(ofmt_ctx, &packet);
 if (ret < 0)
 goto end;
 }
 av_packet_unref(&packet);
 }

 /* flush filters and encoders */
 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 /* flush filter */
 if (!filter_ctx[i].filter_graph)
 continue;
 ret = filter_encode_write_frame(NULL, i);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
 goto end;
 }

 /* flush encoder */
 ret = flush_encoder(i);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
 goto end;
 }
 }

 av_write_trailer(ofmt_ctx);
 end:
 av_packet_unref(&packet);
 av_frame_free(&frame);
 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 avcodec_free_context(&stream_ctx[i].dec_ctx);
 if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
 avcodec_free_context(&stream_ctx[i].enc_ctx);
 if (filter_ctx && filter_ctx[i].filter_graph)
 avfilter_graph_free(&filter_ctx[i].filter_graph);
 }
 av_free(filter_ctx);
 av_free(stream_ctx);
 avformat_close_input(&ifmt_ctx);
 if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
 avio_closep(&ofmt_ctx->pb);
 avformat_free_context(ofmt_ctx);

 if (ret < 0)
 av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

 return ret ? 1 : 0;
}
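
For what it's worth, one way to get the hls muxer to honor a segment length, sketched with the same ofmt_ctx as above (the option names are real hls muxer options, but the paths are placeholders): request the "hls" muxer explicitly when allocating the output context and pass its options to avformat_write_header(), rather than poking priv_data afterwards:

/* Sketch: open an explicit HLS muxer and hand it its options at
 * header-write time. open_hls_output is a hypothetical helper;
 * stream creation and encoder setup are elided. */
static int open_hls_output(const char *playlist_path)
{
    AVDictionary *opts = NULL;
    int ret = avformat_alloc_output_context2(&ofmt_ctx, NULL, "hls", playlist_path);
    if (ret < 0)
        return ret;

    av_dict_set(&opts, "hls_time", "10", 0);        /* target segment length, seconds */
    av_dict_set(&opts, "hls_list_size", "0", 0);    /* keep every segment in the playlist */
    av_dict_set(&opts, "hls_segment_filename", "D:/test/a-%03d.ts", 0);

    /* ... create streams and open encoders exactly as in open_output_file() ... */

    ret = avformat_write_header(ofmt_ctx, &opts);   /* the muxer consumes its options here */
    av_dict_free(&opts);
    return ret;
}

Note that the hls muxer can only cut segments at keyframes, so the encoder's GOP size (enc_ctx->gop_size) bounds how closely the actual segment lengths track hls_time; a zero pkt->duration by itself does not prevent segmentation, since the muxer works from pts differences.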



-
C++ Qt FFMPEG RTMP: getting 0 fps
7 June 2021, by Jinwoo Lim

I've been programming in Qt, developing an RTMP server.

I have almost finished it, but the server says the sent RTMP video has 0 fps.

I think the data is being sent correctly: the server reports that both video in-bytes and audio in-bytes are above 0.

Also, whenever I play the RTMP video from the server with VLC, it only shows the exact frame that was due to be shown at the moment VLC connected to the server.

My conclusion is that my program sends the frames correctly, but the fps recorded by the RTMP server is 0, so VLC refuses to play the video.

Where did I go wrong?


#include "threadcam_rtmp.h"
#include "global.h"
#include "rtmpstream.h"

#include <vector>

#include <opencv2/highgui.hpp>
#include <opencv2/video.hpp>

extern "C" {
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavutil/avutil.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/time.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
}

#include <QApplication>
#include <QDebug>
#include <QPixmap>

ThreadCam_RTMP::ThreadCam_RTMP(int selectedCam, QMutex &mutex_img):
 mutex(mutex_img)
{
 cam_index = selectedCam;
 show_on = true;
}

void ThreadCam_RTMP::run()
{
 //Settings for opencv cam
 cam_index = start_cam_index;
 if (!cam.open(cam_index)) cam_on = false;
 cam.set(CV_CAP_PROP_FRAME_WIDTH, img_width);
 cam.set(CV_CAP_PROP_FRAME_HEIGHT, img_height);

 //Settings for RTMP streaming
 output = server.c_str();

 av_register_all();
 avdevice_register_all();
 avformat_network_init();

 ifmt = av_find_input_format("dshow");

 AVDictionary *device_param = 0;

 //Set audio device
 if (avformat_open_input(&ifmt_ctx_a, device_name_a, ifmt, &device_param) != 0)
 qDebug("Couldn't open audio stream.");

 //Audio input initialize
 if (avformat_find_stream_info(ifmt_ctx_a, NULL) < 0)
 qDebug("Couldn't find audio stream information.");

 audioindex = -1;
 for (int i = 0; i < ifmt_ctx_a->nb_streams; i++)
 {
 if(ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
 {
 audioindex = i;
 break;
 }
 }
 if (audioindex == -1) qDebug("Couldn't find an audio stream");
 if (avcodec_open2(ifmt_ctx_a->streams[audioindex]->codec, avcodec_find_decoder(ifmt_ctx_a->streams[audioindex]->codec->codec_id), NULL) < 0)
 qDebug("Couldn't open audio codec");

 //Output audio initialize
 out_codec_a = avcodec_find_encoder(AV_CODEC_ID_AAC);
 if(!out_codec_a)
 qDebug("Couldn't find output audio encoder.");
 out_codec_ctx_a = avcodec_alloc_context3(out_codec_a);
 out_codec_ctx_a->channels = 2;
 out_codec_ctx_a->channel_layout = av_get_default_channel_layout(2);
 out_codec_ctx_a->sample_rate = ifmt_ctx_a->streams[audioindex]->codec->sample_rate;
 out_codec_ctx_a->sample_fmt = out_codec_a->sample_fmts[0];
 out_codec_ctx_a->bit_rate = bitrate;
 out_codec_ctx_a->time_base.num = 1;
 out_codec_ctx_a->time_base.den = out_codec_ctx_a->sample_rate;
 out_codec_ctx_a->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
 if (avcodec_open2(out_codec_ctx_a, out_codec_a, NULL) < 0) qDebug("Couldn't open output audio encoder");

 //Output initialize
 initialize_avformat_context(ofmt_ctx, "flv");
 initialize_io_context(ofmt_ctx, output);

 out_codec = avcodec_find_encoder(AV_CODEC_ID_FLV1);
 if(!out_codec)
 qDebug("Couldn't find output video encoder.");
 out_stream = avformat_new_stream(ofmt_ctx, out_codec);
 out_codec_ctx = avcodec_alloc_context3(out_codec);

 set_codec_params(ofmt_ctx, out_codec_ctx, img_width, img_height, fps, bitrate);
 initialize_codec_stream(out_stream, out_codec_ctx, out_codec, codec_profile);

 out_stream->codecpar->extradata = out_codec_ctx->extradata;
 out_stream->codecpar->extradata_size = out_codec_ctx->extradata_size;

 //Add a new stream to output for muxing
 out_stream_a = avformat_new_stream(ofmt_ctx, out_codec_a);
 out_stream_a->time_base.num = 1;
 out_stream_a->time_base.den = out_codec_ctx_a->sample_rate;
 out_stream_a->codec = out_codec_ctx_a;

 av_dump_format(ofmt_ctx, 0, output, 1);

 int ret = avformat_write_header(ofmt_ctx, nullptr);
 if (ret < 0)
 {
 qDebug("Could not write header!");
 QApplication::quit();
 }

 aud_convert_ctx = swr_alloc_set_opts(NULL,
 av_get_default_channel_layout(out_codec_ctx_a->channels),
 out_codec_ctx_a->sample_fmt,
 out_codec_ctx_a->sample_rate,
 av_get_default_channel_layout(ifmt_ctx_a->streams[audioindex]->codec->channels),
 ifmt_ctx_a->streams[audioindex]->codec->sample_fmt,
 ifmt_ctx_a->streams[audioindex]->codec->sample_rate,
 0, NULL);

 swr_init(aud_convert_ctx);

 AVRational r_framerate1 = {fps, 1};
 int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));
 int64_t start_time = av_gettime();

 AVAudioFifo *fifo = NULL;
 fifo = av_audio_fifo_alloc(out_codec_ctx_a->sample_fmt, out_codec_ctx_a->channels, 1);

 uint8_t **converted_input_samples = NULL;
 if (!(converted_input_samples = (uint8_t**)calloc(out_codec_ctx_a->channels, sizeof(**converted_input_samples))))
 qDebug("Could not allocate converted input sample pointers");

 int dec_got_frame_a, enc_got_frame_a; 

 auto *frame = allocate_frame_buffer(out_codec_ctx, img_width, img_height);
 auto *swsctx = initialize_sample_scaler(out_codec_ctx, img_width, img_height);

 int64_t vid_pts = 0;

 ofmt_ctx->streams[0]->r_frame_rate = out_codec_ctx->framerate;
 ofmt_ctx->streams[0]->codec->time_base = out_codec_ctx->time_base;

 while(cam_on)
 {
 cv::Mat temp; // image captured from opencv cam
 show_on = keep_sending;

 //capture image from webcam
 if (cam.isOpened())
 {
 cam.read(temp);
 //convert BGR to RGB to show them
 cv::cvtColor(temp, temp, cv::COLOR_BGR2RGB);
 }

 if(show_on)
 {
 //resize and save read img to global variable cap_img
 mutex.lock();
 temp.copyTo(cap_img);
 QImage qimg = QImage(cap_img.data, img_width, img_height, img_width * img_channels, QImage::Format_RGB888);
 mutex.unlock();
 emit ThreadCam_RTMP::setImage(qimg);

 }

 //rtmp streaming
 if (encode_video || encode_audio)
 {
 if (encode_video && (!encode_audio || av_compare_ts(vid_next_pts, time_base_q, aud_next_pts, time_base_q) <= 0))
 {
 mutex.lock();
 AVPacket pkt = {0};
 av_init_packet(&pkt);

 frame = av_frame_alloc();

 std::vector<uint8_t> framebuf(av_image_get_buffer_size(out_codec_ctx->pix_fmt, img_width, img_height, 1));
 av_image_fill_arrays(frame->data, frame->linesize, framebuf.data(), out_codec_ctx->pix_fmt,img_width, img_height, 1);
 frame->width = img_width;
 frame->height = img_height;
 frame->format = static_cast<int>(out_codec_ctx->pix_fmt);

 const int stride[] = {static_cast<int>(temp.step[0])};
 sws_scale(swsctx, &temp.data, stride, 0, temp.rows, frame->data, frame->linesize);
 frame->pts = vid_pts + av_rescale_q(1, out_codec_ctx->time_base, out_stream->time_base);
 vid_pts = frame->pts;
 pkt.pts = frame->pts;
 qDebug() << frame->pts;

 int ret = avcodec_send_frame(out_codec_ctx, frame);
 if (ret < 0)
 {
 qDebug("Error sending frame to codec context!");
 av_log_set_callback(printerror);
 }

 ret = avcodec_receive_packet(out_codec_ctx, &pkt);
 if (ret < 0)
 {
 qDebug("Error receiving packet from codec context!");
 }

 av_interleaved_write_frame(ofmt_ctx, &pkt);
 av_packet_unref(&pkt);

 qDebug() << av_q2d(ofmt_ctx->streams[0]->codec->time_base);

 mutex.unlock();
 framecnt++;
 vid_next_pts = framecnt * calc_duration;
 }
 else
 {
 //audio trancoding here
 const int output_frame_size = out_codec_ctx_a->frame_size;

 /**
 * Make sure that there is one frame worth of samples in the FIFO
 * buffer so that the encoder can do its work.
 * Since the decoder's and the encoder's frame size may differ, we
 * need to FIFO buffer to store as many frames worth of input samples
 * that they make up at least one frame worth of output samples.
 */
 while (av_audio_fifo_size(fifo) < output_frame_size)
 {
 /**
 * Decode one frame worth of audio samples, convert it to the
 * output sample format and put it into the FIFO buffer.
 */
 AVFrame *input_frame = av_frame_alloc();
 if (!input_frame) ret = AVERROR(ENOMEM);

 /** Decode one frame worth of audio samples. */
 /** Packet used for temporary storage. */
 AVPacket input_packet;
 av_init_packet(&input_packet);
 input_packet.data = NULL;
 input_packet.size = 0;

 /** Read one audio frame from the input file into a temporary packet. */
 if ((ret = av_read_frame(ifmt_ctx_a, &input_packet)) < 0)
 {
 /** If we are at the end of the file, flush the decoder below. */
 if (ret == AVERROR_EOF) encode_audio = 0;
 else qDebug("Could not read audio frame");
 }

 /**
 * Decode the audio frame stored in the temporary packet.
 * The input audio stream decoder is used to do this.
 * If we are at the end of the file, pass an empty packet to the decoder
 * to flush it.
 */
 if ((ret = avcodec_decode_audio4(ifmt_ctx_a->streams[audioindex]->codec, input_frame, &dec_got_frame_a, &input_packet)) < 0)
 qDebug("Could not decode audio frame");

 av_packet_unref(&input_packet);
 /** If there is decoded data, convert and store it */
 if (dec_got_frame_a)
 {
 /**
 * Allocate memory for the samples of all channels in one consecutive
 * block for convenience.
 */
 if ((ret = av_samples_alloc(converted_input_samples, NULL, out_codec_ctx_a->channels,
 input_frame->nb_samples, out_codec_ctx_a->sample_fmt, 0)) < 0)
 {
 qDebug("Could not allocate converted input samples");
 av_freep(&(*converted_input_samples)[0]);
 free(*converted_input_samples);
 }

 /**
 * Convert the input samples to the desired output sample format.
 * This requires a temporary storage provided by converted_input_samples.
 */
 /** Convert the samples using the resampler. */
 if ((ret = swr_convert(aud_convert_ctx, converted_input_samples, input_frame->nb_samples,
 (const uint8_t**)input_frame->extended_data, input_frame->nb_samples)) < 0) {
 qDebug("Could not convert input samples"); qDebug() << ret;
 }

 /** Add the converted input samples to the FIFO buffer for later processing. */
 /**
 * Make the FIFO as large as it needs to be to hold both,
 * the old and the new samples.
 */
 if ((ret = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + input_frame->nb_samples)) < 0)
 qDebug("Could not reallocate FIFO");

 /** Store the new samples in the FIFO buffer. */
 if (av_audio_fifo_write(fifo, (void **)converted_input_samples,
 input_frame->nb_samples) < input_frame->nb_samples)
 qDebug("Could not write data to FIFO");
 }
 }

 /**
 * If we have enough samples for the encoder, we encode them.
 * At the end of the file, we pass the remaining samples to
 * the encoder.
 */
 if (av_audio_fifo_size(fifo) >= output_frame_size)
 /**
 * Take one frame worth of audio samples from the FIFO buffer,
 * encode it and write it to the output file.
 */
 {
 /** Temporary storage of the output samples of the frame written to the file. */
 AVFrame *output_frame = av_frame_alloc();
 if (!output_frame) ret = AVERROR(ENOMEM);
 /**
 * Use the maximum number of possible samples per frame.
 * If there is less than the maximum possible frame size in the FIFO
 * buffer use this number. Otherwise, use the maximum possible frame size
 */
 const int frame_size = FFMIN(av_audio_fifo_size(fifo), out_codec_ctx_a->frame_size);

 /** Initialize temporary storage for one output frame. */
 /**
 * Set the frame's parameters, especially its size and format.
 * av_frame_get_buffer needs this to allocate memory for the
 * audio samples of the frame.
 * Default channel layouts based on the number of channels
 * are assumed for simplicity.
 */
 output_frame->nb_samples = frame_size;
 output_frame->channel_layout = out_codec_ctx_a->channel_layout;
 output_frame->format = out_codec_ctx_a->sample_fmt;
 output_frame->sample_rate = out_codec_ctx_a->sample_rate;

 /**
 * Allocate the samples of the created frame. This call will make
 * sure that the audio frame can hold as many samples as specified.
 */
 if ((ret = av_frame_get_buffer(output_frame, 0)) < 0)
 {
 qDebug("Could not allocate output frame samples");
 av_frame_free(&output_frame);
 }

 /**
 * Read as many samples from the FIFO buffer as required to fill the frame.
 * The samples are stored in the frame temporarily.
 */
 if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size)
 qDebug("Could not read data from FIFO");

 /** Encode one frame worth of audio samples. */
 /** Packet used for temporary storage. */
 AVPacket output_packet;
 av_init_packet(&output_packet);
 output_packet.data = NULL;
 output_packet.size = 0;

 /** Set a timestamp based on the sample rate for the container. */
 if (output_frame) nb_samples += output_frame->nb_samples;

 /**
 * Encode the audio frame and store it in the temporary packet.
 * The output audio stream encoder is used to do this.
 */
 if ((ret = avcodec_encode_audio2(out_codec_ctx_a, &output_packet, output_frame, &enc_got_frame_a)) < 0)
 {
 qDebug("Could not encode frame");
 av_packet_unref(&output_packet);
 }

 /** Write one audio frame from the temporary packet to the output file. */
 if (enc_got_frame_a)
 {
 output_packet.stream_index = 1;

 AVRational time_base = ofmt_ctx->streams[1]->time_base;
 AVRational r_framerate1 = { ifmt_ctx_a->streams[audioindex]->codec->sample_rate, 1 };// { 44100, 1};
 int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));

 output_packet.pts = av_rescale_q(nb_samples*calc_duration, time_base_q, time_base);
 output_packet.dts = output_packet.pts;
 output_packet.duration = output_frame->nb_samples;

 //qDebug("audio pts : %d\n", output_packet.pts);
 aud_next_pts = nb_samples*calc_duration;

 int64_t pts_time = av_rescale_q(output_packet.pts, time_base, time_base_q);
 int64_t now_time = av_gettime() - start_time;
 if ((pts_time > now_time) && ((aud_next_pts + pts_time - now_time) < vid_next_pts))
     av_usleep(aud_next_pts + pts_time - now_time);

 /* the remainder of this branch was lost when the post was scraped; the
  * usual pattern in this example muxes the encoded audio packet here: */
 ret = av_interleaved_write_frame(ofmt_ctx, &output_packet);
 av_packet_unref(&output_packet);
 }
 av_frame_free(&output_frame);
 }
 }
 }
 }

 //cleanup
 if (out_stream_a) avcodec_close(out_stream_a->codec);
 if (fifo) av_audio_fifo_free(fifo);
 avio_close(ofmt_ctx->pb);
 avformat_free_context(ifmt_ctx_a);
 avformat_free_context(ofmt_ctx);
}



#include "rtmpstream.h"
#include "global.h"

extern "C" {
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavutil/avutil.h"
#include "libswscale/swscale.h"
}

#include <QMessageBox>
#include <QApplication>
#include <QDebug>

void initialize_avformat_context(AVFormatContext *&fctx, const char *format_name)
{
 int ret = avformat_alloc_output_context2(&fctx, nullptr, format_name, nullptr);
 if (ret != 0)
 {
 qDebug("Could not allocate output format context!");
 QApplication::quit();
 }
}

void initialize_io_context(AVFormatContext *&fctx, const char *output)
{
 if (!(fctx->oformat->flags & AVFMT_NOFILE))
 {
 int ret = avio_open2(&fctx->pb, output, AVIO_FLAG_WRITE, nullptr, nullptr);
 if (ret < 0)
 {
 qDebug("Could not open output IO context!");
 QApplication::quit();
 }
 }
}

void set_codec_params(AVFormatContext *fctx, AVCodecContext *codec_ctx, double width, double height, int fps, int bitrate)
{
 const AVRational dst_fps = {fps, 1};
 codec_ctx->codec_tag = 0;
 codec_ctx->codec_id = AV_CODEC_ID_FLV1;
 codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
 codec_ctx->width = width;
 codec_ctx->height = height;
 codec_ctx->gop_size = 12;
 codec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
 codec_ctx->framerate = dst_fps;
 codec_ctx->time_base = av_inv_q(dst_fps);
 codec_ctx->bit_rate = bitrate;
 codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}

void initialize_codec_stream(AVStream *stream, AVCodecContext *codec_ctx, AVCodec *codec, std::string codec_profile)
{
 int ret = avcodec_parameters_from_context(stream->codecpar, codec_ctx);
 if (ret < 0)
 {
 qDebug("Could not initialize stream codec parameters!");
 QApplication::quit();
 }

 // open video encoder
 ret = avcodec_open2(codec_ctx, codec, 0);
 if (ret < 0)
 {
 qDebug("Could not open video encoder!");
 QApplication::quit();
 }
}


SwsContext *initialize_sample_scaler(AVCodecContext *codec_ctx, double width, double height)
{
 SwsContext *swsctx = sws_getContext(width, height, AV_PIX_FMT_BGR24, width, height, codec_ctx->pix_fmt, SWS_BICUBIC, nullptr, nullptr, nullptr);
 if (!swsctx)
 {
 qDebug("Could not initialize sample scaler!");
 QApplication::quit();
 }

 return swsctx;
}

AVFrame *allocate_frame_buffer(AVCodecContext *codec_ctx, double width, double height)
{
 AVFrame *frame = av_frame_alloc();

 std::vector<uint8_t> framebuf(av_image_get_buffer_size(codec_ctx->pix_fmt, width, height, 1));
 av_image_fill_arrays(frame->data, frame->linesize, framebuf.data(), codec_ctx->pix_fmt, width, height, 1);
 frame->width = width;
 frame->height = height;
 frame->format = static_cast<int>(codec_ctx->pix_fmt);

 return frame;
}

void write_frame(AVCodecContext *codec_ctx, AVFormatContext *fmt_ctx, AVStream *st, AVFrame *frame)
{
 AVPacket pkt = {0};
 av_init_packet(&pkt);

 int ret = avcodec_send_frame(codec_ctx, frame);
 if (ret < 0)
 {
 qDebug("Error sending frame to codec context!");
 QApplication::quit();
 }

 ret = avcodec_receive_packet(codec_ctx, &pkt);
 if (ret < 0)
 {
 qDebug("Error receiving packet from codec context!");
 QApplication::quit();
 }

 /* rescale output packet timestamp values from codec to stream timebase */
 av_packet_rescale_ts(&pkt, codec_ctx->time_base, st->time_base);
 pkt.stream_index = st->index;

 av_interleaved_write_frame(fmt_ctx, &pkt);
 av_packet_unref(&pkt);
}

int flush_encoder_a(AVFormatContext *ifmt_ctx_a, AVFormatContext *ofmt_ctx, unsigned int stream_index, int nb_samples)
{
 int ret;
 int got_frame;
 AVPacket enc_pkt;
 if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities & AV_CODEC_CAP_DELAY)) return 0;
 while (1)
 {
 enc_pkt.data = NULL;
 enc_pkt.size = 0;
 av_init_packet(&enc_pkt);
 ret = avcodec_encode_audio2(ofmt_ctx->streams[stream_index]->codec, &enc_pkt, NULL, &got_frame);
 av_frame_free(NULL);

 if (ret < 0) break;
 if (!got_frame)
 {
 ret = 0;
 break;
 }

 qDebug("Flush Encoder: Succeed to encode 1 frame!\tsize:%5d\n", enc_pkt.size);
 nb_samples+=1024;

 //Write PTS
 AVRational time_base = ofmt_ctx->streams[stream_index]->time_base;//{ 1, 1000 };
 AVRational r_framerate1 = { ifmt_ctx_a->streams[0]->codec->sample_rate, 1 };
 AVRational time_base_q = { 1, AV_TIME_BASE };

 //Duration between 2 frames (us)
 int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));

 //Parameters
 enc_pkt.pts = av_rescale_q(nb_samples*calc_duration, time_base_q, time_base);
 enc_pkt.dts = enc_pkt.pts;
 enc_pkt.duration = 1024;

 /* copy packet*/
 //Convert PTS/DTS
 enc_pkt.pos = -1;

 //ofmt_ctx->duration = enc_pkt.duration * nb_samples;

 /* mux encoded frame */
 ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
 if (ret < 0) break;
 }
 return ret;
}
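
A plausible place to look, offered as a sketch rather than a confirmed fix (names as in the code above): the video frames are stamped in the stream time base up front and the packets are muxed without av_packet_rescale_ts(), so the FLV timestamps do not advance at a steady 1/fps rate and the server cannot derive a frame rate. The usual pattern is to stamp frames in the encoder time base (1/fps, as set_codec_params() configures it) and rescale each received packet into the stream time base on the way out, inside the capture loop:

/* Sketch: stamp in the encoder time base, rescale on the way out. */
static int64_t frame_index = 0;

frame->pts = frame_index++;                /* one tick per frame at 1/fps */

int ret = avcodec_send_frame(out_codec_ctx, frame);
while (ret >= 0) {
    AVPacket pkt = {0};
    av_init_packet(&pkt);
    ret = avcodec_receive_packet(out_codec_ctx, &pkt);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break;                             /* encoder needs more input / is done */
    if (ret < 0)
        break;                             /* real error */

    /* convert 1/fps ticks into the stream time base (1/1000 for FLV);
     * steadily increasing FLV timestamps are what let the server and
     * VLC derive a non-zero frame rate */
    av_packet_rescale_ts(&pkt, out_codec_ctx->time_base, out_stream->time_base);
    pkt.stream_index = out_stream->index;
    av_interleaved_write_frame(ofmt_ctx, &pkt);
    av_packet_unref(&pkt);
}

Treating AVERROR(EAGAIN) from avcodec_receive_packet() as "keep feeding frames" rather than as an error also matters here: the encoder may buffer its first frame, and the existing code logs an error and muxes a possibly empty packet in that case.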