
Media (3)
-
MediaSPIP Simple: future default graphic theme?
26 September 2013
Updated: October 2013
Language: French
Type: Video
-
GetID3 - File information block
9 April 2013
Updated: May 2013
Language: French
Type: Image
-
GetID3 - Additional buttons
9 April 2013
Updated: April 2013
Language: French
Type: Image
Other articles (46)
-
Authorizations overridden by plugins
27 April 2010, by Mediaspip core
autoriser_auteur_modifier() so that visitors are able to modify their information on the authors page
-
HTML5 audio and video support
13 April 2011
MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
The MediaSPIP player used has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
For older browsers the Flowplayer flash fallback is used.
MediaSPIP allows for media playback on major mobile platforms with the above (...)
-
HTML5 audio and video support
10 April 2011
MediaSPIP uses the HTML5 video and audio tags to play multimedia documents, taking advantage of the latest W3C innovations supported by modern browsers.
For older browsers, the Flowplayer flash fallback is used.
The HTML5 player used was created specifically for MediaSPIP: its appearance is fully customizable to match a chosen theme.
These technologies make it possible to deliver video and sound both on conventional computers (...)
On other sites (11657)
-
How to use the ffmpeg libraries to transform mp4 (h264 & aac) into m3u8 (HLS) in C code?
1 July 2020, by itning
I used the official example
transcoding.c
but the console prints pkt->duration = 0, so the HLS segment duration may not be precise.

I used this code to set the segment duration, but it had no effect:


av_opt_set_int(ofmt_ctx->priv_data, "hls_time", 5, AV_OPT_SEARCH_CHILDREN);


On the command line:


ffmpeg -i a.mp4 -codec copy -vbsf h264_mp4toannexb -map 0 -f segment -segment_list a.m3u8 -segment_time 10 a-%03d.ts


How can I achieve this command in C code?


This is my code:


/**
 * @file
 * API example for demuxing, decoding, filtering, encoding and muxing
 * @example transcoding.c
 */

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>

static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;
typedef struct FilteringContext {
 AVFilterContext *buffersink_ctx;
 AVFilterContext *buffersrc_ctx;
 AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx;

typedef struct StreamContext {
 AVCodecContext *dec_ctx;
 AVCodecContext *enc_ctx;
} StreamContext;
static StreamContext *stream_ctx;

static int open_input_file(const char *filename) {
 int ret;
 unsigned int i;

 ifmt_ctx = NULL;
 if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
 return ret;
 }

 if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
 return ret;
 }

 stream_ctx = av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
 if (!stream_ctx)
 return AVERROR(ENOMEM);

 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 AVStream *stream = ifmt_ctx->streams[i];
 AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
 AVCodecContext *codec_ctx;
 if (!dec) {
 av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
 return AVERROR_DECODER_NOT_FOUND;
 }
 codec_ctx = avcodec_alloc_context3(dec);
 if (!codec_ctx) {
 av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
 return AVERROR(ENOMEM);
 }
 ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
 "for stream #%u\n", i);
 return ret;
 }
 /* Reencode video & audio and remux subtitles etc. */
 if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
 || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
 if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
 codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
 /* Open decoder */
 ret = avcodec_open2(codec_ctx, dec, NULL);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
 return ret;
 }
 }
 stream_ctx[i].dec_ctx = codec_ctx;
 }

 av_dump_format(ifmt_ctx, 0, filename, 0);
 return 0;
}

static int open_output_file(const char *filename, enum AVCodecID videoCodecId, enum AVCodecID audioCodecId) {
 AVStream *out_stream;
 AVStream *in_stream;
 AVCodecContext *dec_ctx, *enc_ctx;
 AVCodec *encoder;
 int ret;
 unsigned int i;

 ofmt_ctx = NULL;
 avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
 if (!ofmt_ctx) {
 av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
 return AVERROR_UNKNOWN;
 }

 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 out_stream = avformat_new_stream(ofmt_ctx, NULL);
 if (!out_stream) {
 av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
 return AVERROR_UNKNOWN;
 }

 in_stream = ifmt_ctx->streams[i];
 dec_ctx = stream_ctx[i].dec_ctx;

 if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
 || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {

 if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
 encoder = avcodec_find_encoder(videoCodecId);
 } else {
 encoder = avcodec_find_encoder(audioCodecId);
 }
 //encoder = avcodec_find_encoder(dec_ctx->codec_id);
 if (!encoder) {
 av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
 return AVERROR_INVALIDDATA;
 }
 enc_ctx = avcodec_alloc_context3(encoder);
 if (!enc_ctx) {
 av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
 return AVERROR(ENOMEM);
 }

 /* In this example, we transcode to same properties (picture size,
 * sample rate etc.). These properties can be changed for output
 * streams easily using filters */
 if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
 enc_ctx->height = dec_ctx->height;
 enc_ctx->width = dec_ctx->width;
 enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
 /* take first format from list of supported formats */
 if (encoder->pix_fmts)
 enc_ctx->pix_fmt = encoder->pix_fmts[0];
 else
 enc_ctx->pix_fmt = dec_ctx->pix_fmt;
 /* video time_base can be set to whatever is handy and supported by encoder */
 enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
 } else {
 enc_ctx->sample_rate = dec_ctx->sample_rate;
 enc_ctx->channel_layout = dec_ctx->channel_layout;
 enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
 /* take first format from list of supported formats */
 enc_ctx->sample_fmt = encoder->sample_fmts[0];
 enc_ctx->time_base = (AVRational) {1, enc_ctx->sample_rate};
 }

 if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
 enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

 /* Third parameter can be used to pass settings to encoder */
 ret = avcodec_open2(enc_ctx, encoder, NULL);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
 return ret;
 }
 ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
 return ret;
 }

 out_stream->time_base = enc_ctx->time_base;
 stream_ctx[i].enc_ctx = enc_ctx;
 } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
 av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
 return AVERROR_INVALIDDATA;
 } else {
 /* if this stream must be remuxed */
 ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
 return ret;
 }
 out_stream->time_base = in_stream->time_base;
 }

 }
 av_dump_format(ofmt_ctx, 0, filename, 1);

 /* This sets the hls muxer's private "hls_time" option; it has to happen
 * before avformat_write_header() and only takes effect if the format
 * guessed from the output filename really is hls (it is, for .m3u8). */
 av_opt_set_int(ofmt_ctx->priv_data, "hls_time", 5, AV_OPT_SEARCH_CHILDREN);

 if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
 ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
 return ret;
 }
 }

 /* init muxer, write output file header */
 ret = avformat_write_header(ofmt_ctx, NULL);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
 return ret;
 }

 return 0;
}

static int init_filter(FilteringContext *fctx, AVCodecContext *dec_ctx,
 AVCodecContext *enc_ctx, const char *filter_spec) {
 char args[512];
 int ret = 0;
 const AVFilter *buffersrc = NULL;
 const AVFilter *buffersink = NULL;
 AVFilterContext *buffersrc_ctx = NULL;
 AVFilterContext *buffersink_ctx = NULL;
 AVFilterInOut *outputs = avfilter_inout_alloc();
 AVFilterInOut *inputs = avfilter_inout_alloc();
 AVFilterGraph *filter_graph = avfilter_graph_alloc();

 if (!outputs || !inputs || !filter_graph) {
 ret = AVERROR(ENOMEM);
 goto end;
 }

 if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
 buffersrc = avfilter_get_by_name("buffer");
 buffersink = avfilter_get_by_name("buffersink");
 if (!buffersrc || !buffersink) {
 av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
 ret = AVERROR_UNKNOWN;
 goto end;
 }

 snprintf(args, sizeof(args),
 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
 dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
 dec_ctx->time_base.num, dec_ctx->time_base.den,
 dec_ctx->sample_aspect_ratio.num,
 dec_ctx->sample_aspect_ratio.den);

 ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
 args, NULL, filter_graph);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
 goto end;
 }

 ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
 NULL, NULL, filter_graph);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
 goto end;
 }

 ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
 (uint8_t *) &enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
 AV_OPT_SEARCH_CHILDREN);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
 goto end;
 }
 } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
 buffersrc = avfilter_get_by_name("abuffer");
 buffersink = avfilter_get_by_name("abuffersink");
 if (!buffersrc || !buffersink) {
 av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
 ret = AVERROR_UNKNOWN;
 goto end;
 }

 if (!dec_ctx->channel_layout)
 dec_ctx->channel_layout =
 av_get_default_channel_layout(dec_ctx->channels);
 snprintf(args, sizeof(args),
 "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
 dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
 av_get_sample_fmt_name(dec_ctx->sample_fmt),
 dec_ctx->channel_layout);
 ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
 args, NULL, filter_graph);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
 goto end;
 }

 ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
 NULL, NULL, filter_graph);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
 goto end;
 }

 ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
 (uint8_t *) &enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
 AV_OPT_SEARCH_CHILDREN);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
 goto end;
 }

 ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
 (uint8_t *) &enc_ctx->channel_layout,
 sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
 goto end;
 }

 ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
 (uint8_t *) &enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
 AV_OPT_SEARCH_CHILDREN);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
 goto end;
 }
 } else {
 ret = AVERROR_UNKNOWN;
 goto end;
 }

 /* Endpoints for the filter graph. */
 outputs->name = av_strdup("in");
 outputs->filter_ctx = buffersrc_ctx;
 outputs->pad_idx = 0;
 outputs->next = NULL;

 inputs->name = av_strdup("out");
 inputs->filter_ctx = buffersink_ctx;
 inputs->pad_idx = 0;
 inputs->next = NULL;

 if (!outputs->name || !inputs->name) {
 ret = AVERROR(ENOMEM);
 goto end;
 }

 if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
 &inputs, &outputs, NULL)) < 0)
 goto end;

 if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
 goto end;

 /* Fill FilteringContext */
 fctx->buffersrc_ctx = buffersrc_ctx;
 fctx->buffersink_ctx = buffersink_ctx;
 fctx->filter_graph = filter_graph;

 end:
 avfilter_inout_free(&inputs);
 avfilter_inout_free(&outputs);

 return ret;
}

static int init_filters(void) {
 const char *filter_spec;
 unsigned int i;
 int ret;
 filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
 if (!filter_ctx)
 return AVERROR(ENOMEM);

 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 filter_ctx[i].buffersrc_ctx = NULL;
 filter_ctx[i].buffersink_ctx = NULL;
 filter_ctx[i].filter_graph = NULL;
 if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
 || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
 continue;


 if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
 filter_spec = "null"; /* passthrough (dummy) filter for video */
 else
 filter_spec = "anull"; /* passthrough (dummy) filter for audio */
 ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
 stream_ctx[i].enc_ctx, filter_spec);
 if (ret)
 return ret;
 }
 return 0;
}

static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
 int ret;
 int got_frame_local;
 AVPacket enc_pkt;
 int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
 (ifmt_ctx->streams[stream_index]->codecpar->codec_type ==
 AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
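 /* avcodec_encode_video2()/avcodec_encode_audio2() are the old encode API,
 * deprecated in favour of avcodec_send_frame()/avcodec_receive_packet()
 * in current FFmpeg releases. */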

 if (!got_frame)
 got_frame = &got_frame_local;

 av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
 /* encode filtered frame */
 enc_pkt.data = NULL;
 enc_pkt.size = 0;
 av_init_packet(&enc_pkt);
 ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
 filt_frame, got_frame);
 av_frame_free(&filt_frame);
 if (ret < 0)
 return ret;
 if (!(*got_frame))
 return 0;

 /* prepare packet for muxing */
 enc_pkt.stream_index = stream_index;
 av_packet_rescale_ts(&enc_pkt,
 stream_ctx[stream_index].enc_ctx->time_base,
 ofmt_ctx->streams[stream_index]->time_base);
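
 /* Sketch, untested: if the encoder left pkt->duration at 0 (as the
 * question reports), a video frame's duration can be derived here,
 * because enc_ctx->time_base was set to 1/framerate in open_output_file():
 * one tick of that time_base per frame, rescaled to the stream time_base. */
 if (enc_pkt.duration == 0 &&
 ifmt_ctx->streams[stream_index]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
 enc_pkt.duration = av_rescale_q(1,
 stream_ctx[stream_index].enc_ctx->time_base,
 ofmt_ctx->streams[stream_index]->time_base);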

 av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
 /* mux encoded frame */
 ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
 return ret;
}

static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index) {
 int ret;
 AVFrame *filt_frame;

 av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
 /* push the decoded frame into the filtergraph */
 ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
 frame, 0);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
 return ret;
 }

 /* pull filtered frames from the filtergraph */
 while (1) {
 filt_frame = av_frame_alloc();
 if (!filt_frame) {
 ret = AVERROR(ENOMEM);
 break;
 }
 av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
 ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
 filt_frame);
 if (ret < 0) {
 /* if no more frames for output - returns AVERROR(EAGAIN)
 * if flushed and no more frames for output - returns AVERROR_EOF
 * rewrite retcode to 0 to show it as normal procedure completion
 */
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 ret = 0;
 av_frame_free(&filt_frame);
 break;
 }

 filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
 ret = encode_write_frame(filt_frame, stream_index, NULL);
 if (ret < 0)
 break;
 }

 return ret;
}

static int flush_encoder(unsigned int stream_index) {
 int ret;
 int got_frame;

 if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
 AV_CODEC_CAP_DELAY))
 return 0;

 while (1) {
 av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
 ret = encode_write_frame(NULL, stream_index, &got_frame);
 if (ret < 0)
 break;
 if (!got_frame)
 return 0;
 }
 return ret;
}

int main() {
 char *inputFile = "D:/20200623_094923.mp4";
 char *outputFile = "D:/test/te.m3u8";
 enum AVCodecID videoCodec = AV_CODEC_ID_H264;
 enum AVCodecID audioCodec = AV_CODEC_ID_AAC;

 int ret;
 AVPacket packet = {.data = NULL, .size = 0};
 AVFrame *frame = NULL;
 enum AVMediaType type;
 unsigned int stream_index;
 unsigned int i;
 int got_frame;
 int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
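 /* avcodec_decode_video2()/avcodec_decode_audio4() below are likewise the
 * old decode API, deprecated in favour of avcodec_send_packet() and
 * avcodec_receive_frame(). */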

 if ((ret = open_input_file(inputFile)) < 0)
 goto end;
 if ((ret = open_output_file(outputFile, videoCodec, audioCodec)) < 0)
 goto end;
 if ((ret = init_filters()) < 0)
 goto end;

 /* read all packets */
 while (1) {
 if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
 break;
 stream_index = packet.stream_index;
 type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
 av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
 stream_index);

 if (filter_ctx[stream_index].filter_graph) {
 av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
 frame = av_frame_alloc();
 if (!frame) {
 ret = AVERROR(ENOMEM);
 break;
 }
 av_packet_rescale_ts(&packet,
 ifmt_ctx->streams[stream_index]->time_base,
 stream_ctx[stream_index].dec_ctx->time_base);
 dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
 avcodec_decode_audio4;
 ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
 &got_frame, &packet);
 if (ret < 0) {
 av_frame_free(&frame);
 av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
 break;
 }

 if (got_frame) {
 frame->pts = frame->best_effort_timestamp;
 ret = filter_encode_write_frame(frame, stream_index);
 av_frame_free(&frame);
 if (ret < 0)
 goto end;
 } else {
 av_frame_free(&frame);
 }
 } else {
 /* remux this frame without reencoding */
 av_packet_rescale_ts(&packet,
 ifmt_ctx->streams[stream_index]->time_base,
 ofmt_ctx->streams[stream_index]->time_base);

 ret = av_interleaved_write_frame(ofmt_ctx, &packet);
 if (ret < 0)
 goto end;
 }
 av_packet_unref(&packet);
 }

 /* flush filters and encoders */
 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 /* flush filter */
 if (!filter_ctx[i].filter_graph)
 continue;
 ret = filter_encode_write_frame(NULL, i);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
 goto end;
 }

 /* flush encoder */
 ret = flush_encoder(i);
 if (ret < 0) {
 av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
 goto end;
 }
 }

 av_write_trailer(ofmt_ctx);
 end:
 av_packet_unref(&packet);
 av_frame_free(&frame);
 for (i = 0; i < ifmt_ctx->nb_streams; i++) {
 avcodec_free_context(&stream_ctx[i].dec_ctx);
 if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
 avcodec_free_context(&stream_ctx[i].enc_ctx);
 if (filter_ctx && filter_ctx[i].filter_graph)
 avfilter_graph_free(&filter_ctx[i].filter_graph);
 }
 av_free(filter_ctx);
 av_free(stream_ctx);
 avformat_close_input(&ifmt_ctx);
 if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
 avio_closep(&ofmt_ctx->pb);
 avformat_free_context(ofmt_ctx);

 if (ret < 0)
 av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

 return ret ? 1 : 0;
}
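
One thing worth checking, sketched under two assumptions: that the output name te.m3u8 makes avformat_alloc_output_context2() pick the hls muxer (the command line above uses the segment muxer instead, via -f segment), and that the hls muxer's private options are what should control segment length here. Those options (hls_time, hls_list_size, and so on) can be passed as an AVDictionary to avformat_write_header() rather than being set on priv_data; passing the value as a string also lets the option parser interpret it in the option's natural unit (seconds):

 /* Hedged sketch: hand the hls muxer its options at header-write time.
 * hls_time is the target segment length in seconds; hls_list_size=0
 * keeps every segment in the playlist. Values are illustrative. */
 AVDictionary *mux_opts = NULL;
 av_dict_set(&mux_opts, "hls_time", "5", 0);
 av_dict_set(&mux_opts, "hls_list_size", "0", 0);
 ret = avformat_write_header(ofmt_ctx, &mux_opts);
 av_dict_free(&mux_opts); /* entries left in the dict were not consumed */
 if (ret < 0)
 return ret;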



-
Produce waveform video from audio using FFMPEG
27 April 2017, by RhythmicDevil
I am trying to create a waveform video from audio. My goal is to produce a video that looks something like this.
For my test I have an mp3 that plays a short clipped sound: 4 bars of 1/4 notes and 4 bars of 1/8 notes played at 120 bpm. I am having some trouble coming up with the right combination of preprocessing and filtering to produce a video that looks like the image. The colors don't have to be exact; I am more concerned with the shape of the beats. I tried a couple of different approaches using showwaves and showspectrum. I can't quite wrap my head around why the beats go past so quickly when using showwaves, while showspectrum produces a video where I can see each individual beat.
ShowWaves
ffmpeg -i beat_test.mp3 -filter_complex "[0:a]showwaves=s=1280x100:mode=cline:rate=25:scale=sqrt,format=yuv420p[v]" -map "[v]" -map 0:a output_wav.mp4
This link will download the output of that command.
ShowSpectrum
ffmpeg -i beat_test.mp3 -filter_complex "[0:a]showspectrum=s=1280x100:mode=combined:color=intensity:saturation=5:slide=1:scale=cbrt,format=yuv420p[v]" -map "[v]" -an -map 0:a output_spec.mp4
This link will download the output of that command.
I posted the simple examples because I didn’t want to confuse the issue by adding all the variations I have tried.
In practice I suppose I can get away with the output from showspectrum but I’d like to understand where/how I am thinking about this incorrectly. Thanks for any advice.
Here is a link to the source audio file.
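
A hedged note on the showwaves behaviour: showwaves draws a fresh window for every output frame, and at the default rate=25 each frame covers only 1/25 s of audio, which is why the beats flash past; showspectrum with slide=1 scrolls old columns across the frame instead of discarding them. showwaves also has an n option (the number of samples drawn per output column, which cannot be combined with an explicit rate), so each frame then spans width*n samples; the value below is illustrative, not tuned:

ffmpeg -i beat_test.mp3 -filter_complex "[0:a]showwaves=s=1280x100:mode=cline:n=800:scale=sqrt,format=yuv420p[v]" -map "[v]" -map 0:a output_wav_n.mp4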
-
Aspect ratio problems when transcoding with ffmpeg [closed]
19 November 2023, by udippel
I have a huge collection of videos from the last 20+ years, in all sorts of formats. I use gerbera as an open-source UPnP-AV media server. Our TV handles only a very limited subset of these formats, so I use gerbera's transcoding feature (I don't want to convert the 2000+ files, and this avoids losing multiple audio tracks, (multiple) subtitles, and so forth).


This is my current unified argument line for ffmpeg:

-c:v mpeg2video -maxrate 20000k -vf setdar=16/9 -r 24000/1001 -qscale:v 4 -top 1 -c:a mp2 -f mpeg -y

It works pretty well, except for the aspect ratios. Well, I don't fully understand this, because ffprobe for file A states:
Stream #0:0: Video: mpeg4 (Simple Profile) (XVID / 0x44495658), yuv420p, 624x464 [SAR 1:1 DAR 39:29], 1500 kb/s, 25 fps, 25 tbr, 25 tbn, 25 tbc

This file displays very well.
File B comes as:
Stream #0:0(eng): Video: h264 (High), yuv420p(tv, bt709, progressive), 960x720, SAR 1:1 DAR 4:3, 23.98 fps, 23.98 tbr, 1k tbn, 180k tbc (default)

This file displays horribly squeezed vertically, and doesn't fill the screen left and right either, with the same TV settings. Also, when playing this file (and others, naturally) the TV doesn't offer the 14:9 display option, which is available e.g. for the file further up.

Both have the same SAR (1:1), almost identical H:V ratios (1.345 vs. 1.333), and almost identical DAR.


My questions:

- Why, despite almost identical pixel ratios, DAR and SAR, are these files handled so differently in one and the same session on the same TV (Sony)?
- With which method could I instruct ffmpeg to display the second file properly, too? (I have already tried 'scale', but to no avail, which could have been foreseen, since the ratios are already very close.) My guess is that the (tv, bt709, progressive) part messes things up. (I have already tried to add yuv420p to the argument line, also to no avail.)

Appreciate any help,


Uwe


I have already tried force_original_aspect_ratio, but also here, nothing improved. Also, I played with -aspect, but the aspect ratios are okay and would need individual corrections, which I can't and don't want to do for 2000+ files. A simple 16:9 doesn't cut it.
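
One direction to try, sketched without having been tested on these files: rather than forcing setdar=16/9 on every input (which stretches genuine 4:3 material such as file B), scale and pad everything into one fixed 16:9 frame, so the TV always receives the same geometry. The 1280x720 target here is illustrative:

-c:v mpeg2video -maxrate 20000k -vf "scale=1280:720:force_original_aspect_ratio=decrease,pad=1280:720:(ow-iw)/2:(oh-ih)/2,setsar=1" -r 24000/1001 -qscale:v 4 -top 1 -c:a mp2 -f mpeg -y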