
Media (91)
-
Richard Stallman and free software
19 October 2011
Updated: May 2013
Language: French
Type: Text
-
Stereo master soundtrack
17 October 2011
Updated: October 2011
Language: English
Type: Audio
-
Elephants Dream - Cover of the soundtrack
17 October 2011
Updated: October 2011
Language: English
Type: Image
-
#7 Ambience
16 October 2011
Updated: June 2015
Language: English
Type: Audio
-
#6 Teaser Music
16 October 2011
Updated: February 2013
Language: English
Type: Audio
-
#5 End Title
16 October 2011
Updated: February 2013
Language: English
Type: Audio
Other articles (74)
-
Websites made with MediaSPIP
2 May 2011
This page lists some websites based on MediaSPIP.
-
Adding a personal logo, banner or background image
5 September 2013
Some themes support three customisation elements: adding a logo; adding a banner; adding a background image.
-
Creating farms of unique websites
13 April 2011
MediaSPIP platforms can be installed as a farm, with a single "core" hosted on a dedicated server and used by multiple websites.
This allows (among other things): implementation costs to be shared between several different projects/individuals; rapid deployment of multiple unique sites; creation of groups of like-minded sites, making it possible to browse media in a more controlled and selective environment than the major "open" (...)
On other sites (10987)
-
How to reduce time while writing to output stream
9 February 2021, by Summit
I am streaming the render output of an OpenGL application over MPEG-TS. The issue I am facing is that the time taken to encode each frame is quite long.


The application renders at 60 fps with a frame size of 1920x1080; the frame data is pushed into a std::queue.
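
The queue itself is not shown in the post; purely for context, here is a minimal sketch of what a mutex-guarded handoff between the render thread and the encoder thread might look like (all names here are hypothetical, not from the original code):

#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <queue>
#include <vector>

// Hypothetical frame handoff: the render thread pushes raw RGB frames,
// the encoder thread pops them. Unbounded, like a plain std::queue.
struct FrameQueue {
    std::queue<std::vector<uint8_t>> frames;
    std::mutex m;
    std::condition_variable cv;

    void push(std::vector<uint8_t> rgb) {
        {
            std::lock_guard<std::mutex> lock(m);
            frames.push(std::move(rgb));
        }
        cv.notify_one();
    }

    std::vector<uint8_t> pop() {  // blocks until a frame is available
        std::unique_lock<std::mutex> lock(m);
        cv.wait(lock, [this] { return !frames.empty(); });
        std::vector<uint8_t> f = std::move(frames.front());
        frames.pop();
        return f;
    }
};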


This is the setup process for FFmpeg.


I initialize the stream like this.

 streamerUpd.InitUPD("udp://127.0.0.1:1234", 1920, 1080, rings_);

int StreamUPD::InitUPD(const char* url, int width, int height, std::shared_ptr<ringbuffer2> rings)
{

 rings_ = rings;
 width_ = width;
 height_ = height;
 filename = url;
 int ret;
 av_dict_set(&opt, "pkt_size", "1316", 0);
 
 
 avformat_alloc_output_context2(&oc, nullptr, "mpegts", filename);
 if (!oc) {
 return 1;
 }

 fmt = oc->oformat;
 /* Add the audio and video streams using the default format codecs
 * and initialize the codecs. */
 if (fmt->video_codec != AV_CODEC_ID_NONE) {
 add_stream(&video_st, oc, &video_codec, fmt->video_codec);
 have_video = 1;
 encode_video = 1;
 }

 /* Write the stream header, if any. */
 ret = avformat_write_header(oc, &opt);
 if (ret < 0) {
 fprintf(stderr, "Error occurred when opening output file: %s\n",
 av_err2str(ret));
 return 1;
 }
 thr = std::thread(&StreamUPD::output_result, this);
 return 0;
}


////////////////////////////////////////////////////////////////////////////////////////


// Add the output stream


void StreamUPD::add_stream(OutputStream* ost, AVFormatContext* oc, AVCodec** codec, enum AVCodecID codec_id)
{
 AVCodecContext* c;
 int i;
 /* find the encoder */
 *codec = avcodec_find_encoder(codec_id);
 if (!(*codec)) {
 fprintf(stderr, "Could not find encoder for '%s'\n",
 avcodec_get_name(codec_id));
 exit(1);
 }
 ost->st = avformat_new_stream(oc, NULL);
 if (!ost->st) {
 fprintf(stderr, "Could not allocate stream\n");
 exit(1);
 }
 ost->st->id = oc->nb_streams - 1;
 c = avcodec_alloc_context3(*codec);
 if (!c) {
 fprintf(stderr, "Could not alloc an encoding context\n");
 exit(1);
 }
 ost->enc = c;
 switch ((*codec)->type) {
 case AVMEDIA_TYPE_VIDEO:
 c->codec_id = codec_id;
 c->bit_rate = 400000;

 /* Resolution must be a multiple of two. */
 c->width = width_;
 c->height = height_;
 /* timebase: This is the fundamental unit of time (in seconds) in terms
 * of which frame timestamps are represented. For fixed-fps content,
 * timebase should be 1/framerate and timestamp increments should be
 * identical to 1. */
 ost->st->time_base = { 1, STREAM_FRAME_RATE };
 c->time_base = ost->st->time_base;
 c->gop_size = 12; /* emit one intra frame every twelve frames at most */
 c->pix_fmt = STREAM_PIX_FMT;
 
 if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
 /* just for testing, we also add B-frames */
 qDebug() << "This is MPEG2VIDEO Frame";
 c->max_b_frames = 2;
 
 }
 if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
 /* Needed to avoid using macroblocks in which some coeffs overflow.
 * This does not happen with normal video, it just happens here as
 * the motion of the chroma plane does not match the luma plane. */
 c->mb_decision = 2;
 }
 break;
 default:
 break;
 }
 /* Some formats want stream headers to be separate. */
 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
 c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}



//////////////////////////////////////////////////////////////////////////////////


// Open the video


void StreamUPD::open_video(AVFormatContext* oc, AVCodec* codec, OutputStream* ost, AVDictionary* opt_arg)
 {
 int ret;
 AVCodecContext* c = ost->enc;
 AVDictionary* opt = NULL;
 av_dict_copy(&opt, opt_arg, 0);
 /* open the codec */
 ret = avcodec_open2(c, codec, &opt);
 av_dict_free(&opt);
 if (ret < 0) {
 fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
 exit(1);
 }
 /* allocate and init a re-usable frame */
 ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
 if (!ost->frame) {
 fprintf(stderr, "Could not allocate video frame\n");
 exit(1);
 }
 /* If the output format is not YUV420P, then a temporary YUV420P
 * picture is needed too. It is then converted to the required
 * output format. */
 ost->tmp_frame = NULL;
 if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
 ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
 if (!ost->tmp_frame) {
 fprintf(stderr, "Could not allocate temporary picture\n");
 exit(1);
 }
 }
 /* copy the stream parameters to the muxer */
 ret = avcodec_parameters_from_context(ost->st->codecpar, c);
 if (ret < 0) {
 fprintf(stderr, "Could not copy the stream parameters\n");
 exit(1);
 }
 }



Once I have set up the FFmpeg output stream, this is how I stream the data.


This function gets the frame data from the std::queue (pixelsQueue) and sends it for encoding.


int StreamUPD::stream_video_frame()
{ 
 ost = &video_st;
 c = ost->enc; 

 /* when we pass a frame to the encoder, it may keep a reference to it
 * internally; make sure we do not overwrite it here */
 if (av_frame_make_writable(ost->frame) < 0)
 exit(1);
 if (!ost->sws_ctx) {
 ost->sws_ctx = sws_getContext(c->width, c->height,
 AV_PIX_FMT_RGB24,
 c->width, c->height,
 c->pix_fmt,
 SWS_FAST_BILINEAR, NULL, NULL, NULL);
 if (!ost->sws_ctx) {
 fprintf(stderr,
 "Could not initialize the conversion context\n");
 exit(1);
 }
 }
 finished_ = true;

 if (pixelsQueue.size() > 0) { 
 if (pixelsQueue.pop(pixels)) {
 fill_yuv_image(ost->sws_ctx, frame_data->pixels_.get(), ost->frame, c->width, c->height);
 ost->frame->pts = ost->next_pts++;
 return write_frame(oc, ost->enc, ost->st, ost->frame);
 }
 }
 return 1;
}



Writing the data to the output stream.


The function avcodec_receive_packet is the one that takes a lot of time.


int StreamUPD::write_frame(AVFormatContext* fmt_ctx, AVCodecContext* c,
 AVStream* st, AVFrame* frame)
{
 int ret;
 // send the frame to the encoder
 ret = avcodec_send_frame(c, frame);
 if (ret < 0) {
 fprintf(stderr, "Error sending a frame to the encoder: %s\n",
 av_err2str(ret));
 exit(1);
 }
 
 while (ret >= 0) {
 AVPacket pkt = { 0 };
 ret = avcodec_receive_packet(c, &pkt); // This is the call that takes a lot of time
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 break;
 else if (ret < 0) {
 fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
 exit(1);
 }
 // rescale output packet timestamp values from codec to stream timebase 
 av_packet_rescale_ts(&pkt, c->time_base, st->time_base);
 pkt.stream_index = st->index;
 // Write the compressed frame to the media file. 
 ret = av_interleaved_write_frame(fmt_ctx, &pkt);
 av_packet_unref(&pkt);
 if (ret < 0) {
 fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
 exit(1);
 }
 }
 return ret == AVERROR_EOF ? 1 : 0;
}



How can I reduce the output time when writing frames to the stream?


Currently frames are pushed into the buffer faster than they can be written out, so the buffer runs out of memory after a while.
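
Two encoder-side settings usually dominate this kind of per-frame latency: the preset (the stream above is opened with the muxer's default video codec) and B-frame reordering (the MPEG-2 branch sets max_b_frames = 2, which delays output by up to two frames). Purely as a hedged sketch, this is how one might open a low-latency H.264 encoder instead; it assumes libx264 is compiled into the FFmpeg build, and "preset"/"tune" are libx264 private options that other encoders ignore:

static int open_low_latency_h264(AVCodecContext* c)
{
    // Assumption: libx264 is available. In add_stream() the codec context
    // would need to be allocated from this encoder rather than the default.
    AVCodec* codec = avcodec_find_encoder_by_name("libx264");
    if (!codec)
        return AVERROR_ENCODER_NOT_FOUND;

    c->max_b_frames = 0;  // B-frames add at least one frame of delay

    AVDictionary* opts = nullptr;
    av_dict_set(&opts, "preset", "ultrafast", 0);  // cheapest encode path
    av_dict_set(&opts, "tune", "zerolatency", 0);  // disables lookahead buffering

    int ret = avcodec_open2(c, codec, &opts);
    av_dict_free(&opts);
    return ret;
}

A bounded queue (dropping or blocking the producer when full) would also keep the buffer from growing without limit while the encoder catches up.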


-
how to extract audio from video using ffmpeg c++
29 September 2020, by daenerys
I am trying to write C++ code to extract audio from an MP4 file. I have compiled the examples in the FFmpeg library and tried to run the demuxing_decoding.c file. The problem is that on running the code, it decodes far more than the actual file size, and to the wrong codec format (the decoded files can't be played).



Here is the demuxing_decoding.c that I am trying to run:



#include <math.h>

#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>

#define INBUF_SIZE 4096
#define AUDIO_INBUF_SIZE 20480
#define AUDIO_REFILL_THRESH 4096

/* check that a given sample format is supported by the encoder */
static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
{
 const enum AVSampleFormat *p = codec->sample_fmts;

 while (*p != AV_SAMPLE_FMT_NONE) {
 if (*p == sample_fmt)
 return 1;
 p++;
 }
 return 0;
}

/* just pick the highest supported samplerate */
static int select_sample_rate(AVCodec *codec)
{
 const int *p;
 int best_samplerate = 0;

 if (!codec->supported_samplerates)
 return 44100;

 p = codec->supported_samplerates;
 while (*p) {
 best_samplerate = FFMAX(*p, best_samplerate);
 p++;
 }
 return best_samplerate;
}

/* select layout with the highest channel count */
static int select_channel_layout(AVCodec *codec)
{
 const uint64_t *p;
 uint64_t best_ch_layout = 0;
 int best_nb_channels = 0;

 if (!codec->channel_layouts)
 return AV_CH_LAYOUT_STEREO;

 p = codec->channel_layouts;
 while (*p) {
 int nb_channels = av_get_channel_layout_nb_channels(*p);

 if (nb_channels > best_nb_channels) {
 best_ch_layout = *p;
 best_nb_channels = nb_channels;
 }
 p++;
 }
 return best_ch_layout;
}

/*
 * Audio encoding example
 */
static void audio_encode_example(const char *filename)
{
 AVCodec *codec;
 AVCodecContext *c= NULL;
 AVFrame *frame;
 AVPacket pkt;
 int i, j, k, ret, got_output;
 int buffer_size;
 FILE *f;
 uint16_t *samples;
 float t, tincr;

 printf("Encode audio file %s\n", filename);

 /* find the MP2 encoder */
 codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
 if (!codec) {
 fprintf(stderr, "Codec not found\n");
 exit(1);
 }

 c = avcodec_alloc_context3(codec);
 if (!c) {
 fprintf(stderr, "Could not allocate audio codec context\n");
 exit(1);
 }

 /* put sample parameters */
 c->bit_rate = 64000;

 /* check that the encoder supports s16 pcm input */
 c->sample_fmt = AV_SAMPLE_FMT_S16;
 if (!check_sample_fmt(codec, c->sample_fmt)) {
 fprintf(stderr, "Encoder does not support sample format %s",
 av_get_sample_fmt_name(c->sample_fmt));
 exit(1);
 }

 /* select other audio parameters supported by the encoder */
 c->sample_rate = select_sample_rate(codec);
 c->channel_layout = select_channel_layout(codec);
 c->channels = av_get_channel_layout_nb_channels(c->channel_layout);

 /* open it */
 if (avcodec_open2(c, codec, NULL) < 0) {
 fprintf(stderr, "Could not open codec\n");
 exit(1);
 }

 f = fopen(filename, "wb");
 if (!f) {
 fprintf(stderr, "Could not open %s\n", filename);
 exit(1);
 }

 /* frame containing input raw audio */
 frame = av_frame_alloc();
 if (!frame) {
 fprintf(stderr, "Could not allocate audio frame\n");
 exit(1);
 }

 frame->nb_samples = c->frame_size;
 frame->format = c->sample_fmt;
 frame->channel_layout = c->channel_layout;

 /* the codec gives us the frame size, in samples,
 * we calculate the size of the samples buffer in bytes */
 buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
 c->sample_fmt, 0);
 if (buffer_size < 0) {
 fprintf(stderr, "Could not get sample buffer size\n");
 exit(1);
 }
 samples = av_malloc(buffer_size);
 if (!samples) {
 fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
 buffer_size);
 exit(1);
 }
 /* setup the data pointers in the AVFrame */
 ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
 (const uint8_t*)samples, buffer_size, 0);
 if (ret < 0) {
 fprintf(stderr, "Could not setup audio frame\n");
 exit(1);
 }

 /* encode a single tone sound */
 t = 0;
 tincr = 2 * M_PI * 440.0 / c->sample_rate;
 for (i = 0; i < 200; i++) {
 av_init_packet(&pkt);
 pkt.data = NULL; // packet data will be allocated by the encoder
 pkt.size = 0;

 for (j = 0; j < c->frame_size; j++) {
 samples[2*j] = (int)(sin(t) * 10000);

 for (k = 1; k < c->channels; k++)
 samples[2*j + k] = samples[2*j];
 t += tincr;
 }
 /* encode the samples */
 ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
 if (ret < 0) {
 fprintf(stderr, "Error encoding audio frame\n");
 exit(1);
 }
 if (got_output) {
 fwrite(pkt.data, 1, pkt.size, f);
 av_free_packet(&pkt);
 }
 }

 /* get the delayed frames */
 for (got_output = 1; got_output; i++) {
 ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
 if (ret < 0) {
 fprintf(stderr, "Error encoding frame\n");
 exit(1);
 }

 if (got_output) {
 fwrite(pkt.data, 1, pkt.size, f);
 av_free_packet(&pkt);
 }
 }
 fclose(f);

 av_freep(&samples);
 av_frame_free(&frame);
 avcodec_close(c);
 av_free(c);
}

/*
 * Audio decoding.
 */
static void audio_decode_example(const char *outfilename, const char *filename)
{
 AVCodec *codec;
 AVCodecContext *c= NULL;
 int len;
 FILE *f, *outfile;
 uint8_t inbuf[AUDIO_INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
 AVPacket avpkt;
 AVFrame *decoded_frame = NULL;

 av_init_packet(&avpkt);

 printf("Decode audio file %s to %s\n", filename, outfilename);

 /* find the mpeg audio decoder */
 codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
 if (!codec) {
 fprintf(stderr, "Codec not found\n");
 exit(1);
 }

 c = avcodec_alloc_context3(codec);
 if (!c) {
 fprintf(stderr, "Could not allocate audio codec context\n");
 exit(1);
 }

 /* open it */
 if (avcodec_open2(c, codec, NULL) < 0) {
 fprintf(stderr, "Could not open codec\n");
 exit(1);
 }

 f = fopen(filename, "rb");
 if (!f) {
 fprintf(stderr, "Could not open %s\n", filename);
 exit(1);
 }
 outfile = fopen(outfilename, "wb");
 if (!outfile) {
 av_free(c);
 exit(1);
 }

 /* decode until eof */
 avpkt.data = inbuf;
 avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);

 while (avpkt.size > 0) {
 int i, ch;
 int got_frame = 0;

 if (!decoded_frame) {
 if (!(decoded_frame = av_frame_alloc())) {
 fprintf(stderr, "Could not allocate audio frame\n");
 exit(1);
 }
 }

 len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
 if (len < 0) {
 fprintf(stderr, "Error while decoding\n");
 exit(1);
 }
 if (got_frame) {
 /* if a frame has been decoded, output it */
 int data_size = av_get_bytes_per_sample(c->sample_fmt);
 if (data_size < 0) {
 /* This should not occur, checking just for paranoia */
 fprintf(stderr, "Failed to calculate data size\n");
 exit(1);
 }
 for (i = 0; i < decoded_frame->nb_samples; i++)
 for (ch = 0; ch < c->channels; ch++)
 fwrite(decoded_frame->data[ch] + data_size*i, 1, data_size, outfile);
 }
 avpkt.size -= len;
 avpkt.data += len;
 avpkt.dts =
 avpkt.pts = AV_NOPTS_VALUE;
 if (avpkt.size < AUDIO_REFILL_THRESH) {
 /* Refill the input buffer, to avoid trying to decode
 * incomplete frames. Instead of this, one could also use
 * a parser, or use a proper container format through
 * libavformat. */
 memmove(inbuf, avpkt.data, avpkt.size);
 avpkt.data = inbuf;
 len = fread(avpkt.data + avpkt.size, 1,
 AUDIO_INBUF_SIZE - avpkt.size, f);
 if (len > 0)
 avpkt.size += len;
 }
 }

 fclose(outfile);
 fclose(f);

 avcodec_close(c);
 av_free(c);
 av_frame_free(&decoded_frame);
}

/*
 * Video encoding example
 */
static void video_encode_example(const char *filename, int codec_id)
{
 AVCodec *codec;
 AVCodecContext *c= NULL;
 int i, ret, x, y, got_output;
 FILE *f;
 AVFrame *frame;
 AVPacket pkt;
 uint8_t endcode[] = { 0, 0, 1, 0xb7 };

 printf("Encode video file %s\n", filename);

 /* find the mpeg1 video encoder */
 codec = avcodec_find_encoder(codec_id);
 if (!codec) {
 fprintf(stderr, "Codec not found\n");
 exit(1);
 }

 c = avcodec_alloc_context3(codec);
 if (!c) {
 fprintf(stderr, "Could not allocate video codec context\n");
 exit(1);
 }

 /* put sample parameters */
 c->bit_rate = 400000;
 /* resolution must be a multiple of two */
 c->width = 352;
 c->height = 288;
 /* frames per second */
 c->time_base = (AVRational){1,25};
 /* emit one intra frame every ten frames
 * check frame pict_type before passing frame
 * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
 * then gop_size is ignored and the output of encoder
 * will always be I frame irrespective to gop_size
 */
 c->gop_size = 10;
 c->max_b_frames = 1;
 c->pix_fmt = AV_PIX_FMT_YUV420P;

 if (codec_id == AV_CODEC_ID_H264)
 av_opt_set(c->priv_data, "preset", "slow", 0);

 /* open it */
 if (avcodec_open2(c, codec, NULL) < 0) {
 fprintf(stderr, "Could not open codec\n");
 exit(1);
 }

 f = fopen(filename, "wb");
 if (!f) {
 fprintf(stderr, "Could not open %s\n", filename);
 exit(1);
 }

 frame = av_frame_alloc();
 if (!frame) {
 fprintf(stderr, "Could not allocate video frame\n");
 exit(1);
 }
 frame->format = c->pix_fmt;
 frame->width = c->width;
 frame->height = c->height;

 /* the image can be allocated by any means and av_image_alloc() is
 * just the most convenient way if av_malloc() is to be used */
 ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
 c->pix_fmt, 32);
 if (ret < 0) {
 fprintf(stderr, "Could not allocate raw picture buffer\n");
 exit(1);
 }

 /* encode 1 second of video */
 for (i = 0; i < 25; i++) {
 av_init_packet(&pkt);
 pkt.data = NULL; // packet data will be allocated by the encoder
 pkt.size = 0;

 fflush(stdout);
 /* prepare a dummy image */
 /* Y */
 for (y = 0; y < c->height; y++) {
 for (x = 0; x < c->width; x++) {
 frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
 }
 }

 /* Cb and Cr */
 for (y = 0; y < c->height/2; y++) {
 for (x = 0; x < c->width/2; x++) {
 frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
 frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
 }
 }

 frame->pts = i;

 /* encode the image */
 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
 if (ret < 0) {
 fprintf(stderr, "Error encoding frame\n");
 exit(1);
 }

 if (got_output) {
 printf("Write frame %3d (size=%5d)\n", i, pkt.size);
 fwrite(pkt.data, 1, pkt.size, f);
 av_free_packet(&pkt);
 }
 }

 /* get the delayed frames */
 for (got_output = 1; got_output; i++) {
 fflush(stdout);

 ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
 if (ret < 0) {
 fprintf(stderr, "Error encoding frame\n");
 exit(1);
 }

 if (got_output) {
 printf("Write frame %3d (size=%5d)\n", i, pkt.size);
 fwrite(pkt.data, 1, pkt.size, f);
 av_free_packet(&pkt);
 }
 }

 /* add sequence end code to have a real mpeg file */
 fwrite(endcode, 1, sizeof(endcode), f);
 fclose(f);

 avcodec_close(c);
 av_free(c);
 av_freep(&frame->data[0]);
 av_frame_free(&frame);
 printf("\n");
}

/*
 * Video decoding example
 */

static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
 char *filename)
{
 FILE *f;
 int i;

 f = fopen(filename,"w");
 fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
 for (i = 0; i < ysize; i++)
 fwrite(buf + i * wrap, 1, xsize, f);
 fclose(f);
}

static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
 AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
{
 int len, got_frame;
 char buf[1024];

 len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
 if (len < 0) {
 fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
 return len;
 }
 if (got_frame) {
 printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
 fflush(stdout);

 /* the picture is allocated by the decoder, no need to free it */
 snprintf(buf, sizeof(buf), outfilename, *frame_count);
 pgm_save(frame->data[0], frame->linesize[0],
 frame->width, frame->height, buf);
 (*frame_count)++;
 }
 if (pkt->data) {
 pkt->size -= len;
 pkt->data += len;
 }
 return 0;
}

static void video_decode_example(const char *outfilename, const char *filename)
{
 AVCodec *codec;
 AVCodecContext *c= NULL;
 int frame_count;
 FILE *f;
 AVFrame *frame;
 uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
 AVPacket avpkt;

 av_init_packet(&avpkt);

 /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
 memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);

 printf("Decode video file %s to %s\n", filename, outfilename);

 /* find the mpeg1 video decoder */
 codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
 if (!codec) {
 fprintf(stderr, "Codec not found\n");
 exit(1);
 }

 c = avcodec_alloc_context3(codec);
 if (!c) {
 fprintf(stderr, "Could not allocate video codec context\n");
 exit(1);
 }

 if (codec->capabilities & AV_CODEC_CAP_TRUNCATED)
 c->flags |= AV_CODEC_FLAG_TRUNCATED; // we do not send complete frames

 /* For some codecs, such as msmpeg4 and mpeg4, width and height
 MUST be initialized there because this information is not
 available in the bitstream. */

 /* open it */
 if (avcodec_open2(c, codec, NULL) < 0) {
 fprintf(stderr, "Could not open codec\n");
 exit(1);
 }

 f = fopen(filename, "rb");
 if (!f) {
 fprintf(stderr, "Could not open %s\n", filename);
 exit(1);
 }

 frame = av_frame_alloc();
 if (!frame) {
 fprintf(stderr, "Could not allocate video frame\n");
 exit(1);
 }

 frame_count = 0;
 for (;;) {
 avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
 if (avpkt.size == 0)
 break;

 /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
 and this is the only method to use them because you cannot
 know the compressed data size before analysing it.

 BUT some other codecs (msmpeg4, mpeg4) are inherently frame
 based, so you must call them with all the data for one
 frame exactly. You must also initialize 'width' and
 'height' before initializing them. */

 /* NOTE2: some codecs allow the raw parameters (frame size,
 sample rate) to be changed at any frame. We handle this, so
 you should also take care of it */

 /* here, we use a stream based decoder (mpeg1video), so we
 feed decoder and see if it could decode a frame */
 avpkt.data = inbuf;
 while (avpkt.size > 0)
 if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
 exit(1);
 }

 /* some codecs, such as MPEG, transmit the I and P frame with a
 latency of one frame. You must do the following to have a
 chance to get the last frame of the video */
 avpkt.data = NULL;
 avpkt.size = 0;
 decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);

 fclose(f);

 avcodec_close(c);
 av_free(c);
 av_frame_free(&frame);
 printf("\n");
}

int main(int argc, char **argv)
{
 const char *output_type;

 /* register all the codecs */
 avcodec_register_all();

 if (argc < 2) {
 printf("usage: %s output_type\n"
 "API example program to decode/encode a media stream with libavcodec.\n"
 "This program generates a synthetic stream and encodes it to a file\n"
 "named test.h264, test.mp2 or test.mpg depending on output_type.\n"
 "The encoded stream is then decoded and written to a raw data output.\n"
 "output_type must be chosen between 'h264', 'mp2', 'mpg'.\n",
 argv[0]);
 return 1;
 }
 output_type = argv[1];

 if (!strcmp(output_type, "h264")) {
 video_encode_example("test.h264", AV_CODEC_ID_H264);
 } else if (!strcmp(output_type, "mp2")) {
 audio_encode_example("test.mp2");
 audio_decode_example("test.pcm", "test.mp2");
 } else if (!strcmp(output_type, "mpg")) {
 video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
 video_decode_example("test%02d.pgm", "test.mpg");
 } else {
 fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n",
 output_type);
 return 1;
 }

 return 0;
}




What could be wrong?
The command I used is:



./demuxing_decoding /home/cortana/Burn.mp4 /home/cortana/Desktop/Burn.mp4 /home/cortana/Desktop/Burn.aac
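
As an aside, the source pasted above is FFmpeg's combined decoding/encoding example rather than the demuxing one: its own usage text says it takes a single output_type argument and generates a synthetic stream, which would explain why a three-argument command produces unplayable output. For the actual goal of pulling the audio track out of an MP4, here is a hedged sketch of a remux-only approach (no decoding; assumes a 4.x-era libavformat API as in the logs above, error handling trimmed for brevity):

extern "C" {
#include <libavformat/avformat.h>
}

// Copy the compressed audio packets of an input file into a new container
// (e.g. an out_name ending in ".aac" selects ADTS), without re-encoding.
static int extract_audio(const char* in_name, const char* out_name)
{
    AVFormatContext* in = nullptr;
    if (avformat_open_input(&in, in_name, nullptr, nullptr) < 0) return -1;
    if (avformat_find_stream_info(in, nullptr) < 0) return -1;

    int audio_idx = av_find_best_stream(in, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0);
    if (audio_idx < 0) return -1;

    AVFormatContext* out = nullptr;
    avformat_alloc_output_context2(&out, nullptr, nullptr, out_name);
    if (!out) return -1;

    AVStream* out_st = avformat_new_stream(out, nullptr);
    avcodec_parameters_copy(out_st->codecpar, in->streams[audio_idx]->codecpar);

    if (!(out->oformat->flags & AVFMT_NOFILE) &&
        avio_open(&out->pb, out_name, AVIO_FLAG_WRITE) < 0) return -1;
    if (avformat_write_header(out, nullptr) < 0) return -1;

    AVPacket pkt;
    while (av_read_frame(in, &pkt) >= 0) {
        if (pkt.stream_index == audio_idx) {
            // Rescale timestamps from the input stream's time base.
            av_packet_rescale_ts(&pkt, in->streams[audio_idx]->time_base,
                                 out_st->time_base);
            pkt.stream_index = 0;
            pkt.pos = -1;
            av_interleaved_write_frame(out, &pkt);
        }
        av_packet_unref(&pkt);
    }

    av_write_trailer(out);
    if (!(out->oformat->flags & AVFMT_NOFILE))
        avio_closep(&out->pb);
    avformat_free_context(out);
    avformat_close_input(&in);
    return 0;
}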



-
FFMPEG API Mp4 H264 Encoding/Muxing - unspecified pixel format
28 July 2020, by Fabrice
I'm working on a C++ project using FFmpeg. I have to generate an MP4 file with H.264 encoding.


My problem is that the file is generated, but when playing it with VLC I get no image, and analysing it with ffprobe gives me (log below) the error:




unspecified pixel format




ffprobe version N-93020-g3224d6691c Copyright (c) 2007-2019 the FFmpeg developers
 built with gcc 8.2.1 (GCC) 20181201
 configuration: --disable-static --enable-shared --enable-gpl --enable-version3 --enable-sdl2 --enable-fontconfig --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libtheora --enable-libtwolame --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libzimg --enable-lzma --enable-zlib --enable-gmp --enable-libvidstab --enable-libvorbis --enable-libvo-amrwbenc --enable-libmysofa --enable-libspeex --enable-libxvid --enable-libaom --enable-libmfx --enable-amf --enable-ffnvcodec --enable-cuvid --enable-d3d11va --enable-nvenc --enable-nvdec --enable-dxva2 --enable-avisynth --enable-libopenmpt
 libavutil 56. 26.100 / 56. 26.100
 libavcodec 58. 44.100 / 58. 44.100
 libavformat 58. 26.100 / 58. 26.100
 libavdevice 58. 6.101 / 58. 6.101
 libavfilter 7. 48.100 / 7. 48.100
 libswscale 5. 4.100 / 5. 4.100
 libswresample 3. 4.100 / 3. 4.100
 libpostproc 55. 4.100 / 55. 4.100
[h264 @ 02a46240] non-existing PPS 0 referenced
[h264 @ 02a46240] decode_slice_header error
[h264 @ 02a46240] no frame!
...
[h264 @ 02a46240] non-existing PPS 0 referenced
[h264 @ 02a46240] decode_slice_header error
[h264 @ 02a46240] no frame!
[mov,mp4,m4a,3gp,3g2,mj2 @ 02a35380] decoding for stream 0 failed
[mov,mp4,m4a,3gp,3g2,mj2 @ 02a35380] Could not find codec parameters for stream 0 (Video: h264 (avc1 / 0x31637661), none, 352x288, 320 kb/s): unspecified pixel format
Consider increasing the value for the 'analyzeduration' and 'probesize' options
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'C:\Users\Fabrice\Desktop\video\Test.mp4':
 Metadata:
 major_brand : isom
 minor_version : 512
 compatible_brands: isomiso2avc1mp41
 encoder : Lavf58.26.100
 Duration: 00:00:09.00, start: 0.000000, bitrate: 323 kb/s
 Stream #0:0(und): Video: h264 (avc1 / 0x31637661), none, 352x288, 320 kb/s, 25.11 fps, 25 tbr, 12800 tbn, 25600 tbc (default)
 Metadata:
 handler_name : VideoHandler



Here is the code I use to generate my MP4 file. It is based on a sample from FFmpeg (see: FFmpeg muxing sample), which I have tried to adapt without using deprecated functions. It works with webm/vp8 encoding, but not mp4/h264.


#include 
#include 
#include 
extern "C"
{
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libavformat/avformat.h>
#include <libavutil/error.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
}

#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "swscale.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")

/* 10 seconds stream duration */
#define STREAM_DURATION 10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */

//#pragma warning(disable : 4996) // TODO: remove

static int sws_flags = SWS_BICUBIC;

/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *formatContext, AVCodec **codec, enum AVCodecID codecId, AVCodecContext **codecCtx)
{
 AVStream *stream;

 // Get the encoder codec
 *codec = avcodec_find_encoder(codecId);
 if (!(*codec)) {
 fprintf(stderr, "Could not find encoder for '%s'\n",
 avcodec_get_name(codecId));
 exit(1);
 }

 // Get the stream for codec
 stream = avformat_new_stream(formatContext, *codec);
 if (!stream) {
 fprintf(stderr, "Could not allocate stream\n");
 exit(1);
 }
 stream->id = formatContext->nb_streams - 1;

 (*codecCtx) = avcodec_alloc_context3(*codec);

 switch ((*codec)->type) {
 case AVMEDIA_TYPE_VIDEO:
 stream->codecpar->codec_id = codecId;
 stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 stream->codecpar->bit_rate = 400000;
 stream->codecpar->width = 352;
 stream->codecpar->height = 288;
 stream->codecpar->format = STREAM_PIX_FMT;
 stream->time_base = { 1, STREAM_FRAME_RATE };

 avcodec_parameters_to_context((*codecCtx), stream->codecpar);
 (*codecCtx)->gop_size = 12; /* emit one intra frame every twelve frames at most */
 (*codecCtx)->max_b_frames = 2;
 (*codecCtx)->time_base = { 1, STREAM_FRAME_RATE };
 if ((*codecCtx)->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
 /* Needed to avoid using macroblocks in which some coeffs overflow.
 * This does not happen with normal video, it just happens here as
 * the motion of the chroma plane does not match the luma plane. */
 (*codecCtx)->mb_decision = 2;
 }
 break;

 default:
 break;
 }
 
 //if (stream->codecpar->codec_id == AV_CODEC_ID_H264) {
 // av_opt_set(codecCtx, "preset", "ultrafast", 0);
 //}
 //(*codecCtx)->flags |= AV_CODEC_FLAG_LOW_DELAY;

 /* Some formats want stream headers to be separate. */
 if (formatContext->oformat->flags & AVFMT_GLOBALHEADER)
 (*codecCtx)->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;


 int ret = avcodec_parameters_from_context(stream->codecpar, (*codecCtx));
 if (ret < 0) {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "avcodec_parameters_from_context returned (%d) - %s", ret, error);
 return false;
 }

 return stream;
}

/**************************************************************/
/* video output */

static AVFrame *frame_video;
static int frame_count;

static void open_video(AVCodec *codec, AVStream *stream, AVCodecContext *codecCtx)
{
 int ret;

 /* open the codec */
 ret = avcodec_open2(codecCtx, codec, NULL);
 if (ret < 0) {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Could not open video codec: %s\n", error);
 exit(1);
 }

 /* allocate and init a re-usable frame */
 frame_video = av_frame_alloc();
 if (!frame_video) {
 fprintf(stderr, "Could not allocate video frame\n");
 exit(1);
 }

 frame_video->format = codecCtx->pix_fmt;
 frame_video->width = codecCtx->width;
 frame_video->height = codecCtx->height;

 ret = av_frame_get_buffer(frame_video, 32);
 if (ret < 0) {
 fprintf(stderr, "Could not allocate the video frame data\n");
 exit(1);
 }
}

/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
 int x, y, i;

 i = frame_index;

 /* Y */
 for (y = 0; y < height; y++)
 for (x = 0; x < width; x++)
 pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

 /* Cb and Cr */
 for (y = 0; y < height / 2; y++) {
 for (x = 0; x < width / 2; x++) {
 pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
 pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
 }
 }
}

int timestamp = 0;
static void write_video_frame(AVFormatContext *formatContext, AVStream *stream, AVCodecContext *codecCtx)
{
 int ret;
 static struct SwsContext *sws_ctx;

 if (frame_count >= STREAM_NB_FRAMES) {
 /* No more frames to compress. The codec has a latency of a few
 * frames if using B-frames, so we get the last frames by
 * passing the same picture again. */
 }
 else {
 if (codecCtx->pix_fmt != AV_PIX_FMT_YUV420P) {
 /* as we only generate a YUV420P picture, we must convert it
 * to the codec pixel format if needed */
 if (!sws_ctx) {
 sws_ctx = sws_getContext(codecCtx->width, codecCtx->height, AV_PIX_FMT_YUV420P,
 codecCtx->width, codecCtx->height, codecCtx->pix_fmt,
 sws_flags, NULL, NULL, NULL);
 if (!sws_ctx) {
 fprintf(stderr, "Could not initialize the conversion context\n");
 exit(1);
 }
 }
 fill_yuv_image(frame_video, frame_count, codecCtx->width, codecCtx->height);
 sws_scale(sws_ctx, (const uint8_t * const *)frame_video->data, frame_video->linesize,
 0, codecCtx->height, frame_video->data, frame_video->linesize);
 }
 else {
 fill_yuv_image(frame_video, frame_count, codecCtx->width, codecCtx->height);
 }
 }

 frame_video->format = AV_PIX_FMT_YUV420P;
 frame_video->width = codecCtx->width;
 frame_video->height = codecCtx->height;

 if (formatContext->oformat->flags & 0x0020) {
 /* Raw video case - directly store the picture in the packet */
 AVPacket pkt;
 av_init_packet(&pkt);

 pkt.flags |= AV_PKT_FLAG_KEY;
 pkt.stream_index = stream->index;
 pkt.data = frame_video->data[0];
 pkt.size = sizeof(AVPicture);

 ret = av_interleaved_write_frame(formatContext, &pkt);
 }
 else {
 AVPacket pkt = { 0 };
 av_init_packet(&pkt);

 /* encode the image */
 fprintf(stderr, "\nFrame type : %c\n", av_get_picture_type_char(frame_video->pict_type));
 fprintf(stderr, "Frame pts: %lld, \n", frame_video->pts);
 fprintf(stderr, "Codec timebase: %d/%d\n", codecCtx->time_base.num, codecCtx->time_base.den);
 fprintf(stderr, "Stream timebase: %d/%d\n", stream->time_base.num, stream->time_base.den);
 fprintf(stderr, "Resacale: %lld, \n\n", av_rescale_q(1, codecCtx->time_base, stream->time_base));
 ret = avcodec_send_frame(codecCtx, frame_video);
 if (ret < 0) {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Error encoding video frame: %s\n", error);
 exit(1);
 }
 /* If size is zero, it means the image was buffered. */
 ret = avcodec_receive_packet(codecCtx, &pkt);
 if (!ret && pkt.size) {
 pkt.stream_index = stream->index;
 fprintf(stderr, "Packet flags : %d\n", pkt.flags);
 fprintf(stderr, "Packet pts: %lld\n", pkt.pts);
 fprintf(stderr, "Packet dts: %lld\n", pkt.dts);
 fprintf(stderr, "Packet duration: %lld\n", pkt.duration);
 fprintf(stderr, "Packet pos: %lld\n\n", pkt.pos);
 
 /* Write the compressed frame to the media file. */
 ret = av_interleaved_write_frame(formatContext, &pkt);
 }
 else {
 ret = 0;
 }
 }
 if (ret != 0) {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Error while writing video frame: %s\n", error);
 exit(1);
 }
 frame_count++;
}

static void close_video(AVFormatContext *oc, AVStream *st)
{
 av_free(frame_video->data[0]);
 av_free(frame_video);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
 // The output media
 char filename[100];
 const char *mediaFormat = "mp4"; AVCodecID mediaVideoCodec = AV_CODEC_ID_H264;
 //const char *mediaFormat="webm"; AVCodecID mediaVideoCodec = AV_CODEC_ID_VP8;
 AVOutputFormat *formatOut;
 AVFormatContext *formatCtx;

 // The video stream
 AVStream *stream_video;
 AVCodec *codec_video = nullptr;
 AVCodecContext *codecCtx_video = nullptr;
 double time_video = 0;

 // Return code
 int ret;

 strcpy_s(filename, "C:\\Test.");
 strcat_s(filename, mediaFormat);

 // allocate the output media context
 avformat_alloc_output_context2(&formatCtx, NULL, NULL, filename);
 if (!formatCtx) {
 return 1;
 }
 formatOut = formatCtx->oformat;

 // Add the video stream using H264 codec
 stream_video = NULL;
 stream_video = add_stream(formatCtx, &codec_video, mediaVideoCodec, &codecCtx_video);

 // Open video codec and allocate the necessary encode buffers
 if (stream_video)
 open_video(codec_video, stream_video, codecCtx_video);

 av_dump_format(formatCtx, 0, filename, 1);

 // Open the output media file, if needed
 if (!(formatOut->flags & AVFMT_NOFILE)) {
 ret = avio_open(&formatCtx->pb, filename, AVIO_FLAG_WRITE);
 if (ret < 0) {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Could not open '%s': %s\n", filename, error);
 return 1;
 }
 }

 // Write media header
 ret = avformat_write_header(formatCtx, NULL);
 if (ret < 0) {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Error occurred when opening output file: %s\n", error);
 return 1;
 }

 if (frame_video)
 frame_video->pts = 0;
 for (;;) {
 // Compute video time from last added video frame
 time_video = ((double)frame_video->pts) * av_q2d(stream_video->time_base);

 // Stop media if enough time
 if (!stream_video || time_video >= STREAM_DURATION)
 break;

 // Add a video frame
 write_video_frame(formatCtx, stream_video, codecCtx_video);

 // Increase frame pts according to time base
 frame_video->pts += av_rescale_q(1, codecCtx_video->time_base, stream_video->time_base);
 }

 // Write media trailer
 av_write_trailer(formatCtx);

 /* Close each codec. */
 if (stream_video)
 close_video(formatCtx, stream_video);

 if (!(formatOut->flags & AVFMT_NOFILE))
 /* Close the output file. */
 avio_close(formatCtx->pb);

 /* free the stream */
 avformat_free_context(formatCtx);

 return 0;
}



What am I missing? Which part gives me this error?
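
One plausible cause, consistent with the "non-existing PPS 0 referenced" lines and the "none" pixel format in the probe output: add_stream() calls avcodec_parameters_from_context() before open_video() runs avcodec_open2(), and with AV_CODEC_FLAG_GLOBAL_HEADER set the encoder only fills codecCtx->extradata (the H.264 SPS/PPS that MP4 stores globally) at open time, so the muxer never receives it. A hedged sketch of the fix, copying the parameters again at the end of open_video() once the codec is open:

/* At the end of open_video(), after avcodec_open2() has succeeded:
 * codecCtx->extradata now contains the SPS/PPS the mp4 muxer needs. */
ret = avcodec_parameters_from_context(stream->codecpar, codecCtx);
if (ret < 0) {
    fprintf(stderr, "Could not copy the stream parameters\n");
    exit(1);
}

The parameters_to_context/parameters_from_context round-trip in add_stream() can stay as-is; it is the pre-open copy being the only one that leaves the avc1 box without SPS/PPS.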