
How to generate a fixed duration and fps for a video using FFmpeg C++ libraries? [closed]
4 November 2024, by BlueSky Light Programmer

I'm following the official mux example to write a C++ class that generates a video with a fixed duration (5 s) and a fixed fps (60). For some reason, the duration of the output video is 3-4 seconds, although I call the function that writes frames 300 times and set the fps to 60.
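
To spell out the arithmetic behind that expectation: each frame advances pts by one tick of the stream time base (see getVideoFrame below), so with time_base = 1/60 the output should come to

    expected duration = nb_frames × time_base = 300 × (1/60) s = 5 s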


Can you take a look at the code below and spot what I'm doing wrong?


#include "ffmpeg.h"

#include <iostream>

static int writeFrame(AVFormatContext *fmt_ctx, AVCodecContext *c,
 AVStream *st, AVFrame *frame, AVPacket *pkt);

static void addStream(OutputStream *ost, AVFormatContext *formatContext,
 const AVCodec **codec, enum AVCodecID codec_id,
 int width, int height, int fps);

static AVFrame *allocFrame(enum AVPixelFormat pix_fmt, int width, int height);

static void openVideo(AVFormatContext *formatContext, const AVCodec *codec,
 OutputStream *ost, AVDictionary *opt_arg);

static AVFrame *getVideoFrame(OutputStream *ost,
 const std::vector<glubyte>& pixels,
 int duration);

static int writeVideoFrame(AVFormatContext *formatContext,
 OutputStream *ost,
 const std::vector<glubyte>& pixels,
 int duration);

static void closeStream(AVFormatContext *formatContext, OutputStream *ost);

static void fillRGBImage(AVFrame *frame, int width, int height,
 const std::vector<glubyte>& pixels);

#ifdef av_err2str
#undef av_err2str
#include <string>
av_always_inline std::string av_err2string(int errnum) {
 char str[AV_ERROR_MAX_STRING_SIZE];
 return av_make_error_string(str, AV_ERROR_MAX_STRING_SIZE, errnum);
}
#define av_err2str(err) av_err2string(err).c_str()
#endif // av_err2str

FFmpeg::FFmpeg(int width, int height, int fps, const char *fileName)
    : videoStream{ 0 }
    , formatContext{ nullptr } {
    const AVOutputFormat *outputFormat;
    const AVCodec *videoCodec{ nullptr };
    AVDictionary *opt{ nullptr };
    int ret{ 0 };

    av_dict_set(&opt, "crf", "17", 0);

    /* Allocate the output media context. */
    avformat_alloc_output_context2(&this->formatContext, nullptr, nullptr, fileName);
    if (!this->formatContext) {
        std::cout << "Could not deduce output format from file extension: using MPEG." << std::endl;
        avformat_alloc_output_context2(&this->formatContext, nullptr, "mpeg", fileName);

        if (!this->formatContext)
            exit(-14);
    }

    outputFormat = this->formatContext->oformat;

    /* Add the video stream using the default format codecs
     * and initialize the codecs. */
    if (outputFormat->video_codec == AV_CODEC_ID_NONE) {
        std::cout << "The output format doesn't have a default video codec." << std::endl;
        exit(-15);
    }

    addStream(
        &this->videoStream,
        this->formatContext,
        &videoCodec,
        outputFormat->video_codec,
        width,
        height,
        fps
    );
    openVideo(this->formatContext, videoCodec, &this->videoStream, opt);
    av_dump_format(this->formatContext, 0, fileName, 1);

    /* Open the output file, if needed. */
    if (!(outputFormat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&this->formatContext->pb, fileName, AVIO_FLAG_WRITE);
        if (ret < 0) {
            std::cout << "Could not open '" << fileName << "': " << av_err2str(ret) << std::endl;
            exit(-16);
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(this->formatContext, &opt);
    if (ret < 0) {
        std::cout << "Error occurred when opening output file: " << av_err2str(ret) << std::endl;
        exit(-17);
    }

    av_dict_free(&opt);
}

FFmpeg::~FFmpeg() {
    if (this->formatContext) {
        /* Close the codec and free the per-stream resources. */
        closeStream(this->formatContext, &this->videoStream);

        if (!(this->formatContext->oformat->flags & AVFMT_NOFILE)) {
            /* Close the output file. */
            avio_closep(&this->formatContext->pb);
        }

        /* Free the muxer context. */
        avformat_free_context(this->formatContext);
    }
}

void FFmpeg::Record(
    const std::vector<GLubyte>& pixels,
    unsigned frameIndex,
    int duration,
    bool isLastIndex
) {
    static bool encodeVideo{ true };
    if (encodeVideo)
        encodeVideo = !writeVideoFrame(this->formatContext,
                                       &this->videoStream,
                                       pixels,
                                       duration);

    if (isLastIndex) {
        av_write_trailer(this->formatContext);
        encodeVideo = false;
    }
}
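
/* For reference, the class is driven roughly like this from my render loop.
 * This is a simplified sketch: the resolution and renderFrame() are
 * placeholders, not the actual code.
 *
 *     FFmpeg recorder{ 1280, 720, 60, "output.mp4" };
 *     for (unsigned i{ 0 }; i < 300; ++i) {
 *         std::vector<GLubyte> pixels{ renderFrame(i) }; // RGB24, 3 bytes per pixel
 *         recorder.Record(pixels, i, 5, i == 299);       // duration = 5 s; last call writes the trailer
 *     }
 */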

int writeFrame(AVFormatContext *fmt_ctx, AVCodecContext *c,
               AVStream *st, AVFrame *frame, AVPacket *pkt) {
    int ret;

    /* Send the frame to the encoder. */
    ret = avcodec_send_frame(c, frame);
    if (ret < 0) {
        std::cout << "Error sending a frame to the encoder: " << av_err2str(ret) << std::endl;
        exit(-2);
    }

    while (ret >= 0) {
        ret = avcodec_receive_packet(c, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;
        else if (ret < 0) {
            std::cout << "Error encoding a frame: " << av_err2str(ret) << std::endl;
            exit(-3);
        }

        /* Rescale output packet timestamp values from codec to stream timebase. */
        av_packet_rescale_ts(pkt, c->time_base, st->time_base);
        pkt->stream_index = st->index;

        /* Write the compressed frame to the media file. */
        ret = av_interleaved_write_frame(fmt_ctx, pkt);
        /* pkt is now blank (av_interleaved_write_frame() takes ownership of
         * its contents and resets pkt), so no unreferencing is necessary.
         * This would be different if one used av_write_frame(). */
        if (ret < 0) {
            std::cout << "Error while writing output packet: " << av_err2str(ret) << std::endl;
            exit(-4);
        }
    }

    return ret == AVERROR_EOF ? 1 : 0;
}

void addStream(OutputStream *ost, AVFormatContext *formatContext,
               const AVCodec **codec, enum AVCodecID codec_id,
               int width, int height, int fps) {
    AVCodecContext *c;

    /* Find the encoder. */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        std::cout << "Could not find encoder for " << avcodec_get_name(codec_id) << "." << std::endl;
        exit(-5);
    }

    ost->tmpPkt = av_packet_alloc();
    if (!ost->tmpPkt) {
        std::cout << "Could not allocate AVPacket." << std::endl;
        exit(-6);
    }

    ost->st = avformat_new_stream(formatContext, nullptr);
    if (!ost->st) {
        std::cout << "Could not allocate stream." << std::endl;
        exit(-7);
    }

    ost->st->id = formatContext->nb_streams - 1;
    c = avcodec_alloc_context3(*codec);
    if (!c) {
        std::cout << "Could not alloc an encoding context." << std::endl;
        exit(-8);
    }
    ost->enc = c;

    switch ((*codec)->type) {
    case AVMEDIA_TYPE_VIDEO:
        c->codec_id = codec_id;
        c->bit_rate = 6000000;
        /* Resolution must be a multiple of two. */
        c->width = width;
        c->height = height;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        ost->st->time_base = { 1, fps };
        c->time_base = ost->st->time_base;
        c->framerate = { fps, 1 };

        c->gop_size = 0; /* a GOP size of 0 makes every frame an intra frame */
        c->pix_fmt = AV_PIX_FMT_YUV420P;
        if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* Just for testing, we also add B-frames. */
            c->max_b_frames = 2;
        }
        if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            c->mb_decision = 2;
        }
        break;

    default:
        break;
    }

    /* Some formats want stream headers to be separate. */
    if (formatContext->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}

AVFrame *allocFrame(enum AVPixelFormat pix_fmt, int width, int height) {
    AVFrame *frame{ av_frame_alloc() };
    int ret;

    if (!frame)
        return nullptr;

    frame->format = pix_fmt;
    frame->width = width;
    frame->height = height;

    /* Allocate the buffers for the frame data. */
    ret = av_frame_get_buffer(frame, 0);
    if (ret < 0) {
        std::cout << "Could not allocate frame data." << std::endl;
        exit(-8);
    }

    return frame;
}

void openVideo(AVFormatContext *formatContext, const AVCodec *codec,
               OutputStream *ost, AVDictionary *opt_arg) {
    int ret;
    AVCodecContext *c{ ost->enc };
    AVDictionary *opt{ nullptr };

    av_dict_copy(&opt, opt_arg, 0);

    /* Open the codec. */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        std::cout << "Could not open video codec: " << av_err2str(ret) << std::endl;
        exit(-9);
    }

    /* Allocate and init a reusable frame. */
    ost->frame = allocFrame(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        std::cout << "Could not allocate video frame." << std::endl;
        exit(-10);
    }

    /* If the output format is not RGB24, then a temporary RGB24
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmpFrame = nullptr;
    if (c->pix_fmt != AV_PIX_FMT_RGB24) {
        ost->tmpFrame = allocFrame(AV_PIX_FMT_RGB24, c->width, c->height);
        if (!ost->tmpFrame) {
            std::cout << "Could not allocate temporary video frame." << std::endl;
            exit(-11);
        }
    }

    /* Copy the stream parameters to the muxer. */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        std::cout << "Could not copy the stream parameters." << std::endl;
        exit(-12);
    }
}

AVFrame *getVideoFrame(OutputStream *ost,
                       const std::vector<GLubyte>& pixels,
                       int duration) {
    AVCodecContext *c{ ost->enc };

    /* Check if we want to generate more frames: stop once nextPts,
     * expressed in the codec time base, passes `duration` seconds. */
    if (av_compare_ts(ost->nextPts, c->time_base,
                      duration, { 1, 1 }) > 0) {
        return nullptr;
    }

    /* When we pass a frame to the encoder, it may keep a reference to it
     * internally; make sure we do not overwrite it here. */
    if (av_frame_make_writable(ost->frame) < 0) {
        std::cout << "It wasn't possible to make the frame writable." << std::endl;
        exit(-12);
    }

    if (c->pix_fmt != AV_PIX_FMT_RGB24) {
        /* As we only generate an RGB24 picture, we must convert it
         * to the codec pixel format if needed. */
        if (!ost->swsContext) {
            ost->swsContext = sws_getContext(c->width, c->height,
                                             AV_PIX_FMT_RGB24,
                                             c->width, c->height,
                                             c->pix_fmt,
                                             SWS_BICUBIC, nullptr, nullptr, nullptr);
            if (!ost->swsContext) {
                std::cout << "Could not initialize the conversion context." << std::endl;
                exit(-13);
            }
        }

        fillRGBImage(ost->tmpFrame, c->width, c->height, pixels);
        sws_scale(ost->swsContext, (const uint8_t * const *) ost->tmpFrame->data,
                  ost->tmpFrame->linesize, 0, c->height, ost->frame->data,
                  ost->frame->linesize);
    } else {
        fillRGBImage(ost->frame, c->width, c->height, pixels);
    }

    ost->frame->pts = ost->nextPts++;

    return ost->frame;
}

int writeVideoFrame(AVFormatContext *formatContext,
                    OutputStream *ost,
                    const std::vector<GLubyte>& pixels,
                    int duration) {
    return writeFrame(formatContext,
                      ost->enc,
                      ost->st,
                      getVideoFrame(ost, pixels, duration),
                      ost->tmpPkt);
}

void closeStream(AVFormatContext *formatContext, OutputStream *ost) {
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmpFrame);
    av_packet_free(&ost->tmpPkt);
    sws_freeContext(ost->swsContext);
}

static void fillRGBImage(AVFrame *frame, int width, int height,
                         const std::vector<GLubyte>& pixels) {
    /* Copy the pixel data into the frame row by row, honoring the
     * frame's line size (which may include padding). */
    int inputLineSize{ 3 * width }; // 3 bytes per pixel for RGB
    for (int y{ 0 }; y < height; ++y) {
        memcpy(frame->data[0] + y * frame->linesize[0],
               pixels.data() + y * inputLineSize,
               inputLineSize);
    }
}
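
In case it helps: below is a small standalone checker, separate from the class above, that uses plain libavformat calls to count the video packets actually muxed into the container. "output.mp4" is a placeholder for whatever fileName the recorder was given. If all 300 frames were muxed, it should print 300 packets and a duration near 5 s.

#include <iostream>

extern "C" {
#include <libavformat/avformat.h>
}

int main() {
    AVFormatContext *ic{ nullptr };

    /* "output.mp4" is a placeholder for the recorded file. */
    if (avformat_open_input(&ic, "output.mp4", nullptr, nullptr) < 0)
        return 1;
    if (avformat_find_stream_info(ic, nullptr) < 0)
        return 1;

    int videoIndex{ av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0) };
    if (videoIndex < 0)
        return 1;

    /* Count the demuxed video packets: one packet per encoded frame here. */
    AVPacket *pkt{ av_packet_alloc() };
    long packets{ 0 };
    while (av_read_frame(ic, pkt) >= 0) {
        if (pkt->stream_index == videoIndex)
            ++packets;
        av_packet_unref(pkt);
    }
    av_packet_free(&pkt);

    AVStream *st{ ic->streams[videoIndex] };
    std::cout << "video packets: " << packets << std::endl;
    if (st->duration != AV_NOPTS_VALUE)
        std::cout << "stream duration: "
                  << st->duration * av_q2d(st->time_base) << " s" << std::endl;

    avformat_close_input(&ic);
    return 0;
}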