
Other articles (62)
-
Sites built with MediaSPIP
2 May 2011. This page presents some of the sites running on MediaSPIP.
You can of course add your own via the form at the bottom of the page. -
HTML5 audio and video support
13 April 2011. MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
The MediaSPIP player used has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
For older browsers the Flowplayer flash fallback is used.
MediaSPIP allows for media playback on major mobile platforms with the above (...) -
Libraries and binaries specific to video and audio processing
31 January 2010. The following software and libraries are used by SPIPmotion in one way or another.
Required binaries: FFmpeg: the main encoder, able to transcode almost any type of video or audio file into formats playable on the Internet (see this tutorial for its installation); Oggz-tools: inspection tools for Ogg files; Mediainfo: extracts information from most video and audio formats;
Optional, complementary binaries: flvtool2: (...)
On other sites (6460)
-
Revision 686b99741c: Removing vp9_invtrans.{c, h} files. Moving single function from vp9_invtrans.c
18 June 2013, by Dmitry Kovalev
Changed paths:
Delete /vp9/common/vp9_invtrans.c
Delete /vp9/common/vp9_invtrans.h
Modify /vp9/decoder/vp9_decodframe.c
Modify /vp9/encoder/vp9_encodeframe.c
Modify /vp9/encoder/vp9_encodeintra.c
Modify /vp9/encoder/vp9_encodemb.c
Modify /vp9/vp9_common.mk
Removing vp9_invtrans.{c,h} files. Moving single function from vp9_invtrans.c to vp9_encodemb.c.
Change-Id: I26bf6bb90de342a3036c0dbfba78a7dd75a61fe7
-
FFmpeg: cannot play MPEG4 video encoded from images. Duration and bitrate undefined
17 June 2013, by KaiK. I've been trying to put an H.264 video stream created from images into an MPEG4 container. I can generate the video stream from the images successfully, but I must be doing something wrong when muxing it into the container, because no player can play the result except ffplay, which plays the video to the end and then freezes on the last frame forever.
ffplay cannot identify the duration or the bitrate, so I suppose it might be an issue related to dts and pts, but I've searched for how to solve it without success.
Here's the ffplay output:
~$ ffplay testContainer.mp4
ffplay version git-2012-01-31-c673671 Copyright (c) 2003-2012 the FFmpeg developers
built on Feb 7 2012 20:32:12 with gcc 4.4.3
configuration: --enable-gpl --enable-version3 --enable-nonfree --enable-postproc --enable- libfaac --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libtheora --enable-libvorbis --enable-libx264 --enable-libxvid --enable-x11grab --enable-libvpx --enable-libmp3lame --enable-debug=3
libavutil 51. 36.100 / 51. 36.100
libavcodec 54. 0.102 / 54. 0.102
libavformat 54. 0.100 / 54. 0.100
libavdevice 53. 4.100 / 53. 4.100
libavfilter 2. 60.100 / 2. 60.100
libswscale 2. 1.100 / 2. 1.100
libswresample 0. 6.100 / 0. 6.100
libpostproc 52. 0.100 / 52. 0.100
[h264 @ 0xa4849c0] max_analyze_duration 5000000 reached at 5000000
[h264 @ 0xa4849c0] Estimating duration from bitrate, this may be inaccurate
Input #0, h264, from 'testContainer.mp4':
Duration: N/A, bitrate: N/A
Stream #0:0: Video: h264 (High), yuv420p, 512x512, 25 fps, 25 tbr, 1200k tbn, 50 tbc
2.74 A-V: 0.000 fd= 0 aq= 0KB vq= 160KB sq= 0B f=0/0 0/0
Structure
My code is C++, so I have a class that handles all the encoding, and a main that initializes it, passes in some images in a loop, and finally signals the end of the process, as follows:
int main (int argc, const char * argv[])
{
MyVideoEncoder* videoEncoder = new MyVideoEncoder(512, 512, 512, 512, "output/testContainer.mp4", 25, 20);
if(!videoEncoder->initWithCodec(MyVideoEncoder::H264))
{
std::cout << "something really bad happened. Exit!!" << std::endl;
exit(-1);
}
/* encode 1 second of video */
for(int i=0;i<228;i++) {
std::stringstream filepath;
filepath << "input2/image" << i << ".jpg";
videoEncoder->encodeFrameFromJPG(const_cast<char*>(filepath.str().c_str()));
}
videoEncoder->endEncoding();
}
Hints
I've seen a lot of examples of decoding one video and encoding it into another, but no working example of muxing a video from scratch, so I'm not sure how to handle the pts and dts packet values. That's why I suspect the issue must be in the following method:
bool MyVideoEncoder::encodeImageAsFrame(){
bool res = false;
pTempFrame->pts = frameCount * frameRate * 90; //90Hz by the standard for PTS-values
frameCount++;
/* encode the image */
out_size = avcodec_encode_video(pVideoStream->codec, outbuf, outbuf_size, pTempFrame);
if (out_size > 0) {
AVPacket pkt;
av_init_packet(&pkt);
pkt.pts = pkt.dts = 0;
if (pVideoStream->codec->coded_frame->pts != AV_NOPTS_VALUE) {
pkt.pts = av_rescale_q(pVideoStream->codec->coded_frame->pts,
pVideoStream->codec->time_base, pVideoStream->time_base);
pkt.dts = pTempFrame->pts;
}
if (pVideoStream->codec->coded_frame->key_frame) {
pkt.flags |= AV_PKT_FLAG_KEY;
}
pkt.stream_index = pVideoStream->index;
pkt.data = outbuf;
pkt.size = out_size;
res = (av_interleaved_write_frame(pFormatContext, &pkt) == 0);
}
return res;
}
Any help or insight would be appreciated. Thanks in advance!
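For what it's worth, below is a minimal, untested sketch of the timestamp handling I would expect from my reading of the docs, assuming the codec time_base of 1/frameRate set in AddVideoStream, and assuming no B-frames (the max_b_frames = 16 set below would need real dts handling from the encoder). The idea is to count frames in pTempFrame->pts and let av_rescale_q convert to the stream time_base, instead of multiplying by 90:
/* Hypothetical variant of encodeImageAsFrame() -- a sketch, not a confirmed fix */
bool MyVideoEncoder::encodeImageAsFrame(){
    bool res = false;
    /* pts counts frames, in codec time_base units (time_base = 1/frameRate) */
    pTempFrame->pts = frameCount++;
    /* encode the image */
    out_size = avcodec_encode_video(pVideoStream->codec, outbuf, outbuf_size, pTempFrame);
    if (out_size > 0) {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.pts = pkt.dts = AV_NOPTS_VALUE;
        if (pVideoStream->codec->coded_frame->pts != AV_NOPTS_VALUE) {
            /* rescale from the codec time_base to the stream time_base chosen by the muxer */
            pkt.pts = av_rescale_q(pVideoStream->codec->coded_frame->pts,
                    pVideoStream->codec->time_base, pVideoStream->time_base);
            /* with no B-frames, decode order equals presentation order */
            pkt.dts = pkt.pts;
        }
        if (pVideoStream->codec->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index = pVideoStream->index;
        pkt.data = outbuf;
        pkt.size = out_size;
        res = (av_interleaved_write_frame(pFormatContext, &pkt) == 0);
    }
    return res;
}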
P.S. The rest of the code, where the configuration is done, is as follows:
// MyVideoEncoder.cpp
#include "MyVideoEncoder.h"
#include "Image.hpp"
#include <cstring>
#include <sstream>
#include
#define MAX_AUDIO_PACKET_SIZE (128 * 1024)
MyVideoEncoder::MyVideoEncoder(int inwidth, int inheight,
int outwidth, int outheight, char* fileOutput, int framerate,
int compFactor) {
inWidth = inwidth;
inHeight = inheight;
outWidth = outwidth;
outHeight = outheight;
pathToMovie = fileOutput;
frameRate = framerate;
compressionFactor = compFactor;
frameCount = 0;
}
MyVideoEncoder::~MyVideoEncoder() {
}
bool MyVideoEncoder::initWithCodec(
MyVideoEncoder::encoderType type) {
if (!initializeEncoder(type))
return false;
if (!configureFrames())
return false;
return true;
}
bool MyVideoEncoder::encodeFrameFromJPG(char* filepath) {
setJPEGImage(filepath);
return encodeImageAsFrame();
}
bool MyVideoEncoder::encodeDelayedFrames(){
bool res = false;
while(out_size > 0)
{
pTempFrame->pts = frameCount * frameRate * 90; //90Hz by the standard for PTS-values
frameCount++;
out_size = avcodec_encode_video(pVideoStream->codec, outbuf, outbuf_size, NULL);
if (out_size > 0)
{
AVPacket pkt;
av_init_packet(&pkt);
pkt.pts = pkt.dts = 0;
if (pVideoStream->codec->coded_frame->pts != AV_NOPTS_VALUE) {
pkt.pts = av_rescale_q(pVideoStream->codec->coded_frame->pts,
pVideoStream->codec->time_base, pVideoStream->time_base);
pkt.dts = pTempFrame->pts;
}
if (pVideoStream->codec->coded_frame->key_frame) {
pkt.flags |= AV_PKT_FLAG_KEY;
}
pkt.stream_index = pVideoStream->index;
pkt.data = outbuf;
pkt.size = out_size;
res = (av_interleaved_write_frame(pFormatContext, &pkt) == 0);
}
}
return res;
}
void MyVideoEncoder::endEncoding() {
encodeDelayedFrames();
closeEncoder();
}
bool MyVideoEncoder::setJPEGImage(char* imgFilename) {
Image* rgbImage = new Image();
rgbImage->read_jpeg_image(imgFilename);
bool ret = setImageFromRGBArray(rgbImage->get_data());
delete rgbImage;
return ret;
}
bool MyVideoEncoder::setImageFromRGBArray(unsigned char* data) {
memcpy(pFrameRGB->data[0], data, 3 * inWidth * inHeight);
int ret = sws_scale(img_convert_ctx, pFrameRGB->data, pFrameRGB->linesize,
0, inHeight, pTempFrame->data, pTempFrame->linesize);
pFrameRGB->pts++;
if (ret)
return true;
else
return false;
}
bool MyVideoEncoder::initializeEncoder(encoderType type) {
av_register_all();
pTempFrame = avcodec_alloc_frame();
pTempFrame->pts = 0;
pOutFormat = NULL;
pFormatContext = NULL;
pVideoStream = NULL;
pAudioStream = NULL;
bool res = false;
// Create format
switch (type) {
case MyVideoEncoder::H264:
pOutFormat = av_guess_format("h264", NULL, NULL);
break;
case MyVideoEncoder::MPEG1:
pOutFormat = av_guess_format("mpeg", NULL, NULL);
break;
default:
pOutFormat = av_guess_format(NULL, pathToMovie.c_str(), NULL);
break;
}
if (!pOutFormat) {
pOutFormat = av_guess_format(NULL, pathToMovie.c_str(), NULL);
if (!pOutFormat) {
std::cout << "output format not found" << std::endl;
return false;
}
}
// allocate context
pFormatContext = avformat_alloc_context();
if(!pFormatContext)
{
std::cout << "cannot alloc format context" << std::endl;
return false;
}
pFormatContext->oformat = pOutFormat;
memcpy(pFormatContext->filename, pathToMovie.c_str(), min( (const int) pathToMovie.length(), (const int)sizeof(pFormatContext->filename)));
//Add video and audio streams
pVideoStream = AddVideoStream(pFormatContext,
pOutFormat->video_codec);
// Set the output parameters
av_dump_format(pFormatContext, 0, pathToMovie.c_str(), 1);
// Open Video stream
if (pVideoStream) {
res = openVideo(pFormatContext, pVideoStream);
}
if (res && !(pOutFormat->flags & AVFMT_NOFILE)) {
if (avio_open(&pFormatContext->pb, pathToMovie.c_str(), AVIO_FLAG_WRITE) < 0) {
res = false;
std::cout << "Cannot open output file" << std::endl;
}
}
if (res) {
avformat_write_header(pFormatContext,NULL);
}
else{
freeMemory();
std::cout << "Cannot init encoder" << std::endl;
}
return res;
}
AVStream *MyVideoEncoder::AddVideoStream(AVFormatContext *pContext, CodecID codec_id)
{
AVCodecContext *pCodecCxt = NULL;
AVStream *st = NULL;
st = avformat_new_stream(pContext, NULL);
if (!st)
{
std::cout << "Cannot add new video stream" << std::endl;
return NULL;
}
st->id = 0;
pCodecCxt = st->codec;
pCodecCxt->codec_id = (CodecID)codec_id;
pCodecCxt->codec_type = AVMEDIA_TYPE_VIDEO;
pCodecCxt->frame_number = 0;
// Put sample parameters.
pCodecCxt->bit_rate = outWidth * outHeight * 3 * frameRate/ compressionFactor;
pCodecCxt->width = outWidth;
pCodecCxt->height = outHeight;
/* frames per second */
pCodecCxt->time_base= (AVRational){1,frameRate};
/* pixel format must be YUV */
pCodecCxt->pix_fmt = PIX_FMT_YUV420P;
if (pCodecCxt->codec_id == CODEC_ID_H264)
{
av_opt_set(pCodecCxt->priv_data, "preset", "slow", 0);
av_opt_set(pCodecCxt->priv_data, "vprofile", "baseline", 0);
pCodecCxt->max_b_frames = 16;
}
if (pCodecCxt->codec_id == CODEC_ID_MPEG1VIDEO)
{
pCodecCxt->mb_decision = 1;
}
if(pContext->oformat->flags & AVFMT_GLOBALHEADER)
{
pCodecCxt->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
pCodecCxt->coder_type = 1; // coder = 1
pCodecCxt->flags|=CODEC_FLAG_LOOP_FILTER; // flags=+loop
pCodecCxt->me_range = 16; // me_range=16
pCodecCxt->gop_size = 50; // g=250
pCodecCxt->keyint_min = 25; // keyint_min=25
return st;
}
bool MyVideoEncoder::openVideo(AVFormatContext *oc, AVStream *pStream)
{
AVCodec *pCodec;
AVCodecContext *pContext;
pContext = pStream->codec;
// Find the video encoder.
pCodec = avcodec_find_encoder(pContext->codec_id);
if (!pCodec)
{
std::cout << "Cannot found video codec" << std::endl;
return false;
}
// Open the codec.
if (avcodec_open2(pContext, pCodec, NULL) < 0)
{
std::cout << "Cannot open video codec" << std::endl;
return false;
}
return true;
}
bool MyVideoEncoder::configureFrames() {
/* alloc image and output buffer */
outbuf_size = outWidth*outHeight*3;
outbuf = (uint8_t*) malloc(outbuf_size);
av_image_alloc(pTempFrame->data, pTempFrame->linesize, pVideoStream->codec->width,
pVideoStream->codec->height, pVideoStream->codec->pix_fmt, 1);
//Alloc RGB temp frame
pFrameRGB = avcodec_alloc_frame();
if (pFrameRGB == NULL)
return false;
avpicture_alloc((AVPicture *) pFrameRGB, PIX_FMT_RGB24, inWidth, inHeight);
pFrameRGB->pts = 0;
//Set SWS context to convert from RGB images to YUV images
if (img_convert_ctx == NULL) {
img_convert_ctx = sws_getContext(inWidth, inHeight, PIX_FMT_RGB24,
outWidth, outHeight, pVideoStream->codec->pix_fmt, /*SWS_BICUBIC*/
SWS_FAST_BILINEAR, NULL, NULL, NULL);
if (img_convert_ctx == NULL) {
fprintf(stderr, "Cannot initialize the conversion context!\n");
return false;
}
}
return true;
}
void MyVideoEncoder::closeEncoder() {
av_write_frame(pFormatContext, NULL);
av_write_trailer(pFormatContext);
freeMemory();
}
void MyVideoEncoder::freeMemory()
{
bool res = true;
if (pFormatContext)
{
// close video stream
if (pVideoStream)
{
closeVideo(pFormatContext, pVideoStream);
}
// Free the streams.
for(size_t i = 0; i < pFormatContext->nb_streams; i++)
{
av_freep(&pFormatContext->streams[i]->codec);
av_freep(&pFormatContext->streams[i]);
}
if (!(pFormatContext->flags & AVFMT_NOFILE) && pFormatContext->pb)
{
avio_close(pFormatContext->pb);
}
// Free the stream.
av_free(pFormatContext);
pFormatContext = NULL;
}
}
void MyVideoEncoder::closeVideo(AVFormatContext *pContext, AVStream *pStream)
{
avcodec_close(pStream->codec);
if (pTempFrame)
{
if (pTempFrame->data)
{
av_free(pTempFrame->data[0]);
pTempFrame->data[0] = NULL;
}
av_free(pTempFrame);
pTempFrame = NULL;
}
if (pFrameRGB)
{
if (pFrameRGB->data)
{
av_free(pFrameRGB->data[0]);
pFrameRGB->data[0] = NULL;
}
av_free(pFrameRGB);
pFrameRGB = NULL;
}
}