
Recherche avancée
Autres articles (40)
-
La file d’attente de SPIPmotion
28 novembre 2010, par — Une file d’attente stockée dans la base de données
Lors de son installation, SPIPmotion crée une nouvelle table dans la base de donnée intitulée spip_spipmotion_attentes.
Cette nouvelle table est constituée des champs suivants : id_spipmotion_attente, l’identifiant numérique unique de la tâche à traiter ; id_document, l’identifiant numérique du document original à encoder ; id_objet l’identifiant unique de l’objet auquel le document encodé devra être attaché automatiquement ; objet, le type d’objet auquel (...) -
Publier sur MédiaSpip
13 juin 2013 — Puis-je poster des contenus à partir d’une tablette iPad ?
Oui, si votre Médiaspip installé est à la version 0.2 ou supérieure. Contacter au besoin l’administrateur de votre MédiaSpip pour le savoir -
Contribute to documentation
13 avril 2011 — Documentation is vital to the development of improved technical capabilities.
MediaSPIP welcomes documentation by users as well as developers - including : critique of existing features and functions articles contributed by developers, administrators, content producers and editors screenshots to illustrate the above translations of existing documentation into other languages
To contribute, register to the project users’ mailing (...)
Sur d’autres sites (7950)
-
How to reduce time while writing to output stream
9 février 2021, par Summit — I am streaming the render output of an OpenGL application using mpegts. The issue that I am facing is that the time taken to encode the frame is quite long.


The application renders at 60 fps with frame size of 1920 X 1080 , the frame data of the application is pushed to a std::queue.


This is the process for ffmpeg.


I initialize the stream like this.

 streamerUpd.InitUPD("udp://127.0.0.1:1234", 1920, 1080, rings_);

// Initialise the UDP/MPEG-TS output: allocates the muxer context, adds the
// video stream, writes the container header and starts the output thread.
// url    : destination, e.g. "udp://127.0.0.1:1234".
// width  : source frame width in pixels.
// height : source frame height in pixels.
// rings  : shared ring buffer the render thread fills with raw frames.
// Returns 0 on success, 1 on failure (context allocation or header write).
int StreamUPD::InitUPD(const char* url, int width, int height, std::shared_ptr<ringbuffer2> rings)
{

 rings_ = rings;
 width_ = width;
 height_ = height;
 filename = url;
 int ret;
 // 1316 = 7 * 188-byte TS packets: the usual UDP payload size for MPEG-TS.
 av_dict_set(&opt, "pkt_size", "1316", 0);


 // Force the "mpegts" muxer regardless of the URL extension.
 avformat_alloc_output_context2(&oc, nullptr, "mpegts", filename);
 if (!oc) {
 return 1;
 }

 fmt = oc->oformat;
 /* Add the audio and video streams using the default format codecs
 * and initialize the codecs. */
 if (fmt->video_codec != AV_CODEC_ID_NONE) {
 add_stream(&video_st, oc, &video_codec, fmt->video_codec);
 have_video = 1;
 encode_video = 1;
 }

 /* Write the stream header, if any. */
 ret = avformat_write_header(oc, &opt);
 if (ret < 0) {
 fprintf(stderr, "Error occurred when opening output file: %s\n",
 av_err2str(ret));
 return 1;
 }
 // Packets are drained on a dedicated thread; output_result is assumed to
 // loop on the ring buffer (defined elsewhere in this class).
 thr = std::thread(&StreamUPD::output_result, this);
 return 0;
}
</ringbuffer2>


////////////////////////////////////////////////////////////////////////////////////////


// Add the output stream


// Create one output stream on the muxer and configure its encoder context.
// ost      : bundle that receives the new AVStream and AVCodecContext.
// oc       : the output (muxer) format context.
// codec    : out-parameter, receives the encoder found for codec_id.
// codec_id : encoder to look up (the muxer's default video codec here).
// Exits the process on any allocation/lookup failure.
void StreamUPD::add_stream(OutputStream* ost, AVFormatContext* oc, AVCodec** codec, enum AVCodecID codec_id)
{
 AVCodecContext* c;
 int i; // NOTE(review): unused local, kept as-is.
 /* find the encoder */
 *codec = avcodec_find_encoder(codec_id);
 if (!(*codec)) {
 fprintf(stderr, "Could not find encoder for '%s'\n",
 avcodec_get_name(codec_id));
 exit(1);
 }
 ost->st = avformat_new_stream(oc, NULL);
 if (!ost->st) {
 fprintf(stderr, "Could not allocate stream\n");
 exit(1);
 }
 // Streams are appended, so the new stream's index is nb_streams - 1.
 ost->st->id = oc->nb_streams - 1;
 c = avcodec_alloc_context3(*codec);
 if (!c) {
 fprintf(stderr, "Could not alloc an encoding context\n");
 exit(1);
 }
 ost->enc = c;
 switch ((*codec)->type) {
 case AVMEDIA_TYPE_VIDEO:
 c->codec_id = codec_id;
 c->bit_rate = 400000;

 /* Resolution must be a multiple of two. */
 c->width = width_;
 c->height = height_;
 /* timebase: This is the fundamental unit of time (in seconds) in terms
 * of which frame timestamps are represented. For fixed-fps content,
 * timebase should be 1/framerate and timestamp increments should be
 * identical to 1. */
 ost->st->time_base = { 1, STREAM_FRAME_RATE };
 c->time_base = ost->st->time_base;
 c->gop_size = 12; /* emit one intra frame every twelve frames at most */
 c->pix_fmt = STREAM_PIX_FMT;

 if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
 /* just for testing, we also add B-frames */
 qDebug() << "This is MPEG2VIDEO Frame";
 c->max_b_frames = 2;

 }
 if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
 /* Needed to avoid using macroblocks in which some coeffs overflow.
 * This does not happen with normal video, it just happens here as
 * the motion of the chroma plane does not match the luma plane. */
 c->mb_decision = 2;
 }
 break;
 default:
 break;
 }
 /* Some formats want stream headers to be separate. */
 if (oc->oformat->flags & AVFMT_GLOBALHEADER)
 c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}



//////////////////////////////////////////////////////////////////////////////////


// Open the video


// Open the encoder configured by add_stream(), allocate the reusable output
// frame (plus an intermediate YUV420P frame when the encoder format differs),
// and copy the codec parameters onto the muxer stream.
// Exits the process on any failure.
void StreamUPD::open_video(AVFormatContext* oc, AVCodec* codec, OutputStream* ost, AVDictionary* opt_arg)
 {
 int ret;
 AVCodecContext* c = ost->enc;
 AVDictionary* opt = NULL;
 // Work on a copy so the caller's dictionary is left untouched.
 av_dict_copy(&opt, opt_arg, 0);
 /* open the codec */
 ret = avcodec_open2(c, codec, &opt);
 av_dict_free(&opt);
 if (ret < 0) {
 fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
 exit(1);
 }
 /* allocate and init a re-usable frame */
 ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
 if (!ost->frame) {
 fprintf(stderr, "Could not allocate video frame\n");
 exit(1);
 }
 /* If the output format is not YUV420P, then a temporary YUV420P
 * picture is needed too. It is then converted to the required
 * output format. */
 ost->tmp_frame = NULL;
 if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
 ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
 if (!ost->tmp_frame) {
 fprintf(stderr, "Could not allocate temporary picture\n");
 exit(1);
 }
 }
 /* copy the stream parameters to the muxer */
 ret = avcodec_parameters_from_context(ost->st->codecpar, c);
 if (ret < 0) {
 fprintf(stderr, "Could not copy the stream parameters\n");
 exit(1);
 }
 }



Once i have setup the ffmpeg output stream this is how i am streaming the data.


This function gets the frame data from the std::queue(pixelsQueue) and sends it for encoding.


// Pop one raw RGB frame from the pixel queue, convert it to the encoder's
// pixel format and hand it to write_frame() for encoding + muxing.
// Returns write_frame()'s result when a frame was processed, 1 otherwise
// (empty queue). Exits the process on conversion-context failures.
int StreamUPD::stream_video_frame()
{
 ost = &video_st;
 c = ost->enc;

 /* when we pass a frame to the encoder, it may keep a reference to it
 * internally; make sure we do not overwrite it here */
 if (av_frame_make_writable(ost->frame) < 0)
 exit(1);
 // Lazily create the RGB24 -> encoder-format scaler on first use.
 if (!ost->sws_ctx) {
 ost->sws_ctx = sws_getContext(c->width, c->height,
 AV_PIX_FMT_RGB24,
 c->width, c->height,
 c->pix_fmt,
 SWS_FAST_BILINEAR, NULL, NULL, NULL);
 if (!ost->sws_ctx) {
 fprintf(stderr,
 "Could not initialize the conversion context\n");
 exit(1);
 }
 }
 // NOTE(review): purpose of this flag is not visible here; it is set even
 // when no frame is consumed — confirm intended semantics with the owner.
 finished_ = true;

 if (pixelsQueue.size() > 0) {
 if (pixelsQueue.pop(pixels)) {
 // NOTE(review): the popped value goes into `pixels`, but the data
 // passed to fill_yuv_image comes from `frame_data->pixels_` — verify
 // these refer to the same buffer, otherwise the popped frame is unused.
 fill_yuv_image(ost->sws_ctx, frame_data->pixels_.get(), ost->frame, c->width, c->height);
 ost->frame->pts = ost->next_pts++;
 return write_frame(oc, ost->enc, ost->st, ost->frame);
 }
 }
 return 1;
}



Writing the data to the output stream.


The function avcodec_receive_packet is the one that takes lot of time.


// Encode one frame and mux every packet the encoder has ready.
// fmt_ctx : output (muxer) context.
// c       : opened encoder context.
// st      : muxer stream the packets belong to.
// frame   : frame to encode; pass NULL to flush the encoder at EOF.
// Returns 1 once the encoder is fully drained (AVERROR_EOF), 0 otherwise.
// Exits the process on encode or mux errors.
//
// FIX: the original called avcodec_receive_packet() immediately after
// avcodec_send_frame() and checked that return code against the
// "Error sending a frame" message — a perfectly normal AVERROR(EAGAIN)
// (-11, encoder still buffering) aborted the program, and when the call
// DID produce a packet it was silently dropped and leaked. Only the
// send_frame() result is checked here; all packets are drained in the loop.
int StreamUPD::write_frame(AVFormatContext* fmt_ctx, AVCodecContext* c,
 AVStream* st, AVFrame* frame)
{
 int ret;

 // send the frame to the encoder (NULL enters flush mode)
 ret = avcodec_send_frame(c, frame);
 if (ret < 0) {
 fprintf(stderr, "Error sending a frame to the encoder: %s\n",
 av_err2str(ret));
 exit(1);
 }

 // Drain every packet the encoder can emit for the frames sent so far.
 while (ret >= 0) {
 AVPacket pkt = { 0 };
 ret = avcodec_receive_packet(c, &pkt);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 break; // needs more input / fully flushed — not an error
 else if (ret < 0) {
 fprintf(stderr, "Error encoding a frame: %s\n", av_err2str(ret));
 exit(1);
 }
 // rescale output packet timestamp values from codec to stream timebase
 av_packet_rescale_ts(&pkt, c->time_base, st->time_base);
 pkt.stream_index = st->index;
 // Write the compressed frame to the media file.
 ret = av_interleaved_write_frame(fmt_ctx, &pkt);
 av_packet_unref(&pkt);
 if (ret < 0) {
 fprintf(stderr, "Error while writing output packet: %s\n", av_err2str(ret));
 exit(1);
 }
 }
 return ret == AVERROR_EOF ? 1 : 0;
}



How can I reduce the output time while writing the frames to the stream?


Currently i push more frames in the buffer and the outputting speed is less so the buffer starts to run out of memory in some time.


-
Encoding .png images with h264 to a file on disk
19 février 2021, par xyfix — Can somebody help me to find out why I end up with a file on disk that is only 24 kb and not readable by VLC or so, while I send valid YUV images to the codec. I have added the .h and .cpp file. Up till "avcodec_receive_packet" everything seems to be OK. The function call "avcodec_send_frame" returns 0, so that must be OK, but "avcodec_receive_packet" returns -11. If I flush the encoder (currently commented) then "avcodec_receive_packet" returns 0 and I can see encoded data if I store it on disk. Also the input image to the encoder is correct (currently commented) and checked. I'm aiming for intra-frame encoding, so I should get the encoded frame data back, but I don't get anything back even if I send 24 images to it.


.h file


#ifndef MOVIECODEC_H
#define MOVIECODEC_H

#include 

extern "C"
{
 #include "Codec/include/libavcodec/avcodec.h"
 #include "Codec/include/libavdevice/avdevice.h"
 #include "Codec/include/libavformat/avformat.h"
 #include "Codec/include/libavutil/avutil.h"
 #include "Codec/include/libavutil/imgutils.h"
 #include "Codec/include/libswscale/swscale.h"
}


// Encodes a sequence of OpenCV images to an H.264 file on disk via FFmpeg.
// Construct with the target filename, feed frames with encodeImage(), and
// finish with close() (also invoked by the destructor).
class MovieCodec
{
public:

 // Opens the output file/muxer and writes the container header.
 MovieCodec(const char *filename);

 // Calls close().
 ~MovieCodec();

 // Converts `image` (RGB) to YUV420P and encodes/muxes one frame.
 void encodeImage( const cv::Mat& image );

 // Flushes the encoder and releases muxer/frame resources.
 void close();

private :

 // Creates the H.264 stream and configures the codec context.
 void add_stream();

 // Opens the encoder (avcodec_open2) on m_codecCtx.
 void openVideoCodec();

 // Encodes one frame and writes the packet(s) to the muxer.
 void write_video_frame(const cv::Mat& image);

 // Allocates m_frame and converts `image` into it.
 void createFrame( const cv::Mat& image );

private:

 static int s_frameCount;      // frames successfully written so far

 int m_timeVideo = 0;          // elapsed video time in seconds (truncated)

 std::string m_filename;       // output path

 FILE* m_file;                 // NOTE(review): opened in ctor but FFmpeg
                               // writes through its own AVIO handle —
                               // this FILE* appears unused for output.

 AVCodec* m_encoder = NULL;        // H.264 encoder (looked up in ctor)

 AVOutputFormat* m_outputFormat = NULL; // muxer format, taken from m_formatCtx

 AVFormatContext* m_formatCtx = NULL;   // output/muxer context

 AVCodecContext* m_codecCtx = NULL;     // encoder context

 AVStream* m_streamOut = NULL;          // the single video stream

 AVFrame* m_frame = NULL;               // reusable YUV frame (allocated lazily)

 AVPacket* m_packet = NULL;             // NOTE(review): never used in the
                                        // visible code.

};



.cpp file


#ifndef MOVIECODEC_CPP
#define MOVIECODEC_CPP

#include "moviecodec.h"


#define STREAM_DURATION 5.0
#define STREAM_FRAME_RATE 24
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */


static int sws_flags = SWS_BICUBIC;
int MovieCodec::s_frameCount = 0;

// Construct the encoder pipeline: allocate the muxer for `filename`, add the
// H.264 stream, open the codec, open the output file and write the header.
// Errors are reported to stderr and leave the object partially initialised.
MovieCodec::MovieCodec( const char* filename ) :
 m_filename( filename ),
 m_encoder( avcodec_find_encoder( AV_CODEC_ID_H264 ))
{
 av_log_set_level(AV_LOG_VERBOSE);

 int ret(0);

 // NOTE(review): this handle is never written to — FFmpeg opens its own
 // file via avio_open() below. Confirm whether m_file is needed at all.
 m_file = fopen( m_filename.c_str(), "wb");

 // allocate the output media context (format guessed from the filename,
 // since m_outputFormat is still NULL here)
 ret = avformat_alloc_output_context2( &m_formatCtx, m_outputFormat, NULL, m_filename.c_str());

 if (!m_formatCtx)
 return;

 m_outputFormat = m_formatCtx->oformat;

 // Add the video stream using H264 codec
 add_stream();

 // Open video codec and allocate the necessary encode buffers
 if (m_streamOut)
 openVideoCodec();

 av_dump_format( m_formatCtx, 0, m_filename.c_str(), 1);

 // Open the output media file, if needed
 if (!( m_outputFormat->flags & AVFMT_NOFILE))
 {
 ret = avio_open( &m_formatCtx->pb, m_filename.c_str(), AVIO_FLAG_WRITE);

 if (ret < 0)
 {
 char error[255];
 ret = av_strerror( ret, error, 255);
 fprintf(stderr, "Could not open '%s': %s\n", m_filename.c_str(), error);
 return ;
 }
 }
 else
 {
 // Muxer needs no file of its own; nothing further to set up here.
 return;
 }

 // Ask the muxer to flush after every packet (low-latency writes).
 m_formatCtx->flush_packets = 1;

 ret = avformat_write_header( m_formatCtx, NULL );

 if (ret < 0)
 {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Error occurred when opening output file: %s\n", error);
 return;
 }


 // NOTE(review): m_frame is still NULL at this point (it is first allocated
 // in createFrame()), so this branch never runs during construction.
 if ( m_frame )
 m_frame->pts = 0;
}



// Destructor: delegates all teardown (flush, file close, frees) to close().
MovieCodec::~MovieCodec()
{
 close();
}



// Encode one image as the next video frame and advance the frame pts.
// image : source picture (RGB data, dimensions must match the stream).
//
// FIX: the original line had an unbalanced parenthesis
//   m_timeVideo = (double)m_frame->pts) * av_q2d(...)
// which does not compile, and it dereferenced m_frame before the first frame
// was ever allocated (createFrame() only runs inside write_video_frame()),
// a guaranteed null dereference on the first call. Both are guarded here.
void MovieCodec::encodeImage(const cv::Mat &image)
{
 // Compute video time from the last added video frame (0 before the first).
 m_timeVideo = m_frame ? (double)m_frame->pts * av_q2d(m_streamOut->time_base) : 0;

 // Stop media if enough time
 if (!m_streamOut /*|| m_timeVideo >= STREAM_DURATION*/)
 return;

 // Add a video frame (allocates m_frame on first use via createFrame()).
 write_video_frame( image );

 // Increase frame pts according to time base
 if (m_frame)
 m_frame->pts += av_rescale_q(1, m_codecCtx->time_base, m_streamOut->time_base);
}


// Tear down the pipeline: flush the encoder, free the frame, close the
// output file and release the muxer context.
void MovieCodec::close()
{
 int ret( 0 );

 // Write media trailer
// if( m_formatCtx )
// ret = av_write_trailer( m_formatCtx );
 // NOTE(review): with av_write_trailer() commented out the container is
 // never finalised, which would explain an unplayable output file.

 /* flush the encoder */
 // NOTE(review): entering flush mode without subsequently draining packets
 // via avcodec_receive_packet() discards whatever the encoder had buffered.
 ret = avcodec_send_frame(m_codecCtx, NULL);

 /* Close each codec. */
 if ( m_streamOut )
 {
 av_free( m_frame->data[0]);
 av_free( m_frame );
 }

 if (!( m_outputFormat->flags & AVFMT_NOFILE))
 /* Close the output file. */
 ret = avio_close( m_formatCtx->pb);


 /* free the stream */
 avformat_free_context( m_formatCtx );

 // Flushes the FILE* opened in the constructor (never written through).
 fflush( m_file );
}


// Allocate m_frame as a YUV420P picture matching `image` and fill it by
// converting the image's RGB24 data with libswscale.
// image : source picture; its buffer is read but not modified.
//
// FIX: the original leaked the SwsContext on every call and leaked the
// previously allocated frame/buffer each time it was re-invoked; it also
// never checked sws_getContext() for failure.
void MovieCodec::createFrame( const cv::Mat& image )
{
 // Release the frame from the previous call, if any (was leaked before).
 if (m_frame)
 {
 av_freep(&m_frame->data[0]);
 av_frame_free(&m_frame);
 }

 /**
 * \note allocate frame
 */
 m_frame = av_frame_alloc();
 m_frame->format = STREAM_PIX_FMT;
 m_frame->width = image.cols();
 m_frame->height = image.rows();
 m_frame->pict_type = AV_PICTURE_TYPE_I;
 int ret = av_image_alloc(m_frame->data, m_frame->linesize, m_frame->width, m_frame->height, STREAM_PIX_FMT, 1);

 if (ret < 0)
 {
 return;
 }

 struct SwsContext* sws_ctx = sws_getContext((int)image.cols(), (int)image.rows(), AV_PIX_FMT_RGB24,
 (int)image.cols(), (int)image.rows(), STREAM_PIX_FMT, 0, NULL, NULL, NULL);

 if (!sws_ctx)
 {
 return;
 }

 const uint8_t* rgbData[1] = { (uint8_t* )image.getData() };
 int rgbLineSize[1] = { 3 * image.cols() }; // tightly packed RGB24 rows

 sws_scale(sws_ctx, rgbData, rgbLineSize, 0, image.rows(), m_frame->data, m_frame->linesize);

 // Conversion context is per-call here; free it (was leaked before).
 sws_freeContext(sws_ctx);
}


/* Add an output stream. */
void MovieCodec::add_stream()
{
 AVCodecID codecId = AV_CODEC_ID_H264;

 if (!( m_encoder ))
 {
 fprintf(stderr, "Could not find encoder for '%s'\n",
 avcodec_get_name(codecId));
 return;
 }

 // Get the stream for codec
 m_streamOut = avformat_new_stream(m_formatCtx, m_encoder);

 if (!m_streamOut) {
 fprintf(stderr, "Could not allocate stream\n");
 return;
 }

 m_streamOut->id = m_formatCtx->nb_streams - 1;

 m_codecCtx = avcodec_alloc_context3( m_encoder);

 m_streamOut->codecpar->codec_id = codecId;
 m_streamOut->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 m_streamOut->codecpar->bit_rate = 400000;
 m_streamOut->codecpar->width = 800;
 m_streamOut->codecpar->height = 640;
 m_streamOut->codecpar->format = STREAM_PIX_FMT;
 m_streamOut->codecpar->codec_tag = 0x31637661;
 m_streamOut->codecpar->video_delay = 0;
 m_streamOut->time_base = { 1, STREAM_FRAME_RATE };


 avcodec_parameters_to_context( m_codecCtx, m_streamOut->codecpar);
 
 m_codecCtx->gop_size = 0; /* emit one intra frame every twelve frames at most */
 m_codecCtx->max_b_frames = 0;
 m_codecCtx->time_base = { 1, STREAM_FRAME_RATE };
 m_codecCtx->framerate = { STREAM_FRAME_RATE, 1 };
 m_codecCtx->pix_fmt = STREAM_PIX_FMT;



 if (m_streamOut->codecpar->codec_id == AV_CODEC_ID_H264)
 {
 av_opt_set( m_codecCtx, "preset", "ultrafast", 0 );
 av_opt_set( m_codecCtx, "vprofile", "baseline", 0 );
 av_opt_set( m_codecCtx, "tune", "zerolatency", 0 );
 }

// /* Some formats want stream headers to be separate. */
// if (m_formatCtx->oformat->flags & AVFMT_GLOBALHEADER)
// m_codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;


}


void MovieCodec::openVideoCodec()
{
 int ret;

 /* open the codec */
 ret = avcodec_open2(m_codecCtx, m_encoder, NULL);

 if (ret < 0)
 {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Could not open video codec: %s\n", error);
 }
}



// Convert `image` into m_frame (via createFrame) and either store it raw in
// a packet (raw-video muxers) or run it through the H.264 encoder and write
// the resulting packet(s) to the muxer.
void MovieCodec::write_video_frame( const cv::Mat& image )
{
 int ret;

 createFrame( image );


 // NOTE(review): 0x0020 is the value of the old AVFMT_RAWPICTURE flag,
 // removed from recent FFmpeg — confirm it still matches this build.
 if (m_formatCtx->oformat->flags & 0x0020 )
 {
 /* Raw video case - directly store the picture in the packet */
 AVPacket pkt;
 av_init_packet(&pkt);

 pkt.flags |= AV_PKT_FLAG_KEY;
 pkt.stream_index = m_streamOut->index;
 pkt.data = m_frame->data[0];
 pkt.size = sizeof(AVPicture);

// ret = av_interleaved_write_frame(m_formatCtx, &pkt);
 ret = av_write_frame( m_formatCtx, &pkt );
 }
 else
 {
 AVPacket pkt;
 av_init_packet(&pkt);

 /* encode the image */

//cv::Mat yuv420p( m_frame->height + m_frame->height/2, m_frame->width, CV_8UC1, m_frame->data[0]);
//cv::Mat cvmIm;
//cv::cvtColor(yuv420p,cvmIm,CV_YUV420p2BGR);
//cv::imwrite("c:\\tmp\\YUVoriginal.png", cvmIm);

 ret = avcodec_send_frame(m_codecCtx, m_frame);

 if (ret < 0)
 {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Error encoding video frame: %s\n", error);
 return;
 }

 /* If size is zero, it means the image was buffered. */
// ret = avcodec_receive_packet(m_codecCtx, &pkt);

 // Drain packets; -11 (AVERROR(EAGAIN)) here simply means the encoder is
 // still buffering input — it is not a failure.
 do
 {
 ret = avcodec_receive_packet(m_codecCtx, &pkt);

 if (ret == 0)
 {
 ret = av_write_frame( m_formatCtx, &pkt );
 av_packet_unref(&pkt);

 break;
 }
// else if ((ret < 0) && (ret != AVERROR(EAGAIN)))
// {
// return;
// }
// else if (ret == AVERROR(EAGAIN))
// {
// /* flush the encoder */
// ret = avcodec_send_frame(m_codecCtx, NULL);
//
// if (0 > ret)
// return;
// }
 } while (ret == 0);

 // With the break above, pkt was already written and unreferenced when a
 // packet arrived; this second write only runs on leftover state.
 if( !ret && pkt.size)
 {
 pkt.stream_index = m_streamOut->index;

 /* Write the compressed frame to the media file. */
// ret = av_interleaved_write_frame(m_formatCtx, &pkt);
 ret = av_write_frame( m_formatCtx, &pkt );
 }
 else
 {
 ret = 0;
 }
 }

 if (ret != 0)
 {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Error while writing video frame: %s\n", error);
 return;
 }

 s_frameCount++;
}



-
Encoding .png images with h264 to a file on disk
21 février 2021, par xyfix — Can somebody help me to find out why I end up with a file on disk that is only 24 kb and not readable by VLC or so, while I send valid YUV images to the codec. I have added the .h and .cpp file. Up till "avcodec_receive_packet" everything seems to be OK. The function call "avcodec_send_frame" returns 0, so that must be OK, but "avcodec_receive_packet" returns -11. If I flush the encoder (currently commented) then "avcodec_receive_packet" returns 0 and I can see encoded data if I store it on disk. Also the input image to the encoder is correct (currently commented) and checked. I'm aiming for intra-frame encoding, so I should get the encoded frame data back, but I don't get anything back even if I send 24 images to it.


.h file


#ifndef MOVIECODEC_H
#define MOVIECODEC_H

#include 

extern "C"
{
 #include "Codec/include/libavcodec/avcodec.h"
 #include "Codec/include/libavdevice/avdevice.h"
 #include "Codec/include/libavformat/avformat.h"
 #include "Codec/include/libavutil/avutil.h"
 #include "Codec/include/libavutil/imgutils.h"
 #include "Codec/include/libswscale/swscale.h"
}


// Encodes a sequence of OpenCV images to an H.264 file on disk via FFmpeg.
// Construct with the target filename, feed frames with encodeImage(), and
// finish with close() (also invoked by the destructor).
class MovieCodec
{
public:

 // Opens the output file/muxer and writes the container header.
 MovieCodec(const char *filename);

 // Calls close().
 ~MovieCodec();

 // Converts `image` (RGB) to YUV420P and encodes/muxes one frame.
 void encodeImage( const cv::Mat& image );

 // Flushes the encoder and releases muxer/frame resources.
 void close();

private :

 // Creates the H.264 stream and configures the codec context.
 void add_stream();

 // Opens the encoder (avcodec_open2) on m_codecCtx.
 void openVideoCodec();

 // Encodes one frame and writes the packet(s) to the muxer.
 void write_video_frame(const cv::Mat& image);

 // Allocates m_frame and converts `image` into it.
 void createFrame( const cv::Mat& image );

private:

 static int s_frameCount;      // frames successfully written so far

 int m_timeVideo = 0;          // elapsed video time in seconds (truncated)

 std::string m_filename;       // output path

 FILE* m_file;                 // NOTE(review): opened in ctor but FFmpeg
                               // writes through its own AVIO handle —
                               // this FILE* appears unused for output.

 AVCodec* m_encoder = NULL;        // H.264 encoder (looked up in ctor)

 AVOutputFormat* m_outputFormat = NULL; // muxer format, taken from m_formatCtx

 AVFormatContext* m_formatCtx = NULL;   // output/muxer context

 AVCodecContext* m_codecCtx = NULL;     // encoder context

 AVStream* m_streamOut = NULL;          // the single video stream

 AVFrame* m_frame = NULL;               // reusable YUV frame (allocated lazily)

 AVPacket* m_packet = NULL;             // NOTE(review): never used in the
                                        // visible code.

};



.cpp file


#ifndef MOVIECODEC_CPP
#define MOVIECODEC_CPP

#include "moviecodec.h"


#define STREAM_DURATION 5.0
#define STREAM_FRAME_RATE 24
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */


static int sws_flags = SWS_BICUBIC;
int MovieCodec::s_frameCount = 0;

// Construct the encoder pipeline: allocate the muxer for `filename`, add the
// H.264 stream, open the codec, open the output file and write the header.
// Errors are reported to stderr and leave the object partially initialised.
MovieCodec::MovieCodec( const char* filename ) :
 m_filename( filename ),
 m_encoder( avcodec_find_encoder( AV_CODEC_ID_H264 ))
{
 av_log_set_level(AV_LOG_VERBOSE);

 int ret(0);

 // NOTE(review): this handle is never written to — FFmpeg opens its own
 // file via avio_open() below. Confirm whether m_file is needed at all.
 m_file = fopen( m_filename.c_str(), "wb");

 // allocate the output media context (format guessed from the filename,
 // since m_outputFormat is still NULL here)
 ret = avformat_alloc_output_context2( &m_formatCtx, m_outputFormat, NULL, m_filename.c_str());

 if (!m_formatCtx)
 return;

 m_outputFormat = m_formatCtx->oformat;

 // Add the video stream using H264 codec
 add_stream();

 // Open video codec and allocate the necessary encode buffers
 if (m_streamOut)
 openVideoCodec();

 av_dump_format( m_formatCtx, 0, m_filename.c_str(), 1);

 // Open the output media file, if needed
 if (!( m_outputFormat->flags & AVFMT_NOFILE))
 {
 ret = avio_open( &m_formatCtx->pb, m_filename.c_str(), AVIO_FLAG_WRITE);

 if (ret < 0)
 {
 char error[255];
 ret = av_strerror( ret, error, 255);
 fprintf(stderr, "Could not open '%s': %s\n", m_filename.c_str(), error);
 return ;
 }
 }
 else
 {
 // Muxer needs no file of its own; nothing further to set up here.
 return;
 }

 // Ask the muxer to flush after every packet (low-latency writes).
 m_formatCtx->flush_packets = 1;

 ret = avformat_write_header( m_formatCtx, NULL );

 if (ret < 0)
 {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Error occurred when opening output file: %s\n", error);
 return;
 }


 // NOTE(review): m_frame is still NULL at this point (it is first allocated
 // in createFrame()), so this branch never runs during construction.
 if ( m_frame )
 m_frame->pts = 0;
}



// Destructor: delegates all teardown (flush, file close, frees) to close().
MovieCodec::~MovieCodec()
{
 close();
}



// Encode one image as the next video frame and advance the frame pts.
// image : source picture (RGB data, dimensions must match the stream).
//
// FIX: the original line had an unbalanced parenthesis
//   m_timeVideo = (double)m_frame->pts) * av_q2d(...)
// which does not compile, and it dereferenced m_frame before the first frame
// was ever allocated (createFrame() only runs inside write_video_frame()),
// a guaranteed null dereference on the first call. Both are guarded here.
void MovieCodec::encodeImage(const cv::Mat &image)
{
 // Compute video time from the last added video frame (0 before the first).
 m_timeVideo = m_frame ? (double)m_frame->pts * av_q2d(m_streamOut->time_base) : 0;

 // Stop media if enough time
 if (!m_streamOut /*|| m_timeVideo >= STREAM_DURATION*/)
 return;

 // Add a video frame (allocates m_frame on first use via createFrame()).
 write_video_frame( image );

 // Increase frame pts according to time base
 if (m_frame)
 m_frame->pts += av_rescale_q(1, m_codecCtx->time_base, m_streamOut->time_base);
}


// Tear down the pipeline: flush the encoder, free the frame, close the
// output file and release the muxer context.
void MovieCodec::close()
{
 int ret( 0 );

 // Write media trailer
// if( m_formatCtx )
// ret = av_write_trailer( m_formatCtx );
 // NOTE(review): with av_write_trailer() commented out the container is
 // never finalised, which would explain an unplayable output file.

 /* flush the encoder */
 // NOTE(review): entering flush mode without subsequently draining packets
 // via avcodec_receive_packet() discards whatever the encoder had buffered.
 ret = avcodec_send_frame(m_codecCtx, NULL);

 /* Close each codec. */
 if ( m_streamOut )
 {
 av_free( m_frame->data[0]);
 av_free( m_frame );
 }

 if (!( m_outputFormat->flags & AVFMT_NOFILE))
 /* Close the output file. */
 ret = avio_close( m_formatCtx->pb);


 /* free the stream */
 avformat_free_context( m_formatCtx );

 // Flushes the FILE* opened in the constructor (never written through).
 fflush( m_file );
}


// Allocate m_frame as a YUV420P picture matching `image` and fill it by
// converting the image's RGB24 data with libswscale.
// image : source picture; its buffer is read but not modified.
//
// FIX: the original leaked the SwsContext on every call and leaked the
// previously allocated frame/buffer each time it was re-invoked; it also
// never checked sws_getContext() for failure.
void MovieCodec::createFrame( const cv::Mat& image )
{
 // Release the frame from the previous call, if any (was leaked before).
 if (m_frame)
 {
 av_freep(&m_frame->data[0]);
 av_frame_free(&m_frame);
 }

 /**
 * \note allocate frame
 */
 m_frame = av_frame_alloc();
 m_frame->format = STREAM_PIX_FMT;
 m_frame->width = image.cols();
 m_frame->height = image.rows();
 m_frame->pict_type = AV_PICTURE_TYPE_I;
 int ret = av_image_alloc(m_frame->data, m_frame->linesize, m_frame->width, m_frame->height, STREAM_PIX_FMT, 1);

 if (ret < 0)
 {
 return;
 }

 struct SwsContext* sws_ctx = sws_getContext((int)image.cols(), (int)image.rows(), AV_PIX_FMT_RGB24,
 (int)image.cols(), (int)image.rows(), STREAM_PIX_FMT, 0, NULL, NULL, NULL);

 if (!sws_ctx)
 {
 return;
 }

 const uint8_t* rgbData[1] = { (uint8_t* )image.getData() };
 int rgbLineSize[1] = { 3 * image.cols() }; // tightly packed RGB24 rows

 sws_scale(sws_ctx, rgbData, rgbLineSize, 0, image.rows(), m_frame->data, m_frame->linesize);

 // Conversion context is per-call here; free it (was leaked before).
 sws_freeContext(sws_ctx);
}


/* Add an output stream. */
/* Add an output stream: create the single H.264 video stream on the muxer,
 * fill its codec parameters, and derive the encoder context from them. */
void MovieCodec::add_stream()
{
 AVCodecID codecId = AV_CODEC_ID_H264;

 if (!( m_encoder ))
 {
 fprintf(stderr, "Could not find encoder for '%s'\n",
 avcodec_get_name(codecId));
 return;
 }

 // Get the stream for codec
 m_streamOut = avformat_new_stream(m_formatCtx, m_encoder);

 if (!m_streamOut) {
 fprintf(stderr, "Could not allocate stream\n");
 return;
 }

 // Streams are appended, so the new stream's index is nb_streams - 1.
 m_streamOut->id = m_formatCtx->nb_streams - 1;

 m_codecCtx = avcodec_alloc_context3( m_encoder);

 m_streamOut->codecpar->codec_id = codecId;
 m_streamOut->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 m_streamOut->codecpar->bit_rate = 400000;
 m_streamOut->codecpar->width = 800;
 m_streamOut->codecpar->height = 640;
 m_streamOut->codecpar->format = STREAM_PIX_FMT;
 // 0x31637661 is the FourCC 'avc1' read as little-endian bytes "avc1".
 m_streamOut->codecpar->codec_tag = 0x31637661;
 m_streamOut->codecpar->video_delay = 0;
 m_streamOut->time_base = { 1, STREAM_FRAME_RATE };


 // Seed the encoder context from the stream parameters set above.
 avcodec_parameters_to_context( m_codecCtx, m_streamOut->codecpar);

 m_codecCtx->gop_size = 0; /* 0 = every frame is an intra frame */
 m_codecCtx->max_b_frames = 0;
 m_codecCtx->time_base = { 1, STREAM_FRAME_RATE };
 m_codecCtx->framerate = { STREAM_FRAME_RATE, 1 };
 m_codecCtx->pix_fmt = STREAM_PIX_FMT;



 if (m_streamOut->codecpar->codec_id == AV_CODEC_ID_H264)
 {
 // NOTE(review): "preset"/"tune" are x264 *private* options; av_opt_set on
 // the codec context with flags=0 may not reach them — confirm these calls
 // succeed (AV_OPT_SEARCH_CHILDREN is normally required).
 av_opt_set( m_codecCtx, "preset", "ultrafast", 0 );
 av_opt_set( m_codecCtx, "vprofile", "baseline", 0 );
 av_opt_set( m_codecCtx, "tune", "zerolatency", 0 );
 }

// /* Some formats want stream headers to be separate. */
// if (m_formatCtx->oformat->flags & AVFMT_GLOBALHEADER)
// m_codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;


}


void MovieCodec::openVideoCodec()
{
 int ret;

 /* open the codec */
 ret = avcodec_open2(m_codecCtx, m_encoder, NULL);

 if (ret < 0)
 {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Could not open video codec: %s\n", error);
 }
}



// Convert `image` into m_frame (via createFrame) and either store it raw in
// a packet (raw-video muxers) or run it through the H.264 encoder and write
// the resulting packet(s) to the muxer.
void MovieCodec::write_video_frame( const cv::Mat& image )
{
 int ret;

 createFrame( image );


 // NOTE(review): 0x0020 is the value of the old AVFMT_RAWPICTURE flag,
 // removed from recent FFmpeg — confirm it still matches this build.
 if (m_formatCtx->oformat->flags & 0x0020 )
 {
 /* Raw video case - directly store the picture in the packet */
 AVPacket pkt;
 av_init_packet(&pkt);

 pkt.flags |= AV_PKT_FLAG_KEY;
 pkt.stream_index = m_streamOut->index;
 pkt.data = m_frame->data[0];
 pkt.size = sizeof(AVPicture);

// ret = av_interleaved_write_frame(m_formatCtx, &pkt);
 ret = av_write_frame( m_formatCtx, &pkt );
 }
 else
 {
 AVPacket pkt;
 av_init_packet(&pkt);

 /* encode the image */

//cv::Mat yuv420p( m_frame->height + m_frame->height/2, m_frame->width, CV_8UC1, m_frame->data[0]);
//cv::Mat cvmIm;
//cv::cvtColor(yuv420p,cvmIm,CV_YUV420p2BGR);
//cv::imwrite("c:\\tmp\\YUVoriginal.png", cvmIm);

 ret = avcodec_send_frame(m_codecCtx, m_frame);

 if (ret < 0)
 {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Error encoding video frame: %s\n", error);
 return;
 }

 /* If size is zero, it means the image was buffered. */
// ret = avcodec_receive_packet(m_codecCtx, &pkt);

 // Drain packets; -11 (AVERROR(EAGAIN)) here simply means the encoder is
 // still buffering input — it is not a failure.
 do
 {
 ret = avcodec_receive_packet(m_codecCtx, &pkt);

 if (ret == 0)
 {
 ret = av_write_frame( m_formatCtx, &pkt );
 av_packet_unref(&pkt);

 break;
 }
// else if ((ret < 0) && (ret != AVERROR(EAGAIN)))
// {
// return;
// }
// else if (ret == AVERROR(EAGAIN))
// {
// /* flush the encoder */
// ret = avcodec_send_frame(m_codecCtx, NULL);
//
// if (0 > ret)
// return;
// }
 } while (ret == 0);

 // With the break above, pkt was already written and unreferenced when a
 // packet arrived; this second write only runs on leftover state.
 if( !ret && pkt.size)
 {
 pkt.stream_index = m_streamOut->index;

 /* Write the compressed frame to the media file. */
// ret = av_interleaved_write_frame(m_formatCtx, &pkt);
 ret = av_write_frame( m_formatCtx, &pkt );
 }
 else
 {
 ret = 0;
 }
 }

 if (ret != 0)
 {
 char error[255];
 av_strerror(ret, error, 255);
 fprintf(stderr, "Error while writing video frame: %s\n", error);
 return;
 }

 s_frameCount++;
}