
Media (91)
-
#3 The Safest Place
16 October 2011
Updated: February 2013
Language: English
Type: Audio
-
#4 Emo Creates
15 October 2011
Updated: February 2013
Language: English
Type: Audio
-
#2 Typewriter Dance
15 October 2011
Updated: February 2013
Language: English
Type: Audio
-
#1 The Wires
11 October 2011
Updated: February 2013
Language: English
Type: Audio
-
ED-ME-5 1-DVD
11 October 2011
Updated: October 2011
Language: English
Type: Audio
-
Revolution of Open-source and film making towards open film making
6 October 2011
Updated: July 2013
Language: English
Type: Text
Other articles (44)
-
Publishing on MediaSPIP
13 June 2013
Can I post content from an iPad tablet?
Yes, if your MediaSPIP installation is version 0.2 or higher. If in doubt, contact the administrator of your MediaSPIP to find out. -
Organising by category
17 May 2013
In MediaSPIP, a section has two names: category and rubrique.
The documents stored in MediaSPIP can be filed under different categories. A category is created by clicking on "publish a category" in the "publish" menu at the top right (after logging in). A category can itself be placed inside another category, so a whole tree of categories can be built.
When a document is next published, the newly created category will be offered (...) -
The farm's regular Cron tasks
1 December 2010
Managing the farm relies on running several repetitive tasks, called Cron tasks, at regular intervals.
The super Cron (gestion_mutu_super_cron)
This task, scheduled every minute, simply calls the Cron of every instance of the shared hosting on a regular basis. Coupled with a system Cron on the central site of the farm, this generates regular visits to the various sites and keeps the tasks of rarely visited sites from being too (...)
On other sites (5352)
-
Issue after video rotation: how to fix?
2 April 2015, by Vahagn
I have the following code to rotate a video:
OpenCVFrameConverter.ToIplImage converter2 = new OpenCVFrameConverter.ToIplImage();
for (int i = firstIndex; i <= lastIndex; i++) {
    long t = timestamps[i % timestamps.length] - startTime;
    if (t >= 0) {
        if (t > recorder.getTimestamp()) {
            recorder.setTimestamp(t);
        }
        Frame g = converter2.convert(rotate(converter2.convertToIplImage(images[i % images.length]), 90));
        recorder.record(g);
    }
}
images[i] is a Frame in JavaCV.
After rotation, the video has green lines.
UPDATE
The conversion function:
/*
* Copyright (C) 2015 Samuel Audet
*
* This file is part of JavaCV.
*
* JavaCV is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version (subject to the "Classpath" exception
* as provided in the LICENSE.txt file that accompanied this code).
*
* JavaCV is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
 * along with JavaCV. If not, see <http://www.gnu.org/licenses/>.
*/
package com.example.vvardanyan.ffmpeg;
import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.javacpp.Pointer;
import java.nio.Buffer;
import static org.bytedeco.javacpp.opencv_core.CV_16S;
import static org.bytedeco.javacpp.opencv_core.CV_16U;
import static org.bytedeco.javacpp.opencv_core.CV_32F;
import static org.bytedeco.javacpp.opencv_core.CV_32S;
import static org.bytedeco.javacpp.opencv_core.CV_64F;
import static org.bytedeco.javacpp.opencv_core.CV_8S;
import static org.bytedeco.javacpp.opencv_core.CV_8U;
import static org.bytedeco.javacpp.opencv_core.CV_MAKETYPE;
import static org.bytedeco.javacpp.opencv_core.IPL_DEPTH_16S;
import static org.bytedeco.javacpp.opencv_core.IPL_DEPTH_16U;
import static org.bytedeco.javacpp.opencv_core.IPL_DEPTH_32F;
import static org.bytedeco.javacpp.opencv_core.IPL_DEPTH_32S;
import static org.bytedeco.javacpp.opencv_core.IPL_DEPTH_64F;
import static org.bytedeco.javacpp.opencv_core.IPL_DEPTH_8S;
import static org.bytedeco.javacpp.opencv_core.IPL_DEPTH_8U;
import static org.bytedeco.javacpp.opencv_core.IplImage;
import static org.bytedeco.javacpp.opencv_core.Mat;
/**
* A utility class to map data between {@link Frame} and {@link IplImage} or {@link Mat}.
* Since this is an abstract class, one must choose between two concrete classes:
* {@link ToIplImage} or {@link ToMat}.
*
* @author Samuel Audet
*/
public abstract class OpenCVFrameConverter<F> extends FrameConverter<F> {
    IplImage img;
    Mat mat;

    public static class ToIplImage extends OpenCVFrameConverter<IplImage> {
        @Override public IplImage convert(Frame frame) { return convertToIplImage(frame); }
    }

    public static class ToMat extends OpenCVFrameConverter<Mat> {
        @Override public Mat convert(Frame frame) { return convertToMat(frame); }
    }

    public static int getFrameDepth(int depth) {
        switch (depth) {
            case IPL_DEPTH_8U: case CV_8U: return Frame.DEPTH_UBYTE;
            case IPL_DEPTH_8S: case CV_8S: return Frame.DEPTH_BYTE;
            case IPL_DEPTH_16U: case CV_16U: return Frame.DEPTH_USHORT;
            case IPL_DEPTH_16S: case CV_16S: return Frame.DEPTH_SHORT;
            case IPL_DEPTH_32F: case CV_32F: return Frame.DEPTH_FLOAT;
            case IPL_DEPTH_32S: case CV_32S: return Frame.DEPTH_INT;
            case IPL_DEPTH_64F: case CV_64F: return Frame.DEPTH_DOUBLE;
            default: return -1;
        }
    }

    public static int getIplImageDepth(Frame frame) {
        switch (frame.imageDepth) {
            case Frame.DEPTH_UBYTE: return IPL_DEPTH_8U;
            case Frame.DEPTH_BYTE: return IPL_DEPTH_8S;
            case Frame.DEPTH_USHORT: return IPL_DEPTH_16U;
            case Frame.DEPTH_SHORT: return IPL_DEPTH_16S;
            case Frame.DEPTH_FLOAT: return IPL_DEPTH_32F;
            case Frame.DEPTH_INT: return IPL_DEPTH_32S;
            case Frame.DEPTH_DOUBLE: return IPL_DEPTH_64F;
            default: return -1;
        }
    }

    static boolean isEqual(Frame frame, IplImage img) {
        return img != null && frame != null && frame.image != null && frame.image.length > 0
                && frame.imageWidth == img.width() && frame.imageHeight == img.height()
                && frame.imageChannels == img.nChannels() && getIplImageDepth(frame) == img.depth()
                && new Pointer(frame.image[0]).address() == img.imageData().address()
                && frame.imageStride * Math.abs(frame.imageDepth) / 8 == img.widthStep();
    }

    public IplImage convertToIplImage(Frame frame) {
        if (frame == null) {
            return null;
        } else if (frame.opaque instanceof IplImage) {
            return (IplImage)frame.opaque;
        } else if (!isEqual(frame, img)) {
            int depth = getIplImageDepth(frame);
            img = depth < 0 ? null : IplImage.createHeader(frame.imageWidth, frame.imageHeight, depth, frame.imageChannels)
                    .imageData(new BytePointer(new Pointer(frame.image[0].position(0)))).widthStep(frame.imageStride * Math.abs(frame.imageDepth) / 8);
        }
        return img;
    }

    public Frame convert(IplImage img) {
        if (img == null) {
            return null;
        } else if (!isEqual(frame, img)) {
            frame = new Frame();
            frame.imageWidth = img.width();
            frame.imageHeight = img.height();
            frame.imageDepth = getFrameDepth(img.depth());
            frame.imageChannels = img.nChannels();
            frame.imageStride = img.widthStep() * 8 / Math.abs(frame.imageDepth);
            frame.image = new Buffer[] { img.createBuffer() };
            frame.opaque = img;
        }
        return frame;
    }

    public static int getMatDepth(Frame frame) {
        switch (frame.imageDepth) {
            case Frame.DEPTH_UBYTE: return CV_8U;
            case Frame.DEPTH_BYTE: return CV_8S;
            case Frame.DEPTH_USHORT: return CV_16U;
            case Frame.DEPTH_SHORT: return CV_16S;
            case Frame.DEPTH_FLOAT: return CV_32F;
            case Frame.DEPTH_INT: return CV_32S;
            case Frame.DEPTH_DOUBLE: return CV_64F;
            default: return -1;
        }
    }

    static boolean isEqual(Frame frame, Mat mat) {
        return mat != null && frame != null && frame.image != null && frame.image.length > 0
                && frame.imageWidth == mat.cols() && frame.imageHeight == mat.rows()
                && frame.imageChannels == mat.channels() && getMatDepth(frame) == mat.depth()
                && new Pointer(frame.image[0]).address() == mat.data().address()
                && frame.imageStride * Math.abs(frame.imageDepth) / 8 == (int)mat.step();
    }

    public Mat convertToMat(Frame frame) {
        if (frame == null) {
            return null;
        } else if (frame.opaque instanceof Mat) {
            return (Mat)frame.opaque;
        } else if (!isEqual(frame, mat)) {
            int depth = getMatDepth(frame);
            mat = depth < 0 ? null : new Mat(frame.imageHeight, frame.imageWidth, CV_MAKETYPE(depth, frame.imageChannels),
                    new Pointer(frame.image[0].position(0)), frame.imageStride * Math.abs(frame.imageDepth) / 8);
        }
        return mat;
    }

    public Frame convert(Mat mat) {
        if (mat == null) {
            return null;
        } else if (!isEqual(frame, mat)) {
            frame = new Frame();
            frame.imageWidth = mat.cols();
            frame.imageHeight = mat.rows();
            frame.imageDepth = getFrameDepth(mat.depth());
            frame.imageChannels = mat.channels();
            frame.imageStride = (int)mat.step() * 8 / Math.abs(frame.imageDepth);
            frame.image = new Buffer[] { mat.createBuffer() };
            frame.opaque = mat;
        }
        return frame;
    }
}
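The rotate() helper used in the question is not shown. Green stripes after a 90-degree rotation are commonly a sign that the destination image, or the recorder, still uses the original width, height or stride. Purely as a hedged illustration (this is not the asker's code), a rotation helper against the bytedeco opencv_core API that JavaCV wraps could allocate the destination with swapped dimensions:

import static org.bytedeco.javacpp.opencv_core.*;

public class RotateSketch {
    // Hypothetical helper: rotates src 90 degrees clockwise into a freshly
    // allocated image whose width/height (and therefore widthStep) match the
    // rotated geometry instead of reusing the source layout.
    public static IplImage rotate90(IplImage src) {
        IplImage dst = cvCreateImage(cvSize(src.height(), src.width()),
                                     src.depth(), src.nChannels());
        cvTranspose(src, dst); // swap rows and columns
        cvFlip(dst, dst, 1);   // flip around the vertical axis -> clockwise 90
        return dst;
    }
}

If something along these lines is used, the recorder (presumably an FFmpegFrameRecorder) would also need to be created with the swapped width and height so that the recorded frames match the encoder's expected layout.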
-
ffmpeg avcodec_encode_video2 hangs when using Quick Sync h264_qsv encoder
9 June 2020, by Mike Simpson
When I use the mpeg4 or h264 encoders, I am able to successfully encode images to make a valid AVI file using the API for ffmpeg 3.1.0. However, when I use the Quick Sync encoder (h264_qsv), avcodec_encode_video2 will hang some of the time. I found that when using 1920x1080 images, it was rare for avcodec_encode_video2 to hang; when using 256x256 images, it was very likely that the function would hang.



I have created the test code below that demonstrates the hang of avcodec_encode_video2. The code will create a 1000 frame, 256x256 AVI with a bit rate of 400000. The frames are simply allocated, so the output video should just be green frames.



The problem was observed using Windows 7 and Windows 10, using the 32-bit or 64-bit test application.



If anyone has any idea how I can avoid the avcodec_encode_video2 hang, I would be very grateful! Thanks in advance for any assistance.



extern "C"
{
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include "avcodec.h"
#include "avformat.h"
#include "swscale.h"
#include "avutil.h"
#include "imgutils.h"
#include "opt.h"
#include 
}

#include <iostream>


// Globals
AVCodec* m_pCodec = NULL;
AVStream *m_pStream = NULL;
AVOutputFormat* m_pFormat = NULL;
AVFormatContext* m_pFormatContext = NULL;
AVCodecContext* m_pCodecContext = NULL;
AVFrame* m_pFrame = NULL;
int m_frameIndex;

// Output format
AVPixelFormat m_pixType = AV_PIX_FMT_NV12;
// Use for mpeg4
//AVPixelFormat m_pixType = AV_PIX_FMT_YUV420P;

// Output frame rate
int m_frameRate = 30;
// Output image dimensions
int m_imageWidth = 256;
int m_imageHeight = 256;
// Number of frames to export
int m_frameCount = 1000;
// Output file name
const char* m_fileName = "c:/test/test.avi";
// Output file type
const char* m_fileType = "AVI";
// Codec name used to encode
const char* m_encoderName = "h264_qsv";
// use for mpeg4
//const char* m_encoderName = "mpeg4";
// Target bit rate
int m_targetBitRate = 400000;

void addVideoStream()
{
 m_pStream = avformat_new_stream( m_pFormatContext, m_pCodec );
 m_pStream->id = m_pFormatContext->nb_streams - 1;
 m_pStream->time_base = m_pCodecContext->time_base;
 m_pStream->codec->pix_fmt = m_pixType;
 m_pStream->codec->flags = m_pCodecContext->flags;
 m_pStream->codec->width = m_pCodecContext->width;
 m_pStream->codec->height = m_pCodecContext->height;
 m_pStream->codec->time_base = m_pCodecContext->time_base;
 m_pStream->codec->bit_rate = m_pCodecContext->bit_rate;
}

AVFrame* allocatePicture( enum AVPixelFormat pix_fmt, int width, int height )
{
 AVFrame *frame;

 frame = av_frame_alloc();

 if ( !frame )
 {
 return NULL;
 }

 frame->format = pix_fmt;
 frame->width = width;
 frame->height = height;

 int checkImage = av_image_alloc( frame->data, frame->linesize, width, height, pix_fmt, 32 );

 if ( checkImage < 0 )
 {
 return NULL;
 }

 return frame;
}

bool initialize()
{
 AVRational frameRate;
 frameRate.den = m_frameRate;
 frameRate.num = 1;

 av_register_all();

 m_pCodec = avcodec_find_encoder_by_name(m_encoderName);

 if( !m_pCodec )
 {
 return false;
 }

 m_pCodecContext = avcodec_alloc_context3( m_pCodec );
 m_pCodecContext->width = m_imageWidth;
 m_pCodecContext->height = m_imageHeight;
 m_pCodecContext->time_base = frameRate;
 m_pCodecContext->gop_size = 0;
 m_pCodecContext->pix_fmt = m_pixType;
 m_pCodecContext->codec_id = m_pCodec->id;
 m_pCodecContext->bit_rate = m_targetBitRate;

 av_opt_set( m_pCodecContext->priv_data, "+CBR", "", 0 );

 return true;
}

bool startExport()
{
 m_frameIndex = 0;
 char fakeFileName[512]; 
 int checkAllocContext = avformat_alloc_output_context2( &m_pFormatContext, NULL, m_fileType, fakeFileName );

 if ( checkAllocContext < 0 )
 {
 return false;
 }

 if ( !m_pFormatContext ) 
 {
 return false;
 }

 m_pFormat = m_pFormatContext->oformat;

 if ( m_pFormat->video_codec != AV_CODEC_ID_NONE ) 
 {
 addVideoStream();

 int checkOpen = avcodec_open2( m_pCodecContext, m_pCodec, NULL );

 if ( checkOpen < 0 )
 {
 return false;
 }

 m_pFrame = allocatePicture( m_pCodecContext->pix_fmt, m_pCodecContext->width, m_pCodecContext->height ); 
 if( !m_pFrame ) 
 {
 return false;
 }
 m_pFrame->pts = 0;
 }

 int checkOpen = avio_open( &m_pFormatContext->pb, m_fileName, AVIO_FLAG_WRITE );
 if ( checkOpen < 0 )
 {
 return false;
 }

 av_dict_set( &(m_pFormatContext->metadata), "title", "QS Test", 0 );

 int checkHeader = avformat_write_header( m_pFormatContext, NULL );
 if ( checkHeader < 0 )
 {
 return false;
 }

 return true;
}

int processFrame( AVPacket& avPacket )
{
 avPacket.stream_index = 0;
 avPacket.pts = av_rescale_q( m_pFrame->pts, m_pStream->codec->time_base, m_pStream->time_base );
 avPacket.dts = av_rescale_q( m_pFrame->pts, m_pStream->codec->time_base, m_pStream->time_base );
 m_pFrame->pts++;

 int retVal = av_interleaved_write_frame( m_pFormatContext, &avPacket );
 return retVal;
}

bool exportFrame()
{
 int success = 1;
 int result = 0;

 AVPacket avPacket;

 av_init_packet( &avPacket );
 avPacket.data = NULL;
 avPacket.size = 0;

 fflush(stdout);

 std::cout << "Before avcodec_encode_video2 for frame: " << m_frameIndex << std::endl;
 success = avcodec_encode_video2( m_pCodecContext, &avPacket, m_pFrame, &result );
 std::cout << "After avcodec_encode_video2 for frame: " << m_frameIndex << std::endl;

 if( result )
 { 
 success = processFrame( avPacket );
 }

 av_packet_unref( &avPacket );

 m_frameIndex++;
 return ( success == 0 );
}

void endExport()
{
 int result = 0;
 int success = 0;

 if (m_pFrame)
 {
 while ( success == 0 )
 {
 AVPacket avPacket;
 av_init_packet( &avPacket );
 avPacket.data = NULL;
 avPacket.size = 0;

 fflush(stdout);
 success = avcodec_encode_video2( m_pCodecContext, &avPacket, NULL, &result );

 if( result )
 { 
 success = processFrame( avPacket );
 }
 av_packet_unref( &avPacket );

 if (!result)
 {
 break;
 }
 }
 }

 if (m_pFormatContext)
 {
 av_write_trailer( m_pFormatContext );

 if( m_pFrame )
 {
 av_frame_free( &m_pFrame );
 }

 avio_closep( &m_pFormatContext->pb );
 avformat_free_context( m_pFormatContext );
 m_pFormatContext = NULL;
 }
}

void cleanup()
{
 if( m_pFrame || m_pCodecContext )
 {
 if( m_pFrame )
 {
 av_frame_free( &m_pFrame );
 }

 if( m_pCodecContext )
 {
 avcodec_close( m_pCodecContext );
 av_free( m_pCodecContext );
 }
 }
}

int main()
{
 bool success = true;
 if (initialize())
 {
 if (startExport())
 {
 for (int loop = 0; loop < m_frameCount; loop++)
 {
 if (!exportFrame())
 {
 std::cout << "Failed to export frame\n";
 success = false;
 break;
 }
 }
 endExport();
 }
 else
 {
 std::cout << "Failed to start export\n";
 success = false;
 }

 cleanup();
 }
 else
 {
 std::cout << "Failed to initialize export\n";
 success = false;
 }

 if (success)
 {
 std::cout << "Successfully exported file\n";
 }
 return 1;
}
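ffmpeg 3.1 also provides the newer send/receive encoding API (avcodec_send_frame / avcodec_receive_packet). The sketch below is not a confirmed fix for the h264_qsv hang; assuming that API is available in the build being used, it only shows how the encode step above could be rewritten so that the encoder's internal buffering becomes explicit: AVERROR(EAGAIN) from avcodec_receive_packet means "feed more frames", which makes it easier to tell ordinary frame buffering apart from a genuine hang.

extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavutil/error.h>
}

// Encodes one frame (pass NULL to start draining) and writes any packets that
// become available, reusing the processFrame() muxing helper from the test
// code above. Returns false on a fatal error.
static bool encodeWithSendReceive( AVCodecContext* codecContext, AVFrame* frame )
{
    int ret = avcodec_send_frame( codecContext, frame );
    if ( ret < 0 && ret != AVERROR_EOF )
    {
        return false;
    }

    while ( true )
    {
        AVPacket packet;
        av_init_packet( &packet );
        packet.data = NULL;
        packet.size = 0;

        ret = avcodec_receive_packet( codecContext, &packet );
        if ( ret == AVERROR(EAGAIN) || ret == AVERROR_EOF )
        {
            break; // encoder wants more input, or is fully drained
        }
        if ( ret < 0 )
        {
            return false; // genuine encode error
        }

        processFrame( packet ); // mux the packet as in exportFrame()/endExport()
        av_packet_unref( &packet );
    }
    return true;
}

This would be called once per source frame during the export loop and once more with a NULL frame at the end of the export to drain the encoder.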