
Recherche avancée
Médias (9)
-
Stereo master soundtrack
17 octobre 2011, par
Mis à jour : Octobre 2011
Langue : English
Type : Audio
-
Elephants Dream - Cover of the soundtrack
17 octobre 2011, par
Mis à jour : Octobre 2011
Langue : English
Type : Image
-
#7 Ambience
16 octobre 2011, par
Mis à jour : Juin 2015
Langue : English
Type : Audio
-
#6 Teaser Music
16 octobre 2011, par
Mis à jour : Février 2013
Langue : English
Type : Audio
-
#5 End Title
16 octobre 2011, par
Mis à jour : Février 2013
Langue : English
Type : Audio
-
#3 The Safest Place
16 octobre 2011, par
Mis à jour : Février 2013
Langue : English
Type : Audio
Autres articles (71)
-
Problèmes fréquents
10 mars 2010, par
PHP et safe_mode activé
Une des principales sources de problèmes relève de la configuration de PHP et notamment de l’activation du safe_mode
La solution consiterait à soit désactiver le safe_mode soit placer le script dans un répertoire accessible par apache pour le site -
Mediabox : ouvrir les images dans l’espace maximal pour l’utilisateur
8 février 2011, par
La visualisation des images est restreinte par la largeur accordée par le design du site (dépendant du thème utilisé). Elles sont donc visibles sous un format réduit. Afin de profiter de l’ensemble de la place disponible sur l’écran de l’utilisateur, il est possible d’ajouter une fonctionnalité d’affichage de l’image dans une boite multimedia apparaissant au dessus du reste du contenu.
Pour ce faire il est nécessaire d’installer le plugin "Mediabox".
Configuration de la boite multimédia
Dès (...) -
(Dés)Activation de fonctionnalités (plugins)
18 février 2011, par
Pour gérer l’ajout et la suppression de fonctionnalités supplémentaires (ou plugins), MediaSPIP utilise à partir de la version 0.2 SVP.
SVP permet l’activation facile de plugins depuis l’espace de configuration de MediaSPIP.
Pour y accéder, il suffit de se rendre dans l’espace de configuration puis de se rendre sur la page "Gestion des plugins".
MediaSPIP est fourni par défaut avec l’ensemble des plugins dits "compatibles", ils ont été testés et intégrés afin de fonctionner parfaitement avec chaque (...)
Sur d’autres sites (11772)
-
openGL ES 2.0 on android , YUV to RGB and Rendering with ffMpeg
14 octobre 2013, par 101110101100111111101101
My renderer dies one or two frames after the video starts showing.
FATAL ERROR 11 : blabla...(Exactly occurs in glDrawElements (Y part))
I think problem is 'glPixelStorei' or 'GL_RGB', 'GL_LUMINANCE' but.. I don't get it.
My rendering way :
-
Decode data that got from network, (SDK Getting-> NDK Decoding), Enqueueing.
-
Dequeueing another threads (of course synchronized) get ready to setup OpenGL ES 2.0.(SDK)
-
When onDrawFrame, onSurfaceCreated, onSurfaceChanged methods are called, it shrink down to NDK. (My Renderer source in NDK will attach below.)
-
Rendering.
As you know, Fragment shader is using for conversion.
My Data is YUV 420p (pix_fmt_YUV420p) (12bit per pixel)Here is my entire source.
I haven't any knowledge about OpenGL ES before, this is first time.
Please let me know what am I do improving performance.
and What am I use parameters in 'glTexImage2D', 'glTexSubImage2D', 'glRenderbufferStorage' ????
GL_LUMINANCE ? GL_RGBA ? GL_RGB ? (GL_LUMINANCE is using now)
// Copies the three decoded YUV planes from the JNI byte arrays into freshly
// malloc'd native buffers (yuv_data_[0] = Y, [1] = U, [2] = V) and clears the
// color buffer. A NULL array yields a dummy plane filled with the byte value 1
// so rendering can proceed without input.
void Renderer::set_draw_frame(JNIEnv* jenv, jbyteArray yData, jbyteArray uData, jbyteArray vData)
{
// Release the previous frame's buffers; NULL them so a failed allocation
// below cannot leave a dangling pointer behind.
for (int i = 0; i < 3; i++) {
if (yuv_data_[i] != NULL) {
free(yuv_data_[i]);
yuv_data_[i] = NULL;
}
}
int YSIZE = -1;
int USIZE = -1;
int VSIZE = -1;
if (yData != NULL) {
YSIZE = (int)jenv->GetArrayLength(yData);
LOG_DEBUG("YSIZE : %d", YSIZE);
yuv_data_[0] = (unsigned char*)malloc(sizeof(unsigned char) * YSIZE);
jenv->GetByteArrayRegion(yData, 0, YSIZE, (jbyte*)yuv_data_[0]);
// BUG FIX: the original re-assigned yuv_data_[0] through a
// reinterpret_cast<unsigned>, which destroys the pointer value and does
// not compile; the cast has been removed.
} else {
// BUG FIX: the original called GetArrayLength(yData) in this branch,
// dereferencing the NULL array. Derive the plane size from the
// configured stream dimensions instead.
YSIZE = stream_yuv_width_[0] * stream_yuv_height_[0];
yuv_data_[0] = (unsigned char*)malloc(sizeof(unsigned char) * YSIZE);
memset(yuv_data_[0], 1, YSIZE);
}
if (uData != NULL) {
USIZE = (int)jenv->GetArrayLength(uData);
LOG_DEBUG("USIZE : %d", USIZE);
yuv_data_[1] = (unsigned char*)malloc(sizeof(unsigned char) * USIZE);
jenv->GetByteArrayRegion(uData, 0, USIZE, (jbyte*)yuv_data_[1]);
} else {
USIZE = YSIZE/4; // 4:2:0 chroma plane is a quarter of the luma plane
yuv_data_[1] = (unsigned char*)malloc(sizeof(unsigned char) * USIZE);
memset(yuv_data_[1], 1, USIZE);
}
if (vData != NULL) {
VSIZE = (int)jenv->GetArrayLength(vData);
LOG_DEBUG("VSIZE : %d", VSIZE);
yuv_data_[2] = (unsigned char*)malloc(sizeof(unsigned char) * VSIZE);
jenv->GetByteArrayRegion(vData, 0, VSIZE, (jbyte*)yuv_data_[2]);
} else {
VSIZE = YSIZE/4;
yuv_data_[2] = (unsigned char*)malloc(sizeof(unsigned char) * VSIZE);
memset(yuv_data_[2], 1, VSIZE);
}
glClearColor(1.0F, 1.0F, 1.0F, 1.0F);
check_gl_error("glClearColor");
glClear(GL_COLOR_BUFFER_BIT);
check_gl_error("glClear");
}
// Renders the current frame: re-uploads the three YUV planes into their
// textures, wires up the vertex/texcoord attributes, then draws a
// full-screen quad to the window surface.
void Renderer::draw_frame()
{
// Binding created FBO
glBindFramebuffer(GL_FRAMEBUFFER, frame_buffer_object_);
check_gl_error("glBindFramebuffer");
// Add program to OpenGL environment
glUseProgram(program_object_);
check_gl_error("glUseProgram");
// Upload each plane (0 = Y, 1 = U, 2 = V) to its own texture unit.
for (int i = 0; i < 3; i++) {
LOG_DEBUG("Success");
//Bind texture
glActiveTexture(GL_TEXTURE0 + i);
check_gl_error("glActiveTexture");
glBindTexture(GL_TEXTURE_2D, yuv_texture_id_[i]);
check_gl_error("glBindTexture");
// Point the i-th sampler uniform at texture unit i.
glUniform1i(yuv_texture_object_[i], i);
check_gl_error("glBindTexture"); // NOTE(review): label should say "glUniform1i"
// NOTE(review): the planes are 1 byte per texel, but they are uploaded as
// GL_RGBA (4 bytes per texel), so GL reads 4x past the end of each buffer —
// a likely cause of the reported crash in glDrawElements. The format should
// presumably be GL_LUMINANCE, matching the data (and the texture's internal
// format must then match too) — confirm.
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, stream_yuv_width_[i], stream_yuv_height_[i], GL_RGBA, GL_UNSIGNED_BYTE, yuv_data_[i]);
check_gl_error("glTexSubImage2D");
}
LOG_DEBUG("Success");
// Load vertex information
glVertexAttribPointer(position_object_, 2, GL_FLOAT, GL_FALSE, kStride, kVertexInformation);
check_gl_error("glVertexAttribPointer");
// Load texture information
// NOTE(review): texture coordinates are passed as non-normalized GL_SHORT
// values (0/1); the shader receives them as 0.0/1.0 floats, which works,
// though GL_FLOAT would be more conventional.
glVertexAttribPointer(texture_position_object_, 2, GL_SHORT, GL_FALSE, kStride, kTextureCoordinateInformation);
check_gl_error("glVertexAttribPointer");
LOG_DEBUG("9");
glEnableVertexAttribArray(position_object_);
check_gl_error("glEnableVertexAttribArray");
glEnableVertexAttribArray(texture_position_object_);
check_gl_error("glEnableVertexAttribArray");
// Back to window buffer
glBindFramebuffer(GL_FRAMEBUFFER, 0);
check_gl_error("glBindFramebuffer");
LOG_DEBUG("Success");
// Draw the Square
// NOTE(review): {0,1,2, 0,2,3} is a triangle-*list* index pattern; drawing
// it as GL_TRIANGLE_STRIP also emits degenerate triangles — confirm intent.
glDrawElements(GL_TRIANGLE_STRIP, 6, GL_UNSIGNED_SHORT, kIndicesInformation);
check_gl_error("glDrawElements");
}
// Creates the offscreen framebuffer object and its attachments, then
// verifies completeness. Logs and bails out if the FBO is incomplete.
void Renderer::setup_render_to_texture()
{
glGenFramebuffers(1, &frame_buffer_object_);
check_gl_error("glGenFramebuffers");
glBindFramebuffer(GL_FRAMEBUFFER, frame_buffer_object_);
check_gl_error("glBindFramebuffer");
glGenRenderbuffers(1, &render_buffer_object_);
check_gl_error("glGenRenderbuffers");
glBindRenderbuffer(GL_RENDERBUFFER, render_buffer_object_);
check_gl_error("glBindRenderbuffer");
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA4, stream_yuv_width_[0], stream_yuv_height_[0]);
check_gl_error("glRenderbufferStorage");
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, render_buffer_object_);
check_gl_error("glFramebufferRenderbuffer");
// NOTE(review): OpenGL ES 2.0 has a single color attachment point; each of
// the three calls below replaces the previous attachment (including the
// renderbuffer above), so only yuv_texture_id_[2] remains attached — confirm
// this is intended.
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, yuv_texture_id_[0], 0);
check_gl_error("glFramebufferTexture2D");
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, yuv_texture_id_[1], 0);
check_gl_error("glFramebufferTexture2D");
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, yuv_texture_id_[2], 0);
check_gl_error("glFramebufferTexture2D");
// BUG FIX: completeness must be queried while our FBO is still bound.
// The original unbound the FBO first, so it actually checked the default
// framebuffer and the test was meaningless.
GLint status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE) {
print_log("renderer.cpp", "setup_graphics", "FBO setting fault.", LOGERROR);
LOG_ERROR("%d\n", status);
glBindFramebuffer(GL_FRAMEBUFFER, 0); // restore default framebuffer before bailing
return;
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
check_gl_error("glBindFramebuffer");
}
// Creates the three plane textures (unit 0 = Y, 1 = U, 2 = V), allocated at
// the maximum expected stream size so frames can later be uploaded with
// glTexSubImage2D without reallocating storage.
void Renderer::setup_yuv_texture()
{
// Use tightly packed data
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
check_gl_error("glPixelStorei");
for (int i = 0; i < 3; i++) {
// Drop any texture left over from a previous surface.
if (yuv_texture_id_[i]) {
glDeleteTextures(1, &yuv_texture_id_[i]);
check_gl_error("glDeleteTextures");
}
glActiveTexture(GL_TEXTURE0+i);
check_gl_error("glActiveTexture");
// Generate texture object
glGenTextures(1, &yuv_texture_id_[i]);
check_gl_error("glGenTextures");
glBindTexture(GL_TEXTURE_2D, yuv_texture_id_[i]);
check_gl_error("glBindTexture");
// Nearest filtering + edge clamp: no mipmaps, and NPOT-safe in ES 2.0.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
check_gl_error("glTexParameteri");
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
check_gl_error("glTexParameteri");
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
check_gl_error("glTexParameterf");
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
check_gl_error("glTexParameterf");
// NOTE(review): glEnable(GL_TEXTURE_2D) is a fixed-function ES 1.x call;
// it is not an accepted capability in OpenGL ES 2.0 and raises
// GL_INVALID_ENUM here.
glEnable(GL_TEXTURE_2D);
check_gl_error("glEnable");
// NOTE(review): GL_RGBA allocates 4 bytes per texel, but the decoded
// planes hold 1 byte per texel; GL_LUMINANCE would match the data actually
// uploaded (and the upload format in draw_frame must match this internal
// format) — confirm.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, maximum_yuv_width_[i], maximum_yuv_height_[i], 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
check_gl_error("glTexImage2D");
}
}
// One-time GL initialization: logs driver strings, builds the YUV->RGB
// shader program, caches attribute/uniform locations, creates the plane
// textures and FBO, and sizes the viewport to the stream resolution.
void Renderer::setup_graphics()
{
print_gl_string("Version", GL_VERSION);
print_gl_string("Vendor", GL_VENDOR);
print_gl_string("Renderer", GL_RENDERER);
print_gl_string("Extensions", GL_EXTENSIONS);
program_object_ = create_program(kVertexShader, kFragmentShader);
if (!program_object_) {
print_log("renderer.cpp", "setup_graphics", "Could not create program.", LOGERROR);
return;
}
position_object_ = glGetAttribLocation(program_object_, "vPosition");
check_gl_error("glGetAttribLocation");
texture_position_object_ = glGetAttribLocation(program_object_, "vTexCoord");
check_gl_error("glGetAttribLocation");
// Sampler uniforms for the three planes: [0] = Y, [1] = U, [2] = V.
yuv_texture_object_[0] = glGetUniformLocation(program_object_, "yTexture");
check_gl_error("glGetUniformLocation");
yuv_texture_object_[1] = glGetUniformLocation(program_object_, "uTexture");
check_gl_error("glGetUniformLocation");
yuv_texture_object_[2] = glGetUniformLocation(program_object_, "vTexture");
check_gl_error("glGetUniformLocation");
setup_yuv_texture();
setup_render_to_texture();
// Viewport tracks the stream size, not the surface size.
glViewport(0, 0, stream_yuv_width_[0], stream_yuv_height_[0]);//736, 480);//1920, 1080);//maximum_yuv_width_[0], maximum_yuv_height_[0]);
check_gl_error("glViewport");
}
// Compiles both shaders and links them into a program.
// Returns the program handle, or 0 on any failure.
// Shader objects are always released before returning: a linked program
// keeps its own reference, so flagging them for deletion is safe.
GLuint Renderer::create_program(const char* vertex_source, const char* fragment_source)
{
GLuint vertexShader = load_shader(GL_VERTEX_SHADER, vertex_source);
if (!vertexShader) {
return 0;
}
GLuint pixelShader = load_shader(GL_FRAGMENT_SHADER, fragment_source);
if (!pixelShader) {
// BUG FIX: the vertex shader leaked on this early-out in the original.
glDeleteShader(vertexShader);
return 0;
}
GLuint program = glCreateProgram();
if (program) {
glAttachShader(program, vertexShader);
check_gl_error("glAttachShader");
glAttachShader(program, pixelShader);
check_gl_error("glAttachShader");
glLinkProgram(program);
/* Get a Status */
GLint linkStatus = GL_FALSE;
glGetProgramiv(program, GL_LINK_STATUS, &linkStatus);
if (linkStatus != GL_TRUE) {
GLint bufLength = 0;
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &bufLength);
if (bufLength) {
char* buf = (char*) malloc(bufLength);
if (buf) {
glGetProgramInfoLog(program, bufLength, NULL, buf);
print_log("renderer.cpp", "create_program", "Could not link program.", LOGERROR);
LOG_ERROR("%s\n", buf);
free(buf);
}
}
glDeleteProgram(program);
program = 0;
}
}
// BUG FIX: the original never deleted either shader object, leaking both
// on every call. After linking they are only needed by the program itself.
glDeleteShader(vertexShader);
glDeleteShader(pixelShader);
return program;
}
// Compiles a single shader of the given type from source.
// Returns the shader handle, or 0 on failure (the failed shader object is
// always deleted before returning).
GLuint Renderer::load_shader(GLenum shaderType, const char* pSource)
{
GLuint shader = glCreateShader(shaderType);
if (shader) {
glShaderSource(shader, 1, &pSource, NULL);
glCompileShader(shader);
/* Get a Status */
GLint compiled = 0;
glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
if (!compiled) {
GLint infoLen = 0;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &infoLen);
if (infoLen) {
char* buf = (char*) malloc(infoLen);
if (buf) {
glGetShaderInfoLog(shader, infoLen, NULL, buf);
// BUG FIX: the original message said "Could not link program".
print_log("renderer.cpp", "load_shader", "Could not compile shader.", LOGERROR);
LOG_ERROR("%d :: %s\n", shaderType, buf);
free(buf);
}
}
// BUG FIX: the original deleted the shader only when an info log was
// available; with an empty log a failed shader leaked and its non-zero
// handle was returned as if compilation had succeeded.
glDeleteShader(shader);
shader = 0;
}
}
return shader;
}
// Per-frame entry point invoked from the JNI layer: stage the newly decoded
// YUV planes, then render them.
void Renderer::onDrawFrame(JNIEnv* jenv, jbyteArray yData, jbyteArray uData, jbyteArray vData)
{
set_draw_frame(jenv, yData, uData, vData);
draw_frame();
}
// Records the incoming stream resolution per plane. Plane 0 is luma (Y) at
// full resolution; planes 1 and 2 are the 4:2:0 chroma planes (U, V),
// subsampled by two in each direction.
void Renderer::setSize(int stream_width, int stream_height) {
const int chroma_width = stream_width / 2;
const int chroma_height = stream_height / 2;
stream_yuv_width_[0] = stream_width;
stream_yuv_height_[0] = stream_height;
for (int plane = 1; plane < 3; ++plane) {
stream_yuv_width_[plane] = chroma_width;
stream_yuv_height_[plane] = chroma_height;
}
}
// Surface-size callback: records the new surface (mobile_*) plane sizes,
// pins the maximum texture allocation at 1920x1080, hard-codes the stream
// size, and rebuilds all GL objects via setup_graphics().
void Renderer::onSurfaceChanged(int width, int height)
{
mobile_yuv_width_[0] = width;
mobile_yuv_width_[1] = width/2;
mobile_yuv_width_[2] = width/2;
mobile_yuv_height_[0] = height;
mobile_yuv_height_[1] = height/2;
mobile_yuv_height_[2] = height/2;
// Textures are allocated at this fixed 1080p maximum (see setup_yuv_texture).
maximum_yuv_width_[0] = 1920;
maximum_yuv_width_[1] = 1920/2;
maximum_yuv_width_[2] = 1920/2;
maximum_yuv_height_[0] = 1080;
maximum_yuv_height_[1] = 1080/2;
maximum_yuv_height_[2] = 1080/2;
// If stream size not setting, default size D1
//if (stream_yuv_width_[0] == 0) {
// NOTE(review): the guard above is commented out, so any size previously
// set through setSize() is unconditionally overwritten with 736x480 here —
// confirm this is intentional.
stream_yuv_width_[0] = 736;
stream_yuv_width_[1] = 736/2;
stream_yuv_width_[2] = 736/2;
stream_yuv_height_[0] = 480;
stream_yuv_height_[1] = 480/2;
stream_yuv_height_[2] = 480/2;
//}
setup_graphics();
return;
}
Here is my Fragment, Vertex source and coordinates:
// Pass-through vertex shader: emits the 2D clip-space position unchanged
// and forwards the texture coordinate to the fragment stage.
static const char kVertexShader[] =
"attribute vec4 vPosition; \n"
"attribute vec2 vTexCoord; \n"
"varying vec2 v_vTexCoord; \n"
"void main() { \n"
"gl_Position = vPosition; \n"
"v_vTexCoord = vTexCoord; \n"
"} \n";
// Fragment shader: samples one byte per plane (the .r channel of each
// sampler) and applies the standard BT.601 full-range YUV -> RGB
// conversion coefficients.
static const char kFragmentShader[] =
"precision mediump float; \n"
"varying vec2 v_vTexCoord; \n"
"uniform sampler2D yTexture; \n"
"uniform sampler2D uTexture; \n"
"uniform sampler2D vTexture; \n"
"void main() { \n"
"float y=texture2D(yTexture, v_vTexCoord).r;\n"
"float u=texture2D(uTexture, v_vTexCoord).r - 0.5;\n"
"float v=texture2D(vTexture, v_vTexCoord).r - 0.5;\n"
"float r=y + 1.13983 * v;\n"
"float g=y - 0.39465 * u - 0.58060 * v;\n"
"float b=y + 2.03211 * u;\n"
"gl_FragColor = vec4(r, g, b, 1.0);\n"
"}\n";
// Full-screen quad corners in clip space, two floats (x, y) per vertex.
static const GLfloat kVertexInformation[] =
{
-1.0f, 1.0f, // TexCoord 0 top left
-1.0f,-1.0f, // TexCoord 1 bottom left
1.0f,-1.0f, // TexCoord 2 bottom right
1.0f, 1.0f // TexCoord 3 top right
};
// Texture coordinate per corner, passed as non-normalized GL_SHORT.
static const GLshort kTextureCoordinateInformation[] =
{
0, 0, // TexCoord 0 top left
0, 1, // TexCoord 1 bottom left
1, 1, // TexCoord 2 bottom right
1, 0 // TexCoord 3 top right
};
// 0 = attribute arrays are tightly packed.
static const GLuint kStride = 0;//COORDS_PER_VERTEX * 4;
// NOTE(review): declared GLshort but drawn with GL_UNSIGNED_SHORT in
// glDrawElements — GLushort would match. Also, {0,1,2, 0,2,3} is a
// triangle-list layout, yet draw_frame renders it as GL_TRIANGLE_STRIP.
static const GLshort kIndicesInformation[] =
{
0, 1, 2,
0, 2, 3
}; -
-
slow avcodec_decode_video2, ffmpeg on android
5 janvier 2014, par John Simpson
I am developing a player on Android using FFmpeg. However, I found that avcodec_decode_video2 is very slow. Sometimes it takes about 0.1, or even 0.2, seconds to decode a frame from a video with 1920 × 1080 resolution.
How can I improve the speed of avcodec_decode_video2() ?
-
Error using FFMPEG to convert each input image into H264 compiling in Visual Studio running in MevisLab
21 février 2014, par user3012914
I am creating a ML Module in the MevisLab Framework. I am using FFMPEG to convert each image I get into an H264 video and save it after I get all the frames. But unfortunately I have a problem allocating the output buffer size. The application crashes when I include this in my code. If I do not include it, the output file size is just 4 KB and nothing is stored in it.
I am also not very sure whether it is correct way of getting the HBitmap into the Encoder. Would be great to have your suggestions.
My Code :
// --- Describe a 24-bit bottom-up BGR DIB and create it for frame capture ---
BITMAPINFO bitmapInfo;
HDC hdc;
ZeroMemory(&bitmapInfo, sizeof(bitmapInfo));
BITMAPINFOHEADER &bitmapInfoHeader = bitmapInfo.bmiHeader;
bitmapInfoHeader.biSize = sizeof(bitmapInfoHeader);
bitmapInfoHeader.biWidth = _imgWidth;
// Positive biHeight => bottom-up DIB (first row in memory is the bottom row).
bitmapInfoHeader.biHeight = _imgHeight;
bitmapInfoHeader.biPlanes = 1;
bitmapInfoHeader.biBitCount = 24;
bitmapInfoHeader.biCompression = BI_RGB;
// Row size rounded up to a DWORD (4-byte) boundary, times the row count.
bitmapInfoHeader.biSizeImage = ((bitmapInfoHeader.biWidth * bitmapInfoHeader.biBitCount / 8 + 3) & 0xFFFFFFFC) * bitmapInfoHeader.biHeight;
bitmapInfoHeader.biXPelsPerMeter = 10000;
bitmapInfoHeader.biYPelsPerMeter = 10000;
bitmapInfoHeader.biClrUsed = 0;
bitmapInfoHeader.biClrImportant = 0;
//RGBQUAD* Ref = new RGBQUAD[_imgWidth,_imgHeight];
HDC hdcscreen = GetDC(0);
hdc = CreateCompatibleDC(hdcscreen);
ReleaseDC(0, hdcscreen);
// _bits receives a pointer to the pixel memory owned by the DIB section.
// NOTE(review): hdc is created here but no matching DeleteDC appears in the
// cleanup code shown — confirm it is released elsewhere.
_hbitmap = CreateDIBSection(hdc, (BITMAPINFO*) &bitmapInfoHeader, DIB_RGB_COLORS, &_bits, NULL, NULL);To get the BitMap I use the above code. Then I allocate the Codec Context as followed
// --- AVCodecContext configuration for the H.264 encoder ---
c->bit_rate = 400000;
// resolution must be a multiple of two
c->width = 1920;
c->height = 1080;
// frames per second
frame_rate = _framesPerSecondFld->getIntValue();
//AVRational rational = {1,10};
//c->time_base = (AVRational){1,25};
//c->time_base = (AVRational){1,25};
c->gop_size = 10; // emit one intra frame every ten frames
c->max_b_frames = 1;
c->keyint_min = 1; //minimum GOP size
c->time_base.num = 1; // framerate numerator
c->time_base.den = _framesPerSecondFld->getIntValue();
c->i_quant_factor = (float)0.71; // qscale factor between P and I frames
// NOTE(review): libx264 does not accept AV_PIX_FMT_RGB32; the code later
// converts frames to YUV420P with sws_scale, so this should presumably be
// AV_PIX_FMT_YUV420P — confirm, as a mismatched pix_fmt makes
// avcodec_open2/encode fail or produce empty output.
c->pix_fmt = AV_PIX_FMT_RGB32;
std::string msg;
msg.append("Context is stored");
_messageFld->setStringValue(msg.c_str());I create the Bitmap Image as followed from the input
// --- Copy the ML input image into the bottom-up BGR DIB (_bits) ---
PagedImage *inImg = getUpdatedInputImage(0);
ML_CHECK(inImg);
ImageVector imgExt = inImg->getImageExtent();
// BUG FIX: the original wrote `imgExt.x = _imgWidth` (assignment, not
// comparison), which clobbered the extent and made the test pass for any
// nonzero width.
if ((imgExt.x == _imgWidth) && (imgExt.y == _imgHeight))
{
if (((imgExt.x % 4)==0) && ((imgExt.y % 4) == 0))
{
// read out input image and write output image into video
// get input image as an array
void* imgData = NULL;
SubImageBox imageBox(imgExt); // get the whole image
getTile(inImg, imageBox, MLuint8Type, &imgData);
iData = (MLuint8*)imgData;
int r = 0; int g = 0;int b = 0;
// since we have only images with
// a z-ext of 1, we can compute the c stride as follows
int cStride = _imgWidth * _imgHeight;
// BUG FIX: `offset` was declared uint8_t, truncating `_imgWidth * y` to
// 8 bits for any realistic image size and scrambling the copied rows.
int offset = 0;
// pointer into the bitmap that is
// used to write images into the avi
UCHAR* dst = (UCHAR*)_bits;
for (int y = _imgHeight-1; y >= 0; y--)
{ // reversely scan the image. if y-rows of DIB are set in normal order, no compression will be available.
offset = _imgWidth * y;
for (int x = 0; x < _imgWidth; x++)
{
if (_isGreyValueImage)
{
r = iData[offset + x];
*dst++ = (UCHAR)r;
*dst++ = (UCHAR)r;
*dst++ = (UCHAR)r;
}
else
{
b = iData[offset + x]; // windows bitmap need reverse order: bgr instead of rgb
g = iData[offset + x + cStride ];
r = iData[offset + x + cStride + cStride];
*dst++ = (UCHAR)r;
*dst++ = (UCHAR)g;
*dst++ = (UCHAR)b;
}
// alpha channel in input image is ignored
}
}
Then I add it to the Encoder as followed as write as H264
// --- Convert the BGR32 bitmap to YUV420P and encode/write one H.264 frame ---
in_width = c->width;
in_height = c->height;
out_width = c->width;
out_height = c->height;
ibytes = avpicture_get_size(PIX_FMT_BGR32, in_width, in_height);
obytes = avpicture_get_size(PIX_FMT_YUV420P, out_width, out_height);
outbuf_size = 100000 + c->width*c->height*(32>>3); // allocate output buffer
// NOTE(review): static_cast below is missing its template argument (probably
// static_cast<uint8_t*> lost during HTML extraction) — this line does not
// compile as written.
outbuf = static_cast(malloc(outbuf_size));
// NOTE(review): this tests the *size* computed above, not whether malloc
// succeeded; checking `outbuf` was probably intended.
if(!obytes)
{
std::string msg;
msg.append("Bytes cannot be allocated");
_messageFld->setStringValue(msg.c_str());
}
else
{
std::string msg;
msg.append("Bytes allocation done");
_messageFld->setStringValue(msg.c_str());
}
//create buffer for the output image
inbuffer = (uint8_t*)av_malloc(ibytes);
outbuffer = (uint8_t*)av_malloc(obytes);
// NOTE(review): this overwrites (and leaks) the av_malloc'd inbuffer; `dst`
// was also advanced to the end of the bitmap by the copy loop above, so
// `_bits` was presumably intended — confirm.
inbuffer = (uint8_t*)dst;
//create ffmpeg frame structures. These do not allocate space for image data,
//just the pointers and other information about the image.
AVFrame* inpic = avcodec_alloc_frame();
AVFrame* outpic = avcodec_alloc_frame();
//this will set the pointers in the frame structures to the right points in
//the input and output buffers.
avpicture_fill((AVPicture*)inpic, inbuffer, PIX_FMT_BGR32, in_width, in_height);
avpicture_fill((AVPicture*)outpic, outbuffer, PIX_FMT_YUV420P, out_width, out_height);
// NOTE(review): av_image_alloc replaces the plane pointers just set by
// avpicture_fill, so outbuffer is never used and leaks — one of the two
// allocation strategies should be removed.
av_image_alloc(outpic->data, outpic->linesize, c->width, c->height, c->pix_fmt, 1);
inpic->data[0] += inpic->linesize[0]*(_imgHeight-1); // flipping frame
inpic->linesize[0] = -inpic->linesize[0];
if(!inpic)
{
std::string msg;
msg.append("Image is empty");
_messageFld->setStringValue(msg.c_str());
}
else
{
std::string msg;
msg.append("Picture has allocations");
_messageFld->setStringValue(msg.c_str());
}
//create the conversion context
fooContext = sws_getContext(in_width, in_height, PIX_FMT_BGR32, out_width, out_height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
//perform the conversion
sws_scale(fooContext, inpic->data, inpic->linesize, 0, in_height, outpic->data, outpic->linesize);
// NOTE(review): the actual per-frame encode call is commented out, so
// out_size is read uninitialized below and no real frame data is written —
// very likely why the output file stays ~4 KB.
//out_size = avcodec_encode_video(c, outbuf,outbuf_size, outpic);
if(!out_size)
{
std::string msg;
msg.append("Outsize is not valid");
_messageFld->setStringValue(msg.c_str());
}
else
{
std::string msg;
msg.append("Outsize is valid");
_messageFld->setStringValue(msg.c_str());
}
fwrite(outbuf, 1, out_size, f);
// NOTE(review): this tests the address of the fwrite function itself, which
// is never null, so the error branch can never run — the intended check was
// presumably on fwrite's return value.
if(!fwrite)
{
std::string msg;
msg.append("Frames couldnt be written");
_messageFld->setStringValue(msg.c_str());
}
else
{
std::string msg;
msg.append("Frames written to the file");
_messageFld->setStringValue(msg.c_str());
}
// for (;out_size; i++)
// {
// Flush: passing a NULL frame asks the encoder for any delayed frames.
out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
std::string msg;
msg.append("Writing Frames");
_messageFld->setStringValue(msg.c_str());// encode the delayed frames
_numFramesFld->setIntValue(_numFramesFld->getIntValue()+1);
fwrite(outbuf, 1, out_size, f);
// }
outbuf[0] = 0x00;
outbuf[1] = 0x00; // add sequence end code to have a real mpeg file
outbuf[2] = 0x01;
outbuf[3] = 0xb7;
fwrite(outbuf, 1, 4, f);
}Then close and clean the Image Buffer and file
// --- End of recording: report status, then free GDI and FFmpeg resources ---
ML_TRACE_IN("MovieCreator::_endRecording()")
if (_numFramesFld->getIntValue() == 0)
{
_messageFld->setStringValue("Empty movie, nothing saved.");
}
else
{
_messageFld->setStringValue("Movie written to disk.");
_numFramesFld->setIntValue(0);
if (_hbitmap)
{
DeleteObject(_hbitmap);
}
if (c != NULL)
{
// NOTE(review): the av_malloc'd inbuffer and the av_image_alloc'd plane
// data from the encode path are not freed here — presumably a leak;
// confirm against the allocation code.
av_free(outbuffer);
av_free(inpic);
av_free(outpic);
fclose(f);
avcodec_close(c); // freeing memory
free(outbuf);
av_free(c);
}
}}
I think the Main Problem is over here !!
//out_size = avcodec_encode_video(c, outbuf,outbuf_size, outpic);