Newest 'ffmpeg' Questions - Stack Overflow
Les articles publiés sur le site
-
glTexSubImage2D does not update texture
27 février 2014, par user3177342 — I am writing a video program with ffmpeg and OpenGL. I generate 2 textures and swap them when painting. The code of DrawFirstFrame, which is invoked once, is:
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet); // Did we get a video frame? if(frameFinished) { f++; pFrameRGB=avcodec_alloc_frame(); //Allocate memory for the raw data we get when converting. uint8_t *buffer; int numBytes; numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height); buffer = (uint8_t *) av_malloc(numBytes*sizeof(uint8_t)); //Associate frame with our buffer avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height); pFrameRGB->linesize[0]= pCodecCtx->width*3; // in case of rgb4 one plane struct SwsContext* swsContext = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL); if (swsContext == NULL) { fprintf(stderr, "Cannot initialize the conversion context!\n"); exit(1); }; sws_scale(swsContext, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize); current_Vtex=&VideoTexture; glGenTextures(1, &VideoTexture); glBindTexture(GL_TEXTURE_2D, VideoTexture); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // glTexImage2D(GL_TEXTURE_2D, 0, 3, pCodecCtx->width, pCodecCtx->height, 0, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, pCodecCtx->width, pCodecCtx->height, 0, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]); glGenTextures(1, &VideoTexture2); glBindTexture(GL_TEXTURE_2D, VideoTexture2); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // glTexImage2D(GL_TEXTURE_2D, 0, 3, pCodecCtx->width, pCodecCtx->height, 0, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]); glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, pCodecCtx->width, pCodecCtx->height, 0, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]); //glDeleteTextures(1, &VideoTexture); GLenum err; 
while ((err = glGetError()) != GL_NO_ERROR) { cerr << "OpenGL error: " << err << endl; } av_free(buffer); } } // Free the packet that was allocated by av_read_frame av_free_packet(&packet); if (f>1) break; }
then in a cycle
if (token.IsSameAs("frame")) { this->panel->DrawNextFrame(); wxPaintEvent evt; this->panel->GetEventHandler()->AddPendingEvent(evt); GetThread()->Sleep(500); token=tkz.GetNextToken(); token.ToLong(&lastframe); wxCriticalSectionLocker lock(*CSect); this->fram->Clear(); }
DrawNextFrame() code is here, trying to update the textures generated in DrawFirstFrame
void BasicGLPane::DrawNextFrame() { int f=1; while(av_read_frame(pFormatCtx, &packet)>=0) { // Is this a packet from the video stream? if(packet.stream_index==videoStream) { // Decode video frame avcodec_decode_video2(pCodecCtx, pFrame, &FrameFinished, &packet); // Did we get a video frame? if(FrameFinished) { f++; pFrameRGB=avcodec_alloc_frame(); //Allocate memory for the raw data we get when converting. uint8_t *buffer; int numBytes; numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height); buffer = (uint8_t *) av_malloc(numBytes*sizeof(uint8_t)); //Associate frame with our buffer avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height); pFrameRGB->linesize[0]= pCodecCtx->width*3; // in case of rgb4 one plane struct SwsContext* swsContext = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL); if (swsContext == NULL) { fprintf(stderr, "Cannot initialize the conversion context!\n"); exit(1); }; sws_scale(swsContext, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize); //glGenTextures(1, &VideoTexture); if ((*current_Vtex)==VideoTexture) current_Vtex = &VideoTexture2;else current_Vtex = &VideoTexture; glBindTexture(GL_TEXTURE_2D, (*current_Vtex)); glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR ); glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR ); glPixelStorei(GL_UNPACK_ALIGNMENT, 1); glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]); //glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, pCodecCtx->width, pCodecCtx->height, 0, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]); //glDeleteTextures(1, &VideoTexture); GLenum err; while ((err = glGetError()) != GL_NO_ERROR) { cerr << "OpenGL error: " << err << endl; } } } // Free the packet that was allocated by 
av_read_frame av_free_packet(&packet); if (f>1) break; } av_free(pFrameRGB); Refresh(); }
Render Function renders (*current_Vtex) in a quad.
if ((*current_Vtex) != 0) { GLfloat z = 0; glEnable(GL_TEXTURE_2D); glPushMatrix(); glBindTexture(GL_TEXTURE_2D, (*current_Vtex)); glBegin(GL_QUADS); glTexCoord2i(0, 0); glVertex3f(0.0f, 0.0f, z); glTexCoord2i(1, 0); glVertex3f(Width, 0,z); glTexCoord2i(1, 1); glVertex3f(Width, Height,z); glTexCoord2i(0, 1); glVertex3f(0, Height,z); glEnd(); glPopMatrix(); glDisable(GL_TEXTURE_2D); } wxCriticalSectionLocker lock(*CSect); this->fram->render(); glFlush(); SwapBuffers();
The problem is that the picture on screen doesn't update (why?); it only shows what was generated in DrawFirstFrame. I know for certain that the program swaps textures when painting.
-
ffmpeg php encoding trace encode failures
27 février 2014, par user1575946 — I am executing ffmpeg from PHP's exec function:
function convertToMP4($sourceFile,$outputfile) { $Command="ffmpeg -i ".$sourceFile.".".$outputfile." 2>&1"; exec($Command,$result,$status); return $result; }
I am getting result of the entire command to an array
$result.
My question is: how can I find out exactly whether a video failed to encode?
Thank you.
-
Repair mpeg files using ffmpeg
27 février 2014, par rsdrsd — I have a bunch of mpeg files which are somehow invalid or incorrect. I can play the files in different media players, but when I upload the files they should automatically be converted. It takes a very long time to create screenshots, and it creates about 10000 screenshots instead of the expected 50. The command is part of an automatic conversion app. With mp4 and other files it works great, but with mpeg it doesn't work as expected. The creation of screenshots eats up all memory and processor power.
For creating screenshots I have tried the following:
ffmpeg -y -i /input/file.mpeg -f image2 -aspect 16:9 -bt 20M -vsync passthrough -vf select='isnan(prev_selected_t)+gte(t-prev_selected_t\,10)' /output/file-%05d.jpg
this just creates 2 screenshots while I expect 50 or so. The following command:
ffmpeg -y -i /input/file.mpeg -f image2 -vf fps=fps=1/10 -aspect 16:9 -vsync passthrough -bt 20M /output/file-%05d.jpg
gave me errors about buffers:
ffmpeg version N-39361-g1524b0f Copyright (c) 2000-2014 the FFmpeg developers built on Feb 26 2014 23:46:40 with gcc 4.4.7 (GCC) 20120313 (Red Hat 4.4.7-4) configuration: --prefix=/home/example/ffmpeg_build --extra-cflags=-I/home/example/ffmpeg_build/include --extra-ldflags=-L/home/example/ffmpeg_build/lib --bindir=/home/example/bin --extra-libs=-ldl --enable-gpl --enable-nonfree --enable-libfdk_aac --enable-libmp3lame --enable-libopus --enable-libvorbis --enable-libvpx --enable-libx264 --enable-libfreetype --enable-libspeex --enable-libtheora libavutil 52. 66.100 / 52. 66.100 libavcodec 55. 52.102 / 55. 52.102 libavformat 55. 33.100 / 55. 33.100 libavdevice 55. 10.100 / 55. 10.100 libavfilter 4. 2.100 / 4. 2.100 libswscale 2. 5.101 / 2. 5.101 libswresample 0. 18.100 / 0. 18.100 libpostproc 52. 3.100 / 52. 3.100 [mp3 @ 0x200d7c0] Header missing [mpegts @ 0x2008a60] DTS discontinuity in stream 0: packet 6 with DTS 34185, packet 7 with DTS 8589926735 [mpegts @ 0x2008a60] Invalid timestamps stream=0, pts=7157, dts=8589932741, size=150851 Input #0, mpegts, from '/home/example/app/uploads/21.mpeg': Duration: 00:03:14.75, start: 0.213000, bitrate: 26112 kb/s Program 1 Stream #0:0[0x3e9]: Video: mpeg2video (Main) ([2][0][0][0] / 0x0002), yuv420p(tv), 1440x1080 [SAR 4:3 DAR 16:9], max. 25000 kb/s, 29.97 fps, 60 tbr, 90k tbn, 59.94 tbc Stream #0:1[0x3ea]: Audio: mp2 ([3][0][0][0] / 0x0003), 48000 Hz, stereo, s16p, 384 kb/s [swscaler @ 0x1ff9860] deprecated pixel format used, make sure you did set range correctly Output #0, image2, to '/home/example/app/uploads/21-%05d.jpg': Metadata: encoder : Lavf55.33.100 Stream #0:0: Video: mjpeg, yuvj420p, 1440x1080 [SAR 4:3 DAR 16:9], q=2-31, 200 kb/s, 90k tbn, 0.10 tbc Stream mapping: Stream #0:0 -> #0:0 (mpeg2video -> mjpeg) Press [q] to stop, [?] 
for help [mpegts @ 0x2008a60] Invalid timestamps stream=0, pts=7157, dts=8589932741, size=150851 [output stream 0:0 @ 0x1ff2ba0] 100 buffers queued in output stream 0:0, something may be wrong. [output stream 0:0 @ 0x1ff2ba0] 1000 buffers queued in output stream 0:0, something may be wrong.
and it creates about 10000 screenshots while I expect 50.
Now, I have read somewhere about how to repair some broken files. For this I have the following command:
ffmpeg -y -i input.mpeg -codec:v copy -codec:a copy output.mpeg
This, however, creates a somewhat smaller file. If I run the same command on the output again, I would expect it to create the same file, but the following command
ffmpeg -y -i output.mpeg -codec:v copy -codec:a copy output2.mpeg
returns a file which is much smaller and runs for only a few seconds while the original was about 3 minutes.
If I run the "repair" command on a non-broken mpeg file, the first run produces a much smaller file. With ffprobe I checked what changed, but the only thing that changes is MPEG-TS to MPEG-PS.
If I run the command over an mp4 file, it results in exactly the same file, as expected. Does someone have a clue about what is going wrong? It has been boggling me for about two days now and I really have no clue. Or does someone have a good suggestion on how to extract screenshots every 10 seconds without creating too many screenshots and eating up all memory and processor power?
-
Debian Wheezy ffmpeg version 1.2.5 and OpenCV
27 février 2014, par user3135427 — I recently installed "ffmpeg version 1.2.5" from "deb ftp://ftp.deb-multimedia.org wheezy-backports main" on my Debian Wheezy.
I wrote some code with OpenCV and now I get the following error when I try to open a video.
[NULL @ 0x21e2ec0] Value 4613937818241073152.000000 for parameter 'thread_type' out of range [NULL @ 0x21e2ec0] Value -4616189618054758400.000000 for parameter 'request_sample_fmt' out of range [mov,mp4,m4a,3gp,3g2,mj2 @ 0x282d580] decoding for stream 0 failed [mov,mp4,m4a,3gp,3g2,mj2 @ 0x282d580] Could not find codec parameters (Video: h264 (avc1 / 0x31637661), 23069 kb/s) [IMGUTILS @ 0x7ffff1da3fd0] Picture size 0x0 is invalid [IMGUTILS @ 0x7ffff1da3fe0] Picture size 0x0 is invalid
/////// SOLUTION /////////
I speculated that the problem started after I added the following two sources to my /etc/apt/sources.list file on Debian Wheezy. I did this to procure a newer version of FFmpeg.
deb ftp:// ftp.deb-multimedia . org wheezy main non-free deb ftp:// ftp.deb-multimedia . org wheezy-backports main
It was then that all my code written using OpenCV started to break. I had a hunch it was my recent upgrade, but I was not sure until I read the following.
“Make sure proper versions of ffmpeg or gstreamer is installed. Sometimes, it is a headache to work with Video Capture mostly due to wrong installation of ffmpeg/gstreamer.” ~ http: // opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html
[ SOLUTION ]
This search will tell you what installed packages come from Debian Multimedia:
aptitude search '~i ?origin(Unofficial Multimedia Packages)'
And this command removes them:
aptitude purge '~i ?origin(Unofficial Multimedia Packages)'
Remove the deb-multimedia.org repository from the sources.list file. Install FFmpeg and OpenCV and any additional packages that you need, since much was purged from your system during the downgrade.
sudo aptitude install python-opencv ffmpeg
-
Getting realtime output of ffmpeg with pexpect
27 février 2014, par Shuman — Hi, I'm trying to get the progress info when calling the ffmpeg command-line tool with pexpect. I can extract the progress with a regex, but the problem is that after a few seconds, maybe 10 or 20, pexpect stops getting new printouts from ffmpeg, even though it is still running — I saw the output file size growing. Am I doing something wrong?
cmd = 'ffmpeg -i rtmp://xxxx -c copy -absf aac_adtstoasc /usr/tmp/tmp/out.mp4' import os import re import time import subprocess import pexpect thread = pexpect.spawn(cmd) print 'started' reo = re.compile("""\S+\s+(?P\d+) # frame \s\S+\s+(?P
\d+) # fps \sq=(?P \S+) # q \s\S+\s+(?P
\S+) # size \stime=(?P the output is
started Duration: 03:55:42.00 frame= 74 fps= 55 q=-1.0 size= 984kB time=00:00:06.17 bitrate=1304.5 frame= 89 fps= 43 q=-1.0 size= 1197kB time=00:00:07.43 bitrate=1319.8 frame= 113 fps= 41 q=-1.0 size= 1407kB time=00:00:09.33 bitrate=1234.8 frame= 125 fps= 32 q=-1.0 size= 1613kB time=00:00:10.35 bitrate=1275.6 frame= 132 fps= 29 q=-1.0 size= 1705kB time=00:00:10.95 bitrate=1274.7 frame= 135 fps= 26 q=-1.0 size= 1825kB time=00:00:11.23 bitrate=1330.6 frame= 140 fps= 24 q=-1.0 size= 2022kB time=00:00:11.60 bitrate=1426.5 frame= 140 fps= 21 q=-1.0 size= 2097kB time=00:00:11.70 bitrate=1467.7 frame= 142 fps= 19 q=-1.0 size= 2224kB time=00:00:11.79 bitrate=1544.4 frame= 143 fps= 17 q=-1.0 size= 2447kB time=00:00:11.98 bitrate=1672.8 frame= 145 fps= 16 q=-1.0 size= 2687kB time=00:00:12.07 bitrate=1822.8 frame= 155 fps= 15 q=-1.0 size= 2780kB time=00:00:12.95 bitrate=1757.6 frame= 163 fps= 15 q=-1.0 size= 2940kB time=00:00:13.65 bitrate=1764.2 frame= 167 fps= 14 q=-1.0 size= 3062kB time=00:00:13.83 bitrate=1812.3 frame= 168 fps= 13 q=-1.0 size= 3149kB time=00:00:14.02 bitrate=1839.4 frame= 190 fps= 14 q=-1.0 size= 3322kB time=00:00:15.78 bitrate=1723.6 frame= 213 fps= 15 q=-1.0 size= 3481kB time=00:00:17.78 bitrate=1603.4 frame= 235 fps= 16 q=-1.0 size= 3671kB time=00:00:19.59 bitrate=1534.3 frame= 244 fps= 16 q=-1.0 size= 3790kB time=00:00:20.29 bitrate=1530.0 frame= 256 fps= 16 q=-1.0 size= 3909kB time=00:00:21.31 bitrate=1502.1 frame= 276 fps= 16 q=-1.0 size= 4029kB time=00:00:22.94 bitrate=1438.8 frame= 299 fps= 17 q=-1.0 size= 4177kB time=00:00:24.93 bitrate=1372.1 frame= 339 fps= 19 q=-1.0 size= 4388kB time=00:00:28.28 bitrate=1270.9 frame= 363 fps= 19 q=-1.0 size= 4557kB time=00:00:30.18 bitrate=1236.8 frame= 405 fps= 20 q=-1.0 size= 4587kB time=00:00:33.76 bitrate=1113.1 frame= 421 fps= 20 q=-1.0 size= 4598kB time=00:00:35.15 bitrate=1071.4
it stops here but the code is still running. the file size is still growing and the code didn't exit, so it's not because of the timeout bug in pexpect.
Also, if the user presses Ctrl+C to terminate the main program, ffmpeg still runs in the background — how do I tell it to exit as well?
Edit: I found that the culprit might be that ffmpeg is not printing out all the info after 10 lines or so; it only updates the parts of the display that need to be changed, so the regex is not matching. What to do now — make 6 separate regexes?
Edit 2: it seems that if I use this regex, it works...
reo = re.compile("""frame=\s*(?P\d+) # frame \sfps=\s*(?P
\d+) # fps \sq=\s*(?P \S+) # q \ssize=\s*(?P
\S+) # size \stime=\s*(?P It's saying to only match when the frame number is updated (maybe this implies that all the other text is also updated?)