Newest 'ffmpeg' Questions - Stack Overflow
Articles published on the site
-
ffmpeg : change alpha channel of a showwaves filter [on hold]
15 February 2014, by Znuff
I've been trying to figure this issue out for a few hours and I can't seem to find any solution.
I'm creating a video from an .mp3 and some images with the following command:
ffmpeg.exe -y -i temp\audio.mp3 -loop 1 -i Bokeh\frame-%03d.png -r 25 -filter_complex "[0:a] showwaves=size=1280x100:mode=line:r=25[wave];[1:v][wave] overlay=y=H-h:eval=init[canvas];[canvas]drawtext=fontfile='./tools/impact.ttf':fontsize=42:text='ORGANIKISMNESS':x=20:y=(h-170-text_h*2.20):fontcolor=white:shadowy=2:shadowx=2:shadowcolor=black,drawtext=fontfile='./tools/impact.ttf':fontsize=42:text='RETURN TO THE SOURCE PT.2 (ORGANIKISMNESS REMIX)':x=20:y=(h-170-text_h):fontcolor=white:shadowy=2:shadowx=2:shadowcolor=black" -shortest -acodec copy -vcodec libx264 -pix_fmt yuv420p -preset ultrafast -tune stillimage -crf 19 -movflags faststart "videos\Organikismness-Return to the Source Pt.2 (Organikismness Remix).mp4"
I'm trying to give the [wave] (showwaves) output some sort of alpha channel, so that it is slightly transparent when overlaid on the rest of the video later.
So far I've tried the blend filter, but it complains that the sources are not the same size (one is 1280x720, the showwaves source is 1280x100). I also tried the colorchannelmixer filter, but I couldn't figure out how it should work. Does anyone have any idea how to do this?
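One approach that might work (an untested sketch, not a confirmed answer): give the showwaves branch a pixel format with an alpha plane, then scale that alpha with colorchannelmixer's aa option before the overlay, for example:

[0:a] showwaves=size=1280x100:mode=line:r=25, format=rgba, colorchannelmixer=aa=0.5 [wave];
[1:v][wave] overlay=y=H-h:eval=init [canvas]; ...

Here aa=0.5 halves the wave's opacity; the rest of the filter graph (the drawtext chain) would stay as in the command above.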
-
ffmpeg - creating manifest file for smooth streaming
14 February 2014, by user2928842
I'm new to ffmpeg (and video encoding), and I'm trying to understand how to create a manifest file for IIS Smooth Streaming. I've tried creating the .ismv file, but that did not produce the manifest. I saw the ismindex tool under the ffmpeg tools folder, but didn't quite understand how to use it.
thanks
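For reference, ismindex lives in the tools/ directory of the ffmpeg source tree and has to be built from there; it is then pointed at an already fragmented .ismv/.isma file and writes the .ism/.ismc manifests. A rough sketch (file names are placeholders, untested):

# create a fragmented Smooth Streaming file (assuming the input is already H.264/AAC)
ffmpeg -i input.mp4 -c copy -f ismv video.ismv

# build the tool inside the ffmpeg source tree, then generate video.ism / video.ismc
make tools/ismindex
tools/ismindex -n video video.ismv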
-
Muxing with libav
14 February 2014, by LordDoskias
I have a program which is supposed to demux an input MPEG-TS, transcode the MPEG-2 video into H.264 and then mux the audio alongside the transcoded video. When I open the resulting muxed file with VLC I get neither audio nor video. Here is the relevant code.
My main worker loop is as follows:
void *writer_thread(void *thread_ctx) {
    struct transcoder_ctx_t *ctx = (struct transcoder_ctx_t *) thread_ctx;
    AVStream *video_stream = NULL, *audio_stream = NULL;
    AVFormatContext *output_context = init_output_context(ctx, &video_stream, &audio_stream);
    struct mux_state_t mux_state = {0};

    //from omxtx
    mux_state.pts_offset = av_rescale_q(ctx->input_context->start_time, AV_TIME_BASE_Q,
                                        output_context->streams[ctx->video_stream_index]->time_base);

    //write stream header if any
    avformat_write_header(output_context, NULL);

    //do not start doing anything until we get an encoded packet
    pthread_mutex_lock(&ctx->pipeline.video_encode.is_running_mutex);
    while (!ctx->pipeline.video_encode.is_running) {
        pthread_cond_wait(&ctx->pipeline.video_encode.is_running_cv, &ctx->pipeline.video_encode.is_running_mutex);
    }

    while (!ctx->pipeline.video_encode.eos || !ctx->processed_audio_queue->queue_finished) {
        //FIXME a memory barrier is required here so that we don't race
        //on above variables

        //fill a buffer with video data
        OERR(OMX_FillThisBuffer(ctx->pipeline.video_encode.h, omx_get_next_output_buffer(&ctx->pipeline.video_encode)));

        write_audio_frame(output_context, audio_stream, ctx); //write full audio frame
        //FIXME no guarantee that we have a full frame per packet?
        write_video_frame(output_context, video_stream, ctx, &mux_state); //write full video frame
        //encoded_video_queue is being filled by the previous command
    }

    av_write_trailer(output_context);

    //free all the resources
    avcodec_close(video_stream->codec);
    avcodec_close(audio_stream->codec);
    /* Free the streams. */
    for (int i = 0; i < output_context->nb_streams; i++) {
        av_freep(&output_context->streams[i]->codec);
        av_freep(&output_context->streams[i]);
    }

    if (!(output_context->oformat->flags & AVFMT_NOFILE)) {
        /* Close the output file. */
        avio_close(output_context->pb);
    }

    /* free the stream */
    av_free(output_context);
    free(mux_state.pps);
    free(mux_state.sps);
}
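One low-effort check worth adding while debugging (a sketch, not a fix): avformat_write_header and av_write_trailer both return error codes, and if the MPEG-TS muxer rejects the header the whole file comes out unplayable even though every subsequent write appears to succeed:

int ret = avformat_write_header(output_context, NULL);
if (ret < 0) {
    char errbuf[128];
    av_strerror(ret, errbuf, sizeof(errbuf));
    fprintf(stderr, "[DEBUG] avformat_write_header failed: %s\n", errbuf);
}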
The code for initialising libav output context is this:
static AVFormatContext *init_output_context(const struct transcoder_ctx_t *ctx, AVStream **video_stream, AVStream **audio_stream) {
    AVFormatContext *oc;
    AVOutputFormat *fmt;
    AVStream *input_stream, *output_stream;
    AVCodec *c;
    AVCodecContext *cc;
    int audio_copied = 0; //copy just 1 stream

    fmt = av_guess_format("mpegts", NULL, NULL);
    if (!fmt) {
        fprintf(stderr, "[DEBUG] Error guessing format, dying\n");
        exit(199);
    }

    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "[DEBUG] Error allocating context, dying\n");
        exit(200);
    }

    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", ctx->output_filename);
    oc->debug = 1;
    oc->start_time_realtime = ctx->input_context->start_time;
    oc->start_time = ctx->input_context->start_time;
    oc->duration = 0;
    oc->bit_rate = 0;

    for (int i = 0; i < ctx->input_context->nb_streams; i++) {
        input_stream = ctx->input_context->streams[i];
        output_stream = NULL;

        if (input_stream->index == ctx->video_stream_index) {
            //copy stuff from input video index
            c = avcodec_find_encoder(CODEC_ID_H264);
            output_stream = avformat_new_stream(oc, c);
            *video_stream = output_stream;
            cc = output_stream->codec;
            cc->width = input_stream->codec->width;
            cc->height = input_stream->codec->height;
            cc->codec_id = CODEC_ID_H264;
            cc->codec_type = AVMEDIA_TYPE_VIDEO;
            cc->bit_rate = ENCODED_BITRATE;
            cc->time_base = input_stream->codec->time_base;

            output_stream->avg_frame_rate = input_stream->avg_frame_rate;
            output_stream->r_frame_rate = input_stream->r_frame_rate;
            output_stream->start_time = AV_NOPTS_VALUE;
        } else if ((input_stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) && !audio_copied) {
            /* i care only about audio */
            c = avcodec_find_encoder(input_stream->codec->codec_id);
            output_stream = avformat_new_stream(oc, c);
            *audio_stream = output_stream;
            avcodec_copy_context(output_stream->codec, input_stream->codec);
            /* Apparently fixes a crash on .mkvs with attachments: */
            av_dict_copy(&output_stream->metadata, input_stream->metadata, 0);
            /* Reset the codec tag so as not to cause problems with output format */
            output_stream->codec->codec_tag = 0;
            audio_copied = 1;
        }
    }

    for (int i = 0; i < oc->nb_streams; i++) {
        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
            oc->streams[i]->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        if (oc->streams[i]->codec->sample_rate == 0)
            oc->streams[i]->codec->sample_rate = 48000; /* ish */
    }

    if (!(fmt->flags & AVFMT_NOFILE)) {
        fprintf(stderr, "[DEBUG] AVFMT_NOFILE set, allocating output container\n");
        if (avio_open(&oc->pb, ctx->output_filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "[DEBUG] error creating the output context\n");
            exit(1);
        }
    }

    return oc;
}
Finally this is the code for writing audio:
static void write_audio_frame(AVFormatContext *oc, AVStream *st, struct transcoder_ctx_t *ctx) {
    AVPacket pkt = {0}; // data and size must be 0;
    struct packet_t *source_audio;
    av_init_packet(&pkt);

    if (!(source_audio = packet_queue_get_next_item_asynch(ctx->processed_audio_queue))) {
        return;
    }

    pkt.stream_index = st->index;
    pkt.size = source_audio->data_length;
    pkt.data = source_audio->data;
    pkt.pts = source_audio->PTS;
    pkt.dts = source_audio->DTS;
    pkt.duration = source_audio->duration;
    pkt.destruct = avpacket_destruct;

    /* Write the compressed frame to the media file. */
    if (av_interleaved_write_frame(oc, &pkt) != 0) {
        fprintf(stderr, "[DEBUG] Error while writing audio frame\n");
    }

    packet_queue_free_packet(source_audio, 0);
}
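One thing to double-check in the 1:1 audio copy (a hedged sketch, not a confirmed diagnosis): the PTS/DTS stored in packet_t come from av_read_frame and are therefore in the input stream's time base, while av_interleaved_write_frame expects them in the output stream's time base. If the two time bases differ, rescaling along these lines may help (in_stream and source_audio->stream_index are assumptions about how the matching input stream could be looked up):

AVStream *in_stream = ctx->input_context->streams[source_audio->stream_index]; /* hypothetical field */
pkt.pts      = av_rescale_q(source_audio->PTS,      in_stream->time_base, st->time_base);
pkt.dts      = av_rescale_q(source_audio->DTS,      in_stream->time_base, st->time_base);
pkt.duration = av_rescale_q(source_audio->duration, in_stream->time_base, st->time_base);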
A resulting mpeg4 file can be obtained from here:
http://87.120.131.41/dl/mpeg4.h264
I have omitted the write_video_frame code since it is a lot more complicated and I might be doing something wrong there, as I'm doing time base conversion etc. For audio, however, I'm doing a 1:1 copy. Each packet_t packet contains data from av_read_frame on the input MPEG-TS container. In the worst case I'd expect my audio to work and not my video; however, I cannot get either of them to work. The documentation seems rather vague on things like this - I've tried both the libav and ffmpeg IRC channels to no avail. Any information on how I can debug this issue would be greatly appreciated.
-
running ffmpeg from Popen inside (twisted) timer.LoopingCall() stalls
14 February 2014, by user1913115
I have an RTSP stream which I need to re-stream as HLS. When the RTSP stream goes down (e.g. the camera disconnects) I put up a blue screen to let the user know that the camera went offline. The HLS segmenter runs separately, listening on port 22200 for incoming packets.
In Python the code essentially boils down to this:
import psutil, subprocess as sb
from twisted.internet import reactor, task
from cameraControls import camStatus, camURL

ffOn = False
psRef = False

def monitor():
    print "TIMER TICK"
    if camStatus() == 'ON' and not ffOn:
        # camera just came online
        cmd = ["ffmpeg", "-i", camURL, "-codec", "copy", "-f", "mpegts", "udp://127.0.0.1:22200"]
        ps = sb.Popen(cmd, stderr=sb.PIPE)
        psRef = ps
    # check the stream:
    psmon = psutil.Process(psRef.pid)
    if psmon.status != psutil.STATUS_RUNNING:
        print "FFMPEG STOPPED"

tmr = task.LoopingCall(monitor)
tmr.start(2)
reactor.run()
It works fine for 5-6 minutes, then I see the video stall. If I check the CPU usage of the ffmpeg process it shows 0, and the ffmpeg output doesn't change, as if paused. However, psmon.status shows it as running, and the timer is still going (I see the "TIMER TICK" message pop up every 2 seconds in the command line).
If I simply run the ffmpeg command from the command line (not from Python), it works for hours with no problem.
Does anybody know if the Twisted reactor is stalling the process? Or is it an issue with subprocess.Popen itself? Or is the timer itself glitching somehow (even though it does get into the 'monitor' function)? I also have other timers running in the same reactor (same thread); could that be an issue?
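One likely culprit (a guess, but it fits the "runs for a few minutes, then freezes" symptom): ffmpeg is started with stderr=sb.PIPE, yet nothing ever reads that pipe. ffmpeg continuously writes progress lines to stderr, so once the OS pipe buffer fills up the ffmpeg process blocks on the write and looks paused, while the Python side keeps ticking. Redirecting or draining stderr avoids this, for example:

import os
# send ffmpeg's progress chatter to a log file (or os.devnull) instead of an unread pipe
log = open("ffmpeg.log", "ab")  # hypothetical log path; open(os.devnull, "wb") also works
ps = sb.Popen(cmd, stdout=log, stderr=sb.STDOUT)

Alternatively, Twisted's reactor.spawnProcess with a ProcessProtocol that consumes stderr keeps everything inside the reactor.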
-
Access violation reading location 0x000000148965F000
14 February 2014, by user3012914
I am trying to encode BMP images, which I get from a buffer, and store them as an H.264 video. I am stuck with the following errors, which occur randomly and repeatedly.
I am using Visual Studio 2012.
1) Access violation reading location 0x000000148965F000.
2) Heap corruption
The debugger shows the error at this point:
struct SwsContext* fooContext = sws_getContext(_imgWidth, _imgHeight, PIX_FMT_RGB32,
                                               c->width, c->height, PIX_FMT_YUV420P,
                                               SWS_FAST_BILINEAR, NULL, NULL, NULL);
sws_scale(fooContext, inpic->data, inpic->linesize, 0, c->height,
          outpic->data, outpic->linesize); // converting frame size and format
I guess the read violation happens due to non-pre-initialized values, but I couldn't exactly understand why. I have also attached part of the code below.
PagedImage *inImg = getUpdatedInputImage(0);
ML_CHECK(inImg);
ImageVector imgExt = inImg->getImageExtent();
if ((imgExt.x == _imgWidth) && (imgExt.y == _imgHeight)) {
    if (((imgExt.x % 4) == 0) && ((imgExt.y % 4) == 0)) {
        _numFramesFld->setIntValue(_numFramesFld->getIntValue() + 1);
        MLFree(unicodeFilename);

        // configure header
        //BITMAPINFO bitmapInfo

        // read out input image and write output image into video
        // get input image as an array
        void* imgData = NULL;
        SubImageBox imageBox(imgExt); // get the whole image
        getTile(inImg, imageBox, MLuint8Type, &imgData);
        MLuint8* iData = (MLuint8*)imgData;

        // since we have only images with
        // a z-ext of 1, we can compute the c stride as follows
        int cStride = _imgWidth * _imgHeight;
        int offset = 0;
        MLuint8 r=0, g=0, b=0;

        // pointer into the bitmap that is
        // used to write images into an video
        UCHAR* dst = (UCHAR*)_bits;
        for (int y = _imgHeight-1; y >= 0; y--) { // reversely scan the image. if y-rows of DIB are set in normal order, no compression will be available.
            offset = _imgWidth * y;
            for (int x = 0; x < _imgWidth; x++) {
                if (_isGreyValueImage) {
                    r = iData[offset + x];
                    *dst++ = (UCHAR)r;
                    *dst++ = (UCHAR)r;
                    *dst++ = (UCHAR)r;
                } else {
                    b = iData[offset + x]; // windows bitmap need reverse order: bgr instead of rgb
                    g = iData[offset + x + cStride];
                    r = iData[offset + x + cStride + cStride];
                    *dst++ = (UCHAR)r;
                    *dst++ = (UCHAR)g;
                    *dst++ = (UCHAR)b;
                }
                // alpha channel in input image is ignored
            }
        }

        outbuf_size = 100000 + c->width*c->height*(32>>3); // allocate output buffer
        outbuf = static_cast<uint8_t*>(malloc(outbuf_size));
        fileName_ = (_outputFilenameFld->getStringValue()).c_str();
        FILE* f = fopen(fileName_, "wb"); // opening video file for writing
        if (!f) {
            _messageFld->setStringValue("Cannot open file");
        } else
            _messageFld->setStringValue("Opened video file for writing\n");

        //for(i=0;i<_numFramesFld->getIntValue();i++)
        //{
        fflush(stdout);

        int nbytes = avpicture_get_size(PIX_FMT_YUV420P, c->width, c->height); // allocating outbuffer
        uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes*sizeof(uint8_t));

        AVFrame* inpic = avcodec_alloc_frame(); // mandatory frame allocation
        AVFrame* outpic = avcodec_alloc_frame();

        //outpic->pts = (int64_t)((float)i * (1000.0/((float)(c->time_base.den))) * 90); // setting frame pts
        avpicture_fill((AVPicture*)inpic, (uint8_t*)dst, PIX_FMT_RGB32, c->width, c->height); // fill image with input screenshot
        avpicture_fill((AVPicture*)outpic, outbuffer, PIX_FMT_YUV420P, c->width, c->height); // clear output picture for buffer copy
        av_image_alloc(outpic->data, outpic->linesize, c->width, c->height, c->pix_fmt, 1);

        inpic->data[0] += inpic->linesize[0]*(c->height-1); // flipping frame
        inpic->linesize[0] = -inpic->linesize[0];           // flipping frame

        struct SwsContext* fooContext = sws_getContext(_imgWidth, _imgHeight, PIX_FMT_RGB32,
                                                       c->width, c->height, PIX_FMT_YUV420P,
                                                       SWS_FAST_BILINEAR, NULL, NULL, NULL);
        sws_scale(fooContext, inpic->data, inpic->linesize, 0, c->height,
                  outpic->data, outpic->linesize); // converting frame size and format

        out_size = avcodec_encode_video(c, outbuf, outbuf_size, outpic); // encoding video
        _messageFld->setStringValue("Encoding frame %3d (size=%5d)\n");
        fwrite(outbuf, 1, out_size, f);

        delete [] dst; // freeing memory
        av_free(outbuffer);
        av_free(inpic);
        av_free(outpic);
        av_free(fooContext);
        DeleteObject(_hbitmap);

        for(int Z = 0; Z/ encode the delayed frames
            fwrite(outbuf, 1, out_size, f);
        }

        //outbuf[0] = 0x00;
        //outbuf[1] = 0x00; // add sequence end code to have a real mpeg file
        //outbuf[2] = 0x01;
        //outbuf[3] = 0xb7;
        //fwrite(outbuf, 1, 4, f);

        fclose(f);
        avcodec_close(c); // freeing memory
        free(outbuf);
        av_free(c);
        printf("Closed codec and Freed\n");
    }
}
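Not a definitive diagnosis, but two things stand out in the snippet above and would explain a random read past the end of a heap buffer. First, dst is incremented all the way through the bitmap by the copy loop, so by the time it is handed to avpicture_fill it points one past the end of _bits; the saved start pointer should be used instead. Second, the source picture is described to libav as c->width x c->height in PIX_FMT_RGB32 and sws_scale is asked to read c->height source rows, while the buffer actually holds _imgWidth x _imgHeight pixels at three bytes each. A sketch of a consistent setup (assuming the bitmap really is 24-bit BGR; adjust the format if _bits is 32 bpp):

/* describe the source with the geometry and layout the buffer actually has */
avpicture_fill((AVPicture*)inpic, (uint8_t*)_bits, PIX_FMT_BGR24, _imgWidth, _imgHeight);

struct SwsContext* fooContext = sws_getContext(_imgWidth, _imgHeight, PIX_FMT_BGR24,
                                               c->width, c->height, PIX_FMT_YUV420P,
                                               SWS_FAST_BILINEAR, NULL, NULL, NULL);

/* the fifth argument of sws_scale is the number of *source* rows, not destination rows */
sws_scale(fooContext, inpic->data, inpic->linesize, 0, _imgHeight,
          outpic->data, outpic->linesize);

If the vertical-flip trick (negative linesize) is kept, it also needs to use _imgHeight rather than c->height when offsetting inpic->data[0].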