Newest 'ffmpeg' Questions - Stack Overflow

http://stackoverflow.com/questions/tagged/ffmpeg

Les articles publiés sur le site

  • rtmp ffmpeg stream after opencv process c++

    13 juin 2016, par Javier Cabrera Arteaga

    I want to capture a live video stream, process the image with OpenCV, and repacket it to an RTMP live stream with the original audio. The first step is done — I have the OpenCV-processed image — but when I send it to the output live stream and open it with a video program (e.g. VLC), nothing is shown. Here is my code. Thanks in advance.

    // NOTE(review): the original header names inside <...> were stripped by
    // HTML extraction; reconstructed from usage (cout/string/vector, cv::*,
    // av*/sws_* APIs) — verify against the original post.
    #include <iostream>
    #include <string>
    #include <vector>
    #include <opencv2/opencv.hpp>
    
    extern "C" {
    //Library containing decoders and encoders for audio/video codecs.
        #include <libavcodec/avcodec.h>
        #include <libavutil/common.h>
        #include <libavutil/avassert.h>
        #include <libavutil/channel_layout.h>
        #include <libavutil/opt.h>
        #include <libavutil/mathematics.h>
        #include <libavutil/timestamp.h>
        #include <libavformat/avformat.h>
        #include <libswscale/swscale.h>
        #include <libswresample/swresample.h>
        #include <libavutil/imgutils.h>
        //Library performing highly optimized image scaling and color space/pixel format conversion operations.
    }
    using namespace std;
    
    // Result bundle returned by Capture_FFMPEG::queryFrame().
    struct openCVFrameContext{
        cv::Mat cvFrameRGB;   // decoded frame pixels; despite the name, queryFrame
                              // converts to BGR24 via sws_scale — TODO confirm intent
        bool errorStatus;     // NOTE(review): never assigned inside queryFrame in this
                              // file; main() overwrites it with openVideoFile()'s result
        bool isEmpty;         // NOTE(review): declared but never assigned or read here
    };
    
    char errorBuffer[80];
    
    // Wraps an FFmpeg demuxer/decoder pair: opens a container, decodes video
    // packets to BGR24 cv::Mat frames and audio packets to AVFrames.
    // Ownership: all AV* pointers below are owned by this object and released
    // in close(), which the destructor calls.
    class Capture_FFMPEG{
    public:
        Capture_FFMPEG(){init();}
    
        ~Capture_FFMPEG(){close();}
    
        // Opens |filename| and sets up the decoders.
        // Returns true on ERROR, false on success (inverted from the usual
        // convention — callers store it in openCVFrameContext::errorStatus).
        virtual bool openVideoFile(const char* filename);
        // Reads ONE packet: fills the cv::Mat for video packets, or decodes
        // into *dstAudio for audio packets.
        virtual openCVFrameContext queryFrame(AVFrame **dstAudio);
    
        int videoStream;      // index of the chosen video stream, -1 if none
        int audioStream;      // index of the chosen audio stream, -1 if none
        int currentStream;    // stream index of the most recently read packet
        int frameFinished;    // set by the decoder: non-zero when a full frame is out
    
        //Video
        AVFormatContext *pFormatContext;   // demuxer context
        AVCodecContext *pCodecContext;     // video decoder ctx (owned by pFormatContext)
        AVCodec *pVCodec;
        AVFrame *pFrame;                   // raw decoded video frame
        AVFrame *pFrameBGR;                // frame wrapping bufferBGR (BGR24)
        //Video
    
        //Audio
        AVCodecContext *pACodecContext;    // audio decoder ctx (owned by pFormatContext)
        AVCodec *pACodec;
        AVFrame *pAFrame;                  // raw decoded audio frame
        //Audio
    
    
        uint8_t *bufferBGR;        // pixel storage backing pFrameBGR
        AVPacket pVPacket;         // last packet read; main() reads it after queryFrame
    
        openCVFrameContext cvFrameContext;      // reused result object
        struct SwsContext *pVImgConvertCtx;     // lazy pix_fmt -> BGR24 converter
    
    protected:
        virtual void init();    // zero/reset all members
        virtual void close();   // release everything init()/openVideoFile() acquired
    
    };
    
    //function to initialize all protected variables
    void Capture_FFMPEG::init(){
        videoStream = -1;
        frameFinished = 0;
        audioStream = -1;
        currentStream = 0;
    }
    
    //Function to destroy all protected variables
    
    void Capture_FFMPEG::close() {
        if(pFrame) av_free(pFrame);
        if(pFrameBGR) av_free(pFrameBGR);
        if(&pVPacket) av_free(&pVPacket);
        //if(pVImgConvertCtx) sws_frpeeContext(pVImgConvertCtx);
        if(pFormatContext) avformat_close_input(&pFormatContext);
    //    if(pCodecContext) avcodec_close(pCodecContext);
    }
    
    bool Capture_FFMPEG::openVideoFile(const char *filename) {
        bool statusError = false;
    
        if(avformat_open_input(&pFormatContext, filename,NULL, NULL) != 0){
            cout << "Error opening video file";
            statusError = true;
        }
    
        if(avformat_find_stream_info(pFormatContext, NULL) < 0){
            cout << "Error loading video information";
            statusError = true;
        }
    
        av_dump_format(pFormatContext,0,filename, 0);
    
        videoStream = -1;
    
        audioStream = -1;
        //Getting only video channel
    
        for(int i = 0; i < pFormatContext->nb_streams; i++){
            if(pFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                videoStream = i;
            }
            if(pFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
                audioStream = i;
        }
    
        if(videoStream < 0){
            cout << "Error getting video stream index" << endl;
        }
    
        if(audioStream < 0){
            cout << "Error getting audio stream idex" << endl;
        }
    
        // Check video stream is > 0
    
    
        pCodecContext = pFormatContext->streams[videoStream]->codec;
    
        pVCodec = avcodec_find_decoder(pCodecContext->codec_id);
    
        cout << "Open video decoder " << pVCodec->name << endl;
    
        // checking opening codec
    
        if(avcodec_open2(pCodecContext,pVCodec, NULL) < 0){
            cout << "Error opening video codec" << endl;
            statusError = true;
        }
    
        pFrame = av_frame_alloc();
        pFrameBGR = av_frame_alloc();
    
        int numBytes = av_image_get_buffer_size(AV_PIX_FMT_BGR24, pCodecContext->width, pCodecContext->height, 24);
    
        cout << numBytes;
        bufferBGR = (uint8_t *)av_malloc(numBytes* sizeof(uint8_t));
    //    av_image_alloc(pFrameBGR->data, pFrameBGR->linesize, pCodecContext->width, pCodecContext->height,
    //                   AV_PIX_FMT_BGR24,32);
    
        int ret = av_image_fill_arrays(pFrameBGR->data, pFrameBGR->linesize, bufferBGR,AV_PIX_FMT_BGR24,
        pCodecContext->width, pCodecContext->height, 24);
    
        cout << ret << endl;
        if(ret < 0){
            av_strerror(ret, errorBuffer, 80);
            cout << "Could not fill image "<< errorBuffer;
        }
    
        cvFrameContext.cvFrameRGB.create(pCodecContext->height,pCodecContext->width, CV_8UC(3));
    
        //audio
    
        pACodecContext = pFormatContext->streams[audioStream]->codec;
        pACodec = avcodec_find_decoder(pACodecContext->codec_id);
    
    
        avcodec_open2(pACodecContext, pACodec, NULL);
    
        cout << "Audio decoder " << pACodec->name << endl;
        pAFrame = av_frame_alloc();
    
        return statusError;
    }
    
    
    
    // Reads ONE packet from the container. Video packets are decoded,
    // converted to BGR24 and copied into the returned cv::Mat; audio packets
    // are decoded into *audio_dst (NULL when this packet was not audio).
    openCVFrameContext Capture_FFMPEG::queryFrame(AVFrame **audio_dst) {
    
        // BUG FIX: previously *audio_dst was left untouched on the early
        // return below, so the caller read an uninitialized pointer.
        *audio_dst = NULL;
    
        if(av_read_frame(pFormatContext, &pVPacket) < 0){
            cout << "Error Could not read frame" << endl;
            cvFrameContext.errorStatus = true;   // signal EOF / read failure
            return cvFrameContext;
        }
        cvFrameContext.errorStatus = false;
    
        currentStream = pVPacket.stream_index;
    
        if(pVPacket.stream_index == videoStream){
    
            if(avcodec_decode_video2(pCodecContext, pFrame, &frameFinished, &pVPacket) < 0){
                cout << "Error could not decode video" << endl;
            }
    
            if(frameFinished){
    
                // lazily create the source-pix_fmt -> BGR24 converter once
                if(pVImgConvertCtx == NULL){
                    pVImgConvertCtx = sws_getContext(pCodecContext->width, pCodecContext->height,
                            pCodecContext->pix_fmt, pCodecContext->width, pCodecContext->height,
                            AV_PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);
                }
    
                sws_scale(pVImgConvertCtx, (const uint8_t* const *) pFrame->data, pFrame->linesize,
                          0, pCodecContext->height, pFrameBGR->data, pFrameBGR->linesize);
    
                // Copy the BGR24 pixels into the cv::Mat.
                // BUG FIX: the template argument of cv::Mat::at<>() had been
                // lost in the post — without it this does not compile.
                for(int y = 0; y < pCodecContext->height; y++){
                    const uint8_t *srcRow = pFrameBGR->data[0] + y * pFrameBGR->linesize[0];
                    for(int x = 0; x < pCodecContext->width; x++){
                        cv::Vec3b &px = cvFrameContext.cvFrameRGB.at<cv::Vec3b>(y, x);
                        px[0] = srcRow[x*3 + 0];
                        px[1] = srcRow[x*3 + 1];
                        px[2] = srcRow[x*3 + 2];
                    }
                }
            }
        }
    
        if(pVPacket.stream_index == audioStream){
    
            int ret = avcodec_decode_audio4(pACodecContext, pAFrame, &frameFinished, &pVPacket);
            if(ret < 0){
                av_strerror(ret, errorBuffer, 80);
                cout << "Could not decode audio " << errorBuffer << endl;
            }
    
            *audio_dst = pAFrame;   // borrowed: still owned by this object
        }
    
        // NOTE(review): pVPacket is intentionally NOT unreffed here — main()
        // still reads it (pts, payload) after this call returns.
        return cvFrameContext;
    }
    
    
    int main() {
    
    
        av_register_all();
        avformat_network_init();
    
        Capture_FFMPEG *capture = new Capture_FFMPEG;
    
    
        openCVFrameContext frame;
        frame.errorStatus = false;
    
        string fname = "/var/www/html/stream/test2.ts";
        //string fname = "rtmp://127.0.0.1:1935/live/got.ts";
        //string fname = "/home/javier/PycharmProjects/unmask/output.mpg";
        frame.errorStatus = capture->openVideoFile(fname.c_str());
        //frame.errorStatus = capture->openVideoFile("http://localhost/stream/out1.ts");
    
    //    cv::namedWindow("test",  cv::WINDOW_NORMAL);
    
        AVFormatContext* outfc = NULL;
        AVIOContext * avioCTX;
    
        outfc = avformat_alloc_context();
    
        int ret2 = avformat_alloc_output_context2(&outfc, NULL, "mpegts", "rtmp://127.0.0.1:1935/live/test");
         //int ret2 = avformat_alloc_output_context2(&outfc, NULL, NULL, "/home/javier/Videos/test.mpg");
    
    
        if(ret2 < 0){
            av_strerror(ret2, errorBuffer, 80);
            cout << "Could not open video to encode output " << errorBuffer << endl;
        }
    
        AVCodec* outCodec = avcodec_find_encoder(AV_CODEC_ID_RAWVIDEO);
    
        if(!outCodec){
            cout << "Could not find coder" << endl;
        }
    
        AVStream* str = avformat_new_stream(outfc, outCodec);
    
        avcodec_get_context_defaults3(str->codec, outCodec);
        str->codec->width = capture->pCodecContext->width;
        str->codec->height = capture->pCodecContext->height;
        str->codec->pix_fmt = capture->pCodecContext->pix_fmt;
        str->time_base = capture->pCodecContext->time_base;
        str->codec->time_base = str->time_base;
        str->codec->framerate = capture->pCodecContext->framerate;
        str->codec->bit_rate = capture->pCodecContext->bit_rate;
        str->codec->gop_size = capture->pCodecContext->gop_size;
        str->codec->has_b_frames = capture->pCodecContext->has_b_frames;
    
    
        avcodec_open2(str->codec, outCodec, NULL);
    
        AVCodec* audioCodec = avcodec_find_encoder(outfc->oformat->audio_codec);
        AVStream* audioStream = avformat_new_stream(outfc, audioCodec);
    
        avcodec_get_context_defaults3(audioStream->codec, audioCodec);
        audioStream->codec->sample_fmt = capture->pACodecContext->sample_fmt;
        audioStream->codec->bit_rate = capture->pACodecContext->bit_rate;
        audioStream->codec->sample_rate = capture->pACodecContext->sample_rate;
    
        audioStream->codec->channel_layout = AV_CH_LAYOUT_STEREO;
        audioStream->codec->channels = av_get_channel_layout_nb_channels(str->codec->channel_layout);
        audioStream->time_base = (AVRational){1, str->codec->sample_rate};
    
        avcodec_open2(audioStream->codec, audioCodec, NULL);
    
    
        av_dump_format(outfc,0, "rtmp://127.0.0.1:1935/live/test", true);
        av_dump_format(outfc,1, "rtmp://127.0.0.1:1935/live/test", true);
    
    
        ret2 = avio_open2(&outfc->pb, "rtmp://127.0.0.1:1935/live/test", AVIO_FLAG_WRITE, NULL, NULL);
        cout << ret2 << endl;
        int ret = 0;
    
        SwsContext* swsctx = sws_getCachedContext(
                NULL, capture->pCodecContext->width, capture->pCodecContext->height, AV_PIX_FMT_BGR24,
                str->codec->width, str->codec->height, str->codec->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL);
    
    
        AVFrame* outFrame = av_frame_alloc();
    //    av_frame_get_buffer(outFrame, 32);
        std::vector framebuf((unsigned long)av_image_get_buffer_size(str->codec->pix_fmt, str->codec->width, str->codec->height, 24));
    
        ret = av_image_fill_arrays(outFrame->data, outFrame->linesize, framebuf.data(), str->codec->pix_fmt, capture->pCodecContext->width,
                       capture->pCodecContext->height, 12);
    
        cout <<  ret << endl;
        if(ret < 0){
            av_strerror(ret, errorBuffer, 80);
            cout << "Could not fill image data empty for frame " << errorBuffer << endl;
        }
    
        outFrame->width = capture->pCodecContext->width;
        outFrame->height = capture->pCodecContext->height;
        outFrame->format = str->codec->pix_fmt;
    
    
    
    //    AVFrame* audioOutFrame = avcodec_alloc_frame();
    
        int r = avformat_write_header(outfc, NULL);
    
        if(r < 0){
            av_strerror(r, errorBuffer, 80);
            cout << "Could not write header "<< errorBuffer << endl;
            exit(1);
        }
    
        cv::Mat gray;
        cv::Mat msk;
        cv::Mat copy;
        cv::Mat zeros;
        cv::Mat inp;
    
        vector > contours;
        vector rectangles;
    
        int got;
        int got_audio;
        int frame_pts = 0;
        int delay = 1;
        int dst_nb_samples;
    
        AVFrame *audioFrame;
    
        while(1) {
    
            frame_pts++;
            cout << frame_pts << endl;
    
            frame = capture->queryFrame(&audioFrame);
    
            if(capture->currentStream == capture->videoStream && capture->frameFinished)
            {
                cv::cvtColor(frame.cvFrameRGB, gray, cv::COLOR_RGB2GRAY);
    
                const int stride[] = {static_cast(frame.cvFrameRGB.step[0])};
                ret = sws_scale(swsctx, &frame.cvFrameRGB.data, stride,
                 0, frame.cvFrameRGB.rows, outFrame->data, outFrame->linesize);
                if(ret < 0){
                    av_strerror(ret, errorBuffer, 80);
                    cout << "Could not scale "<< errorBuffer << endl;
                }
    
                outFrame->pts = capture->pVPacket.pts;
    
                AVPacket outPck = {0};
    
                av_init_packet(&outPck);
    
                ret = avcodec_encode_video2(str->codec, &outPck, outFrame, &got);
    
                if (ret < 0) {
    
                    av_strerror(ret, errorBuffer, 80);
                    cout << "Error encoding frame " << errorBuffer << endl;
                }
    
                av_packet_rescale_ts(&outPck,capture->pCodecContext->time_base, str->time_base);
    
                if (got) {
                    outPck.stream_index = str->index;
    //                av_interleaved_write_frame(outfc, &outPck);
                    av_write_frame(outfc, &outPck);
                }
                av_packet_free_side_data(&outPck);
            }
            else{
                  AVPacket audioPckt = {0};
    //
                av_packet_ref(&audioPckt, &capture->pVPacket);
                audioPckt.stream_index = 1;
                av_write_frame(outfc, &audioPckt);
    //            av_interleaved_write_frame(outfc, &audioPckt);
    
    
                av_packet_free_side_data(&audioPckt);
    //
            }
        }
    
        av_write_trailer(outfc);
    
        delete capture;
    
        return 0;
    }
    
  • Add different animations for different frames in a video using ffmpeg on Android

    13 juin 2016, par Sachin Suthar

    I am trying to apply animations for the particular video frame but hereby I'm not seeing that video frame by which I have to apply the animation. I only need fade in and fade out frame and display it as slide show video. Do you have any idea about this?

    I am using the same command given at that URL, but for the particular 5-second video the frame stays hidden and I am not getting the slide-show effect.

  • FFmpeg command for crossfading between 2 videos and merge last video without fading not working

    13 juin 2016, par Harsh Bhavsar

    I am new to FFmpeg. I am using this library

    I already done with 2 video join with this command :

    // ffmpeg argv: scale both inputs to 640x480, normalise SAR to 1:1, then
    // concat the two video+audio pairs and re-encode to MPEG-4 at 2097k.
    String[] complexCommand = {"ffmpeg","-y","-i","/sdcard/cut_output.mp4",
                        "-i","/sdcard/harsh1.mp4","-strict","experimental",
                        "-filter_complex",
                        "[0:v]scale=640x480,setsar=1:1[v0];[1:v]scale=640x480,setsar=1:1[v1];[v0][0:a][v1][1:a] concat=n=2:v=1:a=1",
                        "-ab","48000","-ac","2","-ar","22050","-s","640x480","-r","30","-vcodec","mpeg4","-b","2097k","/sdcard/merged.mp4"};
    

    Now I need a command that joins the first two videos with a cross-fade effect between them, and then appends the third video directly, without any fade. Help me out please... Thanks

  • Bash script to text watermark video from filename (ffmpeg)

    13 juin 2016, par Yian

    I would like to automate some text to be 'watermarked' on my videos. Basically I want to create a marker in my file names (eg "&&"). Then I want the script to take the content after "&&" and place it in the "drawtext" content.

    For example, file named "Video2132 && The First Test" would make the part after "text=" become "The First Test".

    The below code works fine without the drawtext part.

    As soon as I add that function, the script runs, but it creates empty video files (zero bytes).

    # For every .mov in the current directory: cut it into six 25-second
    # chunks and burn a centred, boxed caption into each chunk.
    # NOTE(review): the audio codec is specified twice ("-acodec copy" AND
    # "-codec:a copy"); also "-loglevel quiet" suppresses any drawtext error,
    # which is why a failing font/filter silently yields zero-byte files —
    # drop it while debugging.
    for f in ./*.mov; do
        printf '%s\n' "Doing stuff with: ${f}"
    
        i='0'
        while (( i <= 5 )); do
            ffmpeg -ss "$(( i * 25 ))" -t 25 -i "${f}" -acodec copy -vf drawtext="fontfile=/Users/mac1/Library/Fonts/Gillsanslight.ttf: \
    text='Stack Overflow': fontcolor=white: fontsize=24: box=1: boxcolor=black@0.5: \
    boxborderw=5: x=(w-text_w)/2: y=(h-text_h)/2" -codec:a copy "${f%.mov}.${i}.mov" -loglevel quiet
            (( i++ ))
        done
    done
    
  • ffmpeg mp4 upload to Twitter - Unsupported Error

    13 juin 2016, par Noitidart

    I have created a screencast, it was saved as webm. Using ffmpeg I converted it to a mp4:

    That kind of file isn't supported yet. Try uploading a gif, video, or picture instead.

    # BUG FIX: "-f" names an input FORMAT and must be followed by a format
    # name; the input FILE needs "-i". As written ("-f input.webm"), ffmpeg
    # rejects the command before converting anything.
    ffmpeg -i input.webm -pix_fmt yuv420p -strict -2 output.mp4
    

    I use yuv420p as that is what this topic says for twitter - https://twittercommunity.com/t/unable-to-upload-video-to-twitter/61721/3

    However, when I open a new tab, go to Twitter, click the photo icon and click upload, it instantly gives me the error "That kind of file isn't supported yet. Try uploading a gif, video, or picture instead.". It can't be checking the pixel format that fast. Anyone know what could be going on here?

    Here is ffmpeg console output:

    ffmpeg version 2.2.1 Copyright (c) 2000-2014 the FFmpeg developers
      built on Jun  9 2014 20:01:41 with emcc (Emscripten GCC-like replacement) 1.12.0 (commit 6960d2296299e96d43e694806f5d35799ef8d39c)
      configuration: --cc=emcc --prefix=/Users/bgrinstead/Sites/videoconverter.js/build/ffmpeg/../dist --enable-cross-compile --target-os=none --arch=x86_32 --cpu=generic --disable-ffplay --disable-ffprobe --disable-ffserver --disable-asm --disable-doc --disable-devices --disable-pthreads --disable-w32threads --disable-network --disable-hwaccels --disable-parsers --disable-bsfs --disable-debug --disable-protocols --disable-indevs --disable-outdevs --enable-protocol=file
      libavutil      52. 66.100 / 52. 66.100
      libavcodec     55. 52.102 / 55. 52.102
      libavformat    55. 33.100 / 55. 33.100
      libavdevice    55. 10.100 / 55. 10.100
      libavfilter     4.  2.100 /  4.  2.100
      libswscale      2.  5.102 /  2.  5.102
      libswresample   0. 18.100 /  0. 18.100
    [vp8 @ 0xdde410] Warning: not compiled with thread support, using thread emulation
    Input #0, matroska,webm, from 'input.webm':
      Metadata:
        encoder         : QTmuxingAppLibWebM-0.0.1
      Duration: N/A, start: 0.000000, bitrate: N/A
        Stream #0:0(eng): Video: vp8, yuv420p, 1920x1200, SAR 1:1 DAR 8:5, 30 fps, 30 tbr, 1k tbn, 1k tbc (default)
    Incompatible pixel format 'yuv420p' for codec 'gif', auto-selecting format 'pal8'
    [swscaler @ 0xdd7f40] No accelerated colorspace conversion found from yuv420p to bgr8.
    [gif @ 0xde5290] Warning: not compiled with thread support, using thread emulation
    [vp8 @ 0xdde410] Warning: not compiled with thread support, using thread emulation
    Output #0, gif, to 'output/output.gif':
      Metadata:
        encoder         : Lavf55.33.100
        Stream #0:0(eng): Video: gif, pal8, 1920x1200 [SAR 1:1 DAR 8:5], q=2-31, 200 kb/s, 100 tbn, 30 tbc (default)
    Stream mapping:
      Stream #0:0 -> #0:0 (vp8 -> gif)
    Press [q] to stop, [?] for help
    frame=    1 fps=0.0 q=0.0 size=       0kB time=00:00:00.03 bitrate=   8.5kbits/s    
    frame=    6 fps=0.0 q=0.0 size=     640kB time=00:00:00.33 bitrate=15877.0kbits/s    
    frame=   12 fps= 11 q=0.0 size=    1452kB time=00:00:00.63 bitrate=18878.9kbits/s    
    frame=   17 fps= 10 q=0.0 size=    2080kB time=00:00:00.91 bitrate=18723.7kbits/s    
    frame=   22 fps=9.6 q=0.0 size=    2698kB time=00:00:01.16 bitrate=19052.0kbits/s    
    frame=   27 fps=9.7 q=0.0 size=    3294kB time=00:00:01.43 bitrate=18869.4kbits/s    
    frame=   33 fps=9.6 q=0.0 size=    4001kB time=00:00:01.76 bitrate=18622.0kbits/s    
    frame=   38 fps=9.5 q=0.0 size=    4567kB time=00:00:02.04 bitrate=18339.7kbits/s    
    frame=   44 fps=9.5 q=0.0 size=    5236kB time=00:00:02.37 bitrate=18097.4kbits/s    
    frame=   50 fps=9.7 q=0.0 size=    5902kB time=00:00:02.71 bitrate=17839.9kbits/s    
    frame=   55 fps=9.6 q=0.0 size=    6457kB time=00:00:02.96 bitrate=17868.9kbits/s    
    frame=   55 fps=9.6 q=0.0 Lsize=    6567kB time=00:00:02.96 bitrate=18175.9kbits/s    
    video:6623kB audio:0kB subtitle:0 data:0 global headers:0kB muxing overhead -0.834044%
    

    Here is a screenshot: