Newest 'ffmpeg' Questions - Stack Overflow

http://stackoverflow.com/questions/tagged/ffmpeg

Les articles publiés sur le site

  • FFMPEG:avfilter_graph_create_filter method failed when initializing filter

    28 mars 2017, par IT_Layman

    I want to implement the transcoding.c sample hosted on the FFMPEG website. But the avfilter_graph_create_filter function failed with a return code of -22 (Line 175). I only made a minor change to the source code to make it runnable in my C++ console application. I also searched online, but couldn't find any helpful information. Below is my code:

     extern "C" 
    {
    #include avcodec.h>
    #include avformat.h>
    #include avfiltergraph.h>
    #include buffersink.h>
    #include buffersrc.h>
    #include opt.h>
    #include pixdesc.h>
    }
    /* Demuxer (input) and muxer (output) contexts shared between
     * init_filters() and main(). */
    static AVFormatContext *ifmt_ctx;
    static AVFormatContext *ofmt_ctx;
    /* Per-stream filtering state: the buffer source feeding decoded frames
     * in, the buffer sink producing filtered frames out, and the graph
     * connecting them. */
    typedef struct FilteringContext {
        AVFilterContext *buffersink_ctx;
        AVFilterContext *buffersrc_ctx;
        AVFilterGraph *filter_graph;
    } FilteringContext;
    /* Array with one entry per input stream; allocated in init_filters(). */
    static FilteringContext *filter_ctx;
    
    /*
     * Build the filter graph "in" -> filter_spec -> "out" that converts
     * frames decoded by dec_ctx into the format required by enc_ctx.
     *
     * fctx        - receives the created source/sink contexts and the graph
     * dec_ctx     - decoder context describing the incoming frames
     * enc_ctx     - encoder context describing the desired output format
     * filter_spec - filtergraph description (e.g. "null" / "anull")
     *
     * Returns 0 on success or a negative AVERROR code.
     *
     * BUG FIX: the snprintf() calls that fill 'args' were commented out, so
     * an uninitialized buffer was handed to avfilter_graph_create_filter(),
     * which then failed with AVERROR(EINVAL) == -22 — exactly the reported
     * error.  (The original video call also used sprintf(args, sizeof(args),
     * ...), passing the buffer size as a format argument.)
     */
    static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
        AVCodecContext *enc_ctx, const char *filter_spec)
    {
        char args[512];
        int ret = 0;
        AVFilter *buffersrc = NULL;
        AVFilter *buffersink = NULL;
        AVFilterContext *buffersrc_ctx = NULL;
        AVFilterContext *buffersink_ctx = NULL;
        AVFilterInOut *outputs = avfilter_inout_alloc();
        AVFilterInOut *inputs = avfilter_inout_alloc();
        AVFilterGraph *filter_graph = avfilter_graph_alloc();
        if (!outputs || !inputs || !filter_graph) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
            buffersrc = avfilter_get_by_name("buffer");
            buffersink = avfilter_get_by_name("buffersink");
            if (!buffersrc || !buffersink) {
                av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            /* Describe the decoded video frames to the buffer source; these
             * parameters are mandatory, so 'args' must be filled in. */
            snprintf(args, sizeof(args),
                "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                dec_ctx->time_base.num, dec_ctx->time_base.den,
                dec_ctx->sample_aspect_ratio.num,
                dec_ctx->sample_aspect_ratio.den);
            ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
                goto end;
            }
            ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
                goto end;
            }
            /* Constrain the sink to the encoder's pixel format. */
            ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
                AV_OPT_SEARCH_CHILDREN);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
                goto end;
            }
        }
        else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            buffersrc = avfilter_get_by_name("abuffer");
            buffersink = avfilter_get_by_name("abuffersink");
            if (!buffersrc || !buffersink) {
                av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            if (!dec_ctx->channel_layout)
                dec_ctx->channel_layout =
                av_get_default_channel_layout(dec_ctx->channels);
            /* Describe the decoded audio to the buffer source.  NOTE(review):
             * PRIx64 needs <inttypes.h>/<cinttypes>; pre-C++11 compilers may
             * also require __STDC_FORMAT_MACROS — confirm for your toolchain. */
            snprintf(args, sizeof(args),
                "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%" PRIx64,
                dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
                av_get_sample_fmt_name(dec_ctx->sample_fmt),
                dec_ctx->channel_layout);
            ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
                goto end;
            }
            ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
                goto end;
            }
            /* Constrain the sink to the encoder's sample format, channel
             * layout and sample rate. */
            ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
                (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
                AV_OPT_SEARCH_CHILDREN);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
                goto end;
            }
            ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
                (uint8_t*)&enc_ctx->channel_layout,
                sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
                goto end;
            }
            ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
                (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
                AV_OPT_SEARCH_CHILDREN);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
                goto end;
            }
        }
        else {
            /* Only audio and video streams are filtered. */
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        /* Endpoints for the filter graph: the parser links filter_spec's
         * input to "in" (our source) and its output to "out" (our sink). */
        outputs->name = av_strdup("in");
        outputs->filter_ctx = buffersrc_ctx;
        outputs->pad_idx = 0;
        outputs->next = NULL;
        inputs->name = av_strdup("out");
        inputs->filter_ctx = buffersink_ctx;
        inputs->pad_idx = 0;
        inputs->next = NULL;
        if (!outputs->name || !inputs->name) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
            &inputs, &outputs, NULL)) < 0)
            goto end;
        if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
            goto end;
        /* Fill FilteringContext */
        fctx->buffersrc_ctx = buffersrc_ctx;
        fctx->buffersink_ctx = buffersink_ctx;
        fctx->filter_graph = filter_graph;
    end:
        /* Safe on both paths: the inout lists are consumed (set to NULL) by
         * avfilter_graph_parse_ptr on success. */
        avfilter_inout_free(&inputs);
        avfilter_inout_free(&outputs);
        return ret;
    }
    static int init_filters(void)
    {
        const char *filter_spec;
        unsigned int i;
        int ret;
        filter_ctx = (FilteringContext *)av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
        if (!filter_ctx)
            return AVERROR(ENOMEM);
        for (i = 0; i < ifmt_ctx->nb_streams; i++) {
            filter_ctx[i].buffersrc_ctx = NULL;
            filter_ctx[i].buffersink_ctx = NULL;
            filter_ctx[i].filter_graph = NULL;
            if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
                || ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
                continue;
            if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
                filter_spec = "null"; /* passthrough (dummy) filter for video */
            else
                filter_spec = "anull"; /* passthrough (dummy) filter for audio */
            ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec,
                ofmt_ctx->streams[i]->codec, filter_spec);
            if (ret)
                return ret;
        }
        return 0;
    }
    
    int main(int argc, char **argv)
    {
        int ret;
        AVPacket packet = {  NULL, 0 };
        AVFrame *frame = NULL;
        enum AVMediaType type;
        unsigned int stream_index;
        unsigned int i;
        int got_frame;
        int(*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
    
        av_register_all();
        avfilter_register_all();
    
        if ((ret = init_filters()) < 0)
            goto end;
        /*...*/
        system("Pause");
        return ret ? 1 : 0;
    }
    
  • Live HTML5 Video to Node.js to HTML5 Video streaming

    28 mars 2017, par sandorvasas

    I've searched around in similar topics, but haven't really found the answer for my question. I'm making a webcam live-streaming site, and reading video input from HTML5 , periodically uploading the frames via WebSocket to a Node.js server, which -so far as I understood-, should write the incoming frames' data to a videofile, so that file can be streamed with ffmpeg or gstreamer to broadcast the live stream to multiple viewers.

    I'm planning to use livecam, since it can stream from a file as well.

    My uncertainty arises at the point when the frames are received from the broadcaster:

    I have this simple node RTC endpoint:

    // Attach a Primus realtime endpoint (binary parser over uws) to the
    // given HTTP server and track connected sparks by id.
    const RTCAPI = (apiServer) => {
      const primusOptions = {
        transformer: 'uws',
        parser: 'binary',
        pathname: '/messaging',
        plugin: {
          rooms: PrimusRooms,
          responder: PrimusResponder
        }
      };
      const primus = new Primus(apiServer, primusOptions);

      // Connected sparks (client connections) keyed by Primus spark id.
      const clients = {};

      primus.on('connection', (spark) => {
        clients[spark.id] = spark;

        spark.on('data', (data) => {
            // here -- fs.createWriteStream?
        });
      });

    }
    

    A side question is, how can I safely write the frames to a file that ffmpeg/gstreamer could stream? Is it safe to append raw incoming data to the file?

    Since this would be live-stream only, I won't need to keep the recorded files, so I guess the file should only keep the last N frames, deleting the oldest one when adding a new one. I'm not sure how I can achieve this. I'm not even sure whether I have to deal with this manually or whether ffmpeg/gstreamer supports a 'moving window of frames' out of the box.

    Any advice would be greatly appreciated!

    Thanks.

  • FFmpeg mapping audio and video by language

    28 mars 2017, par Terrabyte

    I want to batch convert these mkv files, they contain more than one language. Instances include: Eng/Ger, Eng/JPN. Sometimes there are multiple video sources within the mkv for whatever reason:

    I've tried using:

    FFMPEG_PATH . ' -i ' . $localFile . ' -sn -map 0:v:0 -map 0:a:m:language:jpn -c:v ' . $videoCodec . ' -tune animation -crf 20 -refs 6 -bf 6 -trellis 1 -b_strategy 1 -profile:v high -level 4.0 -pix_fmt yuv420p -ac 2 -flags +aic+mv4 ' . $scale . ' ' . $convertedFilename;
    

    This one grabs the first video (this doesn't matter, but I need it to grab one video just in case):

    -map 0:v:0 -map 
    

    I don't think this one works, because when I tried it, it grabbed the English one instead, so I don't know the issue.

    0:a:m:language:jpn
    

    I could do

    0:m:language:jpn
    

    but then I'd have to remove 0:v:0, and sometimes there's an issue where just using the general mapping would encode both video sources, so it would double the file sizes.

    So how would I just map the audio stream while keeping the video mapping? FFmpeg is really confusing with this instruction.

  • ffmpeg - remove sequentially duplicate frames

    27 mars 2017, par metlira

    Is there any way to detect duplicate frames within the video using ffmpeg.

    I tried -vf flag with select=gt(scene\,0.xxx) for scene change. But, it did not work for my case.

  • FFMpeg open a DVD VOB chain ?

    27 mars 2017, par Sugrue

    I'm new to FFMpeg so this may be a dumb question, but I don't see the answer in the documentation.

    I want to decode frames from a DVD vob files. Opening the first VOB in the group works fine, but how do I tell ffmpeg to continue on to the next VOB and read all the VOBs on a DVD?

    I have the VOB files in a folder on a hard disk.