On other sites (362)

  • Qt Open Source Product 2 - Vlc Demo [closed]

    3 June 2021, by cool code

    Ⅰ. Preface

    


    The previous work was built directly on the FFmpeg core, and FFmpeg is so powerful that many beginners struggle to understand it. There are also a lot of users who only need to play a simple video stream and do not want to get involved in the complexity of decoding and transcoding. This is where VLC comes in handy: it is a deep encapsulation of FFmpeg that exposes a friendly interface. MPV does much the same thing, and in one respect it is even better than VLC: it ships as a single library file, apparently packaged as a static library, whereas VLC comes with a pile of dynamic library files and plug-in files.
Of course, the appeal of VLC's simplicity is that only a few lines of code are needed to get something playing, so beginners see a result immediately, get excited, can move on to the next step of coding more quickly, and experience the fun of it.
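
    As a rough illustration of how little code a bare libVLC player needs, here is a minimal sketch (not part of the demo itself; it assumes libVLC 3.x and a plain QWidget as the render target, and skips error handling):

#include <vlc/vlc.h>
#include <QWidget>

//Minimal libVLC playback sketch (libVLC 3.x assumed); error checks omitted for brevity
void playRtsp(QWidget *videoWidget, const char *url)
{
    libvlc_instance_t *vlc = libvlc_new(0, NULL);                    //create the libVLC engine
    libvlc_media_t *media = libvlc_media_new_location(vlc, url);     //e.g. "rtsp://192.168.1.128:554/1"
    libvlc_media_player_t *player = libvlc_media_player_new_from_media(media);
    libvlc_media_release(media);

#if defined(Q_OS_WIN)
    libvlc_media_player_set_hwnd(player, (void *)videoWidget->winId());      //render into the Qt widget
#else
    libvlc_media_player_set_xwindow(player, (uint32_t)videoWidget->winId()); //X11 window id on Linux
#endif

    libvlc_media_player_play(player);
}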

    


    Ⅱ. Code framework

    


    #include "ffmpeg.h"

FFmpegThread::FFmpegThread(QObject *parent) : QThread(parent)
{
    setObjectName("FFmpegThread");
    stopped = false;
    isPlay = false;

    frameFinish = false;
    videoWidth = 0;
    videoHeight = 0;
    oldWidth = 0;
    oldHeight = 0;
    videoStreamIndex = -1;
    audioStreamIndex = -1;

    url = "rtsp://192.168.1.128:554/1";

    buffer = NULL;
    avPacket = NULL;
    avFrame = NULL;
    avFrame2 = NULL;
    avFrame3 = NULL;
    avFormatContext = NULL;
    videoCodec = NULL;
    audioCodec = NULL;
    swsContext = NULL;

    options = NULL;
    videoDecoder = NULL;
    audioDecoder = NULL;

    //Initial registration, only register once in a software
    FFmpegThread::initlib();
}

//Only need to initialize once in a software
void FFmpegThread::initlib()
{
    static QMutex mutex;
    QMutexLocker locker(&mutex);
    static bool isInit = false;
    if (!isInit) {
        //Register all available file formats and decoders in the library
        av_register_all();
        //Register all devices, mainly for local camera playback support
#ifdef ffmpegdevice
        avdevice_register_all();
#endif
        //Initialize the network stream format, which must be executed first when using the network stream
        avformat_network_init();

        isInit = true;
        qDebug() << TIMEMS << "init ffmpeg lib ok" << " version:" << FFMPEG_VERSION;
#if 0
        //Output all supported decoder names
        QStringList listCodeName;
        AVCodec *code = av_codec_next(NULL);
        while (code != NULL) {
            listCodeName << code->name;
            code = code->next;
        }

        qDebug() << TIMEMS << listCodeName;
#endif
    }
}

bool FFmpegThread::init()
{
    //Before opening the code stream, specify various parameters such as: detection time/timeout time/maximum delay, etc.
    //Set the cache size, 1080p can increase the value
    av_dict_set(&options, "buffer_size", "8192000", 0);
    //Open in tcp mode, if open in udp mode, replace tcp with udp
    av_dict_set(&options, "rtsp_transport", "tcp", 0);
    //Set the timeout disconnection time, the unit is microseconds, 3000000 means 3 seconds
    av_dict_set(&options, "stimeout", "3000000", 0);
    //Set the maximum delay, in microseconds, 1000000 means 1 second
    av_dict_set(&options, "max_delay", "1000000", 0);
    //Automatically start the number of threads
    av_dict_set(&options, "threads", "auto", 0);

    //Open video stream
    avFormatContext = avformat_alloc_context();

    int result = avformat_open_input(&avFormatContext, url.toStdString().data(), NULL, &options);
    if (result < 0) {
        qDebug() << TIMEMS << "open input error" << url;
        return false;
    }

    //Release setting parameters
    if (options != NULL) {
        av_dict_free(&options);
    }

    //Get flow information
    result = avformat_find_stream_info(avFormatContext, NULL);
    if (result < 0) {
        qDebug() << TIMEMS << "find stream info error";
        return false;
    }

    //----------At the beginning of the video stream part, make a mark to facilitate the folding of the code----------
    if (1) {
        videoStreamIndex = av_find_best_stream(avFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, &videoDecoder, 0);
        if (videoStreamIndex < 0) {
            qDebug() << TIMEMS << "find video stream index error";
            return false;
        }

        //Get video stream
        AVStream *videoStream = avFormatContext->streams[videoStreamIndex];

        //Get the video stream decoder, or specify the decoder
        videoCodec = videoStream->codec;
        videoDecoder = avcodec_find_decoder(videoCodec->codec_id);
        //videoDecoder = avcodec_find_decoder_by_name("h264_qsv");
        if (videoDecoder == NULL) {
            qDebug() << TIMEMS << "video decoder not found";
            return false;
        }

        //Set up accelerated decoding
        videoCodec->lowres = videoDecoder->max_lowres;
        videoCodec->flags2 |= AV_CODEC_FLAG2_FAST;

        //Open the video decoder
        result = avcodec_open2(videoCodec, videoDecoder, NULL);
        if (result < 0) {
            qDebug() << TIMEMS << "open video codec error";
            return false;
        }

        //Get the resolution size
        videoWidth = videoStream->codec->width;
        videoHeight = videoStream->codec->height;

        //If the width and height are not obtained, return
        if (videoWidth == 0 || videoHeight == 0) {
            qDebug() << TIMEMS << "find width height error";
            return false;
        }

        QString videoInfo = QString("Video stream info -> index: %1  decode: %2  format: %3  duration: %4 s  Resolution: %5*%6")
                            .arg(videoStreamIndex).arg(videoDecoder->name).arg(avFormatContext->iformat->name)
                            .arg((avFormatContext->duration) / 1000000).arg(videoWidth).arg(videoHeight);
        qDebug() << TIMEMS << videoInfo;
    }
    //----------The video stream part starts----------

    //----------Start the audio stream part, mark it to facilitate the code folding----------
    if (1) {
        //Loop to find audio stream index
        audioStreamIndex = -1;
        for (uint i = 0; i < avFormatContext->nb_streams; i++) {
            if (avFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
                audioStreamIndex = i;
                break;
            }
        }

        //Some have no audio stream, so there is no need to return here
        if (audioStreamIndex == -1) {
            qDebug() << TIMEMS << "find audio stream index error";
        } else {
            //Get audio stream
            AVStream *audioStream = avFormatContext->streams[audioStreamIndex];
            audioCodec = audioStream->codec;

            //Get the audio stream decoder, or specify the decoder
            audioDecoder = avcodec_find_decoder(audioCodec->codec_id);
            //audioDecoder = avcodec_find_decoder_by_name("aac");
            if (audioDecoder == NULL) {
                qDebug() << TIMEMS << "audio codec not found";
                return false;
            }

            //Open the audio decoder
            result = avcodec_open2(audioCodec, audioDecoder, NULL);
            if (result < 0) {
                qDebug() << TIMEMS << "open audio codec error";
                return false;
            }

            QString audioInfo = QString("Audio stream information -> index: %1  decode: %2  Bit rate: %3  channel num: %4  sampling: %5")
                                .arg(audioStreamIndex).arg(audioDecoder->name).arg(avFormatContext->bit_rate)
                                .arg(audioCodec->channels).arg(audioCodec->sample_rate);
            qDebug() << TIMEMS << audioInfo;
        }
    }
    //----------End of audio stream----------

    //Pre-allocated memory
    avPacket = av_packet_alloc();
    avFrame = av_frame_alloc();
    avFrame2 = av_frame_alloc();
    avFrame3 = av_frame_alloc();

    //Compare the width and height of the last file. When changing, you need to reallocate the memory
    if (oldWidth != videoWidth || oldHeight != videoHeight) {
        int byte = avpicture_get_size(AV_PIX_FMT_RGB32, videoWidth, videoHeight);
        buffer = (uint8_t *)av_malloc(byte * sizeof(uint8_t));
        oldWidth = videoWidth;
        oldHeight = videoHeight;
    }

    //Define pixel format
    AVPixelFormat srcFormat = AV_PIX_FMT_YUV420P;
    AVPixelFormat dstFormat = AV_PIX_FMT_RGB32;
    //Get the decoded format through the decoder
    srcFormat = videoCodec->pix_fmt;

    //The SWS_FAST_BILINEAR parameter used by the default fastest decoding may lose part of the picture data, and you can change it to other parameters by yourself
    int flags = SWS_FAST_BILINEAR;

    //Open up a cache to store one frame of data
    //The following two methods are ok, avpicture_fill has been gradually abandoned
    //avpicture_fill((AVPicture *)avFrame3, buffer, dstFormat, videoWidth, videoHeight);
    av_image_fill_arrays(avFrame3->data, avFrame3->linesize, buffer, dstFormat, videoWidth, videoHeight, 1);

    //Image conversion
    swsContext = sws_getContext(videoWidth, videoHeight, srcFormat, videoWidth, videoHeight, dstFormat, flags, NULL, NULL, NULL);

    //Output video information
    //av_dump_format(avFormatContext, 0, url.toStdString().data(), 0);

    //qDebug() << TIMEMS << "init ffmpeg finsh";
    return true;
}

void FFmpegThread::run()
{
    while (!stopped) {
        //Perform initialization based on the flag bit
        if (isPlay) {
            //If initialization fails, stop the thread instead of reading from an invalid context
            if (!this->init()) {
                break;
            }
            isPlay = false;
            continue;
        }

        if (av_read_frame(avFormatContext, avPacket) >= 0) {
            //Determine whether the current package is video or audio
            int index = avPacket->stream_index;
            if (index == videoStreamIndex) {
                //Decode video stream avcodec_decode_video2 method has been deprecated
#if 0
                avcodec_decode_video2(videoCodec, avFrame2, &frameFinish, avPacket);
#else
                frameFinish = avcodec_send_packet(videoCodec, avPacket);
                if (frameFinish < 0) {
                    continue;
                }

                frameFinish = avcodec_receive_frame(videoCodec, avFrame2);
                if (frameFinish < 0) {
                    continue;
                }
#endif

                if (frameFinish >= 0) {
                    //Turn the data into a picture
                    sws_scale(swsContext, (const uint8_t *const *)avFrame2->data, avFrame2->linesize, 0, videoHeight, avFrame3->data, avFrame3->linesize);

                    //The following two methods can be used
                    //QImage image(avFrame3->data[0], videoWidth, videoHeight, QImage::Format_RGB32);
                    QImage image((uchar *)buffer, videoWidth, videoHeight, QImage::Format_RGB32);
                    if (!image.isNull()) {
                        emit receiveImage(image);
                    }

                    msleep(1);
                }
            } else if (index == audioStreamIndex) {
                //Decode the audio stream, it will not be processed here, and will be handed over to sdl to play
            }
        }

        //Only unref here; freeing the packet itself would invalidate it for the next av_read_frame
        av_packet_unref(avPacket);
        msleep(1);
    }

    //Release resources after the thread ends
    free();
    stopped = false;
    isPlay = false;
    qDebug() << TIMEMS << "stop ffmpeg thread";
}

void FFmpegThread::setUrl(const QString &url)
{
    this->url = url;
}

void FFmpegThread::free()
{
    if (swsContext != NULL) {
        sws_freeContext(swsContext);
        swsContext = NULL;
    }

    if (avPacket != NULL) {
        av_packet_free(&avPacket);
        avPacket = NULL;
    }

    if (avFrame != NULL) {
        av_frame_free(&avFrame);
        avFrame = NULL;
    }

    if (avFrame2 != NULL) {
        av_frame_free(&avFrame2);
        avFrame2 = NULL;
    }

    if (avFrame3 != NULL) {
        av_frame_free(&avFrame3);
        avFrame3 = NULL;
    }

    if (videoCodec != NULL) {
        avcodec_close(videoCodec);
        videoCodec = NULL;
    }

    if (audioCodec != NULL) {
        avcodec_close(audioCodec);
        audioCodec = NULL;
    }

    if (avFormatContext != NULL) {
        avformat_close_input(&avFormatContext);
        avFormatContext = NULL;
    }

    av_dict_free(&options);
    //qDebug() << TIMEMS << "close ffmpeg ok";
}

void FFmpegThread::play()
{
    //Let the thread perform initialization through the flag bit
    isPlay = true;
}

void FFmpegThread::pause()
{

}

void FFmpegThread::next()
{

}

void FFmpegThread::stop()
{
    //Stop the thread through the flag
    stopped = true;
}

//Real-time video display form class
FFmpegWidget::FFmpegWidget(QWidget *parent) : QWidget(parent)
{
    thread = new FFmpegThread(this);
    connect(thread, SIGNAL(receiveImage(QImage)), this, SLOT(updateImage(QImage)));
    image = QImage();
}

FFmpegWidget::~FFmpegWidget()
{
    close();
}

void FFmpegWidget::paintEvent(QPaintEvent *)
{
    if (image.isNull()) {
        return;
    }

    //qDebug() << TIMEMS << "paintEvent" << objectName();
    QPainter painter(this);
    painter.drawImage(this->rect(), image);
}

void FFmpegWidget::updateImage(const QImage &image)
{
    //this->image = image.copy();
    this->image = image;
    this->update();
}

void FFmpegWidget::setUrl(const QString &url)
{
    thread->setUrl(url);
}

void FFmpegWidget::open()
{
    //qDebug() << TIMEMS << "open video" << objectName();
    clear();

    thread->play();
    thread->start();
}

void FFmpegWidget::pause()
{
    thread->pause();
}

void FFmpegWidget::next()
{
    thread->next();
}

void FFmpegWidget::close()
{
    //qDebug() << TIMEMS << "close video" << objectName();
    if (thread->isRunning()) {
        thread->stop();
        thread->quit();
        thread->wait(500);
    }

    QTimer::singleShot(1, this, SLOT(clear()));
}

void FFmpegWidget::clear()
{
    image = QImage();
    update();
}
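
    For completeness, a minimal usage sketch of the widget above (hypothetical main.cpp; it assumes ffmpeg.h declares FFmpegThread and FFmpegWidget exactly as defined in this listing):

//main.cpp - hypothetical usage of FFmpegWidget
#include <QApplication>
#include "ffmpeg.h"

int main(int argc, char *argv[])
{
    QApplication app(argc, argv);

    FFmpegWidget widget;
    widget.resize(640, 480);
    widget.setUrl("rtsp://192.168.1.128:554/1");  //same demo URL as in the constructor
    widget.show();
    widget.open();                                //starts the decode thread; frames arrive via receiveImage -> updateImage

    return app.exec();
}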



    


    Ⅲ. Renderings

    


    https://youtu.be/CLsGw2CJW-c

    


    Ⅳ. Open source code download URL

    


    1. Download URL for Dropbox:

    


    https://www.dropbox.com/sh/n58ucs57pscp25e/AABWBQlg4U3Oz2WF9YOJDrj1a?dl=0

    


    2. Download URL for Box:

    


    https://app.box.com/s/x48a7ttpk667afqqdk7t1fqok4fmvmyv

    


  • FFmpeg C++ API : Using HW acceleration (VAAPI) to transcode video coming from a webcam [closed]

    16 April, by nicoh

    I'm currently trying to use HW acceleration with the FFmpeg C++ API in order to transcode the video coming from a webcam (which may vary from one config to another) into a given output format (e.g. converting the MJPEG video stream coming from the webcam to H264 so that it can be written into an MP4 file).

    


    I already managed to achieve this by transferring the AVFrame output by the HW decoder from GPU to CPU, then transferring it to the HW encoder input (so from CPU back to GPU).
This is not very efficient and, on top of that, for the config given above (MJPEG => H264), I cannot feed the decoder output straight into the encoder because the MJPEG HW decoder wants to output the RGBA pixel format while the H264 encoder wants NV12. So I also have to perform the pixel format conversion on the CPU side.
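
    Roughly, that CPU round-trip looks like this (a simplified sketch rather than my exact code; the helper name download_to_nv12 is just for illustration):

extern "C" {
#include <libavutil/hwcontext.h>
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>
#include <libswscale/swscale.h>
}

//Sketch of the GPU -> CPU -> pixel-conversion path described above
static AVFrame *download_to_nv12(AVFrame *hw_frame)
{
    AVFrame *sw_frame = av_frame_alloc();   //CPU copy of the decoder output (RGBA in my case)
    AVFrame *nv12     = av_frame_alloc();   //what the H264 HW encoder expects
    if (!sw_frame || !nv12)
        return nullptr;

    //GPU -> CPU transfer of the VAAPI frame
    if (av_hwframe_transfer_data(sw_frame, hw_frame, 0) < 0)
        return nullptr;

    nv12->format = AV_PIX_FMT_NV12;
    nv12->width  = hw_frame->width;
    nv12->height = hw_frame->height;
    if (av_frame_get_buffer(nv12, 0) < 0)
        return nullptr;

    //CPU-side pixel format conversion (RGBA -> NV12 for the MJPEG -> H264 case)
    SwsContext *sws = sws_getContext(hw_frame->width, hw_frame->height, (AVPixelFormat)sw_frame->format,
                                     hw_frame->width, hw_frame->height, AV_PIX_FMT_NV12,
                                     SWS_BILINEAR, nullptr, nullptr, nullptr);
    sws_scale(sws, sw_frame->data, sw_frame->linesize, 0, hw_frame->height,
              nv12->data, nv12->linesize);
    sws_freeContext(sws);
    av_frame_free(&sw_frame);

    //nv12 would then be uploaded back into a VAAPI frame before being sent to the encoder
    return nv12;
}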

    


    That's why I would like to connect the output of the HW video decoder directly to the input of the HW encoder (inside the GPU).
To do this, I followed the example given by FFmpeg: https://github.com/FFmpeg/FFmpeg/blob/master/doc/examples/vaapi_transcode.c.
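
    The key step in that example (it also appears in dec_enc() in the code below) is to make the encoder share the decoder's hardware frames context so the surfaces never leave the GPU:

//Done once, after the first decoded frame is available (excerpt matching the code below)
encoder_ctx->hw_frames_ctx = av_buffer_ref(decoder_ctx->hw_frames_ctx);
encoder_ctx->pix_fmt       = AV_PIX_FMT_VAAPI;  //frames are passed as VAAPI surfaces, not system memory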

    


    This works fine when transcoding an AVI file containing MJPEG to H264, but it fails when the input is an MJPEG stream coming from a webcam.
In this case, the encoder says:

    


    [h264_vaapi @ 0x5555555e5140] No usable encoding profile found.


    


    Below is the code of the FFmpeg example, modified to connect to the webcam instead of opening an input file:

    


/*
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file Intel VAAPI-accelerated transcoding API usage example
 * @example vaapi_transcode.c
 *
 * Perform VAAPI-accelerated transcoding.
 * Usage: vaapi_transcode input_stream codec output_stream
 * e.g: - vaapi_transcode input.mp4 h264_vaapi output_h264.mp4
 *      - vaapi_transcode input.mp4 vp9_vaapi output_vp9.ivf
 */

#include
#include
#include <iostream>

//#define USE_INPUT_FILE

extern "C"{
#include <libavutil/hwcontext.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>
}

static AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
static AVBufferRef *hw_device_ctx = NULL;
static AVCodecContext *decoder_ctx = NULL, *encoder_ctx = NULL;
static int video_stream = -1;
static AVStream *ost;
static int initialized = 0;

static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx,
                                           const enum AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat *p;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        if (*p == AV_PIX_FMT_VAAPI)
            return *p;
    }

    std::cout << "Unable to decode this file using VA-API." << std::endl;
    return AV_PIX_FMT_NONE;
}

static int open_input_file(const char *filename)
{
    int ret;
    AVCodec *decoder = NULL;
    AVStream *video = NULL;
    AVDictionary    *pInputOptions = nullptr;

#ifdef USE_INPUT_FILE
    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
        char errMsg[1024] = {0};
        std::cout << "Cannot open input file '" << filename << "', Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
        return ret;
    }
#else
    avdevice_register_all();
    av_dict_set(&pInputOptions, "input_format", "mjpeg", 0);
    av_dict_set(&pInputOptions, "framerate", "30", 0);
    av_dict_set(&pInputOptions, "video_size", "640x480", 0);

    if ((ret = avformat_open_input(&ifmt_ctx, "/dev/video0", NULL, &pInputOptions)) < 0) {
        char errMsg[1024] = {0};
        std::cout << "Cannot open input file '" << filename << "', Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
        return ret;
    }
#endif

    ifmt_ctx->flags |= AVFMT_FLAG_NONBLOCK;

    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
        char errMsg[1024] = {0};
        std::cout << "Cannot find input stream information. Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
        return ret;
    }

    ret = av_find_best_stream(ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0);
    if (ret < 0) {
        char errMsg[1024] = {0};
        std::cout << "Cannot find a video stream in the input file. Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
        return ret;
    }
    video_stream = ret;

    if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
        return AVERROR(ENOMEM);

    video = ifmt_ctx->streams[video_stream];
    if ((ret = avcodec_parameters_to_context(decoder_ctx, video->codecpar)) < 0) {
        char errMsg[1024] = {0};
        std::cout << "avcodec_parameters_to_context error. Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
        return ret;
    }

    decoder_ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
    if (!decoder_ctx->hw_device_ctx) {
        std::cout << "A hardware device reference create failed." << std::endl;
        return AVERROR(ENOMEM);
    }
    decoder_ctx->get_format    = get_vaapi_format;

    if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0)
    {
        char errMsg[1024] = {0};
        std::cout << "Failed to open codec for decoding. Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
    }

    return ret;
}

static int encode_write(AVPacket *enc_pkt, AVFrame *frame)
{
    int ret = 0;

    av_packet_unref(enc_pkt);

    AVHWDeviceContext *pHwDevCtx = reinterpret_cast<AVHWDeviceContext *>(encoder_ctx->hw_device_ctx);
    AVHWFramesContext *pHwFrameCtx = reinterpret_cast<AVHWFramesContext *>(encoder_ctx->hw_frames_ctx);

    if ((ret = avcodec_send_frame(encoder_ctx, frame)) < 0) {
        char errMsg[1024] = {0};
        std::cout << "Error during encoding. Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
        goto end;
    }
    while (1) {
        ret = avcodec_receive_packet(encoder_ctx, enc_pkt);
        if (ret)
            break;

        enc_pkt->stream_index = 0;
        av_packet_rescale_ts(enc_pkt, ifmt_ctx->streams[video_stream]->time_base,
                             ofmt_ctx->streams[0]->time_base);
        ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
        if (ret < 0) {
            char errMsg[1024] = {0};
            std::cout << "Error during writing data to output file. Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
            return -1;
        }
    }

end:
    if (ret == AVERROR_EOF)
        return 0;
    ret = ((ret == AVERROR(EAGAIN)) ? 0:-1);
    return ret;
}

static int dec_enc(AVPacket *pkt, const AVCodec *enc_codec, AVCodecContext *pDecCtx)
{
    AVFrame *frame;
    int ret = 0;

    ret = avcodec_send_packet(decoder_ctx, pkt);
    if (ret < 0) {
        char errMsg[1024] = {0};
        std::cout << "Error during decoding. Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
        return ret;
    }

    while (ret >= 0) {
        if (!(frame = av_frame_alloc()))
            return AVERROR(ENOMEM);

        ret = avcodec_receive_frame(decoder_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            av_frame_free(&frame);
            return 0;
        } else if (ret < 0) {
            char errMsg[1024] = {0};
            std::cout << "Error while decoding. Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
            goto fail;
        }

        if (!initialized) {
            AVHWFramesContext *pHwFrameCtx = reinterpret_cast<AVHWFramesContext *>(decoder_ctx->hw_frames_ctx);

            /* we need to ref hw_frames_ctx of decoder to initialize encoder's codec.
               Only after we get a decoded frame, can we obtain its hw_frames_ctx */
            encoder_ctx->hw_frames_ctx = av_buffer_ref(pDecCtx->hw_frames_ctx);
            if (!encoder_ctx->hw_frames_ctx) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            /* set AVCodecContext Parameters for encoder, here we keep them stay
             * the same as decoder.
             * xxx: now the sample can't handle resolution change case.
             */
            if(encoder_ctx->time_base.den == 1 && encoder_ctx->time_base.num == 0)
            {
                encoder_ctx->time_base = av_inv_q(ifmt_ctx->streams[video_stream]->avg_frame_rate);
            }
            else
            {
                encoder_ctx->time_base = av_inv_q(decoder_ctx->framerate);
            }
            encoder_ctx->pix_fmt   = AV_PIX_FMT_VAAPI;
            encoder_ctx->width     = decoder_ctx->width;
            encoder_ctx->height    = decoder_ctx->height;

            if ((ret = avcodec_open2(encoder_ctx, enc_codec, NULL)) < 0) {
                char errMsg[1024] = {0};
                std::cout << "Failed to open encode codec. Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
                goto fail;
            }

            if (!(ost = avformat_new_stream(ofmt_ctx, enc_codec))) {
                std::cout << "Failed to allocate stream for output format." << std::endl;
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            ost->time_base = encoder_ctx->time_base;
            ret = avcodec_parameters_from_context(ost->codecpar, encoder_ctx);
            if (ret < 0) {
                char errMsg[1024] = {0};
                std::cout << "Failed to copy the stream parameters. Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
                goto fail;
            }

            /* write the stream header */
            if ((ret = avformat_write_header(ofmt_ctx, NULL)) < 0) {
                char errMsg[1024] = {0};
                std::cout << "Error while writing stream header. Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
                goto fail;
            }

            initialized = 1;
        }

        if ((ret = encode_write(pkt, frame)) < 0)
            std::cout << "Error during encoding and writing." << std::endl;

fail:
        av_frame_free(&frame);
        if (ret < 0)
            return ret;
    }
    return 0;
}

int main(int argc, char **argv)
{
    const AVCodec *enc_codec;
    int ret = 0;
    AVPacket *dec_pkt;

    if (argc != 4) {
        fprintf(stderr, "Usage: %s <input file> <encode codec> <output file>\n"
                "The output format is guessed according to the file extension.\n"
                "\n", argv[0]);
        return -1;
    }

    ret = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI, NULL, NULL, 0);
    if (ret < 0) {
        char errMsg[1024] = {0};
        std::cout << "Failed to create a VAAPI device. Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
        return -1;
    }

    dec_pkt = av_packet_alloc();
    if (!dec_pkt) {
        std::cout << "Failed to allocate decode packet" << std::endl;
        goto end;
    }

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;

    if (!(enc_codec = avcodec_find_encoder_by_name(argv[2]))) {
        std::cout << "Could not find encoder '" << argv[2] << "'" << std::endl;
        ret = -1;
        goto end;
    }

    if ((ret = (avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, argv[3]))) < 0) {
        char errMsg[1024] = {0};
        std::cout << "Failed to deduce output format from file extension. Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
        goto end;
    }

    if (!(encoder_ctx = avcodec_alloc_context3(enc_codec))) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    ret = avio_open(&ofmt_ctx->pb, argv[3], AVIO_FLAG_WRITE);
    if (ret < 0) {
        char errMsg[1024] = {0};
        std::cout << "Cannot open output file. Error code: " << av_make_error_string(errMsg, 1024, ret) << std::endl;
        goto end;
    }

    /* read all packets and only transcoding video */
    while (ret >= 0) {
        if ((ret = av_read_frame(ifmt_ctx, dec_pkt)) < 0)
            break;

        if (video_stream == dec_pkt->stream_index)
            ret = dec_enc(dec_pkt, enc_codec, decoder_ctx);

        av_packet_unref(dec_pkt);
    }

    /* flush decoder */
    av_packet_unref(dec_pkt);
    ret = dec_enc(dec_pkt, enc_codec, decoder_ctx);

    /* flush encoder */
    ret = encode_write(dec_pkt, NULL);

    /* write the trailer for output stream */
    av_write_trailer(ofmt_ctx);

end:
    avformat_close_input(&ifmt_ctx);
    avformat_close_input(&ofmt_ctx);
    avcodec_free_context(&decoder_ctx);
    avcodec_free_context(&encoder_ctx);
    av_buffer_unref(&hw_device_ctx);
    av_packet_free(&dec_pkt);
    return ret;
}


    And here is the content of the associated CMakeLists.txt file, used to build it with gcc:


cmake_minimum_required(VERSION 3.5)

include(FetchContent)

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

set(CMAKE_VERBOSE_MAKEFILE ON)

SET (FFMPEG_HW_TRANSCODE_INCS
    ${CMAKE_CURRENT_LIST_DIR})

include_directories(
    ${CMAKE_INCLUDE_PATH}
    ${CMAKE_CURRENT_LIST_DIR}
)

project(FFmpeg_HW_transcode LANGUAGES CXX)

set(CMAKE_CXX_FLAGS "-Wall -Werror=return-type -pedantic -fPIC -gdwarf-4")
set(CMAKE_CPP_FLAGS "-Wall -Werror=return-type -pedantic -fPIC -gdwarf-4")

set(EXECUTABLE_OUTPUT_PATH "${CMAKE_CURRENT_LIST_DIR}/build/${CMAKE_BUILD_TYPE}/FFmpeg_HW_transcode")
set(LIBRARY_OUTPUT_PATH "${CMAKE_CURRENT_LIST_DIR}/build/${CMAKE_BUILD_TYPE}/FFmpeg_HW_transcode")

add_executable(${PROJECT_NAME})

target_sources(${PROJECT_NAME} PRIVATE
                vaapi_transcode.cpp)

target_link_libraries(${PROJECT_NAME}
                -L${CMAKE_CURRENT_LIST_DIR}/../build/${CMAKE_BUILD_TYPE}/FFmpeg_HW_transcode
                -lavdevice
                -lavformat
                -lavutil
                -lavcodec)


    Has anyone tried to do this kind of thing?


    Thanks for your help.

