Advanced search

Media (1)

Keyword: - Tags -/portrait

Other articles (12)

  • Adding notes and captions to images

    7 February 2011, by

    To add notes and captions to images, the first step is to install the "Légendes" plugin.
    Once the plugin is activated, you can configure it in the configuration area in order to change the rights for creating, editing and deleting notes. By default, only the site administrators can add notes to images.
    Changes when adding a media item
    When adding a media item of type "image", a new button appears above the preview (...)

  • Accepted formats

    28 January 2010, by

    The following commands give information about the formats and codecs handled by the local ffmpeg installation:
    ffmpeg -codecs
    ffmpeg -formats
    Accepted input video formats
    This list is not exhaustive; it highlights the main formats in use: h264: H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10; m4v: raw MPEG-4 video format; flv: Flash Video (FLV) / Sorenson Spark / Sorenson H.263; Theora; wmv:
    Possible output video formats
    As a first step we (...)

  • Videos

    21 April 2011, by

    As with "audio" documents, MediaSPIP displays videos whenever possible using the HTML5 video tag.
    One drawback of this tag is that it is not recognised correctly by some browsers (Internet Explorer, to name one), and each browser natively handles only certain video formats.
    Its main advantage is that videos are supported natively by the browser, which makes it possible to do without Flash and (...)

On other sites (4083)

  • Video creation with the most recent ffmpeg API (2017)

    19 October 2022, by ar2015

    I have started learning how to work with ffmpeg, whose API deprecations have broken all of the tutorials and available examples, such as this one.

    



    I am looking for code which creates an output video.

    



    Unfortunately, most of the good examples focus on reading from a file rather than creating one.

    



    Here I found a deprecated example, and I spent a long time fixing its errors until it became like this:

    



     #include <iostream>
     #include <cstdio>
     #include <cstdarg>
     #include <string>

     extern "C" {
             #include <libavcodec/avcodec.h>
             #include <libavformat/avformat.h>
             #include <libswscale/swscale.h>
             #include <libavformat/avio.h>
             #include <libavutil/opt.h>
     }

     #define WIDTH 800
     #define HEIGHT 480
     #define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * FRAME_RATE))
     #define FRAME_RATE 24
     #define PIXEL_FORMAT AV_PIX_FMT_YUV420P
     #define STREAM_DURATION 5.0 //seconds
     #define BIT_RATE 400000

     #define AV_CODEC_FLAG_GLOBAL_HEADER (1 << 22)
     #define CODEC_FLAG_GLOBAL_HEADER AV_CODEC_FLAG_GLOBAL_HEADER
     #define AVFMT_RAWPICTURE 0x0020

     using namespace std;

     static int sws_flags = SWS_BICUBIC;

     AVFrame *picture, *tmp_picture;
     uint8_t *video_outbuf;
     int frame_count, video_outbuf_size;


     /****** IF LINUX ******/
     inline int sprintf_s(char* buffer, size_t sizeOfBuffer, const char* format, ...)
     {
         va_list ap;
         va_start(ap, format);
         int result = vsnprintf(buffer, sizeOfBuffer, format, ap);
         va_end(ap);
         return result;
     }

     /****** IF LINUX ******/
     template <size_t sizeOfBuffer>
     inline int sprintf_s(char (&buffer)[sizeOfBuffer], const char* format, ...)
     {
         va_list ap;
         va_start(ap, format);
         int result = vsnprintf(buffer, sizeOfBuffer, format, ap);
         va_end(ap);
         return result;
     }


     static void closeVideo(AVFormatContext *oc, AVStream *st)
     {
         avcodec_close(st->codec);
         av_free(picture->data[0]);
         av_free(picture);
         if (tmp_picture)
         {
             av_free(tmp_picture->data[0]);
             av_free(tmp_picture);
         }
         av_free(video_outbuf);
     }

     static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
     {
         AVFrame *picture;
         uint8_t *picture_buf;
         int size;

         picture = av_frame_alloc();
         if(!picture)
             return NULL;
         size = avpicture_get_size(pix_fmt, width, height);
         picture_buf = (uint8_t*)(av_malloc(size));
         if (!picture_buf)
         {
             av_free(picture);
             return NULL;
         }
         avpicture_fill((AVPicture *) picture, picture_buf, pix_fmt, WIDTH, HEIGHT);
         return picture;
     }

     static void openVideo(AVFormatContext *oc, AVStream *st)
     {
         AVCodec *codec;
         AVCodecContext *c;

         c = st->codec;
         if(c->idct_algo == AV_CODEC_ID_H264)
             av_opt_set(c->priv_data, "preset", "slow", 0);

         codec = avcodec_find_encoder(c->codec_id);
         if(!codec)
         {
             std::cout << "Codec not found." << std::endl;
             std::cin.get();std::cin.get();exit(1);
         }

         if(codec->id == AV_CODEC_ID_H264)
             av_opt_set(c->priv_data, "preset", "medium", 0);

         if(avcodec_open2(c, codec, NULL) < 0)
         {
             std::cout << "Could not open codec." << std::endl;
             std::cin.get();std::cin.get();exit(1);
         }
         video_outbuf = NULL;
         if(!(oc->oformat->flags & AVFMT_RAWPICTURE))
         {
             video_outbuf_size = 200000;
             video_outbuf = (uint8_t*)(av_malloc(video_outbuf_size));
         }
         picture = alloc_picture(c->pix_fmt, c->width, c->height);
         if(!picture)
         {
             std::cout << "Could not allocate picture" << std::endl;
             std::cin.get();exit(1);
         }
         tmp_picture = NULL;
         if(c->pix_fmt != AV_PIX_FMT_YUV420P)
         {
             tmp_picture = alloc_picture(AV_PIX_FMT_YUV420P, WIDTH, HEIGHT);
             if(!tmp_picture)
             {
                 std::cout << " Could not allocate temporary picture" << std::endl;
                 std::cin.get();exit(1);
             }
         }
     }


     static AVStream* addVideoStream(AVFormatContext *context, enum AVCodecID codecID)
     {
         AVCodecContext *codec;
         AVStream *stream;
         stream = avformat_new_stream(context, NULL);
         if(!stream)
         {
             std::cout << "Could not alloc stream." << std::endl;
             std::cin.get();exit(1);
         }

         codec = stream->codec;
         codec->codec_id = codecID;
         codec->codec_type = AVMEDIA_TYPE_VIDEO;

         // sample rate
         codec->bit_rate = BIT_RATE;
         // resolution must be a multiple of two
         codec->width = WIDTH;
         codec->height = HEIGHT;
         codec->time_base.den = FRAME_RATE; // stream fps
         codec->time_base.num = 1;
         codec->gop_size = 12; // intra frame every twelve frames at most
         codec->pix_fmt = PIXEL_FORMAT;
         if(codec->codec_id == AV_CODEC_ID_MPEG2VIDEO)
             codec->max_b_frames = 2; // for testing, B frames

         if(codec->codec_id == AV_CODEC_ID_MPEG1VIDEO)
             codec->mb_decision = 2;

         if(context->oformat->flags & AVFMT_GLOBALHEADER)
             codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

         return stream;
     }

     static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
     {
         int x, y, i;
         i = frame_index;

         /* Y */
         for(y=0;y<height;y++) {
             for(x=0;x<width;x++) {
                 pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
             }
         }

         /* Cb and Cr */
         for(y=0;y<height/2;y++) {
             for(x=0;x<width/2;x++) {
                 pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
                 pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
             }
         }
     }

     static void write_video_frame(AVFormatContext *oc, AVStream *st)
     {
         int out_size, ret;
         AVCodecContext *c;
         static struct SwsContext *img_convert_ctx;
         c = st->codec;

         if(frame_count >= STREAM_NB_FRAMES)
         {

         }
         else
         {
             if(c->pix_fmt != AV_PIX_FMT_YUV420P)
             {
                 if(img_convert_ctx = NULL)
                 {
                     img_convert_ctx = sws_getContext(WIDTH, HEIGHT, AV_PIX_FMT_YUV420P, WIDTH, HEIGHT,
                                                     c->pix_fmt, sws_flags, NULL, NULL, NULL);
                     if(img_convert_ctx == NULL)
                     {
                         std::cout << "Cannot initialize the conversion context" << std::endl;
                         std::cin.get();exit(1);
                     }
                 }
                 fill_yuv_image(tmp_picture, frame_count, WIDTH, HEIGHT);
                 sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize, 0, HEIGHT,
                             picture->data, picture->linesize);
             }
             else
             {
                 fill_yuv_image(picture, frame_count, WIDTH, HEIGHT);
             }
         }

         if (oc->oformat->flags & AVFMT_RAWPICTURE)
         {
             /* raw video case. The API will change slightly in the near
                futur for that */
             AVPacket pkt;
             av_init_packet(&pkt);

             pkt.flags |= AV_PKT_FLAG_KEY;
             pkt.stream_index= st->index;
             pkt.data= (uint8_t *)picture;
             pkt.size= sizeof(AVPicture);

             ret = av_interleaved_write_frame(oc, &pkt);
         }
         else
         {
             /* encode the image */
             out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
             /* if zero size, it means the image was buffered */
             if (out_size > 0)
             {
                 AVPacket pkt;
                 av_init_packet(&pkt);

                 if (c->coded_frame->pts != AV_NOPTS_VALUE)
                     pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
                 if(c->coded_frame->key_frame)
                     pkt.flags |= AV_PKT_FLAG_KEY;
                 pkt.stream_index= st->index;
                 pkt.data= video_outbuf;
                 pkt.size= out_size;
                 /* write the compressed frame in the media file */
                 ret = av_interleaved_write_frame(oc, &pkt);
             } else {
                 ret = 0;
             }
         }
         if (ret != 0) {
             std::cout << "Error while writing video frames" << std::endl;
             std::cin.get();exit(1);
         }
         frame_count++;
     }

     int main ( int argc, char *argv[] )
     {
         const char* filename = "test.h264";
         AVOutputFormat *outputFormat;
         AVFormatContext *context;
         AVCodecContext *codec;
         AVStream *videoStream;
         double videoPTS;

         // init libavcodec, register all codecs and formats
         av_register_all();
         // auto detect the output format from the name
         outputFormat = av_guess_format(NULL, filename, NULL);
         if(!outputFormat)
         {
             std::cout << "Cannot guess output format! Using mpeg!" << std::endl;
             std::cin.get();
             outputFormat = av_guess_format(NULL, "h263" , NULL);
         }
         if(!outputFormat)
         {
             std::cout << "Could not find suitable output format." << std::endl;
             std::cin.get();exit(1);
         }

         context = avformat_alloc_context();
         if(!context)
         {
             std::cout << "Cannot allocate avformat memory." << std::endl;
             std::cin.get();exit(1);
         }
         context->oformat = outputFormat;
         sprintf_s(context->filename, sizeof(context->filename), "%s", filename);
         std::cout << "Is '" << context->filename << "' = '" << filename << "'" << std::endl;


         videoStream = NULL;
         outputFormat->audio_codec = AV_CODEC_ID_NONE;
         videoStream = addVideoStream(context, outputFormat->video_codec);

         /* still needed?
         if(av_set_parameters(context, NULL) < 0)
         {
             std::cout << "Invalid output format parameters." << std::endl;
             exit(0);
         }*/

         av_dump_format(context, 0, filename, 1);

         if(videoStream)
             openVideo(context, videoStream);

         if(!outputFormat->flags & AVFMT_NOFILE)
         {
             if(avio_open(&context->pb, filename, AVIO_FLAG_READ_WRITE) < 0)
             {
                 std::cout << "Could not open " << filename << std::endl;
                 std::cin.get();exit(1);
             }
         }

         avformat_write_header(context, 0);

         while(true)
         {
             if(videoStream)
                 videoPTS = (double) videoStream->pts.val * videoStream->time_base.num / videoStream->time_base.den;
             else
                 videoPTS = 0.;

             if((!videoStream || videoPTS >= STREAM_DURATION))
             {
                 break;
             }
             write_video_frame(context, videoStream);
         }
         av_write_trailer(context);
         if(videoStream)
             closeVideo(context, videoStream);
         for(int i = 0; i < context->nb_streams; i++)
         {
             av_freep(&context->streams[i]->codec);
             av_freep(&context->streams[i]);
         }

         if(!(outputFormat->flags & AVFMT_NOFILE))
         {
             avio_close(context->pb);
         }
         av_free(context);
         std::cin.get();
         return 0;
     }


    Compile:


     g++ -I ./FFmpeg/ video.cpp -L fflibs -lavcodec -lavformat


    The code comes with two errors:


     video.cpp:249:84: error: ‘avcodec_encode_video’ was not declared in this scope
          out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
                                                                                     ^

     video.cpp: In function ‘int main(int, char**)’:
     video.cpp:342:46: error: ‘AVStream {aka struct AVStream}’ has no member named ‘pts’
                  videoPTS = (double) videoStream->pts.val * videoStream->time_base.num / videoStream->time_base.den;
                                                   ^


    and a huge number of deprecation warnings.


     video.cpp: In function ‘void closeVideo(AVFormatContext*, AVStream*)’:
     video.cpp:60:23: warning: ‘AVStream::codec’ is deprecated [-Wdeprecated-declarations]
          avcodec_close(st->codec);
                            ^
     In file included from video.cpp:9:0:
     ./FFmpeg/libavformat/avformat.h:876:21: note: declared here
          AVCodecContext *codec;
                          ^
     video.cpp: In function ‘AVFrame* alloc_picture(AVPixelFormat, int, int)’:
     video.cpp:80:12: warning: ‘int avpicture_get_size(AVPixelFormat, int, int)’ is deprecated [-Wdeprecated-declarations]
          size = avpicture_get_size(pix_fmt, width, height);
                 ^
     In file included from video.cpp:8:0:
     ./FFmpeg/libavcodec/avcodec.h:5228:5: note: declared here
      int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height);
          ^
     video.cpp:87:5: warning: ‘int avpicture_fill(AVPicture*, const uint8_t*, AVPixelFormat, int, int)’ is deprecated [-Wdeprecated-declarations]
          avpicture_fill((AVPicture *) picture, picture_buf, pix_fmt, WIDTH, HEIGHT);
          ^
     In file included from video.cpp:8:0:
     ./FFmpeg/libavcodec/avcodec.h:5213:5: note: declared here
      int avpicture_fill(AVPicture *picture, const uint8_t *ptr,
          ^
     video.cpp: In function ‘void openVideo(AVFormatContext*, AVStream*)’:
     video.cpp:96:13: warning: ‘AVStream::codec’ is deprecated [-Wdeprecated-declarations]
          c = st->codec;
                  ^
     video.cpp: In function ‘AVStream* addVideoStream(AVFormatContext*, AVCodecID)’:
     video.cpp:151:21: warning: ‘AVStream::codec’ is deprecated [-Wdeprecated-declarations]
          codec = stream->codec;
                          ^
     video.cpp: In function ‘void write_video_frame(AVFormatContext*, AVStream*)’:
     video.cpp:202:13: warning: ‘AVStream::codec’ is deprecated [-Wdeprecated-declarations]
          c = st->codec;
                  ^
     video.cpp:256:20: warning: ‘AVCodecContext::coded_frame’ is deprecated [-Wdeprecated-declarations]
                  if (c->coded_frame->pts != AV_NOPTS_VALUE)
                         ^
     In file included from video.cpp:8:0:
     ./FFmpeg/libavcodec/avcodec.h:2723:35: note: declared here
          attribute_deprecated AVFrame *coded_frame;
                                        ^
     video.cpp:257:42: warning: ‘AVCodecContext::coded_frame’ is deprecated [-Wdeprecated-declarations]
                      pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
                                               ^
     video.cpp:258:19: warning: ‘AVCodecContext::coded_frame’ is deprecated [-Wdeprecated-declarations]
                  if(c->coded_frame->key_frame)
                        ^
     video.cpp:357:40: warning: ‘AVStream::codec’ is deprecated [-Wdeprecated-declarations]
              av_freep(&context->streams[i]->codec);
                                             ^
     video.cpp:337:38: warning: ignoring return value of ‘int avformat_write_header(AVFormatContext*, AVDictionary**)’, declared with attribute warn_unused_result [-Wunused-result]
          avformat_write_header(context, 0);
                                           ^
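
    Most of these diagnostics point at API that has since been removed or deprecated: avcodec_encode_video() is gone, and AVStream::codec / AVCodecContext::coded_frame are on the way out. As a rough sketch (not taken from the post; it assumes an encoder context enc opened separately with avcodec_alloc_context3() instead of st->codec), the encode-and-write step maps onto the send/receive API like this:

     static int encode_and_write(AVFormatContext *oc, AVStream *st,
                                 AVCodecContext *enc, AVFrame *frame)
     {
         // Passing NULL as frame flushes the encoder at the end of the stream.
         int ret = avcodec_send_frame(enc, frame);
         if (ret < 0)
             return ret;

         AVPacket *pkt = av_packet_alloc();
         while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
             // Rescale packet timestamps from the encoder to the stream time base.
             av_packet_rescale_ts(pkt, enc->time_base, st->time_base);
             pkt->stream_index = st->index;
             ret = av_interleaved_write_frame(oc, pkt);
             av_packet_unref(pkt);
             if (ret < 0)
                 break;
         }
         av_packet_free(&pkt);
         // EAGAIN/EOF only mean "no more packets right now", not a real error.
         return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
     }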


    I have also defined a few macros to redefine the constants that have been removed. With a modern ffmpeg API they would have to be replaced properly.


    Could someone please help me fix the errors and deprecation warnings so that the code complies with the recent ffmpeg API?
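
    For the AVStream::pts error, current FFmpeg no longer exposes a running pts on the stream; the usual pattern is to configure the stream through st->codecpar and track the duration from the frames being generated. A minimal sketch under the same assumptions (a separately allocated encoder context enc, plus the encode_and_write() helper sketched above):

     // Stream setup without the removed AVStream::codec field.
     AVStream *st = avformat_new_stream(context, NULL);
     st->time_base = enc->time_base;
     avcodec_parameters_from_context(st->codecpar, enc);   // replaces filling st->codec

     // AVStream::pts is gone: count frames and compare against the target duration.
     int64_t next_pts = 0;
     while (av_compare_ts(next_pts, enc->time_base,
                          STREAM_DURATION, AVRational{1, 1}) < 0) {
         fill_yuv_image(picture, frame_count, WIDTH, HEIGHT);
         picture->pts = next_pts++;
         encode_and_write(context, st, enc, picture);
         frame_count++;
     }
     encode_and_write(context, st, enc, NULL);   // flush the encoder
     av_write_trailer(context);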


  • swscaler bad src image pointers

    7 March 2018, by user1496491

    I’m completely lost. I’m trying to capture 30 screenshots and put them into a video with FFMPEG under Windows 10, and it keeps telling me [swscaler @ 073890a0] bad src image pointers. As a result the video is entirely green. If I switch the format to dshow using video=screen-capture-recorder, the video is mostly garbage. Here is my short code for that. I’m completely stuck and don’t even know which direction to look in.

    MainWindow.h

    #ifndef MAINWINDOW_H
    #define MAINWINDOW_H

    #include <QMainWindow>
    #include <QFuture>
    #include <QFutureWatcher>
    #include <QMutex>
    #include <QMutexLocker>

    extern "C" {
    #include "libavcodec/avcodec.h"
    #include "libavcodec/avfft.h"

    #include "libavdevice/avdevice.h"

    #include "libavfilter/avfilter.h"
    #include "libavfilter/avfiltergraph.h"
    #include "libavfilter/buffersink.h"
    #include "libavfilter/buffersrc.h"

    #include "libavformat/avformat.h"
    #include "libavformat/avio.h"

    #include "libavutil/opt.h"
    #include "libavutil/common.h"
    #include "libavutil/channel_layout.h"
    #include "libavutil/imgutils.h"
    #include "libavutil/mathematics.h"
    #include "libavutil/samplefmt.h"
    #include "libavutil/time.h"
    #include "libavutil/opt.h"
    #include "libavutil/pixdesc.h"
    #include "libavutil/file.h"

    #include "libswscale/swscale.h"
    }

    class MainWindow : public QMainWindow
    {
       Q_OBJECT

    public:
       MainWindow(QWidget *parent = 0);
       ~MainWindow();

    private:
       AVFormatContext *inputFormatContext = nullptr;
       AVFormatContext *outFormatContext = nullptr;

       AVStream* videoStream = nullptr;

       AVDictionary* options = nullptr;

       AVCodec* outCodec = nullptr;
       AVCodec* inputCodec = nullptr;
       AVCodecContext* inputCodecContext = nullptr;
       AVCodecContext* outCodecContext = nullptr;
       SwsContext* swsContext = nullptr;

    private:
       void init();
       void initOutFile();
       void collectFrame();
    };

    #endif // MAINWINDOW_H

    MainWindow.cpp

    #include "MainWindow.h"

    #include <QGuiApplication>
    #include <QLabel>
    #include <QScreen>
    #include <QTimer>
    #include <QLayout>
    #include <QImage>
    #include <QtConcurrent/QtConcurrent>
    #include <QThreadPool>

    #include "ScreenCapture.h"

    MainWindow::MainWindow(QWidget *parent) : QMainWindow(parent)
    {
       resize(800, 600);

       auto label = new QLabel();
       label->setAlignment(Qt::AlignHCenter | Qt::AlignVCenter);

       auto layout = new QHBoxLayout();
       layout->addWidget(label);

       auto widget = new QWidget();
       widget->setLayout(layout);
       setCentralWidget(widget);

       init();
       initOutFile();
       collectFrame();
    }

    MainWindow::~MainWindow()
    {
       avformat_close_input(&inputFormatContext);
       avformat_free_context(inputFormatContext);

       QThreadPool::globalInstance()->waitForDone();
    }

    void MainWindow::init()
    {
       av_register_all();
       avcodec_register_all();
       avdevice_register_all();
       avformat_network_init();

       auto screen = QGuiApplication::screens()[0];
       QRect geometry = screen->geometry();

       inputFormatContext = avformat_alloc_context();

       options = NULL;
       av_dict_set(&options, "framerate", "30", NULL);
       av_dict_set(&options, "offset_x", QString::number(geometry.x()).toLatin1().data(), NULL);
       av_dict_set(&options, "offset_y", QString::number(geometry.y()).toLatin1().data(), NULL);
       av_dict_set(&options, "video_size", QString(QString::number(geometry.width()) + "x" + QString::number(geometry.height())).toLatin1().data(), NULL);
       av_dict_set(&options, "show_region", "1", NULL);

       AVInputFormat* inputFormat = av_find_input_format("gdigrab");
       avformat_open_input(&inputFormatContext, "desktop", inputFormat, &options);

       int videoStreamIndex = av_find_best_stream(inputFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);

       inputCodecContext = inputFormatContext->streams[videoStreamIndex]->codec;
       inputCodecContext->width = geometry.width();
       inputCodecContext->height = geometry.height();
       inputCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;

       inputCodec = avcodec_find_decoder(inputCodecContext->codec_id);
       avcodec_open2(inputCodecContext, inputCodec, NULL);
    }

    void MainWindow::initOutFile()
    {
       const char* filename = "C:/Temp/output.mp4";

       avformat_alloc_output_context2(&outFormatContext, NULL, NULL, filename);

       outCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);

       videoStream = avformat_new_stream(outFormatContext, outCodec);
       videoStream->time_base = {1, 30};

       outCodecContext = videoStream->codec;
       outCodecContext->codec_id = AV_CODEC_ID_MPEG4;
       outCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
       outCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
       outCodecContext->bit_rate = 400000;
       outCodecContext->width = inputCodecContext->width;
       outCodecContext->height = inputCodecContext->height;
       outCodecContext->gop_size = 3;
       outCodecContext->max_b_frames = 2;
       outCodecContext->time_base = videoStream->time_base;

       if (outFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
           outCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

       avcodec_open2(outCodecContext, outCodec, NULL);

       if (!(outFormatContext->flags & AVFMT_NOFILE))
           avio_open2(&outFormatContext->pb, filename, AVIO_FLAG_WRITE, NULL, NULL);

       swsContext = sws_getContext(inputCodecContext->width,
                                   inputCodecContext->height,
                                   inputCodecContext->pix_fmt,
                                   outCodecContext->width,
                                   outCodecContext->height,
                                   outCodecContext->pix_fmt,
                                   SWS_BICUBIC, NULL, NULL, NULL);

       avformat_write_header(outFormatContext, &options);
    }

    void MainWindow::collectFrame()
    {
       AVFrame* frame = av_frame_alloc();
       frame->data[0] = NULL;
       frame->width = inputCodecContext->width;
       frame->height = inputCodecContext->height;
       frame->format = inputCodecContext->pix_fmt;

       av_image_alloc(frame->data, frame->linesize, inputCodecContext->width, inputCodecContext->height, (AVPixelFormat)frame->format, 32);

       AVFrame* outFrame = av_frame_alloc();
       outFrame->data[0] = NULL;
       outFrame->width = outCodecContext->width;
       outFrame->height = outCodecContext->height;
       outFrame->format = outCodecContext->pix_fmt;

       av_image_alloc(outFrame->data, outFrame->linesize, outCodecContext->width, outCodecContext->height, (AVPixelFormat)outFrame->format, 32);

       int bufferSize = av_image_get_buffer_size(outCodecContext->pix_fmt,
                                                 outCodecContext->width,
                                                 outCodecContext->height,
                                                 24);

       uint8_t* outBuffer = (uint8_t*)av_malloc(bufferSize);

       avpicture_fill((AVPicture*)outFrame, outBuffer,
                      AV_PIX_FMT_YUV420P,
                      outCodecContext->width, outCodecContext->height);

       int frameCount = 30;
       int count = 0;

       AVPacket* packet = (AVPacket*)av_malloc(sizeof(AVPacket));
       av_init_packet(packet);

       while(av_read_frame(inputFormatContext, packet) >= 0)
       {
           if(packet->stream_index == videoStream->index)
           {
               int frameFinished = 0;
               avcodec_decode_video2(inputCodecContext, frame, &frameFinished, packet);

               if(frameFinished)
               {
                   if(++count > frameCount)
                   {
                       qDebug() << "FINISHED!";
                       break;
                   }

                   sws_scale(swsContext, frame->data, frame->linesize, 0, inputCodecContext->height, outFrame->data, outFrame->linesize);

                   AVPacket outPacket;
                   av_init_packet(&outPacket);
                   outPacket.data = NULL;
                   outPacket.size = 0;

                   int got_picture = 0;
                   avcodec_encode_video2(outCodecContext, &outPacket, outFrame, &got_picture);

                   if(got_picture)
                   {
                       if(outPacket.pts != AV_NOPTS_VALUE) outPacket.pts = av_rescale_q(outPacket.pts, videoStream->codec->time_base, videoStream->time_base);
                       if(outPacket.dts != AV_NOPTS_VALUE) outPacket.dts = av_rescale_q(outPacket.dts, videoStream->codec->time_base, videoStream->time_base);

                       av_write_frame(outFormatContext, &outPacket);
                   }

                   av_packet_unref(&outPacket);
               }
           }
       }

       av_write_trailer(outFormatContext);

       av_free(outBuffer);
    }
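
    For context, that swscaler message usually means the pointers handed to sws_scale() are not data the decoder actually filled, or that the source pixel format does not match what gdigrab really delivers (typically BGRA rather than YUV420P). A minimal sketch of the usual pattern, reusing the names from the post (the BGRA assumption is mine):

       // Let the decoder own the source frame; do not av_image_alloc() it yourself.
       AVFrame* decFrame = av_frame_alloc();

       // Destination frame in the encoder's format, with its own buffer.
       AVFrame* encFrame = av_frame_alloc();
       encFrame->format = outCodecContext->pix_fmt;        // AV_PIX_FMT_YUV420P
       encFrame->width  = outCodecContext->width;
       encFrame->height = outCodecContext->height;
       av_frame_get_buffer(encFrame, 32);

       // Source format must be what the decoder reports (gdigrab is normally BGRA).
       SwsContext* sws = sws_getContext(inputCodecContext->width, inputCodecContext->height,
                                        inputCodecContext->pix_fmt,
                                        encFrame->width, encFrame->height,
                                        (AVPixelFormat)encFrame->format,
                                        SWS_BICUBIC, NULL, NULL, NULL);

       int frameFinished = 0;
       avcodec_decode_video2(inputCodecContext, decFrame, &frameFinished, packet);
       if (frameFinished)                                   // only scale a frame the decoder filled
           sws_scale(sws, decFrame->data, decFrame->linesize, 0,
                     inputCodecContext->height,
                     encFrame->data, encFrame->linesize);
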
  • FFMPEG libav gdigrab capturing with wrong colors

    7 March 2018, by user1496491

    I’m capturing the screen with the code below, and the resulting picture has the wrong colors.

    Screenshot

    The picture on the left is the raw data, which I assumed to be ARGB; the picture on the right is encoded as YUV. I’ve tried different formats and the pictures change slightly, but they never look the way they should. In what format does gdigrab give its output? What is the right way to encode it?

    #include "MainWindow.h"

    #include <QGuiApplication>
    #include <QLabel>
    #include <QScreen>
    #include <QTimer>
    #include <QLayout>
    #include <QImage>
    #include <QtConcurrent/QtConcurrent>
    #include <QThreadPool>

    #include "ScreenCapture.h"

    MainWindow::MainWindow(QWidget *parent) : QMainWindow(parent)
    {
       resize(800, 600);

       label = new QLabel();
       label->setAlignment(Qt::AlignHCenter | Qt::AlignVCenter);

       auto layout = new QHBoxLayout();
       layout->addWidget(label);

       auto widget = new QWidget();
       widget->setLayout(layout);
       setCentralWidget(widget);

       init();
       initOutFile();
       collectFrame();
    }

    MainWindow::~MainWindow()
    {
       avformat_close_input(&inputFormatContext);
       avformat_free_context(inputFormatContext);

       QThreadPool::globalInstance()->waitForDone();
    }

    void MainWindow::init()
    {

       av_register_all();
       avcodec_register_all();
       avdevice_register_all();

       auto screen = QGuiApplication::screens()[1];
       QRect geometry = screen->geometry();

       inputFormatContext = avformat_alloc_context();

       AVDictionary* options = NULL;
       av_dict_set(&options, "framerate", "30", NULL);
       av_dict_set(&options, "offset_x", QString::number(geometry.x()).toLatin1().data(), NULL);
       av_dict_set(&options, "offset_y", QString::number(geometry.y()).toLatin1().data(), NULL);
       av_dict_set(&options, "preset", "ultrafast", NULL);
       av_dict_set(&options, "probesize", "10MB", NULL);
       av_dict_set(&options, "pix_fmt", "yuv420p", NULL);
       av_dict_set(&options, "video_size", QString(QString::number(geometry.width()) + "x" + QString::number(geometry.height())).toLatin1().data(), NULL);

       AVInputFormat* inputFormat = av_find_input_format("gdigrab");
       avformat_open_input(&inputFormatContext, "desktop", inputFormat, &options);

    //    AVDictionary* options = NULL;
    //    av_dict_set(&options, "framerate", "30", NULL);
    //    av_dict_set(&options, "preset", "ultrafast", NULL);
    //    av_dict_set(&options, "vcodec", "h264", NULL);
    //    av_dict_set(&options, "s", "1280x720", NULL);
    //    av_dict_set(&options, "crf", "0", NULL);
    //    av_dict_set(&options, "rtbufsize", "100M", NULL);

    //    AVInputFormat *format = av_find_input_format("dshow");
    //    avformat_open_input(&inputFormatContext, "video=screen-capture-recorder", format, &options);

       av_dict_free(&options);
       avformat_find_stream_info(inputFormatContext, NULL);

       videoStreamIndex = av_find_best_stream(inputFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);

       inputCodec = avcodec_find_decoder(inputFormatContext->streams[videoStreamIndex]->codecpar->codec_id);
       if(!inputCodec) qDebug() << "Input stream codec not found!";

       inputCodecContext = avcodec_alloc_context3(inputCodec);
       inputCodecContext->codec_id = inputCodec->id;

       avcodec_parameters_to_context(inputCodecContext, inputFormatContext->streams[videoStreamIndex]->codecpar);

       if(avcodec_open2(inputCodecContext, inputCodec, NULL)) qDebug() << "Failed to open the input codec!";
    }

    void MainWindow::initOutFile()
    {
       const char* filename = "C:/Temp/output.mp4";

       if(avformat_alloc_output_context2(&outFormatContext, NULL, NULL, filename) < 0) qDebug() << "Failed to create the output context!";

       outCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
       if(!outCodec) qDebug() << "Failed to find the codec!";

       videoStream = avformat_new_stream(outFormatContext, outCodec);
       videoStream->time_base = {1, 30};

       const AVPixelFormat* pixelFormat = outCodec->pix_fmts;
       while (*pixelFormat != AV_PIX_FMT_NONE)
       {
           qDebug() << "OUT_FORMAT" << av_get_pix_fmt_name(*pixelFormat);
           ++pixelFormat;
       }

       outCodecContext = videoStream->codec;
       outCodecContext->bit_rate = 400000;
       outCodecContext->width = inputCodecContext->width;
       outCodecContext->height = inputCodecContext->height;
       outCodecContext->time_base = videoStream->time_base;
       outCodecContext->gop_size = 10;
       outCodecContext->max_b_frames = 1;
       outCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;

       if (outFormatContext->oformat->flags & AVFMT_GLOBALHEADER) outCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;

       if(avcodec_open2(outCodecContext, outCodec, NULL)) qDebug() << "Failed to open the output codec!";

       swsContext = sws_getContext(inputCodecContext->width,
                                   inputCodecContext->height,
    //                                inputCodecContext->pix_fmt,
                                   AV_PIX_FMT_ABGR,
                                   outCodecContext->width,
                                   outCodecContext->height,
                                   outCodecContext->pix_fmt,
                                   SWS_BICUBIC, NULL, NULL, NULL);

       if(avio_open(&outFormatContext->pb, filename, AVIO_FLAG_WRITE) < 0) qDebug() << "Failed to open the file!";
       if(avformat_write_header(outFormatContext, NULL) < 0) qDebug() << "Failed to write the header!";
    }

    void MainWindow::collectFrame()
    {
       AVFrame* inFrame = av_frame_alloc();
       inFrame->format = inputCodecContext->pix_fmt;
       inFrame->width = inputCodecContext->width;
       inFrame->height = inputCodecContext->height;

       int size = av_image_alloc(inFrame->data, inFrame->linesize, inFrame->width, inFrame->height, inputCodecContext->pix_fmt, 1);
       qDebug() << size;

       AVFrame* outFrame = av_frame_alloc();
       outFrame->format = outCodecContext->pix_fmt;
       outFrame->width = outCodecContext->width;
       outFrame->height = outCodecContext->height;

       qDebug() << av_image_alloc(outFrame->data, outFrame->linesize, outFrame->width, outFrame->height, outCodecContext->pix_fmt, 1);

       AVPacket packet;
       av_init_packet(&packet);

       av_read_frame(inputFormatContext, &packet);
    //    while(av_read_frame(inputFormatContext, &packet) >= 0)
    //    {
           if(packet.stream_index == videoStream->index)
           {

               memcpy(inFrame->data[0], packet.data, size);

               sws_scale(swsContext, inFrame->data, inFrame->linesize, 0, inputCodecContext->height, outFrame->data, outFrame->linesize);

               QImage image(inFrame->data[0], inFrame->width, inFrame->height, QImage::Format_ARGB32);
               label->setPixmap(QPixmap::fromImage(image).scaled(label->size(), Qt::KeepAspectRatio));

               AVPacket outPacket;
               av_init_packet(&outPacket);

               int encodeResult = avcodec_receive_packet(outCodecContext, &outPacket);
               while(encodeResult == AVERROR(EAGAIN))
               {
                   if(avcodec_send_frame(outCodecContext, outFrame)) qDebug() << "Error sending the frame to the encoder!";

                   encodeResult = avcodec_receive_packet(outCodecContext, &outPacket);
               }
               if(encodeResult != 0) qDebug() << "Error during encoding!" << encodeResult;

               if(outPacket.pts != AV_NOPTS_VALUE) outPacket.pts = av_rescale_q(outPacket.pts, videoStream->codec->time_base, videoStream->time_base);
               if(outPacket.dts != AV_NOPTS_VALUE) outPacket.dts = av_rescale_q(outPacket.dts, videoStream->codec->time_base, videoStream->time_base);

               av_write_frame(outFormatContext, &outPacket);

               av_packet_unref(&outPacket);
           }
    //    }

       av_packet_unref(&packet);

       av_write_trailer(outFormatContext);
       avio_close(outFormatContext->pb);
    }
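
    For reference, gdigrab produces BMP-style frames that normally decode to BGRA, so hard-coding AV_PIX_FMT_ABGR swaps the channel order, and memcpy'ing raw packet bytes bypasses the decoder entirely. A rough sketch reusing the names above (the BGRA assumption is mine; check what inputCodecContext->pix_fmt reports after avcodec_parameters_to_context()):

       // Use the pixel format the decoder reports instead of guessing ABGR.
       swsContext = sws_getContext(inputCodecContext->width, inputCodecContext->height,
                                   inputCodecContext->pix_fmt,          // usually AV_PIX_FMT_BGRA
                                   outCodecContext->width, outCodecContext->height,
                                   outCodecContext->pix_fmt,
                                   SWS_BICUBIC, NULL, NULL, NULL);

       // Decode the grabbed packet rather than memcpy'ing packet.data into inFrame.
       avcodec_send_packet(inputCodecContext, &packet);
       if (avcodec_receive_frame(inputCodecContext, inFrame) == 0)
       {
           sws_scale(swsContext, inFrame->data, inFrame->linesize, 0,
                     inputCodecContext->height, outFrame->data, outFrame->linesize);

           // QImage::Format_ARGB32 stores bytes as B,G,R,A on little-endian Windows,
           // which matches BGRA data, so the preview keeps its colours as well.
           QImage image(inFrame->data[0], inFrame->width, inFrame->height,
                        inFrame->linesize[0], QImage::Format_ARGB32);
           label->setPixmap(QPixmap::fromImage(image).scaled(label->size(), Qt::KeepAspectRatio));
       }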