Other articles (68)

  • Customising by adding your own logo, banner or background image

    5 September 2013, by

    Some themes support three customisation elements: adding a logo; adding a banner; adding a background image.

  • Writing a news item

    21 June 2013, by

    Present changes to your MédiaSPIP, or news about your projects, through the news section of your MédiaSPIP.
    In MédiaSPIP's default theme, spipeo, news items are displayed at the bottom of the main page, below the editorials.
    You can customise the news item creation form.
    News item creation form: for a document of the news type, the fields offered by default are: Publication date (customise the publication date) (...)

  • Publishing on MédiaSpip

    13 June 2013

    Can I post content from an iPad tablet?
    Yes, provided your MédiaSpip installation is at version 0.2 or later. If in doubt, ask your MédiaSpip administrator.

On other sites (8194)

  • c++ - using FFmpeg to encode a webcam stream and send it over UDP

    14 March, by Rendres

    I'm trying to get frames from a webcam using OpenCV, encode them with FFmpeg and send them over UDP.

    Before this, I did a similar project that, instead of sending the packets over UDP, saved them to a video file.

    My code is:

    #include <iostream>   // (further standard #include lines were lost in the page formatting)

    extern "C" {
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libavutil/opt.h>
    #include <libavutil/imgutils.h>
    #include <libavutil/mathematics.h>
    #include <libswscale/swscale.h>
    #include <libswresample/swresample.h>
    }

    #include <opencv2/opencv.hpp>

    using namespace std;
    using namespace cv;

    #define WIDTH 640
    #define HEIGHT 480
    #define CODEC_ID AV_CODEC_ID_H264
    #define STREAM_PIX_FMT AV_PIX_FMT_YUV420P

    static AVFrame *frame, *pFrameBGR;

    int main(int argc, char **argv)
    {
    VideoCapture cap(0);
    const char *url = "udp://127.0.0.1:8080";

    AVFormatContext *formatContext;
    AVStream *stream;
    AVCodec *codec;
    AVCodecContext *c;
    AVDictionary *opts = NULL;

    int ret, got_packet;

    if (!cap.isOpened())
    {
        return -1;
    }

    av_log_set_level(AV_LOG_TRACE);

    av_register_all();
    avformat_network_init();

    avformat_alloc_output_context2(&formatContext, NULL, "h264", url);
    if (!formatContext)
    {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate an output context for '%s'.\n", url);
    }

    codec = avcodec_find_encoder(CODEC_ID);
    if (!codec)
    {
        av_log(NULL, AV_LOG_ERROR, "Could not find encoder.\n");
    }

    stream = avformat_new_stream(formatContext, codec);

    c = avcodec_alloc_context3(codec);

    stream->id = formatContext->nb_streams - 1;
    stream->time_base = (AVRational){1, 25};

    c->codec_id = CODEC_ID;
    c->bit_rate = 400000;
    c->width = WIDTH;
    c->height = HEIGHT;
    c->time_base = stream->time_base;
    c->gop_size = 12;
    c->pix_fmt = STREAM_PIX_FMT;

    if (formatContext->flags & AVFMT_GLOBALHEADER)
        c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    av_dict_set(&opts, "preset", "fast", 0);

    av_dict_set(&opts, "tune", "zerolatency", 0);

    ret = avcodec_open2(c, codec, NULL);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Could not open video codec.\n");
    }

    pFrameBGR = av_frame_alloc();
    if (!pFrameBGR)
    {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame.\n");
    }

    frame = av_frame_alloc();
    if (!frame)
    {
        av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame.\n");
    }

    frame->format = c->pix_fmt;
    frame->width = c->width;
    frame->height = c->height;

    ret = avcodec_parameters_from_context(stream->codecpar, c);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Could not open video codec.\n");
    }

    av_dump_format(formatContext, 0, url, 1);

    ret = avformat_write_header(formatContext, NULL);
    if (ret != 0)
    {
        av_log(NULL, AV_LOG_ERROR, "Failed to connect to '%s'.\n", url);
    }

    Mat image(Size(HEIGHT, WIDTH), CV_8UC3);
    SwsContext *swsctx = sws_getContext(WIDTH, HEIGHT, AV_PIX_FMT_BGR24, WIDTH, HEIGHT, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);
    int frame_pts = 0;

    while (1)
    {
        cap >> image;

        int numBytesYUV = av_image_get_buffer_size(STREAM_PIX_FMT, WIDTH, HEIGHT, 1);
        uint8_t *bufferYUV = (uint8_t *)av_malloc(numBytesYUV * sizeof(uint8_t));

        avpicture_fill((AVPicture *)pFrameBGR, image.data, AV_PIX_FMT_BGR24, WIDTH, HEIGHT);
        avpicture_fill((AVPicture *)frame, bufferYUV, STREAM_PIX_FMT, WIDTH, HEIGHT);

        sws_scale(swsctx, (uint8_t const *const *)pFrameBGR->data, pFrameBGR->linesize, 0, HEIGHT, frame->data, frame->linesize);

        AVPacket pkt = {0};
        av_init_packet(&pkt);

        frame->pts = frame_pts;

        ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
        if (ret < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "Error encoding frame\n");
        }

        if (got_packet)
        {
            pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
            pkt.dts = av_rescale_q_rnd(pkt.dts, c->time_base, stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
            pkt.duration = av_rescale_q(pkt.duration, c->time_base, stream->time_base);
            pkt.stream_index = stream->index;

            return av_interleaved_write_frame(formatContext, &pkt);

            cout << "Seguro que si" << endl;
        }
        frame_pts++;
    }

    avcodec_free_context(&c);
    av_frame_free(&frame);
    avformat_free_context(formatContext);

    return 0;
    }

    The code compiles, but it returns a segmentation fault in av_interleaved_write_frame(). I've tried several implementations and several codecs (in this case I'm using libopenh264, but mpeg2video produces the same segmentation fault). I also tried av_write_frame(), with the same error.
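
    One thing I'm starting to suspect (just my assumption, I haven't confirmed it): avformat_alloc_output_context2() doesn't open any I/O by itself, and the raw h264 muxer doesn't set AVFMT_NOFILE, so formatContext->pb might still be NULL when av_interleaved_write_frame() runs. Something like this before avformat_write_header(), in case that's the issue:

    if (!(formatContext->oformat->flags & AVFMT_NOFILE))
    {
        // Open the UDP output ourselves; without this, formatContext->pb stays NULL.
        ret = avio_open(&formatContext->pb, url, AVIO_FLAG_WRITE);
        if (ret < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "Could not open output URL '%s'.\n", url);
            return -1;
        }
    }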

    As I said before, I only want to grab frames from a webcam connected via USB, encode them to H264 and send the packets over UDP to another PC.

    My console log when I run the executable is:

    [100%] Built target display
    [OpenH264] this = 0x0x244b4f0, Info:CWelsH264SVCEncoder::SetOption():ENCODER_OPTION_TRACE_CALLBACK callback = 0x7f0c302a87c0.
    [libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:CWelsH264SVCEncoder::InitEncoder(), openh264 codec version = 5a5c4f1
    [libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:iUsageType = 0,iPicWidth= 640;iPicHeight= 480;iTargetBitrate= 400000;iMaxBitrate= 400000;iRCMode= 0;iPaddingFlag= 0;iTemporalLayerNum= 1;iSpatialLayerNum= 1;fFrameRate= 25.000000f;uiIntraPeriod= 12;eSpsPpsIdStrategy = 0;bPrefixNalAddingCtrl = 0;bSimulcastAVC=0;bEnableDenoise= 0;bEnableBackgroundDetection= 1;bEnableSceneChangeDetect = 1;bEnableAdaptiveQuant= 1;bEnableFrameSkip= 0;bEnableLongTermReference= 0;iLtrMarkPeriod= 30, bIsLosslessLink=0;iComplexityMode = 0;iNumRefFrame = 1;iEntropyCodingModeFlag = 0;uiMaxNalSize = 0;iLTRRefNum = 0;iMultipleThreadIdc = 1;iLoopFilterDisableIdc = 0 (offset(alpha/beta): 0,0;iComplexityMode = 0,iMaxQp = 51;iMinQp = 0)
    [libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:sSpatialLayers[0]: .iVideoWidth= 640; .iVideoHeight= 480; .fFrameRate= 25.000000f; .iSpatialBitrate= 400000; .iMaxSpatialBitrate= 400000; .sSliceArgument.uiSliceMode= 1; .sSliceArgument.iSliceNum= 0; .sSliceArgument.uiSliceSizeConstraint= 1500;uiProfileIdc = 66;uiLevelIdc = 41
    [libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Warning:SliceArgumentValidationFixedSliceMode(), unsupported setting with Resolution and uiSliceNum combination under RC on! So uiSliceNum is changed to 6!
    [libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:Setting MaxSpatialBitrate (400000) the same at SpatialBitrate (400000) will make the actual bit rate lower than SpatialBitrate
    [libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Warning:bEnableFrameSkip = 0,bitrate can't be controlled for RC_QUALITY_MODE,RC_BITRATE_MODE and RC_TIMESTAMP_MODE without enabling skip frame.
    [libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Warning:Change QP Range from(0,51) to (12,42)
    [libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:WELS CPU features/capacities (0x4007fe3f) detected: HTT: Y, MMX: Y, MMXEX: Y, SSE: Y, SSE2: Y, SSE3: Y, SSSE3: Y, SSE4.1: Y, SSE4.2: Y, AVX: Y, FMA: Y, X87-FPU: Y, 3DNOW: N, 3DNOWEX: N, ALTIVEC: N, CMOV: Y, MOVBE: Y, AES: Y, NUMBER OF LOGIC PROCESSORS ON CHIP: 8, CPU CACHE LINE SIZE (BYTES): 64
    [libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:WelsInitEncoderExt() exit, overall memory usage: 4542878 bytes
    [libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:WelsInitEncoderExt(), pCtx= 0x0x245a400.
    Output #0, h264, to 'udp://192.168.100.39:8080':
    Stream #0:0, 0, 1/25: Video: h264 (libopenh264), 1 reference frame, yuv420p, 640x480 (0x0), 0/1, q=2-31, 400 kb/s, 25 tbn
    [libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:RcUpdateIntraComplexity iFrameDqBits = 385808,iQStep= 2016,iIntraCmplx = 777788928
    [libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:[Rc]Layer 0: Frame timestamp = 0, Frame type = 2, encoding_qp = 30, average qp = 30, max qp = 33, min qp = 27, index = 0, iTid = 0, used = 385808, bitsperframe = 16000, target = 64000, remainingbits = -257808, skipbuffersize = 200000
    [libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:WelsEncoderEncodeExt() OutputInfo iLayerNum = 2,iFrameSize = 48252
    [libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:WelsEncoderEncodeExt() OutputInfo iLayerId = 0,iNalType = 0,iNalCount = 2, first Nal Length=18,uiSpatialId = 0,uiTemporalId = 0,iSubSeqId = 0
    [libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:WelsEncoderEncodeExt() OutputInfo iLayerId = 1,iNalType = 1,iNalCount = 6, first Nal Length=6057,uiSpatialId = 0,uiTemporalId = 0,iSubSeqId = 0
    [libopenh264 @ 0x244aa00] 6 slices
    ./scriptBuild.sh: line 20: 10625 Segmentation fault      (core dumped) ./display

    As you can see, FFmpeg uses libopenh264 and configures it correctly. However, no matter what I do, it always ends with the same segmentation fault...

    I've used commands like this:

    ffmpeg -s 640x480 -f video4linux2 -i /dev/video0 -r 30 -vcodec libopenh264 -an -f h264 udp://127.0.0.1:8080

    And it works perfectly, but I need to process the frames before sending them. That's why I'm trying to use the libraries directly.
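
    To be concrete, the per-frame processing I mean is ordinary OpenCV work on the Mat before it reaches the encoder, for example (illustrative only, not my real processing):

    cap >> image;
    // Example processing step: draw an overlay on the captured frame
    // before it is converted to YUV and encoded.
    cv::putText(image, "cam0", cv::Point(16, 32),
                cv::FONT_HERSHEY_SIMPLEX, 1.0, cv::Scalar(0, 255, 0), 2);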

    My FFmpeg version is:

    ffmpeg version 3.3.6 Copyright (c) 2000-2017 the FFmpeg developers
    built with gcc 4.8 (Ubuntu 4.8.4-2ubuntu1~14.04.3)
    configuration: --disable-yasm --enable-shared --enable-libopenh264 --cc='gcc -fPIC'
    libavutil      55. 58.100 / 55. 58.100
    libavcodec     57. 89.100 / 57. 89.100
    libavformat    57. 71.100 / 57. 71.100
    libavdevice    57.  6.100 / 57.  6.100
    libavfilter     6. 82.100 /  6. 82.100
    libswscale      4.  6.100 /  4.  6.100
    libswresample   2.  7.100 /  2.  7.100

    I tried to get more information about the error using gdb, but it didn't give me any debugging info (I probably need to rebuild with -g to get symbols and a usable backtrace).

    How can I solve this problem? I don't know what else to try...

    Thank you!

  • avcodec_receive_packet() doesn't see the output

    1 March 2018, by Eugene Alexeev

    I’m trying to create a converter that makes a video out of a set of images. Everything is in place: AVFormatContext, AVCodecContext, AVCodec. I create a YUV AVFrame out of a UIImage and send it to the encoder with avcodec_send_frame(). Everything goes fine until I try to get an AVPacket with avcodec_receive_packet(). Every time it returns -35 (AVERROR(EAGAIN)), which means "output is not available in the current state - user must try to send input". As I said, I’m sending input before trying to get anything back, and the send succeeds.
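
    From the documentation my understanding is that AVERROR(EAGAIN) from avcodec_receive_packet() just means the encoder needs more input before it can emit a packet (it buffers frames for b-frames/lookahead), so the loop should keep feeding frames and only flush with a NULL frame at the end. A sketch of the pattern as I understand it (plain C, not my actual code):

    // Send one frame (or NULL to start flushing), then drain the packets
    // that are ready; EAGAIN here is normal, not an error.
    static int encode_and_write(AVCodecContext *enc, AVFrame *frm,
                                AVPacket *pkt, AVFormatContext *fmt)
    {
        int ret = avcodec_send_frame(enc, frm);
        if (ret < 0)
            return ret;

        while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
            // rescale pkt->pts/dts to the stream time_base and set
            // pkt->stream_index before writing, then:
            ret = av_interleaved_write_frame(fmt, pkt);
            if (ret < 0)
                return ret;
        }
        return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
    }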

    Here’s my code:

    Initialising the ffmpeg entities:

    - (BOOL)setupForConvert:(DummyFVPVideoFile *)videoFile outputPath:(NSString *)path
    {
       if (!videoFile) {
           [self.delegate convertationFailed:@"VideoFile is nil!"];
           return NO;
       }
       currentVideoFile = videoFile;
       outputPath = path;
       BOOL success = NO;

       success = [self initFormatCtxAndCodecs:path];
       if (!success) {
           return NO;
       }

       success = [self addCameraStreams:videoFile];
       if (!success) {
           return NO;
       }

       success = [self openIOContext:path];
       if (!success) {
           return NO;
       }

       return YES;
    }

    - (BOOL)initFormatCtxAndCodecs:(NSString *)path
    {
       //AVOutputFormat *fmt = av_guess_format("mp4", NULL, NULL);
       int ret = avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, [path UTF8String]);
       if (ret < 0) {
           NSLog(@"Couldn't create output context");
           return NO;
       }

       //encoder codec init
       pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
       if (!pCodec) {
           NSLog(@"Couldn't find a encoder codec!");
           return NO;
       }

       pCodecCtx = avcodec_alloc_context3(pCodec);
       if (!pCodecCtx) {
           NSLog(@"Couldn't alloc encoder codec context!");
           return NO;
       }

       pCodecCtx->codec_tag = AV_CODEC_ID_H264;
       pCodecCtx->bit_rate = 400000;
       pCodecCtx->width = currentVideoFile.size.width;
       pCodecCtx->height = currentVideoFile.size.height;
       pCodecCtx->time_base = (AVRational){1, (int)currentVideoFile.framerate};
       pCodecCtx->framerate = (AVRational){(int)currentVideoFile.framerate, 1};
       pCodecCtx->gop_size = 10;
       pCodecCtx->max_b_frames = 1;
       pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;

       if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
           NSLog(@"Couldn't open the encoder codec!");
           return NO;
       }

       pPacket = av_packet_alloc();

       return YES;
    }

    - (BOOL)addCameraStreams:(DummyFVPVideoFile *)videoFile
    {
       AVCodecParameters *params = avcodec_parameters_alloc();
       if (!params) {
           NSLog(@"Couldn't allocate codec parameters!");
           return NO;
       }

       if (avcodec_parameters_from_context(params, pCodecCtx) < 0) {
           NSLog(@"Couldn't copy parameters from context!");
           return NO;
       }

       for (int i = 0; i < videoFile.idCameras.count - 1; i++)
       {
           NSString *path = [videoFile.url URLByAppendingPathComponent:videoFile.idCameras[i]].path;
           AVStream *stream = avformat_new_stream(pFormatCtx, pCodec);
           if (!stream) {
               NSLog(@"Couldn't alloc stream!");
               return NO;
           }

           if (avcodec_parameters_copy(stream->codecpar, params) < 0) {
               NSLog(@"Couldn't copy parameters into stream!");
               return NO;
           }

           stream->avg_frame_rate.num = videoFile.framerate;
           stream->avg_frame_rate.den = 1;
           stream->codecpar->codec_tag = 0;    //some silly workaround
           stream->index = i;
           streams[path] = [[VideoStream alloc] initWithStream:stream];
       }

       return YES;
    }

    - (BOOL)openIOContext:(NSString *)path
    {
       AVIOContext *ioCtx = nil;
       if (avio_open(&ioCtx, [path UTF8String], AVIO_FLAG_WRITE) < 0) {
           return NO;
       }
       pFormatCtx->pb = ioCtx;

       return YES;
    }

    And here’s the conversion process:

    - (void)launchConvert:(DummyFVPVideoFile *)videoFile
    {
       BOOL convertInProgress = YES;
       unsigned int frameCount = 1;
       unsigned long pts = 0;
       BOOL success = NO;

       success = [self writeHeader];
       if (!success) {
           NSLog(@"Couldn't write header!");
           return;
       }

       AVRational defaultTimeBase;
       defaultTimeBase.num = 1;
       defaultTimeBase.den = videoFile.framerate;
       AVRational streamTimeBase = streams.allValues.firstObject.stream->time_base;

       while (convertInProgress)
       {
           pts += av_rescale_q(1, defaultTimeBase, streamTimeBase);
           for (NSString *path in streams.allKeys)
           {
               UIImage *img = [UIImage imageWithContentsOfFile:[NSString stringWithFormat:@"%@/%u.jpg", path, frameCount]];
               AVPacket *pkt = [self getAVPacket:img withPts:pts];
               if (!pkt->data) {   continue;   }
               pkt->stream_index = streams[path].stream->index;
               //check all settings of pkt

               if (![self writePacket:pkt]) {
                   NSLog(@"Couldn't write packet!");
                   convertInProgress = NO;
                   break;
               }
           }

           frameCount++;
       }

       success = [self writeTrailer];
       if (!success) {
           NSLog(@"Couldn't write trailer!");
           return;
       }

       NSLog(@"Convertation finished!");
       //delegate convertationFinished method
    }

    - (BOOL)writeHeader
    {
       if (avformat_write_header(pFormatCtx, NULL) < 0) {
           return NO;
       }

       return YES;
    }

    - (BOOL)writePacket:(AVPacket *)pkt
    {
       if (av_interleaved_write_frame(pFormatCtx, pkt) != 0) {
           return NO;
       }

       return YES;
    }

    - (BOOL)writeTrailer
    {
       if (av_write_trailer(pFormatCtx) != 0) {
           return NO;
       }

       return YES;
    }


    /**
    This method creates an AVPacket out of a UIImage.

    @return AVPacket
    */
    - (AVPacket *)getAVPacket:(UIImage *)img withPts:(unsigned long)pts
    {
       if (!img) {
           NSLog(@"imgData is nil!");
           return nil;
       }
       uint8_t *imgData = [self getPixelDataFromImage:img];

       AVFrame *frame_yuv = av_frame_alloc();
       if (!frame_yuv) {
           NSLog(@"frame_yuv is nil!");
           return nil;
       }
       frame_yuv->format = AV_PIX_FMT_YUV420P;
       frame_yuv->width = (int)img.size.width;
       frame_yuv->height = (int)img.size.height;

       int ret = av_image_alloc(frame_yuv->data,
                                  frame_yuv->linesize,
                                  frame_yuv->width,
                                  frame_yuv->height,
                                  frame_yuv->format,
                                  32);
       if (ret < 0) {
           NSLog(@"Couldn't alloc yuv frame!");
           return nil;
       }

       struct SwsContext *sws_ctx = nil;
       sws_ctx = sws_getContext((int)img.size.width, (int)img.size.height, AV_PIX_FMT_RGB24,
                                (int)img.size.width, (int)img.size.height, AV_PIX_FMT_YUV420P,
                                0, NULL, NULL, NULL);
       const uint8_t *scaleData[1] = { imgData };
       int inLineSize[1] = { 4 * img.size.width };
       sws_scale(sws_ctx, scaleData, inLineSize, 0, (int)img.size.height, frame_yuv->data, frame_yuv->linesize);

       frame_yuv->pict_type = AV_PICTURE_TYPE_I;
       frame_yuv->pts = pCodecCtx->frame_number;

       ret = avcodec_send_frame(pCodecCtx, frame_yuv);   //every time everything is fine
       if (ret != 0) {
           NSLog(@"Couldn't send yuv frame!");
           return nil;
       }

       av_init_packet(pPacket);
       pPacket->dts = pPacket->pts = pts;
       do {
           ret = avcodec_receive_packet(pCodecCtx, pPacket);   //every time -35 error
           NSLog(@"ret = %d", ret);
           if (ret == AVERROR_EOF) {
               NSLog(@"AVERROR_EOF!");
           } else if (ret == AVERROR(EAGAIN)) {
               NSLog(@"AVERROR(EAGAIN)");
           } else if (ret == AVERROR(EINVAL)) {
               NSLog(@"AVERROR(EINVAL)");
           }
           if (ret != 0) {
               NSLog(@"Couldn't receive packet!");
               //return nil;
           }
       } while ( ret == 0 );

       free(imgData);
       av_packet_unref(pPacket);
       av_packet_free(pPacket);
       av_frame_unref(&frame_yuv);
       av_frame_free(&frame_yuv);
       //perform other cleanup and testing

       return pPacket;
    }
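
    While re-reading this I also noticed the cleanup looks wrong: I unref/free pPacket and then still return it, and av_frame_unref() takes the AVFrame itself while av_frame_free() takes an AVFrame ** (so av_frame_unref(&frame_yuv) can't be right). My guess at what the end of the method should look like (an assumption on my part):

    free(imgData);
    av_frame_free(&frame_yuv);   // unrefs the frame, frees it and NULLs the pointer
    // Keep pPacket alive here; the caller should unref it only after writing it.
    return pPacket;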

    Any insights would be helpful. Thanks!

  • swscaler bad src image pointers

    7 March 2018, by user1496491

    I’m completely lost. I’m trying to capture 30 screenshots and put them into a video with FFmpeg under Windows 10, and it keeps telling me [swscaler @ 073890a0] bad src image pointers. As a result the video is entirely green. If I switch to the dshow format with video=screen-capture-recorder, the video is mostly garbage. Here’s my short code for that. I’m completely stuck and don’t even know in which direction to look.

    MainWindow.h

    #ifndef MAINWINDOW_H
    #define MAINWINDOW_H

    #include <QMainWindow>
    #include <QFuture>
    #include <QFutureWatcher>
    #include <QMutex>
    #include <QMutexLocker>

    extern "C" {
    #include "libavcodec/avcodec.h"
    #include "libavcodec/avfft.h"

    #include "libavdevice/avdevice.h"

    #include "libavfilter/avfilter.h"
    #include "libavfilter/avfiltergraph.h"
    #include "libavfilter/buffersink.h"
    #include "libavfilter/buffersrc.h"

    #include "libavformat/avformat.h"
    #include "libavformat/avio.h"

    #include "libavutil/opt.h"
    #include "libavutil/common.h"
    #include "libavutil/channel_layout.h"
    #include "libavutil/imgutils.h"
    #include "libavutil/mathematics.h"
    #include "libavutil/samplefmt.h"
    #include "libavutil/time.h"
    #include "libavutil/opt.h"
    #include "libavutil/pixdesc.h"
    #include "libavutil/file.h"

    #include "libswscale/swscale.h"
    }

    class MainWindow : public QMainWindow
    {
       Q_OBJECT

    public:
       MainWindow(QWidget *parent = 0);
       ~MainWindow();

    private:
       AVFormatContext *inputFormatContext = nullptr;
       AVFormatContext *outFormatContext = nullptr;

       AVStream* videoStream = nullptr;

       AVDictionary* options = nullptr;

       AVCodec* outCodec = nullptr;
       AVCodec* inputCodec = nullptr;
       AVCodecContext* inputCodecContext = nullptr;
       AVCodecContext* outCodecContext = nullptr;
       SwsContext* swsContext = nullptr;

    private:
       void init();
       void initOutFile();
       void collectFrame();
    };

    #endif // MAINWINDOW_H

    MainWindow.cpp

    #include "MainWindow.h"

    #include <QGuiApplication>
    #include <QLabel>
    #include <QScreen>
    #include <QTimer>
    #include <QLayout>
    #include <QImage>
    #include <QtConcurrent/QtConcurrent>
    #include <QThreadPool>

    #include "ScreenCapture.h"

    MainWindow::MainWindow(QWidget *parent) : QMainWindow(parent)
    {
       resize(800, 600);

       auto label = new QLabel();
       label->setAlignment(Qt::AlignHCenter | Qt::AlignVCenter);

       auto layout = new QHBoxLayout();
       layout->addWidget(label);

       auto widget = new QWidget();
       widget->setLayout(layout);
       setCentralWidget(widget);

       init();
       initOutFile();
       collectFrame();
    }

    MainWindow::~MainWindow()
    {
       avformat_close_input(&inputFormatContext);
       avformat_free_context(inputFormatContext);

       QThreadPool::globalInstance()->waitForDone();
    }

    void MainWindow::init()
    {
       av_register_all();
       avcodec_register_all();
       avdevice_register_all();
       avformat_network_init();

       auto screen = QGuiApplication::screens()[0];
       QRect geometry = screen->geometry();

       inputFormatContext = avformat_alloc_context();

       options = NULL;
       av_dict_set(&amp;options, "framerate", "30", NULL);
       av_dict_set(&amp;options, "offset_x", QString::number(geometry.x()).toLatin1().data(), NULL);
       av_dict_set(&amp;options, "offset_y", QString::number(geometry.y()).toLatin1().data(), NULL);
       av_dict_set(&amp;options, "video_size", QString(QString::number(geometry.width()) + "x" + QString::number(geometry.height())).toLatin1().data(), NULL);
       av_dict_set(&amp;options, "show_region", "1", NULL);

       AVInputFormat* inputFormat = av_find_input_format("gdigrab");
       avformat_open_input(&inputFormatContext, "desktop", inputFormat, &options);

       int videoStreamIndex = av_find_best_stream(inputFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);

       inputCodecContext = inputFormatContext->streams[videoStreamIndex]->codec;
       inputCodecContext->width = geometry.width();
       inputCodecContext->height = geometry.height();
       inputCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;

       inputCodec = avcodec_find_decoder(inputCodecContext->codec_id);
       avcodec_open2(inputCodecContext, inputCodec, NULL);
    }

    void MainWindow::initOutFile()
    {
       const char* filename = "C:/Temp/output.mp4";

       avformat_alloc_output_context2(&outFormatContext, NULL, NULL, filename);

       outCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);

       videoStream = avformat_new_stream(outFormatContext, outCodec);
       videoStream->time_base = {1, 30};

       outCodecContext = videoStream->codec;
       outCodecContext->codec_id = AV_CODEC_ID_MPEG4;
       outCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
       outCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
       outCodecContext->bit_rate = 400000;
       outCodecContext->width = inputCodecContext->width;
       outCodecContext->height = inputCodecContext->height;
       outCodecContext->gop_size = 3;
       outCodecContext->max_b_frames = 2;
       outCodecContext->time_base = videoStream->time_base;

       if (outFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
           outCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

       avcodec_open2(outCodecContext, outCodec, NULL);

       if (!(outFormatContext->flags & AVFMT_NOFILE))
           avio_open2(&outFormatContext->pb, filename, AVIO_FLAG_WRITE, NULL, NULL);

       swsContext = sws_getContext(inputCodecContext->width,
                                   inputCodecContext->height,
                                   inputCodecContext->pix_fmt,
                                   outCodecContext->width,
                                   outCodecContext->height,
                                   outCodecContext->pix_fmt,
                                   SWS_BICUBIC, NULL, NULL, NULL);

       avformat_write_header(outFormatContext, &options);
    }

    void MainWindow::collectFrame()
    {
       AVFrame* frame = av_frame_alloc();
       frame->data[0] = NULL;
       frame->width = inputCodecContext->width;
       frame->height = inputCodecContext->height;
       frame->format = inputCodecContext->pix_fmt;

       av_image_alloc(frame->data, frame->linesize, inputCodecContext->width, inputCodecContext->height, (AVPixelFormat)frame->format, 32);

       AVFrame* outFrame = av_frame_alloc();
       outFrame->data[0] = NULL;
       outFrame->width = outCodecContext->width;
       outFrame->height = outCodecContext->height;
       outFrame->format = outCodecContext->pix_fmt;

       av_image_alloc(outFrame->data, outFrame->linesize, outCodecContext->width, outCodecContext->height, (AVPixelFormat)outFrame->format, 32);

       int bufferSize = av_image_get_buffer_size(outCodecContext->pix_fmt,
                                                 outCodecContext->width,
                                                 outCodecContext->height,
                                                 24);

       uint8_t* outBuffer = (uint8_t*)av_malloc(bufferSize);

       avpicture_fill((AVPicture*)outFrame, outBuffer,
                      AV_PIX_FMT_YUV420P,
                      outCodecContext->width, outCodecContext->height);

       int frameCount = 30;
       int count = 0;

       AVPacket* packet = (AVPacket*)av_malloc(sizeof(AVPacket));
       av_init_packet(packet);

       while(av_read_frame(inputFormatContext, packet) >= 0)
       {
           if(packet->stream_index == videoStream->index)
           {
               int frameFinished = 0;
            avcodec_decode_video2(inputCodecContext, frame, &frameFinished, packet);

               if(frameFinished)
               {
                   if(++count > frameCount)
                   {
                    qDebug() << "FINISHED!";
                       break;
                   }

                   sws_scale(swsContext, frame->data, frame->linesize, 0, inputCodecContext->height, outFrame->data, outFrame->linesize);

                   AVPacket outPacket;
                    av_init_packet(&outPacket);
                   outPacket.data = NULL;
                   outPacket.size = 0;

                   int got_picture = 0;
                    avcodec_encode_video2(outCodecContext, &outPacket, outFrame, &got_picture);

                   if(got_picture)
                   {
                       if(outPacket.pts != AV_NOPTS_VALUE) outPacket.pts = av_rescale_q(outPacket.pts, videoStream->codec->time_base, videoStream->time_base);
                       if(outPacket.dts != AV_NOPTS_VALUE) outPacket.dts = av_rescale_q(outPacket.dts, videoStream->codec->time_base, videoStream->time_base);

                        av_write_frame(outFormatContext, &outPacket);
                   }

                    av_packet_unref(&outPacket);
               }
           }
       }

       av_write_trailer(outFormatContext);

       av_free(outBuffer);
    }
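
    The only theory I have so far (completely unverified): I overwrite inputCodecContext->pix_fmt with AV_PIX_FMT_YUV420P before avcodec_open2(), but gdigrab actually hands over BGRA bitmaps, so the SwsContext is built for the wrong source format and the decoded frame’s data pointers don’t line up with what swscale expects. Maybe the scaler has to be built from what the decoder really outputs, along these lines:

    // Build the scaler lazily from the pixel format the decoded frame
    // actually carries (my assumption about the fix), instead of forcing
    // YUV420P on the input side before avcodec_open2().
    if (frameFinished && !swsContext)
    {
        swsContext = sws_getContext(frame->width, frame->height,
                                    (AVPixelFormat)frame->format,   // e.g. AV_PIX_FMT_BGRA
                                    outCodecContext->width, outCodecContext->height,
                                    outCodecContext->pix_fmt,
                                    SWS_BICUBIC, NULL, NULL, NULL);
    }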