
Media (1)
-
Rennes Emotion Map 2010-11
19 October 2011
Updated: July 2013
Language: French
Type: Text
Other articles (62)
-
Websites made with MediaSPIP
2 May 2011. This page lists some websites based on MediaSPIP.
-
Participating in its translation
10 April 2011. You can help us improve the wording used in the software, or translate it into any new language so that it can reach new linguistic communities.
To do this, we use SPIP's translation interface, where all of MediaSPIP's language modules are available. You simply need to subscribe to the translators' mailing list to request more information.
Currently MediaSPIP is only available in French and (...)
-
Creating farms of unique websites
13 April 2011. MediaSPIP platforms can be installed as a farm, with a single "core" hosted on a dedicated server and used by multiple websites.
This allows (among other things): implementation costs to be shared between several different projects/individuals; rapid deployment of multiple unique sites; the creation of groups of like-minded sites, making it possible to browse media in a more controlled and selective environment than the major "open" (...)
On other sites (10405)
-
c++ - using FFmpeg encode and UDP with a Webcam
14 March, by Rendres. I'm trying to get frames from a webcam using OpenCV, encode them with FFmpeg and send them over UDP.



I did a similar project before that, instead of sending the packets over UDP, saved them to a video file.



My code is:



#include <iostream>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}

#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

#define WIDTH 640
#define HEIGHT 480
#define CODEC_ID AV_CODEC_ID_H264
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P

static AVFrame *frame, *pFrameBGR;

int main(int argc, char **argv)
{
VideoCapture cap(0);
const char *url = "udp://127.0.0.1:8080";

AVFormatContext *formatContext;
AVStream *stream;
AVCodec *codec;
AVCodecContext *c;
AVDictionary *opts = NULL;

int ret, got_packet;

if (!cap.isOpened())
{
 return -1;
}

av_log_set_level(AV_LOG_TRACE);

av_register_all();
avformat_network_init();

avformat_alloc_output_context2(&formatContext, NULL, "h264", url);
if (!formatContext)
{
 av_log(NULL, AV_LOG_FATAL, "Could not allocate an output context for '%s'.\n", url);
}

codec = avcodec_find_encoder(CODEC_ID);
if (!codec)
{
 av_log(NULL, AV_LOG_ERROR, "Could not find encoder.\n");
}

stream = avformat_new_stream(formatContext, codec);

c = avcodec_alloc_context3(codec);

stream->id = formatContext->nb_streams - 1;
stream->time_base = (AVRational){1, 25};

c->codec_id = CODEC_ID;
c->bit_rate = 400000;
c->width = WIDTH;
c->height = HEIGHT;
c->time_base = stream->time_base;
c->gop_size = 12;
c->pix_fmt = STREAM_PIX_FMT;

if (formatContext->flags & AVFMT_GLOBALHEADER)
 c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

av_dict_set(&opts, "preset", "fast", 0);

av_dict_set(&opts, "tune", "zerolatency", 0);

ret = avcodec_open2(c, codec, NULL);
if (ret < 0)
{
 av_log(NULL, AV_LOG_ERROR, "Could not open video codec.\n");
}

pFrameBGR = av_frame_alloc();
if (!pFrameBGR)
{
 av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame.\n");
}

frame = av_frame_alloc();
if (!frame)
{
 av_log(NULL, AV_LOG_ERROR, "Could not allocate video frame.\n");
}

frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;

ret = avcodec_parameters_from_context(stream->codecpar, c);
if (ret < 0)
{
 av_log(NULL, AV_LOG_ERROR, "Could not open video codec.\n");
}

av_dump_format(formatContext, 0, url, 1);

ret = avformat_write_header(formatContext, NULL);
if (ret != 0)
{
 av_log(NULL, AV_LOG_ERROR, "Failed to connect to '%s'.\n", url);
}

Mat image(Size(HEIGHT, WIDTH), CV_8UC3);
SwsContext *swsctx = sws_getContext(WIDTH, HEIGHT, AV_PIX_FMT_BGR24, WIDTH, HEIGHT, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);
int frame_pts = 0;

while (1)
{
 cap >> image;

 int numBytesYUV = av_image_get_buffer_size(STREAM_PIX_FMT, WIDTH, HEIGHT, 1);
 uint8_t *bufferYUV = (uint8_t *)av_malloc(numBytesYUV * sizeof(uint8_t));

 avpicture_fill((AVPicture *)pFrameBGR, image.data, AV_PIX_FMT_BGR24, WIDTH, HEIGHT);
 avpicture_fill((AVPicture *)frame, bufferYUV, STREAM_PIX_FMT, WIDTH, HEIGHT);

 sws_scale(swsctx, (uint8_t const *const *)pFrameBGR->data, pFrameBGR->linesize, 0, HEIGHT, frame->data, frame->linesize);

 AVPacket pkt = {0};
 av_init_packet(&pkt);

 frame->pts = frame_pts;

 ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
 if (ret < 0)
 {
 av_log(NULL, AV_LOG_ERROR, "Error encoding frame\n");
 }

 if (got_packet)
 {
 pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
 pkt.dts = av_rescale_q_rnd(pkt.dts, c->time_base, stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
 pkt.duration = av_rescale_q(pkt.duration, c->time_base, stream->time_base);
 pkt.stream_index = stream->index;

 return av_interleaved_write_frame(formatContext, &pkt);

 cout << "Seguro que si" << endl;
 }
 frame_pts++;
}

avcodec_free_context(&c);
av_frame_free(&frame);
avformat_free_context(formatContext);

return 0;
}




The code compiles, but it crashes with a segmentation fault in av_interleaved_write_frame(). I've tried several implementations and several codecs (in this case I'm using libopenh264, but mpeg2video produces the same segmentation fault). I also tried av_write_frame(), but it fails with the same error.
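
One thing worth checking: the listing never opens an AVIOContext for the UDP URL. For muxers that don't set AVFMT_NOFILE, formatContext->pb has to be opened with avio_open() before avformat_write_header() is called, and writing with pb still NULL is a plausible source of exactly this kind of crash. A minimal sketch of the missing step (assuming the same formatContext and url variables as above), placed before avformat_write_header():

// Hedged sketch: open the UDP output so the muxer has somewhere to write.
if (!(formatContext->oformat->flags & AVFMT_NOFILE))
{
    ret = avio_open(&formatContext->pb, url, AVIO_FLAG_WRITE);
    if (ret < 0)
    {
        av_log(NULL, AV_LOG_FATAL, "Could not open output '%s'.\n", url);
        return -1;
    }
}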



As I said before, I only want to grab frames from a webcam connected via USB, encode them to H264 and send the packets over UDP to another PC.



My console log when I run the executable is:



[100%] Built target display
[OpenH264] this = 0x0x244b4f0, Info:CWelsH264SVCEncoder::SetOption():ENCODER_OPTION_TRACE_CALLBACK callback = 0x7f0c302a87c0.
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:CWelsH264SVCEncoder::InitEncoder(), openh264 codec version = 5a5c4f1
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:iUsageType = 0,iPicWidth= 640;iPicHeight= 480;iTargetBitrate= 400000;iMaxBitrate= 400000;iRCMode= 0;iPaddingFlag= 0;iTemporalLayerNum= 1;iSpatialLayerNum= 1;fFrameRate= 25.000000f;uiIntraPeriod= 12;eSpsPpsIdStrategy = 0;bPrefixNalAddingCtrl = 0;bSimulcastAVC=0;bEnableDenoise= 0;bEnableBackgroundDetection= 1;bEnableSceneChangeDetect = 1;bEnableAdaptiveQuant= 1;bEnableFrameSkip= 0;bEnableLongTermReference= 0;iLtrMarkPeriod= 30, bIsLosslessLink=0;iComplexityMode = 0;iNumRefFrame = 1;iEntropyCodingModeFlag = 0;uiMaxNalSize = 0;iLTRRefNum = 0;iMultipleThreadIdc = 1;iLoopFilterDisableIdc = 0 (offset(alpha/beta): 0,0;iComplexityMode = 0,iMaxQp = 51;iMinQp = 0)
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:sSpatialLayers[0]: .iVideoWidth= 640; .iVideoHeight= 480; .fFrameRate= 25.000000f; .iSpatialBitrate= 400000; .iMaxSpatialBitrate= 400000; .sSliceArgument.uiSliceMode= 1; .sSliceArgument.iSliceNum= 0; .sSliceArgument.uiSliceSizeConstraint= 1500;uiProfileIdc = 66;uiLevelIdc = 41
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Warning:SliceArgumentValidationFixedSliceMode(), unsupported setting with Resolution and uiSliceNum combination under RC on! So uiSliceNum is changed to 6!
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:Setting MaxSpatialBitrate (400000) the same at SpatialBitrate (400000) will make the actual bit rate lower than SpatialBitrate
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Warning:bEnableFrameSkip = 0,bitrate can't be controlled for RC_QUALITY_MODE,RC_BITRATE_MODE and RC_TIMESTAMP_MODE without enabling skip frame.
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Warning:Change QP Range from(0,51) to (12,42)
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:WELS CPU features/capacities (0x4007fe3f) detected: HTT: Y, MMX: Y, MMXEX: Y, SSE: Y, SSE2: Y, SSE3: Y, SSSE3: Y, SSE4.1: Y, SSE4.2: Y, AVX: Y, FMA: Y, X87-FPU: Y, 3DNOW: N, 3DNOWEX: N, ALTIVEC: N, CMOV: Y, MOVBE: Y, AES: Y, NUMBER OF LOGIC PROCESSORS ON CHIP: 8, CPU CACHE LINE SIZE (BYTES): 64
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:WelsInitEncoderExt() exit, overall memory usage: 4542878 bytes
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Info:WelsInitEncoderExt(), pCtx= 0x0x245a400.
Output #0, h264, to 'udp://192.168.100.39:8080':
Stream #0:0, 0, 1/25: Video: h264 (libopenh264), 1 reference frame, yuv420p, 640x480 (0x0), 0/1, q=2-31, 400 kb/s, 25 tbn
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:RcUpdateIntraComplexity iFrameDqBits = 385808,iQStep= 2016,iIntraCmplx = 777788928
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:[Rc]Layer 0: Frame timestamp = 0, Frame type = 2, encoding_qp = 30, average qp = 30, max qp = 33, min qp = 27, index = 0, iTid = 0, used = 385808, bitsperframe = 16000, target = 64000, remainingbits = -257808, skipbuffersize = 200000
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:WelsEncoderEncodeExt() OutputInfo iLayerNum = 2,iFrameSize = 48252
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:WelsEncoderEncodeExt() OutputInfo iLayerId = 0,iNalType = 0,iNalCount = 2, first Nal Length=18,uiSpatialId = 0,uiTemporalId = 0,iSubSeqId = 0
[libopenh264 @ 0x244aa00] [OpenH264] this = 0x0x244b4f0, Debug:WelsEncoderEncodeExt() OutputInfo iLayerId = 1,iNalType = 1,iNalCount = 6, first Nal Length=6057,uiSpatialId = 0,uiTemporalId = 0,iSubSeqId = 0
[libopenh264 @ 0x244aa00] 6 slices
./scriptBuild.sh: line 20: 10625 Segmentation fault (core dumped) ./display




As you can see, FFmpeg uses libopenh264 and configures it correctly. However, no matter what I try, it always ends with the same segmentation fault...



I've used commands like this:



ffmpeg -s 640x480 -f video4linux2 -i /dev/video0 -r 30 -vcodec libopenh264 -an -f h264 udp://127.0.0.1:8080




And it works perfectly, but I need to process the frames before sending them. That's why I'm trying to use the libraries.



My FFmpeg version is:



ffmpeg version 3.3.6 Copyright (c) 2000-2017 the FFmpeg developers
built with gcc 4.8 (Ubuntu 4.8.4-2ubuntu1~14.04.3)
configuration: --disable-yasm --enable-shared --enable-libopenh264 --cc='gcc -fPIC'
libavutil 55. 58.100 / 55. 58.100
libavcodec 57. 89.100 / 57. 89.100
libavformat 57. 71.100 / 57. 71.100
libavdevice 57. 6.100 / 57. 6.100
libavfilter 6. 82.100 / 6. 82.100
libswscale 4. 6.100 / 4. 6.100
libswresample 2. 7.100 / 2. 7.100




I tried to get more information about the error using gdb, but it didn't give me any debugging info.
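
For reference, gdb can only produce a useful backtrace if the binary carries debug symbols. A typical session, assuming the executable is called display and is built from main.cpp (the pkg-config package names here are illustrative):

g++ -g -O0 -o display main.cpp $(pkg-config --cflags --libs libavformat libavcodec libswscale libavutil opencv)
gdb ./display
(gdb) run
(gdb) bt full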



How can I solve this problem? I don't know what else I can try...



Thank you!


-
How to keep transparency when scale webm file with ffmpeg
5 October 2022, by Sonia Kidman. I'm using ffmpeg to scale my WEBM file, using the command below:
ffmpeg -i in.webm -c:v libvpx -vf scale=100:100 out.webm
The output has the correct resolution, as expected, but the transparency becomes a black background.



Could someone give me a solution for this?



Thank you so much.
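
For what it's worth, the trace below shows the scale filter negotiating plain yuv420p (which has no alpha plane) even though yuva420p is listed as an accepted format, and libvpx can only keep transparency in yuva420p. So forcing the pixel format explicitly is a plausible fix; an untested variant of the same command:

ffmpeg -i in.webm -c:v libvpx -vf scale=100:100 -pix_fmt yuva420p out.webm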



Below is the log of the operation:



ffmpeg version 3.4 Copyright (c) 2000-2017 the FFmpeg developers
 built with gcc 7.2.0 (GCC)
 configuration: --enable-gpl --enable-version3 --enable-sdl2 --enable-bzlib --enable-fontconfig --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --enable-libfreetype --enable-libmp3lame --enable-libopenjpeg --enable-libopus --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libtheora --enable-libtwolame --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libzimg --enable-lzma --enable-zlib --enable-gmp --enable-libvidstab --enable-libvorbis --enable-cuda --enable-cuvid --enable-d3d11va --enable-nvenc --enable-dxva2 --enable-avisynth --enable-libmfx
 libavutil 55. 78.100 / 55. 78.100
 libavcodec 57.107.100 / 57.107.100
 libavformat 57. 83.100 / 57. 83.100
 libavdevice 57. 10.100 / 57. 10.100
 libavfilter 6.107.100 / 6.107.100
 libswscale 4. 8.100 / 4. 8.100
 libswresample 2. 9.100 / 2. 9.100
 libpostproc 54. 7.100 / 54. 7.100
Splitting the commandline.
Reading option '-v' ... matched as option 'v' (set logging level) with argument '56'.
Reading option '-i' ... matched as input url with argument 'in.webm'.
Reading option '-c:v' ... matched as option 'c' (codec name) with argument 'libvpx'.
Reading option '-vf' ... matched as option 'vf' (set video filters) with argument 'scale=320:240'.
Reading option 'out.webm' ... matched as output url.
Finished splitting the commandline.
Parsing a group of options: global .
Applying option v (set logging level) with argument 56.
Successfully parsed a group of options.
Parsing a group of options: input url in.webm.
Successfully parsed a group of options.
Opening an input file: in.webm.
[NULL @ 000002387e6322a0] Opening 'in.webm' for reading
[file @ 000002387e632ea0] Setting default whitelist 'file,crypto'
Probing matroska,webm score:100 size:2048
Probing mp3 score:1 size:2048
[matroska,webm @ 000002387e6322a0] Format matroska,webm probed with size=2048 and score=100
st:0 removing common factor 1000000 from timebase
[matroska,webm @ 000002387e6322a0] Before avformat_find_stream_info() pos: 634 bytes read:32768 seeks:0 nb_streams:1
[matroska,webm @ 000002387e6322a0] All info found
[matroska,webm @ 000002387e6322a0] stream 0: start_time: 0.000 duration: -9223372036854776.000
[matroska,webm @ 000002387e6322a0] format: start_time: 0.000 duration: 0.400 bitrate=1432 kb/s
[matroska,webm @ 000002387e6322a0] After avformat_find_stream_info() pos: 34843 bytes read:65536 seeks:0 frames:1
Input #0, matroska,webm, from 'in.webm':
 Metadata:
 ENCODER : Lavf57.83.100
 Duration: 00:00:00.40, start: 0.000000, bitrate: 1432 kb/s
 Stream #0:0, 1, 1/1000: Video: vp8, 1 reference frame, yuv420p(progressive), 640x480, 0/1, SAR 1:1 DAR 4:3, 10 fps, 10 tbr, 1k tbn, 1k tbc (default)
 Metadata:
 alpha_mode : 1
 ENCODER : Lavc57.107.100 libvpx
 DURATION : 00:00:00.400000000
Successfully opened the file.
Parsing a group of options: output url out.webm.
Applying option c:v (codec name) with argument libvpx.
Applying option vf (set video filters) with argument scale=320:240.
Successfully parsed a group of options.
Opening an output file: out.webm.
[file @ 000002387e658b40] Setting default whitelist 'file,crypto'
Successfully opened the file.
detected 4 logical cores
Stream mapping:
 Stream #0:0 -> #0:0 (vp8 (native) -> vp8 (libvpx))
Press [q] to stop, [?] for help
cur_dts is invalid (this is harmless if it occurs once at the start per stream)
 Last message repeated 4 times
[Parsed_scale_0 @ 000002387e718a60] Setting 'w' to value '320'
[Parsed_scale_0 @ 000002387e718a60] Setting 'h' to value '240'
[Parsed_scale_0 @ 000002387e718a60] Setting 'flags' to value 'bicubic'
[Parsed_scale_0 @ 000002387e718a60] w:320 h:240 flags:'bicubic' interl:0
[graph 0 input from stream 0:0 @ 000002387e743b00] Setting 'video_size' to value '640x480'
[graph 0 input from stream 0:0 @ 000002387e743b00] Setting 'pix_fmt' to value '0'
[graph 0 input from stream 0:0 @ 000002387e743b00] Setting 'time_base' to value '1/1000'
[graph 0 input from stream 0:0 @ 000002387e743b00] Setting 'pixel_aspect' to value '1/1'
[graph 0 input from stream 0:0 @ 000002387e743b00] Setting 'sws_param' to value 'flags=2'
[graph 0 input from stream 0:0 @ 000002387e743b00] Setting 'frame_rate' to value '10/1'
[graph 0 input from stream 0:0 @ 000002387e743b00] w:640 h:480 pixfmt:yuv420p tb:1/1000 fr:10/1 sar:1/1 sws_param:flags=2
[format @ 000002387e7fe1e0] compat: called with args=[yuv420p|yuva420p]
[format @ 000002387e7fe1e0] Setting 'pix_fmts' to value 'yuv420p|yuva420p'
[AVFilterGraph @ 000002387e634e60] query_formats: 4 queried, 3 merged, 0 already done, 0 delayed
[Parsed_scale_0 @ 000002387e718a60] w:640 h:480 fmt:yuv420p sar:1/1 -> w:320 h:240 fmt:yuv420p sar:1/1 flags:0x4
[libvpx @ 000002387e657fe0] v1.6.1
[libvpx @ 000002387e657fe0] --prefix=/Users/kyle/software/libvpx/win64/libvpx-1.6.1-win64 --target=x86_64-win64-gcc
[libvpx @ 000002387e657fe0] vpx_codec_enc_cfg
[libvpx @ 000002387e657fe0] generic settings
 g_usage: 0
 g_threads: 0
 g_profile: 0
 g_w: 320
 g_h: 240
 g_bit_depth: 8
 g_input_bit_depth: 8
 g_timebase: {1/30}
 g_error_resilient: 0
 g_pass: 0
 g_lag_in_frames: 0
[libvpx @ 000002387e657fe0] rate control settings
 rc_dropframe_thresh: 0
 rc_resize_allowed: 0
 rc_resize_up_thresh: 60
 rc_resize_down_thresh: 30
 rc_end_usage: 0
 rc_twopass_stats_in: 0000000000000000(0)
 rc_target_bitrate: 256
[libvpx @ 000002387e657fe0] quantizer settings
 rc_min_quantizer: 4
 rc_max_quantizer: 63
[libvpx @ 000002387e657fe0] bitrate tolerance
 rc_undershoot_pct: 100
 rc_overshoot_pct: 100
[libvpx @ 000002387e657fe0] decoder buffer model
 rc_buf_sz: 6000
 rc_buf_initial_sz: 4000
 rc_buf_optimal_sz: 5000
[libvpx @ 000002387e657fe0] 2 pass rate control settings
 rc_2pass_vbr_bias_pct: 50
 rc_2pass_vbr_minsection_pct: 0
 rc_2pass_vbr_maxsection_pct: 400
[libvpx @ 000002387e657fe0] keyframing settings
 kf_mode: 1
 kf_min_dist: 0
 kf_max_dist: 128
[libvpx @ 000002387e657fe0] 
[libvpx @ 000002387e657fe0] vpx_codec_enc_cfg
[libvpx @ 000002387e657fe0] generic settings
 g_usage: 0
 g_threads: 0
 g_profile: 0
 g_w: 320
 g_h: 240
 g_bit_depth: 8
 g_input_bit_depth: 8
 g_timebase: {1/10}
 g_error_resilient: 0
 g_pass: 0
 g_lag_in_frames: 25
[libvpx @ 000002387e657fe0] rate control settings
 rc_dropframe_thresh: 0
 rc_resize_allowed: 0
 rc_resize_up_thresh: 60
 rc_resize_down_thresh: 30
 rc_end_usage: 0
 rc_twopass_stats_in: 0000000000000000(0)
 rc_target_bitrate: 200
[libvpx @ 000002387e657fe0] quantizer settings
 rc_min_quantizer: 4
 rc_max_quantizer: 63
[libvpx @ 000002387e657fe0] bitrate tolerance
 rc_undershoot_pct: 100
 rc_overshoot_pct: 100
[libvpx @ 000002387e657fe0] decoder buffer model
 rc_buf_sz: 6000
 rc_buf_initial_sz: 4000
 rc_buf_optimal_sz: 5000
[libvpx @ 000002387e657fe0] 2 pass rate control settings
 rc_2pass_vbr_bias_pct: 50
 rc_2pass_vbr_minsection_pct: 0
 rc_2pass_vbr_maxsection_pct: 400
[libvpx @ 000002387e657fe0] keyframing settings
 kf_mode: 1
 kf_min_dist: 0
 kf_max_dist: 128
[libvpx @ 000002387e657fe0] 
[libvpx @ 000002387e657fe0] vpx_codec_control
[libvpx @ 000002387e657fe0] VP8E_SET_CPUUSED: 1
[libvpx @ 000002387e657fe0] VP8E_SET_ARNR_MAXFRAMES: 0
[libvpx @ 000002387e657fe0] VP8E_SET_ARNR_STRENGTH: 3
[libvpx @ 000002387e657fe0] VP8E_SET_ARNR_TYPE: 3
[libvpx @ 000002387e657fe0] VP8E_SET_NOISE_SENSITIVITY: 0
[libvpx @ 000002387e657fe0] VP8E_SET_TOKEN_PARTITIONS: 0
[libvpx @ 000002387e657fe0] VP8E_SET_STATIC_THRESHOLD: 0
[libvpx @ 000002387e657fe0] Using deadline: 1000000
Output #0, webm, to 'out.webm':
 Metadata:
 encoder : Lavf57.83.100
 Stream #0:0, 0, 1/1000: Video: vp8 (libvpx), 1 reference frame, yuv420p, 320x240 [SAR 1:1 DAR 4:3], 0/1, q=-1--1, 200 kb/s, 10 fps, 1k tbn, 10 tbc (default)
 Metadata:
 alpha_mode : 1
 DURATION : 00:00:00.400000000
 encoder : Lavc57.107.100 libvpx
 Side data:
 cpb: bitrate max/min/avg: 0/0/0 buffer size: 0 vbv_delay: -1
Clipping frame in rate conversion by 0.000008
[webm @ 000002387e656880] get_metadata_duration returned: 400000
[webm @ 000002387e656880] Write early duration from metadata = 400
[webm @ 000002387e656880] Writing block at offset 3, size 11223, pts 0, dts 0, duration 100, keyframe 1
[webm @ 000002387e656880] Writing block at offset 11233, size 1288, pts 100, dts 100, duration 100, keyframe 0
[webm @ 000002387e656880] Writing block at offset 12528, size 1504, pts 200, dts 200, duration 100, keyframe 0
[webm @ 000002387e656880] Writing block at offset 14039, size 2481, pts 300, dts 300, duration 100, keyframe 0
[out_0_0 @ 000002387e743d60] EOF on sink link out_0_0:default.
No more output streams to write to, finishing.
[webm @ 000002387e656880] end duration = 400
[webm @ 000002387e656880] stream 0 end duration = 400
frame= 4 fps=0.0 q=0.0 Lsize= 17kB time=00:00:00.30 bitrate= 457.8kbits/s speed=4.45x 
video:16kB audio:0kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 4.413191%
Input file #0 (in.webm):
 Input stream #0:0 (video): 4 packets read (34992 bytes); 4 frames decoded; 
 Total: 4 packets (34992 bytes) demuxed
Output file #0 (out.webm):
 Output stream #0:0 (video): 4 frames encoded; 4 packets muxed (16496 bytes); 
 Total: 4 packets (16496 bytes) muxed
4 frames successfully decoded, 0 decoding errors
[AVIOContext @ 000002387e698c20] Statistics: 14 seeks, 10 writeouts
[AVIOContext @ 000002387cc773e0] Statistics: 71649 bytes read, 0 seeks



-
avcodec_receive_packet() doesn't see the output
1 March 2018, by Eugene Alexeev. I'm trying to create a converter which will make a video out of a set of images. Everything is in place: AVFormatContext, AVCodecContext, AVCodec. I create a YUV AVFrame out of a UIImage and send it to the encoder with avcodec_send_frame(). Everything goes fine until I try to get an AVPacket with avcodec_receive_packet(). Every time it returns -35, which means "output is not available in the current state - user must try to send input". As I said, I send input before trying to get anything out, and the send is successful.

Here's my code:

Initializing the FFmpeg entities:
- (BOOL)setupForConvert:(DummyFVPVideoFile *)videoFile outputPath:(NSString *)path
{
    if (!videoFile) {
        [self.delegate convertationFailed:@"VideoFile is nil!"];
        return NO;
    }
    currentVideoFile = videoFile;
    outputPath = path;
    BOOL success = NO;

    success = [self initFormatCtxAndCodecs:path];
    if (!success) {
        return NO;
    }

    success = [self addCameraStreams:videoFile];
    if (!success) {
        return NO;
    }

    success = [self openIOContext:path];
    if (!success) {
        return NO;
    }

    return YES;
}
- (BOOL)initFormatCtxAndCodecs:(NSString *)path
{
    //AVOutputFormat *fmt = av_guess_format("mp4", NULL, NULL);
    int ret = avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, [path UTF8String]);
    if (ret < 0) {
        NSLog(@"Couldn't create output context");
        return NO;
    }

    //encoder codec init
    pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!pCodec) {
        NSLog(@"Couldn't find a encoder codec!");
        return NO;
    }

    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (!pCodecCtx) {
        NSLog(@"Couldn't alloc encoder codec context!");
        return NO;
    }

    pCodecCtx->codec_tag = AV_CODEC_ID_H264;
    pCodecCtx->bit_rate = 400000;
    pCodecCtx->width = currentVideoFile.size.width;
    pCodecCtx->height = currentVideoFile.size.height;
    pCodecCtx->time_base = (AVRational){1, (int)currentVideoFile.framerate};
    pCodecCtx->framerate = (AVRational){(int)currentVideoFile.framerate, 1};
    pCodecCtx->gop_size = 10;
    pCodecCtx->max_b_frames = 1;
    pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;

    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        NSLog(@"Couldn't open the encoder codec!");
        return NO;
    }

    pPacket = av_packet_alloc();
    return YES;
}
- (BOOL)addCameraStreams:(DummyFVPVideoFile *)videoFile
{
    AVCodecParameters *params = avcodec_parameters_alloc();
    if (!params) {
        NSLog(@"Couldn't allocate codec parameters!");
        return NO;
    }

    if (avcodec_parameters_from_context(params, pCodecCtx) < 0) {
        NSLog(@"Couldn't copy parameters from context!");
        return NO;
    }

    for (int i = 0; i < videoFile.idCameras.count - 1; i++)
    {
        NSString *path = [videoFile.url URLByAppendingPathComponent:videoFile.idCameras[i]].path;
        AVStream *stream = avformat_new_stream(pFormatCtx, pCodec);
        if (!stream) {
            NSLog(@"Couldn't alloc stream!");
            return NO;
        }

        if (avcodec_parameters_copy(stream->codecpar, params) < 0) {
            NSLog(@"Couldn't copy parameters into stream!");
            return NO;
        }

        stream->avg_frame_rate.num = videoFile.framerate;
        stream->avg_frame_rate.den = 1;
        stream->codecpar->codec_tag = 0; //some silly workaround
        stream->index = i;
        streams[path] = [[VideoStream alloc] initWithStream:stream];
    }

    return YES;
}
- (BOOL)openIOContext:(NSString *)path
{
    AVIOContext *ioCtx = nil;
    if (avio_open(&ioCtx, [path UTF8String], AVIO_FLAG_WRITE) < 0) {
        return NO;
    }
    pFormatCtx->pb = ioCtx;
    return YES;
}

And here's the conversion process:
- (void)launchConvert:(DummyFVPVideoFile *)videoFile
{
    BOOL convertInProgress = YES;
    unsigned int frameCount = 1;
    unsigned long pts = 0;
    BOOL success = NO;

    success = [self writeHeader];
    if (!success) {
        NSLog(@"Couldn't write header!");
        return;
    }

    AVRational defaultTimeBase;
    defaultTimeBase.num = 1;
    defaultTimeBase.den = videoFile.framerate;
    AVRational streamTimeBase = streams.allValues.firstObject.stream->time_base;

    while (convertInProgress)
    {
        pts += av_rescale_q(1, defaultTimeBase, streamTimeBase);
        for (NSString *path in streams.allKeys)
        {
            UIImage *img = [UIImage imageWithContentsOfFile:[NSString stringWithFormat:@"%@/%u.jpg", path, frameCount]];
            AVPacket *pkt = [self getAVPacket:img withPts:pts];
            if (!pkt->data) { continue; }
            pkt->stream_index = streams[path].stream->index;
            //check all settings of pkt
            if (![self writePacket:pkt]) {
                NSLog(@"Couldn't write packet!");
                convertInProgress = NO;
                break;
            }
        }
        frameCount++;
    }

    success = [self writeTrailer];
    if (!success) {
        NSLog(@"Couldn't write trailer!");
        return;
    }

    NSLog(@"Convertation finished!");
    //delegate convertationFinished method
}
- (BOOL)writeHeader
{
    if (avformat_write_header(pFormatCtx, NULL) < 0) {
        return NO;
    }
    return YES;
}

- (BOOL)writePacket:(AVPacket *)pkt
{
    if (av_interleaved_write_frame(pFormatCtx, pkt) != 0) {
        return NO;
    }
    return YES;
}

- (BOOL)writeTrailer
{
    if (av_write_trailer(pFormatCtx) != 0) {
        return NO;
    }
    return YES;
}
/**
 This method will create an AVPacket out of a UIImage.
 @return AVPacket
 */
- (AVPacket *)getAVPacket:(UIImage *)img withPts:(unsigned long)pts
{
    if (!img) {
        NSLog(@"imgData is nil!");
        return nil;
    }
    uint8_t *imgData = [self getPixelDataFromImage:img];

    AVFrame *frame_yuv = av_frame_alloc();
    if (!frame_yuv) {
        NSLog(@"frame_yuv is nil!");
        return nil;
    }
    frame_yuv->format = AV_PIX_FMT_YUV420P;
    frame_yuv->width = (int)img.size.width;
    frame_yuv->height = (int)img.size.height;

    int ret = av_image_alloc(frame_yuv->data,
                             frame_yuv->linesize,
                             frame_yuv->width,
                             frame_yuv->height,
                             frame_yuv->format,
                             32);
    if (ret < 0) {
        NSLog(@"Couldn't alloc yuv frame!");
        return nil;
    }

    struct SwsContext *sws_ctx = nil;
    sws_ctx = sws_getContext((int)img.size.width, (int)img.size.height, AV_PIX_FMT_RGB24,
                             (int)img.size.width, (int)img.size.height, AV_PIX_FMT_YUV420P,
                             0, NULL, NULL, NULL);
    const uint8_t *scaleData[1] = { imgData };
    int inLineSize[1] = { 4 * img.size.width };
    sws_scale(sws_ctx, scaleData, inLineSize, 0, (int)img.size.height, frame_yuv->data, frame_yuv->linesize);

    frame_yuv->pict_type = AV_PICTURE_TYPE_I;
    frame_yuv->pts = pCodecCtx->frame_number;

    ret = avcodec_send_frame(pCodecCtx, frame_yuv); //every time everything is fine
    if (ret != 0) {
        NSLog(@"Couldn't send yuv frame!");
        return nil;
    }

    av_init_packet(pPacket);
    pPacket->dts = pPacket->pts = pts;
    do {
        ret = avcodec_receive_packet(pCodecCtx, pPacket); //every time -35 error
        NSLog(@"ret = %d", ret);
        if (ret == AVERROR_EOF) {
            NSLog(@"AVERROR_EOF!");
        } else if (ret == AVERROR(EAGAIN)) {
            NSLog(@"AVERROR(EAGAIN)");
        } else if (ret == AVERROR(EINVAL)) {
            NSLog(@"AVERROR(EINVAL)");
        }
        if (ret != 0) {
            NSLog(@"Couldn't receive packet!");
            //return nil;
        }
    } while ( ret == 0 );

    free(imgData);
    av_packet_unref(pPacket);
    av_packet_free(pPacket);
    av_frame_unref(&frame_yuv);
    av_frame_free(&frame_yuv);
    //perform other clean up and test dat shit

    return pPacket;
}

Any insights would be helpful. Thanks!
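
For comparison, the canonical receive loop from the FFmpeg encoding documentation treats AVERROR(EAGAIN) as "feed more input" rather than a failure: with max_b_frames = 1 and internal lookahead, the encoder buffers several frames before it emits the first packet, and it is drained by sending a NULL frame at the end. A minimal sketch in plain C, reusing the pCodecCtx / pFormatCtx names from the listing above (st stands for whichever stream the packet targets):

// Hedged sketch of the documented send/receive pattern, not the asker's code.
static int encodeAndWrite(AVCodecContext *enc, AVFrame *frame,
                          AVPacket *pkt, AVFormatContext *fmt, AVStream *st)
{
    int ret = avcodec_send_frame(enc, frame);       // frame == NULL starts the flush
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
        av_packet_rescale_ts(pkt, enc->time_base, st->time_base);
        pkt->stream_index = st->index;
        ret = av_interleaved_write_frame(fmt, pkt); // takes ownership of pkt's data
        if (ret < 0)
            return ret;
    }
    // EAGAIN/EOF are expected terminations of the inner loop, not errors.
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}

Calling this once per frame, and once more with frame == NULL after the last image, should make packets appear after the initial buffering delay.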