
Recherche avancée
Médias (1)
-
Richard Stallman et le logiciel libre
19 octobre 2011, par
Mis à jour : Mai 2013
Langue : français
Type : Texte
Autres articles (112)
-
Personnaliser en ajoutant son logo, sa bannière ou son image de fond
5 septembre 2013, parCertains thèmes prennent en compte trois éléments de personnalisation : l’ajout d’un logo ; l’ajout d’une bannière l’ajout d’une image de fond ;
-
Des sites réalisés avec MediaSPIP
2 mai 2011, parCette page présente quelques-uns des sites fonctionnant sous MediaSPIP.
Vous pouvez bien entendu ajouter le votre grâce au formulaire en bas de page. -
Emballe médias : à quoi cela sert ?
4 février 2011, parCe plugin vise à gérer des sites de mise en ligne de documents de tous types.
Il crée des "médias", à savoir : un "média" est un article au sens SPIP créé automatiquement lors du téléversement d’un document qu’il soit audio, vidéo, image ou textuel ; un seul document ne peut être lié à un article dit "média" ;
Sur d’autres sites (12210)
-
ffmpeg record video plays too fast
29 avril 2023, par Kris Xia — I'm a college student and I am studying FFmpeg now.



I have written a program that can record the desktop and audio ('virtual-audio-capturer') with FFmpeg, and I am now working on audio and video synchronization.
I met some problems that video recording plays too fast.



When I look for audio and video synchronization help on the Internet,I find a formula for calculating PTS :



pts = n * ((1 / timbase)/ fps)



When I use this formula,I find a phenomenon.



1. The higher the frame rate is, the faster the video playback speed.



2. The slower the frame rate is, the faster the video playback.



Also I find that while the frame rate is 10, the video playback speed will be right.



Why has this situation happened ?



I have thought this question for three days. I really hope someone can help me solve this problem.



I really appreciate the help.



#include "stdafx.h"

#ifdef __cplusplus
extern "C"
{
#endif
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include "libavutil/audio_fifo.h"

#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"
#include "libavutil/time.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/file.h"
#include "libavutil/mem.h"
#include "libavutil/frame.h"
#include "libavfilter/avfilter.h"
#include "libswresample/swresample.h"

#pragma comment(lib, "avcodec.lib")
#pragma comment(lib, "avformat.lib")
#pragma comment(lib, "avutil.lib")
#pragma comment(lib, "avdevice.lib")
#pragma comment(lib, "avfilter.lib")

#pragma comment(lib, "avfilter.lib")
#pragma comment(lib, "postproc.lib")
#pragma comment(lib, "swresample.lib")
#pragma comment(lib, "swscale.lib")
#ifdef __cplusplus
};
#endif

// Demuxer contexts: desktop capture input, audio capture input, and the
// MP4 muxer output. All are shared by the capture threads and the main loop.
AVFormatContext *pFormatCtx_Video = NULL, *pFormatCtx_Audio = NULL, *pFormatCtx_Out = NULL;

// Output encoder contexts (in practice aliases of the output streams'
// codec contexts — see OpenOutPut()).
AVCodecContext *outVideoCodecCtx = NULL;
AVCodecContext *outAudioCodecCtx = NULL;

AVStream *pVideoStream = NULL, *pAudioStream = NULL;

// Output encoders (MPEG-4 video, container-default audio codec).
AVCodec *outAVCodec;
AVCodec *outAudioCodec;

// Decoder context/codec for the gdigrab screen-capture stream.
AVCodecContext *pCodecCtx_Video;
AVCodec *pCodec_Video;
// FIFOs that hand frames from the capture threads to the muxing loop.
AVFifoBuffer *fifo_video = NULL;
AVAudioFifo *fifo_audio = NULL;
// Output stream indices (0 = video, 1 = audio; set in OpenOutPut()).
int VideoIndex, AudioIndex;
// NOTE(review): never assigned anywhere visible, so the H.264 branch in
// OpenOutPut() is effectively dead — confirm intent.
int codec_id;

// Locks guarding fifo_video / fifo_audio between threads.
CRITICAL_SECTION AudioSection, VideoSection;



// BGRA -> YUV420P conversion context built in OpenVideoCapture().
SwsContext *img_convert_ctx;
// Size in bytes of one buffered video frame (see OpenVideoCapture()).
int frame_size = 0;

uint8_t *picture_buf = NULL, *frame_buf = NULL;

// Global run flag: cleared by a key press in _tmain() to stop both threads.
bool bCap = true;

DWORD WINAPI ScreenCapThreadProc( LPVOID lpParam );
DWORD WINAPI AudioCapThreadProc( LPVOID lpParam );

int OpenVideoCapture()
{
 AVInputFormat *ifmt=av_find_input_format("gdigrab");
 AVDictionary *options = NULL;
 av_dict_set(&options, "framerate", "60", NULL);
 if(avformat_open_input(&pFormatCtx_Video, "desktop", ifmt, &options)!=0)
 {
 printf("Couldn't open input stream.(无法打开视频输入流)\n");
 return -1;
 }
 if(avformat_find_stream_info(pFormatCtx_Video,NULL)<0)
 {
 printf("Couldn't find stream information.(无法获取视频流信息)\n");
 return -1;
 }
 if (pFormatCtx_Video->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
 {
 printf("Couldn't find video stream information.(无法获取视频流信息)\n");
 return -1;
 }
 pCodecCtx_Video = pFormatCtx_Video->streams[0]->codec;
 pCodec_Video = avcodec_find_decoder(pCodecCtx_Video->codec_id);
 if(pCodec_Video == NULL)
 {
 printf("Codec not found.(没有找到解码器)\n");
 return -1;
 }
 if(avcodec_open2(pCodecCtx_Video, pCodec_Video, NULL) < 0)
 {
 printf("Could not open codec.(无法打开解码器)\n");
 return -1;
 }

 av_dump_format(pFormatCtx_Video, 0, NULL, 0);

 img_convert_ctx = sws_getContext(pCodecCtx_Video->width, pCodecCtx_Video->height, pCodecCtx_Video->pix_fmt, 
 pCodecCtx_Video->width, pCodecCtx_Video->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 

 frame_size = avpicture_get_size(pCodecCtx_Video->pix_fmt, pCodecCtx_Video->width, pCodecCtx_Video->height);
 fifo_video = av_fifo_alloc(30 * avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx_Video->width, pCodecCtx_Video->height));

 return 0;
}

static char *dup_wchar_to_utf8(wchar_t *w)
{
	// Convert a wide (UTF-16) string into a freshly av_malloc()ed UTF-8
	// copy. The caller owns the returned buffer (free with av_free());
	// returns NULL when allocation fails.
	int needed = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
	char *utf8 = (char *) av_malloc(needed);
	if (utf8 != NULL)
	{
		WideCharToMultiByte(CP_UTF8, 0, w, -1, utf8, needed, 0, 0);
	}
	return utf8;
}

int OpenAudioCapture()
{
 AVInputFormat *pAudioInputFmt = av_find_input_format("dshow");
 char * psDevName = dup_wchar_to_utf8(L"audio=virtual-audio-capturer");

 if (avformat_open_input(&pFormatCtx_Audio, psDevName, pAudioInputFmt,NULL) < 0)
 {
 printf("Couldn't open input stream.(无法打开音频输入流)\n");
 return -1;
 }

 if(avformat_find_stream_info(pFormatCtx_Audio,NULL)<0) 
 return -1; 

 if(pFormatCtx_Audio->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
 {
 printf("Couldn't find video stream information.(无法获取音频流信息)\n");
 return -1;
 }

 AVCodec *tmpCodec = avcodec_find_decoder(pFormatCtx_Audio->streams[0]->codec->codec_id);
 if(0 > avcodec_open2(pFormatCtx_Audio->streams[0]->codec, tmpCodec, NULL))
 {
 printf("can not find or open audio decoder!\n");
 }

 av_dump_format(pFormatCtx_Audio, 0, NULL, 0);

 return 0;
}

int OpenOutPut()
{
 AVStream *pVideoStream = NULL, *pAudioStream = NULL;
 const char *outFileName = "test.mp4";
 avformat_alloc_output_context2(&pFormatCtx_Out, NULL, NULL, outFileName);

 if (pFormatCtx_Video->streams[0]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
 {
 VideoIndex = 0;
 pVideoStream = avformat_new_stream(pFormatCtx_Out, NULL);
 if (!pVideoStream)
 {
 printf("can not new stream for output!\n");
 return -1;
 }

 outVideoCodecCtx = avcodec_alloc_context3(outAVCodec);
 if ( !outVideoCodecCtx )
 {
 printf("Error : avcodec_alloc_context3()\n");
 return -1;
 }

 //set codec context param
 outVideoCodecCtx = pVideoStream->codec;
 outVideoCodecCtx->codec_id = AV_CODEC_ID_MPEG4;
 outVideoCodecCtx->width = pFormatCtx_Video->streams[0]->codec->width;
 outVideoCodecCtx->height = pFormatCtx_Video->streams[0]->codec->height;
 outVideoCodecCtx->time_base = pFormatCtx_Video->streams[0]->codec->time_base;
 outVideoCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
 outVideoCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;

 if (codec_id == AV_CODEC_ID_H264)
 {
 av_opt_set(outVideoCodecCtx->priv_data, "preset", "slow", 0);
 }

 outAVCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
 if( !outAVCodec )
 {
 printf("\n\nError : avcodec_find_encoder()");
 return -1;
 }
 if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
 outVideoCodecCtx->flags |=CODEC_FLAG_GLOBAL_HEADER;

 if ((avcodec_open2(outVideoCodecCtx,outAVCodec, NULL)) < 0)
 {
 printf("can not open the encoder\n");
 return -1;
 }
 }

 if(pFormatCtx_Audio->streams[0]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
 {
 AVCodecContext *pOutputCodecCtx;
 AudioIndex = 1;
 pAudioStream = avformat_new_stream(pFormatCtx_Out, NULL);

 pAudioStream->codec->codec = avcodec_find_encoder(pFormatCtx_Out->oformat->audio_codec);

 pOutputCodecCtx = pAudioStream->codec;

 pOutputCodecCtx->sample_rate = pFormatCtx_Audio->streams[0]->codec->sample_rate;
 pOutputCodecCtx->channel_layout = pFormatCtx_Out->streams[0]->codec->channel_layout;
 pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pAudioStream->codec->channel_layout);
 if(pOutputCodecCtx->channel_layout == 0)
 {
 pOutputCodecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
 pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pOutputCodecCtx->channel_layout);

 }
 pOutputCodecCtx->sample_fmt = pAudioStream->codec->codec->sample_fmts[0];
 AVRational time_base={1, pAudioStream->codec->sample_rate};
 pAudioStream->time_base = time_base;
 //audioCodecCtx->time_base = time_base;

 pOutputCodecCtx->codec_tag = 0; 
 if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER) 
 pOutputCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;

 if (avcodec_open2(pOutputCodecCtx, pOutputCodecCtx->codec, 0) < 0)
 {
 printf("编码器打开失败,退出程序\n");
 return -1;
 }
 }

 if (!(pFormatCtx_Out->oformat->flags & AVFMT_NOFILE))
 {
 if(avio_open(&pFormatCtx_Out->pb, outFileName, AVIO_FLAG_WRITE) < 0)
 {
 printf("can not open output file handle!\n");
 return -1;
 }
 }

 if(avformat_write_header(pFormatCtx_Out, NULL) < 0)
 {
 printf("can not write the header of the output file!\n");
 return -1;
 }

 return 0;
}

int _tmain(int argc, _TCHAR* argv[])
{
 // Entry point: open both capture inputs and the MP4 output, spawn the two
 // capture threads, then interleave encoded video/audio packets until a key
 // is pressed and both FIFOs drain.
 av_register_all();
 avdevice_register_all();
 if (OpenVideoCapture() < 0)
 {
 return -1;
 }
 if (OpenAudioCapture() < 0)
 {
 return -1;
 }
 if (OpenOutPut() < 0)
 {
 return -1;
 }
// int fps;
 /*printf("输入帧率:");
 scanf_s("%d",&fps);
 if ( NULL == fps)
 {
 fps = 10;
 }*/

 InitializeCriticalSection(&VideoSection);
 InitializeCriticalSection(&AudioSection);

 // Reusable frame + raw buffer sized for one output-format video frame;
 // picture's plane pointers are aimed into picture_buf.
 AVFrame *picture = av_frame_alloc();
 int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt, 
 pFormatCtx_Out->streams[VideoIndex]->codec->width, pFormatCtx_Out->streams[VideoIndex]->codec->height);
 picture_buf = new uint8_t[size];

 avpicture_fill((AVPicture *)picture, picture_buf, 
 pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt, 
 pFormatCtx_Out->streams[VideoIndex]->codec->width, 
 pFormatCtx_Out->streams[VideoIndex]->codec->height);



 //star cap screen thread
 CreateThread( NULL, 0, ScreenCapThreadProc, 0, 0, NULL);
 //star cap audio thread
 CreateThread( NULL, 0, AudioCapThreadProc, 0, 0, NULL);
 // Last written PTS per stream, compared below to decide which stream to
 // mux next (classic av_compare_ts interleaving pattern).
 int64_t cur_pts_v=0,cur_pts_a=0;
 int VideoFrameIndex = 0, AudioFrameIndex = 0;

 while(1)
 {
 // Any key press stops capture; the 2s sleep lets the threads flush
 // their last frames into the FIFOs before draining starts.
 if (_kbhit() != 0 && bCap)
 {
 bCap = false;
 Sleep(2000);
 }
 if (fifo_audio && fifo_video)
 {
 int sizeAudio = av_audio_fifo_size(fifo_audio);
 int sizeVideo = av_fifo_size(fifo_video);
 // Stop once capture has ended and both FIFOs are drained.
 if (av_audio_fifo_size(fifo_audio) <= pFormatCtx_Out->streams[AudioIndex]->codec->frame_size && 
 av_fifo_size(fifo_video) <= frame_size && !bCap)
 {
 break;
 }
 }

 // Write whichever stream is currently behind in presentation time.
 if(av_compare_ts(cur_pts_v, pFormatCtx_Out->streams[VideoIndex]->time_base, 
 cur_pts_a,pFormatCtx_Out->streams[AudioIndex]->time_base) <= 0)
 {
 // Video exhausted after capture stop: push its clock to +inf so the
 // comparison above always picks audio from now on.
 if (av_fifo_size(fifo_video) < frame_size && !bCap)
 {
 cur_pts_v = 0x7fffffffffffffff;
 }
 if(av_fifo_size(fifo_video) >= size)
 {
 EnterCriticalSection(&VideoSection);
 av_fifo_generic_read(fifo_video, picture_buf, size, NULL); // read one frame's bytes out of the video FIFO
 LeaveCriticalSection(&VideoSection);

 avpicture_fill((AVPicture *)picture, picture_buf,
 pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
 pFormatCtx_Out->streams[VideoIndex]->codec->width,
 pFormatCtx_Out->streams[VideoIndex]->codec->height); // re-point picture's plane pointers into picture_buf

 //pts = n * ((1 / timbase)/ fps);
 //picture->pts = VideoFrameIndex * ((pFormatCtx_Video->streams[0]->time_base.den / pFormatCtx_Video->streams[0]->time_base.num) / 24);
 // NOTE(review): this scaling by 100000/180 does not correspond to any
 // time base and is the likely cause of the wrong playback speed. With
 // the encoder's time_base set to 1/fps, the PTS should simply be the
 // frame index (picture->pts = VideoFrameIndex) — TODO confirm.
 picture->pts = VideoFrameIndex * ((outVideoCodecCtx->time_base.den * 100000 / outVideoCodecCtx->time_base.num) / 180);

 int got_picture = 0;
 AVPacket pkt;
 av_init_packet(&pkt);

 pkt.data = NULL;
 pkt.size = 0;
 // Encode the raw frame into a packet.
 int ret = avcodec_encode_video2(pFormatCtx_Out->streams[VideoIndex]->codec, &pkt, picture, &got_picture);
 if(ret < 0)
 {
 continue;
 }

 if (got_picture==1)
 {
 pkt.stream_index = VideoIndex;
 /*int count = 1;
 pkt.pts = pkt.dts = count * ((pFormatCtx_Video->streams[0]->time_base.den / pFormatCtx_Video->streams[0]->time_base.num) / 15);
 count++;*/

 //x = pts * (timebase1.num / timebase1.den )* (timebase2.den / timebase2.num);

 // Rescale from the capture stream's time base to the muxer stream's.
 // NOTE(review): the PTS above was written in the ENCODER time base,
 // so rescaling from pFormatCtx_Video->streams[0]->time_base here may
 // be mixing time bases — verify against the muxing example.
 pkt.pts = av_rescale_q_rnd(pkt.pts, pFormatCtx_Video->streams[0]->time_base, 
 pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); 
 pkt.dts = av_rescale_q_rnd(pkt.dts, pFormatCtx_Video->streams[0]->time_base, 
 pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX)); 


 // Hard-coded 60 fps duration in output-stream ticks.
 pkt.duration = ((pFormatCtx_Out->streams[0]->time_base.den / pFormatCtx_Out->streams[0]->time_base.num) / 60);
 //pkt.duration = 1000/60;
 //pkt.pts = pkt.dts = Count * (ofmt_ctx->streams[stream_index]->time_base.den) /ofmt_ctx->streams[stream_index]->time_base.num / 10;

 //Count++;


 cur_pts_v = pkt.pts;

 ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt);
 //delete[] pkt.data;
 av_free_packet(&pkt);
 }
 VideoFrameIndex++;
 }
 }
 else
 {
 if (NULL == fifo_audio)
 {
 continue;// audio FIFO is allocated lazily by AudioCapThreadProc
 }
 // Audio exhausted after capture stop: push its clock to +inf.
 if (av_audio_fifo_size(fifo_audio) < pFormatCtx_Out->streams[AudioIndex]->codec->frame_size && !bCap)
 {
 cur_pts_a = 0x7fffffffffffffff;
 }
 // Encode exactly one encoder frame's worth of samples (1024 fallback
 // when the encoder reports no fixed frame size).
 if(av_audio_fifo_size(fifo_audio) >= 
 (pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024))
 {
 AVFrame *frame;
 frame = av_frame_alloc();
 frame->nb_samples = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size>0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size: 1024;
 frame->channel_layout = pFormatCtx_Out->streams[AudioIndex]->codec->channel_layout;
 frame->format = pFormatCtx_Out->streams[AudioIndex]->codec->sample_fmt;
 frame->sample_rate = pFormatCtx_Out->streams[AudioIndex]->codec->sample_rate;
 av_frame_get_buffer(frame, 0);

 EnterCriticalSection(&AudioSection);
 av_audio_fifo_read(fifo_audio, (void **)frame->data, 
 (pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024));
 LeaveCriticalSection(&AudioSection);

 AVPacket pkt_out;
 av_init_packet(&pkt_out);
 int got_picture = -1;
 pkt_out.data = NULL;
 pkt_out.size = 0;

 // Audio PTS counts samples, matching the 1/sample_rate stream
 // time base set in OpenOutPut().
 frame->pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
 if (avcodec_encode_audio2(pFormatCtx_Out->streams[AudioIndex]->codec, &pkt_out, frame, &got_picture) < 0)
 {
 printf("can not decoder a frame");
 }
 av_frame_free(&frame);
 if (got_picture) 
 {
 pkt_out.stream_index = AudioIndex;
 pkt_out.pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
 pkt_out.dts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
 pkt_out.duration = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;

 cur_pts_a = pkt_out.pts;

 int ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt_out);
 av_free_packet(&pkt_out);
 }
 AudioFrameIndex++;
 }
 }
 }

 delete[] picture_buf;

 av_fifo_free(fifo_video);
 av_audio_fifo_free(fifo_audio);

 // Finalize the MP4 (writes the moov atom) and release everything.
 av_write_trailer(pFormatCtx_Out);

 avio_close(pFormatCtx_Out->pb);
 avformat_free_context(pFormatCtx_Out);

 if (pFormatCtx_Video != NULL)
 {
 avformat_close_input(&pFormatCtx_Video);
 pFormatCtx_Video = NULL;
 }
 if (pFormatCtx_Audio != NULL)
 {
 avformat_close_input(&pFormatCtx_Audio);
 pFormatCtx_Audio = NULL;
 }

 return 0;
}

DWORD WINAPI ScreenCapThreadProc( LPVOID lpParam )
{
	// Capture thread: read desktop frames from gdigrab, decode, convert to
	// YUV420P, and push the Y/U/V planes into fifo_video for the muxing
	// loop in _tmain().
	AVPacket packet;
	int got_picture;
	AVFrame *pFrame = av_frame_alloc();
	AVFrame *picture = av_frame_alloc();

	int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
		pFormatCtx_Out->streams[VideoIndex]->codec->width,
		pFormatCtx_Out->streams[VideoIndex]->codec->height);

	// fix: give this thread its own conversion buffer. The original aimed
	// picture at the global picture_buf, which the main thread concurrently
	// reads FIFO data into — an unsynchronized data race.
	uint8_t *conv_buf = new uint8_t[size];
	avpicture_fill((AVPicture *)picture, conv_buf,
		pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
		pFormatCtx_Out->streams[VideoIndex]->codec->width,
		pFormatCtx_Out->streams[VideoIndex]->codec->height);

	// fix: removed the fopen("proc_test.yuv", "wb+") — the handle was never
	// written to and never closed (resource leak / stray file).
	av_init_packet(&packet);
	int height = pFormatCtx_Out->streams[VideoIndex]->codec->height;
	int width = pFormatCtx_Out->streams[VideoIndex]->codec->width;
	int y_size = height * width;

	while (bCap)
	{
		packet.data = NULL;
		packet.size = 0;
		if (av_read_frame(pFormatCtx_Video, &packet) < 0)
		{
			continue;
		}
		if (packet.stream_index == 0)
		{
			if (avcodec_decode_video2(pCodecCtx_Video, pFrame, &got_picture, &packet) < 0)
			{
				printf("Decode Error.(解码错误)\n");
				av_free_packet(&packet); // fix: packet leaked on this path
				continue;
			}
			if (got_picture)
			{
				// Convert the captured frame to YUV420P planes in conv_buf.
				sws_scale(img_convert_ctx,
					(const uint8_t* const*)pFrame->data,
					pFrame->linesize,
					0,
					pFormatCtx_Out->streams[VideoIndex]->codec->height,
					picture->data,
					picture->linesize);

				// Only enqueue when a whole frame fits; otherwise the frame
				// is dropped (frames must stay whole in the FIFO).
				if (av_fifo_space(fifo_video) >= size)
				{
					EnterCriticalSection(&VideoSection);
					av_fifo_generic_write(fifo_video, picture->data[0], y_size, NULL);
					av_fifo_generic_write(fifo_video, picture->data[1], y_size/4, NULL);
					av_fifo_generic_write(fifo_video, picture->data[2], y_size/4, NULL);
					LeaveCriticalSection(&VideoSection);
				}
			}
		}
		av_free_packet(&packet);
	}

	delete[] conv_buf;
	av_frame_free(&pFrame);
	av_frame_free(&picture);
	return 0;
}

DWORD WINAPI AudioCapThreadProc( LPVOID lpParam )
{
	// Capture thread: read packets from the dshow audio device, decode them,
	// and append the samples to fifo_audio (allocated lazily on the first
	// decoded frame, once nb_samples is known).
	AVPacket pkt;
	AVFrame *frame = av_frame_alloc();
	int gotframe;

	av_init_packet(&pkt); // fix: packet fields were previously uninitialized

	while (bCap)
	{
		pkt.data = NULL;
		pkt.size = 0;
		if (av_read_frame(pFormatCtx_Audio, &pkt) < 0)
		{
			continue;
		}

		if (avcodec_decode_audio4(pFormatCtx_Audio->streams[0]->codec, frame, &gotframe, &pkt) < 0)
		{
			av_free_packet(&pkt); // fix: packet leaked on this error path
			printf("can not decoder a frame");
			break;
		}
		av_free_packet(&pkt);

		if (!gotframe)
		{
			printf("没有获取到数据,继续下一次");
			continue;
		}

		if (NULL == fifo_audio)
		{
			// First frame: size the FIFO for ~30 decoded frames.
			fifo_audio = av_audio_fifo_alloc(pFormatCtx_Audio->streams[0]->codec->sample_fmt,
				pFormatCtx_Audio->streams[0]->codec->channels, 30 * frame->nb_samples);
		}

		// Drop samples when the FIFO is full rather than blocking capture.
		if (av_audio_fifo_space(fifo_audio) >= frame->nb_samples)
		{
			EnterCriticalSection(&AudioSection);
			av_audio_fifo_write(fifo_audio, (void **)frame->data, frame->nb_samples);
			LeaveCriticalSection(&AudioSection);
		}
	}

	av_frame_free(&frame);
	return 0;
}




Maybe there is another way to calculate PTS and DTS



I hope that whatever the frame rate is, the video playback speed is right — not too fast or too slow.


-
random select file in batch scrip and ffmpeg
8 mars 2019, par Hòa Hưng NgôI’m building a script to create multiple videos from multiple images and audio inputs. I want to create randomness in selecting images to audio. I found the loop when using the ffmpeg script to build the video, but it did not support random pick up. I tried to understand the script in the language of windows and found out the use of the variable $ (shuf -n1 -e e : audio *) but I could not include it in the ffmpeg loop. I really hope to have help from the community. Sincere thanks !
This is my scenario :set audiorandom = $(shuf -n1 -e e:\audio\*.mp3)
: encode video
for %%i in ("%INPUT%\*.jpg") DO ffmpeg -i "%%i" -i "%%audiorandom" -threads 0 -shortest -preset ultrafast "%output%\%%~ni.mp4" -
nginx RTMP to HLS : FFMPG error when trying multiple bitrate output [on hold]
28 mai 2014, par user3685074I’m currently trying to convert my RTMP Livestream into a HLS with 3 quality-settings.
I followed this guide
I’ve compiled my own FFMPEG and it’s working if I just convert 1 file.
It seems libx264 isn't able to do multiple encodings at the same time? I'm using this command:
exec /usr/local/bin/ffmpeg -i rtmp://localhost/src/$name
-c:a libfdk_aac -b:a 32k -c:v libx264 -b:v 128K -f flv rtmp://localhost/hls/$name_low
-c:a libfdk_aac -b:a 64k -c:v libx264 -b:v 256K -f flv rtmp://localhost/hls/$name_mid
-c:a libfdk_aac -b:a 128k -c:v libx264 -b:v 512K -f flv rtmp://localhost/hls/$name_hi 2>>/tmp/ffmpeg.log;this is the output :
ffmpeg version N-63519-g61917a1 Copyright (c) 2000-2014 the FFmpeg developers
built on May 28 2014 18:06:42 with gcc 4.4.3 (Ubuntu 4.4.3-4ubuntu5.1)
configuration: --enable-gpl --enable-version3 --enable-nonfree --enable-postproc --enable-libfaac --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libtheora --enable-libvorbis --enable-libx264 --enable-x11grab --enable-libvpx --enable-libmp3lame --enable-librtmp --enable-libspeex --enable-libfdk_aac
libavutil 52. 87.100 / 52. 87.100
libavcodec 55. 65.100 / 55. 65.100
libavformat 55. 41.100 / 55. 41.100
libavdevice 55. 13.101 / 55. 13.101
libavfilter 4. 5.100 / 4. 5.100
libswscale 2. 6.100 / 2. 6.100
libswresample 0. 19.100 / 0. 19.100
libpostproc 52. 3.100 / 52. 3.100
Metadata:
Server NGINX RTMP (github.com/arut/nginx-rtmp-module)
width 1280.00
height 720.00
displayWidth 1280.00
displayHeight 720.00
duration 0.00
framerate 25.00
fps 25.00
videodatarate 390.00
videocodecid 0.00
audiodatarate 27.00
audiocodecid 11.00
Input #0, flv, from 'rtmp://localhost/src/test':
Metadata:
Server : NGINX RTMP (github.com/arut/nginx-rtmp-module)
displayWidth : 1280
displayHeight : 720
fps : 25
profile :
level :
Duration: 00:00:00.00, start: 0.080000, bitrate: N/A
Stream #0:0: Video: h264 (High), yuv420p, 1280x720, 399 kb/s, 25 fps, 25 tbr, 1k tbn, 50 tbc
Stream #0:1: Audio: speex, 16000 Hz, mono, s16, 27 kb/s
[libx264 @ 0x5260380] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2
[libx264 @ 0x5260380] profile High, level 3.1
[libx264 @ 0x5260380] 264 - core 142 r2431 f23da7c - H.264/MPEG-4 AVC codec - Copyleft 2003-2014 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=-2 threads=24 lookahead_threads=4 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=2 keyint=250 keyint_min=25 scenecut=40 intra_refresh=0 rc_lookahead=40 rc=abr mbtree=1 bitrate=128 ratetol=1.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
[libx264 @ 0x525a920] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2
Output #0, flv, to 'rtmp://localhost/hls/test_low':
Metadata:
Server : NGINX RTMP (github.com/arut/nginx-rtmp-module)
displayWidth : 1280
displayHeight : 720
fps : 25
profile :
level :
Stream #0:0: Video: h264 (libx264), yuv420p, 1280x720, q=-1--1, 128 kb/s, 25 fps, 90k tbn, 25 tbc
Metadata:
encoder : Lavc55.65.100 libx264
Stream #0:1: Audio: aac (libfdk_aac), 16000 Hz, mono, s16, 32 kb/s
Metadata:
encoder : Lavc55.65.100 libfdk_aac
Output #1, flv, to 'rtmp://localhost/hls/test_mid':
Metadata:
Server : NGINX RTMP (github.com/arut/nginx-rtmp-module)
displayWidth : 1280
displayHeight : 720
fps : 25
profile :
level :
Stream #1:0: Video: h264, yuv420p, 1280x720, q=-1--1, 256 kb/s, 25 fps, 90k tbn, 25 tbc
Metadata:
encoder : Lavc55.65.100 libx264
Stream #1:1: Audio: aac, 16000 Hz, mono, s16
Metadata:
encoder : Lavc55.65.100 libfdk_aac
Output #2, flv, to 'rtmp://localhost/hls/test_hi':
Metadata:
Server : NGINX RTMP (github.com/arut/nginx-rtmp-module)
displayWidth : 1280
displayHeight : 720
fps : 25
profile :
level :
Stream #2:0: Video: h264, yuv420p, 1280x720, q=-1--1, 25 fps, 90k tbn, 25 tbc
Metadata:
encoder : Lavc55.65.100 libx264
Stream #2:1: Audio: aac, 16000 Hz, mono, s16
Metadata:
encoder : Lavc55.65.100 libfdk_aac
Stream mapping:
Stream #0:0 -> #0:0 (h264 -> libx264)
Stream #0:1 -> #0:1 (libspeex -> libfdk_aac)
Stream #0:0 -> #1:0 (h264 -> libx264)
Stream #0:1 -> #1:1 (libspeex -> libfdk_aac)
Stream #0:0 -> #2:0 (h264 -> libx264)
Stream #0:1 -> #2:1 (libspeex -> libfdk_aac)
Error while opening encoder for output stream #1:0 - maybe incorrect parameters such as bit_rate, rate, width or height. I hope you can help me, and sorry for my bad English.
Greetz
Kevin