
Media (1)
-
Rennes Emotion Map 2010-11
19 October 2011, by
Updated: July 2013
Language: French
Type: Text
Other articles (104)
-
Encoding and processing into web-friendly formats
13 April 2011, by
MediaSPIP automatically converts uploaded files to internet-compatible formats.
Video files are encoded as Ogv and WebM (supported by HTML5) and MP4 (supported by HTML5 and Flash).
Audio files are encoded as Ogg (supported by HTML5) and MP3 (supported by HTML5 and Flash).
Where possible, text is analyzed in order to retrieve the data needed by search engines, and is then exported as a series of image files.
All uploaded files are stored online in their original format, so you can (...) -
Accepted formats
28 January 2010, by
The following commands provide information about the formats and codecs supported by the local ffmpeg installation:
ffmpeg -codecs ffmpeg -formats
Accepted input video formats
This list is not exhaustive; it highlights the main formats used: h264 : H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 m4v : raw MPEG-4 video format flv : Flash Video (FLV) / Sorenson Spark / Sorenson H.263 Theora wmv :
Possible output video formats
As a first step we (...) -
Farm management
2 March 2010, by
The farm as a whole is managed by "super admins".
Some settings can be adjusted to meet the needs of the different channels.
As a first step it uses the "Gestion de mutualisation" plugin
On other sites (6764)
-
Benefits and Shortcomings of Multi-Touch Attribution
13 March 2023, by Erin — Analytics Tips -
Screenrecorder application output video resolution issues [closed]
23 June 2022, by JessieK
Using the GitHub code for ScreenRecorder on Linux.
Everything works fine besides the resolution of the output video.
I tried playing with the settings and the quality improved significantly, but there is still no way to change the resolution.
I need the output video to have the same size as the input video.


using namespace std;

 /* initialize the resources*/
 ScreenRecorder::ScreenRecorder()
 {
 
 av_register_all();
 avcodec_register_all();
 avdevice_register_all();
 cout<<"\nall required functions are registered successfully";
 }
 
 /* uninitialize the resources */
 ScreenRecorder::~ScreenRecorder()
 {
 
 avformat_close_input(&pAVFormatContext);
 if( !pAVFormatContext )
 {
 cout<<"\nfile closed sucessfully";
 }
 else
 {
 cout<<"\nunable to close the file";
 exit(1);
 }
 
 avformat_free_context(pAVFormatContext);
 if( !pAVFormatContext )
 {
 cout<<"\navformat free successfully";
 }
 else
 {
 cout<<"\nunable to free avformat context";
 exit(1);
 }
 
 }
 
 /* function to capture and store data in frames by allocating required memory and auto deallocating the memory. */
 int ScreenRecorder::CaptureVideoFrames()
 {
 int flag;
 int frameFinished; // when you decode a single packet you may still not have enough information to produce a frame (depending on the codec, sometimes you do); only once you have decoded the group of packets that makes up a frame do you have a picture. That is why frameFinished tells you whether enough was decoded to output a frame.
 
 int frame_index = 0;
 value = 0;
 
 pAVPacket = (AVPacket *)av_malloc(sizeof(AVPacket));
 av_init_packet(pAVPacket);
 
 pAVFrame = av_frame_alloc();
 if( !pAVFrame )
 {
 cout<<"\nunable to release the avframe resources";
 exit(1);
 }
 
 outFrame = av_frame_alloc();//Allocate an AVFrame and set its fields to default values.
 if( !outFrame )
 {
 cout<<"\nunable to release the avframe resources for outframe";
 exit(1);
 }
 
 int video_outbuf_size;
 int nbytes = av_image_get_buffer_size(outAVCodecContext->pix_fmt,outAVCodecContext->width,outAVCodecContext->height,32);
 uint8_t *video_outbuf = (uint8_t*)av_malloc(nbytes);
 if( video_outbuf == NULL )
 {
 cout<<"\nunable to allocate memory";
 exit(1);
 }
 
 // Setup the data pointers and linesizes based on the specified image parameters and the provided array.
 value = av_image_fill_arrays( outFrame->data, outFrame->linesize, video_outbuf , AV_PIX_FMT_YUV420P, outAVCodecContext->width,outAVCodecContext->height,1 ); // returns : the size in bytes required for src
 if(value < 0)
 {
 cout<<"\nerror in filling image array";
 }
 
 SwsContext* swsCtx_ ;
 
 // Allocate and return swsContext.
 // a pointer to an allocated context, or NULL in case of error
 // Deprecated : Use sws_getCachedContext() instead.
 swsCtx_ = sws_getContext(pAVCodecContext->width,
 pAVCodecContext->height,
 pAVCodecContext->pix_fmt,
 outAVCodecContext->width,
 outAVCodecContext->height,
 outAVCodecContext->pix_fmt,
 SWS_BICUBIC, NULL, NULL, NULL);
 
 
 int ii = 0;
 int no_frames = 100;
 cout<<"\nenter No. of frames to capture : ";
 cin>>no_frames;
 
 AVPacket outPacket;
 int j = 0;
 
 int got_picture;
 
 while( av_read_frame( pAVFormatContext , pAVPacket ) >= 0 )
 {
 if( ii++ == no_frames )break;
 if(pAVPacket->stream_index == VideoStreamIndx)
 {
 value = avcodec_decode_video2( pAVCodecContext , pAVFrame , &frameFinished , pAVPacket );
 if( value < 0)
 {
 cout<<"unable to decode video";
 }
 
 if(frameFinished)// Frame successfully decoded :)
 {
 sws_scale(swsCtx_, pAVFrame->data, pAVFrame->linesize,0, pAVCodecContext->height, outFrame->data,outFrame->linesize);
 av_init_packet(&outPacket);
 outPacket.data = NULL; // packet data will be allocated by the encoder
 outPacket.size = 0;
 
 avcodec_encode_video2(outAVCodecContext , &outPacket ,outFrame , &got_picture);
 
 if(got_picture)
 {
 if(outPacket.pts != AV_NOPTS_VALUE)
 outPacket.pts = av_rescale_q(outPacket.pts, video_st->codec->time_base, video_st->time_base);
 if(outPacket.dts != AV_NOPTS_VALUE)
 outPacket.dts = av_rescale_q(outPacket.dts, video_st->codec->time_base, video_st->time_base);
 
 printf("Write frame %3d (size= %2d)\n", j++, outPacket.size/1000);
 if(av_write_frame(outAVFormatContext , &outPacket) != 0)
 {
 cout<<"\nerror in writing video frame";
 }
 
 av_packet_unref(&outPacket);
 } // got_picture
 
 av_packet_unref(&outPacket);
 } // frameFinished
 
 }
 }// End of while-loop



The first of the two parts is above. The original app actually seems to record video of the same size as my application does, so it is of no use to me either.
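
(As an aside, avcodec_decode_video2() and avcodec_encode_video2(), used in the loop above, are deprecated and have been removed in FFmpeg 5.0. A minimal sketch of the equivalent decode step using the send/receive API is shown below; the variable naming follows the question's code, and the helper itself is only an illustration, not part of the original program.)

extern "C" {
#include <libavcodec/avcodec.h>
}

/* Hedged sketch of the send/receive decode API (FFmpeg 3.1+), standing in for
   avcodec_decode_video2(). Returns 1 when a frame was decoded, 0 when the
   decoder needs more input, and a negative AVERROR code on failure. */
static int decode_one_frame(AVCodecContext *dec, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec, pkt);   /* feed one compressed packet */
    if (ret < 0)
        return ret;

    ret = avcodec_receive_frame(dec, frame);   /* try to pull a decoded frame */
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;                              /* no complete frame yet */
    return ret < 0 ? ret : 1;
}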



Second part of the code


av_free(video_outbuf);

}

/* establish the connection to the camera or screen through its respective device */
int ScreenRecorder::openCamera()
{

 value = 0;
 options = NULL;
 pAVFormatContext = NULL;

 pAVFormatContext = avformat_alloc_context();//Allocate an AVFormatContext.
/*

X11 video input device.
To enable this input device during configuration you need libxcb installed on your system. It will be automatically detected during configuration.
This device allows one to capture a region of an X11 display. 
refer : https://www.ffmpeg.org/ffmpeg-devices.html#x11grab
*/
 /* the code below is for screen recording. to connect to a camera, use v4l2 as the input parameter for av_find_input_format */ 
 pAVInputFormat = av_find_input_format("x11grab");
 value = avformat_open_input(&pAVFormatContext, ":0.0+10,250", pAVInputFormat, NULL);
 if(value != 0)
 {
 cout<<"\nerror in opening input device";
 exit(1);
 }

 /* set frames per second */
 value = av_dict_set( &options,"framerate","30",0 );
 if(value < 0)
 {
 cout<<"\nerror in setting dictionary value";
 exit(1);
 }

 value = av_dict_set( &options, "preset", "medium", 0 );
 if(value < 0)
 {
 cout<<"\nerror in setting preset values";
 exit(1);
 }

// value = avformat_find_stream_info(pAVFormatContext,NULL);
 if(value < 0)
 {
 cout<<"\nunable to find the stream information";
 exit(1);
 }

 VideoStreamIndx = -1;

 /* find the first video stream index. There is also an API available to do the operations below. */
 for(int i = 0; i < pAVFormatContext->nb_streams; i++ ) // find the video stream position/index
 {
 if( pAVFormatContext->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO )
 {
 VideoStreamIndx = i;
 break;
 }

 } 

 if( VideoStreamIndx == -1)
 {
 cout<<"\nunable to find the video stream index. (-1)";
 exit(1);
 }

 // get the codec context of the video stream (via the deprecated streams[]->codec field)
 pAVCodecContext = pAVFormatContext->streams[VideoStreamIndx]->codec;

 pAVCodec = avcodec_find_decoder(pAVCodecContext->codec_id);
 if( pAVCodec == NULL )
 {
 cout<<"\nunable to find the decoder";
 exit(1);
 }

 value = avcodec_open2(pAVCodecContext , pAVCodec , NULL);//Initialize the AVCodecContext to use the given AVCodec.
 if( value < 0 )
 {
 cout<<"\nunable to open the av codec";
 exit(1);
 }
}

/* initialize the video output file and its properties */
int ScreenRecorder::init_outputfile()
{
 outAVFormatContext = NULL;
 value = 0;
 output_file = "../media/output.mp4";

 avformat_alloc_output_context2(&outAVFormatContext, NULL, NULL, output_file);
 if (!outAVFormatContext)
 {
 cout<<"\nerror in allocating av format output context";
 exit(1);
 }

/* Returns the output format in the list of registered output formats which best matches the provided parameters, or returns NULL if there is no match. */
 output_format = av_guess_format(NULL, output_file ,NULL);
 if( !output_format )
 {
 cout<<"\nerror in guessing the video format. try with correct format";
 exit(1);
 }

 video_st = avformat_new_stream(outAVFormatContext ,NULL);
 if( !video_st )
 {
 cout<<"\nerror in creating a av format new stream";
 exit(1);
 }

 outAVCodecContext = avcodec_alloc_context3(outAVCodec);
 if( !outAVCodecContext )
 {
 cout<<"\nerror in allocating the codec contexts";
 exit(1);
 }

 /* set property of the video file */
 outAVCodecContext = video_st->codec;
 outAVCodecContext->codec_id = AV_CODEC_ID_MPEG4;// AV_CODEC_ID_MPEG4; // AV_CODEC_ID_H264 // AV_CODEC_ID_MPEG1VIDEO
 outAVCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
 outAVCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
 outAVCodecContext->bit_rate = 2500000; // 2500000
 outAVCodecContext->width = 1920;
 outAVCodecContext->height = 1080;
 outAVCodecContext->gop_size = 3;
 outAVCodecContext->max_b_frames = 2;
 outAVCodecContext->time_base.num = 1;
 outAVCodecContext->time_base.den = 30; // 30 fps

 {
 av_opt_set(outAVCodecContext->priv_data, "preset", "slow", 0);
 }

 outAVCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
 if( !outAVCodec )
 {
 cout<<"\nerror in finding the av codecs. try again with correct codec";
 exit(1);
 }

 /* Some container formats (like MP4) require global headers to be present
 Mark the encoder so that it behaves accordingly. */

 if ( outAVFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
 {
 outAVCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
 }

 value = avcodec_open2(outAVCodecContext, outAVCodec, NULL);
 if( value < 0)
 {
 cout<<"\nerror in opening the avcodec";
 exit(1);
 }

 /* create empty video file */
 if ( !(outAVFormatContext->flags & AVFMT_NOFILE) )
 {
 if( avio_open2(&outAVFormatContext->pb , output_file , AVIO_FLAG_WRITE ,NULL, NULL) < 0 )
 {
 cout<<"\nerror in creating the video file";
 exit(1);
 }
 }

 if(!outAVFormatContext->nb_streams)
 {
 cout<<"\noutput file dose not contain any stream";
 exit(1);
 }

 /* important: the MP4 container and some other advanced containers require header information */
 value = avformat_write_header(outAVFormatContext , &options);
 if(value < 0)
 {
 cout<<"\nerror in writing the header context";
 exit(1);
 }


 cout<<"\n\nOutput file information :\n\n";
 av_dump_format(outAVFormatContext , 0 ,output_file ,1);
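
One thing worth noting about init_outputfile() above is that the output dimensions are hardcoded to 1920x1080. Since the goal is an output video with the same size as the captured input, a minimal sketch of deriving those values from the already-opened input codec context is shown below; it assumes openCamera() has run first, and the helper name is made up for illustration.

extern "C" {
#include <libavcodec/avcodec.h>
}

/* Hedged sketch: copy the capture size into the encoder context instead of the
   hardcoded 1920x1080. Field names follow the question's code; the helper
   itself (match_output_size) is hypothetical. */
static void match_output_size(const AVCodecContext *in, AVCodecContext *out)
{
    /* MPEG-4/H.264 encoders with yuv420p expect even dimensions, so round down. */
    out->width  = in->width  & ~1;
    out->height = in->height & ~1;
}

Called as match_output_size(pAVCodecContext, outAVCodecContext) in place of the two hardcoded assignments, and before avcodec_open2(), the sws_scale() call in CaptureVideoFrames() should then effectively perform a 1:1 conversion rather than an implicit rescale.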



GitHub link: https://github.com/abdullahfarwees/screen-recorder-ffmpeg-cpp


-
Error: Output format mp4 is not available
12 April 2024, by alpaca pwaa
I'm using fluent-ffmpeg in my Next.js application. I'm trying to process a video, with an explicit output format, and stream it to my S3 bucket, but it keeps failing: the terminal keeps throwing "Error: Error: Output format mp4 is not available". I have already checked my ffmpeg build ("ffmpeg -formats") and confirmed that mp4 is supported for both muxing and demuxing, and I've tried solutions from other forums, but none of them work for me.


createVideo: privateProcedure
 .input(
 z.object({
 fileId: z.string(),
 })
 )
 .mutation(async ({ ctx, input }) => {
 const { getUser } = getKindeServerSession();
 const user = await getUser();

 if (!user || !user.id || !user.email) {
 throw new TRPCError({ code: "UNAUTHORIZED" });
 }

 const dbUser = await db.user.findFirst({
 where: {
 id: user.id,
 },
 });

 if (!dbUser) {
 throw new TRPCError({
 code: "UNAUTHORIZED",
 message: "User not found in the database.",
 });
 }

 const putObjectCommand = new PutObjectCommand({
 Bucket: process.env.AWS_BUCKET_NAME!,
 Key: generateFileName(),
 });

 const s3 = new S3Client({
 region: process.env.AWS_BUCKET_REGION!,
 credentials: {
 accessKeyId: process.env.AWS_ACCESS_KEY!,
 secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY!,
 },
 });

 const singedURL = await getSignedUrl(s3, putObjectCommand, {
 expiresIn: 60,
 });

 const ffmpeg = require("fluent-ffmpeg");
 const passthroughStream = new PassThrough();

 ffmpeg({ source: "./template1.mp4" })
 .on("end", async () => {
 console.log("Job done");
 await uploadToS3(passthroughStream);
 })
 .on("error", (error: string) => {
 console.error("Error:", error);
 throw new Error("Error processing video");
 })
 .videoFilter({
 filter: "drawtext",
 options: {
 text: "hi",
 fontsize: 24,
 fontcolor: "white",
 x: "(w-text_w)/2",
 y: "(h-text_h)/2",
 box: 1,
 boxcolor: "black@0.5",
 boxborderw: 5,
 fontfile: "/Windows/fonts/calibri.ttf",
 },
 })
 .videoCodec("libx264")
 .outputFormat("mp4")
 .outputOptions(["-movflags frag_keyframe+empty_moov"])
 .pipe(passthroughStream, { end: true });

 const uploadToS3 = async (stream: PassThrough) => {
 const upload = new Upload({
 client: s3,
 params: {
 Bucket: process.env.AWS_BUCKET_NAME!,
 Key: generateFileName(),
 Body: stream,
 },
 });
 await upload.done();
 };

 await new Promise((resolve, reject) => {
 passthroughStream.on("end", resolve);
 passthroughStream.on("error", reject);
 });

 const createdVideo = await db.video.create({
 data: {
 name: "Test Name",
 url: singedURL.split("?")[0],
 key: singedURL,
 fileId: input.fileId,
 },
 });

 return createdVideo;
 }),



Here's the ffmpeg log.


ffmpeg started on 2024-04-11 at 20:58:56
Report written to "ffmpeg-20240411-205856.log"
Log level: 48
Command line:
"C:\\ProgramData\\chocolatey\\lib\\ffmpeg-full\\tools\\ffmpeg\\bin\\ffmpeg.exe" -i ./template1.mp4 -filter:v "drawtext=text=hi:fontsize=24:fontcolor=white:x=(w-text_w)/2:y=(h-text_h)/2:box=1:boxcolor=black@0.5:boxborderw=5:fontfile=/Windows/fonts/calibri.ttf" -report pipe:1
ffmpeg version 7.0-full_build-www.gyan.dev Copyright (c) 2000-2024 the FFmpeg developers
 built with gcc 13.2.0 (Rev5, Built by MSYS2 project)
 configuration: --enable-gpl --enable-version3 --enable-static --disable-w32threads --disable-autodetect --enable-fontconfig --enable-iconv --enable-gnutls --enable-libxml2 --enable-gmp --enable-bzlib --enable-lzma --enable-libsnappy --enable-zlib --enable-librist --enable-libsrt --enable-libssh --enable-libzmq --enable-avisynth --enable-libbluray --enable-libcaca --enable-sdl2 --enable-libaribb24 --enable-libaribcaption --enable-libdav1d --enable-libdavs2 --enable-libuavs3d --enable-libxevd --enable-libzvbi --enable-librav1e --enable-libsvtav1 --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs2 --enable-libxeve --enable-libxvid --enable-libaom --enable-libjxl --enable-libopenjpeg --enable-libvpx --enable-mediafoundation --enable-libass --enable-frei0r --enable-libfreetype --enable-libfribidi --enable-libharfbuzz --enable-liblensfun --enable-libvidstab --enable-libvmaf --enable-libzimg --enable-amf --enable-cuda-llvm --enable-cuvid --enable-dxva2 --enable-d3d11va --enable-d3d12va --enable-f
 libavutil 59. 8.100 / 59. 8.100
 libavcodec 61. 3.100 / 61. 3.100
 libavformat 61. 1.100 / 61. 1.100
 libavdevice 61. 1.100 / 61. 1.100
 libavfilter 10. 1.100 / 10. 1.100
 libswscale 8. 1.100 / 8. 1.100
 libswresample 5. 1.100 / 5. 1.100
 libpostproc 58. 1.100 / 58. 1.100
Splitting the commandline.
Reading option '-i' ... matched as input url with argument './template1.mp4'.
Reading option '-filter:v' ... matched as option 'filter' (apply specified filters to audio/video) with argument 'drawtext=text=hi:fontsize=24:fontcolor=white:x=(w-text_w)/2:y=(h-text_h)/2:box=1:boxcolor=black@0.5:boxborderw=5:fontfile=/Windows/fonts/calibri.ttf'.
Reading option '-report' ... matched as option 'report' (generate a report) with argument '1'.
Reading option 'pipe:1' ... matched as output url.
Finished splitting the commandline.
Parsing a group of options: global .
Applying option report (generate a report) with argument 1.
Successfully parsed a group of options.
Parsing a group of options: input url ./template1.mp4.
Successfully parsed a group of options.
Opening an input file: ./template1.mp4.
[AVFormatContext @ 00000262cd0888c0] Opening './template1.mp4' for reading
[file @ 00000262cd0a94c0] Setting default whitelist 'file,crypto,data'
[mov,mp4,m4a,3gp,3g2,mj2 @ 00000262cd0888c0] Format mov,mp4,m4a,3gp,3g2,mj2 probed with size=2048 and score=100
[mov,mp4,m4a,3gp,3g2,mj2 @ 00000262cd0888c0] ISO: File Type Major Brand: isom
[mov,mp4,m4a,3gp,3g2,mj2 @ 00000262cd0888c0] Unknown dref type 0x206c7275 size 12
[mov,mp4,m4a,3gp,3g2,mj2 @ 00000262cd0888c0] Processing st: 0, edit list 0 - media time: 1024, duration: 126981
[mov,mp4,m4a,3gp,3g2,mj2 @ 00000262cd0888c0] Offset DTS by 1024 to make first pts zero.
[mov,mp4,m4a,3gp,3g2,mj2 @ 00000262cd0888c0] Setting codecpar->delay to 2 for stream st: 0
[mov,mp4,m4a,3gp,3g2,mj2 @ 00000262cd0888c0] Before avformat_find_stream_info() pos: 6965 bytes read:32768 seeks:0 nb_streams:1
[h264 @ 00000262cd0bb140] nal_unit_type: 7(SPS), nal_ref_idc: 3
[h264 @ 00000262cd0bb140] Decoding VUI
[h264 @ 00000262cd0bb140] nal_unit_type: 8(PPS), nal_ref_idc: 3
[h264 @ 00000262cd0bb140] nal_unit_type: 7(SPS), nal_ref_idc: 3
[h264 @ 00000262cd0bb140] Decoding VUI
[h264 @ 00000262cd0bb140] nal_unit_type: 8(PPS), nal_ref_idc: 3
[h264 @ 00000262cd0bb140] Decoding VUI
[h264 @ 00000262cd0bb140] nal_unit_type: 6(SEI), nal_ref_idc: 0
[h264 @ 00000262cd0bb140] nal_unit_type: 7(SPS), nal_ref_idc: 3
[h264 @ 00000262cd0bb140] nal_unit_type: 8(PPS), nal_ref_idc: 3
[h264 @ 00000262cd0bb140] nal_unit_type: 5(IDR), nal_ref_idc: 3
[h264 @ 00000262cd0bb140] Decoding VUI
[h264 @ 00000262cd0bb140] Format yuv420p chosen by get_format().
[h264 @ 00000262cd0bb140] Reinit context to 1088x1920, pix_fmt: yuv420p
[h264 @ 00000262cd0bb140] no picture 
[mov,mp4,m4a,3gp,3g2,mj2 @ 00000262cd0888c0] All info found
[mov,mp4,m4a,3gp,3g2,mj2 @ 00000262cd0888c0] After avformat_find_stream_info() pos: 82242 bytes read:82242 seeks:0 frames:1
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from './template1.mp4':
 Metadata:
 major_brand : isom
 minor_version : 512
 compatible_brands: isomiso2avc1mp41
 encoder : Lavf58.76.100
 Duration: 00:00:08.27, start: 0.000000, bitrate: 3720 kb/s
 Stream #0:0[0x1](und), 1, 1/15360: Video: h264 (High) (avc1 / 0x31637661), yuv420p(tv, bt709, progressive), 1080x1920, 3714 kb/s, 30 fps, 30 tbr, 15360 tbn (default)
 Metadata:
 handler_name : VideoHandler
 vendor_id : [0][0][0][0]
Successfully opened the file.
Parsing a group of options: output url pipe:1.
Applying option filter:v (apply specified filters to audio/video) with argument drawtext=text=hi:fontsize=24:fontcolor=white:x=(w-text_w)/2:y=(h-text_h)/2:box=1:boxcolor=black@0.5:boxborderw=5:fontfile=/Windows/fonts/calibri.ttf.
Successfully parsed a group of options.
Opening an output file: pipe:1.
[AVFormatContext @ 00000262cd0b2240] Unable to choose an output format for 'pipe:1'; use a standard extension for the filename or specify the format manually.
[out#0 @ 00000262cd0bb300] Error initializing the muxer for pipe:1: Invalid argument
Error opening output file pipe:1.
Error opening output files: Invalid argument
[AVIOContext @ 00000262cd0a9580] Statistics: 82242 bytes read, 0 seeks



I should be able to stream the processed video to my S3 bucket, but it keeps throwing "Error: Error: Output format mp4 is not available".