
Recherche avancée
Médias (2)
-
Valkaama DVD Label
4 octobre 2011, par
Mis à jour : Février 2013
Langue : English
Type : Image
-
Podcasting Legal guide
16 mai 2011, par
Mis à jour : Mai 2011
Langue : English
Type : Texte
Autres articles (100)
-
MediaSPIP 0.1 Beta version
25 avril 2011, parMediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
The zip file provided here only contains the sources of MediaSPIP in its standalone version.
To get a working installation, you must manually install all-software dependencies on the server.
If you want to use this archive for an installation in "farm mode", you will also need to proceed to other manual (...) -
Personnaliser en ajoutant son logo, sa bannière ou son image de fond
5 septembre 2013, parCertains thèmes prennent en compte trois éléments de personnalisation : l’ajout d’un logo ; l’ajout d’une bannière l’ajout d’une image de fond ;
-
Multilang : améliorer l’interface pour les blocs multilingues
18 février 2011, parMultilang est un plugin supplémentaire qui n’est pas activé par défaut lors de l’initialisation de MediaSPIP.
Après son activation, une préconfiguration est mise en place automatiquement par MediaSPIP init permettant à la nouvelle fonctionnalité d’être automatiquement opérationnelle. Il n’est donc pas obligatoire de passer par une étape de configuration pour cela.
Sur d’autres sites (8256)
-
Error transcoding with FFmpeg : Error : Output format hls is not available
6 mai 2024, par asif mohmdI am using FFmpeg library to transcode a video file into multiple resolutions and create an HLS (HTTP Live Streaming) master playlist.


It takes a video file as input, but it does not give me the output with the HLS playlist. I got an error: "Output format hls is not available". Only the output directory is created.


I am using FFMpeg 7.0 full build version and also tried older versions and ffmpeg essentials and also tried chocolatey.


If I remove the implementation of HLS from this code, it will create 4 different resolution videos in my output.


Note: I just tried this same code on my friend's MacBook, changing only the setFfmpegPath line: "ffmpeg.setFfmpegPath("C :\ffmpeg\bin\ffmpeg.exe") ;" to his ffmpeg directory.
It works perfectly on his MacBook.


import "dotenv/config";
import * as fs from "fs";
import * as path from "path";
import ffmpeg from "fluent-ffmpeg";
import crypto from "crypto";

ffmpeg.setFfmpegPath("C:\\ffmpeg\\bin\\ffmpeg.exe");

/**
 * Saves an uploaded video buffer into the local `input` directory and hands
 * it to `transcodeWithFFmpeg` to produce HLS renditions.
 *
 * @param file - Raw video contents (any value accepted by `fs.writeFile`).
 * @returns An object with `directoryPath`, `filePath`, `fileName` and
 *          `outputDirectoryPath`, or `undefined` when the operation fails
 *          (the error is logged, matching the original contract).
 */
export const FFmpegTranscoder = async (file: any): Promise<any> => {
  try {
    console.log("Starting script");
    console.time("req_time");

    // Random hex name avoids collisions between concurrent uploads.
    const randomName = (bytes = 32) =>
      crypto.randomBytes(bytes).toString("hex");
    const fileName = randomName();
    const directoryPath = path.join(__dirname, "..", "..", "input");
    const filePath = path.join(directoryPath, `${fileName}.mp4`);

    if (!fs.existsSync(directoryPath)) {
      fs.mkdirSync(directoryPath, { recursive: true });
    }

    const paths = await new Promise<any>((resolve, reject) => {
      fs.writeFile(filePath, file, async (err) => {
        if (err) {
          // BUG FIX: the original `throw err` inside this async callback could
          // never be caught by the surrounding try/catch and surfaced as an
          // unhandled rejection — reject the promise instead.
          console.error("Error saving file:", err);
          reject(err);
          return;
        }
        console.log("File saved successfully:", filePath);

        try {
          const outputDirectoryPath = await transcodeWithFFmpeg(
            fileName,
            filePath
          );
          resolve({ directoryPath, filePath, fileName, outputDirectoryPath });
        } catch (error) {
          // BUG FIX: the original only logged here, so the awaited promise
          // never settled and the caller hung forever on transcode failure.
          console.error("Error transcoding with FFmpeg:", error);
          reject(error);
        }
      });
    });
    return paths;
  } catch (e: any) {
    // Preserve original behavior: log the failure and resolve to undefined.
    console.log(e);
  }
};

/**
 * Transcodes the source video into four HLS renditions (144p–1080p) and
 * writes a master playlist referencing every variant.
 *
 * @param fileName - Base name (no extension) used for all generated files.
 * @param filePath - Path of the source video file.
 * @returns The directory containing the variant playlists, segments and the
 *          `<fileName>_master.m3u8` master playlist.
 */
const transcodeWithFFmpeg = async (fileName: string, filePath: string) => {
  const directoryPath = path.join(
    __dirname,
    "..",
    "..",
    "output",
    "hls",
    fileName
  );

  if (!fs.existsSync(directoryPath)) {
    fs.mkdirSync(directoryPath, { recursive: true });
  }

  // Encoding ladder: each target resolution with matching A/V bitrates.
  const resolutions = [
    { resolution: "256x144", videoBitrate: "200k", audioBitrate: "64k" },
    { resolution: "640x360", videoBitrate: "800k", audioBitrate: "128k" },
    { resolution: "1280x720", videoBitrate: "2500k", audioBitrate: "192k" },
    { resolution: "1920x1080", videoBitrate: "5000k", audioBitrate: "256k" },
  ];

  // Peak bandwidth advertised per rendition in the master playlist.
  // Replaces the original nested ternary; the fallback matches its last arm.
  const bandwidthByResolution: Record<string, number> = {
    "256x144": 264000,
    "640x360": 1024000,
    "1280x720": 3072000,
    "1920x1080": 5500000,
  };

  const variantPlaylists: { resolution: string; outputFileName: string }[] = [];

  for (const { resolution, videoBitrate, audioBitrate } of resolutions) {
    console.log(`HLS conversion starting for ${resolution}`);
    const outputFileName = `${fileName}_${resolution}.m3u8`;
    const segmentFileName = `${fileName}_${resolution}_%03d.ts`;

    await new Promise<void>((resolve, reject) => {
      ffmpeg(filePath)
        .outputOptions([
          `-c:v h264`,
          `-b:v ${videoBitrate}`,
          `-c:a aac`,
          `-b:a ${audioBitrate}`,
          `-vf scale=${resolution}`,
          `-f hls`,
          `-hls_time 10`,
          `-hls_list_size 0`,
          // path.join keeps separators consistent on Windows instead of
          // mixing `\` and `/` as the original string concatenation did.
          // NOTE(review): fluent-ffmpeg appears to split each option string
          // on whitespace, so a directory path containing spaces may still
          // break this option — confirm and pass option/value separately
          // if so.
          `-hls_segment_filename ${path.join(directoryPath, segmentFileName)}`,
        ])
        .output(path.join(directoryPath, outputFileName))
        .on("end", () => resolve())
        .on("error", (err) => reject(err))
        .run();
    });
    variantPlaylists.push({ resolution, outputFileName });
    console.log(`HLS conversion done for ${resolution}`);
  }
  console.log(`HLS master m3u8 playlist generating`);

  // BUG FIX: removed the stray dead statement (an empty template literal
  // followed by `;`) that sat in the original map callback.
  let masterPlaylist = variantPlaylists
    .map(({ resolution, outputFileName }) => {
      const bandwidth = bandwidthByResolution[resolution] ?? 5500000;
      return `#EXT-X-STREAM-INF:BANDWIDTH=${bandwidth},RESOLUTION=${resolution}\n${outputFileName}`;
    })
    .join("\n");
  masterPlaylist = `#EXTM3U\n` + masterPlaylist;

  const masterPlaylistFileName = `${fileName}_master.m3u8`;

  const masterPlaylistPath = path.join(directoryPath, masterPlaylistFileName);
  fs.writeFileSync(masterPlaylistPath, masterPlaylist);
  console.log(`HLS master m3u8 playlist generated`);
  return directoryPath;
};
</void></any></any>


My console.log is :


Starting script
 HLS conversion starting for 256x144
 Error transcoding with FFmpeg: Error: Output format hls is not available
 at C:\Users\asifa\Desktop\Genius Grid\Transcode-service\node_modules\fluent-ffmpeg\lib\capabilities.js:589:21
 at nextTask (C:\Users\asifa\Desktop\Genius Grid\Transcode-service\node_modules\async\dist\async.js:5791:13)
 at next (C:\Users\asifa\Desktop\Genius Grid\Transcode-service\node_modules\async\dist\async.js:5799:13)
 at C:\Users\asifa\Desktop\Genius Grid\Transcode-service\node_modules\async\dist\async.js:329:20
 at C:\Users\asifa\Desktop\Genius Grid\Transcode-service\node_modules\fluent-ffmpeg\lib\capabilities.js:549:7
 at handleExit (C:\Users\asifa\Desktop\Genius Grid\Transcode-service\node_modules\fluent-ffmpeg\lib\processor.js:170:11)
 at ChildProcess.<anonymous> (C:\Users\asifa\Desktop\Genius Grid\Transcode-service\node_modules\fluent-ffmpeg\lib\processor.js:184:11)
 at ChildProcess.emit (node:events:518:28)
 at ChildProcess.emit (node:domain:488:12)
 at Process.ChildProcess._handle.onexit (node:internal/child_process:294:12) 
</anonymous>


I am using Windows 11 and FFMpeg version 7.0. I repeatedly checked, using CMD commands, that my FFMpeg was installed correctly and confirmed the environment variables path, experimented with various FFMpeg versions, and tried with FFMpeg full build Chocolatey package.


In Command Line its working perfectly :


PS C:\Users\asifa\Desktop\test fmmpeg> ffmpeg -hide_banner -y -i .\SampleVideo_1280x720_30mb.mp4 -vf scale=w=640:h=360:force_original_aspect_ratio=decrease -c:a aac -b:v 800k -c:v h264 -b:a 128k -f hls -hls_time 14 -hls_list_size 0 -hls_segment_filename beach/480p_%03d.ts beach/480p.m3u8
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from '.\SampleVideo_1280x720_30mb.mp4':
 Metadata:
 major_brand : isom
 minor_version : 512
 compatible_brands: isomiso2avc1mp41
 creation_time : 1970-01-01T00:00:00.000000Z
 encoder : Lavf53.24.2
 Duration: 00:02:50.86, start: 0.000000, bitrate: 1474 kb/s
 Stream #0:0[0x1](und): Video: h264 (Main) (avc1 / 0x31637661), yuv420p(progressive), 1280x720 [SAR 1:1 DAR 16:9], 1086 kb/s, 25 fps, 25 tbr, 12800 tbn (default)
 Metadata:
 creation_time : 1970-01-01T00:00:00.000000Z
 handler_name : VideoHandler
 vendor_id : [0][0][0][0]
 Stream #0:1[0x2](und): Audio: aac (LC) (mp4a / 0x6134706D), 48000 Hz, 5.1, fltp, 383 kb/s (default)
 Metadata:
 creation_time : 1970-01-01T00:00:00.000000Z
 handler_name : SoundHandler
 vendor_id : [0][0][0][0]
Stream mapping:
 Stream #0:0 -> #0:0 (h264 (native) -> h264 (libx264))
 Stream #0:1 -> #0:1 (aac (native) -> aac (native))
Press [q] to stop, [?] for help
[libx264 @ 000001ef1288ec00] using SAR=1/1
[libx264 @ 000001ef1288ec00] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2
[libx264 @ 000001ef1288ec00] profile High, level 3.0, 4:2:0, 8-bit
[libx264 @ 000001ef1288ec00] 264 - core 164 r3190 7ed753b - H.264/MPEG-4 AVC codec - Copyleft 2003-2024 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=-2 threads=11 lookahead_threads=1 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=2 keyint=250 keyint_min=25 scenecut=40 intra_refresh=0 rc_lookahead=40 rc=abr mbtree=1 bitrate=800 ratetol=1.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
Output #0, hls, to 'beach/480p.m3u8':
 Metadata:
 major_brand : isom
 minor_version : 512
 compatible_brands: isomiso2avc1mp41
 encoder : Lavf61.1.100
 Stream #0:0(und): Video: h264, yuv420p(progressive), 640x360 [SAR 1:1 DAR 16:9], q=2-31, 800 kb/s, 25 fps, 90k tbn (default)
 Metadata:
 creation_time : 1970-01-01T00:00:00.000000Z
 handler_name : VideoHandler
 vendor_id : [0][0][0][0]
 encoder : Lavc61.3.100 libx264
 Side data:
 cpb: bitrate max/min/avg: 0/0/800000 buffer size: 0 vbv_delay: N/A
 Stream #0:1(und): Audio: aac (LC), 48000 Hz, 5.1, fltp, 128 kb/s (default)
 Metadata:
 creation_time : 1970-01-01T00:00:00.000000Z
 handler_name : SoundHandler
 vendor_id : [0][0][0][0]
 encoder : Lavc61.3.100 aac
[hls @ 000001ef12482040] Opening 'beach/480p_000.ts' for writing speed=15.5x
[hls @ 000001ef12482040] Opening 'beach/480p.m3u8.tmp' for writing
[hls @ 000001ef12482040] Opening 'beach/480p_001.ts' for writing speed=17.9x
[hls @ 000001ef12482040] Opening 'beach/480p.m3u8.tmp' for writing
[hls @ 000001ef12482040] Opening 'beach/480p_002.ts' for writing speed=17.3x
[hls @ 000001ef12482040] Opening 'beach/480p.m3u8.tmp' for writing
[hls @ 000001ef12482040] Opening 'beach/480p_003.ts' for writing speed=19.4x
[hls @ 000001ef12482040] Opening 'beach/480p.m3u8.tmp' for writing
[hls @ 000001ef12482040] Opening 'beach/480p_004.ts' for writing speed=19.3x
[hls @ 000001ef12482040] Opening 'beach/480p.m3u8.tmp' for writing
[hls @ 000001ef12482040] Opening 'beach/480p_005.ts' for writing speed=19.2x
[hls @ 000001ef12482040] Opening 'beach/480p.m3u8.tmp' for writing
[hls @ 000001ef12482040] Opening 'beach/480p_006.ts' for writing
[hls @ 000001ef12482040] Opening 'beach/480p.m3u8.tmp' for writing
[hls @ 000001ef12482040] Opening 'beach/480p_007.ts' for writing speed=19.4x
[hls @ 000001ef12482040] Opening 'beach/480p.m3u8.tmp' for writing
[hls @ 000001ef12482040] Opening 'beach/480p_008.ts' for writing speed=19.5x
[hls @ 000001ef12482040] Opening 'beach/480p.m3u8.tmp' for writing
[hls @ 000001ef12482040] Opening 'beach/480p_009.ts' for writing speed=19.5x
[hls @ 000001ef12482040] Opening 'beach/480p.m3u8.tmp' for writing
[hls @ 000001ef12482040] Opening 'beach/480p_010.ts' for writing speed=19.4x
[hls @ 000001ef12482040] Opening 'beach/480p.m3u8.tmp' for writing
[hls @ 000001ef12482040] Opening 'beach/480p_011.ts' for writing/A =19.4x
[hls @ 000001ef12482040] Opening 'beach/480p.m3u8.tmp' for writing
[out#0/hls @ 000001ef11d4e880] video:17094KiB audio:2680KiB subtitle:0KiB other streams:0KiB global headers:0KiB muxing overhead: unknown
frame= 4271 fps=485 q=-1.0 Lsize=N/A time=00:02:50.76 bitrate=N/A speed=19.4x
[libx264 @ 000001ef1288ec00] frame I:45 Avg QP:10.29 size: 60418
[libx264 @ 000001ef1288ec00] frame P:1914 Avg QP:14.53 size: 5582
[libx264 @ 000001ef1288ec00] frame B:2312 Avg QP:20.63 size: 1774
[libx264 @ 000001ef1288ec00] consecutive B-frames: 22.9% 11.9% 8.6% 56.6%
[libx264 @ 000001ef1288ec00] mb I I16..4: 15.6% 32.1% 52.2%
[libx264 @ 000001ef1288ec00] mb P I16..4: 0.3% 3.4% 1.2% P16..4: 20.3% 10.0% 13.1% 0.0% 0.0% skip:51.8%
[libx264 @ 000001ef1288ec00] mb B I16..4: 0.1% 0.9% 0.4% B16..8: 17.2% 5.6% 2.8% direct: 2.0% skip:71.0% L0:41.5% L1:44.1% BI:14.4%
[libx264 @ 000001ef1288ec00] final ratefactor: 16.13
[libx264 @ 000001ef1288ec00] 8x8 transform intra:58.4% inter:51.7%
[libx264 @ 000001ef1288ec00] coded y,uvDC,uvAC intra: 86.7% 94.3% 78.8% inter: 12.6% 15.0% 4.5%
[libx264 @ 000001ef1288ec00] i16 v,h,dc,p: 17% 42% 14% 28%
[libx264 @ 000001ef1288ec00] i8 v,h,dc,ddl,ddr,vr,hd,vl,hu: 23% 19% 11% 6% 7% 8% 8% 9% 9%
[libx264 @ 000001ef1288ec00] i4 v,h,dc,ddl,ddr,vr,hd,vl,hu: 23% 18% 12% 6% 9% 9% 8% 8% 7%
[libx264 @ 000001ef1288ec00] i8c dc,h,v,p: 44% 24% 20% 12%
[libx264 @ 000001ef1288ec00] Weighted P-Frames: Y:0.0% UV:0.0%
[libx264 @ 000001ef1288ec00] ref P L0: 78.3% 9.7% 8.8% 3.2%
[libx264 @ 000001ef1288ec00] ref B L0: 92.5% 6.0% 1.5%
[libx264 @ 000001ef1288ec00] ref B L1: 97.1% 2.9%
[libx264 @ 000001ef1288ec00] kb/s:819.63
[aac @ 000001ef128f7c80] Qavg: 452.137



When I use the
.on('start', (cmdline) => console.log(cmdline))
handler together with the -f hls
option, the error "Output format hls is not available" appears, as previously mentioned. But my console.log looks like this if I run my code without the -f hls
option :

Without -f hls command


await new Promise<void>((resolve, reject) => {
 ffmpeg(filePath)
 .outputOptions([
 `-c:v h264`,
 `-b:v ${videoBitrate}`,
 `-c:a aac`,
 `-b:a ${audioBitrate}`,
 `-vf scale=${resolution}`,
 
 `-hls_time 10`,
 `-hls_list_size 0`,
 `-hls_segment_filename ${directoryPath}/${segmentFileName}`,
 ])
 .output(`${directoryPath}/${outputFileName}`)
 .on('start', (cmdline) => console.log(cmdline)) 
 .on("end", () => resolve())
 .on("error", (err) => reject(err))
 .run();
});
</void>


Console.log is :


`Starting script
File saved successfully: C:\Users\asifa\Desktop\Genius Grid\Transcode-service\input\c9fcf43726e617a295b203d5acb7b81658b5f05f80eafc74cee21b053422fef1.mp4
HLS conversion starting for 256x144
ffmpeg -i C:\Users\asifa\Desktop\Genius Grid\Transcode-service\input\c9fcf43726e617a295b203d5acb7b81658b5f05f80eafc74cee21b053422fef1.mp4 -y -c:v h264 -b:v 200k -c:a aac -b:a 64k -vf scale=256x144 -hls_time 10 -hls_list_size 0 -hls_segment_filename C:\Users\asifa\Desktop\Genius Grid\Transcode-service\output\hls\c9fcf43726e617a295b203d5acb7b81658b5f05f80eafc74cee21b053422fef1/c9fcf43726e617a295b203d5acb7b81658b5f05f80eafc74cee21b053422fef1_256x144_%03d.ts C:\Users\asifa\Desktop\Genius Grid\Transcode-service\output\hls\c9fcf43726e617a295b203d5acb7b81658b5f05f80eafc74cee21b053422fef1/c9fcf43726e617a295b203d5acb7b81658b5f05f80eafc74cee21b053422fef1_256x144.m3u8
Error transcoding with FFmpeg: Error: ffmpeg exited with code 2880417800: Unrecognized option 'hls_segment_filename C:\Users\asifa\Desktop\Genius Grid\Transcode-service\output\hls\c9fcf43726e617a295b203d5acb7b81658b5f05f80eafc74cee21b053422fef1/c9fcf43726e617a295b203d5acb7b81658b5f05f80eafc74cee21b053422fef1_256x144_%03d.ts'.
Error splitting the argument list: Option not found`



-
Grand Unified Theory of Compact Disc
1er février 2013, par Multimedia Mike — GeneralThis is something I started writing about a decade ago (and I almost certainly have some of it wrong), back when compact discs still had a fair amount of relevance. Back around 2002, after a few years investigating multimedia technology, I took an interest in compact discs of all sorts. Even though there may seem to be a wide range of CD types, I generally found that they’re all fundamentally the same. I thought I would finally publishing something, incomplete though it may be.
Physical Perspective
There are a lot of ways to look at a compact disc. First, there’s the physical format, where a laser detects where pits/grooves have disturbed the smooth surface (a.k.a. lands). A lot of technical descriptions claim that these lands and pits on a CD correspond to ones and zeros. That’s not actually true, but you have to decide what level of abstraction you care about, and that abstraction is good enough if you only care about the discs from a software perspective.Grand Unified Theory (Software Perspective)
Looking at a disc from a software perspective, I have generally found it useful to view a CD as a combination of a 2 main components :- table of contents (TOC)
- a long string of sectors, each of which is 2352 bytes long
I like to believe that’s pretty much all there is to it. All of the information on a CD is stored as a string of sectors that might be chopped up into a series of anywhere from 1-99 individual tracks. The exact sector locations where these individual tracks begin are defined in the TOC.
Audio CDs (CD-DA / Red Book)
The initial purpose for the compact disc was to store digital audio. The strange sector size of 2352 bytes is an artifact of this original charter. “CD quality audio”, as any multimedia nerd knows, is formally defined as stereo PCM samples that are each 16 bits wide and played at a frequency of 44100 Hz.
(44100 audio frames / 1 second) * (2 samples / audio frame) * (16 bits / 1 sample) * (1 byte / 8 bits) = 176,400 bytes / second (176,400 bytes / 1 second) / (2352 bytes / 1 sector) = 75
75 is the number of sectors required to store a single second of CD-quality audio. A single sector stores 1/75th of a second, or a ‘frame’ of audio (though I think ‘frame’ gets tossed around at all levels when describing CD formats).
The term “red book” is thrown around in relation to audio CDs. There is a series of rainbow books that define various optical disc standards and the red book describes audio CDs.
Basic Data CD-ROMs (Mode 1 / Yellow Book)
Somewhere along the line, someone decided that general digital information could be stored on these discs. Hence, the CD-ROM was born. The standard model above still applies– TOC and string of 2352-byte sectors. However, it’s generally only useful to have a single track on a CD-ROM. Thus, the TOC only lists a single track. That single track can easily span the entire disc (something that would be unusual for a typical audio CD).While the model is mostly the same, the most notable difference between and audio CD and a plain CD-ROM is that, while each sector is 2352 bytes long, only 2048 bytes are used to store actual data payload. The remaining bytes are used for synchronization and additional error detection/correction.
At least, the foregoing is true for mode 1 / form 1 CD-ROMs (which are the most common). “Mode 1″ CD-ROMs are defined by a publication called the yellow book. There is also mode 1 / form 2. This forgoes the additional error detection and correction afforded by form 1 and dedicates 2336 of the 2352 sector bytes to the data payload.
CD-ROM XA (Mode 2 / Green Book)
From a software perspective, these are similar to mode 1 CD-ROMs. There are also 2 forms here. The first form gives a 2048-byte data payload while the second form yields a 2324-byte data payload.Video CD (VCD / White Book)
These are CD-ROM XA discs that carry MPEG-1 video and audio data.Photo CD (Beige Book)
This is something I have never personally dealt with. But it’s supposed to conform to the CD-ROM XA standard and probably fits into my model. It seems to date back to early in the CD-ROM era when CDs were particularly cost prohibitive.Multisession CDs (Blue Book)
Okay, I admit that this confuses me a bit. Multisession discs allow a user to burn multiple sessions to a single recordable disc. I.e., burn a lump of data, then burn another lump at a later time, and the final result will look like all the lumps were recorded as the same big lump. I remember this being incredibly useful and cost effective back when recordable CDs cost around US$10 each (vs. being able to buy a spindle of 100 CD-Rs for US$10 or less now). Studying the cdrom.h file for the Linux OS, I found a system call named CDROMMULTISESSION that returns the sector address of the start of the last session. If I were to hypothesize about how to make this fit into my model, I might guess that the TOC has some hint that the disc was recorded in multisession (which needs to be decided up front) and the CDROMMULTISESSION call is made to find the last session. Or it could be that a disc read initialization operation always leads off with the CDROMMULTISESSION query in order to determine this.I suppose I could figure out how to create a multisession disc with modern software, or possibly dig up a multisession disc from 15+ years ago, and then figure out how it should be read.
CD-i
This type puzzles my as well. I do have some CD-i discs and I thought that I could read them just fine (the last time I looked, which was many years ago). But my research for this blog post has me thinking that I might not have been seeing the entire picture when I first studied my CD-i samples. I was able to see some of the data, but sources indicate that only proper CD-i hardware is able to see all of the data on the disc (apparently, the TOC doesn’t show all of the sectors on disc).Hybrid CDs (Data + Audio)
At some point, it became a notable selling point for an audio CD to have a data track with bonus features. Even more common (particularly in the early era of CD-ROMs) were computer and console games that used the first track of a disc for all the game code and assets and the remaining tracks for beautifully rendered game audio that could also be enjoyed outside the game. Same model : TOC points to the various tracks and also makes notes about which ones are data and which are audio.There seems to be 2 distinct things described above. One type is the mixed mode CD which generally has the data in the first track and the audio in tracks 2..n. Then there is the enhanced CD, which apparently used multisession recording and put the data at the end. I think that the reasoning for this is that most audio CD player hardware would only read tracks from the first session and would have no way to see the data track. This was a positive thing. By contrast, when placing a mixed-mode CD into an audio player, the data track would be rendered as nonsense noise.
Subchannels
There’s at least one small detail that my model ignores : subchannels. CDs can encode bits of data in subchannels in sectors. This is used for things like CD-Text and CD-G. I may need to revisit this.In Summary
There’s still a lot of ground to cover, like how those sectors might be formatted to show something useful (e.g., filesystems), and how the model applies to other types of optical discs. Sounds like something for another post. -
Libavformat/FFMPEG : Muxing into mp4 with AVFormatContext drops the final frame, depending on the number of frames
27 octobre 2020, par Galen LynchI am trying to use libavformat to create a
.mp4
video
with a single h.264 video stream, but the final frame in the resulting file
often has a duration of zero and is effectively dropped from the video.
Strangely enough, whether the final frame is dropped or not depends on how many
frames I try to add to the file. Some simple testing that I outline below makes
me think that I am somehow misconfiguring either theAVFormatContext
or the
h.264 encoder, resulting in two edit lists that sometimes chop off the final
frame. I will also post a simplified version of the code I am using, in case I'm
making some obvious mistake. Any help would be greatly appreciated : I've been
struggling with this issue for the past few days and have made little progress.

I can recover the dropped frame by creating a new mp4 container using
ffmpeg

binary with the copy codec if I use the-ignore_editlist
option. Inspecting
the file with a missing frame usingffprobe
,mp4trackdump
, ormp4file --dump
, shows that the final frame is dropped if its sample time is exactly the
same the end of the edit list. When I make a file that has no dropped frames, it
still has two edit lists : the only difference is that the end time of the edit
list is beyond all samples in files that do not have dropped frames. Though this
is hardly a fair comparison, if I make a.png
for each frame and then generate
a.mp4
withffmpeg
using theimage2
codec and similar h.264 settings, I
produce a movie with all frames present, only one edit list, and similar PTS
times as my mangled movies with two edit lists. In this case, the edit list
always ends after the last frame/sample time.

I am using this command to determine the number of frames in the resulting stream,
though I also get the same number with other utilities :


ffprobe -v error -count_frames -select_streams v:0 -show_entries stream=nb_read_frames -of default=nokey=1:noprint_wrappers=1 video_file_name.mp4



Simple inspection of the file with ffprobe shows no obviously alarming signs to
me, besides the framerate being affected by the missing frame (the target was
24) :


$ ffprobe -hide_banner testing.mp4
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'testing.mp4':
 Metadata:
 major_brand : isom
 minor_version : 512
 compatible_brands: isomiso2avc1mp41
 encoder : Lavf58.45.100
 Duration: 00:00:04.13, start: 0.041016, bitrate: 724 kb/s
 Stream #0:0(und): Video: h264 (High) (avc1 / 0x31637661), yuv420p, 100x100, 722 kb/s, 24.24 fps, 24 tbr, 12288 tbn, 48 tbc (default)
 Metadata:
 handler_name : VideoHandler



The files that I generate programatically always have two edit lists, one of
which is very short. In files both with and without a missing frame, the
duration one of the frames is 0, while all the others have the same duration
(512). You can see this in the
ffmpeg
output for this file that I tried to put
100 frames into, though only 99 are visible despite the file containing all 100
samples.

$ ffmpeg -hide_banner -y -v 9 -loglevel 99 -i testing.mp4 
...
<edited to="to" remove="remove" the="the" class="class" printing="printing">
type:'edts' parent:'trak' sz: 48 100 948
type:'elst' parent:'edts' sz: 40 8 40
track[0].edit_count = 2
duration=41 time=-1 rate=1.000000
duration=4125 time=0 rate=1.000000
type:'mdia' parent:'trak' sz: 808 148 948
type:'mdhd' parent:'mdia' sz: 32 8 800
type:'hdlr' parent:'mdia' sz: 45 40 800
ctype=[0][0][0][0]
stype=vide
type:'minf' parent:'mdia' sz: 723 85 800
type:'vmhd' parent:'minf' sz: 20 8 715
type:'dinf' parent:'minf' sz: 36 28 715
type:'dref' parent:'dinf' sz: 28 8 28
Unknown dref type 0x206c7275 size 12
type:'stbl' parent:'minf' sz: 659 64 715
type:'stsd' parent:'stbl' sz: 151 8 651
size=135 4CC=avc1 codec_type=0
type:'avcC' parent:'stsd' sz: 49 8 49
type:'stts' parent:'stbl' sz: 32 159 651
track[0].stts.entries = 2
sample_count=99, sample_duration=512
sample_count=1, sample_duration=0
...
AVIndex stream 0, sample 99, offset 5a0ed, dts 50688, size 3707, distance 0, keyframe 1
Processing st: 0, edit list 0 - media time: -1, duration: 504
Processing st: 0, edit list 1 - media time: 0, duration: 50688
type:'udta' parent:'moov' sz: 98 1072 1162
...
</edited>


The last frame has zero duration :


$ mp4trackdump -v testing.mp4
...
mp4file testing.mp4, track 1, samples 100, timescale 12288
sampleId 1, size 6943 duration 512 time 0 00:00:00.000 S
sampleId 2, size 3671 duration 512 time 512 00:00:00.041 S
...
sampleId 99, size 3687 duration 512 time 50176 00:00:04.083 S
sampleId 100, size 3707 duration 0 time 50688 00:00:04.125 S



Non-mangled videos that I generate have similar structure, as you can see in
this video that had 99 input frames, all of which are visible in the output.
Even though the sample_duration is set to zero for one of the samples in the
stss box, it is not dropped from the frame count or when reading the frames back
in with ffmpeg.


$ ffmpeg -hide_banner -y -v 9 -loglevel 99 -i testing_99.mp4 
...
type:'elst' parent:'edts' sz: 40 8 40
track[0].edit_count = 2
duration=41 time=-1 rate=1.000000
duration=4084 time=0 rate=1.000000
...
track[0].stts.entries = 2
sample_count=98, sample_duration=512
sample_count=1, sample_duration=0
...
AVIndex stream 0, sample 98, offset 5d599, dts 50176, size 3833, distance 0, keyframe 1
Processing st: 0, edit list 0 - media time: -1, duration: 504
Processing st: 0, edit list 1 - media time: 0, duration: 50184
...



$ mp4trackdump -v testing_99.mp4
...
sampleId 98, size 3814 duration 512 time 49664 00:00:04.041 S
sampleId 99, size 3833 duration 0 time 50176 00:00:04.083 S



One difference that jumps out to me is that the mangled file's second edit list
ends at time 50688, which coincides with the last sample, while the non-mangled
file's edit list ends at 50184, which is after the time of the last sample
at 50176. As I mentioned before, whether the last frame is clipped depends on
the number of frames I encode and mux into the container : 100 input frames
results in 1 dropped frame, 99 results in 0, 98 in 0, 97 in 1, etc...


Here is the code that I used to generate these files, which is a MWE script
version of library functions that I am modifying. It is written in Julia,
which I do not think is important here, and calls the FFMPEG library version
4.3.1. It's more or less a direct translation from of the FFMPEG muxing
demo, although the codec
context here is created before the format context. I am presenting the code that
interacts with ffmpeg first, although it relies on some helper code that I will
put below.


The helper code just makes it easier to work with nested C structs in Julia, and
allows
.
syntax in Julia to be used in place of C's arrow (->
) operator for
field access of struct pointers. Libav structs such asAVFrame
appear as a
thin wrapper typeAVFramePtr
, and similarlyAVStream
appears as
AVStreamPtr
etc... These act like single or double pointers for the purposes
of function calls, depending on the function's type signature. Hopefully it will
be clear enough to understand if you are familiar with working with libav in C,
and I don't think looking at the helper code should be necessary if you don't
want to run the code.

# Function to transfer array to AVPicture/AVFrame
# Copies `img` into the frame's first data plane one row at a time, honoring
# the frame's row stride (`linesize[1]`), which may exceed `width` due to
# alignment padding.
# NOTE(review): assumes 1 byte per pixel (e.g. grayscale) and that `img` rows
# are `frame.width` elements apart — confirm against the caller's pixel format.
function transfer_img_buf_to_frame!(frame, img)
 img_pointer = pointer(img)
 data_pointer = frame.data[1] # Base-1 indexing, get pointer to first data buffer in frame
 for h = 1:frame.height
 data_line_pointer = data_pointer + (h-1) * frame.linesize[1] # base-1 indexing
 img_line_pointer = img_pointer + (h-1) * frame.width
 unsafe_copyto!(data_line_pointer, img_line_pointer, frame.width) # base-1 indexing
 end
end

# Function to transfer AVFrame to AVCodecContext, and AVPacket to AVFormatContext
# Sends `frame` to the encoder (or C_NULL to begin draining when `flush=true`),
# then receives every pending packet, rescales its timestamps from the codec
# time base to the stream time base, and muxes it via
# av_interleaved_write_frame. Returns the last receive-packet status
# (typically -EAGAIN or EOF on a clean exit).
function encode_mux!(packet, format_context, frame, codec_context; flush = false)
 if flush
 # A null frame switches the encoder into flush (draining) mode.
 fret = avcodec_send_frame(codec_context, C_NULL)
 else
 fret = avcodec_send_frame(codec_context, frame)
 end
 # EAGAIN means the encoder's input queue is full — retried below after
 # draining; EOF is expected during flush. Anything else is fatal.
 if fret < 0 && !in(fret, [-Libc.EAGAIN, VIO_AVERROR_EOF])
 error("Error $fret sending a frame for encoding")
 end

 pret = Cint(0)
 while pret >= 0
 pret = avcodec_receive_packet(codec_context, packet)
 if pret == -Libc.EAGAIN || pret == VIO_AVERROR_EOF
 # No more packets available right now (or stream fully drained).
 break
 elseif pret < 0
 error("Error $pret during encoding")
 end
 stream = format_context.streams[1] # Base-1 indexing
 # Convert packet PTS/DTS/duration from codec to stream time base.
 av_packet_rescale_ts(packet, codec_context.time_base, stream.time_base)
 packet.stream_index = 0
 ret = av_interleaved_write_frame(format_context, packet)
 ret < 0 && error("Error muxing packet: $ret")
 end
 # If the initial send was rejected with EAGAIN, the queue should now have
 # room after draining — retry the send once.
 if !flush && fret == -Libc.EAGAIN && pret != VIO_AVERROR_EOF
 fret = avcodec_send_frame(codec_context, frame)
 if fret < 0 && fret != VIO_AVERROR_EOF
 error("Error $fret sending a frame for encoding")
 end
 end
 return pret
end

# Set parameters of test movie
nframe = 100
width, height = 100, 100
framerate = 24
gop = 0                   # GOP size 0 requests intra-only encoding
codec_name = "libx264"
filename = "testing.mp4"

# The encoder requires even frame dimensions for its pixel formats
((width % 2 !=0) || (height % 2 !=0)) && error("Encoding error: Image dims must be a multiple of two")

# Make test images: one random GRAY8 buffer per frame
imgstack = map(x->rand(UInt8,width,height),1:nframe);

pix_fmt = AV_PIX_FMT_GRAY8
framerate_rat = Rational(framerate)

codec = avcodec_find_encoder_by_name(codec_name)
codec == C_NULL && error("Codec '$codec_name' not found")

# Allocate AVCodecContext
codec_context_p = avcodec_alloc_context3(codec) # raw pointer
codec_context_p == C_NULL && error("Could not allocate AVCodecContext")
# Easier to work with pointer that acts like a c struct pointer, type defined below
codec_context = AVCodecContextPtr(codec_context_p)

codec_context.width = width
codec_context.height = height
# Codec time base is the reciprocal of the frame rate: one tick per frame
codec_context.time_base = AVRational(1/framerate_rat)
codec_context.framerate = AVRational(framerate_rat)
codec_context.pix_fmt = pix_fmt
codec_context.gop_size = gop

ret = avcodec_open2(codec_context, codec, C_NULL)
ret < 0 && error("Could not open codec: Return code $(ret)")

# Allocate AVFrame and wrap it in a Julia convenience type
frame_p = av_frame_alloc()
frame_p == C_NULL && error("Could not allocate AVFrame")
frame = AVFramePtr(frame_p)

# Frame geometry/pixel format must match the encoder configuration above
frame.format = pix_fmt
frame.width = width
frame.height = height

# Allocate picture buffers for frame (second arg 0 = default alignment)
ret = av_frame_get_buffer(frame, 0)
ret < 0 && error("Could not allocate the video frame data")

# Allocate AVPacket and wrap it in a Julia convenience type
packet_p = av_packet_alloc()
packet_p == C_NULL && error("Could not allocate AVPacket")
packet = AVPacketPtr(packet_p)

# Allocate AVFormatContext and wrap it in a Julia convenience type.
# Passing NULL format/format_name lets libav pick the muxer from the
# filename extension (.mp4 here).
format_context_dp = Ref(Ptr{AVFormatContext}()) # double pointer
ret = avformat_alloc_output_context2(format_context_dp, C_NULL, C_NULL, filename)
if ret != 0 || format_context_dp[] == C_NULL
    error("Could not allocate AVFormatContext")
end
format_context = AVFormatContextPtr(format_context_dp)

# Add video stream to AVFormatContext and configure it to use the encoder made above
stream_p = avformat_new_stream(format_context, C_NULL)
stream_p == C_NULL && error("Could not allocate output stream")
stream = AVStreamPtr(stream_p) # Wrap this pointer in a convenience type

# NOTE(review): the muxer may replace stream.time_base with its own value
# during avformat_write_header — confirm before relying on it afterwards.
stream.time_base = codec_context.time_base
stream.avg_frame_rate = 1 / convert(Rational, stream.time_base)
ret = avcodec_parameters_from_context(stream.codecpar, codec_context)
ret < 0 && error("Could not set parameters of stream")

# Open the AVIOContext
pb_ptr = field_ptr(format_context, :pb)
# This following is just a call to avio_open, with a bit of extra protection
# so the Julia garbage collector does not destroy format_context during the call
ret = GC.@preserve format_context avio_open(pb_ptr, filename, AVIO_FLAG_WRITE)
ret < 0 && error("Could not open file $filename for writing")

# Write the header
ret = avformat_write_header(format_context, C_NULL)
ret < 0 && error("Could not write header")

# Encode and mux each frame
for i in 1:nframe # iterate from 1 to nframe
    img = imgstack[i] # base-1 indexing
    ret = av_frame_make_writable(frame)
    ret < 0 && error("Could not make frame writable")
    transfer_img_buf_to_frame!(frame, img)
    # PTS must be zero-based. With `frame.pts = i` the first sample lands at
    # t = 1/framerate, so the mov/mp4 muxer emits an edit list shifting the
    # whole track back by one frame — players honoring edit lists (ffplay,
    # mpv) then clip the final frame(s), which is exactly the mangled-elst
    # symptom described above. Starting at 0 avoids the edit list entirely.
    frame.pts = i - 1
    encode_mux!(packet, format_context, frame, codec_context)
end

# Flush the encoder (drain any buffered packets)
encode_mux!(packet, format_context, frame, codec_context; flush = true)

# Write the trailer (finalizes the container metadata for mp4)
av_write_trailer(format_context)

# Close the AVIOContext
pb_ptr = field_ptr(format_context, :pb) # get pointer to format_context.pb
ret = GC.@preserve format_context avio_closep(pb_ptr) # simply a call to avio_closep
ret < 0 && error("Could not free AVIOContext")

# Deallocation
avcodec_free_context(codec_context)
av_frame_free(frame)
av_packet_free(packet)
avformat_free_context(format_context)


Below is the helper code that makes accessing pointers to nested c structs not a
total pain in Julia. If you try to run the code yourself, please enter this in
before the logic of the code shown above. It requires
VideoIO.jl, a Julia wrapper to libav.


# Convenience type and methods to make the above code look more like C
using Base: RefValue, fieldindex

import Base: unsafe_convert, getproperty, setproperty!, getindex, setindex!,
 unsafe_wrap, propertynames

# VideoIO is a Julia wrapper to libav
#
# Bring bindings to libav library functions into namespace
using VideoIO: AVCodecContext, AVFrame, AVPacket, AVFormatContext, AVRational,
 AVStream, AV_PIX_FMT_GRAY8, AVIO_FLAG_WRITE, AVFMT_NOFILE,
 avformat_alloc_output_context2, avformat_free_context, avformat_new_stream,
 av_dump_format, avio_open, avformat_write_header,
 avcodec_parameters_from_context, av_frame_make_writable, avcodec_send_frame,
 avcodec_receive_packet, av_packet_rescale_ts, av_interleaved_write_frame,
 avformat_query_codec, avcodec_find_encoder_by_name, avcodec_alloc_context3,
 avcodec_open2, av_frame_alloc, av_frame_get_buffer, av_packet_alloc,
 avio_closep, av_write_trailer, avcodec_free_context, av_frame_free,
 av_packet_free

# Submodule of VideoIO
using VideoIO: AVCodecs

# Need to import this function from Julia's Base to add more methods
import Base: convert

const VIO_AVERROR_EOF = -541478725 # AVERROR_EOF

# Conversions between FFmpeg's AVRational and Julia's Rational, because it's
# hard to access the AV rational macros with Julia's C interface.
function convert(::Type{Rational{T}}, r::AVRational) where T
    return Rational{T}(r.num, r.den)
end
function convert(::Type{Rational}, r::AVRational)
    return Rational(r.num, r.den)
end
function convert(::Type{AVRational}, r::Rational)
    return AVRational(numerator(r), denominator(r))
end

"""
    mutable struct NestedCStruct{T}

Wraps a pointer to a C struct, and acts like a double pointer to that memory.
The methods below will automatically convert it to a single pointer if needed
for a function call, and make interacting with it in Julia look (more) similar
to interacting with it in C, except '->' in C is replaced by '.' in Julia.
"""
mutable struct NestedCStruct{T}
    data::RefValue{Ptr{T}} # Ref to the raw pointer, so it can be passed as T** as well
end
# Outer convenience constructors: wrap a raw pointer directly.
NestedCStruct{T}(a::Ptr) where T = NestedCStruct{T}(Ref(a))
NestedCStruct(a::Ptr{T}) where T = NestedCStruct{T}(a)

# Convenience aliases: each behaves like the corresponding libav struct pointer.
const AVCodecContextPtr = NestedCStruct{AVCodecContext}
const AVFramePtr = NestedCStruct{AVFrame}
const AVPacketPtr = NestedCStruct{AVPacket}
const AVFormatContextPtr = NestedCStruct{AVFormatContext}
const AVStreamPtr = NestedCStruct{AVStream}

# Compute a typed pointer to (element `index` of) field `field` inside the
# struct that `struct_pointer` points to. `index` is base-1, matching Julia
# conventions, so `index = 1` addresses the field itself / its first element.
function field_ptr(::Type{S}, struct_pointer::Ptr{T}, field::Symbol,
                   index::Integer = 1) where {S,T}
    fieldpos = fieldindex(T, field)
    field_pointer = convert(Ptr{S}, struct_pointer) +
        fieldoffset(T, fieldpos) + (index - 1) * sizeof(S)
    return field_pointer
end

# Convenience method: infer the element type `S` from the field's declared type.
field_ptr(a::Ptr{T}, field::Symbol, args...) where T =
    field_ptr(fieldtype(T, field), a, field, args...)

# Return whether `p` is non-NULL; with `err = true` (the default) a NULL
# pointer raises an error instead of returning false.
function check_ptr_valid(p::Ptr, err::Bool = true)
    if p == C_NULL
        err && error("Invalid pointer")
        return false
    end
    return true
end

# Passing a NestedCStruct where `Ptr{T}` is expected dereferences the Ref
# (single-pointer view)...
unsafe_convert(::Type{Ptr{T}}, ap::NestedCStruct{T}) where T =
    getfield(ap, :data)[]
# ...while `Ptr{Ptr{T}}` passes the address of the Ref itself
# (double-pointer view, e.g. for avformat_alloc_output_context2).
unsafe_convert(::Type{Ptr{Ptr{T}}}, ap::NestedCStruct{T}) where T =
    unsafe_convert(Ptr{Ptr{T}}, getfield(ap, :data))

# Validity check for the wrapped pointer; GC.@preserve keeps `a` (and the Ref
# it owns) alive while the raw pointer is inspected.
function check_ptr_valid(a::NestedCStruct{T}, args...) where T
    p = unsafe_convert(Ptr{T}, a)
    GC.@preserve a check_ptr_valid(p, args...)
end

# Re-wrap loaded pointer values in NestedCStruct so chained field access
# (e.g. `ctx.streams[1].time_base`) keeps working; other values pass through.
nested_wrap(x::Ptr{T}) where T = NestedCStruct(x)
nested_wrap(x) = x

# `ap.s` — load field `s` from the pointed-to struct (C's `ap->s`).
function getproperty(ap::NestedCStruct{T}, s::Symbol) where T
    check_ptr_valid(ap)
    p = unsafe_convert(Ptr{T}, ap)
    res = GC.@preserve ap unsafe_load(field_ptr(p, s))
    nested_wrap(res)
end

# `ap.s = x` — store `x` into field `s` of the pointed-to struct (C's
# `ap->s = x`). unsafe_store! converts `x` to the field's declared type.
function setproperty!(ap::NestedCStruct{T}, s::Symbol, x) where T
    check_ptr_valid(ap)
    p = unsafe_convert(Ptr{T}, ap)
    fp = field_ptr(p, s)
    GC.@preserve ap unsafe_store!(fp, x)
end

# `ap[i]` — load the i-th (base-1) element, treating the wrapped pointer as
# the start of a C array of T.
function getindex(ap::NestedCStruct{T}, i::Integer) where T
    check_ptr_valid(ap)
    p = unsafe_convert(Ptr{T}, ap)
    res = GC.@preserve ap unsafe_load(p, i)
    nested_wrap(res)
end

# `ap[i] = x` — store `x` at (base-1) element `i` of the wrapped C array.
#
# NOTE: Julia lowers `a[i] = x` to `setindex!(a, x, i)` — the VALUE comes
# before the INDEX. The previous signature `setindex!(ap, i::Integer, x)` had
# the two swapped, so `ap[i] = x` stored the index at the offset given by the
# value (and only dispatched at all when the value was an Integer).
function setindex!(ap::NestedCStruct{T}, x, i::Integer) where T
    check_ptr_valid(ap)
    p = unsafe_convert(Ptr{T}, ap)
    GC.@preserve ap unsafe_store!(p, x, i)
end

# Wrap `i` elements starting at the stored pointer in a Julia array of type
# `T` (no copy is made; the caller must keep the underlying memory alive).
function unsafe_wrap(::Type{T}, ap::NestedCStruct{S}, i) where {S, T}
    check_ptr_valid(ap)
    p = unsafe_convert(Ptr{S}, ap)
    GC.@preserve ap unsafe_wrap(T, p, i)
end

# field_ptr variants that accept the wrapped pointer type directly, guarding
# against NULL and keeping the wrapper alive across the raw-pointer call.
function field_ptr(::Type{S}, a::NestedCStruct{T}, field::Symbol,
                   args...) where {S, T}
    check_ptr_valid(a)
    p = unsafe_convert(Ptr{T}, a)
    GC.@preserve a field_ptr(S, p, field, args...)
end

field_ptr(a::NestedCStruct{T}, field::Symbol, args...) where T =
    field_ptr(fieldtype(T, field), a, field, args...)

# Expose both the C struct's fields and the wrapper's own field(s), e.g. for
# tab completion in the REPL.
propertynames(ap::T) where {S, T<:NestedCStruct{S}} = (fieldnames(S)...,
    fieldnames(T)...)




Edit : Some things that I have already tried


- 

- Explicitly setting the stream duration to be the same number as the number of frames that I add, or a few more beyond that
- Explicitly setting the stream start time to zero, while the first frame has a PTS of 1
- Playing around with encoder parameters, as well as
gop_size
, using B frames, etc. - Setting the private data for the mov/mp4 muxer to set the movflag
negative_cts_offsets
- Changing the framerate
- Tried different pixel formats, such as AV_PIX_FMT_YUV420P














Also, to be clear: while I can work around this problem by remuxing the file into a new one that ignores the edit lists, I am hoping not to produce damaged mp4 files in the first place.