
Other articles (38)
-
Automatic backup of SPIP channels
1 April 2010 — When setting up an open platform, it is important for hosts to have reasonably regular backups available in order to guard against any potential problem.
This task relies on two SPIP plugins: Saveauto, which performs regular backups of the database as a MySQL dump (usable in phpMyAdmin), and mes_fichiers_2, which builds a zip archive of the site's important data (the documents, the elements (...) A command-line equivalent is sketched after this list. -
MediaSPIP v0.2
21 June 2013 — MediaSPIP 0.2 is the first stable version of MediaSPIP.
Its official release date is 21 June 2013, announced here.
The zip file provided here contains only the MediaSPIP sources in standalone form.
As with the previous version, all software dependencies must be installed manually on the server.
If you want to use this archive for a farm-mode installation, you will also need to make further modifications (...) -
Making files available
14 April 2011 — By default, when it is initialised, MediaSPIP does not let visitors download files, whether originals or the results of their transformation or encoding. It only lets them be viewed.
However, it is possible and easy to allow visitors to access these documents in various forms.
All of this happens in the template configuration page. Go to the channel's administration area and choose in the navigation (...)
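
As mentioned in the backup article above, the same two-part backup can be reproduced outside SPIP with standard tools. A minimal sketch; the database name, user, and IMG/ directory here are assumptions, not values taken from the article:


# database dump, as the Saveauto plugin produces (phpMyAdmin-compatible SQL)
mysqldump -u spip_user -p spip > backup-$(date +%F).sql
# zip archive of the site's important data, as mes_fichiers_2 produces
zip -r backup-$(date +%F).zip IMG/
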
Sur d’autres sites (4896)
-
How can you combine multiple video files with FFmpeg, merging the audio track as well?
19 December 2023, by Codrut — I'm trying to combine multiple MP4 files in Delphi with the FFmpeg video library. I have the headers unit with all the functions. All videos are MPEG-4, and so is the destination output file.

I found this question on Stack Overflow asking the same thing: how to combine video files while keeping the audio and video tracks. I translated the answers to Delphi, and while the code executes successfully, the output file is invalid and cannot be played.


Here is my implementation:


var
  Files: TArray<PAnsiChar>;
  Output: PAnsiChar;

  I, S: integer;

  i_fmt_ctx: PAVFormatContext;
  i_video_stream: PAVStream;
  o_fmt_ctx: PAVFormatContext;
  o_video_stream: PAVStream;

  P: PPAVStream;
begin
  SetLength(Files, 2);
  Files[0] := PAnsiChar('.\Clips\file9.mp4');
  Files[1] := PAnsiChar('.\Clips\file10.mp4');
  Output := PAnsiChar('.\Output\out.mp4');

  avcodec_register_all();
  av_register_all();

  (* must be nil so that avformat_open_input() allocates a new context *)
  i_fmt_ctx := nil;

  if avformat_open_input(@i_fmt_ctx, Files[0], nil, nil) <> 0 then
    raise Exception.Create('Could not open file');

  if avformat_find_stream_info(i_fmt_ctx, nil) < 0 then
    raise Exception.Create('Could not find stream info');

  (* Find the first video stream *)
  i_video_stream := nil;
  P := i_fmt_ctx.streams;
  for I := 0 to i_fmt_ctx.nb_streams - 1 do
  begin
    if P^.codec.codec_type = AVMEDIA_TYPE_VIDEO then
    begin
      i_video_stream := P^;
      Break;
    end;
    Inc(P);
  end;
  if i_video_stream = nil then
    raise Exception.Create('Could not find video stream');

  avformat_alloc_output_context2(@o_fmt_ctx, nil, nil, Output);

  (*
    Since all input files are supposed to be identical (framerate,
    dimensions, color format, ...), we can safely set the output codec
    values from the first input file.
  *)
  o_video_stream := avformat_new_stream(o_fmt_ctx, nil);

  var c: PAVCodecContext;
  c := o_video_stream.codec;
  c.bit_rate := 400000;
  c.codec_id := i_video_stream.codec.codec_id;
  c.codec_type := i_video_stream.codec.codec_type;
  c.time_base.num := i_video_stream.time_base.num;
  c.time_base.den := i_video_stream.time_base.den;
  c.width := i_video_stream.codec.width;
  c.height := i_video_stream.codec.height;
  c.pix_fmt := i_video_stream.codec.pix_fmt;
  c.flags := i_video_stream.codec.flags;
  c.flags := c.flags or CODEC_FLAG_GLOBAL_HEADER;
  c.me_range := i_video_stream.codec.me_range;
  c.max_qdiff := i_video_stream.codec.max_qdiff;

  c.qmin := i_video_stream.codec.qmin;
  c.qmax := i_video_stream.codec.qmax;

  c.qcompress := i_video_stream.codec.qcompress;

  c.extradata := i_video_stream.codec.extradata;
  c.extradata_size := i_video_stream.codec.extradata_size;

  avio_open(@o_fmt_ctx.pb, Output, AVIO_FLAG_WRITE);

  (* yes! this is redundant *)
  avformat_close_input(@i_fmt_ctx);

  avformat_write_header(o_fmt_ctx, nil);

  var last_pts: int64; last_pts := 0;
  var last_dts: int64; last_dts := 0;
  for I := 1 to High(Files) do
  begin
    i_fmt_ctx := nil;

    if avformat_open_input(@i_fmt_ctx, Files[I], nil, nil) <> 0 then
      raise Exception.Create('Could not open input file');

    if avformat_find_stream_info(i_fmt_ctx, nil) < 0 then
      raise Exception.Create('Could not find stream info');

    av_dump_format(i_fmt_ctx, 0, Files[I], 0);

    (* we only use the first video stream of each input file *)
    i_video_stream := nil;

    P := i_fmt_ctx.streams;
    for S := 0 to i_fmt_ctx.nb_streams - 1 do
    begin
      if P^.codec.codec_type = AVMEDIA_TYPE_VIDEO then
      begin
        i_video_stream := P^;
        Break;
      end;
      Inc(P);
    end;

    if i_video_stream = nil then
      raise Exception.Create('Could not find video stream');

    var pts, dts: int64;
    pts := 0; dts := 0;
    while true do
    begin
      var i_pkt: TAVPacket;
      av_init_packet(@i_pkt);
      i_pkt.size := 0;
      i_pkt.data := nil;

      if av_read_frame(i_fmt_ctx, @i_pkt) < 0 then
        Break;
      (*
        pts and dts should increase monotonically,
        and pts should be >= dts
      *)
      i_pkt.flags := i_pkt.flags or AV_PKT_FLAG_KEY;
      pts := i_pkt.pts;
      Inc(i_pkt.pts, last_pts);
      dts := i_pkt.dts;
      Inc(i_pkt.dts, last_dts);
      i_pkt.stream_index := 0;

      // Write the packet to the output file
      av_interleaved_write_frame(o_fmt_ctx, @i_pkt);
    end;

    Inc(last_dts, dts);
    Inc(last_pts, pts);

    avformat_close_input(@i_fmt_ctx);
  end;

  av_write_trailer(o_fmt_ctx);

  avcodec_close(o_fmt_ctx.streams^.codec);
  av_freep(@o_fmt_ctx.streams^.codec);
  av_freep(@o_fmt_ctx.streams);

  avio_close(o_fmt_ctx.pb);
  av_free(o_fmt_ctx);


This is a translation of Михаил Чеботарев's answer.

Even if the code worked, I see no handling of the AVMEDIA_TYPE_AUDIO stream, which means this answer solves only half of the problem, since it only combines the video streams.

Another approach I tried was the UBitmaps2Video FFmpeg implementation, which successfully merges the video files, but again only the video stream, with no audio.


I also tried converting the audio stream manually with the BASS audio library. It was able to read the audio and write it to a single WAV file, which I then converted to MP3, finally muxing the combined video file and the MP3 file with MuxStreams2. Unfortunately, the audio and video do not align properly, and I was unable to pinpoint the issue.

Currently, the only working option is to use the precompiled FFmpeg executables, calling ShellExecute with the appropriate parameters to combine the videos. More precisely:


ffmpeg -f concat -safe 0 -i video-list.txt -c copy output.mp4
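
For reference, the concat demuxer reads its inputs from a plain text list, one file directive per line; the video-list.txt used above would look like this (the file names are assumed from the earlier snippet):


file './Clips/file9.mp4'
file './Clips/file10.mp4'
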



But I would still rather use the FFmpeg headers in Delphi to combine the videos, as that allows for progress indicators, more control over playback, and the ability to pause the thread at any point.


So, why does my implementation for merging video files not work? And what is a good method to include the audio stream as well?


-
TS video copied to MP4, missing first 3 frames when programmatically read (ffmpeg bug)
3 September 2023, by Vasilis Lemonidis — Running:


ffmpeg -i test.ts -fflags +genpts -c copy -y test.mp4



for this test.ts, which has 30 frames readable by OpenCV, I end up with 28 frames, of which 27 are readable by OpenCV. More specifically:


ffprobe -v error -select_streams v:0 -count_packets -show_entries stream=nb_read_packets -of csv=p=0 tmp.ts 



returns 30.


ffprobe -v error -select_streams v:0 -count_packets -show_entries stream=nb_read_packets -of csv=p=0 tmp.mp4



returns 28.
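
Note that nb_read_packets counts demuxed packets rather than decoded frames. As an additional check (a suggestion, not part of the original post), the same probe can count actual decodable frames with -count_frames:


ffprobe -v error -select_streams v:0 -count_frames -show_entries stream=nb_read_frames -of csv=p=0 tmp.mp4
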


Using OpenCV in this manner:


import cv2

cap = cv2.VideoCapture(tmp_path)  # tmp_path points at the .ts or .mp4 file
readMat = []
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    readMat.append(frame)
cap.release()



I get 30 frames for the ts file, but only 27 for the mp4.


Could someone explain these discrepancies? I get no error during the conversion from ts to mp4:


ffmpeg version N-111746-gd53acf452f Copyright (c) 2000-2023 the FFmpeg developers
 built with gcc 11.3.0 (GCC)
 configuration: --ld=g++ --bindir=/bin --extra-libs='-lpthread -lm' --pkg-config-flags=--static --enable-static --enable-gpl --enable-libaom --enable-libass --enable-libfreetype --enable-libmp3lame --enable-libopus --enable-libsvtav1 --enable-libdav1d --enable-libvorbis --enable-libvpx --enable-libx264 --enable-libx265 --enable-nonfree --enable-cuda-nvcc --enable-cuvid --enable-nvenc --enable-libnpp 
 libavutil 58. 16.101 / 58. 16.101
 libavcodec 60. 23.100 / 60. 23.100
 libavformat 60. 10.100 / 60. 10.100
 libavdevice 60. 2.101 / 60. 2.101
 libavfilter 9. 10.100 / 9. 10.100
 libswscale 7. 3.100 / 7. 3.100
 libswresample 4. 11.100 / 4. 11.100
 libpostproc 57. 2.100 / 57. 2.100
[mpegts @ 0x4237240] DTS discontinuity in stream 0: packet 5 with DTS 306003, packet 6 with DTS 396001
Input #0, mpegts, from 'tmp.ts':
 Duration: 00:00:21.33, start: 3.400000, bitrate: 15 kb/s
 Program 1 
 Metadata:
 service_name : Service01
 service_provider: FFmpeg
 Stream #0:0[0x100]: Video: h264 (High) ([27][0][0][0] / 0x001B), yuv420p(progressive), 300x300, 1 fps, 3 tbr, 90k tbn
Output #0, mp4, to 'test.mp4':
 Metadata:
 encoder : Lavf60.10.100
 Stream #0:0: Video: h264 (High) (avc1 / 0x31637661), yuv420p(progressive), 300x300, q=2-31, 1 fps, 3 tbr, 90k tbn
Stream mapping:
 Stream #0:0 -> #0:0 (copy)
Press [q] to stop, [?] for help
[out#0/mp4 @ 0x423e280] video:25kB audio:0kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 4.192123%
frame= 30 fps=0.0 q=-1.0 Lsize= 26kB time=00:00:21.00 bitrate= 10.3kbits/s speed=1e+04x 



Additional information


The video I am processing originates from a continuous stitching operation that appends still images to a ts video, produced by the update method of this class:

import cv2
import os
import shutil
import subprocess
import logging
import numpy as np
from typing import Optional
from tempfile import NamedTemporaryFile

LOGGER = logging.getLogger(__name__)


class VideoUpdater:
    def __init__(
        self, video_path: str, framerate: int, timePerFrame: Optional[int] = None
    ):
        """
        Video updater takes in a video path, and updates it using a supplied frame, based on a given framerate.
        Args:
            video_path: str: Specify the path to the video file
            framerate: int: Set the frame rate of the video
        """
        if not video_path.endswith(".mp4"):
            LOGGER.warning(
                f"File type {os.path.splitext(video_path)[1]} not supported for streaming, switching to ts"
            )
            video_path = os.path.splitext(video_path)[0] + ".mp4"

        self._ps = None
        self.env = {}
        self.ffmpeg = "/usr/bin/ffmpeg "

        self.video_path = video_path
        self.ts_path = video_path.replace(".mp4", ".ts")
        self.tfile = None
        self.framerate = framerate
        self._video = None
        self.last_frame = None
        self.curr_frame = None

    def update(self, frame: np.ndarray):
        if len(frame.shape) == 2:
            frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
        else:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        self.writeFrame(frame)

    def writeFrame(self, frame: np.ndarray):
        """
        The writeFrame function takes a frame and writes it to the video file.
        Args:
            frame: np.ndarray: Write the frame to a temporary file
        """
        tImLFrame = NamedTemporaryFile(suffix=".png")
        tVidLFrame = NamedTemporaryFile(suffix=".ts")

        cv2.imwrite(tImLFrame.name, frame)
        # encode the single still image as a short ts clip
        ps = subprocess.Popen(
            self.ffmpeg
            + rf"-loop 1 -r {self.framerate} -i {tImLFrame.name} -t {self.framerate} -vcodec libx264 -pix_fmt yuv420p -y {tVidLFrame.name}",
            env=self.env,
            shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        ps.communicate()
        if os.path.isfile(self.ts_path):
            # this does not work to watch, as timestamps are not updated
            ps = subprocess.Popen(
                self.ffmpeg
                + rf'-i "concat:{self.ts_path}|{tVidLFrame.name}" -c copy -y {self.ts_path.replace(".ts", ".bak.ts")}',
                env=self.env,
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            ps.communicate()
            shutil.move(self.ts_path.replace(".ts", ".bak.ts"), self.ts_path)
        else:
            shutil.copyfile(tVidLFrame.name, self.ts_path)
        # fixing timestamps, we don't have to wait for this operation
        ps = subprocess.Popen(
            self.ffmpeg
            + rf"-i {self.ts_path} -fflags +genpts -c copy -y {self.video_path}",
            env=self.env,
            shell=True,
            # stdout=subprocess.PIPE,
            # stderr=subprocess.PIPE,
        )
        tImLFrame.close()
        tVidLFrame.close()
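

For illustration, a minimal driver for this class might look as follows; the output path and the synthetic frames are assumptions, not part of the original code:


# hypothetical usage: feed a few synthetic 300x300 frames into the updater
updater = VideoUpdater("out.mp4", framerate=1)
for i in range(30):
    frame = np.full((300, 300, 3), i * 8, dtype=np.uint8)  # flat gray frame
    updater.update(frame)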


