
Media (2)
-
GetID3 - File information block
9 April 2013, by
Updated: May 2013
Language: French
Type: Image
-
GetID3 - Additional buttons
9 April 2013, by
Updated: April 2013
Language: French
Type: Image
Other articles (90)
-
Improving the base version
13 September 2013
Nice multiple selection
The Chosen plugin improves the usability of multiple-selection fields. See the two images below for a comparison.
To do this, simply activate the Chosen plugin (Site's general configuration > Plugin management), then configure it (Templates > Chosen), enabling the use of Chosen on the public site and specifying the form elements to enhance, for example select[multiple] for multiple-selection lists (...) -
Multilang: improving the interface for multilingual blocks
18 February 2011, by
Multilang is an additional plugin that is not activated by default when MediaSPIP is initialized.
After it has been activated, a preconfiguration is set up automatically by MediaSPIP init so that the new feature is operational right away. No configuration step is therefore required. -
APPENDIX: The plugins used specifically for the farm
5 March 2010, by
The central/master site of the farm needs several plugins, beyond those of the channels, to work properly: the Gestion de la mutualisation plugin; the inscription3 plugin, to manage registrations and requests to create a pooled instance as soon as users sign up; the verifier plugin, which provides a field-validation API (used by inscription3); the champs extras v2 plugin, required by inscription3 (...)
On other sites (9755)
-
Python aiortc: How to record audio and video coming from the client in the same file? [closed]
22 December 2024, by Chris P
I have an aiortc app in which an HTML5 client sends data (microphone, camera) to the server.


On the server side I successfully played these two streams separately.


But when I try to record using the aiortc MediaRecorder helper class, only the audio is recorded and the video is dropped (mp4 format).
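
For context, the recording path on the server looks roughly like this (a minimal sketch, assuming the usual aiortc signaling setup; the output name and handler wiring are placeholders):

from aiortc import RTCPeerConnection
from aiortc.contrib.media import MediaRecorder

pc = RTCPeerConnection()
recorder = MediaRecorder("out.mp4")  # placeholder output path

@pc.on("track")
def on_track(track):
    # Both the incoming audio track and the video track feed one recorder.
    recorder.addTrack(track)

# Once negotiation is complete:  await recorder.start()
# When the session ends:         await recorder.stop()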


I think this is due to a sync issue.


Each pair of audio_frame and video_frame has a different time_base (I don't know if this is strange), and they also have different times.


I can share code, but it wouldn't help much right now.


Edit: I also tried to synchronize them client-side, with no luck:


// Synchronize tracks
async function synchronizeTracks(localStream) {
  const videoTrack = localStream.getVideoTracks()[0];
  const audioTrack = localStream.getAudioTracks()[0];

  const syncedStream = new MediaStream();

  // Add tracks to the synchronized stream
  syncedStream.addTrack(videoTrack);
  syncedStream.addTrack(audioTrack);

  // Video and audio processors
  const videoProcessor = new MediaStreamTrackProcessor({ track: videoTrack });
  const audioProcessor = new MediaStreamTrackProcessor({ track: audioTrack });

  const videoReader = videoProcessor.readable.getReader();
  const audioReader = audioProcessor.readable.getReader();

  const videoWriter = new MediaStreamTrackGenerator({ kind: "video" }).writable.getWriter();
  const audioWriter = new MediaStreamTrackGenerator({ kind: "audio" }).writable.getWriter();

  const syncThreshold = 5; // Maximum allowable time difference in milliseconds
  let baseTimestamp = null;

  async function processTracks() {
    try {
      while (true) {
        const [videoResult, audioResult] = await Promise.all([
          videoReader.read(),
          audioReader.read(),
        ]);

        if (videoResult.done || audioResult.done) break;

        const videoFrame = videoResult.value;
        const audioFrame = audioResult.value;

        // Initialize base timestamp if needed
        if (baseTimestamp === null) {
          baseTimestamp = Math.min(videoFrame.timestamp, audioFrame.timestamp);
        }

        const videoRelativeTimestamp = videoFrame.timestamp - baseTimestamp;
        const audioRelativeTimestamp = audioFrame.timestamp - baseTimestamp;

        const timeDifference = videoRelativeTimestamp - audioRelativeTimestamp;

        if (Math.abs(timeDifference) <= syncThreshold) {
          // Frames are in sync
          await videoWriter.write(videoFrame);
          await audioWriter.write(audioFrame);
        } else if (timeDifference > 0) {
          // Video is ahead, wait for audio to catch up
          await audioWriter.write(audioFrame);
          // Reuse video frame on the next loop
          videoReader.releaseLock();
        } else {
          // Audio is ahead, wait for video to catch up
          await videoWriter.write(videoFrame);
          // Reuse audio frame on the next loop
          audioReader.releaseLock();
        }

        // Release frames
        videoFrame.close();
        audioFrame.close();
      }
    } catch (error) {
      console.error("Error in track synchronization:", error);
    } finally {
      videoReader.releaseLock();
      audioReader.releaseLock();
      videoWriter.close();
      audioWriter.close();
    }
  }

  processTracks();

  return syncedStream;
}




Python code to improve:


class SyncClientTracksForRecording:
    def __init__(self, audio_track, video_track, audio_track_sync_q, video_track_sync_q):
        self.audio_track = audio_track
        self.video_track = video_track
        self.audio_track_sync_q = audio_track_sync_q
        self.video_track_sync_q = video_track_sync_q

        # Time bases
        self.audio_time_base = fractions.Fraction(1, 48000)  # 48 kHz audio
        self.video_time_base = fractions.Fraction(1, 90000)  # 90 kHz video

        # Elapsed time tracking
        self.audio_elapsed_time = 0.0
        self.video_elapsed_time = 0.0

        # Stop signal for synchronization loop
        self.stop_signal = False

    async def sync(self):
        while not self.stop_signal:
            try:
                # Receive audio and video frames concurrently
                audio_task = asyncio.create_task(self.audio_track.recv())
                video_task = asyncio.create_task(self.video_track.recv())

                audio_frame, video_frame = await asyncio.gather(audio_task, video_task)

                # Set time bases
                audio_frame.time_base = self.audio_time_base
                video_frame.time_base = self.video_time_base

                # Calculate and assign PTS values
                audio_frame.pts = int(self.audio_elapsed_time / float(self.audio_time_base))
                video_frame.pts = int(self.video_elapsed_time / float(self.video_time_base))

                # Increment elapsed time
                self.audio_elapsed_time += 0.020  # Assuming 20 ms audio frame duration
                self.video_elapsed_time += 1 / 30  # Assuming 30 fps video frame rate

                # Enqueue frames
                await asyncio.gather(
                    self.audio_track_sync_q.put(audio_frame),
                    self.video_track_sync_q.put(video_frame),
                )

            except Exception as e:
                print(f"Error in sync loop: {e}")
                break

    def stop(self):
        """Stop the synchronization loop."""
        self.stop_signal = True
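
One direction that might help with the drift (a sketch only, assuming the incoming frames carry usable pts and time_base values; CommonClockRebase is a hypothetical helper, not aiortc API): derive each frame's PTS from its own timestamp rebased onto a shared origin, instead of accumulating fixed 20 ms and 1/30 s increments, which drift as soon as the real capture rates differ.

import fractions

AUDIO_TB = fractions.Fraction(1, 48000)  # target audio time base (48 kHz)
VIDEO_TB = fractions.Fraction(1, 90000)  # target video time base (90 kHz)

class CommonClockRebase:
    """Hypothetical helper: rebase frames from both tracks onto one shared origin."""

    def __init__(self):
        self.origin = None  # first timestamp seen on either track, in seconds

    def rebase(self, frame, target_tb):
        # Frame time in seconds, taken from the frame's own clock.
        t = frame.pts * frame.time_base
        if self.origin is None:
            self.origin = t
        # Express the same instant in the target time base.
        frame.pts = int((t - self.origin) / target_tb)
        frame.time_base = target_tb
        return frame

A single instance would be shared by both tracks inside sync(), e.g. clock.rebase(audio_frame, AUDIO_TB) and clock.rebase(video_frame, VIDEO_TB), so audio and video PTS count from the same zero.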




-
FFMPEG: stream local video to UDP address, make client aware of video length and current frame time offset (make stream seekable?)
17 December 2014, by klim
I just started to use FFMPEG. It is a really great library, capable of live video transcoding and streaming.
I use the following command to transcode and stream a local video file to a UDP address:
ffmpeg -y -re -i inputvideo.mpeg2 -vsync 1 -vcodec mpeg4 -b 1600k -r 15 -crf 20 -acodec aac -ar 44100 -strict -2 -f mpegts udp://192.168.1.30:1234
It works smoothly. I can open this UDP address in VLC player and play the live stream.
Does anybody know how to make the client aware of the video duration and the current timestamp?
Ideally it would be nice to make the stream seekable. As far as I understand that is not possible, but I would at least like to tell the VLC client the total duration of the video and the current frame timestamp, so it could show progress.
Thanks.
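
For what it's worth, MPEG-TS over raw UDP carries no container-level duration, so the total length has to reach the client out of band. A minimal sketch of one option (an assumption, not a verified solution): probe the source file on the sender side with ffprobe and publish the result to clients through any side channel:

import subprocess

def probe_duration_seconds(path: str) -> float:
    # Standard ffprobe invocation: print only the container duration.
    out = subprocess.check_output([
        "ffprobe", "-v", "error",
        "-show_entries", "format=duration",
        "-of", "default=noprint_wrappers=1:nokey=1",
        path,
    ])
    return float(out)

# e.g. send this value to the player alongside the stream URL
print(probe_duration_seconds("inputvideo.mpeg2"))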
-
Is there any problem in my FFMPEG encoder client code?
29 January 2024, by kyhnz
I am trying to write code that captures a video stream, encodes it as H.265 (HEVC), and sends it to the server over UDP:


// There may be some unnecessary library imports; rule 1: if it is ok, don't touch it!

#include <iostream>
#include <sys/types.h>
#include <unistd.h>
#include <cstring>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string>
#include <cstdlib>
#include <cstdio>

extern "C"
{
#include <libavutil/frame.h>
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libavcodec/packet.h>
#include <libavcodec/codec_id.h>
#include <libavutil/error.h>
#include <libavutil/opt.h>
}

#define PORT 9999
#define IP_ADDRESS "127.0.0.1"

using namespace std;

int main(){
    const int width = 1080;
    const int height = 720;
    const int fps = 18;
    const auto resulation = "1080x720";
    const auto device = "/dev/video0";
    const auto format = "v4l2";
    const AVCodecID codec_id = AV_CODEC_ID_H265;

    avdevice_register_all();
    avformat_network_init();
    AVFormatContext *formatContext = nullptr;

    AVDictionary *format_opts = nullptr;
    av_dict_set(&format_opts, "framerate", "18", 0);
    av_dict_set(&format_opts, "video_size", resulation, 0);

    const AVInputFormat *inputFormat = av_find_input_format(format);
    if (!inputFormat)
    {
        cerr << "Unknown input format: " << format << endl;
        return 1;
    }
    cout << "Input format: " << format << endl;

    const AVCodec *codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        cerr << "Codec can't find " << codec_id << endl;
        return 1;
    }
    cout << "Found codec: " << codec_id << endl;

    AVCodecContext *avctx = avcodec_alloc_context3(codec);
    if (!avctx) {
        cerr << "Error: Could not create encoder!" << endl;
        return 1;
    }
    cout << "Create encoder. " << endl;

    avctx->bit_rate = 1000000;
    avctx->width = width;
    avctx->height = height;
    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    avctx->time_base = (AVRational){1, fps};
    avctx->framerate = (AVRational){fps, 1};
    avctx->gop_size = fps*2;
    avctx->refs = 3;

    av_opt_set(avctx->priv_data, "preset", "medium", 0);
    av_opt_set(avctx->priv_data, "crf", "18", 0);
    av_opt_set(avctx->priv_data, "tune", "zerolatency", 0);

    if (avcodec_open2(avctx, codec, nullptr) < 0) {
        cerr << "Error: Couldn't open codec" << endl;
        return 1;
    }
    cout << "Open codec succesfully." << endl;

    int open_input = avformat_open_input(&formatContext, device,
                                         const_cast<AVInputFormat*>(inputFormat), &format_opts);
    if (open_input != 0)
    {
        cerr << "Device cant open " << device << endl;
        return 1;
    }
    cout << "Device active: " << device << endl;

    AVFrame *frame = av_frame_alloc();
    frame->width = avctx->width;
    frame->height = avctx->height;
    frame->format = avctx->pix_fmt;

    if(av_frame_get_buffer(frame, 0) != 0){
        cerr << "Error: video frame " << endl;
        return 1;
    }
    cout << "Video frame has been created." << endl;

    if(av_frame_make_writable(frame) != 0){
        cerr << "Error: frame is not writable" << endl;
        return 1;
    }

    AVPacket *packet = av_packet_alloc();
    if (!packet) {
        cerr << "error: has not been created packet" << endl;
    }

    av_dump_format(formatContext, 0, device, 0);
    av_dict_free(&format_opts);

    int socket_client = socket(AF_INET, SOCK_DGRAM, 0);
    if (socket_client == -1) {
        cerr << "Error: socket!" << endl;
        exit(EXIT_FAILURE);
    }
    cout << "Socket has been created" << endl;

    struct sockaddr_in serverAddr = {0};
    serverAddr.sin_family = AF_INET;
    serverAddr.sin_port = htons(PORT);
    serverAddr.sin_addr.s_addr = inet_addr(IP_ADDRESS);

    while (true) {
        if (av_read_frame(formatContext, packet) != 0) {
            cerr << "Error" << endl;
            av_packet_unref(packet);
            continue;
        }

        frame->data[7] = packet->data;
        frame->linesize[7] = packet->size;

        if (avcodec_send_frame(avctx, frame) != 0){
            cerr << "Error: Frame sending is missing ---> " << &av_strerror << endl;
            av_packet_unref(packet);
            continue;
        }

        if (avcodec_receive_packet(avctx, packet) != 0){
            cerr << "Error: Packet giving is missing! ---> " << &av_strerror << endl;
            av_packet_unref(packet);
            continue;
        }

        ssize_t snd = sendto(socket_client, packet->data, packet->size,
                             MSG_CONFIRM, (struct sockaddr *)&serverAddr,
                             sizeof(serverAddr));

        if(snd == -1){
            cerr << "Error: Data sending failed !" << endl;
            av_packet_unref(packet);
            continue;
        } else {
            cout << "Data sending succesfully" << endl;
        }

        av_packet_unref(packet);
        av_frame_unref(frame);
    }

    av_frame_free(&frame);
    av_packet_free(&packet);
    close(socket_client);
    avformat_free_context(formatContext);
    avformat_close_input(&formatContext);
    avformat_network_deinit();

    return 0;
}


I get the following output, and I do not yet understand the reason:


[video4linux2,v4l2 @ 0x643732e80b40] The V4L2 driver changed the video from 1080x720 to 1280x720
[video4linux2,v4l2 @ 0x643732e80b40] The V4L2 driver changed the video from 1280x720 to 640x480
[video4linux2,v4l2 @ 0x643732e80b40] The driver changed the time per frame from 1/18 to 1/30



Is there any problem in my encoder client code?


In addition, I can't send data to the server.