
Recherche avancée
Autres articles (47)
-
Des sites réalisés avec MediaSPIP
2 mai 2011, par — Cette page présente quelques-uns des sites fonctionnant sous MediaSPIP.
Vous pouvez bien entendu ajouter le votre grâce au formulaire en bas de page. -
Support audio et vidéo HTML5
10 avril 2011 — MediaSPIP utilise les balises HTML5 video et audio pour la lecture de documents multimedia en profitant des dernières innovations du W3C supportées par les navigateurs modernes.
Pour les navigateurs plus anciens, le lecteur flash Flowplayer est utilisé.
Le lecteur HTML5 utilisé a été spécifiquement créé pour MediaSPIP : il est complètement modifiable graphiquement pour correspondre à un thème choisi.
Ces technologies permettent de distribuer vidéo et son à la fois sur des ordinateurs conventionnels (...) -
De l’upload à la vidéo finale [version standalone]
31 janvier 2010, par — Le chemin d’un document audio ou vidéo dans SPIPMotion est divisé en trois étapes distinctes.
Upload et récupération d’informations de la vidéo source
Dans un premier temps, il est nécessaire de créer un article SPIP et de lui joindre le document vidéo "source".
Au moment où ce document est joint à l’article, deux actions supplémentaires au comportement normal sont exécutées : La récupération des informations techniques des flux audio et video du fichier ; La génération d’une vignette : extraction d’une (...)
Sur d’autres sites (4250)
-
ffmpeg capture from ip camera video in h264 stream [closed]
23 mars 2023, par Иванов Иван — I can't read the frames from the camera and then write them to a video file (any format). Even the frames I do get are distorted — the point coordinates seem to be shifted, so the resulting video is warped.


c++ code.


https://drive.google.com/file/d/1W2sZMR5D5pvVmnhiQyhiaQhC9frhdeII/view?usp=sharing


#define INBUF_SIZE 4096


 //writing the minimal required header for a pgm file format
 //portable graymap format-> https://en.wikipedia.org/wiki/Netpbm_format#PGM_example
 fprintf (f, "P5\n%d %d\n%d\n", xsize, ysize, 255);

 //writing line by line
 for (i = 0; i /contains data on a configuration of media content, such as bitrate, 
 //frame rate, sampling frequency, channels, height and many other things.
 AVCodecContext * AVCodecContext_ = NULL;
 AVCodecParameters * AVCodecParametr_ = NULL;
 FILE * f;
 //This structure describes decoded (raw) audio- or video this.
 AVFrame * frame;
 uint8_t inbuf [INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
 uint8_t * data;
 size_t data_size;
 int ret;
 int eof;
 AVFormatContext * AVfc = NULL;
 int ERRORS;
 //AVCodec * codec;
 char buf [1024];
 const char * FileName;
 
 //https://habr.com/ru/post/137793/
 //Stores the compressed one shot.
 AVPacket * pkt;
 
 //**********************************************************************
 //Beginning of reading video from the camera. 
 //**********************************************************************
 
 avdevice_register_all ();
 
 filename = "rtsp://admin: 754HG@192.168.1.75:554/11";
 //filename = "c:\\1.avi";
 outfilename = "C:\\2.MP4";
 
 //We open a flow of video (it is the file or the camera). 
 ERRORS = avformat_open_input (& AVfc, filename, NULL, NULL);
 if (ERRORS <0) {
 fprintf (stderr, "ffmpeg: could not open file \n");
 return-1;
 }
 
 //After opening, we can print out information on the video file (iformat = the name of a format; 
 //duration = duration). But as I connected the camera to me wrote: Duration: N/A, 
 //start: 0.000000, bitrate: N/A
 printf ("Format %s, duration %lld us", AVfc-> iformat-> long_name, AVfc-> duration);
 
 
 ERRORS = avformat_find_stream_info (AVfc, NULL);
 if (ERRORS <0) {
 fprintf (stderr, "ffmpeg: Unable to find stream info\n");
 return-1;
 }
 
 
 int CountStream;
 
 //We learn quantity of streams. 
 CountStream = AVfc-> nb_streams;
 
 //Let's look for the codec. 
 int video_stream;
 for (video_stream = 0; video_stream nb_streams; ++ video_stream) {
 if (AVfc-> streams[video_stream]-> codecpar-> codec_type == AVMEDIA_TYPE_VIDEO) {
 break;
 }
 
 }
 
 if (video_stream == AVfc-> nb_streams) {
 fprintf (stderr, "ffmpeg: Unable to find video stream\n");
 return-1;
 }
 
 //Here we define a type of the codec, for my camera it is equal as AV_CODEC_ID_HEVC (This that in what is broadcast by my camera)
 codec = avcodec_find_decoder(AVfc-> streams [video_stream]-> codecpar-> codec_id);
 //--------------------------------------------------------------------------------------
 
 //Functions for inquiry of opportunities of libavcodec,
 AVCodecContext_ = avcodec_alloc_context3(codec);
 if (! AVCodecContext _) {
 fprintf (stderr, "Was not succeeded to allocate a video codec context, since it not poddrerzhivayetsya\n");
 exit(1);
 }
 
 //This function is used for initialization 
 //AVCodecContext of video and audio of the codec. The announcement of avcodec_open2 () is in libavcodecavcodec.h
 //We open the codec. 
 
 ERRORS = avcodec_open2 (AVCodecContext _, codec, NULL);
 if (ERRORS <0) {
 fprintf (stderr, "ffmpeg: It is not possible to open codec \n");
 return-1;
 }
 
 //It for processing of a sound - a reserve.
 //swr_alloc_set_opts ()
 //swr_init (); 
 
 //To output all information on the video file. 
 av_dump_format (AVfc, 0, argv[1], 0);
 
 //=========================================================================================
 //Further, we receive frames. before we only received all infomration about the entering video.
 //=========================================================================================
 
 //Now we are going to read packages from a stream and to decode them in shots, but at first 
 //we need to mark out memory for both components (AVPacket and AVFrame).
 frame = av_frame_alloc ();
 
 if (! frame) {
 fprintf (stderr, "Is not possible to mark out memory for video footage \n");
 exit(1);
 }
 //We mark out memory for a package 
 pkt = av_packet_alloc ();
 //We define a file name for saving the picture.
 const char * FileName1 = "C:\\Users\\Павел\\Desktop\\NyFile.PGM";
 //Data reading if they is. 
 while (av_read_frame (AVfc, pkt)> = 0) {
 //It is a package from a video stream? Because there is still a soundtrack.
 if (pkt-> stream_index == video_stream) {
 int ret;
 
 //Transfer of the raw package data as input data in the decoder
 ret = avcodec_send_packet (AVCodecContext _, pkt);
 if (ret <0 | | ret == AVERROR(EAGAIN) | | ret == AVERROR_EOF) {
 std:: cout <<"avcodec_send_packet:" <<ret while="while"> = 0) {
 
 //Returns the decoded output data from the decoder or the encoder
 ret = avcodec_receive_frame (AVCodecContext _, frame);
 if (ret == AVERROR(EAGAIN) | | ret == AVERROR_EOF) {
 //std:: cout <<"avcodec_receive_frame:" <<ret cout="cout"> of frame_number </============================================================================================
 
 //Experimentally - we will keep a shot in the picture. 
 
 save_gray_frame(frame-> data [0], frame-> linesize [0], frame-> width, frame-> height, (char *) FileName1);
 }
 }
 }
 
 //av_parser_close(parser);
 avcodec_free_context (& AVCodecContext _);
 av_frame_free (& frame);
 av_packet_free (& pkt);
 
 return 0;
</ret></ret>


-
VLC and ffplay not receiving video from RTMP stream on Nginx
14 janvier, par Ekobadd — I'm streaming via OBS 30.1.2 to an RTMP server on a DigitalOcean droplet. The server is running nginx 1.26.0 using the RTMP plugin (libnginx-mod-rtmp in apt).


OBS is configured to output H.264-encoded, 1200kbps, 24fps, 1920x1080 video and aac-encoded, stereo, 44.1kHz, 160kbps audio.


Below is the minimal reproducible example. When I attempt to play the rtmp stream with ffplay or VLC, it's a random chance whether I get video or not. Audio is always fine. The output from ffplay or ffprobe (example below) occasionally does not show any video stream.


rtmp {
 server {
 listen 1935;
 chunk_size 4096;

 application ingest {
 live on;
 record off;

 allow publish <my ip="ip" address="address">;
 deny publish all;

 allow play all;
 }
 }
}
</my>


The server has two applications, "ingest" and "live", the former uses the following ffmpeg command to transcode the stream and create a corresponding stream on the latter application :


exec_push ffmpeg -i rtmp://localhost/ingest/$name -b:v 1200k -c:v libx264 -c:a aac -ar 44100 -ac 1 -f flv -preset veryfast -tune zerolatency rtmp://localhost/live/$name 2>>/tmp/rtmp-ingest-$name.log;



As you can see, this produces a log which shows the following :


Output #0, flv, to 'rtmp://localhost/live/ekobadd':
 Metadata:
 |RtmpSampleAccess: true
 Server : NGINX RTMP (github.com/arut/nginx-rtmp-module)
 displayWidth : 1920
 displayHeight : 1080
 fps : 23
 profile :
 level :
 encoder : Lavf61.1.100
 Stream #0:0: Audio: aac (LC) ([10][0][0][0] / 0x000A), 44100 Hz, mono, fltp, 69 kb/s
 Metadata:
 encoder : Lavc61.3.100 aac



The video is not present, though I can see on the digitalocean control panel that the server is pulling 1.2Mbps inbound, which is about the same as my OBS video bitrate. And, although the ffmpeg instance which is transcoding does not appear to see the video stream from the ingest application, ffprobe from my local machine does, sometimes :


> ffprobe rtmp://mydomain.com/ingest/ekobadd
...
Input #0, flv, from 'rtmp://mydomain.com/ingest/ekobadd': 0B f=0/0
 Metadata:
 |RtmpSampleAccess: true
 Server : NGINX RTMP (github.com/arut/nginx-rtmp-module)
 displayWidth : 1920
 displayHeight : 1080
 fps : 23
 profile :
 level :
 Duration: 00:00:00.00, start: 122.045000, bitrate: N/A
 Stream #0:0: Audio: aac (LC), 48000 Hz, stereo, fltp, 163 kb/s
 Stream #0:1: Video: h264 (High), yuv420p(tv, bt709, progressive), 1920x1080 [SAR 1:1 DAR 16:9], 1228 kb/s, 23 fps, 23.98 tbr, 1k tbn
 126.24 A-V: -1.071 fd= 0 aq= 54KB vq= 161KB sq= 0B f=0/0



Sometimes, however, it doesn't see the stream at all :


[rtmp @ 0000022d87d0fe00] Server error: No such stream
rtmp://mydomain.com/ingest/ekobadd: Operation not permitted



Testing the stream with VLC gives the same results.


Of course, the "live" application also doesn't have video. I have, however, streamed video from it before. I assume if I restart nginx enough, that the exec_push ffmpeg command will randomly see the video stream much like ffprobe does. I also have HLS and DASH configured, and they're both working perfect if you're a radio talk show host.


/etc/nginx/nginx.conf : (I'm quite sure I never touched anything in the HTTP section, but it's included just in case)


rtmp {
 server {
 listen 1935;
 chunk_size 8192;

 idle_streams off;

 application ingest {
 live on;
 record off;

 # Transcode to h264/aac via flv, 1.2Mbps 24fps 44.1kHz, single audio channel video (HLS Ready)
 exec_push ffmpeg -i rtmp://localhost/ingest/$name -b:v 1200k -c:v libx264 -c:a aac -ar 44100 -ac 1 -f flv -preset veryfast -tune zerolatency rtmp://localhost/live/$name 2>>/tmp/rtmp-ingest-$name.log;

 allow publish <my ip="ip" address="address">;
 deny publish all;

 allow play all; # This was added for debugging.
 }

 application live {
 live on;
 record off;

 hls on;
 hls_path /var/www/mydomain.com/html/live/hls;
 hls_fragment 6s;
 hls_playlist_length 60;

 dash on;
 dash_path /var/www/mydomain.com/html/live/dash;

 allow publish 127.0.0.1;
 deny publish all;

 allow play all;
 }
 }
}

http {

 ##
 # Basic Settings
 ##

 sendfile on;
 tcp_nopush on;
 types_hash_max_size 2048;
 server_tokens build; # Recommended practice is to turn this off

 server_names_hash_bucket_size 64;
 # server_name_in_redirect off;

 include /etc/nginx/mime.types;
 default_type application/octet-stream;

 ##
 # SSL Settings
 ##

 ssl_protocols TLSv1.2 TLSv1.3; # Dropping SSLv3 (POODLE), TLS 1.0, 1.1
 ssl_prefer_server_ciphers off; # Don't force server cipher order.

 ##
 # Logging Settings
 ##

 access_log /var/log/nginx/access.log;

 ##
 # Gzip Settings
 ##

 gzip on;

 # gzip_vary on;
 # gzip_proxied any;
 # gzip_comp_level 6;
 # gzip_buffers 16 8k;
 # gzip_http_version 1.1;
 # gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;

 ##
 # Virtual Host Configs
 ##

 include /etc/nginx/conf.d/*.conf;
 include /etc/nginx/sites-enabled/*;
}
</my>


/etc/nginx/sites-available/mydomain.com :


server {
 listen 443 ssl;

 ssl_certificate /etc/letsencrypt/live/mydomain.com/fullchain.pem;
 ssl_certificate_key /etc/letsencrypt/live/mydomain.com/privkey.pem;
 ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
 ssl_ciphers HIGH:!aNULL:!MD5;

 root /var/www/mydomain.com/html;

 server_name mydomain.com www.mydomain.com;

 location / {
 root /var/www/mydomain.com/html/live;

# add_header Cache-Control no-cache;
 add_header Access-Control-Allow-Origin *;
 }
}

types {
# application/vnd.apple.mpegurl m3u8;
 application/dash+xml mpd;
}



-
ffmpeg-next potential bug in write_header causes timebase to be set to Rational(1/15360)
7 septembre 2024, par Huhngut — I am trying to encode a video using the ffmpeg_next crate. I got everything working and it successfully creates an output video.
The only problem is that the time_base of my stream is wrongly written to the file.
I can confirm that I set the timebase correctly for both the encoder as well as the stream.


By debug prints I was able to narrow the problem down.
octx.write_header().unwrap();
causes the stream timebase to be reset from Rational(1/30) to Rational(1/15360). Changing the timebase back afterwards has no effect. The wrong value must have been written to the header.

I modified the src code of ffmpeg-next and recompiled it. I can confirm that the correct value is set before the call to
avformat_write_header


pub fn write_header(&mut self) -> Result<(), Error> {
 println!(
 "_________________ {:?}",
 self.stream(0).unwrap().time_base()
 );
 unsafe {
 match avformat_write_header(self.as_mut_ptr(), ptr::null_mut()) {
 0 => Ok(()),
 e => Err(Error::from(e)),
 }
 }
 }



To my understanding this must be a bug in the crate, but I don't want to accuse anyone given my nonexistent knowledge of ffmpeg. Also, the examples in the GitHub repo don't seem to have this problem. My fault then? Unfortunately I was not able to get the transcode-x264 example to run. Most of my code comes from that example.


Relevant code bits are below. I don't know how much set_parameters influences anything; my testing suggests it has no influence. I also tried to set the timebase at the very end of the function, in case it gets reset by the parameters. This did not work.


let mut ost = octx.add_stream(codec)?;
ost.set_time_base(Rational::new(1, FPS));

ost.set_parameters(&encoder);
encoder.set_time_base(Rational::new(1, FPS));
ost.set_parameters(&opened_encoder);



By default and in the above example the streams timebase is 0/0. If I leave it out or change it to this manually it has no effect.


I also noticed that changing the value passed to set_pts influences the output fps, although not the timebase. I think this is more of a side effect.


I will leave a minimal reproducible example below. Any help or hints would be appreciated


abstract main function


fn main() {
 let output_file = "output.mp4";
 let x264_opts = parse_opts("preset=medium".to_string()).expect("invalid x264 options string");

 ffmpeg_next::init().unwrap();
 let mut octx = format::output(output_file).unwrap();

 let mut encoder = Encoder::new(&mut octx, x264_opts).unwrap();

 format::context::output::dump(&octx, 0, Some(&output_file));
 //This line somehow clears the streams time base
 octx.write_header().unwrap();

 // Without this line, the next logs returns Rational(1/30) Rational(1/15360) indicating streams timebase is wrong. even thought I set it above
 // this line changes it back but only for the print but not the actual output. Because the faulty data is written into the header
 // octx.stream_mut(0)
 // .unwrap()
 // .set_time_base(Rational::new(1, FPS));

 println!(
 "---------------- {:?} {:?}",
 encoder.encoder.time_base(),
 octx.stream(0).unwrap().time_base(),
 );

 for frame_num in 0..100 {
 let mut frame = encoder.create_frame();
 frame.set_pts(Some(frame_num));
 encoder.add_frame(&frame, &mut octx);
 }

 encoder.close(&mut octx);
 octx.write_trailer().unwrap();
}



Encoder struct containing the implementation logic


struct Encoder {
 encoder: encoder::Video,
}

impl Encoder {
 fn new(
 octx: &mut format::context::Output,
 x264_opts: Dictionary,
 ) -> Result {
 let set_header = octx
 .format()
 .flags()
 .contains(ffmpeg_next::format::flag::Flags::GLOBAL_HEADER);

 let codec = encoder::find(codec::Id::H264);
 let mut ost = octx.add_stream(codec)?;
 ost.set_time_base(Rational::new(1, FPS));

 let mut encoder = codec::context::Context::new_with_codec(
 encoder::find(codec::Id::H264)
 .ok_or(ffmpeg_next::Error::InvalidData)
 .unwrap(),
 )
 .encoder()
 .video()
 .unwrap();
 ost.set_parameters(&encoder);

 encoder.set_width(WIDTH);
 encoder.set_height(HEIGHT);
 encoder.set_aspect_ratio(WIDTH as f64 / HEIGHT as f64);
 encoder.set_format(util::format::Pixel::YUV420P);
 encoder.set_frame_rate(Some(Rational::new(FPS, 1)));
 encoder.set_time_base(Rational::new(1, FPS));

 if set_header {
 encoder.set_flags(ffmpeg_next::codec::flag::Flags::GLOBAL_HEADER);
 }

 let opened_encoder = encoder
 .open_with(x264_opts.to_owned())
 .expect("error opening x264 with supplied settings");
 ost.set_parameters(&opened_encoder);

 println!(
 "\nost time_base: {}; encoder time_base: {}; encoder frame_rate: {}\n",
 ost.time_base(),
 &opened_encoder.time_base(),
 &opened_encoder.frame_rate()
 );

 Ok(Self {
 encoder: opened_encoder,
 })
 }

 fn add_frame(&mut self, frame: &frame::Video, octx: &mut format::context::Output) {
 self.encoder.send_frame(frame).unwrap();
 self.process_packets(octx);
 }

 fn close(&mut self, octx: &mut format::context::Output) {
 self.encoder.send_eof().unwrap();
 self.process_packets(octx);
 }

 fn process_packets(&mut self, octx: &mut format::context::Output) {
 let mut encoded = Packet::empty();
 while self.encoder.receive_packet(&mut encoded).is_ok() {
 encoded.set_stream(0);
 encoded.write_interleaved(octx).unwrap();
 }
 }

 fn create_frame(&self) -> frame::Video {
 return frame::Video::new(
 self.encoder.format(),
 self.encoder.width(),
 self.encoder.height(),
 );
 }
}



other util stuff


use ffmpeg_next::{
 codec::{self},
 encoder, format, frame, util, Dictionary, Packet, Rational,
};

const FPS: i32 = 30;
const WIDTH: u32 = 720;
const HEIGHT: u32 = 1080;

fn parse_opts<'a>(s: String) -> Option> {
 let mut dict = Dictionary::new();
 for keyval in s.split_terminator(',') {
 let tokens: Vec<&str> = keyval.split('=').collect();
 match tokens[..] {
 [key, val] => dict.set(key, val),
 _ => return None,
 }
 }
 Some(dict)
}