
Media (91)
-
Spoon - Revenge!
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
My Morning Jacket - One Big Holiday
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Zap Mama - Wadidyusay?
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
David Byrne - My Fair Lady
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Beastie Boys - Now Get Busy
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Granite de l’Aber Ildut
9 September 2011
Updated: September 2011
Language: French
Type: Text
Other articles (21)
-
Publishing on MediaSPIP
13 June 2013
Can I post content from an iPad tablet?
Yes, if your installed MediaSPIP is at version 0.2 or higher. If necessary, contact your MediaSPIP administrator to find out.
-
Libraries and software specific to media
10 December 2010
For correct and optimal operation, several things need to be taken into consideration.
After installing apache2, mysql and php5, it is important to install the other required software, whose installation is described in the related links: a set of multimedia libraries (x264, libtheora, libvpx) used for encoding and decoding video and sound, in order to support as many file types as possible (cf. this tutorial); FFMpeg with the maximum number of decoders and (...)
-
MediaSPIP Init and Diogène: MediaSPIP publication types
11 November 2010
When a MediaSPIP site is installed, the MediaSPIP Init plugin performs a number of operations, the main one being to create four main sections in the site and five form templates for Diogène.
These four main sections (also called sectors) are: Medias; Sites; Editos; Actualités;
For each of these sections, a specific form template of the same name is created. For the "Medias" section, a second "category" template is created, which makes it possible to add (...)
On other sites (5182)
-
Waiting between FFMPEG scripts when creating .hdr/.chk and .mpd files on NGINX server using RTMP module
17 April 2020, by Mathew Knight
I wonder if anyone might be able to suggest a solution to an issue I'm having.



I'm running some FFmpeg scripts inside an RTMP server block on an NGINX server running on Ubuntu 18.04.



Basically, I'm ingesting the RTMP stream into the server, then using FFmpeg to demux it into separate audio and video header and chunk files; in separate scripts I then create two separate manifests, one for audio and one for video.



The player I have (a development ambisonic 360 video player) has problems reading the stream correctly, and I believe this is because the FFmpeg processes for the manifests do not wait 2 seconds before running.



Is there a way to program a wait between the scripts to facilitate this?



Furthermore, is there a way to make the manifest scripts run only once and then quit? (See the wrapper-script sketch below.)
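A minimal sketch of one way to get both behaviours, assuming the two manifest commands are moved out of nginx.conf into a small shell wrapper launched by a single exec_push; the path /usr/local/bin/make_manifests.sh is a placeholder and nothing here has been tested against this exact setup:

#!/bin/sh
# /usr/local/bin/make_manifests.sh
# Called from the RTMP application block with a single directive:
#   exec_push /usr/local/bin/make_manifests.sh;
# Give the chunking FFmpeg job a 2 second head start writing .hdr/.chk files.
sleep 2
# Each manifest pass only reads the header file, writes the .mpd, then exits,
# so the two commands run once each, in order.
ffmpeg -probesize 500M -analyzeduration 100M -f webm_dash_manifest -live 1 -i "/var/www/html/dash/video_360.hdr" -map 0 -c copy -f webm_dash_manifest -live 1 -adaptation_sets "id=0,streams=0" -chunk_start_index 1 -chunk_duration_ms 2000 -minimum_update_period 7200 "/var/www/html/dash/video.mpd" 2>>/var/log/nginx/ffmpegManifestVideo.log
ffmpeg -probesize 500M -analyzeduration 100M -f webm_dash_manifest -live 1 -i "/var/www/html/dash/audio_171.hdr" -map 0 -c copy -f webm_dash_manifest -live 1 -adaptation_sets "id=1,streams=0" -chunk_start_index 1 -chunk_duration_ms 2000 -minimum_update_period 7200 "/var/www/html/dash/audio_16ch.mpd" 2>>/var/log/nginx/ffmpegManifestAudio.log
# nginx-rtmp respawns an exec_push child that exits while the stream is still
# live (see the respawn_timeout directive), so block here instead of exiting
# if the manifests should only ever be written once per publish:
exec sleep infinity

Note that both manifest passes stream-copy here; since webm_dash_manifest only writes a manifest, re-encoding the audio with libopus at that stage shouldn't be necessary.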



Here's my current NGINX .conf:



user root;
#user www-data;
worker_processes 1;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
 worker_connections 768;
 # multi_accept on;
}
http {
 ##
 # Basic Settings
 ##
 sendfile on;
 tcp_nopush on;
 tcp_nodelay on;
 keepalive_timeout 65;
 types_hash_max_size 2048;
 # server_tokens off;
 # server_names_hash_bucket_size 64;
 # server_name_in_redirect off;
 include /etc/nginx/mime.types;
 default_type application/octet-stream;
 ##
 # SSL Settings
 ##
 #ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
 #ssl_prefer_server_ciphers on;
 ##
 # Logging Settings
 ##
 access_log /var/log/nginx/access.log;
 error_log /var/log/nginx/error.log;
 ##
 # Gzip Settings
 ##
 ##
 # Virtual Host Configs
 ##
 include /etc/nginx/conf.d/*.conf;
 include /etc/nginx/sites-enabled/*;
}
rtmp {
 server {
  listen 1935;
  chunk_size 4096;

  application live {
   live on;
   record off;
   interleave off;
   wait_key on;
   meta on;
   wait_video off;
   idle_streams off;
   sync 300ms;
   session_relay on;

   #allow publish 127.0.0.1;
   #allow publish 192.168.2.0/24;
   allow publish all;
   #deny publish all;
   allow play all;

   # RX stream to FFmpeg: demux audio and video, write .hdr files, start chunking DASH segments (.chk)
   exec_push sudo ffmpeg -y -re -i 'rtmp://localhost:1935/live/stream' -map 0:1 -pix_fmt yuv420p -maxrate 750k -bufsize 3000k -c:v libvpx-vp9 -s 1920x1080 -keyint_min 60 -g 60 -speed 6 -tile-columns 4 -frame-parallel 1 -threads 8 -static-thresh 0 -max-intra-rate 300 -deadline realtime -lag-in-frames 0 -error-resilient 1 -b:v 6000k -f webm_chunk -header "/var/www/html/dash/video_360.hdr" -chunk_start_index 1 "/var/www/html/dash/video_360_%d.chk" -map 0:2 -c:a libopus -mapping_family 255 -b:a 1024k -vn -f webm_chunk -audio_chunk_duration 2000 -header "/var/www/html/dash/audio_171.hdr" -chunk_start_index 1 "/var/www/html/dash/audio_171_%d.chk" 2>>/var/log/nginx/ffmpegChunk.log;

   # Create video manifest
   exec_push sudo ffmpeg -probesize 500M -analyzeduration 100M -f webm_dash_manifest -live 1 -i "/var/www/html/dash/video_360.hdr" -map 0 -c copy -f webm_dash_manifest -live 1 -adaptation_sets "id=0,streams=0" -chunk_start_index 1 -chunk_duration_ms 2000 -minimum_update_period 7200 "/var/www/html/dash/video.mpd" 2>>/var/log/nginx/ffmpegManifestVideo.log;
   # Create audio manifest
   exec_push sudo ffmpeg -probesize 500M -analyzeduration 100M -f webm_dash_manifest -live 1 -i "/var/www/html/dash/audio_171.hdr" -map 0 -c libopus -mapping_family 255 -f webm_dash_manifest -live 1 -adaptation_sets "id=1,streams=0" -chunk_start_index 1 -chunk_duration_ms 2000 -minimum_update_period 7200 "/var/www/html/dash/audio_16ch.mpd" 2>>/var/log/nginx/ffmpegManifestAudio.log;

   ##
   # Record the incoming stream
   ##
   # Record audio and video together
   record all;
   record_path /home/mathewknight/Desktop/StreamRecord/Master;
   record_notify on;

   # Record audio separately
   recorder audio {
    record audio;
    record_path /home/mathewknight/Desktop/StreamRecord/Audio;
    record_suffix -%d-%b-%y-%T.audio.flv;
    record_notify on;
   }

   # Record video separately
   recorder video {
    record video;
    record_path /home/mathewknight/Desktop/StreamRecord/Video;
    record_suffix -%d-%b-%y-%T.video.flv;
    record_notify on;
   }
  }
 }
}




-
ffmpeg creates empty mpd-file
1 April 2021, by Macster
I'm trying to live-stream a webm file using ffmpeg and Dash.js as a player. Creating chunks with ffmpeg seems to work fine, but when I want to create the manifest it says:

Could not find codec parameters for stream 0 (Video: vp9, none, 640x480): unspecified pixel format Consider increasing the value for the 'analyzeduration' and 'probesize' options

and at the end:

Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)

Commands I use


ffmpeg -re -r 25 -i Dash/strm.webm ^
 -map 0:v:0 ^
 -pix_fmt yuv420p ^
 -c:v libvpx-vp9 ^
 -s 640x480 -keyint_min 25 -g 25 ^
 -speed 6 -threads 8 -static-thresh 0 -max-intra-rate 300 ^
 -deadline realtime -lag-in-frames 0 -error-resilient 1 ^
 -f webm_chunk ^
 -header "Dash/glass_360.hdr" ^
 -chunk_start_index 1 ^
 Dash\glass_360_%d.chk



Manifest


ffmpeg ^
 -f webm_dash_manifest -live 1 ^
 -r 25 ^
 -i Dash/glass_360.hdr ^
 -c copy ^
 -map 0 ^
 -r 25 ^
 -framerate 25 ^
 -f webm_dash_manifest -live 1 ^
 -adaptation_sets "id=0,streams=0" ^
 -chunk_start_index 1 ^
 -chunk_duration_ms 1000 ^
 -time_shift_buffer_depth 7200 ^
 -minimum_update_period 7200 ^
 Dash/glass_live_manifest.mpd



Manifest Output


libavutil 56. 49.100 / 56. 49.100
 libavcodec 58. 87.101 / 58. 87.101
 libavformat 58. 43.100 / 58. 43.100
 libavdevice 58. 9.103 / 58. 9.103
 libavfilter 7. 83.100 / 7. 83.100
 libswscale 5. 6.101 / 5. 6.101
 libswresample 3. 6.100 / 3. 6.100
 libpostproc 55. 6.100 / 55. 6.100
[webm_dash_manifest @ 0000015f19f2ea40] Could not find codec parameters for stream 0 (Video: vp9, none, 640x480): unspecified pixel format
Consider increasing the value for the 'analyzeduration' and 'probesize' options
Input #0, webm_dash_manifest, from 'Dash/glass_360.hdr':
 Metadata:
 ENCODER : Lavf58.43.100
 Duration: N/A, bitrate: N/A
 Stream #0:0(eng): Video: vp9, none, 640x480, SAR 1:1 DAR 4:3, 1k tbr, 1k tbn, 1k tbc (default)
 Metadata:
 ALPHA_MODE : 1
 ENCODER : Lavc58.87.101 libvpx-vp9
 webm_dash_manifest_file_name: glass_360.hdr
 webm_dash_manifest_track_number: 1
Output #0, webm_dash_manifest, to 'stream_manifest.mpd':
 Metadata:
 encoder : Lavf58.43.100
 Stream #0:0(eng): Video: vp9, none, 640x480 [SAR 1:1 DAR 4:3], q=2-31, 1k tbr, 1k tbn, 1k tbc (default)
 Metadata:
 ALPHA_MODE : 1
 ENCODER : Lavc58.87.101 libvpx-vp9
 webm_dash_manifest_file_name: glass_360.hdr
 webm_dash_manifest_track_number: 1
Stream mapping:
 Stream #0:0 -> #0:0 (copy)
Press [q] to stop, [?] for help
frame= 0 fps=0.0 q=-1.0 Lsize= 1kB time=00:00:00.00 bitrate=N/A speed= 0x
video:0kB audio:0kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: unknown
Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)



glass_live_manifest.mpd


<?xml version="1.0" encoding="UTF-8"?>

<Period start="PT0S">
<AdaptationSet mimeType="video/webm" codecs="vp9" lang="eng" bitstreamSwitching="true" subsegmentAlignment="true" subsegmentStartsWithSAP="1">
<ContentComponent type="video"/>
<SegmentTemplate timescale="1000" duration="1000" media="glass_$RepresentationID$_$Number$.chk" startNumber="1" initialization="glass_$RepresentationID$.hdr"/>
<Representation bandwidth="1000000" width="640" height="480" codecs="vp9" mimeType="video/webm" startWithSAP="1"/>
</AdaptationSet>
</Period>




UPDATE


I fixed the "warnings" by changing the commands like this:


ffmpeg -re -r 25 -i Dash/strm.webm
-map 0:v:0
-pix_fmt yuv420p
-c:v libvpx
-s 640x480 -keyint_min 60 -g 60 -speed 6 -tile-columns 4 -frame-parallel 1 -threads 8 -static-thresh 0 -max-intra-rate 300 -deadline realtime -lag-in-frames 0 -error-resilient 1
-b:v 3000k
-f webm_chunk
-header "Dash/glass_360.hdr"
-chunk_start_index 1 Dash/glass_360_%d.chk
-map 0:a:0
-c:a libvorbis
-b:a 128k -ar 44100
-f webm_chunk
-audio_chunk_duration 2000
-header Dash/glass_171.hdr
-chunk_start_index 1 Dash/glass_171_%d.chk

ffmpeg
-f webm_dash_manifest -live 1
-i Dash/glass_360.hdr
-f webm_dash_manifest -live 1
-i Dash/glass_171.hdr
-c copy
-map 0 -map 1
-f webm_dash_manifest -live 1
-adaptation_sets "id=0,streams=0 id=1,streams=1"
-chunk_start_index 1
-chunk_duration_ms 2000
-time_shift_buffer_depth 7200
-minimum_update_period 7200 Dash/glass_video_manifest.mpd



However, it's still not working.
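One hedged thing to try before reworking the pipeline further: the warning in the output above explicitly suggests raising 'analyzeduration' and 'probesize', and the manifest commands in the previous post pass exactly those options so that webm_dash_manifest can recover the pixel format from the header file. The original VP9 manifest command with those two flags added (values borrowed from that setup, untested here):

ffmpeg -probesize 500M -analyzeduration 100M ^
 -f webm_dash_manifest -live 1 ^
 -i Dash/glass_360.hdr ^
 -c copy ^
 -map 0 ^
 -f webm_dash_manifest -live 1 ^
 -adaptation_sets "id=0,streams=0" ^
 -chunk_start_index 1 ^
 -chunk_duration_ms 1000 ^
 -time_shift_buffer_depth 7200 ^
 -minimum_update_period 7200 ^
 Dash/glass_live_manifest.mpd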


-
Screeching white sound coming while playing audio as a raw stream
27 April 2020, by Sri Nithya Sharabheshwarananda
I. Background



- I am trying to make an application which helps to match subtitles to the audio waveform very accurately at the waveform level, at the word level or even at the character level.
- The audio is expected to be Sanskrit chants (Yoga, rituals etc.) which are extremely long compound words [ example - aṅganyā-sokta-mātaro-bījam is traditionally one word broken only to assist reading ]
- The input transcripts / subtitles might be roughly in sync at the sentence/verse level but surely would not be in sync at the word level.
- The application should be able to figure out points of silence in the audio waveform, so that it can guess the start and end point of each word (or even of each letter/consonant/vowel in a word). The chanted audio and the visual subtitle should then match perfectly at the word (or even letter/consonant/vowel) level, with the UI highlighting or animating the exact word (or letter) in the subtitle line being chanted at that moment and also showing it in a bigger font. This app's purpose is to assist learning Sanskrit chanting. (A silence-detection sketch follows this list.)
- It is not expected to be a 100% automated process, nor 100% manual but a mix where the application should assist the human as much as possible.
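Since point 4 hinges on locating silences, one readily available building block is ffmpeg's silencedetect audio filter: it logs silence_start / silence_end timestamps that the application could parse as candidate word (or syllable) boundaries. A minimal sketch, where chant.mp3 is a placeholder input and the noise threshold (-30dB) and minimum silence duration (0.3 s) are guesses to be tuned per recording:

ffmpeg -i chant.mp3 -af silencedetect=noise=-30dB:d=0.3 -f null -

The detected boundaries appear in the log on stderr as lines like:

[silencedetect @ ...] silence_start: 12.462
[silencedetect @ ...] silence_end: 12.845 | silence_duration: 0.383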

II. Following is the first code I wrote for this purpose, wherein



- First I open an mp3 (or any other audio format) file,
- Seek to some arbitrary point in the timeline of the audio file // as of now playing from zero offset
- Get the audio data in raw format for 2 purposes - (1) playing it and (2) drawing the waveform.
- Play the raw audio data using the standard Java audio libraries

III. The problem I am facing is that between every cycle there is a screeching sound.



- Probably I need to close the line between cycles? Sounds simple, I can try.
- But I am also wondering whether this overall approach itself is correct? Any tip, guide, suggestion or link would be really helpful.
- Also, I just hard-coded the sample rate etc. (44100 Hz etc.); are these good default presets, or should they depend on the input format? (See the ffprobe sketch after this list.)
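On the last point: rather than hard-coding 44100 Hz, the relevant parameters can be read from the input itself. The code below already shells out to ffprobe via Jaffree for stream metadata, and plain ffprobe can report the same information, e.g. (input.mp3 is a placeholder):

ffprobe -v error -select_streams a:0 -show_entries stream=sample_rate,channels -of default=noprint_wrappers=1 input.mp3

which prints lines such as sample_rate=44100 and channels=2 that could be fed into the AudioFormat constructor instead of constants.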

IV. Here is the code



import com.github.kokorin.jaffree.StreamType;
import com.github.kokorin.jaffree.ffmpeg.FFmpeg;
import com.github.kokorin.jaffree.ffmpeg.FFmpegProgress;
import com.github.kokorin.jaffree.ffmpeg.FFmpegResult;
import com.github.kokorin.jaffree.ffmpeg.NullOutput;
import com.github.kokorin.jaffree.ffmpeg.PipeOutput;
import com.github.kokorin.jaffree.ffmpeg.ProgressListener;
import com.github.kokorin.jaffree.ffprobe.Stream;
import com.github.kokorin.jaffree.ffmpeg.UrlInput;
import com.github.kokorin.jaffree.ffprobe.FFprobe;
import com.github.kokorin.jaffree.ffprobe.FFprobeResult;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.SourceDataLine;


public class FFMpegToRaw {
 Path BIN = Paths.get("f:\\utilities\\ffmpeg-20190413-0ad0533-win64-static\\bin");
 String VIDEO_MP4 = "f:\\org\\TEMPLE\\DeviMahatmyamRecitationAudio\\03_01_Devi Kavacham.mp3";
 FFprobe ffprobe;
 FFmpeg ffmpeg;

 public void basicCheck() throws Exception {
 if (BIN != null) {
 ffprobe = FFprobe.atPath(BIN);
 } else {
 ffprobe = FFprobe.atPath();
 }
 FFprobeResult result = ffprobe
 .setShowStreams(true)
 .setInput(VIDEO_MP4)
 .execute();

 for (Stream stream : result.getStreams()) {
 System.out.println("Stream " + stream.getIndex()
 + " type " + stream.getCodecType()
 + " duration " + stream.getDuration(TimeUnit.SECONDS));
 } 
 if (BIN != null) {
 ffmpeg = FFmpeg.atPath(BIN);
 } else {
 ffmpeg = FFmpeg.atPath();
 }

 // Sometimes ffprobe can't show the exact duration; use ffmpeg transcoding to NULL output to get it
 final AtomicLong durationMillis = new AtomicLong();
 FFmpegResult fFmpegResult = ffmpeg
 .addInput(
 UrlInput.fromUrl(VIDEO_MP4)
 )
 .addOutput(new NullOutput())
 .setProgressListener(new ProgressListener() {
 @Override
 public void onProgress(FFmpegProgress progress) {
 durationMillis.set(progress.getTimeMillis());
 }
 })
 .execute();
 System.out.println("audio size - "+fFmpegResult.getAudioSize());
 System.out.println("Exact duration: " + durationMillis.get() + " milliseconds");
 }

 public void toRawAndPlay() throws Exception {
 ProgressListener listener = new ProgressListener() {
 @Override
 public void onProgress(FFmpegProgress progress) {
 System.out.println(progress.getFrame());
 }
 };

 // code derived from : https://stackoverflow.com/questions/32873596/play-raw-pcm-audio-received-in-udp-packets

 int sampleRate = 44100;//24000;//Hz
 int sampleSize = 16;//Bits
 int channels = 1;
 boolean signed = true;
 boolean bigEnd = false;
 String format = "s16be"; //"f32le"

 //https://trac.ffmpeg.org/wiki/audio types
 final AudioFormat af = new AudioFormat(sampleRate, sampleSize, channels, signed, bigEnd);
 final DataLine.Info info = new DataLine.Info(SourceDataLine.class, af);
 final SourceDataLine line = (SourceDataLine) AudioSystem.getLine(info);

 line.open(af, 4096); // format , buffer size
 line.start();

 OutputStream destination = new OutputStream() {
 @Override public void write(int b) throws IOException {
 throw new UnsupportedOperationException("Nobody uses this.");
 }
 @Override public void write(byte[] b, int off, int len) throws IOException {
 String o = new String(b);
 boolean showString = false;
 System.out.println("New output ("+ len
 + ", off="+off + ") -> "+(showString?o:"")); 
 // output wave form repeatedly

 if(len%2!=0) {
 len -= 1;
 System.out.println("");
 }
 line.write(b, off, len);
 System.out.println("done round");
 }
 };

 // src : http://blog.wudilabs.org/entry/c3d357ed/?lang=en-US
 FFmpegResult result = FFmpeg.atPath(BIN).
 addInput(UrlInput.fromPath(Paths.get(VIDEO_MP4))).
 addOutput(PipeOutput.pumpTo(destination).
 disableStream(StreamType.VIDEO). //.addArgument("-vn")
 setFrameRate(sampleRate). //.addArguments("-ar", sampleRate)
 addArguments("-ac", "1").
 setFormat(format) //.addArguments("-f", format)
 ).
 setProgressListener(listener).
 execute();

 // shut down audio
 line.drain();
 line.stop();
 line.close();

 System.out.println("result = "+result.toString());
 }

 public static void main(String[] args) throws Exception {
 FFMpegToRaw raw = new FFMpegToRaw();
 raw.basicCheck();
 raw.toRawAndPlay();
 }
}
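One hedged observation about the code above: the pipe requests "s16be" from ffmpeg, i.e. big-endian signed 16-bit PCM, while the AudioFormat is constructed with bigEnd = false, i.e. the Java line expects little-endian samples. Byte-swapped 16-bit PCM plays back as loud static, which matches the screeching described in III. A minimal sketch of making the two ends agree (either direction works, as long as both sides match):

// Option A: request little-endian PCM from ffmpeg to match bigEnd = false
String format = "s16le";
boolean bigEnd = false;

// Option B: keep "s16be" and declare the Java line big-endian instead
// String format = "s16be";
// boolean bigEnd = true;

// In both cases the AudioFormat is then built from the agreed values:
final AudioFormat af = new AudioFormat(sampleRate, sampleSize, channels, signed, bigEnd);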

Thank You