
Media (91)

Other articles (21)

  • Publishing on MediaSPIP

    13 June 2013

    Can I post content from an iPad tablet?
    Yes, provided your MediaSPIP installation is at version 0.2 or higher. If in doubt, contact the administrator of your MediaSPIP to find out.

  • Media-specific libraries and software

    10 December 2010

    For correct and optimal operation, several things must be taken into consideration.
    After installing apache2, mysql and php5, it is important to install further required software, whose installation is described in the related links: a set of multimedia libraries (x264, libtheora, libvpx) used for encoding and decoding video and audio, in order to support as many file types as possible. See this tutorial; FFMpeg with the maximum number of decoders and (...)

  • MediaSPIP Init and Diogène: MediaSPIP publication types

    11 November 2010

    When a MediaSPIP site is installed, the MediaSPIP Init plugin carries out several operations, the main one being to create four top-level sections in the site and five form templates for Diogène.
    These four top-level sections (also called sectors) are: Medias; Sites; Editos; Actualités.
    For each of these sections, a form template of the same name is created. For the "Medias" section, a second "category" template is also created, allowing one to add (...)

On other sites (5182)

  • Waiting between FFMPEG scripts when creating .hdr/.chk and .mpd files on NGINX server using RTMP module

    17 April 2020, by Mathew Knight

    I wonder if anyone might be able to suggest a solution to an issue I'm having.

    I'm running some FFMPEG scripts inside an RTMP server block on an NGINX server running on Ubuntu 18.04.

    Basically, I'm ingesting the RTMP stream on the server, then using FFMPEG to demux it into separate audio and video header and chunk files; then, in separate scripts, I'm creating two separate manifests, one for the audio and one for the video.

    The player I have (a development ambisonic, 360 video player) is having problems reading the stream correctly, and I believe this is because the FFMPEG processes for the manifests don't wait 2 seconds before running.

    Is there a way to program a wait in between the scripts to facilitate this?

    Furthermore, is there a way to make the manifest scripts run only once and then quit?
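
    One approach that would cover both questions (a sketch only: the wrapper script's name and path, the lock file, and the use of exec_publish_done are assumptions, not something confirmed for this setup) is to move the two manifest commands into a small shell script that sleeps first and guards itself with a lock file, then call that script from the application block instead of the two exec_push manifest lines:

    #!/bin/bash
    # make_manifests.sh -- hypothetical wrapper script.
    # Give the chunking ffmpeg a 2-second head start so the first .hdr/.chk
    # files exist, and use a lock file so the manifests are built only once.
    LOCK=/tmp/dash_manifests.lock
    [ -e "$LOCK" ] && exit 0    # already ran for this publish: quit immediately
    touch "$LOCK"
    sleep 2

    ffmpeg -probesize 500M -analyzeduration 100M -f webm_dash_manifest -live 1 \
      -i /var/www/html/dash/video_360.hdr -map 0 -c copy \
      -f webm_dash_manifest -live 1 -adaptation_sets "id=0,streams=0" \
      -chunk_start_index 1 -chunk_duration_ms 2000 -minimum_update_period 7200 \
      /var/www/html/dash/video.mpd 2>>/var/log/nginx/ffmpegManifestVideo.log
    # (the audio manifest command would go here in the same way)

    The RTMP block would then call it with something like:

    exec_push /usr/local/bin/make_manifests.sh;
    # optional cleanup when the publisher disconnects:
    exec_publish_done rm -f /tmp/dash_manifests.lock;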

    Here's my current NGINX .conf:

user root;
#user www-data;
worker_processes 1;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;
events {
    worker_connections 768;
    # multi_accept on;
}
http {
    ##
    # Basic Settings
    ##
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    # server_tokens off;
    # server_names_hash_bucket_size 64;
    # server_name_in_redirect off;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    ##
    # SSL Settings
    ##
    #ssl_protocols TLSv1 TLSv1.1 TLSv1.2; # Dropping SSLv3, ref: POODLE
    #ssl_prefer_server_ciphers on;
    ##
    # Logging Settings
    ##
    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;
    ##
    # Gzip Settings
    ##
    ##
    # Virtual Host Configs
    ##
    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*;
}
rtmp {
    server {
        listen 1935;
        chunk_size 4096;

        application live {
            live on;
            record off;
            interleave off;
            wait_key on;
            meta on;
            wait_video off;
            idle_streams off;
            sync 300ms;
            session_relay on;
            #allow publish 127.0.0.1;
            #allow publish 192.168.2.0/24;
            allow publish all;
            #deny publish all;
            allow play all;

            # RX stream to FFMPEG, demux audio and video, write .hdr files, start chunking DASH segments (.chk)
            exec_push sudo ffmpeg -y -re -i 'rtmp://localhost:1935/live/stream' -map 0:1 -pix_fmt yuv420p -maxrate 750k -bufsize 3000k -c:v libvpx-vp9 -s 1920x1080 -keyint_min 60 -g 60 -speed 6 -tile-columns 4 -frame-parallel 1 -threads 8 -static-thresh 0 -max-intra-rate 300 -deadline realtime -lag-in-frames 0 -error-resilient 1 -b:v 6000k -f webm_chunk -header "/var/www/html/dash/video_360.hdr" -chunk_start_index 1 "/var/www/html/dash/video_360_%d.chk" -map 0:2 -c:a libopus -mapping_family 255 -b:a 1024k -vn -f webm_chunk -audio_chunk_duration 2000 -header "/var/www/html/dash/audio_171.hdr" -chunk_start_index 1 "/var/www/html/dash/audio_171_%d.chk" 2>>/var/log/nginx/ffmpegChunk.log;

            # Create video manifest
            exec_push sudo ffmpeg -probesize 500M -analyzeduration 100M -f webm_dash_manifest -live 1 -i "/var/www/html/dash/video_360.hdr" -map 0 -c copy -f webm_dash_manifest -live 1 -adaptation_sets "id=0,streams=0" -chunk_start_index 1 -chunk_duration_ms 2000 -minimum_update_period 7200 "/var/www/html/dash/video.mpd" 2>>/var/log/nginx/ffmpegManifestVideo.log;

            # Create audio manifest
            exec_push sudo ffmpeg -probesize 500M -analyzeduration 100M -f webm_dash_manifest -live 1 -i "/var/www/html/dash/audio_171.hdr" -map 0 -c libopus -mapping_family 255 -f webm_dash_manifest -live 1 -adaptation_sets "id=1,streams=0" -chunk_start_index 1 -chunk_duration_ms 2000 -minimum_update_period 7200 "/var/www/html/dash/audio_16ch.mpd" 2>>/var/log/nginx/ffmpegManifestAudio.log;

            ##
            # Record the incoming stream
            ##
            # Record audio and video together
            record all;
            record_path /home/mathewknight/Desktop/StreamRecord/Master;
            record_notify on;

            # Record audio separately
            recorder audio {
                record audio;
                record_path /home/mathewknight/Desktop/StreamRecord/Audio;
                record_suffix -%d-%b-%y-%T.audio.flv;
                record_notify on;
            }

            # Record video separately
            recorder video {
                record video;
                record_path /home/mathewknight/Desktop/StreamRecord/Video;
                record_suffix -%d-%b-%y-%T.video.flv;
                record_notify on;
            }
        }
    }
}

  • ffmpeg creates empty mpd-file

    1 April 2021, by Macster

    I'm trying to live stream a webm file using ffmpeg and Dash.js as a player. Creating chunks with ffmpeg seems to work fine, but when I want to create the manifest it says Could not find codec parameters for stream 0 (Video: vp9, none, 640x480): unspecified pixel format. Consider increasing the value for the 'analyzeduration' and 'probesize' options, and at the end Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used).
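
    A first thing to try (a sketch based on the warning's own suggestion, mirroring the probe flags used in the NGINX question above; not a confirmed fix) is to raise the probe limits when reading the live header file:

    ffmpeg -probesize 500M -analyzeduration 100M -f webm_dash_manifest -live 1 -r 25 -i Dash/glass_360.hdr -c copy -map 0 -f webm_dash_manifest -live 1 -adaptation_sets "id=0,streams=0" -chunk_start_index 1 -chunk_duration_ms 1000 -time_shift_buffer_depth 7200 -minimum_update_period 7200 Dash/glass_live_manifest.mpd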

    Commands I use

    ffmpeg -re -r 25 -i Dash/strm.webm
 -map 0:v:0
 -pix_fmt yuv420p
 -c:v libvpx-vp9
 -s 640x480 -keyint_min 25 -g 25 
 -speed 6 -threads 8 -static-thresh 0 -max-intra-rate 300 
 -deadline realtime -lag-in-frames 0 -error-resilient 1
 -f webm_chunk 
 -header "Dash/glass_360.hdr"
 -chunk_start_index 1
 Dash\glass_360_%d.chk


    Manifest

    ffmpeg ^
 -f webm_dash_manifest -live 1
 -r 25
 -i Dash/glass_360.hdr
 -c copy
 -map 0
 -r 25
 -framerate 25
 -f webm_dash_manifest -live 1
 -adaptation_sets "id=0,streams=0"
 -chunk_start_index 1
 -chunk_duration_ms 1000
 -time_shift_buffer_depth 7200
 -minimum_update_period 7200
 Dash/glass_live_manifest.mpd


    Manifest Output

  libavutil      56. 49.100 / 56. 49.100
  libavcodec     58. 87.101 / 58. 87.101
  libavformat    58. 43.100 / 58. 43.100
  libavdevice    58.  9.103 / 58.  9.103
  libavfilter     7. 83.100 /  7. 83.100
  libswscale      5.  6.101 /  5.  6.101
  libswresample   3.  6.100 /  3.  6.100
  libpostproc    55.  6.100 / 55.  6.100
[webm_dash_manifest @ 0000015f19f2ea40] Could not find codec parameters for stream 0 (Video: vp9, none, 640x480): unspecified pixel format
Consider increasing the value for the 'analyzeduration' and 'probesize' options
Input #0, webm_dash_manifest, from 'Dash/glass_360.hdr':
  Metadata:
    ENCODER         : Lavf58.43.100
  Duration: N/A, bitrate: N/A
    Stream #0:0(eng): Video: vp9, none, 640x480, SAR 1:1 DAR 4:3, 1k tbr, 1k tbn, 1k tbc (default)
    Metadata:
      ALPHA_MODE      : 1
      ENCODER         : Lavc58.87.101 libvpx-vp9
      webm_dash_manifest_file_name: glass_360.hdr
      webm_dash_manifest_track_number: 1
Output #0, webm_dash_manifest, to 'stream_manifest.mpd':
  Metadata:
    encoder         : Lavf58.43.100
    Stream #0:0(eng): Video: vp9, none, 640x480 [SAR 1:1 DAR 4:3], q=2-31, 1k tbr, 1k tbn, 1k tbc (default)
    Metadata:
      ALPHA_MODE      : 1
      ENCODER         : Lavc58.87.101 libvpx-vp9
      webm_dash_manifest_file_name: glass_360.hdr
      webm_dash_manifest_track_number: 1
Stream mapping:
  Stream #0:0 -> #0:0 (copy)
Press [q] to stop, [?] for help
frame=    0 fps=0.0 q=-1.0 Lsize=       1kB time=00:00:00.00 bitrate=N/A speed=   0x
video:0kB audio:0kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: unknown
Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)


    glass_live_manifest.mpd

    <?xml version="1.0" encoding="UTF-8"?>
    <Period start="PT0S">
      <AdaptationSet mimeType="video/webm" codecs="vp9" lang="eng" bitstreamSwitching="true" subsegmentAlignment="true" subsegmentStartsWithSAP="1">
        <ContentComponent type="video"></ContentComponent>
        <SegmentTemplate timescale="1000" duration="1000" media="glass_$RepresentationID$_$Number$.chk" startNumber="1" initialization="glass_$RepresentationID$.hdr"></SegmentTemplate>
        <Representation bandwidth="1000000" width="640" height="480" codecs="vp9" mimeType="video/webm" startsWithSAP="1"></Representation>
      </AdaptationSet>
    </Period>


    UPDATE


    I fixed the "warnings" by changing the commands like this:


    ffmpeg -re -r 25 -i Dash/strm.webm
     -map 0:v:0
     -pix_fmt yuv420p
     -c:v libvpx
     -s 640x480 -keyint_min 60 -g 60 -speed 6 -tile-columns 4 -frame-parallel 1 -threads 8 -static-thresh 0 -max-intra-rate 300 -deadline realtime -lag-in-frames 0 -error-resilient 1
     -b:v 3000k
     -f webm_chunk
     -header "Dash/glass_360.hdr"
     -chunk_start_index 1 Dash/glass_360_%d.chk
     -map 0:a:0
     -c:a libvorbis
     -b:a 128k -ar 44100
     -f webm_chunk
     -audio_chunk_duration 2000
     -header Dash/glass_171.hdr
     -chunk_start_index 1 Dash/glass_171_%d.chk

    ffmpeg
     -f webm_dash_manifest -live 1
     -i Dash/glass_360.hdr
     -f webm_dash_manifest -live 1
     -i Dash/glass_171.hdr
     -c copy
     -map 0 -map 1
     -f webm_dash_manifest -live 1
     -adaptation_sets "id=0,streams=0 id=1,streams=1"
     -chunk_start_index 1
     -chunk_duration_ms 2000
     -time_shift_buffer_depth 7200
     -minimum_update_period 7200 Dash/glass_video_manifest.mpd


    However, it's still not working.


  • Screeching white sound coming while playing audio as a raw stream

    27 April 2020, by Sri Nithya Sharabheshwarananda

    I. Background

      1. I am trying to make an application which helps match subtitles to the audio waveform very accurately: at the waveform level, at the word level, or even at the character level.
      2. The audio is expected to be Sanskrit chants (Yoga, rituals, etc.), which are extremely long compound words [example: aṅganyā-sokta-mātaro-bījam is traditionally one word, broken up only to assist reading].
      3. The input transcripts/subtitles might be roughly in sync at the sentence/verse level, but would surely not be in sync at the word level.
      4. The application should be able to figure out the points of silence in the audio waveform, so that it can guess the start and end point of each word (or even of each letter/consonant/vowel in a word), such that the chanted audio and the visual subtitle match perfectly at the word (or even letter/consonant/vowel) level. The UI would then highlight or animate the exact word (or even letter) in the subtitle line being chanted at that moment, and also show that word (or letter/consonant/vowel) in a bigger font. This app's purpose is to assist learning Sanskrit chanting. (A silence-detection sketch follows this list.)
      5. It is not expected to be a 100% automated process, nor 100% manual, but a mix where the application assists the human as much as possible.
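    As a concrete starting point for the silence detection in point 4, here is a minimal sketch (an assumption of one workable approach, not part of the original post): run ffmpeg's silencedetect audio filter and scrape its silence_start/silence_end log lines. The input path and the noise/duration thresholds are placeholders to tune.

    import java.io.BufferedReader;
    import java.io.InputStreamReader;

    public class SilencePoints {
        public static void main(String[] args) throws Exception {
            // ffmpeg prints silencedetect results on stderr; -f null discards the media output.
            ProcessBuilder pb = new ProcessBuilder(
                    "ffmpeg", "-i", "chant.mp3",              // placeholder input file
                    "-af", "silencedetect=noise=-30dB:d=0.3", // threshold/duration: guesses to tune
                    "-f", "null", "-");
            pb.redirectErrorStream(true);                     // merge stderr into stdout
            Process p = pb.start();
            try (BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream()))) {
                String line;
                while ((line = r.readLine()) != null) {
                    // lines look like: [silencedetect @ ...] silence_start: 12.34
                    if (line.contains("silence_start") || line.contains("silence_end")) {
                        System.out.println(line);
                    }
                }
            }
            p.waitFor();
        }
    }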

    II. Following is the first code I wrote for this purpose, wherein:

      1. First I open an mp3 (or any audio format) file,
      2. seek to some arbitrary point in the timeline of the audio file (as of now it plays from offset zero),
      3. get the audio data in raw format, for two purposes: (1) playing it and (2) drawing the waveform,
      4. play the raw audio data using the standard Java audio libraries.

    III. The problem I am facing is that between every cycle there is a screeching sound.

      • Probably I need to close the line between cycles? Sounds simple, I can try.
      • But I am also wondering if this overall approach itself is correct. Any tip, guide, suggestion, or link would be really helpful.
      • Also, I just hard-coded the sample rate etc. (44100 Hz etc.); are these good to set as default presets, or should they depend on the input format? (See the sketch after this list.)
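    On the last point, here is a minimal sketch of deriving the AudioFormat from the input instead of hard-coding 44100 Hz, using the same jaffree FFprobe call as the code below (getSampleRate() and getChannels() on jaffree's Stream are assumed available, as in recent versions). One detail that must hold regardless: the format requested from ffmpeg and the Java AudioFormat must agree on endianness, e.g. "s16le" with bigEndian=false, or "s16be" with bigEndian=true.

    // Hypothetical helper for the FFMpegToRaw class shown below
    // (reuses its BIN and VIDEO_MP4 fields and the jaffree imports).
    private AudioFormat probeAudioFormat() {
        FFprobeResult probe = FFprobe.atPath(BIN)
                .setShowStreams(true)
                .setInput(VIDEO_MP4)
                .execute();
        for (Stream s : probe.getStreams()) {
            // Compare as text so this works whether getCodecType() returns a String or an enum.
            if ("audio".equalsIgnoreCase(String.valueOf(s.getCodecType()))) {
                int sampleRate = s.getSampleRate(); // assumed getters; present in recent jaffree
                int channels   = s.getChannels();
                // 16-bit signed little-endian; pair with format "s16le" on the ffmpeg side.
                return new AudioFormat(sampleRate, 16, channels, true, false);
            }
        }
        throw new IllegalStateException("no audio stream found");
    }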

    IV. Here is the code


    import com.github.kokorin.jaffree.StreamType;
    import com.github.kokorin.jaffree.ffmpeg.FFmpeg;
    import com.github.kokorin.jaffree.ffmpeg.FFmpegProgress;
    import com.github.kokorin.jaffree.ffmpeg.FFmpegResult;
    import com.github.kokorin.jaffree.ffmpeg.NullOutput;
    import com.github.kokorin.jaffree.ffmpeg.PipeOutput;
    import com.github.kokorin.jaffree.ffmpeg.ProgressListener;
    import com.github.kokorin.jaffree.ffprobe.Stream;
    import com.github.kokorin.jaffree.ffmpeg.UrlInput;
    import com.github.kokorin.jaffree.ffprobe.FFprobe;
    import com.github.kokorin.jaffree.ffprobe.FFprobeResult;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicLong;
    import javax.sound.sampled.AudioFormat;
    import javax.sound.sampled.AudioSystem;
    import javax.sound.sampled.DataLine;
    import javax.sound.sampled.SourceDataLine;


    public class FFMpegToRaw {
        Path BIN = Paths.get("f:\\utilities\\ffmpeg-20190413-0ad0533-win64-static\\bin");
        String VIDEO_MP4 = "f:\\org\\TEMPLE\\DeviMahatmyamRecitationAudio\\03_01_Devi Kavacham.mp3";
        FFprobe ffprobe;
        FFmpeg ffmpeg;

        public void basicCheck() throws Exception {
            if (BIN != null) {
                ffprobe = FFprobe.atPath(BIN);
            } else {
                ffprobe = FFprobe.atPath();
            }
            FFprobeResult result = ffprobe
                    .setShowStreams(true)
                    .setInput(VIDEO_MP4)
                    .execute();

            for (Stream stream : result.getStreams()) {
                System.out.println("Stream " + stream.getIndex()
                        + " type " + stream.getCodecType()
                        + " duration " + stream.getDuration(TimeUnit.SECONDS));
            }
            if (BIN != null) {
                ffmpeg = FFmpeg.atPath(BIN);
            } else {
                ffmpeg = FFmpeg.atPath();
            }

            //Sometimes ffprobe can't show exact duration, use ffmpeg transcoding to NULL output to get it
            final AtomicLong durationMillis = new AtomicLong();
            FFmpegResult fFmpegResult = ffmpeg
                    .addInput(
                            UrlInput.fromUrl(VIDEO_MP4)
                    )
                    .addOutput(new NullOutput())
                    .setProgressListener(new ProgressListener() {
                        @Override
                        public void onProgress(FFmpegProgress progress) {
                            durationMillis.set(progress.getTimeMillis());
                        }
                    })
                    .execute();
            System.out.println("audio size - " + fFmpegResult.getAudioSize());
            System.out.println("Exact duration: " + durationMillis.get() + " milliseconds");
        }

        public void toRawAndPlay() throws Exception {
            ProgressListener listener = new ProgressListener() {
                @Override
                public void onProgress(FFmpegProgress progress) {
                    System.out.println(progress.getFrame());
                }
            };

            // code derived from : https://stackoverflow.com/questions/32873596/play-raw-pcm-audio-received-in-udp-packets

            int sampleRate = 44100;//24000;//Hz
            int sampleSize = 16;//Bits
            int channels   = 1;
            boolean signed = true;
            boolean bigEnd = false;
            String format  = "s16be"; //"f32le"

            //https://trac.ffmpeg.org/wiki/audio types
            final AudioFormat af = new AudioFormat(sampleRate, sampleSize, channels, signed, bigEnd);
            final DataLine.Info info = new DataLine.Info(SourceDataLine.class, af);
            final SourceDataLine line = (SourceDataLine) AudioSystem.getLine(info);

            line.open(af, 4096); // format , buffer size
            line.start();

            OutputStream destination = new OutputStream() {
                @Override public void write(int b) throws IOException {
                    throw new UnsupportedOperationException("Nobody uses this.");
                }
                @Override public void write(byte[] b, int off, int len) throws IOException {
                    String o = new String(b);
                    boolean showString = false;
                    System.out.println("New output (" + len
                            + ", off=" + off + ") -> " + (showString ? o : ""));
                    // output wave form repeatedly

                    if (len % 2 != 0) {
                        len -= 1;
                        System.out.println("");
                    }
                    line.write(b, off, len);
                    System.out.println("done round");
                }
            };

            // src : http://blog.wudilabs.org/entry/c3d357ed/?lang=en-US
            FFmpegResult result = FFmpeg.atPath(BIN).
                addInput(UrlInput.fromPath(Paths.get(VIDEO_MP4))).
                addOutput(PipeOutput.pumpTo(destination).
                    disableStream(StreamType.VIDEO). //.addArgument("-vn")
                    setFrameRate(sampleRate).        //.addArguments("-ar", sampleRate)
                    addArguments("-ac", "1").
                    setFormat(format)                //.addArguments("-f", format)
                ).
                setProgressListener(listener).
                execute();

            // shut down audio
            line.drain();
            line.stop();
            line.close();

            System.out.println("result = " + result.toString());
        }

        public static void main(String[] args) throws Exception {
            FFMpegToRaw raw = new FFMpegToRaw();
            raw.basicCheck();
            raw.toRawAndPlay();
        }
    }


    Thank You
