Advanced search

Media (1)

Keyword: - Tags -/iphone

Other articles (89)

  • Organising by category

    17 May 2013, by

    In MediaSPIP, a section (rubrique) goes by two names: category and rubrique.
    The various documents stored in MediaSPIP can be filed under different categories. A category can be created by clicking "publish a category" in the "publish" menu at the top right (after logging in). A category can itself be filed under another category, so a whole tree of categories can be built.
    The next time a document is published, the newly created category will be offered (...)

  • Retrieving information from the master site when installing an instance

    26 November 2010, by

    Purpose
    On the main site, a mutualisation instance is defined by several things: its data in the spip_mutus table; its logo; its main author (the id_admin in the spip_mutus table, matching an id_auteur in the spip_auteurs table), who will be the only one able to finalise the creation of the mutualisation instance;
    It can therefore be quite useful to retrieve some of this information in order to complete the installation of an instance, for example to retrieve the (...)

  • Customising categories

    21 June 2013, by

    Category creation form
    For those who know SPIP well, a category can be thought of as a rubrique.
    For a document of type "category", the fields offered by default are: Text
    This form can be modified under:
    Administration > Configuration des masques de formulaire.
    For a document of type "media", the fields not displayed by default are: Short description (Descriptif rapide)
    It is also in this configuration section that you can specify the (...)

On other sites (10249)

  • Recording voice using HTML5 and processing it with ffmpeg

    22 March 2015, by user3789242

    I need to use ffmpeg in my JavaScript/HTML5 project, which lets the user select the format he wants the audio to be saved in. I don't know anything about ffmpeg, and although I've been doing lots of research I still don't know how to use it in my project. I found an example, https://github.com/sopel39/audioconverter.js, but the problem is how to add the 8 MB ffmpeg.js file to my project. Please, if someone can help me, I'll be very thankful.
    here is my full code:

    the javascript page:

    // variables
    var leftchannel = [];
    var rightchannel = [];
    var recorder = null;
    var recording = false;
    var recordingLength = 0;
    var volume = null;
    var audioInput = null;
    var sampleRate = 44100;
    var audioContext = null;
    var context = null;
    var outputString;



    // normalise the prefixed getUserMedia implementations
    navigator.getUserMedia = navigator.getUserMedia ||
                             navigator.webkitGetUserMedia ||
                             navigator.mozGetUserMedia ||
                             navigator.msGetUserMedia;

    if (navigator.getUserMedia) {
        navigator.getUserMedia({ audio: true }, success, function (e) {
            alert('Error capturing audio.');
        });
    } else {
        alert('getUserMedia not supported in this browser.');
    }
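    For reference, current browsers expose the same capability through the promise-based navigator.mediaDevices.getUserMedia, so the prefixed shim above is only needed for older ones:

    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(success)
        .catch(function (e) { alert('Error capturing audio.'); });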



    function getVal(value) {

        // if Record is pressed, we start recording
        if (value == "record") {
            recording = true;
            // reset the buffers for the new recording
            leftchannel.length = rightchannel.length = 0;
            recordingLength = 0;
            document.getElementById('output').innerHTML = "Recording now...";

        // if Stop is pressed, we stop the recording and package the WAV file
        } else if (value == "stop") {

            // we stop recording
            recording = false;
            document.getElementById('output').innerHTML = "Building wav file...";

            // we flatten the left and right channels down
            var leftBuffer = mergeBuffers(leftchannel, recordingLength);
            var rightBuffer = mergeBuffers(rightchannel, recordingLength);
            // we interleave both channels together
            var interleaved = interleave(leftBuffer, rightBuffer);

            // WAV container: 44-byte header followed by 16-bit PCM samples
            var buffer = new ArrayBuffer(44 + interleaved.length * 2);
            var view = new DataView(buffer);

            // RIFF chunk descriptor
            writeUTFBytes(view, 0, 'RIFF');
            view.setUint32(4, 44 + interleaved.length * 2, true);
            writeUTFBytes(view, 8, 'WAVE');
            // FMT sub-chunk
            writeUTFBytes(view, 12, 'fmt ');
            view.setUint32(16, 16, true);              // sub-chunk size: 16 for PCM
            view.setUint16(20, 1, true);               // audio format: 1 = linear PCM
            view.setUint16(22, 2, true);               // stereo (2 channels)
            view.setUint32(24, sampleRate, true);
            view.setUint32(28, sampleRate * 4, true);  // byte rate = sampleRate * blockAlign
            view.setUint16(32, 4, true);               // block align = channels * bytesPerSample
            view.setUint16(34, 16, true);              // bits per sample
            // data sub-chunk
            writeUTFBytes(view, 36, 'data');
            view.setUint32(40, interleaved.length * 2, true);

            // write the PCM samples, scaling [-1, 1] floats to 16-bit integers
            var lng = interleaved.length;
            var index = 44;
            var volume = 1;
            for (var i = 0; i < lng; i++) {
                view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
                index += 2;
            }

            var blob = new Blob([view], { type: 'audio/wav' });

            // let's save it locally

            document.getElementById('output').innerHTML = 'Handing off the file now...';
            var url = (window.URL || window.webkitURL).createObjectURL(blob);

            // note: the original snippet never declares recordingList;
            // it is assumed to be a <ul> element with this id in the page
            var recordingList = document.getElementById('recordingList');

            var li = document.createElement('li');
            var au = document.createElement('audio');
            var hf = document.createElement('a');

            au.controls = true;
            au.src = url;
            hf.href = url;
            hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
            hf.innerHTML = hf.download;
            li.appendChild(au);
            li.appendChild(hf);
            recordingList.appendChild(li);
        }
    }


    function success(e) {

        // e is the MediaStream delivered by getUserMedia
        audioContext = window.AudioContext || window.webkitAudioContext;
        context = new audioContext();

        // gain node, so the input level can be controlled
        volume = context.createGain();

        // creates an audio node from the microphone's incoming stream (source)
        source = context.createMediaStreamSource(e);

        // connect the stream (source) to the gain node
        source.connect(volume);

        var bufferSize = 2048;

        // ScriptProcessorNode with 2 input and 2 output channels
        recorder = context.createScriptProcessor(bufferSize, 2, 2);

        // node for the visualizer
        analyser = context.createAnalyser();
        analyser.smoothingTimeConstant = 0.3;
        analyser.fftSize = 512;

        splitter = context.createChannelSplitter();

        // called for every audio buffer while recording happens
        recorder.onaudioprocess = function (e) {

            if (!recording) return;
            var left = e.inputBuffer.getChannelData(0);
            var right = e.inputBuffer.getChannelData(1);

            // the underlying buffers are reused, so copy them before storing
            leftchannel.push(new Float32Array(left));
            rightchannel.push(new Float32Array(right));
            recordingLength += bufferSize;

            // get the frequency data for the first channel
            var array = new Uint8Array(analyser.frequencyBinCount);
            analyser.getByteFrequencyData(array);

            var c = document.getElementById("myCanvas");
            var ctx = c.getContext("2d");
            // clear the current state
            ctx.clearRect(0, 0, 1000, 325);
            var gradient = ctx.createLinearGradient(0, 0, 0, 300);
            gradient.addColorStop(1, '#000000');
            gradient.addColorStop(0.75, '#ff0000');
            gradient.addColorStop(0.25, '#ffff00');
            gradient.addColorStop(0, '#ffffff');
            // set the fill style
            ctx.fillStyle = gradient;
            drawSpectrum(array);

            function drawSpectrum(array) {
                for (var i = 0; i < array.length; i++) {
                    var value = array[i];
                    ctx.fillRect(i * 5, 325 - value, 3, 325);
                }
            }
        }

        function getAverageVolume(array) {
            var values = 0;
            var average;

            var length = array.length;

            // sum all the frequency amplitudes
            for (var i = 0; i < length; i++) {
                values += array[i];
            }

            average = values / length;
            return average;
        }

        // wire the graph: microphone -> gain -> splitter -> analyser -> recorder -> speakers
        volume.connect(splitter);
        splitter.connect(analyser, 0, 0);

        analyser.connect(recorder);
        recorder.connect(context.destination);
    }




    function mergeBuffers(channelBuffer, recordingLength) {
        var result = new Float32Array(recordingLength);
        var offset = 0;
        var lng = channelBuffer.length;
        for (var i = 0; i < lng; i++) {
            var buffer = channelBuffer[i];
            result.set(buffer, offset);
            offset += buffer.length;
        }
        return result;
    }

    function interleave(leftChannel, rightChannel) {
        var length = leftChannel.length + rightChannel.length;
        var result = new Float32Array(length);

        var inputIndex = 0;

        for (var index = 0; index < length; ) {
            result[index++] = leftChannel[inputIndex];
            result[index++] = rightChannel[inputIndex];
            inputIndex++;
        }
        return result;
    }
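    A tiny worked example of the frame ordering this produces (sample values made up):

    // interleave([L0, L1], [R0, R1]) -> [L0, R0, L1, R1]
    var L = new Float32Array([0.5, -0.5]);
    var R = new Float32Array([0.25, -0.25]);
    console.log(interleave(L, R)); // Float32Array [0.5, 0.25, -0.5, -0.25]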


    function writeUTFBytes(view, offset, string) {
        var lng = string.length;
        for (var i = 0; i < lng; i++) {
            view.setUint8(offset + i, string.charCodeAt(i));
        }
    }

    and here is the HTML code:

       
       
       
       <code class="echappe-js">&lt;script src=&quot;http://stackoverflow.com/feeds/tag/js/functions.js&quot;&gt;&lt;/script&gt;


    • FFMPEG - Multi Track, Multi Channel file to discrete mono files

      26 November 2020, by vade

      I have files which are multi-track and multi-channel (i.e., track 1 may be 5.1, track 2 may be stereo, track 3 may be stereo, etc.).


      I am looking to output every channel from every track into its own 'unrolled' discrete mono file.


      example media:


      ffprobe version 4.3.1-0york0~18.04 Copyright (c) 2007-2020 the FFmpeg developers
        built with gcc 7 (Ubuntu 7.5.0-3ubuntu1~18.04)
        configuration: --prefix=/usr --extra-version='0york0~18.04' --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --arch=amd64 --enable-gpl --disable-stripping --enable-avresample --disable-filter=resample --enable-gnutls --enable-ladspa --enable-libaom --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libcodec2 --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libjack --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librabbitmq --enable-librsvg --enable-librubberband --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libsrt --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzmq --enable-libzvbi --enable-lv2 --enable-omx --enable-openal --enable-opencl --enable-opengl --enable-sdl2 --enable-libzimg --enable-pocketsphinx --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libx264 --enable-shared
        libavutil      56. 51.100 / 56. 51.100
        libavcodec     58. 91.100 / 58. 91.100
        libavformat    58. 45.100 / 58. 45.100
        libavdevice    58. 10.100 / 58. 10.100
        libavfilter     7. 85.100 /  7. 85.100
        libavresample   4.  0.  0 /  4.  0.  0
        libswscale      5.  7.100 /  5.  7.100
        libswresample   3.  7.100 /  3.  7.100
        libpostproc    55.  7.100 / 55.  7.100
      [mxf @ 0x55d3e7fc2680] wrapping of stream 0 is unknown
      [jpeg2000 @ 0x55d3e805ce00] End mismatch 1
          Last message repeated 1 times
      Input #0, mxf, from 'redacted.mxf':
        Metadata:
          operational_pattern_ul: 060e2b34.04010101.0d010201.01010900
          modification_date: 2019-10-03T09:58:16.368000Z
          uid             : f6267ae2-680e-4357-9b1d-c77c045d3cd7
          generation_uid  : e7e6f5a1-6f15-4df5-aea8-a41f3ef535d6
          company_name    : redacted
          product_name    : redacted
          product_version : 11.6.1.5.301404
          product_uid     : 84ae5ffc-4710-11dd-a6fe-0010c629ec73
          application_platform: 4KICR1
          material_package_umid: 0x060A2B340101010501010D2013000000BE3608F3135E48AD99E4340643E47F22
          timecode        : 00:59:20:00
        Duration: 00:26:16.07, start: 0.000000, bitrate: 139194 kb/s
          Stream #0:0: Video: jpeg2000, yuv422p10le(progressive), 1920x1080, SAR 1:1 DAR 16:9, 23.98 tbr, 23.98 tbn, 23.98 tbc
          Metadata:
            file_package_umid: 0x060A2B340101010501010D201300000091A43E578B86490698045924FA9EECC5
            track_name      : Picture
          Stream #0:1: Audio: pcm_s24le, 48000 Hz, 6 channels, s32 (24 bit), 6912 kb/s
          Metadata:
            file_package_umid: 0x060A2B340101010501010D201300000091A43E578B86490698045924FA9EECC5
            track_name      : Sound
          Stream #0:2: Audio: pcm_s24le, 48000 Hz, 2 channels, s32 (24 bit), 2304 kb/s
          Metadata:
            file_package_umid: 0x060A2B340101010501010D201300000091A43E578B86490698045924FA9EECC5
            track_name      : Sound
          Stream #0:3: Audio: pcm_s24le, 48000 Hz, 2 channels, s32 (24 bit), 2304 kb/s
          Metadata:
            file_package_umid: 0x060A2B340101010501010D201300000091A43E578B86490698045924FA9EECC5
            track_name      : Sound
          Stream #0:4: Audio: pcm_s24le, 48000 Hz, 2 channels, s32 (24 bit), 2304 kb/s
          Metadata:
            file_package_umid: 0x060A2B340101010501010D201300000091A43E578B86490698045924FA9EECC5
            track_name      : Sound
          Stream #0:5: Data: none
          Metadata:
            file_package_umid: 0x060A2B340101010501010D201300000091A43E578B86490698045924FA9EECC5
            track_name      : Auxiliary Data
            data_type       : vbi_vanc_smpte_436M
      Unsupported codec with id 0 for input stream 5


      These files are vendor-qualified masters, and the track/channel combinations vary between vendors: some might be stereo, 5.1, 7.1 in order, some might be all discrete mono already, some might be discrete stereo, 5.1, and mono tracks. It's all a mix, so I'm looking for a general strategy that gracefully handles all channels from all tracks.


      Now, I have seen various strategies documented for discretizing audio in the ffmpeg docs, but none of them seem to show how to target different channels from different tracks. I'm sure it's a PEBKAC error, but I'd love some guidance.


      I have tried both a map_channel approach and a -filter_complex channelsplit approach.


      ffmpeg -i redacted.mxf -ss 60 \
          -map_channel 0.1.0 -t 10 track_1_0.wav \
          -map_channel 0.1.1 -t 10 track_1_1.wav \
          -map_channel 0.1.2 -t 10 track_1_2.wav \
          -map_channel 0.1.3 -t 10 track_1_3.wav \
          -map_channel 0.1.4 -t 10 track_1_4.wav \
          -map_channel 0.1.5 -t 10 track_1_5.wav \
          -map_channel 0.2.0 -t 10 track_2_0.wav \
          -map_channel 0.2.1 -t 10 track_2_1.wav \
          -map_channel 0.3.0 -t 10 track_3_0.wav \
          -map_channel 0.3.1 -t 10 track_3_1.wav \
          -map_channel 0.4.0 -t 10 track_4_0.wav \
          -map_channel 0.4.1 -t 10 track_4_1.wav


      However, the output files are not all mono; some are marked as 5.1. I don't believe they are inheriting a sane/correct channel layout (mono), and the output files that are marked 5.1 are nonsensical, as they are all sourced from stereo tracks: track_2_0.wav, track_2_1.wav, track_3_0.wav, track_3_1.wav, track_4_0.wav, track_4_1.wav. Which seems odd. Track 1_0 from the above command outputs sane media info:


      File size                                : 938 KiB
      Duration                                 : 10s 0ms
      Overall bit rate mode                    : Constant
      Overall bit rate                         : 768 Kbps
      Writing application                      : Lavf58.45.100

      Audio
      Format                                   : PCM
      Format settings                          : Little / Signed
      Codec ID                                 : 1
      Duration                                 : 10s 0ms
      Bit rate mode                            : Constant
      Bit rate                                 : 768 Kbps
      Channel(s)                               : 1 channel
      Sampling rate                            : 48.0 KHz
      Bit depth                                : 16 bits
      Stream size                              : 938 KiB (100%)


      However, the second and third tracks have the wrong channel layout and an unexpected codec ID:


      Format                                   : Wave
      File size                                : 5.49 MiB
      Duration                                 : 10s 0ms
      Overall bit rate mode                    : Constant
      Overall bit rate                         : 4 608 Kbps
      Writing application                      : Lavf58.45.100

      Audio
      Format                                   : PCM
      Format settings                          : Little / Signed
      Codec ID                                 : 00000001-0000-0010-8000-00AA00389B71
      Duration                                 : 10s 0ms
      Bit rate mode                            : Constant
      Bit rate                                 : 4 608 Kbps
      Channel(s)                               : 6 channels
      Channel layout                           : L R C LFE Lb Rb
      Sampling rate                            : 48.0 KHz
      Bit depth                                : 16 bits
      Stream size                              : 5.49 MiB (100%)


      Additionally, regarding map_channel, the docs cast doubt on whether it's the right approach at all:


      Note that currently each output stream can only contain channels from a single input stream ; you can’t for example use "-map_channel" to pick multiple input audio channels contained in different streams (from the same or different files) and merge them into a single output stream. It is therefore not currently possible, for example, to turn two separate mono streams into a single stereo stream. However splitting a stereo stream into two single channel mono streams is possible.


      Using filter_complex, the docs/bug tracker have an example of discretizing 5.1 and marking mono. I can target the tracks I want, and get a valid filter chain as seen in the debug log reporting, however I only get audio for the first track:


      ffmpeg -y -v 40 -i redacted.mxf -ss 60 \
          -disposition:a default \
          -filter_complex \
          "[0:a:0]channelsplit=channel_layout=5.1[c1][c2][c3][c4][c5][c6],\
          [c1]aformat=channel_layouts=mono[c1],\
          [c2]aformat=channel_layouts=mono[c2],\
          [c3]aformat=channel_layouts=mono[c3],\
          [c4]aformat=channel_layouts=mono[c4],\
          [c5]aformat=channel_layouts=mono[c5],\
          [c6]aformat=channel_layouts=mono[c6],\
          [0:a:1]channelsplit=channel_layout=stereo[c7][c8],\
          [c7]aformat=channel_layouts=mono[c7],\
          [c8]aformat=channel_layouts=mono[c8],\
          [0:a:2]channelsplit=channel_layout=stereo[c9][c10],\
          [c9]aformat=channel_layouts=mono[c9],\
          [c10]aformat=channel_layouts=mono[c10],\
          [0:a:3]channelsplit=channel_layout=stereo[c11][c12],\
          [c11]aformat=channel_layouts=mono[c11],\
          [c12]aformat=channel_layouts=mono[c12]" \
          -map "[c1]" -t 10 1.wav \
          -map "[c2]" -t 10 2.wav \
          -map "[c3]" -t 10 3.wav \
          -map "[c4]" -t 10 4.wav \
          -map "[c5]" -t 10 5.wav \
          -map "[c6]" -t 10 6.wav \
          -map "[c7]" -t 10 7.wav \
          -map "[c8]" -t 10 8.wav \
          -map "[c9]" -t 10 9.wav \
          -map "[c10]" -t 10 10.wav \
          -map "[c11]" -t 10 11.wav \
          -map "[c12]" -t 10 12.wav
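      A general strategy that handles arbitrary layouts is to probe the stream list first and then emit one pan=mono output per channel, which sidesteps both the deprecated map_channel and hand-written channelsplit layouts. Below is a minimal, unverified sketch in Node.js (assuming ffprobe/ffmpeg are on PATH; the script name and all variable names are made up for illustration):

      // split_channels.js — probe every audio stream, then write one mono WAV per channel
      const { execFileSync } = require('child_process');

      const input = process.argv[2]; // e.g. node split_channels.js redacted.mxf

      // 1. ffprobe reports codec_type and channel count for each stream as JSON
      const probe = JSON.parse(execFileSync('ffprobe',
          ['-v', 'error', '-print_format', 'json', '-show_streams', input]));
      const audio = probe.streams.filter(s => s.codec_type === 'audio');

      // 2. one pan filter per channel: cN picks the channel by index,
      //    so the source layout (5.1, stereo, mono...) is irrelevant
      const filters = [];
      const outputs = [];
      audio.forEach((s, t) => {
          for (let c = 0; c < s.channels; c++) {
              const label = `t${t}c${c}`;
              filters.push(`[0:a:${t}]pan=mono|c0=c${c}[${label}]`);
              outputs.push('-map', `[${label}]`, `track_${t}_${c}.wav`);
          }
      });

      // 3. single ffmpeg run, one -map per mono output file
      execFileSync('ffmpeg',
          ['-y', '-i', input, '-filter_complex', filters.join(';'), ...outputs],
          { stdio: 'inherit' });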


      TL;DR:


      In short, how does one export every channel of every track as a discrete mono audio file, regardless of the channel layout?


      Thank you!


    • Screeching white noise while playing audio as a raw stream

      27 April 2020, by Sri Nithya Sharabheshwarananda

      I. Background


      1. I am trying to make an application which helps match subtitles to the audio waveform very accurately: at the waveform level, at the word level, or even at the character level.

      2. The audio is expected to be Sanskrit chants (yoga, rituals, etc.), which are extremely long compound words [example: aṅganyā-sokta-mātaro-bījam is traditionally one word, broken only to assist reading].

      3. The input transcripts/subtitles might be roughly in sync at the sentence/verse level, but surely will not be in sync at the word level.

      4. The application should be able to figure out points of silence in the audio waveform, so that it can guess the start and end point of each word (or even each letter/consonant/vowel in a word), such that the chanted audio and the displayed subtitle match perfectly at the word (or even letter/consonant/vowel) level. The UI would then highlight or animate the exact word (or even letter) being chanted at that moment, and also show it in a bigger font. This app's purpose is to assist learning Sanskrit chanting.

      5. It is not expected to be a 100% automated process, nor 100% manual, but a mix where the application assists the human as much as possible.


      II. Following is the first code I wrote for this purpose, wherein:


      1. First I open an mp3 (or any audio format) file,

      2. Seek to some arbitrary point in the timeline of the audio file // as of now playing from zero offset

      3. Get the audio data in raw format, for 2 purposes: (1) playing it and (2) drawing the waveform.

      4. Play the raw audio data using the standard Java audio libraries.


      III. The problem I am facing is that there is a screeching sound between every cycle.


      • Probably I need to close the line between cycles? Sounds simple, I can try.

      • But I am also wondering whether this overall approach itself is correct. Any tip, guide, suggestion, or link would be really helpful.

      • Also, I just hard-coded the sample rate etc. (44100 Hz etc.); is that a good default, or should it depend on the input format?


      IV. Here is the code


      import com.github.kokorin.jaffree.StreamType;
      import com.github.kokorin.jaffree.ffmpeg.FFmpeg;
      import com.github.kokorin.jaffree.ffmpeg.FFmpegProgress;
      import com.github.kokorin.jaffree.ffmpeg.FFmpegResult;
      import com.github.kokorin.jaffree.ffmpeg.NullOutput;
      import com.github.kokorin.jaffree.ffmpeg.PipeOutput;
      import com.github.kokorin.jaffree.ffmpeg.ProgressListener;
      import com.github.kokorin.jaffree.ffprobe.Stream;
      import com.github.kokorin.jaffree.ffmpeg.UrlInput;
      import com.github.kokorin.jaffree.ffprobe.FFprobe;
      import com.github.kokorin.jaffree.ffprobe.FFprobeResult;
      import java.io.IOException;
      import java.io.OutputStream;
      import java.nio.file.Path;
      import java.nio.file.Paths;
      import java.util.concurrent.TimeUnit;
      import java.util.concurrent.atomic.AtomicLong;
      import javax.sound.sampled.AudioFormat;
      import javax.sound.sampled.AudioSystem;
      import javax.sound.sampled.DataLine;
      import javax.sound.sampled.SourceDataLine;


      public class FFMpegToRaw {
          Path BIN = Paths.get("f:\\utilities\\ffmpeg-20190413-0ad0533-win64-static\\bin");
          String VIDEO_MP4 = "f:\\org\\TEMPLE\\DeviMahatmyamRecitationAudio\\03_01_Devi Kavacham.mp3";
          FFprobe ffprobe;
          FFmpeg ffmpeg;

          public void basicCheck() throws Exception {
              if (BIN != null) {
                  ffprobe = FFprobe.atPath(BIN);
              } else {
                  ffprobe = FFprobe.atPath();
              }
              FFprobeResult result = ffprobe
                      .setShowStreams(true)
                      .setInput(VIDEO_MP4)
                      .execute();

              for (Stream stream : result.getStreams()) {
                  System.out.println("Stream " + stream.getIndex()
                          + " type " + stream.getCodecType()
                          + " duration " + stream.getDuration(TimeUnit.SECONDS));
              }
              if (BIN != null) {
                  ffmpeg = FFmpeg.atPath(BIN);
              } else {
                  ffmpeg = FFmpeg.atPath();
              }

              // Sometimes ffprobe can't show the exact duration; use ffmpeg transcoding to NULL output to get it
              final AtomicLong durationMillis = new AtomicLong();
              FFmpegResult fFmpegResult = ffmpeg
                      .addInput(
                              UrlInput.fromUrl(VIDEO_MP4)
                      )
                      .addOutput(new NullOutput())
                      .setProgressListener(new ProgressListener() {
                          @Override
                          public void onProgress(FFmpegProgress progress) {
                              durationMillis.set(progress.getTimeMillis());
                          }
                      })
                      .execute();
              System.out.println("audio size - " + fFmpegResult.getAudioSize());
              System.out.println("Exact duration: " + durationMillis.get() + " milliseconds");
          }

          public void toRawAndPlay() throws Exception {
              ProgressListener listener = new ProgressListener() {
                  @Override
                  public void onProgress(FFmpegProgress progress) {
                      System.out.println(progress.getFrame());
                  }
              };

              // code derived from : https://stackoverflow.com/questions/32873596/play-raw-pcm-audio-received-in-udp-packets

              int sampleRate = 44100; //24000; //Hz
              int sampleSize = 16;    //Bits
              int channels   = 1;
              boolean signed = true;
              boolean bigEnd = false;
              String format  = "s16be"; //"f32le"
              // editorial note: "s16be" asks ffmpeg for big-endian samples while the
              // AudioFormat below is little-endian (bigEnd = false); the two must match

              //https://trac.ffmpeg.org/wiki/audio types
              final AudioFormat af = new AudioFormat(sampleRate, sampleSize, channels, signed, bigEnd);
              final DataLine.Info info = new DataLine.Info(SourceDataLine.class, af);
              final SourceDataLine line = (SourceDataLine) AudioSystem.getLine(info);

              line.open(af, 4096); // format, buffer size
              line.start();

              OutputStream destination = new OutputStream() {
                  @Override public void write(int b) throws IOException {
                      throw new UnsupportedOperationException("Nobody uses this.");
                  }
                  @Override public void write(byte[] b, int off, int len) throws IOException {
                      String o = new String(b);
                      boolean showString = false;
                      System.out.println("New output (" + len
                              + ", off=" + off + ") -> " + (showString ? o : ""));
                      // output wave form repeatedly

                      if (len % 2 != 0) {
                          len -= 1;
                          System.out.println("");
                      }
                      line.write(b, off, len);
                      System.out.println("done round");
                  }
              };

              // src : http://blog.wudilabs.org/entry/c3d357ed/?lang=en-US
              FFmpegResult result = FFmpeg.atPath(BIN).
                  addInput(UrlInput.fromPath(Paths.get(VIDEO_MP4))).
                  addOutput(PipeOutput.pumpTo(destination).
                      disableStream(StreamType.VIDEO). //.addArgument("-vn")
                      setFrameRate(sampleRate).        //.addArguments("-ar", sampleRate)
                      addArguments("-ac", "1").
                      setFormat(format)                //.addArguments("-f", format)
                  ).
                  setProgressListener(listener).
                  execute();

              // shut down audio
              line.drain();
              line.stop();
              line.close();

              System.out.println("result = " + result.toString());
          }

          public static void main(String[] args) throws Exception {
              FFMpegToRaw raw = new FFMpegToRaw();
              raw.basicCheck();
              raw.toRawAndPlay();
          }
      }


      Thank You
