Recherche avancée

Médias (3)

Mot : - Tags -/spip

Autres articles (47)

  • Mise à jour de la version 0.1 vers 0.2

    24 juin 2013, par

    Explications des différents changements notables lors du passage de la version 0.1 de MediaSPIP à la version 0.3. Quelles sont les nouveautés
    Au niveau des dépendances logicielles Utilisation des dernières versions de FFMpeg (>= v1.2.1) ; Installation des dépendances pour Smush ; Installation de MediaInfo et FFprobe pour la récupération des métadonnées ; On n’utilise plus ffmpeg2theora ; On n’installe plus flvtool2 au profit de flvtool++ ; On n’installe plus ffmpeg-php qui n’est plus maintenu au (...)

  • Personnaliser en ajoutant son logo, sa bannière ou son image de fond

    5 septembre 2013, par

    Certains thèmes prennent en compte trois éléments de personnalisation : l’ajout d’un logo ; l’ajout d’une bannière l’ajout d’une image de fond ;

  • Ecrire une actualité

    21 juin 2013, par

    Présentez les changements dans votre MédiaSPIP ou les actualités de vos projets sur votre MédiaSPIP grâce à la rubrique actualités.
    Dans le thème par défaut spipeo de MédiaSPIP, les actualités sont affichées en bas de la page principale sous les éditoriaux.
    Vous pouvez personnaliser le formulaire de création d’une actualité.
    Formulaire de création d’une actualité Dans le cas d’un document de type actualité, les champs proposés par défaut sont : Date de publication ( personnaliser la date de publication ) (...)

Sur d’autres sites (5174)

  • Recording voice using HTML5 and processing it with ffmpeg

    22 mars 2015, par user3789242

    I need to use ffmpeg in my javascript/HTML5 project, which allows the user to select the format he wants the audio to open with. I don’t know anything about ffmpeg and I’ve been doing lots of research, but I don’t know how to use it in my project. I found an example https://github.com/sopel39/audioconverter.js but the problem is how can I install the ffmpeg.js, which is 8 MB, into my project. Please, if someone can help me, I’ll be very thankful.
    here is my full code :

    the javascript page :

    // variables
    // --- Shared recorder state (file-scope globals used by getVal/success) ---
    var leftchannel = [];      // Float32Array chunks captured from channel 0
    var rightchannel = [];     // Float32Array chunks captured from channel 1
    var recorder = null;       // ScriptProcessorNode, created in success()
    var recording = false;     // true between getVal("record") and getVal("stop")
    var recordingLength = 0;   // total samples buffered per channel
    var volume = null;         // GainNode, created in success()
    var audioInput = null;     // NOTE(review): declared but never assigned in the visible code
    var sampleRate = 44100;    // assumed capture rate — TODO confirm; context.sampleRate may differ
    var audioContext = null;   // AudioContext constructor (possibly vendor-prefixed)
    var context = null;        // the live AudioContext instance
    var outputString;          // NOTE(review): declared but unused in the visible code



    // Polyfill: fall back to the vendor-prefixed getUserMedia implementations.
    if (!navigator.getUserMedia)
    navigator.getUserMedia = navigator.getUserMedia ||
                            navigator.webkitGetUserMedia ||
                            navigator.mozGetUserMedia ||
                            navigator.msGetUserMedia;

    // Ask for microphone access; success() builds the capture graph on grant.
    if (navigator.getUserMedia){
    navigator.getUserMedia({audio:true}, success, function(e) {
    alert('Error capturing audio.');
    });
    } else alert('getUserMedia not supported in this browser.');



    // Record / stop button dispatcher.
    //
    // value == "record": reset the capture buffers and start recording.
    // value == "stop"  : stop recording, flatten and interleave the two channel
    //                    buffers, wrap them in a 44-byte RIFF/WAVE header
    //                    (16-bit stereo little-endian PCM) and append an
    //                    <audio> preview plus a download link to the
    //                    page-global `recordingList` element — presumably a
    //                    <ul>/<ol> in the HTML; TODO confirm it exists.
    // Any other value is ignored.
    function getVal(value)
     {

    // if R is pressed, we start recording
    if ( value == "record"){
       recording = true;
       // drop any samples left over from a previous take
       leftchannel.length = rightchannel.length = 0;
       recordingLength = 0;
       document.getElementById('output').innerHTML="Recording now...";
       return;
    }

    // if S is pressed, we stop the recording and package the WAV file
    if ( value != "stop" ) return;

    // we stop recording
    recording = false;
    document.getElementById('output').innerHTML="Building wav file...";

    // flatten each channel's chunk list, then interleave L/R samples
    var leftBuffer = mergeBuffers ( leftchannel, recordingLength );
    var rightBuffer = mergeBuffers ( rightchannel, recordingLength );
    var interleaved = interleave ( leftBuffer, rightBuffer );

    // 44-byte canonical WAV header followed by the 16-bit PCM payload
    var dataLength = interleaved.length * 2;
    var buffer = new ArrayBuffer(44 + dataLength);
    var view = new DataView(buffer);

    // RIFF chunk descriptor. The size field is the file length minus the
    // 8 bytes of the 'RIFF' tag and the field itself, i.e. 36 + data size.
    // (BUGFIX: the original wrote 44 + data size, overstating it by 8.)
    writeUTFBytes(view, 0, 'RIFF');
    view.setUint32(4, 36 + dataLength, true);
    writeUTFBytes(view, 8, 'WAVE');
    // 'fmt ' sub-chunk: PCM, stereo, sampleRate Hz, 16 bits per sample
    writeUTFBytes(view, 12, 'fmt ');
    view.setUint32(16, 16, true);             // sub-chunk size
    view.setUint16(20, 1, true);              // audio format 1 = PCM
    view.setUint16(22, 2, true);              // stereo (2 channels)
    view.setUint32(24, sampleRate, true);
    view.setUint32(28, sampleRate * 4, true); // byte rate = rate * 2 ch * 2 bytes
    view.setUint16(32, 4, true);              // block align = 2 ch * 2 bytes
    view.setUint16(34, 16, true);             // bits per sample
    // 'data' sub-chunk
    writeUTFBytes(view, 36, 'data');
    view.setUint32(40, dataLength, true);

    // Scale [-1, 1] float samples to signed 16-bit integers.
    // (`gain` renamed from `volume`, which shadowed the global GainNode.)
    var gain = 1;
    var writeOffset = 44;
    for (var i = 0; i < interleaved.length; i++){
        view.setInt16(writeOffset, interleaved[i] * (0x7FFF * gain), true);
        writeOffset += 2;
    }

    var blob = new Blob ( [ view ], { type : 'audio/wav' } );

    // hand the finished file to the page as a playable/downloadable link
    document.getElementById('output').innerHTML='Handing off the file now...';
    var url = (window.URL || window.webkitURL).createObjectURL(blob);

    var li = document.createElement('li');
    var au = document.createElement('audio');
    var hf = document.createElement('a');

    au.controls = true;
    au.src = url;
    hf.href = url;
    hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
    hf.innerHTML = hf.download;
    li.appendChild(au);
    li.appendChild(hf);
    recordingList.appendChild(li);
    }


    // getUserMedia success callback. Builds the Web Audio capture graph
    //   mic stream -> gain -> splitter -> analyser -> script processor -> speakers
    // and installs an onaudioprocess hook that both buffers raw samples
    // (while `recording` is true) and paints a spectrum visualizer on #myCanvas.
    // NOTE(review): `source`, `analyser` and `splitter` are implicit globals
    // (no var declaration).
    function success(e){

    audioContext = window.AudioContext || window.webkitAudioContext;
    context = new audioContext();


    volume = context.createGain();

    // creates an audio node from the microphone incoming stream(source)
    source = context.createMediaStreamSource(e);

    // connect the stream(source) to the gain node
    source.connect(volume);

    var bufferSize = 2048;

    // 2 input channels, 2 output channels.
    // NOTE(review): ScriptProcessorNode is deprecated; AudioWorklet replaces it.
    recorder = context.createScriptProcessor(bufferSize, 2, 2);

    //node for the visualizer
    analyser = context.createAnalyser();
    analyser.smoothingTimeConstant = 0.3;
    analyser.fftSize = 512;

    splitter = context.createChannelSplitter();
    //when recording happens
    recorder.onaudioprocess = function(e){

       // only buffer samples after getVal("record") has set the flag
       if (!recording) return;
       var left = e.inputBuffer.getChannelData (0);
       var right = e.inputBuffer.getChannelData (1);

       // snapshot the samples — new Float32Array(...) copies the data
       leftchannel.push (new Float32Array (left));
       rightchannel.push (new Float32Array (right));
       recordingLength += bufferSize;

       // get the average for the first channel
       var array =  new Uint8Array(analyser.frequencyBinCount);
       analyser.getByteFrequencyData(array);

       // repaint the spectrum on every audio callback
       var c=document.getElementById("myCanvas");
       var ctx = c.getContext("2d");
       // clear the current state
       ctx.clearRect(0, 0, 1000, 325);
       var gradient = ctx.createLinearGradient(0,0,0,300);
       gradient.addColorStop(1,'#000000');
       gradient.addColorStop(0.75,'#ff0000');
       gradient.addColorStop(0.25,'#ffff00');
       gradient.addColorStop(0,'#ffffff');
       // set the fill style
       ctx.fillStyle=gradient;
       drawSpectrum(array);
       // one 3px-wide bar per frequency bin, 5px apart, anchored at y=325
       function drawSpectrum(array) {
           for ( var i = 0; i < (array.length); i++ ){
                   var value = array[i];
                   ctx.fillRect(i*5,325-value,3,325);
               }

       }
    }

    // NOTE(review): defined but never called anywhere in the visible code.
    function getAverageVolume(array) {
       var values = 0;
       var average;

       var length = array.length;

       // get all the frequency amplitudes
       for (var i = 0; i < length; i++) {
           values += array[i];
       }

       average = values / length;
       return average;
    }

       // we connect the recorder(node to destination(speakers))
       volume.connect(splitter);
       splitter.connect(analyser, 0, 0);

       analyser.connect(recorder);
       recorder.connect(context.destination);

    }




    // Concatenates a list of Float32Array chunks into a single Float32Array
    // holding `recordingLength` samples; chunks are copied in order.
    function mergeBuffers(channelBuffer, recordingLength){
        var merged = new Float32Array(recordingLength);
        var writeOffset = 0;
        for (var c = 0; c < channelBuffer.length; c++) {
            var chunk = channelBuffer[c];
            merged.set(chunk, writeOffset);
            writeOffset += chunk.length;
        }
        return merged;
    }

    // Zips the two channel buffers into one Float32Array laid out as
    // [L0, R0, L1, R1, ...]. The result length is the sum of both inputs;
    // the callers always pass two equally long buffers.
    function interleave(leftChannel, rightChannel){
        var total = leftChannel.length + rightChannel.length;
        var mixed = new Float32Array(total);
        var readIndex = 0;
        var writeIndex = 0;
        while (writeIndex < total) {
            mixed[writeIndex++] = leftChannel[readIndex];
            mixed[writeIndex++] = rightChannel[readIndex];
            readIndex++;
        }
        return mixed;
    }


    // Writes `string` into `view` one byte per character starting at `offset`.
    // Only used for ASCII chunk tags ('RIFF', 'WAVE', 'fmt ', 'data'), so the
    // charCodeAt -> setUint8 truncation is safe for all actual inputs.
    function writeUTFBytes(view, offset, string){
        var position = offset;
        for (var i = 0; i < string.length; i += 1) {
            view.setUint8(position, string.charCodeAt(i));
            position += 1;
        }
    }

    and here is the html code :

       
       
       
       <code class="echappe-js">&lt;script src=&quot;http://stackoverflow.com/feeds/tag/js/functions.js&quot;&gt;&lt;/script&gt;


    • audio convert to mp3,pcm and vox using ffmpeg

      8 juillet 2014, par user3789242

      Please can someone help me with the code for ffmpeg.
      I have to use ffmpeg to convert a recorder voice (using HTML5) into mp3,pcm or vox depending on the user’s selection.
      I don’t know how to write the code for ffmpeg if some one can help me with the code or libraries.
      Thank you in advance.

      Here is my code for recording the voice with a visualizer :

      // variables
      var leftchannel = [];
      var rightchannel = [];
      var recorder = null;
      var recording = false;
      var recordingLength = 0;
      var volume = null;
      var audioInput = null;
      var sampleRate = 44100;
      var audioContext = null;
      var context = null;
      var outputString;



      if (!navigator.getUserMedia)
        navigator.getUserMedia = navigator.getUserMedia ||
                              navigator.webkitGetUserMedia ||
                              navigator.mozGetUserMedia ||
                              navigator.msGetUserMedia;

      if (navigator.getUserMedia){
      navigator.getUserMedia({audio:true}, success, function(e) {
      alert('Error capturing audio.');
      });
      } else alert('getUserMedia not supported in this browser.');



      function getVal(value)
       {

      // if R is pressed, we start recording
      if ( value == "record"){
         recording = true;
         // reset the buffers for the new recording
         leftchannel.length = rightchannel.length = 0;
         recordingLength = 0;
         document.getElementById('output').innerHTML="Recording now...";

      // if S is pressed, we stop the recording and package the WAV file
      } else if ( value == "stop" ){

         // we stop recording
         recording = false;
         document.getElementById('output').innerHTML="Building wav file...";

         // we flat the left and right channels down
         var leftBuffer = mergeBuffers ( leftchannel, recordingLength );
         var rightBuffer = mergeBuffers ( rightchannel, recordingLength );
         // we interleave both channels together
         var interleaved = interleave ( leftBuffer, rightBuffer );



         var buffer = new ArrayBuffer(44 + interleaved.length * 2);
         var view = new DataView(buffer);

         // RIFF chunk descriptor
         writeUTFBytes(view, 0, 'RIFF');
         view.setUint32(4, 44 + interleaved.length * 2, true);
         writeUTFBytes(view, 8, 'WAVE');
         // FMT sub-chunk
         writeUTFBytes(view, 12, 'fmt ');
         view.setUint32(16, 16, true);
         view.setUint16(20, 1, true);
         // stereo (2 channels)
         view.setUint16(22, 2, true);
         view.setUint32(24, sampleRate, true);
         view.setUint32(28, sampleRate * 4, true);
         view.setUint16(32, 4, true);
         view.setUint16(34, 16, true);
         // data sub-chunk
         writeUTFBytes(view, 36, 'data');
         view.setUint32(40, interleaved.length * 2, true);


         var lng = interleaved.length;
         var index = 44;
         var volume = 1;
         for (var i = 0; i < lng; i++){
             view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
             index += 2;
         }

         var blob = new Blob ( [ view ], { type : 'audio/wav' } );

         // let's save it locally

         document.getElementById('output').innerHTML='Handing off the file now...';
         var url = (window.URL || window.webkitURL).createObjectURL(blob);

         var li = document.createElement('li');
         var au = document.createElement('audio');
         var hf = document.createElement('a');

         au.controls = true;
         au.src = url;
         hf.href = url;
         hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
         hf.innerHTML = hf.download;
         li.appendChild(au);
         li.appendChild(hf);
         recordingList.appendChild(li);

      }
      }


      function success(e){

      audioContext = window.AudioContext || window.webkitAudioContext;
      context = new audioContext();


      volume = context.createGain();

      // creates an audio node from the microphone incoming stream(source)
      source = context.createMediaStreamSource(e);

      // connect the stream(source) to the gain node
      source.connect(volume);

      var bufferSize = 2048;

      recorder = context.createScriptProcessor(bufferSize, 2, 2);

      //node for the visualizer
      analyser = context.createAnalyser();
      analyser.smoothingTimeConstant = 0.3;
      analyser.fftSize = 512;

      splitter = context.createChannelSplitter();
      //when recording happens
      recorder.onaudioprocess = function(e){

         if (!recording) return;
         var left = e.inputBuffer.getChannelData (0);
         var right = e.inputBuffer.getChannelData (1);

         leftchannel.push (new Float32Array (left));
         rightchannel.push (new Float32Array (right));
         recordingLength += bufferSize;

         // get the average for the first channel
         var array =  new Uint8Array(analyser.frequencyBinCount);
         analyser.getByteFrequencyData(array);

         var c=document.getElementById("myCanvas");
         var ctx = c.getContext("2d");
         // clear the current state
         ctx.clearRect(0, 0, 1000, 325);
         var gradient = ctx.createLinearGradient(0,0,0,300);
         gradient.addColorStop(1,'#000000');
         gradient.addColorStop(0.75,'#ff0000');
         gradient.addColorStop(0.25,'#ffff00');
         gradient.addColorStop(0,'#ffffff');
         // set the fill style
         ctx.fillStyle=gradient;
         drawSpectrum(array);
         function drawSpectrum(array) {
              for ( var i = 0; i < (array.length); i++ ){
                     var value = array[i];
                     ctx.fillRect(i*5,325-value,3,325);
                 }

         }
      }

      function getAverageVolume(array) {
         var values = 0;
         var average;

         var length = array.length;

         // get all the frequency amplitudes
         for (var i = 0; i < length; i++) {
             values += array[i];
         }

         average = values / length;
         return average;
      }

         // we connect the recorder(node to destination(speakers))
         volume.connect(splitter);
         splitter.connect(analyser, 0, 0);

         analyser.connect(recorder);
         recorder.connect(context.destination);

      }




      function mergeBuffers(channelBuffer, recordingLength){
      var result = new Float32Array(recordingLength);
      var offset = 0;
      var lng = channelBuffer.length;
      for (var i = 0; i < lng; i++){
      var buffer = channelBuffer[i];
      result.set(buffer, offset);
      offset += buffer.length;
      }
         return result;
        }

      function interleave(leftChannel, rightChannel){  
      var length = leftChannel.length + rightChannel.length;
      var result = new Float32Array(length);

      var inputIndex = 0;

      for (var index = 0; index < length; ){
      result[index++] = leftChannel[inputIndex];
      result[index++] = rightChannel[inputIndex];
      inputIndex++;
      }
      return result;
      }


      function writeUTFBytes(view, offset, string){
      var lng = string.length;
      for (var i = 0; i < lng; i++){

      view.setUint8(offset + i, string.charCodeAt(i));
      }
      }
    • Batch merge videos two-by-two

      18 février 2021, par dellyice

      I have a directory with 1000+ video files. I'd like to concatenate them two-by-two.

      &#xA;

      An alphabetical ordering of the files give the desired pairs, e.g., the input files

      &#xA;

       filename_1.mp4
       filename_2.mp4
       filename_3.mp4
       filename_4.mp4
       ...

      &#xA;

      should result in output files

      &#xA;

       filename_1-2.mp4
       filename_3-4.mp4
       ...

      &#xA;

      The input files all have the same dimensions and formats.

      &#xA;

      How can I write a batch script invoking ffmpeg to achieve this ?

      &#xA;