
Other articles (54)

  • Managing creation and editing rights for objects

    8 February 2011, by

    By default, many features are restricted to administrators but can be configured independently to change the minimum status required to use them, in particular: writing content on the site, adjustable in the form template management; adding notes to articles; adding captions and annotations to images;

  • Images

    15 May 2013
  • Supporting all media types

    13 April 2011, by

    Unlike most software and media-sharing platforms, MediaSPIP aims to manage as many different media types as possible. The following are just a few examples from an ever-expanding list of supported formats: images: png, gif, jpg, bmp and more; audio: MP3, Ogg, Wav and more; video: AVI, MP4, OGV, mpg, mov, wmv and more; text, code and other data: OpenOffice, Microsoft Office (Word, PowerPoint, Excel), web (html, CSS), LaTeX, Google Earth and (...)

On other sites (6105)

  • Batch merge videos two-by-two

    18 February 2021, by dellyice

    I have a directory with 1000+ video files. I'd like to concatenate them two-by-two.

    An alphabetical ordering of the files gives the desired pairs, e.g., the input files

     filename_1.mp4
     filename_2.mp4
     filename_3.mp4
     filename_4.mp4
     ...

    should result in output files

     filename_1-2.mp4
     filename_3-4.mp4
     ...

    The input files all have the same dimensions and formats.

    How can I write a batch script invoking ffmpeg to achieve this?

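    One possible answer, sketched here rather than taken from the thread: list the files alphabetically, write a two-line concat list for each pair, and run ffmpeg's concat demuxer with stream copy (valid only because all inputs share the same codecs and dimensions). The script below is a hypothetical Node.js helper; the directory argument, the pair_*.txt temp files and the output-naming rule are assumptions, and it expects ffmpeg on the PATH.

     // pair-concat.js -- hypothetical helper; run as: node pair-concat.js /path/to/videos
     const { execFileSync } = require('child_process');
     const fs = require('fs');
     const path = require('path');

     const dir = process.argv[2] || '.';
     // collect the .mp4 files in alphabetical order, as in the question
     const files = fs.readdirSync(dir).filter(f => f.endsWith('.mp4')).sort();

     for (let i = 0; i + 1 < files.length; i += 2) {
       const a = files[i], b = files[i + 1];
       // the concat demuxer reads its inputs from a small text file
       const listFile = path.join(dir, `pair_${i}.txt`);
       fs.writeFileSync(listFile, `file '${a}'\nfile '${b}'\n`);
       // name the output filename_1-2.mp4 etc.; assumes the names end in a number
       const numB = (b.match(/(\d+)\.mp4$/) || [null, 'b'])[1];
       const out = path.join(dir, `${path.parse(a).name}-${numB}.mp4`);
       // -c copy concatenates without re-encoding, which only works because the
       // inputs all have identical dimensions and formats
       execFileSync('ffmpeg', ['-f', 'concat', '-safe', '0', '-i', listFile, '-c', 'copy', out],
                    { stdio: 'inherit' });
       fs.unlinkSync(listFile);
     }
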
  • audio convert to mp3, pcm and vox using ffmpeg

    8 July 2014, by user3789242

    Can someone please help me with the ffmpeg code?
    I have to use ffmpeg to convert a voice recording (captured with HTML5) into mp3, pcm or vox depending on the user's selection.
    I don't know how to write the ffmpeg code, so any help with the code or libraries would be appreciated.
    Thank you in advance. (One possible server-side conversion step is sketched after the code below.)

    Here is my code for recording the voice with a visualizer:

    // variables
    var leftchannel = [];
    var rightchannel = [];
    var recorder = null;
    var recording = false;
    var recordingLength = 0;
    var volume = null;
    var audioInput = null;
    var sampleRate = 44100;
    var audioContext = null;
    var context = null;
    var outputString;



    if (!navigator.getUserMedia)
      navigator.getUserMedia = navigator.getUserMedia ||
                            navigator.webkitGetUserMedia ||
                            navigator.mozGetUserMedia ||
                            navigator.msGetUserMedia;

    if (navigator.getUserMedia){
    navigator.getUserMedia({audio:true}, success, function(e) {
    alert('Error capturing audio.');
    });
    } else alert('getUserMedia not supported in this browser.');



    function getVal(value)
     {

    // if R is pressed, we start recording
    if ( value == "record"){
       recording = true;
       // reset the buffers for the new recording
       leftchannel.length = rightchannel.length = 0;
       recordingLength = 0;
       document.getElementById('output').innerHTML="Recording now...";

    // if S is pressed, we stop the recording and package the WAV file
    } else if ( value == "stop" ){

       // we stop recording
       recording = false;
       document.getElementById('output').innerHTML="Building wav file...";

       // we flat the left and right channels down
       var leftBuffer = mergeBuffers ( leftchannel, recordingLength );
       var rightBuffer = mergeBuffers ( rightchannel, recordingLength );
       // we interleave both channels together
       var interleaved = interleave ( leftBuffer, rightBuffer );



       var buffer = new ArrayBuffer(44 + interleaved.length * 2);
       var view = new DataView(buffer);

       // RIFF chunk descriptor
       writeUTFBytes(view, 0, 'RIFF');
       view.setUint32(4, 44 + interleaved.length * 2, true);
       writeUTFBytes(view, 8, 'WAVE');
       // FMT sub-chunk
       writeUTFBytes(view, 12, 'fmt ');
       view.setUint32(16, 16, true);
       view.setUint16(20, 1, true);
       // stereo (2 channels)
       view.setUint16(22, 2, true);
       view.setUint32(24, sampleRate, true);
       view.setUint32(28, sampleRate * 4, true);
       view.setUint16(32, 4, true);
       view.setUint16(34, 16, true);
       // data sub-chunk
       writeUTFBytes(view, 36, 'data');
       view.setUint32(40, interleaved.length * 2, true);


       // write each sample as 16-bit signed PCM after the 44-byte header
       var lng = interleaved.length;
       var index = 44;
       var volume = 1;
       for (var i = 0; i < lng; i++){
           view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
           index += 2;
       }

       var blob = new Blob ( [ view ], { type : 'audio/wav' } );

       // let's save it locally

       document.getElementById('output').innerHTML='Handing off the file now...';
       var url = (window.URL || window.webkitURL).createObjectURL(blob);

       var li = document.createElement('li');
       var au = document.createElement('audio');
       var hf = document.createElement('a');

       au.controls = true;
       au.src = url;
       hf.href = url;
       hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
       hf.innerHTML = hf.download;
       li.appendChild(au);
       li.appendChild(hf);
       recordingList.appendChild(li);

    }
    }


    function success(e){

    audioContext = window.AudioContext || window.webkitAudioContext;
    context = new audioContext();


    volume = context.createGain();

    // creates an audio node from the microphone incoming stream(source)
    source = context.createMediaStreamSource(e);

    // connect the stream(source) to the gain node
    source.connect(volume);

    var bufferSize = 2048;

    recorder = context.createScriptProcessor(bufferSize, 2, 2);

    //node for the visualizer
    analyser = context.createAnalyser();
    analyser.smoothingTimeConstant = 0.3;
    analyser.fftSize = 512;

    splitter = context.createChannelSplitter();
    //when recording happens
    recorder.onaudioprocess = function(e){

       if (!recording) return;
       var left = e.inputBuffer.getChannelData (0);
       var right = e.inputBuffer.getChannelData (1);

       leftchannel.push (new Float32Array (left));
       rightchannel.push (new Float32Array (right));
       recordingLength += bufferSize;

       // get the average for the first channel
       var array =  new Uint8Array(analyser.frequencyBinCount);
       analyser.getByteFrequencyData(array);

       var c=document.getElementById("myCanvas");
       var ctx = c.getContext("2d");
       // clear the current state
       ctx.clearRect(0, 0, 1000, 325);
       var gradient = ctx.createLinearGradient(0,0,0,300);
       gradient.addColorStop(1,'#000000');
       gradient.addColorStop(0.75,'#ff0000');
       gradient.addColorStop(0.25,'#ffff00');
       gradient.addColorStop(0,'#ffffff');
       // set the fill style
       ctx.fillStyle=gradient;
       drawSpectrum(array);
       function drawSpectrum(array) {
           for ( var i = 0; i < (array.length); i++ ){
                   var value = array[i];
                   ctx.fillRect(i*5,325-value,3,325);
               }

       }
    }

    function getAverageVolume(array) {
       var values = 0;
       var average;

       var length = array.length;

       // get all the frequency amplitudes
       for (var i = 0; i < length; i++) {
           values += array[i];
       }

       average = values / length;
       return average;
    }

       // connect the node graph: volume -> splitter -> analyser -> recorder -> destination (speakers)
       volume.connect(splitter);
       splitter.connect(analyser, 0, 0);

       analyser.connect(recorder);
       recorder.connect(context.destination);

    }




    // flatten the list of per-callback Float32Array chunks into one continuous buffer
    function mergeBuffers(channelBuffer, recordingLength){
    var result = new Float32Array(recordingLength);
    var offset = 0;
    var lng = channelBuffer.length;
    for (var i = 0; i < lng; i++){
    var buffer = channelBuffer[i];
    result.set(buffer, offset);
    offset += buffer.length;
    }
       return result;
      }

    // interleave the left and right samples into a single L/R/L/R... array for the WAV body
    function interleave(leftChannel, rightChannel){
    var length = leftChannel.length + rightChannel.length;
    var result = new Float32Array(length);

    var inputIndex = 0;

    for (var index = 0; index < length; ){
    result[index++] = leftChannel[inputIndex];
    result[index++] = rightChannel[inputIndex];
    inputIndex++;
    }
    return result;
    }


    // write an ASCII string (e.g. 'RIFF', 'WAVE') byte-by-byte into the DataView
    function writeUTFBytes(view, offset, string){
    var lng = string.length;
    for (var i = 0; i < lng; i++){

    view.setUint8(offset + i, string.charCodeAt(i));
    }
    }
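
    One way to do the conversion step the question asks about, sketched below and not taken from the original post: upload the WAV produced above to a server and shell out to ffmpeg there. The helper names and file paths are assumptions; the ffmpeg options are standard ones (libmp3lame for MP3, pcm_s16le for headerless PCM), while the Dialogic .vox target is left out because support for it depends on the ffmpeg build and would need to be checked.

     // convert.js -- hypothetical server-side helper (Node.js), assumes ffmpeg on the PATH
     const { execFileSync } = require('child_process');

     // Convert an uploaded WAV recording to the format the user picked ('mp3' or 'pcm').
     function convertRecording(inputWav, format) {
       if (format === 'mp3') {
         // VBR MP3 via libmp3lame, quality 2 (roughly 170-210 kbit/s)
         const out = inputWav.replace(/\.wav$/i, '.mp3');
         execFileSync('ffmpeg', ['-y', '-i', inputWav, '-codec:a', 'libmp3lame', '-qscale:a', '2', out]);
         return out;
       }
       if (format === 'pcm') {
         // headerless 16-bit little-endian PCM, keeping the input sample rate and channel count
         const out = inputWav.replace(/\.wav$/i, '.pcm');
         execFileSync('ffmpeg', ['-y', '-i', inputWav, '-f', 's16le', '-acodec', 'pcm_s16le', out]);
         return out;
       }
       throw new Error('unsupported format: ' + format);
     }

     // usage: node convert.js recording.wav mp3
     console.log(convertRecording(process.argv[2], process.argv[3] || 'mp3'));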
  • Recording voice using HTML5 and processing it with ffmpeg

    22 March 2015, by user3789242

    I need to use ffmpeg in my JavaScript/HTML5 project, which lets the user select the format the audio should be saved in. I don't know anything about ffmpeg, and despite a lot of research I still don't know how to use it in my project. I found an example, https://github.com/sopel39/audioconverter.js, but the problem is how to add ffmpeg.js, which is 8 MB, to my project. If someone can help me I'll be very thankful. (An upload-based alternative is sketched after the full code below.)
    Here is my full code:

    The JavaScript page:

    // variables
    var leftchannel = [];
    var rightchannel = [];
    var recorder = null;
    var recording = false;
    var recordingLength = 0;
    var volume = null;
    var audioInput = null;
    var sampleRate = 44100;
    var audioContext = null;
    var context = null;
    var outputString;



    if (!navigator.getUserMedia)
    navigator.getUserMedia = navigator.getUserMedia ||
                            navigator.webkitGetUserMedia ||
                            navigator.mozGetUserMedia ||
                            navigator.msGetUserMedia;

    if (navigator.getUserMedia){
    navigator.getUserMedia({audio:true}, success, function(e) {
    alert('Error capturing audio.');
    });
    } else alert('getUserMedia not supported in this browser.');



    function getVal(value)
     {

    // if R is pressed, we start recording
    if ( value == "record"){
       recording = true;
       // reset the buffers for the new recording
       leftchannel.length = rightchannel.length = 0;
       recordingLength = 0;
       document.getElementById('output').innerHTML="Recording now...";

    // if S is pressed, we stop the recording and package the WAV file
    } else if ( value == "stop" ){

       // we stop recording
       recording = false;
       document.getElementById('output').innerHTML="Building wav file...";

       // we flat the left and right channels down
       var leftBuffer = mergeBuffers ( leftchannel, recordingLength );
       var rightBuffer = mergeBuffers ( rightchannel, recordingLength );
       // we interleave both channels together
       var interleaved = interleave ( leftBuffer, rightBuffer );



       var buffer = new ArrayBuffer(44 + interleaved.length * 2);
       var view = new DataView(buffer);

       // RIFF chunk descriptor
       writeUTFBytes(view, 0, 'RIFF');
       view.setUint32(4, 44 + interleaved.length * 2, true);
       writeUTFBytes(view, 8, 'WAVE');
       // FMT sub-chunk
       writeUTFBytes(view, 12, 'fmt ');
       view.setUint32(16, 16, true);
       view.setUint16(20, 1, true);
       // stereo (2 channels)
       view.setUint16(22, 2, true);
       view.setUint32(24, sampleRate, true);
       view.setUint32(28, sampleRate * 4, true);
       view.setUint16(32, 4, true);
       view.setUint16(34, 16, true);
       // data sub-chunk
       writeUTFBytes(view, 36, 'data');
       view.setUint32(40, interleaved.length * 2, true);


       // write each sample as 16-bit signed PCM after the 44-byte header
       var lng = interleaved.length;
       var index = 44;
       var volume = 1;
       for (var i = 0; i < lng; i++){
           view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
           index += 2;
       }

       var blob = new Blob ( [ view ], { type : 'audio/wav' } );

       // let's save it locally

       document.getElementById('output').innerHTML='Handing off the file now...';
       var url = (window.URL || window.webkitURL).createObjectURL(blob);

       var li = document.createElement('li');
       var au = document.createElement('audio');
       var hf = document.createElement('a');

       au.controls = true;
       au.src = url;
       hf.href = url;
       hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
       hf.innerHTML = hf.download;
       li.appendChild(au);
       li.appendChild(hf);
       recordingList.appendChild(li);

    }
    }


    function success(e){

    audioContext = window.AudioContext || window.webkitAudioContext;
    context = new audioContext();


    volume = context.createGain();

    // creates an audio node from the microphone incoming stream(source)
    source = context.createMediaStreamSource(e);

    // connect the stream(source) to the gain node
    source.connect(volume);

    var bufferSize = 2048;

    recorder = context.createScriptProcessor(bufferSize, 2, 2);

    //node for the visualizer
    analyser = context.createAnalyser();
    analyser.smoothingTimeConstant = 0.3;
    analyser.fftSize = 512;

    splitter = context.createChannelSplitter();
    //when recording happens
    recorder.onaudioprocess = function(e){

       if (!recording) return;
       var left = e.inputBuffer.getChannelData (0);
       var right = e.inputBuffer.getChannelData (1);

       leftchannel.push (new Float32Array (left));
       rightchannel.push (new Float32Array (right));
       recordingLength += bufferSize;

       // get the average for the first channel
       var array =  new Uint8Array(analyser.frequencyBinCount);
       analyser.getByteFrequencyData(array);

       var c=document.getElementById("myCanvas");
       var ctx = c.getContext("2d");
       // clear the current state
       ctx.clearRect(0, 0, 1000, 325);
       var gradient = ctx.createLinearGradient(0,0,0,300);
       gradient.addColorStop(1,'#000000');
       gradient.addColorStop(0.75,'#ff0000');
       gradient.addColorStop(0.25,'#ffff00');
       gradient.addColorStop(0,'#ffffff');
       // set the fill style
       ctx.fillStyle=gradient;
       drawSpectrum(array);
       function drawSpectrum(array) {
           for ( var i = 0; i < (array.length); i++ ){
                   var value = array[i];
                   ctx.fillRect(i*5,325-value,3,325);
               }

       }
    }

    function getAverageVolume(array) {
       var values = 0;
       var average;

       var length = array.length;

       // get all the frequency amplitudes
       for (var i = 0; i < length; i++) {
           values += array[i];
       }

       average = values / length;
       return average;
    }

       // connect the node graph: volume -> splitter -> analyser -> recorder -> destination (speakers)
       volume.connect(splitter);
       splitter.connect(analyser, 0, 0);

       analyser.connect(recorder);
       recorder.connect(context.destination);

    }




    // flatten the list of per-callback Float32Array chunks into one continuous buffer
    function mergeBuffers(channelBuffer, recordingLength){
    var result = new Float32Array(recordingLength);
    var offset = 0;
    var lng = channelBuffer.length;
    for (var i = 0; i < lng; i++){
    var buffer = channelBuffer[i];
    result.set(buffer, offset);
    offset += buffer.length;
    }
       return result;
      }

    // interleave the left and right samples into a single L/R/L/R... array for the WAV body
    function interleave(leftChannel, rightChannel){
    var length = leftChannel.length + rightChannel.length;
    var result = new Float32Array(length);

    var inputIndex = 0;

    for (var index = 0; index < length; ){
    result[index++] = leftChannel[inputIndex];
    result[index++] = rightChannel[inputIndex];
    inputIndex++;
    }
    return result;
    }


    // write an ASCII string (e.g. 'RIFF', 'WAVE') byte-by-byte into the DataView
    function writeUTFBytes(view, offset, string){
    var lng = string.length;
    for (var i = 0; i < lng; i++){

    view.setUint8(offset + i, string.charCodeAt(i));
    }
    }

    And here is the HTML code:

       <script src="http://stackoverflow.com/feeds/tag/js/functions.js"></script>
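
    A common alternative to bundling the 8 MB ffmpeg.js in the page, sketched here as an assumption rather than something from the original post: send the recorded blob to a server and let server-side ffmpeg do the conversion there. The /convert endpoint and field names below are hypothetical.

     // Hypothetical client-side upload of the WAV blob built in getVal('stop') above.
     function uploadRecording(blob, format) {
       var form = new FormData();
       form.append('audio', blob, 'recording.wav');
       form.append('format', format); // e.g. 'mp3' or 'pcm'
       return fetch('/convert', { method: 'POST', body: form })
         .then(function (response) {
           if (!response.ok) throw new Error('conversion failed: ' + response.status);
           return response.blob(); // the converted file comes back as a blob
         });
     }

     // usage, right after the blob is created:
     // uploadRecording(blob, 'mp3').then(function (converted) { /* offer it for download */ });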