
Media (91)

Other articles (46)

  • Sites built with MediaSPIP

    2 May 2011, by

    This page presents some of the sites running MediaSPIP.
    You can of course add your own using the form at the bottom of the page.

  • HTML5 audio and video support

    10 April 2011

    MediaSPIP uses the HTML5 video and audio tags to play multimedia documents, taking advantage of the latest W3C innovations supported by modern browsers.
    For older browsers, the Flowplayer Flash player is used as a fallback.
    The HTML5 player was created specifically for MediaSPIP: its appearance is fully customizable to match a chosen theme.
    These technologies make it possible to deliver video and audio both on conventional computers (...)

  • HTML5 audio and video support

13 April 2011, by

    MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
    The player was created specifically for MediaSPIP and can easily be adapted to fit in with a specific theme.
    For older browsers, the Flowplayer Flash fallback is used.
    MediaSPIP allows for media playback on major mobile platforms with the above (...)

On other sites (7158)

  • ffmpeg on Windows 'Could find no file' error

    14 October 2015, by Brooks

    This is my command:

    "C:\...\ffmpeg.exe" -i "C:\...\audio.mp3" -r 1  -i "C:\...\%%d5.jpg" -qscale:v 1 -shortest -s:v 1280x720  -y -r 30 -pix_fmt yuv420p -filter_complex "[0:a]volume=1[aout]" -map 1:v -map "[aout]" "C:\...\video1.mp4"

    This is the error I get every time:

    [screenshot of the error in a CMD window]

    I’ve searched on Google and found various answers, so I tried %%d5, %%5d, %d5 and %5d, with no luck. (My files are named 00000.jpg, 00001.jpg, ..., 00005.jpg.)

    What am I doing wrong?
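
    For what it's worth, the usual cause of this error (an assumption here, not confirmed in the thread) is the sequence pattern itself: ffmpeg's image2 demuxer expects a printf-style pattern with the width before the d, so files named 00000.jpg through 00005.jpg match %05d rather than %%d5. A minimal sketch of the corrected command, keeping the truncated paths from the question:

    "C:\...\ffmpeg.exe" -i "C:\...\audio.mp3" -r 1 -i "C:\...\%05d.jpg" -qscale:v 1 -shortest -s:v 1280x720 -y -r 30 -pix_fmt yuv420p -filter_complex "[0:a]volume=1[aout]" -map 1:v -map "[aout]" "C:\...\video1.mp4"

    The doubled %% is how a literal percent sign is escaped inside a .bat file, so the pattern would be written %%05d in a batch script but %05d on the interactive command line.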

  • FFMPEG multiple .jpg to .mp4 - different slide lengths in output

    25 octobre 2015, par Brooks

    I have 6 .jpg files in a folder, and with the command below I get a video in which each .jpg plays for 5 seconds, so the video is 30 seconds long in total.

    How can the command be altered so that the first slide gets a different play time (8 seconds) than the rest, making the total output length 33 seconds?

    ffmpeg -i audio.mp3 -framerate 1/5 -i image%d.jpg -c:v libx264 -qscale:v 1 -shortest -s:v 640x480 -tune stillimage -c:a aac -strict experimental -b:a 192k -pix_fmt yuv420p -filter_complex "[0:a]volume=1[aout]" -r 25 -map 1:v -map "[aout]" out6.mp4
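
    One way to get per-slide durations (a sketch under assumptions, not from the thread) is the concat demuxer, which reads a list file with an explicit duration directive after each image instead of a single global -framerate. Assuming the six files are image1.jpg through image6.jpg, a hypothetical images.txt:

    file 'image1.jpg'
    duration 8
    file 'image2.jpg'
    duration 5
    file 'image3.jpg'
    duration 5
    file 'image4.jpg'
    duration 5
    file 'image5.jpg'
    duration 5
    file 'image6.jpg'
    duration 5
    file 'image6.jpg'

    ffmpeg -i audio.mp3 -f concat -i images.txt -c:v libx264 -shortest -s:v 640x480 -tune stillimage -c:a aac -strict experimental -b:a 192k -pix_fmt yuv420p -filter_complex "[0:a]volume=1[aout]" -r 25 -map 1:v -map "[aout]" out6.mp4

    That gives 8 + 5*5 = 33 seconds of video. The last image is listed twice because of a documented quirk of the concat demuxer: without the trailing entry, the final duration directive is not honoured.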

  • audio convert to mp3, pcm and vox using ffmpeg

    8 July 2014, by user3789242

    Can someone please help me with the ffmpeg part?
    I have to use ffmpeg to convert a recorded voice (captured with HTML5) into mp3, pcm or vox depending on the user’s selection.
    I don’t know how to write the ffmpeg code; any help with the code or libraries would be appreciated.
    Thank you in advance.
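
    As a starting point (a sketch, not from the thread; the filenames are hypothetical), once the recording has been uploaded and saved server-side as a WAV file, plain ffmpeg invocations cover the first two targets:

    ffmpeg -i recording.wav -codec:a libmp3lame -q:a 2 recording.mp3
    ffmpeg -i recording.wav -f s16le -acodec pcm_s16le recording.pcm

    The first transcodes to MP3 with libmp3lame at VBR quality 2; the second writes raw signed 16-bit little-endian PCM. Whether a given ffmpeg build can produce Dialogic vox directly would need checking against its list of supported muxers (ffmpeg -formats).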

    Here is my code for recording the voice with a visualizer:

    // variables
    var leftchannel = [];
    var rightchannel = [];
    var recorder = null;
    var recording = false;
    var recordingLength = 0;
    var volume = null;
    var audioInput = null;
    var sampleRate = 44100; // overwritten with the context's real rate in success()
    var audioContext = null;
    var context = null;
    var source = null;   // declared here so they don't leak as implicit globals
    var analyser = null;
    var splitter = null;
    var outputString;



    // normalize the prefixed getUserMedia implementations
    if (!navigator.getUserMedia)
        navigator.getUserMedia = navigator.webkitGetUserMedia ||
                                 navigator.mozGetUserMedia ||
                                 navigator.msGetUserMedia;

    if (navigator.getUserMedia){
        navigator.getUserMedia({audio: true}, success, function(e) {
            alert('Error capturing audio.');
        });
    } else {
        alert('getUserMedia not supported in this browser.');
    }



    function getVal(value){

        // "record" starts a new recording
        if (value == "record"){
            recording = true;
            // reset the buffers for the new recording
            leftchannel.length = rightchannel.length = 0;
            recordingLength = 0;
            document.getElementById('output').innerHTML = "Recording now...";

        // "stop" ends the recording and packages the WAV file
        } else if (value == "stop"){

            // we stop recording
            recording = false;
            document.getElementById('output').innerHTML = "Building wav file...";

            // flatten the left and right channels down
            var leftBuffer = mergeBuffers(leftchannel, recordingLength);
            var rightBuffer = mergeBuffers(rightchannel, recordingLength);
            // interleave both channels together
            var interleaved = interleave(leftBuffer, rightBuffer);

            // 44-byte WAV header followed by the 16-bit PCM samples
            var buffer = new ArrayBuffer(44 + interleaved.length * 2);
            var view = new DataView(buffer);

            // RIFF chunk descriptor
            writeUTFBytes(view, 0, 'RIFF');
            view.setUint32(4, 36 + interleaved.length * 2, true); // file size minus the 8 bytes already written
            writeUTFBytes(view, 8, 'WAVE');
            // FMT sub-chunk
            writeUTFBytes(view, 12, 'fmt ');
            view.setUint32(16, 16, true);             // sub-chunk size
            view.setUint16(20, 1, true);              // audio format: PCM
            view.setUint16(22, 2, true);              // stereo (2 channels)
            view.setUint32(24, sampleRate, true);     // sample rate
            view.setUint32(28, sampleRate * 4, true); // byte rate (2 channels * 2 bytes)
            view.setUint16(32, 4, true);              // block align
            view.setUint16(34, 16, true);             // bits per sample
            // data sub-chunk
            writeUTFBytes(view, 36, 'data');
            view.setUint32(40, interleaved.length * 2, true);

            // write the PCM samples, scaled to 16-bit signed integers
            var lng = interleaved.length;
            var index = 44;
            var volume = 1;
            for (var i = 0; i < lng; i++){
                view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
                index += 2;
            }

            var blob = new Blob([view], {type: 'audio/wav'});

            // let's save it locally
            document.getElementById('output').innerHTML = 'Handing off the file now...';
            var url = (window.URL || window.webkitURL).createObjectURL(blob);

            var li = document.createElement('li');
            var au = document.createElement('audio');
            var hf = document.createElement('a');

            au.controls = true;
            au.src = url;
            hf.href = url;
            hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
            hf.innerHTML = hf.download;
            li.appendChild(au);
            li.appendChild(hf);
            // assumes an element with id "recordingList" exists in the page
            recordingList.appendChild(li);
        }
    }


    function success(e){

        // e is the MediaStream delivered by getUserMedia
        audioContext = window.AudioContext || window.webkitAudioContext;
        context = new audioContext();
        // use the context's real rate so the WAV header matches the data
        sampleRate = context.sampleRate;

        volume = context.createGain();

        // creates an audio node from the microphone incoming stream (source)
        source = context.createMediaStreamSource(e);

        // connect the stream (source) to the gain node
        source.connect(volume);

        var bufferSize = 2048;
        recorder = context.createScriptProcessor(bufferSize, 2, 2);

        // node for the visualizer
        analyser = context.createAnalyser();
        analyser.smoothingTimeConstant = 0.3;
        analyser.fftSize = 512;

        splitter = context.createChannelSplitter();

        // when recording happens
        recorder.onaudioprocess = function(e){

            if (!recording) return;
            var left = e.inputBuffer.getChannelData(0);
            var right = e.inputBuffer.getChannelData(1);

            // copy the data: getChannelData returns buffers that get reused
            leftchannel.push(new Float32Array(left));
            rightchannel.push(new Float32Array(right));
            recordingLength += bufferSize;

            // get the frequency amplitudes for the visualizer
            var array = new Uint8Array(analyser.frequencyBinCount);
            analyser.getByteFrequencyData(array);

            var c = document.getElementById("myCanvas");
            var ctx = c.getContext("2d");
            // clear the current state
            ctx.clearRect(0, 0, 1000, 325);
            var gradient = ctx.createLinearGradient(0, 0, 0, 300);
            gradient.addColorStop(1, '#000000');
            gradient.addColorStop(0.75, '#ff0000');
            gradient.addColorStop(0.25, '#ffff00');
            gradient.addColorStop(0, '#ffffff');
            // set the fill style and draw one bar per frequency bin
            ctx.fillStyle = gradient;
            drawSpectrum(array);

            function drawSpectrum(array){
                for (var i = 0; i < array.length; i++){
                    var value = array[i];
                    ctx.fillRect(i * 5, 325 - value, 3, 325);
                }
            }
        }

        function getAverageVolume(array){
            var values = 0;
            var average;
            var length = array.length;

            // sum all the frequency amplitudes
            for (var i = 0; i < length; i++){
                values += array[i];
            }

            average = values / length;
            return average;
        }

        // wire the graph: gain -> splitter -> analyser -> recorder -> speakers
        volume.connect(splitter);
        splitter.connect(analyser, 0, 0);
        analyser.connect(recorder);
        recorder.connect(context.destination);
    }




    function mergeBuffers(channelBuffer, recordingLength){
        // concatenate the per-callback Float32Arrays into one flat buffer
        var result = new Float32Array(recordingLength);
        var offset = 0;
        var lng = channelBuffer.length;
        for (var i = 0; i < lng; i++){
            var buffer = channelBuffer[i];
            result.set(buffer, offset);
            offset += buffer.length;
        }
        return result;
    }

    function interleave(leftChannel, rightChannel){
        // weave the two mono buffers into a single stereo L/R/L/R buffer
        var length = leftChannel.length + rightChannel.length;
        var result = new Float32Array(length);

        var inputIndex = 0;

        for (var index = 0; index < length; ){
            result[index++] = leftChannel[inputIndex];
            result[index++] = rightChannel[inputIndex];
            inputIndex++;
        }
        return result;
    }


    function writeUTFBytes(view, offset, string){
        // write an ASCII string byte by byte into the DataView
        var lng = string.length;
        for (var i = 0; i < lng; i++){
            view.setUint8(offset + i, string.charCodeAt(i));
        }
    }