Advanced search

Media (0)

Keyword: - Tags -/serveur

No media matching your criteria is available on this site.

Other articles (72)

  • Authorizations overridden by plugins

    27 April 2010, by

    Mediaspip core
    autoriser_auteur_modifier() so that visitors can edit their information on the authors page

  • Encoding and processing into web-friendly formats

    13 April 2011, by

    MediaSPIP automatically converts uploaded files to internet-compatible formats.
    Video files are encoded in Ogv and WebM (supported by HTML5) and MP4 (supported by Flash).
    Audio files are encoded in MP3 and Ogg (supported by HTML5) and MP3 (supported by Flash).
    Where possible, text is analyzed in order to retrieve the data needed by search engines, and then exported as a series of image files.
    All uploaded files are stored online in their original format, so you can (...)
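
    As an illustration only (not necessarily MediaSPIP's exact pipeline), conversions of this kind can be sketched with ffmpeg, assuming a build with libtheora, libvpx, libx264 and libmp3lame enabled:

    ffmpeg -i source.avi -c:v libtheora -c:a libvorbis output.ogv
    ffmpeg -i source.avi -c:v libvpx -c:a libvorbis output.webm
    ffmpeg -i source.avi -c:v libx264 -c:a aac output.mp4
    ffmpeg -i source.wav -c:a libmp3lame output.mp3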

  • Media-specific libraries and software

    10 December 2010, by

    For correct and optimal operation, several things need to be taken into consideration.
    After installing apache2, mysql and php5, it is important to install other required software, whose installation is described in the related links: a set of multimedia libraries (x264, libtheora, libvpx) used to encode and decode video and audio so as to support as many file types as possible (see this tutorial), and FFMpeg with the maximum number of decoders and (...)
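
    As a hedged sketch of the kind of installation involved, assuming a Debian-style server of that era (the package names are illustrative and vary by distribution and release):

    apt-get install apache2 mysql-server php5
    apt-get install ffmpeg x264 libtheora-dev libvpx-dev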

On other sites (7775)

  • How to create video from a webcam stream and canvas?

    1 May 2024, by Stefdelec

    I am trying to generate a video in the browser from different cuts:
Slide: stream from canvas
Video: stream from webcam

    I just want to allow the user to download the edited video with
slide1 + video1 + slide2 + video2 + slide3 + video3.

    Here is my code:

    const canvas = document.getElementById('myCanvas');
const ctx = canvas.getContext('2d');
const webcam = document.getElementById('webcam');
const videoPlayer = document.createElement('video');
videoPlayer.controls = true;
document.body.appendChild(videoPlayer);
const videoWidth = 640;
const videoHeight = 480;
let keepAnimating = true;
const frameRate = 30;
// Attempt to get webcam access
function setupWebcam() {
  const constraints = {
    video: {
      frameRate: frameRate,
      width: videoWidth,
      height: videoHeight
    }
  };
  navigator.mediaDevices.getUserMedia(constraints)
    .then(stream => {
      webcam.srcObject = stream;
      webcam.addEventListener('loadedmetadata', () => {
        recordSegments();
        console.log('Webcam feed is now displayed');
      });
    })
    .catch(err => {
      console.error("Error accessing webcam:", err);
      alert('Could not access the webcam. Please ensure permissions are granted and try again.');
    });
}


// Function to continuously draw on the canvas
function animateCanvas(content) {
  if (!keepAnimating) {
    console.log("keepAnimating", keepAnimating);
    return; // Stop the animation when keepAnimating is false
  }

  ctx.clearRect(0, 0, canvas.width, canvas.height); // Clear previous drawings
  ctx.fillStyle = `rgba(${Math.floor(Math.random() * 255)}, ${Math.floor(Math.random() * 255)}, ${Math.floor(Math.random() * 255)}, 0.5)`;
  ctx.fillRect(0, 0, canvas.width, canvas.height);
  ctx.fillStyle = '#000';
  ctx.font = '48px serif';
  ctx.fillText(content + ' ' + new Date().toLocaleTimeString(), 50, 100);

  // Request the next frame
  requestAnimationFrame(() => animateCanvas(content));
}


// Initialize recording segments array
const recordedSegments = [];
// Modified startRecording to manage animation
function startRecording(stream, duration = 5000, content) {
  const recorder = new MediaRecorder(stream, { mimeType: 'video/webm' });
  const data = [];

  recorder.ondataavailable = e => data.push(e.data);


  // Start animating the canvas
  keepAnimating = true;
  animateCanvas(content);
  recorder.start();
  return new Promise((resolve) => {
    // Automatically stop recording after 'duration' milliseconds
    setTimeout(() => {
      recorder.stop();
      // Stop the animation when recording stops
      keepAnimating = false;
    }, duration);

    recorder.onstop = () => {
      const blob = new Blob(data, { type: 'video/webm' });
      recordedSegments.push(blob);
      keepAnimating = true;
      resolve(blob);
    };
  });
}

// Sequence to record segments
async function recordSegments() {
  // Record canvas with dynamic content, then two webcam segments
  await startRecording(canvas.captureStream(frameRate), 2000, 'Canvas Draw 1');
  console.log('Canvas 1 recorded');
  await startRecording(webcam.srcObject, 3000);
  console.log('Webcam 1 recorded');
  await startRecording(webcam.srcObject);
  console.log('Webcam 2 recorded');
  mergeAndDownloadVideo();
}

function downLoadVideo(blob){
 const url = URL.createObjectURL(blob);

  // Create an anchor element and trigger a download
  const a = document.createElement('a');
  a.style.display = 'none';
  a.href = url;
  a.download = 'merged-video.webm';
  document.body.appendChild(a);
  a.click();

  // Clean up by revoking the Blob URL and removing the anchor element after the download
  setTimeout(() => {
    document.body.removeChild(a);
    window.URL.revokeObjectURL(url);
  }, 100);
}
function mergeAndDownloadVideo() {
  console.log("recordedSegments length", recordedSegments.length);
  // Create a new Blob from all recorded video segments
  const superBlob = new Blob(recordedSegments, { type: 'video/webm' });

  downLoadVideo(superBlob);
}

// Start the process by setting up the webcam first
setupWebcam();


    You can find it here: https://jsfiddle.net/Sulot/nmqf6wdj/25/

    I am unable to have one "slide" + webcam video + "slide" + webcam video.

    It merges only the first two segments, not the others. I also tried ffmpeg on the browser side.
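
    For context on why this fails: each MediaRecorder invocation produces a complete WebM file with its own header, and new Blob(recordedSegments) only byte-concatenates those files, so most players stop at the end of the first one. A minimal sketch of one workaround, reusing the names from the code above: keep a single recorder on the canvas stream for the whole session and switch what gets painted onto the canvas.

    // Sketch only: one recorder, one continuous WebM, alternating content
    const recorder = new MediaRecorder(canvas.captureStream(frameRate), { mimeType: 'video/webm' });
    const chunks = [];
    recorder.ondataavailable = e => chunks.push(e.data);
    recorder.onstop = () => downLoadVideo(new Blob(chunks, { type: 'video/webm' }));

    let mode = 'slide';
    function draw() {
      if (mode === 'slide') {
        // paint the slide content
        ctx.fillStyle = '#ccc';
        ctx.fillRect(0, 0, canvas.width, canvas.height);
        ctx.fillStyle = '#000';
        ctx.font = '48px serif';
        ctx.fillText('Slide ' + new Date().toLocaleTimeString(), 50, 100);
      } else {
        // paint the current webcam frame onto the same canvas
        ctx.drawImage(webcam, 0, 0, canvas.width, canvas.height);
      }
      requestAnimationFrame(draw);
    }

    recorder.start();
    draw();
    setTimeout(() => { mode = 'webcam'; }, 2000); // slide1 -> video1
    setTimeout(() => { mode = 'slide'; }, 5000);  // video1 -> slide2
    setTimeout(() => recorder.stop(), 7000);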

  • TypeError: _ffmpeg_ffmpeg__WEBPACK_IMPORTED_MODULE_1__ is not a constructor

    10 November 2023, by Shubham

    import { useState, useRef } from "react";

    import * as FFmpeg from "@ffmpeg/ffmpeg";

    const AudioRecorders = ({ onAudioRecorded }) => {
      const [permission, setPermission] = useState(false);
      const [stream, setStream] = useState(null);
      const mimeType = "video/webm";
      const mediaRecorder = useRef(null);
      const [recordingStatus, setRecordingStatus] = useState("inactive");
      const [audioChunks, setAudioChunks] = useState([]);
      const [audio, setAudio] = useState(null);

      const ffmpeg = useRef(null);

      const createFFmpeg = async ({ log = false }) => {
        // here I am facing the error
        const ffmpegInstance = new FFmpeg({ log });
        await ffmpegInstance.load();
        return ffmpegInstance;
      };

      const convertWebmToWav = async (webmBlob) => {
        if (!ffmpeg.current) {
          ffmpeg.current = await createFFmpeg({ log: false });
        }

        const inputName = "input.webm";
        const outputName = "output.wav";

        ffmpeg.current.FS("writeFile", inputName, await webmBlob.arrayBuffer());
        await ffmpeg.current.run("-i", inputName, outputName);

        const outputData = ffmpeg.current.FS("readFile", outputName);
        const outputBlob = new Blob([outputData.buffer], { type: "audio/wav" });

        return outputBlob;
      };

      const getMicrophonePermission = async () => {
        if ("MediaRecorder" in window) {
          try {
            const streamData = await navigator.mediaDevices.getUserMedia({
              audio: true,
              video: false,
            });
            setPermission(true);
            setStream(streamData);
          } catch (err) {
            alert(err.message);
          }
        } else {
          alert("The MediaRecorder API is not supported in your browser.");
        }
      };

      const startRecording = async () => {
        setRecordingStatus("recording");
        // create a new MediaRecorder instance using the stream
        const media = new MediaRecorder(stream, { type: mimeType });
        // set the MediaRecorder instance to the mediaRecorder ref
        mediaRecorder.current = media;
        // invoke the start method to start the recording process
        mediaRecorder.current.start();
        let localAudioChunks = [];
        mediaRecorder.current.ondataavailable = (event) => {
          if (typeof event.data === "undefined") return;
          if (event.data.size === 0) return;
          localAudioChunks.push(event.data);
        };
        setAudioChunks(localAudioChunks);
      };

      const stopRecording = () => {
        setRecordingStatus("inactive");
        // stop the recording instance
        mediaRecorder.current.stop();
        mediaRecorder.current.onstop = async () => {
          // creates a blob file from the audio chunks data
          const audioBlob = new Blob(audioChunks, { type: mimeType });
          // creates a playable URL from the blob file
          const audioUrl = URL.createObjectURL(audioBlob);
          // converts the WebM blob to a WAV blob
          const newBlob = await convertWebmToWav(audioBlob);
          await onAudioRecorded(newBlob);
          setAudio(audioUrl);
          setAudioChunks([]);
        };
      };

      return (
        <div>
          <h2>Audio Recorder</h2>
          <div className="audio-controls">
            {!permission ? (
              <button type="button" onClick={getMicrophonePermission}>
                Get Microphone
              </button>
            ) : null}
            {permission && recordingStatus === "inactive" ? (
              <button type="button" onClick={startRecording}>
                Start Recording
              </button>
            ) : null}
            {recordingStatus === "recording" ? (
              <button type="button" onClick={stopRecording}>
                Stop Recording
              </button>
            ) : null}
            {audio ? (
              <div className="audio-container">
                <audio src={audio} controls />
                <a href={audio} download>
                  Download Recording
                </a>
              </div>
            ) : null}
          </div>
        </div>
      );
    };
    export default AudioRecorders;


    ERROR
    ffmpeg_ffmpeg__WEBPACK_IMPORTED_MODULE_1_ is not a constructor
    TypeError: ffmpeg_ffmpeg__WEBPACK_IMPORTED_MODULE_1_ is not a constructor
        at createFFmpeg (http://localhost:3000/main.48220156e0c620f1acd0.hot-update.js:41:28)
        at convertWebmToWav (http://localhost:3000/main.48220156e0c620f1acd0.hot-update.js:49:30)
        at mediaRecorder.current.onstop (http://localhost:3000/main.48220156e0c620f1acd0.hot-update.js:109:29)


    I am trying to record the voice in audio/wav format, but it records as video/webm regardless of const mimeType = "video/webm": whatever mimeType I give, "https://www.checkfiletype.com/" reports the file type as video/webm. I am recording it for the speech_recognition library used in the Flask backend, which accepts only audio/wav. So on the frontend I wrote the function "convertWebmToWav", which gives me the error shown above (Uncaught runtime errors).
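
    For reference, import * as FFmpeg from "@ffmpeg/ffmpeg" binds the module namespace object, which is never a constructor, so new FFmpeg(...) fails with exactly this TypeError. A hedged sketch of the two API shapes @ffmpeg/ffmpeg has shipped (which one applies depends on the installed version):

    // @ffmpeg/ffmpeg 0.11.x and earlier: the package exports a factory, not a class
    import { createFFmpeg } from "@ffmpeg/ffmpeg";
    const ffmpeg011 = createFFmpeg({ log: false });
    await ffmpeg011.load();
    // 0.11.x file API: ffmpeg011.FS("writeFile", ...), ffmpeg011.run("-i", ...)

    // @ffmpeg/ffmpeg 0.12+: a named class export; FS()/run() were replaced
    // by writeFile()/exec()/readFile()
    import { FFmpeg } from "@ffmpeg/ffmpeg";
    const ffmpeg012 = new FFmpeg();
    await ffmpeg012.load();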


  • Audio recorded with MediaRecorder on Chrome missing duration

    3 June 2017, by suppp111

    I am recording audio (oga/vorbis) files with MediaRecorder. When I record these files through Chrome I get problems: I cannot edit the files with ffmpeg, and when I try to play them in Firefox it says they are corrupt (they do play fine in Chrome though).

    Looking at their metadata with ffmpeg I get this:

    Input #0, matroska,webm, from '91.oga':
     Metadata:
       encoder         : Chrome
     Duration: N/A, start: 0.000000, bitrate: N/A
       Stream #0:0(eng): Audio: opus, 48000 Hz, mono, fltp (default)
    [STREAM]
    index=0
    codec_name=opus
    codec_long_name=Opus (Opus Interactive Audio Codec)
    profile=unknown
    codec_type=audio
    codec_time_base=1/48000
    codec_tag_string=[0][0][0][0]
    codec_tag=0x0000
    sample_fmt=fltp
    sample_rate=48000
    channels=1
    channel_layout=mono
    bits_per_sample=0
    id=N/A
    r_frame_rate=0/0
    avg_frame_rate=0/0
    time_base=1/1000
    start_pts=0
    start_time=0.000000
    duration_ts=N/A
    duration=N/A
    bit_rate=N/A
    max_bit_rate=N/A
    bits_per_raw_sample=N/A
    nb_frames=N/A
    nb_read_frames=N/A
    nb_read_packets=N/A
    DISPOSITION:default=1
    DISPOSITION:dub=0
    DISPOSITION:original=0
    DISPOSITION:comment=0
    DISPOSITION:lyrics=0
    DISPOSITION:karaoke=0
    DISPOSITION:forced=0
    DISPOSITION:hearing_impaired=0
    DISPOSITION:visual_impaired=0
    DISPOSITION:clean_effects=0
    DISPOSITION:attached_pic=0
    TAG:language=eng
    [/STREAM]
    [FORMAT]
    filename=91.oga
    nb_streams=1
    nb_programs=0
    format_name=matroska,webm
    format_long_name=Matroska / WebM
    start_time=0.000000
    duration=N/A
    size=7195
    bit_rate=N/A
    probe_score=100
    TAG:encoder=Chrome

    As you can see there are problems with the duration. I have looked at posts like this:
    How can I add predefined length to audio recorded from MediaRecorder in Chrome?

    But even trying that, I got errors when trying to chop and merge files. For example, when running:

    ffmpeg -f concat -i 89_inputs.txt -c copy final.oga

    I get a lot of this:

    [oga @ 00000000006789c0] Non-monotonous DTS in output stream 0:0; previous: 57612, current: 1980; changing to 57613. This may result in incorrect timestamps in the output file.
    [oga @ 00000000006789c0] Non-monotonous DTS in output stream 0:0; previous: 57613, current: 2041; changing to 57614. This may result in incorrect timestamps in the output file.
    DTS -442721849179034176, next:42521 st:0 invalid dropping
    PTS -442721849179034176, next:42521 invalid dropping st:0
    [oga @ 00000000006789c0] Non-monotonous DTS in output stream 0:0; previous: 57614, current: 2041; changing to 57615. This may result in incorrect timestamps in the output file.
    [oga @ 00000000006789c0] Timestamps are unset in a packet for stream 0. This is deprecated and will stop working in the future. Fix your code to set the timestamps properly
    DTS -442721849179031296, next:42521 st:0 invalid dropping
    PTS -442721849179031296, next:42521 invalid dropping st:0

    Does anyone know what we need to do to audio files recorded from Chrome for them to be usable? Or is there a problem with my setup?
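
    One hedged workaround for the concat step, assuming a reasonably recent ffmpeg build with libopus enabled: re-encode instead of stream-copying, so ffmpeg regenerates the timestamps it is complaining about:

    ffmpeg -f concat -i 89_inputs.txt -c:a libopus final.oga

    Client-side libraries that write the missing Duration element into the WebM/Matroska header before upload (ts-ebml is one commonly cited option) are another route.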

    Recorder js:

    if (navigator.getUserMedia) {
     console.log('getUserMedia supported.');

     var constraints = { audio: true };
     var chunks = [];

     var onSuccess = function(stream) {
       var mediaRecorder = new MediaRecorder(stream);

       record.onclick = function() {
         mediaRecorder.start();
         console.log(mediaRecorder.state);
         console.log("recorder started");
         record.style.background = "red";

         stop.disabled = false;
         record.disabled = true;

         var aud = document.getElementById("audioClip");
         start = aud.currentTime;
       }

       stop.onclick = function() {
         console.log(mediaRecorder.state);
         console.log("Recording request sent.");
         mediaRecorder.stop();
       }

       mediaRecorder.onstop = function(e) {
         console.log("data available after MediaRecorder.stop() called.");

         var audio = document.createElement('audio');
         audio.setAttribute('controls', '');
         audio.setAttribute('id', 'audioClip');

         audio.controls = true;
         var blob = new Blob(chunks, { 'type' : 'audio/ogg; codecs="vorbis"' });
         chunks = [];
         var audioURL = window.URL.createObjectURL(blob);
         audio.src = audioURL;

         sendRecToPost(blob);   // this just send the audio blob to the server by post
         console.log("recorder stopped");

       }