Newest 'ffmpeg' Questions - Stack Overflow

http://stackoverflow.com/questions/tagged/ffmpeg

Articles published on the site

  • FFmpeg C++ libav randomly uses high CPU and low GPU, or high GPU and low CPU, with hwaccel

    29 April, by cngkyt

    I am using FFmpeg as a library with C++. The integration of the library is done; I am using AVFilterGraph as an audio resampler and video pixel-format converter.

    I cannot share the code because it is a complete library with too many files, but I am confident the problem is not the code itself; it may be the approach.

    My whole class is based on FFmpeg's transcoding example.

    The problem is that when I run my program, which encodes roughly 90 streams, it starts in one of two states:

    • 25% CPU usage, 80% GPU encode usage, 85% GPU decode usage, 100% SM utilization; or

    • 80% CPU usage, 45% GPU encode usage, 50% GPU decode usage, 60% SM utilization.

    When it starts in the second state, the videos are perfect; when it starts in the first state, the videos freeze.

    My program uses the GPU to encode all streams, but uses GPU decoding only for one stream out of every three (the other two are decoded on the CPU), i.e. hwaccel for 1/3 of the streams, because my cards are RTX 4070 Ti SUPER, each with 2 NVENC chips and 1 NVDEC chip.

    The program takes UDP streams in, encodes them, and outputs UDP MPEG-TS.

    Bitrate and quality settings are consistent across streams, depending on resolution.

    I always test with the same streams.

    The problem happens randomly, but if the program starts with high CPU and low GPU utilization it stays that way; when I restart the program, the outcome is random again.

    What can cause this?

    For background, I have more than 25 years of C++ experience.

    If someone can teach me something here, I will be happy. Thanks in advance for your help.

    I have tried to reproduce it with the compiled ffmpeg binary instead of my libav-based library; the same thing happens, but less often.

    I have also tried using hevc_cuvid (found with avcodec_find_decoder_by_name) instead of hwaccel.
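
    For reference, opening the CUVID decoder by name looks roughly like this with the C API. This is a minimal sketch assuming standard FFmpeg headers, not the poster's actual code, with error handling trimmed:

    #include <libavcodec/avcodec.h>

    /* Open the full-hardware CUVID decoder named in the question.
     * Unlike the default software "hevc" decoder, "hevc_cuvid"
     * offloads decoding to the card's NVDEC chip. */
    AVCodecContext *open_hevc_cuvid(const AVCodecParameters *par)
    {
        const AVCodec *dec = avcodec_find_decoder_by_name("hevc_cuvid");
        if (!dec)                       /* ffmpeg built without cuvid */
            return NULL;

        AVCodecContext *ctx = avcodec_alloc_context3(dec);
        if (!ctx)
            return NULL;

        if (avcodec_parameters_to_context(ctx, par) < 0 ||
            avcodec_open2(ctx, dec, NULL) < 0) {
            avcodec_free_context(&ctx);
            return NULL;
        }
        return ctx;
    }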

  • How to create a video from a webcam stream and a canvas?

    29 April, by Stefdelec

    I am trying to generate a video in the browser from different cuts: slides (streamed from a canvas) and videos (streamed from the webcam).

    I just want to let the user download the edited video: slide1 + video1 + slide2 + video2 + slide3 + video3.

    Here is my code:

    const canvas = document.getElementById('myCanvas');
    const ctx = canvas.getContext('2d');
    const webcam = document.getElementById('webcam');
    const videoPlayer = document.createElement('video');
    videoPlayer.controls = true;
    document.body.appendChild(videoPlayer);
    const videoWidth = 640;
    const videoHeight = 480;
    let keepAnimating = true;
    const frameRate = 30;
    // Attempt to get webcam access
    function setupWebcam() {
      const constraints = {
        video: {
          frameRate: frameRate,
          width: videoWidth,
          height: videoHeight
        }
      };
      navigator.mediaDevices.getUserMedia(constraints)
        .then(stream => {
          webcam.srcObject = stream;
          webcam.addEventListener('loadedmetadata', () => {
            recordSegments();
            console.log('Webcam feed is now displayed');
          });
        })
        .catch(err => {
          console.error("Error accessing webcam:", err);
          alert('Could not access the webcam. Please ensure permissions are granted and try again.');
        });
    }
    
    
    // Function to continuously draw on the canvas
    function animateCanvas(content) {
      if (!keepAnimating) {
        console.log("keepAnimating", keepAnimating);
        return; // Stop the animation when keepAnimating is false
      }
    
      ctx.clearRect(0, 0, canvas.width, canvas.height); // Clear previous drawings
      ctx.fillStyle = `rgba(${Math.floor(Math.random() * 255)}, ${Math.floor(Math.random() * 255)}, ${Math.floor(Math.random() * 255)}, 0.5)`;
      ctx.fillRect(0, 0, canvas.width, canvas.height);
      ctx.fillStyle = '#000';
      ctx.font = '48px serif';
      ctx.fillText(content + ' ' + new Date().toLocaleTimeString(), 50, 100);
    
      // Request the next frame
      requestAnimationFrame(() => animateCanvas(content));
    }
    
    
    // Initialize recording segments array
    const recordedSegments = [];
    // Modified startRecording to manage animation
    function startRecording(stream, duration = 5000, content) {
      const recorder = new MediaRecorder(stream, { mimeType: 'video/webm' });
      const data = [];
    
      recorder.ondataavailable = e => data.push(e.data);
    
    
      // Start animating the canvas
      keepAnimating = true;
      animateCanvas(content);
      recorder.start();
      return new Promise((resolve) => {
        // Automatically stop recording after 'duration' milliseconds
        setTimeout(() => {
          recorder.stop();
          // Stop the animation when recording stops
          keepAnimating = false;
        }, duration);
    
        recorder.onstop = () => {
          const blob = new Blob(data, { type: 'video/webm' });
          recordedSegments.push(blob);
          keepAnimating = true; // re-arm the flag for the next segment
          resolve(blob);
        };
      });
    }
    
    // Sequence to record segments
    async function recordSegments() {
      // Record canvas with dynamic content
      await startRecording(canvas.captureStream(frameRate), 2000, 'Canvas Draw 1').then(() => console.log('Canvas 1 recorded'));
      await startRecording(webcam.srcObject, 3000).then(() => console.log('Webcam 1 recorded'));
      await startRecording(webcam.srcObject).then(() => console.log('Webcam 2 recorded'));
      mergeAndDownloadVideo();
    }
    
    function downLoadVideo(blob) {
      const url = URL.createObjectURL(blob);
    
      // Create an anchor element and trigger a download
      const a = document.createElement('a');
      a.style.display = 'none';
      a.href = url;
      a.download = 'merged-video.webm';
      document.body.appendChild(a);
      a.click();
    
      // Clean up by revoking the Blob URL and removing the anchor element after the download
      setTimeout(() => {
        document.body.removeChild(a);
        window.URL.revokeObjectURL(url);
      }, 100);
    }
    function mergeAndDownloadVideo() {
      console.log("recordedSegments length", recordedSegments.length);
      // Create a new Blob from all recorded video segments
      const superBlob = new Blob(recordedSegments, { type: 'video/webm' });
      downLoadVideo(superBlob);
    }
    
    
    // Function to play recorded segments one by one
    function playRecordedSegments() {
      let currentSegmentIndex = 0;
    
      videoPlayer.onended = () => {
        currentSegmentIndex++;
        if (currentSegmentIndex < recordedSegments.length) {
          playSegment(currentSegmentIndex);
        }
      };
    
      // Play the first segment
      playSegment(currentSegmentIndex);
    }
    
    function playSegment(index) {
      const videoBlob = recordedSegments[index];
      const videoURL = URL.createObjectURL(videoBlob);
      videoPlayer.src = videoURL;
      videoPlayer.play();
    }
    
    // Start the process by setting up the webcam first
    setupWebcam();
    

    You can find it here: https://jsfiddle.net/Sulot/nmqf6wdj/25/

    I am unable to get "slide" + webcam video + "slide" + webcam video.

    Only the first 2 segments end up merged, not the others. I also tried ffmpeg on the browser side.
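
    A likely reason (an educated guess, not a confirmed diagnosis): each MediaRecorder blob is a complete, self-contained WebM file with its own header, so concatenating them with new Blob(recordedSegments) does not produce one valid multi-segment file; most players stop after the first embedded segment. One workaround that avoids merging entirely is to draw whichever source is active into a single canvas and record that one stream. A minimal sketch reusing the question's webcam, videoWidth, videoHeight, frameRate and downLoadVideo:

    // Record one continuous stream; switch what gets drawn instead of
    // switching streams, so no post-hoc merging is needed.
    const mixCanvas = document.createElement('canvas');
    mixCanvas.width = videoWidth;
    mixCanvas.height = videoHeight;
    const mixCtx = mixCanvas.getContext('2d');

    let source = 'slide'; // toggled between 'slide' and 'webcam'
    function drawMix() {
      if (source === 'webcam') {
        mixCtx.drawImage(webcam, 0, 0, videoWidth, videoHeight);
      } else {
        mixCtx.fillStyle = '#ccc';
        mixCtx.fillRect(0, 0, videoWidth, videoHeight);
        mixCtx.fillStyle = '#000';
        mixCtx.font = '48px serif';
        mixCtx.fillText('Slide ' + new Date().toLocaleTimeString(), 50, 100);
      }
      requestAnimationFrame(drawMix);
    }

    const mixRecorder = new MediaRecorder(mixCanvas.captureStream(frameRate), { mimeType: 'video/webm' });
    const chunks = [];
    mixRecorder.ondataavailable = e => chunks.push(e.data);
    mixRecorder.onstop = () => downLoadVideo(new Blob(chunks, { type: 'video/webm' }));

    mixRecorder.start();
    drawMix();
    setTimeout(() => { source = 'webcam'; }, 2000); // slide 1 -> video 1
    setTimeout(() => { source = 'slide'; }, 5000);  // video 1 -> slide 2
    setTimeout(() => mixRecorder.stop(), 7000);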

  • Can't find the error in a function for changing the sampling rate

    29 April, by kitty uwu

    I have a function for changing the sampling rate of audio (single channel only):

    #include <libavutil/channel_layout.h>
    #include <libavutil/opt.h>
    #include <libavutil/samplefmt.h>
    #include <libswresample/swresample.h>

    int change_sampling_rate(float *audio_input, int input_sample_rate, int output_sample_rate, int input_num_of_samples, float **audio_output, int *result_num_of_samples) {
        AVChannelLayout src_ch_layout = AV_CHANNEL_LAYOUT_MONO;
        AVChannelLayout dst_ch_layout = AV_CHANNEL_LAYOUT_MONO;
    
        struct SwrContext *swr_ctx = swr_alloc();
        int ret;
        if (!swr_ctx) {
            fprintf(stderr, "Could not allocate resampler context\n");
            return AVERROR(ENOMEM);
        }
    
        av_opt_set_chlayout(swr_ctx, "in_chlayout",    &src_ch_layout, 0);
        av_opt_set_int(swr_ctx, "in_sample_rate",       input_sample_rate, 0);
        av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_FLT, 0);
    
        av_opt_set_chlayout(swr_ctx, "out_chlayout",    &dst_ch_layout, 0);
        av_opt_set_int(swr_ctx, "out_sample_rate",       output_sample_rate, 0);
        av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", AV_SAMPLE_FMT_FLT, 0);
    
        if ((ret = swr_init(swr_ctx)) < 0) {
            fprintf(stderr, "Failed to initialize the resampling context\n");
            swr_free(&swr_ctx);
            return -1;
        }
    
        int output_samples_count = av_rescale_rnd(swr_get_delay(swr_ctx, input_sample_rate) + input_num_of_samples, output_sample_rate, input_sample_rate, AV_ROUND_UP);
        uint8_t **resampled_data = NULL;
        if (av_samples_alloc_array_and_samples(&resampled_data, NULL, 1, output_samples_count, AV_SAMPLE_FMT_FLT, 0) < 0) {
            fprintf(stderr, "Could not allocate resampled data\n");
            swr_free(&swr_ctx);
            return -1;
        }
    
        const uint8_t *in_samples[1] = {(const uint8_t *)audio_input};
        int frame_count = swr_convert(swr_ctx, resampled_data, output_samples_count, in_samples, input_num_of_samples);
    
        if (frame_count < 0) {
            fprintf(stderr, "Error while resampling\n");
            av_freep(&resampled_data[0]);
            av_freep(&resampled_data);
            swr_free(&swr_ctx);
            return -1;
        }
    
        *audio_output = (float *) malloc(frame_count * sizeof(float));
        if (!*audio_output) {
            fprintf(stderr, "Could not allocate memory for output\n");
            av_freep(&resampled_data[0]);
            av_freep(&resampled_data);
            swr_free(&swr_ctx);
            return -1;
        }
    
        memcpy(*audio_output, resampled_data[0], frame_count * sizeof(float));
    
        *result_num_of_samples = frame_count;
        av_freep(&resampled_data[0]);
        av_freep(&resampled_data);
        swr_free(&swr_ctx);
        return SUCCESS;
    }
    

    When I run tests on the time lag between two mp3 files with different sampling rates, the result differs from the correct answer by about 15-20 ms. Can anybody please help me find the mistakes in the code?

    For example, I have two audio files: [audio_1](https://jmp.sh/s/USFPaGnHXVuKFVYarYpm) and [audio_2](https://jmp.sh/s/jbmWbPTwkdDujAocmi56); the second is just a sample of the first. The answer should be 35264 ms, but my function gives 35249 ms :(
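
    One thing worth checking (an educated guess, not a verified fix): swr_convert() keeps a small internal delay, and the last output samples are only emitted when the resampler is flushed by passing a NULL input. If that tail is never drained, the output is slightly shorter than expected, which can skew a lag measurement. A sketch of draining it, using the question's own variables, placed right after the main swr_convert() call and before the output buffer is sized:

    /* Flush the resampler: a NULL input tells swr_convert() to emit
     * whatever delayed samples it is still holding internally. */
    uint8_t *tail = resampled_data[0] + frame_count * sizeof(float);
    int flushed = swr_convert(swr_ctx, &tail,
                              output_samples_count - frame_count,
                              NULL, 0);
    if (flushed > 0)
        frame_count += flushed; /* include the tail in the sample count */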

  • ffmpeg merges audio and video from ytdl-core, but outputs an mp4 with no duration (not seekable)

    29 April, by Andreu

    I'm trying to merge video and audio from ytdl-core with ffmpeg and send the result to the user as mp4. The output video appears with no duration (infinite) and I cannot seek or navigate to another point in the video.

    app.get('/downloadmp4', async (req, res) => {
      let url = req.query.url;
      if (!ytdl.validateURL(url)) {
        return res.sendStatus(400);
      }

      let info = await ytdl.getInfo(url);
      let title = info.videoDetails.title.replace(/[^\x00-\x7F]/g, "");
      res.header('Content-Disposition', `attachment; filename="${title}.mp4"`);

      let video = ytdl(url, { filter: 'videoonly', quality: 'highest' });
      let audio = ytdl(url, { filter: 'audioonly', highWaterMark: 1 << 25 });

      // create the ffmpeg process for muxing
      const ffmpegProcess = cp.spawn(
        ffmpeg,
        [
          '-loglevel', '0', '-hide_banner',
          // input audio by pipe
          '-i', 'pipe:3',
          // input video by pipe
          '-i', 'pipe:4',
          // map audio and video correspondingly
          '-map', '0:a',
          '-map', '1:v',
          // change the codec
          '-c:v', 'copy',
          '-c:a', 'flac',
          // "-c:a", "aac",
          '-crf', '27',
          '-preset', 'veryfast',
          // Allow output to be seekable, which is needed for mp4 output
          '-movflags', 'frag_keyframe+empty_moov',
          // Define output container
          '-f', 'mp4', 'pipe:5',
        ],
        {
          // no popup window for Windows users
          windowsHide: true,
          stdio: [
            // silence stdin/out, forward stderr,
            'inherit', 'inherit', 'inherit',
            // and pipe audio, video, output
            'pipe', 'pipe', 'pipe',
          ],
        }
      );
      audio.pipe(ffmpegProcess.stdio[3]);
      video.pipe(ffmpegProcess.stdio[4]);
      ffmpegProcess.stdio[5].pipe(res);
    });
    

    I tried various video and audio formats, and also the '-movflags frag_keyframe+empty_moov' flags, without result.
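
    A plausible explanation (hedged, since the poster's full setup isn't shown): '-movflags frag_keyframe+empty_moov' makes ffmpeg write a fragmented MP4, and its empty moov atom carries no duration, so players show an endless, unseekable timeline. A finalized moov cannot be written to a non-seekable pipe, so one common workaround is to mux into a temporary file with '-movflags +faststart' and stream that file once ffmpeg exits. A sketch against the question's variables (ffmpeg, audio, video, res); it drops '-crf'/'-preset', which have no effect with '-c:v copy', and uses aac, the usual audio codec for mp4:

    const fs = require('fs');
    const os = require('os');
    const path = require('path');

    // Mux into a temp file so ffmpeg can seek back and write a complete
    // moov atom (with the real duration), then stream the file out.
    const tmpFile = path.join(os.tmpdir(), `mux-${Date.now()}.mp4`);
    const ffmpegProcess = cp.spawn(
      ffmpeg,
      [
        '-loglevel', 'error', '-hide_banner',
        '-i', 'pipe:3',            // audio in
        '-i', 'pipe:4',            // video in
        '-map', '0:a', '-map', '1:v',
        '-c:v', 'copy',            // keep the video stream as-is
        '-c:a', 'aac',             // re-encode audio to mp4-friendly AAC
        '-movflags', '+faststart', // put moov first for instant playback
        '-y', tmpFile,
      ],
      { windowsHide: true, stdio: ['inherit', 'inherit', 'inherit', 'pipe', 'pipe'] }
    );
    audio.pipe(ffmpegProcess.stdio[3]);
    video.pipe(ffmpegProcess.stdio[4]);
    ffmpegProcess.on('close', () => {
      fs.createReadStream(tmpFile)
        .pipe(res)
        .on('close', () => fs.unlink(tmpFile, () => {}));
    });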

  • How to get the video resolution from a File in Dart/Flutter?

    29 April, by Fractale

    I was using video_player to do the rest of the metadata extraction I need, but I didn't find a way to get the resolution with this package.
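
    For what it's worth, video_player does expose the resolution once a controller is initialized, via the controller's value.size (if I recall the plugin's API correctly). A minimal sketch:

    import 'dart:io';
    import 'dart:ui' show Size;

    import 'package:video_player/video_player.dart';

    /// Reads the video resolution from [file] using video_player.
    Future<Size> videoResolution(File file) async {
      final controller = VideoPlayerController.file(file);
      await controller.initialize();      // parses the video's metadata
      final size = controller.value.size; // width/height in pixels
      await controller.dispose();         // release the native player
      return size;
    }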