Newest 'ffmpeg' Questions - Stack Overflow

http://stackoverflow.com/questions/tagged/ffmpeg

Les articles publiés sur le site

  • Run docker container with FFMPEG rstp stream on websockets

    18 février, par bmvr

    I have created a Node.js application that uses FFmpeg via Node's spawn library.

    Here's the backend sample:

    // Spawn an ffmpeg process that pulls an RTSP feed and relays the muxed
    // MPEG-TS bytes to every WebSocket client registered under `cameraId`.
    // Returns the ChildProcess so the caller can manage its lifetime.
    const startStreamWs = (cameraId, rtsp_url) => {
      console.log(`Starting stream for camera: ${cameraId}`);
    
      const ffmpeg = spawn("ffmpeg", [
        // Input options must appear BEFORE -i to apply to the RTSP input;
        // placed after -i (as originally written) ffmpeg treats them as
        // output options and they are ignored or rejected.
        "-rtsp_transport", "tcp",       // Use TCP for reliable streaming
        "-analyzeduration", "5000000",  // Increase analyzeduration
        "-probesize", "5000000",        // Increase probesize
        "-fflags", "nobuffer",          // Reduce input buffering
        "-flags", "low_delay",          // Low latency
        "-i", rtsp_url,
        "-strict", "experimental",
        "-max_delay", "200000",        // Reduce max delay for faster response
        "-f", "mpegts",                // MPEG-TS container for streaming
        "-codec:v", "mpeg1video",      // MPEG-1 video codec (JSMpeg-compatible)
        "-s", "1280x720",              // Video resolution
        "-r", "25",                    // Frame rate (25 fps)
        "-b:v", "1500k",               // Bitrate for video
        "-maxrate", "2000k",           // Maximum bitrate
        "-bufsize", "2M",              // Buffer size (needed with maxrate); was duplicated
        "-bf", "0",                    // Disable B-frames for lower latency
        "-an",                         // Disable audio
        "-"                            // Write the stream to stdout
    ]);
    
    
      // Fan the transcoded bytes out to every connected client of this camera.
      ffmpeg.stdout.on("data", (data) => {
        if (cameraStreams[cameraId]) {
          console.log(`Data sent for camera ${cameraId}`);
          // Broadcast stream data to all connected clients
          for (const client of cameraStreams[cameraId].clients) {
            if (client.readyState === ws.OPEN) {
              client.send(data);
            }
          }
        }
      });
    
      // ffmpeg writes progress AND errors to stderr; keep a file log for debugging.
      ffmpeg.stderr.on("data", (data) => {
        console.error(`FFmpeg stderr (Camera ${cameraId}): ${data.toString()}`);
        logErrorToFile(data);
      });
    
      // When ffmpeg exits, disconnect the remaining clients and drop the registry entry.
      ffmpeg.on("close", (code) => {
        console.log(`FFmpeg process exited for Camera ${cameraId} with code ${code}`);
        if (cameraStreams[cameraId]) {
          // Close all remaining clients
          for (const client of cameraStreams[cameraId].clients) {
            client.close();
          }
          delete cameraStreams[cameraId];
        }
      });
    
      return ffmpeg;
    };
    

    Front End Sample my angular component

    import { Component, OnDestroy, OnInit } from '@angular/core';
    import { FormsModule } from '@angular/forms';
    import { ActivatedRoute } from '@angular/router';
    
    declare var JSMpeg: any; // Declare JSMpeg from the global script
    
    @Component({
      selector: 'app-video-player',
      templateUrl: './video-player.component.html',
      styleUrls: ['./video-player.component.css'],
      standalone: false
    })
    export class VideoPlayerComponent implements OnInit, OnDestroy {
      stepSize: number = 0.1;
      private player: any;
      cameraId: string | null = null;
      ws: WebSocket | null = null; 
      wsUrl: string | null = null;
    
      constructor(private route: ActivatedRoute) {
        this.cameraId = this.route.snapshot.paramMap.get('id');
        this.wsUrl = `ws://localhost:8085?cameraId=${this.cameraId}`;
        // NOTE(review): JSMpeg.Player (see ngOnInit) opens its OWN WebSocket to
        // wsUrl, so this second connection doubles the load on the stream
        // server; kept for backward compatibility, but consider removing it.
        this.ws = new WebSocket(this.wsUrl);
      }
    
      // Bare `Promise` (no type argument) does not compile; the hooks return
      // nothing, so Promise<void> is the correct annotation.
      async ngOnInit(): Promise<void> {
        const canvas = document.getElementById('videoCanvas') as HTMLCanvasElement;
        this.player = new JSMpeg.Player(this.wsUrl, { canvas: canvas });
      }
    
      async ngOnDestroy(): Promise<void> {
        // Tear down the player too — it owns its own WebSocket, which closing
        // `this.ws` alone would leave open.
        this.player?.destroy?.();
        this.ws?.close(1000, "Exiting");
      }
    
      getStepSize(): number {
        return this.stepSize;
      }
    }
    
    
    

    On localhost it works fine, but once I containerize it, it doesn't. I can serve the website but not the stream. I have the same FFmpeg version (7.1) and the codec is available, although I run localhost on macOS and Ubuntu in Docker.

  • When decoding video with ffmpeg and playing the decoded audio data with SDL2, it's all noise [closed]

    18 février, par jodan feng

    it is the code in the main thread

    // Entry point: start the playback worker thread, then block in the Qt
    // event loop until the application quits.
    int main(int argc, char *argv[])
    {
        QApplication a(argc, argv);
        Ffplay f;      // worker thread object; run() executes off the GUI thread
        f.start();     // kicks off Ffplay::run()
        return a.exec();
    }
    

    Ffplay is the thread that decodes the video and plays it.

    // QThread that opens the media file and hands the demuxer context over to
    // the audio worker thread (Thread2).
    class Ffplay : public QThread
    {
       Q_OBJECT
    public:
        Ffplay();
        protected:
          void run() override;   // opens the input, then starts t2
    public:
        AVFormatContext* format2;  // demuxer context, shared with t2
        Thread2 t2;                // audio decode/playback worker
    };
    Ffplay::Ffplay() {}
    
    void Ffplay::run(){
        format2=avformat_alloc_context();
        avformat_open_input(&format2,"c:/Qt/project/player/movie.mp4",NULL,NULL);
        avformat_find_stream_info(format2,NULL);
        t2.format=format2;
        t2.start();
    }
    

    Thread2 is the thread that decodes the audio part and plays it.

    // QThread that finds the audio stream in an already-opened format context,
    // decodes it to S16 PCM via libswresample, and plays it through SDL.
    class Thread2 : public QThread
    {
        Q_OBJECT
    public:
        Thread2();
    protected:
        void run() override;   // decode + playback entry point
    public:
        AVFormatContext* format;  // set by the owner (Ffplay) before start()
        AVCodecContext* codec;    // decoder context for the audio stream
        const AVCodec* c;         // decoder implementation
        AVPacket* packet;         // reusable demuxed packet
        AVFrame* frame;           // reusable decoded frame
        SwrContext* s;            // resampler to interleaved S16
        FILE* f;                  // temp PCM file, write side
        FILE* f2;                 // temp PCM file, read side
    
    };
    Thread2::Thread2() {}
    
    // Shared playback state: the decoded PCM buffer, the SDL callback's read
    // cursor, and the number of valid bytes in the buffer.
    struct Sound
    {
        Uint8* data;          // decoded interleaved PCM bytes
        Uint32 position=0;    // bytes already handed to SDL
        Uint32 len;           // total valid bytes in `data`
    };
    
    // Global instance shared between the decode thread and the SDL audio
    // callback; never freed (acceptable for this sample, but it leaks).
    Sound* d=new Sound;
    
    // SDL audio callback: copy the next chunk of PCM from the global Sound
    // buffer into SDL's output stream, clamped to the data remaining; the
    // pre-memset means exhausted data plays as silence.
    // NOTE(review): reads the global `d` with no synchronization — assumes the
    // decode thread finished filling it before playback starts; confirm.
    void write(void* userdata,Uint8* stream,int len){
        SDL_memset(stream,0,len);                           // pre-fill with silence
        int size2=std::min((Uint32)len,d->len-d->position); // clamp to remaining bytes
        SDL_memcpy(stream,d->data+d->position,size2);
        d->position=d->position+size2;
    }
    
    
    void Thread2::run(){
        int index=-1;
        for(unsigned int i=0;inb_streams;i++){
            if(format->streams[i]->codecpar->codec_type==AVMEDIA_TYPE_AUDIO){
                index=i;
                break;
            }
        }
        c=avcodec_find_decoder(format->streams[index]->codecpar->codec_id);
        codec=avcodec_alloc_context3(c);
        avcodec_parameters_to_context(codec,format->streams[index]->codecpar);
        avcodec_open2(codec,c,NULL);
        packet=av_packet_alloc();
        frame=av_frame_alloc();
        s=swr_alloc();
        swr_alloc_set_opts2(&s,&codec->ch_layout,AV_SAMPLE_FMT_S16,codec->sample_rate,&codec->ch_layout,codec->sample_fmt,codec->sample_rate,0,NULL);
        swr_init(s);
        f=fopen("yinpin","wb");
        uint8_t* buff=(uint8_t*)av_malloc(2*1024*4);  
        while(av_read_frame(format,packet)==0){
            if(packet->stream_index==1){
                avcodec_send_packet(codec,packet);
                while(avcodec_receive_frame(codec,frame)>=0){
                    swr_convert(s,&buff,2*1024*2*2,(const uint8_t **)frame->data,frame->nb_samples);
                    int size=av_samples_get_buffer_size(NULL,2,frame->nb_samples,AV_SAMPLE_FMT_S16,1);
                    fwrite(buff,1,size,f);
                }
            }
        }
    
    
        SDL_Init(SDL_INIT_AUDIO);
        SDL_AudioSpec spec;
        spec.freq=48000;
        spec.format=AUDIO_S16;
        spec.channels=2;
        spec.samples=1024;
        spec.callback=write;
        SDL_OpenAudio(&spec,NULL);
        SDL_PauseAudio(0);
        f2=fopen("yinpin","rb");
        uint8_t* buff2=(uint8_t*)av_malloc(2*2*1024*2);
        size_t size=10;
        while(size>0){
            size=fread(buff2,1,2*2*1024*2,f2);
            d->data=buff2;
            d->len=(Uint32)size;
        }
    }
    

    After the program is run, the sound played is all noise, which makes me a little confused because I don't know whether it is a decoding problem or a problem with SDL2 audio playback. Is there any troubleshooting method? Has anyone figured out the source of the problem?

  • How to hide console output of FFmpeg in Python ?

    18 février, par Shiven Saini

    I was working on a YouTube video downloader Python program.

    I want to encode downloaded data to other media formats for this job i used FFmpeg and FFmpeg-Python (Package to use FFmpeg in Python).

    Everything works fine, but how can I disable the FFmpeg output on the console?

    Here is some Pic of my Program :-

    Main Graphical Interface

    But this console often appears when my program starts encoding, suppressing the main GUI :-

    FFMPEG - OUTPUT

    If you know any solution for my problem then please give me some solution. It is my first time that i am trying Stackoverflow for my problem.

    THANKS IN ADVANCE !!!!!

  • macos - Batch create 'samples' with multiple cuts from videos [closed]

    17 février, par Thiago

    I'm on macOS, and have ffmpeg and python installed through homebrew. Bash solutions are also welcome - though I have no experience with bash

    I have folders with many videos, most (if not all) in either mp4 or mkv. I want to generate quick samples for each video, and each sample should have multiples slices/cuts from the original video.

    I don't care if the resulting encoding, resolution and bitrate are the same as the original, or if they're fixed — for example, mp4 at 720p. Whatever is easier to write in the script or faster to execute.

    Example result: folder has 10 videos of different duration. Script will result in 10 videos titled "ORIGINALTITLE-sample.EXT". Each sample will be 2 minutes long, and it'll be made of 24 cuts - 5 seconds long each - from the original video.

    The cuts should be approximately well distributed during the video, but doesn't need to be precise.

    Edit

    someone on Reddit suggested the script below, but the result has some issues, like images blinking in and out (see it here https://youtu.be/FZC3aIvugpI). Maybe it's related to errors like this I saw? [hevc @ 0x11c631a30] Could not find ref with POC -43

    I was also not able to change the 1-second duration of each clip for something longer, and would still need to make this loop on every video in the folder.

    #!/bin/bash
    # Build a "sample" reel: copy-split the input into 8 segments spread over
    # ~95% of its duration, then concatenate the first second of each segment.
    f="original.mp4"
    dur=$(ffprobe -v 16 -show_entries format=duration -of csv=p=0 "$f")
    # Segment length in seconds (integer, via bc).
    cnt=$(echo "scale=0; ${dur} * 0.95 / 8" | bc -l)
    echo "$dur" "$cnt"
    
    ffmpeg -i "$f" -c copy -f segment -segment_time "$cnt" -reset_timestamps 1 "/tmp/out_%03d.${f##*.}" -y -hide_banner
    
    # Concat list: lines starting with '#' are comments to the concat demuxer;
    # "outpoint 1" keeps only the first second of each segment.
    echo "#list">/tmp/1.txt
    for g in /tmp/out_*; do
        echo "file $g" >> /tmp/1.txt
        echo "outpoint 1" >> /tmp/1.txt
    done
    
    o="/tmp/out.${f##*.}"
    # The stray trailing "test.mp4" in the original made ffmpeg write a second,
    # re-encoded output file; emit only the intended concat target.
    ffmpeg -f concat -safe 0 -i /tmp/1.txt -c copy "$o" -y -v error -stats
    
  • Cloud Functions for Firebase : completing long processes without touching maximum timeout

    17 février, par Scott Ewing

    I have to transcode videos from webm to mp4 when they're uploaded to firebase storage. I have a code demo here that works, but if the uploaded video is too large, firebase functions will time out on me before the conversion is finished. I know it's possible to increase the timeout limit for the function, but that seems messy, since I can't ever confirm the process will take less time than the timeout limit.

    Is there some way to stop firebase from timing out without just increasing the maximum timeout limit?

    If not, is there a way to complete time consuming processes (like video conversion) while still having each process start using firebase function triggers?

    If even completing time consuming processes using firebase functions isn't something that really exists, is there some way to speed up the conversion of fluent-ffmpeg without touching the quality that much? (I realize this part is a lot to ask. I plan on lowering the quality if I absolutely have to, as the reason webms are being converted to mp4 is for IOS devices)

    For reference, here's the main portion of the demo I mentioned. As I said before, the full code can be seen here, but this section of the code copied over is the part that creates the Promise that makes sure the transcoding finishes. The full code is only 70 something lines, so it should be relatively easy to go through if needed.

    const functions = require('firebase-functions');
    const mkdirp = require('mkdirp-promise');
    const gcs = require('@google-cloud/storage')();
    const Promise = require('bluebird');
    const ffmpeg = require('fluent-ffmpeg');
    const ffmpeg_static = require('ffmpeg-static');
    

    (There's a bunch of text parsing code here, followed by this next chunk of code inside an onChange event)

    function promisifyCommand (command) {
        return new Promise( (cb) => {
            command
            .on( 'end',   ()      => { cb(null)  } )
            .on( 'error', (error) => { cb(error) } )
            .run();
        })
    }
    // Pipeline: create the temp dir, download the uploaded file from the
    // bucket, transcode it to MP4 with fluent-ffmpeg, then upload the result.
    return mkdirp(tempLocalDir).then(() => {
        console.log('Directory Created')
        //Download item from bucket
        const bucket = gcs.bucket(object.bucket);
        return bucket.file(filePath).download({destination: tempLocalFile}).then(() => {
          console.log('file downloaded to convert. Location:', tempLocalFile)
          // NOTE(review): `cmd` is assigned without const/let — an implicit
          // global; confirm this is intentional.
          cmd = ffmpeg({source:tempLocalFile})
                   .setFfmpegPath(ffmpeg_static.path)
                   .inputFormat(fileExtension)
                   .output(tempLocalMP4File)
          // Wrap the event-based command in a promise so completion can be chained.
          cmd = promisifyCommand(cmd)
          return cmd.then(() => {
            //Getting here takes forever, because video transcoding takes forever!
            console.log('mp4 created at ', tempLocalMP4File)
            // Upload the transcoded file; the surrounding function resolves only
            // after this finishes (long videos can exceed the function timeout).
            return bucket.upload(tempLocalMP4File, {
                destination: MP4FilePath
            }).then(() => {
              console.log('mp4 uploaded at', filePath);
            });
          })
        });
      });