Newest 'ffmpeg' Questions - Stack Overflow

http://stackoverflow.com/questions/tagged/ffmpeg

Articles published on the site

  • Linker error when trying an example program with ffmpeg

    11 March, by Chris

    I think this might be a stupid question and I'm just blind, but this thing has been driving me nuts for hours now. I downloaded FFmpeg and built it. Now I want to try it out in a program, but I can't set up CMake to link FFmpeg properly and I have no idea what is wrong.

    The linker always tells me this:

    christoph@christoph-ThinkPad-T490:~/develop/ffmpg_example/build$ make
    [ 50%] Linking CXX executable test
    CMakeFiles/test.dir/main.cxx.o: In function `main':
    main.cxx:(.text+0x180): undefined reference to `av_register_all()'
    main.cxx:(.text+0x1a7): undefined reference to `avformat_open_input(AVFormatContext**, char const*, AVInputFormat*, AVDictionary**)'
    main.cxx:(.text+0x1ce): undefined reference to `avformat_find_stream_info(AVFormatContext*, AVDictionary**)'
    main.cxx:(.text+0x206): undefined reference to `av_dump_format(AVFormatContext*, int, char const*, int)'
    main.cxx:(.text+0x2bb): undefined reference to `avcodec_find_decoder(AVCodecID)'
    main.cxx:(.text+0x2fc): undefined reference to `avcodec_alloc_context3(AVCodec const*)'
    main.cxx:(.text+0x316): undefined reference to `avcodec_copy_context(AVCodecContext*, AVCodecContext const*)'
    main.cxx:(.text+0x361): undefined reference to `avcodec_open2(AVCodecContext*, AVCodec const*, AVDictionary**)'
    main.cxx:(.text+0x377): undefined reference to `av_frame_alloc()'
    main.cxx:(.text+0x383): undefined reference to `av_frame_alloc()'
    main.cxx:(.text+0x3ba): undefined reference to `avpicture_get_size(AVPixelFormat, int, int)'
    main.cxx:(.text+0x3d0): undefined reference to `av_malloc(unsigned long)'
    main.cxx:(.text+0x3ff): undefined reference to `avpicture_fill(AVPicture*, unsigned char const*, AVPixelFormat, int, int)'
    main.cxx:(.text+0x43d): undefined reference to `sws_getContext(int, int, AVPixelFormat, int, int, AVPixelFormat, int, SwsFilter*, SwsFilter*, double const*)'
    main.cxx:(.text+0x465): undefined reference to `av_read_frame(AVFormatContext*, AVPacket*)'
    main.cxx:(.text+0x49f): undefined reference to `avcodec_decode_video2(AVCodecContext*, AVFrame*, int*, AVPacket const*)'
    main.cxx:(.text+0x4fd): undefined reference to `sws_scale(SwsContext*, unsigned char const* const*, int const*, int, int, unsigned char* const*, int const*)'
    main.cxx:(.text+0x545): undefined reference to `av_free_packet(AVPacket*)'
    main.cxx:(.text+0x556): undefined reference to `av_free(void*)'
    main.cxx:(.text+0x565): undefined reference to `av_frame_free(AVFrame**)'
    main.cxx:(.text+0x574): undefined reference to `av_frame_free(AVFrame**)'
    main.cxx:(.text+0x580): undefined reference to `avcodec_close(AVCodecContext*)'
    main.cxx:(.text+0x58f): undefined reference to `avcodec_close(AVCodecContext*)'
    main.cxx:(.text+0x59e): undefined reference to `avformat_close_input(AVFormatContext**)'
    collect2: error: ld returned 1 exit status
    CMakeFiles/test.dir/build.make:87: recipe for target 'test' failed
    make[2]: *** [test] Error 1
    CMakeFiles/Makefile2:75: recipe for target 'CMakeFiles/test.dir/all' failed
    make[1]: *** [CMakeFiles/test.dir/all] Error 2
    Makefile:83: recipe for target 'all' failed
    make: *** [all] Error 2
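
    One detail worth noting: every undefined reference above carries a full C++ parameter list (for example avformat_open_input(AVFormatContext**, char const*, ...)), which usually means main.cxx was compiled as C++ while the FFmpeg C headers were included without an extern "C" guard, so the compiler mangled the function names. A minimal sketch of guarded includes (an assumption, not something the question confirms):

    // Sketch: wrap the FFmpeg C headers so a C++ translation unit
    // links against the plain C symbols instead of C++-mangled ones.
    extern "C" {
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libswscale/swscale.h>
    }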
    

    The CMakeLists.txt looks like this:

    cmake_minimum_required(VERSION 3.16)
    
    project(ffmpeg_test)
    
    add_library(avformat STATIC IMPORTED)
    set_target_properties(avformat
        PROPERTIES IMPORTED_LOCATION /home/christoph/develop/FFmpeg/build/lib/libavformat.a
    )
    add_library(avcodec STATIC IMPORTED)
    set_target_properties(avcodec
        PROPERTIES IMPORTED_LOCATION /home/christoph/develop/FFmpeg/build/lib/libavcodec.a
    )
    add_library(swscale STATIC IMPORTED)
    set_target_properties(swscale
        PROPERTIES IMPORTED_LOCATION /home/christoph/develop/FFmpeg/build/lib/libswscale.a
    )
    add_library(avutil STATIC IMPORTED)
    set_target_properties(avutil
        PROPERTIES IMPORTED_LOCATION /home/christoph/develop/FFmpeg/build/lib/libavutil.a
    )
    add_executable(test main.cxx)
    
    target_link_libraries(test PRIVATE
        /home/christoph/develop/FFmpeg/build/lib/libavformat.a
        avcodec
        swscale
        avutil
    )
    target_include_directories(test PRIVATE /home/christoph/develop/FFmpeg/build/include)
    

    And here are the ffmpeg libs:

    christoph@christoph-ThinkPad-T490:~/develop/FFmpeg/build/lib$ ll
    total 277840
    drwxr-xr-x  3 christoph christoph      4096 Dez  7 23:59 ./
    drwxr-xr-x 17 christoph christoph      4096 Dez  7 23:59 ../
    -rw-r--r--  1 christoph christoph 173479270 Dez  7 23:59 libavcodec.a
    -rw-r--r--  1 christoph christoph   2174910 Dez  7 23:59 libavdevice.a
    -rw-r--r--  1 christoph christoph  37992438 Dez  7 23:59 libavfilter.a
    -rw-r--r--  1 christoph christoph  59222040 Dez  7 23:59 libavformat.a
    -rw-r--r--  1 christoph christoph   4759514 Dez  7 23:59 libavutil.a
    -rw-r--r--  1 christoph christoph    695698 Dez  7 23:59 libswresample.a
    -rw-r--r--  1 christoph christoph   6164398 Dez  7 23:59 libswscale.a
    drwxr-xr-x  2 christoph christoph      4096 Dez  7 23:59 pkgconfig/
    

    And this is the example code:

    #include <stdio.h>
    
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libswscale/swscale.h>
    
    // compatibility with newer API
    #if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
    #define av_frame_alloc avcodec_alloc_frame
    #define av_frame_free avcodec_free_frame
    #endif
    
    void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) {
      FILE *pFile;
      char szFilename[32];
      int  y;
      
      // Open file
      sprintf(szFilename, "frame%d.ppm", iFrame);
      pFile=fopen(szFilename, "wb");
      if(pFile==NULL)
        return;
      
      // Write header
      fprintf(pFile, "P6\n%d %d\n255\n", width, height);
      
      // Write pixel data
      for(y=0; y<height; y++)
        fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile);
      
      // Close file
      fclose(pFile);
    }
    
    int main(int argc, char *argv[]) {
      // Initializing these to NULL prevents segfaults!
      AVFormatContext   *pFormatCtx = NULL;
      int               i, videoStream;
      AVCodecContext    *pCodecCtxOrig = NULL;
      AVCodecContext    *pCodecCtx = NULL;
      AVCodec           *pCodec = NULL;
      AVFrame           *pFrame = NULL;
      AVFrame           *pFrameRGB = NULL;
      AVPacket          packet;
      int               frameFinished;
      int               numBytes;
      uint8_t           *buffer = NULL;
      struct SwsContext *sws_ctx = NULL;
    
      if(argc < 2) {
        printf("Please provide a movie file\n");
        return -1;
      }
      // Register all formats and codecs
      av_register_all();
      
      // Open video file
      if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
        return -1; // Couldn't open file
      
      // Retrieve stream information
      if(avformat_find_stream_info(pFormatCtx, NULL)<0)
        return -1; // Couldn't find stream information
      
      // Dump information about file onto standard error
      av_dump_format(pFormatCtx, 0, argv[1], 0);
      
      // Find the first video stream
      videoStream=-1;
      for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
          videoStream=i;
          break;
        }
      if(videoStream==-1)
        return -1; // Didn't find a video stream
      
      // Get a pointer to the codec context for the video stream
      pCodecCtxOrig=pFormatCtx->streams[videoStream]->codec;
      // Find the decoder for the video stream
      pCodec=avcodec_find_decoder(pCodecCtxOrig->codec_id);
      if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
      }
      // Copy context
      pCodecCtx = avcodec_alloc_context3(pCodec);
      if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
        fprintf(stderr, "Couldn't copy codec context");
        return -1; // Error copying codec context
      }
    
      // Open codec
      if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
        return -1; // Could not open codec
      
      // Allocate video frame
      pFrame=av_frame_alloc();
      
      // Allocate an AVFrame structure
      pFrameRGB=av_frame_alloc();
      if(pFrameRGB==NULL)
        return -1;
    
      // Determine required buffer size and allocate buffer
      numBytes=avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width,
                      pCodecCtx->height);
      buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
      
      // Assign appropriate parts of buffer to image planes in pFrameRGB
      // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
      // of AVPicture
      avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24,
             pCodecCtx->width, pCodecCtx->height);
      
      // initialize SWS context for software scaling
      sws_ctx = sws_getContext(pCodecCtx->width,
                   pCodecCtx->height,
                   pCodecCtx->pix_fmt,
                   pCodecCtx->width,
                   pCodecCtx->height,
                   AV_PIX_FMT_RGB24,
                   SWS_BILINEAR,
                   NULL,
                   NULL,
                   NULL
                   );
    
      // Read frames and save first five frames to disk
      i=0;
      while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) {
          // Decode video frame
          avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
          
          // Did we get a video frame?
          if(frameFinished) {
        // Convert the image from its native format to RGB
        sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
              pFrame->linesize, 0, pCodecCtx->height,
              pFrameRGB->data, pFrameRGB->linesize);
        
        // Save the frame to disk
        if(++i<=5)
          SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, 
                i);
          }
        }
        
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
      }
      
      // Free the RGB image
      av_free(buffer);
      av_frame_free(&pFrameRGB);
      
      // Free the YUV frame
      av_frame_free(&pFrame);
      
      // Close the codecs
      avcodec_close(pCodecCtx);
      avcodec_close(pCodecCtxOrig);
    
      // Close the video file
      avformat_close_input(&pFormatCtx);
      
      return 0;
    }
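
    A side note on the example code: av_register_all, avcodec_copy_context, avcodec_decode_video2, avpicture_get_size, avpicture_fill and av_free_packet are deprecated and have been removed in newer FFmpeg releases (the avpicture functions are replaced by av_image_get_buffer_size / av_image_fill_arrays), so a freshly built FFmpeg may not export them even once the link line is fixed. A minimal sketch of the replacement decode loop, reusing the variable names from the example above (error handling omitted):

    // Modern send/receive decoding API (sketch, assumes the setup above).
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
      if (packet.stream_index == videoStream &&
          avcodec_send_packet(pCodecCtx, &packet) == 0) {
        while (avcodec_receive_frame(pCodecCtx, pFrame) == 0) {
          // convert with sws_scale() and save the frame as before
        }
      }
      av_packet_unref(&packet);  // replaces av_free_packet()
    }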
    
  • FFMPEG compile with x264 support for Android

    10 March, by Pecana

    I am trying to build the ffmpeg extension for media3 (ExoPlayer) for Android:

    https://github.com/androidx/media/tree/release/libraries/decoder_ffmpeg

    Using the default settings everything is fine, but if I try to add support for libx264 the build fails. I compiled x264 for Android locally and it worked, so the .so file for arm64 is present. I added it to pkg-config with:

    export PKG_CONFIG_PATH=/build/x264/arm64/lib/pkgconfig:$PKG_CONFIG_PATH

    but when I try to build ffmpeg with the following command, it fails:

    ./configure \
    --prefix=/build/ffmpeg \
    --enable-gpl \
    --enable-libx264 \
    --enable-static \
    --enable-pic \
    --arch=arm64 \
    --target-os=android \
    --cross-prefix=$TOOLCHAIN/bin/aarch64-linux-android21- \
    --sysroot=$SYSROOT
    

    Error: ERROR: x264 not found using pkg-config

    But it is not due to pkg-config itself, as the command:

    pkg-config --cflags --libs x264
    

    reports: -DX264_API_IMPORTS -I/build/x264/arm64/include -L/build/x264/arm64/lib -lx264

    Any idea how to fix it?

    Thank you :-)

  • How to seek one frame forward in ffmpeg [closed]

    10 March, by Summit

    I want to seek one frame forward each time I call this function, but it gets stuck on the first frame it seeks to and does not move forward.

    void seekFrameUp() {
        if (!fmt_ctx || video_stream_index == -1) return;
    
        AVRational frame_rate = fmt_ctx->streams[video_stream_index]->r_frame_rate;
        if (frame_rate.num == 0) return;  // Avoid division by zero
    
        // Compute frame duration in AV_TIME_BASE_Q
        int64_t frame_duration = av_rescale_q(1,
            av_make_q(frame_rate.den, frame_rate.num),
            AV_TIME_BASE_Q);
    
        int64_t next_pts = requestedTimestamp + frame_duration;
    
        qDebug() << "Seeking forward: " << next_pts
            << " (Current PTS: " << requestedTimestamp
            << ", Frame Duration: " << frame_duration << ")";
    
        requestFrameAt(next_pts);
    
        // Update the requested timestamp after seeking
        requestedTimestamp = next_pts;
    }
    
    void requestFrameAt(int64_t timestamp) {
         {
             std::lock_guard lock(mtx);
             decoding = true;  // Ensure the thread keeps decoding when needed
         }
         cv.notify_one();
     }
    
    
    void decodeLoop() {
        while (!stopThread) {
            std::unique_lock lock(mtx);
            cv.wait(lock, [this] { return decoding || stopThread; });
    
            if (stopThread) break;
    
            // Avoid redundant seeking
            if (requestedTimestamp == lastRequestedTimestamp) {
                decoding = false;
                continue;
            }
    
           
    
            lastRequestedTimestamp.store(requestedTimestamp.load());
            int64_t target_pts = av_rescale_q(requestedTimestamp, AV_TIME_BASE_Q, fmt_ctx->streams[video_stream_index]->time_base);
    
            target_pts = FFMAX(target_pts, 0); // Ensure it's not negative
    
            if (av_seek_frame(fmt_ctx, video_stream_index, target_pts, AVSEEK_FLAG_ANY) >= 0) {
                avcodec_flush_buffers(codec_ctx);  // Clear old frames from the decoder
                qDebug() << "Seek successful to PTS:" << target_pts;
            }
            else {
                qDebug() << "Seeking failed!";
                decoding = false;
                continue;
            }
    
            lock.unlock();
    
            // Keep decoding until we receive a valid frame
            bool frameDecoded = false;
            while (av_read_frame(fmt_ctx, pkt) >= 0) {
                if (pkt->stream_index == video_stream_index) {
                    if (avcodec_send_packet(codec_ctx, pkt) == 0) {
                        while (avcodec_receive_frame(codec_ctx, frame) == 0) {
                            qDebug() << "FRAME DECODED ++++++++++++ PTS:" << frame->pts;
                            if (frame->pts != AV_NOPTS_VALUE) {
                                // Rescale PTS to AV_TIME_BASE_Q
                                int64_t pts_in_correct_base = av_rescale_q(frame->pts,
                                    fmt_ctx->streams[video_stream_index]->time_base,
                                    AV_TIME_BASE_Q);
    
                                // Ensure we don’t reset to 0 incorrectly
                                if (pts_in_correct_base > 0) {
                                    current_pts.store(pts_in_correct_base);
                                    qDebug() << "Updated current_pts to:" << current_pts.load();
                                }
                                else {
                                    qDebug() << "Warning: Decoded frame has PTS <= 0, keeping last valid PTS.";
                                }
                            }
                            else {
                                qDebug() << "Invalid frame->pts (AV_NOPTS_VALUE)";
                            }
    
                            QImage img = convertFrameToImage(frame);
                            emit frameDecodedSignal(img);
                    
                            frameDecoded = true;
                            break;  // Exit after the first valid frame
                        }
    
                        if (frameDecoded) {
                            decoding = (requestedTimestamp != lastRequestedTimestamp);
                            break;
                        }
                    }
                }
                av_packet_unref(pkt);
            }
        }
    }
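
    A general observation on the approach above (an assumption on my part, not something stated in the question): av_seek_frame with AVSEEK_FLAG_ANY positions the demuxer near the requested timestamp, and the next decoded frame may well be the same one as before, especially when the step of one nominal frame duration does not line up with the actual packet timestamps. For stepping exactly one frame forward, a common alternative is not to seek at all and simply decode the next frame from the current position. A rough sketch using the same member names (fmt_ctx, codec_ctx, pkt, frame) as the code above:

    // Sketch: advance one frame by decoding the next packet(s) from the
    // current position instead of seeking by a computed timestamp.
    bool stepOneFrameForward() {
        // A frame may already be buffered inside the decoder.
        if (avcodec_receive_frame(codec_ctx, frame) == 0)
            return true;
        while (av_read_frame(fmt_ctx, pkt) >= 0) {
            if (pkt->stream_index == video_stream_index)
                avcodec_send_packet(codec_ctx, pkt);
            av_packet_unref(pkt);
            if (avcodec_receive_frame(codec_ctx, frame) == 0)
                return true;   // frame now holds the next decoded picture
        }
        return false;          // end of stream (or decoder needs more data)
    }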
    
  • How to Use SVG Image Files Directly in FFmpeg? [closed]

    10 March, by Pubg Mobile

    I generated a bar chart race using the Flourish Studio website and captured the frames as a PDF sequence using a Python Playwright script. Then, I converted all the PDF files into SVG format using the following Python script, because SVG is the only image format that maintains quality without loss when zoomed in:

    import os
    import subprocess
    import multiprocessing
    
    # Define paths
    pdf2svg_path = r"E:\Desktop\dist-64bits\pdf2svg.exe"  # Full path to pdf2svg.exe
    input_dir = r"E:\Desktop\New folder (4)\New folder"
    output_dir = r"E:\Desktop\New folder (4)\New folder (2)"
    
    # Ensure output directory exists
    os.makedirs(output_dir, exist_ok=True)
    
    def convert_pdf_to_svg(pdf_file):
        """ Convert a single PDF file to SVG. """
        input_pdf = os.path.join(input_dir, pdf_file)
        output_svg = os.path.join(output_dir, os.path.splitext(pdf_file)[0] + ".svg")
    
        try:
            subprocess.run([pdf2svg_path, input_pdf, output_svg], check=True)
            print(f"Converted: {pdf_file} -> {output_svg}")
        except FileNotFoundError:
            print(f"Error: Could not find {pdf2svg_path}. Make sure the path is correct.")
        except subprocess.CalledProcessError:
            print(f"Error: Conversion failed for {pdf_file}")
    
    if __name__ == "__main__":
        # Get list of PDF files in input directory
        pdf_files = [f for f in os.listdir(input_dir) if f.lower().endswith(".pdf")]
    
        # Use multiprocessing to speed up conversion
        with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
            pool.map(convert_pdf_to_svg, pdf_files)
    
        print("All conversions completed!")
    

    Problem:

    Now, I want to use these SVG images in FFmpeg to create a high-quality video file. However, FFmpeg does not support SVG files directly, and I have read that I must convert SVG files into PNG before using them in FFmpeg. The problem is that PNG images reduce quality, especially when zooming in, which is why I want to avoid converting to PNG.

    Is there any way to use SVG files directly in FFmpeg or another method to convert them into a high-quality video while maintaining full resolution? Any ideas or suggestions would be greatly appreciated!

  • OpenGL to FFmpeg encode [closed]

    10 March, by Ian A McElhenny

    I have an OpenGL buffer that I need to forward directly to ffmpeg to do NVENC-based H.264 encoding.

    My current way of doing this is to use glReadPixels to get the pixels out of the framebuffer and then pass that pointer into ffmpeg so that it can encode the frame into H.264 packets for RTSP. However, this is bad because I have to copy bytes out of GPU RAM into CPU RAM, only to copy them back onto the GPU for encoding.

    How do I bypass the need to copy to and from the CPU?
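
    One commonly used direction for this (an assumption, not something the question states): keep the pixels on the GPU by registering the OpenGL texture with CUDA and handing the mapped memory to NVENC, for example through FFmpeg's h264_nvenc encoder with AV_PIX_FMT_CUDA hardware frames. A rough sketch of just the interop step, with glTexture standing in for the texture backing the framebuffer (error handling omitted):

    // Sketch (CUDA runtime API): map an OpenGL texture into CUDA so its
    // contents can reach NVENC without a round trip through system memory.
    #include <cuda_gl_interop.h>
    #include <cuda_runtime.h>

    cudaArray_t mapGlTextureToCuda(unsigned int glTexture,
                                   cudaGraphicsResource_t *res) {
        cudaGraphicsGLRegisterImage(res, glTexture, GL_TEXTURE_2D,
                                    cudaGraphicsRegisterFlagsReadOnly);
        cudaGraphicsMapResources(1, res, 0);
        cudaArray_t array = nullptr;
        cudaGraphicsSubResourceGetMappedArray(&array, *res, 0, 0);
        return array;  // copy/convert this into an AV_PIX_FMT_CUDA AVFrame
    }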