Advanced search

Media (0)


No media matching your criteria is available on the site.

Other articles (75)

  • Submit enhancements and plugins

    13 April 2011

    If you have developed a new extension to add one or more useful features to MediaSPIP, let us know and its integration into the core MediaSPIP functionality will be considered.
    You can use the development discussion list to ask for help with creating a plugin; as MediaSPIP is based on SPIP, you can also use the SPIP discussion list, SPIP-Zone.

  • Customising by adding a logo, a banner or a background image

    5 September 2013, by

    Some themes support three customisation elements: adding a logo; adding a banner; adding a background image.

  • Encoding and processing into web-friendly formats

    13 April 2011, by

    MediaSPIP automatically converts uploaded files to internet-compatible formats.
    Video files are encoded to Ogv and WebM (supported by HTML5) and MP4 (supported by Flash).
    Audio files are encoded to MP3 and Ogg (supported by HTML5), with MP3 also supported by Flash.
    Where possible, text is analyzed to retrieve the data needed for search-engine indexing, and the document is then exported as a series of image files.
    All uploaded files are stored online in their original format, so you can (...)

On other sites (6270)

  • Resampling audio with FFMPEG LibAV

    22 September 2020, by FennecFix

    Well, since FFmpeg documentation and code examples are absolute garbage, I guess my only choice is to come here and ask.

    So what I'm trying to do is simply record audio from the microphone and write it to a file. I initialize my input and output formats, get an audio packet, decode it, resample it, encode it and write it. But every time I try to play the audio there's only a stub of data. It seems like for some reason it writes only a start packet, which is very strange; let me explain why:

    if((response = swr_config_frame(resampleContext, audioOutputFrame, frame)) < 0) qDebug() << "can't configure frame!" <<  av_make_error(response);

    if((response = swr_convert_frame(resampleContext, audioOutputFrame, frame)) < 0) qDebug() << "can't resample frame!" <<  av_make_error(response);

    Here's the code I'm using to resample. My input frame has data, but swr_convert_frame writes empty data to audioOutputFrame.

    How do I fix that? FFmpeg is literally driving me crazy.
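
    For reference, a minimal sketch of what swr_convert_frame expects (resampleOnce and its parameters are illustrative names, not part of the class below): the output frame must carry format, channel_layout and sample_rate before the call, and if its buffers are left unallocated the function allocates them and sets nb_samples itself.

    extern "C" {
    #include <libswresample/swresample.h>
    #include <libavutil/frame.h>
    }

    // Illustrative helper: resample one decoded frame, letting
    // swr_convert_frame() size and allocate the output for us.
    static AVFrame* resampleOnce(SwrContext* swr, const AVFrame* in,
                                 AVSampleFormat fmt, uint64_t layout, int rate)
    {
        AVFrame* out = av_frame_alloc();
        if (!out) return nullptr;

        // These three fields are mandatory on the output frame;
        // leaving them zeroed is a common cause of "empty" output.
        out->format         = fmt;
        out->channel_layout = layout; // pre-5.1 channel-layout API, as in the code below
        out->sample_rate    = rate;

        if (swr_convert_frame(swr, out, in) < 0) {
            av_frame_free(&out);
            return nullptr;
        }
        return out;
    }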

    Here's the full code of my class

    VideoReader.h

    #ifndef VIDEOREADER_H
    #define VIDEOREADER_H

    extern "C"
    {
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libswscale/swscale.h>
    #include <libavdevice/avdevice.h>
    #include "libavutil/audio_fifo.h"
    #include "libavformat/avio.h"
    #include "libswresample/swresample.h"
    #include <libavutil/opt.h> // assumed: the include target was lost in the post; av_opt_set() in the .cpp needs opt.h
    }

    #include <QString>
    #include <QElapsedTimer>

    class VideoReader
    {
    public:
        VideoReader();

        bool open(const char* filename);
        bool fillFrame();
        bool readFrame(uint8_t *&frameData);
        void close();

        int width, height;

    private:
        bool configInput();
        bool configOutput(const char *filename);
        bool configResampler();

        bool encode(AVFrame *frame, AVCodecContext *encoderContext, AVPacket *outputPacket, int streamIndex, QString type);

        int audioStreamIndex = -1;
        int videoStreamIndex = -1;

        int64_t videoStartPts = 0;
        int64_t audioStartPts = 0;

        AVFormatContext* inputFormatContext = nullptr;
        AVFormatContext* outputFormatContext = nullptr;

        AVCodecContext* videoDecoderContext = nullptr;
        AVCodecContext* videoEncoderContext = nullptr;

        AVCodecContext* audioDecoderContext = nullptr;
        AVCodecContext* audioEncoderContext = nullptr;

        AVFrame* videoInputFrame = nullptr;
        AVFrame* audioInputFrame = nullptr;

        AVFrame* videoOutputFrame = nullptr;
        AVFrame* audioOutputFrame = nullptr;

        AVPacket* inputPacket = nullptr;

        AVPacket* videoOutputPacket = nullptr;
        AVPacket* audioOutputPacket = nullptr;

        SwsContext* innerScaleContext = nullptr;
        SwsContext* outerScaleContext = nullptr;

        SwrContext *resampleContext = nullptr;
    };

    #endif // VIDEOREADER_H

    VideoReader.cpp

    #include "VideoReader.h"&#xA;&#xA;#include <qdebug>&#xA;&#xA;static const char* av_make_error(int errnum)&#xA;{&#xA;    static char str[AV_ERROR_MAX_STRING_SIZE];&#xA;    memset(str, 0, sizeof(str));&#xA;    return av_make_error_string(str, AV_ERROR_MAX_STRING_SIZE, errnum);&#xA;}&#xA;&#xA;VideoReader::VideoReader()&#xA;{&#xA;&#xA;}&#xA;&#xA;bool VideoReader::open(const char *filename)&#xA;{&#xA;    if(!configInput()) return false;&#xA;    if(!configOutput(filename)) return false;&#xA;    if(!configResampler()) return false;&#xA;&#xA;    return true;&#xA;}&#xA;&#xA;bool VideoReader::fillFrame()&#xA;{&#xA;    auto convertToYUV = [=](AVFrame* frame)&#xA;    {&#xA;        int response = 0;&#xA;&#xA;        if((response = sws_scale(outerScaleContext, frame->data, frame->linesize, 0, videoEncoderContext->height, videoOutputFrame->data, videoOutputFrame->linesize)) &lt; 0) qDebug() &lt;&lt; "can&#x27;t rescale" &lt;&lt; av_make_error(response);&#xA;    };&#xA;&#xA;    auto convertAudio = [this](AVFrame* frame)&#xA;    {&#xA;        int response = 0;&#xA;&#xA;        auto&amp; out = audioOutputFrame;&#xA;        qDebug() &lt;&lt; out->linesize[0] &lt;&lt; out->nb_samples;&#xA;        if((response = swr_convert_frame(resampleContext, audioOutputFrame, frame)) &lt; 0) qDebug() &lt;&lt; "can&#x27;t resample frame!" &lt;&lt; av_make_error(response);&#xA;        qDebug() &lt;&lt; "poop";&#xA;    };&#xA;&#xA;    auto decodeEncode = [=](AVPacket* inputPacket, AVFrame* inputFrame, AVCodecContext* decoderContext,&#xA;                            AVPacket* outputPacket, AVFrame* outputFrame, AVCodecContext* encoderContext,&#xA;                            std::function<void> convertFunc,&#xA;                            int streamIndex, int64_t startPts, QString type)&#xA;    {&#xA;        int response = avcodec_send_packet(decoderContext, inputPacket);&#xA;        if(response &lt; 0) { qDebug() &lt;&lt; "failed to send" &lt;&lt; type &lt;&lt; "packet!" &lt;&lt;  av_make_error(response); return false; }&#xA;&#xA;        response = avcodec_receive_frame(decoderContext, inputFrame);&#xA;        if(response == AVERROR(EAGAIN) || response == AVERROR_EOF) { av_packet_unref(inputPacket); return false; }&#xA;        else if (response &lt; 0) { qDebug() &lt;&lt; "failed to decode" &lt;&lt; type &lt;&lt; "frame!" 
&lt;&lt; response &lt;&lt; av_make_error(response); return false; }&#xA;&#xA;        if(encoderContext)&#xA;        {&#xA;            outputFrame->pts = inputPacket->pts - startPts;&#xA;&#xA;            convertFunc(inputFrame);&#xA;            if(!encode(outputFrame, encoderContext, outputPacket, streamIndex, type)) return false;&#xA;        }&#xA;&#xA;        av_packet_unref(inputPacket);&#xA;&#xA;        return true;&#xA;    };&#xA;&#xA;    while(av_read_frame(inputFormatContext, inputPacket) >= 0) //actually read packet&#xA;    {&#xA;        if(inputPacket->stream_index == videoStreamIndex)&#xA;        {&#xA;            if(!videoStartPts) videoStartPts = inputPacket->pts;&#xA;            if(decodeEncode(inputPacket, videoInputFrame, videoDecoderContext, videoOutputPacket, videoOutputFrame, videoEncoderContext, convertToYUV, videoStreamIndex, videoStartPts, "video")) break;&#xA;        }&#xA;        else if(inputPacket->stream_index == audioStreamIndex)&#xA;        {&#xA;            if(!audioStartPts) audioStartPts = inputPacket->pts;&#xA;            if(decodeEncode(inputPacket, audioInputFrame, audioDecoderContext, audioOutputPacket, audioOutputFrame, audioEncoderContext, convertAudio, audioStreamIndex, audioStartPts, "audio")) break;&#xA;        }&#xA;    }&#xA;&#xA;    return true;&#xA;}&#xA;&#xA;bool VideoReader::readFrame(uint8_t *&amp;frameData)&#xA;{&#xA;    if(!fillFrame()) { qDebug() &lt;&lt; "readFrame method failed!"; return false; };&#xA;&#xA;    const int bytesPerPixel = 4;&#xA;&#xA;    uint8_t* destination[bytesPerPixel] = {frameData, NULL, NULL, NULL};&#xA;    int destinationLinesize[bytesPerPixel] = { videoInputFrame->width * bytesPerPixel,  0, 0, 0};&#xA;&#xA;    sws_scale(innerScaleContext, videoInputFrame->data, videoInputFrame->linesize, 0, videoInputFrame->height, destination, destinationLinesize);&#xA;&#xA;    return true;&#xA;}&#xA;&#xA;void VideoReader::close()&#xA;{&#xA;    encode(NULL, videoEncoderContext, videoOutputPacket, videoStreamIndex, "video");&#xA;    encode(NULL, audioEncoderContext, audioOutputPacket, audioStreamIndex, "audio");&#xA;&#xA;    if(av_write_trailer(outputFormatContext) &lt; 0) { qDebug() &lt;&lt; "failed to write trailer"; };&#xA;&#xA;    avformat_close_input(&amp;outputFormatContext);&#xA;    avformat_free_context(outputFormatContext);&#xA;    avformat_close_input(&amp;inputFormatContext);&#xA;    avformat_free_context(inputFormatContext);&#xA;&#xA;    av_frame_free(&amp;videoInputFrame);&#xA;    av_frame_free(&amp;audioInputFrame);&#xA;&#xA;    av_frame_free(&amp;videoOutputFrame);&#xA;    av_frame_free(&amp;audioOutputFrame);&#xA;&#xA;    av_packet_free(&amp;inputPacket);&#xA;&#xA;    av_packet_free(&amp;videoOutputPacket);&#xA;    av_packet_free(&amp;audioOutputPacket);&#xA;&#xA;    avcodec_free_context(&amp;videoDecoderContext);&#xA;    avcodec_free_context(&amp;videoEncoderContext);&#xA;&#xA;    avcodec_free_context(&amp;audioDecoderContext);&#xA;    avcodec_free_context(&amp;audioEncoderContext);&#xA;&#xA;    sws_freeContext(innerScaleContext);&#xA;    sws_freeContext(outerScaleContext);&#xA;&#xA;    swr_free(&amp;resampleContext);&#xA;}&#xA;&#xA;bool VideoReader::configInput()&#xA;{&#xA;    avdevice_register_all();&#xA;&#xA;    inputFormatContext = avformat_alloc_context();&#xA;&#xA;    if(!inputFormatContext) { qDebug() &lt;&lt; "can&#x27;t create context!"; return false; }&#xA;&#xA;    const char* inputFormatName = "dshow";/*"gdigrab"*/&#xA;    AVInputFormat* inputFormat = av_find_input_format(inputFormatName);&#xA;&#xA;    
if(!inputFormat){ qDebug() &lt;&lt; "Can&#x27;t find" &lt;&lt; inputFormatName; return false; }&#xA;&#xA;    AVDictionary* options = NULL;&#xA;    av_dict_set(&amp;options, "framerate", "30", 0);&#xA;    av_dict_set(&amp;options, "video_size", "1920x1080", 0);&#xA;&#xA;    if(avformat_open_input(&amp;inputFormatContext, "video=HD USB Camera:audio=Microphone (High Definition Audio Device)" /*"desktop"*/, inputFormat, &amp;options) != 0) { qDebug() &lt;&lt; "can&#x27;t open video file!"; return false; }&#xA;&#xA;    AVCodecParameters* videoCodecParams = nullptr;&#xA;    AVCodecParameters* audioCodecParams = nullptr;&#xA;    AVCodec* videoDecoder = nullptr;&#xA;    AVCodec* audioDecoder = nullptr;&#xA;&#xA;    for (uint i = 0; i &lt; inputFormatContext->nb_streams; &#x2B;&#x2B;i)&#xA;    {&#xA;        auto stream = inputFormatContext->streams[i];&#xA;        auto codecParams = stream->codecpar;&#xA;&#xA;        if(codecParams->codec_type == AVMEDIA_TYPE_AUDIO) { audioStreamIndex = i; audioDecoder = avcodec_find_decoder(codecParams->codec_id); audioCodecParams = codecParams; }&#xA;        if(codecParams->codec_type == AVMEDIA_TYPE_VIDEO) { videoStreamIndex = i; videoDecoder = avcodec_find_decoder(codecParams->codec_id); videoCodecParams = codecParams; }&#xA;&#xA;        if(audioStreamIndex != -1 &amp;&amp; videoStreamIndex != -1) break;&#xA;    }&#xA;&#xA;    if(audioStreamIndex == -1) { qDebug() &lt;&lt; "failed to find audio stream inside file"; return false; }&#xA;    if(videoStreamIndex == -1) { qDebug() &lt;&lt; "failed to find video stream inside file"; return false; }&#xA;&#xA;    auto configureCodecContext = [=](AVCodecContext*&amp; context, AVCodec* decoder, AVCodecParameters* params, AVFrame*&amp; frame, QString type)&#xA;    {&#xA;        context = avcodec_alloc_context3(decoder);&#xA;        if(!context) { qDebug() &lt;&lt; "failed to create" &lt;&lt; type &lt;&lt; "decoder context!"; return false; }&#xA;&#xA;        if(avcodec_parameters_to_context(context, params) &lt; 0) { qDebug() &lt;&lt; "can&#x27;t initialize input" &lt;&lt; type &lt;&lt; "decoder context"; return false; }&#xA;&#xA;        if(avcodec_open2(context, decoder, NULL) &lt; 0) { qDebug() &lt;&lt; "can&#x27;t open" &lt;&lt; type &lt;&lt; "decoder"; return false; }&#xA;&#xA;        frame = av_frame_alloc();&#xA;        if(!frame) { qDebug() &lt;&lt; "can&#x27;t allocate" &lt;&lt; type &lt;&lt; "frame"; return false; }&#xA;&#xA;        return true;&#xA;    };&#xA;&#xA;    if(!configureCodecContext(videoDecoderContext, videoDecoder, videoCodecParams, videoInputFrame, "video")) return false;&#xA;    if(!configureCodecContext(audioDecoderContext, audioDecoder, audioCodecParams, audioInputFrame, "audio")) return false;&#xA;&#xA;    audioDecoderContext->channel_layout = AV_CH_LAYOUT_STEREO;&#xA;    audioInputFrame->channel_layout = audioDecoderContext->channel_layout;&#xA;&#xA;    inputPacket = av_packet_alloc();&#xA;    if(!inputPacket) { qDebug() &lt;&lt; "can&#x27;t allocate input packet!";  return false; }&#xA;&#xA;    //first frame, needed fo initialization&#xA;    if(!fillFrame()) { qDebug() &lt;&lt; "Failed to fill frame on init!"; return false; };&#xA;&#xA;    width = videoDecoderContext->width;&#xA;    height = videoDecoderContext->height;&#xA;&#xA;    innerScaleContext = sws_getContext(width, height, videoDecoderContext->pix_fmt,&#xA;                                       width, height, AV_PIX_FMT_RGB0,&#xA;                                       SWS_FAST_BILINEAR,&#xA;                                       
NULL,&#xA;                                       NULL,&#xA;                                       NULL);&#xA;&#xA;    outerScaleContext = sws_getContext(width, height, videoDecoderContext->pix_fmt,&#xA;                                       width, height, AV_PIX_FMT_YUV420P,&#xA;                                       SWS_FAST_BILINEAR,&#xA;                                       NULL,&#xA;                                       NULL,&#xA;                                       NULL);&#xA;&#xA;&#xA;    if(!innerScaleContext) { qDebug() &lt;&lt; "failed to initialize scaler context"; return false; }&#xA;&#xA;    return true;&#xA;}&#xA;&#xA;bool VideoReader::configOutput(const char *filename)&#xA;{&#xA;    avformat_alloc_output_context2(&amp;outputFormatContext, NULL, NULL, filename);&#xA;    if(!outputFormatContext) { qDebug() &lt;&lt; "failed to create output context"; return false; }&#xA;&#xA;    AVOutputFormat* outputFormat = outputFormatContext->oformat;&#xA;&#xA;    auto prepareOutputContext = [=](AVCodecContext*&amp; encoderContext,&#xA;                                    std::function<void> configureContextFunc,&#xA;                                    std::function<void> configureFrameFunc,&#xA;                                    AVCodecID codecId, AVFrame*&amp; frame, AVPacket*&amp; packet, QString type)&#xA;    {&#xA;        auto stream = avformat_new_stream(outputFormatContext, NULL);&#xA;        if(!stream) { qDebug() &lt;&lt; "failed to allocate output" &lt;&lt; type &lt;&lt; "stream"; return false; }&#xA;&#xA;        AVCodec* encoder = avcodec_find_encoder(codecId);&#xA;        if(!encoder) { qDebug() &lt;&lt; "failed to find" &lt;&lt; type &lt;&lt; "encoder!"; return false; }&#xA;&#xA;        encoderContext = avcodec_alloc_context3(encoder);&#xA;        if(!encoderContext) { qDebug() &lt;&lt; "failed to create video encoder context!"; return false; }&#xA;&#xA;        configureContextFunc(encoderContext, encoder);&#xA;&#xA;        int result = avcodec_open2(encoderContext, encoder, NULL);&#xA;        if(result &lt; 0) { qDebug() &lt;&lt; "failed to open audio encoder" &lt;&lt; av_make_error(result); return false; }&#xA;        if(avcodec_parameters_from_context(stream->codecpar, encoderContext) &lt; 0) { qDebug() &lt;&lt; "failed to copy parameters to audio output stream"; return false; }&#xA;&#xA;        packet = av_packet_alloc();&#xA;        if(!packet) {qDebug() &lt;&lt; "failed allocate output" &lt;&lt; type &lt;&lt; "packet"; return false;}&#xA;&#xA;        frame = av_frame_alloc();&#xA;        if(!frame) { qDebug() &lt;&lt; "can&#x27;t allocate output" &lt;&lt; type &lt;&lt; "frame"; return false; }&#xA;&#xA;        configureFrameFunc(frame);&#xA;&#xA;        av_frame_get_buffer(frame, 0);&#xA;&#xA;        return true;&#xA;    };&#xA;&#xA;    auto configureAudioFrame = [=](AVFrame* frame)&#xA;    {&#xA;        frame->nb_samples = audioEncoderContext->frame_size;&#xA;        frame->format = audioEncoderContext->sample_fmt;&#xA;        frame->sample_rate = audioEncoderContext->sample_rate;&#xA;        frame->channel_layout = av_get_default_channel_layout(audioDecoderContext->channels);&#xA;    };&#xA;&#xA;    auto configureAudioEncoderContext = [=](AVCodecContext* encoderContext, AVCodec* encoder)&#xA;    {&#xA;        encoderContext->bit_rate = 64000;&#xA;        encoderContext->sample_fmt = encoder->sample_fmts[0];&#xA;        encoderContext->sample_rate = 44100;&#xA;        encoderContext->codec_type = AVMEDIA_TYPE_AUDIO;&#xA;        encoderContext->channel_layout = 
AV_CH_LAYOUT_STEREO;&#xA;        encoderContext->channels = av_get_channel_layout_nb_channels(encoderContext->channel_layout);&#xA;    };&#xA;&#xA;    auto configureVideoFrame = [=](AVFrame* frame)&#xA;    {&#xA;        frame->format = videoEncoderContext->pix_fmt;&#xA;        frame->width  = videoEncoderContext->width;&#xA;        frame->height = videoEncoderContext->height;&#xA;    };&#xA;&#xA;    auto configureVideoEncoderContext = [=](AVCodecContext* encoderContext, AVCodec* encoder)&#xA;    {&#xA;        encoderContext->width = videoDecoderContext->width;&#xA;        encoderContext->height = videoDecoderContext->height;&#xA;        encoderContext->pix_fmt = encoder->pix_fmts[0];&#xA;        encoderContext->gop_size = 10;&#xA;        encoderContext->max_b_frames = 1;&#xA;        encoderContext->framerate = AVRational{30, 1};&#xA;        encoderContext->time_base = AVRational{1, 30};&#xA;&#xA;        av_opt_set(encoderContext->priv_data, "preset", "ultrafast", 0);&#xA;        av_opt_set(encoderContext->priv_data, "tune", "zerolatency", 0);&#xA;    };&#xA;&#xA;    if(!prepareOutputContext(videoEncoderContext, configureVideoEncoderContext, configureVideoFrame, outputFormat->video_codec, videoOutputFrame, videoOutputPacket, "video")) return false;&#xA;    if(!prepareOutputContext(audioEncoderContext, configureAudioEncoderContext, configureAudioFrame, outputFormat->audio_codec, audioOutputFrame, audioOutputPacket, "audio")) return false;&#xA;&#xA;    if(outputFormat->flags &amp; AVFMT_GLOBALHEADER) outputFormat->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;&#xA;&#xA;    int result = 0;&#xA;    if(!(outputFormat->flags &amp; AVFMT_NOFILE))&#xA;        if((result = avio_open(&amp;outputFormatContext->pb, filename, AVIO_FLAG_WRITE)) &lt; 0)&#xA;            { qDebug() &lt;&lt; "failed to open file" &lt;&lt;  av_make_error(result); return false; }&#xA;&#xA;    result = avformat_write_header(outputFormatContext, NULL);&#xA;    if(result &lt; 0) {qDebug() &lt;&lt; "failed to write header!" 
&lt;&lt; av_make_error(result); return false; }&#xA;&#xA;    return true;&#xA;}&#xA;&#xA;bool VideoReader::configResampler()&#xA;{&#xA;&#xA;    resampleContext = swr_alloc_set_opts(NULL,&#xA;                                         av_get_default_channel_layout(audioEncoderContext->channels),&#xA;                                         audioEncoderContext->sample_fmt,&#xA;                                         audioEncoderContext->sample_rate,&#xA;                                         av_get_default_channel_layout(audioDecoderContext->channels),&#xA;                                         audioDecoderContext->sample_fmt,&#xA;                                         audioDecoderContext->sample_rate,&#xA;                                         0, NULL);&#xA;    if (!resampleContext) { qDebug() &lt;&lt; "Could not allocate resample context"; return false; }&#xA;&#xA;    int error;&#xA;    if ((error = swr_init(resampleContext)) &lt; 0) { qDebug() &lt;&lt; "Could not open resample context"; swr_free(&amp;resampleContext); return false; }&#xA;&#xA;    return true;&#xA;}&#xA;&#xA;bool VideoReader::encode(AVFrame* frame, AVCodecContext* encoderContext, AVPacket* outputPacket, int streamIndex, QString type)&#xA;{&#xA;    int response;&#xA;&#xA;    response = avcodec_send_frame(encoderContext, frame);&#xA;    if(response &lt; 0) { qDebug() &lt;&lt; "failed to send" &lt;&lt; type &lt;&lt; "frame" &lt;&lt; av_make_error(response); return false; }&#xA;&#xA;    while(response >= 0)&#xA;    {&#xA;        response = avcodec_receive_packet(encoderContext, outputPacket);&#xA;        if(response == AVERROR(EAGAIN) || response == AVERROR_EOF) { av_packet_unref(outputPacket); continue; }&#xA;        else if (response &lt; 0) { qDebug() &lt;&lt; "failed to encode" &lt;&lt; type &lt;&lt; "frame!" &lt;&lt; response &lt;&lt; av_make_error(response); return false; }&#xA;&#xA;        outputPacket->stream_index = streamIndex;&#xA;&#xA;        AVStream *inStream = inputFormatContext->streams[streamIndex];&#xA;        AVStream *outStream = outputFormatContext->streams[streamIndex];&#xA;&#xA;        av_packet_rescale_ts(outputPacket, inStream->time_base, outStream->time_base);&#xA;&#xA;        if((response = av_interleaved_write_frame(outputFormatContext, outputPacket)) != 0) { qDebug() &lt;&lt; "Failed to write" &lt;&lt; type &lt;&lt; "packet!" &lt;&lt;  av_make_error(response); av_packet_unref(outputPacket); return false; }&#xA;&#xA;        av_packet_unref(outputPacket);&#xA;    }&#xA;&#xA;    return true;&#xA;}&#xA;</void></void></void></qdebug>

    I could try to write a shorter example if needed.
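
    For what it's worth, encoders with a fixed frame_size (AAC, for example) want exactly frame_size samples per frame, while one swr_convert_frame call can produce any number of them; the usual bridge is an AVAudioFifo, and libavutil/audio_fifo.h is already included in the header above. A hedged sketch with hypothetical helper names (the FIFO itself would come from av_audio_fifo_alloc with the encoder's sample format and channel count):

    extern "C" {
    #include <libavutil/audio_fifo.h>
    #include <libswresample/swresample.h>
    }

    // Hypothetical helper: resample a decoded frame and queue the result.
    static bool queueSamples(AVAudioFifo* fifo, SwrContext* swr,
                             const AVFrame* decoded, AVFrame* resampled)
    {
        if (swr_convert_frame(swr, resampled, decoded) < 0) return false;
        return av_audio_fifo_write(fifo, (void**)resampled->data,
                                   resampled->nb_samples) >= 0;
    }

    // Hypothetical helper: pop exactly frameSize samples once enough are buffered.
    static bool popEncoderFrame(AVAudioFifo* fifo, AVFrame* out, int frameSize)
    {
        if (av_audio_fifo_size(fifo) < frameSize) return false; // not enough yet
        av_audio_fifo_read(fifo, (void**)out->data, frameSize);
        out->nb_samples = frameSize;
        return true;
    }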

  • [FFmpeg] How to write code for converting jpg files to avi (motion jpeg)

    13 April 2016, by YJJ

    I want to write this code so that I can embed it in a machine with cameras.

    Here is what I have worked on so far.

    It generates an output avi file, but it doesn't work.

    I think I'm missing the logic that reads the input images, and I don't know how to write it.

    I want to input 100 jpg images, from street_01.jpg to street_99.jpg.

    int main(int argc, char* argv[])
    {
       AVFormatContext* pFormatCtx;
       AVOutputFormat* fmt;
       AVStream* video_st;
       AVCodecContext* pCodecCtx;
       AVCodec* pCodec;
       AVPacket pkt;
       uint8_t* picture_buf;
       AVFrame* pFrame;
       int picture_size;
       int y_size;
       int framecnt = 0;
       //FILE *in_file = fopen("src01_480x272.yuv", "rb"); //Input raw YUV data
       FILE *in_file = fopen("street_01.jpg", "rb");       //Input raw YUV data
       int in_w = 2456, in_h = 2058;                       //Input data's width and height
       int framenum = 100;                                 //Frames to encode
       //const char* out_file = "src01.h264";              //Output Filepath
       //const char* out_file = "src01.ts";
       //const char* out_file = "src01.hevc";
       const char* out_file = "output.avi";

       av_register_all();
    /*
       //Method1.
       pFormatCtx = avformat_alloc_context();
       //Guess Format
       fmt = av_guess_format(NULL, out_file, NULL);
       pFormatCtx->oformat = fmt;
    */
       //Method 2.
   avformat_alloc_output_context2(&pFormatCtx, NULL, "avi", out_file);
       fmt = pFormatCtx->oformat;


       //Open output URL
   if (avio_open(&pFormatCtx->pb, out_file, AVIO_FLAG_READ_WRITE) < 0){
           printf("Failed to open output file! \n");
           return -1;
       }

       video_st = avformat_new_stream(pFormatCtx, 0);
       video_st->time_base.num = 1;
       video_st->time_base.den = 25;

       if (video_st == NULL){
           return -1;
       }
       //Param that must set
       pCodecCtx = video_st->codec;
       //pCodecCtx->codec_id =AV_CODEC_ID_HEVC;
       pCodecCtx->codec_id = AV_CODEC_ID_MJPEG;
       //pCodecCtx->codec_id = fmt->video_codec;
       pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
       pCodecCtx->pix_fmt = AV_PIX_FMT_YUV444P;
       pCodecCtx->width = in_w;
       pCodecCtx->height = in_h;
       pCodecCtx->time_base.num = 1;
       pCodecCtx->time_base.den = 25;
       pCodecCtx->bit_rate = 400000;
       pCodecCtx->gop_size = 250;
       //H264
       //pCodecCtx->me_range = 16;
       //pCodecCtx->max_qdiff = 4;
       //pCodecCtx->qcompress = 0.6;
       pCodecCtx->qmin = 10;
       pCodecCtx->qmax = 51;

       //Optional Param
       pCodecCtx->max_b_frames = 3;

       // Set Option
       AVDictionary *param = 0;
       //H264
       if (pCodecCtx->codec_id == AV_CODEC_ID_H264) {
       av_dict_set(&param, "preset", "slow", 0);
       av_dict_set(&param, "tune", "zerolatency", 0);
       //av_dict_set(&param, "profile", "main", 0);
       }
       //H265
       if (pCodecCtx->codec_id == AV_CODEC_ID_H265){
       av_dict_set(&param, "preset", "ultrafast", 0);
       av_dict_set(&param, "tune", "zero-latency", 0);
       }

       //Show some Information
       av_dump_format(pFormatCtx, 0, out_file, 1);

       pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
       if (!pCodec){
           printf("Can not find encoder! \n");
           return -1;
       }
   if (avcodec_open2(pCodecCtx, pCodec, &param) < 0){
           printf("Failed to open encoder! \n");
           return -1;
       }


       pFrame = av_frame_alloc();
       //picture_size = av_image_get_buffer_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, 1);
       picture_size = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
       picture_buf = (uint8_t *)av_malloc(picture_size);
       avpicture_fill((AVPicture *)pFrame, picture_buf, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);

       //Write File Header
       avformat_write_header(pFormatCtx, NULL);

   av_new_packet(&pkt, picture_size);

       y_size = pCodecCtx->width * pCodecCtx->height;

   for (int i = 0; i < framenum; i++){
       //Read raw YUV data
       if (fread(picture_buf, 1, y_size * 3 / 2, in_file) <= 0){
               printf("Failed to read raw data! \n");
               return -1;
           }
           else if (feof(in_file)){
               break;
           }
           pFrame->data[0] = picture_buf;                  // Y
           pFrame->data[1] = picture_buf + y_size;         // U
           pFrame->data[2] = picture_buf + y_size * 5 / 4; // V
           //PTS
           pFrame->pts = i;
           int got_picture = 0;
           //Encode
       int ret = avcodec_encode_video2(pCodecCtx, &pkt, pFrame, &got_picture);
       if (ret < 0){
               printf("Failed to encode! \n");
               return -1;
           }
           if (got_picture == 1){
               printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, pkt.size);
               framecnt++;
               pkt.stream_index = video_st->index;
           ret = av_write_frame(pFormatCtx, &pkt);
           av_free_packet(&pkt);
           }
       }
       //Flush Encoder
       int ret = flush_encoder(pFormatCtx, 0);
   if (ret < 0) {
           printf("Flushing encoder failed\n");
           return -1;
       }

       //Write file trailer
       av_write_trailer(pFormatCtx);

       //Clean
       if (video_st){
           avcodec_close(video_st->codec);
           av_free(pFrame);
           av_free(picture_buf);
       }
       avio_close(pFormatCtx->pb);
       avformat_free_context(pFormatCtx);

       fclose(in_file);

       return 0;
    }
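
    A hedged sketch of the missing input logic: instead of fread-ing a .jpg as if it were raw YUV, each image can be decoded with libavcodec. loadJpeg is an illustrative name, the send/receive decode API is used rather than the deprecated calls above, and error checks are trimmed:

    extern "C" {
    #include <libavformat/avformat.h>
    #include <libavcodec/avcodec.h>
    }

    // Illustrative helper: decode one JPEG file into an AVFrame.
    static AVFrame* loadJpeg(const char* path)
    {
        AVFormatContext* fmt = NULL;
        if (avformat_open_input(&fmt, path, NULL, NULL) < 0) return NULL;

        AVCodecParameters* par = fmt->streams[0]->codecpar;
        AVCodec* dec = avcodec_find_decoder(par->codec_id); // MJPEG for .jpg
        AVCodecContext* ctx = avcodec_alloc_context3(dec);
        avcodec_parameters_to_context(ctx, par);
        avcodec_open2(ctx, dec, NULL);

        AVPacket* pkt = av_packet_alloc();
        AVFrame* frame = av_frame_alloc();

        if (av_read_frame(fmt, pkt) >= 0) {
            avcodec_send_packet(ctx, pkt);
            avcodec_receive_frame(ctx, frame); // decoded picture, typically a YUVJ format
        }

        av_packet_free(&pkt);
        avcodec_free_context(&ctx);
        avformat_close_input(&fmt);
        return frame;
    }

    Each decoded frame could then be converted to pCodecCtx->pix_fmt with sws_scale before encoding, and the names street_01.jpg to street_99.jpg can be generated with snprintf.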
  • I am trying to record internal audio and screen using ffmpeg, but the video is lagging [closed]

    13 September 2020, by super computer

    ffmpeg -rtbufsize 1500M -f dshow -i audio="Stereo Mix (Realtek High Definition Audio)" -y -rtbufsize 100M -f gdigrab -t 00:20:00 -framerate 20 -probesize 10M -draw_mouse 1 -i desktop -c:v libx264 -r 20 -preset ultrafast -tune zerolatency -crf 25 -pix_fmt yuv420p "P:/myvideo.mp4"
