
Other articles (66)

  • Use, discuss, criticize

    13 April 2011, by

    Talk to people directly involved in MediaSPIP’s development, or to people around you who could use MediaSPIP to share, enhance or develop their creative projects.
    The bigger the community, the more MediaSPIP’s potential will be explored and the faster the software will evolve.
    A discussion list is available for all exchanges between users.

  • HTML5 audio and video support

    13 April 2011, by

    MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
    The MediaSPIP player used has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
    For older browsers the Flowplayer flash fallback is used.
    MediaSPIP allows for media playback on major mobile platforms with the above (...)

  • Support for all media types

    10 April 2011

    Unlike many modern document-sharing programs and platforms, MediaSPIP aims to handle as many different document formats as possible, whether they are: images (png, gif, jpg, bmp and others...); audio (MP3, Ogg, Wav and others...); video (Avi, MP4, Ogv, mpg, mov, wmv and others...); textual content, code or other (OpenOffice, Microsoft Office (spreadsheet, presentation), web (html, css), LaTeX, Google Earth) (...)

On other sites (10848)

  • 'ffmpeg' is not recognized as an internal or external command in Hapi js(Node js)

    15 May 2020, by Eswar

    I want to add a watermark image to a video while it is being uploaded. I tried to use the node ffmpeg package, but the Git Bash command line says "ffmpeg" is not recognized:

    'ffmpeg' is not recognized as an internal or external command, operable program or batch file.

    const createVideoWatermark = async () => {
        try {
            const ffmpeg = require('ffmpeg');
            const process = new ffmpeg('./uploads/video1.mp4');
            // return the promise so callers can await the result
            return process.then(async function (video) {
                console.log('The video is ready to be processed');
                const watermarkPath = './uploads/watermark.jpg';
                const newFilepath = './uploads/video-com-watermark.mp4';
                const settings = {
                    position: 'SE',     // Position: NE NC NW SE SC SW C CE CW
                    margin_nord: null,  // North margin
                    margin_sud: null,   // South margin
                    margin_east: null,  // East margin
                    margin_west: null   // West margin
                };
                const callback = async function (error, files) {
                    if (error) {
                        console.log('ERROR: ', error);
                    } else {
                        console.log('Waiting', files);
                    }
                };
                // add watermark
                await video.fnAddWatermark(watermarkPath, newFilepath, settings, callback);
                return watermarkPath;
            }, function (err) {
                console.log('Error: ' + err);
                return err;
            });
        } catch (e) {
            console.log(e.code);
            console.log(e.msg);
            return e.msg;
        }
    };

    Please help me solve this. I am not sure where I made the mistake.

  • Simple FFMpeg player for Android

    23 January 2017, by don11995

    I have a problem with rendering an AVFrame (AVPicture) into an ANativeWindow. I wrote this simple test code:

    void *Player::readThread(void * reserved) {
       ALOGD("Read thread started!");
       VideoState *state =  (VideoState *) reserved;

       int err = 0;
       int ret;
       int i;
       AVFormatContext *formatContext = NULL;
       AVCodecContext *codecContext = NULL;
       AVCodecParameters *codecParams = NULL;
       AVCodecID codecID = AV_CODEC_ID_NONE;
       AVCodec *decoder = NULL;
       AVFrame *frame = NULL;
       AVFrame *frameRGBA = NULL;
       AVPacket packet;
       struct SwsContext *img2RGBAContext = NULL; // initialized so the cleanup path can free it safely
       ANativeWindow_Buffer windowBuffer;
       uint8_t *RGBABuffer = NULL;
       int RGBABufferSize = 0;
       int got = 0;
       int windowWidth = 640;
       int windowHeight = 480;

       const char *url = state->url.c_str();
       if (url == NULL || strlen(url) <= 0) {
           err = ERROR_UNKNOWN_URI;
           goto exit;
       }
       ALOGD("URL to play: %s", url);

       state->isPlaying = true;

       formatContext = avformat_alloc_context();
       if (formatContext == NULL) {
           err = ERROR_OUT_OF_MEMORY;
           goto exit;
       }
       ALOGD("formatContext allocated");

       frame = av_frame_alloc();
       if (frame == NULL) {
           err = ERROR_OUT_OF_MEMORY;
           goto exit;
       }
       ALOGD("frame allocated");

       frameRGBA = av_frame_alloc();
       if (frameRGBA == NULL) {
           err = ERROR_OUT_OF_MEMORY;
           goto exit;
       }
       ALOGD("frameRGBA allocated");

       ret = avformat_open_input(&formatContext, url, NULL, NULL);
       if (ret != 0) {
           err = ERROR_CAN_NOT_OPEN_URI;
           goto exit;
       }
       ALOGD("formatContext opened");

       ret = avformat_find_stream_info(formatContext, NULL);
       if (ret != 0) {
           err = ERROR_CAN_NOT_FIND_STREAM_INFO;
           goto exit;
       }
       ALOGD("file info found");

       for (i = 0; i < formatContext->nb_streams; i++) {
           AVStream *stream = formatContext->streams[i];
           AVCodecParameters *codecParams = stream->codecpar;
           AVCodecID codecID = codecParams->codec_id;
           AVMediaType type = codecParams->codec_type;
           const char *codecName = avcodec_get_name(codecID);
           switch (type) {
               case AVMEDIA_TYPE_AUDIO:
                   ALOGD("Stream [%d]: type=AUDIO codecName=%s",i,codecName);
                   break;
               case AVMEDIA_TYPE_VIDEO:
                   ALOGD("Stream [%d]: type=VIDEO codecName=%s",i,codecName);
                   if (state->video_stream == -1) {
                       state->video_stream = i;
                   }
                   break;
               case AVMEDIA_TYPE_SUBTITLE:
                   ALOGD("Stream [%d]: type=SUBTITLE codecName=%s",i,codecName);
                   break;
               default:
                   ALOGD("Stream [%d]: type=UNKNOWN codecName=%s",i,codecName);
                   break;
           }
       }

       if (state->video_stream == -1) {
           err = ERROR_CAN_NOT_FIND_ANY_STREAM;
           goto exit;
       }
       ALOGD("Video stream index: %d",state->video_stream);

       codecParams = formatContext->streams[state->video_stream]->codecpar;
       codecID = codecParams->codec_id;
       if (codecID == AV_CODEC_ID_NONE) {
           err = ERROR_UNKNOWN_CODEC;
           goto exit;
       }
       ALOGD("Codec found");

       decoder = avcodec_find_decoder(codecID);
       if (decoder == NULL) {
           err = ERROR_CAN_NOT_FIND_DECODER;
           goto exit;
       }
       ALOGD("Decoder found");

       codecContext = avcodec_alloc_context3(decoder);
       if (codecContext == NULL) {
           err = ERROR_OUT_OF_MEMORY;
           goto exit;
       }
       ALOGD("codecContext created");

       ret = avcodec_parameters_to_context(codecContext, codecParams);
       if (ret < 0) {
           err = ERROR_CAN_NOT_START_DECODER;
           goto exit;
       }
       ALOGD("codecContext params was set");

       ret = avcodec_open2(codecContext, decoder, NULL);
       if (ret != 0) {
           err = ERROR_CAN_NOT_START_DECODER;
           goto exit;
       }
       ALOGD("Decoder opened");

       if (state->window != NULL) {
           ANativeWindow_setBuffersGeometry(state->window, codecParams->width, codecParams->height, WINDOW_FORMAT_RGB_565);
           ALOGD("Window geometry changed");
       }

       if (codecParams->width>0 && codecParams->height>0) {
           ALOGD("Video width: %d\nVideo height: %d",codecParams->width, codecParams->height);
           img2RGBAContext = sws_getCachedContext(
               NULL,
               codecParams->width,
               codecParams->height,
               (AVPixelFormat)codecParams->format,
               codecParams->width,
               codecParams->height,
               AV_PIX_FMT_RGB565,
               SWS_BICUBIC,
               NULL,
               NULL,
               NULL);
           if (img2RGBAContext == NULL) {
               err = ERROR_OUT_OF_MEMORY;
               goto exit;
           }
       } else {
           err = ERROR_CAN_NOT_START_DECODER;
           goto exit;
       }
       ALOGD("img2RGBAContext created");

       RGBABufferSize = av_image_get_buffer_size(AV_PIX_FMT_RGB565, codecParams->width, codecParams->height, 1);
       RGBABuffer = (uint8_t *)malloc(RGBABufferSize*sizeof(uint8_t));
       if (RGBABuffer == NULL) {
           err = ERROR_OUT_OF_MEMORY;
           goto exit;
       }
       ALOGD("frameRGBABuffer size %d bytes",RGBABufferSize);

       ret = av_image_alloc(frameRGBA->data, frameRGBA->linesize, codecParams->width, codecParams->height, AV_PIX_FMT_RGB565, 1);
       if (ret < 0) {
           err = ERROR_OUT_OF_MEMORY;
           goto exit;
       }

       while (av_read_frame(formatContext, &packet) >= 0 && state->isPlaying) {
           if (packet.stream_index != state->video_stream) {
               ALOGD("Packet is not a video packet. Discard.");
               av_packet_unref(&packet);
               continue;
           }
           ret = avcodec_send_packet(codecContext, &packet);
           if (ret != 0) {
               ALOGE("Can not send packet to decode");
               av_packet_unref(&packet);
               continue;
           }
           ret = avcodec_receive_frame(codecContext, frame);
           if (ret != 0) {
               ALOGE("Can not receive decoded frame yet");
               av_packet_unref(&packet);
               continue;
           }
           ALOGD("Converting image to RGB565...");
           sws_scale(img2RGBAContext, frame->data, frame->linesize, 0, codecParams->height, frameRGBA->data, frameRGBA->linesize);
           ALOGD("Image converted to RGB565");
           av_image_copy_to_buffer(RGBABuffer,
               RGBABufferSize,
               frameRGBA->data,
               frameRGBA->linesize,
               AV_PIX_FMT_RGB565,
               codecParams->width,
               codecParams->height,
               1);
           ALOGD("Image wrote into frameRGBABuffer");
           if (ANativeWindow_lock(state->window, &windowBuffer, NULL) == 0) {
               ALOGD("Writing %d bytes to windowBuffer", RGBABufferSize);
               memcpy(windowBuffer.bits, RGBABuffer, RGBABufferSize);
               ANativeWindow_unlockAndPost(state->window);
               ALOGD("Image displayed");
           } else {
               ALOGE("Can not display frame");
           }
           av_packet_unref(&packet);
       }

       exit:
       ALOGD("Releasing resources...");
       if (err!=0) {
           state->isPlaying = false;
           #if !LOG_NDEBUG
               switch (err) {
                   case  ERROR_OUT_OF_MEMORY:
                       ALOGE("Out of memory!");
                       break;
                   case  ERROR_CAN_NOT_OPEN_URI:
                       ALOGE("Can not open URI: %s", url);
                       break;
                   case  ERROR_UNKNOWN_URI:
                       ALOGE("Unknown URI to open!");
                       break;
                   default:
                       ALOGE("Unknown error");
                       break;
               }
           #endif
           // TODO: send error to listener
       }
       sws_freeContext(img2RGBAContext);
       free(RGBABuffer);
       av_free(frame);
       av_freep(&frameRGBA->data[0]);
       av_packet_unref(&packet);
       avcodec_close(codecContext);
       avformat_close_input(&formatContext);
       avformat_free_context(formatContext);
       ALOGD("Read thread closed!");
    }

    I ran into the following problem with some videos:
    [screenshot from the original post]
    For example, this video gives me the following logs:

    10-23 14:53:42.212 26970-4527/com.don.ffmpegplayer D/Player: Read thread started!
    10-23 14:53:42.212 26970-4527/com.don.ffmpegplayer D/Player: URL to play: http://www.ex.ua/load/280797285
    10-23 14:53:42.212 26970-4527/com.don.ffmpegplayer D/Player: formatContext allocated
    10-23 14:53:42.212 26970-4527/com.don.ffmpegplayer D/Player: frame allocated
    10-23 14:53:42.212 26970-4527/com.don.ffmpegplayer D/Player: frameRGBA allocated
    10-23 14:53:42.846 26970-4527/com.don.ffmpegplayer D/Player: formatContext opened
    10-23 14:53:42.879 26970-4527/com.don.ffmpegplayer D/Player: file info found
    10-23 14:53:42.879 26970-4527/com.don.ffmpegplayer D/Player: Stream [0]: type=VIDEO codecName=h264
    10-23 14:53:42.879 26970-4527/com.don.ffmpegplayer D/Player: Stream [1]: type=AUDIO codecName=ac3
    10-23 14:53:42.880 26970-4527/com.don.ffmpegplayer D/Player: Stream [2]: type=AUDIO codecName=ac3
    10-23 14:53:42.880 26970-4527/com.don.ffmpegplayer D/Player: Stream [3]: type=AUDIO codecName=ac3
    10-23 14:53:42.880 26970-4527/com.don.ffmpegplayer D/Player: Stream [4]: type=SUBTITLE codecName=subrip
    10-23 14:53:42.880 26970-4527/com.don.ffmpegplayer D/Player: Video stream index: 0
    10-23 14:53:42.880 26970-4527/com.don.ffmpegplayer D/Player: Codec found
    10-23 14:53:42.880 26970-4527/com.don.ffmpegplayer D/Player: Decoder found
    10-23 14:53:42.880 26970-4527/com.don.ffmpegplayer D/Player: codecContext created
    10-23 14:53:42.880 26970-4527/com.don.ffmpegplayer D/Player: codecContext params was set
    10-23 14:53:42.880 26970-4527/com.don.ffmpegplayer D/Player: Decoder opened
    10-23 14:53:42.880 26970-4527/com.don.ffmpegplayer D/Player: Window geometry changed
    10-23 14:53:42.880 26970-4527/com.don.ffmpegplayer D/Player: Video width: 1024
                                                                Video height: 424
    10-23 14:53:42.882 26970-4527/com.don.ffmpegplayer D/Player: img2RGBAContext created
    10-23 14:53:42.882 26970-4527/com.don.ffmpegplayer D/Player: frameRGBABuffer size 868352 bytes
    10-23 14:53:42.889 26970-4527/com.don.ffmpegplayer E/Player: Can not receive decoded frame yet
    10-23 14:53:42.889 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.889 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.889 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.889 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.889 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.889 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.889 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.889 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.889 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.889 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.889 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.889 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.889 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.889 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.890 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.890 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.890 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.890 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.890 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.890 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.890 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.890 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.890 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.890 26970-4527/com.don.ffmpegplayer D/Player: Packet is not a video packet. Discard.
    10-23 14:53:42.899 26970-4527/com.don.ffmpegplayer E/Player: Can not receive decoded frame yet
    10-23 14:53:42.905 26970-4527/com.don.ffmpegplayer D/Player: Converting image to RGB565...
    10-23 14:53:42.918 26970-4527/com.don.ffmpegplayer D/Player: Image converted to RGB565
    10-23 14:53:42.919 26970-4527/com.don.ffmpegplayer D/Player: Image wrote into frameRGBABuffer
    10-23 14:53:42.920 26970-4527/com.don.ffmpegplayer D/Player: Writing 868352 bytes to windowBuffer
    10-23 14:53:42.921 26970-4527/com.don.ffmpegplayer D/Player: Image displayed
    10-23 14:53:42.926 26970-4527/com.don.ffmpegplayer D/Player: Converting image to RGB565...
    10-23 14:53:42.934 26970-4527/com.don.ffmpegplayer D/Player: Image converted to RGB565
    10-23 14:53:42.935 26970-4527/com.don.ffmpegplayer D/Player: Image wrote into frameRGBABuffer
    10-23 14:53:42.936 26970-4527/com.don.ffmpegplayer D/Player: Writing 868352 bytes to windowBuffer
    10-23 14:53:42.937 26970-4527/com.don.ffmpegplayer D/Player: Image displayed

    What am I doing wrong? If I understand correctly, I need to follow these steps:

    1. Get decoded AVFrame from decoder
    2. Convert AVFrame data to RGB565 or RGB8888
    3. Get pixel data from converted frame
    4. Write it to native window

    But two points in this code confuse me: is ANativeWindow_setBuffersGeometry called correctly, and why is frameRGBABuffer 868352 bytes? If the video size is 1024*424, shouldn't the frameRGBABuffer size be width*height*4? If I change the frameRGBABuffer size to width*height*4, the program crashes after the first image is displayed. I pass the video dimensions to ANativeWindow_setBuffersGeometry.
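    As a sanity check on the arithmetic: RGB565 stores each pixel in 16 bits (2 bytes), so for a 1024x424 frame the expected size is 1024 * 424 * 2 = 868352 bytes, which matches the log; width*height*4 would only apply to a 32-bit format such as RGBA8888. A minimal standalone sketch of the computation, assuming the FFmpeg development headers are installed (the names rgb565_size and rgba_size are just for illustration):

    extern "C" {
    #include <libavutil/imgutils.h>
    }
    #include <cstdio>

    int main() {
        const int w = 1024, h = 424;
        // 16-bit format actually used by the player above
        int rgb565_size = av_image_get_buffer_size(AV_PIX_FMT_RGB565, w, h, 1);
        // 32-bit format the question expected the size to match
        int rgba_size = av_image_get_buffer_size(AV_PIX_FMT_RGBA, w, h, 1);
        printf("RGB565: %d bytes (w*h*2 = %d)\n", rgb565_size, w * h * 2); // 868352
        printf("RGBA:   %d bytes (w*h*4 = %d)\n", rgba_size, w * h * 4);   // 1736704
        return 0;
    }

    Note also that ANativeWindow_lock() may return a buffer whose stride (in pixels) is larger than the video width; in that case a single memcpy of the whole frame shears the image, and copying row by row using windowBuffer.stride is safer.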

    Thanks in advance for any help.
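    One more detail worth noting about the read loop above: with FFmpeg's send/receive API, a single avcodec_send_packet() call can yield zero or several frames, so avcodec_receive_frame() is normally called in a loop until it returns AVERROR(EAGAIN). A hedged sketch of that pattern (decode_and_drain is a hypothetical helper, not part of the original player):

    extern "C" {
    #include <libavcodec/avcodec.h>
    }

    // Drain every frame produced by one packet instead of calling
    // avcodec_receive_frame() only once per packet as the loop above does.
    static int decode_and_drain(AVCodecContext *ctx, AVPacket *pkt, AVFrame *frame) {
        int ret = avcodec_send_packet(ctx, pkt);
        if (ret < 0)
            return ret; // real error while feeding input
        while ((ret = avcodec_receive_frame(ctx, frame)) == 0) {
            // ...convert to RGB565 and post to the window here...
            av_frame_unref(frame);
        }
        // AVERROR(EAGAIN) just means the decoder wants more packets
        return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
    }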

  • C++ ffmpeg Queue input is backward in time while encoding

    15 August 2022, by Turgut

    I've made a program that takes a video as input, decodes its video and audio data, edits the video data, and then encodes both video and audio (the audio remains unedited). I've managed to get the edited video as output, but when I add the audio I get an error that says "Queue input is backward in time". I used the muxing example from FFmpeg's doc/examples for encoding; here is what it looks like (I'm not including the video encoding parts, since they work fine):

typedef struct {
    OutputStream video_st, audio_st;
    const AVOutputFormat *fmt;
    AVFormatContext *oc;
    int have_video, have_audio, encode_video, encode_audio;
    std::string name;
} encode_info;

encode_info enc_inf;

void video_encoder::open_audio(AVFormatContext *oc, const AVCodec *codec,
                       OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c;
    int nb_samples;
    int ret;
    AVDictionary *opt = NULL;

    c = ost->enc;

    /* open it */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* init signal generator */
    ost->t     = 0;
    ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = c->frame_size;

    ost->frame     = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                       c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
                                       c->sample_rate, nb_samples);

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }

    /* create resampler context */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        exit(1);
    }

    /* set options */
    av_opt_set_int       (ost->swr_ctx, "in_channel_count",   c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "in_sample_rate",     c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",      AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int       (ost->swr_ctx, "out_channel_count",  c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "out_sample_rate",    c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",     c->sample_fmt,     0);

    /* initialize the resampling context */
    if ((ret = swr_init(ost->swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        exit(1);
    }
}


void video_encoder::encode_one_frame()
{
    if (enc_inf.encode_video || enc_inf.encode_audio) {
        /* select the stream to encode */
       if (enc_inf.encode_video &&
            (!enc_inf.encode_audio || av_compare_ts(enc_inf.video_st.next_pts, enc_inf.video_st.enc->time_base,
                                            enc_inf.audio_st.next_pts, enc_inf.audio_st.enc->time_base) <= 0)) {
            enc_inf.encode_video = !write_video_frame(enc_inf.oc, &enc_inf.video_st);
        } else {
            std::cout << "Encoding audio" << std::endl;
            enc_inf.encode_audio = !write_audio_frame(enc_inf.oc, &enc_inf.audio_st);
        }
    }
}

int video_encoder::write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
    AVCodecContext *c;
    AVFrame *frame;
    int ret;
    int dst_nb_samples;

    c = ost->enc;

    frame = audio_frame;//get_audio_frame(ost);

    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        //av_assert0(dst_nb_samples == frame->nb_samples);

        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* convert to destination format */
        ret = swr_convert(ost->swr_ctx,
                          ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
        frame = ost->frame;

        frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
        ost->samples_count += dst_nb_samples;
    }

    return write_frame(oc, c, ost->st, frame, ost->tmp_pkt);
}
void video_encoder::set_audio_frame(AVFrame* frame)
{
    audio_frame = frame;
}

    Normally, the muxing example above uses get_audio_frame(ost) inside write_audio_frame to create a dummy audio frame, but I want to use the audio that I decoded from my input video. After decoding an audio frame I pass it to the encoder with set_audio_frame so the encoder can use it, and I replaced get_audio_frame(ost) with audio_frame. Here is what my main loop looks like:

    ...
open_audio(args);
...
while (current_second < output_duration)
{
...
   video_reader_read_frame(buffer, &pts, start_ts);
   edit_decoded_video(buffer);
   ...
   if(frame_type == 2)
      encoder->set_audio_frame(audio_test->get_frame());
   encoder->encode_one_frame();
}

    And here is what my decoder looks like:

    int video_decode::decode_audio(AVCodecContext *dec, const AVPacket *pkt)
    {
        auto& frame = state.av_frame;
        int ret = 0;

        // submit the packet to the decoder
        ret = avcodec_send_packet(dec, pkt);
        if (ret < 0) {
            std::cout << "Error submitting a packet for decoding" << std::endl;
            return ret;
        }

        // get all the available frames from the decoder
        while (ret >= 0) {
            ret = avcodec_receive_frame(dec, frame);
            if (ret < 0) {
                // those two return values are special and mean there is no output
                // frame available, but there were no errors during decoding
                if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
                    return 0;

                std::cout << "Decode err" << std::endl;
                return ret;
            }
        }

        return 0;
    }

    int video_decode::video_reader_read_frame(uint8_t* frame_buffer, int64_t* pts, double seg_start) 
    {
        // Unpack members of state
        auto& width = state.width;
        auto& height = state.height;
        auto& av_format_ctx = state.av_format_ctx;
        auto& av_codec_ctx = state.av_codec_ctx;
        auto& audio_codec_ctx = state.audio_codec_ctx;
        auto& video_stream_index = state.video_stream_index;
        auto& audio_stream_index = state.audio_stream_index;
        auto& av_frame = state.av_frame;
        auto& av_packet = state.av_packet;
        auto& sws_scaler_ctx = state.sws_scaler_ctx;

        // Decode one frame
        //double pt_in_seconds = (*pts) * (double)state.time_base.num / (double)state.time_base.den;
        if (!this->skipped) {
            this->skipped = true;
            *pts = (int64_t)(seg_start * (double)state.time_base.den / (double)state.time_base.num);
            video_reader_seek_frame(*pts);
        }

        int response;
        while (av_read_frame(av_format_ctx, av_packet) >= 0) {
            // Audio decode

            if (av_packet->stream_index == video_stream_index){
                std::cout << "Decoded VIDEO" << std::endl;

                response = avcodec_send_packet(av_codec_ctx, av_packet);
                if (response < 0) {
                    printf("Failed to decode packet: %s\n", av_make_error(response));
                    return false;
                }


                response = avcodec_receive_frame(av_codec_ctx, av_frame);
                if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
                    av_packet_unref(av_packet);
                    continue;
                } else if (response < 0) {
                    printf("Failed to decode packet: %s\n", av_make_error(response));
                    return false;
                }


                *pts = av_frame->pts;
                // Set up sws scaler
                if (!sws_scaler_ctx) {
                    auto source_pix_fmt = correct_for_deprecated_pixel_format(av_codec_ctx->pix_fmt);
                    sws_scaler_ctx = sws_getContext(width, height, source_pix_fmt,
                                                    width, height, AV_PIX_FMT_RGB0,
                                                    SWS_BICUBIC, NULL, NULL, NULL);
                }
                if (!sws_scaler_ctx) {
                    printf("Couldn't initialize sw scaler\n");
                    return false;
                }

                uint8_t* dest[4] = { frame_buffer, NULL, NULL, NULL };
                int dest_linesize[4] = { width * 4, 0, 0, 0 };
                sws_scale(sws_scaler_ctx, av_frame->data, av_frame->linesize, 0, height, dest, dest_linesize);
                av_packet_unref(av_packet);
                return 1;
            }
            if (av_packet->stream_index == audio_stream_index){
                std::cout << "Decoded AUDIO" << std::endl;
                decode_audio(audio_codec_ctx, av_packet);
                av_packet_unref(av_packet);
                return 2;
            }else {
                av_packet_unref(av_packet);
                continue;
            }

            av_packet_unref(av_packet);
            break;
        }


        return true;
    }
void init()
{
...
if (open_codec_context(&audio_stream_index, &audio_dec_ctx, av_format_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
      audio_stream = av_format_ctx->streams[audio_stream_index];
}
...
}

    My decoder uses the same format context, packet, and frame for both video and audio decoding, with separate streams and codec contexts.

    Why am I getting the "Queue input is backward in time" error, and how can I properly encode the audio? As far as I can tell, the audio is decoded just fine, and there are no problems at all with the video encoding/decoding.
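    For reference, "Queue input is backward in time" is the warning libavformat emits during av_interleaved_write_frame() when it receives packets whose timestamps are not monotonically increasing. One common cause when feeding decoded input into the muxing example is a pts still expressed in the input stream's time base being mixed with pts values computed in the encoder's time base. A hedged sketch of rescaling a decoded audio frame before handing it to set_audio_frame (rescale_audio_pts, in_tb and out_tb are hypothetical names, not part of the code above):

    extern "C" {
    #include <libavutil/frame.h>
    #include <libavutil/mathematics.h>
    }

    // Move a decoded frame's pts from the input stream's time base (in_tb)
    // into the encoder's time base (out_tb) so the packets produced from it
    // stay monotonic when interleaved with the video stream.
    static void rescale_audio_pts(AVFrame *frame, AVRational in_tb, AVRational out_tb) {
        if (frame->pts != AV_NOPTS_VALUE)
            frame->pts = av_rescale_q(frame->pts, in_tb, out_tb);
    }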