Advanced search

Media (91)

Other articles (98)

  • MediaSPIP 0.1 Beta version

    25 April 2011, by

    MediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
    The zip file provided here only contains the sources of MediaSPIP in its standalone version.
    To get a working installation, you must manually install all software dependencies on the server.
    If you want to use this archive for an installation in "farm mode", you will also need to proceed to other manual (...)

  • APPENDIX: Plugins used specifically for the farm

    5 March 2010, by

    The central/master site of the farm needs several additional plugins, beyond those used on the channels, to work properly: the Gestion de la mutualisation plugin; the inscription3 plugin, to handle registrations and requests to create a farm instance as soon as users sign up; the verifier plugin, which provides a field-validation API (used by inscription3); the champs extras v2 plugin, required by inscription3 (...)

  • Frequent problems

    10 March 2010, by

    PHP with safe_mode enabled
    One of the main sources of problems comes from the PHP configuration, in particular having safe_mode enabled.
    The solution is either to disable safe_mode or to place the script in a directory that Apache can access for the site.
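
    A minimal php.ini sketch of the first option (assuming you can edit the server's php.ini; the safe_mode directive was deprecated in PHP 5.3 and removed in 5.4):

        ; php.ini: disable safe_mode so the site's scripts are not restricted
        safe_mode = Off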

On other sites (7053)

  • Error using FFMPEG to convert each input image into H264 compiling in Visual Studio running in MevisLab

    21 February 2014, by user3012914

    I am creating an ML module in the MeVisLab framework and using FFmpeg to convert each image I get into an H.264 video, which is saved once all the frames have been received. Unfortunately, I have a problem allocating the output buffer size: the application crashes when I include that allocation in my code. If I leave it out, the output file is only 4 KB and nothing is stored in it.

    I am also not sure whether this is the correct way of getting the HBITMAP into the encoder. Your suggestions would be very welcome.

    My code:

    BITMAPINFO bitmapInfo;
               HDC        hdc;

               ZeroMemory(&bitmapInfo, sizeof(bitmapInfo));

               BITMAPINFOHEADER &bitmapInfoHeader = bitmapInfo.bmiHeader;
               bitmapInfoHeader.biSize            = sizeof(bitmapInfoHeader);
               bitmapInfoHeader.biWidth           = _imgWidth;
               bitmapInfoHeader.biHeight          = _imgHeight;
               bitmapInfoHeader.biPlanes          =  1;
               bitmapInfoHeader.biBitCount        = 24;
               bitmapInfoHeader.biCompression     = BI_RGB;
               bitmapInfoHeader.biSizeImage       = ((bitmapInfoHeader.biWidth * bitmapInfoHeader.biBitCount / 8 + 3) & 0xFFFFFFFC) * bitmapInfoHeader.biHeight;
               bitmapInfoHeader.biXPelsPerMeter   = 10000;
               bitmapInfoHeader.biYPelsPerMeter   = 10000;
               bitmapInfoHeader.biClrUsed         = 0;
               bitmapInfoHeader.biClrImportant    = 0;
               //RGBQUAD* Ref = new RGBQUAD[_imgWidth,_imgHeight];
               HDC hdcscreen = GetDC(0);

               hdc = CreateCompatibleDC(hdcscreen);
               ReleaseDC(0, hdcscreen);

               _hbitmap = CreateDIBSection(hdc, (BITMAPINFO*) &bitmapInfoHeader, DIB_RGB_COLORS, &_bits, NULL, NULL);

    I use the above code to get the bitmap. Then I allocate the codec context as follows:

    c->bit_rate = 400000;
                   // resolution must be a multiple of two
                   c->width = 1920;
                   c->height = 1080;
                   // frames per second
                   frame_rate = _framesPerSecondFld->getIntValue();
                   //AVRational rational = {1,10};
                   //c->time_base = (AVRational){1,25};
                    //c->time_base = (AVRational){1,25};
                    c->gop_size = 10; // emit one intra frame every ten frames
                    c->max_b_frames = 1;
                    c->keyint_min = 1;   //minimum GOP size
                    c->time_base.num = 1;                                  // framerate numerator
                    c->time_base.den = _framesPerSecondFld->getIntValue();
                    c->i_quant_factor = (float)0.71;                        // qscale factor between P and I frames
                    c->pix_fmt = AV_PIX_FMT_RGB32;
                    std::string msg;
                    msg.append("Context is stored");
                    _messageFld->setStringValue(msg.c_str());
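
    For context, in the legacy libavcodec API used throughout this post, a context like c above is normally obtained from the encoder and opened with avcodec_open2() before any frame is fed in. A minimal sketch of that surrounding step, which the post does not show (names here are illustrative assumptions):

    AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);    // H.264 encoder lookup
    if (!codec) {
        _messageFld->setStringValue("H.264 encoder not found");
    }
    AVCodecContext* c = avcodec_alloc_context3(codec);
    // ... set bit_rate, width, height, time_base, gop_size, pix_fmt as shown above ...
    // Note: H.264 encoders generally expect a YUV format such as AV_PIX_FMT_YUV420P,
    // which is why the sws_scale() conversion further down is still required.
    if (avcodec_open2(c, codec, NULL) < 0) {
        _messageFld->setStringValue("Could not open codec");
    }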

    I create the bitmap image from the input as follows:

    PagedImage *inImg = getUpdatedInputImage(0);
           ML_CHECK(inImg);
           ImageVector imgExt = inImg->getImageExtent();
       if ((imgExt.x == _imgWidth) && (imgExt.y == _imgHeight))   // compare the extents rather than assign
           {
           if (((imgExt.x % 4)==0) && ((imgExt.y % 4) == 0))
           {
                    // read out input image and write output image into video
                   // get input image as an array
                   void* imgData = NULL;
                   SubImageBox imageBox(imgExt); // get the whole image
                   getTile(inImg, imageBox, MLuint8Type, &imgData);
                   iData = (MLuint8*)imgData;
                   int r = 0; int g = 0;int  b = 0;
                   // since we have only images with
                   // a z-ext of 1, we can compute the c stride as follows
                   int cStride = _imgWidth * _imgHeight;
                    int offset = 0;   // must be wide enough to hold _imgWidth * y (uint8_t would overflow)
                   // pointer into the bitmap that is
                   // used to write images into the avi
                   UCHAR* dst = (UCHAR*)_bits;
                   for (int y = _imgHeight-1; y >= 0; y--)
                   { // reversely scan the image. if y-rows of DIB are set in normal order, no compression will be available.
                       offset = _imgWidth * y;
                       for (int x = 0; x < _imgWidth; x++)
                       {
                           if (_isGreyValueImage)
                           {
                               r = iData[offset + x];
                               *dst++ = (UCHAR)r;
                               *dst++ = (UCHAR)r;
                               *dst++ = (UCHAR)r;
                           }
                           else
                           {
                               b = iData[offset + x]; // windows bitmap need reverse order: bgr instead of rgb
                               g = iData[offset + x + cStride          ];
                               r = iData[offset + x + cStride + cStride];

                               *dst++ = (UCHAR)r;
                               *dst++ = (UCHAR)g;
                               *dst++ = (UCHAR)b;
                           }
                           // alpha channel in input image is ignored
                       }
                   }

    Then I add it to the encoder and write it out as H.264, as follows:

    in_width   = c->width;
                    in_height  = c->height;
                    out_width  = c->width;
                    out_height = c->height;
                    ibytes = avpicture_get_size(PIX_FMT_BGR32, in_width, in_height);
                    obytes = avpicture_get_size(PIX_FMT_YUV420P, out_width, out_height);
                    outbuf_size = 100000 + c->width*c->height*(32>>3);      // allocate output buffer
                    outbuf = static_cast<uint8_t*>(malloc(outbuf_size));   // uint8_t* assumed from how outbuf is used below

                    if(!obytes)
                    {
                        std::string msg;
                        msg.append("Bytes cannot be allocated");
                        _messageFld->setStringValue(msg.c_str());
                    }
                    else
                    {
                        std::string msg;
                        msg.append("Bytes allocation done");
                        _messageFld->setStringValue(msg.c_str());
                    }
                    //create buffer for the output image
                    inbuffer  =  (uint8_t*)av_malloc(ibytes);
                    outbuffer =  (uint8_t*)av_malloc(obytes);
                    inbuffer  =  (uint8_t*)dst;

                    //create ffmpeg frame structures.  These do not allocate space for image data,
                    //just the pointers and other information about the image.
                    AVFrame* inpic = avcodec_alloc_frame();
                    AVFrame* outpic = avcodec_alloc_frame();

                    //this will set the pointers in the frame structures to the right points in
                    //the input and output buffers.
                    avpicture_fill((AVPicture*)inpic, inbuffer, PIX_FMT_BGR32, in_width, in_height);
                    avpicture_fill((AVPicture*)outpic, outbuffer, PIX_FMT_YUV420P, out_width, out_height);
                    av_image_alloc(outpic->data, outpic->linesize, c->width, c->height, c->pix_fmt, 1);
                    inpic->data[0] += inpic->linesize[0]*(_imgHeight-1);                                                      // flipping frame
                    inpic->linesize[0] = -inpic->linesize[0];    

                    if(!inpic)
                    {
                        std::string msg;
                        msg.append("Image is empty");
                        _messageFld->setStringValue(msg.c_str());
                    }
                    else
                    {
                        std::string msg;
                        msg.append("Picture has allocations");
                        _messageFld->setStringValue(msg.c_str());
                    }

                    //create the conversion context
                    fooContext = sws_getContext(in_width, in_height, PIX_FMT_BGR32, out_width, out_height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
                    //perform the conversion
                    sws_scale(fooContext, inpic->data, inpic->linesize, 0, in_height, outpic->data, outpic->linesize);
                    //out_size = avcodec_encode_video(c, outbuf,outbuf_size, outpic);
                    if(!out_size)
                    {
                        std::string msg;
                        msg.append("Outsize is not valid");
                        _messageFld->setStringValue(msg.c_str());
                    }
                    else
                    {
                        std::string msg;
                        msg.append("Outsize is valid");
                        _messageFld->setStringValue(msg.c_str());
                    }
                        fwrite(outbuf, 1, out_size, f);
                        if(!fwrite)
                    {
                        std::string msg;
                        msg.append("Frames couldnt be written");
                        _messageFld->setStringValue(msg.c_str());
                    }
                    else
                    {
                        std::string msg;
                        msg.append("Frames written to the file");
                        _messageFld->setStringValue(msg.c_str());
                    }
                       // for (;out_size; i++)
                       // {
                             out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
                             std::string msg;                      
                             msg.append("Writing Frames");
                             _messageFld->setStringValue(msg.c_str());// encode the delayed frames
                             _numFramesFld->setIntValue(_numFramesFld->getIntValue()+1);
                             fwrite(outbuf, 1, out_size, f);
                       // }
                        outbuf[0] = 0x00;
                        outbuf[1] = 0x00;                                                                                               // add sequence end code to have a real mpeg file
                        outbuf[2] = 0x01;
                        outbuf[3] = 0xb7;
                        fwrite(outbuf, 1, 4, f);
    }

    Then close and clean the Image Buffer and file

     ML_TRACE_IN("MovieCreator::_endRecording()")
    if (_numFramesFld->getIntValue() == 0)
    {
       _messageFld->setStringValue("Empty movie, nothing saved.");
    }
    else
    {
       _messageFld->setStringValue("Movie written to disk.");
       _numFramesFld->setIntValue(0);
    if (_hbitmap)
    {
       DeleteObject(_hbitmap);
    }
    if (c != NULL)
    {
          av_free(outbuffer);    
          av_free(inpic);
          av_free(outpic);
          fclose(f);
          avcodec_close(c);                                                                                               // freeing memory
          free(outbuf);
          av_free(c);
    }
    }

    }

    I think the main problem is here:

                        //out_size = avcodec_encode_video(c, outbuf,outbuf_size, outpic);
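
    A minimal sketch of how that call and its output buffer are usually wired together with the legacy avcodec_encode_video() API this code already uses (the buffer size below is an assumed generous upper bound, not a value from the post):

    // Hedged sketch: allocate the output buffer, encode the converted YUV frame,
    // and only write as many bytes as the encoder actually produced.
    outbuf_size = c->width * c->height * 4 + 100000;         // rough upper bound for one frame
    outbuf = static_cast<uint8_t*>(malloc(outbuf_size));
    if (outbuf)
    {
        out_size = avcodec_encode_video(c, outbuf, outbuf_size, outpic);
        if (out_size > 0)
        {
            fwrite(outbuf, 1, out_size, f);                  // write the encoded frame
        }
    }
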
  • FFmpeg/libav: YUV420P to RGB conversion

    17 January 2014, by learner

    I am working on video encoding and decoding based on online tutorials. In the encoding section, the dummy image created is in YUV420P format. I need it to be in RGB or BGR format. Any idea how to do this?

     #include <stdlib.h>
     #include <stdio.h>
     #include <string.h>

    #ifdef HAVE_AV_CONFIG_H
    #undef HAVE_AV_CONFIG_H
    #endif

    #include "libavcodec/avcodec.h"
    #include "libavutil/mathematics.h"

    #define INBUF_SIZE 4096
    #define AUDIO_INBUF_SIZE 20480
    #define AUDIO_REFILL_THRESH 4096

    /*
    * Video encoding example
    */
    static void video_encode_example(const char *filename)
    {
        AVCodec *codec;
        AVCodecContext *c = NULL;
        int i, out_size, size, x, y, outbuf_size;
        FILE *f;
        AVFrame *picture;
        uint8_t *outbuf, *picture_buf;

        printf("Video encoding\n");

        /* find the mpeg1 video encoder */
        codec = avcodec_find_encoder(CODEC_ID_MPEG1VIDEO);
        if (!codec)
        {
            fprintf(stderr, "codec not found\n");
            exit(1);
        }

        c = avcodec_alloc_context();
        picture = avcodec_alloc_frame();

        /* put sample parameters */
        c->bit_rate = 400000;
        /* resolution must be a multiple of two */
        c->width = 352;
        c->height = 288;
        /* frames per second */
        c->time_base= (AVRational){1,25};
        c->gop_size = 10; /* emit one intra frame every ten frames */
        c->max_b_frames=1;
        c->pix_fmt = PIX_FMT_YUV420P;

        /* open it */
        if (avcodec_open(c, codec) < 0)
        {
            fprintf(stderr, "could not open codec\n");
            exit(1);
        }

        f = fopen(filename, "wb");
        if (!f)
        {
            fprintf(stderr, "could not open file! %s\n", filename);
            exit(1);
        }

        /* alloc image and output buffer */
        outbuf_size = 100000;
        outbuf = malloc(outbuf_size);
        size = c->width * c->height;
        picture_buf = malloc((size * 3) / 2); /* size for YUV 420 */

        picture->data[0] = picture_buf;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = picture->data[1] + size / 4;
        picture->linesize[0] = c->width;
        picture->linesize[1] = c->width / 2;
        picture->linesize[2] = c->width / 2;

        /* encode 1 second of video */
        for(i=0; i<25; i++)
        {
            fflush(stdout);
            /* prepare a dummy image */
            /* Y */
            for(y=0; y < c->height; y++)
            {
                for(x=0; x < c->width; x++)
                {
                    picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
                }
            }

            /* Cb and Cr */
            for(y=0; y < c->height/2; y++)
            {
                for(x=0; x < c->width/2; x++)
                {
                    picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
                    picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
                }
            }

            /* encode the image */
            out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
            printf("encoding frame %3d (size=%5d)\n", i, out_size);
            fwrite(outbuf, 1, out_size, f);
        }

        /* get the delayed frames */
        for(; out_size; i++)
        {
            fflush(stdout);

            out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
            printf("write frame %3d (size=%5d)\n", i, out_size);
            fwrite(outbuf, 1, out_size, f);
        }

        /* add sequence end code to have a real mpeg file */
        outbuf[0] = 0x00;
        outbuf[1] = 0x00;
        outbuf[2] = 0x01;
        outbuf[3] = 0xb7;
        fwrite(outbuf, 1, 4, f);
        fclose(f);
        free(picture_buf);
        free(outbuf);

        avcodec_close(c);
        av_free(c);
        av_free(picture);
        printf("\n");
    }

    /*
     * Video decoding example
     */

    static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
                         char *filename)
    {
        FILE *f;
        int i;

        f=fopen(filename,"w");
        fprintf(f,"P5\n%d %d\n%d\n",xsize,ysize,255);
        for(i=0; i < ysize; i++)
            fwrite(buf + i * wrap, 1, xsize, f);
        fclose(f);
    }

    static void video_decode_example(const char *outfilename, const char *filename)
    {
        AVCodec *codec;
        AVCodecContext *c = NULL;
        int frame, got_picture, len;
        FILE *f;
        AVFrame *picture;
        uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
        char buf[1024];
        AVPacket avpkt;

        av_init_packet(&avpkt);

        /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
        memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);

        printf("Video decoding\n");

        /* find the mpeg1 video decoder */
        codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
        if (!codec) {
            fprintf(stderr, "codec not found\n");
            exit(1);
        }

        c = avcodec_alloc_context();
        picture = avcodec_alloc_frame();

        if(codec->capabilities&CODEC_CAP_TRUNCATED)
            c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

        /* For some codecs, such as msmpeg4 and mpeg4, width and height
           MUST be initialized there because this information is not
           available in the bitstream. */

        /* open it */
        if (avcodec_open(c, codec) < 0) {
            fprintf(stderr, "could not open codec\n");
            exit(1);
        }

        /* the codec gives us the frame size, in samples */

        f = fopen(filename, "rb");
        if (!f) {
            fprintf(stderr, "could not open file! %s\n", filename);
            exit(1);
        }

        frame = 0;
        for(;;) {
            avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
            if (avpkt.size == 0)
                break;

            /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
               and this is the only method to use them because you cannot
               know the compressed data size before analysing it.

               BUT some other codecs (msmpeg4, mpeg4) are inherently frame
               based, so you must call them with all the data for one
               frame exactly. You must also initialize 'width' and
               'height' before initializing them. */

            /* NOTE2: some codecs allow the raw parameters (frame size,
               sample rate) to be changed at any frame. We handle this, so
               you should also take care of it */

            /* here, we use a stream based decoder (mpeg1video), so we
               feed decoder and see if it could decode a frame */

            avpkt.data = inbuf;
            while (avpkt.size > 0)
            {
                len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
                if (len < 0)
                {
                    fprintf(stderr, "Error while decoding frame %d\n", frame);
                    exit(1);
                }
                if (got_picture)
                {
                    printf("saving frame %3d\n", frame);
                    fflush(stdout);

                    /* the picture is allocated by the decoder. no need to
                       free it */
                    snprintf(buf, sizeof(buf), outfilename, frame);
                    pgm_save(picture->data[0], picture->linesize[0],
                             c->width, c->height, buf);
                    frame++;
                }
                avpkt.size -= len;
                avpkt.data += len;
            }
        }

        /* some codecs, such as MPEG, transmit the I and P frame with a
           latency of one frame. You must do the following to have a
           chance to get the last frame of the video */

        avpkt.data = NULL;
        avpkt.size = 0;
        len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
        if (got_picture)
        {
            printf("saving last frame %3d\n", frame);
            fflush(stdout);

            /* the picture is allocated by the decoder. no need to
               free it */
            snprintf(buf, sizeof(buf), outfilename, frame);
            pgm_save(picture->data[0], picture->linesize[0],
                     c->width, c->height, buf);
            frame++;
        }

        fclose(f);

        avcodec_close(c);
        av_free(c);
        av_free(picture);
        printf("\n");
    }

    int main(int argc, char **argv)
    {
        const char *filename;

        /* must be called before using avcodec lib */
        avcodec_init();

        /* register all the codecs */
        avcodec_register_all();

        if (argc <= 1)
        {
            video_encode_example("asdf.mpg");
            filename = "asdf.mpg";
        }
        else
        {
            filename = argv[1];
        }

        video_decode_example("%d.pgm", filename);

        return 0;
    }

    I tried with PIX_FMT_RGB24 and changing each channel accordingly. For this, I declared another:

    AVFrame *pictureRGB; and then: pictureRGB = avcodec_alloc_frame();

    for(y=0; y < c->height; y++)
    {
        for(x=0; x < c->width; x++)
        {
             pictureRGB->data[0][y * pictureRGB->linesize[0] + x] = x + y + i * 3;
             pictureRGB->data[1][y * pictureRGB->linesize[1] + x] = x + y + i * 3;
             pictureRGB->data[2][y * pictureRGB->linesize[2] + x] = x + y + i * 3;
        }
    }

    But it gives errors! I am totally new to this library. Is it possible to directly encode an RGB dummy image rather than YUV420P? Anybody out there with sound knowledge in this area? Thanks in advance!
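
    One common approach (a sketch using the same legacy API as the code above, not something taken from the tutorial) is to keep the encoder at PIX_FMT_YUV420P and build the dummy image as packed RGB24 in a single plane, converting it with libswscale just before encoding. Note that avcodec_alloc_frame() only allocates the frame structure, not the pixel buffers, which is one reason writing into pictureRGB->data[] fails.

    #include "libswscale/swscale.h"

    /* Hedged sketch: fill a packed RGB24 picture (one plane, 3 bytes per pixel),
       then convert it into the encoder's YUV420P `picture` with sws_scale(). */
    AVFrame *rgb = avcodec_alloc_frame();
    uint8_t *rgb_buf = malloc(avpicture_get_size(PIX_FMT_RGB24, c->width, c->height));
    avpicture_fill((AVPicture *)rgb, rgb_buf, PIX_FMT_RGB24, c->width, c->height);

    for (y = 0; y < c->height; y++) {
        for (x = 0; x < c->width; x++) {
            uint8_t *p = rgb->data[0] + y * rgb->linesize[0] + 3 * x;
            p[0] = x + y + i * 3;   /* R */
            p[1] = x + y + i * 3;   /* G */
            p[2] = x + y + i * 3;   /* B */
        }
    }

    struct SwsContext *sws = sws_getContext(c->width, c->height, PIX_FMT_RGB24,
                                            c->width, c->height, PIX_FMT_YUV420P,
                                            SWS_BICUBIC, NULL, NULL, NULL);
    sws_scale(sws, rgb->data, rgb->linesize, 0, c->height,
              picture->data, picture->linesize);
    /* `picture` can then be encoded exactly as in the original loop. */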

  • Transcoding to vorbis using FFmpeg libraries, C++

    6 February 2014, by taansari

    I have made a test application to transcode to vorbis format (webm container).

    So far, based on the FFmpeg examples, things are mostly working and the output file plays properly, but the sound in the right channel is missing. I have tried looking at different possibilities, but so far could not find an answer.

    For reference, this is the code I am using:

    #include "stdafx.h"
    #define MAX_AUDIO_PACKET_SIZE (128 * 1024)

    #include <iostream>
    #include <fstream>

    #include <string>
    #include <vector>
    #include <map>

    #include <deque>
    #include <queue>

    #include
    #include
    #include
    #include

    extern "C"
    {
    #include "libavcodec/avcodec.h"
    #include "libavformat/avformat.h"
    #include "libavdevice/avdevice.h"
    #include "libswscale/swscale.h"
    #include "libavutil/dict.h"
    #include "libavutil/error.h"
    #include "libavutil/opt.h"
     #include <libavutil/fifo.h>
     #include <libavutil/imgutils.h>
     #include <libavutil/samplefmt.h>
     #include <libswresample/swresample.h>
    }
    AVCodecID           outputAudioFormat = AV_CODEC_ID_VORBIS;


    static int sws_flags = SWS_BICUBIC;
    #define STREAM_DURATION   50.0
    #define STREAM_FRAME_RATE 25 /* 25 images/s */
    #define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
    #define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */


    AVFormatContext*    fmt_ctx= NULL;
    int                    audio_stream_index = -1;
    AVCodecContext *    codec_ctx_audio = NULL;
    AVCodec*            codec_audio = NULL;
    AVFrame*            decoded_frame = NULL;
    uint8_t**            audio_dst_data = NULL;
    int                    got_frame = 0;
    int                    audiobufsize = 0;
    AVPacket            input_packet;
    int                    audio_dst_linesize = 0;
    int                    audio_dst_bufsize = 0;
    SwrContext *        swrContext = NULL;

    AVOutputFormat *    output_format = NULL ;
    AVFormatContext *    output_fmt_ctx= NULL;
    AVStream *            audio_st = NULL;
    AVStream*           video_st = NULL;
    AVCodec *            audio_codec = NULL;
    AVCodec*            video_codec = NULL;
    double                audio_pts = 0.0;
    AVFrame *            out_frame = avcodec_alloc_frame();

    int                    audio_input_frame_size = 64;

    uint8_t *            audio_data_buf = NULL;
    uint8_t *            audio_out = NULL;
    int                    audio_bit_rate;
    int                    audio_sample_rate;
    int                    audio_channels;
    int                 sourceSampleRate=0;
    int                 destSampleRate = 0;

    int                 dst_nb_samples = 0;
    int                 pivotIndex = 0;
    int                 max_dst_nb_samples = 0;
    int                 samples_count=0;


    int decode_packet();
    int open_audio_input(char* src_filename);
    int decode_frame();

    int open_encoder(char* output_filename);
    AVStream *add_audio_stream(AVFormatContext *oc, AVCodec **codec,
       enum AVCodecID codec_id);
    int open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st);
    void close_audio(AVFormatContext *oc, AVStream *st);
    void write_audio_frame(uint8_t ** audio_src_data, int audio_src_bufsize);

    static AVFrame *frame;
    static AVPicture src_picture, dst_picture;
    static int frame_count;
    /* Add an output stream. */
    static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
                               enum AVCodecID codec_id)
    {
       AVCodecContext *c;
       AVStream *st;

       /* find the encoder */
       *codec = avcodec_find_encoder(codec_id);
       if (!(*codec)) {
           fprintf(stderr, "Could not find encoder for &#39;%s&#39;\n",
                   avcodec_get_name(codec_id));
           exit(1);
       }

       st = avformat_new_stream(oc, *codec);
       if (!st) {
           fprintf(stderr, "Could not allocate stream\n");
           exit(1);
       }
       st->id = oc->nb_streams-1;
       c = st->codec;

       switch ((*codec)->type) {
       case AVMEDIA_TYPE_AUDIO:
           c->sample_fmt  = (*codec)->sample_fmts ?
               (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
           c->bit_rate    = 64000;
           c->sample_rate = 44100;
           c->channels    = 2;
           break;

       case AVMEDIA_TYPE_VIDEO:
           c->codec_id = codec_id;

           c->bit_rate = 400000;
           /* Resolution must be a multiple of two. */
           c->width    = 352;
           c->height   = 288;
           /* timebase: This is the fundamental unit of time (in seconds) in terms
            * of which frame timestamps are represented. For fixed-fps content,
            * timebase should be 1/framerate and timestamp increments should be
            * identical to 1. */
           c->time_base.den = STREAM_FRAME_RATE;
           c->time_base.num = 1;
           c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
           c->pix_fmt       = STREAM_PIX_FMT;
           if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
               /* just for testing, we also add B frames */
               c->max_b_frames = 2;
           }
           if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
               /* Needed to avoid using macroblocks in which some coeffs overflow.
                * This does not happen with normal video, it just happens here as
                * the motion of the chroma plane does not match the luma plane. */
               c->mb_decision = 2;
           }
       break;

       default:
           break;
       }

       /* Some formats want stream headers to be separate. */
       if (oc->oformat->flags & AVFMT_GLOBALHEADER)
           c->flags |= CODEC_FLAG_GLOBAL_HEADER;

       return st;
    }


    static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
    {
       int ret;
       AVCodecContext *c = st->codec;

       /* open the codec */
       ret = avcodec_open2(c, codec, NULL);
       if (ret < 0) {
           //fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
           exit(1);
       }

       /* allocate and init a re-usable frame */
       frame = av_frame_alloc();
       if (!frame) {
           fprintf(stderr, "Could not allocate video frame\n");
           exit(1);
       }
       frame->format = c->pix_fmt;
       frame->width = c->width;
       frame->height = c->height;

       /* Allocate the encoded raw picture. */
       ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
       if (ret < 0) {
           //fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
           exit(1);
       }

       /* If the output format is not YUV420P, then a temporary YUV420P
        * picture is needed too. It is then converted to the required
        * output format. */
       if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
           ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
           if (ret < 0) {
               //fprintf(stderr, "Could not allocate temporary picture: %s\n",
               //        av_err2str(ret));
               exit(1);
           }
       }

       /* copy data and linesize picture pointers to frame */
       *((AVPicture *)frame) = dst_picture;
    }

    int open_audio_input(char* src_filename)
    {
       int i =0;
       /* open input file, and allocate format context */
       if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0)
       {
           fprintf(stderr, "Could not open source file %s\n", src_filename);
           exit(1);
       }

       // Retrieve stream information
       if(avformat_find_stream_info(fmt_ctx, NULL)<0)
           return -1; // Couldn't find stream information

       // Dump information about file onto standard error
       av_dump_format(fmt_ctx, 0, src_filename, 0);

       // Find the first video stream
       for(i=0; i < fmt_ctx->nb_streams; i++)
       {
           if(fmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO)
           {
               audio_stream_index=i;
               break;
           }
       }
       if ( audio_stream_index != -1 )
       {
           // Get a pointer to the codec context for the audio stream
           codec_ctx_audio=fmt_ctx->streams[audio_stream_index]->codec;

           // Find the decoder for the video stream
           codec_audio=avcodec_find_decoder(codec_ctx_audio->codec_id);
           if(codec_audio==NULL) {
               fprintf(stderr, "Unsupported audio codec!\n");
               return -1; // Codec not found
           }

           // Open codec
           AVDictionary *codecDictOptions = NULL;
           if(avcodec_open2(codec_ctx_audio, codec_audio, &codecDictOptions)<0)
               return -1; // Could not open codec

           // Set up SWR context once you've got codec information
           swrContext = swr_alloc();
           av_opt_set_int(swrContext, "in_channel_layout",  codec_ctx_audio->channel_layout, 0);
           av_opt_set_int(swrContext, "out_channel_layout", codec_ctx_audio->channel_layout,  0);
           av_opt_set_int(swrContext, "in_sample_rate",     codec_ctx_audio->sample_rate, 0);
           av_opt_set_int(swrContext, "out_sample_rate",    codec_ctx_audio->sample_rate, 0);
           av_opt_set_sample_fmt(swrContext, "in_sample_fmt",  codec_ctx_audio->sample_fmt, 0);
           if ( outputAudioFormat == AV_CODEC_ID_VORBIS )
           {
               av_opt_set_sample_fmt(swrContext, "out_sample_fmt", AV_SAMPLE_FMT_FLTP,  0);
           }
           else
           {
               av_opt_set_sample_fmt(swrContext, "out_sample_fmt", AV_SAMPLE_FMT_S16,  0);
           }
           int rv = swr_init(swrContext);

           sourceSampleRate    =   destSampleRate = codec_ctx_audio->sample_rate;

           // Allocate audio frame
           if ( decoded_frame == NULL ) decoded_frame = avcodec_alloc_frame();
           int nb_planes = 0;
           AVStream* audio_stream = fmt_ctx->streams[audio_stream_index];
           nb_planes = av_sample_fmt_is_planar(codec_ctx_audio->sample_fmt) ? codec_ctx_audio->channels : 1;
           int tempSize =  sizeof(uint8_t *) * nb_planes;
           audio_dst_data = (uint8_t**)av_mallocz(tempSize);
           if (!audio_dst_data)
           {
               fprintf(stderr, "Could not allocate audio data buffers\n");
           }
           else
           {
               for ( int i = 0 ; i < nb_planes ; i ++ )
               {
                   audio_dst_data[i] = NULL;
               }
           }
       }
    }

    int decode_frame()
    {
       int rv = 0;
       got_frame = 0;
       if ( fmt_ctx == NULL  )
       {
           return rv;
       }
       int ret = 0;
       audiobufsize = 0;
       rv = av_read_frame(fmt_ctx, &input_packet);
       if ( rv < 0 )
       {
           return rv;
       }
       rv = decode_packet();
       // Free the input_packet that was allocated by av_read_frame
       av_free_packet(&input_packet);
       return rv;
    }

    int decode_packet()
    {
       int rv = 0;
       int ret = 0;

       //audio stream?
       if(input_packet.stream_index == audio_stream_index)
       {
           avcodec_get_frame_defaults(decoded_frame);
           while( input_packet.size > 0 )
           {
               int result = avcodec_decode_audio4(codec_ctx_audio, decoded_frame, &got_frame, &input_packet);
               if ( result < 0)
               {
                   fprintf(stderr, "Error decoding audio frame\n");
                   //return ret;
               }
               else
               {
                   if ( got_frame )
                   {
                       dst_nb_samples = (int)av_rescale_rnd(swr_get_delay(swrContext, sourceSampleRate) + decoded_frame->nb_samples, sourceSampleRate, destSampleRate, AV_ROUND_UP);
                       if ( dst_nb_samples > max_dst_nb_samples )
                       {
                           max_dst_nb_samples = dst_nb_samples;
                           if ( audio_dst_data[0] )
                           {
                               av_freep(&audio_dst_data[0]);
                               audio_dst_data[0] = NULL;
                           }
                       }
                       if ( audio_dst_data[0] == NULL )
                       {
                           if ( outputAudioFormat == AV_CODEC_ID_VORBIS )
                           {
                               ret = av_samples_alloc(audio_dst_data, &audio_dst_linesize, codec_ctx_audio->channels,
                                   decoded_frame->nb_samples, (AVSampleFormat)AV_SAMPLE_FMT_FLTP, 0);
                           }
                           else
                           {
                               ret = av_samples_alloc(audio_dst_data, &audio_dst_linesize, codec_ctx_audio->channels,
                                   decoded_frame->nb_samples, (AVSampleFormat)AV_SAMPLE_FMT_S16, 0);
                           }
                       }
                       /* TODO: extend return code of the av_samples_* functions so that this call is not needed */
                       int resampled  = swr_convert(swrContext, audio_dst_data, out_frame->nb_samples,
                           (const uint8_t **)(decoded_frame->extended_data), decoded_frame->nb_samples);
                       char str[900]="";
                       sprintf(str,"out_frame->nb_samples:\t%d; decoded_frame->nb_samples:\t%d",out_frame->nb_samples,decoded_frame->nb_samples );
                       if ( outputAudioFormat == AV_CODEC_ID_VORBIS )
                       {
                       audio_dst_bufsize  = av_samples_get_buffer_size(&audio_dst_linesize, decoded_frame->channels, resampled, (AVSampleFormat)AV_SAMPLE_FMT_FLTP, 1);
                       }
                       else
                       {
                       audio_dst_bufsize  = av_samples_get_buffer_size(&audio_dst_linesize, decoded_frame->channels, resampled, (AVSampleFormat)AV_SAMPLE_FMT_S16, 1);
                       }

                       input_packet.size -= result;
                       input_packet.data += result;
                   }
                   else
                   {
                       input_packet.size   =   0;
                       input_packet.data   =   NULL;
                   }
               }
           }
       }
       return rv;
    }

    int open_encoder(char* output_filename )
    {
       int rv = 0;

       /* allocate the output media context */
       AVOutputFormat *opfmt = NULL;

       avformat_alloc_output_context2(&output_fmt_ctx, opfmt, NULL, output_filename);
       if (!output_fmt_ctx) {
           printf("Could not deduce output format from file extension: using MPEG.\n");
           avformat_alloc_output_context2(&output_fmt_ctx, NULL, "mpeg", output_filename);
       }
       if (!output_fmt_ctx) {
           rv = -1;
       }
       else
       {
           output_format = output_fmt_ctx->oformat;
       }

       /* Add the audio stream using the default format codecs
       * and initialize the codecs. */
       audio_st = NULL;

       if ( output_fmt_ctx )
       {
           if (output_format->audio_codec != AV_CODEC_ID_NONE)
           {
               audio_st = add_audio_stream(output_fmt_ctx, &audio_codec, output_format->audio_codec);
           }

           /* Now that all the parameters are set, we can open the audio and
           * video codecs and allocate the necessary encode buffers. */

           if (audio_st)
           {
               rv = open_audio(output_fmt_ctx, audio_codec, audio_st);
               if ( rv < 0 ) return rv;
           }

           av_dump_format(output_fmt_ctx, 0, output_filename, 1);
           /* open the output file, if needed */
           if (!(output_format->flags & AVFMT_NOFILE))
           {
               if (avio_open(&output_fmt_ctx->pb, output_filename, AVIO_FLAG_WRITE) < 0) {
                   fprintf(stderr, "Could not open '%s'\n", output_filename);
                   rv = -1;
               }
               else
               {
                   /* Write the stream header, if any. */
                   if (avformat_write_header(output_fmt_ctx, NULL) < 0)
                   {
                       fprintf(stderr, "Error occurred when opening output file\n");
                       rv = -1;
                   }
               }
           }
       }

       return rv;
    }

    AVStream *add_audio_stream(AVFormatContext *oc, AVCodec **codec,
       enum AVCodecID codec_id)
    {
       AVCodecContext *c;
       AVStream *st;

       /* find the audio encoder */
       *codec = avcodec_find_encoder(codec_id);
       if (!(*codec)) {
           fprintf(stderr, "Could not find codec\n");
           exit(1);
       }

       st = avformat_new_stream(oc, *codec);
       if (!st) {
           fprintf(stderr, "Could not allocate stream\n");
           exit(1);
       }
       st->id = 1;

       c = st->codec;

       /* put sample parameters */
       if ( outputAudioFormat == AV_CODEC_ID_VORBIS )
       {
           c->sample_fmt  = AV_SAMPLE_FMT_FLTP;
       }
       else
       {
           c->sample_fmt  = AV_SAMPLE_FMT_S16;
       }

       c->bit_rate    = audio_bit_rate;
       c->sample_rate = audio_sample_rate;
       c->channels    = audio_channels;

       // some formats want stream headers to be separate
       if (oc->oformat->flags & AVFMT_GLOBALHEADER)
           c->flags |= CODEC_FLAG_GLOBAL_HEADER;

       return st;
    }

    int open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
    {
       int ret=0;
       AVCodecContext *c;

       st->duration = fmt_ctx->duration;
       c = st->codec;

       /* open it */
       ret = avcodec_open2(c, codec, NULL) ;
       if ( ret < 0)
       {
           fprintf(stderr, "could not open codec\n");
           return -1;
           //exit(1);
       }

       if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
           audio_input_frame_size = 10000;
       else
           audio_input_frame_size = c->frame_size;
       out_frame->nb_samples = audio_input_frame_size;
       int tempSize = audio_input_frame_size *
           av_get_bytes_per_sample(c->sample_fmt) *
           c->channels;
       return ret;
    }

    void close_audio(AVFormatContext *oc, AVStream *st)
    {
       avcodec_close(st->codec);
    }

    void write_audio_frame(uint8_t ** audio_dst_data, int audio_dst_bufsize)
    {
       AVFormatContext *oc = output_fmt_ctx;
       AVStream *st = audio_st;
       if ( oc == NULL || st == NULL ) return;
       AVCodecContext *c;
       AVPacket pkt = { 0 }; // data and size must be 0;
       int got_packet=0, ret=0;

       av_init_packet(&pkt);
       c = st->codec;

       out_frame->nb_samples = audio_input_frame_size;

       AVRational r;
       r.num = 1;
       r.den = c->sample_rate;
       out_frame->pts = av_rescale_q(samples_count, (AVRational)r, c->time_base);
       avcodec_fill_audio_frame(out_frame, c->channels, c->sample_fmt,
                                audio_dst_data[0], audio_dst_bufsize, 0);
       samples_count += out_frame->nb_samples;

       ret = avcodec_encode_audio2(c, &pkt, out_frame, &got_packet);
       if (ret < 0)
       {
           return;
       }

       if (!got_packet)
           return;

       /* rescale output packet timestamp values from codec to stream timebase */
       pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, st->time_base, (AVRounding )(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
       pkt.dts = av_rescale_q_rnd(pkt.dts, c->time_base, st->time_base, (AVRounding )(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
       pkt.duration = av_rescale_q(pkt.duration, c->time_base, st->time_base);
       pkt.stream_index = st->index;

       char str[999]="";
       sprintf(str,"out_frame->nb_samples:\t%d",out_frame->nb_samples);
       /* Write the compressed frame to the media file. */
       ret = av_interleaved_write_frame(oc, &pkt);
       if (ret != 0)
       {
           exit(1);
       }
       av_free_packet(&pkt);
    }

    void write_delayed_frames(AVFormatContext *oc, AVStream *st)
    {
       AVCodecContext *c = st->codec;
       int got_output = 0;
       int ret = 0;
       AVPacket pkt;
       pkt.data = NULL;
       pkt.size = 0;
       av_init_packet(&pkt);
       int i = 0;
       for (got_output = 1; got_output; i++)
       {
           ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
           if (ret < 0)
           {
               fprintf(stderr, "error encoding frame\n");
               exit(1);
           }
           static int64_t tempPts = 0;
           static int64_t tempDts = 0;
           /* If size is zero, it means the image was buffered. */
           if (got_output)
           {
               pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, st->time_base, (AVRounding )(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
               pkt.dts = av_rescale_q_rnd(pkt.dts, c->time_base, st->time_base, (AVRounding )(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
               pkt.duration = av_rescale_q(pkt.duration, c->time_base, st->time_base);
               pkt.stream_index = st->index;
               if ( c && c->coded_frame && c->coded_frame->key_frame)
                   pkt.flags |= AV_PKT_FLAG_KEY;
               /* Write the compressed frame to the media file. */
               ret = av_interleaved_write_frame(oc, &pkt);
           }
           else
           {
               ret = 0;
           }
           av_free_packet(&pkt);
       }
    }

    int main(int argc, char **argv)
    {
       /* register all formats and codecs */
       av_register_all();
       avcodec_register_all();
       avformat_network_init();
       avdevice_register_all();
       int i =0;
       int ret=0;
       char src_filename[90] = "test.mp2";
       char dst_filename[90] = "output.webm";
       outputAudioFormat = AV_CODEC_ID_VORBIS;
       open_audio_input(src_filename);
       if ( codec_ctx_audio->bit_rate == 0 ) codec_ctx_audio->bit_rate = 112000;
       audio_bit_rate        = codec_ctx_audio->bit_rate;
       audio_sample_rate    = codec_ctx_audio->sample_rate;
       audio_channels        = codec_ctx_audio->channels;
       open_encoder( dst_filename );
       int frames= 0;
       while(1)
       {
           int rv = decode_frame();
           if ( rv < 0 )
           {
               break;
           }

           if (audio_st)
           {
               audio_pts = audio_st->pts.val * av_q2d(audio_st->time_base);
           }
           else
           {
               audio_pts = 0.0;
           }
           if ( codec_ctx_audio )
           {
               if ( got_frame )
               {
                   write_audio_frame( audio_dst_data, audio_dst_bufsize );
                   frames++;
               }
           }
           printf("\naudio_pts: %f", audio_pts);
       }
       while(1)
       {
           dst_nb_samples = (int)av_rescale_rnd(swr_get_delay(swrContext, sourceSampleRate) + decoded_frame->nb_samples, sourceSampleRate, destSampleRate, AV_ROUND_UP);
           if ( dst_nb_samples > max_dst_nb_samples )
           {
               max_dst_nb_samples = dst_nb_samples;
               if ( audio_dst_data[0] )
               {
                   av_freep(&audio_dst_data[0]);
                   audio_dst_data[0] = NULL;
               }
           }
           if ( audio_dst_data[0] == NULL )
           {
               if ( outputAudioFormat == AV_CODEC_ID_VORBIS )
               {
                   ret = av_samples_alloc(audio_dst_data, NULL, codec_ctx_audio->channels,
                       decoded_frame->nb_samples, (AVSampleFormat)AV_SAMPLE_FMT_FLTP, 0);
               }
               else
               {
                   ret = av_samples_alloc(audio_dst_data, NULL, codec_ctx_audio->channels,
                       decoded_frame->nb_samples, (AVSampleFormat)AV_SAMPLE_FMT_S16, 0);
               }
           }
           int resampled = swr_convert(swrContext, audio_dst_data, out_frame->nb_samples,NULL, 0);
           if ( outputAudioFormat == AV_CODEC_ID_VORBIS )
           {
               audio_dst_bufsize  = av_samples_get_buffer_size(&audio_dst_linesize, decoded_frame->channels, resampled, (AVSampleFormat)AV_SAMPLE_FMT_FLTP, 1);
           }
           else
           {
               audio_dst_bufsize  = av_samples_get_buffer_size(&audio_dst_linesize, decoded_frame->channels, resampled, (AVSampleFormat)AV_SAMPLE_FMT_S16, 1);
           }
           if ( audio_dst_bufsize <= 0 ) break;
           audio_pts = audio_st->pts.val * av_q2d(audio_st->time_base);
           printf("\naudio_pts: %f", audio_pts);
           write_audio_frame( audio_dst_data, audio_dst_bufsize );
       }
       write_delayed_frames( output_fmt_ctx, audio_st );
       av_write_trailer(output_fmt_ctx);
       close_audio( output_fmt_ctx, audio_st);
       swr_free(&swrContext);
       avcodec_free_frame(&out_frame);
       getch();
       return 0;
    }

    Working under Windows 7, with a Zeranoe FFmpeg 32-bit build:

    libavutil      52. 62.100 / 52. 62.100
    libavcodec     55. 47.101 / 55. 47.101
    libavformat    55. 22.103 / 55. 22.103
    libavdevice    55.  5.102 / 55.  5.102
    libavfilter     4.  1.100 /  4.  1.100
    libswscale      2.  5.101 /  2.  5.101
    libswresample   0. 17.104 /  0. 17.104
    libpostproc    52.  3.100 / 52.  3.100

    Could anyone point to the place where I might be misunderstanding things?

    Thanks in advance for any guidance!
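
    One detail worth double-checking (an editor's sketch, not part of the original post): Vorbis is fed planar AV_SAMPLE_FMT_FLTP audio, where the left and right channels live in separate planes, so the resampler output needs one valid destination pointer per channel, roughly like this:

    /* Hedged sketch, assuming stereo input: with planar sample formats each
       channel is a separate plane, so plane 0 alone only carries the left channel. */
    uint8_t* planes[2] = { NULL, NULL };             // one pointer per channel
    int plane_linesize = 0;
    av_samples_alloc(planes, &plane_linesize, 2 /* channels */,
                     decoded_frame->nb_samples, AV_SAMPLE_FMT_FLTP, 0);
    swr_convert(swrContext, planes, decoded_frame->nb_samples,
                (const uint8_t **)decoded_frame->extended_data,
                decoded_frame->nb_samples);
    // planes[0] now holds the left-channel samples and planes[1] the right channel.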