
Media (91)

Other articles (73)

  • Websites made with MediaSPIP

    2 May 2011

    This page lists some websites based on MediaSPIP.

  • Customizing by adding your logo, banner or background image

    5 September 2013

    Some themes take into account three customization elements: adding a logo; adding a banner; adding a background image.

  • Creating farms of unique websites

    13 April 2011

    MediaSPIP platforms can be installed as a farm, with a single "core" hosted on a dedicated server and used by multiple websites.
    This allows (among other things): implementation costs to be shared between several different projects/individuals; rapid deployment of multiple unique sites; creation of groups of like-minded sites, making it possible to browse media in a more controlled and selective environment than the major "open" (...)

On other sites (11250)

  • avformat/mxfenc: set/force channelcount in MXF D-10

    25 June 2014, by Gaullier Nicolas
    avformat/mxfenc: set/force channelcount in MXF D-10
    

    There are interoperability issues with D-10 related to the channelcount property in the generic sound essence descriptor.

    On one side, SMPTE 386M requires the channel count to be 4 or 8, other values being prohibited.
    The most widespread value is 8, which seems straightforward as it is the actual size of the allocated structure/disk space.
    In practice, it appears that some vendors or workflows do require this descriptor to be 8 and otherwise simply "fail".

    On the other side, at least AVID and ffmpeg write/set the channel count to the exact number of channels actually "used",
    usually 2 or 4, or any other value. On the decoding side, ffmpeg (for example) makes use of the channel count for probing
    and only exposes this limited number of audio streams
    (which makes sense but has a strong impact on ffmpeg command-line usage, output, and downstream workflows).

    In the end, I find it quite useful to simply give ffmpeg the ability to force/set the channel count to any value the user wants.
    (There are workarounds using complex filters, pan, amerge, etc., but they are tedious and require the command line to be adapted to the input file's properties.)

    Reviewed-by: Matthieu Bouron <matthieu.bouron@gmail.com>
    Signed-off-by: Michael Niedermayer <michaelni@gmx.at>

    • [DH] libavformat/mxfenc.c
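
    As a concrete illustration of the workaround mentioned in the commit message (complex filters such as pan/amerge), a stereo source can be padded to the 8 channels that many D-10 workflows expect before muxing. This is only a sketch: the file names and codec flags are assumptions, the input video is assumed to already be IMX/D-10 so it can be stream-copied, and it does not show the muxer option added by this patch.

    # Pad a 2-channel track to 8 channels with a pan filter (unmapped output
    # channels stay silent), then mux 48 kHz 16-bit PCM audio into a D-10 file.
    ffmpeg -i input.mxf \
      -filter_complex "[0:a]pan=8c|c0=c0|c1=c1[aout]" \
      -map 0:v -map "[aout]" \
      -c:v copy -c:a pcm_s16le -ar 48000 \
      -f mxf_d10 output_d10.mxf
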
  • How to fix ffmpeg's official tutorial03 bug where the sound doesn't play correctly?

    31 January 2019, by xiaodai

    I want to learn to make a player with ffmpeg and SDL. The tutorial I used is this one: http://dranger.com/ffmpeg/tutorial03.html. Although I resample the audio decoded from the stream, the sound still plays with loud noise. (A sketch of the resampling path is given after the listing below.)

    I am out of ideas for fixing it.

    I used the following:

    • the latest ffmpeg and sdl1
    • Visual Studio 2010
    // tutorial03.c
    // A pedagogical video player that will stream through every video frame as fast as it can
    // and play audio (out of sync).
    //
    // This tutorial was written by Stephen Dranger (dranger@gmail.com).
    //
    // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard,
    // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de)
    // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1
    //
    // Use the Makefile to build all examples.
    //
    // Run using
    // tutorial03 myvideofile.mpg
    //
    // to play the stream on your screen.

    extern "C"{
    #include <libavcodec></libavcodec>avcodec.h>
    #include <libavformat></libavformat>avformat.h>
    #include <libswscale></libswscale>swscale.h>
    #include <libavutil></libavutil>channel_layout.h>
    #include <libavutil></libavutil>common.h>
    #include <libavutil></libavutil>frame.h>
    #include <libavutil></libavutil>samplefmt.h>
    #include "libswresample/swresample.h"

    #include <sdl></sdl>SDL.h>
    #include <sdl></sdl>SDL_thread.h>
    };
    #ifdef __WIN32__
    #undef main /* Prevents SDL from overriding main() */
    #endif

    #include

    #define SDL_AUDIO_BUFFER_SIZE 1024
    #define MAX_AUDIO_FRAME_SIZE 192000

    struct SwrContext *audio_swrCtx;
    FILE *pFile=fopen("output.pcm", "wb");
    FILE *pFile_stream=fopen("output_stream.pcm","wb");
    int audio_len;
    typedef struct PacketQueue {
       AVPacketList *first_pkt, *last_pkt;
       int nb_packets;
       int size;
       SDL_mutex *mutex;
       SDL_cond *cond;
    } PacketQueue;

    PacketQueue audioq;

    int quit = 0;

    void packet_queue_init(PacketQueue *q) {
       memset(q, 0, sizeof(PacketQueue));
       q->mutex = SDL_CreateMutex();
       q->cond = SDL_CreateCond();
    }

    int packet_queue_put(PacketQueue *q, AVPacket *pkt) {

       AVPacketList *pkt1;

       if(av_dup_packet(pkt) < 0) {
           return -1;
       }

       pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));

       if(!pkt1) {
           return -1;
       }

       pkt1->pkt = *pkt;
       pkt1->next = NULL;


       SDL_LockMutex(q->mutex);

       if(!q->last_pkt) {
           q->first_pkt = pkt1;
       }

       else {
           q->last_pkt->next = pkt1;
       }

       q->last_pkt = pkt1;
       q->nb_packets++;
       q->size += pkt1->pkt.size;
       SDL_CondSignal(q->cond);

       SDL_UnlockMutex(q->mutex);
       return 0;
    }

    static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
       AVPacketList *pkt1;
       int ret;

       SDL_LockMutex(q->mutex);

       for(;;) {

           if(quit) {
               ret = -1;
               break;
           }

           pkt1 = q->first_pkt;

           if(pkt1) {
               q->first_pkt = pkt1->next;

               if(!q->first_pkt) {
                   q->last_pkt = NULL;
               }

               q->nb_packets--;
               q->size -= pkt1->pkt.size;
               *pkt = pkt1->pkt;
               av_free(pkt1);
               ret = 1;
               break;

           } else if(!block) {
               ret = 0;
               break;

           } else {
               SDL_CondWait(q->cond, q->mutex);
           }
       }

       SDL_UnlockMutex(q->mutex);
       return ret;
    }

    int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {


        static AVPacket pkt;
        static uint8_t *audio_pkt_data = NULL;
        static int audio_pkt_size = 0;
        static AVFrame frame;

        int len1, data_size = 0;

        for(;;) {
            while(audio_pkt_size > 0) {
                int got_frame = 0;
                len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt);

                if(len1 < 0) {
                    /* if error, skip frame */
                    audio_pkt_size = 0;
                    break;
                }
                audio_pkt_data += len1;
                audio_pkt_size -= len1;
                data_size = 0;
                /*

                au_convert_ctx = swr_alloc();
                au_convert_ctx=swr_alloc_set_opts(au_convert_ctx,out_channel_layout, out_sample_fmt, out_sample_rate,
                in_channel_layout,pCodecCtx->sample_fmt , pCodecCtx->sample_rate,0, NULL);
                swr_init(au_convert_ctx);

                swr_convert(au_convert_ctx,&out_buffer, MAX_AUDIO_FRAME_SIZE,(const uint8_t **)pFrame->data , pFrame->nb_samples);


                */
                if( got_frame ) {
                    audio_swrCtx=swr_alloc();
                    audio_swrCtx=swr_alloc_set_opts(audio_swrCtx,  // we're allocating a new context
                        AV_CH_LAYOUT_STEREO,//AV_CH_LAYOUT_STEREO,     // out_ch_layout
                        AV_SAMPLE_FMT_S16,         // out_sample_fmt
                        44100, // out_sample_rate
                        aCodecCtx->channel_layout, // in_ch_layout
                        aCodecCtx->sample_fmt,     // in_sample_fmt
                        aCodecCtx->sample_rate,    // in_sample_rate
                        0,                         // log_offset
                        NULL);                     // log_ctx
                    int ret=swr_init(audio_swrCtx);
                    int out_samples = av_rescale_rnd(swr_get_delay(audio_swrCtx, aCodecCtx->sample_rate) + 1024, 44100, aCodecCtx->sample_rate, AV_ROUND_UP);
                    ret=swr_convert(audio_swrCtx,&audio_buf, MAX_AUDIO_FRAME_SIZE,(const uint8_t **)frame.data ,frame.nb_samples);
                    data_size =
                        av_samples_get_buffer_size
                        (
                        &data_size,
                        av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO),
                        ret,
                        AV_SAMPLE_FMT_S16,
                        1
                        );
                     fwrite(audio_buf, 1, data_size, pFile);
                    //memcpy(audio_buf, frame.data[0], data_size);
                    swr_free(&audio_swrCtx);
                }

                if(data_size <= 0) {
                    /* No data yet, get more frames */
                    continue;
                }

                /* We have data, return it and come back for more later */
                return data_size;
            }

            if(pkt.data) {
                av_free_packet(&pkt);
            }

            if(quit) {
                return -1;
            }

            if(packet_queue_get(&audioq, &pkt, 1) < 0) {
                return -1;
            }

            audio_pkt_data = pkt.data;
            audio_pkt_size = pkt.size;
        }
    }



    void audio_callback(void *userdata, Uint8 *stream, int len) {

       AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
       int /*audio_len,*/ audio_size;

       static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2];
       static unsigned int audio_buf_size = 0;
       static unsigned int audio_buf_index = 0;

       //SDL_memset(stream, 0, len);
       while(len > 0) {

           if(audio_buf_index >= audio_buf_size) {
               /* We have already sent all our data; get more */
               audio_size = audio_decode_frame(aCodecCtx, audio_buf, audio_buf_size);

               if(audio_size < 0) {
                   /* If error, output silence */
                   audio_buf_size = 1024; // arbitrary?
                   memset(audio_buf, 0, audio_buf_size);

               } else {
                   audio_buf_size = audio_size;
               }

               audio_buf_index = 0;
           }

           audio_len = audio_buf_size - audio_buf_index;

           if(audio_len > len) {
               audio_len = len;
           }

           memcpy(stream, (uint8_t *)audio_buf , audio_len);
           //SDL_MixAudio(stream,(uint8_t*)audio_buf,audio_len,SDL_MIX_MAXVOLUME);
           fwrite(audio_buf, 1, audio_len, pFile_stream);
           len -= audio_len;
           stream += audio_len;
           audio_buf_index += audio_len;
           audio_len=len;
       }
    }

    int main(int argc, char *argv[]) {
       AVFormatContext *pFormatCtx = NULL;
       int             i, videoStream, audioStream;
       AVCodecContext  *pCodecCtx = NULL;
       AVCodec         *pCodec = NULL;
       AVFrame         *pFrame = NULL;
       AVPacket        packet;
       int             frameFinished;

       //float           aspect_ratio;

       AVCodecContext  *aCodecCtx = NULL;
       AVCodec         *aCodec = NULL;

       SDL_Overlay     *bmp = NULL;
       SDL_Surface     *screen = NULL;
       SDL_Rect        rect;
       SDL_Event       event;
       SDL_AudioSpec   wanted_spec, spec;

       struct SwsContext   *sws_ctx            = NULL;
       AVDictionary        *videoOptionsDict   = NULL;
       AVDictionary        *audioOptionsDict   = NULL;

       if(argc < 2) {
               fprintf(stderr, "Usage: test <file>\n");
               exit(1);
           }

           // Register all formats and codecs
       av_register_all();

       if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
           fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
           exit(1);
       }

       // Open video file
       if(avformat_open_input(&pFormatCtx, argv[1]/*"file.mov"*/, NULL, NULL) != 0) {
           return -1;    // Couldn't open file
       }

       // Retrieve stream information
       if(avformat_find_stream_info(pFormatCtx, NULL) < 0) {
           return -1;    // Couldn't find stream information
       }

       // Dump information about file onto standard error
       av_dump_format(pFormatCtx, 0, argv[1], 0);

       // Find the first video stream
       videoStream = -1;
       audioStream = -1;

       for(i = 0; i < pFormatCtx->nb_streams; i++) {
           if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
               videoStream < 0) {
                   videoStream = i;
           }

           if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
               audioStream < 0) {
                   audioStream = i;
           }
       }

       if(videoStream == -1) {
           return -1;    // Didn't find a video stream
       }

       if(audioStream == -1) {
           return -1;
       }

       aCodecCtx = pFormatCtx->streams[audioStream]->codec;
       // Set audio settings from codec info
       wanted_spec.freq = 44100;
       wanted_spec.format = AUDIO_S16SYS;
       wanted_spec.channels = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO);
       wanted_spec.silence = 0;
       wanted_spec.samples = 1024;
       wanted_spec.callback = audio_callback;
       wanted_spec.userdata = aCodecCtx;

       if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
           fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
           return -1;
       }


       aCodec = avcodec_find_decoder(aCodecCtx->codec_id);

       if(!aCodec) {
           fprintf(stderr, "Unsupported codec!\n");
           return -1;
       }

       avcodec_open2(aCodecCtx, aCodec, &audioOptionsDict);

       // audio_st = pFormatCtx->streams[index]
       packet_queue_init(&audioq);
       SDL_PauseAudio(0);

       // Get a pointer to the codec context for the video stream
       pCodecCtx = pFormatCtx->streams[videoStream]->codec;

       // Find the decoder for the video stream
       pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

       if(pCodec == NULL) {
           fprintf(stderr, "Unsupported codec!\n");
           return -1; // Codec not found
       }

       // Open codec
       if(avcodec_open2(pCodecCtx, pCodec, &videoOptionsDict) < 0) {
           return -1;    // Could not open codec
       }

       // Allocate video frame
       pFrame = av_frame_alloc();

       // Make a screen to put our video

    #ifndef __DARWIN__
       screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
    #else
       screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
    #endif

       if(!screen) {
           fprintf(stderr, "SDL: could not set video mode - exiting\n");
           exit(1);
       }

       // Allocate a place to put our YUV image on that screen
       bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
           pCodecCtx->height,
           SDL_YV12_OVERLAY,
           screen);
       sws_ctx =
           sws_getContext
           (
           pCodecCtx->width,
           pCodecCtx->height,
           pCodecCtx->pix_fmt,
           pCodecCtx->width,
           pCodecCtx->height,
           PIX_FMT_YUV420P,
           SWS_BILINEAR,
           NULL,
           NULL,
           NULL
           );


       // Read frames and save first five frames to disk
       i = 0;

       while(av_read_frame(pFormatCtx, &packet) >= 0) {
           // Is this a packet from the video stream?
           if(packet.stream_index == videoStream) {
               // Decode video frame
               avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                   &packet);

               // Did we get a video frame?
               if(frameFinished) {
                   SDL_LockYUVOverlay(bmp);

                   AVPicture pict;
                   pict.data[0] = bmp->pixels[0];
                   pict.data[1] = bmp->pixels[2];
                   pict.data[2] = bmp->pixels[1];

                   pict.linesize[0] = bmp->pitches[0];
                   pict.linesize[1] = bmp->pitches[2];
                   pict.linesize[2] = bmp->pitches[1];

                   // Convert the image into YUV format that SDL uses
                   sws_scale
                       (
                       sws_ctx,
                       (uint8_t const * const *)pFrame->data,
                       pFrame->linesize,
                       0,
                       pCodecCtx->height,
                       pict.data,
                       pict.linesize
                       );

                   SDL_UnlockYUVOverlay(bmp);

                   rect.x = 0;
                   rect.y = 0;
                   rect.w = pCodecCtx->width;
                   rect.h = pCodecCtx->height;
                   SDL_DisplayYUVOverlay(bmp, &rect);
                   SDL_Delay(40);
                   av_free_packet(&packet);
               }

           } else if(packet.stream_index == audioStream) {
               packet_queue_put(&audioq, &packet);

           } else {
               av_free_packet(&packet);
           }

           // Free the packet that was allocated by av_read_frame
           SDL_PollEvent(&event);

           switch(event.type) {
           case SDL_QUIT:
               quit = 1;
               SDL_Quit();
               exit(0);
               break;

           default:
               break;
           }

       }

       // Free the YUV frame
       av_free(pFrame);
       /*swr_free(&audio_swrCtx);*/
       // Close the codec
       avcodec_close(pCodecCtx);
       fclose(pFile);
       fclose(pFile_stream);
       // Close the video file
       avformat_close_input(&pFormatCtx);

       return 0;
    }

    I hope to play normally.
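
    For reference, here is a minimal sketch (not the poster's code, and not a guaranteed fix) of how this resampling path is usually structured with these FFmpeg/SDL1 versions: the SwrContext is created once after the audio codec is opened instead of once per decoded frame, the byte count is taken from swr_convert's return value, and the callback copies from the current read offset. The helper names and signatures are hypothetical; the globals (audio_buf, audio_buf_index) are the ones from the listing above.

    #include <libswresample/swresample.h>   /* swr_alloc_set_opts, swr_init, swr_convert */
    #include <libavutil/channel_layout.h>   /* av_get_default_channel_layout */

    static struct SwrContext *swr_ctx = NULL;

    /* Call once, right after avcodec_open2(aCodecCtx, aCodec, ...). */
    static int init_resampler(AVCodecContext *aCodecCtx) {
        /* Some decoders leave channel_layout at 0, so fall back to the channel count. */
        int64_t in_layout = aCodecCtx->channel_layout
            ? aCodecCtx->channel_layout
            : av_get_default_channel_layout(aCodecCtx->channels);
        swr_ctx = swr_alloc_set_opts(NULL,
                                     AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,
                                     in_layout, aCodecCtx->sample_fmt,
                                     aCodecCtx->sample_rate, 0, NULL);
        return swr_ctx ? swr_init(swr_ctx) : -1;
    }

    /* Call from audio_decode_frame() when got_frame is set; returns the number
       of bytes written into audio_buf, or a negative value on error. */
    static int resample_frame(AVFrame *frame, uint8_t *audio_buf, int buf_size) {
        /* swr_convert expects the output capacity in samples per channel. */
        int max_out_samples = buf_size / (2 * (int)sizeof(int16_t));
        int out_samples = swr_convert(swr_ctx, &audio_buf, max_out_samples,
                                      (const uint8_t **)frame->data,
                                      frame->nb_samples);
        if (out_samples < 0)
            return -1;
        return out_samples * 2 * (int)sizeof(int16_t); /* stereo S16: 2 ch * 2 bytes */
    }

    /* In audio_callback(), copy from the current read offset rather than from the
       start of the buffer:
           memcpy(stream, audio_buf + audio_buf_index, audio_len);
     */

    Two details of the posted code are worth checking against this sketch: audio_decode_frame is called with audio_buf_size (which starts at 0) instead of the real capacity of audio_buf, and the memcpy in audio_callback always copies from the start of audio_buf instead of audio_buf + audio_buf_index. Whether the noise disappears also depends on the input's sample format and channel layout, which is why the input layout above is taken from the codec context with a channel-count fallback.
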
