Recherche avancée

Médias (0)

Mot : - Tags -/page unique

Aucun média correspondant à vos critères n’est disponible sur le site.

Autres articles (47)

  • Other interesting software

    13 avril 2011, par

    We don’t claim to be the only ones doing what we do ... and especially not to assert claims to be the best either ... What we do, we just try to do it well and getting better ...
    The following list presents software that is more or less similar to MediaSPIP, or whose goals MediaSPIP more or less shares.
    We don’t know them, we didn’t try them, but you can take a peek.
    Videopress
    Website : http://videopress.com/
    License : GNU/GPL v2
    Source code : (...)

  • HTML5 audio and video support

    13 avril 2011, par

    MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
    The MediaSPIP player used has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
    For older browsers the Flowplayer flash fallback is used.
    MediaSPIP allows for media playback on major mobile platforms with the above (...)

  • De l’upload à la vidéo finale [version standalone]

    31 janvier 2010, par

    Le chemin d’un document audio ou vidéo dans SPIPMotion est divisé en trois étapes distinctes.
    Upload et récupération d’informations de la vidéo source
    Dans un premier temps, il est nécessaire de créer un article SPIP et de lui joindre le document vidéo "source".
    Au moment où ce document est joint à l’article, deux actions supplémentaires au comportement normal sont exécutées : La récupération des informations techniques des flux audio et video du fichier ; La génération d’une vignette : extraction d’une (...)

Sur d’autres sites (6046)

  • How to capture sound from microphone with FFmpeg and then stream it in C/C++ ?

    9 octobre 2019, par Emad Kebriaei

    I’m trying to implementing the following line in C++ :

    fmpeg -ac 1 -f alsa -i hw:0,0 -acodec mp2 -ab 32k -ac 1 -re -f rtp rtp://localhost:1234

    The above-mentioned line captures audio from the mic device and, after some processing, streams it in RTP packets over UDP.
    I tried available resources but could not find such (or similar) implementation in C++.
    This is my code : (It was first developed by Lei Xiaouha to stream .flv file over RTMP. I changed in a way to capture audio from the microphone and store it in receive.mp2.)

    int main(int argc, char* argv[])
    {
       avdevice_register_all();
       av_register_all();
       //Network
       avformat_network_init();

       AVOutputFormat *ofmt = NULL;
       //Input AVFormatContext and Output AVFormatContext
       AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
       AVPacket pkt;
       const char *in_filename, *out_filename;
       int ret, i;
       int videoindex=-1;
       int frame_index=0;
       AVInputFormat *fmt = nullptr;

       out_filename = "./receive.mp2";
       fmt = av_find_input_format("alsa");
       in_filename = "hw:0,0";
       int cnt=0;
       //Input
       if ((ret = avformat_open_input(&ifmt_ctx, in_filename, fmt, nullptr)) < 0) {
           printf( "Could not open input file.");
           goto end;
       }
       if ((ret = avformat_find_stream_info(ifmt_ctx, nullptr)) < 0) {
           printf( "Failed to retrieve input stream information");
           goto end;
       }

       for(i=0; inb_streams; i++)
           if(ifmt_ctx->streams[i]->codecpar->codec_type==AVMEDIA_TYPE_VIDEO){
               videoindex=i;
               break;
           }

       av_dump_format(ifmt_ctx, 0, in_filename, 0);

       //Output
       avformat_alloc_output_context2(&ofmt_ctx, nullptr, "mp2", out_filename); //

       if (!ofmt_ctx) {
           printf( "Could not create output context\n");
           ret = AVERROR_UNKNOWN;
           goto end;
       }
       ofmt = ofmt_ctx->oformat;
       for (i = 0; i < ifmt_ctx->nb_streams; i++) {
           //Create output AVStream according to input AVStream
           AVStream *in_stream = ifmt_ctx->streams[i];
           AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
           if (!out_stream) {
               printf( "Failed allocating output stream\n");
               ret = AVERROR_UNKNOWN;
               goto end;
           }
           //Copy the settings of AVCodecContext
           ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
           if (ret < 0) {
               printf( "Failed to copy context from input to output stream codec context\n");
               goto end;
           }
           out_stream->codec->codec_tag = 0;
           if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
               out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
       }
       //Dump Format------------------
       av_dump_format(ofmt_ctx, 0, out_filename, 1);
       //Open output URL
       if (!(ofmt->flags & AVFMT_NOFILE)) {
           ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
           if (ret < 0) {
               printf( "Could not open output URL '%s'", out_filename);
               goto end;
           }
       }
       //Write file header
       ret = avformat_write_header(ofmt_ctx, nullptr);
       if (ret < 0) {
           printf( "Error occurred when opening output URL\n");
           goto end;
       }

    #if USE_H264BSF
       AVBitStreamFilterContext* h264bsfc =  av_bitstream_filter_init("h264_mp4toannexb");
    #endif

       while (1) {
           cnt++;
           AVStream *in_stream, *out_stream;
           //Get an AVPacket
           ret = av_read_frame(ifmt_ctx, &pkt);
           if (ret < 0)
               break;

           in_stream  = ifmt_ctx->streams[pkt.stream_index];
           out_stream = ofmt_ctx->streams[pkt.stream_index];
           /* copy packet */
           //Convert PTS/DTS
           pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
           pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
           pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
           pkt.pos = -1;
           //Print to Screen
           if(pkt.stream_index==videoindex){
               printf("Receive %8d video frames from input URL\n",frame_index);
               frame_index++;

    #if USE_H264BSF
               av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
    #endif
           }
           ret = av_write_frame(ofmt_ctx, &pkt);
    //        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);

           if (ret < 0) {
               printf( "Error muxing packet\n");
               break;
           }

           av_free_packet(&pkt);
       if (cnt > 1000)
           break;
       }

    #if USE_H264BSF
       av_bitstream_filter_close(h264bsfc);
    #endif

       //Write file trailer
       printf("suse");
       av_write_trailer(ofmt_ctx);
    end:
       avformat_close_input(&ifmt_ctx);
       /* close output */
       if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
           avio_close(ofmt_ctx->pb);
       avformat_free_context(ofmt_ctx);
       if (ret < 0 && ret != AVERROR_EOF) {
           printf( "Error occurred.\n");
           return -1;
       }
       return 0;
    }

    Can anyone help me with this ?

  • FFMPEG. Read frame, process it, put it to output video. Copy sound stream unchanged

    9 décembre 2016, par Andrey Smorodov

    I want to apply processing to a video clip with sound track, extract and process frame by frame and write result to output file. Number of frames, size of frame and speed remains unchanged in output clip. Also I want to keep the same audio track as I have in source.

    I can read the clip, decode frames and process them using OpenCV. Audio packets are also written fine. I’m stuck on forming the output video stream.

    The minimal runnable code I have for now (sorry it is not so short, but I can’t make it shorter) :

    extern "C" {
    #include <libavutil></libavutil>timestamp.h>
    #include <libavformat></libavformat>avformat.h>
    #include "libavcodec/avcodec.h"
    #include <libavutil></libavutil>opt.h>
    #include <libavdevice></libavdevice>avdevice.h>
    #include <libswscale></libswscale>swscale.h>
    }
    #include "opencv2/opencv.hpp"

    #if LIBAVCODEC_VERSION_INT &lt; AV_VERSION_INT(55,28,1)
    #define av_frame_alloc  avcodec_alloc_frame
    #endif

    using namespace std;
    using namespace cv;

    static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
    {
       AVRational *time_base = &amp;fmt_ctx->streams[pkt->stream_index]->time_base;

       char buf1[AV_TS_MAX_STRING_SIZE] = { 0 };
       av_ts_make_string(buf1, pkt->pts);
       char buf2[AV_TS_MAX_STRING_SIZE] = { 0 };
       av_ts_make_string(buf1, pkt->dts);
       char buf3[AV_TS_MAX_STRING_SIZE] = { 0 };
       av_ts_make_string(buf1, pkt->duration);

       char buf4[AV_TS_MAX_STRING_SIZE] = { 0 };
       av_ts_make_time_string(buf1, pkt->pts, time_base);
       char buf5[AV_TS_MAX_STRING_SIZE] = { 0 };
       av_ts_make_time_string(buf1, pkt->dts, time_base);
       char buf6[AV_TS_MAX_STRING_SIZE] = { 0 };
       av_ts_make_time_string(buf1, pkt->duration, time_base);

       printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           buf1, buf4,
           buf2, buf5,
           buf3, buf6,
           pkt->stream_index);

    }


    int main(int argc, char **argv)
    {
       AVOutputFormat *ofmt = NULL;
       AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
       AVPacket pkt;
       AVFrame *pFrame = NULL;
       AVFrame *pFrameRGB = NULL;
       int frameFinished = 0;
       pFrame = av_frame_alloc();
       pFrameRGB = av_frame_alloc();

       const char *in_filename, *out_filename;
       int ret, i;
       in_filename = "../../TestClips/Audio Video Sync Test.mp4";
       out_filename = "out.mp4";

       // Initialize FFMPEG
       av_register_all();
       // Get input file format context
       if ((ret = avformat_open_input(&amp;ifmt_ctx, in_filename, 0, 0)) &lt; 0)
       {
           fprintf(stderr, "Could not open input file '%s'", in_filename);
           goto end;
       }
       // Extract streams description
       if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) &lt; 0)
       {
           fprintf(stderr, "Failed to retrieve input stream information");
           goto end;
       }
       // Print detailed information about the input or output format,
       // such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
       av_dump_format(ifmt_ctx, 0, in_filename, 0);

       // Allocate an AVFormatContext for an output format.
       avformat_alloc_output_context2(&amp;ofmt_ctx, NULL, NULL, out_filename);
       if (!ofmt_ctx)
       {
           fprintf(stderr, "Could not create output context\n");
           ret = AVERROR_UNKNOWN;
           goto end;
       }

       // The output container format.
       ofmt = ofmt_ctx->oformat;

       // Allocating output streams
       for (i = 0; i &lt; ifmt_ctx->nb_streams; i++)
       {
           AVStream *in_stream = ifmt_ctx->streams[i];
           AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
           if (!out_stream)
           {
               fprintf(stderr, "Failed allocating output stream\n");
               ret = AVERROR_UNKNOWN;
               goto end;
           }
           ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
           if (ret &lt; 0)
           {
               fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
               goto end;
           }
           out_stream->codec->codec_tag = 0;
           if (ofmt_ctx->oformat->flags &amp; AVFMT_GLOBALHEADER)
           {
               out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
           }
       }

       // Show output format info
       av_dump_format(ofmt_ctx, 0, out_filename, 1);

       // Open output file
       if (!(ofmt->flags &amp; AVFMT_NOFILE))
       {
           ret = avio_open(&amp;ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
           if (ret &lt; 0)
           {
               fprintf(stderr, "Could not open output file '%s'", out_filename);
               goto end;
           }
       }
       // Write output file header
       ret = avformat_write_header(ofmt_ctx, NULL);
       if (ret &lt; 0)
       {
           fprintf(stderr, "Error occurred when opening output file\n");
           goto end;
       }

       // Search for input video codec info
       AVCodec *in_codec = nullptr;
       AVCodecContext* avctx = nullptr;

       int video_stream_index = -1;
       for (int i = 0; i &lt; ifmt_ctx->nb_streams; i++)
       {
           if (ifmt_ctx->streams[i]->codec->coder_type == AVMEDIA_TYPE_VIDEO)
           {
               video_stream_index = i;
               avctx = ifmt_ctx->streams[i]->codec;
               in_codec = avcodec_find_decoder(avctx->codec_id);
               if (!in_codec)
               {
                   fprintf(stderr, "in codec not found\n");
                   exit(1);
               }
               break;
           }
       }

       // Search for output video codec info
       AVCodec *out_codec = nullptr;
       AVCodecContext* o_avctx = nullptr;

       int o_video_stream_index = -1;
       for (int i = 0; i &lt; ofmt_ctx->nb_streams; i++)
       {
           if (ofmt_ctx->streams[i]->codec->coder_type == AVMEDIA_TYPE_VIDEO)
           {
               o_video_stream_index = i;
               o_avctx = ofmt_ctx->streams[i]->codec;
               out_codec = avcodec_find_encoder(o_avctx->codec_id);
               if (!out_codec)
               {
                   fprintf(stderr, "out codec not found\n");
                   exit(1);
               }
               break;
           }
       }

       // openCV pixel format
       AVPixelFormat pFormat = AV_PIX_FMT_RGB24;
       // Data size
       int numBytes = avpicture_get_size(pFormat, avctx->width, avctx->height);
       // allocate buffer
       uint8_t *buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
       // fill frame structure
       avpicture_fill((AVPicture *)pFrameRGB, buffer, pFormat, avctx->width, avctx->height);
       // frame area
       int y_size = avctx->width * avctx->height;
       // Open input codec
       avcodec_open2(avctx, in_codec, NULL);
       // Main loop
       while (1)
       {
           AVStream *in_stream, *out_stream;
           ret = av_read_frame(ifmt_ctx, &amp;pkt);
           if (ret &lt; 0)
           {
               break;
           }
           in_stream = ifmt_ctx->streams[pkt.stream_index];
           out_stream = ofmt_ctx->streams[pkt.stream_index];
           log_packet(ifmt_ctx, &amp;pkt, "in");
           // copy packet
           pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
           pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
           pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
           pkt.pos = -1;

           log_packet(ofmt_ctx, &amp;pkt, "out");
           if (pkt.stream_index == video_stream_index)
           {
               avcodec_decode_video2(avctx, pFrame, &amp;frameFinished, &amp;pkt);
               if (frameFinished)
               {
                   struct SwsContext *img_convert_ctx;
                   img_convert_ctx = sws_getCachedContext(NULL,
                       avctx->width,
                       avctx->height,
                       avctx->pix_fmt,
                       avctx->width,
                       avctx->height,
                       AV_PIX_FMT_BGR24,
                       SWS_BICUBIC,
                       NULL,
                       NULL,
                       NULL);
                   sws_scale(img_convert_ctx,
                       ((AVPicture*)pFrame)->data,
                       ((AVPicture*)pFrame)->linesize,
                       0,
                       avctx->height,
                       ((AVPicture *)pFrameRGB)->data,
                       ((AVPicture *)pFrameRGB)->linesize);

                   sws_freeContext(img_convert_ctx);

                   // Do some image processing
                   cv::Mat img(pFrame->height, pFrame->width, CV_8UC3, pFrameRGB->data[0],false);
                   cv::GaussianBlur(img,img,Size(5,5),3);
                   cv::imshow("Display", img);
                   cv::waitKey(5);
                   // --------------------------------
                   // Transform back to initial format
                   // --------------------------------
                   img_convert_ctx = sws_getCachedContext(NULL,
                       avctx->width,
                       avctx->height,
                       AV_PIX_FMT_BGR24,
                       avctx->width,
                       avctx->height,
                       avctx->pix_fmt,
                       SWS_BICUBIC,
                       NULL,
                       NULL,
                       NULL);
                   sws_scale(img_convert_ctx,
                       ((AVPicture*)pFrameRGB)->data,
                       ((AVPicture*)pFrameRGB)->linesize,
                       0,
                       avctx->height,
                       ((AVPicture *)pFrame)->data,
                       ((AVPicture *)pFrame)->linesize);
                       // --------------------------------------------
                       // Something must be here
                       // --------------------------------------------
                       //
                       // Write fideo frame (How to write frame to output stream ?)
                       //
                       // --------------------------------------------
                        sws_freeContext(img_convert_ctx);
               }

           }
           else // write sound frame
           {
               ret = av_interleaved_write_frame(ofmt_ctx, &amp;pkt);
           }
           if (ret &lt; 0)
           {
               fprintf(stderr, "Error muxing packet\n");
               break;
           }
           // Decrease packet ref counter
           av_packet_unref(&amp;pkt);
       }
       av_write_trailer(ofmt_ctx);
    end:
       avformat_close_input(&amp;ifmt_ctx);
       // close output
       if (ofmt_ctx &amp;&amp; !(ofmt->flags &amp; AVFMT_NOFILE))
       {
           avio_closep(&amp;ofmt_ctx->pb);
       }
       avformat_free_context(ofmt_ctx);
       if (ret &lt; 0 &amp;&amp; ret != AVERROR_EOF)
       {
           char buf_err[AV_ERROR_MAX_STRING_SIZE] = { 0 };
           av_make_error_string(buf_err, AV_ERROR_MAX_STRING_SIZE, ret);
           fprintf(stderr, "Error occurred: %s\n", buf_err);
           return 1;
       }

       avcodec_close(avctx);
       av_free(pFrame);
       av_free(pFrameRGB);

       return 0;
    }
  • How to fix ffmpeg's official tutorials03 bug that sound does't work well ? [on hold]

    31 janvier 2019, par xiaodai

    I want to make a player with ffmpeg and SDL. The tutorial I used is this one. Although I have resampled the audio from the decoded stream, the sound still plays with loud noise.

    I have no ideas to fix it anymore.

    I used the following :

    • the latest ffmpeg and sdl1
    • Visual Studio 2010
    // tutorial03.c
    // A pedagogical video player that will stream through every video frame as fast as it can
    // and play audio (out of sync).
    //
    // This tutorial was written by Stephen Dranger (dranger@gmail.com).
    //
    // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard,
    // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de)
    // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1
    //
    // Use the Makefile to build all examples.
    //
    // Run using
    // tutorial03 myvideofile.mpg
    //
    // to play the stream on your screen.

    extern "C"{
    #include <libavcodec></libavcodec>avcodec.h>
    #include <libavformat></libavformat>avformat.h>
    #include <libswscale></libswscale>swscale.h>
    #include <libavutil></libavutil>channel_layout.h>
    #include <libavutil></libavutil>common.h>
    #include <libavutil></libavutil>frame.h>
    #include <libavutil></libavutil>samplefmt.h>
    #include "libswresample/swresample.h"

    #include <sdl></sdl>SDL.h>
    #include <sdl></sdl>SDL_thread.h>
    };
    #ifdef __WIN32__
    #undef main /* Prevents SDL from overriding main() */
    #endif

    #include

    #define SDL_AUDIO_BUFFER_SIZE 1024
    #define MAX_AUDIO_FRAME_SIZE 192000

    // Global resampler context, (re)allocated per decoded frame in
    // audio_decode_frame().
    struct SwrContext *audio_swrCtx;
    // Debug dumps of the raw PCM: 'output.pcm' receives every converted frame,
    // 'output_stream.pcm' receives what is actually handed to the SDL device.
    // NOTE(review): fopen() in a file-scope initializer only compiles as C++,
    // and neither result is checked for NULL.
    FILE *pFile=fopen("output.pcm", "wb");
    FILE *pFile_stream=fopen("output_stream.pcm","wb");
    int audio_len;   // bytes copied per iteration of the SDL callback loop
    // Thread-safe FIFO of demuxed packets: filled by the demuxing loop in
    // main(), drained by the SDL audio callback thread.
    typedef struct PacketQueue {
       AVPacketList *first_pkt, *last_pkt;   // singly-linked list head/tail
       int nb_packets;                       // number of queued packets
       int size;                             // total payload bytes queued
       SDL_mutex *mutex;                     // protects all fields above
       SDL_cond *cond;                       // signalled when a packet is added
    } PacketQueue;

    PacketQueue audioq;   // the one queue shared between demuxer and audio thread

    int quit = 0;         // set to 1 on SDL_QUIT; read by the decoding loops

    // Zero a PacketQueue and create its SDL synchronisation primitives.
    void packet_queue_init(PacketQueue *q) {
        memset(q, 0, sizeof(*q));       // same size as sizeof(PacketQueue)
        q->cond  = SDL_CreateCond();
        q->mutex = SDL_CreateMutex();
    }

    // Append *pkt to the queue (the queue takes ownership of the payload) and
    // wake one waiting consumer.  Returns 0 on success, -1 on failure.
    int packet_queue_put(PacketQueue *q, AVPacket *pkt) {

        AVPacketList *pkt1;

        // FIX: this comparison was HTML-mangled to "&lt; 0" in the page text.
        // av_dup_packet duplicates the payload so the queue owns its own copy.
        if (av_dup_packet(pkt) < 0) {
            return -1;
        }

        pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
        if (!pkt1) {
            return -1;
        }

        pkt1->pkt = *pkt;
        pkt1->next = NULL;

        SDL_LockMutex(q->mutex);

        if (!q->last_pkt) {
            q->first_pkt = pkt1;        // queue was empty
        } else {
            q->last_pkt->next = pkt1;   // append after current tail
        }

        q->last_pkt = pkt1;
        q->nb_packets++;
        q->size += pkt1->pkt.size;
        SDL_CondSignal(q->cond);        // wake a blocked packet_queue_get()

        SDL_UnlockMutex(q->mutex);
        return 0;
    }

    static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
       AVPacketList *pkt1;
       int ret;

       SDL_LockMutex(q->mutex);

       for(;;) {

           if(quit) {
               ret = -1;
               break;
           }

           pkt1 = q->first_pkt;

           if(pkt1) {
               q->first_pkt = pkt1->next;

               if(!q->first_pkt) {
                   q->last_pkt = NULL;
               }

               q->nb_packets--;
               q->size -= pkt1->pkt.size;
               *pkt = pkt1->pkt;
               av_free(pkt1);
               ret = 1;
               break;

           } else if(!block) {
               ret = 0;
               break;

           } else {
               SDL_CondWait(q->cond, q->mutex);
           }
       }

       SDL_UnlockMutex(q->mutex);
       return ret;
    }

    int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {


        static AVPacket pkt;
        static uint8_t *audio_pkt_data = NULL;
        static int audio_pkt_size = 0;
        static AVFrame frame;

        int len1, data_size = 0;

        for(;;) {
            while(audio_pkt_size > 0) {
                int got_frame = 0;
                len1 = avcodec_decode_audio4(aCodecCtx, &amp;frame, &amp;got_frame, &amp;pkt);

                if(len1 &lt; 0) {
                    /* if error, skip frame */
                    audio_pkt_size = 0;
                    break;
                }
                audio_pkt_data += len1;
                audio_pkt_size -= len1;
                data_size = 0;
                /*

                au_convert_ctx = swr_alloc();
                au_convert_ctx=swr_alloc_set_opts(au_convert_ctx,out_channel_layout, out_sample_fmt, out_sample_rate,
                in_channel_layout,pCodecCtx->sample_fmt , pCodecCtx->sample_rate,0, NULL);
                swr_init(au_convert_ctx);

                swr_convert(au_convert_ctx,&amp;out_buffer, MAX_AUDIO_FRAME_SIZE,(const uint8_t **)pFrame->data , pFrame->nb_samples);


                */
                if( got_frame ) {
                    audio_swrCtx=swr_alloc();
                    audio_swrCtx=swr_alloc_set_opts(audio_swrCtx,  // we're allocating a new context
                        AV_CH_LAYOUT_STEREO,//AV_CH_LAYOUT_STEREO,     // out_ch_layout
                        AV_SAMPLE_FMT_S16,         // out_sample_fmt
                        44100, // out_sample_rate
                        aCodecCtx->channel_layout, // in_ch_layout
                        aCodecCtx->sample_fmt,     // in_sample_fmt
                        aCodecCtx->sample_rate,    // in_sample_rate
                        0,                         // log_offset
                        NULL);                     // log_ctx
                    int ret=swr_init(audio_swrCtx);
                    int out_samples = av_rescale_rnd(swr_get_delay(audio_swrCtx, aCodecCtx->sample_rate) + 1024, 44100, aCodecCtx->sample_rate, AV_ROUND_UP);
                    ret=swr_convert(audio_swrCtx,&amp;audio_buf, MAX_AUDIO_FRAME_SIZE,(const uint8_t **)frame.data ,frame.nb_samples);
                    data_size =
                        av_samples_get_buffer_size
                        (
                        &amp;data_size,
                        av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO),
                        ret,
                        AV_SAMPLE_FMT_S16,
                        1
                        );
                     fwrite(audio_buf, 1, data_size, pFile);
                    //memcpy(audio_buf, frame.data[0], data_size);
                    swr_free(&amp;audio_swrCtx);
                }

                if(data_size &lt;= 0) {
                    /* No data yet, get more frames */
                    continue;
                }

                /* We have data, return it and come back for more later */
                return data_size;
            }

            if(pkt.data) {
                av_free_packet(&amp;pkt);
            }

            if(quit) {
                return -1;
            }

            if(packet_queue_get(&amp;audioq, &amp;pkt, 1) &lt; 0) {
                return -1;
            }

            audio_pkt_data = pkt.data;
            audio_pkt_size = pkt.size;
        }
    }



    void audio_callback(void *userdata, Uint8 *stream, int len) {

       AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
       int /*audio_len,*/ audio_size;

       static uint8_t audio_buf[(MAX_AUDIO_FRAME_SIZE * 3) / 2];
       static unsigned int audio_buf_size = 0;
       static unsigned int audio_buf_index = 0;

       //SDL_memset(stream, 0, len);
       while(len > 0) {

           if(audio_buf_index >= audio_buf_size) {
               /* We have already sent all our data; get more */
               audio_size = audio_decode_frame(aCodecCtx, audio_buf, audio_buf_size);

               if(audio_size &lt; 0) {
                   /* If error, output silence */
                   audio_buf_size = 1024; // arbitrary?
                   memset(audio_buf, 0, audio_buf_size);

               } else {
                   audio_buf_size = audio_size;
               }

               audio_buf_index = 0;
           }

           audio_len = audio_buf_size - audio_buf_index;

           if(audio_len > len) {
               audio_len = len;
           }

           memcpy(stream, (uint8_t *)audio_buf , audio_len);
           //SDL_MixAudio(stream,(uint8_t*)audio_buf,audio_len,SDL_MIX_MAXVOLUME);
           fwrite(audio_buf, 1, audio_len, pFile_stream);
           len -= audio_len;
           stream += audio_len;
           audio_buf_index += audio_len;
           audio_len=len;
       }
    }

    int main(int argc, char *argv[]) {
       AVFormatContext *pFormatCtx = NULL;
       int             i, videoStream, audioStream;
       AVCodecContext  *pCodecCtx = NULL;
       AVCodec         *pCodec = NULL;
       AVFrame         *pFrame = NULL;
       AVPacket        packet;
       int             frameFinished;

       //float           aspect_ratio;

       AVCodecContext  *aCodecCtx = NULL;
       AVCodec         *aCodec = NULL;

       SDL_Overlay     *bmp = NULL;
       SDL_Surface     *screen = NULL;
       SDL_Rect        rect;
       SDL_Event       event;
       SDL_AudioSpec   wanted_spec, spec;

       struct SwsContext   *sws_ctx            = NULL;
       AVDictionary        *videoOptionsDict   = NULL;
       AVDictionary        *audioOptionsDict   = NULL;

       if(argc &lt; 2) {
               fprintf(stderr, "Usage: test <file>\n");
               exit(1);
           }

           // Register all formats and codecs
       av_register_all();

       if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
           fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
           exit(1);
       }

       // Open video file
       if(avformat_open_input(&amp;pFormatCtx, argv[1]/*"file.mov"*/, NULL, NULL) != 0) {
           return -1;    // Couldn't open file
       }

       // Retrieve stream information
       if(avformat_find_stream_info(pFormatCtx, NULL) &lt; 0) {
           return -1;    // Couldn't find stream information
       }

       // Dump information about file onto standard error
       av_dump_format(pFormatCtx, 0, argv[1], 0);

       // Find the first video stream
       videoStream = -1;
       audioStream = -1;

       for(i = 0; i &lt; pFormatCtx->nb_streams; i++) {
           if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO &amp;&amp;
               videoStream &lt; 0) {
                   videoStream = i;
           }

           if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO &amp;&amp;
               audioStream &lt; 0) {
                   audioStream = i;
           }
       }

       if(videoStream == -1) {
           return -1;    // Didn't find a video stream
       }

       if(audioStream == -1) {
           return -1;
       }

       aCodecCtx = pFormatCtx->streams[audioStream]->codec;
       // Set audio settings from codec info
       wanted_spec.freq = 44100;
       wanted_spec.format = AUDIO_S16SYS;
       wanted_spec.channels = av_get_channel_layout_nb_channels(AV_CH_LAYOUT_STEREO);;
       wanted_spec.silence = 0;
       wanted_spec.samples = 1024;
       wanted_spec.callback = audio_callback;
       wanted_spec.userdata = aCodecCtx;

       if(SDL_OpenAudio(&amp;wanted_spec, &amp;spec) &lt; 0) {
           fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
           return -1;
       }


       aCodec = avcodec_find_decoder(aCodecCtx->codec_id);

       if(!aCodec) {
           fprintf(stderr, "Unsupported codec!\n");
           return -1;
       }

       avcodec_open2(aCodecCtx, aCodec, &amp;audioOptionsDict);

       // audio_st = pFormatCtx->streams[index]
       packet_queue_init(&amp;audioq);
       SDL_PauseAudio(0);

       // Get a pointer to the codec context for the video stream
       pCodecCtx = pFormatCtx->streams[videoStream]->codec;

       // Find the decoder for the video stream
       pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

       if(pCodec == NULL) {
           fprintf(stderr, "Unsupported codec!\n");
           return -1; // Codec not found
       }

       // Open codec
       if(avcodec_open2(pCodecCtx, pCodec, &amp;videoOptionsDict) &lt; 0) {
           return -1;    // Could not open codec
       }

       // Allocate video frame
       pFrame = av_frame_alloc();

       // Make a screen to put our video

    #ifndef __DARWIN__
       screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
    #else
       screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
    #endif

       if(!screen) {
           fprintf(stderr, "SDL: could not set video mode - exiting\n");
           exit(1);
       }

       // Allocate a place to put our YUV image on that screen
       bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
           pCodecCtx->height,
           SDL_YV12_OVERLAY,
           screen);
       sws_ctx =
           sws_getContext
           (
           pCodecCtx->width,
           pCodecCtx->height,
           pCodecCtx->pix_fmt,
           pCodecCtx->width,
           pCodecCtx->height,
           PIX_FMT_YUV420P,
           SWS_BILINEAR,
           NULL,
           NULL,
           NULL
           );


       // Read frames and save first five frames to disk
       i = 0;

       while(av_read_frame(pFormatCtx, &amp;packet) >= 0) {
           // Is this a packet from the video stream?
           if(packet.stream_index == videoStream) {
               // Decode video frame
               avcodec_decode_video2(pCodecCtx, pFrame, &amp;frameFinished,
                   &amp;packet);

               // Did we get a video frame?
               if(frameFinished) {
                   SDL_LockYUVOverlay(bmp);

                   AVPicture pict;
                   pict.data[0] = bmp->pixels[0];
                   pict.data[1] = bmp->pixels[2];
                   pict.data[2] = bmp->pixels[1];

                   pict.linesize[0] = bmp->pitches[0];
                   pict.linesize[1] = bmp->pitches[2];
                   pict.linesize[2] = bmp->pitches[1];

                   // Convert the image into YUV format that SDL uses
                   sws_scale
                       (
                       sws_ctx,
                       (uint8_t const * const *)pFrame->data,
                       pFrame->linesize,
                       0,
                       pCodecCtx->height,
                       pict.data,
                       pict.linesize
                       );

                   SDL_UnlockYUVOverlay(bmp);

                   rect.x = 0;
                   rect.y = 0;
                   rect.w = pCodecCtx->width;
                   rect.h = pCodecCtx->height;
                   SDL_DisplayYUVOverlay(bmp, &amp;rect);
                   SDL_Delay(40);
                   av_free_packet(&amp;packet);
               }

           } else if(packet.stream_index == audioStream) {
               packet_queue_put(&amp;audioq, &amp;packet);

           } else {
               av_free_packet(&amp;packet);
           }

           // Free the packet that was allocated by av_read_frame
           SDL_PollEvent(&amp;event);

           switch(event.type) {
           case SDL_QUIT:
               quit = 1;
               SDL_Quit();
               exit(0);
               break;

           default:
               break;
           }

       }

       // Free the YUV frame
       av_free(pFrame);
       /*swr_free(&amp;audio_swrCtx);*/
       // Close the codec
       avcodec_close(pCodecCtx);
       fclose(pFile);
       fclose(pFile_stream);
       // Close the video file
       avformat_close_input(&amp;pFormatCtx);

       return 0;
    }
    </file>

    I hope to play normally.