Recherche avancée

Médias (1)

Mot : - Tags -/biographie

Autres articles (68)

  • Personnaliser en ajoutant son logo, sa bannière ou son image de fond

    5 septembre 2013, par

    Certains thèmes prennent en compte trois éléments de personnalisation : l’ajout d’un logo ; l’ajout d’une bannière ; l’ajout d’une image de fond.

  • Submit bugs and patches

    13 avril 2011

    Unfortunately, software is never perfect.
    If you think you have found a bug, report it using our ticket system. Please help us fix it by providing the following information: the browser you are using, including the exact version; as precise an explanation of the problem as possible; if possible, the steps taken that lead to the problem; and a link to the site / page in question.
    If you think you have solved the bug, fill in a ticket and attach to it a corrective patch.
    You may also (...)

  • Publier sur MédiaSpip

    13 juin 2013

    Puis-je poster des contenus à partir d’une tablette Ipad ?
    Oui, si votre MédiaSpip installé est à la version 0.2 ou supérieure. Contactez au besoin l’administrateur de votre MédiaSpip pour le savoir.

Sur d’autres sites (9000)

  • Wav File encoded with FFMPEG has issues with codecs while playing using VLC Player

    6 décembre 2012, par user924702

    I want to convert raw PCM data(Taken from Android Phone mic) into a libGSM Wave file. After encoding into file, VLC player shows right codec information and duration but unable to play contents. Please help me to find what I am doing wrong.

    Below is my code for encoding and header writing :

    void EncodeTest(uint8_t *audioData, size_t audioSize)
    {
       AVCodecContext  *audioCodec;
       AVCodec *codec;
       uint8_t *buf;    int bufSize, frameBytes;
       __android_log_print(ANDROID_LOG_INFO, DEBUG_TAG,"Lets encode :%u with size %d\n",(int)audioData, (int)audioSize);
       //Set up audio encoder
       codec = avcodec_find_encoder(CODEC_ID_GSM);
       if (codec == NULL){
           __android_log_print(ANDROID_LOG_ERROR, DEBUG_TAG,"ERROR:: Unable to find encoder(CODEC_ID_GSM)");
           codec = avcodec_find_encoder(CODEC_ID_GSM);
           if (codec == NULL){
               __android_log_print(ANDROID_LOG_ERROR, DEBUG_TAG,"ERROR:: Unable to find encoder(CODEC_ID_GSM)");
               return;
           }
       }
       audioCodec                  = avcodec_alloc_context();
       audioCodec->channels        = 1;
       audioCodec->sample_rate     = 8000;
       audioCodec->sample_fmt      = SAMPLE_FMT_S16;
       audioCodec->bit_rate        = 13200;
       audioCodec->priv_data       = gsm_create();

       switch(audioCodec->codec_id) {
           case CODEC_ID_GSM:
               audioCodec->frame_size = GSM_FRAME_SIZE;
               audioCodec->block_align = GSM_BLOCK_SIZE;
               int one = 1;
               gsm_option(audioCodec->priv_data, GSM_OPT_WAV49, &one);
               break;
           case CODEC_ID_GSM_MS: {
               int one = 1;
               gsm_option(audioCodec->priv_data, GSM_OPT_WAV49, &one);
               audioCodec->frame_size = 2*GSM_FRAME_SIZE;
               audioCodec->block_align = GSM_MS_BLOCK_SIZE;
           }
       }
       audioCodec->coded_frame= avcodec_alloc_frame();
       audioCodec->coded_frame->key_frame= 1;
       audioCodec->time_base       = (AVRational){1,  audioCodec->sample_rate};
       audioCodec->codec_type      = CODEC_TYPE_AUDIO;

       if (avcodec_open(audioCodec, codec) < 0){
           __android_log_print(ANDROID_LOG_ERROR, DEBUG_TAG,"ERROR:: Unable to avcodec_open");
           return;
       }

       bufSize     = FF_MIN_BUFFER_SIZE * 10;
       buf         = (uint8_t *)malloc(bufSize);
       if (buf == NULL) return;
       frameBytes = audioCodec->frame_size * audioCodec->channels * 2;
       FILE *fileWrite = fopen(FILE_NAME,"w+b");
       if(NULL == fileWrite){
           __android_log_print(ANDROID_LOG_ERROR, DEBUG_TAG,"ERROR:: Unable to open file for reading.");
       }
       /*Write wave header*/
       WriteWav(fileWrite, 32505);/*Just for test*/

       /*Lets encode raw packet and write into file after header.*/
       __android_log_print(ANDROID_LOG_INFO, DEBUG_TAG,"Lets Encode Actual Bytes");
       int nChunckSize = 0;
       while (audioSize >= frameBytes)
       {
           int packetSize;

           packetSize = avcodec_encode_audio(audioCodec, buf, bufSize, (short *)audioData);
           __android_log_print(ANDROID_LOG_INFO, DEBUG_TAG,"Encoder returned %d bytes of data\n", packetSize);
           nChunckSize += packetSize;
           audioData += frameBytes;
           audioSize -= frameBytes;
           if(NULL != fileWrite){
               fwrite(buf, packetSize, 1, fileWrite);
           }
           else{
               __android_log_print(ANDROID_LOG_ERROR, DEBUG_TAG,"Unable to open file for writting... NULL");
           }
       }
       if(NULL != fileWrite){
           fclose(fileWrite);
       }
       __android_log_print(ANDROID_LOG_INFO, DEBUG_TAG,"----- Done with nChunckSize: %d --- ",nChunckSize);
        __android_log_print(ANDROID_LOG_INFO, DEBUG_TAG,"*****************************");
       wavReadnDisplayHeader(FILE_NAME);
       __android_log_print(ANDROID_LOG_INFO, DEBUG_TAG,"*****************************");
       wavReadnDisplayHeader("/sdcard/Voicemail2.wav");
    }

    Header Writing :

    /** Writes a canonical 44-byte WAV header at the current file position.
     *  @param f     open binary-mode file, positioned at offset 0
     *  @param bytes length in bytes of the sample data that will follow
     */
    void WriteWav(FILE *f, long int bytes)
    {
       /* quick and dirty */
       fwrite("RIFF",sizeof(char),4,f);                /*  0-3 */      //RIFF
       /* RIFF ChunkSize = total file size - 8 = 36 + data bytes for this fixed
        * 44-byte header.  NOTE(review): the original line was corrupted to
        * "bytesã8" by mis-encoding; it presumably read "bytes+36" (or a
        * variant) — verify against the original source. */
       PutNum(bytes+36,f,1,4);                         /*  4-7 */      //ChunkSize
       fwrite("WAVEfmt ",sizeof(char),8,f);            /*  8-15 */     //WAVE Header + FMT header
       PutNum(16,f,1,4);                               /* 16-19 */     //Size of the fmt chunk
       PutNum(49,f,1,2);                               /* 20-21 */     //Audio format, 49=libgsm wave, 1=PCM,6=mulaw,7=alaw, 257=IBM Mu-Law, 258=IBM A-Law, 259=ADPCM
       PutNum(1,f,1,2);                                /* 22-23 */     //Number of channels 1=Mono 2=Sterio
       PutNum(8000,f,1,4);                             /* 24-27 */     //Sampling Frequency in Hz
       /* NOTE(review): 2*8000 is the byte rate for 16-bit PCM; for GSM data
        * the real byte rate is much lower — confirm which is intended. */
       PutNum(2*8000,f,1,4);                           /* 28-31 */     //bytes per second /Sample/persec
       PutNum(2,f,1,2);                                /* 32-33 */     // 2=16-bit mono, 4=16-bit stereo
       PutNum(16,f,1,2);                               /* 34-35 */     // Number of bits per sample
       fwrite("data",sizeof(char),4,f);                /* 36-39 */
       PutNum(bytes,f,1,4);                            /* 40-43 */     //Sampled data length
    }

    Please help....

  • MediaCodec - save timing info for ffmpeg ?

    18 novembre 2014, par Mark

    I have a requirement to encrypt video before it hits the disk. It seems on Android the only way to do this is to use MediaCodec, and encrypt and save the raw h264 elementary streams. (The MediaRecorder and Muxer classes operate on FileDescriptors, not an OutputStream, so I can’t wrap it with a CipherOutputStream).

    Using the grafika code as a base, I’m able to save a raw h264 elementary stream by replacing the Muxer in the VideoEncoderCore class with a WriteableByteChannel, backed by a CipherOutputStream (code below, minus the CipherOutputStream).

    If I take the resulting output file over to the desktop I’m able to use ffmpeg to mux the h264 stream to a playable mp4 file. What’s missing however is timing information. ffmpeg always assumes 25fps. What I’m looking for is a way to save the timing info, perhaps to a separate file, that I can use to give ffmpeg the right information on the desktop.

    I’m not doing audio yet, but I can imagine I’ll need to do the same thing there, if I’m to have any hope of remotely accurate syncing.

    FWIW, I’m a total newbie here, and I really don’t know much of anything about SPS, NAL, Atoms, etc.

    /*
    * Copyright 2014 Google Inc. All rights reserved.
    *
    * Licensed under the Apache License, Version 2.0 (the "License");
    * you may not use this file except in compliance with the License.
    * You may obtain a copy of the License at
    *
    *      http://www.apache.org/licenses/LICENSE-2.0
    *
    * Unless required by applicable law or agreed to in writing, software
    * distributed under the License is distributed on an "AS IS" BASIS,
    * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    * See the License for the specific language governing permissions and
    * limitations under the License.
    */


    import android.media.MediaCodec;
    import android.media.MediaCodecInfo;
    import android.media.MediaFormat;
    import android.util.Log;
    import android.view.Surface;

    import java.io.BufferedOutputStream;
    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.Channels;
    import java.nio.channels.WritableByteChannel;

    /**
    * This class wraps up the core components used for surface-input video encoding.
    * <p>
    * Once created, frames are fed to the input surface.  Remember to provide the presentation
    * time stamp, and always call drainEncoder() before swapBuffers() to ensure that the
    * producer side doesn't get backed up.
    * </p><p>
    * This class is not thread-safe, with one exception: it is valid to use the input surface
    * on one thread, and drain the output on a different thread.
    */
    public class VideoEncoderCore {
       private static final String TAG = MainActivity.TAG;
       private static final boolean VERBOSE = false;

       // TODO: these ought to be configurable as well
       private static final String MIME_TYPE = "video/avc";    // H.264 Advanced Video Coding
       private static final int FRAME_RATE = 30;               // 30fps
       private static final int IFRAME_INTERVAL = 5;           // 5 seconds between I-frames

       private Surface mInputSurface;
       private MediaCodec mEncoder;
       private MediaCodec.BufferInfo mBufferInfo;
       private int mTrackIndex;
       //private MediaMuxer mMuxer;
       //private boolean mMuxerStarted;
       private WritableByteChannel outChannel;

       /**
        * Configures encoder and muxer state, and prepares the input Surface.
        */
       public VideoEncoderCore(int width, int height, int bitRate, File outputFile)
               throws IOException {
           mBufferInfo = new MediaCodec.BufferInfo();

           MediaFormat format = MediaFormat.createVideoFormat(MIME_TYPE, width, height);

           // Set some properties.  Failing to specify some of these can cause the MediaCodec
           // configure() call to throw an unhelpful exception.
           format.setInteger(MediaFormat.KEY_COLOR_FORMAT,
                   MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface);
           format.setInteger(MediaFormat.KEY_BIT_RATE, bitRate);
           format.setInteger(MediaFormat.KEY_FRAME_RATE, FRAME_RATE);
           format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, IFRAME_INTERVAL);
           if (VERBOSE) Log.d(TAG, "format: " + format);

           // Create a MediaCodec encoder, and configure it with our format.  Get a Surface
           // we can use for input and wrap it with a class that handles the EGL work.
           mEncoder = MediaCodec.createEncoderByType(MIME_TYPE);
           mEncoder.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
           mInputSurface = mEncoder.createInputSurface();
           mEncoder.start();

           // Create a MediaMuxer.  We can't add the video track and start() the muxer here,
           // because our MediaFormat doesn't have the Magic Goodies.  These can only be
           // obtained from the encoder after it has started processing data.
           //
           // We're not actually interested in multiplexing audio.  We just want to convert
           // the raw H.264 elementary stream we get from MediaCodec into a .mp4 file.
           //mMuxer = new MediaMuxer(outputFile.toString(),
           //        MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);

           mTrackIndex = -1;
           //mMuxerStarted = false;
           outChannel = Channels.newChannel(new BufferedOutputStream(new FileOutputStream(outputFile)));
       }

       /**
        * Returns the encoder's input surface.
        */
       public Surface getInputSurface() {
           return mInputSurface;
       }

       /**
        * Releases encoder resources.
        */
       public void release() {
           if (VERBOSE) Log.d(TAG, "releasing encoder objects");
           if (mEncoder != null) {
               mEncoder.stop();
               mEncoder.release();
               mEncoder = null;
           }
           try {
               outChannel.close();
           }
           catch (Exception e) {
               Log.e(TAG,"Couldn't close output stream.");
           }
       }

       /**
        * Extracts all pending data from the encoder and forwards it to the muxer.
        * </p><p>
        * If endOfStream is not set, this returns when there is no more data to drain.  If it
        * is set, we send EOS to the encoder, and then iterate until we see EOS on the output.
        * Calling this with endOfStream set should be done once, right before stopping the muxer.
        * </p><p>
        * We're just using the muxer to get a .mp4 file (instead of a raw H.264 stream).  We're
        * not recording audio.
        */
       public void drainEncoder(boolean endOfStream) {
           final int TIMEOUT_USEC = 10000;
           if (VERBOSE) Log.d(TAG, "drainEncoder(" + endOfStream + ")");

           if (endOfStream) {
               if (VERBOSE) Log.d(TAG, "sending EOS to encoder");
               mEncoder.signalEndOfInputStream();
           }

           ByteBuffer[] encoderOutputBuffers = mEncoder.getOutputBuffers();
           while (true) {
               int encoderStatus = mEncoder.dequeueOutputBuffer(mBufferInfo, TIMEOUT_USEC);
               if (encoderStatus == MediaCodec.INFO_TRY_AGAIN_LATER) {
                   // no output available yet
                   if (!endOfStream) {
                       break;      // out of while
                   } else {
                       if (VERBOSE) Log.d(TAG, "no output available, spinning to await EOS");
                   }
               } else if (encoderStatus == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
                   // not expected for an encoder
                   encoderOutputBuffers = mEncoder.getOutputBuffers();
               } else if (encoderStatus == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
                   // should happen before receiving buffers, and should only happen once
                   //if (mMuxerStarted) {
                   //    throw new RuntimeException("format changed twice");
                   //}
                   MediaFormat newFormat = mEncoder.getOutputFormat();
                   Log.d(TAG, "encoder output format changed: " + newFormat);

                   // now that we have the Magic Goodies, start the muxer
                   //mTrackIndex = mMuxer.addTrack(newFormat);
                   //mMuxer.start();
                   //mMuxerStarted = true;
               } else if (encoderStatus &lt; 0) {
                   Log.w(TAG, "unexpected result from encoder.dequeueOutputBuffer: " +
                           encoderStatus);
                   // let's ignore it
               } else {
                   ByteBuffer encodedData = encoderOutputBuffers[encoderStatus];
                   if (encodedData == null) {
                       throw new RuntimeException("encoderOutputBuffer " + encoderStatus +
                               " was null");
                   }

                   /*
                      FFMPEG needs this info.
                   if ((mBufferInfo.flags &amp; MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
                       // The codec config data was pulled out and fed to the muxer when we got
                       // the INFO_OUTPUT_FORMAT_CHANGED status.  Ignore it.
                       if (VERBOSE) Log.d(TAG, "ignoring BUFFER_FLAG_CODEC_CONFIG");
                       mBufferInfo.size = 0;
                   }
                   */

                   if (mBufferInfo.size != 0) {
                       /*
                       if (!mMuxerStarted) {
                           throw new RuntimeException("muxer hasn't started");
                       }
                       */

                       // adjust the ByteBuffer values to match BufferInfo (not needed?)
                       encodedData.position(mBufferInfo.offset);
                       encodedData.limit(mBufferInfo.offset + mBufferInfo.size);

                       try {
                           outChannel.write(encodedData);
                       }
                       catch (Exception e) {
                           Log.e(TAG,"Error writing output.",e);
                       }
                       if (VERBOSE) {
                           Log.d(TAG, "sent " + mBufferInfo.size + " bytes to muxer, ts=" +
                                   mBufferInfo.presentationTimeUs);
                       }
                   }

                   mEncoder.releaseOutputBuffer(encoderStatus, false);

                   if ((mBufferInfo.flags &amp; MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
                       if (!endOfStream) {
                           Log.w(TAG, "reached end of stream unexpectedly");
                       } else {
                           if (VERBOSE) Log.d(TAG, "end of stream reached");
                       }
                       break;      // out of while
                   }
               }
           }
       }
    }
    </p>
  • MP3 audio recording from an input device using the FFmpeg API

    25 novembre 2014, par Hascoet Julien

    I’ve been trying to use the ffmpeg library (I’m working in C with the ffmpeg API) to decode and encode in mp3 from my microphone on Linux. The mp3lame lib is installed and I manage to open all codecs and to decode input samples.
    Here are my input settings :

    Input #1, alsa, from 'default':
     Duration: N/A, start: 1416946099.454877, bitrate: 1536 kb/s
       Stream #1:0: Audio: pcm_s16le, 48000 Hz, 2 channels, s16, 1536 kb/s

    And I manage to decode it ; therefore, it gives me 2 channels of 64 samples after calling avcodec_decode_audio4 right after av_read_frame. The output frame that avcodec_decode_audio4 just gave me seems to be ok with 2 channels as well and 64 samples per channel. Packets are size of 256 and 16-bit*2*64 = 256 bytes so that makes sense.

    The problem is that when I try to encode this decoded frame with avcodec_encode_audio2 and the codec set to AV_CODEC_ID_MP3 (I don’t have any codec-opening issues), it gives me a segmentation fault (core dumped) even though everything is allocated. I wonder whether I have too many samples, or not enough, so the encode function reads past what was allocated. Probably I have to use some resampling methods, but I have no clue.

    Has anyone ever tried to encode in mp3 from an input device using the ffmpeg C API and to mux it into an mp3 file or even an avi file? (from a microphone)

    The ffmpeg command line works perfectly : ffmpeg -f alsa -i default out.mp3

    Here is my ffmpeg compilation setup with preinstalled stuffs :

    sudo apt-get install libasound2-dev      
    sudo apt-get install libmp3lame-dev

    ./configure --disable-static --enable-shared --enable-gpl --enable-libx264 --enable-libv4l2 --enable-gpl --enable-swscale --enable-libmp3lame

    sudo make install

    export LD_LIBRARY_PATH=/usr/local/lib

    Thank you guys !

    Here is the code is used, this is run with pthread after (see main()) :

    /* Capture configuration defaults: ALSA "default" device, encoded to MP3. */
    #define DEFAULT_AUDIO_INPUT_DRIVER_NAME         "alsa"
    #define DEFAULT_AUDIO_INPUT_DEVICE_NAME         "default"
    #define DEFAULT_USED_AUDIO_CODEC                AV_CODEC_ID_MP3
    #define DEFAULT_OUTPUT_AUDIO_FRAMERATE          44100
    #define DEFAULT_AUDIO_OUTPUT_FILE_NAME          "audioTest.mp3"



    /* Input and Output audio format.*/
    static AVFormatContext *ifmt_ctx = NULL;
    static AVFormatContext *ofmt_ctx = NULL; //from video

    /* Codec contexts used to encode input and output. */
    /* NOTE(review): these alias streams[0]->codec / out_stream->codec set in
     * openAudioOutput(); they are not separately allocated. */
    static AVCodecContext *dec_ctx = NULL;
    static AVCodecContext *enc_ctx = NULL;

    /* Shared packet/frame scratch state used by audioThread(). */
    AVPacket audioPacket = { .data = NULL, .size = 0 };
    AVPacket audioEncodedPacket = { .data = NULL, .size = 0 };
    AVFrame *decodedAudioFrame = NULL;
    AVFrame *rescaledAudioFrame = NULL;
    /* NOTE(review): streamAudio is passed to write_frame() in audioThread()
     * but is never assigned anywhere visible — confirm it is set before use. */
    AVStream *streamAudio = NULL;
    AVCodec *audioEncodeCodec = NULL;
    /* Resampler context; only referenced from commented-out swr_convert code. */
    static struct SwrContext *swr_ctx;


    /* Audio configuration */
    char *AUDIO_INPUT_DRIVER_NAME           = {DEFAULT_AUDIO_INPUT_DRIVER_NAME};
    char *AUDIO_INPUT_DEVICE_NAME           = {DEFAULT_AUDIO_INPUT_DEVICE_NAME};
    enum AVCodecID AUDIO_ENCODED_CODEC_ID   = DEFAULT_USED_AUDIO_CODEC;
    int AUDIO_FRAME_RATE                    = DEFAULT_OUTPUT_AUDIO_FRAMERATE;
    char* AUDIO_OUTPUT_FILE_NAME            = {DEFAULT_AUDIO_OUTPUT_FILE_NAME};

    static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                     uint64_t channel_layout,
                                     int sample_rate, int nb_samples)
    {
       AVFrame *frame = av_frame_alloc();
       int ret;

       if (!frame) {
           syslog(LOG_ERR, "Error allocating an audio frame\n");
           exit(0);
       }

       frame->format = sample_fmt;
       frame->channel_layout = channel_layout;
       frame->sample_rate = sample_rate;
       frame->nb_samples = nb_samples;

       if (nb_samples) {
           ret = av_frame_get_buffer(frame, 0);
           if (ret &lt; 0) {
               syslog(LOG_ERR, "Error allocating an audio buffer\n");
               exit(0);
           }
       }
       return frame;
    }

    /* Logs a packet's pts/dts/duration both in time-base ticks and in seconds,
     * using the time base of the stream the packet belongs to. */
    static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
    {
       /* The original had the HTML-escaped "&amp;" before fmt_ctx here. */
       AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

       syslog(LOG_INFO, "AUDIO pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
              av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
              av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
              av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base), pkt->stream_index);
    }

    /* Rescales a packet's timestamps from the codec time base to the stream
     * time base, tags it with the destination stream index, logs it, and
     * writes it to the output context.  Returns av_write_frame()'s result. */
    static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
    {
       /* Timestamps arrive in codec units; the muxer expects stream units. */
       av_packet_rescale_ts(pkt, *time_base, st->time_base);
       pkt->stream_index = st->index;

       log_packet(fmt_ctx, pkt);

       /* av_interleaved_write_frame() would be the interleaving alternative;
        * this single-stream output writes directly. */
       return av_write_frame(fmt_ctx, pkt);
    }

    static void openAudioInput(const char *driverName, const char *deviceName){  
       int i; AVInputFormat *file_iformat = NULL;
       if((file_iformat = av_find_input_format(driverName)) == NULL){
           syslog(LOG_ERR ,"The %s doesn't seem to be registered\n", driverName);
           exit(0);
       }
       /* Open the device, in order to use the audio linux driver. */
       if((avformat_open_input(&amp;ifmt_ctx, deviceName, file_iformat, NULL)) &lt; 0){
           syslog(LOG_ERR ,"Error while trying to open %s.\n", deviceName);
           exit(0);
       }
       if((avformat_find_stream_info(ifmt_ctx, NULL)) &lt; 0) {
           syslog(LOG_ERR, "Cannot find stream information\n");
           exit(0);
       }
       /* Open decoder */
       //printf("Number of input stream: %d\n", ifmt_ctx->nb_streams);
       /*if(ifmt_ctx->streams[0]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
           printf("AUDIO_TYPE\n");*/
       for(i = 0; i &lt; ifmt_ctx->nb_streams; i++)
           if(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO
                   || ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
               if(avcodec_open2(ifmt_ctx->streams[i]->codec,            
               avcodec_find_decoder(ifmt_ctx->streams[i]->codec->codec_id), NULL) &lt; 0){
                   syslog(LOG_ERR, "Cannot find stream information\n");
                   exit(0);
               }
       av_dump_format(ifmt_ctx, 1, deviceName, 0);
    }


    static void openAudioOutput(const char *deviceName, const char *fileName, enum AVCodecID encodeCodec){
       AVStream *out_stream = NULL, *in_stream = NULL;
       AVCodec *encoder;
       avformat_alloc_output_context2(&amp;ofmt_ctx, NULL, NULL, fileName);
       if(!ofmt_ctx){
           syslog(LOG_ERR, "Could not create output context\n");
           exit(0);
       }
       out_stream = avformat_new_stream(ofmt_ctx, NULL);
       if(!out_stream){
           syslog(LOG_ERR, "Failed allocating output stream\n");
           exit(0);
       }
       (ifmt_ctx!=NULL) ? in_stream = ifmt_ctx->streams[0] : exit(0);
       dec_ctx = in_stream->codec;
       enc_ctx = out_stream->codec;
       /* find encoder */
       encoder = avcodec_find_encoder(encodeCodec);
       enc_ctx->codec = encoder;
       /* AUDIO PARAMETERS */
       enc_ctx->sample_fmt = encoder->sample_fmts[0];
       enc_ctx->bit_rate = 128000; //added

           enc_ctx->sample_rate = dec_ctx->sample_rate;
           enc_ctx->channel_layout = AV_CH_LAYOUT_MONO;//dec_ctx->channel_layout;
           out_stream->time_base = enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
           enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);

       printf("Sample Rate: %d Number Encoded channels: %d\n", dec_ctx->sample_rate, enc_ctx->channels);
       /* Open encoder with the found codec */
       if(avcodec_open2(enc_ctx, encoder, NULL) &lt; 0) {
           syslog(LOG_ERR, "Cannot open audio encoder for stream\n");
           exit(0);
       }
       av_dump_format(ofmt_ctx, 0, fileName, 1);
       if (!(ofmt_ctx->oformat->flags &amp; AVFMT_NOFILE))
           if(avio_open(&amp;ofmt_ctx->pb, fileName, AVIO_FLAG_WRITE) &lt; 0){
               syslog(LOG_ERR, "Could not open output file '%s'", fileName);
               exit(0);
           }
       /* init muxer, write output file header */
       if(avformat_write_header(ofmt_ctx, NULL) &lt; 0){
           syslog(LOG_ERR, "Error occurred when opening output file\n");
           exit(0);
       }


       decodedAudioFrame = av_frame_alloc();
       rescaledAudioFrame = av_frame_alloc();
    }



    /* One-shot audio setup: the input must be opened first because
     * openAudioOutput() reads the global ifmt_ctx filled in by openAudioInput(). */
    void initAudio(void)
    {
       openAudioInput(AUDIO_INPUT_DRIVER_NAME, AUDIO_INPUT_DEVICE_NAME);
       openAudioOutput(AUDIO_INPUT_DEVICE_NAME, AUDIO_OUTPUT_FILE_NAME, AUDIO_ENCODED_CODEC_ID);
    }

    void *audioThread(void){
       int16_t *   samples;
       int gotDecodedFrame, dst_nb_samples, samples_count=0;
       int packetCounter = 0;

       int i = 0, got_packet, got_input, ret;
       float sizeOfFile = 0;

       AVPacket packet = { .data = NULL, .size = 0 };

       struct timespec t0, t1;
       int flags = fcntl(0, F_GETFL);
       flags = fcntl(0, F_SETFL, flags | O_NONBLOCK); //set non-blocking read on stdin
       packetCounter = 0;

       do{
           clock_gettime(CLOCK_REALTIME, &amp;t0);
           if ((av_read_frame(ifmt_ctx, &amp;audioPacket)) &lt; 0){
               break;
           }
           packetCounter++;

           clock_gettime(CLOCK_REALTIME, &amp;t1);
           av_init_packet(&amp;audioEncodedPacket);
           audioEncodedPacket.data = NULL;
           audioEncodedPacket.size = 0;

           if (avcodec_decode_audio4(dec_ctx, decodedAudioFrame, &amp;gotDecodedFrame, &amp;audioPacket) &lt; 0) {
               syslog(LOG_ERR ,"Can't Decode the packet received from the camera.\n");
               exit(0);
           }

           printf("Audio Decoded, Nb Channel %d, Samples per Channel %d, Size %d, PTS %ld\n",
               decodedAudioFrame->channels,
               decodedAudioFrame->nb_samples,
               decodedAudioFrame->pkt_size,
               decodedAudioFrame->pkt_pts);
           /*if((ret = swr_convert(swr_ctx, rescaledAudioFrame->data, rescaledAudioFrame->nb_samples,
           (const uint8_t **)decodedAudioFrame->data, decodedAudioFrame->nb_samples)) &lt; 0){
                   syslog(LOG_ERR, "Error while converting\n");
                   exit(0);
           }*/

           //decodedAudioFrame->pts = audioPacket.pts;//(int64_t)((1.0 / (float)64000) * (float)90 * (float)packetCounter);

           ret = avcodec_encode_audio2(enc_ctx, &amp;audioEncodedPacket, decodedAudioFrame, &amp;got_packet);



           printf("Audio encoded packet size = %d, packet nb = %d, sample rate = %d Ret Value = %d\n", audioEncodedPacket.size, packetCounter, enc_ctx->sample_rate, ret);
           audioPacket.pts = (int64_t)((1.0 / (float)enc_ctx->sample_rate) * (float)90 * (float)packetCounter);
           audioPacket.dts = audioPacket.pts-1;

           ret = write_frame(ofmt_ctx, &amp;enc_ctx->time_base, streamAudio, &amp;audioEncodedPacket);

           av_free_packet(&amp;audioEncodedPacket);

           ssize_t readVal = read(0, &amp;videoAudioThreadExit, 1); // read non-blocking

       }while(videoAudioThreadExit != 'q');

       syslog(LOG_INFO ,"End Audio Thread\n");
       return NULL;
    }






    int main(int argc, char** argv){
       int i=0;

       openlog ("TEST", LOG_CONS | LOG_PERROR | LOG_NDELAY, LOG_USER);
       syslog (LOG_INFO, "Syslog correctly loaded.\n");
       syslog (LOG_INFO, "Program started by user UID %d\n", getuid ());

       av_register_all();
       avdevice_register_all();
       avcodec_register_all();
       avfilter_register_all();

       printf("\n\n\t START GLOBAL INIT\n");
       initAudio();

       pthread_create(&amp;t[0], &amp;ctrl[0], (void*)audioThread, NULL);
       for(i=0;icode>