Recherche avancée

Médias (91)

Autres articles (41)

  • (Dés)Activation de fonctionnalités (plugins)

    18 février 2011, par

    Pour gérer l’ajout et la suppression de fonctionnalités supplémentaires (ou plugins), MediaSPIP utilise à partir de la version 0.2 SVP.
    SVP permet l’activation facile de plugins depuis l’espace de configuration de MediaSPIP.
    Pour y accéder, il suffit de se rendre dans l’espace de configuration puis de se rendre sur la page "Gestion des plugins".
    MediaSPIP est fourni par défaut avec l’ensemble des plugins dits "compatibles", ils ont été testés et intégrés afin de fonctionner parfaitement avec chaque (...)

  • Le plugin : Podcasts.

    14 juillet 2010, par

    Le problème du podcasting est à nouveau un problème révélateur de la normalisation des transports de données sur Internet.
    Deux formats intéressants existent : Celui développé par Apple, très axé sur l’utilisation d’iTunes dont la SPEC est ici ; Le format "Media RSS Module" qui est plus "libre" notamment soutenu par Yahoo et le logiciel Miro ;
    Types de fichiers supportés dans les flux
    Le format d’Apple n’autorise que les formats suivants dans ses flux : .mp3 audio/mpeg .m4a audio/x-m4a .mp4 (...)

  • Les autorisations surchargées par les plugins

    27 avril 2010, par

    Mediaspip core
    autoriser_auteur_modifier() afin que les visiteurs soient capables de modifier leurs informations sur la page d’auteurs

Sur d’autres sites (7156)

  • flv created using ffmpeg library plays too fast

    25 avril 2015, par Muhammad Ali

    I am muxing an h264 annex-b stream and an ADTS AAC stream coming from IP Camera into an FLV. I have gone through all the necessary things (that I knew of) e-g stripping ADTS header from AAC and converting H264 annex-b to AVC.

    I am able to create the flv file which plays but it plays fast. The params of my output format video codec are :-

    Time base = 1/60000   <-- I don't know why
    Bitrate = 591949 (591Kbps)
    GOP Size = 12
    FPS = 30 Fps (that's the rate encoder sends me data at)

    Params for output format audio codec are :-

    Timebase = 1/44100
    Bitrate = 45382 (45Kbps)
    Sample rate = 48000

    I am using NO_PTS for both audio and video.
    The resultant video has double the bit rate (2x(audio bitrate + vid bitrate)) and half the duration.
    If I play the resultant video in ffplay, the video plays back fast so it ends quickly, but the audio plays at its original speed. So even after the video has ended, the audio still plays to its full duration.

    If I set pts and dts equal to an increasing index (separate indices for audio and video) the video plays Super fast, bit rate shoots to an insane value and video duration gets very short but audio plays fine and on time.

    EDIT :

    Duration: 00:00:09.96, start: 0.000000, bitrate: 1230 kb/s
    Stream #0:0: Video: h264 (Main), yuvj420p(pc, bt709), 1280x720 [SAR 1:1 DAR 16:9], 591 kb/s, 30.33 fps, 59.94 tbr, 1k tbn, 59.94 tbc
    Stream #0:1: Audio: aac, 48000 Hz, mono, fltp, 45 kb/s

    Why is tbr 59.94? How was that calculated? Maybe that is the problem?

    Code for muxing :

// --- Packet dispatch: H.264 Annex-B video vs ADTS AAC audio ------------------
// Video path: split the Annex-B buffer into NAL units and mux each VCL NAL as
// a 4-byte length-prefixed (AVCC-style) packet, then `continue` (skips the
// common tail below).  Audio path: set up pkt and fall through to the tail.
// NOTE(review): pkt.pts/pkt.dts are forced to AV_NOPTS_VALUE on every packet,
// so the muxer must invent timestamps -- the most likely cause of the
// "plays too fast" / odd tbr symptoms described above. TODO: confirm by
// writing real PTS/DTS rescaled into the output stream's time_base.
if(packet.header.dataType == TRANSFER_PACKET_TYPE_H264)
         {

           // Require a 4-byte Annex-B start code (00 00 00 01) at the front.
           if((packet.data[0] == 0x00) && (packet.data[1] == 0x00) && (packet.data[2]==0x00) && (packet.data[3]==0x01))
           {

             unsigned char tempCurrFrameLength[4];  // big-endian AVCC length prefix
             unsigned int nal_unit_length;          // length of current NAL (appears to include the 4-byte start code, see the "-4" below)
             unsigned char nal_unit_type;
             unsigned int cursor = 0;               // byte offset into packet.data
             int size = packet.header.dataLen;

              // Walk every NAL unit in this access unit.
              do {

                   av_init_packet(&pkt);
                   int currFrameLength = 0;
                   if((packet.header.frameType == TRANSFER_FRAME_IDR_VIDEO) || (packet.header.frameType == TRANSFER_FRAME_I_VIDEO))
                   {
                     //pkt.flags        |= AV_PKT_FLAG_KEY;
                   }
                   pkt.stream_index  = packet.header.streamId;//0;//ost->st->index;     //stream index 0 for vid : 1 for aud
                   outStreamIndex = outputVideoStreamIndex;
                   /*vDuration += (packet.header.dataPTS - lastvPts);
                   lastvPts = packet.header.dataPTS;
                   pkt.pts = pkt.dts= packet.header.dataPTS;*/
                   pkt.pts = pkt.dts = AV_NOPTS_VALUE;

                   // Defensive: drop any frame buffer left over from a previous iteration.
                   if(framebuff != NULL)
                   {
                     //printf("Framebuff has mem alloc : freeing 1\n\n");
                     free(framebuff);
                     framebuff = NULL;
                     //printf("free successfully \n\n");
                   }


                   // Extract the next NAL unit starting at `cursor`.
                   // NOTE(review): GetOneNalUnit's exact contract is not visible here;
                   // usage below implies the returned length includes the start code.
                   nal_unit_length = GetOneNalUnit(&nal_unit_type, packet.data + cursor/*pData+cursor*/, size-cursor);
                   if(nal_unit_length > 0 && nal_unit_type > 0)
                   {

                   }
                   else
                   {
                     printf("Fatal error : nal unit lenth wrong \n\n");
                     exit(0);
                   }
                   // NOTE(review): set unconditionally, which makes the
                   // `if (!write_header_done)` check further down dead code.
                   write_header_done = 1;
                   //#define _USE_SPS_PPS    //comment this line to write everything on to the stream. SPS+PPSframeframe
                   #ifdef _USE_SPS_PPS
                   if (nal_unit_type == 0x07 /*NAL_SPS*/)
                   { // write sps


                     printf("Got SPS \n");
                     if (_sps == NULL)
                     {
                       // Payload length excludes the assumed 4-byte start code.
                       _sps_size = nal_unit_length -4;
                       _sps = new U8[_sps_size];
                       memcpy(_sps, packet.data+cursor+4, _sps_size); //exclude start code 0x00000001

                     }

                   }
                   else if (nal_unit_type == 0x08/*NAL_PPS*/)
                   { // write pps

                     printf("Got PPS \n");
                     if (_pps == NULL)
                     {
                       _pps_size = nal_unit_length -4;
                       _pps = new U8[_pps_size];
                       memcpy(_pps, packet.data+cursor+4, _pps_size); //exclude start code 0x00000001

                       //out_stream->codec->extradata
                       //ofmt_ctx->streams[outputVideoStreamIndex]->codec->extradata
                       // NOTE(review): extradata was allocated by libavformat; freeing it
                       // with plain free() while replacing it via av_mallocz() mixes
                       // allocators -- av_freep() is the matching call. TODO confirm.
                       free(ofmt_ctx->streams[outputVideoStreamIndex]->codec->extradata);
                       ofmt_ctx->streams[outputVideoStreamIndex]->codec->extradata = (uint8_t*)av_mallocz(_sps_size + _pps_size);

                       // extradata = SPS followed by PPS (no AVCDecoderConfigurationRecord header).
                       memcpy(ofmt_ctx->streams[outputVideoStreamIndex]->codec->extradata,_sps,_sps_size);
                       memcpy(ofmt_ctx->streams[outputVideoStreamIndex]->codec->extradata + _sps_size,_pps,_pps_size);
                       ret = avformat_write_header(ofmt_ctx, NULL);
                       if (ret < 0) {
                             //fprintf(stderr, "Error occurred when opening output file\n");
                             printf("Error occured when opening output \n");
                             exit(0);
                        }
                        write_header_done = 1;
                       printf("Done writing header \n");

                     }

                   }
                   //else
                   #endif  /*end _USE_SPS_PPS */
                   { //IDR Frame

                       videoPts++;
                       // Skip non-VCL NALs: SEI (0x06), AUD (0x09), SPS (0x07), PPS (0x08).
                       if( (nal_unit_type == 0x06) || (nal_unit_type == 0x09) || (nal_unit_type == 0x07) || (nal_unit_type == 0x08))
                       {
                         av_free_packet(&pkt);
                         cursor += nal_unit_length;
                         continue;
                       }

                     // NOTE(review): both operands of this || test 0x05; one is
                     // presumably meant to be a different NAL type (e.g. 0x01,
                     // non-IDR slice) -- as written the condition is redundant.
                     if( (nal_unit_type == 0x05) || (nal_unit_type == 0x05))
                     {
                       //videoPts++;
                     }
                     if ((nal_unit_type != 0x07) && (nal_unit_type != 0x08))
                     {

                       vDuration += (packet.header.dataPTS - lastvPts);
                       lastvPts = packet.header.dataPTS;
                       //pkt.pts = pkt.dts= packet.header.dataPTS;
                       // vDuration is accumulated but never written to pkt -> muxer guesses timing.
                       pkt.pts = pkt.dts= AV_NOPTS_VALUE;//videoPts;
                     }
                     else
                     {
                       //probably sps pps ... no need to transmit. free the packet
                       //av_free_packet(&pkt);
                       pkt.pts = pkt.dts = AV_NOPTS_VALUE;
                     }


                     // Build the big-endian 4-byte NAL length prefix (AVCC layout):
                     // payload length = NAL length minus the 4-byte start code.
                     currFrameLength  = nal_unit_length - 4;//packet.header.dataLen -4;
                     tempCurrFrameLength[3] = currFrameLength;
                     tempCurrFrameLength[2] = currFrameLength>>8;
                     tempCurrFrameLength[1] = currFrameLength>>16;
                     tempCurrFrameLength[0] = currFrameLength>>24;



                     // IDR slice -> mark the packet as a keyframe.
                     if(nal_unit_type == 0x05)
                     {  
                       pkt.flags        |= AV_PKT_FLAG_KEY;
                     }


                     // 4-byte length prefix + payload; total size == nal_unit_length.
                     framebuff = (unsigned char *)malloc(sizeof(unsigned char)* /*packet.header.dataLen*/nal_unit_length );
                     if(framebuff == NULL)
                     {
                       printf("Failed to allocate memory for frame \n\n ");
                       exit(0);
                     }


                     memcpy(framebuff, tempCurrFrameLength,0x04);
                     //memcpy(&framebuff[4], &packet.data[4] , currFrameLength);
                     //put_buffer(pData + cursor + 4, nal_unit_length - 4);// save ES data
                     memcpy(framebuff+4,packet.data + cursor + 4, currFrameLength );
                     pkt.data = framebuff;
                     pkt.size = nal_unit_length;//packet.header.dataLen ;

                     //printf("\nPrinting Frame| Size: %d | NALU Lenght: %d | NALU: %02x \n",pkt.size,nal_unit_length ,nal_unit_type);

                     /* GET READY TO TRANSMIT THE packet */
                     //pkt.duration = vDuration;
                     in_stream  = ifmt_ctx->streams[pkt.stream_index];
                     out_stream = ofmt_ctx->streams[outStreamIndex];
                     cn = out_stream->codec;
                     //av_packet_rescale_ts(&pkt, cn->time_base, out_stream->time_base);
                     //pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
                     //pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
                     //pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
                     pkt.pos = -1;
                     pkt.stream_index = outStreamIndex;

                      // NOTE(review): write_header_done is forced to 1 above, so the
                      // empty `if` branch here is dead and the else always runs.
                      if (!write_header_done)
                      {
                       }
                       else
                       {
                     //doxygen suggests i use av_write_frame if i am taking care of interleaving
                     ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
                     //ret = av_write_frame(ofmt_ctx, &pkt);
                     if (ret < 0)
                      {
                          fprintf(stderr, "Error muxing Video packet\n");
                          // NOTE(review): this `continue` skips the free(framebuff)
                          // below, but the defensive free at loop top reclaims it.
                          continue;
                      }
                      }

                     /*for(int ii = 0; ii < pkt.size ; ii++)
                       printf("%02x ",framebuff[ii]);*/

                      av_free_packet(&pkt);
                      if(framebuff != NULL)
                       {
                         //printf("Framebuff has mem alloc : freeing 2\n\n");
                         free(framebuff);
                         framebuff = NULL;
                         //printf("Freeing successfully \n\n");
                       }

                     /* TRANSMIT DONE */

                   }
                 cursor += nal_unit_length;
                 }while(cursor < size);


           }
           else
           {
             // Not Annex-B: dump the raw bytes for debugging and abort.
             printf("This is not annex B bitstream \n\n");
             for(int ii = 0; ii < packet.header.dataLen ; ii++)
               printf("%02x ",packet.data[ii]);
             printf("\n\n");
             exit(0);
           }

           //video frame has been parsed completely.
           // Skip the common tail below; video packets were already written.
           continue;

         }
       else if(packet.header.dataType == TRANSFER_PACKET_TYPE_AAC)
         {

           av_init_packet(&pkt);


           // NOTE(review): pkt.flags = 1 overwrites all flags with
           // AV_PKT_FLAG_KEY for every audio packet (also re-set in the tail).
           pkt.flags = 1;          
           // Provisional PTS/DTS in 1024-sample AAC frame units...
           pkt.pts = audioPts*1024;
           pkt.dts = audioPts*1024;
           //pkt.duration = 1024;    

           pkt.stream_index  = packet.header.streamId + 1;//1;//ost->st->index;     //stream index 0 for vid : 1 for aud
           outStreamIndex = outputAudioStreamIndex;
           //aDuration += (packet.header.dataPTS - lastaPts);
           //lastaPts = packet.header.dataPTS;

           //NOTE: audio sync requires this value
           // ...then immediately discarded here: the muxer is left to guess.
           pkt.pts = pkt.dts= AV_NOPTS_VALUE ;
           //pkt.pts = pkt.dts=audioPts++;
           pkt.data = (uint8_t *)packet.data;//raw_data;
           pkt.size          = packet.header.dataLen;
         }


       // --- Common mux tail -------------------------------------------------
       // Only reached for non-video packets (the H.264 branch `continue`s):
       // strips the ADTS header from AAC via the bitstream filter, then writes
       // the packet interleaved into the output context.
       //packet.header.streamId
       //now assigning pkt.data in respective if statements above
       //pkt.data          = (uint8_t *)packet.data;//raw_data;
       //pkt.size          = packet.header.dataLen;


       //pkt.duration = 24000;      //24000 assumed basd on observation

       //duration calculation
       /*if(packet.header.dataType == TRANSFER_PACKET_TYPE_H264)
         {
           pkt.duration = vDuration;

         }
       else*/ if(packet.header.dataType == TRANSFER_PACKET_TYPE_AAC)
         {
           //pkt.duration = aDuration;
         }

       in_stream  = ifmt_ctx->streams[pkt.stream_index];
       out_stream = ofmt_ctx->streams[outStreamIndex];

       cn = out_stream->codec;

       // Strip the ADTS header (FLV carries raw AAC).
       // NOTE(review): the filter may allocate a replacement pkt.data buffer
       // that is never av_free'd after writing -> per-packet leak. TODO confirm
       // against av_bitstream_filter_filter's documented ownership rules.
       if(packet.header.dataType == TRANSFER_PACKET_TYPE_AAC)
        ret= av_bitstream_filter_filter(aacbsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, packet.data/*pkt.data*/, packet.header.dataLen, pkt.flags & AV_PKT_FLAG_KEY);

         // NOTE(review): for any non-AAC packet type that reaches here, `ret`
         // is stale from an earlier call, so this check can abort spuriously.
         if(ret < 0)
         {
           printf("Failed to execute aac bitstream filter \n\n");
           exit(0);
         }

       //if(packet.header.dataType == TRANSFER_PACKET_TYPE_H264)
       // av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, packet.data/*pkt.data*/, pkt.size, 0);

         // NOTE(review): overwrites flags with AV_PKT_FLAG_KEY unconditionally.
         pkt.flags = 1;              


          //NOTE : Commented the lines below synced audio and video streams

         //av_packet_rescale_ts(&pkt, cn->time_base, out_stream->time_base);

         //pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);

         //pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);


         //pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);



       //enabled on Tuesday
       pkt.pos = -1;

       pkt.stream_index = outStreamIndex;

       //doxygen suggests i use av_write_frame if i am taking care of interleaving
       ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
       //ret = av_write_frame(ofmt_ctx, &pkt);
       if (ret < 0)
        {
            fprintf(stderr, "Error muxing packet\n");
            continue;
        }

      av_free_packet(&pkt);
      // Defensive: framebuff is only used by the video path, which already
      // frees it before `continue`, so this is normally a no-op here.
      if(framebuff != NULL)
       {
         //printf("Framebuff has mem alloc : freeing 2\n\n");
         free(framebuff);
         framebuff = NULL;
         //printf("Freeing successfully \n\n");
       }
     }
  • Video Recording on Android from Frames using javaCV

    23 juin 2016, par jawad bin zafar

    I am using the javaCV library to record video on Android. They have provided a sample VideoRecording Activity, but there is a bug I could not figure out — I don't know what I am doing wrong or what is missing that causes it.

    package org.bytedeco.javacv.recordactivity;

    import android.app.Activity;
    import android.content.Context;
    import android.content.pm.ActivityInfo;
    import android.hardware.Camera;
    import android.hardware.Camera.PreviewCallback;
    import android.media.AudioFormat;
    import android.media.AudioRecord;
    import android.media.MediaRecorder;
    import android.os.Bundle;
    import android.os.PowerManager;
    import android.util.Log;
    import android.view.Display;
    import android.view.KeyEvent;
    import android.view.LayoutInflater;
    import android.view.SurfaceHolder;
    import android.view.SurfaceView;
    import android.view.View;
    import android.view.View.OnClickListener;
    import android.view.WindowManager;
    import android.widget.Button;
    import android.widget.LinearLayout;
    import android.widget.RelativeLayout;

    import java.io.IOException;
    import java.nio.ShortBuffer;

    import org.bytedeco.javacv.FFmpegFrameRecorder;

    import static org.bytedeco.javacpp.opencv_core.*;

/**
 * javaCV sample recording activity: grabs camera preview frames and microphone
 * audio and muxes them into an FLV file via FFmpegFrameRecorder.
 * When RECORD_LENGTH > 0, frames/samples are kept in in-memory ring buffers
 * and only written out when recording stops (continuous-loop mode).
 */
public class RecordActivity extends Activity implements OnClickListener {

   private final static String CLASS_LABEL = "RecordActivity";
   private final static String LOG_TAG = CLASS_LABEL;

   // Keeps the screen bright while the activity is in the foreground.
   private PowerManager.WakeLock mWakeLock;

   // Output path of the muxed FLV file.
   private String ffmpeg_link = "/mnt/sdcard/stream.flv";

   long startTime = 0;
   boolean recording = false;

   // volatile: written by the UI thread, read by the audio thread.
   private volatile FFmpegFrameRecorder recorder;

   private boolean isPreviewOn = false;

   private int sampleAudioRateInHz = 44100;
   private int imageWidth = 320;
   private int imageHeight = 240;
   private int frameRate = 30;

   /* audio data getting thread */
   private AudioRecord audioRecord;
   private AudioRecordRunnable audioRecordRunnable;
   private Thread audioThread;
   volatile boolean runAudioThread = true;

   /* video data getting thread */
   private Camera cameraDevice;
   private CameraView cameraView;

   // Scratch image used only when RECORD_LENGTH == 0 (direct-record mode).
   private IplImage yuvIplimage = null;

   /* layout setting */
   // Fixed design-time geometry scaled against the actual screen in initLayout().
   private final int bg_screen_bx = 232;
   private final int bg_screen_by = 128;
   private final int bg_screen_width = 700;
   private final int bg_screen_height = 500;
   private final int bg_width = 1123;
   private final int bg_height = 715;
   private final int live_width = 640;
   private final int live_height = 480;
   private int screenWidth, screenHeight;
   private Button btnRecorderControl;

   /** The number of seconds in the continuous record loop (or 0 to disable loop). */
   final int RECORD_LENGTH = 10;
   // Ring buffers used when RECORD_LENGTH > 0; indices count total items ever
   // produced (wrap-around is done with % length at access time).
   IplImage[] images;
   long[] timestamps;
   ShortBuffer[] samples;
   int imagesIndex, samplesIndex;
       @Override
       public void onCreate(Bundle savedInstanceState) {
           super.onCreate(savedInstanceState);
           setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_LANDSCAPE);

           setContentView(R.layout.main);

           PowerManager pm = (PowerManager) getSystemService(Context.POWER_SERVICE);
           mWakeLock = pm.newWakeLock(PowerManager.SCREEN_BRIGHT_WAKE_LOCK, CLASS_LABEL);
           mWakeLock.acquire();

           initLayout();
       }


       @Override
       protected void onResume() {
           super.onResume();

           if (mWakeLock == null) {
              PowerManager pm = (PowerManager) getSystemService(Context.POWER_SERVICE);
              mWakeLock = pm.newWakeLock(PowerManager.SCREEN_BRIGHT_WAKE_LOCK, CLASS_LABEL);
              mWakeLock.acquire();
           }
       }

       @Override
       protected void onPause() {
           super.onPause();

           if (mWakeLock != null) {
               mWakeLock.release();
               mWakeLock = null;
           }
       }

       @Override
       protected void onDestroy() {
           super.onDestroy();

           recording = false;

           if (cameraView != null) {
               cameraView.stopPreview();
           }

           if(cameraDevice != null) {
              cameraDevice.stopPreview();
              cameraDevice.release();
              cameraDevice = null;
           }

           if (mWakeLock != null) {
               mWakeLock.release();
               mWakeLock = null;
           }
       }


   /**
    * Builds the UI programmatically: a full-screen RelativeLayout hosting the
    * inflated R.layout.main plus a CameraView positioned/scaled from the
    * fixed design-time geometry constants declared above.
    * NOTE(review): setContentView is called twice (once in onCreate with
    * R.layout.main, again here with topLayout); the second call wins.
    */
   private void initLayout() {

       /* get size of screen */
       Display display = ((WindowManager) getSystemService(Context.WINDOW_SERVICE)).getDefaultDisplay();
       screenWidth = display.getWidth();
       screenHeight = display.getHeight();
       RelativeLayout.LayoutParams layoutParam = null;
       LayoutInflater myInflate = null;
       myInflate = (LayoutInflater) getSystemService(Context.LAYOUT_INFLATER_SERVICE);
       RelativeLayout topLayout = new RelativeLayout(this);
       setContentView(topLayout);
       LinearLayout preViewLayout = (LinearLayout) myInflate.inflate(R.layout.main, null);
       layoutParam = new RelativeLayout.LayoutParams(screenWidth, screenHeight);
       topLayout.addView(preViewLayout, layoutParam);

       /* add control button: start and stop */
       btnRecorderControl = (Button) findViewById(R.id.recorder_control);
       btnRecorderControl.setText("Start");
       btnRecorderControl.setOnClickListener(this);

       /* add camera view */
       // Scale the design-time preview rectangle to this screen, preserving
       // the live_width:live_height aspect ratio.
       int display_width_d = (int) (1.0 * bg_screen_width * screenWidth / bg_width);
       int display_height_d = (int) (1.0 * bg_screen_height * screenHeight / bg_height);
       int prev_rw, prev_rh;
       if (1.0 * display_width_d / display_height_d > 1.0 * live_width / live_height) {
           // Too wide: clamp height, derive width from the aspect ratio.
           prev_rh = display_height_d;
           prev_rw = (int) (1.0 * display_height_d * live_width / live_height);
       } else {
           // Too tall: clamp width, derive height.
           prev_rw = display_width_d;
           prev_rh = (int) (1.0 * display_width_d * live_height / live_width);
       }
       layoutParam = new RelativeLayout.LayoutParams(prev_rw, prev_rh);
       layoutParam.topMargin = (int) (1.0 * bg_screen_by * screenHeight / bg_height);
       layoutParam.leftMargin = (int) (1.0 * bg_screen_bx * screenWidth / bg_width);

       // Open the default (back) camera and attach the preview view last so
       // the surface callbacks fire with the camera already available.
       cameraDevice = Camera.open();
       Log.i(LOG_TAG, "cameara open");
       cameraView = new CameraView(this, cameraDevice);
       topLayout.addView(cameraView, layoutParam);
       Log.i(LOG_TAG, "cameara preview start: OK");
   }

       //---------------------------------------
       // initialize ffmpeg_recorder
       //---------------------------------------
       private void initRecorder() {

           Log.w(LOG_TAG,"init recorder");

           if (RECORD_LENGTH > 0) {
               imagesIndex = 0;
               images = new IplImage[RECORD_LENGTH * frameRate];
               timestamps = new long[images.length];
               for (int i = 0; i < images.length; i++) {
                   images[i] = IplImage.create(imageWidth, imageHeight, IPL_DEPTH_8U, 2);
                   timestamps[i] = -1;
               }
           } else if (yuvIplimage == null) {
               yuvIplimage = IplImage.create(imageWidth, imageHeight, IPL_DEPTH_8U, 2);
               Log.i(LOG_TAG, "create yuvIplimage");
           }

           Log.i(LOG_TAG, "ffmpeg_url: " + ffmpeg_link);
           recorder = new FFmpegFrameRecorder(ffmpeg_link, imageWidth, imageHeight, 1);
           recorder.setFormat("flv");
           recorder.setSampleRate(sampleAudioRateInHz);
           // Set in the surface changed method
           recorder.setFrameRate(frameRate);

           Log.i(LOG_TAG, "recorder initialize success");

           audioRecordRunnable = new AudioRecordRunnable();
           audioThread = new Thread(audioRecordRunnable);
           runAudioThread = true;
       }

       public void startRecording() {

           initRecorder();

           try {
               recorder.start();
               startTime = System.currentTimeMillis();
               recording = true;
               audioThread.start();

           } catch (FFmpegFrameRecorder.Exception e) {
               e.printStackTrace();
           }
       }

       public void stopRecording() {

           runAudioThread = false;
           try {
               audioThread.join();
           } catch (InterruptedException e) {
               e.printStackTrace();
           }
           audioRecordRunnable = null;
           audioThread = null;

           if (recorder != null && recording) {
               if (RECORD_LENGTH > 0) {
                   Log.v(LOG_TAG,"Writing frames");
                   try {
                       int firstIndex = imagesIndex % samples.length;
                       int lastIndex = (imagesIndex - 1) % images.length;
                       if (imagesIndex <= images.length) {
                           firstIndex = 0;
                           lastIndex = imagesIndex - 1;
                       }
                       if ((startTime = timestamps[lastIndex] - RECORD_LENGTH * 1000000L) < 0) {
                           startTime = 0;
                       }
                       if (lastIndex < firstIndex) {
                           lastIndex += images.length;
                       }
                       for (int i = firstIndex; i <= lastIndex; i++) {
                           long t = timestamps[i % timestamps.length] - startTime;
                           if (t >= 0) {
                               if (t > recorder.getTimestamp()) {
                                   recorder.setTimestamp(t);
                               }
                               recorder.record(images[i % images.length]);
                           }
                       }

                       firstIndex = samplesIndex % samples.length;
                       lastIndex = (samplesIndex - 1) % samples.length;
                       if (samplesIndex <= samples.length) {
                           firstIndex = 0;
                           lastIndex = samplesIndex - 1;
                       }
                       if (lastIndex < firstIndex) {
                           lastIndex += samples.length;
                       }
                       for (int i = firstIndex; i <= lastIndex; i++) {
                           recorder.record(samples[i % samples.length]);
                       }
                   } catch (FFmpegFrameRecorder.Exception e) {
                       Log.v(LOG_TAG,e.getMessage());
                       e.printStackTrace();
                   }
               }

               recording = false;
               Log.v(LOG_TAG,"Finishing recording, calling stop and release on recorder");
               try {
                   recorder.stop();
                   recorder.release();
               } catch (FFmpegFrameRecorder.Exception e) {
                   e.printStackTrace();
               }
               recorder = null;

           }
       }

       @Override
       public boolean onKeyDown(int keyCode, KeyEvent event) {

           if (keyCode == KeyEvent.KEYCODE_BACK) {
               if (recording) {
                   stopRecording();
               }

               finish();

               return true;
           }

           return super.onKeyDown(keyCode, event);
       }


       //---------------------------------------------
       // audio thread, gets and encodes audio data
       //---------------------------------------------
       class AudioRecordRunnable implements Runnable {

           @Override
           public void run() {
               android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);

               // Audio
               int bufferSize;
               ShortBuffer audioData;
               int bufferReadResult;

               bufferSize = AudioRecord.getMinBufferSize(sampleAudioRateInHz,
                       AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
               audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleAudioRateInHz,
                       AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, bufferSize);

               if (RECORD_LENGTH > 0) {
                   samplesIndex = 0;
                   samples = new ShortBuffer[RECORD_LENGTH * sampleAudioRateInHz * 2 / bufferSize + 1];
                   for (int i = 0; i < samples.length; i++) {
                       samples[i] = ShortBuffer.allocate(bufferSize);
                   }
               } else {
                   audioData = ShortBuffer.allocate(bufferSize);
               }

               Log.d(LOG_TAG, "audioRecord.startRecording()");
               audioRecord.startRecording();

               /* ffmpeg_audio encoding loop */
               while (runAudioThread) {
                   if (RECORD_LENGTH > 0) {
                       audioData = samples[samplesIndex++ % samples.length];
                       audioData.position(0).limit(0);
                   }
                   //Log.v(LOG_TAG,"recording? " + recording);
                   bufferReadResult = audioRecord.read(audioData.array(), 0, audioData.capacity());
                   audioData.limit(bufferReadResult);
                   if (bufferReadResult > 0) {
                       Log.v(LOG_TAG,"bufferReadResult: " + bufferReadResult);
                       // If "recording" isn't true when start this thread, it never get's set according to this if statement...!!!
                       // Why?  Good question...
                       if (recording) {
                           if (RECORD_LENGTH <= 0) try {
                               recorder.record(audioData);
                               //Log.v(LOG_TAG,"recording " + 1024*i + " to " + 1024*i+1024);
                           } catch (FFmpegFrameRecorder.Exception e) {
                               Log.v(LOG_TAG,e.getMessage());
                               e.printStackTrace();
                           }
                       }
                   }
               }
               Log.v(LOG_TAG,"AudioThread Finished, release audioRecord");

               /* encoding finish, release recorder */
               if (audioRecord != null) {
                   audioRecord.stop();
                   audioRecord.release();
                   audioRecord = null;
                   Log.v(LOG_TAG,"audioRecord released");
               }
           }
       }

       //---------------------------------------------
       // camera thread, gets and encodes video data
       //---------------------------------------------
       class CameraView extends SurfaceView implements SurfaceHolder.Callback, PreviewCallback {

           private SurfaceHolder mHolder;
           private Camera mCamera;

           public CameraView(Context context, Camera camera) {
               super(context);
               Log.w("camera","camera view");
               mCamera = camera;
               mHolder = getHolder();
               mHolder.addCallback(CameraView.this);
               mHolder.setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
               mCamera.setPreviewCallback(CameraView.this);
           }

           @Override
           public void surfaceCreated(SurfaceHolder holder) {
               try {
                   stopPreview();
                   mCamera.setPreviewDisplay(holder);
               } catch (IOException exception) {
                   mCamera.release();
                   mCamera = null;
               }
           }

           public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
               Log.v(LOG_TAG,"Setting imageWidth: " + imageWidth + " imageHeight: " + imageHeight + " frameRate: " + frameRate);
               Camera.Parameters camParams = mCamera.getParameters();
               camParams.setPreviewSize(imageWidth, imageHeight);

               Log.v(LOG_TAG,"Preview Framerate: " + camParams.getPreviewFrameRate());

               camParams.setPreviewFrameRate(frameRate);
               mCamera.setParameters(camParams);
               startPreview();
           }

           @Override
           public void surfaceDestroyed(SurfaceHolder holder) {
               try {
                   mHolder.addCallback(null);
                   mCamera.setPreviewCallback(null);
               } catch (RuntimeException e) {
                   // The camera has probably just been released, ignore.
               }
           }

           public void startPreview() {
               if (!isPreviewOn && mCamera != null) {
                   isPreviewOn = true;
                   mCamera.startPreview();
               }
           }

           public void stopPreview() {
               if (isPreviewOn && mCamera != null) {
                   isPreviewOn = false;
                   mCamera.stopPreview();
               }
           }

           @Override
           public void onPreviewFrame(byte[] data, Camera camera) {
               if (audioRecord == null || audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
                   startTime = System.currentTimeMillis();
                   return;
               }
               if (RECORD_LENGTH > 0) {
                   int i = imagesIndex++ % images.length;
                   yuvIplimage = images[i];
                   timestamps[i] = 1000 * (System.currentTimeMillis() - startTime);
               }
               /* get video data */
               if (yuvIplimage != null && recording) {
                   yuvIplimage.getByteBuffer().put(data);

                   if (RECORD_LENGTH <= 0) try {
                       Log.v(LOG_TAG,"Writing Frame");
                       long t = 1000 * (System.currentTimeMillis() - startTime);
                       if (t > recorder.getTimestamp()) {
                           recorder.setTimestamp(t);
                       }
                       recorder.record(yuvIplimage);
                   } catch (FFmpegFrameRecorder.Exception e) {
                       Log.v(LOG_TAG,e.getMessage());
                       e.printStackTrace();
                   }
               }
           }
       }

       @Override
       public void onClick(View v) {
           if (!recording) {
               startRecording();
               Log.w(LOG_TAG, "Start Button Pushed");
               btnRecorderControl.setText("Stop");
           } else {
               // This will trigger the audio recording loop to stop and then set isRecorderStart = false;
               stopRecording();
               Log.w(LOG_TAG, "Stop Button Pushed");
               btnRecorderControl.setText("Start");
           }
       }
    }

    I am getting this error

    The type org.bytedeco.javacpp.avutil$AVFrame cannot be resolved. It is indirectly referenced from required .class files

    at following line in the above code

    if (RECORD_LENGTH <= 0) try {
       recorder.record(audioData);
       //Log.v(LOG_TAG,"recording " + 1024*i + " to " + 1024*i+1024);
    }

    at recorder.record(audioData). I don’t know what I am doing wrong here. New to JavaCV. Any help will be appreciated.

  • libavcodec : how to encode with h264 codec ,with mp4 container using controllable frame rate and bitrate(through c code)

    26 mai 2016, par musimbate

    I am trying to record the screen of a PC and encode the recorded frames with the h264 encoder,
    wrapping them into an mp4 container. I want to do this because this Super User link http://superuser.com/questions/300897/what-is-a-codec-e-g-divx-and-how-does-it-differ-from-a-file-format-e-g-mp/300997#300997 suggests it allows a good trade-off between size and quality of the output file.

    The application I am working on should allow users to record a few hours of video and have the minimum output file size with decent quality.

    The code I have cooked up so far allows me to record and save .mpg(container) files with the mpeg1video encoder

    Running :

    ffmpeg -i test.mpg

    on the output file gives the following output :

    [mpegvideo @ 028c7400] Estimating duration from bitrate, this may be inaccurate
    Input #0, mpegvideo, from 'test.mpg':
     Duration: 00:00:00.29, bitrate: 104857 kb/s
       Stream #0:0: Video: mpeg1video, yuv420p(tv), 1366x768 [SAR 1:1 DAR 683:384], 104857 kb/s, 25 fps, 25 tbr, 1200k tbn, 25 tbc

    I have these settings for my output :

    const char * filename="test.mpg";
       int codec_id= AV_CODEC_ID_MPEG1VIDEO;
       AVCodec *codec11;
       AVCodecContext *outContext= NULL;
       int got_output;
       FILE *f;
       AVPacket pkt;
       uint8_t endcode[] = { 0, 0, 1, 0xb7 };

       /* put sample parameters */
       outContext->bit_rate = 400000;
       /* resolution must be a multiple of two */
       outContext->width=pCodecCtx->width;
       outContext->height=pCodecCtx->height;
       /* frames per second */
       outContext->time_base.num=1;
       outContext->time_base.den=25;
       /* emit one intra frame every ten frames
        * check frame pict_type before passing frame
        * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
        * then gop_size is ignored and the output of encoder
        * will always be I frame irrespective to gop_size
        */
       outContext->gop_size = 10;
       outContext->max_b_frames = 1;
       outContext->pix_fmt = AV_PIX_FMT_YUV420P;

    When I change int codec_id= AV_CODEC_ID_MPEG1VIDEO to int codec_id= AV_CODEC_ID_H264, I get a file that does not play with VLC.

    I have read that writing the

    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    array at the end of your file when finished encoding makes your file a legitimate mpeg file.It is written like this :

    fwrite(endcode, 1, sizeof(endcode), f);
       fclose(f);

    in my code. Should I do the same thing when I change my encoder to AV_CODEC_ID_H264 ?

    I am capturing using gdi input like this :

    AVDictionary* options = NULL;
       //Set some options
       //grabbing frame rate
       av_dict_set(&options,"framerate","30",0);
       AVInputFormat *ifmt=av_find_input_format("gdigrab");
       if(avformat_open_input(&pFormatCtx,"desktop",ifmt,&options)!=0){
           printf("Couldn't open input stream.\n");
           return -1;
           }

    I want to be able to modify my grabbing rate to optimize for the output file size,
    but when I change it to 20, for example, I get a video that plays too fast. How do
    I get a video that plays at normal speed with frames captured at 20 fps or any
    lower frame rate value?

    While recording I get the following output on the standard error output :

    [gdigrab @ 00cdb8e0] Capturing whole desktop as 1366x768x32 at (0,0)
    Input #0, gdigrab, from '(null)':
     Duration: N/A, start: 1420718663.655713, bitrate: 1006131 kb/s
       Stream #0:0: Video: bmp, bgra, 1366x768, 1006131 kb/s, 29.97 tbr, 1000k tbn, 29.97 tbc
    [swscaler @ 00d24120] Warning: data is not aligned! This can lead to a speedloss
    [mpeg1video @ 00cdd160] AVFrame.format is not set
    [mpeg1video @ 00cdd160] AVFrame.width or height is not set
    [mpeg1video @ 00cdd160] AVFrame.format is not set
    [mpeg1video @ 00cdd160] AVFrame.width or height is not set
    [mpeg1video @ 00cdd160] AVFrame.format is not set

    How do I get rid of this error in my code ?

    In summary :
    1) How do I encode h264 video wrapped into mp4 container ?

    2) How do I capture at lower frame rates and still play
    the encoded video at normal speed ?

    3) How do I set the format(and which format—depends on the codec ?)
    and width and height info on the frames I write ?

    The code I am using in its entirety is shown below

    extern "C"
    {
    #include "libavcodec/avcodec.h"
    #include "libavformat/avformat.h"
    #include "libswscale/swscale.h"
    #include "libavdevice/avdevice.h"


    #include <libavutil></libavutil>opt.h>
    #include <libavutil></libavutil>channel_layout.h>
    #include <libavutil></libavutil>common.h>
    #include <libavutil></libavutil>imgutils.h>
    #include <libavutil></libavutil>mathematics.h>
    #include <libavutil></libavutil>samplefmt.h>
    //SDL
    #include "SDL.h"
    #include "SDL_thread.h"
    }

    //Output YUV420P
    #define OUTPUT_YUV420P 0
    //'1' Use Dshow
    //'0' Use GDIgrab
    #define USE_DSHOW 0

    int main(int argc, char* argv[])
    {

       //1.WE HAVE THE FORMAT CONTEXT
       //THIS IS FROM THE DESKTOP GRAB STREAM.
       AVFormatContext *pFormatCtx;
       int             i, videoindex;
       AVCodecContext  *pCodecCtx;
       AVCodec         *pCodec;

       av_register_all();
       avformat_network_init();

       //ASSIGN STH TO THE FORMAT CONTEXT.
       pFormatCtx = avformat_alloc_context();

       //Register Device
       avdevice_register_all();
       //Windows
    #ifdef _WIN32
    #if USE_DSHOW
       //Use dshow
       //
       //Need to Install screen-capture-recorder
       //screen-capture-recorder
       //Website: http://sourceforge.net/projects/screencapturer/
       //
       AVInputFormat *ifmt=av_find_input_format("dshow");
       //if(avformat_open_input(&amp;pFormatCtx,"video=screen-capture-recorder",ifmt,NULL)!=0){
       if(avformat_open_input(&amp;pFormatCtx,"video=UScreenCapture",ifmt,NULL)!=0){
           printf("Couldn't open input stream.\n");
           return -1;
       }
    #else
       //Use gdigrab
       AVDictionary* options = NULL;
       //Set some options
       //grabbing frame rate
       av_dict_set(&amp;options,"framerate","30",0);
       //The distance from the left edge of the screen or desktop
       //av_dict_set(&amp;options,"offset_x","20",0);
       //The distance from the top edge of the screen or desktop
       //av_dict_set(&amp;options,"offset_y","40",0);
       //Video frame size. The default is to capture the full screen
       //av_dict_set(&amp;options,"video_size","640x480",0);
       AVInputFormat *ifmt=av_find_input_format("gdigrab");
       if(avformat_open_input(&amp;pFormatCtx,"desktop",ifmt,&amp;options)!=0){
           printf("Couldn't open input stream.\n");
           return -1;
       }

    #endif
    #endif//FOR THE WIN32 THING.

       if(avformat_find_stream_info(pFormatCtx,NULL)&lt;0)
       {
           printf("Couldn't find stream information.\n");
           return -1;
       }
       videoindex=-1;
       for(i=0; inb_streams; i++)
           if(pFormatCtx->streams[i]->codec->codec_type
                   ==AVMEDIA_TYPE_VIDEO)
           {
               videoindex=i;
               break;
           }
       if(videoindex==-1)
       {
           printf("Didn't find a video stream.\n");
           return -1;
       }
       pCodecCtx=pFormatCtx->streams[videoindex]->codec;
       pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
       if(pCodec==NULL)
       {
           printf("Codec not found.\n");
           return -1;
       }
       if(avcodec_open2(pCodecCtx, pCodec,NULL)&lt;0)
       {
           printf("Could not open codec.\n");
           return -1;
       }

       //THIS IS WHERE YOU CONTROL THE FORMAT(THROUGH FRAMES).
       AVFrame *pFrame;

       pFrame=av_frame_alloc();

       int ret, got_picture;

       AVPacket *packet=(AVPacket *)av_malloc(sizeof(AVPacket));

       //TRY TO INIT THE PACKET HERE
        av_init_packet(packet);


       //Output Information-----------------------------
       printf("File Information---------------------\n");
       av_dump_format(pFormatCtx,0,NULL,0);
       printf("-------------------------------------------------\n");


    //&lt;&lt;--FOR WRITING MPG FILES
       //&lt;&lt;--START:PREPARE TO WRITE YOUR MPG FILE.

       const char * filename="test.mpg";
       int codec_id= AV_CODEC_ID_MPEG1VIDEO;



       AVCodec *codec11;
       AVCodecContext *outContext= NULL;
       int got_output;
       FILE *f;
       AVPacket pkt;
       uint8_t endcode[] = { 0, 0, 1, 0xb7 };

       printf("Encode video file %s\n", filename);

       /* find the mpeg1 video encoder */
       codec11 = avcodec_find_encoder((AVCodecID)codec_id);
       if (!codec11) {
           fprintf(stderr, "Codec not found\n");
           exit(1);
       }

       outContext = avcodec_alloc_context3(codec11);
       if (!outContext) {
           fprintf(stderr, "Could not allocate video codec context\n");
           exit(1);
       }

       /* put sample parameters */
       outContext->bit_rate = 400000;
       /* resolution must be a multiple of two */

       outContext->width=pCodecCtx->width;
       outContext->height=pCodecCtx->height;


       /* frames per second */
       outContext->time_base.num=1;
       outContext->time_base.den=25;

       /* emit one intra frame every ten frames
        * check frame pict_type before passing frame
        * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
        * then gop_size is ignored and the output of encoder
        * will always be I frame irrespective to gop_size
        */
       outContext->gop_size = 10;
       outContext->max_b_frames = 1;
       outContext->pix_fmt = AV_PIX_FMT_YUV420P;

       if (codec_id == AV_CODEC_ID_H264)
           av_opt_set(outContext->priv_data, "preset", "slow", 0);

       /* open it */
       if (avcodec_open2(outContext, codec11, NULL) &lt; 0) {
           fprintf(stderr, "Could not open codec\n");
           exit(1);
       }

       f = fopen(filename, "wb");
       if (!f) {
           fprintf(stderr, "Could not open %s\n", filename);
           exit(1);
       }


       AVFrame *outframe = av_frame_alloc();
       int nbytes = avpicture_get_size(outContext->pix_fmt,
                                      outContext->width,
                                      outContext->height);

       uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes);

      //ASSOCIATE THE FRAME TO THE ALLOCATED BUFFER.
       avpicture_fill((AVPicture*)outframe, outbuffer,
                      AV_PIX_FMT_YUV420P,
                      outContext->width, outContext->height);

       SwsContext* swsCtx_ ;
       swsCtx_= sws_getContext(pCodecCtx->width,
                               pCodecCtx->height,
                               pCodecCtx->pix_fmt,
                               outContext->width, outContext->height,
                               outContext->pix_fmt,
                               SWS_BICUBIC, NULL, NULL, NULL);


       //HERE WE START PULLING PACKETS FROM THE SPECIFIED FORMAT CONTEXT.
       while(av_read_frame(pFormatCtx, packet)>=0)
       {
           if(packet->stream_index==videoindex)
           {
               ret= avcodec_decode_video2(pCodecCtx,
                                            pFrame,
                                            &amp;got_picture,packet );
               if(ret &lt; 0)
               {
                   printf("Decode Error.\n");
                   return -1;
               }
               if(got_picture)
               {

               sws_scale(swsCtx_, pFrame->data, pFrame->linesize,
                     0, pCodecCtx->height, outframe->data,
                     outframe->linesize);


               av_init_packet(&amp;pkt);
               pkt.data = NULL;    // packet data will be allocated by the encoder
               pkt.size = 0;


               ret = avcodec_encode_video2(outContext, &amp;pkt, outframe, &amp;got_output);
               if (ret &lt; 0) {
                  fprintf(stderr, "Error encoding frame\n");
                  exit(1);
                 }

               if (got_output) {
                   printf("Write frame %3d (size=%5d)\n", i, pkt.size);
                   fwrite(pkt.data, 1, pkt.size, f);
                   av_free_packet(&amp;pkt);
                  }

               }
           }

           av_free_packet(packet);
       }//THE LOOP TO PULL PACKETS FROM THE FORMAT CONTEXT ENDS HERE.



       //
       /* get the delayed frames */
       for (got_output = 1; got_output; i++) {
           //fflush(stdout);

           ret = avcodec_encode_video2(outContext, &amp;pkt, NULL, &amp;got_output);
           if (ret &lt; 0) {
               fprintf(stderr, "Error encoding frame\n");
               exit(1);
           }

           if (got_output) {
               printf("Write frame %3d (size=%5d)\n", i, pkt.size);
               fwrite(pkt.data, 1, pkt.size, f);
               av_free_packet(&amp;pkt);
           }
       }



       /* add sequence end code to have a real mpeg file */
       fwrite(endcode, 1, sizeof(endcode), f);
       fclose(f);

       avcodec_close(outContext);
       av_free(outContext);
       //av_freep(&amp;frame->data[0]);
       //av_frame_free(&amp;frame);

       //THIS WAS ADDED LATER
       av_free(outbuffer);

       avcodec_close(pCodecCtx);
       avformat_close_input(&amp;pFormatCtx);

       return 0;
    }

    Thank you for your time.