Other articles (104)

  • Gestion des droits de création et d’édition des objets

    8 February 2011, by

    By default, many features are restricted to administrators, but each remains independently configurable so that the minimum status required to use it can be changed, notably: writing content on the site, adjustable in the form template management; adding notes to articles; adding captions and annotations to images; (...)

  • Supporting all media types

    13 April 2011, by

    Unlike most software and media-sharing platforms, MediaSPIP aims to manage as many different media types as possible. The following are just a few examples from an ever-expanding list of supported formats: images: png, gif, jpg, bmp and more; audio: MP3, Ogg, Wav and more; video: AVI, MP4, OGV, mpg, mov, wmv and more; text, code and other data: OpenOffice, Microsoft Office (Word, PowerPoint, Excel), web (HTML, CSS), LaTeX, Google Earth and (...)

  • Keeping control of your media in your hands

    13 April 2011, by

    The vocabulary used on this site, and around MediaSPIP in general, aims to avoid references to Web 2.0 and the companies that profit from media-sharing.
    While using MediaSPIP, you are invited to avoid using words like "Brand", "Cloud" and "Market".
    MediaSPIP is designed to facilitate the sharing of creative media online, while allowing authors to retain complete control of their work.
    MediaSPIP aims to be accessible to as many people as possible and development is based on expanding the (...)

On other sites (11162)

  • ffmpeg C++/CLI wrapper for use in C#: AccessViolationException after calling a DLL function through its pointer

    25 July 2015, by skynet_v

    My goal is to write a C++/CLI wrapper around the ffmpeg library, importing the ffmpeg functions from its DLL modules.
    Later I will use this interface from C#.
    This is my challenge, don't ask me why))

    So I've implemented the Wrap class, which is listed below:

    // Headers this class relies on: Win32 for HMODULE/GetProcAddress,
    // ffmpeg for the AV* types.
    #include <windows.h>
    extern "C" {
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libswscale/swscale.h>
    }

    namespace FFMpegWrapLib
    {
       public class Wrap
       {
       private:

       public:
           //wstring libavcodecDllName = "avcodec-56.dll";
           //wstring libavformatDllName = "avformat-56.dll";
           //wstring libswscaleDllName = "swscale-3.dll";
           //wstring libavutilDllName = "avutil-54.dll";

           HMODULE libavcodecDLL;
           HMODULE libavformatDLL;
           HMODULE libswscaleDLL;
           HMODULE libavutilDLL;

           AVFormatContext     **pFormatCtx = nullptr;
           AVCodecContext      *pCodecCtxOrig = nullptr;
           AVCodecContext      *pCodecCtx = nullptr;
           AVCodec             **pCodec = nullptr;
           AVFrame             **pFrame = nullptr;
           AVFrame             **pFrameRGB = nullptr;
           AVPacket            *packet = nullptr;
           int                 *frameFinished;
           int                 numBytes;
           uint8_t             *buffer = nullptr;
           struct SwsContext   *sws_ctx = nullptr;

           void                Init();
           void                AVRegisterAll();
           void                Release();
           bool                SaveFrame(const char *pFileName, AVFrame * frame, int w, int h);
           bool                GetStreamInfo();
           int                 FindVideoStream();
           bool                OpenInput(const char* file);
           AVCodec*            FindDecoder();
           AVCodecContext*     AllocContext3();
           bool                CopyContext();
           bool                OpenCodec2();
           AVFrame*            AllocFrame();
           int                 PictureGetSize();
           void*               Alloc(size_t size);
           int                 PictureFill(AVPicture *, const uint8_t *, enum AVPixelFormat, int, int);
           SwsContext*         GetSwsContext(int, int, enum AVPixelFormat, int, int, enum AVPixelFormat, int, SwsFilter *, SwsFilter *, const double *);
           int                 ReadFrame(AVFormatContext *s, AVPacket *pkt);
           int                 DecodeVideo2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt);
           int                 SwsScale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[]);
           void                PacketFree(AVPacket *pkt);
           void                BufferFree(void *ptr);
           void                FrameFree(AVFrame **frame);
           int                 CodecClose(AVCodecContext *);
           void                CloseInput(AVFormatContext **);
           bool                SeekFrame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags);

           Wrap();
           ~Wrap();

           bool                GetVideoFrame(char* str_in_file, char* str_out_img, uint64_t time);
       };

       public ref class managedWrap
       {
       public:

           managedWrap(){}
           ~managedWrap(){ delete unmanagedWrap; }

           bool GetVideoFrameToFile(char* str_in_file, char* str_out_img, uint64_t time)
           {
               return unmanagedWrap->GetVideoFrame(str_in_file, str_out_img, time);
           }

           static Wrap* unmanagedWrap = new Wrap();
       };
    }

    So the imports from libavcodec and the other modules are successful.
    The problem is an AccessViolationException when calling a DLL function, for example in OpenInput (i.e. avformat_open_input in the native ffmpeg library).

    The OpenInput function code is below:

    bool FFMpegWrapLib::Wrap::OpenInput(const char* file)
    {
       typedef int avformat_open_input(AVFormatContext **, const char *, AVInputFormat *, AVDictionary **);

       avformat_open_input* pavformat_open_input = (avformat_open_input *)GetProcAddress(libavformatDLL, "avformat_open_input");
       if (pavformat_open_input == nullptr)
       {
           throw exception("Unable to find avformat_open_input function address in libavformat module");
           return false;
       }

       //pin_ptr<AVFormatContext> pinFormatContext = &(new interior_ptr<AVFormatContext>(pCodecCtx));
       pFormatCtx = new AVFormatContext*;
       //*pFormatCtx = new AVFormatContext;


       int ret = pavformat_open_input(pFormatCtx, file, NULL, NULL); // here it fails

       return ret == 0;
    }

    So the problem, I think, is that the fields of the Wrap class live in managed memory, while ffmpeg works with native memory and initialises the pFormatCtx variable through its address.
    Can I avoid this, or is it impossible?
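
    For what it's worth, a sketch of one likely cause that is unrelated to managed memory: avformat_open_input expects *ps to be either NULL or a context obtained from avformat_alloc_context, and "new AVFormatContext*" leaves the pointed-to pointer uninitialised, so the library dereferences garbage. Below is a minimal sketch under that assumption (and assuming av_register_all has already been called through the same dynamic-loading mechanism, as these 2015-era ffmpeg builds require); open_input_sketch is a hypothetical name:

    #include <windows.h>
    #include <libavformat/avformat.h>

    /* Sketch only: the same GetProcAddress-style call, but starting from a
       NULL context pointer, which avformat_open_input then fills in itself. */
    typedef int avformat_open_input_fn(AVFormatContext **, const char *,
                                       AVInputFormat *, AVDictionary **);

    static int open_input_sketch(HMODULE libavformatDLL, const char *file)
    {
        avformat_open_input_fn *p_open = (avformat_open_input_fn *)
            GetProcAddress(libavformatDLL, "avformat_open_input");
        if (p_open == NULL)
            return 0;

        /* *ctx must be NULL or come from avformat_alloc_context(); an
           uninitialised pointer is read as an already-allocated context. */
        AVFormatContext *ctx = NULL;
        int ret = p_open(&ctx, file, NULL, NULL);
        return ret == 0;
    }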

  • Mixing video and audio into an mp4 file with ffmpeg, but audio doesn't keep in step with video on playback

    28 July 2015, by dragonfly

    I managed to write a program to record video (H.264/AAC) on Android with ffmpeg. The details are as follows:

    1. Implement android.hardware.Camera.PreviewCallback to capture every frame from the camera (a YUV image) and send it to ffmpeg in the JNI layer.

      @Override
      public void onPreviewFrame(byte[] data, Camera camera) {
         // Log.d(TAG, "onPreviewFrame");
         if (mIsRecording) {
             // Log.d(TAG, "handlePreviewFrame");
             Parameters param = camera.getParameters();
             Size s = param.getPreviewSize();
             handlePreviewFrame(data, s.width, s.height, mBufSize);
         }
         camera.addCallbackBuffer(mPreviewBuffer);
      }


      private void handlePreviewFrame(byte[] data, int width, int height, int size) {

         if (mFormats == ImageFormat.NV21) {
                 //process the yuv data

         }

         synchronized (mMuxer) {
             //jni api
             mMuxer.putAudioVisualData(mYuvData, size, 0);
         }
      }
    2. Use android.media.AudioRecord to read the PCM data from the microphone and write it to ffmpeg in the JNI layer in a loop.

      while (this.isRecording) {
         int ret = audioRecord.read(tempBuffer, 0, 1024);

         if (ret == AudioRecord.ERROR_INVALID_OPERATION) {
             throw new IllegalStateException(
                     "read() returned AudioRecord.ERROR_INVALID_OPERATION");
         } else if (ret == AudioRecord.ERROR_BAD_VALUE) {
             throw new IllegalStateException("read() returned AudioRecord.ERROR_BAD_VALUE");
         }

         // process the data
         handleAudioData(tempBuffer, ret);
      }

      private void handleAudioData(short[] data, int size)
      {
         // convert to byte[]
         //Log.d("VideoCaptureActivity", "handleAudioData");
         ByteBuffer buffer = ByteBuffer.allocate(data.length * 2);
         buffer.order(ByteOrder.LITTLE_ENDIAN);
         buffer.asShortBuffer().put(data);
         buffer.limit(size * 2);
         byte[] bytes = buffer.array();
         synchronized (muxing) {
             Log.d(TAG, "putAudio Data :" + size * 2);
             muxing.putAudioVisualData(bytes, size * 2, 1);
         }
      }
    3. Mix the audio and video data in the JNI layer. I refer to the example: https://ffmpeg.org/doxygen/trunk/muxing_8c-source.html

    The problem is that the example demonstrates audio and video encoding from some dummy source data generated on the fly. I need to encode audio from the microphone and video from the camera.

    I think the reason for my failure is that the pts handling in the example is not applicable to my situation. My AV function code is as follows:

    static int write_video_frame(AVFormatContext *oc, OutputStream *ost, char *data,
           int size) {
       int ret;
       AVCodecContext *c;
       int got_packet = 0;

       c = ost->st->codec;

       AVPacket pkt = { 0 };
       av_init_packet(&pkt);

       if (!video_st.hwcodec) {
           if (ost->zoom) {
               zoom(oc, ost, data);
           } else {
               avpicture_fill((AVPicture*) ost->frame, (const uint8_t *) data,
                       c->pix_fmt, c->width, c->height);
           }
           av_frame_make_writable(ost->frame);
           //ost->frame->pts = ost->next_pts++;
           ost->frame->pts = frame_count;
           /* encode the image */
           //ALOGI("avcodec_encode_video2 start");
           ret = avcodec_encode_video2(c, &pkt, ost->frame, &got_packet);
           //ALOGI("avcodec_encode_video2 end");
           if (ret < 0) {
               ALOGE("Error encoding video frame: %s", av_err2str(ret));
               return -1;
           }
       } else {
           if (size != 0) {
               pkt.data = (uint8_t *) data;
               pkt.size = size;
               pkt.pts = pkt.dts = ost->next_pts++;
               got_packet = 1;
           }
       }

       if (got_packet) {
           //ALOGI("video write_frame start");
           //pkt.pts = (int) timestamp;
           ret = write_frame(oc, &c->time_base, ost->st, &pkt);
           //ALOGI("video write_frame end");
           if (ret < 0) {
               ALOGE("Error while writing video frame: %s", av_err2str(ret));
               return -1;
           }
       }
       frame_count++;
       return 0;
    }





    static int write_audio_frame(AVFormatContext *oc, OutputStream *ost, char *data) {
       AVCodecContext *c;
       AVPacket pkt = { 0 }; // data and size must be 0;
       AVFrame *frame;
       int ret;
       int got_packet;
       int dst_nb_samples;

       av_init_packet(&pkt);
       c = ost->st->codec;

       if (audio_st.speex_echo_cancellation == 1
               && g_audio_echo_play_queue->start_flag == 1) {
           //ALOGI("encode_audio_handler in echo_cancel");
           QUEUE_ITEM* item = Get_Queue_Item(g_audio_echo_play_queue);
           if (item) {
               speex_dsp_echo_play_back((spx_int16_t *) item->data);
               //ALOGI("encode_audio_handler echo_play begin speex_echo_play_back");
               short *echo_processed = (short *) av_malloc(160 * sizeof(short));
               speex_dsp_echo_capture((spx_int16_t *) data, echo_processed);
               memcpy(data, (uint8_t *) echo_processed, 160);
               av_free(echo_processed);
               Free_Queue_Item(item, 1);
           }
       }

       frame = ost->tmp_frame;
       //update pts
       //frame->pts = ost->next_pts;
       //ost->next_pts += frame->nb_samples;
       if (frame) {
           /* convert samples from native format to destination codec format, using the resampler */
           /* compute destination number of samples */
           dst_nb_samples = av_rescale_rnd(
                   swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
                   c->sample_rate, c->sample_rate, AV_ROUND_UP);

           memcpy(frame->data[0], data, frame->nb_samples * 2);
           //frame->data[0] = data;

           /* when we pass a frame to the encoder, it may keep a reference to it
            * internally;
            * make sure we do not overwrite it here
            */
           ret = av_frame_make_writable(ost->frame);
           if (ret < 0) {
               ALOGE("write_audio_frame av_frame_make_writable ERROR %s",
                       av_err2str(ret));
               return -1;
           }

           /* convert to destination format */
           ret = swr_convert(ost->swr_ctx, ost->frame->data, dst_nb_samples,
                   (const uint8_t **) frame->data, frame->nb_samples);

           if (ret < 0) {
               ALOGI("Error while converting %s", av_err2str(ret));
               return -1;
           }
           frame = ost->frame;
           frame->pts = av_rescale_q(ost->samples_count,
                   (AVRational){ 1, c->sample_rate }, c->time_base);
           ost->samples_count += dst_nb_samples;
       }
       ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);

       if (ret < 0) {
           ALOGE("Error encoding audio frame: %s", av_err2str(ret));
           return -1;
       }

       if (got_packet) {
           //pkt.pts = (int) timestamp;

           ret = write_frame(oc, &c->time_base, ost->st, &pkt);
           if (ret < 0) {
               ALOGE("Error while writing audio frame: %s", av_err2str(ret));
               return -1;
           }
       }
       return (frame || got_packet) ? 0 : 1;
    }

    How do I deal with the pts of the video and audio streams in my situation? Can anyone give me some advice?

    Can I ignore the pts provided by ffmpeg, calculate the pts myself in the Java layer, and pass it to ffmpeg?
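
    One common approach is exactly that: tag every captured video frame and audio buffer with a wall-clock timestamp in the Java layer (for example derived from System.nanoTime()), pass it through JNI, and rescale it into the codec time base instead of counting frames. A minimal sketch under that assumption; capture_ms, first_capture_ms and pts_from_capture_time are hypothetical names, not part of the code above:

    #include <stdint.h>
    #include <libavutil/mathematics.h>

    /* Sketch: derive pts from capture time rather than a frame counter.
       capture_ms is the Java-supplied timestamp (milliseconds) of this
       frame/buffer; first_capture_ms is the one saved when recording began. */
    static int64_t pts_from_capture_time(int64_t capture_ms,
                                         int64_t first_capture_ms,
                                         AVRational codec_time_base)
    {
        int64_t elapsed_ms = capture_ms - first_capture_ms;
        /* elapsed_ms is in a 1/1000 time base; convert to the codec one. */
        return av_rescale_q(elapsed_ms, (AVRational){ 1, 1000 }, codec_time_base);
    }

    Since both streams then share a single clock, setting ost->frame->pts this way for video and audio keeps them aligned even when the camera delivers frames at an irregular rate.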

  • Flv stream to sockets with ffmpeg, node.js and socket.io

    7 August 2015, by Dan-Levi Tømta

    So I am having some problems understanding how to pull this off. I know that streaming video is a hard topic and that there is a lot to take into account, but anyway, here we go, starting off learning how to stream video.

    I am using SocketIoClientDotNet as the node.js client library for the C# application.

    I am sending byte arrays of the video to node, which creates a temporary file and appends the buffers to it. I have tried to set the source of the video element to that file, but it is not read as video and stays all black. Since that did not work I downloaded a copy of the file, and it turns out VLC can't play it either. OK, on to the code:

    C#

    bool ffWorkerIsWorking = false;
    private void btnFFMpeg_Click(object sender, RoutedEventArgs e)
    {
       BackgroundWorker ffWorker = new BackgroundWorker();
       ffWorker.WorkerSupportsCancellation = true;
       ffWorker.DoWork += ((ffWorkerObj,ffWorkerEventArgs) =>
       {
           ffWorkerIsWorking = true;
           using (var FFProcess = new Process())
           {
               var processStartInfo = new ProcessStartInfo
               {
                   FileName = "ffmpeg.exe",
                   RedirectStandardInput = true,
                   RedirectStandardOutput = true,
                   UseShellExecute = false,
                   CreateNoWindow = true,
                   Arguments = " -loglevel panic -hide_banner -y -f gdigrab -draw_mouse 1 -i desktop -f flv -"
               };
               FFProcess.StartInfo = processStartInfo;
               FFProcess.Start();
               byte[] buffer = new byte[32768];
               using (MemoryStream ms = new MemoryStream())
               {
                   while (!FFProcess.HasExited)
                   {
                       int read = FFProcess.StandardOutput.BaseStream.Read(buffer, 0, buffer.Length);
                    if (read <= 0)
                           break;
                       ms.Write(buffer, 0, read);
                       clientSocket.Emit("video", ms.ToArray());
                       ms.Flush();
                       if (!ffWorkerIsWorking)
                       {
                           ffWorker.CancelAsync();
                           break;
                       }
                   }
               }
           }
       });
       ffWorker.RunWorkerAsync();
    }

    JS (server)

    var buffer = new Buffer(32768);
    var isBuffering = false;
    var wstream;
    socket.on('video', function(data) {
       if(!isBuffering){
           wstream = fs.createWriteStream('fooTest.flv');
           isBuffering = true;
       }
       buffer = Buffer.concat([buffer, data]);
       fs.appendFile('public/fooTest.flv', buffer, function (err) {
         if (err) throw err;
         console.log('The "data to append" was appended to file!');
       });
    });

    What am I doing wrong here?