
Other articles (99)

  • MediaSPIP 0.1 Beta version

    25 April 2011

    MediaSPIP 0.1 beta is the first version of MediaSPIP declared "usable".
    The zip file provided here contains only the sources of MediaSPIP in its standalone version.
    To get a working installation, you must manually install all software dependencies on the server.
    If you want to use this archive for an installation in "farm mode", you will also need to carry out other manual (...)

  • Multilang: improving the interface for multilingual blocks

    18 February 2011

    Multilang is an additional plugin that is not enabled by default when MediaSPIP is initialised.
    Once it is activated, a preconfiguration is put in place automatically by MediaSPIP init, so the new feature is immediately operational. No separate configuration step is therefore required.

  • Organising by category

    17 May 2013

    In MediaSPIP, a section has two names: category and rubrique.
    The documents stored in MediaSPIP can be filed in different categories. You can create a category by clicking on "publish a category" in the publish menu at the top right (after logging in). A category can itself be filed inside another category, so you can build a whole tree of categories.
    When you next publish a document, the newly created category will be offered (...)

On other sites (12138)

  • Building a voice recorder using HTML5 and JavaScript

    16 July 2014, by lama

    I want to build a voice recorder in HTML5, like the JSSoundRecorder found on GitHub, but I want the user to be able to choose the file format before recording. I can do this using ffmpeg: the user selects the audio format with a check box (mp3, wma, pcm), and in the background the .wav file the program normally creates is converted to the selected format and displayed in that format, instead of being displayed directly. Below is the ffmpeg code I can use, but I don't know how to get hold of the .wav audio file in order to convert it and show it. If someone has ideas, or can point me to demos, please share; I have been looking for weeks. This is the ffmpeg code:

      var fileName;
      var fileBuffer;

      function timeToSeconds(time) {
         var parts = time.split(":");
         return parseFloat(parts[0]) * 60 * 60 + parseFloat(parts[1]) * 60 + parseFloat(parts[2]) + parseFloat("0." + parts[3]);
     }

     // create ffmpeg worker
     function getFFMPEGWorker() {
         // regexps for extracting time from ffmpeg logs
         var durationRegexp = /Duration: (.*?), /
         var timeRegexp = /time=(.*?) /;
         var duration;

         var ffmpegWorker = new Worker('worker.js');
         var durationLine;
         ffmpegWorker.addEventListener('message', function(event) {
             var message = event.data;
             console.log(message.type);
             if (message.type === "ready" && window.File && window.FileList && window.FileReader) {
                 // script loaded, hide loader
                 $('#loading').hide();
             } else if (message.type == "stdout") {
                 console.log(message.data);
             } else if (message.type == "stderr") {
                 console.log(message.data);
                 // try to extract duration
                 if (durationRegexp.exec(message.data)) {
                     duration = timeToSeconds(durationRegexp.exec(message.data)[1]);
                 }
                 // try to extract time
                 if (timeRegexp.exec(message.data)) {
                     var time = timeToSeconds(timeRegexp.exec(message.data)[1]);
                     if (duration) {
                         $("#progress").text("Progress: " + Math.floor(time / duration * 100) + "%");
                         $("#progress").show();
                     }
                 }
             } else if (message.type == "done") {
                 var code = message.data.code;

                 console.log(message.data);
                 var outFileNames = Object.keys(message.data.outputFiles);

                 console.log(outFileNames);
                 if (code == 0 && outFileNames.length) {
                     var outFileName = outFileNames[0];

                     var outFileBuffer = message.data.outputFiles[outFileName];

                     var src = window.URL.createObjectURL(new Blob([outFileBuffer]));

                     $("#downloadLink").attr('href', src);
                     $("#download").show();
                 } else {
                     $("#error").show();
                 }
                 $("#converting").hide();
                 $("#progress").hide();
             }
         }, false);
         return ffmpegWorker;
      }

      // create ffmpeg worker
      var ffmpegWorker = getFFMPEGWorker();
      var ffmpegRunning = false;

      $('#convert').click(function() {
         // terminate existing worker
         if (ffmpegRunning) {
             ffmpegWorker.terminate();
             ffmpegWorker = getFFMPEGWorker();
         }
         ffmpegRunning = true;

         // display converting animation
         $("#converting").show();
         $("#error").hide();

         // hide download div
         $("#download").hide();

         // change the download file name to match the selected output format
         var outFileName = fileName.substr(0, fileName.lastIndexOf('.')) + "." + getOutFormat();

         $("#downloadLink").attr("download", outFileName);
         $("#downloadLink").text(outFileName);

         // build the ffmpeg argument list; a separate name avoids
         // shadowing the implicit `arguments` object
         var args = [];
         args.push("-i");
         args.push(fileName);

         args.push("-b:a");
         args.push(getBitrate());

         switch (getOutFormat()) {
             case "mp3":
                 args.push("-acodec");
                 args.push("libmp3lame");
                 args.push("out.mp3");
                 break;

             case "wma":
                 args.push("-acodec");
                 args.push("wmav1");
                 args.push("out.asf");
                 break;

             case "pcm":
                 args.push("-f");
                 args.push("s16le");
                 args.push("-acodec");
                 args.push("pcm_s16le");
                 args.push("out.pcm");
         }

         ffmpegWorker.postMessage({
             type: "command",
             arguments: args,
             files: [
                 {
                     "name": fileName,
                     "buffer": fileBuffer
                 }
             ]
         });
     });

     function getOutFormat() {
         return $('input[name=format]:checked').val();
     }

     function getBitrate() {
         return $('input[name=bitrate]:checked').val();
     }

     // disable conversion at start
     $('#convert').attr('disabled', 'true');

     function readInputFile(file) {
         // disable conversion for the time of file loading
         $('#convert').attr('disabled', 'true');

         // load file content
         var reader = new FileReader();
         reader.onload = function(e) {
             $('#convert').removeAttr('disabled');
             fileName = file.name;
             console.log(fileName);
             fileBuffer = e.target.result;
         }
         reader.readAsArrayBuffer(file);

     }

     // reset file selector at start
     function resetInputFile() {
         $("#inFile").wrap('<form>').closest('form').get(0).reset();
         $("#inFile").unwrap();
     }
     resetInputFile();

     function handleFileSelect(event) {
         var files = event.target.files; // FileList object
         console.log(files);
         // files is a FileList of File objects; display the first file name
         var file = files[0];
         console.log(file);
         if (file) {
             $("#drop").text("Drop file here");
             readInputFile(file);
         }
     }


     // setup input file listeners

     document.getElementById('inFile').addEventListener('change', handleFileSelect, false);
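
    What is missing above is only the step that feeds the recorder's output into this conversion pipeline. A minimal sketch, assuming the recorder (e.g. the JSSoundRecorder) hands you the finished recording as a WAV Blob; convertRecording and recordedBlob are hypothetical glue names, and the worker protocol is the one already used above:

      // Hypothetical glue: feed a recorded WAV Blob into the existing
      // conversion pipeline instead of a user-selected file.
      function convertRecording(recordedBlob) {
          var reader = new FileReader();
          reader.onload = function(e) {
              fileName = "recording.wav";    // same globals the convert
              fileBuffer = e.target.result;  // handler already reads
              $('#convert').removeAttr('disabled');
              $('#convert').click();         // reuse the existing handler
          };
          reader.readAsArrayBuffer(recordedBlob);
      }

    The format check boxes then keep working unchanged, because getOutFormat() and getBitrate() are only read when the conversion starts.
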
  • How to remove a PTS gap within a file when transcoding with ffmpeg?

    18 July 2017, by jsBaek

    I have a video that was archived from an RTMP stream.

    Since the broadcast goes on and off frequently, the archived file has PTS like below (assume these values are seconds):

    0—10—20—30—40 120—130—140

    There is a discontinuity between 40 and 120 seconds.

    The duration of this file should be 60 seconds, since there is an 80-second gap between 40 and 120, but when I transcoded it, the final duration came out as 140 seconds, with an 80-second frozen part.

    How can I transcode this file without the "non-existent" 80 seconds, so that the output is 60 seconds long, without the redundant 80-second pause?

    I tried the "+genpts" and "+igndts" options, but they don't work at all.
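
    For reference, those fflags only regenerate or ignore container timestamps; they do not close wall-clock gaps. One possible approach (a sketch, assuming constant-frame-rate video; the file names are placeholders) is to renumber the timestamps from the frame and sample counts with the setpts/asetpts filters, which makes the output contiguous:

      ffmpeg -i archived.flv \
             -vf "setpts=N/FRAME_RATE/TB" \
             -af "asetpts=N/SR/TB" \
             out.mp4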

  • FFmpeg muxing video with libavformat/libavcodec, but the output can't be played

    10 August 2017, by tqn

    I'm trying to write an app that takes an input video, crops it to a square, and ignores the audio stream. Because invoking the command line performs badly, I'm trying to use libavcodec and libavformat to do it. But the output isn't playable by any video player, and its duration is 0, even though I wrote all the frames. Here is my code.

    void convert_video(char* input) {
       AVFormatContext *pFormatCtx = NULL;
       int             i, videoStreamIndex;
       AVCodecContext  *pCodecCtx = NULL;
       AVCodec         *pCodec = NULL;
       AVFrame         *pFrame = NULL;
       AVFrame         *pFrameSquare = NULL;
       AVPacket        packet, outPacket;
       int             frameFinished;
       int             numBytes;
       uint8_t         *buffer = NULL;
       AVCodec         *pEncodec = NULL;
       AVFormatContext *poFormatCxt = NULL;
       MuxOutputStream    videoStream = {0}, audioStream = {0};
       int tar_w, tar_h;

       const enum AVPixelFormat pic_format = AV_PIX_FMT_YUV420P;
       const enum AVCodecID codec_id = AV_CODEC_ID_H264;
       AVDictionary    *optionsDict = NULL;
       char output[50];
       sprintf(output, "%soutput.mp4", ANDROID_SDCARD);

       // Register all formats and codecs
       av_register_all();

       // Open video file
       if(avformat_open_input(&pFormatCtx, input, NULL, NULL)!=0)
           return; // Couldn't open file
       avformat_alloc_output_context2(&poFormatCxt, NULL, NULL, output);

       // Retrieve stream information
       if(avformat_find_stream_info(pFormatCtx, NULL)<0)
           return; // Couldn't find stream information

       // Find the first video stream
       videoStreamIndex=-1;
       for(i=0; i<pFormatCtx->nb_streams; i++)
           if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
               videoStreamIndex=i;
               break;
           }
       if(videoStreamIndex==-1)
           return; // Didn't find a video stream

       // Get a pointer to the codec context for the video stream
       pCodecCtx = pFormatCtx->streams[videoStreamIndex]->codec;
       tar_w = pCodecCtx->width > pCodecCtx->height ? pCodecCtx->height : pCodecCtx->width;
       tar_h = tar_w;

       // Find the decoder for the video stream
       pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
       pEncodec = avcodec_find_encoder(codec_id);

       add_stream_mux(&videoStream, poFormatCxt, &pEncodec, codec_id, tar_w, tar_h);
       videoStream.st[0].time_base = pFormatCtx->streams[videoStreamIndex]->time_base;
       videoStream.st[0].codec->time_base = videoStream.st[0].time_base;
       videoStream.st[0].codec->time_base.den *= videoStream.st[0].codec->ticks_per_frame;
    //    add_stream(&audioStream, poFormatCxt, &)
       open_video(poFormatCxt, pEncodec, &videoStream, optionsDict);
       int ret = avio_open(&poFormatCxt->pb, output, AVIO_FLAG_WRITE);

       // Open codec
       if(avcodec_open2(pCodecCtx, pCodec, &optionsDict) < 0)
           return; // Could not open codec

       ret = avformat_write_header(poFormatCxt, &optionsDict);
       if (ret != 0) {
           ANDROID_LOG("Died");
       }

       // Allocate video frame
       pFrame=av_frame_alloc();
       pFrame->format = videoStream.st->codec->pix_fmt;
       pFrame->width = pCodecCtx->width;
       pFrame->height = pCodecCtx->height;
       av_frame_get_buffer(pFrame, 32);

       // Allocate an AVFrame structure
       pFrameSquare=av_frame_alloc();
       if(pFrameSquare==NULL)
           return;

       // Determine required buffer size and allocate buffer
       numBytes=avpicture_get_size(pic_format, tar_w,
                                   tar_h);
       buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

       // Assign appropriate parts of buffer to image planes in pFrameSquare
       // Note that pFrameSquare is an AVFrame, but AVFrame is a superset
       // of AVPicture
       ret = avpicture_fill((AVPicture *)pFrameSquare, buffer, pic_format,
                      tar_w, tar_h);
       if (ret < 0) {
           ANDROID_LOG("Can't fill picture");
           return;
       }

       // Read frames and save first five frames to disk
       i=0;
       ret = av_read_frame(pFormatCtx, &packet);
       while(ret >= 0) {
           // Is this a packet from the video stream?
           if(packet.stream_index == videoStreamIndex) {
               // Decode video frame
    //            av_packet_rescale_ts(&packet, videoStream.st->time_base, videoStream.st->codec->time_base);
               avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                                     &packet);
    //            while (!frameFinished) {
    //                avcodec_decode_video2(videoStream.st->codec, pFrame, &frameFinished, NULL);
    //            }
               ANDROID_LOG("Trying to decode frame %d with result %d", i, frameFinished);
               ret = av_picture_crop((AVPicture*) pFrameSquare, (AVPicture*) pFrame, pic_format, 0, 0);
               if (ret < 0) {
                   ANDROID_LOG("Can't crop image");
               }
    //            av_frame_get_best_effort_timestamp(pFrame);
    //            av_rescale_q()

               if(frameFinished) {

                   // Save the frame to disk
                   av_init_packet(&outPacket);
    //                av_packet_rescale_ts(&outPacket, videoStream.st->codec->time_base, videoStream.st->time_base);
                   pFrameSquare->width = tar_w;
                   pFrameSquare->height = tar_h;
                   pFrameSquare->format = pic_format;
                   pFrameSquare->pts = ++videoStream.next_pts;
                   ret = avcodec_encode_video2(videoStream.st->codec, &outPacket, pFrameSquare, &frameFinished);

    //                int count = 0;
    //                while (!frameFinished && count++ < 6) {
    //                    ret = avcodec_encode_video2(videoStream.st->codec, &outPacket, NULL, &frameFinished);
    //                }
                   if (frameFinished) {
                       ANDROID_LOG("Writing frame %d", i);
                       outPacket.stream_index = videoStreamIndex;
                       av_interleaved_write_frame(poFormatCxt, &outPacket);
                   }
                   av_free_packet(&outPacket);
               }
           }

           // Free the packet that was allocated by av_read_frame
           av_free_packet(&packet);
           ret = av_read_frame(pFormatCtx, &packet);
       }

       ret = av_write_trailer(poFormatCxt);
       if (ret < 0) {
           ANDROID_LOG("Couldn't write trailer");
       } else {
           ANDROID_LOG("Video convert finished");
       }

       // Free the RGB image
       av_free(buffer);
       av_free(pFrameSquare);

       // Free the YUV frame
       av_free(pFrame);

       // Close the codec
       avcodec_close(pCodecCtx);
    //    avcodec_close(pEncodecCtx);

       // Close the video file
       avformat_close_input(&pFormatCtx);

       return;
    }

    Helper

    #define STREAM_DURATION   10.0
    #define STREAM_FRAME_RATE 25 /* 25 images/s */
    #define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

    /* Add an output stream. */
    void add_stream_mux(MuxOutputStream *ost, AVFormatContext *oc,
                          AVCodec **codec,
                          enum AVCodecID codec_id, int width, int height)
    {
       AVCodecContext *codecCtx;
       int i;
       /* find the encoder */
       *codec = avcodec_find_encoder(codec_id);
       if (!(*codec)) {
           fprintf(stderr, "Could not find encoder for '%s'\n",
                   avcodec_get_name(codec_id));
           exit(1);
       }
       ost->st = avformat_new_stream(oc, *codec);
       if (!ost->st) {
           fprintf(stderr, "Could not allocate stream\n");
           exit(1);
       }
       ost->st->id = oc->nb_streams-1;
       codecCtx = ost->st->codec;
       switch ((*codec)->type) {
           case AVMEDIA_TYPE_AUDIO:
               codecCtx->sample_fmt  = (*codec)->sample_fmts ?
                                (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
               codecCtx->bit_rate    = 64000;
               codecCtx->sample_rate = 44100;
               if ((*codec)->supported_samplerates) {
                   codecCtx->sample_rate = (*codec)->supported_samplerates[0];
                   for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                       if ((*codec)->supported_samplerates[i] == 44100)
                           codecCtx->sample_rate = 44100;
                   }
               }
               codecCtx->channels        = av_get_channel_layout_nb_channels(codecCtx->channel_layout);
               codecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
               if ((*codec)->channel_layouts) {
                   codecCtx->channel_layout = (*codec)->channel_layouts[0];
                   for (i = 0; (*codec)->channel_layouts[i]; i++) {
                       if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                           codecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
                   }
               }
               codecCtx->channels        = av_get_channel_layout_nb_channels(codecCtx->channel_layout);
               ost->st->time_base = (AVRational){ 1, codecCtx->sample_rate };
               break;
           case AVMEDIA_TYPE_VIDEO:
               codecCtx->codec_id = codec_id;
               codecCtx->bit_rate = 400000;
               /* Resolution must be a multiple of two. */
               codecCtx->width    = width;
               codecCtx->height   = height;
               /* timebase: This is the fundamental unit of time (in seconds) in terms
                * of which frame timestamps are represented. For fixed-fps content,
                * timebase should be 1/framerate and timestamp increments should be
                * identical to 1. */
               ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
               codecCtx->time_base       = ost->st->time_base;
               codecCtx->gop_size      = 12; /* emit one intra frame every twelve frames at most */
               codecCtx->pix_fmt       = STREAM_PIX_FMT;
               if (codecCtx->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
                   /* just for testing, we also add B frames */
                   codecCtx->max_b_frames = 2;
               }
               if (codecCtx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
                   /* Needed to avoid using macroblocks in which some coeffs overflow.
                    * This does not happen with normal video, it just happens here as
                    * the motion of the chroma plane does not match the luma plane. */
                   codecCtx->mb_decision = 2;
               }
               break;
           default:
               break;
       }
       /* Some formats want stream headers to be separate. */
       if (oc->oformat->flags & AVFMT_GLOBALHEADER)
           codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    static void open_video(AVFormatContext *oc, AVCodec *codec, MuxOutputStream *ost, AVDictionary *opt_arg)
    {
       int ret;
       AVCodecContext *c = ost->st->codec;
       AVDictionary *opt = NULL;
       av_dict_copy(&opt, opt_arg, 0);
       /* open the codec */
       ret = avcodec_open2(c, codec, &opt);
       av_dict_free(&opt);
       if (ret < 0) {
           fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
           exit(1);
       }
       /* allocate and init a re-usable frame */
       ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
       if (!ost->frame) {
           fprintf(stderr, "Could not allocate video frame\n");
           exit(1);
       }
       /* If the output format is not YUV420P, then a temporary YUV420P
        * picture is needed too. It is then converted to the required
        * output format. */
       ost->tmp_frame = NULL;
       if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
           ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
           if (!ost->tmp_frame) {
               fprintf(stderr, "Could not allocate temporary picture\n");
               exit(1);
           }
       }
    }

    I'm afraid that I set the wrong pts or time_base on the frames. I also see that the first few frames are lost when decoding and encoding: frameFinished stays 0. I saw a post saying that I have to flush the decoder with avcodec_decode_video2(videoStream.st->codec, pFrame, &frameFinished, NULL), but after trying a few times frameFinished is still 0, and calling avcodec_encode_video2(videoStream.st->codec, &outPacket, NULL, &frameFinished) makes the next encode call throw an error. So how can I get all the frames that are lost? I'm using FFmpeg version 3.0.1.
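
    For what it's worth, with the FFmpeg 3.0 API the decoder is drained with an empty packet (data = NULL, size = 0) rather than a NULL packet pointer, the encoder is drained with a NULL frame, and both drains belong after the read loop rather than inside it, which is why the NULL-frame call fails mid-stream. A rough sketch under those assumptions, reusing the variable names from the code above and placed just before av_write_trailer():

    /* Sketch: drain decoder and encoder once av_read_frame() hits EOF.
     * The "lost" first frames are buffered codec delay; they come out here. */
    AVPacket flushPkt;
    av_init_packet(&flushPkt);
    flushPkt.data = NULL;   /* empty packet signals end of stream */
    flushPkt.size = 0;

    do {
        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &flushPkt);
        if (frameFinished) {
            /* ...crop pFrame and encode it exactly as in the main loop... */
        }
    } while (frameFinished);

    do {                     /* now drain the encoder with a NULL frame */
        av_init_packet(&outPacket);
        if (avcodec_encode_video2(videoStream.st->codec, &outPacket,
                                  NULL, &frameFinished) < 0)
            break;
        if (frameFinished) {
            outPacket.stream_index = 0; /* the single output video stream */
            av_interleaved_write_frame(poFormatCxt, &outPacket);
        }
        av_free_packet(&outPacket);
    } while (frameFinished);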