Other articles (68)

  • The farm's regular Cron tasks

    1 December 2010, by

    Managing the farm involves running several repetitive tasks, known as Cron tasks, at regular intervals.
    The super Cron (gestion_mutu_super_cron)
    This task, scheduled every minute, simply calls the Cron of every instance in the shared-hosting farm on a regular basis. Combined with a system Cron on the central site of the farm, this makes it possible to generate regular visits to the various sites and to keep the tasks of rarely visited sites from being too (...)

  • Mediabox: opening images in the maximum space available to the user

    8 February 2011, by

    Image display is constrained by the width allowed by the site's design (which depends on the theme in use), so images are shown at a reduced size. To take advantage of all the space available on the user's screen, a feature can be added that displays the image in a multimedia box overlaid on the rest of the content.
    To do this, the "Mediabox" plugin must be installed.
    Configuring the multimedia box
    As soon as (...)

  • Sites built with MediaSPIP

    2 May 2011, by

    This page presents a few of the sites running MediaSPIP.
    You can of course add your own using the form at the bottom of the page.

On other sites (9046)

  • Specify percentage instead of time to ffmpeg

    12 January 2017, by user779159

    To get a thumbnail image from halfway through a video I can run ffmpeg -ss 100 -i /tmp/video.mp4 -frames:v 1 -s 200x100 image.jpg. Using -ss 100 grabs the thumbnail at 100 seconds, which would be halfway through assuming the video is 200 seconds long.

    But if I don’t know the exact length of the video, my application code would first need to call something like ffprobe to determine the length, then divide it by 2 to get the thumbnail time.

    Is there a way to have ffmpeg grab the thumbnail at a chosen percentage of the video? So instead of specifying -ss 100, something like -ss 50% or -ss 20% to get a thumbnail from halfway or 20% into the file?

    I know I can do this in application code, but it would be more efficient if ffmpeg could handle it itself.
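
    As far as I know, ffmpeg’s -ss only accepts a timestamp, not a percentage, so the two-step approach described above is the usual workaround. A minimal Node.js sketch of it (the helper name and file paths are illustrative):

    const { execFileSync } = require('child_process');

    function thumbnailAtPercent(input, output, percent) {
      // Ask ffprobe for the container duration in seconds.
      const duration = parseFloat(execFileSync('ffprobe', [
        '-v', 'error',
        '-show_entries', 'format=duration',
        '-of', 'default=noprint_wrappers=1:nokey=1',
        input,
      ]).toString());

      // Seek to the computed timestamp and grab a single frame.
      execFileSync('ffmpeg', [
        '-ss', String(duration * percent / 100),
        '-i', input,
        '-frames:v', '1',
        '-s', '200x100',
        '-y', output,
      ]);
    }

    thumbnailAtPercent('/tmp/video.mp4', 'image.jpg', 50);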

  • Download, modify and upload video with S3

    9 February 2017, by Gold Fish

    I’m trying to write a Lambda function in Node.js that downloads a file from an S3 bucket, modifies it, and then uploads it to another S3 bucket. For some reason, the code prints the "modify video" log and then finishes and exits without error. What am I doing wrong?

    var AWS = require('aws-sdk');
    var util = require('util');
    var ffmpeg = require('fluent-ffmpeg');
    var s3 = require('s3');

    // get reference to S3 client

    var awsS3Client = new AWS.S3();
    var options = {
     s3Client: awsS3Client,
    };
    var client = s3.createClient(options);

    exports.handler = function(event, context, callback) {
       // Read options from the event.
       console.log("Reading options from event:\n", util.inspect(event, {depth: 5}));
       var srcBucket = event.Records[0].s3.bucket.name;
       // Object key may have spaces or unicode non-ASCII characters.
       var srcKey    =
       decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, " "));
       var dstBucket = srcBucket + "-dst";
       var dstKey    = "mod_" + srcKey;

       var dwnld_file_name = '/tmp/vid.mp4';
       var mdfy_file_name = '/tmp/mod_vid.mp4';

       // Sanity check: validate that source and destination are different buckets.
       if (srcBucket == dstBucket) {
           callback("Source and destination buckets are the same.");
           return;
       }

       // Infer the video type.
       var typeMatch = srcKey.match(/\.([^.]*)$/);
       if (!typeMatch) {
           callback("Could not determine the video type.");
           return;
       }
       var videoType = typeMatch[1];
       if (videoType != "mp4") {
           callback(`Unsupported video type: ${videoType}`);
           return;
       }
       console.log("Source bucket: ", srcBucket);
       console.log("srcKey: ", srcKey);
       console.log("Dest bucket: ", dstBucket);
       console.log("dstKey: ", dstKey);

       var params = {
         localFile: dwnld_file_name,

         s3Params: {
           Bucket: srcBucket,
           Key: srcKey,
         },
       };
       console.log("params for download: ", params);
       var downloader = client.downloadFile(params);
       downloader.on('error', function(err) {
         console.error("unable to download:", err.stack);
         callback("unable to download");
       });
       downloader.on('end', function() {
         console.log("done downloading");
         console.log("modify video");
         ffmpeg(dwnld_file_name)
             .setStartTime('00:00:01')
             .setDuration('1').output(mdfy_file_name).on('end', function() {
               console.log('Finished processing');
               params = {
                 localFile: mdfy_file_name,
                 //localFile: dwnld_file_name,
                 s3Params: {
                   Bucket: dstBucket,
                   Key: dstKey,
                 },
               };
               console.log("params for upload: ", params);
               var uploader = client.uploadFile(params);
               uploader.on('error', function(err) {
                 console.error("unable to upload:", err.stack);
                 callback("unable to upload");
               });
               uploader.on('end', function() {
                 console.log("done uploading");
                 callback('done');
                 return;
               });
            });
       });
    };
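
    For what it’s worth, a likely cause: with fluent-ffmpeg, .output() only configures the command, and nothing is executed until .run() is called (.save() combines the two). Without it the 'end' handler never fires, the event loop drains, and the Lambda exits silently, which matches the symptom above. A sketch of the processing step with that one change:

       ffmpeg(dwnld_file_name)
           .setStartTime('00:00:01')
           .setDuration('1')
           .output(mdfy_file_name)
           .on('error', function(err) {
               console.error('ffmpeg failed:', err);
               callback('ffmpeg failed');
           })
           .on('end', function() {
               console.log('Finished processing');
               // ... upload to dstBucket as in the code above ...
           })
           .run(); // actually starts the ffmpeg process
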
  • Memory leak when using ffmpeg

    21 January 2017, by se210

    I have implemented a class which spawns a thread for reading and queuing frames, while the main thread displays these frames via OpenGL. I try to free the allocated memory after binding the image data to an OpenGL texture, but it seems some memory is not freed properly. The memory usage keeps growing until the system runs out of memory, and eventually the frame reader thread cannot grab new frames due to memory allocation failure. Would someone please help me figure out what I might have missed? Thank you.

    This is the code for the frame reader thread:

    void AVIReader::frameReaderThreadFunc()
    {
       AVPacket packet;

       while (readFrames) {
           // Allocate necessary memory
           AVFrame* pFrame = av_frame_alloc();
           if (pFrame == nullptr)
           {
               continue;
           }

           AVFrame* pFrameRGB = av_frame_alloc();
           if (pFrameRGB == nullptr)
           {
               av_frame_free(&pFrame);
               continue;
           }

           // Determine required buffer size and allocate buffer
           int numBytes = avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width,
               pCodecCtx->height);
           uint8_t* buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

           if (buffer == nullptr)
           {
               av_frame_free(&pFrame);
               av_frame_free(&pFrameRGB);
               continue;
           }

           // Assign appropriate parts of buffer to image planes in pFrameRGB
           // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
           // of AVPicture
           avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24,
               pCodecCtx->width, pCodecCtx->height);

           if (av_read_frame(pFormatCtx, &packet) >= 0) {
               // Is this a packet from the video stream?
               if (packet.stream_index == videoStream) {
                   // Decode video frame
                   int frameFinished;
                   avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

                   if (frameFinished) {
                       // Convert the image from its native format to RGB
                       sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                           pFrame->linesize, 0, pCodecCtx->height,
                           pFrameRGB->data, pFrameRGB->linesize);

                       VideoFrame vf;
                       vf.frame = pFrameRGB;
                       vf.pts = av_frame_get_best_effort_timestamp(pFrame) * time_base;
                       frameQueue.enqueue(vf);

                       av_frame_unref(pFrame);
                       av_frame_free(&pFrame);
                   }
               }
               //av_packet_unref(&packet);
               av_free_packet(&packet);
           }
       }
    }

    This is the code that grabs the queued frames and binds them to an OpenGL texture. I explicitly keep the previous frame around until I switch it out for the next one; otherwise, it seems to cause a segfault.

    void AVIReader::GrabAVIFrame()
    {
       if (curFrame.pts >= clock_pts)
       {
           return;
       }

       if (frameQueue.empty())
           return;

       // Get a packet from the queue
       VideoFrame videoFrame = frameQueue.top();
       while (!frameQueue.empty() && frameQueue.top().pts < clock_pts)
       {
           videoFrame = frameQueue.dequeue();
       }

       glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, videoFrame.frame->data[0]);

       // release previous frame
       if (curFrame.frame)
       {
           av_free(curFrame.frame->data[0]);
       }
       av_frame_unref(curFrame.frame);

       // set current frame to new frame
       curFrame = videoFrame;
    }

    The frameQueue is a thread-safe priority queue that holds VideoFrame objects, defined as:

    class VideoFrame {
    public:
       AVFrame* frame;
       double pts;
    };

    Update: There was a silly error in the ordering of setting the current frame to the new frame. I forgot to switch it back after trying some things out. I also incorporated @ivan_onys’s suggestion, but that does not seem to fix the problem.


    Update 2: I adopted @Al Bundy’s suggestion to release pFrame and packet unconditionally, but the issue still persists.

    Since buffer is what contains the actual image data that needs to be used in glTexSubImage2D(), I cannot release it until I am done displaying it on the screen (otherwise I get a segfault). avpicture_fill() assigns frame->data[0] = buffer, so I think calling av_free(curFrame.frame->data[0]); on the previous frame after texture-mapping the new frame should release the allocated buffer.

    Here is the updated frame reader thread code:

    void AVIReader::frameReaderThreadFunc()
    {
       AVPacket packet;

       while (readFrames) {
           // Allocate necessary memory
           AVFrame* pFrame = av_frame_alloc();
           if (pFrame == nullptr)
           {
               continue;
           }

           AVFrame* pFrameRGB = av_frame_alloc();
           if (pFrameRGB == nullptr)
           {
               av_frame_free(&pFrame);
               continue;
           }

           // Determine required buffer size and allocate buffer
           int numBytes = avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width,
               pCodecCtx->height);
           uint8_t* buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

           if (buffer == nullptr)
           {
               av_frame_free(&pFrame);
               av_frame_free(&pFrameRGB);
               continue;
           }

           // Assign appropriate parts of buffer to image planes in pFrameRGB
           // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
           // of AVPicture
           avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24,
               pCodecCtx->width, pCodecCtx->height);

           if (av_read_frame(pFormatCtx, &packet) >= 0) {
               // Is this a packet from the video stream?
               if (packet.stream_index == videoStream) {
                   // Decode video frame
                   int frameFinished;
                   avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

                   if (frameFinished) {
                       // Convert the image from its native format to RGB
                       sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                           pFrame->linesize, 0, pCodecCtx->height,
                           pFrameRGB->data, pFrameRGB->linesize);

                       VideoFrame vf;
                       vf.frame = pFrameRGB;
                       vf.pts = av_frame_get_best_effort_timestamp(pFrame) * time_base;
                       frameQueue.enqueue(vf);
                   }
               }
           }
           av_frame_unref(pFrame);
           av_frame_free(&pFrame);
           av_packet_unref(&packet);
           av_free_packet(&packet);
       }
    }

    Solved: It turned out the leaks were happening when the packet came from a non-video stream (e.g. audio). I also needed to free resources on frames that are skipped in the while-loop of GrabAVIFrame().
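
    In outline, the fix amounts to two changes. The sketch below is reconstructed from that summary rather than from the poster’s final code, and assumes the same class members (pFormatCtx, pCodecCtx, sws_ctx, videoStream, time_base, frameQueue, clock_pts). First, in frameReaderThreadFunc(), pFrameRGB and its buffer must be released whenever the decoded frame is not handed off to the queue, which is exactly what happens for audio and other non-video packets. Second, in GrabAVIFrame(), frames dequeued merely to catch up to the clock must be released too, not just the previously displayed one.

    // In frameReaderThreadFunc(): free everything that was not enqueued.
    bool enqueued = false;
    if (av_read_frame(pFormatCtx, &packet) >= 0) {
        if (packet.stream_index == videoStream) {
            int frameFinished;
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            if (frameFinished) {
                sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                    pFrame->linesize, 0, pCodecCtx->height,
                    pFrameRGB->data, pFrameRGB->linesize);
                VideoFrame vf;
                vf.frame = pFrameRGB;
                vf.pts = av_frame_get_best_effort_timestamp(pFrame) * time_base;
                frameQueue.enqueue(vf);   // the queue now owns pFrameRGB and buffer
                enqueued = true;
            }
        }
        av_free_packet(&packet);          // audio and other packets included
    }
    av_frame_free(&pFrame);
    if (!enqueued) {
        av_free(buffer);                  // would otherwise leak on every non-video packet
        av_frame_free(&pFrameRGB);
    }

    // In GrabAVIFrame(), after the empty() check: release frames skipped
    // while catching up to the clock, not just the last one kept.
    VideoFrame videoFrame = frameQueue.dequeue();
    while (!frameQueue.empty() && frameQueue.top().pts < clock_pts) {
        av_free(videoFrame.frame->data[0]);   // the avpicture_fill() buffer
        av_frame_free(&videoFrame.frame);
        videoFrame = frameQueue.dequeue();
    }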