
Other articles (35)

  • Customize by adding your logo, banner, or background image

    5 September 2013, by

    Some themes support three customization elements: adding a logo; adding a banner; adding a background image.

  • Publishing on MediaSPIP

    13 June 2013

    Can I post content from an iPad tablet?
    Yes, if your MediaSPIP installation is at version 0.2 or higher. If in doubt, contact the administrator of your MediaSPIP to find out.

  • Images

    15 May 2013

On other sites (6338)

  • Detecting Successful Conversion with ffmpeg

    22 February 2017, by J M

    I have code that scans a file system for video files encoded with H.264 and re-encodes them with H.265. It runs pretty much on its own constantly, generating various log files for me to review periodically.

    One thing that I want to further improve is the successful-conversion detection. Right now, a file is reported as successfully converted after it meets these criteria/checks:

    1. The output file exists
    2. ffprobe detects that the output file is in HEVC format
    3. The duration of the output file matches that of the input file (within 3 seconds); checks 2 and 3 are sketched with ffprobe after this list
    4. The size of the output file is greater than 30 MB (a video short enough to end up smaller than this is rare; a file that small usually means an error occurred or the conversion terminated early)
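
    For what it's worth, a minimal sketch of checks 2 and 3 using plain ffprobe calls (the file names here are placeholders):

       # Check 2: the first video stream of the output should report hevc
       ffprobe -v error -select_streams v:0 -show_entries stream=codec_name -of default=noprint_wrappers=1:nokey=1 output.mkv

       # Check 3: compare container durations (in seconds) of input and output
       ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 input.mkv
       ffprobe -v error -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 output.mkv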

    Obviously, this is rather computationally intensive, as there are many file checks needed to confirm all of this information. I do it because when a file is detected as successfully converted, the old file is overwritten and the new converted file takes its place. I don't want to overwrite a file because I overlooked a scenario where I think the conversion succeeded but it in fact did not. The files are under continuous CrashPlan backup, so I don't lose them, but I also do not go through and review every file.

    So, my basic question is whether you see any area of improvement for this detection. My goal is to determine, as best I can, whether the video remains "playable" after conversion. Deciding programmatically how to define and test that is what I'm attempting to do.
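
    One further idea, not from the checklist above: have ffmpeg decode the entire output while logging only errors; an empty log is decent evidence that every frame is playable, at the cost of a full decode pass:

       ffmpeg -v error -i output.mkv -f null - 2>decode_errors.log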

    I can post code if you want it (PowerShell), but the question seems independent of the actual programming language.

  • AWS Lambda function to modify video

    4 February 2017, by Gold Fish

    I want to create a Lambda function that is invoked whenever someone uploads to the S3 bucket. The purpose of the function is to take the uploaded file and, if it is a video file (mp4), make a new file which is a preview of the original (using ffmpeg). The Lambda function is written in Node.js.
    I took the code here for reference, but I am doing something wrong, because I get an error saying that no input was specified for SetStartTime:

    // dependencies
    var async = require('async');
    var AWS = require('aws-sdk');
    var util = require('util');
    var ffmpeg = require('fluent-ffmpeg');

    // get reference to S3 client
    var s3 = new AWS.S3();


    exports.handler = function(event, context, callback) {
       // Read options from the event.
       console.log("Reading options from event:\n", util.inspect(event, {depth: 5}));
       var srcBucket = event.Records[0].s3.bucket.name;
       // Object key may have spaces or unicode non-ASCII characters.
       var srcKey    =
       decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, " "));  
       var dstBucket = srcBucket;
       var dstKey    = "preview_" + srcKey;

       // Sanity check: validate that source and destination are different buckets.
       if (srcBucket == dstBucket) {
           callback("Source and destination buckets are the same.");
           return;
       }

       // Infer the video type.
       var typeMatch = srcKey.match(/\.([^.]*)$/);
       if (!typeMatch) {
           callback("Could not determine the video type.");
           return;
       }
       var videoType = typeMatch[1];
       if (videoType != "mp4") {
           callback(`Unsupported video type: ${videoType}`);
           return;
       }

       // Download the video from S3, transform, and upload to a different S3 bucket.
       async.waterfall([
           function download(next) {
               // Download the video from S3 into a buffer.
               s3.getObject({
                       Bucket: srcBucket,
                       Key: srcKey
                   },
                   next);
               },
           function transform(response, next) {
               console.log("response.Body:\n", response.Body);
               ffmpeg(response.Body)
                   .setStartTime('00:00:03')
                   .setDuration('10')   //.output('public/videos/test/test.mp4')
                   .toBuffer(videoType, function(err, buffer) {
                       if (err) {
                           next(err);
                       } else {
                           next(null, response.ContentType, buffer);
                       }
                   });
           },
           function upload(contentType, data, next) {
               // Upload the transformed video to the destination S3 bucket.
               s3.putObject({
                       Bucket: dstBucket,
                       Key: dstKey,
                       Body: data,
                       ContentType: contentType
                   },
                   next);
               }
           ], function (err) {
               if (err) {
                   console.error(
                       'Unable to modify ' + srcBucket + '/' + srcKey +
                       ' and upload to ' + dstBucket + '/' + dstKey +
                       ' due to an error: ' + err
                   );
               } else {
                   console.log(
                    'Successfully modified ' + srcBucket + '/' + srcKey +
                       ' and uploaded to ' + dstBucket + '/' + dstKey
                   );
               }

               callback(null, "message");
           }
       );
    };

    So what am I doing wrong?
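
    A plausible cause, offered as an educated guess from the fluent-ffmpeg API rather than anything the question confirms: fluent-ffmpeg takes a file path or a readable stream as input, not the raw Buffer that s3.getObject() returns in response.Body, and it has no toBuffer() output method; toBuffer() belongs to the gm library used in the S3 image-resizing example this code appears to mirror. A common workaround is to stage both files in /tmp, the only writable directory in Lambda. Below is a sketch of a replacement transform step, with hypothetical temp-file names:

    var fs = require('fs');

    function transform(response, next) {
        // Hypothetical staging paths; /tmp is Lambda's only writable directory
        var inputPath = '/tmp/input.mp4';
        var outputPath = '/tmp/preview.mp4';
        fs.writeFileSync(inputPath, response.Body);

        ffmpeg(inputPath)
            .setStartTime('00:00:03')
            .setDuration('10')
            .on('end', function() {
                // Hand the rendered preview to the upload step
                next(null, response.ContentType, fs.readFileSync(outputPath));
            })
            .on('error', function(err) {
                next(err);
            })
            .save(outputPath);   // save() is shorthand for output(outputPath).run()
    }

    Note that this also assumes an ffmpeg binary is available in the Lambda environment, which the stock Node.js runtime does not provide on its own.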

  • Memory leak when using ffmpeg

    21 January 2017, by se210

    I have implemented a class which spawns a thread for reading and queuing frames, while the main thread displays these frames via OpenGL. I try to free the allocated memory after binding the image data to an OpenGL texture, but it seems some memory is not freed properly. Memory usage keeps growing until the system runs out of memory, and eventually the frame reader thread cannot grab new frames due to memory-allocation failure. Would someone please help me figure out what I might have missed? Thank you.

    This is the code for the frame reader thread:

    void AVIReader::frameReaderThreadFunc()
    {
       AVPacket packet;

       while (readFrames) {
           // Allocate necessary memory
           AVFrame* pFrame = av_frame_alloc();
           if (pFrame == nullptr)
           {
               continue;
           }

           AVFrame* pFrameRGB = av_frame_alloc();
           if (pFrameRGB == nullptr)
           {
               av_frame_free(&pFrame);
               continue;
           }

           // Determine required buffer size and allocate buffer
           int numBytes = avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width,
               pCodecCtx->height);
           uint8_t* buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

           if (buffer == nullptr)
           {
               av_frame_free(&pFrame);
               av_frame_free(&pFrameRGB);
               continue;
           }

           // Assign appropriate parts of buffer to image planes in pFrameRGB
           // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
           // of AVPicture
           avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24,
               pCodecCtx->width, pCodecCtx->height);

           if (av_read_frame(pFormatCtx, &packet) >= 0) {
               // Is this a packet from the video stream?
               if (packet.stream_index == videoStream) {
                   // Decode video frame
                   int frameFinished;
                   avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

                   if (frameFinished) {
                       // Convert the image from its native format to RGB
                       sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                           pFrame->linesize, 0, pCodecCtx->height,
                           pFrameRGB->data, pFrameRGB->linesize);

                       VideoFrame vf;
                       vf.frame = pFrameRGB;
                       vf.pts = av_frame_get_best_effort_timestamp(pFrame) * time_base;
                       frameQueue.enqueue(vf);

                       av_frame_unref(pFrame);
                       av_frame_free(&pFrame);
                   }
               }
               //av_packet_unref(&packet);
               av_free_packet(&packet);
           }
       }
    }

    This is the code that grabs the queued frames and binds them to an OpenGL texture. I explicitly keep the previous frame around until I switch it out for the next frame; otherwise, it seems to cause a segfault.

    void AVIReader::GrabAVIFrame()
    {
       if (curFrame.pts >= clock_pts)
       {
           return;
       }

       if (frameQueue.empty())
           return;

       // Get a frame from the queue
       VideoFrame videoFrame = frameQueue.top();
       while (!frameQueue.empty() && frameQueue.top().pts < clock_pts)
       {
           videoFrame = frameQueue.dequeue();
       }

       glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, videoFrame.frame->data[0]);

       // release previous frame
       if (curFrame.frame)
       {
           av_free(curFrame.frame->data[0]);
       }
       av_frame_unref(curFrame.frame);

       // set current frame to new frame
       curFrame = videoFrame;
    }

    The frameQueue is a thread-safe priority queue that holds VideoFrame, defined as:

    class VideoFrame {
    public:
       AVFrame* frame;
       double pts;
    };

    Update: There was a silly error in the ordering of setting the current frame to the new frame. I forgot to switch it back after trying some things out. I also incorporated @ivan_onys's suggestion, but that does not seem to fix the problem.


    Update 2: I adopted @Al Bundy's suggestion to release pFrame and packet unconditionally, but the issue still persists.

    Since buffer is what contains the actual image data which needs to be used in glTexSubImage2D(), I cannot release it until I am done displaying it on the screen (otherwise I get a segfault). avpicture_fill() assigns frame->data[0] = buffer, so I think calling av_free(curFrame.frame->data[0]); on the previous frame after texture-mapping the new frame should release the allocated buffer.

    Here is the updated frame reader thread code:

    void AVIReader::frameReaderThreadFunc()
    {
       AVPacket packet;

       while (readFrames) {
           // Allocate necessary memory
           AVFrame* pFrame = av_frame_alloc();
           if (pFrame == nullptr)
           {
               continue;
           }

           AVFrame* pFrameRGB = av_frame_alloc();
           if (pFrameRGB == nullptr)
           {
               av_frame_free(&pFrame);
               continue;
           }

           // Determine required buffer size and allocate buffer
           int numBytes = avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width,
               pCodecCtx->height);
           uint8_t* buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

           if (buffer == nullptr)
           {
               av_frame_free(&pFrame);
               av_frame_free(&pFrameRGB);
               continue;
           }

           // Assign appropriate parts of buffer to image planes in pFrameRGB
           // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
           // of AVPicture
           avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24,
               pCodecCtx->width, pCodecCtx->height);

           if (av_read_frame(pFormatCtx, &packet) >= 0) {
               // Is this a packet from the video stream?
               if (packet.stream_index == videoStream) {
                   // Decode video frame
                   int frameFinished;
                   avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

                   if (frameFinished) {
                       // Convert the image from its native format to RGB
                       sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                           pFrame->linesize, 0, pCodecCtx->height,
                           pFrameRGB->data, pFrameRGB->linesize);

                       VideoFrame vf;
                       vf.frame = pFrameRGB;
                       vf.pts = av_frame_get_best_effort_timestamp(pFrame) * time_base;
                       frameQueue.enqueue(vf);
                   }
               }
           }
           av_frame_unref(pFrame);
           av_frame_free(&pFrame);
           av_packet_unref(&packet);
           av_free_packet(&packet);
       }
    }

    Solved : It turned out the leaks were happening when the packet was from a non-video stream (e.g. audio). I also needed to free resources on frames that are skipped in the while-loop of GrabAVIFrame().