Media (0)

Word: - Tags -/flash

No media matching your criteria is available on this site.

Other articles (46)

  • Keeping control of your media in your hands

    13 April 2011, by

    The vocabulary used on this site, and around MediaSPIP in general, aims to avoid references to Web 2.0 and to the companies that profit from media sharing.
    While using MediaSPIP, you are invited to avoid using words like "Brand", "Cloud" and "Market".
    MediaSPIP is designed to facilitate the sharing of creative media online, while allowing authors to retain complete control of their work.
    MediaSPIP aims to be accessible to as many people as possible, and development is based on expanding the (...)

  • XMP PHP

    13 May 2011, by

    According to Wikipedia, XMP means:
    Extensible Metadata Platform, or XMP, is an XML-based metadata format used in PDF, photography and graphics applications. It was launched by Adobe Systems in April 2001, integrated into version 5.0 of Adobe Acrobat.
    Being based on XML, it handles a set of dynamic tags for use in the context of the Semantic Web.
    XMP makes it possible to record information about a file as an XML document: title, author, history (...)
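
    As an illustration (a hand-written sketch, not an excerpt from the article; the title and author values are placeholders), an XMP packet carrying a title and an author is plain RDF/XML and could look roughly like this:

     <x:xmpmeta xmlns:x="adobe:ns:meta/">
       <rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
         <rdf:Description rdf:about=""
                          xmlns:dc="http://purl.org/dc/elements/1.1/">
           <!-- Title and author expressed with Dublin Core properties -->
           <dc:title>
             <rdf:Alt>
               <rdf:li xml:lang="x-default">Example title</rdf:li>
             </rdf:Alt>
           </dc:title>
           <dc:creator>
             <rdf:Seq>
               <rdf:li>Example author</rdf:li>
             </rdf:Seq>
           </dc:creator>
         </rdf:Description>
       </rdf:RDF>
     </x:xmpmeta>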

  • Submit bugs and patches

    13 April 2011

    Unfortunately, software is never perfect.
    If you think you have found a bug, report it using our ticket system. Please help us fix it by providing the following information: the browser you are using, including the exact version; as precise an explanation of the problem as possible; if possible, the steps taken that led to the problem; and a link to the site / page in question.
    If you think you have solved the bug, fill in a ticket and attach a corrective patch to it.
    You may also (...)

On other sites (5335)

  • Download, modify and upload video with S3

    9 February 2017, by Gold Fish

    I’m trying to write a Lambda function in Node.js that will download a file from an S3 bucket, modify it, and then upload it back to another S3 bucket. For some reason, the algorithm prints the ’Modify Video’ log and then finishes and exits without error. What am I doing wrong? (A hedged note on one possible cause follows the code below.)

    var AWS = require('aws-sdk');
    var util = require('util');
    var ffmpeg = require('fluent-ffmpeg');
    var s3 = require('s3');

    // get reference to S3 client

    var awsS3Client = new AWS.S3();
    var options = {
     s3Client: awsS3Client,
    };
    var client = s3.createClient(options);

    exports.handler = function(event, context, callback) {
       // Read options from the event.
       console.log("Reading options from event:\n", util.inspect(event, {depth: 5}));
       var srcBucket = event.Records[0].s3.bucket.name;
       // Object key may have spaces or unicode non-ASCII characters.
       var srcKey    =
       decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, " "));
       var dstBucket = srcBucket + "-dst";
       var dstKey    = "mod_" + srcKey;

       var dwnld_file_name = '/tmp/vid.mp4';
       var mdfy_file_name = '/tmp/mod_vid.mp4';

       // Sanity check: validate that source and destination are different buckets.
       if (srcBucket == dstBucket) {
           callback("Source and destination buckets are the same.");
           return;
       }

       // Infer the video type.
       var typeMatch = srcKey.match(/\.([^.]*)$/);
       if (!typeMatch) {
           callback("Could not determine the video type.");
           return;
       }
       var videoType = typeMatch[1];
       if (videoType != "mp4") {
           callback(`Unsupported video type: ${videoType}`);
           return;
       }
       console.log("Source bucket: ", srcBucket);
       console.log("srcKey: ", srcKey);
       console.log("Dest bucket: ", dstBucket);
       console.log("dstKey: ", dstKey);

       var params = {
         localFile: dwnld_file_name,

         s3Params: {
           Bucket: srcBucket,
           Key: srcKey,
         },
       };
       console.log("params for download: ", params);
       var downloader = client.downloadFile(params);
       downloader.on('error', function(err) {
         console.error("unable to download:", err.stack);
         callback("unable to download");
       });
       downloader.on('end', function() {
         console.log("done downloading");
         console.log("modify video");
         ffmpeg(dwnld_file_name)
             .setStartTime('00:00:01')
             .setDuration('1').output(mdfy_file_name).on('end', function() {
               console.log('Finished processing');
               params = {
                 localFile: mdfy_file_name,
                 //localFile: dwnld_file_name,
                 s3Params: {
                   Bucket: dstBucket,
                   Key: dstKey,
                 },
               };
               console.log("params for upload: ", params);
               var uploader = client.uploadFile(params);
               uploader.on('error', function(err) {
                 console.error("unable to upload:", err.stack);
                 callback("unable to upload");
               });
               uploader.on('end', function() {
                 console.log("done uploading");
                 callback('done');
                 return;
               });
           });
       });
    };
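
    One detail worth checking, offered as a hedged aside rather than a confirmed diagnosis: with fluent-ffmpeg, building a command with .output() and attaching handlers does not start the conversion by itself; processing only begins once .run() (or the .save() shorthand) is called, so the 'end' handler above may simply never fire. A minimal sketch of the transcoding step, reusing the question's dwnld_file_name, mdfy_file_name and callback:

     // Sketch only: same processing as above, but the command is explicitly
     // started with .run(). Without it, ffmpeg never runs and the handler
     // returns with nothing pending on the event loop.
     ffmpeg(dwnld_file_name)
         .setStartTime('00:00:01')
         .setDuration('1')
         .output(mdfy_file_name)
         .on('error', function(err) {
             console.error('ffmpeg failed:', err);
             callback('ffmpeg failed');
         })
         .on('end', function() {
             console.log('Finished processing');
             // ...upload mdfy_file_name to dstBucket here, as in the original code...
         })
         .run();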
  • Memory leak when using ffmpeg

    21 January 2017, by se210

    I have implemented a class which spawns a thread for reading and queuing frames, while the main thread displays these frames via OpenGL. I try to free the allocated memory after binding the image data to an OpenGL texture, but it seems some memory is not freed properly. The memory usage keeps growing until the system runs out of memory, and eventually the frame reader thread cannot grab new frames due to memory allocation failure. Could someone please point out what I might have missed? Thank you.

    This is the code for the frame reader thread:

    void AVIReader::frameReaderThreadFunc()
    {
       AVPacket packet;

       while (readFrames) {
           // Allocate necessary memory
           AVFrame* pFrame = av_frame_alloc();
           if (pFrame == nullptr)
           {
               continue;
           }

           AVFrame* pFrameRGB = av_frame_alloc();
           if (pFrameRGB == nullptr)
           {
               av_frame_free(&pFrame);
               continue;
           }

           // Determine required buffer size and allocate buffer
           int numBytes = avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width,
               pCodecCtx->height);
           uint8_t* buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

           if (buffer == nullptr)
           {
               av_frame_free(&pFrame);
               av_frame_free(&pFrameRGB);
               continue;
           }

           // Assign appropriate parts of buffer to image planes in pFrameRGB
           // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
           // of AVPicture
           avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24,
               pCodecCtx->width, pCodecCtx->height);
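            // avpicture_fill() points pFrameRGB->data[0] at buffer, so whoever
            // consumes the frame later releases it via av_free(frame->data[0]).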

           if (av_read_frame(pFormatCtx, &packet) >= 0) {
               // Is this a packet from the video stream?
               if (packet.stream_index == videoStream) {
                   // Decode video frame
                   int frameFinished;
                   avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

                   if (frameFinished) {
                       // Convert the image from its native format to RGB
                       sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                           pFrame->linesize, 0, pCodecCtx->height,
                           pFrameRGB->data, pFrameRGB->linesize);

                       VideoFrame vf;
                       vf.frame = pFrameRGB;
                       vf.pts = av_frame_get_best_effort_timestamp(pFrame) * time_base;
                       frameQueue.enqueue(vf);

                       av_frame_unref(pFrame);
                       av_frame_free(&pFrame);
                   }
               }
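                // If the packet was not from the video stream, or the frame was
                // not finished, pFrameRGB and its buffer are neither queued nor
                // freed here -- see the "Solved" note at the end of this post.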
               //av_packet_unref(&packet);
               av_free_packet(&packet);
           }
       }
    }

    This is the code that grabs the queued frames and binds them to an OpenGL texture. I explicitly keep the previous frame until I switch it out with the next frame; otherwise, it seems to cause a segfault.

    void AVIReader::GrabAVIFrame()
    {
       if (curFrame.pts >= clock_pts)
       {
           return;
       }

       if (frameQueue.empty())
           return;

       // Get a packet from the queue
       VideoFrame videoFrame = frameQueue.top();
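        // Frames dequeued and discarded by this loop are not freed here; the
        // author's "Solved" note below says their resources must be released too.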
       while (!frameQueue.empty() && frameQueue.top().pts < clock_pts)
       {
           videoFrame = frameQueue.dequeue();
       }

       glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE, videoFrame.frame->data[0]);

       // release previous frame
       if (curFrame.frame)
       {
           av_free(curFrame.frame->data[0]);
       }
       av_frame_unref(curFrame.frame);

       // set current frame to new frame
       curFrame = videoFrame;
    }

    The frameQueue is a thread-safe priority queue that holds VideoFrame objects, defined as:

    class VideoFrame {
    public:
       AVFrame* frame;
       double pts;
    };

    Update: There was a silly error in the ordering of setting the current frame to the new frame. I forgot to switch it back after trying some things out. I also incorporated @ivan_onys’s suggestion, but that does not seem to fix the problem.


    Update 2: I adopted @Al Bundy’s suggestion to release pFrame and packet unconditionally, but the issue still persists.

    Since buffer is what holds the actual image data that glTexSubImage2D() uses, I cannot release it until I am done displaying it on the screen (otherwise I get a segfault). avpicture_fill() assigns frame->data[0] = buffer, so I think calling av_free(curFrame.frame->data[0]); on the previous frame after texture-mapping the new frame should release the allocated buffer.

    Here is the updated frame reader thread code:

    void AVIReader::frameReaderThreadFunc()
    {
       AVPacket packet;

       while (readFrames) {
           // Allocate necessary memory
           AVFrame* pFrame = av_frame_alloc();
           if (pFrame == nullptr)
           {
               continue;
           }

           AVFrame* pFrameRGB = av_frame_alloc();
           if (pFrameRGB == nullptr)
           {
               av_frame_free(&pFrame);
               continue;
           }

           // Determine required buffer size and allocate buffer
           int numBytes = avpicture_get_size(AV_PIX_FMT_RGB24, pCodecCtx->width,
               pCodecCtx->height);
           uint8_t* buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));

           if (buffer == nullptr)
           {
               av_frame_free(&pFrame);
               av_frame_free(&pFrameRGB);
               continue;
           }

           // Assign appropriate parts of buffer to image planes in pFrameRGB
           // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
           // of AVPicture
           avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24,
               pCodecCtx->width, pCodecCtx->height);

           if (av_read_frame(pFormatCtx, &packet) >= 0) {
               // Is this a packet from the video stream?
               if (packet.stream_index == videoStream) {
                   // Decode video frame
                   int frameFinished;
                   avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

                   if (frameFinished) {
                       // Convert the image from its native format to RGB
                       sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
                           pFrame->linesize, 0, pCodecCtx->height,
                           pFrameRGB->data, pFrameRGB->linesize);

                       VideoFrame vf;
                       vf.frame = pFrameRGB;
                       vf.pts = av_frame_get_best_effort_timestamp(pFrame) * time_base;
                       frameQueue.enqueue(vf);
                   }
               }
           }
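            // Unconditional cleanup of pFrame and packet (per @Al Bundy's
            // suggestion). av_packet_unref() already releases the packet, so the
            // deprecated av_free_packet() call below is redundant. Note that
            // pFrameRGB and its buffer are still not freed when the packet is not
            // from the video stream -- the remaining leak per the "Solved" note.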
           av_frame_unref(pFrame);
           av_frame_free(&pFrame);
           av_packet_unref(&packet);
           av_free_packet(&packet);
       }
    }

    Solved: It turned out the leaks were happening when the packet was from a non-video stream (e.g. audio). I also needed to free resources for the frames that are skipped in the while-loop of GrabAVIFrame().

  • AWS Lambda function for modifying video

    4 February 2017, by Gold Fish

    I want to create a Lambda function that is invoked whenever someone uploads to the S3 bucket. The purpose of the function is to take the uploaded file and, if it is a video file (mp4), make a new file which is a preview of it (using ffmpeg). The Lambda function is written in Node.js.
    I took the code here for reference, but I am doing something wrong, because I get an error saying that no input was specified for SetStartTime (a hedged note on this follows the code):

    //dependecies
    var async = require('async');
    var AWS = require('aws-sdk');
    var util = require('util');
    var ffmpeg = require('fluent-ffmpeg');

    // get reference to S3 client
    var s3 = new AWS.S3();


    exports.handler = function(event, context, callback) {
       // Read options from the event.
       console.log("Reading options from event:\n", util.inspect(event, {depth: 5}));
       var srcBucket = event.Records[0].s3.bucket.name;
       // Object key may have spaces or unicode non-ASCII characters.
       var srcKey    =
       decodeURIComponent(event.Records[0].s3.object.key.replace(/\+/g, " "));  
       var dstBucket = srcBucket;
       var dstKey    = "preview_" + srcKey;

       // Sanity check: validate that source and destination are different buckets.
       if (srcBucket == dstBucket) {
           callback("Source and destination buckets are the same.");
           return;
       }

       // Infer the video type.
       var typeMatch = srcKey.match(/\.([^.]*)$/);
       if (!typeMatch) {
           callback("Could not determine the video type.");
           return;
       }
       var videoType = typeMatch[1];
       if (videoType != "mp4") {
           callback(`Unsupported video type: ${videoType}`);
           return;
       }

       // Download the video from S3, transform, and upload to a different S3 bucket.
       async.waterfall([
           function download(next) {
               // Download the video from S3 into a buffer.
               s3.getObject({
                       Bucket: srcBucket,
                       Key: srcKey
                   },
                   next);
               },
           function transform(response, next) {
           console.log("response.Body:\n", response.Body);
           ffmpeg(response.Body)
               .setStartTime('00:00:03')
               .setDuration('10')   //.output('public/videos/test/test.mp4')
           .toBuffer(videoType, function(err, buffer) {
                           if (err) {
                               next(err);
                           } else {
                               next(null, response.ContentType, buffer);
                           }
                    });
           },
           function upload(contentType, data, next) {
               // Stream the transformed image to a different S3 bucket.
               s3.putObject({
                       Bucket: dstBucket,
                       Key: dstKey,
                       Body: data,
                       ContentType: contentType
                   },
                   next);
               }
           ], function (err) {
               if (err) {
                   console.error(
                       'Unable to modify ' + srcBucket + '/' + srcKey +
                       ' and upload to ' + dstBucket + '/' + dstKey +
                       ' due to an error: ' + err
                   );
               } else {
                   console.log(
                       'Successfully modified ' + srcBucket + '/' + srcKey +
                       ' and uploaded to ' + dstBucket + '/' + dstKey
                   );
               }

               callback(null, "message");
           }
       );
    };

    So what am I doing wrong?
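
    As a hedged aside on the error above (an educated guess, not a confirmed diagnosis): fluent-ffmpeg takes a file path or a readable stream as input rather than a raw Buffer such as response.Body, and it has no .toBuffer() method; that method comes from the gm image library used by the AWS sample this code appears to be adapted from. Below is a sketch of a transform step that works through files in /tmp instead, reusing videoType, ffmpeg and the waterfall's next from the surrounding handler; the temporary file names are made up:

     var fs = require('fs');

     function transform(response, next) {
         // Hypothetical temp paths; Lambda only allows writes under /tmp.
         var inputPath  = '/tmp/input.' + videoType;
         var outputPath = '/tmp/preview.' + videoType;

         // Write the downloaded object to disk so ffmpeg can read it as a file.
         fs.writeFileSync(inputPath, response.Body);

         ffmpeg(inputPath)
             .setStartTime('00:00:03')
             .setDuration('10')
             .on('error', function(err) { next(err); })
             .on('end', function() {
                 // Hand the transcoded bytes to the upload step.
                 next(null, response.ContentType, fs.readFileSync(outputPath));
             })
             .save(outputPath);   // .save() = .output(outputPath) + .run()
     }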