
Recherche avancée
Autres articles (81)
-
Gestion générale des documents
13 mai 2011, par — MédiaSPIP ne modifie jamais le document original mis en ligne.
Pour chaque document mis en ligne il effectue deux opérations successives : la création d’une version supplémentaire qui peut être facilement consultée en ligne tout en laissant l’original téléchargeable dans le cas où le document original ne peut être lu dans un navigateur Internet ; la récupération des métadonnées du document original pour illustrer textuellement le fichier ;
Les tableaux ci-dessous expliquent ce que peut faire MédiaSPIP (...) -
Des sites réalisés avec MediaSPIP
2 mai 2011, par — Cette page présente quelques-uns des sites fonctionnant sous MediaSPIP.
Vous pouvez bien entendu ajouter le votre grâce au formulaire en bas de page. -
HTML5 audio and video support
13 avril 2011, par — MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
The MediaSPIP player used has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
For older browsers the Flowplayer flash fallback is used.
MediaSPIP allows for media playback on major mobile platforms with the above (...)
Sur d’autres sites (13601)
-
Revision 31199 : L’utilisation de document.write pouvait poser problème de temps en temps ...
29 août 2009, par kent1@… — Log — L’utilisation de document.write pouvait poser problème de temps en temps ... si jQuery est disponible, on utilise jQuery.getScript pour charger le script
-
FFMpeg DVB Subtitles memory leak
13 janvier 2015, par WLGfx — When decoding the subtitles track from an mpegts udp multicast stream I am getting a memory leak using avcodec_decode_subtitle2. The audio and video streams are fine. And all three streams are manually memory managed by pre-allocating all buffers.
There’s very little information about but I do believe there’s a patch somewhere.
I’m currently using ffmpeg 2.0.4 compiled for armv7-a for android.
In the process I’ve found that video streams are different resolutions, ie, 720x576 or 576x576 which doesn’t matter now as I am rendering the subtitles separately as an overlay on the video. My original decoding function (which is changing to render a separate overlay) is :
void ffProcessSubtitlePacket( AVPacket *pkt )
{
    // Decode one DVB subtitle packet and queue its bitmap rects (CLUT + pixels)
    // for later compositing onto the video frames.
    //
    // FIX: the memory leak reported here is caused by never releasing the
    // AVSubtitle filled in by avcodec_decode_subtitle2(). Per the FFmpeg API,
    // every successfully decoded AVSubtitle owns heap allocations (rects,
    // per-rect pixel and CLUT buffers) that must be freed with
    // avsubtitle_free() once the caller has copied what it needs.
    int got = 0;
    avcodec_decode_subtitle2(ffSubtitleContext, &ffSubtitleFrame, &got, pkt);
    if ( got )
    {
        // Copy every rect out of the decoder-owned AVSubtitle into our own pool.
        for ( int i = 0; i < ffSubtitleFrame.num_rects; i++ )
        {
            ffSubtitle *sub = (ffSubtitle*)mmAlloc(sizeof(ffSubtitle)); // custom pool, collected via mmGarbageCollect()
            if ( sub )
            {
                AVSubtitleRect *rect = ffSubtitleFrame.rects[i];
                AVPicture *pict = &rect->pict;

                // Display window, relative to the packet PTS.
                sub->startPTS = pkt->pts + (uint64_t)ffSubtitleFrame.start_display_time;
                // NOTE(review): end_display_time is in milliseconds; the *500
                // scale factor looks ad hoc — confirm against the clock units
                // used by ffAudioGetPTS().
                sub->endPTS = pkt->pts + (uint64_t)ffSubtitleFrame.end_display_time * (uint64_t)500;
                sub->nb_colors = rect->nb_colors;
                sub->xpos = rect->x;
                sub->ypos = rect->y;
                sub->width = rect->w;
                sub->height = rect->h;

                // Single chunk: CLUT (nb_colors * 4 bytes) followed by the bitmap.
                sub->data = mmAlloc(rect->nb_colors * 4 + rect->w * rect->h);
                if ( sub->data )
                {
                    // data[1] holds the CLUT, data[0] the paletted bitmap.
                    memcpy(sub->data, pict->data[1], rect->nb_colors * 4);
                    memcpy(sub->data + rect->nb_colors * 4, pict->data[0], rect->w * rect->h);

                    // Drop any queued subtitle occupying the same screen area:
                    // this rect supersedes it. (The struct itself is pool
                    // memory, reclaimed by mmGarbageCollect() later.)
                    int pos = ffSubtitles.size();
                    while ( pos-- )
                    {
                        ffSubtitle *old = ffSubtitles[pos];
                        if ( old->xpos == sub->xpos &&
                             old->ypos == sub->ypos &&
                             old->width == sub->width &&
                             old->height == sub->height )
                        {
                            ffSubtitles.erase( ffSubtitles.begin() + pos );
                        }
                    }
                    ffSubtitles.push_back( sub );

                    // Convert the CLUT entries from RGBA to YUV in place
                    // (ITU-R BT.601 integer approximation), alpha untouched.
                    char *dat = sub->data;
                    for ( int c = 0; c < rect->nb_colors; c++ )
                    {
                        // FIX: read the bytes as unsigned — plain char is
                        // signed on most ABIs, so colour components > 127
                        // went negative and produced garbage YUV values.
                        int red   = (unsigned char)dat[0];
                        int green = (unsigned char)dat[1];
                        int blue  = (unsigned char)dat[2];
                        int y = ( (  65 * red + 128 * green +  24 * blue + 128) >> 8) + 16;
                        int u = ( ( -37 * red -  74 * green + 112 * blue + 128) >> 8) + 128;
                        int v = ( ( 112 * red -  93 * green -  18 * blue + 128) >> 8) + 128;
                        *dat++ = (char)y;
                        *dat++ = (char)u;
                        *dat++ = (char)v;
                        dat++; // skip the alpha channel
                    }
                }
                else
                {
                    sub = 0;
                    LOGI("NATIVE FFMPEG SUBTITLE - Memory allocation error CLUT and BITMAP");
                }
            }
            else
            {
                LOGI("NATIVE FFMPEG SUBTITLE - Memory allocation error ffSubtitle struct");
                mmGarbageCollect();
                ffSubtitles.clear();
            }
        }

        // FIX for the reported leak: release everything the decoder allocated
        // inside ffSubtitleFrame (rects, bitmaps, CLUTs). Without this every
        // decoded subtitle leaks its pixel data.
        avsubtitle_free(&ffSubtitleFrame);
    }
}
// Blit any queued subtitle bitmaps into the YUV planes of video buffer `bpos`,
// then erase subtitles whose display window has expired.
// The audio clock (ffAudioGetPTS) is used as the reference time.
void ffSubtitleRenderCheck(int bpos)
{
// Subtitles disabled or no subtitle stream selected: flush everything.
if ( ffSubtitleID == -1 || !usingSubtitles )
{
// empty the list in case of memory leaks
ffSubtitles.clear();
mmGarbageCollect();
return;
}
uint64_t audioPTS = ffAudioGetPTS();
int pos = 0;
// draw the subtitle list to the YUV frames
char *yframe = ffVideoBuffers[bpos].yFrame;
char *uframe = ffVideoBuffers[bpos].uFrame;
char *vframe = ffVideoBuffers[bpos].vFrame;
int ywidth = fv.frameActualWidth; // actual width with padding
int uvwidth = fv.frameAWidthHalf; // and for uv frames
while ( pos < ffSubtitles.size() )
{
ffSubtitle *sub = ffSubtitles[pos];
// NOTE(review): this draws subtitles whose start time is still at or after
// the current audio clock — confirm the comparison isn't inverted
// (one would normally expect audioPTS >= startPTS to begin display).
if ( sub->startPTS >= audioPTS ) // okay to draw this one?
{
// sub->data layout: CLUT (nb_colors * 4 bytes, YUVA) followed by the
// paletted bitmap, as packed by ffProcessSubtitlePacket.
char *clut = sub->data; // colour table
char *dat = clut + sub->nb_colors * 4; // start of bitmap data
int w = sub->width;
int h = sub->height;
int x = sub->xpos;
int y = sub->ypos;
for ( int xpos = 0; xpos < w; xpos++ )
{
for ( int ypos = 0; ypos < h; ypos++ )
{
// get colour for pixel
// NOTE(review): plain char may be signed — a palette index > 127 would
// make bcol negative and index before the CLUT. Verify indices are
// masked upstream or switch to unsigned char.
char bcol = dat[ypos * w + xpos];
if ( bcol != 0 ) // ignore 0 pixels
{
char cluty = clut[bcol * 4 + 0]; // get colours from CLUT
char clutu = clut[bcol * 4 + 1];
char clutv = clut[bcol * 4 + 2];
// draw to Y frame
int newx = x + xpos;
int newy = y + ypos;
yframe[newy * ywidth + newx] = cluty;
// draw to uv frames if we have a quarter pixel only
// (4:2:0 chroma: one U/V sample per 2x2 luma block)
if ( ( newy & 1 ) && ( newx & 1 ) )
{
uframe[(newy >> 1) * uvwidth + (newx >> 1)] = clutu;
vframe[(newy >> 1) * uvwidth + (newx >> 1)] = clutv;
}
}
}
}
}
pos++;
}
// Last thing is to erase timed out subtitles
// (iterate backwards so erase() doesn't shift unvisited elements)
pos = ffSubtitles.size();
while ( pos-- )
{
ffSubtitle *sub = ffSubtitles[pos];
if ( sub->endPTS < audioPTS )
{
// struct memory belongs to the custom pool; reclaimed below
ffSubtitles.erase( ffSubtitles.begin() + pos );
}
}
if ( ffSubtitles.size() == 0 )
{
// garbage collect the custom memory pool
mmGarbageCollect();
}
}Any information would be appreciated or would I have to upgrade to a later version of ffmpeg ?
-
FFMPEG libx265 encoding leaves memory unfreed after avcodec_free_context
28 août 2020, par ahugeat — I am working on H265 encoding software and, in my unit tests, I have some weird memory leaks. To find them, I have modified the
encode_video.c
example from FFMPEG documentation. I have changed the resolution to correspond at a 4K video, I have adapted the bitrate and I have added a pause before context allocation and another one before the final return :

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <libavcodec/avcodec.h>

#include <libavutil/opt.h>
#include <libavutil/imgutils.h>

/*
 * Send `frame` to the encoder (NULL enters flush mode), then drain every
 * packet the encoder produces and write it to `outfile`.
 * Example-program error policy: print a message and exit(1) on failure.
 */
static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
 FILE *outfile)
{
 int ret;

 /* send the frame to the encoder */
 if (frame)
 printf("Send frame %3"PRId64"\n", frame->pts);

 ret = avcodec_send_frame(enc_ctx, frame);
 if (ret < 0) {
 fprintf(stderr, "Error sending a frame for encoding\n");
 exit(1);
 }

 /* one send may yield zero or more packets; EAGAIN/EOF end the drain */
 while (ret >= 0) {
 ret = avcodec_receive_packet(enc_ctx, pkt);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 return;
 else if (ret < 0) {
 fprintf(stderr, "Error during encoding\n");
 exit(1);
 }

 printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);
 /* FIX: fwrite may write fewer bytes than requested; an unchecked short
 * write would silently produce a truncated/corrupt output file */
 if (fwrite(pkt->data, 1, pkt->size, outfile) != (size_t)pkt->size) {
 fprintf(stderr, "Error writing encoded packet to output file\n");
 exit(1);
 }
 av_packet_unref(pkt);
 }
}

/*
 * Modified FFmpeg encode_video.c example used to measure encoder memory:
 * encodes 1 second of 4K dummy YUV420P video with the codec named on the
 * command line, with a 10 s pause before any libav allocation and another
 * before exit so resident memory can be compared.
 */
int main(int argc, char **argv)
{
 const char *filename, *codec_name;
 const AVCodec *codec;
 AVCodecContext *c = NULL;
 int i, ret, x, y;
 FILE *f;
 AVFrame *frame;
 AVPacket *pkt;
 /* MPEG-1/2 sequence end code, appended so the file is a valid MPEG stream */
 uint8_t endcode[] = { 0, 0, 1, 0xb7 };

 if (argc <= 2) {
 /* FIX: usage string was mangled to "<output file=\"file\">" by the
 * HTML scrape; restored to the original example's text */
 fprintf(stderr, "Usage: %s <output file> <codec>\n", argv[0]);
 exit(0);
 }
 filename = argv[1];
 codec_name = argv[2];

 /* pause: snapshot process memory before any libav allocation */
 sleep(10);

 /* find the requested encoder (e.g. libx265) */
 codec = avcodec_find_encoder_by_name(codec_name);
 if (!codec) {
 fprintf(stderr, "Codec '%s' not found\n", codec_name);
 exit(1);
 }

 c = avcodec_alloc_context3(codec);
 if (!c) {
 fprintf(stderr, "Could not allocate video codec context\n");
 exit(1);
 }

 pkt = av_packet_alloc();
 if (!pkt)
 exit(1);

 /* put sample parameters */
 c->bit_rate = 1000000;
 /* resolution must be a multiple of two; 4K to make pool usage visible */
 c->width = 3840;
 c->height = 2160;
 /* frames per second */
 c->time_base = (AVRational){1, 25};
 c->framerate = (AVRational){25, 1};

 /* emit one intra frame every ten frames
 * check frame pict_type before passing frame
 * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
 * then gop_size is ignored and the output of encoder
 * will always be I frame irrespective to gop_size
 */
 c->gop_size = 10;
 c->max_b_frames = 1;
 c->pix_fmt = AV_PIX_FMT_YUV420P;

 if (codec->id == AV_CODEC_ID_H264)
 av_opt_set(c->priv_data, "preset", "slow", 0);

 /* open it */
 ret = avcodec_open2(c, codec, NULL);
 if (ret < 0) {
 fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret));
 exit(1);
 }

 f = fopen(filename, "wb");
 if (!f) {
 fprintf(stderr, "Could not open %s\n", filename);
 exit(1);
 }

 frame = av_frame_alloc();
 if (!frame) {
 fprintf(stderr, "Could not allocate video frame\n");
 exit(1);
 }
 frame->format = c->pix_fmt;
 frame->width = c->width;
 frame->height = c->height;

 ret = av_frame_get_buffer(frame, 0);
 if (ret < 0) {
 fprintf(stderr, "Could not allocate the video frame data\n");
 exit(1);
 }

 /* encode 1 second of video */
 for (i = 0; i < 25; i++) {
 fflush(stdout);

 /* make sure the frame data is writable */
 ret = av_frame_make_writable(frame);
 if (ret < 0)
 exit(1);

 /* prepare a dummy image */
 /* Y */
 for (y = 0; y < c->height; y++) {
 for (x = 0; x < c->width; x++) {
 frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
 }
 }

 /* Cb and Cr */
 for (y = 0; y < c->height/2; y++) {
 for (x = 0; x < c->width/2; x++) {
 frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
 frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
 }
 }

 frame->pts = i;

 /* encode the image */
 encode(c, frame, pkt, f);
 }

 /* flush the encoder */
 encode(c, NULL, pkt, f);

 /* add sequence end code to have a real MPEG file */
 /* FIX: check the fwrite and fclose results — fclose flushes buffered
 * output, so an unchecked failure can silently truncate the file */
 if (codec->id == AV_CODEC_ID_MPEG1VIDEO || codec->id == AV_CODEC_ID_MPEG2VIDEO) {
 if (fwrite(endcode, 1, sizeof(endcode), f) != sizeof(endcode)) {
 fprintf(stderr, "Error writing sequence end code\n");
 exit(1);
 }
 }
 if (fclose(f) == EOF) {
 fprintf(stderr, "Error closing %s\n", filename);
 exit(1);
 }

 avcodec_free_context(&c);
 av_frame_free(&frame);
 av_packet_free(&pkt);

 /* pause: the memory still resident here is not reachable through the
 * FFmpeg API — NOTE(review): libx265 appears to retain internal pools
 * after avcodec_free_context; confirm with upstream before treating it
 * as a leak */
 sleep(10);

 return 0;
}
</codec></output>


I was expecting that the RAM memory usage at the first pause is the same as the second pause but there is about 55 Mo of difference. If I increase the number of encoded frames, this difference up to 390 Mo. I have tested this code under Linux Mint LMDE 4 (roughly same as Debian 10).


I guess this memory "leak" isn't a real memory leak, but rather some internal state kept by libx265, perhaps to be reused for another encoding. But is there a way to free this memory through the FFMPEG API ?