
Media (1)
-
1 000 000 (wav version)
26 September 2011, by
Updated: April 2013
Language: English
Type: Audio
Other articles (80)
-
Improving the base version
13 September 2013
A nicer multiple select
The Chosen plugin improves the usability of multiple-select form fields. See the two images below for a comparison.
To do so, simply activate the Chosen plugin (General site configuration > Plugin management), then configure it (Templates > Chosen) by enabling Chosen on the public site and specifying which form elements to enhance, for example select[multiple] for multiple-select lists (...)
-
Emballe médias: what is it for?
4 February 2011, by
This plugin is designed to manage sites for publishing documents of all kinds.
It creates "media" items, namely: a "media" item is an article in the SPIP sense, created automatically when a document is uploaded, whether audio, video, image or text; only a single document can be linked to a "media" article;
-
Taking part in its translation
10 April 2011
You can help us improve the wording used in the software, or translate it into any new language so it can reach new linguistic communities.
To do so, we use SPIP's translation interface, where all of MediaSPIP's language modules are available. You just need to subscribe to the translators' mailing list to ask for more information.
Currently MediaSPIP is only available in French and (...)
On other sites (8041)
-
ffmpeg : remove unnecessary hack for decoders which refuse to drain
2 March 2017, by wm4
-
Restreaming an rtsp stream through ffmpeg on iOS
6 April 2017, by animaonline
I have an iOS application that displays an RTSP stream from an IP camera on the local network. I would like to restream it in real time to an external server (Wowza, to be specific); the server will take care of converting RTSP to HLS so that users can view the live broadcast on their devices.
On a computer it would be pretty straightforward:
ffmpeg [input-options] -i [input-file] [output-options] [output-stream-URI]
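A concrete form of that command might look like this, for example (the camera URL, Wowza host, application and stream names are made-up placeholders):

ffmpeg -rtsp_transport tcp -i rtsp://192.168.1.10:554/stream1 -c copy -f rtsp rtsp://wowza.example.com:1935/live/myStream

Here -rtsp_transport tcp pulls the camera feed over TCP, -c copy forwards the stream without re-encoding, and -f rtsp pushes it to the server's RTSP ingest.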
But I need to do it programmatically on iOS, and I'm not really sure if it's even possible. Anyone?
-
C++ ffmpeg and SDL2 video rendering memory leak
10 April 2017, by kj192
I have made a small program that plays a video using SDL 2.0 and FFmpeg.
The software works and serves its purpose.
However, after leaving it running I noticed huge memory consumption, and started looking online for what I could do about it.
I used the following tutorials:
http://www.developersite.org/906-59411-FFMPEG
http://ardrone-ailab-u-tokyo.blogspot.co.uk/2012/07/212-ardrone-20-video-decording-ffmpeg.html
I wonder if someone can advise on what I am doing wrong. I have tried valgrind but I can't find any useful information. I did try commenting out sections, and from what I have seen, even when I'm not rendering to the display the memory usage keeps growing, and after delete something is still not freed up:
if (av_read_frame(pFormatCtx, &packet) >= 0)
The whole source code is here:
main.cpp:
#include <unistd.h> // for sysconf()
#include <ios>
#include <iostream>
#include <fstream>
#include <string>
#include <SDL2/SDL.h>
#include "video.h"
using namespace std;
void memory()
{
using std::ios_base;
using std::ifstream;
using std::string;
double vm_usage = 0.0;
double resident_set = 0.0;
// 'file' stat seems to give the most reliable results
//
ifstream stat_stream("/proc/self/stat",ios_base::in);
// dummy vars for leading entries in stat that we don't care about
//
string pid, comm, state, ppid, pgrp, session, tty_nr;
string tpgid, flags, minflt, cminflt, majflt, cmajflt;
string utime, stime, cutime, cstime, priority, nice;
string O, itrealvalue, starttime;
// the two fields we want
//
unsigned long vsize;
long rss;
stat_stream >> pid >> comm >> state >> ppid >> pgrp >> session >> tty_nr
>> tpgid >> flags >> minflt >> cminflt >> majflt >> cmajflt
>> utime >> stime >> cutime >> cstime >> priority >> nice
>> O >> itrealvalue >> starttime >> vsize >> rss; // don't care about the rest
stat_stream.close();
long page_size_kb = sysconf(_SC_PAGE_SIZE) / 1024; // in case x86-64 is configured to use 2MB pages
vm_usage = vsize / 1024.0;
resident_set = rss * page_size_kb;
std::cout<<"VM: " << vm_usage << " RE:"<< resident_set << std::endl;
}
int main()
{
//This example using 1280x800 video
av_register_all();
if( SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER ))
{
fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
exit(1);
}
SDL_Window* sdlWindow = SDL_CreateWindow("Video Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, 1280, 800, SDL_WINDOW_OPENGL);
if( !sdlWindow )
{
fprintf(stderr, "SDL: could not set video mode - exiting\n");
exit(1);
}
SDL_Renderer* sdlRenderer = SDL_CreateRenderer(sdlWindow, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC | SDL_RENDERER_TARGETTEXTURE);
SDL_Texture* sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING, 1280, 800);
if(!sdlTexture)
{
return -1;
}
SDL_SetTextureBlendMode(sdlTexture,SDL_BLENDMODE_BLEND );
//VIDEO RESOLUTION
SDL_Rect sdlRect;
sdlRect.x = 0;
sdlRect.y = 0;
sdlRect.w = 1280;
sdlRect.h = 800;
memory();
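    // Play the same file five times; the memory() calls before and after
    // each iteration show whether a full create/play/delete cycle leaks.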
for(int i = 1; i < 6; i++)
{
memory();
video* vid = new video("vid.mp4");
while (!vid -> getFinished())
{
memory();
vid -> Update(sdlTexture);
SDL_RenderCopy(sdlRenderer,sdlTexture,&sdlRect,&sdlRect);
SDL_RenderPresent(sdlRenderer);
}
delete vid;
memory();
}
SDL_DestroyTexture(sdlTexture);
SDL_DestroyRenderer(sdlRenderer);
SDL_DestroyWindow(sdlWindow);
SDL_Quit();
return 0;
}
video.cpp:
#include "video.h"
video::video(const std::string& name) : _finished(false)
{
av_register_all();
pFormatCtx = NULL;
pCodecCtxOrig = NULL;
pCodecCtx = NULL;
pCodec = NULL;
pFrame = NULL;
sws_ctx = NULL;
if (avformat_open_input(&pFormatCtx, name.c_str(), NULL, NULL) != 0)
{
_finished = true; // Couldn't open file
}
// Retrieve stream information
if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
{
_finished = true; // Couldn't find stream information
}
videoStream = -1;
for (i = 0; i < pFormatCtx->nb_streams; i++)
{
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
videoStream = i;
break;
}
}
if (videoStream == -1)
{
_finished = true; // Didn't find a video stream
}
// Get a pointer to the codec context for the video stream
pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id);
if (pCodec == NULL)
{
fprintf(stderr, "Unsupported codec!\n");
_finished = true; // Codec not found
}
pCodecCtx = avcodec_alloc_context3(pCodec);
if (avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0)
{
fprintf(stderr, "Couldn't copy codec context");
_finished = true; // Error copying codec context
}
// Open codec
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{
_finished = true; // Could not open codec
}
// Allocate video frame
pFrame = av_frame_alloc();
sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
AV_PIX_FMT_YUV420P,
SWS_BILINEAR,
NULL,
NULL,
NULL);
yPlaneSz = pCodecCtx->width * pCodecCtx->height;
uvPlaneSz = pCodecCtx->width * pCodecCtx->height / 4;
yPlane = (Uint8*)malloc(yPlaneSz);
uPlane = (Uint8*)malloc(uvPlaneSz);
vPlane = (Uint8*)malloc(uvPlaneSz);
if (!yPlane || !uPlane || !vPlane)
{
fprintf(stderr, "Could not allocate pixel buffers - exiting\n");
exit(1);
}
uvPitch = pCodecCtx->width / 2;
}
void video::Update(SDL_Texture* texture)
{
if (av_read_frame(pFormatCtx, &packet) >= 0)
{
// Is this a packet from the video stream?
if (packet.stream_index == videoStream)
{
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
// Did we get a video frame?
if (frameFinished)
{
AVPicture pict;
pict.data[0] = yPlane;
pict.data[1] = uPlane;
pict.data[2] = vPlane;
pict.linesize[0] = pCodecCtx->width;
pict.linesize[1] = uvPitch;
pict.linesize[2] = uvPitch;
// Convert the image into YUV format that SDL uses
sws_scale(sws_ctx, (uint8_t const * const *) pFrame->data,pFrame->linesize, 0, pCodecCtx->height, pict.data,pict.linesize);
SDL_UpdateYUVTexture(texture,NULL,yPlane,pCodecCtx->width,uPlane,uvPitch,vPlane,uvPitch);
}
}
// Free the packet that was allocated by av_read_frame
av_packet_unref(&packet);
av_freep(&packet);
}
else
{
av_packet_unref(&packet);
av_freep(&packet);
_finished = true;
}
}
bool video::getFinished()
{
return _finished;
}
video::~video()
{
av_packet_unref(&packet);
av_freep(&packet);
av_frame_free(&pFrame);
av_freep(&pFrame);
free(yPlane);
free(uPlane);
free(vPlane);
// Close the codec
avcodec_close(pCodecCtx);
avcodec_close(pCodecCtxOrig);
sws_freeContext(sws_ctx);
// Close the video file
for (int i = 0; i < pFormatCtx->nb_streams; i++)
{
AVStream *stream = pFormatCtx->streams[i];
avcodec_close(stream->codec);
}
avformat_close_input(&pFormatCtx);
/*av_dict_free(&optionsDict);
sws_freeContext(sws_ctx);
av_free_packet(&packet);
av_free(pFrameYUV);
av_free(buffer);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);*/
}
video.h:
#include <string>
#include <SDL2/SDL.h>
#ifdef __cplusplus
extern "C" {
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
}
#endif
class video
{
private:
bool _finished;
AVFormatContext *pFormatCtx;
int videoStream;
unsigned i;
AVCodecContext *pCodecCtxOrig;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame;
AVPacket packet;
int frameFinished;
struct SwsContext *sws_ctx;
Uint8 *yPlane, *uPlane, *vPlane;
size_t yPlaneSz, uvPlaneSz;
int uvPitch;
public:
video(const std::string& name);
~video();
void Update(SDL_Texture* texture);
bool getFinished();
};
I'm looking forward to your answers.
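One plausible culprit in the code above: avcodec_close() does not free a context obtained from avcodec_alloc_context3() (that requires avcodec_free_context()), and calling av_freep(&packet) on a stack-allocated AVPacket is incorrect, since av_packet_unref() alone releases the data filled in by av_read_frame(). A minimal sketch of a destructor that pairs each allocation with its matching release, assuming the same member names as in video.h above:

video::~video()
{
    // Packets read by av_read_frame() are released per-call with
    // av_packet_unref(); the stack AVPacket itself must not be freed.
    av_frame_free(&pFrame);            // frees the frame and NULLs the pointer
    avcodec_close(pCodecCtx);          // close the opened decoder...
    avcodec_free_context(&pCodecCtx);  // ...then free the context from avcodec_alloc_context3()
    sws_freeContext(sws_ctx);          // free the scaler context
    free(yPlane);                      // plane buffers came from malloc()
    free(uPlane);
    free(vPlane);
    avformat_close_input(&pFormatCtx); // also tears down the demuxer-owned stream data
}

This is only a sketch, not a verified fix; the point is that every alloc-style call in the constructor needs its exact counterpart, and pCodecCtxOrig (owned by the format context) is released by avformat_close_input() rather than closed by hand.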