
Media (91)
-
Les Miserables
December 9, 2019
Updated: December 2019
Language: French
Type: Text
-
VideoHandle
November 8, 2019
Updated: November 2019
Language: French
Type: Video
-
Somos millones 1
July 21, 2014
Updated: June 2015
Language: French
Type: Video
-
Un test - mauritanie
April 3, 2014
Updated: April 2014
Language: French
Type: Text
-
Pourquoi Obama lit il mes mails ?
February 4, 2014
Updated: February 2014
Language: French
-
IMG 0222
October 6, 2013
Updated: October 2013
Language: French
Type: Image
Other articles (73)
-
Websites made with MediaSPIP
May 2, 2011
This page lists some websites based on MediaSPIP.
-
Customizing by adding a logo, a banner or a background image
September 5, 2013
Some themes support three customization elements: adding a logo; adding a banner; adding a background image.
-
Creating farms of unique websites
April 13, 2011
MediaSPIP platforms can be installed as a farm, with a single "core" hosted on a dedicated server and used by multiple websites.
This allows (among other things): implementation costs to be shared between several different projects/individuals; rapid deployment of multiple unique sites; creation of groups of like-minded sites, making it possible to browse media in a more controlled and selective environment than the major "open" (...)
On other sites (11250)
-
Picking a frame and an overlapping audio clip from a video
May 28, 2018, by RAVI
I know how to extract frames and audio from a video using ffmpeg. But what I want is to pick a random frame from that video, and then pick a random 1-second audio clip that overlaps in time with the sampled frame.
As of now I am doing this by preprocessing the videos for frames and audio, but that consumes a lot of disk space.
So, is there an easier way of achieving this?
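One way to avoid preprocessing entirely, assuming the container is seekable: pick a random timestamp, seek the demuxer to the keyframe before it, and decode only what overlaps the one-second window. Below is a minimal sketch against the same pre-4.0 libav* API that appears in the next question (sample_at_random_instant is a made-up helper name; stream/codec setup, the actual decoding and AV_NOPTS_VALUE handling are elided):
#include <libavformat/avformat.h>
#include <stdlib.h>
int sample_at_random_instant(const char *path)
{
AVFormatContext *fmt = NULL;
av_register_all();
if(avformat_open_input(&fmt, path, NULL, NULL)!=0) return -1;
if(avformat_find_stream_info(fmt, NULL)<0) return -1;
// random instant t in AV_TIME_BASE (microsecond) units
int64_t t = (int64_t)(((double)rand()/RAND_MAX) * fmt->duration);
// jump to the keyframe at or before t so decoding can start there
av_seek_frame(fmt, -1, t, AVSEEK_FLAG_BACKWARD);
AVPacket pkt;
while(av_read_frame(fmt, &pkt)>=0) {
AVStream *st = fmt->streams[pkt.stream_index];
// real code must guard against pkt.pts == AV_NOPTS_VALUE
int64_t ts = av_rescale_q(pkt.pts, st->time_base, AV_TIME_BASE_Q);
if(st->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
// decode every video packet from the keyframe on;
// the first decoded frame with ts >= t is the sampled frame
} else if(st->codec->codec_type==AVMEDIA_TYPE_AUDIO
&& ts >= t && ts < t+1000000) {
// decode/collect: this audio overlaps [t, t+1s)
}
av_free_packet(&pkt);
if(ts >= t+1000000) break; // past the window, done
}
avformat_close_input(&fmt);
return 0;
}
The same idea works from the command line: run ffmpeg twice with -ss placed before -i (once with -frames:v 1 for the image, once with -t 1 for the audio clip) to sample the same instant with no preprocessing pass.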
-
Display ffmpeg frames on OpenGL texture
October 24, 2014, by naki
I am using Dranger's tutorial01 (ffmpeg) to decode the video and get AVI frames. I want to use OpenGL to display the video: http://dranger.com/ffmpeg/tutorial01.html
The main function is as follows:
int main (int argc, char** argv) {
// opengl stuff
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA);
glutInitWindowSize(800, 600);
glutCreateWindow("Hello GL");
glutReshapeFunc(changeViewport);
glutDisplayFunc(render);
GLenum err = glewInit();
if(GLEW_OK !=err){
fprintf(stderr, "GLEW error");
return 1;
}
glClear(GL_COLOR_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
GLuint texture;
glGenTextures(1, &texture); //Make room for our texture
glBindTexture(GL_TEXTURE_2D, texture);
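// Note: no GL_TEXTURE_MIN_FILTER/GL_TEXTURE_MAG_FILTER is ever set for this
// texture anywhere in this program; see the sketch after the code.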
//ffmpeg stuff
AVFormatContext *pFormatCtx = NULL;
int i, videoStream;
AVCodecContext *pCodecCtx = NULL;
AVCodec *pCodec = NULL;
AVFrame *pFrame = NULL;
AVFrame *pFrameRGB = NULL;
AVPacket packet;
int frameFinished;
int numBytes;
uint8_t *buffer = NULL;
AVDictionary *optionsDict = NULL;
if(argc < 2) {
printf("Please provide a movie file\n");
return -1;
}
// Register all formats and codecs
av_register_all();
// Open video file
if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
return -1; // Couldn't open file
// Retrieve stream information
if(avformat_find_stream_info(pFormatCtx, NULL)<0)
return -1; // Couldn't find stream information
// Dump information about file onto standard error
av_dump_format(pFormatCtx, 0, argv[1], 0);
// Find the first video stream
videoStream=-1;
for(i=0; i<pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
videoStream=i;
break;
}
if(videoStream==-1)
return -1; // Didn't find a video stream
// Get a pointer to the codec context for the video stream
pCodecCtx=pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL) {
fprintf(stderr, "Unsupported codec!\n");
return -1; // Codec not found
}
// Open codec
if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
return -1; // Could not open codec
// Allocate video frame
pFrame=av_frame_alloc();
// Allocate an AVFrame structure
pFrameRGB=av_frame_alloc();
if(pFrameRGB==NULL)
return -1;
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
pCodecCtx->height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
struct SwsContext *sws_ctx = sws_getContext(pCodecCtx->width,
pCodecCtx->height, pCodecCtx->pix_fmt, 800,
600, PIX_FMT_RGB24, SWS_BICUBIC, NULL,
NULL, NULL);
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
pCodecCtx->width, pCodecCtx->height);
// Read frames and save first five frames to disk
i=0;
while(av_read_frame(pFormatCtx, &packet)>=0) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
&packet);
// Did we get a video frame?
if(frameFinished) {
// Convert the image from its native format to RGB
/* sws_scale
(
sws_ctx,
(uint8_t const * const *)pFrame->data,
pFrame->linesize,
0,
pCodecCtx->height,
pFrameRGB->data,
pFrameRGB->linesize
);
*/
sws_scale(sws_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
// additional opengl
glBindTexture(GL_TEXTURE_2D, texture);
//gluBuild2DMipmaps(GL_TEXTURE_2D, 3, pCodecCtx->width, pCodecCtx->height, GL_RGB, GL_UNSIGNED_INT, pFrameRGB->data[0]);
// glTexSubImage2D(GL_TEXTURE_2D, 0, 0,0, 840, 460, GL_RGB, GL_UNSIGNED_BYTE, pFrameRGB->data[0]);
glTexImage2D(GL_TEXTURE_2D, //Always GL_TEXTURE_2D
0, //0 for now
GL_RGB, //Format OpenGL uses for image
pCodecCtx->width, pCodecCtx->height, //Width and height
0, //The border of the image
GL_RGB, //GL_RGB, because pixels are stored in RGB format
GL_UNSIGNED_BYTE, //GL_UNSIGNED_BYTE, because pixels are stored
//as unsigned numbers
pFrameRGB->data[0]); //The actual pixel data
// additional opengl end
// Save the frame to disk
if(++i<=5)
SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height,
i);
}
}
glColor3f(1,1,1);
glBindTexture(GL_TEXTURE_2D, texture);
glBegin(GL_QUADS);
glTexCoord2f(0,1);
glVertex3f(0,0,0);
glTexCoord2f(1,1);
glVertex3f(pCodecCtx->width,0,0);
glTexCoord2f(1,0);
glVertex3f(pCodecCtx->width, pCodecCtx->height,0);
glTexCoord2f(0,0);
glVertex3f(0,pCodecCtx->height,0);
glEnd();
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
// Free the RGB image
av_free(buffer);
av_free(pFrameRGB);
// Free the YUV frame
av_free(pFrame);
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
avformat_close_input(&pFormatCtx);
return 0;
}
Unfortunately I could not find my solution here:
ffmpeg video to opengl texture
The program compiles but does not show any video on the texture; just an OpenGL window is created.
FFmpeg in-memory processing with an output format supported over HTTP [on hold]
September 29, 2018, by Holy_diver
Current implementation: a Java application receives images/videos over HTTP; these are saved to disk so the manipulation can be done, and the output is saved to disk and then copied to the HTTP output stream.
Please suggest ways to cut out the need to save the images on disk. image2pipe is one option, but how do I feed it from a byte[] already present in Java memory? Also, the MP4 output currently needs to be saved to disk first; is there any option that lets me start sending output as soon as the first frame is generated? (Matroska is not supported in browsers.)
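Both requests map to standard ffmpeg mechanisms. A hedged sketch with the libavformat C API (write_to_http and open_streaming_mp4 are made-up names; stream creation, encoding and error handling are elided): a custom AVIOContext hands every finished chunk to a callback instead of a file, and the mov/mp4 muxer's frag_keyframe+empty_moov flags produce fragmented MP4, which needs no seekable trailer and is therefore playable while it is still being generated.
#include <libavformat/avformat.h>
// callback invoked by the muxer whenever output bytes are ready;
// opaque would wrap the HTTP response object
static int write_to_http(void *opaque, uint8_t *buf, int buf_size)
{
// push buf[0..buf_size) to the HTTP output stream here
return buf_size; // report everything as written
}
AVFormatContext *open_streaming_mp4(void)
{
AVFormatContext *oc = NULL;
avformat_alloc_output_context2(&oc, NULL, "mp4", NULL);
// custom I/O: no file, completed chunks go to the callback
unsigned char *iobuf = av_malloc(65536);
oc->pb = avio_alloc_context(iobuf, 65536, 1 /*write*/, NULL,
NULL, write_to_http, NULL);
oc->flags |= AVFMT_FLAG_CUSTOM_IO;
// fragmented MP4: playable as soon as the first fragment exists
AVDictionary *opts = NULL;
av_dict_set(&opts, "movflags", "frag_keyframe+empty_moov", 0);
// ... add streams and open encoders here, then:
avformat_write_header(oc, &opts);
av_dict_free(&opts);
return oc;
}
Staying with the existing Java-plus-CLI design, the closest equivalent is two pipes: write the byte[] images into ffmpeg's stdin (-f image2pipe -i pipe:0) and copy its stdout (-movflags frag_keyframe+empty_moov -f mp4 pipe:1) straight into the HTTP response, with no temporary files on either side.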