
Recherche avancée
Médias (1)
-
The Great Big Beautiful Tomorrow
28 octobre 2011, par
Mis à jour : Octobre 2011
Langue : English
Type : Texte
Autres articles (60)
-
Librairies et logiciels spécifiques aux médias
10 décembre 2010, par —
Pour un fonctionnement correct et optimal, plusieurs choses sont à prendre en considération.
Il est important, après avoir installé apache2, mysql et php5, d’installer d’autres logiciels nécessaires dont les installations sont décrites dans les liens afférents. Un ensemble de librairies multimédias (x264, libtheora, libvpx) utilisées pour l’encodage et le décodage des vidéos et sons afin de supporter le plus grand nombre de fichiers possibles. Cf. : ce tutoriel ; FFMpeg avec le maximum de décodeurs et (...) -
Installation en mode ferme
4 février 2011, par —
Le mode ferme permet d’héberger plusieurs sites de type MediaSPIP en n’installant qu’une seule fois son noyau fonctionnel.
C’est la méthode que nous utilisons sur cette même plateforme.
L’utilisation en mode ferme nécessite de connaître un peu le mécanisme de SPIP contrairement à la version standalone qui ne nécessite pas réellement de connaissances spécifiques puisque l’espace privé habituel de SPIP n’est plus utilisé.
Dans un premier temps, vous devez avoir installé les mêmes fichiers que l’installation (...) -
Installation en mode standalone
4 février 2011, par —
L’installation de la distribution MediaSPIP se fait en plusieurs étapes : la récupération des fichiers nécessaires. À ce moment là deux méthodes sont possibles : en installant l’archive ZIP contenant l’ensemble de la distribution ; via SVN en récupérant les sources de chaque modules séparément ; la préconfiguration ; l’installation définitive ;
[mediaspip_zip]Installation de l’archive ZIP de MediaSPIP
Ce mode d’installation est la méthode la plus simple afin d’installer l’ensemble de la distribution (...)
Sur d’autres sites (5765)
-
Publish RTMP stream to Red5 Server form iOS camera
7 septembre 2015, par Mohammad Asif
Please look at the following code, I have transformed
CMSampleBufferRef
intoAV_CODEC_ID_H264
but I don’t know how to transmit it to the Red5 server. Thanks,
// AVCaptureVideoDataOutput delegate callback: lazily initializes an FFmpeg H.264
// encoder from the first video sample buffer, then encodes each frame and writes
// the resulting packets to an FLV/RTMP output context.
// NOTE(review): relies on instance state declared elsewhere (codec, context, frame,
// fmt, packet, count, videoOutput, rtmp, isFirstPacket, ofmt_ctx, kRtmpEP).
- (void) captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection {
//NSLog(@"This is working ....");
// [connection setVideoOrientation: [self deviceOrientation] ];
if( !CMSampleBufferDataIsReady(sampleBuffer) )
{
NSLog( @"sample buffer is not ready. Skipping sample" );
return;
} else {
if (captureOutput == videoOutput) {
CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
// NOTE(review): early `return`s below leave this base address locked — the
// unlock at the bottom is skipped on every error path.
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
// access the data
// NOTE(review): CVPixelBufferGetWidth/Height return size_t; storing them in
// float and assigning to context->width/height relies on implicit truncation.
float width = CVPixelBufferGetWidth(pixelBuffer);
float height = CVPixelBufferGetHeight(pixelBuffer);
//float bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
// NOTE(review): this is only plane 0 of the pixel buffer, yet it is later fed
// to avpicture_fill() as a full contiguous YUV420P image — only valid if the
// capture output is configured planar 4:2:0 with contiguous planes; confirm
// against the session's videoSettings.
unsigned char *rawPixelBase = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
// Convert the raw pixel base to h.264 format
// One-time encoder setup, guarded by the (nil) codec pointer.
if (codec == nil) {
codec = 0;
context = 0;
frame = 0;
fmt = avformat_alloc_context();
//avformat_write_header(fmt, NULL);
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (codec == 0) {
NSLog(@"Codec not found!!");
return;
}
context = avcodec_alloc_context3(codec);
if (!context) {
NSLog(@"Context no bueno.");
return;
}
// Bit rate
context->bit_rate = 400000; // HARD CODE
context->bit_rate_tolerance = 10;
// Resolution
// Frames Per Second
context->time_base = (AVRational) {1,25};
// gop_size of 1 forces every frame to be an intra frame.
context->gop_size = 1;
//context->max_b_frames = 1;
context->width = width;
context->height = height;
// NOTE(review): PIX_FMT_YUV420P is the deprecated pre-AV_PIX_FMT_* name.
context->pix_fmt = PIX_FMT_YUV420P;
// Open the codec
if (avcodec_open2(context, codec, 0) < 0) {
NSLog(@"Unable to open codec");
return;
}
// Create the frame
frame = av_frame_alloc();
if (!frame) {
NSLog(@"Unable to alloc frame");
return;
}
}
// Re-assert dimensions on every callback (no-op after the first frame unless
// the capture resolution changes mid-session).
context->width = width;
context->height = height;
frame->format = context->pix_fmt;
frame->width = context->width;
frame->height = context->height;
//int nbytes = avpicture_get_size(context->pix_fmt, context->width, context->height);
//uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes);
// AVFrame *pFrameDecoded = avcodec_alloc_frame();
// int num_bytes2 = avpicture_get_size(context->pix_fmt, frame->width, frame->height);
// uint8_t* frame2_buffer2 = (uint8_t *)av_malloc(num_bytes2 * sizeof(uint8_t));
// avpicture_fill((AVPicture*)pFrameDecoded, frame2_buffer2, PIX_FMT_YUVJ422P, 320, 240);
// NOTE(review): pts is computed in (truncated) seconds-ish units, not in
// context->time_base (1/25) ticks — timestamps will be wrong in the muxer.
frame->pts = (1.0 / 30) * 60 * count;
// Points frame->data[] into the (unconverted) capture buffer; no pixel-format
// conversion (e.g. sws_scale) is performed before encoding.
avpicture_fill((AVPicture *) frame, rawPixelBase, context->pix_fmt, frame->width, frame->height);
int got_output = 0;
av_init_packet(&packet);
//avcodec_encode_video2(context, &packet, frame, &got_output);
do {
// NOTE(review): return codes of avcodec_encode_video2 and
// av_interleaved_write_frame are ignored; packets are never unreffed (leak),
// and the same `frame` is re-submitted each loop iteration instead of NULL
// for flushing.
avcodec_encode_video2(context, &packet, frame, &got_output);
//avcodec_decode_video2(context, &packet, NULL, &got_output);
//*... handle received packet*/
if (isFirstPacket) {
[rtmp sendCreateStreamPacket];
isFirstPacket = false;
//av_dump_format(fmt, 0, [kRtmpEP UTF8String], 1);
avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", [kRtmpEP UTF8String]); //RTMP
}
// NOTE(review): nb_streams is one past the last valid stream index, and no
// stream is ever added (avformat_new_stream) nor is avformat_write_header /
// avio_open called on ofmt_ctx before writing — this write cannot succeed
// as shown.
packet.stream_index = ofmt_ctx->nb_streams;
av_interleaved_write_frame(ofmt_ctx, &packet);
count ++;
//[rtmp write:[NSData dataWithBytes:packet.data length:packet.size]];
} while(got_output);
// Unlock the pixel data
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
//[rtmp write:[NSData dataWithBytes:packet.data length:packet.size]];
} else {
// Non-video outputs (e.g. audio) are ignored.
}
} } -
H264 decoding using ffmpeg
14 juillet 2016, par Kindermann
I’m trying to decode a video stream with the ffmpeg library; this is how I do it, basically :
/*
 * Decodes an H.264 elementary stream by reading raw chunks from a file and
 * feeding them to avcodec_decode_video2 (pre-send/receive FFmpeg API).
 *
 * NOTE(review): `f` is declared but never fopen()'d — the `filename` parameter
 * is unused and fread() below reads through an uninitialized FILE*. This, plus
 * feeding arbitrary read-sized chunks without an AVCodecParser (packets must be
 * NAL-aligned), explains the decode_slice_header / "no frame!" errors quoted
 * after this snippet.
 */
void video_decode(const char *filename)
{
AVCodec *codec;
AVCodecContext *c= NULL;
int frame_count=0;
FILE *f;
AVFrame *frame;
/* Input buffer padded per FFmpeg requirements for the bitstream reader. */
uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
AVPacket avpkt;
av_init_packet(&avpkt);
memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);
printf("Decoding video file...\n");
/* find the h264 video decoder */
/* NOTE(review): neither `codec` nor `c` is NULL-checked before use. */
codec = avcodec_find_decoder(AV_CODEC_ID_H264);
c = avcodec_alloc_context3(codec);
c->bit_rate = 400000;
c->width = 1920;
c->height = 1080;
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
frame = av_frame_alloc();
/* Read fixed-size chunks until EOF and drain each chunk through the decoder. */
for (;;) {
avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
if (avpkt.size == 0)
break;
avpkt.data = inbuf;
while(avpkt.size > 0){
int len, got_frame;
len = avcodec_decode_video2(c, frame, &got_frame, &avpkt);
if (len < 0) {
fprintf(stderr, "Errr while decding frame %d\n", frame_count);
exit (1);
}
if (got_frame) {
/* NOTE(review): frame_count is never incremented. */
//Print out frame information..
}
/* Advance past the bytes the decoder consumed. */
if (avpkt.data) {
avpkt.size -= len;
avpkt.data += len;
}
}
}
/* NOTE(review): no flush (NULL packet), and c/frame are never freed. */
}But I got the following outputs :
Decoding video file...
[h264 @ 0x303c040] decode_slice_header error
[h264 @ 0x303c040] decode_slice_header error
[h264 @ 0x303c040] decode_slice_header error
[h264 @ 0x303c040] no frame!
Errr while decding frame 0
Obviously the initialization of the codec was incomplete. Do you have experience with the h264 API ? Any help would be appreciated.
-
Access Violation at avcodec_encode_video2()
23 mars 2016, par bot1131357I am trying to understand the FFmpeg API by following online examples available but it seems that the FFmpeg API has changed over time, making most of the examples obsolete ; I hope some of you can help me make more sense of the FFmpeg API examples.
I am currently trying to understand the encoding-example from FFmpeg, but I am getting an Access Violation error at this line :
out_size = avcodec_encode_video2(codecCtx, &avpkt, picture, &got_packet);
where I get "Unhandled exception at 0x77c29e42 in test01_encode.exe : 0xC0000005 : Access violation reading location 0xccccccc8." from Visual Studio.
I understand that avcodec_encode_video() is deprecated in favour of avcodec_encode_video2(), which uses AVPacket. I’ve allocated a buffer to data member of AVPacket and set its size, but still the same. What did I miss ?
The library that I’m using is ffmpeg-20160219-git-98a0053-win32-dev. I would really really appreciate if you could help me out of this confusion.
(Side : What does it mean by "get delayed frames" and why are we encoding by specifying AVFrame* parameter as NULL ?)
/*
* Video encoding example
*
* Encodes 25 synthetic YUV420P frames to an MPEG-1 file using
* avcodec_encode_video2 (quoted question code; see review notes inline).
*/
char filename[] = "test.mpg";
int main(int argc, char** argv)
{
AVCodec *codec;
AVCodecContext *codecCtx= NULL;
int i, out_size, size, x, y, outbuf_size;
FILE *f;
AVFrame *picture;
uint8_t *outbuf, *picture_buf;
printf("Video encoding\n");
// Register all formats and codecs
av_register_all();
/* find the mpeg1 video encoder */
codec = avcodec_find_encoder(AV_CODEC_ID_MPEG1VIDEO);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
codecCtx= avcodec_alloc_context3(codec);
picture= av_frame_alloc();
/* put sample parameters */
codecCtx->bit_rate = 400000;
/* resolution must be a multiple of two */
codecCtx->width = 352;
codecCtx->height = 288;
/* frames per second */
//codecCtx->time_base= (AVRational){1,25};
codecCtx->time_base.num = 1;
codecCtx->time_base.den = 25;
codecCtx->gop_size = 10; /* emit one intra frame every ten frames */
codecCtx->max_b_frames=1;
codecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
/* open it */
if (avcodec_open2(codecCtx, codec, NULL) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
fopen_s(&f,filename, "wb");
if (!f) {
fprintf(stderr, "could not open %s\n", filename);
exit(1);
}
/* alloc image and output buffer */
outbuf_size = 100000;
outbuf = (uint8_t*) malloc(outbuf_size);
/* Hand-rolled planar YUV 4:2:0 layout: Y plane of `size` bytes, then
   quarter-size Cb and Cr planes. */
size = codecCtx->width * codecCtx->height;
picture_buf = (uint8_t*) malloc((size * 3) / 2); /* size for YUV 420 */
picture->data[0] = picture_buf;
picture->data[1] = picture->data[0] + size;
picture->data[2] = picture->data[1] + size / 4;
picture->linesize[0] = codecCtx->width;
picture->linesize[1] = codecCtx->width / 2;
picture->linesize[2] = codecCtx->width / 2;
picture->width = codecCtx->width;
picture->height = codecCtx->height;
picture->format = codecCtx->pix_fmt;
/* NOTE(review): avpkt is a stack AVPacket and only .size/.data are set —
   av_init_packet(&avpkt) is never called, so the remaining fields hold stack
   garbage. Under MSVC debug builds uninitialized stack memory is filled with
   0xCC, which matches the reported access violation reading 0xccccccc8 inside
   avcodec_encode_video2. */
AVPacket avpkt;
int got_packet;
avpkt.size=av_image_get_buffer_size(codecCtx->pix_fmt, codecCtx->width,
codecCtx->height,1);
avpkt.data = (uint8_t *)av_malloc(avpkt.size*sizeof(uint8_t));
/* encode 1 second of video */
for(i=0;i<25;i++) {
fflush(stdout);
/* prepare a dummy image */
/* Y */
/* NOTE(review): these loop headers were garbled by HTML stripping — the
   conditions were presumably `y < codecCtx->height` / `x < codecCtx->width`
   (and halves thereof for chroma); as quoted they do not compile. */
for(y=0;yheight;y++) {
for(x=0;xwidth;x++) {
picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
}
}
/* Cb and Cr */
for(y=0;yheight/2;y++) {
for(x=0;xwidth/2;x++) {
picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
}
}
/* encode the image */
//out_size = avcodec_encode_video(codecCtx, outbuf, outbuf_size, picture);
// <access violation="violation">
/* NOTE(review): unlike the old avcodec_encode_video, the new API returns 0 on
   success (not a byte count) and signals output via got_packet — out_size is
   misused both in the printf below and as the delayed-frames loop condition. */
out_size = avcodec_encode_video2(codecCtx, &avpkt, picture, &got_packet);
printf("encoding frame %3d (size=%5d)\n", i, out_size);
//fwrite(outbuf, 1, out_size, f);
/* NOTE(review): writes avpkt.size bytes without checking got_packet, and
   avpkt is never reset/unreffed between iterations. */
fwrite(avpkt.data, 1, avpkt.size, f);
}
/* get the delayed frames */
for(; out_size; i++) {
fflush(stdout);
//out_size = avcodec_encode_video(codecCtx, outbuf, outbuf_size, NULL);
out_size = avcodec_encode_video2(codecCtx, &avpkt, NULL, &got_packet);
printf("write frame %3d (size=%5d)\n", i, out_size);
//fwrite(outbuf, 1, out_size, f);
fwrite(avpkt.data, 1, avpkt.size, f);
}
/* add sequence end code to have a real mpeg file */
outbuf[0] = 0x00;
outbuf[1] = 0x00;
outbuf[2] = 0x01;
outbuf[3] = 0xb7;
fwrite(outbuf, 1, 4, f);
fclose(f);
free(picture_buf);
free(outbuf);
avcodec_close(codecCtx);
av_free(codecCtx);
av_free(picture);
printf("\n");
}
</access>