
Advanced search
Other articles (98)
-
MediaSPIP: changing the rights for creating objects and publishing them definitively
11 November 2010
By default, MediaSPIP allows 5 types of objects to be created.
Also by default, the rights to create these objects and to publish them definitively are reserved for administrators, but they can of course be configured by the webmasters.
These rights are locked down for several reasons: because allowing publication should be a deliberate choice of the webmaster, not of the platform as a whole, and therefore not a default; and because having an account can also serve other purposes, (...)
-
Submitting improvements and additional plugins
10 April 2011
If you have developed a new extension adding one or more useful features to MediaSPIP, let us know, and its integration into the official distribution will be considered.
You can use the development mailing list to announce it or to ask for help with writing the plugin. Since MediaSPIP is based on SPIP, it is also possible to use SPIP's SPIP-zone mailing list to (...)
-
MediaSPIP Player: potential problems
22 February 2011
The player does not work in Internet Explorer
On Internet Explorer (at least 8 and 7), the plugin uses the Flash player flowplayer to play video and sound. If the player does not seem to work, the cause may be the configuration of Apache's mod_deflate.
If the configuration of that Apache module contains a line that looks like the following, try removing it or commenting it out to see whether the player then works correctly: (...)
On other sites (9122)
-
fate: add tests for cdxl video
21 February 2012, by Paul B Mahol
-
Decode audio and video and process both streams — ffmpeg, sdl, opencv
6 May 2013, by Eric
My goal is to process the audio and the video of an MPEG-2 file independently while keeping the two streams synchronized. The video lasts one or two minutes at most.
- First, following this post "opencv for reading videos (and do the process),ffmpeg for audio , and SDL used to play both" sounds perfect. I made some modifications to the code to account for recent ffmpeg naming changes, and compilation with cmake on a 64-bit machine is fine, but I get an "Unsupported codec [3]" error when opening the codec. The code follows.
- Second, I am looking for code that keeps the two streams synchronized (a minimal sketch of one common approach is included after the code below).
#include "opencv/highgui.h"
#include "opencv/cv.h"
#ifndef INT64_C
#define INT64_C(c) (c ## LL)
#define UINT64_C(c) (c ## ULL)
#endif
extern "C"{
#include <SDL/SDL.h>
#include <SDL/SDL_thread.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}
#include <iostream>
#include <cstdio>  // header names lost in the original post; fprintf needs this
#include <cstring> // memset, memcpy
using namespace cv;
#define SDL_AUDIO_BUFFER_SIZE 1024
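// FIFO of demuxed packets, shared between the main thread (producer) and
// the SDL audio callback (consumer); protected by an SDL mutex + condition.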
typedef struct PacketQueue
{
AVPacketList *first_pkt, *last_pkt;
int nb_packets;
int size;
SDL_mutex *mutex;
SDL_cond *cond;
} PacketQueue;
PacketQueue audioq;
int audioStream = -1;
int videoStream = -1;
int quit = 0;
SDL_Surface* screen = NULL;
AVFormatContext* pFormatCtx = NULL;
AVCodecContext* aCodecCtx = NULL;
AVCodecContext* pCodecCtx = NULL;
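// Blits a BGR frame to an SDL window, creating the window on first use.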
void show_frame(IplImage* img){
if (!screen){
screen = SDL_SetVideoMode(img->width, img->height, 0, 0);
if (!screen){
fprintf(stderr, "SDL: could not set video mode - exiting\n");
exit(1);
}
}
// Assuming IplImage packed as BGR 24bits
SDL_Surface* surface = SDL_CreateRGBSurfaceFrom((void*)img->imageData,
img->width,
img->height,
img->depth * img->nChannels,
img->widthStep,
0xff0000, 0x00ff00, 0x0000ff, 0
);
SDL_BlitSurface(surface, 0, screen, 0);
SDL_Flip(screen);
SDL_FreeSurface(surface); // the surface only wraps img's pixels; free it so one isn't leaked per frame
}
void packet_queue_init(PacketQueue *q){
memset(q, 0, sizeof(PacketQueue));
q->mutex = SDL_CreateMutex();
q->cond = SDL_CreateCond();
}
int packet_queue_put(PacketQueue *q, AVPacket *pkt){
AVPacketList *pkt1;
if (av_dup_packet(pkt) < 0){
return -1;
}
pkt1 = (AVPacketList*) av_malloc(sizeof(AVPacketList));
//pkt1 = (AVPacketList*) malloc(sizeof(AVPacketList));
if (!pkt1) return -1;
pkt1->pkt = *pkt;
pkt1->next = NULL;
SDL_LockMutex(q->mutex);
if (!q->last_pkt)
q->first_pkt = pkt1;
else
q->last_pkt->next = pkt1;
q->last_pkt = pkt1;
q->nb_packets++;
q->size += pkt1->pkt.size;
SDL_CondSignal(q->cond);
SDL_UnlockMutex(q->mutex);
return 0;
}
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block){
AVPacketList *pkt1;
int ret;
SDL_LockMutex(q->mutex);
for (;;){
if( quit){
ret = -1;
break;
}
pkt1 = q->first_pkt;
if (pkt1){
q->first_pkt = pkt1->next;
if (!q->first_pkt)
q->last_pkt = NULL;
q->nb_packets--;
q->size -= pkt1->pkt.size;
*pkt = pkt1->pkt;
av_free(pkt1);
//free(pkt1);
ret = 1;
break;
}
else if (!block){
ret = 0;
break;
}
else{
SDL_CondWait(q->cond, q->mutex);
}
}
SDL_UnlockMutex(q->mutex);
return ret;
}
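// Decodes queued audio packets into audio_buf and returns the number of
// bytes produced, or -1 on error/quit; decoding state lives in the statics.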
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size){
static AVPacket pkt;
static uint8_t *audio_pkt_data = NULL;
static int audio_pkt_size = 0;
int len1, data_size;
for (;;){
while (audio_pkt_size > 0){
data_size = buf_size;
len1 = avcodec_decode_audio3(aCodecCtx, (int16_t*)audio_buf, &data_size, &pkt);
if (len1 < 0){
// if error, skip frame
audio_pkt_size = 0;
break;
}
audio_pkt_data += len1;
audio_pkt_size -= len1;
if (data_size <= 0){
// No data yet, get more frames
continue;
}
// We have data, return it and come back for more later
return data_size;
}
if (pkt.data)
av_free_packet(&pkt);
if (quit) return -1;
if (packet_queue_get(&audioq, &pkt, 1) < 0) return -1;
audio_pkt_data = pkt.data;
audio_pkt_size = pkt.size;
}
}
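// SDL audio callback: fills 'stream' with 'len' bytes of decoded audio,
// pulling more packets from the queue whenever the local buffer runs dry.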
void audio_callback(void *userdata, Uint8 *stream, int len){
AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
int len1, audio_size;
static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
static unsigned int audio_buf_size = 0;
static unsigned int audio_buf_index = 0;
while (len > 0){
if (audio_buf_index >= audio_buf_size){
// We have already sent all our data; get more
audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf));
if(audio_size < 0){
// If error, output silence
audio_buf_size = 1024; // arbitrary?
memset(audio_buf, 0, audio_buf_size);
}
else{
audio_buf_size = audio_size;
}
audio_buf_index = 0;
}
len1 = audio_buf_size - audio_buf_index;
if (len1 > len)
len1 = len;
memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
len -= len1;
stream += len1;
audio_buf_index += len1;
}
}
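// Opens the input, finds the audio and video streams, opens both decoders,
// and starts SDL audio playback driven by audio_callback.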
void setup_ffmpeg(char* filename)
{
if (avformat_open_input(&pFormatCtx, filename, NULL, NULL) != 0){
fprintf(stderr, "FFmpeg failed to open file %s!\n", filename);
exit(-1);
}
if (av_find_stream_info(pFormatCtx) < 0){
fprintf(stderr, "FFmpeg failed to retrieve stream info!\n");
exit(-1);
}
// Dump information about file onto standard error
av_dump_format(pFormatCtx, 0, filename, 0);
// Find the first video stream
unsigned int i;
for (i = 0; i < pFormatCtx->nb_streams; i++){
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0){
videoStream = i;
}
// AVMEDIA_TYPE_AUDIO: the original post checked AVMEDIA_TYPE_VIDEO here as well,
// so audioStream pointed at the video stream and its codec context was opened
// twice — the likely cause of the "Unsupported codec [3]" error.
if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0){
audioStream = i;
}
}
if (videoStream == -1){
fprintf(stderr, "No video stream found in %s!\n", filename);
exit(-1);
}
if (audioStream == -1){
fprintf(stderr, "No audio stream found in %s!\n", filename);
exit(-1);
}
// Get a pointer to the codec context for the audio stream
aCodecCtx = pFormatCtx->streams[audioStream]->codec;
// Set audio settings from codec info
SDL_AudioSpec wanted_spec;
wanted_spec.freq = aCodecCtx->sample_rate;
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.channels = aCodecCtx->channels;
wanted_spec.silence = 0;
wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
wanted_spec.callback = audio_callback;
wanted_spec.userdata = aCodecCtx;
SDL_AudioSpec spec;
if (SDL_OpenAudio(&wanted_spec, &spec) < 0){
fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
exit(-1);
}
AVCodec* aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
if (!aCodec){
fprintf(stderr, "Unsupported codec [1]!\n");
exit(-1);
}
avcodec_open(aCodecCtx, aCodec);
// audio_st = pFormatCtx->streams[index]
packet_queue_init(&audioq);
SDL_PauseAudio(0);
// Get a pointer to the codec context for the video stream
pCodecCtx = pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
AVCodec* pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (pCodec == NULL){
fprintf(stderr, "Unsupported codec [2]!\n");
exit(-1); // Codec not found
}
// Open codec
if (avcodec_open(pCodecCtx, pCodec) < 0){
fprintf(stderr, "Unsupported codec [3]!\n");
exit(-1); // Could not open codec
}
}
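// main: demux with ffmpeg, hand audio packets to the SDL queue, and read
// video frames through OpenCV for display.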
int main(int argc, char* argv[])
{
if (argc < 2){
std::cout << "Usage: " << argv[0] << " <video>" << std::endl;
return -1;
}
av_register_all();
// Init SDL
if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER))
{
fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
return -1;
}
// Init ffmpeg and setup some SDL stuff related to Audio
setup_ffmpeg(argv[1]);
VideoCapture cap(argv[1]);
if (!cap.isOpened()){
std::cout << "Failed to load file!" << std::endl;
return -1;
}
AVPacket packet;
while (av_read_frame(pFormatCtx, &packet) >= 0)
{
if (packet.stream_index == videoStream)
{
// This is actually where audio/video SYNC would happen.
// Right now I assume every VIDEO packet contains an entire video frame, and that's not true: a video frame can be made of multiple packets!
// But for the time being, assume 1 video frame == 1 video packet,
// so instead of reading the frame through ffmpeg, I read it through OpenCV.
Mat frame;
cap >> frame; // get the next frame from the file
// do some processing on the frame, either as a Mat or as IplImage.
// For educational purposes, applying a lame grayscale conversion
IplImage ipl_frame = frame;
// Average computed once per pixel: the original overwrote the B value and
// then reused it when converting G and R, skewing the result.
for (int i = 0; i < ipl_frame.width * ipl_frame.height * ipl_frame.nChannels; i += ipl_frame.nChannels)
{
unsigned char b = (unsigned char)ipl_frame.imageData[i]; //B
unsigned char g = (unsigned char)ipl_frame.imageData[i+1]; //G
unsigned char r = (unsigned char)ipl_frame.imageData[i+2]; //R
ipl_frame.imageData[i] = (b + g + r) / 3;
ipl_frame.imageData[i+1] = ipl_frame.imageData[i];
ipl_frame.imageData[i+2] = ipl_frame.imageData[i];
}
// Display it on SDL window
show_frame(&ipl_frame);
av_free_packet(&packet);
}
else if (packet.stream_index == audioStream)
{
packet_queue_put(&audioq, &packet);
}
else
{
av_free_packet(&packet);
}
SDL_Event event;
if (SDL_PollEvent(&event)) // SDL_PollEvent returns 0 when no event is pending; don't read 'event' then
{
switch (event.type)
{
case SDL_QUIT:
quit = 1; // also tells the audio side to stop
SDL_Quit();
break;
default:
break;
}
}
}
// the capture will be released automatically by the VideoCapture destructor
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
av_close_input_file(pFormatCtx);
return 0;
}
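For the second point, nothing in the listing above actually synchronizes the two flows. A common scheme (the one from the well-known dranger ffmpeg/SDL tutorial, not something present in this code) treats audio as the master clock: count the bytes the audio callback hands to SDL, convert that count to seconds, and delay each video frame until the audio clock reaches the frame's presentation time. The sketch below reuses the globals from the code above (pFormatCtx, aCodecCtx, videoStream, quit) and assumes a hypothetical audio_bytes_played counter that audio_callback would increment after each memcpy; it illustrates the technique and is not a tested patch.
// Bytes of decoded audio handed to SDL so far; audio_callback would do
// audio_bytes_played += len1; right after its memcpy into 'stream'.
static int64_t audio_bytes_played = 0;
// Current audio playback position, in seconds.
static double audio_clock_seconds(void)
{
    // AUDIO_S16SYS is 2 bytes per sample, times the channel count.
    int bytes_per_sec = aCodecCtx->sample_rate * aCodecCtx->channels * 2;
    return bytes_per_sec > 0 ? (double)audio_bytes_played / bytes_per_sec : 0.0;
}
// Presentation time of a video packet, in seconds of stream time.
static double video_pts_seconds(const AVPacket *packet)
{
    int64_t pts = (packet->pts != AV_NOPTS_VALUE) ? packet->pts : packet->dts;
    return pts * av_q2d(pFormatCtx->streams[videoStream]->time_base);
}
// Call before show_frame(): block until the audio clock catches up with
// this frame's timestamp, so video never runs ahead of the audio.
static void wait_for_audio(const AVPacket *packet)
{
    double pts = video_pts_seconds(packet);
    while (!quit && pts > audio_clock_seconds())
        SDL_Delay(5); // coarse 5 ms polling keeps the sketch short
}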
-
iPhone camera video captured with AVCaptureSession: converting the CMSampleBufferRef to h.264 with ffmpeg is the issue. Please advise.
4 January 2012, by isaiah
My goal is h.264/AAC, MPEG2-TS streaming to a server from an iPhone device.
Currently my source is FFmpeg+libx264, and it compiles successfully. I know about the GNU license. I want a demo program.
I want to know:
1. Is the conversion from CMSampleBufferRef to AVPicture data succeeding?
avpicture_fill((AVPicture*)pFrame, rawPixelBase, PIX_FMT_RGB32, width, height);
pFrame's linesize and data are not null, but its pts is -9233123123, and the same goes for outpic. Because of this I have to guess at the 'non-strictly-monotonic PTS' message.
2. This log repeats:
encoding frame (size= 0)
encoding frame = ""
'avcodec_encode_video' returning 0 means success, but it always returns 0. I don't know what to do...
2011-06-01 15:15:14.199 AVCam[1993:7303] pFrame = avcodec_alloc_frame();
2011-06-01 15:15:14.207 AVCam[1993:7303] avpicture_fill = 1228800
Video encoding
2011-06-01 15:15:14.215 AVCam[1993:7303] codec = 5841844
[libx264 @ 0x1441e00] using cpu capabilities: ARMv6 NEON
[libx264 @ 0x1441e00] profile Constrained Baseline, level 2.0
[libx264 @ 0x1441e00] non-strictly-monotonic PTS
encoding frame (size= 0)
encoding frame
[libx264 @ 0x1441e00] final ratefactor: 26.74
3. I have to guess that the 'non-strictly-monotonic PTS' message is the cause of all the problems. What is this 'non-strictly-monotonic PTS'?
This is the source:
- (void) captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection
{
if( !CMSampleBufferDataIsReady(sampleBuffer) )
{
NSLog( @"sample buffer is not ready. Skipping sample" );
return;
}
if( [isRecordingNow isEqualToString:@"YES"] )
{
lastSampleTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
if( videoWriter.status != AVAssetWriterStatusWriting )
{
[videoWriter startWriting];
[videoWriter startSessionAtSourceTime:lastSampleTime];
}
if( captureOutput == videooutput )
{
[self newVideoSample:sampleBuffer];
CVImageBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
// access the data
int width = CVPixelBufferGetWidth(pixelBuffer);
int height = CVPixelBufferGetHeight(pixelBuffer);
unsigned char *rawPixelBase = (unsigned char *)CVPixelBufferGetBaseAddress(pixelBuffer);
AVFrame *pFrame;
pFrame = avcodec_alloc_frame();
pFrame->quality = 0;
NSLog(@"pFrame = avcodec_alloc_frame(); ");
// int bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
// int bytesSize = height * bytesPerRow ;
// unsigned char *pixel = (unsigned char*)malloc(bytesSize);
// unsigned char *rowBase = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
// memcpy (pixel, rowBase, bytesSize);
int avpicture_fillNum = avpicture_fill((AVPicture*)pFrame, rawPixelBase, PIX_FMT_RGB32, width, height);//PIX_FMT_RGB32//PIX_FMT_RGB8
//NSLog(@"rawPixelBase = %i , rawPixelBase -s = %s",rawPixelBase, rawPixelBase);
NSLog(@"avpicture_fill = %i",avpicture_fillNum);
//NSLog(@"width = %i,height = %i",width, height);
// Do something with the raw pixels here
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
//avcodec_init();
//avdevice_register_all();
av_register_all();
AVCodec *codec;
AVCodecContext *c= NULL;
int out_size, size, outbuf_size;
//FILE *f;
uint8_t *outbuf;
printf("Video encoding\n");
/* find the mpeg video encoder */
codec =avcodec_find_encoder(CODEC_ID_H264);//avcodec_find_encoder_by_name("libx264"); //avcodec_find_encoder(CODEC_ID_H264);//CODEC_ID_H264);
NSLog(@"codec = %i",codec);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
c= avcodec_alloc_context();
/* put sample parameters */
c->bit_rate = 400000;
c->bit_rate_tolerance = 10;
c->me_method = 2;
/* resolution must be a multiple of two */
c->width = 352;//width;//352;
c->height = 288;//height;//288;
/* frames per second */
c->time_base= (AVRational){1,25};
c->gop_size = 10;//25; /* emit one intra frame every ten frames */
//c->max_b_frames=1;
c->pix_fmt = PIX_FMT_YUV420P;
c ->me_range = 16;
c ->max_qdiff = 4;
c ->qmin = 10;
c ->qmax = 51;
c ->qcompress = 0.6f;
/* open it */
if (avcodec_open(c, codec) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
/* alloc image and output buffer */
outbuf_size = 100000;
outbuf = malloc(outbuf_size);
size = c->width * c->height;
AVFrame* outpic = avcodec_alloc_frame();
int nbytes = avpicture_get_size(PIX_FMT_YUV420P, c->width, c->height);
//create buffer for the output image
uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes);
#pragma mark -
fflush(stdout);
// int numBytes = avpicture_get_size(PIX_FMT_YUV420P, c->width, c->height);
// uint8_t *buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
//
// //UIImage *image = [UIImage imageNamed:[NSString stringWithFormat:@"10%d", i]];
// CGImageRef newCgImage = [self imageFromSampleBuffer:sampleBuffer];//[image CGImage];
//
// CGDataProviderRef dataProvider = CGImageGetDataProvider(newCgImage);
// CFDataRef bitmapData = CGDataProviderCopyData(dataProvider);
// buffer = (uint8_t *)CFDataGetBytePtr(bitmapData);
//
// avpicture_fill((AVPicture*)pFrame, buffer, PIX_FMT_RGB8, c->width, c->height);
avpicture_fill((AVPicture*)outpic, outbuffer, PIX_FMT_YUV420P, c->width, c->height);
// The source really is the RGB32 capture at its own size; the original passed
// PIX_FMT_RGB8 and the encoder's width/height here, mismatching pFrame.
struct SwsContext* fooContext = sws_getContext(width, height,
PIX_FMT_RGB32,
c->width, c->height,
PIX_FMT_YUV420P,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
//perform the conversion
sws_scale(fooContext, pFrame->data, pFrame->linesize, 0, height, outpic->data, outpic->linesize);
// Here is where I try to convert to YUV
/* encode the image */
out_size = avcodec_encode_video(c, outbuf, outbuf_size, outpic);
printf("encoding frame (size=%5d)\n", out_size);
printf("encoding frame %s\n", outbuf);
//fwrite(outbuf, 1, out_size, f);
// free(buffer);
// buffer = NULL;
/* add sequence end code to have a real mpeg file */
// outbuf[0] = 0x00;
// outbuf[1] = 0x00;
// outbuf[2] = 0x01;
// outbuf[3] = 0xb7;
//fwrite(outbuf, 1, 4, f);
//fclose(f);
free(outbuf);
avcodec_close(c);
av_free(c);
av_free(pFrame);
printf("\n");
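On the 'non-strictly-monotonic PTS' warning: libx264 prints it when successive frames reach the encoder with pts values that do not strictly increase. avcodec_alloc_frame() does not set a meaningful pts, which matches the huge negative value logged above, so every frame arrives with the same garbage timestamp. A second, related problem is that this code allocates and opens a brand-new encoder context inside captureOutput: for every sample buffer; libx264 buffers frames internally before emitting output, so an encoder that has only ever seen one frame returning 0 from avcodec_encode_video is expected, and hoisting the codec setup out of the callback is likely the first fix. With the context made persistent, a minimal sketch of the pts fix under the same deprecated API (frame_count is a hypothetical counter, not from the original post):
// Hypothetical running index of frames submitted to the encoder; with
// c->time_base = (AVRational){1,25}, pts N simply means "frame N at 25 fps".
static int64_t frame_count = 0;

// ... after sws_scale() and right before encoding:
outpic->pts = frame_count++; // strictly increasing: silences the libx264 warning
out_size = avcodec_encode_video(c, outbuf, outbuf_size, outpic);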