
Recherche avancée
Médias (1)
-
Rennes Emotion Map 2010-11
19 octobre 2011, par
Mis à jour : Juillet 2013
Langue : français
Type : Texte
Autres articles (107)
-
La gestion des forums
3 novembre 2011, par
Si les forums sont activés sur le site, les administrateurs ont la possibilité de les gérer depuis l’interface d’administration ou depuis l’article même dans le bloc de modification de l’article qui se trouve dans la navigation de la page.
Accès à l’interface de modération des messages
Lorsqu’il est identifié sur le site, l’administrateur peut procéder de deux manières pour gérer les forums.
S’il souhaite modifier (modérer, déclarer comme SPAM un message) les forums d’un article particulier, il a à sa (...) -
Personnaliser en ajoutant son logo, sa bannière ou son image de fond
5 septembre 2013, par
Certains thèmes prennent en compte trois éléments de personnalisation : l’ajout d’un logo ; l’ajout d’une bannière ; l’ajout d’une image de fond.
-
Le profil des utilisateurs
12 avril 2011, par
Chaque utilisateur dispose d’une page de profil lui permettant de modifier ses informations personnelles. Dans le menu de haut de page par défaut, un élément de menu est automatiquement créé à l’initialisation de MediaSPIP, visible uniquement si le visiteur est identifié sur le site.
L’utilisateur a accès à la modification de profil depuis sa page auteur, un lien dans la navigation "Modifier votre profil" est (...)
Sur d’autres sites (11089)
-
mp4 video file generated using ffmpeg, works on desktop but doesn't work on android emulator
8 mai 2014, par Nikesh
I am generating a video that converts images into a .mp4 video file. Everything works fine. The file is also generated as mp4, but the file doesn’t run on an Android device — it says it cannot play the video.
Please find the attached sample code which convert single jpeg file into video (.mp4)
Please help me to resolve this issue. Thanks.
/*
 * Decodes a single JPEG image and re-encodes it 25 times (one second at
 * 25 fps) into an MPEG-1 video stream written to videoFile.
 *
 * NOTE(review): the output is a raw MPEG-1 *elementary stream*, not an MP4
 * container. Naming the file .mp4 does not make it MP4, which is exactly why
 * Android's media stack refuses to play it. To get a playable MP4, mux the
 * encoded packets through libavformat (avformat_alloc_output_context2 /
 * avformat_write_header / av_write_frame) with an Android-supported codec
 * (H.264 or MPEG-4) — confirm against the target API level.
 *
 * NOTE(review): JNI does not pass C strings directly; these char* parameters
 * should normally be jstring converted via GetStringUTFChars — verify the
 * Java-side declaration actually matches this signature.
 */
JNIEXPORT void JNICALL Java_roman10_ffmpegtst_VideoBrowser_videoExample(JNIEnv *pEnv, jobject pObj, char* imagefile, char* videoFile)
{
    /* Everything needed by the cleanup path is declared and NULLed up front
     * so a single exit label can release whatever was actually acquired. */
    AVCodecContext *pOCtx = NULL;       /* encoder context (we own it) */
    AVCodec *pOCodex = NULL;
    AVFormatContext *pIFormatCtx = NULL;
    AVCodecContext *pICodecCtx = NULL;  /* decoder context, owned by pIFormatCtx */
    AVFrame *pIFrame = NULL;
    uint8_t *buffer = NULL;             /* pixel buffer backing pIFrame */
    uint8_t *outbuf = NULL;             /* encoded-output scratch buffer */
    FILE *outputFile = NULL;
    int i = 0;
    int out_size = 0;                   /* both initialized: the flush loop below
                                         * reads them even when no frame decoded
                                         * (previously uninitialized => UB) */
    int ret;

    avcodec_init();
    av_register_all();
    avcodec_register_all();

    LOGE(10, "Start videoExample");

    pOCodex = avcodec_find_encoder(AV_CODEC_ID_MPEG1VIDEO);
    if (!pOCodex) {
        /* was: LOGE(..., "%s", pOCodex) — printing a NULL pointer with %s is
         * undefined behavior — followed by exit(1), which would kill the whole
         * Android VM from native code. Log and return instead. */
        LOGE(10, "Cannot find MPEG-1 encoder");
        return;
    }

    pOCtx = avcodec_alloc_context3(pOCodex);
    pOCtx->bit_rate = 400000;
    pOCtx->width = 640;                 /* must be a multiple of two */
    pOCtx->height = 480;
    AVRational rational = {1, 25};
    pOCtx->time_base = rational;        /* 25 fps */
    pOCtx->gop_size = 10;               /* emit one intra frame every ten frames */
    pOCtx->max_b_frames = 1;
    pOCtx->pix_fmt = AV_PIX_FMT_YUV420P;

    LOGE(10, "Start avcodec_open2");
    if (avcodec_open2(pOCtx, pOCodex, NULL) < 0)
        goto cleanup;
    LOGE(10, "End avcodec_open2");

    if (avformat_open_input(&pIFormatCtx, imagefile, NULL, NULL) < 0) {
        /* Can't open the jpg file */
        goto cleanup;
    }
    av_dump_format(pIFormatCtx, 0, imagefile, 0);

    pICodecCtx = pIFormatCtx->streams[0]->codec;
    AVCodec *pICodec = avcodec_find_decoder(pICodecCtx->codec_id);
    if (!pICodec || avcodec_open2(pICodecCtx, pICodec, NULL) < 0) {
        /* Can't find or open the decoder */
        pICodecCtx = NULL;              /* never opened — don't close it */
        goto cleanup;
    }

    pIFrame = avcodec_alloc_frame();
    if (!pIFrame) {
        /* Can't alloc the input frame */
        goto cleanup;
    }

    int bufSize = avpicture_get_size(AV_PIX_FMT_YUVJ420P, pICodecCtx->width, pICodecCtx->height);
    buffer = (uint8_t *) av_malloc(bufSize * sizeof(uint8_t));
    avpicture_fill((AVPicture *) pIFrame, buffer, AV_PIX_FMT_YUVJ420P, pICodecCtx->width, pICodecCtx->height);

    /* "wb", not "w+": this is a binary stream and nothing is read back. */
    outputFile = fopen(videoFile, "wb");
    if (!outputFile) {
        LOGE(10, "could not open %s", videoFile);
        goto cleanup;
    }

    int outbuf_size = 100000;
    outbuf = (uint8_t *) malloc(outbuf_size);
    if (!outbuf)
        goto cleanup;

    AVPacket packet;
    int frameFinished;
    while (av_read_frame(pIFormatCtx, &packet) >= 0) {
        if (packet.stream_index == 0) {
            ret = avcodec_decode_video2(pICodecCtx, pIFrame, &frameFinished, &packet);
            /* ret is the number of bytes consumed; frameFinished reports
             * whether a complete picture came out. The original tested
             * ret alone, which can be > 0 without a finished frame. */
            if (ret >= 0 && frameFinished) {
                pIFrame->quality = 4;
                /* encode the same picture 25 times = 1 second at 25 fps */
                for (i = 0; i < 25; i++) {
                    pIFrame->pts = i;   /* monotonically increasing timestamps */
                    out_size = avcodec_encode_video(pOCtx, outbuf, outbuf_size, pIFrame);
                    fwrite(outbuf, 1, out_size, outputFile);
                }
            }
        }
        av_free_packet(&packet);        /* was leaked on every iteration */
    }

    /* drain the delayed frames out of the encoder */
    for (; out_size; i++) {
        out_size = avcodec_encode_video(pOCtx, outbuf, outbuf_size, NULL);
        fwrite(outbuf, 1, out_size, outputFile);
    }

    /* MPEG-1 sequence end code, so decoders see a terminated stream */
    outbuf[0] = 0x00;
    outbuf[1] = 0x00;
    outbuf[2] = 0x01;
    outbuf[3] = 0xb7;
    fwrite(outbuf, 1, 4, outputFile);

cleanup:
    if (outputFile)
        fclose(outputFile);
    free(outbuf);
    av_free(buffer);                    /* was leaked */
    av_free(pIFrame);
    if (pIFormatCtx) {
        if (pICodecCtx)
            avcodec_close(pICodecCtx);
        avformat_close_input(&pIFormatCtx); /* was leaked */
    }
    if (pOCtx) {
        avcodec_close(pOCtx);
        av_free(pOCtx);
    }
} -
Gstreamer pipeline to scale down video before streaming
20 novembre 2014, par r3dsm0k3
Here is what I’m trying to achieve.
Im streaming from a Logitech C920 camera on beaglebone black with gstreamer. I have to save a copy of the video saved locally while it is streaming. I have achieved that with tee.
The Logitech camera gives h264-encoded video at a certain bitrate, mostly very high. I’m streaming from a moving car on 3G, and the network is not good enough to send the stream to the nginx-rtmp server I’m using to re-distribute, which gives strong artifacts in the result.
Im able to alter the bitrate of captured video using uvch264.
But then, the locally saved video would also have the lower bitrate. Is there any way of capturing a higher-bitrate 1080p video from the camera while sending a lower-resolution, lower-bitrate video to the streaming server?
Following is the pipeline I have currently.
gst-launch-1.0 -v -e uvch264src initial-bitrate=400000 average-bitrate=400000 iframe-period=3000 device=/dev/video0 name=src auto-start=true src.vidsrc ! queue ! video/x-h264,width=1920,height=1080,framerate=30/1 ! h264parse ! flvmux streamable=true name=flvmuxer ! queue ! tee name=t ! queue ! filesink location=/mnt/test.flv t. ! queue ! rtmpsink location=$SERVER/hls/$CAM1
I could also try sending the higher bitrate video to a
udpsink
instead ofrtmpsink
and with another gstreamer process in parallel that takes the data using a udpsink
and probably post-processes / re-encodes and sends it to the rtmp server. I’m also limited by the processing speed the BeagleBone has for encoding the videos. Currently I’m trying with 1 camera, and in the finished project I would like to have 2 cameras connected. The upload speed I’m getting on the network is under 1 Mbps.
How do I solve this with less load on the BeagleBone ? Im very open to a new architecture as well.
-
A newbie Struggling with FFmpeg Video Encoding
19 avril 2014, par iJose
For the last week I have been struggling with FFmpeg video encoding.
I am capturing the video from the device camera using UIImagePickerController.
and then encoding it using the following function. After encoding I am not able to save the video to my device’s camera roll.
i used UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(filepath) it returns Zero GOD KNOWS WHY
/*
 * Encodes one second (25 frames) of a synthetic YUV test pattern with the
 * MPEG-4 encoder and writes the raw encoded bytes to `filename`.
 * Adapted from FFmpeg's decoding_encoding.c example.
 *
 * NOTE(review): this produces a raw MPEG-4 *elementary stream* terminated by
 * an MPEG sequence end code — it is not an MP4/MOV container. That is why
 * UIVideoAtPathIsCompatibleWithSavedPhotosAlbum() returns NO and the file
 * cannot be saved to the camera roll. Mux the packets through libavformat
 * (or use AVAssetWriter) to get a playable .mp4/.mov.
 *
 * NOTE(review): the original pasted code had the loop conditions mangled by
 * HTML scraping ("for(y=0;yheight;y++)"); restored to "y < c->height" etc.
 * per the upstream FFmpeg example.
 */
+(void)videoEncoder:(NSString *)filename
{
    avcodec_register_all();
    int codec_id = AV_CODEC_ID_MPEG4;
    AVCodec *codec;
    AVCodecContext *c = NULL;
    int i, ret, x, y, got_output;
    FILE *f;
    AVFrame *frame;
    AVPacket pkt;
    /* MPEG sequence end code appended after the last packet */
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };
    printf("Encode video file %s\n", [filename UTF8String]);
    /* find the mpeg4 video encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        exit(1); /* NOTE(review): exit() terminates the whole iOS app; prefer
                  * returning an error to the caller in production code */
    }
    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate video codec context\n");
        exit(1);
    }
    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* frames per second */
    c->time_base = (AVRational){1, 25};
    c->gop_size = 10; /* emit one intra frame every ten frames */
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;
    if (codec_id == AV_CODEC_ID_MPEG4)
        /* NOTE(review): "preset" is an x264 private option; for the MPEG-4
         * encoder this call is a no-op (the upstream example guards it with
         * AV_CODEC_ID_H264) */
        av_opt_set(c->priv_data, "preset", "slow", 0);
    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        exit(1);
    }
    f = fopen([filename UTF8String], "wb");
    if (!f)
    {
        fprintf(stderr, "Could not open %s\n", [filename UTF8String]);
        exit(1);
    }
    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width = c->width;
    frame->height = c->height;
    /* the image can be allocated by any means and av_image_alloc() is
     * just the most convenient way if av_malloc() is to be used */
    ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
                         c->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "Could not allocate raw picture buffer\n");
        exit(1);
    }
    /* encode 1 second of video */
    for (i = 0; i < 25; i++) {
        av_init_packet(&pkt);
        pkt.data = NULL; // packet data will be allocated by the encoder
        pkt.size = 0;
        fflush(stdout);
        /* prepare a dummy image */
        /* Y plane */
        for (y = 0; y < c->height; y++) {
            for (x = 0; x < c->width; x++) {
                frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
            }
        }
        /* Cb and Cr planes — half resolution for YUV420P */
        for (y = 0; y < c->height / 2; y++) {
            for (x = 0; x < c->width / 2; x++) {
                frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
                frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
            }
        }
        frame->pts = i;
        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }
        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }
    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        fflush(stdout);
        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            exit(1);
        }
        if (got_output) {
            printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }
    /* add sequence end code to have a real mpeg file */
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);
    avcodec_close(c);
    av_free(c);
    av_freep(&frame->data[0]);
    av_frame_free(&frame);
    printf("\n");
}