
Recherche avancée
Médias (1)
-
The pirate bay depuis la Belgique
1er avril 2013, par
Mis à jour : Avril 2013
Langue : français
Type : Image
Autres articles (69)
-
La file d’attente de SPIPmotion
28 novembre 2010, par
Une file d’attente stockée dans la base de données
Lors de son installation, SPIPmotion crée une nouvelle table dans la base de donnée intitulée spip_spipmotion_attentes.
Cette nouvelle table est constituée des champs suivants : id_spipmotion_attente, l’identifiant numérique unique de la tâche à traiter ; id_document, l’identifiant numérique du document original à encoder ; id_objet l’identifiant unique de l’objet auquel le document encodé devra être attaché automatiquement ; objet, le type d’objet auquel (...) -
Publier sur MédiaSpip
13 juin 2013
Puis-je poster des contenus à partir d’une tablette Ipad ?
Oui, si votre Médiaspip installé est à la version 0.2 ou supérieure. Contacter au besoin l’administrateur de votre MédiaSpip pour le savoir -
Les formats acceptés
28 janvier 2010, par
Les commandes suivantes permettent d’avoir des informations sur les formats et codecs gérés par l’installation locale de ffmpeg :
ffmpeg -codecs ffmpeg -formats
Les formats vidéos acceptés en entrée
Cette liste est non exhaustive, elle met en exergue les principaux formats utilisés : h264 : H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 m4v : raw MPEG-4 video format flv : Flash Video (FLV) / Sorenson Spark / Sorenson H.263 Theora wmv :
Les formats vidéos de sortie possibles
Dans un premier temps on (...)
Sur d’autres sites (6609)
-
Use ffmpeg example remuxing.c to record rtmp live stream to mp4 file, it add an empty elst box, how to forbid it ?
1er octobre 2016, par 谢金运 -
record rtsp stream to file(muxing)
11 avril 2014, par user3521863
AVFormatContext *g_oc = NULL;
AVStream *g_in_audio_st, *g_in_video_st;     // input (demuxed) streams
AVStream *g_out_audio_st, *g_out_video_st;   // output (remuxed) streams
// Hand-maintained timestamp counters used to synthesize output pts/dts.
// NOTE(review): plain int counters — will wrap on long recordings; the
// FFmpeg API expects int64_t timestamps.
int audio_pts = 0, video_pts = 0, audio_dts = 0, video_dts = 0;
int last_video_pts = 0;
AVPacket outpkt, *av_pkt;   // scratch output packet and input-packet alias
// initialize video codec
// Creates the output video stream on the global output context g_oc and
// copies codec settings from input stream #1.
// NOTE(review): stream index 1 is hard-coded as "video" — confirm against
// the demuxer's actual stream ordering.
static void init_video_codec(AVFormatContext *context) {
LOGI(1, "enter init_video_codec");
// Derive an integer frame rate from the input stream's real frame rate.
// The original tested `r_frame_rate.num != AV_NOPTS_VALUE`, but that field
// is an int and AV_NOPTS_VALUE is an int64 sentinel, so the test was always
// true and a zero numerator produced fps == 0. Guard on positive values.
int fps;
AVRational in_rate = context->streams[1]->r_frame_rate;
if (in_rate.num > 0 && in_rate.den > 0)
fps = in_rate.num / in_rate.den;
else
fps = 25; // fallback when the demuxer reports no usable rate
g_out_video_st = avformat_new_stream(g_oc, context->streams[1]->codec->codec);
LOGI(1, "video avformat_new_stream");
if( g_out_video_st == NULL ) {
LOGE(1, "Fail to Allocate Output Video Stream");
return ;
}
LOGI(1, "Allocated Video Stream");
if( avcodec_copy_context(g_out_video_st->codec, context->streams[1]->codec) != 0 ) {
LOGE(1, "Failed to video Copy Context");
return ;
}
LOGI(1, "Success to video Copy Context");
// Mirror the input stream's aspect ratio and codec identity.
g_out_video_st->sample_aspect_ratio.den = g_in_video_st->codec->sample_aspect_ratio.den;
g_out_video_st->sample_aspect_ratio.num = g_in_video_st->codec->sample_aspect_ratio.num;
g_out_video_st->codec->codec_id = g_in_video_st->codec->codec->id;
g_out_video_st->codec->time_base.num = 1;
g_out_video_st->codec->time_base.den = fps * (g_in_video_st->codec->ticks_per_frame);
// Millisecond stream time base; the muxer may adjust it on header write.
g_out_video_st->time_base.num = 1;
g_out_video_st->time_base.den = 1000;
g_out_video_st->r_frame_rate.num = fps;
g_out_video_st->r_frame_rate.den = 1;
g_out_video_st->avg_frame_rate.den = 1;
g_out_video_st->avg_frame_rate.num = fps;
g_out_video_st->codec->width = g_frame_width;
g_out_video_st->codec->height = g_frame_height;
// MP4-style containers require out-of-band (global) codec headers.
g_out_video_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
LOGI(1, "end video init");
}
// initialize audio codec
// Creates the output audio stream on the global output context g_oc and
// copies codec settings from input stream #0.
// NOTE(review): stream index 0 is hard-coded as "audio" — confirm against
// the demuxer's actual stream ordering.
static void init_audio_codec(AVFormatContext *context) {
LOGI(1, "enter init_audio_codec");
g_out_audio_st = avformat_new_stream(g_oc, context->streams[0]->codec->codec);
LOGI(1, "audio avformat_new_stream");
// The original dereferenced the new stream without checking the result of
// avformat_new_stream(); bail out on allocation failure (as the video
// initializer already does).
if( g_out_audio_st == NULL ) {
LOGE(1, "Fail to Allocate Output Audio Stream");
return ;
}
if( avcodec_copy_context(g_out_audio_st->codec, context->streams[0]->codec) != 0 ) {
LOGE(1, "Failed to Copy audio Context");
return ;
}
LOGI(1, "Success to Copy audio Context");
g_out_audio_st->codec->codec_id = g_in_audio_st->codec->codec_id;
// A zero codec_tag lets the muxer choose a container-appropriate tag.
g_out_audio_st->codec->codec_tag = 0;
g_out_audio_st->pts = g_in_audio_st->pts;
g_out_audio_st->time_base.num = g_in_audio_st->time_base.num;
g_out_audio_st->time_base.den = g_in_audio_st->time_base.den;
// MP4-style containers require out-of-band (global) codec headers.
g_out_audio_st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
LOGI(1, "end init audio");
}
// write video stream
// Remuxes one video packet into the output context, synthesizing timestamps
// from the monotonically increasing video_pts counter.
// NOTE(review): ignoring the input packet's real pts/dts in favour of a
// bare counter is the likely cause of the reported A/V desync — the proper
// fix is av_rescale_q_rnd() from the input stream time base to the output
// stream time base, assigned back into the packet.
static void write_video_stream(AVPacket *pkt) {
av_pkt = pkt;
// Original also tested `sizeof(*pkt) == 0`, which is never true.
if( pkt == NULL )
return;
// The original called av_rescale_q() on pts/dts here and discarded the
// return value — av_rescale_q returns the rescaled value and does not
// modify its arguments, so those calls were no-ops and were removed.
av_init_packet(&outpkt);
if( av_pkt->pts != AV_NOPTS_VALUE ) {
// Force the counter forward so consecutive packets never share a pts.
if( last_video_pts == video_pts ) {
video_pts++;
last_video_pts = video_pts;
}
outpkt.pts = video_pts;
}
else {
outpkt.pts = AV_NOPTS_VALUE;
}
if( av_pkt->dts == AV_NOPTS_VALUE )
outpkt.dts = AV_NOPTS_VALUE;
else
outpkt.dts = video_pts;
outpkt.data = av_pkt->data;
outpkt.size = av_pkt->size;
outpkt.stream_index = av_pkt->stream_index;
// NOTE(review): marking every packet as a keyframe is almost certainly
// wrong for inter-coded video; the input packet's flags should be kept.
outpkt.flags |= AV_PKT_FLAG_KEY;
last_video_pts = video_pts;
if(av_interleaved_write_frame(g_oc, &outpkt) < 0) {
LOGE(1, "Failed Video Write");
}
else {
g_out_video_st->codec->frame_number++;
}
// The original guarded this free with `!&outpkt` / sizeof() checks that
// can never be true; free unconditionally so the packet is not leaked.
av_free_packet(&outpkt);
}
// write audio stream
// Remuxes one audio packet, synthesizing pts/dts from the audio_pts counter.
// NOTE(review): when the incoming dts IS AV_NOPTS_VALUE, only outpkt.dts is
// set and control falls out of the function — the data/size assignment and
// av_interleaved_write_frame() call all live inside the else-branch below,
// so such packets are silently dropped and outpkt is never freed. This looks
// like a brace-placement bug and a probable contributor to the A/V desync.
static void write_audio_stream(AVPacket *pkt) {
av_pkt = NULL;
av_pkt = pkt;
// `sizeof(*pkt) == 0` can never be true; effective check is pkt == NULL.
if( pkt == NULL || sizeof(*pkt) == 0 )
return;
// NOTE(review): av_rescale_q() returns the rescaled value and does not
// modify its arguments — both results are discarded here, so these two
// calls have no effect.
av_rescale_q(av_pkt->pts, g_in_audio_st->time_base, g_in_audio_st->codec->time_base);
av_rescale_q(av_pkt->dts, g_in_audio_st->time_base, g_in_audio_st->codec->time_base);
av_init_packet(&outpkt);
if(av_pkt->pts != AV_NOPTS_VALUE)
outpkt.pts = audio_pts;
else
outpkt.pts = AV_NOPTS_VALUE;
if(av_pkt->dts == AV_NOPTS_VALUE)
outpkt.dts = AV_NOPTS_VALUE;
else {
outpkt.dts = audio_pts;
// Keep dts <= pts and strictly increasing relative to the last dts.
if( outpkt.pts >= outpkt.dts)
outpkt.dts = outpkt.pts;
if(outpkt.dts == audio_dts)
outpkt.dts++;
if(outpkt.pts < outpkt.dts) {
outpkt.pts = outpkt.dts;
audio_pts = outpkt.pts;
}
outpkt.data = av_pkt->data;
outpkt.size = av_pkt->size;
outpkt.stream_index = av_pkt->stream_index;
outpkt.flags |= AV_PKT_FLAG_KEY;
// NOTE(review): overwriting video_pts with audio_pts couples the two
// stream clocks together — confirm this is intentional.
video_pts = audio_pts;
audio_pts++;
if( av_interleaved_write_frame(g_oc, &outpkt) < 0 ) {
// if( av_write_frame(g_oc, &outpkt) < 0 ) {
LOGE(1, "Failed Audio Write");
}
else {
g_out_audio_st->codec->frame_number++;
}
// Both guards below are always false (`!&outpkt` is never true and
// sizeof() is never 0), so the free always runs.
if( !&outpkt || sizeof(outpkt) == 0 )
return;
if( !av_pkt || sizeof(*av_pkt) == 0 )
return;
av_free_packet(&outpkt);
}
}
Here is the result: the recorded file.
Here is the full source: player.c
I want to record an RTSP stream to a file while it is playing.
I tried testing the video and audio streams while changing the parameters,
but in the resulting file the video and audio are not in sync.
I searched for information about ffmpeg, but most of what I found only covered command-line usage or video-only recording.
Please advise me. -
better way to record desktop via ffmpeg
29 mars 2014, par Maged E Williamin my form there's a timer that capture the screen as jpeg every 85 ms, it captures until the user shutdown the windows or electricity cut off so the last frame(image) may get corrupt which is okay for me, now when the user log in the form run on the start up now on the on load event it ask if there images in temp folder if yes it open
ffmpeg
to convert all images into one video but that seems to take a lot of time i have +300000 image and that because it capture the screen for at least a 10 hour every day but that will take a lot of time to convert them, i wonder if there any other way to directly record at a video and when electricity cut off that video file stay good with no corruption and with that i mean :
i can't scroll or navigate
here is my ffmpeg command :
-f image2 -framerate 10 -i C:\\Temp\\%06d.jpeg -c:v libx264 -r 10 -crf 37 -pix_fmt yuv420p C:Video\\" + s + ".mp4