
Media (91)
-
Chuck D with Fine Arts Militia - No Meaning No
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Paul Westerberg - Looking Up in Heaven
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Le Tigre - Fake French
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Thievery Corporation - DC 3000
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Dan the Automator - Relaxation Spa Treatment
15 September 2011
Updated: September 2011
Language: English
Type: Audio
-
Gilberto Gil - Oslodum
15 September 2011
Updated: September 2011
Language: English
Type: Audio
Other articles (21)
-
Publishing on MediaSPIP
13 June 2013
Can I post content from an iPad tablet?
Yes, if your installed MediaSPIP is at version 0.2 or later. If needed, contact your MediaSPIP administrator to find out.
-
Media-specific libraries and software
10 December 2010
For correct and optimal operation, several things need to be taken into consideration.
After installing apache2, mysql and php5, it is important to install the other required software, whose installation is described in the related links: a set of multimedia libraries (x264, libtheora, libvpx) used for encoding and decoding video and audio in order to support as many file types as possible (see this tutorial), and FFmpeg with the maximum number of decoders and (...)
-
MediaSPIP Init and Diogène: MediaSPIP publication types
11 November 2010
When a MediaSPIP site is installed, the MediaSPIP Init plugin performs a number of operations, the main one being to create four main sections in the site and five form templates for Diogène.
These four main sections (also called sectors) are: Medias; Sites; Editos; Actualités.
For each of these sections, a form template of the same name is created. For the "Medias" section, a second "category" template is also created, allowing (...)
On other sites (5182)
-
Adding text on top of a video using the libavfilter API
19 February 2013, by prajwal
I am trying to add visible text 'sampletext' to a video and store the result in another file called "myvideo.mp4".
Below is my code (developed from the doc/examples/filtering_video.c example).
#define _XOPEN_SOURCE 600 /* for usleep */
#include <unistd.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
const char *filter_descr = "drawtext=fontcolor=white:fontfile=/usr/share/fonts/liberation/LiberationMono-Bold.ttf :rate=30000/1001:text='sampletext'";
static AVFormatContext *fmt_ctx;
static AVCodecContext *dec_ctx;
AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterGraph *filter_graph;
static int video_stream_index = -1;
static int64_t last_pts = AV_NOPTS_VALUE;
static int open_input_file(const char *filename)
{
int ret;
AVCodec *dec;
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
return ret;
}
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
return ret;
}
/* select the video stream */
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
return ret;
}
video_stream_index = ret;
dec_ctx = fmt_ctx->streams[video_stream_index]->codec;
/* init the video decoder */
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
return ret;
}
return 0;
}
static int init_filters(const char *filters_descr)
{
char args[512];
int ret;
AVFilter *buffersrc = avfilter_get_by_name("buffer");
AVFilter *buffersink = avfilter_get_by_name("ffbuffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
AVBufferSinkParams *buffersink_params;
filter_graph = avfilter_graph_alloc();
/* buffer video source: the decoded frames from the decoder will be inserted here. */
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
dec_ctx->time_base.num, dec_ctx->time_base.den,
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
return ret;
}
/* buffer video sink: to terminate the filter chain. */
buffersink_params = av_buffersink_params_alloc();
buffersink_params->pixel_fmts = pix_fmts;
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, buffersink_params, filter_graph);
av_free(buffersink_params);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
return ret;
}
/* Endpoints for the filter graph. */
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
if ((ret = avfilter_graph_parse(filter_graph, filters_descr,
&inputs, &outputs, NULL)) < 0)
return ret;
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
return ret;
return 0;
}
static void save_picref(AVFilterBufferRef *picref, AVRational time_base)
{
int x, y;
uint8_t *p0, *p;
int64_t delay;
FILE *pFile;
char szFilename[32]="myvideo.mp4";
// Open file
pFile=fopen(szFilename, "wb");
if(pFile==NULL)
return;
if (picref->pts != AV_NOPTS_VALUE) {
if (last_pts != AV_NOPTS_VALUE) {
/* sleep roughly the right amount of time;
* usleep is in microseconds, just like AV_TIME_BASE. */
delay = av_rescale_q(picref->pts - last_pts,
time_base, AV_TIME_BASE_Q);
if (delay > 0 && delay < 1000000)
usleep(delay);
}
last_pts = picref->pts;
}
fprintf(pFile, "P6\n%d %d\n255\n", picref->video->w, picref->video->h);
// Write pixel data
for (y = 0; y < picref->video->h; y++) {
p = p0;
fwrite(picref->data[0], 1, picref->video->w, pFile);
p0 += picref->linesize[0];
}
// Close file
fclose(pFile);
}
int main(int argc, char **argv)
{
int ret;
AVPacket packet;
AVFrame frame;
int got_frame;
if (argc != 2) {
fprintf(stderr, "Usage: %s file\n", argv[0]);
exit(1);
}
avcodec_register_all();
av_register_all();
avfilter_register_all();
if ((ret = open_input_file(argv[1])) < 0)
goto end;
if ((ret = init_filters(filter_descr)) < 0)
goto end;
/* read all packets */
while (1) {
AVFilterBufferRef *picref;
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
break;
if (packet.stream_index == video_stream_index) {
avcodec_get_frame_defaults(&frame);
got_frame = 0;
ret = avcodec_decode_video2(dec_ctx, &frame, &got_frame, &packet);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
break;
}
if (got_frame) {
frame.pts = av_frame_get_best_effort_timestamp(&frame);
/* push the decoded frame into the filtergraph */
if (av_buffersrc_add_frame(buffersrc_ctx, &frame, 0) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
break;
}
/* pull filtered pictures from the filtergraph */
while (1) {
ret = av_buffersink_get_buffer_ref(buffersink_ctx, &picref, 0);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
if (ret < 0)
goto end;
if (picref) {
save_picref(picref, buffersink_ctx->inputs[0]->time_base);
avfilter_unref_bufferp(&picref);
}
}
}
}
av_free_packet(&packet);
}
end:
avfilter_graph_free(&filter_graph);
if (dec_ctx)
avcodec_close(dec_ctx);
avformat_close_input(&fmt_ctx);
if (ret < 0 && ret != AVERROR_EOF) {
char buf[1024];
av_strerror(ret, buf, sizeof(buf));
fprintf(stderr, "Error occurred: %s\n", buf);
exit(1);
}
exit(0);
}
After compilation and execution, when I try to play the video using
ffplay myvideo.mp4
I get the following error:
myvideo.mp4: Invalid data found when processing input
I am unable to resolve this error; can anyone please help me solve it?
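A likely explanation, reading the code above, is that save_picref() does not produce an MP4 container at all: it writes a PPM-style text header followed by raw GRAY8 pixel data into a file that merely has an .mp4 name, so ffplay has nothing it can demux. Below is a minimal sketch (not from the original post) of how the same callback could instead dump each filtered frame to its own PGM image; the per-frame file name and the frame_index parameter are illustrative assumptions.
/* Hypothetical variant of save_picref(): write each filtered GRAY8 frame
 * to its own binary PGM file instead of appending PPM headers to "myvideo.mp4". */
static void save_picref_as_pgm(AVFilterBufferRef *picref, int frame_index)
{
    char filename[64];
    FILE *f;
    int y;
    const uint8_t *row = picref->data[0];   /* first (and only) plane of GRAY8 */
    snprintf(filename, sizeof(filename), "frame-%05d.pgm", frame_index);
    f = fopen(filename, "wb");
    if (!f)
        return;
    /* P5 = binary greyscale PGM; the buffer sink was constrained to AV_PIX_FMT_GRAY8 */
    fprintf(f, "P5\n%d %d\n255\n", picref->video->w, picref->video->h);
    for (y = 0; y < picref->video->h; y++) {
        fwrite(row, 1, picref->video->w, f);  /* one row of visible pixels */
        row += picref->linesize[0];           /* advance by the padded stride */
    }
    fclose(f);
}
Writing an actual playable .mp4 would additionally require re-encoding the filtered frames and feeding them to a muxer, along the lines of doc/examples/muxing.c in the FFmpeg source tree.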
-
A compilation error I met when developing with ffmpeg
29 January 2018, by Stanuns
I use this command to compile the file:
g++ rec.cpp -o rec -I /usr/local/include -L /usr/local/lib -lavformat -lavcodec -lavutil -lavdevice -lswscale -lasound -ldl -pthread -lz -lbz2 -lswresample -llzma -lva -lX11
After executing it I got this error:
/usr/bin/ld: /usr/local/lib/libavdevice.a(xcbgrab.o): undefined reference to symbol 'xcb_setup_pixmap_formats_length'
/usr/lib/x86_64-linux-gnu/libxcb.so.1: error adding symbols: DSO missing from command line
collect2: error: ld returned 1 exit status
How could I cope with this issue?
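For context, the "DSO missing from command line" diagnostic usually means the link line must name libxcb explicitly, because the static libavdevice.a (xcbgrab.o) references xcb symbols that are only reached transitively. One plausible fix, assuming the libxcb development package is installed, is to append -lxcb to the same command:
g++ rec.cpp -o rec -I /usr/local/include -L /usr/local/lib -lavformat -lavcodec -lavutil -lavdevice -lswscale -lasound -ldl -pthread -lz -lbz2 -lswresample -llzma -lva -lX11 -lxcb
Depending on how FFmpeg was configured, further xcb helper libraries (for example -lxcb-shm, -lxcb-xfixes, -lxcb-shape) may also be needed.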
-
How to set pts and dts of AVPacket from RTP timestamps while muxing VP8 RTP stream to webm using ffmpeg libavformat ?
30 January 2018, by user2595786
I am using the ffmpeg libavformat library to write a video-only webm file. I receive a VP8-encoded RTP stream on my server. I have successfully grouped the RTP byte stream (from the RTP payload) into individual frames and constructed an AVPacket. I am NOT re-encoding the payload to VP8 here, as it is already VP8 encoded.
I am writing the AVPacket to the file using av_interleaved_write_frame(). Though I am getting a webm file as output, it is not playing at all. When I checked the file with mkvtoolnix's 'mkvinfo' command, I found the following info:
+ EBML head
|+ EBML version: 1
|+ EBML read version: 1
|+ EBML maximum ID length: 4
|+ EBML maximum size length: 8
|+ Doc type: webm
|+ Doc type version: 2
|+ Doc type read version: 2
+ Segment, size 2142500
|+ Seek head (subentries will be skipped)
|+ EbmlVoid (size: 170)
|+ Segment information
| + Timestamp scale: 1000000
| + Multiplexing application: Lavf58.0.100
| + Writing application: Lavf58.0.100
| + Duration: 78918744.480s (21921:52:24.480)
|+ Segment tracks
| + A track
| + Track number: 1 (track ID for mkvmerge & mkvextract: 0)
| + Track UID: 1
| + Lacing flag: 0
| + Name: Video Track
| + Language: eng
| + Codec ID: V_VP8
| + Track type: video
| + Default duration: 1.000ms (1000.000 frames/fields per second for a video track)
| + Video track
| + Pixel width: 640
| + Pixel height: 480
|+ Tags
| + Tag
| + Targets
| + Simple
| + Name: ENCODER
| + String: Lavf58.0.100
| + Tag
| + Targets
| + TrackUID: 1
| + Simple
| + Name: DURATION
| + String: 21921:52:24.4800000
|+ Cluster
As we can see, the duration of the stream is disproportionately high (my valid stream duration should be around 8-10 secs), and the frame rate in the track info is not what I am setting it to. I am setting the frame rate to 25 fps.
I am applying av_rescale_q(rtpTimeStamp, codec_timebase, stream_timebase) and setting the rescaled rtpTimeStamp as the pts and dts values. My guess is that my way of setting pts and dts is wrong. Please help me set the pts and dts values on the AVPacket so as to get a working webm file with proper meta info on it.
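As a point of reference, VP8 carried over RTP uses a 90 kHz timestamp clock (RFC 7741), so rescaling the raw RTP timestamp from the encoder's 1/25 time base will inflate durations enormously. A minimal sketch of the idea, under the assumption of a 90 kHz clock and with firstRtpTimeStamp as a hypothetical variable holding the RTP timestamp of the first written frame (neither taken from the post):
#include <libavutil/mathematics.h>
/* Sketch only: rebase a raw RTP timestamp against the first one received and
 * rescale it from the 90 kHz RTP clock into the muxer stream's time base. */
static int64_t rtp_to_stream_ts(uint32_t rtpTimeStamp,
                                uint32_t firstRtpTimeStamp,
                                AVRational stream_time_base)
{
    const AVRational rtp_time_base = { 1, 90000 };                   /* assumed VP8/RTP clock */
    int64_t elapsed = (uint32_t)(rtpTimeStamp - firstRtpTimeStamp);  /* wrap-safe difference */
    return av_rescale_q(elapsed, rtp_time_base, stream_time_base);
}
/* e.g. in writeVideoStream():
 *   pkt.pts = pkt.dts = rtp_to_stream_ts(frameTimeStamp, firstRtpTimeStamp, st->time_base);
 */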
EDIT :
The following is the code I call to init the library :
#define STREAM_FRAME_RATE 25
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P
typedef struct OutputStream {
AVStream *st;
AVCodecContext *enc;
AVFrame *frame;
} OutputStream;
typedef struct WebMWriter {
OutputStream *audioStream, *videoStream;
AVFormatContext *ctx;
AVOutputFormat *outfmt;
AVCodec *audioCodec, *videoCodec;
} WebMWriter;
static OutputStream audioStream = { 0 }, videoStream = { 0 };
WebMWriter *init(char *filename)
{
av_register_all();
AVFormatContext *ctx = NULL;
AVCodec *audioCodec = NULL, *videoCodec = NULL;
const char *fmt_name = NULL;
const char *file_name = filename;
int alloc_status = avformat_alloc_output_context2(&ctx, NULL, fmt_name, file_name);
if(!ctx)
return NULL;
AVOutputFormat *fmt = (*ctx).oformat;
AVDictionary *video_opt = NULL;
av_dict_set(&video_opt, "language", "eng", 0);
av_dict_set(&video_opt, "title", "Video Track", 0);
if(fmt->video_codec != AV_CODEC_ID_NONE)
{
addStream(&videoStream, ctx, &videoCodec, AV_CODEC_ID_VP8, video_opt);
}
if(videoStream.st)
openVideo1(&videoStream, videoCodec, NULL);
av_dump_format(ctx, 0, file_name, 1);
int ret = -1;
/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&ctx->pb, file_name, AVIO_FLAG_WRITE);
if (ret < 0) {
printf("Could not open '%s': %s\n", file_name, av_err2str(ret));
return NULL;
}
}
/* Write the stream header, if any. */
AVDictionary *format_opt = NULL;
ret = avformat_write_header(ctx, &format_opt);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file: %s\n",
av_err2str(ret));
return NULL;
}
WebMWriter *webmWriter = malloc(sizeof(struct WebMWriter));
webmWriter->ctx = ctx;
webmWriter->outfmt = fmt;
webmWriter->audioStream = &audioStream;
webmWriter->videoStream = &videoStream;
webmWriter->videoCodec = videoCodec;
return webmWriter;
}
The following is the openVideo() method:
void openVideo1(OutputStream *out_st, AVCodec *codec, AVDictionary *opt_arg)
{
AVCodecContext *codec_ctx = out_st->enc;
int ret = -1;
AVDictionary *opt = NULL;
if(opt_arg != NULL)
{
av_dict_copy(&opt, opt_arg, 0);
ret = avcodec_open2(codec_ctx, codec, &opt);
}
else
{
ret = avcodec_open2(codec_ctx, codec, NULL);
}
/* copy the stream parameters to the muxer */
ret = avcodec_parameters_from_context(out_st->st->codecpar, codec_ctx);
if (ret < 0) {
printf("Could not copy the stream parameters\n");
exit(1);
}
}
The following is the addStream() method:
void addStream(OutputStream *out_st, AVFormatContext *ctx, AVCodec **cdc, enum AVCodecID codecId, AVDictionary *opt_arg)
{
(*cdc) = avcodec_find_encoder(codecId);
if(!(*cdc)) {
exit(1);
}
/*as we are passing a NULL AVCodec cdc, So AVCodecContext codec_ctx will not be allocated, we have to do it explicitly */
AVStream *st = avformat_new_stream(ctx, *cdc);
if(!st) {
exit(1);
}
out_st->st = st;
st->id = ctx->nb_streams-1;
AVDictionary *opt = NULL;
av_dict_copy(&opt, opt_arg, 0);
st->metadata = opt;
AVCodecContext *codec_ctx = st->codec;
if (!codec_ctx) {
fprintf(stderr, "Could not alloc an encoding context\n");
exit(1);
}
out_st->enc = codec_ctx;
codec_ctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
switch ((*cdc)->type) {
case AVMEDIA_TYPE_AUDIO:
codec_ctx->codec_id = codecId;
codec_ctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
codec_ctx->bit_rate = 64000;
codec_ctx->sample_rate = 48000;
codec_ctx->channels = 2;//1;
codec_ctx->channel_layout = AV_CH_LAYOUT_STEREO;
codec_ctx->codec_type = AVMEDIA_TYPE_AUDIO;
codec_ctx->time_base = (AVRational){1,STREAM_FRAME_RATE};
break;
case AVMEDIA_TYPE_VIDEO:
codec_ctx->codec_id = codecId;
codec_ctx->bit_rate = 90000;
codec_ctx->width = 640;
codec_ctx->height = 480;
codec_ctx->time_base = (AVRational){1,STREAM_FRAME_RATE};
codec_ctx->gop_size = 12;
codec_ctx->pix_fmt = STREAM_PIX_FMT;
codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
break;
default:
break;
}
/* Some formats want stream headers to be separate. */
if (ctx->oformat->flags & AVFMT_GLOBALHEADER)
codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
The following is the code I call to write a frame of data to the file:
int writeVideoStream(AVFormatContext *ctx, AVStream *st, uint8_t *data, int size, long frameTimeStamp, int isKeyFrame, AVCodecContext *codec_ctx)
{
AVRational rat = st->time_base;
AVPacket pkt = {0};
av_init_packet(&pkt);
void *opaque = NULL;
int flags = AV_BUFFER_FLAG_READONLY;
AVBufferRef *bufferRef = av_buffer_create(data, size, NULL, opaque, flags);
pkt.buf = bufferRef;
pkt.data = data;
pkt.size = size;
pkt.stream_index = st->index;
pkt.pts = pkt.dts = frameTimeStamp;
pkt.pts = av_rescale_q(pkt.pts, codec_ctx->time_base, st->time_base);
pkt.dts = av_rescale_q(pkt.dts, codec_ctx->time_base, st->time_base);
if(isKeyFrame == 1)
pkt.flags |= AV_PKT_FLAG_KEY;
int ret = av_interleaved_write_frame(ctx, &pkt);
return ret;
}
NOTE:
Here 'frameTimeStamp' is the RTP timestamp on the RTP packet of that frame.
EDIT 2.0:
My updated addStream() method with codecpars changes :
void addStream(OutputStream *out_st, AVFormatContext *ctx, AVCodec **cdc, enum AVCodecID codecId, AVDictionary *opt_arg)
{
(*cdc) = avcodec_find_encoder(codecId);
if(!(*cdc)) {
printf("@@@@@ couldnt find codec \n");
exit(1);
}
AVStream *st = avformat_new_stream(ctx, *cdc);
if(!st) {
printf("@@@@@ couldnt init stream\n");
exit(1);
}
out_st->st = st;
st->id = ctx->nb_streams-1;
AVCodecParameters *codecpars = st->codecpar;
codecpars->codec_id = codecId;
codecpars->codec_type = (*cdc)->type;
AVDictionary *opt = NULL;
av_dict_copy(&opt, opt_arg, 0);
st->metadata = opt;
//av_dict_free(&opt);
AVCodecContext *codec_ctx = st->codec;
if (!codec_ctx) {
fprintf(stderr, "Could not alloc an encoding context\n");
exit(1);
}
out_st->enc = codec_ctx;
//since opus is experimental codec
//codec_ctx->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
switch ((*cdc)->type) {
case AVMEDIA_TYPE_AUDIO:
codec_ctx->codec_id = codecId;
codec_ctx->sample_fmt = AV_SAMPLE_FMT_FLTP;//AV_SAMPLE_FMT_U8 or AV_SAMPLE_FMT_S16;
codec_ctx->bit_rate = 64000;
codec_ctx->sample_rate = 48000;
codec_ctx->channels = 2;//1;
codec_ctx->channel_layout = AV_CH_LAYOUT_STEREO; //AV_CH_LAYOUT_MONO;
codec_ctx->codec_type = AVMEDIA_TYPE_AUDIO;
codec_ctx->time_base = (AVRational){1,STREAM_FRAME_RATE};
codecpars->format = codec_ctx->sample_fmt;
codecpars->channels = codec_ctx->channels;
codecpars->sample_rate = codec_ctx->sample_rate;
break;
case AVMEDIA_TYPE_VIDEO:
codec_ctx->codec_id = codecId;
codec_ctx->bit_rate = 90000;
codec_ctx->width = 640;
codec_ctx->height = 480;
codec_ctx->time_base = (AVRational){1,STREAM_FRAME_RATE};
codec_ctx->gop_size = 12;
codec_ctx->pix_fmt = STREAM_PIX_FMT;
//codec_ctx->max_b_frames = 1;
codec_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
codec_ctx->framerate = av_inv_q(codec_ctx->time_base);
st->avg_frame_rate = codec_ctx->framerate;//(AVRational){25000, 1000};
codecpars->format = codec_ctx->pix_fmt;
codecpars->width = codec_ctx->width;
codecpars->height = codec_ctx->height;
codecpars->sample_aspect_ratio = (AVRational){codec_ctx->width, codec_ctx->height};
break;
default:
break;
}
codecpars->bit_rate = codec_ctx->bit_rate;
int ret = avcodec_parameters_to_context(codec_ctx, codecpars);
if (ret < 0) {
printf("Could not copy the stream parameters\n");
exit(1);
}
/* Some formats want stream headers to be separate. */
if (ctx->oformat->flags & AVFMT_GLOBALHEADER)
codec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}