
Other articles (103)
-
Multilang: improving the interface for multilingual blocks
18 February 2011
Multilang is an additional plugin that is not enabled by default when MediaSPIP is initialized.
Once it is activated, MediaSPIP init automatically puts a preconfiguration in place so that the new feature is operational right away. There is therefore no need to go through a configuration step for this. -
Customizable form
21 June 2013
This page presents the fields available in the form for publishing a media item and lists the different fields that can be added. Media creation form
For a media-type document, the default fields are: Text; Enable/disable the forum (the comment prompt can be disabled for each article); License; Add/remove authors; Tags
This form can be modified under:
Administration > Configuration des masques de formulaire. (...) -
APPENDIX: The plugins used specifically for the farm
5 March 2010
The central/master site of the farm needs several plugins in addition to those of the channels in order to work properly: the Gestion de la mutualisation plugin; the inscription3 plugin, to manage registrations and requests to create a shared-hosting instance as soon as users sign up; the verifier plugin, which provides a field-validation API (used by inscription3); the champs extras v2 plugin, required by inscription3 (...)
On other sites (11620)
-
What am I doing wrong with my audio writing in ffmpeg? [on hold]
12 September 2014, by Michael Nguyen
I'm trying to splice multiple video sources into one. I'm having trouble understanding the audio portion of it; rather, I should say the audio part of my code doesn't seem to work, and I don't understand why. Could somebody help me understand what I am doing wrong? The method doing all the work is called renderMovieRequest.
Thanks in advance.
My entire code can be found here: http://pastebin.com/rAZkU3XZ
Any help would be appreciated.
Below is a snippet of the code (it's too long otherwise):
int64_t timeBase;
bool seek(AVFormatContext *pFormatCtx, int frameIndex){
if(!pFormatCtx)
return false;
int64_t seekTarget = int64_t(frameIndex) * timeBase;
if(av_seek_frame(pFormatCtx, -1, seekTarget, AVSEEK_FLAG_ANY) < 0) {
ELOG("av_seek_frame failed.");
return false;
}
return true;
}
typedef struct OutputStream {
AVStream *st;
/* pts of the next frame that will be generated */
int64_t next_pts;
int samples_count;
AVFrame *frame;
AVFrame *tmp_frame;
float t, tincr, tincr2;
struct SwsContext *sws_ctx;
struct SwrContext *swr_ctx;
} OutputStream;
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
/* rescale output packet timestamp values from codec to stream timebase */
av_packet_rescale_ts(pkt, *time_base, st->time_base);
pkt->stream_index = st->index;
/* Write the compressed frame to the media file. */
log_packet(fmt_ctx, pkt);
return av_interleaved_write_frame(fmt_ctx, pkt);
}
/* Add an output stream. */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
AVCodec **codec,
enum AVCodecID codec_id) {
AVCodecContext *c;
int i;
/* find the encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
ELOG("Could not find encoder for '%s'\n", avcodec_get_name(codec_id));
return;
}
ost->st = avformat_new_stream(oc, *codec);
if (!ost->st) {
ELOG("Could not allocate stream\n");
return;
}
ost->st->id = oc->nb_streams-1;
c = ost->st->codec;
switch ((*codec)->type) {
case AVMEDIA_TYPE_AUDIO:
c->sample_fmt = (*codec)->sample_fmts ?
(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
c->bit_rate = 64000;
c->sample_rate = 44100;
if ((*codec)->supported_samplerates) {
c->sample_rate = (*codec)->supported_samplerates[0];
for (i = 0; (*codec)->supported_samplerates[i]; i++) {
if ((*codec)->supported_samplerates[i] == 44100)
c->sample_rate = 44100;
}
}
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
c->channel_layout = AV_CH_LAYOUT_STEREO;
if ((*codec)->channel_layouts) {
c->channel_layout = (*codec)->channel_layouts[0];
for (i = 0; (*codec)->channel_layouts[i]; i++) {
if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
c->channel_layout = AV_CH_LAYOUT_STEREO;
}
}
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
ost->st->time_base = (AVRational){ 1, c->sample_rate };
break;
case AVMEDIA_TYPE_VIDEO:
c->codec_id = codec_id;
c->bit_rate = 400000;
/* Resolution must be a multiple of two. */
// c->width = 352;
// c->height = 288;
c->width = 1280;
c->height = 720;
/* timebase: This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
* identical to 1. */
ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
c->time_base = ost->st->time_base;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B frames */
c->max_b_frames = 2;
}
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
* This does not happen with normal video, it just happens here as
* the motion of the chroma plane does not match the luma plane. */
c->mb_decision = 2;
}
break;
default:
break;
}
/* Some formats want stream headers to be separate. */
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
/**************************************************************/
/* audio output */
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
uint64_t channel_layout,
int sample_rate, int nb_samples)
{
AVFrame *frame = av_frame_alloc();
int ret;
if (!frame) {
fprintf(stderr, "Error allocating an audio frame\n");
exit(1);
}
frame->format = sample_fmt;
frame->channel_layout = channel_layout;
frame->sample_rate = sample_rate;
frame->nb_samples = nb_samples;
if (nb_samples) {
ret = av_frame_get_buffer(frame, 0);
if (ret < 0) {
fprintf(stderr, "Error allocating an audio buffer\n");
exit(1);
}
}
return frame;
}
static int open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
AVCodecContext *c;
int nb_samples;
int ret;
AVDictionary *opt = NULL;
c = ost->st->codec;
/* open it */
av_dict_copy(&opt, opt_arg, 0);
ret = avcodec_open2(c, codec, &opt);
av_dict_free(&opt);
if (ret < 0) {
ELOG("Could not open audio codec: %s\n", av_err2str(ret));
return ret;
}
/* init signal generator */
ost->t = 0;
ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
/* increment frequency by 110 Hz per second */
ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
nb_samples = 10000;
else
nb_samples = c->frame_size;
ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
c->sample_rate, nb_samples);
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
c->sample_rate, nb_samples);
/* create resampler context */
ost->swr_ctx = swr_alloc();
if (!ost->swr_ctx) {
ELOG("Could not allocate resampler context\n");
return -300;
}
/* set options */
av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
/* initialize the resampling context */
if ((ret = swr_init(ost->swr_ctx)) < 0) {
ELOG("Failed to initialize the resampling context: %i\n", ret);
return ret;
}
return 0;
}
/*
* encode one audio frame and send it to the muxer
* return 1 when encoding is finished, 0 otherwise
*/
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost, AVFrame *frame)
{
AVCodecContext *c;
AVPacket pkt = { 0 }; // data and size must be 0;
// AVFrame *frame;
int ret;
int got_packet;
int dst_nb_samples;
av_init_packet(&pkt);
c = ost->st->codec;
// frame = get_audio_frame(ost);
if (frame) {
/* convert samples from native format to destination codec format, using the resampler */
/* compute destination number of samples */
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
c->sample_rate, c->sample_rate, AV_ROUND_UP);
av_assert0(dst_nb_samples == frame->nb_samples);
/* when we pass a frame to the encoder, it may keep a reference to it
* internally;
* make sure we do not overwrite it here
*/
ret = av_frame_make_writable(ost->frame);
if (ret < 0) {
ELOG("Unable to prepare frame for writing: Error code: %s", av_err2str(ret));
return ret;
}
/* convert to destination format */
ret = swr_convert(ost->swr_ctx,
ost->frame->data, dst_nb_samples,
(const uint8_t **)frame->data, frame->nb_samples);
if (ret < 0) {
ELOG("Error while converting: %s\n", av_err2str(ret));
return -1;
}
frame = ost->frame;
frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
ost->samples_count += dst_nb_samples;
}
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
if (ret < 0) {
ELOG("Error encoding audio frame: %s\n", av_err2str(ret));
return -1;
}
if (got_packet) {
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
if (ret < 0) {
ELOG( "Error while writing audio frame: %s\n", av_err2str(ret));
return -1;
}
}
return (frame || got_packet) ? 0 : 1;
}
/**************************************************************/
/* video output */
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
AVFrame *picture;
int ret;
picture = av_frame_alloc();
if (!picture)
return NULL;
picture->format = pix_fmt;
picture->width = width;
picture->height = height;
/* allocate the buffers for the frame data */
ret = av_frame_get_buffer(picture, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate frame data.\n");
exit(1);
}
return picture;
}
static int open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
int ret;
AVCodecContext *c = ost->st->codec;
AVDictionary *opt = NULL;
av_dict_copy(&opt, opt_arg, 0);
/* open the codec */
ret = avcodec_open2(c, codec, &opt);
av_dict_free(&opt);
if (ret < 0) {
ELOG("Could not open video codec: %s\n", av_err2str(ret));
return ret;
}
/* allocate and init a re-usable frame */
DLOG("Allocate and init a are-usable frame: %i x %i Format: %i", c->width, c->height, c->pix_fmt);
ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
if (!ost->frame) {
ELOG("Could not allocate video frame\n");
return -100;
}
/* If the output format is not YUV420P, then a temporary YUV420P
* picture is needed too. It is then converted to the required
* output format. */
ost->tmp_frame = NULL;
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
DLOG("input format is not YUV420P converting to size %i x %i", c->width, c->height);
ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
if (!ost->tmp_frame) {
ELOG("Could not allocate temporary picture\n");
return -200;
}
}
return 0;
}
/*
* encode one video frame and send it to the muxer
* return 1 when encoding is finished, 0 otherwise
*/
static int write_video_frame(AVFormatContext *oc, OutputStream *ost, AVFrame *frame)
{
int ret;
AVCodecContext *c;
int got_packet = 0;
c = ost->st->codec;
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* a hack to avoid data copy with some raw video muxers */
AVPacket pkt;
av_init_packet(&pkt);
if (!frame)
return 1;
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = ost->st->index;
pkt.data = (uint8_t *)frame;
pkt.size = sizeof(AVPicture);
pkt.pts = pkt.dts = frame->pts;
av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
ret = av_interleaved_write_frame(oc, &pkt);
} else {
AVPacket pkt = { 0 };
av_init_packet(&pkt);
/* encode the image */
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
exit(1);
}
if (got_packet) {
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
} else {
ret = 0;
}
}
if (ret < 0) {
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
exit(1);
}
return (frame || got_packet) ? 0 : 1;
}
static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
avcodec_close(ost->st->codec);
av_frame_free(&ost->frame);
av_frame_free(&ost->tmp_frame);
sws_freeContext(ost->sws_ctx);
swr_free(&ost->swr_ctx);
}
int renderMovieRequest(movieRequest *movieRequestObj, string outputPath) {
AVOutputFormat *ofmt = NULL;
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
AVFormatContext *pFormatCtx = NULL;
AVCodec *audio_codec, *video_codec;
OutputStream video_st = { 0 }, audio_st = { 0 };
size_t i;
int videoStream, audioStream;
AVCodecContext *pCodecCtx = NULL;
AVCodec *pCodec = NULL;
AVFrame *pFrame = NULL;
AVFrame *pFrameRGB = NULL;
AVPacket packet = { 0 };
int frameFinished;
int audioFrameFinished;
int numBytes;
uint8_t *buffer = NULL;
AVDictionary *optionsDict = NULL;
AVDictionary *opt = NULL;
struct SwsContext *sws_ctx = NULL;
const char *in_filename, *out_filename;
int ret;
int have_audio = 0, have_video = 0;
int encode_audio = 0, encode_video = 0;
processProtobuf(movieRequestObj);
out_filename = outputPath.c_str();
av_register_all();
DLOG("attempting to create context for output file %s", out_filename);
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
if (!ofmt_ctx) {
ELOG("Could not create output context\n");
ret = AVERROR_UNKNOWN;
return ret; //goto end;
}
ofmt = ofmt_ctx->oformat;
/* Add the audio and video streams using the default format codecs
* and initialize the codecs. */
if (ofmt->video_codec != AV_CODEC_ID_NONE) {
add_stream(&video_st, ofmt_ctx, &video_codec, ofmt->video_codec);
have_video = 1;
encode_video = 1;
}
if (ofmt->audio_codec != AV_CODEC_ID_NONE) {
add_stream(&audio_st, ofmt_ctx, &audio_codec, ofmt->audio_codec);
have_audio = 1;
encode_audio = 1;
}
DLOG("allocate encode buffers");
/* Now that all the parameters are set, we can open the audio and
* video codecs and allocate the necessary encode buffers. */
if (have_video)
open_video(ofmt_ctx, video_codec, &video_st, opt);
if (have_audio) {
DLOG("Opening audio codec");
open_audio(ofmt_ctx, audio_codec, &audio_st, opt);
}
DLOG("open output file for writing");
/* open the output file, if needed */
if (!(ofmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
if (ret < 0) {
ELOG( "Could not open '%s': %s\n", out_filename, av_err2str(ret));
return 1;
}
}
/* Write the stream header, if any. */
ret = avformat_write_header(ofmt_ctx, &opt);
if (ret < 0) {
ELOG("Error occurred when opening output file: %s\n", av_err2str(ret));
return 1;
}
vector<clipshptr> * clips = &(movieRequestObj->clips);
DLOG("ready to process clips: %i", clips->size());
for (size_t clipIdx = 0; clipIdx < clips->size(); ++clipIdx) {
shared_ptr<clip> currentClip = clips->at(clipIdx);
switch (currentClip->getClipType()) {
case VIDEO_CLIP: {
DLOG("clip is a video clip...");
shared_ptr<videoclip> vidClip = dynamic_pointer_cast<videoclip>(clips->at(clipIdx));
if (vidClip->shouldHaveSegments) {
// open the file for reading and create a temporary file for output
in_filename = vidClip->vidFileName.c_str();
DLOG("Opening %s for reading", in_filename);
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
ELOG("Could not open input file '%s'", in_filename);
return ret; //goto end;
}
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
ELOG("Failed to retrieve input stream information");
return ret; //goto end;
}
av_dump_format(ifmt_ctx, 0, in_filename, 0);
videoStream = -1;
audioStream = -1;
// setup input format context and output format context;
// AVStream *video_in_stream = NULL;
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
videoStream=i;
// video_in_stream = ifmt_ctx->streams[i];
}
else if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO) {
audioStream=i;
// video_in_stream = ifmt_ctx->streams[i];
}
}
if (videoStream == -1) {
DLOG("not a video stream.");
continue;
}
// Get a pointer to the codec context for the video stream
pCodecCtx = ifmt_ctx->streams[videoStream]->codec;
if (pCodecCtx == NULL) {
ELOG("Error in getting pointer to codec for vidstream");
}
DLOG("Input pixel format: %i ", pCodecCtx->pix_fmt);
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL) {
ELOG("Unsupported codec!\n");
return -1; // Codec not found
}
// Open codec
if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0) {
ELOG("Unable to open codec");
return -1; // Could not open codec
}
// get the timebase
timeBase = (int64_t(pCodecCtx->time_base.num) * AV_TIME_BASE) / int64_t(pCodecCtx->time_base.den);
// Allocate video frame
pFrame=av_frame_alloc();
// Allocate an AVFrame structure
pFrameRGB=av_frame_alloc();
if(pFrameRGB==NULL)
return -1;
// Determine required buffer size and allocate buffer
// numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
numBytes = avpicture_get_size(PIX_FMT_RGB24, movieRequestObj->width, movieRequestObj->height);
DLOG("Buffer size allocated: %i x %i: %i ", movieRequestObj->width, movieRequestObj->height, numBytes);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
sws_ctx = sws_getContext
(
pCodecCtx->width,
pCodecCtx->height,
pCodecCtx->pix_fmt,
movieRequestObj->width,
movieRequestObj->height,
PIX_FMT_RGB24,
SWS_BILINEAR,
NULL,
NULL,
NULL
);
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, movieRequestObj->width, movieRequestObj->height);
size_t numSegments = vidClip->segments.size();
DLOG("Found %i segments to process", numSegments);
for (size_t segmentIdx = 0; segmentIdx < numSegments; ++segmentIdx) {
// seek to the right position
int frameOffset = vidClip->segments.at(segmentIdx).first;
int clipDuration = vidClip->segments.at(segmentIdx).second;
DLOG("Starting Frame Number: %i Duration: %i", frameOffset, clipDuration);
seek(ifmt_ctx, frameOffset);
// loop for X frames where X is < frameOffset + clipDuration; clipDuration is the length of the clip in terms of frames
for (int frameIdx = frameOffset; frameIdx < (frameOffset + clipDuration); ++frameIdx) {
av_init_packet(&packet);
int avReadResult = 0;
int continueRecording = 1;
while ((continueRecording == 1) && (frameIdx < (frameOffset + clipDuration) )) {
avReadResult = av_read_frame(ifmt_ctx, &packet);
if(avReadResult != 0){
if (avReadResult != AVERROR_EOF) {
ELOG("av_read_frame error: %i", avReadResult );
} else {
ILOG("End of input file");
}
continueRecording = 0;
}
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
// Did we get a video frame?
if(frameFinished) {
// Convert the image from its native format to RGB
sws_scale
(
sws_ctx,
(uint8_t const * const *)pFrame->data,
pFrame->linesize,
0,
pCodecCtx->height,
pFrameRGB->data,
pFrameRGB->linesize
);
write_video_frame(ofmt_ctx, &video_st, pFrameRGB);
frameIdx++;
}
}
else if (packet.stream_index == audioStream) {
// Decode audio frame
DLOG("Audio frame found");
avcodec_decode_audio4(pCodecCtx, pFrameRGB, &audioFrameFinished, &packet);
if (audioFrameFinished) {
// write the audio frame to file
write_audio_frame(ofmt_ctx, &audio_st, pFrameRGB);
}
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
// Free the RGB image
}
}
DLOG("Cleaning up frame allocations");
av_free(buffer);
av_free(pFrameRGB);
// Free the YUV frame
av_free(pFrame);
} // end video clip processing
}
break;
case TITLE_CLIP: {
}
break;
default:
ELOG("Failed to identify clip");
break;
} // end switch statement
DLOG("Finished processing clip #%i", clipIdx);
avformat_close_input(&ifmt_ctx);
} // end main for loop -> clip iteration
/* Write the trailer, if any. The trailer must be written before you
* close the CodecContexts open when you wrote the header; otherwise
* av_write_trailer() may try to use memory that was freed on
* av_codec_close(). */
av_write_trailer(ofmt_ctx);
/* Close each codec. */
if (have_video)
close_stream(ofmt_ctx, &video_st);
if (have_audio)
close_stream(ofmt_ctx, &audio_st);
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE)) {
/* Close the output file. */
avio_close(ofmt_ctx->pb);
}
DLOG("Closing input format context");
avformat_close_input(&ifmt_ctx);
DLOG("Free ouptut format context");
avformat_free_context(ofmt_ctx);
if (ret < 0 && ret != AVERROR_EOF) {
ELOG( "Error occurred: %s\n", av_err2str(ret));
return 1;
}
return 0;
}
#ifdef __cplusplus
}
#endif
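One thing worth noting about the snippet above: the audio packets are decoded with pCodecCtx, which was opened for the video stream, and they are decoded into pFrameRGB. As a hedged sketch only, with a hypothetical aDecCtx standing in for a decoder context opened for ifmt_ctx->streams[audioStream], the audio path would more usually look like this:

/* Hypothetical sketch: decode with the audio stream's own codec context
 * (aDecCtx) and pass the raw frame to write_audio_frame(), which resamples
 * it through ost->swr_ctx before encoding. */
static int transcode_audio_packet(AVFormatContext *ofmt_ctx,
                                  AVCodecContext *aDecCtx,
                                  OutputStream *ost, AVPacket *pkt)
{
    AVFrame *decoded = av_frame_alloc();
    int got_frame = 0;
    int ret = avcodec_decode_audio4(aDecCtx, decoded, &got_frame, pkt);
    if (ret >= 0 && got_frame)
        ret = write_audio_frame(ofmt_ctx, ost, decoded);
    av_frame_free(&decoded);
    return ret;
}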
</videoclip></videoclip></clip></clipshptr> -
ffmpeg and streaming video - frame time issue
6 May 2014, by 4ntoine
I've compiled ffmpeg-android (https://github.com/appunite/AndroidFFmpeg) and it works for files.
The problem is that it shows nothing for network streams (both RTMP and HLS), because the frame timestamp is too large and does not appear to be measured from the beginning of the video (as it is for files). The player then waits for the first frame's time, which for a network stream is far in the future:
player_decode_video copying...
05-05 18:11:26.994: INFO/player.c(16998): player_decode_video Decoded video frame: 568.233000, time_base: 51140970
05-05 18:11:26.994: INFO/player.c(16998): player_wait_for_frame[0] start
...
05-05 18:11:30.587: INFO/player.c(16998): player_wait_for_frame[0 = Video] = (568.233000) - (0.000977)
05-05 18:11:30.587: INFO/player.c(16998): player_wait_for_frame[0] Waiting for frame: sleeping: 568232023
05-05 18:11:31.088: INFO/player.c(16998): player_wait_for_frame[0] timeout
05-05 18:11:31.088: INFO/player.c(16998): player_wait_for_frame[0 = Video] = (568.233000) - (0.501542)
05-05 18:11:31.088: INFO/player.c(16998): player_wait_for_frame[0] Waiting for frame: sleeping: 567731458
05-05 18:11:31.588: INFO/player.c(16998): player_wait_for_frame[0] timeout
05-05 18:11:31.588: INFO/player.c(16998): player_wait_for_frame[0 = Video] = (568.233000) - (1.002778)
05-05 18:11:31.588: INFO/player.c(16998): player_wait_for_frame[0] Waiting for frame: sleeping: 567230222
05-05 18:11:32.089: INFO/player.c(16998): player_wait_for_frame[0] timeout
05-05 18:11:32.089: INFO/player.c(16998): player_wait_for_frame[0 = Video] = (568.233000) - (1.504563)
05-05 18:11:32.089: INFO/player.c(16998): player_wait_for_frame[0] Waiting for frame: sleeping: 566728437
05-05 18:11:32.590: INFO/player.c(16998): player_wait_for_frame[0] timeout
05-05 18:11:32.590: INFO/player.c(16998): player_wait_for_frame[0 = Video] = (568.233000) - (2.005708)
05-05 18:11:32.590: INFO/player.c(16998): player_wait_for_frame[0] Waiting for frame: sleeping: 566227292
Also, I can't figure out what the frame timestamp means for a network stream (for file frames it is the time from the beginning of the video).
For a file it is clear: time_base is the frame number, the timestamp is the time from the beginning of the video, and it plays files:
player_decode_video Decoded video frame: 0.320000, time_base: 8
05-05 18:32:42.344: INFO/player.c(16998): player_wait_for_frame[0] start
05-05 18:32:42.344: INFO/player.c(16998): player_wait_for_frame[0 = Video] = (0.320000) - (0.344337)
05-05 18:32:42.344: INFO/player.c(16998): player_wait_for_frame[0] Waiting for frame: sleeping: -24337
05-05 18:32:42.344: INFO/player.c(16998): player_wait_for_frame[0] finish[0]
05-05 18:32:42.344: INFO/player.c(16998): player_update_time: 0.346169/4.000000
05-05 18:32:42.344: INFO/player.c(16998): player_decode waiting for frame[0]
05-05 18:32:42.344: INFO/player.c(16998): player_decode decoding frame[0]
05-05 18:32:42.344: INFO/player.c(16998): player_decode_video decoding
05-05 18:32:42.344: INFO/player.c(16998): player_decode_video copy wait
05-05 18:32:42.344: INFO/player.c(16998): player_read_from_stream Read frame
05-05 18:32:42.344: INFO/player.c(16998): player_read_from_stream looking for stream
05-05 18:32:42.344: INFO/player.c(16998): player_read_from_stream stream found [0]
05-05 18:32:42.344: INFO/player.c(16998): player_read_from_stream waiting for queue
05-05 18:32:42.344: INFO/player.c(16998): Format: WINDOW_FORMAT_RGBA_8888
05-05 18:32:42.344: INFO/player.c(16998): Buffer: width: 1280, height: 720, stride: 1280
05-05 18:32:42.344: INFO/player.c(16998): player_decode_video copying...
05-05 18:32:42.425: INFO/player.c(16998): player_decode_video Decoded video frame: 0.360000, time_base: 9
05-05 18:32:42.425: INFO/player.c(16998): player_wait_for_frame[0] start
05-05 18:32:42.425: INFO/player.c(16998): player_wait_for_frame[0 = Video] = (0.360000) - (0.427994)
05-05 18:32:42.425: INFO/player.c(16998): player_wait_for_frame[0] Waiting for frame: sleeping: -67994
05-05 18:32:42.425: INFO/player.c(16998): player_wait_for_frame[0] finish[0]
05-05 18:32:42.425: INFO/player.c(16998): player_update_time: 0.429214/4.000000
05-05 18:32:42.425: INFO/player.c(16998): player_decode waiting for frame[0]
05-05 18:32:42.425: INFO/player.c(16998): player_decode decoding frame[0]
05-05 18:32:42.425: INFO/player.c(16998): player_decode_video decoding
05-05 18:32:42.425: INFO/player.c(16998): player_read_from_stream Read frame
05-05 18:32:42.425: INFO/player.c(16998): player_read_from_stream looking for stream
05-05 18:32:42.425: INFO/player.c(16998): player_read_from_stream stream found [0]
05-05 18:32:42.425: INFO/player.c(16998): player_read_from_stream waiting for queue
05-05 18:32:42.425: INFO/player.c(16998): player_decode_video copy wait
05-05 18:32:42.435: INFO/player.c(16998): Format: WINDOW_FORMAT_RGBA_8888
05-05 18:32:42.435: INFO/player.c(16998): Buffer: width: 1280, height: 720, stride: 1280
05-05 18:32:42.435: INFO/player.c(16998): player_decode_video copying...
05-05 18:32:42.495: INFO/player.c(16998): player_decode_video Decoded video frame: 0.400000, time_base: 10
05-05 18:32:42.495: INFO/player.c(16998): player_wait_for_frame[0] start
05-05 18:32:42.495: INFO/player.c(16998): player_wait_for_frame[0 = Video] = (0.400000) - (0.494742)
05-05 18:32:42.495: INFO/player.c(16998): player_wait_for_frame[0] Waiting for frame: sleeping: -94742
05-05 18:32:42.495: INFO/player.c(16998): player_wait_for_frame[0] finish[0]
05-05 18:32:42.495: INFO/player.c(16998): player_update_time: 0.495993/4.000000
05-05 18:32:42.495: INFO/player.c(16998): player_decode waiting for frame[0]
05-05 18:32:42.495: INFO/player.c(16998): player_decode decoding frame[0]
05-05 18:32:42.495: INFO/player.c(16998): player_decode_video decoding
05-05 18:32:42.495: INFO/player.c(16998): player_decode_video copy wait
05-05 18:32:42.495: INFO/player.c(16998): player_read_from_stream Read frame
05-05 18:32:42.495: INFO/player.c(16998): Format: WINDOW_FORMAT_RGBA_8888
05-05 18:32:42.495: INFO/player.c(16998): Buffer: width: 1280, height: 720, stride: 1280
05-05 18:32:42.495: INFO/player.c(16998): player_decode_video copying...
05-05 18:32:42.495: INFO/player.c(16998): player_read_from_stream looking for stream
05-05 18:32:42.495: INFO/player.c(16998): player_read_from_stream stream found [0]
05-05 18:32:42.495: INFO/player.c(16998): player_read_from_stream waiting for queue
05-05 18:32:42.555: INFO/player.c(16998): player_decode_video Decoded video frame: 0.440000, time_base: 11
05-05 18:32:42.555: INFO/player.c(16998): player_wait_for_frame[0] start
05-05 18:32:42.555: INFO/player.c(16998): player_wait_for_frame[0 = Video] = (0.440000) - (0.556698)
05-05 18:32:42.555: INFO/player.c(16998): player_wait_for_frame[0] Waiting for frame: sleeping: -116698
05-05 18:32:42.555: INFO/player.c(16998): player_wait_for_frame[0] finish[0]
05-05 18:32:42.555: INFO/player.c(16998): player_update_time: 0.557858/4.000000
05-05 18:32:42.555: INFO/player.c(16998): player_decode waiting for frame[0]
05-05 18:32:42.555: INFO/player.c(16998): player_read_from_stream Read frame
05-05 18:32:42.555: INFO/player.c(16998): player_read_from_stream looking for stream
05-05 18:32:42.555: INFO/player.c(16998): player_read_from_stream stream found [0]
05-05 18:32:42.555: INFO/player.c(16998): player_read_from_stream waiting for queue
05-05 18:32:42.555: INFO/player.c(16998): player_decode decoding frame[0]
05-05 18:32:42.555: INFO/player.c(16998): player_decode_video decoding
05-05 18:32:42.555: INFO/player.c(16998): player_decode_video copy wait
05-05 18:32:42.565: INFO/player.c(16998): Format: WINDOW_FORMAT_RGBA_8888
05-05 18:32:42.565: INFO/player.c(16998): Buffer: width: 1280, height: 720, stride: 1280
05-05 18:32:42.565: INFO/player.c(16998): player_decode_video copying...
05-05 18:32:42.625: INFO/player.c(16998): player_decode_video Decoded video frame: 0.480000, time_base: 12
So what is the meaning of the timestamp for a stream, and what should I change to make it play streams? (RTMP/HLS streams can be opened now, but the player waits for a frame time that is far in the future.)
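For reference, a decoded frame's pts is counted in units of the stream's time_base, so its position in seconds is pts × time_base. In the file log above, the field printed as "time_base" is actually the raw pts: pts 8 with a stream time_base of 1/25 gives 8 × 1/25 = 0.32 s, which matches the printed 0.320000. By the same arithmetic, the network stream's first frame at 568.233 s is simply a pts counted from the server's own offset rather than from zero.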
UPDATE:
int64_t pts = av_frame_get_best_effort_timestamp(frame);
if (pts == AV_NOPTS_VALUE) {
pts = 0;
}
int64_t time = av_rescale_q(pts, stream->time_base, AV_TIME_BASE_Q);
LOGI(10,
"player_decode_video Decoded video frame: %f, time_base: %" SCNd64,
time/1000000.0, pts);
player_wait_for_frame(player, time, stream_no);
It extracts a time which is far in the future:
player_wait_for_frame[0 = Video] = (568.233000) - (0.000977)
Is it correct?
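The rescaling itself follows the documented use of av_rescale_q, but on a live stream the pts does not start at zero. A minimal sketch of one common approach, using the same names as the snippet above and assuming the rest of the player is unchanged, is to subtract the stream's start_time before waiting, so that the first frame lands near t = 0:

int64_t pts = av_frame_get_best_effort_timestamp(frame);
if (pts == AV_NOPTS_VALUE) {
    pts = 0;
}
/* Live streams (RTMP/HLS) usually begin at a server-side offset;
 * subtract the stream's start time so playback starts near zero. */
if (stream->start_time != AV_NOPTS_VALUE) {
    pts -= stream->start_time;
}
int64_t time = av_rescale_q(pts, stream->time_base, AV_TIME_BASE_Q);
player_wait_for_frame(player, time, stream_no);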
-
Error using FFMPEG to convert each input image into H264 compiling in Visual Studio running in MevisLab
21 February 2014, by user3012914
I am creating an ML module in the MevisLab framework. I am using FFMPEG to convert each image I get into an H264 video and save it once I have all the frames. Unfortunately, I have a problem allocating the output buffer size: the application crashes when I include that allocation in my code, and if I leave it out, the output file is just 4 KB with nothing stored in it.
I am also not sure whether this is the correct way of getting the HBITMAP into the encoder. Suggestions would be great.
My code:
BITMAPINFO bitmapInfo;
HDC hdc;
ZeroMemory(&bitmapInfo, sizeof(bitmapInfo));
BITMAPINFOHEADER &bitmapInfoHeader = bitmapInfo.bmiHeader;
bitmapInfoHeader.biSize = sizeof(bitmapInfoHeader);
bitmapInfoHeader.biWidth = _imgWidth;
bitmapInfoHeader.biHeight = _imgHeight;
bitmapInfoHeader.biPlanes = 1;
bitmapInfoHeader.biBitCount = 24;
bitmapInfoHeader.biCompression = BI_RGB;
bitmapInfoHeader.biSizeImage = ((bitmapInfoHeader.biWidth * bitmapInfoHeader.biBitCount / 8 + 3) & 0xFFFFFFFC) * bitmapInfoHeader.biHeight;
bitmapInfoHeader.biXPelsPerMeter = 10000;
bitmapInfoHeader.biYPelsPerMeter = 10000;
bitmapInfoHeader.biClrUsed = 0;
bitmapInfoHeader.biClrImportant = 0;
//RGBQUAD* Ref = new RGBQUAD[_imgWidth,_imgHeight];
HDC hdcscreen = GetDC(0);
hdc = CreateCompatibleDC(hdcscreen);
ReleaseDC(0, hdcscreen);
_hbitmap = CreateDIBSection(hdc, (BITMAPINFO*) &bitmapInfoHeader, DIB_RGB_COLORS, &_bits, NULL, NULL);
I use the code above to get the bitmap. Then I allocate the codec context as follows:
c->bit_rate = 400000;
// resolution must be a multiple of two
c->width = 1920;
c->height = 1080;
// frames per second
frame_rate = _framesPerSecondFld->getIntValue();
//AVRational rational = {1,10};
//c->time_base = (AVRational){1,25};
//c->time_base = (AVRational){1,25};
c->gop_size = 10; // emit one intra frame every ten frames
c->max_b_frames = 1;
c->keyint_min = 1; //minimum GOP size
c->time_base.num = 1; // framerate numerator
c->time_base.den = _framesPerSecondFld->getIntValue();
c->i_quant_factor = (float)0.71; // qscale factor between P and I frames
c->pix_fmt = AV_PIX_FMT_RGB32;
std::string msg;
msg.append("Context is stored");
_messageFld->setStringValue(msg.c_str());I create the Bitmap Image as followed from the input
PagedImage *inImg = getUpdatedInputImage(0);
ML_CHECK(inImg);
ImageVector imgExt = inImg->getImageExtent();
if ((imgExt.x == _imgWidth) && (imgExt.y == _imgHeight))
{
if (((imgExt.x % 4)==0) && ((imgExt.y % 4) == 0))
{
// read out input image and write output image into video
// get input image as an array
void* imgData = NULL;
SubImageBox imageBox(imgExt); // get the whole image
getTile(inImg, imageBox, MLuint8Type, &imgData);
iData = (MLuint8*)imgData;
int r = 0; int g = 0;int b = 0;
// since we have only images with
// a z-ext of 1, we can compute the c stride as follows
int cStride = _imgWidth * _imgHeight;
size_t offset = 0; // must be wider than uint8_t: _imgWidth * y overflows a byte
// pointer into the bitmap that is
// used to write images into the avi
UCHAR* dst = (UCHAR*)_bits;
for (int y = _imgHeight-1; y >= 0; y--)
{ // reversely scan the image. if y-rows of DIB are set in normal order, no compression will be available.
offset = _imgWidth * y;
for (int x = 0; x < _imgWidth; x++)
{
if (_isGreyValueImage)
{
r = iData[offset + x];
*dst++ = (UCHAR)r;
*dst++ = (UCHAR)r;
*dst++ = (UCHAR)r;
}
else
{
b = iData[offset + x]; // windows bitmap need reverse order: bgr instead of rgb
g = iData[offset + x + cStride ];
r = iData[offset + x + cStride + cStride];
*dst++ = (UCHAR)r;
*dst++ = (UCHAR)g;
*dst++ = (UCHAR)b;
}
// alpha channel in input image is ignored
}
}
Then I feed it to the encoder and write it out as H264:
in_width = c->width;
in_height = c->height;
out_width = c->width;
out_height = c->height;
ibytes = avpicture_get_size(PIX_FMT_BGR32, in_width, in_height);
obytes = avpicture_get_size(PIX_FMT_YUV420P, out_width, out_height);
outbuf_size = 100000 + c->width*c->height*(32>>3); // allocate output buffer
outbuf = static_cast<uint8_t*>(malloc(outbuf_size));
if(!obytes)
{
std::string msg;
msg.append("Bytes cannot be allocated");
_messageFld->setStringValue(msg.c_str());
}
else
{
std::string msg;
msg.append("Bytes allocation done");
_messageFld->setStringValue(msg.c_str());
}
//create buffer for the output image
inbuffer = (uint8_t*)av_malloc(ibytes);
outbuffer = (uint8_t*)av_malloc(obytes);
inbuffer = (uint8_t*)dst;
//create ffmpeg frame structures. These do not allocate space for image data,
//just the pointers and other information about the image.
AVFrame* inpic = avcodec_alloc_frame();
AVFrame* outpic = avcodec_alloc_frame();
//this will set the pointers in the frame structures to the right points in
//the input and output buffers.
avpicture_fill((AVPicture*)inpic, inbuffer, PIX_FMT_BGR32, in_width, in_height);
avpicture_fill((AVPicture*)outpic, outbuffer, PIX_FMT_YUV420P, out_width, out_height);
av_image_alloc(outpic->data, outpic->linesize, c->width, c->height, c->pix_fmt, 1);
inpic->data[0] += inpic->linesize[0]*(_imgHeight-1); // flipping frame
inpic->linesize[0] = -inpic->linesize[0];
if(!inpic)
{
std::string msg;
msg.append("Image is empty");
_messageFld->setStringValue(msg.c_str());
}
else
{
std::string msg;
msg.append("Picture has allocations");
_messageFld->setStringValue(msg.c_str());
}
//create the conversion context
fooContext = sws_getContext(in_width, in_height, PIX_FMT_BGR32, out_width, out_height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
//perform the conversion
sws_scale(fooContext, inpic->data, inpic->linesize, 0, in_height, outpic->data, outpic->linesize);
//out_size = avcodec_encode_video(c, outbuf,outbuf_size, outpic);
if(!out_size)
{
std::string msg;
msg.append("Outsize is not valid");
_messageFld->setStringValue(msg.c_str());
}
else
{
std::string msg;
msg.append("Outsize is valid");
_messageFld->setStringValue(msg.c_str());
}
fwrite(outbuf, 1, out_size, f);
if(!fwrite)
{
std::string msg;
msg.append("Frames couldnt be written");
_messageFld->setStringValue(msg.c_str());
}
else
{
std::string msg;
msg.append("Frames written to the file");
_messageFld->setStringValue(msg.c_str());
}
// for (;out_size; i++)
// {
out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
std::string msg;
msg.append("Writing Frames");
_messageFld->setStringValue(msg.c_str());// encode the delayed frames
_numFramesFld->setIntValue(_numFramesFld->getIntValue()+1);
fwrite(outbuf, 1, out_size, f);
// }
outbuf[0] = 0x00;
outbuf[1] = 0x00; // add sequence end code to have a real mpeg file
outbuf[2] = 0x01;
outbuf[3] = 0xb7;
fwrite(outbuf, 1, 4, f);
}
Then I close and clean up the image buffer and the file:
ML_TRACE_IN("MovieCreator::_endRecording()")
if (_numFramesFld->getIntValue() == 0)
{
_messageFld->setStringValue("Empty movie, nothing saved.");
}
else
{
_messageFld->setStringValue("Movie written to disk.");
_numFramesFld->setIntValue(0);
if (_hbitmap)
{
DeleteObject(_hbitmap);
}
if (c != NULL)
{
av_free(outbuffer);
av_free(inpic);
av_free(outpic);
fclose(f);
avcodec_close(c); // freeing memory
free(outbuf);
av_free(c);
}
}}
I think the main problem is here:
//out_size = avcodec_encode_video(c, outbuf,outbuf_size, outpic);
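For comparison, here is a minimal sketch of the allocate-and-encode step with this generation of the API. It assumes c, outpic and f are set up as in the snippets above; note also that the H264 encoder behind avcodec_encode_video (libx264) expects a YUV pixel format such as AV_PIX_FMT_YUV420P, not AV_PIX_FMT_RGB32:

/* Sketch only: size the buffer from the picture plus a safety margin,
 * encode one frame, and write only the bytes the encoder produced.
 * Assumes c (AVCodecContext), outpic (YUV420P AVFrame), f (FILE*). */
static int encode_and_write_frame(AVCodecContext *c, AVFrame *outpic, FILE *f)
{
    int outbuf_size = avpicture_get_size(AV_PIX_FMT_YUV420P, c->width, c->height) + 100000;
    uint8_t *outbuf = static_cast<uint8_t*>(av_malloc(outbuf_size));
    if (!outbuf)
        return AVERROR(ENOMEM);
    int out_size = avcodec_encode_video(c, outbuf, outbuf_size, outpic);
    if (out_size > 0)
        fwrite(outbuf, 1, out_size, f);
    av_free(outbuf);
    return out_size < 0 ? out_size : 0;
}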