
Other articles (99)
-
Multilang: improving the interface for multilingual blocks
18 February 2011 — Multilang is an additional plugin that is not enabled by default when MediaSPIP is initialised.
Once it is activated, MediaSPIP init automatically applies a preconfiguration so that the new feature is immediately operational; no separate configuration step is required.
-
Use, discuss, criticize
13 April 2011 — Talk to people directly involved in MediaSPIP’s development, or to people around you who could use MediaSPIP to share, enhance or develop their creative projects.
The bigger the community, the more MediaSPIP’s potential will be explored and the faster the software will evolve.
A discussion list is available for all exchanges between users.
-
Videos
21 April 2011 — As with "audio" documents, MediaSPIP displays videos whenever possible using the HTML5 video tag.
One drawback of this tag is that it is not recognised correctly by some browsers (Internet Explorer, to name but one) and that each browser natively supports only certain video formats.
Its main advantage is that video playback is handled natively by the browser, which makes it possible to do without Flash and (...)
On other sites (8210)
-
How to create an RTMP/RTSP player using FFmpeg for iOS
17 April 2017, by Logic — I am working on an app that uses RTMP/RTSP links to broadcast and play live audio/video. iOS supports HTTP streaming natively, but my requirement is to play RTMP/RTSP links.
I want to create a custom player using the FFmpeg framework. I have searched the internet and tried many approaches but have not found a working solution.
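A minimal sketch of the input side of such a player, using libavformat to open an RTSP/RTMP URL and pull packets. It assumes FFmpeg is built with network support; the function name and the packet limit below are illustrative, not taken from the question.
extern "C" {
#include <libavformat/avformat.h>
#include <libavutil/dict.h>
}
#include <cstdio>
// Open a live RTSP/RTMP URL and read a few packets (demuxing only, no decoding).
int openLiveStream(const char *url) // e.g. "rtsp://host/stream" or "rtmp://host/app/stream"
{
av_register_all(); // no-op on recent FFmpeg, required on older releases
avformat_network_init(); // must be called before any network I/O
AVDictionary *opts = NULL;
av_dict_set(&opts, "rtsp_transport", "tcp", 0); // RTSP over TCP is usually more robust
AVFormatContext *fmt = NULL;
if (avformat_open_input(&fmt, url, NULL, &opts) < 0)
{
fprintf(stderr, "could not open %s\n", url);
av_dict_free(&opts);
return -1;
}
av_dict_free(&opts);
if (avformat_find_stream_info(fmt, NULL) < 0)
{
avformat_close_input(&fmt);
return -1;
}
// A real player would send these packets to a video/audio decoder and render them;
// this sketch just reads a bounded number of packets and discards them.
AVPacket pkt;
av_init_packet(&pkt);
for (int i = 0; i < 500 && av_read_frame(fmt, &pkt) >= 0; i++)
{
av_free_packet(&pkt);
}
avformat_close_input(&fmt);
avformat_network_deinit();
return 0;
}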
-
ffmpeg mux video and audio into an mp4 file, no sound in QuickTime Player
10 November 2014, by user2789801 — I’m using ffmpeg to mux a video file and an audio file into a single mp4 file. The mp4 file plays fine on Windows, but it has no sound in QuickTime Player on Mac, and I get the error message "2041 invalid sample description".
Here’s what I’m doing:
First, I open the video file and the audio file and initialise an output format context.
Then I add a video stream and an audio stream based on the input video and audio files.
Then I write the header, mux the packets, and write the trailer. Here’s my code:
#include "CoreRender.h"
CoreRender::CoreRender(const char* _vp, const char * _ap, const char * _op)
{
sprintf(videoPath, "%s", _vp);
sprintf(audioPath, "%s", _ap);
sprintf(outputPath, "%s", _op);
formatContext_video = NULL;
formatContext_audio = NULL;
formatContext_output = NULL;
videoStreamIdx = -1;
audioStreamIdx = -1;
outputVideoStreamIdx = -1;
outputAudioStreamIdx = -1;
av_init_packet(&pkt);
init();
}
void CoreRender::init()
{
av_register_all();
avcodec_register_all();
// allocate the AVFrame objects
frame = av_frame_alloc();
rgbFrame = av_frame_alloc();
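// open both input files and read their stream information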
if (avformat_open_input(&formatContext_video, videoPath, 0, 0) < 0)
{
release();
}
if (avformat_find_stream_info(formatContext_video, 0) < 0)
{
release();
}
if (avformat_open_input(&formatContext_audio, audioPath, 0, 0) < 0)
{
release();
}
if (avformat_find_stream_info(formatContext_audio, 0) < 0)
{
release();
}
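// allocate the output format context; the container is guessed from the output file name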
avformat_alloc_output_context2(&formatContext_output, NULL, NULL, outputPath);
if (!formatContext_output)
{
release();
}
ouputFormat = formatContext_output->oformat;
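// copy the first video stream of the video input into the output without re-encoding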
for (int i = 0; i < formatContext_video->nb_streams; i++)
{
// create the output AVStream according to the input AVStream
if (formatContext_video->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
{
videoStreamIdx = i;
AVStream * in_stream = formatContext_video->streams[i];
AVStream * out_stream = avformat_new_stream(formatContext_output, in_stream->codec->codec);
if (! out_stream)
{
release();
}
outputVideoStreamIdx = out_stream->index;
if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0)
{
release();
}
out_stream->codec->codec_tag = 0;
if (formatContext_output->oformat->flags & AVFMT_GLOBALHEADER)
{
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
break;
}
}
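// create an AAC output stream for the first audio stream of the audio input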
for (int i = 0; i < formatContext_audio->nb_streams; i++)
{
if (formatContext_audio->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
{
AVCodec *encoder;
encoder = avcodec_find_encoder(AV_CODEC_ID_AAC);
audioStreamIdx = i;
AVStream *in_stream = formatContext_audio->streams[i];
AVStream *out_stream = avformat_new_stream(formatContext_output, encoder);
if (!out_stream)
{
release();
}
outputAudioStreamIdx = out_stream->index;
AVCodecContext *dec_ctx, *enc_ctx;
dec_ctx = in_stream->codec;
enc_ctx = out_stream->codec;
enc_ctx->sample_rate = dec_ctx->sample_rate;
enc_ctx->channel_layout = dec_ctx->channel_layout;
enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
enc_ctx->time_base = { 1, enc_ctx->sample_rate };
enc_ctx->bit_rate = 480000;
if (avcodec_open2(enc_ctx, encoder, NULL) < 0)
{
release();
}
if (formatContext_output->oformat->flags & AVFMT_GLOBALHEADER)
{
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
break;
}
}
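// open the output file (unless the format needs no file) and write the container header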
if (!(ouputFormat->flags & AVFMT_NOFILE))
{
if (avio_open(&formatContext_output->pb, outputPath, AVIO_FLAG_WRITE) < 0)
{
release();
}
}
if (avformat_write_header(formatContext_output, NULL) < 0)
{
release();
}
}
void CoreRender::mux()
{
// find the decoder for the audio codec
codecContext_a = formatContext_audio->streams[audioStreamIdx]->codec;
codec_a = avcodec_find_decoder(codecContext_a->codec_id);
if (codec_a == NULL)
{
avformat_close_input(&formatContext_audio);
release();
}
codecContext_a = avcodec_alloc_context3(codec_a);
if (codec_a->capabilities&CODEC_CAP_TRUNCATED)
codecContext_a->flags |= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
if (avcodec_open2(codecContext_a, codec_a, NULL) < 0)
{
avformat_close_input(&formatContext_audio);
release();
}
int frame_index = 0;
int64_t cur_pts_v = 0, cur_pts_a = 0;
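// read from whichever input is currently behind, so video and audio packets stay interleaved by timestamp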
while (true)
{
AVFormatContext *ifmt_ctx;
int stream_index = 0;
AVStream *in_stream, *out_stream;
if (av_compare_ts(cur_pts_v,
formatContext_video->streams[videoStreamIdx]->time_base,
cur_pts_a,
formatContext_audio->streams[audioStreamIdx]->time_base) <= 0)
{
ifmt_ctx = formatContext_video;
stream_index = outputVideoStreamIdx;
if (av_read_frame(ifmt_ctx, &pkt) >=0)
{
do
{
if (pkt.stream_index == videoStreamIdx)
{
cur_pts_v = pkt.pts;
break;
}
} while (av_read_frame(ifmt_ctx, &pkt) >= 0);
}
else
{
break;
}
}
else
{
ifmt_ctx = formatContext_audio;
stream_index = outputAudioStreamIdx;
if (av_read_frame(ifmt_ctx, &pkt) >=0)
{
do
{
if (pkt.stream_index == audioStreamIdx)
{
cur_pts_a = pkt.pts;
break;
}
} while (av_read_frame(ifmt_ctx, &pkt) >=0);
processAudio();
}
else
{
break;
}
}
in_stream = ifmt_ctx->streams[pkt.stream_index];
out_stream = formatContext_output->streams[stream_index];
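// packets without a timestamp get one synthesized from the stream's frame rate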
if (pkt.pts == AV_NOPTS_VALUE)
{
AVRational time_base1 = in_stream->time_base;
int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
pkt.pts = (double)(frame_index * calc_duration) / (double)(av_q2d(time_base1) * AV_TIME_BASE);
pkt.dts = pkt.pts;
pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1) * AV_TIME_BASE);
frame_index++;
}
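// rescale timestamps from the input stream's time base to the output stream's time base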
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (enum AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (enum AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
pkt.stream_index = stream_index;
LOGE("Write 1 Packet. size:%5d\tpts:%8d", pkt.size, pkt.pts);
if (av_interleaved_write_frame(formatContext_output, &pkt) < 0)
{
break;
}
av_free_packet(&pkt);
}
av_write_trailer(formatContext_output);
}
void CoreRender::processAudio()
{
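// decode one audio packet and re-encode the decoded frame as AAC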
int got_frame_v = 0;
AVFrame *tempFrame = (AVFrame *)av_mallocz(sizeof(AVFrame));
avcodec_decode_audio4(formatContext_audio->streams[audioStreamIdx]->codec, tempFrame, &got_frame_v, &pkt);
if (got_frame_v)
{
tempFrame->pts = av_frame_get_best_effort_timestamp(tempFrame);
int ret;
int got_frame_local;
int * got_frame = &got_frame_v;
AVPacket enc_pkt;
int(*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) = avcodec_encode_audio2;
if (!got_frame)
{
got_frame = &got_frame_local;
}
// encode filtered frame
enc_pkt.data = NULL;
enc_pkt.size = 0;
av_init_packet(&enc_pkt);
ret = enc_func(codecContext_a, &enc_pkt, tempFrame, got_frame);
av_frame_free(&tempFrame);
if (ret < 0)
{
return ;
}
if (!(*got_frame))
{
return ;
}
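// rescale the encoded packet's timestamps from the encoder time base to the output stream's time base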
enc_pkt.stream_index = outputAudioStreamIdx;
av_packet_rescale_ts(&enc_pkt,
formatContext_output->streams[outputAudioStreamIdx]->codec->time_base,
formatContext_output->streams[outputAudioStreamIdx]->time_base);
}
}
void CoreRender::release()
{
avformat_close_input(&formatContext_video);
avformat_close_input(&formatContext_audio);
if (formatContext_output&& !(ouputFormat->flags & AVFMT_NOFILE))
avio_close(formatContext_output->pb);
avformat_free_context(formatContext_output);
}
CoreRender::~CoreRender()
{
}
As you can see, I transcode the audio into AAC and keep the video as it is.
Here’s how I use it:
CoreRender render("d:\\bg.mp4", "d:\\music.mp3", "d:\\output.mp4");
render.mux();
return 0;
The video file is always in H.264 format.
So what am I doing wrong?
-
Player size in HTML5 mode (Safari/Chrome)
7 May 2013 — Description of the bug, from our discussion on IRC: