
Recherche avancée
Médias (1)
-
The Slip - Artworks
26 septembre 2011, par
Mis à jour : Septembre 2011
Langue : English
Type : Texte
Autres articles (65)
-
Publier sur MédiaSpip
13 juin 2013 — Puis-je poster des contenus à partir d’une tablette Ipad ?
Oui, si votre Médiaspip installé est à la version 0.2 ou supérieure. Contacter au besoin l’administrateur de votre MédiaSpip pour le savoir -
Support audio et vidéo HTML5
10 avril 2011 — MediaSPIP utilise les balises HTML5 video et audio pour la lecture de documents multimedia en profitant des dernières innovations du W3C supportées par les navigateurs modernes.
Pour les navigateurs plus anciens, le lecteur flash Flowplayer est utilisé.
Le lecteur HTML5 utilisé a été spécifiquement créé pour MediaSPIP : il est complètement modifiable graphiquement pour correspondre à un thème choisi.
Ces technologies permettent de distribuer vidéo et son à la fois sur des ordinateurs conventionnels (...) -
HTML5 audio and video support
13 avril 2011, par — MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
The MediaSPIP player used has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
For older browsers the Flowplayer flash fallback is used.
MediaSPIP allows for media playback on major mobile platforms with the above (...)
Sur d’autres sites (9148)
-
ffmpeg and boost::asio NULL pointer
9 avril 2015, par Georgi — I am trying to make a special video software which will run on multiple core machines.
I want many c++ object to stream video files and many other c++ objects to store the streamed data into file.
I have created some simple classes, but when I try to create 2 and more objects I got :
opening stream9079.sdp
[udp @ 0xaef5380] bind failed: Address already in use
Could not open input file stream9079.sdp
Segmentation fault (core dumped)
When I use only one object everything is fine.
I use the following code
// Entry point of the test program: creates two Channel objects, each driven
// by its own boost::asio::io_service running in its own worker thread.
// NOTE(review): argv[1] is used without validating argc — crashes if no
// argument is passed. channels[] has 100 slots but only 0 and 1 are used.
int main(int argc, char **argv)
{
boost::asio::io_service ios;
boost::asio::io_service ios1;
Channel *channels[100];
// Channel 0 on port 9078, channel 1 on port 9079; the third argument
// (sender/receiver flag) comes from the command line for both.
channels[0] = new Channel(ios, 9078, atoi(argv[1]));
channels[0]->StartTimer(0);
channels[1] = new Channel(ios1, 9079, atoi(argv[1]));
channels[1]->StartTimer(0);
// One worker thread per io_service so the two channels run concurrently.
boost::thread t(boost::bind(&worker, &ios));
boost::thread t1(boost::bind(&worker, &ios1));
t.join();
t1.join();
CEVLOG_MSG << "done" << std::endl;
return 0;
}
My
Channel
class implementation is :
#include "channel.hpp"
#include "utils.hpp"
#include "boost/lexical_cast.hpp"
// Construct a Channel bound to io_service ioP.
//   i       : numeric id, used to derive the RTP port / file names below.
//   to_send : true  = stream a local file out over RTP (sender),
//             false = receive an RTP stream described by an .sdp file and
//                     record it into an .mp4 (receiver).
// NOTE(review): the boost::lexical_cast(id) calls below are missing their
// template argument (presumably <std::string>) — likely lost when the code
// was pasted into the page; confirm against the original source.
Channel::Channel(boost::asio::io_service &ioP, int i, bool to_send):
Runnable(ioP),
work( new boost::asio::io_service::work(ioP) ),
ofmt(NULL),
ifmt_ctx(NULL),
ofmt_ctx(NULL)
{
id = i;
sender = to_send;
if (sender)
{
// Sender: read a fixed local file and stream it to rtp://host:<id>.
input.assign("/home/georgi/Downloads/video/IMG_0019.MOV");
output.assign("rtp://10.101.3.60:"); output += boost::lexical_cast(id);
}
else
{
// Receiver: open stream<id>.sdp and record into test<id>.mp4.
input.assign("stream"); input += boost::lexical_cast(id); input += ".sdp";
output.assign("test"); output += boost::lexical_cast(id); output += ".mp4";
}
video_idx = audio_idx = sub_idx = -1;
// Only open the output if the input opened successfully; on failure the
// format contexts stay NULL (the destructor must cope with that).
if (OpenInput())
{
if (sender)
OpenOutput(eStreamOutput);
else
OpenOutput(eFileOutput);
}
}
// Tear down the channel: finalize the output, close the demuxer/muxer,
// then release the io_service work guard so the worker thread can exit.
// Fix: guard against NULL contexts. When OpenInput()/OpenOutput() fail in
// the constructor, ifmt_ctx/ofmt_ctx remain NULL and the previous
// unconditional av_write_trailer(ofmt_ctx) dereferenced a NULL pointer —
// the segfault reported right after "Could not open input file".
Channel::~Channel()
{
if (ofmt_ctx)
av_write_trailer(ofmt_ctx);
avformat_close_input(&ifmt_ctx);   // NULL-safe by API contract
if (ofmt_ctx && ofmt && !(ofmt->flags & AVFMT_NOFILE))
avio_closep(&ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);   // NULL-safe by API contract
work.reset();
}
// Open the demuxer for this->input, probe the streams, and prime the first
// packet with zeroed timestamps so the output starts at t=0.
// Returns true on success; on any failure logs an error and returns false
// (the caller then skips OpenOutput()).
bool Channel::OpenInput()
{
CEVLOG_MSG << "opening " << input << std::endl;
int ret;
if ((ret = avformat_open_input(&ifmt_ctx, input.c_str(), 0, 0)) < 0)
{
CEVLOG_ERR << "Could not open input file " << input << std::endl;
return false;
}
CEVLOG_MSG << " " << ifmt_ctx << std::endl;
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0)
{
CEVLOG_ERR << "Failed to retrieve input stream information" << std::endl;
return false;
}
ifmt_ctx->flags |= AVFMT_FLAG_GENPTS;   // synthesize missing PTS values
//read and set timestamps to 0
// Fix: check the read result — previously a failed read left pkt
// uninitialized and its fields were written anyway.
if ((ret = av_read_frame(ifmt_ctx, &pkt)) < 0)
{
CEVLOG_ERR << "Failed to read first frame from " << input << std::endl;
return false;
}
pkt.pts = pkt.dts = 0;
return true;
}
// Create and initialize the output muxer context.
//   eFileOutput   : container guessed from the output file name (e.g. .mp4).
//   eStreamOutput : RTP muxer; also generates and logs the SDP description
//                   that a receiver needs to open the stream.
// Returns true when the header has been written successfully.
bool Channel::OpenOutput(tOutputType WhatToOpen)
{
int SDP_size;
switch (WhatToOpen)
{
case eFileOutput:
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, output.c_str());
break;
case eStreamOutput:
avformat_alloc_output_context2(&ofmt_ctx, NULL, "rtp", output.c_str());
// Generate the SDP for this single RTP context and log it; the
// receiver side is expected to save this into its stream<id>.sdp.
char SDP[4096];
SDP_size = 4096;
av_sdp_create(&ofmt_ctx, 1, SDP, SDP_size);
CEVLOG_DBG << "SDP=" << SDP << std::endl;
break;
default:
assert(false);
break;
}
if (!ofmt_ctx)
{
CEVLOG_ERR << "Could not create output context" << std::endl;
return false;
}
ofmt = ofmt_ctx->oformat;
// Creates the matching output stream as a side effect (see FindIndex).
video_idx = FindIndex(AVMEDIA_TYPE_VIDEO);
if (!(ofmt->flags & AVFMT_NOFILE))
{
if (avio_open(&ofmt_ctx->pb, output.c_str(), AVIO_FLAG_WRITE) < 0)
{
CEVLOG_ERR << "Could not open output file " << output << std::endl;
return false;
}
}
if (avformat_write_header(ofmt_ctx, NULL) < 0)
{
CEVLOG_ERR << "Error occurred when opening output file " << output << std::endl;
return false;
}
return true;
}
// Locate the first input stream whose codec type matches `Type` and create
// a corresponding output stream on ofmt_ctx (codec context copied from the
// input). Returns the matching input stream index, or ifmt_ctx->nb_streams
// when no stream of that type exists ("not found").
unsigned int Channel::FindIndex(AVMediaType Type)
{
// Fix: idx was a signed int compared against the unsigned nb_streams and
// returned through an unsigned return type; make it unsigned throughout.
unsigned int idx;
for (idx = 0; idx < ifmt_ctx->nb_streams; idx++)
{
if (ifmt_ctx->streams[idx]->codec->codec_type == Type)
{
AVStream *in_stream = ifmt_ctx->streams[idx];
AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
if (!out_stream)
{
CEVLOG_ERR << "Failed allocating output stream" << std::endl;
break;
}
if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0)
{
CEVLOG_ERR << "Failed to copy context from input to output stream codec context" << std::endl;
break;
}
// Let the output muxer pick the container-appropriate codec tag.
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
{
out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
break;
}
}
return idx;
}
// Timer callback: dispatch to the role-specific handler — sender channels
// push packets out, receiver channels pull them in.
void Channel::Callback()
{
sender ? SendVideo() : RecvVideo();
}
// Sender-side timer body: read the next packet from the input file and, if
// it belongs to the video stream, rescale its timestamps and mux it to the
// RTP output, then re-arm the timer with the packet duration so the stream
// is paced roughly in real time.
void Channel::SendVideo()
{
int ret = av_read_frame(ifmt_ctx, &pkt);
int time_ms = 0;
if (ret != 0)
{
// End of file or read error: finalize the output and release the
// work guard so the io_service (and its worker thread) can finish.
av_write_trailer(ofmt_ctx);
work.reset();
return;
}
if (pkt.stream_index == video_idx)
{
AVStream *in_stream = ifmt_ctx->streams[pkt.stream_index];
AVStream *out_stream = ofmt_ctx->streams[pkt.stream_index];
AVRational time_base = ifmt_ctx->streams[video_idx]->time_base;
char timestamp[100];
// Packet duration converted via timestamp2char() into the next timer
// delay. NOTE(review): the variable is named time_ms but the
// 1000 * 1000 factor suggests microseconds — confirm StartTimer()'s unit.
time_ms = 1000 * 1000 * strtof(timestamp2char(timestamp, pkt.duration, &time_base), NULL);
// Rescale timestamps from the input stream's time base to the output's.
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
if (ret < 0)
{
// NOTE(review): this early return skips av_free_packet() and does
// not re-arm the timer, so the channel silently stops on a mux error.
CEVLOG_ERR << "Error muxing packet" << std::endl;
return;
}
}
av_free_packet(&pkt);
StartTimer(time_ms);
}
// Receiver-side timer body: read the next packet from the RTP input (via
// the .sdp description) and, if it is a video packet, rescale its
// timestamps and write it into the output .mp4. Re-arms the timer
// immediately (delay 0) to keep draining the stream.
void Channel::RecvVideo()
{
int ret = av_read_frame(ifmt_ctx, &pkt);
if (ret != 0)
{
//Some error or end of stream is detected. Write file trailer
av_write_trailer(ofmt_ctx);
work.reset();
return;
}
//if is NOT video just continue reading
if (pkt.stream_index == video_idx)
{
AVStream *in_stream = ifmt_ctx->streams[pkt.stream_index];
AVStream *out_stream = ofmt_ctx->streams[pkt.stream_index];
AVRational time_base = ifmt_ctx->streams[video_idx]->time_base;
// Rescale timestamps from the input stream's time base to the output's.
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF);
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
if (ret < 0)
{
// NOTE(review): early return skips av_free_packet() and does not
// re-arm the timer — the channel silently stops on a mux error.
CEVLOG_ERR << "Error muxing packet" << std::endl;
return;
}
}
av_free_packet(&pkt);
StartTimer(0);
}
-
Revision 1f05b19e69 : webmdec : Fix read_frame return value for calls after EOS webm_read_frame assume
30 mars 2015, par Vignesh Venkatasubramanian
Changed Paths :
Modify /webmdec.cc
Modify /webmdec.h
webmdec : Fix read_frame return value for calls after EOS
webm_read_frame assumes that it won’t be called once end of file
is reached. But for frame parallel mode that turns out to be not
true. this patch fixes that behavior by checking for EOS and
returning the appropriate value for subsequent calls.
Change-Id : Ie2fddbe00493a0f96c4172c67be1eb719f0fe8ed
-
FFmpeg C API - syncing video and audio
10 novembre 2015, par Justin Bradley — I am trimming video and having a hard time getting the audio to sync correctly. The code below is as close as I’ve gotten it work. I’ve tried both re-encoding and not re-encoding the output streams.
The video trims correctly and is written to the output container. The audio stream also trims correctly, but is written to the front of the output container. For example if the trim length is 10s - the correct portion of audio plays for 10s and then the correct portion of video plays.
//////// audio stream ////////
// Set up the audio decoder for the input and a pass-through audio stream on
// the output container, then open the output file and compute the trim
// window (12s..18s) in each stream's own time base.
// NOTE(review): fragment of a larger function — input_container,
// input_stream, output_container, cleanup(), LOGE/LOGI etc. are defined in
// the surrounding (unseen) code.
const AVStream *input_stream_audio = input_container->streams[audio_stream_index];
const AVCodec *decoder_audio = avcodec_find_decoder(input_stream_audio->codec->codec_id);
if(!decoder_audio) {
cleanup(decoded_packet, output_container, decoded_frame);
avformat_close_input(&input_container);
LOGE("=> Audio decoder not found");
return -1;
}
if(avcodec_open2(input_stream_audio->codec, decoder_audio, NULL) < 0) {
cleanup(decoded_packet, output_container, decoded_frame);
avformat_close_input(&input_container);
LOGE("=> Error opening audio decoder");
return -1;
}
// Output audio stream mirrors the input's codec parameters and time base.
AVStream *output_stream_audio = avformat_new_stream(output_container, NULL);
if(avcodec_copy_context(output_stream_audio->codec, input_stream_audio->codec) != 0){
LOGE("=> Failed to Copy audio Context ");
return -1;
}
else {
LOGI("=> Copied audio context ");
output_stream_audio->codec->codec_id = input_stream_audio->codec->codec_id;
output_stream_audio->codec->codec_tag = 0;
output_stream_audio->pts = input_stream_audio->pts;
output_stream_audio->time_base.num = input_stream_audio->time_base.num;
output_stream_audio->time_base.den = input_stream_audio->time_base.den;
}
if(avio_open(&output_container->pb, output_file, AVIO_FLAG_WRITE) < 0) {
cleanup(decoded_packet, output_container, decoded_frame);
avformat_close_input(&input_container);
LOGE("=> Error opening output file");
return -1;
}
// allocate frame for conversion
// NOTE(review): avcodec_alloc_frame() is the deprecated pre-2.x name of
// av_frame_alloc() — consistent with the old API used throughout.
decoded_frame = avcodec_alloc_frame();
if(!decoded_frame) {
cleanup(decoded_packet, output_container, decoded_frame);
avformat_close_input(&input_container);
LOGE("=> Error allocating frame");
return -1;
}
av_dump_format(input_container, 0, input_file, 0);
avformat_write_header(output_container, NULL);
av_init_packet(&decoded_packet);
decoded_packet.data = NULL;
decoded_packet.size = 0;
int current_frame_num = 1;
int current_frame_num_audio = 1;
int got_frame, len;
AVRational default_timebase;
default_timebase.num = 1;
default_timebase.den = AV_TIME_BASE;
// Trim window: 12s..18s converted from AV_TIME_BASE units into the video
// stream's time base, and separately into the audio stream's time base.
int64_t starttime_int64 = av_rescale_q((int64_t)( 12.0 * AV_TIME_BASE ), AV_TIME_BASE_Q, input_stream->time_base);
int64_t endtime_int64 = av_rescale_q((int64_t)( 18.0 * AV_TIME_BASE ), AV_TIME_BASE_Q, input_stream->time_base);
LOGI("=> starttime_int64: %" PRId64, starttime_int64);
LOGI("=> endtime_int64: %" PRId64, endtime_int64);
int64_t starttime_int64_audio = av_rescale_q((int64_t)( 12.0 * AV_TIME_BASE ), AV_TIME_BASE_Q, input_stream_audio->time_base);
int64_t endtime_int64_audio = av_rescale_q((int64_t)( 18.0 * AV_TIME_BASE ), AV_TIME_BASE_Q, input_stream_audio->time_base);
LOGI("=> starttime_int64_audio: %" PRId64, starttime_int64_audio);
LOGI("=> endtime_int64_audio: %" PRId64, endtime_int64_audio);
// loop input container and decode frames
// Demux the whole input; video packets inside the trim window are decoded
// and re-encoded, audio packets inside the window are copied with rewritten
// timestamps. (Closing brace of the while loop follows this fragment.)
while(av_read_frame(input_container, &decoded_packet)>=0) {
// video packets
if (decoded_packet.stream_index == video_stream_index) {
len = avcodec_decode_video2(input_stream->codec, decoded_frame, &got_frame, &decoded_packet);
if(len < 0) {
cleanup(decoded_packet, output_container, decoded_frame);
avformat_close_input(&input_container);
LOGE("=> No frames to decode");
return -1;
}
// this is the trim range we're looking for
if(got_frame && decoded_frame->pkt_pts >= starttime_int64 && decoded_frame->pkt_pts <= endtime_int64) {
av_init_packet(&encoded_packet);
encoded_packet.data = NULL;
encoded_packet.size = 0;
ret = avcodec_encode_video2(output_stream->codec, &encoded_packet, decoded_frame, &got_frame);
if (ret < 0) {
cleanup(decoded_packet, output_container, decoded_frame);
avformat_close_input(&input_container);
LOGE("=> Error encoding frames");
return ret;
}
if(got_frame) {
if (output_stream->codec->coded_frame->key_frame) {
encoded_packet.flags |= AV_PKT_FLAG_KEY;
}
encoded_packet.stream_index = output_stream->index;
// Timestamps are synthesized from a frame counter rather than
// carried over from the source frames.
encoded_packet.pts = av_rescale_q(current_frame_num, output_stream->codec->time_base, output_stream->time_base);
encoded_packet.dts = av_rescale_q(current_frame_num, output_stream->codec->time_base, output_stream->time_base);
ret = av_interleaved_write_frame(output_container, &encoded_packet);
if (ret < 0) {
cleanup(decoded_packet, output_container, decoded_frame);
avformat_close_input(&input_container);
LOGE("=> Error encoding frames");
return ret;
}
else {
current_frame_num +=1;
}
}
av_free_packet(&encoded_packet);
}
}
// audio packets
else if(decoded_packet.stream_index == audio_stream_index) {
// this is the trim range we're looking for
if(decoded_packet.pts >= starttime_int64_audio && decoded_packet.pts <= endtime_int64_audio) {
av_init_packet(&encoded_packet);
encoded_packet.data = decoded_packet.data;
encoded_packet.size = decoded_packet.size;
encoded_packet.stream_index = audio_stream_index;
// NOTE(review): audio PTS/DTS are derived from a per-packet counter
// scaled by the codec time_base; an audio codec's time_base does not
// generally advance one tick per packet — presumably this
// counter-based timestamping is the source of the reported A/V
// desync. Confirm against the real packet durations.
encoded_packet.pts = av_rescale_q(current_frame_num_audio, output_stream_audio->codec->time_base, output_stream_audio->time_base);
encoded_packet.dts = av_rescale_q(current_frame_num_audio, output_stream_audio->codec->time_base, output_stream_audio->time_base);
ret = av_interleaved_write_frame(output_container, &encoded_packet);
if (ret < 0) {
cleanup(decoded_packet, output_container, decoded_frame);
avformat_close_input(&input_container);
LOGE("=> Error encoding frames");
return ret;
}
else {
current_frame_num_audio +=1;
}
av_free_packet(&encoded_packet);
}
}
}
Edit
I have a slight improvement on the initial code. The audio and video are still not perfectly synced, but the original problem of the audio playing first followed by the video is resolved.
I’m now writing the decoded packet to the output container rather than re-encoding it.
In the end though I have the same problem - the trimmed video’s audio and video streams are not perfectly synced.
// audio packets
// Revised audio branch: the demuxed packet is written straight to the
// output container (no re-encoding, original timestamps preserved).
// NOTE(review): fragment of a larger loop — the enclosing while and the
// video branch are outside this excerpt.
else if(decoded_packet.stream_index == audio_stream_index) {
// this is the trim range we're looking for
if(decoded_packet.pts >= starttime_int64_audio && decoded_packet.pts <= endtime_int64_audio) {
ret = av_interleaved_write_frame(output_container, &decoded_packet);
if (ret < 0) {
cleanup(decoded_packet, output_container, decoded_frame);
avformat_close_input(&input_container);
LOGE("=> Error writing audio frame (%s)", av_err2str(ret));
return ret;
}
else {
current_frame_num_audio +=1;
}
}
// NOTE(review): this compares the *video* decoder's last frame PTS
// (decoded_frame->pkt_pts) against an *audio* time-base threshold —
// presumably decoded_packet.pts was intended; verify.
else if(decoded_frame->pkt_pts > endtime_int64_audio) {
audio_copy_complete = true;
}
}