
Media (2)
-
Example of action buttons for a collaborative collection
27 February 2013, by
Updated: March 2013
Language: French
Type: Image
-
Example of action buttons for a personal collection
27 February 2013, by
Updated: February 2013
Language: English
Type: Image
Other articles (10)
-
HTML5 audio and video support
13 April 2011, by
MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
The MediaSPIP player has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
For older browsers, the Flowplayer Flash fallback is used.
MediaSPIP allows for media playback on major mobile platforms with the above (...)
-
Support for all types of media
10 April 2011
Unlike many modern document-sharing applications and platforms, MediaSPIP aims to handle as many different document formats as possible, whether they are: images (png, gif, jpg, bmp and others...); audio (MP3, Ogg, Wav and others...); video (Avi, MP4, Ogv, mpg, mov, wmv and others...); or textual content, code and more (OpenOffice, Microsoft Office (spreadsheet, presentation), web (html, css), LaTeX, Google Earth) (...)
-
HTML5 audio and video support
10 April 2011
MediaSPIP uses the HTML5 video and audio tags to play multimedia documents, taking advantage of the latest W3C innovations supported by modern browsers.
For older browsers, the Flowplayer Flash fallback is used.
The HTML5 player used was created specifically for MediaSPIP: its appearance is fully customizable to match a chosen theme.
These technologies make it possible to deliver video and audio both on conventional computers (...)
On other sites (4492)
-
How to concat mp4 files using libffmpeg in a C program?
1 August 2013, by chichien
I know the ffmpeg command line is easy, but how do I implement this programmatically? I'm not good at this; here is some code from the internet that is used to convert .mp4 to .ts. I made some changes, but the audio stream problem persists:
#include <stdio.h>
#include <stdlib.h>
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavutil/avutil.h"
#include "libavutil/rational.h"
#include "libavdevice/avdevice.h"
#include "libavutil/mathematics.h"
#include "libswscale/swscale.h"
static AVStream* add_output_stream(AVFormatContext* output_format_context, AVStream* input_stream)
{
AVCodecContext* input_codec_context = NULL;
AVCodecContext* output_codec_context = NULL;
AVStream* output_stream = NULL;
output_stream = av_new_stream(output_format_context, 0);
if (!output_stream)
{
printf("Call av_new_stream function failed\n");
return NULL;
}
input_codec_context = input_stream->codec;
output_codec_context = output_stream->codec;
output_codec_context->codec_id = input_codec_context->codec_id;
output_codec_context->codec_type = input_codec_context->codec_type;
output_codec_context->codec_tag = input_codec_context->codec_tag;
output_codec_context->bit_rate = input_codec_context->bit_rate;
output_codec_context->extradata = input_codec_context->extradata;
output_codec_context->extradata_size = input_codec_context->extradata_size;
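/* Note: this shares the extradata buffer with the input context rather than
 * duplicating it. */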
if (av_q2d(input_codec_context->time_base) * input_codec_context->ticks_per_frame > av_q2d(input_stream->time_base) && av_q2d(input_stream->time_base) < 1.0 / 1000)
{
output_codec_context->time_base = input_codec_context->time_base;
output_codec_context->time_base.num *= input_codec_context->ticks_per_frame;
}
else
{
output_codec_context->time_base = input_stream->time_base;
}
switch (input_codec_context->codec_type)
{
case AVMEDIA_TYPE_AUDIO:
output_codec_context->channel_layout = input_codec_context->channel_layout;
output_codec_context->sample_rate = input_codec_context->sample_rate;
output_codec_context->channels = input_codec_context->channels;
output_codec_context->frame_size = input_codec_context->frame_size;
if ((input_codec_context->block_align == 1 && input_codec_context->codec_id == CODEC_ID_MP3) || input_codec_context->codec_id == CODEC_ID_AC3)
{
output_codec_context->block_align = 0;
}
else
{
output_codec_context->block_align = input_codec_context->block_align;
}
break;
case AVMEDIA_TYPE_VIDEO:
output_codec_context->pix_fmt = input_codec_context->pix_fmt;
output_codec_context->width = input_codec_context->width;
output_codec_context->height = input_codec_context->height;
output_codec_context->has_b_frames = input_codec_context->has_b_frames;
if (output_format_context->oformat->flags & AVFMT_GLOBALHEADER)
{
output_codec_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
break;
default:
break;
}
return output_stream;
}
//[[** from ffmpeg.c
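/* Run each packet through the bitstream filter chain before muxing; here the
 * chain is h264_mp4toannexb, which rewrites length-prefixed AVC NAL units
 * into the Annex B start-code format that MPEG-TS requires. */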
static void write_frame(AVFormatContext *s, AVPacket *pkt, AVCodecContext *avctx, AVBitStreamFilterContext *bsfc){
int ret;
while(bsfc){
AVPacket new_pkt= *pkt;
int a= av_bitstream_filter_filter(bsfc, avctx, NULL,
&new_pkt.data, &new_pkt.size,
pkt->data, pkt->size,
pkt->flags & AV_PKT_FLAG_KEY);
if(a>0){
av_free_packet(pkt);
new_pkt.destruct= av_destruct_packet;
} else if(a<0){
fprintf(stderr, "%s failed for stream %d, codec %s\n",
bsfc->filter->name, pkt->stream_index,
avctx->codec ? avctx->codec->name : "copy");
//print_error("", a);
//if (exit_on_error)
// ffmpeg_exit(1);
}
*pkt= new_pkt;
bsfc= bsfc->next;
}
ret= av_interleaved_write_frame(s, pkt);
if(ret < 0){
//print_error("av_interleaved_write_frame()", ret);
fprintf(stderr, "av_interleaved_write_frame(%d)\n", ret);
exit(1);
}
}
//]]**
int main(int argc, char* argv[])
{
const char* input;
const char* output;
const char* output_prefix = NULL;
char* segment_duration_check = 0;
const char* index = NULL;
char* tmp_index = NULL;
const char* http_prefix = NULL;
long max_tsfiles = 0;
double prev_segment_time = 0;
double segment_duration = 0;
AVInputFormat* ifmt = NULL;
AVOutputFormat* ofmt = NULL;
AVFormatContext* ic = NULL;
AVFormatContext* oc = NULL;
AVStream* video_st = NULL;
AVStream* audio_st = NULL;
AVCodec* codec = NULL;
AVDictionary* pAVDictionary = NULL;
long frame_count = 0;
if (argc != 3) {
fprintf(stderr, "Usage: %s inputfile outputfile\n", argv[0]);
exit(1);
}
input = argv[1];
output = argv[2];
av_register_all();
char szError[256] = {0};
int nRet = avformat_open_input(&ic, input, ifmt, &pAVDictionary);
if (nRet != 0)
{
av_strerror(nRet, szError, 256);
printf("%s\n", szError);
printf("Call avformat_open_input function failed!\n");
return 0;
}
if (av_find_stream_info(ic) < 0)
{
printf("Call av_find_stream_info function failed!\n");
return 0;
}
ofmt = av_guess_format("mpegts", NULL, NULL);
if (!ofmt)
{
printf("Call av_guess_format function failed!\n");
return 0;
}
oc = avformat_alloc_context();
if (!oc)
{
printf("Call av_guess_format function failed!\n");
return 0;
}
oc->oformat = ofmt;
int video_index = -1, audio_index = -1;
for (unsigned int i = 0; i < ic->nb_streams && (video_index < 0 || audio_index < 0); i++)
{
switch (ic->streams[i]->codec->codec_type)
{
case AVMEDIA_TYPE_VIDEO:
video_index = i;
ic->streams[i]->discard = AVDISCARD_NONE;
video_st = add_output_stream(oc, ic->streams[i]);
break;
case AVMEDIA_TYPE_AUDIO:
audio_index = i;
ic->streams[i]->discard = AVDISCARD_NONE;
audio_st = add_output_stream(oc, ic->streams[i]);
break;
default:
ic->streams[i]->discard = AVDISCARD_ALL;
break;
}
}
codec = avcodec_find_decoder(video_st->codec->codec_id);
if (codec == NULL)
{
printf("Call avcodec_find_decoder function failed!\n");
return 0;
}
if (avcodec_open(video_st->codec, codec) < 0)
{
printf("Call avcodec_open function failed !\n");
return 0;
}
if (avio_open(&oc->pb, output, AVIO_FLAG_WRITE) < 0)
{
return 0;
}
if (avformat_write_header(oc, &pAVDictionary))
{
printf("Call avformat_write_header function failed.\n");
return 0;
}
//[[++
AVBitStreamFilterContext *bsfc = av_bitstream_filter_init("h264_mp4toannexb");
//AVBitStreamFilterContext *absfc = av_bitstream_filter_init("aac_adtstoasc");
if (!bsfc) {
fprintf(stderr, "bsf init error!\n");
return -1;
}
//]]++
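/* Read packets from the MP4 input and remux them into the TS output:
 * audio packets are written directly, video packets go through the
 * h264_mp4toannexb filter in write_frame(). */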
int decode_done = 0;
do
{
double segment_time = 0;
AVPacket packet;
decode_done = av_read_frame(ic, &packet);
if (decode_done < 0)
break;
if (av_dup_packet(&packet) < 0)
{
printf("Call av_dup_packet function failed\n");
av_free_packet(&packet);
break;
}
//[[**
if (packet.stream_index == audio_index) {
segment_time = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
nRet = av_interleaved_write_frame(oc, &packet);
} else if (packet.stream_index == video_index) {
if (packet.flags & AV_PKT_FLAG_KEY) {
segment_time = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
} else {
segment_time = prev_segment_time;
}
//nRet = av_interleaved_write_frame(oc, &packet);
write_frame(oc, &packet, video_st->codec, bsfc);
}
//]]**
if (nRet < 0)
{
printf("Call av_interleaved_write_frame function failed: %d\n", nRet);
}
else if (nRet > 0)
{
printf("End of stream requested\n");
av_free_packet(&packet);
break;
}
av_free_packet(&packet);
frame_count++;
}while(!decode_done);
av_write_trailer(oc);
printf("frame_count = %d\n", frame_count);
av_bitstream_filter_close(bsfc);
avcodec_close(video_st->codec);
for(unsigned int k = 0; k < oc->nb_streams; k++)
{
av_freep(&oc->streams[k]->codec);
av_freep(&oc->streams[k]);
}
av_free(oc);
//getchar();
return 0;
}
Compile this code to get an executable named muxts, and then:
$ ./muxts vid1.mp4 vid1.ts
No error message is printed, but the audio stream is out of sync and noisy. Check the .ts file using ffmpeg:
$ ffmpeg -i vid1.ts
ffmpeg version 0.8.14-tessus, Copyright (c) 2000-2013 the FFmpeg developers
built on Jul 29 2013 17:05:18 with llvm_gcc 4.2.1 (Based on Apple Inc. build 5658) (LLVM build 2336.1.00)
configuration: --prefix=/usr/local --arch=x86_64 --as=yasm --extra-version=tessus --enable-gpl --enable-nonfree --enable-version3 --disable-ffplay --enable-libvorbis --enable-libmp3lame --enable-libx264 --enable-libxvid --enable-bzlib --enable-zlib --enable-postproc --enable-filters --enable-runtime-cpudetect --enable-debug=3 --disable-optimizations
libavutil 51. 9. 1 / 51. 9. 1
libavcodec 53. 8. 0 / 53. 8. 0
libavformat 53. 5. 0 / 53. 5. 0
libavdevice 53. 1. 1 / 53. 1. 1
libavfilter 2. 23. 0 / 2. 23. 0
libswscale 2. 0. 0 / 2. 0. 0
libpostproc 51. 2. 0 / 51. 2. 0
Seems stream 0 codec frame rate differs from container frame rate: 180000.00 (180000/1) -> 90000.00 (180000/2)
Input #0, mpegts, from 'vid1.ts':
Duration: 00:00:03.75, start: 0.000000, bitrate: 3656 kb/s
Program 1
Metadata:
service_name : Service01
service_provider: FFmpeg
Stream #0.0[0x100]: Video: h264 (Baseline), yuv420p, 640x480, 90k tbr, 90k tbn, 180k tbc
Stream #0.1[0x101]: Audio: aac, 48000 Hz, mono, s16, 190 kb/s
At least one output file must be specified
What should I do?
If this issue is fixed, how can I concatenate multiple .ts files into a single .mp4 file?
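For the second part of the question, one approach that is often suggested is to let libavformat's concat: protocol read the .ts files back to back and remux the result into an .mp4 container with stream copy (no re-encoding); this works when the segments share the same codecs and have continuous timestamps, as HLS-style segments do. Below is a minimal sketch of that idea, assuming a newer libavformat (3.1+, with AVCodecParameters) rather than the 0.8-era API used above; the input and output names are placeholders.

#include <stdio.h>
#include "libavformat/avformat.h"

/* Sketch: remux "concat:vid1.ts|vid2.ts" into out.mp4 without re-encoding. */
int main(int argc, char *argv[])
{
    AVFormatContext *ic = NULL, *oc = NULL;
    AVPacket *pkt = NULL;

    if (argc != 3) {
        fprintf(stderr, "usage: %s \"concat:in1.ts|in2.ts\" out.mp4\n", argv[0]);
        return 1;
    }
    /* On libavformat < 58, av_register_all() must be called first. */
    if (avformat_open_input(&ic, argv[1], NULL, NULL) < 0 ||
        avformat_find_stream_info(ic, NULL) < 0 ||
        avformat_alloc_output_context2(&oc, NULL, NULL, argv[2]) < 0)
        return 1;

    /* Mirror every input stream in the output, copying codec parameters. */
    for (unsigned int i = 0; i < ic->nb_streams; i++) {
        AVStream *out = avformat_new_stream(oc, NULL);
        if (!out || avcodec_parameters_copy(out->codecpar, ic->streams[i]->codecpar) < 0)
            return 1;
        out->codecpar->codec_tag = 0;  /* let the MP4 muxer choose its own tag */
    }

    if (avio_open(&oc->pb, argv[2], AVIO_FLAG_WRITE) < 0 ||
        avformat_write_header(oc, NULL) < 0)
        return 1;

    pkt = av_packet_alloc();
    while (pkt && av_read_frame(ic, pkt) >= 0) {
        AVStream *in  = ic->streams[pkt->stream_index];
        AVStream *out = oc->streams[pkt->stream_index];
        /* Rescale timestamps from the input to the output time base. */
        av_packet_rescale_ts(pkt, in->time_base, out->time_base);
        pkt->pos = -1;
        if (av_interleaved_write_frame(oc, pkt) < 0)
            break;
    }
    av_write_trailer(oc);

    av_packet_free(&pkt);
    avformat_close_input(&ic);
    avio_closep(&oc->pb);
    avformat_free_context(oc);
    return 0;
}

Note that AAC coming out of MPEG-TS is in ADTS form; writing it into MP4 normally requires the aac_adtstoasc bitstream filter, which is omitted here for brevity.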
-
Revision 38fa487164: Shortcut 8x8/16x16 inverse 2D-DCT
27 July 2013, by Jingning Han
Changed Paths:
Modify /vp9/decoder/vp9_idct_blk.c
Modify /vp9/encoder/vp9_encodemb.c
Shortcut 8x8/16x16 inverse 2D-DCT
This commit brought back the shortcut implementation of the 8x8/16x16 inverse 2D-DCT. When the eob <= 10, it skips the inverse transform operations on rows 4:7/4:15 in the first round. For bus_cif at 1000 kbps, this provides about a 2% speed-up at speed 0.
Change-Id: I453e2d72956467d75be4ad8c04b4482ab889d572
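Schematically, the shortcut is just an eob-based dispatch in front of the inverse transform. The sketch below only illustrates that idea; the helper functions are hypothetical stand-ins, not the actual libvpx routines touched by this commit.

#include <stdint.h>

/* Hypothetical stand-ins for the real partial/full inverse-DCT routines. */
void idct8x8_rows0to3_add(const int16_t *coeffs, uint8_t *dst, int stride);
void idct8x8_full_add(const int16_t *coeffs, uint8_t *dst, int stride);

/* Illustrative dispatch: with eob <= 10 only low-frequency coefficients
 * remain, so the first-pass row transform is needed on rows 0..3 only. */
void idct8x8_add_dispatch(const int16_t *coeffs, int eob,
                          uint8_t *dst, int stride)
{
    if (eob <= 10)
        idct8x8_rows0to3_add(coeffs, dst, stride);
    else
        idct8x8_full_add(coeffs, dst, stride);
}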
-
Revision 0359ad7f9a: Skip inverse transform when eob is zero
23 July 2013, by Jingning Han
Changed Paths:
Modify /vp9/encoder/vp9_encodemb.c
Skip inverse transform when eob is zero
When all the transform coefficients were quantized to zero, skip the inverse transform operation. For bus_cif at 1000 kbps, the runtime goes from 154967 ms to 149842 ms, i.e., about a 3% speed-up, at speed 0.
Change-Id: Ic0a813fff5e28972d4888ee42d8747846a6c3cc6
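In sketch form this change is an early-out guard in front of the reconstruction step; as above, the helper name is a hypothetical stand-in rather than the real vp9_encodemb.c routine.

#include <stdint.h>

/* Hypothetical stand-in for the real inverse-transform-and-add routine. */
void inverse_transform_add(const int16_t *dqcoeff, uint8_t *dst, int stride);

/* Illustrative early-out: with eob == 0 every coefficient was quantized
 * to zero, so the residual is zero and the prediction already equals
 * the reconstruction. */
void reconstruct_block(const int16_t *dqcoeff, int eob,
                       uint8_t *dst, int stride)
{
    if (eob == 0)
        return;
    inverse_transform_add(dqcoeff, dst, stride);
}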