
Media (1)
-
Rennes Emotion Map 2010-11
19 October 2011, by
Updated: July 2013
Language: French
Type: Text
Other articles (40)
-
Customising by adding your logo, banner or background image
5 September 2013, by
Some themes support three customisation elements: adding a logo; adding a banner; adding a background image.
-
Writing a news item
21 June 2013, by
Present the changes to your MediaSPIP, or news about your projects, on your MediaSPIP via the news section.
In MediaSPIP's default theme, spipeo, news items are displayed at the bottom of the main page, below the editorials.
You can customise the news-item creation form.
News-item creation form: for a document of the news type, the fields offered by default are: publication date (customise the publication date) (...)
-
Publishing on MédiaSpip
13 June 2013
Can I post content from an iPad tablet?
Yes, if your installed MédiaSpip is at version 0.2 or higher. If necessary, contact your MédiaSpip administrator to find out.
Sur d’autres sites (5778)
-
Playing an MP3 file using FFmpeg on Android
2 April 2017, by satyres
This question has been asked a lot, but no code has worked for me. I have been able to play a file decoded with FFmpeg on Android, but it is noisy and glitchy.
I found code in a book called "Linux Sound Programming" that uses a recent FFmpeg version to decode an MP3 file.
The code decodes an MP3 file to PCM and then writes it to a file called output.
What I want to do is get the decoded bytes on the fly and send them to AudioTrack in Java.

JNIEXPORT void JNICALL Java_com_example_home_hellondk_MainActivity_loadFile
(JNIEnv *env, jobject obj, jstring file, jbyteArray array)
{
    jboolean isfilenameCopy;
    const char *filename = (*env)->GetStringUTFChars(env, file, &isfilenameCopy);
    jclass cls = (*env)->GetObjectClass(env, obj);
    /* Java-side callback: void playSound(byte[] buf, int length) */
    jmethodID play = (*env)->GetMethodID(env, cls, "playSound", "([BI)V");

    AVCodec *codec;
    AVCodecContext *c = NULL;
    int len;
    FILE *f, *outfile;
    uint8_t inbuf[AUDIO_INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
    AVPacket avpkt;
    AVFrame *decoded_frame = NULL;
    AVFormatContext *container = NULL;

    av_init_packet(&avpkt);
    int num_streams = 0;
    int sample_size = 0;

    printf("Decode audio file %s\n", filename);
    LOGE("Decode audio file %s\n", filename);
    /* find the MPEG audio decoder */
    /*
    codec = avcodec_find_decoder(AV_CODEC_ID_MP3);
    if (!codec) {
        fprintf(stderr, "Codec not found\n");
        LOGE("Codec not found\n");
        exit(1);
    }
    */
    int lError;
    if ((lError = avformat_open_input(&container, filename, NULL, NULL)) != 0) {
        LOGE("Error open source file: %d", lError);
        exit(1);
    }
    if ((lError = avformat_find_stream_info(container, NULL)) < 0) {
        LOGE("Error find stream information: %d", lError);
        exit(1);
    }
    LOGE("Stage 1.5");
    LOGE("audio format: %s", container->iformat->name);
    LOGE("audio bitrate: %lld", (long long)container->bit_rate);
    int stream_id = -1;
    // Find the first audio stream. This step may not be necessary if you
    // can guarantee that the container holds only the desired audio stream.
    LOGE("nb_streams: %d", container->nb_streams);
    int i;
    for (i = 0; i < container->nb_streams; i++) {
        if (container->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            stream_id = i;
            LOGE("stream_id: %d", stream_id);
            break;
        }
    }
    AVCodecContext *codec_context = container->streams[stream_id]->codec;
    codec = avcodec_find_decoder(codec_context->codec_id);
    LOGE("stream_id: %d", stream_id);
    if (!codec) {   /* check before dereferencing codec->name */
        fprintf(stderr, "codec not found\n");
        LOGE("codec not found\n");
        exit(1);
    }
    LOGE("codec %s", codec->name);
    c = avcodec_alloc_context3(codec);
    if (!c) {
        fprintf(stderr, "Could not allocate audio codec context\n");
        LOGE("Could not allocate audio codec context\n");
        exit(1);
    }
    /* copy the stream's parameters into the fresh context; otherwise
       sample_fmt, channels etc. stay unset until the first decode */
    if (avcodec_copy_context(c, codec_context) < 0) {
        fprintf(stderr, "Could not copy codec context\n");
        exit(1);
    }
    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "Could not open codec\n");
        LOGE("Could not open codec\n");
        exit(1);
    }
    f = fopen(filename, "rb");
    if (!f) {
        fprintf(stderr, "Could not open %s\n", filename);
        LOGE("Could not open %s\n", filename);
        exit(1);
    }
    /* path of the raw PCM dump ("output" in the book's example);
       on Android this must point somewhere writable */
    const char *outfilename = "output";
    outfile = fopen(outfilename, "wb");
    if (!outfile) {
        av_free(c);
        exit(1);
    }

    avpkt.data = inbuf;
    avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
LOGE("Stage 5");
/* decode until eof */
while (avpkt.size > 0) {
int got_frame = 0;
if (!decoded_frame) {
if (!(decoded_frame = av_frame_alloc())) {
fprintf(stderr, "out of memory\n");
exit(1);
}
} else {
av_frame_unref(decoded_frame);
}
printf("Stream idx %d\n", avpkt.stream_index);
len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
if (len < 0) {
fprintf(stderr, "Error while decoding\n");
exit(1);
}
        if (got_frame) {
            printf("Decoded frame nb_samples %d, format %d\n",
                   decoded_frame->nb_samples, decoded_frame->format);
            if (decoded_frame->data[1] != NULL)
                printf("Data[1] not null\n");
            else
                printf("Data[1] is null\n");
            /* if a frame has been decoded, output it */
            int data_size = av_samples_get_buffer_size(NULL, c->channels,
                                                       decoded_frame->nb_samples,
                                                       c->sample_fmt, 1);
            // first time: count the number of planar streams
            if (num_streams == 0) {
                while (num_streams < AV_NUM_DATA_POINTERS &&
                       decoded_frame->data[num_streams] != NULL)
                    num_streams++;
                printf("Number of streams %d\n", num_streams);
            }
            // first time: set sample_size from 0 to e.g. 2 for 16-bit data
            if (sample_size == 0) {
                sample_size = data_size / (num_streams * decoded_frame->nb_samples);
            }
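            /*
             * Note: MP3 commonly decodes to a planar sample format such as
             * AV_SAMPLE_FMT_FLTP, i.e. one data[] plane per channel, whereas
             * a raw PCM file (and Android's AudioTrack) expects the channels
             * interleaved sample by sample. The loop below interleaves the
             * planes byte-wise; the samples remain in the decoder's native
             * format, not necessarily 16-bit integers.
             */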
            int m, n;
            for (n = 0; n < decoded_frame->nb_samples; n++) {
                // interleave the samples from the planar streams
                for (m = 0; m < num_streams; m++) {
                    fwrite(&decoded_frame->data[m][n * sample_size],
                           1, sample_size, outfile);
                }
            }
            /*
            jbyte *bytes = (*env)->GetByteArrayElements(env, array, NULL);
            memcpy(bytes, decoded_frame->data[1], data_size);
            (*env)->ReleaseByteArrayElements(env, array, bytes, 0);
            (*env)->CallVoidMethod(env, obj, play, array, data_size);
            */
        }
        avpkt.size -= len;
        avpkt.data += len;
        if (avpkt.size < AUDIO_REFILL_THRESH) {
            /* Refill the input buffer, to avoid trying to decode
             * incomplete frames. Instead of this, one could also use
             * a parser, or use a proper container format through
             * libavformat. */
            memmove(inbuf, avpkt.data, avpkt.size);
            avpkt.data = inbuf;
            len = fread(avpkt.data + avpkt.size, 1,
                        AUDIO_INBUF_SIZE - avpkt.size, f);
            if (len > 0)
                avpkt.size += len;
        }
    }
    fclose(f);
    fclose(outfile);   /* also close the PCM dump */
    avcodec_free_context(&c);
    av_frame_free(&decoded_frame);
}

The decoded bytes are written in this section:
fwrite(&decoded_frame->data[m][n * sample_size], 1, sample_size, outfile);
The code that lets you send bytes to Java is this:
jbyte *bytes = (*env)->GetByteArrayElements(env, array, NULL);
memcpy(bytes, decoded_frame->data[0], data_size);
(*env)->ReleaseByteArrayElements(env, array, bytes, 0);
(*env)->CallVoidMethod(env, obj, play, array, data_size);
I have been working on this for more than a week now and nothing has worked for me.
Thank you in advance for your help.
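A plausible cause of the noise, given the planar output discussed above, is that the bytes handed to playSound are raw planar floats rather than the interleaved 16-bit PCM that AudioTrack's ENCODING_PCM_16BIT expects. What follows is a minimal sketch, not the book's code, of converting each decoded frame with libswresample before invoking the JNI callback; it assumes FFmpeg APIs of the same era as avcodec_decode_audio4, and the names swr and out_buf are mine:

#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>

/* once, after avcodec_open2(): set up planar -> interleaved S16 conversion */
int64_t layout = c->channel_layout
        ? c->channel_layout
        : av_get_default_channel_layout(c->channels);
SwrContext *swr = swr_alloc_set_opts(NULL,
        layout, AV_SAMPLE_FMT_S16, c->sample_rate,   /* output */
        layout, c->sample_fmt, c->sample_rate,       /* input  */
        0, NULL);
swr_init(swr);

/* per decoded frame: convert, then hand the interleaved bytes to Java */
uint8_t *out_buf = NULL;
av_samples_alloc(&out_buf, NULL, c->channels,
                 decoded_frame->nb_samples, AV_SAMPLE_FMT_S16, 0);
int out_samples = swr_convert(swr, &out_buf, decoded_frame->nb_samples,
                              (const uint8_t **)decoded_frame->data,
                              decoded_frame->nb_samples);
int out_bytes = out_samples * c->channels *
                av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);

jbyte *bytes = (*env)->GetByteArrayElements(env, array, NULL);
memcpy(bytes, out_buf, out_bytes);   /* out_bytes must fit the Java array */
(*env)->ReleaseByteArrayElements(env, array, bytes, 0);
(*env)->CallVoidMethod(env, obj, play, array, out_bytes);
av_freep(&out_buf);

On the Java side, playSound (signature "([BI)V") would then simply write the array into an AudioTrack opened in MODE_STREAM with the same sample rate and channel count.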
-
NodeJS piping with ffmpeg
8 February 2014, by Gnap
I wanted to do an HTTP live stream of a screencast using ffmpeg, nodejs and html5. I wanted it to be as real-time as possible. However, I find that the video received by the client is behind by 1 to 2 seconds (on Chrome/Chromium). I am using vp8/webm as my codec.
I have eliminated the following factors:
1) Network: I have tried serving and receiving the video locally, setting the video source to 127.0.0.1:PORT or localhost:PORT.
2) ffmpeg encoding speed: I have tried outputting the file locally, and the "delay" seems to be negligible.
3) Chrome internal buffer: the buffer was measured at 0.07 to 0.08 s.
On the nodeJS side, I have a child process that runs the ffmpeg command and does a ffmpeg.stdout.pipe(res); (here ffmpeg is child_process.spawn(...)).
So it seems that nodejs's ffmpeg.stdout.pipe(res) is what delays the video stream. Am I correct in assuming so? Is there any way I can reduce the delay?
-
HTTP header for the duration of an MP4 for HTML5 video
9 March 2014, by Mustafa
I am trying to stream MP4 video from a web server as it is encoded. I believe I used the appropriate flags, but it is not working correctly. When I download the video from my stream and open it with VLC, it properly shows the duration. Since a socket is not seekable, I assume the metadata is written at the end? My Chrome browser always shows a duration of 8 seconds. The first 8 seconds play at normal speed, but afterwards the pause button turns into a play button and the video plays very fast, probably as fast as it is received. However, the audio plays at normal speed. I tried
document.getElementById('myVid').duration = 20000
but it is a read-only field. I wonder, is there any way to explicitly state the duration in HTTP headers, or in any other way? I cannot find any documentation about it.
ffmpeg -i - -vcodec libx264 -acodec libvo_aacenc -ar 44100 -ac 2 -ab 128000 -f mp4 -movflags frag_keyframe+faststart pipe:1 -fflags +genpts -re -profile baseline -level 30 -preset fast
To close-voters who think this is not programming-related: I use it in my own server that I coded, and I need to set the duration programmatically, via JavaScript or by setting an HTTP header. I believe it may relate to both ffmpeg and HTTP headers, which is why I posted it here.
app.get("/video/*", function(req,res){
res.writeHead(200, {
'Content-Type': 'video/mp4',
});
var dir = req.url.split("/").splice(2).join("/");
var buf = new Buffer(dir, 'base64');
var src = buf.toString();
var Transcoder = require('stream-transcoder');
var stream = fs.createReadStream(src);
// I added my own flags to this module, they are at below:
new Transcoder(stream)
.videoCodec('libx264')
.audioCodec("libvo_aacenc")
.sampleRate(44100)
.channels(2)
.audioBitrate(128 * 1000)
.format('mp4')
.on('finish', function() {
console.log("finished");
})
.stream().pipe(res);
});exec function in that stream-transcoder module,
a.push("-fflags");
a.push("+genpts");
a.push("-re");
a.push("-profile");
a.push("baseline");
a.push("-level");
a.push("30");
a.push("-preset");
a.push("fast");
a.push("-strict");
a.push("experimental");
a.push("-frag_duration");
a.push("" + 2 * (1000 * 1000));
var child = spawn('ffmpeg', a, {
cwd: os.tmpdir()
});
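For reference: an MP4 fragmented with frag_keyframe only announces the fragments received so far, which is consistent with the browser's 8-second duration, and there is no standard HTTP header that browsers take an MP4's duration from. One workaround direction is to probe the source file's total duration server-side (src here is a file on disk) and deliver it to the page out of band, for instance in a custom response header that the player script reads. Below is a minimal sketch with libavformat; ffprobe exposes the same value from the command line, and the helper name is hypothetical:

#include <libavformat/avformat.h>

/* Hypothetical helper: total duration of a media file in seconds,
   or a negative value on failure. On older FFmpeg builds, call
   av_register_all() once at startup first. */
static double probe_duration_seconds(const char *path)
{
    AVFormatContext *fmt = NULL;
    double seconds = -1.0;

    if (avformat_open_input(&fmt, path, NULL, NULL) != 0)
        return -1.0;
    if (avformat_find_stream_info(fmt, NULL) >= 0 &&
        fmt->duration != AV_NOPTS_VALUE)
        seconds = (double)fmt->duration / AV_TIME_BASE;

    avformat_close_input(&fmt);
    return seconds;
}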