Recherche avancée

Médias (3)

Mot : - Tags -/collection

Autres articles (99)

  • Submit bugs and patches

    13 avril 2011

    Unfortunately, software is never perfect.
    If you think you have found a bug, report it using our ticket system. Please help us to fix it by providing the following information: the browser you are using, including the exact version; as precise an explanation as possible of the problem; if possible, the steps taken that resulted in the problem; and a link to the site / page in question.
    If you think you have solved the bug, fill in a ticket and attach to it a corrective patch.
    You may also (...)

  • Activation de l’inscription des visiteurs

    12 avril 2011, par

    Il est également possible d’activer l’inscription des visiteurs ce qui permettra à tout un chacun d’ouvrir soi-même un compte sur le canal en question dans le cadre de projets ouverts par exemple.
    Pour ce faire, il suffit d’aller dans l’espace de configuration du site en choisissant le sous menus "Gestion des utilisateurs". Le premier formulaire visible correspond à cette fonctionnalité.
    Par défaut, MediaSPIP a créé lors de son initialisation un élément de menu dans le menu du haut de la page menant (...)

  • Contribute to documentation

    13 avril 2011

    Documentation is vital to the development of improved technical capabilities.
    MediaSPIP welcomes documentation by users as well as developers - including: critique of existing features and functions; articles contributed by developers, administrators, content producers and editors; screenshots to illustrate the above; and translations of existing documentation into other languages.
    To contribute, register to the project users’ mailing (...)

Sur d’autres sites (11214)

  • Using ffmpeg to record 15 minute segments of a live audio stream

    6 août 2015, par David

    I currently have a code setup to record a live streaming audio file from a given url to an mp3 file until I terminate the program.

    // Fluent-ffmpeg chain: record the live stream at obj.url as MP3,
    // logging encoder errors (with ffmpeg's stdout/stderr) when they occur.
    // FIXME: `fmpeg` is presumably a typo for the fluent-ffmpeg command
    // instance (typically `var ffmpeg = require('fluent-ffmpeg')`) — confirm
    // against the full script.
    // NOTE(review): the chain is truncated here — no output call such as
    // .save()/.pipe() is shown, so as quoted it configures but never runs.
    fmpeg(obj.url)
    .noVideo()
    .format('mp3')
       .on('error', function(err, stdout, stderr) {
       console.error(err);
       console.error(stdout);
       console.error(stderr)
     })

    I already have it set up so that it will create a new file every time the ffmpeg is called. All I need is a way to have ffmpeg stop, and then start again, every 15 minutes.

    I know duration will stop it after 15 minutes, but I don’t know of anything that will start the same ffmpeg file again after this.

  • Live streaming using FFMPEG to web audio api

    19 janvier 2014, par Nayan

    I am trying to stream audio using node.js + ffmpeg to browsers connected in LAN only using web audio api.

    Not using the audio element because it's adding its own buffer of 8 to 10 secs and I want to keep the latency as low as possible (around 1 to 2 sec max).

    Audio plays successfully but audio is very choppy and noisy.

    Here is my node.js (server side) file :

    // WebSocket server: for each client connection, spawn ffmpeg to decode
    // A.mp3 into raw 32-bit-float little-endian PCM ("f32le") on stdout and
    // forward each chunk to the client as a base64 string.
    var ws = require('websocket.io'),
    server = ws.listen(3000);
    var child_process = require("child_process");
    server.on('connection', function (socket)
    {

    console.log('New client connected');

    var ffmpeg = child_process.spawn("ffmpeg",[
       "-re","-i",                  // -re: read input at its native rate (live pacing)
       "A.mp3","-f",
       "f32le",                     // raw float32 LE samples, no container
       "pipe:1"                     // Output to STDOUT
       ]);

    ffmpeg.stdout.on('data', function(data)
    {
       // `data` is already a Buffer; the original wrapped it in the
       // deprecated `new Buffer(data)` constructor, which only made a
       // needless copy (Buffer() is deprecated since Node 6 — use
       // Buffer.from() when a copy is actually required).
       socket.send(data.toString('base64'));
    });
    });

    And here is my HTML :

    // Browser client: receives base64-encoded raw f32le PCM chunks over a
    // WebSocket and plays them through the Web Audio API.
    var audioBuffer = null;
    var context = null;
    var playTime = 0;  // running playhead so chunks are queued back-to-back
    window.addEventListener('load', init, false);
    function init() {
       try {
           // Prefer the standard constructor; fall back to the legacy WebKit prefix.
           context = new (window.AudioContext || window.webkitAudioContext)();
       } catch(e) {
           alert('Web Audio API is not supported in this browser');
       }
    }

    var ws = new WebSocket("ws://localhost:3000/");

    ws.onmessage = function(message)
    {
       var raw = base64DecToArr(message.data).buffer;
       // f32le matches the little-endian layout of every mainstream browser,
       // so a plain Float32Array view decodes the samples directly — no
       // per-sample DataView.getFloat32 loop needed.
       var interleaved = new Float32Array(raw);
       var frameCount = interleaved.length / 2;

       // NOTE(review): 44100 is assumed here; ffmpeg emits samples at the
       // source file's rate — resample server-side with "-ar 44100" or use
       // context.sampleRate, otherwise pitch/speed will be wrong.
       var audioBuffer = context.createBuffer(2, frameCount, 44100);
       var left  = audioBuffer.getChannelData(0);
       var right = audioBuffer.getChannelData(1);

       // De-interleave L/R sample pairs. The original copied the interleaved
       // stereo stream into channel 0 only, which doubles the apparent
       // duration and mixes both channels' samples into one — the main cause
       // of the noisy, garbled output.
       for (var i = 0; i < frameCount; ++i)
       {
           left[i]  = interleaved[2 * i];
           right[i] = interleaved[2 * i + 1];
       }

       var source = context.createBufferSource(); // creates a sound source
       source.buffer = audioBuffer;
       source.connect(context.destination); // connect the source to the context's destination (the speakers)

       // Schedule each chunk to start exactly when the previous one ends,
       // instead of start(0) (i.e. "now") for every chunk — immediate starts
       // leave jitter-sized gaps/overlaps between chunks (the "choppy" symptom).
       if (playTime < context.currentTime) playTime = context.currentTime;
       source.start(playTime);
       playTime += audioBuffer.duration;
    };

  • FFmpeg encoding live audio to aac issue

    12 juillet 2015, par Ruurd Adema

    I’m trying to encode live raw audio coming from a Blackmagic Decklink input card to a mov file with AAC encoding.

    The issue is that the audio sounds distorted and plays too fast.

    I created the software based on a couple of examples/tutorials including the Dranger tutorial and examples on Github (and of course the examples in the FFmpeg codebase).

    Honestly, at this moment I don’t exactly know what the cause of the problem is. I’m thinking about PTS/DTS values or a timebase mismatch (because of the too fast playout), I tried a lot of things, including working with an av_audio_fifo.

    • When outputting to the mov file with the AV_CODEC_ID_PCM_S16LE codec, everything works well
    • When outputting to the mov file with the AV_CODEC_ID_AAC codec, the problems occur
    • When writing RAW audio VLC media info shows :
      Type : Audio, Codec : PCM S16 LE (sowt), Language : English, Channels : Stereo, Sample rate : 48000 Hz, Bits per sample.
    • When writing with AAC codec VLC media info shows :
      Type : Audio, Codec : MPEG AAC Audio (mp4a), Language : English, Channels : Stereo, Sample rate : 48000 Hz.

    Any idea(s) of what’s causing the problems ?

    Code

    // Create output context

    // Allocate the muxing context for a QuickTime/MOV file on disk.
    // NOTE(review): on failure this only logs — output_fmt_ctx stays NULL and
    // any later use would dereference it; presumably the full program aborts
    // elsewhere, but as quoted the error is not fatal.
    output_filename = "/root/movies/encoder_debug.mov";
    output_format_name = "mov";
    if (avformat_alloc_output_context2(&output_fmt_ctx, NULL, output_format_name, output_filename) < 0)
    {
       printf("[ERROR] Unable to allocate output format context for output: %s\n", output_filename);
    }

    // Create audio output stream

    // Create a new audio stream on muxing context `oc`, configure its codec
    // context from the DeckLink capture settings (sample rate, channel count),
    // open the encoder for `codec_id`, and return the stream.
    // Exits the process on any failure.
    static AVStream *encoder_add_audio_stream(AVFormatContext *oc, enum AVCodecID codec_id)
    {
       AVCodecContext  *c;
       AVCodec         *codec;
       AVStream        *st;

       st = avformat_new_stream(oc, NULL);
       if (!st)
       {
           printf("[ERROR] Could not allocate new audio stream!\n");
           exit(-1);
       }

       // NOTE(review): st->codec is the (deprecated) stream-embedded codec
       // context; fine for the FFmpeg versions this code targets.
       c                 = st->codec;
       c->codec_id       = codec_id;
       c->codec_type     = AVMEDIA_TYPE_AUDIO;
       // NOTE(review): S16 is forced for BOTH codec paths. FFmpeg's native
       // AAC encoder only accepts AV_SAMPLE_FMT_FLTP — feeding it S16-tagged
       // data (or relying on it silently) is a likely cause of the reported
       // distortion in the AAC path. Confirm against codec->sample_fmts and
       // convert with libswresample if needed.
       c->sample_fmt     = AV_SAMPLE_FMT_S16;
       c->sample_rate    = decklink_config()->audio_samplerate;
       c->channels       = decklink_config()->audio_channel_count;
       c->channel_layout = av_get_default_channel_layout(decklink_config()->audio_channel_count);
       // Timebase 1/samplerate so pts values count samples.
       c->time_base.den  = decklink_config()->audio_samplerate;
       c->time_base.num  = 1;

       if (codec_id == AV_CODEC_ID_AAC)
       {
           c->bit_rate       = 96000;  
           //c->profile = FF_PROFILE_AAC_MAIN; //FIXME Generates error: "Unable to set the AOT 1: Invalid config"
           // Allow the use of the experimental AAC encoder
           c->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
       }

       // Some formats want stream headers to be separate (global?)
       if (oc->oformat->flags & AVFMT_GLOBALHEADER)
       {
           c->flags |= CODEC_FLAG_GLOBAL_HEADER;
       }

       codec = avcodec_find_encoder(c->codec_id);
       if (!codec)
       {
           printf("[ERROR] Audio codec not found\n");
           exit(-1);
       }

       if (avcodec_open2(c, codec, NULL) < 0)
       {
           printf("[ERROR] Could not open audio codec\n");
           exit(-1);
       }

       return st;
    }

    // And then, at every incoming frame this function gets called:

    // Per-capture-frame callback: takes the DeckLink audio packet (if any),
    // and either writes it straight to the MOV muxer as PCM S16LE or encodes
    // it to AAC first, depending on the compile-time AUDIO_TYPE.
    // The videoframe parameter is unused in this excerpt.
    void encoder_handle_incoming_frame(IDeckLinkVideoInputFrame *videoframe, IDeckLinkAudioInputPacket *audiopacket)
    {
       void *pixels = NULL;
       int   pitch = 0;
       int got_packet = 0;

       void *audiopacket_data          = NULL;
       long  audiopacket_sample_count  = 0;
       long  audiopacket_size          = 0;
       long  audiopacket_channel_count = 2;

       if (audiopacket)
       {  
           // NOTE(review): the brace-initializer plus av_init_packet() below is
           // redundant — av_init_packet() resets the non-data fields anyway.
           AVPacket pkt = {0,0,0,0,0,0,0,0,0,0,0,0,0,0};
           AVFrame *frame;
           BMDTimeValue audio_pts;
           int requested_size;
           // NOTE(review): only last_pts2 has an explicit initializer here;
           // both are zero-initialized as statics regardless.
           static int last_pts1, last_pts2 = 0;

           // Byte size = samples * bytes-per-sample * channels.
           audiopacket_sample_count  = audiopacket->GetSampleFrameCount();
           audiopacket_channel_count = decklink_config()->audio_channel_count;
           audiopacket_size          = audiopacket_sample_count * (decklink_config()->audio_sampletype/8) * audiopacket_channel_count;

           audiopacket->GetBytes(&audiopacket_data);

           av_init_packet(&pkt);    

           printf("\n=== Audiopacket: %d ===\n", audio_stream->codec->frame_number);

           if (AUDIO_TYPE == AV_CODEC_ID_PCM_S16LE)
           {
               // Raw PCM path: no encoding — wrap the capture bytes in a
               // packet and hand it to the muxer. pts is requested directly
               // in the stream's timebase.
               audiopacket->GetPacketTime(&audio_pts, audio_stream->time_base.den);

               pkt.pts          = audio_pts;
               pkt.dts          = pkt.pts;
               pkt.flags       |= AV_PKT_FLAG_KEY;                 // TODO: Make sure if this still applies
               pkt.stream_index = audio_stream->index;
               pkt.data         = (uint8_t *)audiopacket_data;
               pkt.size         = audiopacket_size;

               printf("[PACKET] size:              %d\n", pkt.size);
               printf("[PACKET] pts:               %li\n", pkt.pts);
               printf("[PACKET] pts delta:         %li\n", pkt.pts - last_pts2);
               printf("[PACKET] duration:          %d\n", pkt.duration);
               last_pts2 = pkt.pts;

               av_interleaved_write_frame(output_fmt_ctx, &pkt);
           }
           else if (AUDIO_TYPE == AV_CODEC_ID_AAC)
           {
               // AAC path: wrap the capture bytes in an AVFrame and encode.
               // NOTE(review): AAC encoders consume fixed frames of
               // codec->frame_size samples (1024 for AAC), but nb_samples is
               // set to whatever the DeckLink packet delivered — the
               // requested_size vs audiopacket_size debug print below exposes
               // the mismatch. Feeding oversized/undersized frames (instead
               // of buffering through an AVAudioFifo) is a plausible cause of
               // the too-fast, distorted playback. Also note the frame data is
               // S16 interleaved while the encoder may expect FLTP — confirm.
               frame = av_frame_alloc();
               frame->format = audio_stream->codec->sample_fmt;
               frame->channel_layout = audio_stream->codec->channel_layout;
               frame->sample_rate = audio_stream->codec->sample_rate;
               frame->nb_samples = audiopacket_sample_count;

               requested_size = av_samples_get_buffer_size(NULL, audio_stream->codec->channels, audio_stream->codec->frame_size, audio_stream->codec->sample_fmt, 1);

               audiopacket->GetPacketTime(&audio_pts, audio_stream->time_base.den);

               printf("[DEBUG] Sample format:      %d\n", frame->format);
               printf("[DEBUG] Channel layout:     %li\n", frame->channel_layout);
               printf("[DEBUG] Sample rate:        %d\n", frame->sample_rate);
               printf("[DEBUG] NB Samples:         %d\n", frame->nb_samples);
               printf("[DEBUG] Datasize:           %li\n", audiopacket_size);
               printf("[DEBUG] Requested datasize: %d\n", requested_size);
               printf("[DEBUG] Too less/much:      %li\n", audiopacket_size - requested_size);
               printf("[DEBUG] Framesize:          %d\n", audio_stream->codec->frame_size);
               printf("[DEBUG] Audio pts:          %li\n", audio_pts);
               printf("[DEBUG] Audio pts delta:    %li\n", audio_pts - last_pts1);
               last_pts1 = audio_pts;

               // NOTE(review): audio_pts was requested in the STREAM timebase,
               // but frame->pts feeds the encoder, which works in the CODEC
               // timebase; the muxer may also rescale the stream timebase on
               // header write. If they differ, av_rescale_q() is needed here.
               frame->pts = audio_pts;

               if (avcodec_fill_audio_frame(frame, audiopacket_channel_count, audio_stream->codec->sample_fmt, (const uint8_t *)audiopacket_data, audiopacket_size, 0) < 0)
               {
                   printf("[ERROR] Filling audioframe failed!\n");
                   exit(-1);
               }

               got_packet = 0;
               if (avcodec_encode_audio2(audio_stream->codec, &pkt, frame, &got_packet) != 0)
               {
                   printf("[ERROR] Encoding audio failed\n");
               }

               // Encoders buffer internally; a packet may not be produced on
               // every call.
               if (got_packet)
               {
                   pkt.stream_index = audio_stream->index;
                   pkt.flags       |= AV_PKT_FLAG_KEY;

                   //printf("[PACKET] size:              %d\n", pkt.size);
                   //printf("[PACKET] pts:               %li\n", pkt.pts);
                   //printf("[PACKET] pts delta:         %li\n", pkt.pts - last_pts2);
                   //printf("[PACKET] duration:          %d\n", pkt.duration);
                   //printf("[PACKET] timebase codec:    %d/%d\n", audio_stream->codec->time_base.num, audio_stream->codec->time_base.den);
                   //printf("[PACKET] timebase stream:   %d/%d\n", audio_stream->time_base.num, audio_stream->time_base.den);
                   last_pts2 = pkt.pts;

                   av_interleaved_write_frame(output_fmt_ctx, &pkt);
               }
               av_frame_free(&frame);
           }

           av_free_packet(&pkt);
       }
       else
       {
           printf("[WARNING] No audiopacket received!\n");
       }

       static int count = 0;
       count++;
    }