Recherche avancée

Médias (2)

Mot : - Tags -/documentation

Autres articles (63)

  • Personnaliser en ajoutant son logo, sa bannière ou son image de fond

    5 septembre 2013, par

    Certains thèmes prennent en compte trois éléments de personnalisation : l’ajout d’un logo ; l’ajout d’une bannière ; l’ajout d’une image de fond.

  • Publier sur MédiaSpip

    13 juin 2013

    Puis-je poster des contenus à partir d’une tablette Ipad ?
    Oui, si votre Médiaspip installé est à la version 0.2 ou supérieure. Contacter au besoin l’administrateur de votre MédiaSpip pour le savoir

  • MediaSPIP version 0.1 Beta

    16 avril 2011, par

    MediaSPIP 0.1 beta est la première version de MediaSPIP décrétée comme "utilisable".
    Le fichier zip ici présent contient uniquement les sources de MediaSPIP en version standalone.
    Pour avoir une installation fonctionnelle, il est nécessaire d’installer manuellement l’ensemble des dépendances logicielles sur le serveur.
    Si vous souhaitez utiliser cette archive pour une installation en mode ferme, il vous faudra également procéder à d’autres modifications (...)

Sur d’autres sites (7879)

  • Audio/Video encoding with ffmpeg

    16 mars 2017, par Vroz

    Audio/Video encoding with ffmpeg :

    I am trying to create an avi file with encoded video and audio, using ffmpeg.

    First, I create the file :

               #define BITRATE 10000000
               #define GOP 300
               #define FPS 60
               #define VIDEOTYPE "avi"

               if (!encoder_->createFile(QFileInfo(*(videoFile_.data())).absoluteFilePath(), targetRect.width(), targetRect.height(), BITRATE*(1000 / FPS), GOP, 1000))    

    The buffers are initialized as :

               audio_outbuf_size = 44100 * 0.005 * 16; //5ms of audio should be encoded, each time this function is called
               audio_outbuf = new uint8_t[audio_outbuf_size];

               outbuf_size = getWidth()*getHeight() * 3;        
               outbuf = new uint8_t[outbuf_size];

    Then add audio and video streams (audio : CODEC_ID_PCM_S16LE, 16000 b/s (16 kb/s) and 44100 Hz, video : PIX_FMT_YUV420P)

               // Creates and configures the single audio stream on pOutputFormatCtx_,
               // opens the encoder, allocates the output buffer and audio frame,
               // opens the output file and writes the container header.
               //
               // NOTE(review): this uses the pre-0.8 FFmpeg API (av_new_stream,
               // avcodec_thread_init, av_set_parameters, avcodec_open, url_fopen,
               // av_write_header), all removed from modern FFmpeg.
               void MediaMuxer::addAudioStream(QString fileName, ffmpeg::CodecID codec_id)
               {
                   // Add the audio stream
                   ffmpeg::AVCodec *encoder = avcodec_find_encoder(codec_id);
                   pAudioStream_ = ffmpeg::av_new_stream(pOutputFormatCtx_, 0);
                   if (!pAudioStream_) {
                       printf("Could not allocate stream\n");
                       return;
                   }

                   pAudioCodecCtx_ = pAudioStream_->codec;
                   pAudioCodecCtx_->codec_id = codec_id;
                   pAudioCodecCtx_->codec_type = ffmpeg::AVMEDIA_TYPE_AUDIO;
                   // NOTE(review): the next line immediately overwrites this S16
                   // setting with the encoder's first preferred sample format.
                   pAudioCodecCtx_->sample_fmt = ffmpeg::AV_SAMPLE_FMT_S16;
                   pAudioCodecCtx_->sample_fmt = encoder->sample_fmts[0];
                   // 16000 bit/s, i.e. 16 kb/s (ignored by PCM encoders anyway).
                   pAudioCodecCtx_->bit_rate = 16000;
                   //pAudioCodecCtx_->bit_rate = 64000;
                   // NOTE(review): `N` is not defined anywhere in this excerpt;
                   // presumably 44100 (the rate used in encodeAudio) — TODO confirm.
                   pAudioCodecCtx_->sample_rate = N;
                   pAudioCodecCtx_->channels = 1;

                   // NOTE(review): an audio stream's time_base is conventionally
                   // 1/sample_rate; using the video FPS here makes every audio pts
                   // rescale wrongly and is a likely cause of the audio track ending
                   // earlier than the video.
                   pAudioCodecCtx_->time_base.den = FPS;
                   pAudioCodecCtx_->time_base.num = 1;

                   avcodec_thread_init(pAudioCodecCtx_, 10);

                   // some formats want stream headers to be separate
                   if (pOutputFormatCtx_->oformat->flags & AVFMT_GLOBALHEADER)
                       pAudioCodecCtx_->flags |= CODEC_FLAG_GLOBAL_HEADER;

                   if (av_set_parameters(pOutputFormatCtx_, NULL) < 0)
                   {
                       printf("Invalid output format parameters\n");
                       return;
                   }

                   //ffmpeg::dump_format(pOutputFormatCtx_, 0, fileName.toStdString().c_str(), 1);

                   // open_video
                   // find the audio encoder
                   // NOTE(review): redundant — `encoder` above already holds this result.
                   pAudioCodec_ = avcodec_find_encoder(pAudioCodecCtx_->codec_id);
                   if (!pAudioCodec_)
                   {
                       printf("codec not found\n");
                       return;
                   }
                   // open the codec
                   if (avcodec_open(pAudioCodecCtx_, pAudioCodec_) < 0)
                   {
                       printf("could not open codec\n");
                       return;
                   }

                   // Allocate memory for output
                   if (!initAudioOutputBuf())
                   {
                       printf("Can't allocate memory for audio output bitstream\n");
                       return;
                   }

                   // Allocate the audio frame
                   if (!initAudioFrame())
                   {
                       printf("Can't init audio frame\n");
                       return;
                   }

                   // NOTE(review): addVideoStream() ends with the same url_fopen +
                   // av_write_header sequence; calling both add*Stream methods on one
                   // context would open the file and write the header twice — confirm
                   // only one of them is expected to run per output file.
                   if (url_fopen(&pOutputFormatCtx_->pb, fileName.toStdString().c_str(), URL_WRONLY) < 0)
                   {
                       printf("Could not open '%s'\n", fileName.toStdString().c_str());
                       return;
                   }
                   av_write_header(pOutputFormatCtx_);
               }

               // Creates and configures the single video stream on pOutputFormatCtx_
               // (Bitrate / FPS / Gop, YUV420P at getWidth() x getHeight()), opens the
               // encoder, allocates the output buffer and YUV frame, opens the output
               // file and writes the container header.
               //
               // NOTE(review): same pre-0.8 FFmpeg API as addAudioStream, and the same
               // url_fopen + av_write_header tail — calling both add*Stream methods
               // would open the file and write the header twice.
               void MediaMuxer::addVideoStream(QString fileName)
               {
                   // Add the video stream
                   pVideoStream_ = ffmpeg::av_new_stream(pOutputFormatCtx_, 0);
                   if (!pVideoStream_)
                   {
                       printf("Could not allocate stream\n");
                       return;
                   }

                   pVideoCodecCtx_ = pVideoStream_->codec;
                   pVideoCodecCtx_->codec_id = pOutputFormat_->video_codec;
                   pVideoCodecCtx_->codec_type = ffmpeg::AVMEDIA_TYPE_VIDEO;

                   pVideoCodecCtx_->bit_rate = Bitrate;
                   pVideoCodecCtx_->width = getWidth();
                   pVideoCodecCtx_->height = getHeight();
                   // 1/FPS time base: one tick per video frame (conventional for video).
                   pVideoCodecCtx_->time_base.den = FPS;
                   pVideoCodecCtx_->time_base.num = 1;
                   pVideoCodecCtx_->gop_size = Gop;
                   pVideoCodecCtx_->pix_fmt = ffmpeg::PIX_FMT_YUV420P;

                   avcodec_thread_init(pVideoCodecCtx_, 10);

                   // some formats want stream headers to be separate
                   if (pOutputFormatCtx_->oformat->flags & AVFMT_GLOBALHEADER)
                       pVideoCodecCtx_->flags |= CODEC_FLAG_GLOBAL_HEADER;


                   if (av_set_parameters(pOutputFormatCtx_, NULL) < 0)
                   {
                       printf("Invalid output format parameters\n");
                       return;
                   }

                   //ffmpeg::dump_format(pOutputFormatCtx_, 0, fileName.toStdString().c_str(), 1);

                   // open_video
                   // find the video encoder
                   pVideoCodec_ = avcodec_find_encoder(pVideoCodecCtx_->codec_id);
                   if (!pVideoCodec_)
                   {
                       printf("codec not found\n");
                       return;
                   }
                   // open the codec
                   if (avcodec_open(pVideoCodecCtx_, pVideoCodec_) < 0)
                   {
                       printf("could not open codec\n");
                       return;
                   }

                   // Allocate memory for output
                   if (!initOutputBuf())
                   {
                       printf("Can't allocate memory for output bitstream\n");
                       return;
                   }

                   // Allocate the YUV frame
                   if (!initFrame())
                   {
                       printf("Can't init frame\n");
                       return;
                   }

                   if (url_fopen(&pOutputFormatCtx_->pb, fileName.toStdString().c_str(), URL_WRONLY) < 0)
                   {
                       printf("Could not open '%s'\n", fileName.toStdString().c_str());
                       return;
                   }
                   av_write_header(pOutputFormatCtx_);
               }

    Finally, I call alternatively encodeVideo/encodeAudio to encode video and PCM audio frames at specific recording times(pts) :

               // Converts `img` to YUV via swscale, encodes one video frame stamped
               // with `pts` and writes the resulting packet to the muxer.
               // Returns the encoded size in bytes (0 when the encoder buffered the
               // frame and emitted nothing), or -1 on write failure.
               int MediaMuxer::encodeVideo(const QImage &img, unsigned pts)
               {
                   convertImage_sws(img);     // SWS conversion
                   // NOTE(review): setting coded_frame->pts BEFORE encoding has no
                   // effect (the encoder overwrites coded_frame); the post-encode
                   // assignment below then clobbers whatever pts the encoder chose.
                   pVideoCodecCtx_->coded_frame->pts = pts;  // Set the time stamp
                   int out_size = ffmpeg::avcodec_encode_video(pVideoCodecCtx_, outbuf, outbuf_size, ppicture);
                   pVideoCodecCtx_->coded_frame->pts = pts;  // Set the time stamp

                   if (out_size > 0)
                   {
                       // `pkt` is not declared locally here — presumably a class
                       // member (contrast the local pkt in encodeAudio) — confirm.
                       ffmpeg::av_init_packet(&pkt);
                       // 0x8000000000000000LL is AV_NOPTS_VALUE spelled out.
                       if (pVideoCodecCtx_->coded_frame->pts != (0x8000000000000000LL))
                           pkt.pts = av_rescale_q(pVideoCodecCtx_->coded_frame->pts, pVideoCodecCtx_->time_base, pVideoStream_->time_base);
                       if (pVideoCodecCtx_->coded_frame->key_frame)
                           pkt.flags |= AV_PKT_FLAG_KEY;

                       pkt.stream_index = pVideoStream_->index;
                       pkt.data = outbuf;
                       pkt.size = out_size;
                       int ret = ffmpeg::av_interleaved_write_frame(pOutputFormatCtx_, &pkt);
                       if (ret<0)
                           return -1;

                   }
                   return out_size;
               }

               // Encodes 220 samples (~5 ms at 44100 Hz) of a synthetic 1 kHz sine
               // tone as one audio frame stamped with `pts` and writes it to the
               // muxer. Returns -1 on write failure; see the NOTE on the final
               // return about which pkt.size is actually returned.
               int MediaMuxer::encodeAudio(unsigned pts)
               {
                   // NOTE(review): as in encodeVideo, assigning coded_frame->pts
                   // before the encode call has no effect.
                   pAudioCodecCtx_->coded_frame->pts = pts;  // Set the time stamp

                   // simple sound encoding
                   int16_t samples[220] = { 0 }; // buffer
                   int n;                // buffer index
                   double Fs = 44100.0;  // sampling frequency

                   // Generate audio data
                   for (n = 0; n < 220; ++n)   // 220 samples = 44100 * 0.005 s, i.e. 5 ms of audio per call
                       samples[n] = 16383.0 * sin(n*1000.0*2.0*M_PI / Fs); // 1 kHz sine at half full-scale

                   int  out_size = ffmpeg::avcodec_encode_audio(pAudioCodecCtx_, audio_outbuf, audio_outbuf_size, (const short*)samples);

                   pAudioCodecCtx_->coded_frame->pts = pts;  // Set the time stamp

                   if (out_size>0)
                   {
                       // Packet
                       // NOTE(review): this LOCAL pkt shadows the outer pkt used by
                       // the final `return pkt.size;` below, which therefore returns
                       // the size of the last VIDEO packet (presumably a member),
                       // not this audio packet — confirm intent.
                       ffmpeg::AVPacket pkt = { 0 };
                       av_init_packet(&pkt);
                       pkt.data = NULL; // packet data will be allocated by the encoder
                       pkt.size = 0;
                       // 0x8000000000000000LL is AV_NOPTS_VALUE spelled out. The
                       // rescale uses the codec time_base (1/FPS, see addAudioStream)
                       // rather than 1/sample_rate — likely why audio ends early.
                       if (pAudioCodecCtx_->coded_frame->pts != (0x8000000000000000LL))
                           pkt.pts = av_rescale_q(pAudioCodecCtx_->coded_frame->pts, pAudioCodecCtx_->time_base, pAudioStream_->time_base);
                       if (pAudioCodecCtx_->coded_frame->key_frame)
                           pkt.flags |= AV_PKT_FLAG_KEY;

                       pkt.stream_index = pAudioStream_->index;
                       pkt.data = audio_outbuf;

                       pkt.size = out_size;
                       int ret = av_interleaved_write_frame(pOutputFormatCtx_, &pkt);
                       if (ret<0)
                           return -1;
                       av_free_packet(&pkt);
                   }
                   //end simple sound encoding

                   return pkt.size;
               }

    The result is a nice video with some audio behind (either a regular beeping sound at regular intervals but ending way earlier than the video or a continuous longer sound that also last shorter than the video).

    I want to generate a beeping sound each time the function encodeAudio() is called - at non-regular intervals. I have tried to modify the sampling rate, the buffer size, the pkt size and the number of samples but without any success. I also tried to set the pts at different times but it did not get me where I want to be. Could someone please help ?

  • ffmpeg stream replication of multiple sources to multiple destinations

    16 mars 2017, par dpx

    I’m looking to replicate multiple streams from a single source, to multiple destinations. So for instance 12 streams with a dest of 1.1.1.1:1000-1011 being reflected to two destinations @ 2.2.2.2:1000-1011 and 3.3.3.3:1000-1011

    I don’t want any stream processing, no modification, just using -vcodec / -acodec. Receiving MPEG-TS and reflecting MPEG-TS at the same bitrate.

    How would I run this with FFMPEG ? Trying to read the documentation but it’s not quite clear on how to handle this.

  • FFMPEG AAC encoding causes audio to be lower in pitch

    14 février 2017, par Paul Knopf

    I built a sample application that encodes AAC (from PortAudio) into a MP4 container (no video stream).

    The resulting audio is lower in pitch.

    #include "stdafx.h"
    #include "TestRecording.h"
    #include "libffmpeg.h"

    // Trivial constructor: all per-recording state lives in a local
    // RecordingContext inside Test(), so there is nothing to set up here.
    // `= default` states that explicitly instead of an empty body.
    TestRecording::TestRecording() = default;


    // Trivial destructor: this class owns no resources — teardown of the
    // recording state happens in FinalizeRecordingContext(), not here.
    TestRecording::~TestRecording() = default;

    struct RecordingContext
    {
       RecordingContext()
       {
           formatContext = NULL;
           audioStream = NULL;
           audioFrame = NULL;
           audioFrameframeNumber = 0;
       }

       libffmpeg::AVFormatContext* formatContext;
       libffmpeg::AVStream* audioStream;
       libffmpeg::AVFrame* audioFrame;
       int audioFrameframeNumber;
    };

    // PortAudio capture callback: wraps the raw input buffer in the
    // pre-allocated AVFrame, encodes it as AAC and writes the packet to the
    // muxer. Always returns paContinue so capture keeps running even after
    // an encode/write error.
    //
    // NOTE(review): this listing is HTML-mangled as captured from the web:
    // `&amp;` / `&lt;` stand for `&` / `<`, and the cast
    // `static_cast<const unsigned="unsigned">` was presumably
    // `static_cast<const unsigned char*>` before the markup was stripped
    // (the stray `</const>` near the end of the listing is its residue).
    //
    // NOTE(review): the frame is filled with `framesPerBuffer` samples of
    // interleaved float input, but the codec context was configured for
    // AV_SAMPLE_FMT_FLTP (planar) and audioFrame->nb_samples stays at the
    // codec's frame_size; any mismatch between these would distort timing/
    // pitch — TODO confirm (this matches the reported symptom). No pts is
    // set on the frame or the packet either.
    static int AudioRecordCallback(const void *inputBuffer, void *outputBuffer,
       unsigned long framesPerBuffer,
       const PaStreamCallbackTimeInfo* timeInfo,
       PaStreamCallbackFlags statusFlags,
       void *userData)
    {
       RecordingContext* recordingContext = (RecordingContext*)userData;

       libffmpeg::avcodec_fill_audio_frame(recordingContext->audioFrame,
           recordingContext->audioFrame->channels,
           recordingContext->audioStream->codec->sample_fmt,
           static_cast<const unsigned="unsigned">(inputBuffer),
           (framesPerBuffer * sizeof(float) * recordingContext->audioFrame->channels),
           0);

       libffmpeg::AVPacket pkt;
       libffmpeg::av_init_packet(&amp;pkt);
       pkt.data = NULL;
       pkt.size = 0;

       int gotpacket;
       int result = avcodec_encode_audio2(recordingContext->audioStream->codec, &amp;pkt, recordingContext->audioFrame, &amp;gotpacket);

       if (result &lt; 0)
       {
           LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't encode the audio frame to acc");
           return paContinue;
       }

       if (gotpacket)
       {
           pkt.stream_index = recordingContext->audioStream->index;
           recordingContext->audioFrameframeNumber++;

           // this codec requires no bitstream filter, just send it to the muxer!
           result = libffmpeg::av_write_frame(recordingContext->formatContext, &amp;pkt);
           if (result &lt; 0)
           {
               LOG(ERROR) &lt;&lt; "Couldn't write the encoded audio frame";
               libffmpeg::av_free_packet(&amp;pkt);
               return paContinue;
           }

           libffmpeg::av_free_packet(&amp;pkt);
       }

       return paContinue;
    }

    // Builds the whole output pipeline inside *recordingContext: MP4 format
    // context, one AAC stream (64 kb/s, 48 kHz, stereo, planar float),
    // opened encoder, a reusable input frame, the output file, and the
    // container header. Returns false on the first hard failure.
    //
    // NOTE(review): listing is HTML-escaped as captured (`&amp;` = `&`,
    // `&lt;` = `<`).
    static bool InitializeRecordingContext(RecordingContext* recordingContext)
    {
       int result = libffmpeg::avformat_alloc_output_context2(&amp;recordingContext->formatContext, NULL, NULL, "C:\\Users\\Paul\\Desktop\\test.mp4");
       if (result &lt; 0)
       {
           LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't create output format context");
           return false;
       }

       libffmpeg::AVCodec *audioCodec;
       audioCodec = libffmpeg::avcodec_find_encoder(libffmpeg::AV_CODEC_ID_AAC);
       if (audioCodec == NULL)
       {
           // NOTE(review): missing `return false` — a NULL audioCodec falls
           // through into avformat_new_stream/avcodec_open2 below.
           LOG(ERROR) &lt;&lt; "Couldn't find the encoder for AAC";
       }

       recordingContext->audioStream = libffmpeg::avformat_new_stream(recordingContext->formatContext, audioCodec);
       if (!recordingContext->audioStream)
       {
           LOG(ERROR) &lt;&lt; "Couldn't create the audio stream";
           return false;
       }

       recordingContext->audioStream->codec->bit_rate = 64000;
       recordingContext->audioStream->codec->sample_fmt = libffmpeg::AV_SAMPLE_FMT_FLTP;
       recordingContext->audioStream->codec->sample_rate = 48000;
       recordingContext->audioStream->codec->channel_layout = AV_CH_LAYOUT_STEREO;
       recordingContext->audioStream->codec->channels = libffmpeg::av_get_channel_layout_nb_channels(recordingContext->audioStream->codec->channel_layout);

       // Mirror the (deprecated) codec-context settings into codecpar so the
       // muxer sees the same parameters.
       recordingContext->audioStream->codecpar->bit_rate = recordingContext->audioStream->codec->bit_rate;
       recordingContext->audioStream->codecpar->format = recordingContext->audioStream->codec->sample_fmt;
       recordingContext->audioStream->codecpar->sample_rate = recordingContext->audioStream->codec->sample_rate;
       recordingContext->audioStream->codecpar->channel_layout = recordingContext->audioStream->codec->channel_layout;
       recordingContext->audioStream->codecpar->channels = recordingContext->audioStream->codec->channels;

       result = libffmpeg::avcodec_open2(recordingContext->audioStream->codec, audioCodec, NULL);
       if (result &lt; 0)
       {
           LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't open the audio codec");
           return false;
       }

       // create a new frame to store the audio samples
       recordingContext->audioFrame = libffmpeg::av_frame_alloc();
       if (!recordingContext->audioFrame)
       {
           LOG(ERROR) &lt;&lt; "Couldn't alloce the output audio frame";
           return false;
       }

       // The frame is sized to the codec's fixed frame_size (1024 samples
       // for AAC); the capture callback must deliver exactly this many
       // samples per call for timing to be correct.
       recordingContext->audioFrame->nb_samples = recordingContext->audioStream->codec->frame_size;
       recordingContext->audioFrame->channel_layout = recordingContext->audioStream->codec->channel_layout;
       recordingContext->audioFrame->channels = recordingContext->audioStream->codec->channels;
       recordingContext->audioFrame->format = recordingContext->audioStream->codec->sample_fmt;
       recordingContext->audioFrame->sample_rate = recordingContext->audioStream->codec->sample_rate;

       result = libffmpeg::av_frame_get_buffer(recordingContext->audioFrame, 0);
       if (result &lt; 0)
       {
           LOG(ERROR) &lt;&lt; "Coudln't initialize the output audio frame buffer";
           return false;
       }

       // some formats want stream headers to be separate
       // NOTE(review): this flag is applied AFTER avcodec_open2 — the AAC
       // encoder needs to see CODEC_FLAG_GLOBAL_HEADER BEFORE opening for
       // correct extradata in MP4 — TODO confirm. Checking
       // oformat->flags & AVFMT_GLOBALHEADER would also cover all such
       // formats instead of this name whitelist.
       if (!strcmp(recordingContext->formatContext->oformat->name, "mp4") || !strcmp(recordingContext->formatContext->oformat->name, "mov") || !strcmp(recordingContext->formatContext->oformat->name, "3gp"))
       {
           recordingContext->audioStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
       }

       // open the output file
       if (!(recordingContext->formatContext->oformat->flags &amp; AVFMT_NOFILE))
       {
           result = libffmpeg::avio_open(&amp;recordingContext->formatContext->pb, recordingContext->formatContext->filename, AVIO_FLAG_WRITE);
           if (result &lt; 0)
           {
               LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't open the output file");
               return false;
           }
       }

       // write the stream headers
       result = libffmpeg::avformat_write_header(recordingContext->formatContext, NULL);
       if (result &lt; 0)
       {
           LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't write the headers to the file");
           return false;
       }

       return true;
    }

    // Tears down everything InitializeRecordingContext built, in order:
    // container trailer, codecs, output file, format context, input frame.
    // Returns false on the first failure (leaving later cleanup undone).
    //
    // NOTE(review): the encoder is never flushed (avcodec_encode_audio2
    // with a NULL frame) before av_write_trailer, so any packets still
    // buffered inside the AAC encoder are dropped — TODO confirm.
    // Listing is HTML-escaped as captured (`&amp;` = `&`, `&lt;` = `<`).
    static bool FinalizeRecordingContext(RecordingContext* recordingContext)
    {
       int result = 0;

       // write the trailing information
       if (recordingContext->formatContext->pb)
       {
           result = libffmpeg::av_write_trailer(recordingContext->formatContext);
           if (result &lt; 0)
           {
               LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't write the trailer information");
               return false;
           }
       }

       // close all the codecs
       for (int i = 0; i &lt; (int)recordingContext->formatContext->nb_streams; i++)
       {
           result = libffmpeg::avcodec_close(recordingContext->formatContext->streams[i]->codec);
           if (result &lt; 0)
           {
               LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't close the codec");
               return false;
           }
       }

       // close the output file
       if (recordingContext->formatContext->pb)
       {
           if (!(recordingContext->formatContext->oformat->flags &amp; AVFMT_NOFILE))
           {
               result = libffmpeg::avio_close(recordingContext->formatContext->pb);
               if (result &lt; 0)
               {
                   LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't close the output file");
                   return false;
               }
           }
       }

       // free the format context and all of its data
       libffmpeg::avformat_free_context(recordingContext->formatContext);

       recordingContext->formatContext = NULL;
       recordingContext->audioStream = NULL;

       // The frame is allocated separately from the format context and must
       // be freed on its own.
       if (recordingContext->audioFrame)
       {
           libffmpeg::av_frame_free(&amp;recordingContext->audioFrame);
           recordingContext->audioFrame = NULL;
       }

       return true;
    }

    // Smoke test: records ~5 seconds from the default input device
    // (stereo float32 @ 48 kHz, 1024-frame buffers) through
    // AudioRecordCallback into test.mp4, then tears everything down.
    //
    // NOTE(review): the LOGINT_WITH_MESSAGE error paths do not return or
    // abort, so execution continues past a failed Pa_* call with a NULL or
    // invalid stream. Listing is HTML-escaped as captured (`&amp;` = `&`,
    // `&lt;` = `<`).
    int TestRecording::Test()
    {
       PaError result = paNoError;

       result = Pa_Initialize();
       if (result != paNoError) LOGINT_WITH_MESSAGE(ERROR, result, "Error initializing audio device framework");

       RecordingContext recordingContext;
       if (!InitializeRecordingContext(&amp;recordingContext))
       {
           LOG(ERROR) &lt;&lt; "Couldn't start recording file";
           return 0;
       }

       auto defaultDevice = Pa_GetDefaultInputDevice();
       auto deviceInfo = Pa_GetDeviceInfo(defaultDevice);

       PaStreamParameters  inputParameters;
       inputParameters.device = defaultDevice;
       inputParameters.channelCount = 2;
       inputParameters.sampleFormat = paFloat32;    // interleaved float32 (no paNonInterleaved flag)
       inputParameters.suggestedLatency = deviceInfo->defaultLowInputLatency;
       inputParameters.hostApiSpecificStreamInfo = NULL;

       PaStream* stream = NULL;
       // 1024 frames per buffer — callback receives framesPerBuffer = 1024.
       result = Pa_OpenStream(
           &amp;stream,
           &amp;inputParameters,
           NULL,
           48000,
           1024,
           paClipOff,
           AudioRecordCallback,
           &amp;recordingContext);
       if (result != paNoError)LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't open the audio stream");

       result = Pa_StartStream(stream);
       if (result != paNoError)LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't start the audio stream");

       Sleep(1000 * 5);

       result = Pa_StopStream(stream);
       if (result != paNoError)LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't stop the audio stream");

       if (!FinalizeRecordingContext(&amp;recordingContext)) LOG(ERROR) &lt;&lt; "Couldn't stop recording file";

       // NOTE(review): message says "stop" but this call CLOSES the stream
       // (copy-paste of the previous error string).
       result = Pa_CloseStream(stream);
       if (result != paNoError)LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't stop the audio stream");

       return 0;
    }
    </const>

    Here is the stdout, in case it helps.

    https://gist.github.com/pauldotknopf/9f24a604ce1f8a081aa68da1bf169e98

    Why is the audio lower in pitch ? I assume I am overlooking a parameter that needs to be configured between PortAudio and FFMPEG. Is there something super obvious that I am missing ?