Recherche avancée

Médias (1)

Mot : - Tags -/artwork

Autres articles (55)

  • Publier sur MédiaSpip

    13 juin 2013

    Puis-je poster des contenus à partir d’une tablette Ipad ?
    Oui, si votre Médiaspip installé est à la version 0.2 ou supérieure. Contacter au besoin l’administrateur de votre MédiaSpip pour le savoir

  • Keeping control of your media in your hands

    13 avril 2011, par

    The vocabulary used on this site and around MediaSPIP in general, aims to avoid reference to Web 2.0 and the companies that profit from media-sharing.
    While using MediaSPIP, you are invited to avoid using words like "Brand", "Cloud" and "Market".
    MediaSPIP is designed to facilitate the sharing of creative media online, while allowing authors to retain complete control of their work.
    MediaSPIP aims to be accessible to as many people as possible and development is based on expanding the (...)

  • Ajouter notes et légendes aux images

    7 février 2011, par

    Pour pouvoir ajouter notes et légendes aux images, la première étape est d’installer le plugin "Légendes".
    Une fois le plugin activé, vous pouvez le configurer dans l’espace de configuration afin de modifier les droits de création / modification et de suppression des notes. Par défaut seuls les administrateurs du site peuvent ajouter des notes aux images.
    Modification lors de l’ajout d’un média
    Lors de l’ajout d’un média de type "image" un nouveau bouton apparait au dessus de la prévisualisation (...)

Sur d’autres sites (8141)

  • Increase/Decrease audio volume using FFmpeg

    9 juin 2016, par williamtroup

    I’m am currently using C# invokes to call the FFmpeg APIs to handle video and audio. I have the following code in place to extract the audio from a video and write it to a file.

    while (ffmpeg.av_read_frame(formatContext, &packet) >= 0)
    {
       if (packet.stream_index == streamIndex)
       {
           while (packet.size > 0)
           {
               int frameDecoded;
               int frameDecodedResult = ffmpeg.avcodec_decode_audio4(codecContext, frame, &frameDecoded, packet);

               if (frameDecoded > 0 && frameDecodedResult >= 0)
               {
                   //writeAudio.WriteFrame(frame);

                   packet.data += totalBytesDecoded;
                   packet.size -= totalBytesDecoded;
               }
           }

           frameIndex++;
       }

       Avcodec.av_free_packet(&packet);
    }

    This is all working correctly. I’m currently using the FFmpeg.AutoGen project for the API access.

    I want to be able to increase/decrease the volume of the audio before its written to the file, but I cannot seem to find a command or any help with this. Does it have to be done manually ?

    Update 1 :

    After receiving some help, this is the class layout I have :

    public unsafe class FilterVolume
    {
       #region Private Member Variables

       private AVFilterGraph* m_filterGraph = null;
       private AVFilterContext* m_aBufferSourceFilterContext = null;
       private AVFilterContext* m_aBufferSinkFilterContext = null;

       #endregion

       #region Private Constant Member Variables

       private const int EAGAIN = 11;

       #endregion

       public FilterVolume(AVCodecContext* codecContext, AVStream* stream, float volume)
       {
           CodecContext = codecContext;
           Stream = stream;
           Volume = volume;

           Initialise();
       }

       public AVFrame* Adjust(AVFrame* frame)
       {
           AVFrame* returnFilteredFrame = ffmpeg.av_frame_alloc();

           if (m_aBufferSourceFilterContext != null && m_aBufferSinkFilterContext != null)
           {
               int bufferSourceAddFrameResult = ffmpeg.av_buffersrc_add_frame(m_aBufferSourceFilterContext, frame);
               if (bufferSourceAddFrameResult < 0)
               {
               }

               int bufferSinkGetFrameResult = ffmpeg.av_buffersink_get_frame(m_aBufferSinkFilterContext, returnFilteredFrame);
               if (bufferSinkGetFrameResult < 0 && bufferSinkGetFrameResult != -EAGAIN)
               {
               }
           }

           return returnFilteredFrame;
       }

       public void Dispose()
       {
           Cleanup(m_filterGraph);
       }

       #region Private Properties

       private AVCodecContext* CodecContext { get; set; }
       private AVStream* Stream { get; set; }
       private float Volume { get; set; }

       #endregion

       #region Private Setup Helper Functions

       private void Initialise()
       {
           m_filterGraph = GetAllocatedFilterGraph();

           string aBufferFilterArguments = string.Format("sample_fmt={0}:channel_layout={1}:sample_rate={2}:time_base={3}/{4}",
               (int)CodecContext->sample_fmt,
               CodecContext->channel_layout,
               CodecContext->sample_rate,
               Stream->time_base.num,
               Stream->time_base.den);

           AVFilterContext* aBufferSourceFilterContext = CreateFilter("abuffer", m_filterGraph, aBufferFilterArguments);
           AVFilterContext* volumeFilterContext = CreateFilter("volume", m_filterGraph, string.Format("volume={0}", Volume));
           AVFilterContext* aBufferSinkFilterContext = CreateFilter("abuffersink", m_filterGraph);

           LinkFilter(aBufferSourceFilterContext, volumeFilterContext);
           LinkFilter(volumeFilterContext, aBufferSinkFilterContext);

           SetFilterGraphConfiguration(m_filterGraph, null);

           m_aBufferSourceFilterContext = aBufferSourceFilterContext;
           m_aBufferSinkFilterContext = aBufferSinkFilterContext;
       }

       #endregion

       #region Private Cleanup Helper Functions

       private static void Cleanup(AVFilterGraph* filterGraph)
       {
           if (filterGraph != null)
           {
               ffmpeg.avfilter_graph_free(&filterGraph);
           }
       }

       #endregion

       #region Provate Helpers

       private AVFilterGraph* GetAllocatedFilterGraph()
       {
           AVFilterGraph* filterGraph = ffmpeg.avfilter_graph_alloc();
           if (filterGraph == null)
           {
           }

           return filterGraph;
       }

       private AVFilter* GetFilterByName(string name)
       {
           AVFilter* filter = ffmpeg.avfilter_get_by_name(name);
           if (filter == null)
           {
           }

           return filter;
       }

       private void SetFilterGraphConfiguration(AVFilterGraph* filterGraph, void* logContext)
       {
           int filterGraphConfigResult = ffmpeg.avfilter_graph_config(filterGraph, logContext);
           if (filterGraphConfigResult < 0)
           {
           }
       }

       private AVFilterContext* CreateFilter(string filterName, AVFilterGraph* filterGraph, string filterArguments = null)
       {
           AVFilter* filter = GetFilterByName(filterName);
           AVFilterContext* filterContext;

           int aBufferFilterCreateResult = ffmpeg.avfilter_graph_create_filter(&filterContext, filter, filterName, filterArguments, null, filterGraph);
           if (aBufferFilterCreateResult < 0)
           {
           }

           return filterContext;
       }

       private void LinkFilter(AVFilterContext* source, AVFilterContext* destination)
       {
           int filterLinkResult = ffmpeg.avfilter_link(source, 0, destination, 0);
           if (filterLinkResult < 0)
           {
           }
       }

       #endregion
    }

    The Adjust() function is called after a frame is decoded. I’m currently getting a -22 error when av_buffersrc_add_frame() is called. This indicates that a parameter is invalid, but after debugging, I cannot see anything that would be causing this.

    This is how the code is called :

    while (ffmpeg.av_read_frame(formatContext, &packet) >= 0)
    {
       if (packet.stream_index == streamIndex)
       {
           while (packet.size > 0)
           {
               int frameDecoded;
               int frameDecodedResult = ffmpeg.avcodec_decode_audio4(codecContext, frame, &frameDecoded, packet);

               if (frameDecoded > 0 && frameDecodedResult >= 0)
               {
                   AVFrame* filteredFrame = m_filterVolume.Adjust(frame);

                   //writeAudio.WriteFrame(filteredFrame);

                   packet.data += totalBytesDecoded;
                   packet.size -= totalBytesDecoded;
               }
           }

           frameIndex++;
       }

       Avcodec.av_free_packet(&packet);
    }

    Update 2 :

    Cracked it. The "channel_layout" option in the filter argument string is supposed to be a hexadecimal. This is what the string formatting should look like :

    string aBufferFilterArguments = string.Format("sample_fmt={0}:channel_layout=0x{1}:sample_rate={2}:time_base={3}/{4}",
       (int)CodecContext->sample_fmt,
       CodecContext->channel_layout,
       CodecContext->sample_rate,
       Stream->time_base.num,
       Stream->time_base.den);
  • avfilter/vf_xmedian : allow to control eof handling

    30 décembre 2020, par Paul B Mahol
    avfilter/vf_xmedian : allow to control eof handling
    
    • [DH] libavfilter/vf_xmedian.c
  • Draw time of the current frame in seconds with just 2 digits after point

    13 janvier 2023, par 124bit

    I can’t find the way to draw time of the current frame in seconds with just 2 digits after point.
I tried “drawtext”, and it does well, however it gives or int seconds, or float with many digits =(