Advanced search

Media (0)

No media matching your criteria is available on the site.

Other articles (106)

  • MediaSPIP 0.1 Beta version

    25 April 2011, by

    MediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
    The zip file provided here only contains the sources of MediaSPIP in its standalone version.
    To get a working installation, you must manually install all software dependencies on the server.
    If you want to use this archive for an installation in "farm mode", you will also need to carry out other manual (...)

  • MediaSPIP version 0.1 Beta

    16 April 2011, by

    MediaSPIP 0.1 beta is the first version of MediaSPIP declared "usable".
    The zip file provided here contains only the sources of MediaSPIP in the standalone version.
    To get a working installation, you must manually install all of the software dependencies on the server.
    If you want to use this archive for an installation in "farm" mode, you will also need to make other modifications (...)

  • Customising by adding your logo, banner or background image

    5 September 2013, by

    Some themes support three customisation elements: adding a logo; adding a banner; adding a background image.

On other sites (14556)

  • How to reparse video with a stable "overall bit rate"? (FFmpeg)

    20 February 2018, by user3360601

    I have the following code:

    #include
    #include
    #include

    extern "C"
    {
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libavfilter/buffersink.h>
    #include <libavfilter/buffersrc.h>
    #include <libavutil/opt.h>
    #include <libavutil/pixdesc.h>
    }

    static AVFormatContext *ifmt_ctx;
    static AVFormatContext *ofmt_ctx;

    typedef struct StreamContext {
       AVCodecContext *dec_ctx;
       AVCodecContext *enc_ctx;
    } StreamContext;
    static StreamContext *stream_ctx;

    static int open_input_file(const char *filename)
    {
       int ret;
       unsigned int i;

       ifmt_ctx = NULL;
       if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
           av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
           return ret;
       }

       if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
           av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
           return ret;
       }

       stream_ctx = (StreamContext *) av_mallocz_array(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
       if (!stream_ctx)
           return AVERROR(ENOMEM);

       for (i = 0; i < ifmt_ctx->nb_streams; i++) {
           AVStream *stream = ifmt_ctx->streams[i];
           AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
           AVCodecContext *codec_ctx;
           if (!dec) {
               av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
               return AVERROR_DECODER_NOT_FOUND;
           }
           codec_ctx = avcodec_alloc_context3(dec);
           if (!codec_ctx) {
               av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
               return AVERROR(ENOMEM);
           }
           ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
           if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
                   "for stream #%u\n", i);
               return ret;
           }
           /* Reencode video & audio and remux subtitles etc. */
           if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
               || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
               if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
                   codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
               /* Open decoder */
               ret = avcodec_open2(codec_ctx, dec, NULL);
               if (ret < 0) {
                   av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
                   return ret;
               }
           }
           stream_ctx[i].dec_ctx = codec_ctx;
       }

       av_dump_format(ifmt_ctx, 0, filename, 0);
       return 0;
    }

    static int open_output_file(const char *filename)
    {
       AVStream *out_stream;
       AVStream *in_stream;
       AVCodecContext *dec_ctx, *enc_ctx;
       AVCodec *encoder;
       int ret;
       unsigned int i;

       ofmt_ctx = NULL;
       avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
       if (!ofmt_ctx) {
           av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
           return AVERROR_UNKNOWN;
       }

       for (i = 0; i < ifmt_ctx->nb_streams; i++) {
           out_stream = avformat_new_stream(ofmt_ctx, NULL);
           if (!out_stream) {
               av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
               return AVERROR_UNKNOWN;
           }

           in_stream = ifmt_ctx->streams[i];
           dec_ctx = stream_ctx[i].dec_ctx;

           //ofmt_ctx->bit_rate = ifmt_ctx->bit_rate;
           ofmt_ctx->duration = ifmt_ctx->duration;

           if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
               || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
               /* in this example, we choose transcoding to same codec */
               encoder = avcodec_find_encoder(dec_ctx->codec_id);
               if (!encoder) {
                   av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                   return AVERROR_INVALIDDATA;
               }
               enc_ctx = avcodec_alloc_context3(encoder);
               if (!enc_ctx) {
                   av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
                   return AVERROR(ENOMEM);
               }

               /* In this example, we transcode to same properties (picture size,
               * sample rate etc.). These properties can be changed for output
               * streams easily using filters */
               if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                   enc_ctx->gop_size = dec_ctx->gop_size;
                   enc_ctx->bit_rate = dec_ctx->bit_rate;
                   enc_ctx->height = dec_ctx->height;
                   enc_ctx->width = dec_ctx->width;
                   enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
                   /* take first format from list of supported formats */
                   if (encoder->pix_fmts)
                       enc_ctx->pix_fmt = encoder->pix_fmts[0];
                   else
                       enc_ctx->pix_fmt = dec_ctx->pix_fmt;
                   /* video time_base can be set to whatever is handy and supported by encoder */
                   enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
                   enc_ctx->framerate = av_guess_frame_rate(ifmt_ctx, in_stream, NULL);
               }
               else {
                   enc_ctx->gop_size = dec_ctx->gop_size;
                   enc_ctx->bit_rate = dec_ctx->bit_rate;
                   enc_ctx->sample_rate = dec_ctx->sample_rate;
                   enc_ctx->channel_layout = dec_ctx->channel_layout;
                   enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
                   /* take first format from list of supported formats */
                   enc_ctx->sample_fmt = encoder->sample_fmts[0];
                   //enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
                   enc_ctx->time_base.num = 1;
                   enc_ctx->time_base.den = enc_ctx->sample_rate;

                   enc_ctx->framerate = av_guess_frame_rate(ifmt_ctx, in_stream, NULL);
               }

               /* Third parameter can be used to pass settings to encoder */
               ret = avcodec_open2(enc_ctx, encoder, NULL);
               if (ret < 0) {
                   av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                   return ret;
               }
               ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
               if (ret < 0) {
                   av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
                   return ret;
               }
               if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                   enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

               out_stream->time_base = enc_ctx->time_base;
               stream_ctx[i].enc_ctx = enc_ctx;
           }
           else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
               av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
               return AVERROR_INVALIDDATA;
           }
           else {
               /* if this stream must be remuxed */
               ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
               if (ret < 0) {
                   av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
                   return ret;
               }
               out_stream->time_base = in_stream->time_base;
           }

       }
       av_dump_format(ofmt_ctx, 0, filename, 1);

       if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
           ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
           if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
               return ret;
           }
       }

       /* init muxer, write output file header */
       ret = avformat_write_header(ofmt_ctx, NULL);
       if (ret < 0) {
           av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
           return ret;
       }

       return 0;
    }

    int main(int argc, char **argv)
    {
       int ret;
       AVPacket packet = {0};
       packet.data = NULL;
       packet.size = 0 ;
       AVFrame *frame = NULL;
       enum AVMediaType type;
       unsigned int stream_index;
       unsigned int i;
       int got_frame;
       int(*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

       if (argc != 3) {
           av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file="file" /> <output file="file">\n", argv[0]);
           return 1;
       }

       av_register_all();
       avfilter_register_all();

       if ((ret = open_input_file(argv[1])) < 0)
           goto end;
       if ((ret = open_output_file(argv[2])) < 0)
           goto end;

       /* read all packets */
       while (1) {
           if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
               break;
           stream_index = packet.stream_index;
           type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
           av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", stream_index);

           /* remux this frame without reencoding */
           av_packet_rescale_ts(&packet,
               ifmt_ctx->streams[stream_index]->time_base,
               ofmt_ctx->streams[stream_index]->time_base);

           ret = av_interleaved_write_frame(ofmt_ctx, &packet);
           if (ret < 0)
               goto end;

           av_packet_unref(&packet);
       }

       av_write_trailer(ofmt_ctx);
    end:
       av_packet_unref(&packet);
       av_frame_free(&frame);
       for (i = 0; i < ifmt_ctx->nb_streams; i++) {
           avcodec_free_context(&stream_ctx[i].dec_ctx);
           if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
               avcodec_free_context(&stream_ctx[i].enc_ctx);
       }
       av_free(stream_ctx);
       avformat_close_input(&ifmt_ctx);
       if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
           avio_closep(&ofmt_ctx->pb);
       avformat_free_context(ofmt_ctx);

       return ret ? 1 : 0;
    }

    This is slightly modified code from the official FFmpeg example transcoding.c.

    I only read packets from one stream and write them to another stream. It works fine.

    Proof below.
    [screenshot]

    Then I add a condition to main: if a packet contains a video frame, I decode it, then encode it and write it to the other stream. Nothing else is done with the frame.

    My additional code is below:

    static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
       int ret;
       int got_frame_local;
       AVPacket enc_pkt;
       int(*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) = avcodec_encode_video2 ;

       if (!got_frame)
           got_frame = &got_frame_local;

       av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
       /* encode filtered frame */
       enc_pkt.data = NULL;
       enc_pkt.size = 0;
       av_init_packet(&enc_pkt);
       ret = enc_func(stream_ctx[stream_index].enc_ctx, &enc_pkt,
           filt_frame, got_frame);
       if (ret < 0)
           return ret;
       if (!(*got_frame))
           return 0;

       /* prepare packet for muxing */
       enc_pkt.stream_index = stream_index;
       av_packet_rescale_ts(&enc_pkt,
           stream_ctx[stream_index].enc_ctx->time_base,
           ofmt_ctx->streams[stream_index]->time_base);

       av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
       /* mux encoded frame */
       ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
       return ret;
    }

    static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
    {
       int ret;
       av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");

       while (1) {
           av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");

           ret = encode_write_frame(frame, stream_index, NULL);
           if (ret < 0)
               break;
           break;
       }

       return ret;
    }


    int main(int argc, char **argv)
    {
       int ret;
       AVPacket packet = {0};
       packet.data = NULL;
       packet.size = 0 ;
       AVFrame *frame = NULL;
       enum AVMediaType type;
       unsigned int stream_index;
       unsigned int i;
       int got_frame;
       int(*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

       if (argc != 3) {
           av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file="file" /> <output file="file">\n", argv[0]);
           return 1;
       }

       av_register_all();
       avfilter_register_all();

       if ((ret = open_input_file(argv[1])) < 0)
           goto end;
       if ((ret = open_output_file(argv[2])) < 0)
           goto end;

       /* read all packets */
       while (1) {
           if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
               break;
           stream_index = packet.stream_index;
           type = ifmt_ctx->streams[packet.stream_index]->codecpar->codec_type;
           av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
               stream_index);

            if (type == AVMEDIA_TYPE_VIDEO)
            {
                av_log(NULL, AV_LOG_DEBUG, "Going to reencode&amp;filter the frame\n");
                frame = av_frame_alloc();
                if (!frame) {
                    ret = AVERROR(ENOMEM);
                    break;
                }
                 av_packet_rescale_ts(&packet,
                     ifmt_ctx->streams[stream_index]->time_base,
                     stream_ctx[stream_index].dec_ctx->time_base);
                 dec_func = avcodec_decode_video2;
                 ret = dec_func(stream_ctx[stream_index].dec_ctx, frame,
                     &got_frame, &packet);
                 if (ret < 0) {
                     av_frame_free(&frame);
                    av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                    break;
                }

                if (got_frame) {
                    frame->pts = frame->best_effort_timestamp;
                    ret = filter_encode_write_frame(frame, stream_index);
                     av_frame_free(&frame);
                     if (ret < 0)
                         goto end;
                 }
                 else {
                     av_frame_free(&frame);
                }
            }
            else
           {
               /* remux this frame without reencoding */
               av_packet_rescale_ts(&packet,
                   ifmt_ctx->streams[stream_index]->time_base,
                   ofmt_ctx->streams[stream_index]->time_base);

               ret = av_interleaved_write_frame(ofmt_ctx, &packet);
               if (ret < 0)
                   goto end;
           }
           av_packet_unref(&packet);
       }

       av_write_trailer(ofmt_ctx);
    end:
       av_packet_unref(&packet);
       av_frame_free(&frame);
       for (i = 0; i < ifmt_ctx->nb_streams; i++) {
           avcodec_free_context(&stream_ctx[i].dec_ctx);
           if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
               avcodec_free_context(&stream_ctx[i].enc_ctx);
       }
       av_free(stream_ctx);
       avformat_close_input(&ifmt_ctx);
       if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
           avio_closep(&ofmt_ctx->pb);
       avformat_free_context(ofmt_ctx);

       return ret ? 1 : 0;
    }

    And the result is different.

    For a test I took SampleVideo_1280x720_1mb.flv.
    It has

    File size : 1.00 MiB
    Overall bit rate : 1 630 kb/s

    After my decode/encode actions the result became:

    File size : 1.23 MiB
    Overall bit rate : 2 005 kb/s

    Other parameters (video bit rate, audio bit rate, etc.) are the same.
    [screenshot]

    What am I doing wrong? How do I control the overall bit rate? I suppose something is wrong with the encoder/decoder setup, but what?

    UPD:
    I found that when, in the function open_input_file, I write

    if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
    {
        codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
    }

    I get the result described above (bigger size and bit rate).

    And when in this function I write

    if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
    {
       codec_ctx = ifmt_ctx->streams[i]->codec;
    }

    I get smaller size and bit rate.

    File size : 900 KiB
    Overall bit rate : 1 429 kb/s

    But how do I get exactly the same size and frame rate as in the original file?
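
    For illustration only (this sketch is not from the original post): the "Overall bit rate" shown above is essentially file size divided by duration, so it is governed by the encoder's rate control. If dec_ctx->bit_rate happens to be 0 (containers do not always record it), an H.264 encoder such as libx264 can fall back to its own default rate control and easily produce a different file size than the source. One way to pin this down is to set the rate-control fields of the encoder context explicitly in open_output_file; the assignments below are placeholders, not settings taken from the post:

    /* Hypothetical addition to the video branch of open_output_file:
     * constrain the encoder's rate control instead of relying on defaults.
     * All field names are standard AVCodecContext members. */
    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        enc_ctx->bit_rate       = dec_ctx->bit_rate;       /* target average rate (may be 0!) */
        enc_ctx->rc_max_rate    = dec_ctx->bit_rate;       /* cap on the instantaneous rate   */
        enc_ctx->rc_buffer_size = (int)dec_ctx->bit_rate;  /* VBV buffer size, in bits        */
        /* For libx264 one could instead use quality-based rate control, e.g.:
         * av_opt_set(enc_ctx->priv_data, "crf", "23", 0); */
    }

    Whether this reproduces the source file exactly is a separate matter: the original encoder's settings are not stored in the file, so an identical overall bit rate can only be approximated, not guaranteed.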

  • h264 lossless coding

    19 July 2022, by cloudraven

    Is it possible to do completely lossless encoding in h264? By lossless, I mean that if I feed it a series of frames and encode them, and then if I extract all the frames from the encoded video, I will get the exact same frames as in the input, pixel by pixel, frame by frame. Is that actually possible? Take this example:

    I generate a bunch of frames, then I encode the image sequence to an uncompressed AVI (with something like VirtualDub). I then apply lossless h264 (the help files claim that setting --qp 0 gives lossless compression, but I am not sure if that means there is no loss at any point of the process or just that the quantization is lossless). I can then extract the frames from the resulting h264 video with something like mplayer.

    I tried with Handbrake first, but it turns out it doesn't support lossless encoding. I tried x264 but it crashes. It may be because my source AVI file is in RGB colorspace instead of YV12. I don't know how to feed a series of YV12 bitmaps to x264, or in what format, anyway, so I cannot even try.

    In summary, what I want to know is whether there is a way to go from


    Series of lossless bitmaps (in any colorspace) -> some transformation -> h264 encode -> h264 decode -> some transformation -> the original series of lossless bitmaps

    Is there a way to achieve this?

    EDIT: There is a VERY valid point about lossless H264 not making too much sense. I am well aware that there is no way I could tell (with just my eyes) the difference between an uncompressed clip and one compressed at a high rate in H264, but I don't think it is without uses. For example, it may be useful for storing video for editing without taking huge amounts of space, without losing quality, and without spending too much encoding time every time the file is saved.

    UPDATE 2: Now x264 doesn't crash. I can use as sources either AviSynth or lossless YV12 Lagarith (to avoid the colorspace compression warning). However, even with --qp 0 and an RGB or YV12 source I still get some differences, minimal but present. This is troubling, because all the information I have found on lossless predictive coding (--qp 0) claims that the whole encoding should be lossless, but I am unable to verify this.
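
    An illustrative aside (not from the original question): --qp 0 makes the H.264 encoding itself lossless, but any colorspace conversion performed before the encoder is not. Going from RGB to YV12 (4:2:0) subsamples the chroma planes, so small differences are expected unless the comparison is done in the same YUV format that was fed to the encoder, or the RGB data is encoded directly (x264 / FFmpeg's libx264rgb can do this under the High 4:4:4 profile). A minimal sketch using FFmpeg's libx264 wrapper, assuming FFmpeg was built with libx264 ("qp" is a libx264 private option; the frame rate is a placeholder):

    /* Sketch only: open an H.264 encoder context configured for lossless output. */
    #include <libavcodec/avcodec.h>
    #include <libavutil/opt.h>

    static AVCodecContext *open_lossless_h264(int width, int height)
    {
        AVCodec *codec = avcodec_find_encoder_by_name("libx264");
        AVCodecContext *ctx = codec ? avcodec_alloc_context3(codec) : NULL;
        if (!ctx)
            return NULL;
        ctx->width     = width;
        ctx->height    = height;
        ctx->time_base = (AVRational){ 1, 25 };         /* placeholder frame rate          */
        ctx->pix_fmt   = AV_PIX_FMT_YUV444P;            /* 4:4:4 avoids chroma subsampling */
        av_opt_set(ctx->priv_data, "qp", "0", 0);       /* lossless quantizer              */
        if (avcodec_open2(ctx, codec, NULL) < 0) {
            avcodec_free_context(&ctx);
            return NULL;
        }
        return ctx;
    }

    Frames sent to such a context and decoded back should compare bit-exactly, as long as no pixel-format conversion happens on either side of the encoder.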

