Advanced search

Media (0)

No media matching your criteria is available on this site.

Other articles (74)

  • User profiles

    12 April 2011, by

    Each user has a profile page where they can edit their personal information. In the default top-of-page menu, a menu item is created automatically when MediaSPIP is initialized; it is visible only when the visitor is logged in on the site.
    The user can edit their profile from their author page; a "Modifier votre profil" ("Edit your profile") link in the navigation is (...)

  • Configuring language support

    15 November 2010, by

    Accessing the configuration and adding supported languages
    To configure support for new languages, you need to go to the "Administrer" section of the site.
    From there, the navigation menu gives access to a "Gestion des langues" (language management) section where support for new languages can be activated.
    Each newly added language can still be deactivated as long as no object has been created in that language. After that, it becomes greyed out in the configuration and (...)

  • Authorizations overridden by plugins

    27 April 2010, by

    MediaSPIP core
    autoriser_auteur_modifier() so that visitors are able to edit their information on the author page

On other sites (12580)

  • Delphi, TBitmap (RGB) to YCbCr color format

    18 October 2019, by Alexander M.

    We have a video encoding example from http://www.delphiffmpeg.com and need to convert a set of TBitmaps to YCbCr (YUV). How should we do it? The example fills the frame with dummy colors:

     (* encode 1 second of video *)
     idx := 1;
     for i := 0 to 25 - 1 do
     begin
       av_init_packet(@pkt);
       pkt.data := nil;    // packet data will be allocated by the encoder
       pkt.size := 0;

       //fflush(stdout);
       (* prepare a dummy image *)
       (* Y *)
       for y := 0 to c.height - 1 do
         for x := 0 to c.width - 1 do
           PByte(@PAnsiChar(frame.data[0])[y * frame.linesize[0] + x])^ := x + y + i * 3;

       (* Cb and Cr *)
       for y := 0 to c.height div 2 - 1 do
         for x := 0 to c.width div 2 - 1 do
         begin
           PByte(@PAnsiChar(frame.data[1])[y * frame.linesize[1] + x])^ := 128 + y + i * 2;
           PByte(@PAnsiChar(frame.data[2])[y * frame.linesize[2] + x])^ := 64 + x + i * 5;
         end;

       frame.pts := i;

       (* encode the image *)
       ret := avcodec_encode_video2(c, @pkt, frame, @got_output);
       if ret < 0 then
       begin
         Writeln(ErrOutput, 'Error encoding frame');
         ExitCode := 1;
         Exit;
       end;

       if got_output <> 0 then
       begin
         Writeln(Format('Write frame %d (size=%d)', [idx, pkt.size]));
         FileWrite(f, pkt.data^, pkt.size);
         av_packet_unref(@pkt);
         Inc(idx);
       end;
     end;

    But we need to convert bitmaps to YCbCr instead of filling the pixels with a dummy image. Here is the full source code:

    (*
    * Video encoding example
    *)
    procedure video_encode_example(const filename: string; codec_id: TAVCodecID);
    const
     endcode: array[0..3] of Byte = ( 0, 0, 1, $b7 );
    var
     codec: PAVCodec;
     c: PAVCodecContext;
     idx, i, ret, x, y, got_output: Integer;
     f: THandle;
     frame: PAVFrame;
     pkt: TAVPacket;
    begin
     Writeln(Format('Encode video file %s', [filename]));

     (* find the mpeg1 video encoder *)
     codec := avcodec_find_encoder(codec_id);
     if not Assigned(codec) then
     begin
       Writeln(ErrOutput, 'Codec not found');
       ExitCode := 1;
       Exit;
     end;

     c := avcodec_alloc_context3(codec);
     if not Assigned(c) then
     begin
       Writeln(ErrOutput, 'Could not allocate video codec context');
       ExitCode := 1;
       Exit;
     end;

     (* put sample parameters *)
     c.bit_rate := 400000;
     (* resolution must be a multiple of two *)
     c.width := 352;
     c.height := 288;
     (* frames per second *)
     c.time_base.num := 1;
     c.time_base.den := 25;
     (* emit one intra frame every ten frames
      * check frame pict_type before passing frame
      * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
      * then gop_size is ignored and the output of encoder
      * will always be I frame irrespective to gop_size
      *)
     c.gop_size := 10;
     c.max_b_frames := 1;
     c.pix_fmt := AV_PIX_FMT_YUV420P;

     if codec_id = AV_CODEC_ID_H264 then
       av_opt_set(c.priv_data, 'preset', 'slow', 0);

     (* open it *)
     if avcodec_open2(c, codec, nil) < 0 then
     begin
       Writeln(ErrOutput, 'Could not open codec');
       ExitCode := 1;
       Exit;
     end;

     f := FileCreate(filename);
     if f = INVALID_HANDLE_VALUE then
     begin
       Writeln(ErrOutput, Format('Could not open %s', [filename]));
       ExitCode := 1;
       Exit;
     end;

     frame := av_frame_alloc();
     if not Assigned(frame) then
     begin
       Writeln(ErrOutput, 'Could not allocate video frame');
       ExitCode := 1;
       Exit;
     end;
     frame.format := Ord(c.pix_fmt);
     frame.width  := c.width;
     frame.height := c.height;

     (* the image can be allocated by any means and av_image_alloc() is
      * just the most convenient way if av_malloc() is to be used *)
     ret := av_image_alloc(@frame.data[0], @frame.linesize[0], c.width, c.height,
                          c.pix_fmt, 32);
     if ret < 0 then
     begin
       Writeln(ErrOutput, 'Could not allocate raw picture buffer');
       ExitCode := 1;
       Exit;
     end;

     (* encode 1 second of video *)
     idx := 1;
     for i := 0 to 25 - 1 do
     begin
       av_init_packet(@pkt);
       pkt.data := nil;    // packet data will be allocated by the encoder
       pkt.size := 0;

       //fflush(stdout);
       (* prepare a dummy image *)
       (* Y *)
       for y := 0 to c.height - 1 do
         for x := 0 to c.width - 1 do
           PByte(@PAnsiChar(frame.data[0])[y * frame.linesize[0] + x])^ := x + y + i * 3;

       (* Cb and Cr *)
       for y := 0 to c.height div 2 - 1 do
         for x := 0 to c.width div 2 - 1 do
         begin
           PByte(@PAnsiChar(frame.data[1])[y * frame.linesize[1] + x])^ := 128 + y + i * 2;
           PByte(@PAnsiChar(frame.data[2])[y * frame.linesize[2] + x])^ := 64 + x + i * 5;
         end;

       frame.pts := i;

       (* encode the image *)
       ret := avcodec_encode_video2(c, @pkt, frame, @got_output);
       if ret < 0 then
       begin
         Writeln(ErrOutput, 'Error encoding frame');
         ExitCode := 1;
         Exit;
       end;

       if got_output <> 0 then
       begin
         Writeln(Format('Write frame %d (size=%d)', [idx, pkt.size]));
         FileWrite(f, pkt.data^, pkt.size);
         av_packet_unref(@pkt);
         Inc(idx);
       end;
     end;

     (* get the delayed frames *)
     repeat
       //fflush(stdout);

       ret := avcodec_encode_video2(c, @pkt, nil, @got_output);
       if ret < 0 then
       begin
         Writeln(ErrOutput, 'Error encoding frame');
         ExitCode := 1;
         Exit;
       end;

       if got_output <> 0 then
       begin
         Writeln(Format('Write frame %d (size=%d)', [idx, pkt.size]));
         FileWrite(f, pkt.data^, pkt.size);
         av_packet_unref(@pkt);
         Inc(idx);
       end;
     until got_output = 0;

     (* add sequence end code to have a real mpeg file *)
     FileWrite(f, endcode[0], SizeOf(endcode));
     FileClose(f);

     avcodec_close(c);
     av_free(c);
     av_freep(@frame.data[0]);
     av_frame_free(@frame);
     Writeln('');
    end;

    Yes, we know this formula, but what should we do in the (* Cb and Cr *) loop, which only goes up to c.height div 2 - 1 and c.width div 2 - 1? All our experiments produce correct image geometry but incorrect colors... Here is what we have:

    (* Y *)
       for y := 0 to c.height - 1 do
       begin
         Line := image.ScanLine[y];
         for x := 0 to c.width - 1 do
         begin
           Yy := Round(Line[x].R * 0.29900 + Line[x].G * 0.58700 + Line[x].B * 0.11400);
           PByte(@PAnsiChar(frame.data[0])[y * frame.linesize[0] + x])^ := Yy;
         end;
       end;
       (* Cb and Cr *)
       for y := 0 to c.height div 2 - 1 do
       begin
         Pixels := image.ScanLine[y];
         for x := 0 to c.width div 2 - 1 do
         begin
           Cb := Round(Line[x].R * -0.16874 - Line[x].G * 0.33126 + Line[x].B * 0.50000) + 128;
           Cr := Round(Line[x].R * 0.50000 - Line[x].G * 0.41869 - Line[x].B * 0.08131) + 64;
           PByte(@PAnsiChar(frame.data[1])[y * frame.linesize[1] + x])^ := Cr;
           PByte(@PAnsiChar(frame.data[2])[y * frame.linesize[2] + x])^ := Cb;
           //PByte(@PAnsiChar(frame.data[1])[y * frame.linesize[1] + x])^ := 128 + y + i * 2;
           //PByte(@PAnsiChar(frame.data[2])[y * frame.linesize[2] + x])^ := 64 + x + i * 5;
         end;
       end;

    How should this be fixed?
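
    For reference, a minimal sketch of one common fix, under stated assumptions: the bitmap is read through the same Line[x].R/G/B record layout as above (that type isn't shown in the original), chroma is point-sampled at 4:2:0 (one sample per 2x2 block, taken from the full-resolution pixel at 2x, 2y), both Cb and Cr use the +128 offset of the full-range BT.601 formulas, and for AV_PIX_FMT_YUV420P the Cb values go to frame.data[1] and the Cr values to frame.data[2]:

     (* Sketch: 4:2:0 chroma, point-sampled from the full-resolution bitmap *)
     for y := 0 to c.height div 2 - 1 do
     begin
       Line := image.ScanLine[y * 2];   // full-resolution source row 2y
       for x := 0 to c.width div 2 - 1 do
       begin
         (* full-range BT.601: both chroma planes are centered at 128 *)
         Cb := Round(-0.16874 * Line[x * 2].R - 0.33126 * Line[x * 2].G + 0.50000 * Line[x * 2].B) + 128;
         Cr := Round( 0.50000 * Line[x * 2].R - 0.41869 * Line[x * 2].G - 0.08131 * Line[x * 2].B) + 128;
         PByte(@PAnsiChar(frame.data[1])[y * frame.linesize[1] + x])^ := Cb;  // data[1] holds Cb (U)
         PByte(@PAnsiChar(frame.data[2])[y * frame.linesize[2] + x])^ := Cr;  // data[2] holds Cr (V)
       end;
     end;

    Averaging the four pixels of each 2x2 block instead of point sampling gives smoother chroma, and sws_scale can perform the whole RGB-to-YUV420P conversion in one call.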

  • Duration change after transcoding ts

    25 December 2017, by Feilong Luo

    I have a problem transcoding with ffmpeg.

    I want to convert an m3u8 to mp4, so I transcode every ts file first and then concat them into an mp4, but I found that the duration ends up bigger than the source file's.
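
    (The concat step isn't shown in the post; presumably it is something like ffmpeg's concat demuxer, with a hypothetical list.txt naming the transcoded segments:)

     # hypothetical reconstruction of the concat step, not from the original post
     # list.txt contains one line per segment, e.g.:  file 'test_20.ts'
     ffmpeg -f concat -safe 0 -i list.txt -c copy out.mp4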

    The source file is:
    http://oc7iy3eta.bkt.clouddn.com/src_20.ts

    After transcoding, the test file is:
    http://oc7iy3eta.bkt.clouddn.com/test_20.ts

    I use the command below to change it to 5 fps and a 400k bitrate:

    sudo ffmpeg -analyzeduration 2147483647 -probesize 2147483647 -nostdin -y -v warning -i ./src_20.ts -threads 3 -movflags faststart -metadata:s:v rotate=0 -chunk_duration 520000 -video_track_timescale 25000 -pix_fmt yuv420p -copytb 1 -vcodec libx264 -b:v 400000 -minrate 400000 -maxrate 400000 -bufsize 500k -force_key_frames "expr:gte(t,n_forced*2)" -vsync 1 -r 5 -s 544x960 -acodec libfaac -async 1 ./test_20.ts

    I use ffprobe to inspect the video info:

    Source file info:

    Duration: 00:00:01.26, start: 28.346989, bitrate: 921 kb/s
    Program 1
    Metadata:
    service_name: Service01
    service_provider: FFmpeg
    Stream #0:0[0x100]: Audio: aac ([15][0][0][0] / 0x000F), 44100 Hz, stereo, fltp, 23 kb/s
    Stream #0:1[0x101]: Video: h264 (High) ([27][0][0][0] / 0x001B), yuv420p, 544x960, 10.67 tbr, 90k tbn, 180k tbc

    Test file:

    Input #0, mpegts, from 'test_20.ts':
    Duration: 00:00:01.62, start: 1.576778, bitrate: 447 kb/s
    Program 1
    Metadata:
    service_name: Service01
    service_provider: FFmpeg
    Stream #0:0[0x100]: Video: h264 (High) ([27][0][0][0] / 0x001B), yuv420p, 544x960, 5 fps, 5 tbr, 90k tbn, 10 tbc
    Stream #0:1[0x101]: Audio: aac ([15][0][0][0] / 0x000F), 44100 Hz, stereo, fltp, 5 kb/s

    =======================================================================

    Question

    So we can see that the duration of the source file is 1.26 s, but after transcoding, the test file is 1.62 s.

    Why? Can anybody help?
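
    (A diagnostic suggestion, not from the original post: the container duration is driven by the longest stream, so comparing per-stream durations in both files shows whether the video or the audio stream grew. Standard ffprobe options can do that:)

     # compare per-stream durations in the source and the transcoded file
     ffprobe -v error -show_entries stream=index,codec_type,duration -of csv=p=0 src_20.ts
     ffprobe -v error -show_entries stream=index,codec_type,duration -of csv=p=0 test_20.ts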

  • ffmpeg API muxing h264 encoded frames to mkv

    24 March 2017, by Pawel K

    Hi, I'm having some problems muxing h264-encoded frames into an mkv container using the code of ffmpeg-3.2.4.
    I have ended up with the following code, which is a mashup of code found on SO and the muxing.c example of ffmpeg:
    (and yes, I am aware that it is ugly, no errors checked etc.; it is meant to be like that for clarity :) )

    char *filename = "out.mkv";
    const uint8_t SPS[] = { 0x67, 0x42, 0x40, 0x1F, 0x96, 0x54, 0x02, 0x80, 0x2D, 0xD0, 0x0F, 0x39, 0xEA };
    const uint8_t PPS[] = { 0x68, 0xCE, 0x38, 0x80 };
    int fps = 5;

    typedef struct OutputStream
    {
      AVStream *st;
      AVCodecContext *enc;

      /* pts of the next frame that will be generated */
      int64_t next_pts;
      int samples_count;
      AVFrame *frame;
      AVFrame *tmp_frame;
      float t, tincr, tincr2;
      struct SwsContext *sws_ctx;
      struct SwrContext *swr_ctx;
    } OutputStream;

    static void avlog_cb(void *s, int level, const char *szFmt, va_list varg)
    {
      vprintf(szFmt, varg);
    }

    void main()
    {
      AVOutputFormat *fmt;
      AVFormatContext *formatCtx;
      AVCodec *audio_codec;
      AVCodec *video_codec;
      OutputStream video_st = { 0 };
      OutputStream audio_st = { 0 };
      av_register_all();

      av_log_set_level(AV_LOG_TRACE);
      //av_log_set_callback(avlog_cb);

      //allocate output and format ctxs
      avformat_alloc_output_context2(&formatCtx, NULL, NULL, filename);
      fmt = formatCtx->oformat;

      //allocate streams
      video_codec = avcodec_find_encoder(fmt->video_codec);
      video_st.st = avformat_new_stream(formatCtx, NULL);
      video_st.st->id = 0;

      AVCodecContext *codecCtx =  avcodec_alloc_context3(video_codec);
      fmt->video_codec = AV_CODEC_ID_H264;
      video_st.enc = codecCtx;

      codecCtx->codec_id = fmt->video_codec;
      codecCtx->bit_rate = 400000;
      codecCtx->width  = 1080;
      codecCtx->height = 720;
      codecCtx->profile = FF_PROFILE_H264_CONSTRAINED_BASELINE;
      codecCtx->level = 31;

      video_st.st->time_base = (AVRational){ 1, fps };
      codecCtx->time_base = video_st.st->time_base;
      codecCtx->gop_size = 4;
      codecCtx->pix_fmt = AV_PIX_FMT_YUV420P;

      //open video codec
      codecCtx->extradata_size = 24;
      codecCtx->extradata = (uint8_t *)av_malloc(codecCtx->extradata_size);
      uint8_t extra_data_array[] = { 0x01, SPS[1], SPS[2], SPS[3], 0xFF, 0xE1, 0xc0, 0, 0x42, 0x40, 0x1F, 0x96, 0x54, 0x02, 0x80, 0x2D, 0xD0, 0x0F, 0x39, 0xEA, 0x03, 0xCE, 0x38, 0x80 };
      memcpy(codecCtx->extradata, extra_data_array, codecCtx->extradata_size);

      AVCodecContext *c = video_st.enc;
      AVDictionary *opt = NULL;
      avcodec_open2(c, video_codec, &opt);
      avcodec_parameters_from_context(video_st.st->codecpar, c);

      //open output file
      avio_open(&formatCtx->pb, filename, AVIO_FLAG_WRITE);

      //write header
      int res = avformat_write_header(formatCtx, NULL);

      //write frames

      // get the frames from file
      uint32_t u32frameCnt = 0;

      do
      {
         int8_t i8frame_name[64] = "";

         uint8_t  *pu8framePtr = NULL;
         AVPacket pkt = { 0 };

         av_init_packet(&pkt);
         sprintf(i8frame_name, "frames/frame%d.bin", u32frameCnt++);
         //reading frames from files
         FILE *ph264Frame = fopen(i8frame_name, "r");
         if(NULL == ph264Frame)
         {
            goto leave;
         }

         //get file size
         fseek(ph264Frame, 0L, SEEK_END);
         uint32_t u32file_size = 0;
         u32file_size = ftell(ph264Frame);
         fseek(ph264Frame, 0L, SEEK_SET);

         pu8framePtr = malloc(u32file_size);
         uint32_t u32readout = fread(pu8framePtr, 1, u32file_size, ph264Frame);

         //if the read frame is a key frame i.e. nalu hdr type = 5 set it as a key frame
         if(0x65 == pu8framePtr[4])
         {
            pkt.flags = AV_PKT_FLAG_KEY;
         }
         pkt.data = (uint8_t *)pu8framePtr;
         pkt.size = u32readout;
         pkt.pts  = u32frameCnt;
         pkt.dts  = pkt.pts;

         av_packet_rescale_ts(&pkt, c->time_base, video_st.st->time_base);
         pkt.stream_index = video_st.st->index;
         av_interleaved_write_frame(formatCtx, &pkt);
         free(pu8framePtr);
         fclose(ph264Frame);
      }
      while(1);
    leave:

      av_write_trailer(formatCtx);
      av_dump_format(formatCtx, 0, filename, 1);
      avcodec_free_context(&video_st.enc);
      avio_closep(&formatCtx->pb);
      avformat_free_context(formatCtx);
    }

    It can be compiled with the following command line (after adding headers):

    gcc file.c -o test_app -I/usr/local/include -L/usr/local/lib -lxcb-shm -lxcb -lX11 -lx264 -lm -lz -pthread -lswresample -lswscale -lavcodec -lavformat -lavdevice -lavutil

    The files being read are a valid Annex B stream (valid as in: playable in VLC after concatenating them into a file); it is Constrained Baseline 3.1 profile H264 and comes from an IP camera's interleaved RTCP/RTP stream (demuxed).

    The result is... well, I don't see the picture. I get only a black screen with the progress bar and timer running. I don't know whether I set up the codecs and streams incorrectly or whether it's just wrong timestamps.
    I know I got them wrong in some manner, but I don't fully understand yet how to calculate the correct presentation times: both the stream and the codec contain a time_base field, and I know that the sample rate of the video is 90 kHz and the frame rate is 5 fps.
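
    For what it's worth, a minimal sketch of that timestamp arithmetic, assuming 5 fps and that pts values are kept as plain frame indices and then rescaled to whatever time_base the muxer settled on (frame_index_to_pts is a hypothetical helper, not an ffmpeg API):

     #include <libavutil/mathematics.h>
     #include <libavutil/rational.h>

     /* Map a 0-based frame index at a fixed frame rate to a pts expressed in
      * the stream's time_base (Matroska typically ends up with 1/1000 after
      * avformat_write_header). Frame 3 at 5 fps -> pts 600 in 1/1000 ticks. */
     static int64_t frame_index_to_pts(int64_t frame_idx, int fps, AVRational stream_tb)
     {
         AVRational frame_tb = { 1, fps };  /* one tick per frame */
         return av_rescale_q(frame_idx, frame_tb, stream_tb);
     }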

    On top of it all, the examples I've found have, to some extent, deprecated parts that change the flow/meaning of the application, which doesn't help at all. So if anyone could help, I (and I would guess not only me) would appreciate it.

    Regards, Pawel