Advanced search

Media (0)

Keyword: - Tags - / logo

No media matching your criteria is available on this site.

Other articles (67)

  • Customising by adding your logo, banner or background image

    5 September 2013

    Some themes support three customisation elements: adding a logo; adding a banner; adding a background image.

  • Writing a news item

    21 June 2013

    Present changes to your MédiaSPIP, or news about your projects, on your MédiaSPIP using the news section.
    In MédiaSPIP's default "spipeo" theme, news items are displayed at the bottom of the main page, below the editorials.
    You can customise the news creation form.
    News creation form: for a document of the "news" type, the fields offered by default are: publication date (customise the publication date) (...)

  • Publishing on MédiaSpip

    13 June 2013

    Can I post content from an iPad tablet?
    Yes, if your Médiaspip installation is at version 0.2 or higher. If in doubt, contact the administrator of your MédiaSpip to find out.

On other sites (8192)

  • FFmpeg c++ H264 decoding error

    12 April 2015, by Yoohoo

    I have a program which captures video from a webcam, encodes it with ffmpeg, and writes the encoded packets to a buffer. On the receiver side it reads from the buffer, decodes with ffmpeg and plays the result.

    For testing I have now merged the sender and the receiver into one program. It works fine with AV_CODEC_ID_MPEG1VIDEO, but when I change the codec to AV_CODEC_ID_H264, the decoding stage reports an error:

    (screenshot of the decoder error)

    The whole program is below FYI; I added a loop so that the whole process runs twice.

    What is the cause of the error, and is there anything special about H264? Thanks in advance! (A parser-based decoding sketch follows the listing.)

    #include <stdio.h>

    extern "C" {

    #include <libavutil/opt.h>
    #include <libavcodec/avcodec.h>
    #include <libavutil/channel_layout.h>
    #include <libavutil/common.h>
    #include <libavutil/imgutils.h>
    #include <libavutil/mathematics.h>
    #include <libavutil/samplefmt.h>
    #include <libswscale/swscale.h>
    #include "v4l2.h"
    }
    #include "opencv2/highgui/highgui.hpp"
    #include <iostream>

    using namespace cv;
    using namespace std;
    #define INBUF_SIZE 4096
    static uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];



    static AVCodec *codec;
    static AVCodecContext *c= NULL;
    static int  ret,   got_output;
    static int frame_count;
    static FILE *f;

    static AVPacket pkt;
    static AVFrame *frame;
    static AVFrame *frameDecode;
    static AVFrame *framergb;
    static uint8_t endcode[] = { 0, 0, 1, 0xb7 };
    static AVPacket avpkt;
    int totalSize=0;

    #define SUBSITY     3



    static int decode_write_frame(AVCodecContext *avctx,
                             AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
    {
    int len, got_frame;
    char buf[1024];
    struct SwsContext *convert_ctx;
    Mat m;
    AVFrame dst;

    len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
    if (len < 0) {
       fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
       return len;
    }
    if (got_frame) {
       printf("Saving %s frame %3d\n", last ? "last " : "", *frame_count);
       fflush(stdout);

    int w = avctx->width;
    int h = avctx->height;

    /*convert AVFrame to opencv Mat frame*/

    m = cv::Mat(h, w, CV_8UC3);
    dst.data[0] = (uint8_t *)m.data;
    avpicture_fill( (AVPicture *)&dst, dst.data[0], PIX_FMT_BGR24, w, h);

    enum PixelFormat src_pixfmt = (enum PixelFormat)frame->format;
    enum PixelFormat dst_pixfmt = PIX_FMT_BGR24;
    convert_ctx = sws_getContext(w, h, src_pixfmt, w, h, dst_pixfmt,
                       SWS_FAST_BILINEAR, NULL, NULL, NULL);

    if(convert_ctx == NULL) {
       fprintf(stderr, "Cannot initialize the conversion context!\n");
       exit(1);
    }

    sws_scale(convert_ctx, frame->data, frame->linesize, 0, h,
                       dst.data, dst.linesize);


       imshow("MyVideo", m);
       //video.write(m);
       waitKey(10); //wait next frame time


       (*frame_count)++;
    }
    if (pkt->data) {
       pkt->size -= len;
       pkt->data += len;
    }
    return 0;
    }


    static void video_decode_example(char *inbufout)
    {
    int bytes;
    uint8_t *buffer;

    av_init_packet(&avpkt);


    memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);


    codec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!codec) {
       fprintf(stderr, "Codec not found\n");
       exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
       fprintf(stderr, "Could not allocate video codec context\n");
       exit(1);
    }

    if(codec->capabilities & CODEC_CAP_TRUNCATED)
       c->flags |= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */


    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
       fprintf(stderr, "Could not open codec\n");
       exit(1);
    }


    frameDecode = avcodec_alloc_frame();
    if (!frameDecode) {
       fprintf(stderr, "Could not allocate video frame\n");
       exit(1);
    }


    /*
    bytes=avpicture_get_size(PIX_FMT_RGB24, CAMER_WIDTH, CAMER_HEIGHT);
    buffer=(uint8_t *)av_malloc(bytes*sizeof(uint8_t));
    avpicture_fill((AVPicture *)framergb, buffer, PIX_FMT_RGB24,
                   CAMER_WIDTH, CAMER_HEIGHT);
    */

    frame_count = 0;

    namedWindow("MyVideo",CV_WINDOW_AUTOSIZE); //create a window called "MyVideo"


    int size1=0;
    for(;;) {

       memcpy(inbuf,inbufout+size1,INBUF_SIZE);
       size1+=INBUF_SIZE;
       if (size1>(totalSize-INBUF_SIZE))
           break;
       avpkt.size=INBUF_SIZE;


       avpkt.data = inbuf;

       /*frame by frame process*/

       while (avpkt.size > 0)
           if (decode_write_frame(c, frameDecode, &frame_count, &avpkt, 0) < 0)
               exit(1);
    }
    avpkt.data = NULL;
    avpkt.size = 0;
    decode_write_frame(c, frameDecode, &frame_count, &avpkt, 1);
    }
    static void init_video_encode(const char *filename, AVCodecID codec_id, int max_f)
    {

    printf("Encode video file %s\n", filename);

    /* find the mpeg1 video encoder */
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
       fprintf(stderr, "Codec not found\n");
       exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
       fprintf(stderr, "Could not allocate video codec context\n");
       exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 640;
    c->height = 480;
    /* frames per second */
    c->time_base= (AVRational){1,25};
    c->gop_size = 10; /* emit one intra frame every ten frames */
    c->max_b_frames=max_f;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    if(codec_id == AV_CODEC_ID_H264)
       av_opt_set(c->priv_data, "preset", "slow", 0);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
       fprintf(stderr, "Could not open codec\n");
       exit(1);
    }

    /* open the output file used by video_encode() and cancle_encode() */
    f = fopen(filename, "wb");
    if (!f) {
       fprintf(stderr, "Could not open %s\n", filename);
       exit(1);
    }

    frame = avcodec_alloc_frame();
    if (!frame) {
       fprintf(stderr, "Could not allocate video frame\n");
       exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;


    ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
                        c->pix_fmt, 32);

    /* check that the raw picture buffer was allocated */
    if (ret < 0) {
       fprintf(stderr, "Could not allocate raw picture buffer\n");
       exit(1);
    }

    printf("\n");
    }

    int video_encode(int frameNo,char *inbufout)
    {
    static int count = 0;
    static int i = 0;

    /* encode 1 frame of video */
    av_init_packet(&pkt);
    pkt.data = NULL;    // packet data will be allocated by the encoder
    pkt.size = 0;
    //cout<<"\nBefore YUV\n";
    if(count == 0)
    read_yuv420(frame->data[0]);
    count ++;

    if(count == SUBSITY) {
    count = 0;
    }

    frame->pts = i++;

    /* encode the image */
    ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
    if (ret < 0) {
        fprintf(stderr, "Error encoding frame\n");
        return -1;
    }

    if (got_output) {
        printf("Write frame %3d (size=%5d)\n", i, pkt.size);
        memcpy(inbufout+totalSize,pkt.data,pkt.size);
        totalSize+=pkt.size;
        fwrite(pkt.data, 1, pkt.size, f);

        av_free_packet(&pkt);
    }
    return 0;
    }

    void cancle_encode(void)
    {
    fclose(f);
    avcodec_close(c);
    av_free(c);
    av_freep(&frame->data[0]);
    avcodec_free_frame(&frame);
    }


    int main(int argc, char **argv)
    {
    int i;


    char inbufout[25*50*(INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE)];
    if(init_v4l2() < 0) {
    printf("can't open camera\n");
    return 0;
    }

    /* register all the codecs */
    avcodec_register_all();



    for(int j=0;j<2;j++){
       //init_video_encode("test.mpg", AV_CODEC_ID_MPEG1VIDEO, 15);
       init_video_encode("test.mpg", AV_CODEC_ID_H264, 15);
       //for(i = 0;i< 10*15;i++ ) {
       for(i = 0;i< 25*10;i++ ) {
       if(video_encode(i,inbufout) < 0)
           return 0;
       }
       cout<<"\n"<< ... /* rest of the listing is cut off in the original post */
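
    One thing that is different for H.264 in this setup, and may be worth checking: the decoder is fed fixed INBUF_SIZE slices of the byte stream, and unlike the MPEG-1 decoder opened with CODEC_FLAG_TRUNCATED, the H.264 decoder generally expects each AVPacket to contain a complete frame/NAL unit. A common way to get that with the same old-style API is to run the buffered byte stream through the H.264 parser before calling avcodec_decode_video2(). The sketch below is not a drop-in fix; decode_h264_buffer, stream and stream_size are hypothetical names standing in for the sender's buffer.

    /* Sketch: split the raw H.264 byte stream into whole packets with
     * av_parser_parse2() and hand only complete packets to the decoder. */
    static void decode_h264_buffer(AVCodecContext *avctx, AVFrame *picture,
                                   uint8_t *stream, int stream_size)
    {
        AVCodecParserContext *parser = av_parser_init(AV_CODEC_ID_H264);
        AVPacket packet;
        int count = 0;

        av_init_packet(&packet);

        while (stream_size > 0) {
            uint8_t *out_data = NULL;
            int out_size = 0;

            /* The parser buffers input until it has one full access unit. */
            int used = av_parser_parse2(parser, avctx, &out_data, &out_size,
                                        stream, stream_size,
                                        AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            stream      += used;
            stream_size -= used;

            if (out_size > 0) {
                packet.data = out_data;
                packet.size = out_size;
                if (decode_write_frame(avctx, picture, &count, &packet, 0) < 0)
                    break;
            }
        }

        /* Flush any frames the decoder is still holding. */
        packet.data = NULL;
        packet.size = 0;
        decode_write_frame(avctx, picture, &count, &packet, 1);

        av_parser_close(parser);
    }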
  • Increase Duration of a video FFMPEG C++

    9 April 2015, by Shahroz Tariq

    I am using the code from the FFmpeg samples which encodes a picture into a video. All I want to do is give it a series of pictures and get back a video in which each picture is shown for one second. The code below just takes one picture from my file system and creates a video from it. (A sketch of one way to hold each picture for a second follows the listing.)

    AVCodec *codec;
    AVCodecContext *c = NULL;
    int i, ret, x, y, got_output;
    FILE *f;

    AVPacket pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    printf("Encode video file %s\n", filename);

    /* find the mpeg1 video encoder */
    codec = avcodec_find_encoder((AVCodecID)codec_id);
    if (!codec)
    {
       fprintf(stderr, "Codec not found\n");
       exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c)
    {
       fprintf(stderr, "Could not allocate video codec context\n");
       exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 200;
    c->height = 200;
    /* frames per second */
    AVRational rational;
    rational.num = 1;
    rational.den = 25;
    c->time_base = rational;
    /* emit one intra frame every ten frames
    * check frame pict_type before passing frame
    * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
    * then gop_size is ignored and the output of encoder
    * will always be I frame irrespective to gop_size
    */
    c->gop_size = 10;
    c->max_b_frames = 1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    if (codec_id == AV_CODEC_ID_H264)
       av_opt_set(c->priv_data, "preset", "slow", 0);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0)
    {
       fprintf(stderr, "Could not open codec\n");
       exit(1);
    }

    fopen_s(&f, filename, "wb");
    if (!f)
    {
       fprintf(stderr, "Could not open %s\n", filename);
       exit(1);
    }
    AVFrame *frame = OpenImage("..\\..\\..\\..\\..\\..\\1.jpg");
    //frame = av_frame_alloc();
    if (!frame)
    {
       fprintf(stderr, "Could not allocate video frame\n");
       exit(1);
    }

    frame->format = c->pix_fmt;
    frame->width = c->width;
    frame->height = c->height;
    /* the image can be allocated by any means and av_image_alloc() is
    * just the most convenient way if av_malloc() is to be used */

    int screenHeight = 200;
    int screenWidth = 200;
    for (i = 0; i < 25; i++)
    {
       av_init_packet(&pkt);
       pkt.data = NULL;    // packet data will be allocated by the encoder
       pkt.size = 0;

       fflush(stdout);



       frame->pts = i;

       /* encode the image */
       ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
       if (ret < 0)
       {
           fprintf(stderr, "Error encoding frame\n");
           exit(1);
       }

       if (got_output)
       {
           printf("Write frame %3d (size=%5d)\n", i, pkt.size);
           fwrite(pkt.data, 1, pkt.size, f);
           av_free_packet(&pkt);
       }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++)
    {
       fflush(stdout);

       ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
       if (ret < 0)
       {
           fprintf(stderr, "Error encoding frame\n");
           exit(1);
       }

       if (got_output)
       {
           printf("Write frame %3d (size=%5d)\n", i, pkt.size);
           fwrite(pkt.data, 1, pkt.size, f);
           av_free_packet(&pkt);
       }
    }

    /* add sequence end code to have a real mpeg file */
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_freep(&frame->data[0]);
    av_frame_free(&frame);
    printf("\n");
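
    For what it is worth, one way to read the timing here: with c->time_base set to 1/25, every packet the encoder emits adds 1/25 s to a raw elementary stream, so a picture that should stay on screen for one second has to be submitted 25 times with consecutive pts values. A rough sketch under that assumption, reusing the question's c, f, ret and OpenImage(); num_images and image_paths are hypothetical.

    /* Sketch: encode each source picture 25 times so it lasts one second
     * at 25 fps. Flush delayed frames afterwards, as in the original code. */
    int pts = 0;
    for (int img = 0; img < num_images; img++) {        /* hypothetical image list  */
        AVFrame *picture = OpenImage(image_paths[img]);  /* loader from the question */

        for (int k = 0; k < 25; k++) {                   /* 25 frames == 1 second    */
            AVPacket packet;
            int got_packet = 0;

            av_init_packet(&packet);
            packet.data = NULL;    /* the encoder allocates the payload */
            packet.size = 0;

            picture->pts = pts++;

            ret = avcodec_encode_video2(c, &packet, picture, &got_packet);
            if (ret < 0) {
                fprintf(stderr, "Error encoding frame\n");
                exit(1);
            }
            if (got_packet) {
                fwrite(packet.data, 1, packet.size, f);
                av_free_packet(&packet);
            }
        }
        /* release picture->data[0] and the frame here, as in the epilogue */
    }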
  • Create MPEG-DASH Initialization segment

    5 January 2016, by Mahout

    I am looking to convert between HLS and MPEG-DASH. I do not have access to the original, fully concatenated video file, only the individual HLS segments.

    To carry out this transformation to MPEG-DASH I need to supply an initialization segment for the DASH manifest (.mpd) file.

    My questions are:

    1. What is the structure of a DASH video initialization segment?
    2. How can I generate/create one without needing the original full file?

    Perhaps a solution would involve getting MP4Box to convert the '.ts' HLS segments to DASH '.m4s' segments which are self-initialising, but I am unsure how to go about this?

    Any ideas are much appreciated.

    Many thanks.

    UPDATE:
    Here is a snippet that streams using the original HLS segments. The video plays all the way through, but it is just black. (A sketch of a SegmentList with an explicit initialization segment follows the snippet.)

     <Representation width="426" height="238" frameRate="25" bandwidth="400000">
       <SegmentList timescale="25000" duration="112500">
         <SegmentURL media="video_0_400000/hls/segment_0.ts"/>
         <SegmentURL media="video_0_400000/hls/segment_1.ts"/>
         <SegmentURL media="video_0_400000/hls/segment_2.ts"/>
       </SegmentList>
     </Representation>
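
    For reference, a DASH initialization segment for fragmented-MP4 media is an MP4 that carries only the 'ftyp' and 'moov' boxes (decoder configuration, no media data); MP4Box's -dash mode generates one alongside the '.m4s' media segments. In the MPD it is referenced from the SegmentList with an Initialization element. The snippet below is only a sketch of that shape; init.mp4, the '.m4s' names and the codecs string are hypothetical, not files from the question.

     <Representation id="video_0_400000" mimeType="video/mp4" codecs="avc1.42c01e"
                     width="426" height="238" frameRate="25" bandwidth="400000">
       <SegmentList timescale="25000" duration="112500">
         <!-- ftyp + moov only: tells the player how to set up the decoder -->
         <Initialization sourceURL="video_0_400000/dash/init.mp4"/>
         <SegmentURL media="video_0_400000/dash/segment_1.m4s"/>
         <SegmentURL media="video_0_400000/dash/segment_2.m4s"/>
         <SegmentURL media="video_0_400000/dash/segment_3.m4s"/>
       </SegmentList>
     </Representation>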