Recherche avancée

Médias (3)

Mot : - Tags -/plugin

Autres articles (76)

  • Amélioration de la version de base

    13 septembre 2013

    Jolie sélection multiple
    Le plugin Chosen permet d’améliorer l’ergonomie des champs de sélection multiple. Voir les deux images suivantes pour comparer.
    Il suffit pour cela d’activer le plugin Chosen (Configuration générale du site > Gestion des plugins), puis de configurer le plugin (Les squelettes > Chosen) en activant l’utilisation de Chosen dans le site public et en spécifiant les éléments de formulaires à améliorer, par exemple select[multiple] pour les listes à sélection multiple (...)

  • Emballe médias : à quoi cela sert ?

    4 février 2011, par

    Ce plugin vise à gérer des sites de mise en ligne de documents de tous types.
    Il crée des "médias", à savoir : un "média" est un article au sens SPIP créé automatiquement lors du téléversement d’un document qu’il soit audio, vidéo, image ou textuel ; un seul document ne peut être lié à un article dit "média" ;

  • Le plugin : Gestion de la mutualisation

    2 mars 2010, par

    Le plugin de Gestion de mutualisation permet de gérer les différents canaux de mediaspip depuis un site maître. Il a pour but de fournir une solution pure SPIP afin de remplacer cette ancienne solution.
    Installation basique
    On installe les fichiers de SPIP sur le serveur.
    On ajoute ensuite le plugin "mutualisation" à la racine du site comme décrit ici.
    On customise le fichier mes_options.php central comme on le souhaite. Voilà pour l’exemple celui de la plateforme mediaspip.net :
    <?php (...)

Sur d’autres sites (9125)

  • FFMPEG error : Exactly one scaler algorithm must be chosen

    28 mai 2015, par Dave_Dev

    I am currently working on an FFMPEG project. I am trying to convert an RGB image into a YUV image using this code (I found it on the internet last night) :

    void Decode::video_encode_example(const char *filename, int codec_id)

       {

       AVCodec *codec;

       AVCodecContext *c= NULL;

       int i, ret, x, y, got_output;
       FILE *f;
       AVFrame *frame;
       AVPacket pkt;
       uint8_t endcode[] = { 0, 0, 1, 0xb7 };

       printf("Encode video file %s\n", filename);

       /* find the mpeg1 video encoder */
       codec = avcodec_find_encoder((enum AVCodecID)codec_id);
       if (!codec) {
           fprintf(stderr, "Codec not found\n");
           exit(1);
       }

       c = avcodec_alloc_context3(codec);
       if (!c) {
           fprintf(stderr, "Could not allocate video codec context\n");
           exit(2);
       }

       /* put sample parameters */
       c->bit_rate = 400000;
       /* resolution must be a multiple of two */
       c->width = 352;
       c->height = 288;
       /* frames per second */
       c->time_base = (AVRational){1,25};
       /* emit one intra frame every ten frames
        * check frame pict_type before passing frame
        * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
        * then gop_size is ignored and the output of encoder
        * will always be I frame irrespective to gop_size
        */
       c->gop_size = 10;
       c->max_b_frames = 1;
       c->pix_fmt = AV_PIX_FMT_YUV420P;

       if (codec_id == AV_CODEC_ID_H264)
           av_opt_set(c->priv_data, "preset", "slow", 0);

       /* open it */
       if (avcodec_open2(c, codec, NULL) < 0) {
           fprintf(stderr, "Could not open codec\n");
           exit(3);
       }

       f = fopen(filename, "wb");
       if (!f) {
           fprintf(stderr, "Could not open %s\n", filename);
           exit(4);
       }

       frame = avcodec_alloc_frame();// Dans une version plus récente c'est av_frame_alloc
       if (!frame) {
           fprintf(stderr, "Could not allocate video frame\n");
           exit(5);
       }
       frame->format = c->pix_fmt;
       frame->width  = c->width;
       frame->height = c->height;

       /* the image can be allocated by any means and av_image_alloc() is
        * just the most convenient way if av_malloc() is to be used */
       ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
                            c->pix_fmt, 32);
       if (ret < 0) {
           fprintf(stderr, "Could not allocate raw picture buffer\n");
           exit(6);
       }

       //
       // RGB to YUV:
       //    http://stackoverflow.com/questions/16667687/how-to-convert-rgb-from-yuv420p-for-ffmpeg-encoder
       //
       // Create some dummy RGB "frame"
       uint8_t *rgba32Data = new uint8_t[4*c->width*c->height];

       SwsContext * ctx = sws_getContext(c->width, c->height,
                                         AV_PIX_FMT_RGBA, c->width, c->height,
                                         AV_PIX_FMT_YUV420P, 0, 0, 0, 0);


       /* encode 1 second of video */
       for (i = 0; i < 25; i++) {
           av_init_packet(&pkt);
           pkt.data = NULL;    // packet data will be allocated by the encoder
           pkt.size = 0;


           fflush(stdout);
           /* prepare a dummy image */
           /* Y */
           //        for (y = 0; y < c->height; y++) {
           //            for (x = 0; x < c->width; x++) {
           //                frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
           //            }
           //        }
           //
           //        /* Cb and Cr */
           //        for (y = 0; y < c->height/2; y++) {
           //            for (x = 0; x < c->width/2; x++) {
           //                frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
           //                frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
           //            }
           //        }

           uint8_t *pos = rgba32Data;
           for (y = 0; y < c->height; y++)
           {
               for (x = 0; x < c->width; x++)
               {
                   pos[0] = i / (float)25 * 255;
                   pos[1] = 0;
                   pos[2] = x / (float)(c->width) * 255;
                   pos[3] = 255;
                   pos += 4;
               }
           }

           uint8_t * inData[1] = { rgba32Data }; // RGBA32 have one plane
           //
           // NOTE: In a more general setting, the rows of your input image may
           //       be padded; that is, the bytes per row may not be 4 * width.
           //       In such cases, inLineSize should be set to that padded width.
           //
           int inLinesize[1] = { 4*c->width }; // RGBA stride
           sws_scale(ctx, inData, inLinesize, 0, c->height, frame->data, frame->linesize);

           frame->pts = i;

           /* encode the image */
           ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
           if (ret < 0) {
               fprintf(stderr, "Error encoding frame\n");
               exit(7);
           }

           if (got_output) {
               printf("Write frame %3d (size=%5d)\n", i, pkt.size);
               fwrite(pkt.data, 1, pkt.size, f);
                av_free_packet(&pkt);
           }
       }

       /* get the delayed frames */
       for (got_output = 1; got_output; i++) {
           fflush(stdout);

            ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
            if (ret < 0) {
               fprintf(stderr, "Error encoding frame\n");
               exit(8);
           }

           if (got_output) {
               printf("Write frame %3d (size=%5d)\n", i, pkt.size);
               fwrite(pkt.data, 1, pkt.size, f);
                av_free_packet(&pkt);
           }
       }

       /* add sequence end code to have a real mpeg file */
       fwrite(endcode, 1, sizeof(endcode), f);
       fclose(f);

       avcodec_close(c);
       av_free(c);
       av_freep(&frame->data[0]);
       avcodec_free_frame(&frame);// Dans une version plus récente c'est av_frame_alloc
       printf("\n");
       }

           int main()
           {

               Decode d;

               avcodec_register_all();

               d.video_encode_example("/home/Dave/Desktop/test.mpg",AV_CODEC_ID_MPEG2VIDEO);

           }

    When I run this application, my Linux terminal shows me the following error :

    [swscaler @ 0x1e1dc60] Exactly one scaler algorithm must be chosen
    Segmentation fault (core dumped)

    I do not know what is actually happening. Could you help me please ?

  • C++, FFmpeg, save AVPacket infomation into videostate structure in ffplay.c

    28 mai 2015, par Yoohoo

    I am currently working on a project that tests video streaming. In the project, video stream is encoded with H.264 before send and decoded after receive, using FFmpeg codec and functions.

    I can encode video stream by

    init_video_encode(AV_CODEC_ID_H264);

    where

    static void init_video_encode(AVCodecID codec_id){
    codec = avcodec_find_encoder(codec_id);
    if (!codec) {
       fprintf(stderr, "Codec not found\n");
       exit(1);
    }

    c = avcodec_alloc_context3(codec);
    if (!c) {
       fprintf(stderr, "Could not allocate video codec context\n");
       exit(1);
    }

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 640;
    c->height = 480;
    /* frames per second */
    c->time_base= (AVRational){1,25};
    c->gop_size = 10; /* emit one intra frame every ten frames */
    c->max_b_frames=max_f;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    if(codec_id == AV_CODEC_ID_H264)
       av_opt_set(c->priv_data, "preset", "slow", 0);

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
       fprintf(stderr, "Could not open codec\n");
       exit(1);
    }

    frame = avcodec_alloc_frame();
    if (!frame) {
       fprintf(stderr, "Could not allocate video frame\n");
       exit(1);
    }
    frame->format = c->pix_fmt;
    frame->width  = c->width;
    frame->height = c->height;

    /* the image can be allocated by any means and av_image_alloc() is
    * just the most convenient way if av_malloc() is to be used */
    ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
                        c->pix_fmt, 32);

    /* get the delayed frames */
    if (ret < 0) {
       fprintf(stderr, "Could not allocate raw picture buffer\n");
       exit(1);
    }
       av_init_packet(&pkt);

    //}

       pkt.data = NULL;    // packet data will be allocated by the encoder
       pkt.size = 0;
       //cout<<"\nBefore YUV\n";
       if(count == 0)
       read_yuv420(frame->data[0]);
       count ++;
      // cout&lt;&lt;"\nAfter YUV\n";
       if(count == SUBSITY) {
       count = 0;
       }

       frame->pts = i++;

       /* encode the image */
       ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
       if (ret < 0) {
            fprintf(stderr, "Error encoding frame\n");
            return -1;
       }
       //cout&lt;&lt;"\nRecord the Video\n";
       if (got_output) {
            //printf("Write frame %3d (size=%5d)\n", i, pkt.size);
            //cout&lt;&lt;"\nBefore Memcpy\n\n\n";

            memcpy(inbufout+totalSize,pkt.data,pkt.size);
            //cout&lt;&lt;"\nAfter Memcpy\n\n\n";
            totalSize+=pkt.size;

    The video encoder works very well, if I write the encoded packet into a .h264 file, it can be played. But at the decoder side, I cannot decode the received packet with :

       codec = avcodec_find_decoder(AV_CODEC_ID_H264);
       if (!codec) {
           fprintf(stderr, "Codec not found\n");
           exit(1);
       }

       c = avcodec_alloc_context3(codec);
       if (!c) {
           fprintf(stderr, "Could not allocate video codec context\n");
           exit(1);
       }

       if(codec->capabilities&CODEC_CAP_TRUNCATED)
           c->flags|= CODEC_FLAG_TRUNCATED;

       /* open it */
       if (avcodec_open2(c, codec, NULL) < 0) {
           fprintf(stderr, "Could not open codec\n");
           exit(1);
       }

       frame = avcodec_alloc_frame();
       if (!frame) {
           fprintf(stderr, "Could not allocate video frame\n");
           exit(1);
       }
    len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
       if (len < 0) {
           fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
           return len;
       }

    The reason of failure is lacking parser, I have tried to build a parser but failed......

    Therefore I am wondering using ffplay.c as a header file in my receiver program so that I can use it as the decoder and player.

    I have took a look at ffplay.c, it actually fetch file into a videostate structure and processing it. The fetching part is from line 3188 of ffplay.c :

    VideoState *is;

       is = av_mallocz(sizeof(VideoState));
       if (!is)
           return NULL;
       av_strlcpy(is->filename, filename, sizeof(is->filename));
       is->iformat = iformat;
       is->ytop    = 0;
       is->xleft   = 0;

       /* start video display */
       if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
           goto fail;
       if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
           goto fail;
       if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
           goto fail;

       packet_queue_init(&is->videoq);
       packet_queue_init(&is->audioq);
       packet_queue_init(&is->subtitleq);

       is->continue_read_thread = SDL_CreateCond();

       init_clock(&is->vidclk, &is->videoq.serial);
       init_clock(&is->audclk, &is->audioq.serial);
       init_clock(&is->extclk, &is->extclk.serial);
       is->audio_clock_serial = -1;
       is->av_sync_type = av_sync_type;
       is->read_tid     = SDL_CreateThread(read_thread, is);
       if (!is->read_tid) {
    fail:
           stream_close(is);
           return NULL;
       }

    Now instead of fetching file, I want to modify ffplay.c code so that let it fetch the received packet, I can save received packet to AVPacket by

    static AVPacket avpkt;
    avpkt.data = inbuf;

    My question is : how to put AVPacket information into videostate structure ?

  • FFMPEG error : Exactly one scaler algorithm must be chosen

    28 mai 2015, par Dave_Dev

    I am currently working on an FFMPEG project. I am trying to convert an RGB image into a YUV image using this code (I found it on the internet last night) :

    void Decode::video_encode_example(const char *filename, int codec_id)

       {

       AVCodec *codec;

       AVCodecContext *c= NULL;

       int i, ret, x, y, got_output;
       FILE *f;
       AVFrame *frame;
       AVPacket pkt;
       uint8_t endcode[] = { 0, 0, 1, 0xb7 };

       printf("Encode video file %s\n", filename);

       /* find the mpeg1 video encoder */
       codec = avcodec_find_encoder((enum AVCodecID)codec_id);
       if (!codec) {
           fprintf(stderr, "Codec not found\n");
           exit(1);
       }

       c = avcodec_alloc_context3(codec);
       if (!c) {
           fprintf(stderr, "Could not allocate video codec context\n");
           exit(2);
       }

       /* put sample parameters */
       c->bit_rate = 400000;
       /* resolution must be a multiple of two */
       c->width = 352;
       c->height = 288;
       /* frames per second */
       c->time_base = (AVRational){1,25};
       /* emit one intra frame every ten frames
        * check frame pict_type before passing frame
        * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
        * then gop_size is ignored and the output of encoder
        * will always be I frame irrespective to gop_size
        */
       c->gop_size = 10;
       c->max_b_frames = 1;
       c->pix_fmt = AV_PIX_FMT_YUV420P;

       if (codec_id == AV_CODEC_ID_H264)
           av_opt_set(c->priv_data, "preset", "slow", 0);

       /* open it */
       if (avcodec_open2(c, codec, NULL) < 0) {
           fprintf(stderr, "Could not open codec\n");
           exit(3);
       }

       f = fopen(filename, "wb");
       if (!f) {
           fprintf(stderr, "Could not open %s\n", filename);
           exit(4);
       }

       frame = avcodec_alloc_frame();// Dans une version plus récente c'est av_frame_alloc
       if (!frame) {
           fprintf(stderr, "Could not allocate video frame\n");
           exit(5);
       }
       frame->format = c->pix_fmt;
       frame->width  = c->width;
       frame->height = c->height;

       /* the image can be allocated by any means and av_image_alloc() is
        * just the most convenient way if av_malloc() is to be used */
       ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
                            c->pix_fmt, 32);
       if (ret < 0) {
           fprintf(stderr, "Could not allocate raw picture buffer\n");
           exit(6);
       }

       //
       // RGB to YUV:
       //    http://stackoverflow.com/questions/16667687/how-to-convert-rgb-from-yuv420p-for-ffmpeg-encoder
       //
       // Create some dummy RGB "frame"
       uint8_t *rgba32Data = new uint8_t[4*c->width*c->height];

       SwsContext * ctx = sws_getContext(c->width, c->height,
                                         AV_PIX_FMT_RGBA, c->width, c->height,
                                         AV_PIX_FMT_YUV420P, 0, 0, 0, 0);


       /* encode 1 second of video */
       for (i = 0; i < 25; i++) {
           av_init_packet(&pkt);
           pkt.data = NULL;    // packet data will be allocated by the encoder
           pkt.size = 0;


           fflush(stdout);
           /* prepare a dummy image */
           /* Y */
           //        for (y = 0; y < c->height; y++) {
           //            for (x = 0; x < c->width; x++) {
           //                frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
           //            }
           //        }
           //
           //        /* Cb and Cr */
           //        for (y = 0; y < c->height/2; y++) {
           //            for (x = 0; x < c->width/2; x++) {
           //                frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
           //                frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
           //            }
           //        }

           uint8_t *pos = rgba32Data;
           for (y = 0; y < c->height; y++)
           {
               for (x = 0; x < c->width; x++)
               {
                   pos[0] = i / (float)25 * 255;
                   pos[1] = 0;
                   pos[2] = x / (float)(c->width) * 255;
                   pos[3] = 255;
                   pos += 4;
               }
           }

           uint8_t * inData[1] = { rgba32Data }; // RGBA32 have one plane
           //
           // NOTE: In a more general setting, the rows of your input image may
           //       be padded; that is, the bytes per row may not be 4 * width.
           //       In such cases, inLineSize should be set to that padded width.
           //
           int inLinesize[1] = { 4*c->width }; // RGBA stride
           sws_scale(ctx, inData, inLinesize, 0, c->height, frame->data, frame->linesize);

           frame->pts = i;

           /* encode the image */
           ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
           if (ret < 0) {
               fprintf(stderr, "Error encoding frame\n");
               exit(7);
           }

           if (got_output) {
               printf("Write frame %3d (size=%5d)\n", i, pkt.size);
               fwrite(pkt.data, 1, pkt.size, f);
                av_free_packet(&pkt);
           }
       }

       /* get the delayed frames */
       for (got_output = 1; got_output; i++) {
           fflush(stdout);

            ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
            if (ret < 0) {
               fprintf(stderr, "Error encoding frame\n");
               exit(8);
           }

           if (got_output) {
               printf("Write frame %3d (size=%5d)\n", i, pkt.size);
               fwrite(pkt.data, 1, pkt.size, f);
                av_free_packet(&pkt);
           }
       }

       /* add sequence end code to have a real mpeg file */
       fwrite(endcode, 1, sizeof(endcode), f);
       fclose(f);

       avcodec_close(c);
       av_free(c);
       av_freep(&frame->data[0]);
       avcodec_free_frame(&frame);// Dans une version plus récente c'est av_frame_alloc
       printf("\n");
       }

           int main()
           {

               Decode d;

               avcodec_register_all();

               d.video_encode_example("/home/Dave/Desktop/test.mpg",AV_CODEC_ID_MPEG2VIDEO);

           }

    When I run this application, my Linux terminal shows me the following error :

    [swscaler @ 0x1e1dc60] Exactly one scaler algorithm must be chosen
    Segmentation fault (core dumped)

    I do not know what is actually happening. Could you help me please ?

    Best regards
    Dave_Dev