Media (33)

Keyword: Tags / creative commons

Other articles (34)

  • HTML5 audio and video support

    10 April 2011

    MediaSPIP uses the HTML5 video and audio tags to play multimedia documents, taking advantage of the latest W3C innovations supported by modern browsers.
    For older browsers, the Flowplayer Flash player is used.
    The HTML5 player used was created specifically for MediaSPIP: its appearance is fully customizable so that it can match a chosen theme.
    These technologies make it possible to deliver video and sound both on conventional computers (...)

  • Libraries and binaries specific to video and audio processing

    31 January 2010, by

    The following software and libraries are used by SPIPmotion in one way or another.
    Required binaries FFMpeg: the main encoder; it can transcode almost every type of video and audio file into formats that can be read on the Internet. See this tutorial for its installation; Oggz-tools: inspection tools for ogg files; Mediainfo: retrieves information from most video and audio formats;
    Complementary and optional binaries flvtool2: (...)

  • From upload to the final video [standalone version]

    31 January 2010, by

    The path of an audio or video document through SPIPMotion is divided into three distinct stages.
    Upload and retrieval of information about the source video
    First of all, a SPIP article must be created and the "source" video document attached to it.
    When this document is attached to the article, two actions in addition to the normal behavior are executed: retrieval of the technical information of the file's audio and video streams; generation of a thumbnail: extraction of a (...)
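
    For illustration only, a minimal libavformat sketch of the first of those two actions, probing the technical information of a file's audio and video streams (MediaSPIP itself performs this step through external binaries such as FFmpeg and MediaInfo, not through this code; the function name probe_streams is a placeholder):

    #include <libavformat/avformat.h>

    /* Print the technical information (codecs, resolution, duration, ...) of
     * the audio and video streams found in the file at 'path'. */
    int probe_streams(const char *path)
    {
        AVFormatContext *fmt = NULL;
        int ret;

        av_register_all();                  /* needed on FFmpeg versions before 4.0 */

        ret = avformat_open_input(&fmt, path, NULL, NULL);
        if (ret < 0)
            return ret;

        ret = avformat_find_stream_info(fmt, NULL);
        if (ret >= 0)
            av_dump_format(fmt, 0, path, 0);

        avformat_close_input(&fmt);
        return ret;
    }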

On other sites (5287)

  • FFMPEG - FFV1 frame encoding crashes on cleanup

    21 May 2016, by Yan

    I’m trying to implement frame encoding using the ffmpeg C API. I am receiving frames from a camera in the Gray16le format. I want to encode them with the ffv1 encoder and copy the resulting frame into the variable "data". This is the code I have so far. It seems to be working, in the sense that it doesn’t crash until the part where I free up my variables.

    /* Video compression variables///////////////////////////////////*/
    struct timeval stop, start;

    AVCodec *codec;
    AVCodecContext *context= NULL;
    const AVPixFmtDescriptor *avPixDesc = NULL; // used to get bits per pixel
    int ret, got_output = 0;
    int bufferSize = 0; // Size of encoded image frame in bytes
    //uint8_t* outBuffer;
    AVFrame *inFrame; //
    AVPacket pkt;
    Data* data;
    /* Video compression ///////////////////////////////////*/

    Frame* frame;
    /////////////////////////////////////////////////////////////////////////
    // start frame compression - current codec is ffv1
    //////////////////////////////////////////////////////////////////////////

    gettimeofday(&start, NULL); // get current time
    avcodec_register_all(); // register all the codecs
    codec = avcodec_find_encoder(AV_CODEC_ID_FFV1); // find the ffv1 encoder
    if (!codec) {
       fprintf(stderr, "Codec not found\n");
       exit(1);
    }

    context = avcodec_alloc_context3(codec);
    if (!context) {
       fprintf(stderr, "Could not allocate video codec context\n");
       exit(1);
    }

    frame = getFrame(); // get frame so we can set context params

    /* put sample parameters */
    context->bit_rate = 400000; // from example, half might also work
    /* resolution must be a multiple of two */
    context->width = frame->size[0];
    context->height = frame->size[1];
    /* frames per second */
    context->time_base = (AVRational){1,22}; // 22 fps

    context->gop_size = 1; // typical for ffv1 codec
    context->max_b_frames = 1; // set to 1 for now, the higher the b-frame count, the higher the needed resources
    context->pix_fmt = AV_PIX_FMT_GRAY16LE ; // same as source, Y, 16bpp, little-endian, 12 of the 16 bits are used

    /* open it */
    if (avcodec_open2(context, codec, NULL) < 0) {
           fprintf(stderr, "Could not open codec\n");
           exit(1);
    }

    inFrame = av_frame_alloc();
    if(!inFrame)
    {
       printf("Could not allocate video frame\n! Exciting..");
       exit(1);
    }

    // allocate image in inFrame
    ret = av_image_alloc(inFrame->data, inFrame->linesize, context->width, context->height, context->pix_fmt, 16);
    if(ret<0)
    {
       printf("Error allocating image of inFrame! Exiting..\n");
       exit(1);
    }

    // copy data of frame of type Frame* into frame of type AVFrame* so we can use ffmpeg to encode it
    int picFill = avpicture_fill((AVPicture*)inFrame, (uint8_t*)frame->image, context->pix_fmt, context->width, context->height);

    if(picFill < 0)
    {
       printf("Error filling inFrame with frame->image! Exiting..\n");
       exit(1);
    }
    else
    {
       printf("Successfully filled inFrame with frame->image..\n");
       printf("Size of bytes filled:  %d", picFill);
    }

    inFrame->width = context->width;
    inFrame->height = context->height;
    inFrame->format = context->pix_fmt;

    if(frame->image[0] == NULL)
    {
           printf("Error! frame->image[0] == NULL.. Exiting..\n");
           exit(1);
    }

    fflush(stdout);
    int i=0;

    // start encoding
    while(!got_output) // while we didn't get a complete package
    {
       /* Start encoding the given frame */
       av_init_packet(&pkt);
       pkt.data = NULL;    // packet data will be allocated by the encoder
       pkt.size = 0;

       i++;

       /* encode the image */
       ret = avcodec_encode_video2(context, &pkt, inFrame, &got_output);
       if (ret < 0) {
               fprintf(stderr, "Error encoding frame\n");
               exit(1);
       }

       inFrame->pts = i;

       if(got_output)
       {
           printf("Got a valid package after %d frames..\n", i);
           // encoding of frame done, adapt "data"-field accordingly
           avPixDesc = av_pix_fmt_desc_get(context->pix_fmt); // Get pixelFormat descriptor
           bufferSize = av_image_get_buffer_size(context->pix_fmt, inFrame->width, inFrame->height,16);
           if(bufferSize <= 0)
           {
               printf("Error! Buffersize of encoded frame is <= 0, exciting...\n");
           }
           else
           {
               printf("Buffersize determined to be %d\n", bufferSize);
           }

           data->m_size[0] = inFrame->width;
           data->m_size[1] = inFrame->height;
           data->m_bytesPerPixel = av_get_bits_per_pixel(avPixDesc)/8;

           if (0 != av_get_bits_per_pixel(avPixDesc) % 8)
                   data->m_bytesPerPixel += 1;

           printf("Buffersize is: %d, should be %d\n", bufferSize, inFrame->width * inFrame->height * data->m_bytesPerPixel);
           data->m_image = malloc(bufferSize);
           printf("copying data into final variable...\n");

           memcpy(data->m_image, pkt.data, bufferSize); // copy data from ffmpeg frame
           printf("copying of data done\n");

           printf("Unrefing packet..\n");
           av_packet_unref(&pkt);
           printf("Unrefing packet done..\n");
       }
       else
       {
           printf("Didnt get package, so we get and encode next frame..\n");
           frame = getFrame(); // get next frame            

           picFill = avpicture_fill((AVPicture*)inFrame, (uint8_t*)frame->image, context->pix_fmt, context->width, context->height);
           if(!picFill)
           {
               printf("Error filling frame with data!!..\n");
               exit(1);
           }
           else
           {
               printf("Size required to store received frame in AVFrame in bytes: %d", picFill);
           }
       }
    }

    printf("\nDone with encoding.. cleaning up..\n");
    printf("Closing context...\n");
    avcodec_close(context);
    printf("Closing context done...\n");
    printf("Freeing context...\n");
    av_free(context);
    printf("Freeing context done...\n");
    if(inFrame->data[0] != NULL)
    {
       printf("avfreep() pointer to FFMPEG frame data...\n");
       av_freep(&inFrame->data[0]);
       printf("Freeing pointer to FFMPEG frame data done...\n");
    }
    else
    {
       printf("infRame->data[0] was not deleted because it was NULL\n");
    }

    printf("Freeing frame...\n");
    av_frame_free(&inFrame);
    printf("Freeing inFrame done...\n");
    printf("Compression of frame done...\n");
    gettimeofday(&stop, NULL);
    printf("took %lu ms\n", (stop.tv_usec - start.tv_usec) / 1000);

    This is the output that I am getting when I run the program:

    [ffv1 @ 0x75101970] bits_per_raw_sample > 8, forcing range coder
    Successfully filled inFrame with frame->image..
    Size of bytes filled:  1377792Got a valid package after 1 frames..
    Buffersize determined to be 1377792
    Buffersize is: 1377792, should be 1377792
    copying data into final variable...
    copying of data done
    Unrefing packet..
    Unrefing packet done..

    Done with encoding.. cleaning up..
    Closing context...
    Closing context done...
    Freeing context...
    Freeing context done...
    avfreep() pointer to FFMPEG frame data...
    *** Error in `./encoding': free(): invalid pointer: 0x74a66428 ***
    Aborted

    The error seems to occur when calling the av_freep() function. If you could point me in the right direction, it would be greatly appreciated! This is my first time working with the ffmpeg API, and I feel that I am still not that close to my goal, though I have already spent quite some time looking for the error.

    Best regards!
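
    For reference, a likely explanation of the crash: avpicture_fill() repoints inFrame->data[0] at the camera buffer frame->image, which was never allocated with av_malloc(), so the later av_freep(&inFrame->data[0]) hands the allocator a pointer it does not own and glibc aborts. A minimal sketch of an alternative, reusing the variables from the code above and assuming frame->image is a tightly packed GRAY16LE buffer (an assumption, not stated in the post):

    #include <libavutil/imgutils.h>

    /* Copy the camera data into the buffer that av_image_alloc() gave to
     * inFrame, instead of replacing inFrame's data pointers with frame->image.
     * inFrame then keeps ownership of its own buffer, and the later
     * av_freep(&inFrame->data[0]) frees memory that really came from
     * av_image_alloc(). */
    const uint8_t *src_data[4]     = { (const uint8_t *)frame->image, NULL, NULL, NULL };
    int            src_linesize[4] = { context->width * 2, 0, 0, 0 }; /* 2 bytes per GRAY16LE pixel */

    av_image_copy(inFrame->data, inFrame->linesize,
                  src_data, src_linesize,
                  context->pix_fmt, context->width, context->height);

    The same replacement would apply to the second avpicture_fill() call inside the encoding loop.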

  • Streaming video (C# using FFmpeg AutoGen) sends multiple data requests

    14 July 2016, by williamtroup

    I’ve written a video generator that writes a video in h264 format (mp4). When I stream the video from my Azure service, I’m seeing the following network traffic:

    [screenshot of network traffic]

    The AVCodecContext setup I’m using is as follows:

    AVCodec* videoCodec = ffmpeg.avcodec_find_encoder(AVCodecID.AV_CODEC_ID_H264);
    AVCodecContext* videoCodecContext = ffmpeg.avcodec_alloc_context3(videoCodec);
    videoCodecContext->bit_rate = 400000;
    videoCodecContext->width = 1280;
    videoCodecContext->height = 720;
    videoCodecContext->gop_size = 12;
    videoCodecContext->max_b_frames = 1;
    videoCodecContext->pix_fmt = videoCodec->pix_fmts[0];
    videoCodecContext->codec_id = videoCodec->id;
    videoCodecContext->codec_type = videoCodec->type;
    videoCodecContext->time_base = new AVRational
    {
       num = 1,
       den = 30
    };

    ffmpeg.av_opt_set(videoCodecContext->priv_data, "preset", "ultrafast");

    I also tried setting the "movflags" option for avformat_write_header() via an AVDictionary, but then av_write_trailer() returns -2, causing the file to not be written completely.

    I cannot figure out how to solve this problem. Videos generated using Windows Movie Maker stream perfectly.

    I know this has something to do with the mdat and moov atom positions.

    Also, this appears to only be happening in Google Chrome.
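
    For reference, a minimal C sketch of passing "movflags" to the MP4/MOV muxer so that the moov atom is written at the front of the file (FFmpeg.AutoGen wraps these same libavformat calls; ofmt_ctx stands for an already-configured output AVFormatContext and is a placeholder name):

    #include <libavformat/avformat.h>
    #include <libavutil/dict.h>

    /* Request "faststart": the muxer rewrites the file when the trailer is
     * written so that the moov atom ends up before the mdat data, letting
     * browsers start playback without first fetching the end of the file. */
    static int write_header_faststart(AVFormatContext *ofmt_ctx)
    {
        AVDictionary *opts = NULL;
        int ret;

        av_dict_set(&opts, "movflags", "faststart", 0);

        ret = avformat_write_header(ofmt_ctx, &opts);

        av_dict_free(&opts);   /* entries left here were not consumed by the muxer */
        return ret;
    }

    Note that "faststart" needs a seekable output file, since the atoms are shuffled around when av_write_trailer() is called; writing to a non-seekable output is one possible reason for the trailer error mentioned above.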

  • Is QTRLE supported by ffmpeg?

    22 April 2016, by Sagar
    • I am trying to create a video using the QTRLE format with the RGB24 or ARGB pixel format in the ffmpeg library, but the video is not played.
    • But if I use the H264 format with a YUV pixel format, then it works fine.
    • Is QTRLE not supported by ffmpeg, or am I making a mistake?

      static void video_encode_example(const char *filename, int codec_id)
      {
             AVCodec *codec;
             AVCodecContext *c = NULL;
             int i, ret, x, y, got_output;
             FILE *f;
             AVFrame *frame;
             AVPacket pkt;
             uint8_t endcode[] = { 0, 0, 1, 0xb7 };

             printf("Encode video file %s\n", filename);

             /* find the requested video encoder */

             codec = avcodec_find_encoder((AVCodecID) codec_id);
             //  codec = 28;
             if (!codec) {
                 fprintf(stderr, "Codec not found : %d\n", codec_id);
                 exit(1);
             }

             c = avcodec_alloc_context3(codec);

             if (!c) {
                 fprintf(stderr, "Could not allocate video codec context\n");
                 exit(1);
             }

             /* put sample parameters */
             c->bit_rate = 400000;
             /* resolution must be a multiple of two */
             c->width = 352;
             c->height = 288;
             /* frames per second */
             c->time_base = (AVRational ) { 1, 25 };
             /* emit one intra frame every ten frames
              * check frame pict_type before passing frame
              * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
              * then gop_size is ignored and the output of encoder
              * will always be I frame irrespective to gop_size
              */
             c->gop_size = 10;
             c->max_b_frames = 1;
             c->pix_fmt = AV_PIX_FMT_RGB24;

             if (codec_id == AV_CODEC_ID_H264)
                 av_opt_set(c->priv_data, "preset", "slow", 0);

             /* open it */
             if (avcodec_open2(c, codec, NULL) < 0) {
                 fprintf(stderr, "Could not open codec\n");
                 exit(1);
             }

             f = fopen(filename, "wb");
             if (!f) {
                 fprintf(stderr, "Could not open %s\n", filename);
                 exit(1);
             }

             frame = av_frame_alloc();
             if (!frame) {
                 fprintf(stderr, "Could not allocate video frame\n");
                 exit(1);
             }
             frame->format = c->pix_fmt;
             frame->width = c->width;
             frame->height = c->height;

             /* the image can be allocated by any means and av_image_alloc() is
              * just the most convenient way if av_malloc() is to be used */
             ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
                     c->pix_fmt, 32);
             if (ret < 0) {
                 fprintf(stderr, "Could not allocate raw picture buffer\n");
                 exit(1);
             }
             ////////////////////////////////////
             cv::Mat input = imread("image for creating video(image.png)");
             Mat output;
             resize(input, input, Size(c->width, c->height));
             cvtColor(input, input, CV_BGR2RGB);

             av_image_fill_arrays(frame->data, frame->linesize, input.data,
                     c->pix_fmt, c->width, c->height, 1);

             ////////////////////////////////////

             for (i = 0; i < 250; i++) {
                 av_init_packet(&pkt);
                 pkt.data = NULL;    // packet data will be allocated by the encoder
                 pkt.size = 0;

                 fflush(stdout);

                 frame->pts = i;

                 /* encode the image */
                 ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
                 if (ret < 0) {
                     fprintf(stderr, "Error encoding frame\n");
                     exit(1);
                 }

                 if (got_output) {
                     printf("Write frame %3d (size=%5d)\n", i, pkt.size);
                     fwrite(pkt.data, 1, pkt.size, f);
                     av_packet_unref(&pkt);
                 }
             }

             /* get the delayed frames */
             for (got_output = 1; got_output; i++) {
                 fflush(stdout);

                 ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
                 if (ret < 0) {
                     fprintf(stderr, "Error encoding frame\n");
                     exit(1);
                 }

                 if (got_output) {
                     printf("Write frame %3d (size=%5d)\n", i, pkt.size);
                     fwrite(pkt.data, 1, pkt.size, f);
                     av_packet_unref(&pkt);
                 }
             }

             /* add sequence end code to have a real mpeg file */
             fwrite(endcode, 1, sizeof(endcode), f);
             fclose(f);

             avcodec_close(c);
             av_free(c);
         //  av_freep(&frame->data[0]);
             av_frame_free(&frame);
             printf("\n");
      }

      int main() {
         av_register_all();
         video_encode_example("test.mov", AV_CODEC_ID_QTRLE);
      }