
Media (0)


No media matching your criteria is available on the site.

Other articles (80)

  • La sauvegarde automatique de canaux SPIP

    1 April 2010, by

    As part of setting up an open platform, it is important for hosts to have reasonably regular backups available in order to guard against any problem that might arise.
    To carry out this task, two SPIP plugins are used: Saveauto, which makes a regular backup of the database in the form of a MySQL dump (usable in phpMyAdmin), and mes_fichiers_2, which builds a zip archive of the site's important data (the documents, the elements (...)
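
    Outside SPIP, the two plugins boil down to something like the two standard commands sketched below; a minimal illustration, assuming a database called spip and documents stored under IMG/ (names and paths are hypothetical, not taken from the plugins themselves):

    mysqldump -u spip_user -p spip > backup-$(date +%F).sql
    zip -r site-files-$(date +%F).zip IMG/

    Saveauto and mes_fichiers_2 essentially schedule equivalents of these from within SPIP's own cron mechanism.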

  • Encoding and transformation into formats readable on the Internet

    10 April 2011

    MediaSPIP transforms and re-encodes the documents uploaded to it in order to make them readable on the Internet and automatically usable without any intervention from the content creator.
    Videos are automatically encoded into the formats supported by HTML5: MP4, Ogv and WebM. The "MP4" version is also used by the fallback Flash player needed for older browsers.
    Audio documents are likewise re-encoded into the two formats usable with HTML5: MP3 and Ogg. The "MP3" version (...)
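
    By way of illustration, the conversions described above map onto ffmpeg invocations such as the following; a minimal sketch with generic codec choices (libx264/AAC, Theora/Vorbis, VP8/Vorbis), not MediaSPIP's actual presets, and depending on the ffmpeg build the native AAC encoder may additionally require -strict experimental:

    ffmpeg -i source.mov -vcodec libx264 -acodec aac video.mp4
    ffmpeg -i source.mov -vcodec libtheora -acodec libvorbis video.ogv
    ffmpeg -i source.mov -vcodec libvpx -acodec libvorbis video.webm
    ffmpeg -i source.wav -acodec libmp3lame audio.mp3
    ffmpeg -i source.wav -acodec libvorbis audio.ogg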

  • Contribute to documentation

    13 April 2011

    Documentation is vital to the development of improved technical capabilities.
    MediaSPIP welcomes documentation by users as well as developers, including: critique of existing features and functions; articles contributed by developers, administrators, content producers and editors; screenshots to illustrate the above; translations of existing documentation into other languages.
    To contribute, register to the project users’ mailing (...)

On other sites (6716)

  • Why doesn't this FFmpeg code create a video from a series of images?

    20 October 2011, by user551117

    I have successfully compiled the FFmpeg library for use in an iOS application. I would like to use it for encoding a video from a series of images, but I can't seem to make it work.

    The following is the code that I am using to encode this video:

    AVCodec *codec;
    AVCodecContext *c= NULL;
    int i, out_size, size, outbuf_size;
    FILE *f;
    AVFrame *picture;
    uint8_t *outbuf;

    printf("Video encoding\n");

    // find the MPEG-4 video encoder
    codec = avcodec_find_encoder(CODEC_ID_MPEG4);
    if (!codec) {
       fprintf(stderr, "codec not found\n");
       exit(1);
    }

    c= avcodec_alloc_context();
    picture= avcodec_alloc_frame();

    // put sample parameters
    c->bit_rate = 400000;
    // resolution must be a multiple of two
    c->width = 320;
    c->height = 480;
    // frames per second
    c->time_base = (AVRational){1,25};
    c->gop_size = 10; // emit one intra frame every ten frames
    c->max_b_frames = 1;
    c->pix_fmt = PIX_FMT_YUV420P;

    //open it
    if (avcodec_open(c, codec) < 0) {
       fprintf(stderr, "could not open codec\n");
       exit(1);
    }

    f = fopen([[NSTemporaryDirectory() stringByAppendingPathComponent:filename] UTF8String], "w");
    if (!f) {
       fprintf(stderr, "could not open %s\n",[filename UTF8String]);
       exit(1);
    }

    // alloc image and output buffer
    outbuf_size = 100000;
    outbuf = malloc(outbuf_size);
    size = c->width * c->height;

    #pragma mark -
    AVFrame* outpic = avcodec_alloc_frame();
    int nbytes = avpicture_get_size(PIX_FMT_YUV420P, c->width, c->height);

    //create buffer for the output image
    uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes);

    #pragma mark -  
    for(i=1;i<48;i++) {
       fflush(stdout);

       int numBytes = avpicture_get_size(PIX_FMT_YUV420P, c->width, c->height);
       uint8_t *buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

       UIImage *image = [UIImage imageNamed:[NSString stringWithFormat:@"%d.png", i]];
       CGImageRef newCgImage = [image CGImage];

       CGDataProviderRef dataProvider = CGImageGetDataProvider(newCgImage);
       CFDataRef bitmapData = CGDataProviderCopyData(dataProvider);
       buffer = (uint8_t *)CFDataGetBytePtr(bitmapData);  

       avpicture_fill((AVPicture*)picture, buffer, PIX_FMT_RGB24, c->width, c->height);
       avpicture_fill((AVPicture*)outpic, outbuffer, PIX_FMT_YUV420P, c->width, c->height);

       struct SwsContext* fooContext = sws_getContext(c->width, c->height,
                                                      PIX_FMT_RGB24,
                                                      c->width, c->height,
                                                      PIX_FMT_YUV420P,
                                                      SWS_FAST_BILINEAR, NULL, NULL, NULL);

       //perform the conversion
       sws_scale(fooContext, picture->data, picture->linesize, 0, c->height, outpic->data, outpic->linesize);
       // Here is where I try to convert to YUV

       // encode the image

       out_size = avcodec_encode_video(c, outbuf, outbuf_size, outpic);
       printf("encoding frame %3d (size=%5d)\n", i, out_size);
       fwrite(outbuf, 1, out_size, f);

       free(buffer);
       buffer = NULL;      

    }

    // get the delayed frames
    for(; out_size; i++) {
       fflush(stdout);

       out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
       printf("write frame %3d (size=%5d)\n", i, out_size);
       fwrite(outbuf, 1, outbuf_size, f);      
    }

    // add sequence end code to have a real mpeg file
    outbuf[0] = 0x00;
    outbuf[1] = 0x00;
    outbuf[2] = 0x01;
    outbuf[3] = 0xb7;
    fwrite(outbuf, 1, 4, f);
    fclose(f);
    free(outbuf);

    avcodec_close(c);
    av_free(c);
    av_free(picture);
    printf("\n");

    What could be wrong with this code?
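
    For comparison, below is a minimal sketch of how the conversion step is more usually written, assuming the PNGs decode to 4-byte RGBA pixels (which is what CGDataProviderCopyData typically yields) rather than packed RGB24; the scaler is created once outside the loop, and the CFData is released rather than free()d. This is an illustrative sketch, not a verified fix:

    // create the scaler once, before the loop, converting RGBA -> YUV420P
    struct SwsContext *sws = sws_getContext(c->width, c->height, PIX_FMT_RGBA,
                                            c->width, c->height, PIX_FMT_YUV420P,
                                            SWS_FAST_BILINEAR, NULL, NULL, NULL);

    // per frame:
    CFDataRef bitmapData = CGDataProviderCopyData(CGImageGetDataProvider(newCgImage));
    avpicture_fill((AVPicture *)picture, (uint8_t *)CFDataGetBytePtr(bitmapData),
                   PIX_FMT_RGBA, c->width, c->height);
    avpicture_fill((AVPicture *)outpic, outbuffer, PIX_FMT_YUV420P, c->width, c->height);
    sws_scale(sws, picture->data, picture->linesize, 0, c->height,
              outpic->data, outpic->linesize);
    CFRelease(bitmapData);   // the pixel bytes belong to the CFData; do not free() them

    // after the loop:
    sws_freeContext(sws);

    Two smaller suspects in the code above: the delayed-frames loop writes outbuf_size bytes instead of out_size, and the file is opened with "w" rather than "wb". Note also that CGImageGetBytesPerRow() may differ from width*4, in which case the source linesize must be set from it instead of letting avpicture_fill() assume a packed layout.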

  • MPEG-TS Segments HTTP Live Streaming

    5 June 2013, by user1069624

    I'm trying to interleave MPEG-TS segments but failing. One set of segments was captured using the laptop's built-in camera, then encoded using FFmpeg with the following command:

    ffmpeg -er 4 -y -f video4linux2 -s 640x480 -r 30 -i %s -isync -f mpegts -acodec libmp3lame -ar 48000 -ab 64k -s 640x480 -vcodec libx264 -fflags +genpts -b 386k -coder 0 -me_range 16 -keyint_min 25 -i_qfactor 0.71 -bt 386k -maxrate 386k -bufsize 386k -qcomp 0.6 -qmin 10 -qmax 51 -qdiff 4 -aspect 640:480

    And the other one is an AVI file that was encoded using the following command:

    ffmpeg -er 4 -y -f avi -s 640x480 -r 30 -i ./DSCF2021.AVI -vbsf dump_extra -f mpegts -acodec libmp3lame -ar 48000 -ab 64k -s 640x480 -vcodec libx264 -fflags +genpts -b 386k -coder 0 -me_range 16 -keyint_min 25 -i_qfactor 0.71 -bt 386k -maxrate 386k -bufsize 386k -qcomp 0.6 -qmin 10 -qmax 51 -qdiff 4 -aspect 640:480

    Then the output is segmented into ts segments using an open source segmenter.

    If both come from the same source (both from the camera) they work fine. However, in this case the second set of segments freezes: time passes, but the video does not move.
    So I think it's an encoding problem. My question is: how should I change the ffmpeg command to make this work?

    By interleave I mean having a playlist with the first set of segments and another playlist with the other set, and having the client call one and then the other (HTTP Live Streaming).

    The ffprobe output of one of the first set of segments:

    Input #0, mpegts, from 'live1.ts':
     Duration: 00:00:09.76, start: 1.400000, bitrate: 281 kb/s
     Program 1 Service01
       Metadata:
         name            : Service01
         provider_name   : FFmpeg
       Stream #0.0[0x100]: Video: h264, yuv420p, 640x480 [PAR 1:1 DAR 4:3], 29.92 fps, 29.92 tbr, 90k tbn, 59.83 tbc
       Stream #0.1[0x101]: Audio: aac, 48000 Hz, stereo, s16, 111 kb/s

    The ffprobe output of one of the second set of segments:

    Input #0, mpegts, from 'ad1.ts':
     Duration: 00:00:09.64, start: 1.400000, bitrate: 578 kb/s
     Program 1 Service01
       Metadata:
         name            : Service01
         provider_name   : FFmpeg
       Stream #0.0[0x100]: Video: h264, yuv420p, 640x480 [PAR 1:1 DAR 4:3], 25 fps, 25 tbr, 90k tbn, 50 tbc
       Stream #0.1[0x101]: Audio: aac, 48000 Hz, stereo, s16, 22 kb/s
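
    For what it's worth, the two probes differ in frame rate (29.92 fps vs 25 fps) and GOP timing, so one hedged first step is to re-encode the AVI forcing the same output rate and a fixed GOP, for example as below (the same flags as above with -r 30 -g 30 added on the output side; whether this alone fixes playback depends on the segmenter and the client):

    ffmpeg -er 4 -y -f avi -s 640x480 -r 30 -i ./DSCF2021.AVI -vbsf dump_extra -f mpegts -acodec libmp3lame -ar 48000 -ab 64k -s 640x480 -r 30 -g 30 -vcodec libx264 -fflags +genpts -b 386k -coder 0 -me_range 16 -keyint_min 25 -i_qfactor 0.71 -bt 386k -maxrate 386k -bufsize 386k -qcomp 0.6 -qmin 10 -qmax 51 -qdiff 4 -aspect 640:480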

    Thank you,

  • FFmpeg sample code for creating a video file from still images (JNI, Android)

    21 June 2012, by anish

    How do I modify the following FFmpeg sample code to create a video file from the still images I have on my Android phone? I am using JNI to invoke ffmpeg.

    JNIEXPORT void JNICALL videoEncodeExample(JNIEnv *pEnv, jobject pObj, jstring filename)

       {
        AVCodec *codec;
        AVCodecContext *c= NULL;
        int i, out_size, size, x, y, outbuf_size;
        FILE *f;
        AVFrame *picture;
        uint8_t *outbuf, *picture_buf;

        printf("Video encoding\n");

        /* find the mpeg1 video encoder */
        codec = avcodec_find_encoder(CODEC_ID_MPEG1VIDEO);
        if (!codec) {
            fprintf(stderr, "codec not found\n");
            exit(1);
        }

        c= avcodec_alloc_context();
        picture= avcodec_alloc_frame();

        /* put sample parameters */
        c->bit_rate = 400000;
        /* resolution must be a multiple of two */
        c->width = 352;
        c->height = 288;
        /* frames per second */
        c->time_base= (AVRational){1,25};
        c->gop_size = 10; /* emit one intra frame every ten frames */
        c->max_b_frames=1;
        c->pix_fmt = PIX_FMT_YUV420P;

        /* open it */
        if (avcodec_open(c, codec) < 0) {
            fprintf(stderr, "could not open codec\n");
            exit(1);
        }

        /* a jstring cannot be passed to fopen() directly; fetch a C string via JNI first */
        const char *path = (*pEnv)->GetStringUTFChars(pEnv, filename, NULL);
        f = fopen(path, "wb");
        if (!f) {
            fprintf(stderr, "could not open %s\n", path);
            exit(1);
        }

        /* alloc image and output buffer */
        outbuf_size = 100000;
        outbuf = malloc(outbuf_size);
        size = c->width * c->height;
        picture_buf = malloc((size * 3) / 2); /* size for YUV 420 */

        picture->data[0] = picture_buf;
        picture->data[1] = picture->data[0] + size;
        picture->data[2] = picture->data[1] + size / 4;
        picture->linesize[0] = c->width;
        picture->linesize[1] = c->width / 2;
        picture->linesize[2] = c->width / 2;

        /* encode 1 second of video */
        for(i=0;i<25;i++) {
            fflush(stdout);
            /* prepare a dummy image */
            /* Y */
            for(y=0;y<c->height;y++) {
                for(x=0;x<c->width;x++) {
                    picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
                }
            }

            /* Cb and Cr */
            for(y=0;y<c->height/2;y++) {
                for(x=0;x<c->width/2;x++) {
                    picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
                    picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
                }
            }

            /* encode the image */
            out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
            printf("encoding frame %3d (size=%5d)\n", i, out_size);
            fwrite(outbuf, 1, out_size, f);
        }

        /* get the delayed frames */
        for(; out_size; i++) {
            fflush(stdout);

            out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
            printf("write frame %3d (size=%5d)\n", i, out_size);
            fwrite(outbuf, 1, out_size, f);
        }

        /* add sequence end code to have a real mpeg file */
        outbuf[0] = 0x00;
        outbuf[1] = 0x00;
        outbuf[2] = 0x01;
        outbuf[3] = 0xb7;
        fwrite(outbuf, 1, 4, f);
        fclose(f);
        (*pEnv)->ReleaseStringUTFChars(pEnv, filename, path);
        free(picture_buf);
        free(outbuf);

        avcodec_close(c);
        av_free(c);
        av_free(picture);
        printf("\n");
       }
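
    A minimal way to adapt the dummy-image loop above to real stills, assuming the images have first been converted to raw YUV420p frames of the target size (for example with the ffmpeg command-line tool) and stored under hypothetical names like /sdcard/frames/1.yuv:

        /* replace the dummy-image generation with one raw YUV420p frame per iteration;
           a full frame is width*height*3/2 bytes, exactly what picture_buf holds */
        char path2[256];
        snprintf(path2, sizeof(path2), "/sdcard/frames/%d.yuv", i + 1);
        FILE *in = fopen(path2, "rb");
        if (!in) break;                      /* stop at the last available image */
        fread(picture_buf, 1, (size * 3) / 2, in);
        fclose(in);
        /* picture->data[] already points into picture_buf, so the frame can be
           passed to avcodec_encode_video() unchanged */

    For stills decoded to RGB in memory, the usual route is libswscale (sws_getContext / sws_scale) to convert to PIX_FMT_YUV420P first, as in the previous question.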

    Thanks and Regards
    Anish