
Other articles (68)

  • User profiles

    12 April 2011, by

    Each user has a profile page where they can edit their personal information. In the default top-of-page menu, a menu item is created automatically when MediaSPIP is initialised, visible only when the visitor is logged in to the site.
    The user can also reach the profile editor from their author page; a "Modifier votre profil" (edit your profile) link in the navigation is (...)

  • Configuring language support

    15 November 2010, by

    Accessing the configuration and adding supported languages
    To configure support for new languages, go to the "Administrer" (administration) area of the site.
    From there, the navigation menu gives access to a "Gestion des langues" (language management) section where support for new languages can be enabled.
    Each newly added language can still be disabled as long as no object has been created in that language; once one has, it is greyed out in the configuration and (...)

  • XMP PHP

    13 May 2011, by

    According to Wikipedia, XMP stands for:
    Extensible Metadata Platform, or XMP, an XML-based metadata format used in PDF, photography and graphic-design applications. It was launched by Adobe Systems in April 2001, integrated into version 5.0 of Adobe Acrobat.
    Being based on XML, it handles a set of dynamic tags for use in the context of the Semantic Web.
    XMP makes it possible to record, as an XML document, information about a file: title, author, history (...)

On other sites (6734)

  • FFmpeg mux video using libavformat and avcodec, but the output can't be played

    10 August 2017, by tqn

    I’m trying to write an app that takes an input video, crops it to a square video, and drops the audio stream. Because shelling out to the command-line tool performs badly, I’m trying to use libavcodec and libavformat to do it. But the output isn’t playable by any video player and its duration is 0, even though I wrote all the frames. Here is my code.

    void convert_video(char* input) {
       AVFormatContext *pFormatCtx = NULL;
       int             i, videoStreamIndex;
       AVCodecContext  *pCodecCtx = NULL;
       AVCodec         *pCodec = NULL;
       AVFrame         *pFrame = NULL;
       AVFrame         *pFrameSquare = NULL;
       AVPacket        packet, outPacket;
       int             frameFinished;
       int             numBytes;
       uint8_t         *buffer = NULL;
       AVCodec         *pEncodec = NULL;
       AVFormatContext *poFormatCxt = NULL;
       MuxOutputStream    videoStream = {0}, audioStream = {0};
       int tar_w, tar_h;

       const enum AVPixelFormat pic_format = AV_PIX_FMT_YUV420P;
       const enum AVCodecID codec_id = AV_CODEC_ID_H264;
       AVDictionary    *optionsDict = NULL;
       char output[50];
       sprintf(output, "%soutput.mp4", ANDROID_SDCARD);

       // Register all formats and codecs
       av_register_all();

       // Open video file
       if(avformat_open_input(&pFormatCtx, input, NULL, NULL)!=0)
           return; // Couldn't open file
       avformat_alloc_output_context2(&poFormatCxt, NULL, NULL, output);

       // Retrieve stream information
       if(avformat_find_stream_info(pFormatCtx, NULL)<0)
           return; // Couldn't find stream information

       // Find the first video stream
       videoStreamIndex=-1;
       for(i=0; i<pFormatCtx->nb_streams; i++)
           if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
               videoStreamIndex=i;
               break;
           }
       if(videoStreamIndex==-1)
           return; // Didn't find a video stream

       // Get a pointer to the codec context for the video stream
       pCodecCtx = pFormatCtx->streams[videoStreamIndex]->codec;
       tar_w = pCodecCtx->width > pCodecCtx->height ? pCodecCtx->height : pCodecCtx->width;
       tar_h = tar_w;

       // Find the decoder for the video stream
       pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
       pEncodec = avcodec_find_encoder(codec_id);

       add_stream_mux(&videoStream, poFormatCxt, &pEncodec, codec_id, tar_w, tar_h);
       videoStream.st[0].time_base = pFormatCtx->streams[videoStreamIndex]->time_base;
       videoStream.st[0].codec->time_base = videoStream.st[0].time_base;
       videoStream.st[0].codec->time_base.den *= videoStream.st[0].codec->ticks_per_frame;
    //    add_stream(&audioStream, poFormatCxt, &)
       open_video(poFormatCxt, pEncodec, &videoStream, optionsDict);
       int ret = avio_open(&poFormatCxt->pb, output, AVIO_FLAG_WRITE);

       // Open codec
       if(avcodec_open2(pCodecCtx, pCodec, &optionsDict) < 0)
           return; // Could not open codec

       ret = avformat_write_header(poFormatCxt, &optionsDict);
       if (ret != 0) {
           ANDROID_LOG("Died");
       }

       // Allocate video frame
       pFrame=av_frame_alloc();
       pFrame->format = videoStream.st->codec->pix_fmt;
       pFrame->width = pCodecCtx->width;
       pFrame->height = pCodecCtx->height;
       av_frame_get_buffer(pFrame, 32);

       // Allocate an AVFrame structure
       pFrameSquare=av_frame_alloc();
       if(pFrameSquare==NULL)
           return;

       // Determine required buffer size and allocate buffer
       numBytes=avpicture_get_size(pic_format, tar_w,
                                   tar_h);
       buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

       // Assign appropriate parts of buffer to image planes in pFrameSquare
       // Note that pFrameSquare is an AVFrame, but AVFrame is a superset
       // of AVPicture
       ret = avpicture_fill((AVPicture *)pFrameSquare, buffer, pic_format,
                      tar_w, tar_h);
       if (ret < 0) {
           ANDROID_LOG("Can't fill picture");
           return;
       }

       // Read frames and save first five frames to disk
       i=0;
       ret = av_read_frame(pFormatCtx, &packet);
       while(ret >= 0) {
           // Is this a packet from the video stream?
           if(packet.stream_index == videoStreamIndex) {
               // Decode video frame
    //            av_packet_rescale_ts(&packet, videoStream.st->time_base, videoStream.st->codec->time_base);
               avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                                     &packet);
    //            while (!frameFinished) {
    //                avcodec_decode_video2(videoStream.st->codec, pFrame, &frameFinished, NULL);
    //            }
               ANDROID_LOG("Trying to decode frame %d with result %d", i, frameFinished);
               ret = av_picture_crop((AVPicture*) pFrameSquare, (AVPicture*) pFrame, pic_format, 0, 0);
               if (ret < 0) {
                   ANDROID_LOG("Can't crop image");
               }
    //            av_frame_get_best_effort_timestamp(pFrame);
    //            av_rescale_q()

               if(frameFinished) {

                   // Save the frame to disk
                   av_init_packet(&outPacket);
    //                av_packet_rescale_ts(&outPacket, videoStream.st->codec->time_base, videoStream.st->time_base);
                   pFrameSquare->width = tar_w;
                   pFrameSquare->height = tar_h;
                   pFrameSquare->format = pic_format;
                   pFrameSquare->pts = ++videoStream.next_pts;
                   ret = avcodec_encode_video2(videoStream.st->codec, &outPacket, pFrameSquare, &frameFinished);

    //                int count = 0;
    //                while (!frameFinished && count++ < 6) {
    //                    ret = avcodec_encode_video2(videoStream.st->codec, &outPacket, NULL, &frameFinished);
    //                }
                   if (frameFinished) {
                       ANDROID_LOG("Writing frame %d", i);
                       outPacket.stream_index = videoStreamIndex;
                       av_interleaved_write_frame(poFormatCxt, &outPacket);
                   }
                   av_free_packet(&outPacket);
               }
           }

           // Free the packet that was allocated by av_read_frame
           av_free_packet(&packet);
           ret = av_read_frame(pFormatCtx, &packet);
       }

       ret = av_write_trailer(poFormatCxt);
       if (ret < 0) {
           ANDROID_LOG("Couldn't write trailer");
       } else {
           ANDROID_LOG("Video convert finished");
       }

       // Free the RGB image
       av_free(buffer);
       av_free(pFrameSquare);

       // Free the YUV frame
       av_free(pFrame);

       // Close the codec
       avcodec_close(pCodecCtx);
    //    avcodec_close(pEncodecCtx);

       // Close the video file
       avformat_close_input(&pFormatCtx);

       return;
    }

    Helper

    #define STREAM_DURATION   10.0
    #define STREAM_FRAME_RATE 25 /* 25 images/s */
    #define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P /* default pix_fmt */

    /* Add an output stream. */
    void add_stream_mux(MuxOutputStream *ost, AVFormatContext *oc,
                          AVCodec **codec,
                          enum AVCodecID codec_id, int width, int height)
    {
       AVCodecContext *codecCtx;
       int i;
       /* find the encoder */
       *codec = avcodec_find_encoder(codec_id);
       if (!(*codec)) {
           fprintf(stderr, "Could not find encoder for '%s'\n",
                   avcodec_get_name(codec_id));
           exit(1);
       }
       ost->st = avformat_new_stream(oc, *codec);
       if (!ost->st) {
           fprintf(stderr, "Could not allocate stream\n");
           exit(1);
       }
       ost->st->id = oc->nb_streams-1;
       codecCtx = ost->st->codec;
       switch ((*codec)->type) {
           case AVMEDIA_TYPE_AUDIO:
               codecCtx->sample_fmt  = (*codec)->sample_fmts ?
                                (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
               codecCtx->bit_rate    = 64000;
               codecCtx->sample_rate = 44100;
               if ((*codec)->supported_samplerates) {
                   codecCtx->sample_rate = (*codec)->supported_samplerates[0];
                   for (i = 0; (*codec)->supported_samplerates[i]; i++) {
                       if ((*codec)->supported_samplerates[i] == 44100)
                           codecCtx->sample_rate = 44100;
                   }
               }
               codecCtx->channels        = av_get_channel_layout_nb_channels(codecCtx->channel_layout);
               codecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
               if ((*codec)->channel_layouts) {
                   codecCtx->channel_layout = (*codec)->channel_layouts[0];
                   for (i = 0; (*codec)->channel_layouts[i]; i++) {
                       if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
                           codecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
                   }
               }
               codecCtx->channels        = av_get_channel_layout_nb_channels(codecCtx->channel_layout);
               ost->st->time_base = (AVRational){ 1, codecCtx->sample_rate };
               break;
           case AVMEDIA_TYPE_VIDEO:
               codecCtx->codec_id = codec_id;
               codecCtx->bit_rate = 400000;
               /* Resolution must be a multiple of two. */
               codecCtx->width    = width;
               codecCtx->height   = height;
               /* timebase: This is the fundamental unit of time (in seconds) in terms
                * of which frame timestamps are represented. For fixed-fps content,
                * timebase should be 1/framerate and timestamp increments should be
                * identical to 1. */
               ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
               codecCtx->time_base       = ost->st->time_base;
               codecCtx->gop_size      = 12; /* emit one intra frame every twelve frames at most */
               codecCtx->pix_fmt       = STREAM_PIX_FMT;
               if (codecCtx->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
                   /* just for testing, we also add B frames */
                   codecCtx->max_b_frames = 2;
               }
               if (codecCtx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
                   /* Needed to avoid using macroblocks in which some coeffs overflow.
                    * This does not happen with normal video, it just happens here as
                    * the motion of the chroma plane does not match the luma plane. */
                   codecCtx->mb_decision = 2;
               }
               break;
           default:
               break;
       }
       /* Some formats want stream headers to be separate. */
       if (oc->oformat->flags & AVFMT_GLOBALHEADER)
           codecCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    static void open_video(AVFormatContext *oc, AVCodec *codec, MuxOutputStream *ost, AVDictionary *opt_arg)
    {
       int ret;
       AVCodecContext *c = ost->st->codec;
       AVDictionary *opt = NULL;
       av_dict_copy(&opt, opt_arg, 0);
       /* open the codec */
       ret = avcodec_open2(c, codec, &opt);
       av_dict_free(&opt);
       if (ret < 0) {
           fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
           exit(1);
       }
       /* allocate and init a re-usable frame */
       ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
       if (!ost->frame) {
           fprintf(stderr, "Could not allocate video frame\n");
           exit(1);
       }
       /* If the output format is not YUV420P, then a temporary YUV420P
        * picture is needed too. It is then converted to the required
        * output format. */
       ost->tmp_frame = NULL;
       if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
           ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
           if (!ost->tmp_frame) {
               fprintf(stderr, "Could not allocate temporary picture\n");
               exit(1);
           }
       }
    }
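
     open_video calls alloc_picture, which isn't shown above; a minimal version, following the pattern of FFmpeg's muxing.c example for this generation of the API, would be:

     static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
     {
        AVFrame *picture;

        picture = av_frame_alloc();
        if (!picture)
            return NULL;
        picture->format = pix_fmt;
        picture->width  = width;
        picture->height = height;
        /* allocate the buffers for the frame data, 32-byte aligned */
        if (av_frame_get_buffer(picture, 32) < 0) {
            fprintf(stderr, "Could not allocate frame data\n");
            exit(1);
        }
        return picture;
     }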

    I’m afraid I set the wrong pts or time_base on the frames. Also, when decoding or encoding, I see that the first few frames are lost: frameFinished is 0. I saw a post saying I have to flush the decoder with avcodec_decode_video2(videoStream.st->codec, pFrame, &frameFinished, NULL), but after trying a few times frameFinished is still 0, and avcodec_encode_video2(videoStream.st->codec, &outPacket, NULL, &frameFinished) throws an error on the next encoded frame. So how can I recover all the lost frames? I’m using FFmpeg version 3.0.1.
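
    With FFmpeg 3.x, the usual end-of-stream pattern is: flush the decoder with an empty packet until it stops returning frames, flush the encoder with a NULL frame until it stops returning packets, and rescale packet timestamps from the encoder time base into the stream time base before muxing. A minimal sketch using the names from the code above (not a drop-in fix; note also that the output packet's stream_index should be the output stream's index, not the input's videoStreamIndex):

     /* flush the decoder: an empty packet drains buffered frames */
     AVPacket flushPkt;
     av_init_packet(&flushPkt);
     flushPkt.data = NULL;
     flushPkt.size = 0;
     do {
         avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &flushPkt);
         /* crop + encode any frame returned here, as in the main loop */
     } while (frameFinished);

     /* flush the encoder: a NULL frame drains buffered packets */
     int gotPacket;
     do {
         av_init_packet(&outPacket);
         outPacket.data = NULL;
         outPacket.size = 0;
         avcodec_encode_video2(videoStream.st->codec, &outPacket, NULL, &gotPacket);
         if (gotPacket) {
             /* encoder time base -> stream time base */
             av_packet_rescale_ts(&outPacket, videoStream.st->codec->time_base,
                                  videoStream.st->time_base);
             outPacket.stream_index = videoStream.st->index; /* output stream index */
             av_interleaved_write_frame(poFormatCxt, &outPacket);
         }
         av_free_packet(&outPacket);
     } while (gotPacket);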

  • C Code Using FFmpeg libraries - Compilation Error

    17 August 2013, by Ardatr

    I've recently been trying to compile C code that uses the FFmpeg libraries; however, the compilation fails for a reason that I cannot determine.

    The code that I've been trying to compile is the filtering_audio.c file on the Doxygen documentation website for FFmpeg (I'll provide the link, since the code is too long to quote here): http://ffmpeg.org/doxygen/trunk/doc_2examples_2filtering_audio_8c-example.html

    I use gcc to compile the code:

       gcc filter.c -lavformat -lavcodec -lavfilter  -lavutil

    And I get the following undefined reference errors:

       /tmp/cc90K2S5.o: In function `init_filters':
       filter.c:(.text+0x3e5): undefined reference to `av_int_list_length_for_size'
       filter.c:(.text+0x407): undefined reference to `av_int_list_length_for_size'
       filter.c:(.text+0x42d): undefined reference to `av_opt_set_bin'
       filter.c:(.text+0x482): undefined reference to `av_int_list_length_for_size'
       filter.c:(.text+0x4a4): undefined reference to `av_int_list_length_for_size'
       filter.c:(.text+0x4ca): undefined reference to `av_opt_set_bin'
       filter.c:(.text+0x51f): undefined reference to `av_int_list_length_for_size'
       filter.c:(.text+0x541): undefined reference to `av_int_list_length_for_size'
       filter.c:(.text+0x567): undefined reference to `av_opt_set_bin'
       /tmp/cc90K2S5.o: In function `print_frame':
       filter.c:(.text+0x76b): undefined reference to `av_frame_get_channel_layout'
       /tmp/cc90K2S5.o: In function `main':
       filter.c:(.text+0x831): undefined reference to `av_frame_alloc'
       filter.c:(.text+0x83d): undefined reference to `av_frame_alloc'
       filter.c:(.text+0x9de): undefined reference to `av_buffersrc_add_frame_flags'
       filter.c:(.text+0xa16): undefined reference to `av_buffersink_get_frame'
       filter.c:(.text+0xa58): undefined reference to `av_frame_unref'
       filter.c:(.text+0xab6): undefined reference to `av_frame_free'
       filter.c:(.text+0xac5): undefined reference to `av_frame_free'
       collect2: error: ld returned 1 exit status

    I understand that the undefined reference errors indicate that the linker wasn't able to find the functions referenced from filtering_audio.c, but this doesn't make sense to me, since these functions should exist in the FFmpeg libraries.

    Any help is appreciated, thank you!
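
    The missing symbols (av_frame_alloc, av_opt_set_bin, av_buffersrc_add_frame_flags, av_buffersink_get_frame) only appeared in relatively recent FFmpeg releases, so undefined references at link time usually mean the linker is picking up an older installed copy of the libraries, or the library search paths are wrong, rather than the code being at fault. A sketch of a pkg-config-driven compile line, assuming the .pc files of the intended FFmpeg build are installed:

       gcc filter.c -o filter $(pkg-config --cflags --libs libavformat libavcodec libavfilter libavutil)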

  • Green screen when writing FLV file with libavformat

    5 May 2013, by YYZ

    I've written a piece of C++ code that can capture webcam video frames, decode them, convert them to YUV420P, encode them, and then write them to a file. If I use the mpeg2 codec and write to a .mpg file, everything works perfectly. But if I use flv, the output produced is just a green screen. Are there different encoder settings I need to set for encoding flv video? Here's my code (the relevant parts):

    Encoder settings:

    c->codec_id = codec_id;
    c->bit_rate = 400000;
    // Resolution must be a multiple of two.
    c->width    = 352;
    c->height   = 288;
    c->time_base.den = STREAM_FRAME_RATE;
    c->time_base.num = 1;
    //emit one intra frame every twelve frames at most
    c->gop_size      = 12;
    c->pix_fmt       = STREAM_PIX_FMT;

    Write the frames

    int ret;
    uint8_t *buffer = NULL;
    static struct SwsContext *sws_ctx;

    //Setup the codec context, and set its width and height to be equal to the input video width and height
    AVCodecContext *c = st->codec;
    c->width = pCodecCtx->width;
    c->height = pCodecCtx->height;

    av_init_packet(&packet);
    frameYUV = avcodec_alloc_frame();

    //Determine how big the buffer will need to be to store the captured frame
    numBytes = avpicture_get_size(STREAM_PIX_FMT,pCodecCtx->width,pCodecCtx->height);

    //Allocate the needed buffer size
    buffer = new uint8_t[numBytes];
    sws_ctx = sws_getContext(pCodecCtx->width,pCodecCtx->height,pCodecCtx->pix_fmt,
                           pCodecCtx->width,pCodecCtx->height,
                           STREAM_PIX_FMT,SWS_BICUBIC,NULL,NULL,NULL);

    //Fill the output frame
    avpicture_fill((AVPicture *)frameYUV,buffer,STREAM_PIX_FMT,pCodecCtx->width,pCodecCtx->height);

    //Read a video frame in
    av_read_frame(pFormatCtx,&packet);

    //Decode the contents of packet into pFrame
    avcodec_decode_video2(pCodecCtx,pFrame,&frameFinished,&packet);

    //Scale pFrame into frameYUV, and convert to PIXFMTYUV420P
    sws_scale
    (
       sws_ctx,
       (uint8_t const * const *)pFrame->data,
       pFrame->linesize,
       0,
       pCodecCtx->height,
       frameYUV->data,
       frameYUV->linesize
    );
    av_init_packet(&samsPacket);
    //Encode frameYUV
    avcodec_encode_video2(c, &samsPacket, frameYUV, &gotSamsPacket);

    AVPacket pkt = { 0 };
    int got_packet;
    av_init_packet(&pkt);
    // encode the image
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0){
       debugLogStreamSave->debug("Error encoding video frame");
       exit(1);
    }
    if (!ret && got_packet && pkt.size){
       pkt.stream_index = st->index;

       // Write the compressed frame to our output
       ret = av_interleaved_write_frame(oc, &pkt);

    Any help would be appreciated!
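
    For what it's worth, a solid green picture is what all-zero YUV planes look like, so the encoder is most likely receiving empty frame data; it can also help to give every frame an explicit pts, which some muxers tolerate being absent and others do not. A sketch of the pts handling for this era of the API, using the names from the snippet above (frame_count is a hypothetical counter kept by the caller):

    // give each frame a monotonically increasing pts in the codec time base
    frameYUV->pts = frame_count++;

    ret = avcodec_encode_video2(c, &pkt, frameYUV, &got_packet);
    if (ret >= 0 && got_packet && pkt.size) {
       // rescale packet timestamps from codec time base to stream time base
       if (pkt.pts != AV_NOPTS_VALUE)
           pkt.pts = av_rescale_q(pkt.pts, c->time_base, st->time_base);
       if (pkt.dts != AV_NOPTS_VALUE)
           pkt.dts = av_rescale_q(pkt.dts, c->time_base, st->time_base);
       pkt.stream_index = st->index;
       ret = av_interleaved_write_frame(oc, &pkt);
    }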