Recherche avancée

Médias (91)

Autres articles (71)

  • Contribute to a better visual interface

    13 avril 2011

    MediaSPIP is based on a system of themes and templates. Templates define the placement of information on the page, and can be adapted to a wide range of uses. Themes define the overall graphic appearance of the site.
    Anyone can submit a new graphic theme or template and make it available to the MediaSPIP community.

  • ANNEXE : Les plugins utilisés spécifiquement pour la ferme

    5 mars 2010, par

    Le site central/maître de la ferme a besoin d’utiliser plusieurs plugins supplémentaires vis à vis des canaux pour son bon fonctionnement. le plugin Gestion de la mutualisation ; le plugin inscription3 pour gérer les inscriptions et les demandes de création d’instance de mutualisation dès l’inscription des utilisateurs ; le plugin verifier qui fournit une API de vérification des champs (utilisé par inscription3) ; le plugin champs extras v2 nécessité par inscription3 (...)

  • Récupération d’informations sur le site maître à l’installation d’une instance

    26 novembre 2010, par

    Utilité
    Sur le site principal, une instance de mutualisation est définie par plusieurs choses : Les données dans la table spip_mutus ; Son logo ; Son auteur principal (id_admin dans la table spip_mutus correspondant à un id_auteur de la table spip_auteurs)qui sera le seul à pouvoir créer définitivement l’instance de mutualisation ;
    Il peut donc être tout à fait judicieux de vouloir récupérer certaines de ces informations afin de compléter l’installation d’une instance pour, par exemple : récupérer le (...)

Sur d’autres sites (8674)

  • conversion from cv::Mat to AVFrame, encoding in h264 doesn't write correctly [on hold]

    15 novembre 2014, par danics

    I created a method for converting between cv::Mat and AVFrame with the PIX_FMT_YUV420P format (the oframe format), but _convCtx always returns null — what's wrong? Thanks in advance! ((I solved this)) — but I have another problem at the end of this question, please see below.

    void processToFrame(cv::Mat* _mat, AVFrame* oframe)
     {
         // Create and allocate the conversion frame.
         if (_oframe == nullptr) {
             _oframe = avcodec_alloc_frame();  
             if (_oframe == nullptr)
                 throw std::runtime_error("Matrix Converter: Could not allocate the output frame.");

             avpicture_alloc(reinterpret_cast(_oframe),
                 PIX_FMT_BGR24, _mat->cols, _mat->rows);            
         }

         avpicture_fill(reinterpret_cast(_oframe),
             (uint8_t *)_mat->data,
             AV_PIX_FMT_BGR24,
             _mat->cols,
             _mat->rows);

         // Convert the image from its native format to BGR.
         if (_convCtx == nullptr) {
             _convCtx = sws_getContext(
                 oframe->width, oframe->height, (enum AVPixelFormat) oframe->format,
                 _oframe->width, _oframe->height, PIX_FMT_BGR24,
                 SWS_BICUBIC, nullptr, nullptr, nullptr);
         }
         if (_convCtx == nullptr)
             throw std::runtime_error("Matrix Converter: Unable to initialize the conversion context.");  

         // Scales the source data according to our SwsContext settings.
         if (sws_scale(_convCtx,
             _oframe->data, _oframe->linesize, 0, _oframe->height,
             oframe->data, oframe->linesize) < 0)
             throw std::runtime_error("Matrix Converter: Pixel format conversion not supported.");
     }

    create oframe

    void createVideoFile(const char* filename, int w, int h, int codec_id, int fps)
     {
         /* find the mpeg1 video encoder */
         if(codec == nullptr)
         {
             /* find the mpeg1 video encoder */
             codec = avcodec_find_encoder((AVCodecID)codec_id);
             if (!codec) {
                 throw std::runtime_error("codec not found\n");
             }
         }

         if(c == nullptr)
         {
             c = avcodec_alloc_context3(codec);
             if(!c)
             {
                 throw std::runtime_error("Could not allocate video codec context\n");
             }
             /* put sample parameters */
             c->bit_rate = 400000;
             /* resolution must be a multiple of two */
             c->width = 2 * (w / 2);
             c->height = 2 * (h / 2);
             /* frames per second */
             AVRational ar;
             ar.den = 1;
             ar.num = fps;

             c->time_base= ar; //(AVRational){1,25};
             c->gop_size = 10; /* emit one intra frame every ten frames */
             c->max_b_frames=1;
             c->pix_fmt = PIX_FMT_YUV420P;

             if(codec_id == AV_CODEC_ID_H264)
                 av_opt_set(c->priv_data, "preset", "slow", 0);

             /* open it */
             if (avcodec_open2(c, codec, NULL) < 0) {
                 throw std::runtime_error("Could not open codec\n");
             }

             f = fopen(filename, "wb");
             if (!f) {
                 throw std::runtime_error("could not open file\n");
             }

             frame = avcodec_alloc_frame();
             if (!frame) {
                 throw std::runtime_error("Could not allocate video frame\n");
             }
             frame->format = c->pix_fmt;
             frame->width  = c->width;
             frame->height = c->height;

             /* the image can be allocated by any means and av_image_alloc() is
              * just the most convenient way if av_malloc() is to be used */

             int ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height, c->pix_fmt, 32);

             if (ret < 0) {
                 throw std::runtime_error("Could not allocate raw picture buffer\n");
             }
         }
    }

    read mat images

    void video_encode_example(const cv::Mat& fin)
    {
       // Encode one input image as one video frame and append any produced
       // packet to the (global) output file `f`.
       AVPacket packet;
       av_init_packet(&packet);
       packet.data = NULL;    // the encoder allocates the compressed buffer
       packet.size = 0;

       fflush(stdout);

       // Fit the image to the encoder's dimensions, convert it into the
       // global YUV frame, and stamp its presentation time.
       cv::Mat scaled;
       cv::resize(fin, scaled, cv::Size(c->width, c->height));
       processToFrame(&scaled, frame);
       frame->pts = i;

       // Run the encoder; it may buffer frames, so a packet is not
       // guaranteed on every call (got_output says whether one came out).
       const int rc = avcodec_encode_video2(c, &packet, frame, &got_output);
       if (rc < 0) {
       throw std::runtime_error("Error encoding frame\n");
       }

       if (got_output) {
           printf("Write frame %3d (size=%5d)\n", i, packet.size);
           fwrite(packet.data, 1, packet.size, f);
           av_free_packet(&packet);
       }
       i++;
    }

    update :

    I was able to solve the conversion problem by setting _oframe.width = _mat.cols and _oframe.height = _mat.rows; avpicture_alloc doesn't set these values itself.

    Now I have another problem: my video-writing class seems to write OK, because the Windows slideshow shows one of the frames, but I can't view the video in my players. Below is my release function — I don't know what's wrong, the encoding or the encoder's structure parameters such as pts and others.

    void video_release()
       {
           /* get the delayed frames */
           for (got_output = 1; got_output; i++) {
               fflush(stdout);

               int ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
               if (ret < 0) {
                   throw std::runtime_error("Error encoding frame\n");
               }

               if (got_output) {
                   printf("Write frame %3d (size=%5d)\n", i, pkt.size);
                   fwrite(pkt.data, 1, pkt.size, f);
                   av_free_packet(&pkt);
               }
           }

           uint8_t endcode[] = { 0, 0, 1, 0xb7 };

           /* add sequence end code to have a real mpeg file */
           fwrite(endcode, 1, sizeof(endcode), f);
           fclose(f);

           avcodec_close(c);
           av_free(c);
           av_freep(&frame->data[0]);
           avcodec_free_frame(&frame);

           c = nullptr;
           codec = nullptr;
           frame = nullptr;

           if (_convCtx)
               sws_freeContext(_convCtx);

           if (_oframe) {
               //if (_oframe->data)
                   //av_free(_oframe->data[0]);
               av_free(_oframe);
           }

           _oframe = nullptr;
           _convCtx = nullptr;
       }
  • Use ffmpeg to stream live content to azure media services

    9 juin 2016, par Dadicool

    I’ve been trying to stream content to azure media services using ffmpeg as it’s one of the options described here : http://azure.microsoft.com/blog/2014/09/18/azure-media-services-rtmp-support-and-live-encoders/

    My command is :

    ffmpeg -v verbose -i 300.mp4 -strict -2 -c:a aac -b:a 128k -ar 44100 -r 30 -g 60 -keyint_min 60 -b:v 400000 -c:v libx264 -preset medium -bufsize 400k -maxrate 400k -f flv rtmp://nessma-****.channel.mediaservices.windows.net:1935/live/584c99f5c47f424d9e83ac95364331e7

    I have made sure that the streaming endpoint has one active streaming unit, I also made sure that the channel is actually Ready and I even get it to start streaming (which makes a PublishURL available).

    When I execute the ffmpeg command to start streaming, I keep getting the following error :

    ffmpeg version 2.5.2 Copyright (c) 2000-2014 the FFmpeg developers
     built on Dec 30 2014 11:31:18 with llvm-gcc 4.2.1 (LLVM build 2336.11.00)
     configuration: --prefix=/Volumes/Ramdisk/sw --enable-gpl --enable-pthreads --enable-version3 --enable-libspeex --enable-libvpx --disable-decoder=libvpx --enable-libmp3lame --enable-libtheora --enable-libvorbis --enable-libx264 --enable-avfilter --enable-libopencore_amrwb --enable-libopencore_amrnb --enable-filters --enable-libgsm --enable-libvidstab --enable-libx265 --arch=x86_64 --enable-runtime-cpudetect
     libavutil      54. 15.100 / 54. 15.100
     libavcodec     56. 13.100 / 56. 13.100
     libavformat    56. 15.102 / 56. 15.102
     libavdevice    56.  3.100 / 56.  3.100
     libavfilter     5.  2.103 /  5.  2.103
     libswscale      3.  1.101 /  3.  1.101
     libswresample   1.  1.100 /  1.  1.100
     libpostproc    53.  3.100 / 53.  3.100
    Routing option strict to both codec and muxer layer
    [mov,mp4,m4a,3gp,3g2,mj2 @ 0x7f9a0a002c00] overread end of atom 'colr' by 1 bytes
    [mov,mp4,m4a,3gp,3g2,mj2 @ 0x7f9a0a002c00] stream 0, timescale not set
    [mov,mp4,m4a,3gp,3g2,mj2 @ 0x7f9a0a002c00] max_analyze_duration 5000000 reached at 5003637 microseconds
    Input #0, mov,mp4,m4a,3gp,3g2,mj2, from '300.mp4':
     Metadata:
       major_brand     : mp42
       minor_version   : 0
       compatible_brands: mp42isomavc1
       creation_time   : 2014-01-11 05:39:32
       genre           : Trailer
       artist          : Warner Bros.
       title           : 300: Rise of an Empire - Trailer 2
       encoder         : HandBrake 0.9.9 2013051800
       date            : 2014
     Duration: 00:02:33.24, start: 0.000000, bitrate: 7377 kb/s
       Stream #0:0(und): Video: h264 (High) (avc1 / 0x31637661), yuv420p(tv, bt709), 1920x1080 (1920x1088), 7219 kb/s, 23.98 fps, 23.98 tbr, 90k tbn, 47.95 tbc (default)
       Metadata:
         creation_time   : 2014-01-11 05:39:32
         encoder         : JVT/AVC Coding
       Stream #0:1(eng): Audio: aac (LC) (mp4a / 0x6134706D), 48000 Hz, stereo, fltp, 157 kb/s (default)
       Metadata:
         creation_time   : 2014-01-11 05:39:32
       Stream #0:2: Video: mjpeg, yuvj420p(pc, bt470bg/unknown/unknown), 101x150 [SAR 72:72 DAR 101:150], 90k tbr, 90k tbn, 90k tbc
    rtmp://nessma-****.channel.mediaservices.windows.net:1935/live/584c99f5c47f424d9e83ac95364331e7: Input/output error

    The Azure blog post clearly states that this should be possible but I can’t find a working example anywhere.

    Environment :

    • MacOS Maverick
    • FFMPEG installed from official build
    • 300.mp4 : 1080p trailer of the latest 300 movie
  • FFmpeg muxing to avi

    2 septembre 2015, par Uncia

    I have a program that successfully shows an h264 stream using SDL: I get an h264 frame, decode it using ffmpeg and draw it using SDL.
    I can also write frames to a file (using fwrite) and play that file with ffplay.

    But I want to mux data to the avi and face some problems in av_write_frame.

    Here is my code :

    ...
    /* Initializing the output format context — outFormatContext is a member
     * of my class.  Guess the AVI container from the file name, force the
     * video codec to H264 and disable audio. */
    AVOutputFormat *outFormat;
    outFormat = av_guess_format(NULL,"out.avi",NULL);
    outFormat->video_codec = AV_CODEC_ID_H264;
    outFormat->audio_codec = AV_CODEC_ID_NONE;
    avformat_alloc_output_context2(&outFormatContext, outFormat, NULL, "out.avi");
    AVCodec *outCodec;
    /* Create the single video stream and open its encoder. */
    AVStream *outStream = add_stream(outFormatContext, &outCodec, outFormatContext->oformat->video_codec);
    avcodec_open2(outStream->codec, outCodec, NULL);
    av_dump_format(outFormatContext, 0, "out.avi", 1);
    /* Open the output file and write the container header. */
    if (avio_open(&outFormatContext->pb, "out.avi", AVIO_FLAG_WRITE) < 0)
       throw Exception("Couldn't open file");
    if (avformat_write_header(outFormatContext, NULL) < 0)
       throw Exception("Couldn't write to file");
    // No exceptions are thrown here — so a 6 KB header does end up in out.avi.
    ...

    static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
                           enum AVCodecID codec_id)
    {
    /* Create a new video stream in `oc` and configure its codec context
     * for 1920x1080 H264 at 25 fps.  Returns the new stream; the encoder
     * is looked up into *codec but not opened here. */
    AVCodecContext *c;
    AVStream *st;
    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec))
       /* BUG FIX: `throw("...")` threw a bare const char*, which is not
        * derived from std::exception and bypassed the callers' handlers. */
       throw std::runtime_error("Could not find encoder");
    st = avformat_new_stream(oc, *codec);
    if (!st)
       throw std::runtime_error("Could not allocate stream");
    st->id = oc->nb_streams-1;
    c = st->codec;
    c->bit_rate = 400000;
    /* Resolution must be a multiple of two. */
    c->width    = 1920;
    c->height   = 1080;
    c->pix_fmt  = PIX_FMT_YUV420P;
    c->flags = 0;
    c->time_base.num = 1;
    c->time_base.den = 25;
    c->gop_size      = 12; /* emit one intra frame every twelve frames at most */
    /* Some container formats want stream headers to be separate (the
     * step from the official muxing example that the original omitted;
     * without it av_write_frame can reject packets). */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
       c->flags |= CODEC_FLAG_GLOBAL_HEADER;
    return st;
    }
    ...
    /* Part of the decoding loop. `packet` is an AVPacket holding one h264 packet. */
    int ret = av_write_frame(outFormatContext, &packet); // returns -22 (AVERROR(EINVAL): invalid argument)
    if (avcodec_decode_video2(pCodecCtx, pFrame, &frameDecoded, &packet) < 0)
       return;
    if (frameDecoded)
    {
      //SDL stuff
    }

    I also tried to use avcodec_encode_video2 (to encode pFrame back to H264) next to the SDL stuff, but the encoding is not working — I get empty packets :( That is the second problem.

    Using av_interleaved_write_frame causes an access violation.

    Code of the muxing part i copied from ffmpeg muxing example (https://www.ffmpeg.org/doxygen/2.1/doc_2examples_2muxing_8c-example.html)