
Other articles (61)

  • Videos

    21 April 2011, by

    Like "audio" documents, MediaSPIP displays videos, whenever possible, using the HTML5 video tag.
    One drawback of this tag is that it is not recognised correctly by some browsers (Internet Explorer, to name one), and each browser natively handles only certain video formats.
    Its main advantage, on the other hand, is native video playback in the browser, which makes it possible to do without Flash and (...)

  • Permissions overridden by plugins

    27 April 2010, by

    MediaSPIP core
    autoriser_auteur_modifier() so that visitors are able to edit their information on the authors page

  • Customising the display of my MediaSPIP

    27 May 2013

    You can modify the skeleton (template) configuration to customise your MediaSPIP. See also more information by following this link.
    How do I remove the view count displayed for a media item?
    Administer > Skeleton management > Article and media pages: under "Information not displayed on media pages", tick the items you do not want to show.
    How do I remove the title of my MediaSPIP from the horizontal banner?
    Administer > Skeleton management > (...)

On other sites (9211)

  • Revision 34014: handle the author logo in the forum (compatible with the gravatar plugin)

    28 December 2009, by fil@… — Log

    handle the author logo in the forum (compatible with the gravatar plugin)

  • FFmpeg how to apply "aac_adtstoasc" and "h264_mp4toannexb" bitstream filters to transcode to h.264 with AAC

    9 July 2015, by larod

    I've been struggling with this issue for about a month. I have studied the FFmpeg documentation, more specifically transcode_aac.c, transcoding.c, decoding_encoding.c, and HandBrake's implementation, which is really dense.

    The error that I'm getting is the following: [mp4 @ 0x11102f800] Malformed AAC bitstream detected: use the audio bitstream filter 'aac_adtstoasc' to fix it ('-bsf:a aac_adtstoasc' option with ffmpeg).

    The research I've done points to a bitstream filter that needs to be applied.

    FIX: AAC in some container formats (FLV, MP4, MKV, etc.) needs the "aac_adtstoasc" bitstream filter (BSF).

    I know I can do the following:

    AVBitStreamFilterContext* aacbsfc =  av_bitstream_filter_init("aac_adtstoasc");

    And then do something like this:

    av_bitstream_filter_filter(aacbsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);

    What eludes me is when to filter the AVPacket: is it before calling av_packet_rescale_ts, or inside init_filter? I would greatly appreciate it if someone could point me in the right direction. Thanks in advance.
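
    Not an authoritative answer, but one placement that mirrors what ffmpeg.c itself does is to run the audio packet through the filter right before it is written, i.e. just before av_interleaved_write_frame() in the remux branch. The sketch below uses the same pre-3.0 bitstream-filter API as the snippet above; apply_bsf is a hypothetical helper and assumes the same headers as the rest of the file.

    // Hypothetical helper: run an encoded/stream-copied AAC packet through the
    // aac_adtstoasc bitstream filter so it can be muxed into MP4.
    static int apply_bsf(AVBitStreamFilterContext *bsfc, AVStream *st, AVPacket *pkt)
    {
        AVPacket new_pkt = *pkt;
        int ret = av_bitstream_filter_filter(bsfc, st->codec, NULL,
                                             &new_pkt.data, &new_pkt.size,
                                             pkt->data, pkt->size,
                                             pkt->flags & AV_PKT_FLAG_KEY);
        if (ret == 0 && new_pkt.data != pkt->data) {
            // The filter returned a pointer into the old buffer: copy the data
            // so the packet owns it.
            uint8_t *t = (uint8_t *)av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!t)
                return AVERROR(ENOMEM);
            memcpy(t, new_pkt.data, new_pkt.size);
            memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
            new_pkt.data = t;
            new_pkt.buf  = NULL;
            ret = 1;
        }
        if (ret > 0) {
            // New data was allocated (by the filter or the copy above): own it.
            av_free_packet(pkt);
            new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
                                           av_buffer_default_free, NULL, 0);
            if (!new_pkt.buf)
                return AVERROR(ENOMEM);
        }
        if (ret < 0)
            return ret;
        *pkt = new_pkt;
        return 0;
    }

    // ...and then, in the transcode loop, just before av_interleaved_write_frame():
    // if (type == AVMEDIA_TYPE_AUDIO)
    //     apply_bsf(_aacbsfc, _ofmt_ctx->streams[stream_index], &packet);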

    // Variables
    AVFormatContext *_ifmt_ctx, *_ofmt_ctx;
    FilteringContext *_filter_ctx;
    AVBitStreamFilterContext *_h264bsfc;
    AVBitStreamFilterContext *_aacbsfc;
    NSURL *_srcURL, *_dstURL;

    - (IBAction)trancode:(id)sender {
           NSLog(@"%s %@",__func__, _mediaFile.fsName);
           int ret, got_frame;
           int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
           unsigned int stream_index, i;
           enum AVMediaType type;
           AVPacket packet = {.data = NULL, .size = 0};
           AVFrame *frame = NULL;
           _h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
           _aacbsfc =  av_bitstream_filter_init("aac_adtstoasc");

       _srcURL = [Utility urlFromBookmark:_mediaFile.bookmark];
       if ([_srcURL startAccessingSecurityScopedResource]) {
           NSString *newFileName = [[_srcURL.lastPathComponent stringByDeletingPathExtension]stringByAppendingPathExtension:@"mp4"];
           _dstURL = [NSURL fileURLWithPath:[[_srcURL URLByDeletingLastPathComponent]URLByAppendingPathComponent:newFileName].path isDirectory:NO];

           [AppDelegate ffmpegRegisterAll];

           ret = open_input_file(_srcURL.path.fileSystemRepresentation);
           if (ret < 0) {
               NSLog(@"Error openning input file.");
           }

           ret = open_output_file(_dstURL.path.fileSystemRepresentation);
           if (ret < 0) {
               NSLog(@"Error openning output file.");
           }

           ret = init_filters();
           if (ret < 0) {
               NSLog(@"Error initializing filters.");
           }

           AVBitStreamFilterContext *h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
           AVBitStreamFilterContext* aacbsfc =  av_bitstream_filter_init("aac_adtstoasc");
           // Transcode *******************************************************************************
           while (1) {
               if ((ret = av_read_frame(_ifmt_ctx, &packet)) < 0) {
                   break;
               }
               stream_index = packet.stream_index;
               type = _ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
               av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", stream_index);



               if (_filter_ctx[stream_index].filter_graph) {
                   av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
                   frame = av_frame_alloc();
                   if (!frame) {
                       ret = AVERROR(ENOMEM);
                       break;
                   }

                   av_packet_rescale_ts(&packet, _ifmt_ctx->streams[stream_index]->time_base, _ifmt_ctx->streams[stream_index]->codec->time_base);
                   dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 : avcodec_decode_audio4;
                   ret = dec_func(_ifmt_ctx->streams[stream_index]->codec, frame, &got_frame, &packet);
                   if (ret < 0) {
                       av_frame_free(&frame);
                       av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                       break;
                   }

                   if (got_frame) {
                       frame->pts = av_frame_get_best_effort_timestamp(frame);
                       ret = filter_encode_write_frame(frame, stream_index);
                       av_frame_free(&frame);
                       if (ret < 0)
                           goto end;
                   } else {
                       av_frame_free(&frame);
                   }
               } else {
                   /* remux this frame without reencoding */
                   av_packet_rescale_ts(&packet,
                                        _ifmt_ctx->streams[stream_index]->time_base,
                                        _ofmt_ctx->streams[stream_index]->time_base);

                   ret = av_interleaved_write_frame(_ofmt_ctx, &packet);
                   if (ret < 0)
                       goto end;
               }
               av_free_packet(&packet);
           }
           // *****************************************************************************************

           /* flush filters and encoders */
           for (i = 0; i < _ifmt_ctx->nb_streams; i++) {
               /* flush filter */
               if (!_filter_ctx[i].filter_graph)
                   continue;
               ret = filter_encode_write_frame(NULL, i);
               if (ret < 0) {
                   av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
                   goto end;
               }

               /* flush encoder */
               ret = flush_encoder(i);
               if (ret < 0) {
                   av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
                   goto end;
               }
           }
           av_write_trailer(_ofmt_ctx);
           av_bitstream_filter_close(h264bsfc);
           av_bitstream_filter_close(aacbsfc);
       } else {
           NSLog(@"Unable to resolve url for %@",_mediaFile.url.lastPathComponent);
       }
       [_srcURL stopAccessingSecurityScopedResource];

    end:
       av_free_packet(&packet);
       av_frame_free(&frame);
       for (i = 0; i < _ifmt_ctx->nb_streams; i++) {
           avcodec_close(_ifmt_ctx->streams[i]->codec);
           if (_ofmt_ctx && _ofmt_ctx->nb_streams > i && _ofmt_ctx->streams[i] && _ofmt_ctx->streams[i]->codec)
               avcodec_close(_ofmt_ctx->streams[i]->codec);
           if (_filter_ctx && _filter_ctx[i].filter_graph)
               avfilter_graph_free(&_filter_ctx[i].filter_graph);
       }
       av_free(_filter_ctx);
       avformat_close_input(&_ifmt_ctx);
       if (_ofmt_ctx && !(_ofmt_ctx->oformat->flags & AVFMT_NOFILE))
           avio_closep(&_ofmt_ctx->pb);
       avformat_free_context(_ofmt_ctx);
    }

    The following method is used to open the input file and create the ifmt_ctx.

    int open_input_file(const char *filename) {
       int ret;
       unsigned int i;

       _ifmt_ctx = NULL;
       if ((ret = avformat_open_input(&_ifmt_ctx, filename, NULL, NULL)) < 0) {
           av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
           return ret;
       }

       if ((ret = avformat_find_stream_info(_ifmt_ctx, NULL)) < 0) {
           av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
           return ret;
       }

       for (i = 0; i < _ifmt_ctx->nb_streams; i++) {
           AVStream *stream;
           AVCodecContext *codec_ctx;
           stream = _ifmt_ctx->streams[i];
           codec_ctx = stream->codec;
           /* Reencode video & audio and remux subtitles etc. */
           if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
               || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
               /* Open decoder */
               ret = avcodec_open2(codec_ctx,
                                   avcodec_find_decoder(codec_ctx->codec_id), NULL);
               if (ret < 0) {
                   av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
                   return ret;
               }
           }
       }

       // Remove later
       av_dump_format(_ifmt_ctx, 0, filename, 0);
       return 0;
    }

    This method is used to open the output file and create the output format context.

    int open_output_file(const char *filename) {
       AVStream *out_stream;
       AVStream *in_stream;
       AVCodecContext *dec_ctx, *enc_ctx;
       AVCodec *encoder;
       int ret;
       unsigned int i;

       _ofmt_ctx = NULL;
       avformat_alloc_output_context2(&_ofmt_ctx, NULL, NULL, filename);
       if (!_ofmt_ctx) {
           av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
           return AVERROR_UNKNOWN;
       }


       for (i = 0; i < _ifmt_ctx->nb_streams; i++) {
           out_stream = avformat_new_stream(_ofmt_ctx, NULL);
           if (!out_stream) {
               av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
               return AVERROR_UNKNOWN;
           }

           in_stream = _ifmt_ctx->streams[i];
           dec_ctx = in_stream->codec;
           enc_ctx = out_stream->codec;

           if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
               // set video stream
               encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
               avcodec_get_context_defaults3(enc_ctx, encoder);
               av_opt_set(enc_ctx->priv_data, "preset", "slow", 0);
               enc_ctx->height = dec_ctx->height;
               enc_ctx->width = dec_ctx->width;
               enc_ctx->bit_rate = dec_ctx->bit_rate;
               enc_ctx->time_base = out_stream->time_base = dec_ctx->time_base;
               enc_ctx->pix_fmt = encoder->pix_fmts[0];

               ret = avcodec_open2(enc_ctx, encoder, NULL);
               if (ret < 0) {
                   av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                   return ret;
               }

           } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
               // set audio stream
               //encoder = avcodec_find_encoder(AV_CODEC_ID_AAC);
               encoder = avcodec_find_encoder_by_name("libfdk_aac");
               avcodec_get_context_defaults3(enc_ctx, encoder);
               enc_ctx->profile = FF_PROFILE_AAC_HE_V2;
               enc_ctx->sample_rate = dec_ctx->sample_rate;
               enc_ctx->channel_layout = dec_ctx->channel_layout;
               enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
               enc_ctx->sample_fmt = encoder->sample_fmts[0];
               enc_ctx->time_base = out_stream->time_base = (AVRational){1, enc_ctx->sample_rate};
               enc_ctx->bit_rate = dec_ctx->bit_rate;

               ret = avcodec_open2(enc_ctx, encoder, NULL);
               if (ret < 0) {
                   av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
                   return ret;
               }

           } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
               // deal with error
               av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
               return AVERROR_INVALIDDATA;
           } else {
               // remux stream
               ret = avcodec_copy_context(_ofmt_ctx->streams[i]->codec,
                                          _ifmt_ctx->streams[i]->codec);
               if (ret < 0) {
                   av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
                   return ret;
               }
           }

           if (_ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
               enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
           }
       }

       av_dump_format(_ofmt_ctx, 0, filename, 1);

       NSURL *openFileURL = [Utility openPanelAt:[NSURL URLWithString:_dstURL.URLByDeletingLastPathComponent.path]
                                       withTitle:@"Transcode File"
                                         message:@"Please allow Maví to create the new file."
                                       andPrompt:@"Grant Access"];

       openFileURL = [openFileURL URLByAppendingPathComponent:_dstURL.lastPathComponent isDirectory:NO];
       if (!(_ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
           ret = avio_open(&_ofmt_ctx->pb, openFileURL.fileSystemRepresentation, AVIO_FLAG_WRITE);
           if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
               return ret;
           }
       }

       /* init muxer, write output file header */
       ret = avformat_write_header(_ofmt_ctx, NULL);
       if (ret < 0) {
           av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
           return ret;
       }

       return 0;
    }

    These two methods deal with initialising the filters and filtering.

    int init_filters(void) {
       const char *filter_spec;
       unsigned int i;
       int ret;
       _filter_ctx = av_malloc_array(_ifmt_ctx->nb_streams, sizeof(*_filter_ctx));
       if (!_filter_ctx)
           return AVERROR(ENOMEM);

       for (i = 0; i < _ifmt_ctx->nb_streams; i++) {
           _filter_ctx[i].buffersrc_ctx  = NULL;
           _filter_ctx[i].buffersink_ctx = NULL;
           _filter_ctx[i].filter_graph   = NULL;
           if (!(_ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
                 || _ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
               continue;


           if (_ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
               filter_spec = "null"; /* passthrough (dummy) filter for video */
           else
               filter_spec = "anull"; /* passthrough (dummy) filter for audio */
           ret = init_filter(&_filter_ctx[i], _ifmt_ctx->streams[i]->codec,
                             _ofmt_ctx->streams[i]->codec, filter_spec);
           if (ret)
               return ret;
       }
       return 0;
    }
    int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx, AVCodecContext *enc_ctx, const char *filter_spec) {
       char args[512];
       int ret = 0;
       AVFilter *buffersrc = NULL;
       AVFilter *buffersink = NULL;
       AVFilterContext *buffersrc_ctx = NULL;
       AVFilterContext *buffersink_ctx = NULL;
       AVFilterInOut *outputs = avfilter_inout_alloc();
       AVFilterInOut *inputs  = avfilter_inout_alloc();
       AVFilterGraph *filter_graph = avfilter_graph_alloc();

       if (!outputs || !inputs || !filter_graph) {
           ret = AVERROR(ENOMEM);
           goto end;
       }

       if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
           buffersrc = avfilter_get_by_name("buffer");
           buffersink = avfilter_get_by_name("buffersink");
           if (!buffersrc || !buffersink) {
               av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
               ret = AVERROR_UNKNOWN;
               goto end;
           }

           snprintf(args, sizeof(args),
                    "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                    dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                    dec_ctx->time_base.num, dec_ctx->time_base.den,
                    dec_ctx->sample_aspect_ratio.num,
                    dec_ctx->sample_aspect_ratio.den);

           ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                              args, NULL, filter_graph);
           if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
               goto end;
           }

           ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                              NULL, NULL, filter_graph);
           if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
               goto end;
           }

           ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                                (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
                                AV_OPT_SEARCH_CHILDREN);
           if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
               goto end;
           }
       } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
           buffersrc = avfilter_get_by_name("abuffer");
           buffersink = avfilter_get_by_name("abuffersink");
           if (!buffersrc || !buffersink) {
               av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
               ret = AVERROR_UNKNOWN;
               goto end;
           }

           if (!dec_ctx->channel_layout)
               dec_ctx->channel_layout =
               av_get_default_channel_layout(dec_ctx->channels);
           snprintf(args, sizeof(args),
                    "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
                    dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
                    av_get_sample_fmt_name(dec_ctx->sample_fmt),
                    dec_ctx->channel_layout);
           ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                              args, NULL, filter_graph);
           if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
               goto end;
           }

           ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                              NULL, NULL, filter_graph);
           if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
               goto end;
           }

           ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
                                (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
                                AV_OPT_SEARCH_CHILDREN);
           if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
               goto end;
           }

           ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
                                (uint8_t*)&enc_ctx->channel_layout,
                                sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
           if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
               goto end;
           }

           ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
                                (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
                                AV_OPT_SEARCH_CHILDREN);
           if (ret < 0) {
               av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
               goto end;
           }
       } else {
           ret = AVERROR_UNKNOWN;
           goto end;
       }

       /* Endpoints for the filter graph. */
       outputs->name       = av_strdup("in");
       outputs->filter_ctx = buffersrc_ctx;
       outputs->pad_idx    = 0;
       outputs->next       = NULL;

       inputs->name       = av_strdup("out");
       inputs->filter_ctx = buffersink_ctx;
       inputs->pad_idx    = 0;
       inputs->next       = NULL;

       if (!outputs->name || !inputs->name) {
           ret = AVERROR(ENOMEM);
           goto end;
       }

       if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
                                           &inputs, &outputs, NULL)) < 0)
           goto end;

       if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
           goto end;

       /* Fill FilteringContext */
       fctx->buffersrc_ctx = buffersrc_ctx;
       fctx->buffersink_ctx = buffersink_ctx;
       fctx->filter_graph = filter_graph;

    end:
       avfilter_inout_free(&inputs);
       avfilter_inout_free(&outputs);

       return ret;
    }

    Finally these two methods take care of writing the frames.

    int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
       int ret;
       int got_frame_local;
       AVPacket enc_pkt;
       int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
       (_ifmt_ctx->streams[stream_index]->codec->codec_type ==
        AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

       if (!got_frame)
           got_frame = &got_frame_local;

       av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
       /* encode filtered frame */
       enc_pkt.data = NULL;
       enc_pkt.size = 0;
       av_init_packet(&enc_pkt);
       ret = enc_func(_ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
                      filt_frame, got_frame);
       av_frame_free(&filt_frame);
       if (ret < 0)
           return ret;
       if (!(*got_frame))
           return 0;

       /* prepare packet for muxing */
       enc_pkt.stream_index = stream_index;
       av_packet_rescale_ts(&enc_pkt,
                            _ofmt_ctx->streams[stream_index]->codec->time_base,
                            _ofmt_ctx->streams[stream_index]->time_base);

       av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
       /* mux encoded frame */
       ret = av_interleaved_write_frame(_ofmt_ctx, &enc_pkt);
       return ret;
    }
    int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
    {
       int ret;
       AVFrame *filt_frame;

       av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
       /* push the decoded frame into the filtergraph */
       ret = av_buffersrc_add_frame_flags(_filter_ctx[stream_index].buffersrc_ctx,
                                          frame, 0);
       if (ret < 0) {
           av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
           return ret;
       }

       /* pull filtered frames from the filtergraph */
       while (1) {
           filt_frame = av_frame_alloc();
           if (!filt_frame) {
               ret = AVERROR(ENOMEM);
               break;
           }
           av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
           ret = av_buffersink_get_frame(_filter_ctx[stream_index].buffersink_ctx,
                                         filt_frame);
           if (ret < 0) {
               /* if no more frames for output - returns AVERROR(EAGAIN)
                * if flushed and no more frames for output - returns AVERROR_EOF
                * rewrite retcode to 0 to show it as normal procedure completion
                */
               if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                   ret = 0;
               av_frame_free(&filt_frame);
               break;
           }

           filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
           ret = encode_write_frame(filt_frame, stream_index, NULL);
           if (ret < 0)
               break;
       }

       return ret;
    }
  • Live555: X264 Stream Live source based on "testOnDemandRTSPServer"

    26 October 2017, by user2660369

    I am trying to create an RTSP server that streams the OpenGL output of my program. I had a look at "How to write a Live555 FramedSource to allow me to stream H.264 live", but I need the stream to be unicast. So I had a look at testOnDemandRTSPServer. Using the same code fails. To my understanding, I need to provide a buffer in which I store my H.264 frames so the OnDemandServer can read them on demand.
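
    For reference, the unicast wiring in testOnDemandRTSPServer that this builds on amounts to creating a ServerMediaSession, adding the subsession to it, and registering it with the server. A minimal sketch, assuming env and rtspServer already exist as in the server thread shown further down (the stream name and description strings are placeholders):

    // testOnDemandRTSPServer-style unicast setup (sketch; names are placeholders).
    ServerMediaSession* sms =
        ServerMediaSession::createNew(*env, "Stream", "Stream", "Live H.264 stream");
    sms->addSubsession(H264VideoStreamServerMediaSubsession::createNew(*env, True));
    rtspServer->addServerMediaSession(sms);

    char* url = rtspServer->rtspURL(sms);
    *env << "Play this stream using the URL \"" << url << "\"\n";
    delete[] url;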

    H264VideoStreamServerMediaSubsession.cpp

    H264VideoStreamServerMediaSubsession*
    H264VideoStreamServerMediaSubsession::createNew(UsageEnvironment& env,
                             Boolean reuseFirstSource) {
     return new H264VideoStreamServerMediaSubsession(env, reuseFirstSource);
    }

    H264VideoStreamServerMediaSubsession::H264VideoStreamServerMediaSubsession(UsageEnvironment& env, Boolean reuseFirstSource)
     : OnDemandServerMediaSubsession(env, reuseFirstSource), fAuxSDPLine(NULL), fDoneFlag(0), fDummyRTPSink(NULL) {
    }

    H264VideoStreamServerMediaSubsession::~H264VideoStreamServerMediaSubsession() {
     delete[] fAuxSDPLine;
    }

    static void afterPlayingDummy(void* clientData) {
     H264VideoStreamServerMediaSubsession* subsess = (H264VideoStreamServerMediaSubsession*)clientData;
     subsess->afterPlayingDummy1();
    }

    void H264VideoStreamServerMediaSubsession::afterPlayingDummy1() {
     // Unschedule any pending 'checking' task:
     envir().taskScheduler().unscheduleDelayedTask(nextTask());
     // Signal the event loop that we're done:
     setDoneFlag();
    }

    static void checkForAuxSDPLine(void* clientData) {
     H264VideoStreamServerMediaSubsession* subsess = (H264VideoStreamServerMediaSubsession*)clientData;
     subsess->checkForAuxSDPLine1();
    }

    void H264VideoStreamServerMediaSubsession::checkForAuxSDPLine1() {
     char const* dasl;

     if (fAuxSDPLine != NULL) {
       // Signal the event loop that we're done:
       setDoneFlag();
     } else if (fDummyRTPSink != NULL && (dasl = fDummyRTPSink->auxSDPLine()) != NULL) {
       fAuxSDPLine = strDup(dasl);
       fDummyRTPSink = NULL;

       // Signal the event loop that we're done:
       setDoneFlag();
     } else {
       // try again after a brief delay:
       int uSecsToDelay = 100000; // 100 ms
       nextTask() = envir().taskScheduler().scheduleDelayedTask(uSecsToDelay,
                     (TaskFunc*)checkForAuxSDPLine, this);
     }
    }

    char const* H264VideoStreamServerMediaSubsession::getAuxSDPLine(RTPSink* rtpSink, FramedSource* inputSource) {
     if (fAuxSDPLine != NULL) return fAuxSDPLine; // it's already been set up (for a previous client)

     if (fDummyRTPSink == NULL) { // we're not already setting it up for another, concurrent stream
       // Note: For H264 video files, the 'config' information ("profile-level-id" and "sprop-parameter-sets") isn't known
       // until we start reading the file.  This means that "rtpSink"s "auxSDPLine()" will be NULL initially,
       // and we need to start reading data from our file until this changes.
       fDummyRTPSink = rtpSink;

       // Start reading the file:
       fDummyRTPSink->startPlaying(*inputSource, afterPlayingDummy, this);

       // Check whether the sink's 'auxSDPLine()' is ready:
       checkForAuxSDPLine(this);
     }

     envir().taskScheduler().doEventLoop(&fDoneFlag);

     return fAuxSDPLine;
    }

    FramedSource* H264VideoStreamServerMediaSubsession::createNewStreamSource(unsigned /*clientSessionId*/, unsigned& estBitrate) {
     estBitrate = 500; // kb
     megamol::remotecontrol::View3D_MRC *parent = (megamol::remotecontrol::View3D_MRC*)this->parent;
     return H264VideoStreamFramer::createNew(envir(), parent->h264FramedSource);
    }

    RTPSink* H264VideoStreamServerMediaSubsession::createNewRTPSink(Groupsock* rtpGroupsock, unsigned char rtpPayloadTypeIfDynamic, FramedSource* /*inputSource*/) {
     return H264VideoRTPSink::createNew(envir(), rtpGroupsock, rtpPayloadTypeIfDynamic);
    }

    FramedSource.cpp

    H264FramedSource* H264FramedSource::createNew(UsageEnvironment& env,
                                             unsigned preferredFrameSize,
                                             unsigned playTimePerFrame)
    {
       return new H264FramedSource(env, preferredFrameSize, playTimePerFrame);
    }

    H264FramedSource::H264FramedSource(UsageEnvironment& env,
                                  unsigned preferredFrameSize,
                                  unsigned playTimePerFrame)
       : FramedSource(env),
       fPreferredFrameSize(fMaxSize),
       fPlayTimePerFrame(playTimePerFrame),
       fLastPlayTime(0),
       fCurIndex(0)
    {

       x264_param_default_preset(&param, "veryfast", "zerolatency");
       param.i_threads = 1;
       param.i_width = 1024;
       param.i_height = 768;
       param.i_fps_num = 30;
       param.i_fps_den = 1;
       // Intra refresh:
       param.i_keyint_max = 60;
       param.b_intra_refresh = 1;
       //Rate control:
       param.rc.i_rc_method = X264_RC_CRF;
       param.rc.f_rf_constant = 25;
       param.rc.f_rf_constant_max = 35;
       param.i_sps_id = 7;
       //For streaming:
       param.b_repeat_headers = 1;
       param.b_annexb = 1;
       x264_param_apply_profile(&param, "baseline");

       param.i_log_level = X264_LOG_ERROR;

       encoder = x264_encoder_open(&param);
       pic_in.i_type            = X264_TYPE_AUTO;
       pic_in.i_qpplus1         = 0;
       pic_in.img.i_csp         = X264_CSP_I420;
       pic_in.img.i_plane       = 3;


       x264_picture_alloc(&pic_in, X264_CSP_I420, 1024, 768);

       convertCtx = sws_getContext(1024, 768, PIX_FMT_RGBA, 1024, 768, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
       eventTriggerId = envir().taskScheduler().createEventTrigger(deliverFrame0);
    }

    H264FramedSource::~H264FramedSource()
    {
       envir().taskScheduler().deleteEventTrigger(eventTriggerId);
       eventTriggerId = 0;
    }

    void H264FramedSource::AddToBuffer(uint8_t* buf, int surfaceSizeInBytes)
    {
       uint8_t* surfaceData = (new uint8_t[surfaceSizeInBytes]);

       memcpy(surfaceData, buf, surfaceSizeInBytes);

       int srcstride = 1024*4;
       sws_scale(convertCtx, &surfaceData, &srcstride,0, 768, pic_in.img.plane, pic_in.img.i_stride);
       x264_nal_t* nals = NULL;
       int i_nals = 0;
       int frame_size = -1;


       frame_size = x264_encoder_encode(encoder, &nals, &i_nals, &pic_in, &pic_out);

       static bool finished = false;

       if (frame_size >= 0)
       {
       static bool alreadydone = false;
       if(!alreadydone)
       {

           x264_encoder_headers(encoder, &nals, &i_nals);
           alreadydone = true;
       }
       for(int i = 0; i < i_nals; ++i)
       {
           m_queue.push(nals[i]);
       }
       }
       delete [] surfaceData;
       surfaceData = nullptr;

       envir().taskScheduler().triggerEvent(eventTriggerId, this);
    }

    void H264FramedSource::doGetNextFrame()
    {
       deliverFrame();
    }

    void H264FramedSource::deliverFrame0(void* clientData)
    {
       ((H264FramedSource*)clientData)->deliverFrame();
    }

    void H264FramedSource::deliverFrame()
    {
       x264_nal_t nalToDeliver;

       if (fPlayTimePerFrame > 0 && fPreferredFrameSize > 0) {
       if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
           // This is the first frame, so use the current time:
           gettimeofday(&fPresentationTime, NULL);
       } else {
           // Increment by the play time of the previous data:
           unsigned uSeconds   = fPresentationTime.tv_usec + fLastPlayTime;
           fPresentationTime.tv_sec += uSeconds/1000000;
           fPresentationTime.tv_usec = uSeconds%1000000;
       }

       // Remember the play time of this data:
       fLastPlayTime = (fPlayTimePerFrame*fFrameSize)/fPreferredFrameSize;
       fDurationInMicroseconds = fLastPlayTime;
       } else {
       // We don't know a specific play time duration for this data,
       // so just record the current time as being the 'presentation time':
       gettimeofday(&fPresentationTime, NULL);
       }

       if(!m_queue.empty())
       {
       m_queue.wait_and_pop(nalToDeliver);

       uint8_t* newFrameDataStart = (uint8_t*)0xD15EA5E;

       newFrameDataStart = (uint8_t*)(nalToDeliver.p_payload);
       unsigned newFrameSize = nalToDeliver.i_payload;

       // Deliver the data here:
       if (newFrameSize > fMaxSize) {
           fFrameSize = fMaxSize;
           fNumTruncatedBytes = newFrameSize - fMaxSize;
       }
       else {
           fFrameSize = newFrameSize;
       }

       memcpy(fTo, nalToDeliver.p_payload, nalToDeliver.i_payload);

       FramedSource::afterGetting(this);
       }
    }

    Relevant part of the RTSP server thread

     RTSPServer* rtspServer = RTSPServer::createNew(*(parent->env), 8554, NULL);
     if (rtspServer == NULL) {
       *(parent->env) << "Failed to create RTSP server: " << (parent->env)->getResultMsg() << "\n";
       exit(1);
     }
     char const* streamName = "Stream";
     parent->h264FramedSource = H264FramedSource::createNew(*(parent->env), 0, 0);
     H264VideoStreamServerMediaSubsession *h264VideoStreamServerMediaSubsession = H264VideoStreamServerMediaSubsession::createNew(*(parent->env), true);
     h264VideoStreamServerMediaSubsession->parent = parent;
     sms->addSubsession(h264VideoStreamServerMediaSubsession);
     rtspServer->addServerMediaSession(sms);

     parent->env->taskScheduler().doEventLoop(); // does not return

    Once a connection exists, the render loop calls

    h264FramedSource->AddToBuffer(videoData, 1024*768*4);
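
    The render-loop side is not shown; presumably it reads the rendered frame back as RGBA and hands it to the framed source, roughly along the lines of this sketch (onFrameRendered and the fixed 1024x768 size are assumptions that match the encoder setup above):

    #include <GL/gl.h>
    #include <cstdint>
    #include <vector>

    // Hypothetical per-frame hook: read the framebuffer back as RGBA and push it
    // to the framed source. Sizes match the 1024x768 RGBA setup above.
    void onFrameRendered(H264FramedSource* h264FramedSource)
    {
        static std::vector<uint8_t> videoData(1024 * 768 * 4);
        glReadPixels(0, 0, 1024, 768, GL_RGBA, GL_UNSIGNED_BYTE, videoData.data());
        h264FramedSource->AddToBuffer(videoData.data(), (int)videoData.size());
    }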