Recherche avancée

Médias (1)

Mot : - Tags -/stallman

Autres articles (97)

  • MediaSPIP 0.1 Beta version

    25 avril 2011, par

    MediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
    The zip file provided here only contains the sources of MediaSPIP in its standalone version.
    To get a working installation, you must manually install all-software dependencies on the server.
    If you want to use this archive for an installation in "farm mode", you will also need to proceed to other manual (...)

  • Multilang : améliorer l’interface pour les blocs multilingues

    18 février 2011, par

    Multilang est un plugin supplémentaire qui n’est pas activé par défaut lors de l’initialisation de MediaSPIP.
    Après son activation, une préconfiguration est mise en place automatiquement par MediaSPIP init permettant à la nouvelle fonctionnalité d’être automatiquement opérationnelle. Il n’est donc pas obligatoire de passer par une étape de configuration pour cela.

  • ANNEXE : Les plugins utilisés spécifiquement pour la ferme

    5 mars 2010, par

    Le site central/maître de la ferme a besoin d’utiliser plusieurs plugins supplémentaires vis à vis des canaux pour son bon fonctionnement. le plugin Gestion de la mutualisation ; le plugin inscription3 pour gérer les inscriptions et les demandes de création d’instance de mutualisation dès l’inscription des utilisateurs ; le plugin verifier qui fournit une API de vérification des champs (utilisé par inscription3) ; le plugin champs extras v2 nécessité par inscription3 (...)

Sur d’autres sites (11454)

  • Muxing H264 packets into a MPEGTS container using libav*

    30 avril 2024, par Lucen

    I'm writing a C++ program where I need to encode packets in h264 format and mux them to a MPEG TS container. For the encoding part, I based my code on the encode_video example (https://ffmpeg.org/doxygen/trunk/encode_video_8c-example.html#a9) provided in FFMPEG documentation, and it seems to work fine. In particular, I generate a std::vector of packets which I sequentially write to an output .ts file for debug. Such .ts file plays fine with SMPlayer, and an ffprobe command gives

    


    >> ffprobe  -print_format json -show_format -show_streams out.ts
Input #0, h264, from 'out.ts':
  Duration: N/A, bitrate: N/A
    Stream #0:0: Video: h264 (Main), yuv420p(progressive), 640x480 [SAR 1:1 DAR 4:3], 25 fps, 25 tbr, 1200k tbn, 50 tbc
    "streams": [
        {
            "index": 0,
            "codec_name": "h264",
            "codec_long_name": "H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10",
            "profile": "Main",
            "codec_type": "video",
            "codec_time_base": "1/50",
            "codec_tag_string": "[0][0][0][0]",
            "codec_tag": "0x0000",
            "width": 640,
            "height": 480,
            "coded_width": 640,
            "coded_height": 480,
            "has_b_frames": 1,
            "sample_aspect_ratio": "1:1",
            "display_aspect_ratio": "4:3",
            "pix_fmt": "yuv420p",
            "level": 30,
            "chroma_location": "left",
            "field_order": "progressive",
            "refs": 1,
            "is_avc": "false",
            "nal_length_size": "0",
            "r_frame_rate": "25/1",
            "avg_frame_rate": "25/1",
            "time_base": "1/1200000",
            "bits_per_raw_sample": "8",
            "disposition": {
                "default": 0,
                "dub": 0,
                "original": 0,
                "comment": 0,
                "lyrics": 0,
                "karaoke": 0,
                "forced": 0,
                "hearing_impaired": 0,
                "visual_impaired": 0,
                "clean_effects": 0,
                "attached_pic": 0,
                "timed_thumbnails": 0
            }
        }
    ],
    "format": {
        "filename": "out.ts",
        "nb_streams": 1,
        "nb_programs": 0,
        "format_name": "h264",
        "format_long_name": "raw H.264 video",
        "size": "435443",
        "probe_score": 51
    }
}



    


    The dts and pts timestamps are also set.
However, if I try to mux them in MPEG TS format, using as a base the example mux.c (https://ffmpeg.org/doxygen/trunk/mux_8c-example.html), it doesn't work. A shortened version of my muxing code is as follows : (the variables ending with "_" are class fields)

    


    int MyProcessing::Mux(const std::string outputFilename) {
    AVFormatContext *muxingContest;
    avformat_alloc_output_context2(&muxingContest, NULL, NULL, m_output.c_str());

    auto outFormat = muxingContest->oformat;
    outFormat->video_codec = AV_CODEC_ID_H264;

    AVStream *outStream;
    const AVCodec *codec;

    Mux_AddStream(&outStream, muxingContest, &codec, outFormat->video_codec);

    AVDictionary *opt = nullptr;
    Mux_OpenVideo(muxingContest, codec, outStream, opt);
 
    if (!(muxingContest->flags & AVFMT_NOFILE)) {
      avio_open(&muxingContest->pb, m_output.c_str(), AVIO_FLAG_WRITE);
    }
    avformat_write_header(muxingContest, &opt);

    auto muxOk = true;
    size_t countMuxedFrames = 0;
    while ((muxOk) && (countMuxedFrames < packets_.size())) {
        muxOk = !MuxPacket(muxingContest, outStream, packets_[countMuxedFrames], &opt);
        countMuxedFrames++;
    }

    av_write_trailer(muxingContest);
    if (!(muxCodecContextPtr_->flags & AVFMT_NOFILE)) avio_closep(&muxingContest->pb);
 
    return 0;
}


int MyProcessing::Mux_AddStream(AVStream **stream, AVFormatContext *format, const AVCodec **codec, enum AVCodecID codecId) {
    // Creates the single video output stream for the muxer and allocates the
    // codec context (muxCodecContextPtr_) describing how packets_ were encoded.
    *codec = avcodec_find_encoder(codecId);
    if (!*codec) return AVERROR_ENCODER_NOT_FOUND; // BUG FIX: was unchecked

    muxPacketTmpPtr_ = av_packet_alloc();
    if (!muxPacketTmpPtr_) return AVERROR(ENOMEM); // BUG FIX: was unchecked

    *stream = avformat_new_stream(format, *codec);
    if (!*stream) return AVERROR(ENOMEM);          // BUG FIX: was unchecked
    (*stream)->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
    (*stream)->id = format->nb_streams - 1;
    (*stream)->index = 0; // single-video-stream container

    muxCodecContextPtr_ = avcodec_alloc_context3(*codec);
    if (!muxCodecContextPtr_) return AVERROR(ENOMEM);
    Mux_FillCodecContext(*muxCodecContextPtr_, codecId, **stream);
    // Some containers (e.g. MP4) require SPS/PPS in global extradata rather
    // than repeated in-band; only set the flag when the muxer asks for it.
    if (format->oformat->flags & AVFMT_GLOBALHEADER)
        muxCodecContextPtr_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    return 0;
}


void MyProcessing::Mux_FillCodecContext(AVCodecContext &cc, enum AVCodecID codecId, AVStream &stream) {
    // Fills the muxing codec context with the parameters the packets in
    // packets_ were encoded with.
    cc.codec_id  = codecId;
    cc.bit_rate  = 400000;
    cc.width     = outputWidth_;
    cc.height    = outputHeight_;
    cc.time_base = stream.time_base; // 1 / STREAM_FRAME_RATE
    cc.gop_size  = 12;               // BUG FIX: was assigned twice (10, then 12)
    cc.max_b_frames = 1;
    cc.pix_fmt   = AV_PIX_FMT_YUV420P;
    if (cc.codec_id == AV_CODEC_ID_MPEG2VIDEO) cc.max_b_frames = 2;
    if (cc.codec_id == AV_CODEC_ID_MPEG1VIDEO) cc.mb_decision = 2;
    // BUG FIX: "preset"/"tune" are libx264 *private* options living in
    // cc.priv_data; without AV_OPT_SEARCH_CHILDREN av_opt_set() only searches
    // the AVCodecContext itself and silently fails with AVERROR_OPTION_NOT_FOUND.
    av_opt_set(&cc, "preset", "slow", AV_OPT_SEARCH_CHILDREN);
    av_opt_set(&cc, "tune", "zerolatency", AV_OPT_SEARCH_CHILDREN);
}


int MyProcessing::Mux_OpenVideo(AVFormatContext *format, const AVCodec *codec, AVStream *stream, AVDictionary *opt_arg) {
    // Opens the muxing codec context and copies its parameters (extradata,
    // dimensions, ...) into the output stream so the muxer can describe it.
    // BUG FIX: the unconditional AV_CODEC_FLAG_GLOBAL_HEADER set here has been
    // removed — Mux_AddStream already sets it when the container requires it,
    // and forcing it for MPEG-TS is wrong (TS expects in-band Annex-B headers,
    // which is consistent with the "no startcode found" muxer error).
    AVDictionary *opt = nullptr;
    av_dict_copy(&opt, opt_arg, 0);
    int ret = avcodec_open2(muxCodecContextPtr_, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) return ret; // BUG FIX: failure was silently ignored

    ret = avcodec_parameters_from_context(stream->codecpar, muxCodecContextPtr_);
    return ret < 0 ? ret : 0; // BUG FIX: failure was silently ignored
}

int MyProcessing::MuxPacket(AVFormatContext *format, AVStream *stream, AVPacket &pkt, AVDictionary **opt) {
    AVBitStreamFilterContext *bsf = av_bitstream_filter_init("h264_mp4toannexb");
    AVPacket filteredPkt = pkt;
    auto filterResult = av_bitstream_filter_filter(bsf, format->streams[stream->index]->codec, NULL,
                                         &filteredPkt.data, &filteredPkt.size,
                                         pkt.data, pkt.size,
                                         pkt.flags & AV_PKT_FLAG_KEY);

    if (filterResult < 0) return filterResult;
    else {
        filteredPkt.buf = av_buffer_create(filteredPkt.data, filteredPkt.size,
                                       av_buffer_default_free, NULL, 0);
    }
    av_bitstream_filter_close(bsf);
    filteredPkt.stream_index = stream->index;
    filteredPkt.dts = filteredPkt.pts;
    filteredPkt.duration = ((double)stream->time_base.num / (double)stream->time_base.den) / STREAM_FRAME_RATE;
    av_packet_rescale_ts(&filteredPkt, muxCodecContextPtr_->time_base, stream->time_base); // rescale output packet timestamp values from codec to stream timebase
    auto writePktResult = av_write_frame(format, &filteredPkt);
   // auto writePktResult = av_interleaved_write_frame(format, &filteredPkt);
    return 0;
}



    


    The console error is

    


    [mpegts @ 0x55555736edc0] H.264 bitstream malformed, no startcode found, use the video bitstream filter 'h264_mp4toannexb' to fix it ('-bsf:v h264_mp4toannexb' option with ffmpeg)


    


    It is telling me to apply the h264_mp4toannexb filter. As you can see from the code, I've applied the filtering accordingly, but the error message persists (unless I'm applying the filter in a wrong way).

    


    In the last lines of method MuxPacket(), if I uncomment the line with av_interleaved_write_frame() and comment the previous one, I get the same error, as well as a seg fault. Inspecting with GDB, the call stack for the seg fault is as follows :

    


    #0  __memmove_avx_unaligned_erms () at ../sysdeps/x86_64/multiarch/memmove-vec-unaligned-erms.S:440
#1  0x00007ffff67c7cb6 in av_packet_copy_props () at /lib/x86_64-linux-gnu/libavcodec.so.58
#2  0x00007ffff67c8447 in av_packet_ref () at /lib/x86_64-linux-gnu/libavcodec.so.58
#3  0x00007ffff7e2fa13 in  () at /lib/x86_64-linux-gnu/libavformat.so.58
#4  0x00007ffff7e2fb11 in  () at /lib/x86_64-linux-gnu/libavformat.so.58
#5  0x00007ffff7e30575 in av_interleaved_write_frame () at /lib/x86_64-linux-gnu/libavformat.so.58


    


    I tried to look at solutions online, but they are either old or they don't work. Some of the things I tried and didn't work :

    


      

    1. Putting the line
    2. 


    


    muxCodecContextPtr_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;


    


    in Mux() after the call to avformat_alloc_output_context2.

    


      

    1. Setting
    2. 


    


    packet.flags |= AV_PKT_FLAG_KEY;


    


    before the call to av_write_frame / av_interleaved_write_frame.

    


      

    1. Trying to write by hand to the file the starting code as described here Need to convert h264 stream from annex-b format to AVCC format.

      


    2. 


    3. Playing with parameters in Mux_FillCodecContext().

      


    4. 


    


  • Crash on ffmpeg avcodec_encode_video in a Console app

    5 mars 2013, par Robel sharma

    I want make an encoder which encode a raw image into h263 format.But after loading and initializing ffmpeg library I got crash on avcodec_encode_video for a demo image.

    int _tmain(int argc, _TCHAR* argv[]) {
       avcodec_register_all();
       AVCodec *codec;
       AVCodecContext *c= NULL;
       int i, ret, x, y, got_output;
       FILE *f;
       AVFrame *frame;
       AVPacket pkt;

       int out_size, size, outbuf_size;

       AVFrame *picture;
       uint8_t *outbuf, *picture_buf;

       AVRational rp;  

       rp.den = 1;
       rp.num = 25;
       uint8_t endcode[] = { 0, 0, 1, 0xb7 };

       codec = avcodec_find_encoder(CODEC_ID_H263);

       c = avcodec_alloc_context3(codec);
       picture= avcodec_alloc_frame();
       c->bit_rate = 400000;
       /* resolution must be a multiple of two */
       c->width = 352;
       c->height = 288;
       /* frames per second */
       //c->time_base= (AVRational){1,25};
       c->time_base = rp;
       c->gop_size = 10; /* emit one intra frame every ten frames */
       c->max_b_frames=1;
       c->pix_fmt = PIX_FMT_YUV420P;
       avcodec_open(c, codec);


       outbuf_size = 100000;
       outbuf = (uint8_t*)malloc(outbuf_size);
       size = c->width * c->height;
       picture_buf = (uint8_t*)malloc((size * 3) / 2); /* size for YUV 420 */

       picture->data[0] = picture_buf;
       picture->data[1] = picture->data[0] + size;
       picture->data[2] = picture->data[1] + size / 4;
       picture->linesize[0] = c->width;
       picture->linesize[1] = c->width / 2;
       picture->linesize[2] = c->width / 2;

       /* encode 1 second of video */
       for(i=0;i<25;i++) {
           fflush(stdout);
           /* prepare a dummy image */
           /* Y */
           for(y=0;yheight;y++) {
               for(x=0;xwidth;x++) {
                   picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
               }
           }
           /* Cb and Cr */
           for(y=0;yheight/2;y++) {
               for(x=0;xwidth/2;x++) {
                   picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
                   picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
               }
           }
           /* encode the image */

           **Crash is here** --->                 ///////////////////////////////////////////////////
           out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);

           printf("encoding frame %3d (size=%5d)\n", i, out_size);
           fwrite(outbuf, 1, out_size, f);
       }
       /* get the delayed frames */
       for(; out_size; i++) {
           fflush(stdout);
           out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
           printf("write frame %3d (size=%5d)\n", i, out_size);
           fwrite(outbuf, 1, out_size, f);
       }
       /* add sequence end code to have a real mpeg file */
       outbuf[0] = 0x00;
       outbuf[1] = 0x00;
       outbuf[2] = 0x01;
       outbuf[3] = 0xb7;
       fwrite(outbuf, 1, 4, f);
       fclose(f);
       free(picture_buf);
       free(outbuf);

       avcodec_close(c);
       av_free(c);
       av_free(picture);
       printf("\n");
       return 0;
    }
  • closed (H264 track 1 is not valid : sprop-parameter-sets is missing (96 packetization-mode=1)

    8 janvier 2024, par MMingY

    I used FFmpeg6.1 to stream RTSP, but I received the following errors on the server :

    


    


    closed (H264 track 1 is not valid : sprop-parameter-sets is missing (96 packetization-mode=1),client:Error occurred when opening output Server returned 400 Bad Request.

    


    


    #include &#xA;#include &#xA;#include &#xA;&#xA;#include <libavcodec></libavcodec>avcodec.h>&#xA;#include <libavformat></libavformat>avformat.h>&#xA;#include <libavutil></libavutil>opt.h>&#xA;#include <libavutil></libavutil>imgutils.h>&#xA;#include <libavutil></libavutil>time.h>&#xA;&#xA;static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,&#xA;                   AVFormatContext *outFormatCtx) {&#xA;    int ret;&#xA;&#xA;    /* send the frame to the encoder */&#xA;    if (frame)&#xA;        printf("Send frame %3"PRId64"\n", frame->pts);&#xA;&#xA;    ret = avcodec_send_frame(enc_ctx, frame);&#xA;    if (ret &lt; 0) {&#xA;        char errbuf[AV_ERROR_MAX_STRING_SIZE];&#xA;        av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);&#xA;        fprintf(stderr, "Error sending a frame for encoding ,%s\n", errbuf);&#xA;        exit(1);&#xA;    }&#xA;&#xA;    while (ret >= 0) {&#xA;        ret = avcodec_receive_packet(enc_ctx, pkt);&#xA;        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)&#xA;            return;&#xA;        else if (ret &lt; 0) {&#xA;            fprintf(stderr, "Error during encoding\n");&#xA;            exit(1);&#xA;        }&#xA;&#xA;        printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);&#xA;        av_write_frame(outFormatCtx, pkt); // Write the packet to the RTMP stream&#xA;        av_packet_unref(pkt);&#xA;    }&#xA;}&#xA;&#xA;int main(int argc, char **argv) {&#xA;    av_log_set_level(AV_LOG_DEBUG);&#xA;    const char *rtmp_url, *codec_name;&#xA;    const AVCodec *codec;&#xA;    AVCodecContext *codecContext = NULL;&#xA;    int i, ret, x, y;&#xA;    AVFormatContext *outFormatCtx;&#xA;    AVStream *st;&#xA;    AVFrame *frame;&#xA;    AVPacket *pkt;&#xA;    uint8_t endcode[] = {0, 0, 1, 0xb7};&#xA;&#xA;    if (argc &lt;= 3) {&#xA;        fprintf(stderr, "Usage: %s <rtmp url="url"> <codec>\n", argv[0]);&#xA;        exit(0);&#xA;    }&#xA;    rtmp_url = argv[1];&#xA;    codec_name = argv[2];&#xA;    
avformat_network_init();&#xA;    /* find the mpeg1video encoder */&#xA;//    codec = avcodec_find_encoder_by_name(codec_name);&#xA;//    codec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);&#xA;//    codec = avcodec_find_encoder(AV_CODEC_ID_VP9);&#xA;//    codec = avcodec_find_encoder(AV_CODEC_ID_MPEG2VIDEO);&#xA;    codec = avcodec_find_encoder(AV_CODEC_ID_H264);&#xA;//    codec = avcodec_find_encoder(AV_CODEC_ID_AV1);&#xA;//    codec = avcodec_find_encoder(AV_CODEC_ID_H265);&#xA;    if (!codec) {&#xA;        fprintf(stderr, "Codec &#x27;%s&#x27; not found\n", codec_name);&#xA;        exit(1);&#xA;    }&#xA;    codecContext = avcodec_alloc_context3(codec);&#xA;    if (!codecContext) {&#xA;        fprintf(stderr, "Could not allocate video codec context\n");&#xA;        exit(1);&#xA;    }&#xA;&#xA;    /* ... (rest of the setup code) ... */&#xA;/* put sample parameters */&#xA;    codecContext->bit_rate = 400000;&#xA;    /* resolution must be a multiple of two */&#xA;    codecContext->width = 352;&#xA;    codecContext->height = 288;&#xA;    /* frames per second */&#xA;    codecContext->time_base = (AVRational) {1, 25};&#xA;    codecContext->framerate = (AVRational) {25, 1};&#xA;&#xA;    /* emit one intra frame every ten frames&#xA;     * check frame pict_type before passing frame&#xA;     * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I&#xA;     * then gop_size is ignored and the output of encoder&#xA;     * will always be I frame irrespective to gop_size&#xA;     */&#xA;    codecContext->gop_size = 10;&#xA;    codecContext->max_b_frames = 1;&#xA;    codecContext->pix_fmt = AV_PIX_FMT_YUV420P;&#xA;&#xA;&#xA;&#xA;    /* Open the RTSP output */&#xA;//    const AVOutputFormat *ofmt = av_guess_format("tcp", NULL, NULL);&#xA;    const AVOutputFormat *ofmt = av_guess_format("rtsp", rtmp_url, NULL);&#xA;//    const AVOutputFormat *ofmt = av_guess_format("flv", NULL, NULL);&#xA;//    const AVOutputFormat *ofmt = av_guess_format("rtmp", NULL, NULL);&#xA;//    const 
AVOutputFormat *ofmt = av_guess_format("mpegts", NULL, NULL);&#xA;//    const AVOutputFormat *ofmt = av_guess_format("mp4", NULL, NULL);&#xA;    if (!ofmt) {&#xA;        fprintf(stderr, "Could not find output format\n");&#xA;        exit(1);&#xA;    }&#xA;&#xA;    /* Allocate the output context */&#xA;&#xA;/*    outFormatCtx = avformat_alloc_context();&#xA;    if (!outFormatCtx) {&#xA;        fprintf(stderr, "Could not allocate output context\n");&#xA;        exit(1);&#xA;    }*/&#xA;&#xA;    // 打开输出  这个会导致outFormatCtx 中的stream 为空,并且产生这个问题[rtsp @ 00000204f6218b80] No streams to mux were specified&#xA;    if (avformat_alloc_output_context2(&amp;outFormatCtx, ofmt, "rtsp", rtmp_url) != 0) {&#xA;        fprintf(stderr, "Could not allocate output context\n");&#xA;        return 1;&#xA;    }&#xA;&#xA;&#xA;    outFormatCtx->oformat = ofmt;&#xA;    outFormatCtx->url = av_strdup(rtmp_url);&#xA;&#xA;    /* Add a video stream */&#xA;    st = avformat_new_stream(outFormatCtx, codec);&#xA;    if (!st) {&#xA;        fprintf(stderr, "Could not allocate stream\n");&#xA;        exit(1);&#xA;    }&#xA;    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;&#xA;    st->codecpar->codec_id = codec->id;&#xA;    st->codecpar->width = 352;&#xA;    st->codecpar->height = 288;&#xA;    st->codecpar->format = AV_PIX_FMT_YUV420P;&#xA;//    st->codecpar = c;&#xA;//    st->codecpar->format = AV_PIX_FMT_YUV420P;&#xA;    // Set video stream parameters&#xA;//    st->codecpar->framerate = (AVRational) {25, 1};&#xA;//    st->id=outFormatCtx->nb_streams-1;&#xA;    /* Set the output URL */&#xA;    av_dict_set(&amp;outFormatCtx->metadata, "url", rtmp_url, 0);&#xA;&#xA;&#xA;    pkt = av_packet_alloc();&#xA;    if (!pkt)&#xA;        exit(1);&#xA;&#xA;&#xA;    if (codec->id == AV_CODEC_ID_H264)&#xA;        av_opt_set(codecContext->priv_data, "preset", "slow", 0);&#xA;&#xA;    AVDictionary *opt = NULL;&#xA;/*    av_dict_set(&amp;opt, "rtsp_transport", "udp", 0);&#xA;    av_dict_set(&amp;opt, "announce_port", 
"1935", 0);&#xA;    av_dict_set(&amp;opt, "enable-protocol", "rtsp", 0);&#xA;    av_dict_set(&amp;opt, "protocol_whitelist", "file,udp,tcp,rtp,rtsp", 0);&#xA;    av_dict_set(&amp;opt, "enable-protocol", "rtp", 0);&#xA;    av_dict_set(&amp;opt, "enable-protocol", "rtsp", 0);&#xA;    av_dict_set(&amp;opt, "enable-protocol", "udp", 0);&#xA;    av_dict_set(&amp;opt, "enable-muxer", "rtsp", 0);&#xA;    av_dict_set(&amp;opt, "enable-muxer", "rtp", 0);*/&#xA;    av_dict_set(&amp;opt, "rtsp_transport", "tcp", 0);&#xA;    av_dict_set(&amp;opt, "stimeout", "2000000", 0);&#xA;    av_dict_set(&amp;opt, "max_delay", "500000", 0);&#xA;    av_dict_set(&amp;opt, "sprop-parameter-sets", "asdgasdfs", AV_DICT_APPEND);&#xA;    /* open it */&#xA;    ret = avcodec_open2(codecContext, codec, &amp;opt);&#xA;    if (ret &lt; 0) {&#xA;        fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret));&#xA;        exit(1);&#xA;    }&#xA;/*    // 打开RTSP输出URL  微软AI给出的代码&#xA;    if (!(outFormatCtx->oformat->flags &amp; AVFMT_NOFILE)) {&#xA;        int ret = avio_open(&amp;outFormatCtx->pb, rtmp_url, AVIO_FLAG_WRITE);&#xA;        if (ret &lt; 0) {&#xA;//            std::cerr &lt;&lt; "Could not open output URL " &lt;&lt; out_url &lt;&lt; std::endl;&#xA;            fprintf(stderr, "Could not open output URL  %s\n", av_err2str(ret));&#xA;            return -1;&#xA;        }&#xA;    }*/&#xA;&#xA;    avcodec_parameters_to_context(codecContext, st->codecpar);&#xA;&#xA;/*    AVDictionary *options = NULL;&#xA;    av_dict_set(&amp;options, "rtsp_transport", "tcp", 0);&#xA;    av_dict_set(&amp;options, "stimeout", "2000000", 0);&#xA;    av_dict_set(&amp;options, "max_delay", "500000", 0);&#xA;    // 初始化输出&#xA;    av_dict_set(&amp;options, "rtsp_transport", "tcp", 0);&#xA;//设置 接收包间隔最大延迟,微秒&#xA;    av_dict_set(&amp;options, "max_delay", "200000", 0);&#xA;// rtmp、rtsp延迟控制到最小&#xA;    av_dict_set(&amp;options, "fflags", "nobuffer", 0);&#xA;// 在进行网络操作时允许的最大等待时间。5秒&#xA;    av_dict_set(&amp;options, 
"timeout", "5000000", 0);&#xA;//设置 阻塞超时,否则可能在流断开时连接发生阻塞,微秒&#xA;    av_dict_set(&amp;options, "stimeout", "3000000", 0);&#xA;//设置 find_stream_info 最大时长,微秒&#xA;    av_dict_set(&amp;options, "analyzeduration", "1000000", 0);*/&#xA;    av_dict_set(&amp;opt, "preset", "medium", 0);&#xA;    av_dict_set(&amp;opt, "tune", "zerolatency", 0);&#xA;    av_dict_set(&amp;opt, "profile", "baseline", 0);&#xA;    av_dump_format(outFormatCtx, 0, rtmp_url, 1);&#xA;&#xA;    if (avformat_init_output(outFormatCtx, &amp;opt) != 0) {&#xA;        fprintf(stderr, "Error initializing output\n");&#xA;        return 1;&#xA;    }&#xA;    if (!(ofmt->flags &amp; AVFMT_NOFILE)) {&#xA;        ret = avio_open(&amp;outFormatCtx->pb, rtmp_url, AVIO_FLAG_WRITE);&#xA;        if (ret &lt; 0) {&#xA;            fprintf(stderr, "Could not open output file &#x27;%s&#x27;", rtmp_url);&#xA;            exit(1);&#xA;        }&#xA;    }&#xA;    /*   这种方式修改没有效果,无法添加修改SDP&#xA;     * av_dict_set(&amp;st->metadata, "title", "Cool video", 0);&#xA;       av_dict_set(&amp;st->metadata, "Content-Base", " rtsp://10.45.12.141/h264/ch1/main/av_stream/", 0);&#xA;       av_dict_set(&amp;st->metadata, "sprop-parameter-sets", "sdsfwedeo", 0);*/&#xA;    AVCodecParameters *codecParams = st->codecpar;&#xA;    const char *spropParameterSets = "Z0IACpZTBYmI,aMlWsA==";  // 替换为实际的sprop-parameter-sets值&#xA;    av_dict_set(&amp;st->metadata, "sprop-parameter-sets", spropParameterSets, 0);&#xA;    avcodec_parameters_to_context(codecContext, st->codecpar);&#xA;    AVFormatContext *avFormatContext[1];&#xA;    avFormatContext[0] = outFormatCtx;&#xA;    char spd[2048];&#xA;    av_sdp_create(avFormatContext, 1, spd, sizeof(spd));&#xA;    printf("%s\n", spd);&#xA;/*    ret = avio_open(&amp;outFormatCtx->pb, rtmp_url, AVIO_FLAG_WRITE);&#xA;    if (ret &lt; 0) {&#xA;        fprintf(stderr, "Could not open output ,%s\n", av_err2str(ret));&#xA;        exit(1);&#xA;    }*/&#xA;&#xA;/*// 设置 H264 参数&#xA;    AVDictionary *params = NULL;&#xA;    
av_dict_set(&amp;params, "profile", "main", 0);&#xA;    av_dict_set(&amp;params, "level", "3.1", 0);&#xA;&#xA;// 获取 `sprop-parameter-sets` 参数&#xA;    AVPacket *extradata = av_packet_alloc();&#xA;//    avcodec_parameters_from_context(extradata->data, codecContext);&#xA;&#xA;// 获取 `sprop-parameter-sets` 参数的大小&#xA;    int sprop_parameter_sets_size = extradata->size;&#xA;&#xA;// 释放资源&#xA;    av_packet_free(&amp;extradata);&#xA;&#xA;// 设置 `sprop-parameter-sets` 参数&#xA;    uint8_t *sprop_parameter_sets = extradata->data;&#xA;    codecContext->extradata = sprop_parameter_sets;&#xA;    codecContext->extradata_size = sprop_parameter_sets_size;*/&#xA;&#xA;    /* Write the header */&#xA;//    ret = avformat_write_header(outFormatCtx, NULL);&#xA;    ret = avformat_write_header(outFormatCtx, &amp;opt);&#xA;    if (ret != 0) {&#xA;        fprintf(stderr, "Error occurred when opening output %s\n", av_err2str(ret));&#xA;        exit(1);&#xA;    }&#xA;&#xA;    frame = av_frame_alloc();&#xA;    if (!frame) {&#xA;        fprintf(stderr, "Could not allocate video frame\n");&#xA;        exit(1);&#xA;    }&#xA;//    frame->format = c->pix_fmt;&#xA;//    frame->format = AV_PIX_FMT_YUV420P;&#xA;    frame->format = 0;&#xA;    frame->width = codecContext->width;&#xA;    frame->height = codecContext->height;&#xA;&#xA;    ret = av_frame_get_buffer(frame, 0);&#xA;    if (ret &lt; 0) {&#xA;        fprintf(stderr, "Could not allocate the video frame data ,%s\n", av_err2str(ret));&#xA;        exit(1);&#xA;    }&#xA;&#xA;    /* encode 1 second of video */&#xA;    for (i = 0; i &lt; 2500; i&#x2B;&#x2B;) {&#xA;        /* ... (rest of the encoding loop) ... 
*/&#xA;        fflush(stdout);&#xA;&#xA;        /* make sure the frame data is writable */&#xA;        ret = av_frame_make_writable(frame);&#xA;        if (ret &lt; 0)&#xA;            exit(1);&#xA;&#xA;        /* prepare a dummy image */&#xA;        /* Y */&#xA;        for (y = 0; y &lt; codecContext->height; y&#x2B;&#x2B;) {&#xA;            for (x = 0; x &lt; codecContext->width; x&#x2B;&#x2B;) {&#xA;                frame->data[0][y * frame->linesize[0] &#x2B; x] = x &#x2B; y &#x2B; i * 3;&#xA;            }&#xA;        }&#xA;&#xA;        /* Cb and Cr */&#xA;        for (y = 0; y &lt; codecContext->height / 2; y&#x2B;&#x2B;) {&#xA;            for (x = 0; x &lt; codecContext->width / 2; x&#x2B;&#x2B;) {&#xA;                frame->data[1][y * frame->linesize[1] &#x2B; x] = 128 &#x2B; y &#x2B; i * 2;&#xA;                frame->data[2][y * frame->linesize[2] &#x2B; x] = 64 &#x2B; x &#x2B; i * 5;&#xA;            }&#xA;        }&#xA;&#xA;        frame->pts = i;&#xA;&#xA;        /* encode the image */&#xA;        encode(codecContext, frame, pkt, outFormatCtx);&#xA;    }&#xA;&#xA;    /* flush the encoder */&#xA;    encode(codecContext, NULL, pkt, outFormatCtx);&#xA;&#xA;    /* Write the trailer */&#xA;    av_write_trailer(outFormatCtx);&#xA;&#xA;    /* Close the output */&#xA;    avformat_free_context(outFormatCtx);&#xA;&#xA;    avcodec_free_context(&amp;codecContext);&#xA;    av_frame_free(&amp;frame);&#xA;    av_packet_free(&amp;pkt);&#xA;&#xA;    return 0;&#xA;}&#xA;</codec></rtmp>

    &#xA;

    I searched online for how to add "sprop-parameter-sets", but none of the methods I found worked. I also used Wireshark to capture packets, but during the communication process there was still no "sprop-parameter-sets". Here is the method I tried :

    &#xA;

      AVDictionary *opt = NULL;&#xA; av_dict_set(&amp;opt, "sprop-parameter-sets", "asdgasdfs", 0);&#xA;

    &#xA;