
Media (91)
-
Spitfire Parade - Crisis
15 May 2011, by
Updated: September 2011
Language: English
Type: Audio
-
Wired NextMusic
14 May 2011, by
Updated: February 2012
Language: English
Type: Video
-
Video d’abeille en portrait
14 May 2011, by
Updated: February 2012
Language: French
Type: Video
-
Sintel MP4 Surround 5.1 Full
13 May 2011, by
Updated: February 2012
Language: English
Type: Video
-
Carte de Schillerkiez
13 May 2011, by
Updated: September 2011
Language: English
Type: Text
-
Publier une image simplement
13 April 2011, by ,
Updated: February 2012
Language: French
Type: Video
Other articles (83)
-
Le profil des utilisateurs
12 April 2011, by
Each user has a profile page for editing their personal information. In the default top-of-page menu, a menu item is created automatically when MediaSPIP is initialized; it is visible only when the visitor is logged in to the site.
The user can access profile editing from their author page; a "Modifier votre profil" link in the navigation is (...) -
Configurer la prise en compte des langues
15 November 2010, by
Accessing the configuration and adding supported languages
To configure support for new languages, you need to go to the "Administrer" section of the site.
From there, in the navigation menu, you can reach a "Gestion des langues" section that lets you enable support for new languages.
Each newly added language can still be disabled as long as no object has been created in that language. Once one has, it becomes greyed out in the configuration and (...) -
Publier sur MédiaSpip
13 June 2013
Can I post content from an iPad tablet?
Yes, if your MediaSPIP installation is at version 0.2 or higher. If needed, contact the administrator of your MediaSPIP to find out.
On other sites (14073)
-
FFMPEG Presentation Time Stamps (PTS) calculation in RTSP stream
8 December 2020, by BadaBudaBudu
Below please find a raw example of my code for a better understanding of what it does. Please note that this is example code from the official FFMPEG documentation that I updated myself (removing deprecated methods, etc.), complemented by my encoder.


/// STD
#include <iostream>
#include <string>
#include <vector>
#include <thread>
#include <chrono>

/// FFMPEG
extern "C"
{
 #include <libavformat/avformat.h>
 #include <libswscale/swscale.h>
 #include <libavutil/imgutils.h>
}

/// VideoLib
#include <tools/multimediaprocessing.h>
#include 
#include 
#include <enums/codec.h>
#include <enums/pixelformat.h>

/// OpenCV
#include <opencv2/opencv.hpp>

inline static const char *inputRtspAddress = "rtsp://192.168.0.186:8080/video/h264";

int main()
{
 AVFormatContext* formatContext = nullptr;

 AVStream* audioStream = nullptr;
 AVStream* videoStream = nullptr;
 AVCodec* audioCodec = nullptr;
 AVCodec* videoCodec = nullptr;
 AVCodecContext* audioCodecContext = nullptr;
 AVCodecContext* videoCodecContext = nullptr;
 vl::AudioSettings audioSettings;
 vl::VideoSettings videoSettings;

 int audioIndex = -1;
 int videoIndex = -1;

 SwsContext* swsContext = nullptr;
 std::vector<uint8_t> frameBuffer;
 AVFrame* frame = av_frame_alloc();
 AVFrame* decoderFrame = av_frame_alloc();

 AVPacket packet;
 cv::Mat mat;

 vl::tools::MultimediaProcessing multimediaProcessing("rtsp://127.0.0.1:8080/stream", vl::configs::rtspStream, 0, vl::enums::EPixelFormat::ABGR);

 // *** OPEN STREAM *** //
 if(avformat_open_input(&formatContext, inputRtspAddress, nullptr, nullptr) < 0)
 {
 std::cout << "Failed to open input." << std::endl;
 return EXIT_FAILURE;
 }

 if(avformat_find_stream_info(formatContext, nullptr) < 0)
 {
 std::cout << "Failed to find stream info." << std::endl;
 return EXIT_FAILURE;
 }

 // *** FIND DECODER FOR BOTH AUDIO AND VIDEO STREAM *** //
 audioCodec = avcodec_find_decoder(AVCodecID::AV_CODEC_ID_AAC);
 videoCodec = avcodec_find_decoder(AVCodecID::AV_CODEC_ID_H264);

 if(audioCodec == nullptr || videoCodec == nullptr)
 {
 std::cout << "No AUDIO or VIDEO in stream." << std::endl;
 return EXIT_FAILURE;
 }

 // *** FIND STREAM FOR BOTH AUDIO AND VIDEO STREAM *** //

 audioIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1, -1, &audioCodec, 0);
 videoIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_VIDEO, -1, -1, &videoCodec, 0);

 if(audioIndex < 0 || videoIndex < 0)
 {
 std::cout << "Failed to find AUDIO or VIDEO stream." << std::endl;
 return EXIT_FAILURE;
 }

 audioStream = formatContext->streams[audioIndex];
 videoStream = formatContext->streams[videoIndex];

 // *** ALLOC CODEC CONTEXT FOR BOTH AUDIO AND VIDEO STREAM *** //
 audioCodecContext = avcodec_alloc_context3(audioCodec);
 videoCodecContext = avcodec_alloc_context3(videoCodec);

 if(audioCodecContext == nullptr || videoCodecContext == nullptr)
 {
 std::cout << "Can not allocate AUDIO or VIDEO context." << std::endl;
 return EXIT_FAILURE;
 }

 if(avcodec_parameters_to_context(audioCodecContext, formatContext->streams[audioIndex]->codecpar) < 0 || avcodec_parameters_to_context(videoCodecContext, formatContext->streams[videoIndex]->codecpar) < 0)
 {
 std::cout << "Can not fill AUDIO or VIDEO codec context." << std::endl;
 return EXIT_FAILURE;
 }

 if(avcodec_open2(audioCodecContext, audioCodec, nullptr) < 0 || avcodec_open2(videoCodecContext, videoCodec, nullptr) < 0)
 {
 std::cout << "Failed to open AUDIO codec" << std::endl;
 return EXIT_FAILURE;
 }

 // *** INITIALIZE MULTIMEDIA PROCESSING *** //
 std::vector<unsigned char> extraData(audioStream->codecpar->extradata_size);
 std::copy_n(audioStream->codecpar->extradata, extraData.size(), extraData.begin());

 audioSettings.sampleRate = audioStream->codecpar->sample_rate;
 audioSettings.bitrate = audioStream->codecpar->bit_rate;
 audioSettings.codec = vl::enums::EAudioCodec::AAC;
 audioSettings.channels = audioStream->codecpar->channels;
 audioSettings.bitsPerCodedSample = audioStream->codecpar->bits_per_coded_sample;
 audioSettings.bitsPerRawSample = audioStream->codecpar->bits_per_raw_sample;
 audioSettings.blockAlign = audioStream->codecpar->block_align;
 audioSettings.channelLayout = audioStream->codecpar->channel_layout;
 audioSettings.format = audioStream->codecpar->format;
 audioSettings.frameSize = audioStream->codecpar->frame_size;
 audioSettings.codecExtraData = std::move(extraData);

 videoSettings.width = 1920;
 videoSettings.height = 1080;
 videoSettings.framerate = 25;
 videoSettings.pixelFormat = vl::enums::EPixelFormat::ARGB;
 videoSettings.bitrate = 8000 * 1000;
 videoSettings.codec = vl::enums::EVideoCodec::H264;

 multimediaProcessing.initEncoder(videoSettings, audioSettings);

 // *** INITIALIZE SWS CONTEXT *** //
 swsContext = sws_getCachedContext(nullptr, videoCodecContext->width, videoCodecContext->height, videoCodecContext->pix_fmt, videoCodecContext->width, videoCodecContext->height, AV_PIX_FMT_RGBA, SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);

 if (const auto inReturn = av_image_get_buffer_size(AV_PIX_FMT_RGBA, videoCodecContext->width, videoCodecContext->height, 1); inReturn > 0)
 {
 frameBuffer.resize(inReturn); // resize, not reserve: the buffer is written through raw pointers below
 }
 else
 {
 std::cout << "Can not get buffer size." << std::endl;
 return EXIT_FAILURE;
 }

 if (const auto inReturn = av_image_fill_arrays(frame->data, frame->linesize, frameBuffer.data(), AV_PIX_FMT_RGBA, videoCodecContext->width, videoCodecContext->height, 1); inReturn < 0)
 {
 std::cout << "Can not fill buffer arrays." << std::endl;
 return EXIT_FAILURE;
 }

 // *** MAIN LOOP *** //
 while(true)
 {
 // Return the next frame of a stream.
 if(av_read_frame(formatContext, &packet) == 0)
 {
 if(packet.stream_index == videoIndex) // Check if it is video packet.
 {
 // Send packet to decoder.
 if(avcodec_send_packet(videoCodecContext, &packet) == 0)
 {
 int returnCode = avcodec_receive_frame(videoCodecContext, decoderFrame); // Get Frame from decoder.

 if (returnCode == 0) // Transform frame and send it to encoder. And re-stream that.
 {
 sws_scale(swsContext, decoderFrame->data, decoderFrame->linesize, 0, decoderFrame->height, frame->data, frame->linesize);

 mat = cv::Mat(videoCodecContext->height, videoCodecContext->width, CV_8UC4, frameBuffer.data(), frame->linesize[0]);

 cv::resize(mat, mat, cv::Size(1920, 1080), 0, 0, cv::INTER_NEAREST);

 multimediaProcessing.encode(mat.data, packet.dts, packet.dts, (packet.flags & AV_PKT_FLAG_KEY) != 0); // This line sends the cv::Mat to the encoder and re-streams it.

 av_packet_unref(&packet);
 }
 else if(returnCode == AVERROR(EAGAIN))
 {
 av_frame_unref(decoderFrame); // Decoder needs more packets before it can emit a frame.
 }
 else
 {
 av_frame_unref(decoderFrame); // Note: av_freep() on an AVFrame* is incorrect; av_frame_free(&decoderFrame) would be the proper teardown.

 std::cout << "Error during decoding." << std::endl;
 return EXIT_FAILURE;
 }
 }
 }
 else if(packet.stream_index == audioIndex) // Check if it is audio packet.
 {
 std::vector<uint8_t> vectorPacket(packet.data, packet.data + packet.size);

 multimediaProcessing.addAudioPacket(vectorPacket, packet.dts, packet.dts);
 av_packet_unref(&packet); // Release the audio packet as well.
 }
 else
 {
 av_packet_unref(&packet);
 }
 }
 else
 {
 std::cout << "Can not send video packet to decoder." << std::endl;
 std::this_thread::sleep_for(std::chrono::seconds(1));
 }
 }

 return EXIT_SUCCESS;
 }


What does it do?


It takes a single RTSP stream, decodes its data so that I can, for example, draw something on its frames or whatever, and then streams it out under a different address.


Basically, I open the RTSP stream, check that it contains both audio and video streams, and find a decoder for each. Then I create an encoder, tell it what the output stream should look like, and that's it.


At this point I create an endless loop in which I read all packets coming from the input stream, decode them, do something with them, encode them again, and re-stream the result.


What is the issue?


If you take a closer look, I am sending both video and audio frames to the encoder together with the most recently received PTS and DTS contained in the AVPacket.


From the point when I receive the first AVPacket, the PTS and DTS look, for example, like this.


In the audio stream:




-22783, -21759, -20735, -19711, -18687, -17663, -16639, -15615, -14591, -13567, -12543, -11519, -10495, -9471, -8447, -7423, -6399, -5375, -4351, -3327, -2303, -1279, -255, 769, 1793, 2817, 3841, 4865, 5889, 6913, 7937, 8961, 9985, 11009, 12033, 13057, 14081, 15105, 16129, 17153




As you can see, the value increases by 1024 every time, which is the number of samples in one AAC frame, not the sample rate itself. Quite clear here.


In the video stream:




86400, 90000, 93600, 97200, 100800, 104400, 108000, 111600, 115200, 118800, 122400, 126000, 129600, 133200, 136800, 140400, 144000, 147600, 151200, 154800, 158400, 162000, 165600




As you can see, it increases by 3600 every time, but WHY? What does this number actually mean?
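For reference, a plausible explanation (an assumption on my part, not something the stream guarantees): RTP/RTSP transports H.264 on a 90 kHz clock, so at 25 fps each frame advances the timestamp by 90000 / 25 = 3600 ticks, while the AAC time base is one tick per sample, so each 1024-sample AAC frame advances it by 1024. Reusing the variables from the code above, the per-frame increments could be derived like this:

// Sketch only: assumes the decoder reports framerate and frame_size correctly.
AVRational videoTb = videoStream->time_base; // typically {1, 90000} for RTSP/H.264
int64_t videoStep = av_rescale_q(1, av_inv_q(videoCodecContext->framerate), videoTb); // 90000 / 25 = 3600
int64_t audioStep = av_rescale_q(audioCodecContext->frame_size,
                                 AVRational{1, audioCodecContext->sample_rate},
                                 audioStream->time_base); // 1024 when the time base is 1/sample_rate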


From what I understand, the received PTS and DTS are meant for the following:


The DTS should tell the decoder in which order frames have to be decoded, so that frames are processed in the correct order and not jumbled.


The PTS should give the correct time at which a frame has to be played/displayed in the output stream, so that frames appear in the correct order and not jumbled.


What am I trying to achieve?


As I said, I need to re-stream an RTSP stream. I cannot use the PTS and DTS that come with the received AVPackets, because at some point the input stream can randomly close and I have to open it again. When I actually do that, the PTS and DTS start again from negative values, just as in the samples above. I CANNOT send those "new" PTS and DTS to the encoder, because they are now lower than what the encoder/muxer expects.


I need to stream something continuously (both audio and video), even if it is a blank black screen or silent audio, and with each frame the PTS and DTS should rise by a specific amount. I need to figure out how that increment is calculated.
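A sketch of what could work here (my own idea, not a confirmed FFmpeg recipe): keep one running offset per stream and rebase every incoming timestamp onto it, re-anchoring the offset whenever the input is reopened, so the values handed to the encoder never go backwards.

// Hypothetical helper, not part of the code above.
extern "C" {
#include <libavutil/avutil.h> // AV_NOPTS_VALUE
}
#include <cstdint>

struct TimestampRebaser
{
    int64_t offset = 0;               // added to every incoming timestamp
    int64_t lastOut = AV_NOPTS_VALUE; // last value handed to the encoder
    int64_t step;                     // nominal per-frame increment (e.g. 3600 video, 1024 audio)

    explicit TimestampRebaser(int64_t nominalStep) : step(nominalStep) {}

    // Call once with the first timestamp of a freshly (re)opened input.
    void rebase(int64_t firstIn)
    {
        const int64_t next = (lastOut == AV_NOPTS_VALUE) ? 0 : lastOut + step;
        offset = next - firstIn;
    }

    // Map an input PTS/DTS to a monotonically increasing output value.
    int64_t map(int64_t in)
    {
        lastOut = in + offset;
        return lastOut;
    }
};

With one rebaser per stream, rebase(packet.dts) would be called right after each reconnect, and every packet.dts/packet.pts would go through map() before being passed to multimediaProcessing.encode() or addAudioPacket().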


----------------------------------


The final result should look like a mosaic of multiple input streams combined into a single output stream. A single input stream (the main one) carries both audio and video, and the rest (the side ones) carry video only. Some of these streams can close at random, and I need to ensure that they come back again as soon as possible. A layout sketch of such a mosaic follows below.
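Purely as an illustration of that layout (hypothetical code, assuming the decoded frames arrive as CV_8UC4 cv::Mat objects like the ones produced in the loop above):

#include <opencv2/opencv.hpp>
#include <vector>

// Hypothetical mosaic compositor: the main feed fills the left 1440x1080 area,
// and up to four side feeds stack in a 480x270 column on the right. Feeds that
// are currently down simply stay black, so the output never stops.
cv::Mat composeMosaic(const cv::Mat &mainFeed, const std::vector<cv::Mat> &sideFeeds)
{
    cv::Mat canvas(1080, 1920, CV_8UC4, cv::Scalar(0, 0, 0, 255)); // opaque black

    if (!mainFeed.empty())
    {
        cv::Mat roi = canvas(cv::Rect(0, 0, 1440, 1080));
        cv::resize(mainFeed, roi, roi.size(), 0, 0, cv::INTER_NEAREST); // in-place: ROI size/type match
    }

    for (std::size_t i = 0; i < sideFeeds.size() && i < 4; ++i)
    {
        if (sideFeeds[i].empty())
            continue;
        cv::Mat roi = canvas(cv::Rect(1440, static_cast<int>(i) * 270, 480, 270));
        cv::resize(sideFeeds[i], roi, roi.size(), 0, 0, cv::INTER_NEAREST);
    }
    return canvas;
}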


-
Dreamcast Serial Extractor
31 December 2017, by Multimedia Mike — Sega Dreamcast
It has not been a very productive year for blogging. But I started the year by describing an unfinished project that I developed for the Sega Dreamcast, so I may as well end the year the same way. The previous project was a media player. That initiative actually met with some amount of success and could have developed into something interesting if I had kept at it.
By contrast, this post describes an effort that was ultimately a fool’s errand that I spent way too much time trying to make work.
Problem Statement
In my neverending quest to analyze the structure of video games while also hoarding a massive collection of them (though I’m proud to report that I did play at least a few of them this past year), I wanted to be able to extract the data from my many Dreamcast titles, both games and demo discs. I had a tool called the DC Coder’s Cable, a serial cable that enables communication between a Dreamcast and a PC. With the right software, you could dump an entire Dreamcast GD-ROM, which contained a gigabyte worth of sectors.
Problem: The dumping software (named ‘dreamrip’ and written by noted game hacker BERO) operated in a very basic mode, methodically dumping sector after sector and sending it down the serial cable. This meant that it took about 28 hours to extract all the data on a single disc by running at the maximum speed of 115,200 bits/second, or about 11 kilobytes/second. I wanted to create a faster method.
The Pitch
I formed a mental model of dreamrip’s operation that looked like this:
As an improvement, I envisioned this beautiful architecture:
Architectural Assumptions
My proposed architecture was predicated on the assumption that the disc reading and serial output functions were both I/O-bound operations and that the CPU would be idle much of the time. My big idea was to use that presumably idle CPU time to compress the sectors before sending them over the wire. As long as the CPU could compress the data faster than 11 kbytes/sec, it should be a win. In order to achieve this, I broke the main program into 3 threads (a sketch of the arrangement follows this list):
- The first thread reads the sectors; more specifically, it asks the drive firmware to please read the sectors and make the data available in system RAM
- The second thread waits for sector data to appear in memory and then compresses it
- The third thread takes the compressed data when it is ready and shuffles it out through the serial cable
Simple and elegant, right?
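A minimal sketch of that producer/consumer arrangement (illustrative modern C++; the actual project would have used the Dreamcast toolchain's own threading primitives):

#include <condition_variable>
#include <mutex>
#include <queue>

// Bounded blocking queue that decouples the reader, compressor, and
// serial-writer threads: each stage blocks only when its neighbor lags behind.
template <typename T>
class BoundedQueue
{
public:
    explicit BoundedQueue(std::size_t cap) : cap_(cap) {}

    void push(T item)
    {
        std::unique_lock<std::mutex> lock(m_);
        notFull_.wait(lock, [&] { return q_.size() < cap_; });
        q_.push(std::move(item));
        notEmpty_.notify_one();
    }

    T pop()
    {
        std::unique_lock<std::mutex> lock(m_);
        notEmpty_.wait(lock, [&] { return !q_.empty(); });
        T item = std::move(q_.front());
        q_.pop();
        notFull_.notify_one();
        return item;
    }

private:
    std::queue<T> q_;
    std::size_t cap_;
    std::mutex m_;
    std::condition_variable notEmpty_, notFull_;
};

// Thread 1: rawSectors.push(readSector());
// Thread 2: compressed.push(compress(rawSectors.pop()));
// Thread 3: serialWrite(compressed.pop());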
For data track compression, I wanted to start with zlib in order to prove the architecture, but then also try bzip2 or lzma. As long as they could compress data faster than the serial port could write it, it should be a win; a quick throughput check of that assumption is sketched below. For audio track compression, I wanted to use the Flake FLAC encoder. According to my notes, I did get both bzip2 compression and the Flake compressor working on the Dreamcast. I recall choosing Flake over the official FLAC encoder because it was much simpler and had fewer dependencies, always an important consideration for platforms such as this.
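A back-of-the-envelope check of that "faster than the serial port" assumption might look like the following (hypothetical zlib-based code; run on the target CPU, the printed rate just has to beat the link's roughly 11 kbytes/sec):

#include <zlib.h>
#include <chrono>
#include <cstdio>
#include <vector>

int main()
{
    // 1000 dummy 2048-byte sectors; real disc data would compress differently.
    std::vector<unsigned char> sectors(2048 * 1000, 'x');
    std::vector<unsigned char> out(compressBound(sectors.size()));
    uLongf outLen = out.size();

    const auto t0 = std::chrono::steady_clock::now();
    compress2(out.data(), &outLen, sectors.data(), sectors.size(), Z_DEFAULT_COMPRESSION);
    const auto t1 = std::chrono::steady_clock::now();

    const double secs = std::chrono::duration<double>(t1 - t0).count();
    std::printf("compressed %zu -> %lu bytes at %.0f KB/s (link: ~11 KB/s)\n",
                sectors.size(), static_cast<unsigned long>(outLen),
                sectors.size() / 1024.0 / secs);
    return 0;
}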
Problems
I worked for quite a while on this project. I have a lot of notes recorded, but a lot of the problems I had remain a bit vague in my memory. However, there was one problem I discovered that eventually sunk the entire initiative:
The serial output operation is CPU-bound.
My initial mental model was that a buffer could be “handed off” to the serial subsystem and the CPU could go back to doing other work. Nope. Turns out that the CPU was participating at every step of the serial transfer.
Further, I eventually dug into the serial driver code and learned that there was already some compression taking place via the miniLZO library.
Lessons Learned
- Recognize the assumptions that you’re making up front at the start of the project.
- Prototype in order to ensure plausibility.
- Profile to make sure you’re optimizing the right thing (this is something I have learned again and again).
Another interesting tidbit from my notes: it doesn't matter how many sectors you read at a time; the overall speed is roughly the same. I endeavored to read 1000 2048-byte data sectors, either 1, 10, or 100 at a time, or all 1000 at once. My results:
- 1: 19442 ms
- 10: 19207 ms
- 100: 19194 ms
- 1000: 19320 ms
No difference. That surprised me.
Side Benefits
At one point, I needed to understand how BERO’s dreamrip software was operating. I knew I used to have the source code but I could no longer find it. Instead, I decided to try to reverse engineer what I needed from the SH-4 binary image that I had. It wasn’t an ELF image; rather, it was a raw binary meant to be loaded at a particular memory location, which makes it extra challenging for ‘objdump’. This led to me asking my most viewed and upvoted question on Stack Overflow: “Disassembling A Flat Binary File Using objdump”. The next day, it also led me to post one of my most upvoted answers when I found the solution elsewhere.
Strangely, I have since tried out the command line shown in my answer and have been unable to make it work. But people keep upvoting both the question and the answer.
Eventually this all became moot when I discovered a misplaced copy of the source code on one of my computers.
I strongly recall binging through the Alias TV show while I was slogging away on this project, so I guess that’s a positive association since I got so many fun screenshots out of it.
The Final Resolution
Strangely, I was still determined to make this project work even though the Dreamcast SD adapter arrived for me about halfway through the effort. Part of this was just stubbornness, but part of it was my assumptions about serial port speeds; in particular, my assumption that there was a certain speed-of-light type of limitation on serial port speeds, so that the SD adapter, operating over the DC’s serial port, would not be appreciably faster than the serial cable.
This turned out to be very incorrect. In fact, the SD adapter is capable of extracting an entire gigabyte disc image in 35-40 minutes. This is the method I have since been using to extract Dreamcast disc images.
The post Dreamcast Serial Extractor first appeared on Breaking Eggs And Making Omelettes.
-
FFmpeg: A generic error in an external library occurred when using FFmpeg 6.1's avcodec_send_frame
4 January 2024, by MMingYI
I have the same code that can successfully push streams (RTMP) in the Windows environment, but in the Android environment it fails with an error message. The error occurs in FFmpeg 6.1's avcodec_send_frame method. By the way, I compiled the FFmpeg library for Android myself, while for Win11 I downloaded the official package. I will provide the code for both Android and Win11 below.


Android:


static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,

 AVFormatContext *outFormatCtx) {
 int ret;

 /* send the frame to the encoder */
 if (frame)
 LOGE2("Send frame %ld\n", frame->pts);

 ret = avcodec_send_frame(enc_ctx, frame);
 if (ret < 0) {
 char errbuf[AV_ERROR_MAX_STRING_SIZE];
 av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
 LOGE2("Error sending a frame for encoding ,%s\n", errbuf);
// exit(1);
 return;
 }

 while (ret >= 0) {
 ret = avcodec_receive_packet(enc_ctx, pkt);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 return;
 else if (ret < 0) {
 fprintf(stderr, "Error during encoding\n");
 exit(1);
 }

 printf("Write packet (size=%5d)\n", pkt->pts);
 /* ret = av_interleaved_write_frame(outFormatCtx, pkt);
 if (ret < 0) {
 LOGE2("write frame err=%s", av_err2str(ret));
 break;
 }*/
// printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);
 av_write_frame(outFormatCtx, pkt); // Write the packet to the RTMP stream
 av_packet_unref(pkt);
 }
}

PUSHER_FUNC(int, testPush, jstring yuvPath, jstring outputPath) {
 const char *yvu_path = env->GetStringUTFChars(yuvPath, JNI_FALSE);
 const char *output_path = env->GetStringUTFChars(outputPath, JNI_FALSE);
 const char *rtmp_url = output_path;
 const AVCodec *codec;
 AVCodecContext *codecContext = NULL;
 AVFormatContext *outFormatCtx;
 int ret = 0;
 AVStream *outStream;
 AVFrame *frame;
 AVPacket *pkt;
 int i, x, y;
 avformat_network_init();

 codec = avcodec_find_encoder(AV_CODEC_ID_H264);
// codec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
// codec = avcodec_find_encoder(AV_CODEC_ID_H265);
 if (!codec) {
 LOGE2("JNI Error finding H.264 encoder");
 return -1;
 }
 codecContext = avcodec_alloc_context3(codec);
 if (!codecContext) {
 fprintf(stderr, "Could not allocate video codec context\n");
 return -1;
 }

 /* Allocate the output context */
 outFormatCtx = avformat_alloc_context();
 if (!outFormatCtx) {
 fprintf(stderr, "Could not allocate output context\n");
 return -1;
 }

 /* Open the RTMP output */
 const AVOutputFormat *ofmt = av_guess_format("flv", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mpegts", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mp4", NULL, NULL);
 if (!ofmt) {
 fprintf(stderr, "Could not find output format\n");
 return -1;
 }
 outFormatCtx->oformat = ofmt;
 outFormatCtx->url = av_strdup(rtmp_url);
 /* Add a video stream */
 outStream = avformat_new_stream(outFormatCtx, codec);
 if (!outStream) {
 fprintf(stderr, "Could not allocate stream\n");
 return -1;
 }
 outStream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 outStream->codecpar->codec_id = codec->id;
 outStream->codecpar->width = 352;
 outStream->codecpar->height = 288;

 /* Set the output URL */
 av_dict_set(&outFormatCtx->metadata, "url", rtmp_url, 0);

 pkt = av_packet_alloc();
 if (!pkt)
 return -1;

 /* ... (rest of the setup code) ... */
/* put sample parameters */
 codecContext->bit_rate = 400000;
 /* resolution must be a multiple of two */
 codecContext->width = 352;
 codecContext->height = 288;
 /* frames per second */
 codecContext->time_base = (AVRational) {1, 25};
 codecContext->framerate = (AVRational) {25, 1};

 /* emit one intra frame every ten frames
 * check frame pict_type before passing frame
 * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
 * then gop_size is ignored and the output of encoder
 * will always be I frame irrespective to gop_size
 */
 codecContext->gop_size = 10;
 codecContext->max_b_frames = 1;
 codecContext->pix_fmt = AV_PIX_FMT_YUV420P;

 if (codec->id == AV_CODEC_ID_H264)
 av_opt_set(codecContext->priv_data, "preset", "slow", 0);

 /* open it */
 ret = avcodec_open2(codecContext, codec, NULL);
 if (ret < 0) {
 LOGE2("JNI Error opening codec eer%s", av_err2str(ret));
 return ret;
 }

 // NB: this copies the stream's mostly unset codecpar back into the already-opened
 // encoder context, overwriting fields such as pix_fmt that were configured above.
 avcodec_parameters_to_context(codecContext, outStream->codecpar);

 if (avio_open(&outFormatCtx->pb, rtmp_url, AVIO_FLAG_WRITE)) {
 fprintf(stderr, "Could not open output\n");
 return ret;
 }
 /* Write the header */
 if (avformat_write_header(outFormatCtx, NULL) != 0) {
 fprintf(stderr, "Error occurred when opening output\n");
 return ret;
 }

 frame = av_frame_alloc();
 if (!frame) {
 fprintf(stderr, "Could not allocate video frame\n");
 return -1;
 }
 frame->format = codecContext->pix_fmt;
 frame->format = AV_PIX_FMT_YUV420P;
 frame->format = 0; // only this last assignment takes effect; 0 happens to equal AV_PIX_FMT_YUV420P
 frame->width = codecContext->width;
 frame->height = codecContext->height;

 ret = av_frame_get_buffer(frame, 0);
 if (ret < 0) {
 fprintf(stderr, "Could not allocate the video frame data ,%s\n", av_err2str(ret));
 return ret;
 }

 /* FILE *yuv_file = fopen(yvu_path, "rb");
 if (yuv_file == NULL) {
 LOGE2("cannot open h264 file");
 return -1;
 }*/

 /* encode 25000 frames of video */
 for (i = 0; i < 25000; i++) {
// for (i = 0; i < 25; i++) {
// fflush(stdout);

 /* make sure the frame data is writable */
 ret = av_frame_make_writable(frame);
 if (ret < 0)
 exit(1);

 /* prepare a dummy image */
 /* Y */
 for (y = 0; y < codecContext->height; y++) {
 for (x = 0; x < codecContext->width; x++) {
 frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
 }
 }

 /* Cb and Cr */
 for (y = 0; y < codecContext->height / 2; y++) {
 for (x = 0; x < codecContext->width / 2; x++) {
 frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
 frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
 }
 }

 frame->pts = i;

 /* encode the image */
 encode(codecContext, frame, pkt, outFormatCtx);
 }

// fclose(yuv_file);

 /* flush the encoder */
 encode(codecContext, NULL, pkt, outFormatCtx);

 /* Write the trailer */
 av_write_trailer(outFormatCtx);

 /* Close the output */
 avformat_free_context(outFormatCtx);

 avcodec_free_context(&codecContext);
 av_frame_free(&frame);
 av_packet_free(&pkt);
}



Win11:


#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libavutil/time.h>

static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
 AVFormatContext *outFormatCtx) {
 int ret;

 /* send the frame to the encoder */
 if (frame)
 printf("Send frame %3"PRId64"\n", frame->pts);

 ret = avcodec_send_frame(enc_ctx, frame);
 if (ret < 0) {
 char errbuf[AV_ERROR_MAX_STRING_SIZE];
 av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
 fprintf(stderr, "Error sending a frame for encoding ,%s\n", errbuf);
 exit(1);
 }

 while (ret >= 0) {
 ret = avcodec_receive_packet(enc_ctx, pkt);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 return;
 else if (ret < 0) {
 fprintf(stderr, "Error during encoding\n");
 exit(1);
 }

 printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);
 av_write_frame(outFormatCtx, pkt); // Write the packet to the RTMP stream
 av_packet_unref(pkt);
 }
}

int main(int argc, char **argv) {
 av_log_set_level(AV_LOG_DEBUG);
 const char *rtmp_url, *codec_name;
 const AVCodec *codec;
 AVCodecContext *codecContext = NULL;
 int i, ret, x, y;
 AVFormatContext *outFormatCtx;
 AVStream *st;
 AVFrame *frame;
 AVPacket *pkt;
 uint8_t endcode[] = {0, 0, 1, 0xb7};

 if (argc <= 3) {
 fprintf(stderr, "Usage: %s <rtmp url="url"> <codec>\n", argv[0]);
 exit(0);
 }
 rtmp_url = argv[1];
 codec_name = argv[2];
 avformat_network_init();
 /* find the mpeg1video encoder */
// codec = avcodec_find_encoder_by_name(codec_name);
// codec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
// codec = avcodec_find_encoder(AV_CODEC_ID_VP9);
// codec = avcodec_find_encoder(AV_CODEC_ID_MPEG2VIDEO);
// codec = avcodec_find_encoder(AV_CODEC_ID_H264);
 codec = avcodec_find_encoder(AV_CODEC_ID_H264);
// codec = avcodec_find_encoder(AV_CODEC_ID_AV1);
// codec = avcodec_find_encoder(AV_CODEC_ID_H265);
 if (!codec) {
 fprintf(stderr, "Codec '%s' not found\n", codec_name);
 exit(1);
 }
 codecContext = avcodec_alloc_context3(codec);
 if (!codecContext) {
 fprintf(stderr, "Could not allocate video codec context\n");
 exit(1);
 }

 /* Allocate the output context */
 outFormatCtx = avformat_alloc_context();
 if (!outFormatCtx) {
 fprintf(stderr, "Could not allocate output context\n");
 exit(1);
 }

 /* Open the RTMP output */
 const AVOutputFormat *ofmt = av_guess_format("flv", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("MKV", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("rtmp", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mpegts", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mp4", NULL, NULL);
 if (!ofmt) {
 fprintf(stderr, "Could not find output format\n");
 exit(1);
 }
 outFormatCtx->oformat = ofmt;
 outFormatCtx->url = av_strdup(rtmp_url);
 /* Add a video stream */
 st = avformat_new_stream(outFormatCtx, codec);
 if (!st) {
 fprintf(stderr, "Could not allocate stream\n");
 exit(1);
 }
 st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 st->codecpar->codec_id = codec->id;
 st->codecpar->width = 352;
 st->codecpar->height = 288;
// st->codecpar = c;
// st->codecpar->format = AV_PIX_FMT_YUV420P;
 // Set video stream parameters
// st->codecpar->framerate = (AVRational){25, 1};

 /* Set the output URL */
 av_dict_set(&outFormatCtx->metadata, "url", rtmp_url, 0);


 pkt = av_packet_alloc();
 if (!pkt)
 exit(1);

 /* ... (rest of the setup code) ... */
/* put sample parameters */
 codecContext->bit_rate = 400000;
 /* resolution must be a multiple of two */
 codecContext->width = 352;
 codecContext->height = 288;
 /* frames per second */
 codecContext->time_base = (AVRational) {1, 25};
 codecContext->framerate = (AVRational) {25, 1};

 /* emit one intra frame every ten frames
 * check frame pict_type before passing frame
 * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
 * then gop_size is ignored and the output of encoder
 * will always be I frame irrespective to gop_size
 */
 codecContext->gop_size = 10;
 codecContext->max_b_frames = 1;
 codecContext->pix_fmt = AV_PIX_FMT_YUV420P;

 if (codec->id == AV_CODEC_ID_H264)
 av_opt_set(codecContext->priv_data, "preset", "slow", 0);

 /* open it */
 ret = avcodec_open2(codecContext, codec, NULL);
 if (ret < 0) {
 fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret));
 exit(1);
 }

 // NB: as in the Android version, this copies the stream's mostly unset codecpar
 // back into the already-opened encoder context, overwriting pix_fmt and friends.
 avcodec_parameters_to_context(codecContext, st->codecpar);

 if (avio_open(&outFormatCtx->pb, rtmp_url, AVIO_FLAG_WRITE)) {
 fprintf(stderr, "Could not open output\n");
 exit(1);
 }
 /* Write the header */
 if (avformat_write_header(outFormatCtx, NULL) != 0) {
 fprintf(stderr, "Error occurred when opening output\n");
 exit(1);
 }

 frame = av_frame_alloc();
 if (!frame) {
 fprintf(stderr, "Could not allocate video frame\n");
 exit(1);
 }
// frame->format = c->pix_fmt;
// frame->format = AV_PIX_FMT_YUV420P;
 frame->format = 0; // note: 0 happens to equal AV_PIX_FMT_YUV420P
 frame->width = codecContext->width;
 frame->height = codecContext->height;

 ret = av_frame_get_buffer(frame, 0);
 if (ret < 0) {
 fprintf(stderr, "Could not allocate the video frame data ,%s\n", av_err2str(ret));
 exit(1);
 }

 /* encode 2500 frames of video (100 seconds at 25 fps) */
 for (i = 0; i < 2500; i++) {
 /* ... (rest of the encoding loop) ... */
 fflush(stdout);

 /* make sure the frame data is writable */
 ret = av_frame_make_writable(frame);
 if (ret < 0)
 exit(1);

 /* prepare a dummy image */
 /* Y */
 for (y = 0; y < codecContext->height; y++) {
 for (x = 0; x < codecContext->width; x++) {
 frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
 }
 }

 /* Cb and Cr */
 for (y = 0; y < codecContext->height / 2; y++) {
 for (x = 0; x < codecContext->width / 2; x++) {
 frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
 frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
 }
 }

 frame->pts = i;

 /* encode the image */
 encode(codecContext, frame, pkt, outFormatCtx);
 }

 /* flush the encoder */
 encode(codecContext, NULL, pkt, outFormatCtx);

 /* Write the trailer */
 av_write_trailer(outFormatCtx);

 /* Close the output */
 avformat_free_context(outFormatCtx);

 avcodec_free_context(&codecContext);
 av_frame_free(&frame);
 av_packet_free(&pkt);

 return 0;
}


I suspect it's an issue with the FFmpeg library I compiled, so I searched GitHub for steps to compile FFmpeg, but the package compiled that way still has the same problem. I don't know what to do now.
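One way to narrow that suspicion down (a hypothetical diagnostic of my own, not an official recipe) is to print what the linked build was configured with and which H.264 encoders it actually provides, since a self-compiled Android FFmpeg without libx264 behaves differently from the official desktop package:

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
}
#include <cstdio>

int main()
{
    std::printf("FFmpeg version: %s\n", av_version_info());
    std::printf("configuration: %s\n", avcodec_configuration());

    // If libx264 is missing from the build, avcodec_find_encoder(AV_CODEC_ID_H264)
    // may return a different encoder (or none at all), which would explain a
    // failure that only shows up on Android.
    const AVCodec *x264 = avcodec_find_encoder_by_name("libx264");
    const AVCodec *h264 = avcodec_find_encoder(AV_CODEC_ID_H264);
    std::printf("libx264 encoder: %s\n", x264 ? "present" : "missing");
    std::printf("default H.264 encoder: %s\n", h264 ? h264->name : "none");
    return 0;
}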