
Media (91)
-
999,999
26 September 2011
Updated: September 2011
Language: English
Type: Audio
-
The Slip - Artworks
26 September 2011
Updated: September 2011
Language: English
Type: Text
-
Demon seed (wav version)
26 September 2011
Updated: April 2013
Language: English
Type: Audio
-
The four of us are dying (wav version)
26 September 2011
Updated: April 2013
Language: English
Type: Audio
-
Corona radiata (wav version)
26 September 2011
Updated: April 2013
Language: English
Type: Audio
-
Lights in the sky (wav version)
26 September 2011
Updated: April 2013
Language: English
Type: Audio
Other articles (97)
-
Customizing categories
21 June 2013
Category creation form
For those who know SPIP well, a category can be thought of as a section (rubrique).
For a category-type document, the fields offered by default are: Text
This form can be modified under:
Administration > Configuration des masques de formulaire.
For a media-type document, the fields not displayed by default are: Short description
It is also in this configuration section that you can specify the (...) -
Publishing on MediaSPIP
13 June 2013
Can I post content from an iPad tablet?
Yes, if your installed MediaSPIP is at version 0.2 or higher. If needed, contact the administrator of your MediaSPIP to find out. -
Sites built with MediaSPIP
2 May 2011
This page presents some of the sites running MediaSPIP.
You can of course add your own using the form at the bottom of the page.
On other sites (16631)
-
FFmpeg: A General error in an external library occurred when using FFmpeg 6.1's avcodec_send_frame
4 January 2024, by MMingY
I have the same code that can successfully push an RTMP stream in the Win11 environment, but in the Android environment it fails with an error message. The call that reports the error is avcodec_send_frame in FFmpeg 6.1. Note that I compiled the FFmpeg library for Android myself, whereas for Win11 I downloaded the official package. I provide the code for both Android and Win11 below.
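On Android, FFmpeg's internal log output is not visible anywhere by default, and that log is usually where the H.264 encoder prints the concrete reason behind an error code returned by avcodec_send_frame. A minimal sketch of routing av_log to logcat, assuming a JNI/NDK build; the tag and function names are arbitrary:

#include <stdarg.h>
#include <android/log.h>
#include <libavutil/log.h>

/* Forward FFmpeg's log messages to logcat so the underlying encoder error
 * becomes visible alongside the LOGE2 output. */
static void ffmpeg_log_to_logcat(void *avcl, int level, const char *fmt, va_list vl)
{
    (void) avcl;
    if (level > av_log_get_level())
        return;
    __android_log_vprint(ANDROID_LOG_INFO, "FFmpegJNI", fmt, vl);
}

/* Call once, e.g. at the start of testPush(), before any encoding runs. */
static void install_ffmpeg_logcat_logging(void)
{
    av_log_set_level(AV_LOG_DEBUG);
    av_log_set_callback(ffmpeg_log_to_logcat);
}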


Android:


static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
                   AVFormatContext *outFormatCtx) {
 int ret;

 /* send the frame to the encoder */
 if (frame)
 LOGE2("Send frame %ld\n", frame->pts);

 ret = avcodec_send_frame(enc_ctx, frame);
 if (ret < 0) {
 char errbuf[AV_ERROR_MAX_STRING_SIZE];
 av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
 LOGE2("Error sending a frame for encoding ,%s\n", errbuf);
// exit(1);
 return;
 }

 while (ret >= 0) {
 ret = avcodec_receive_packet(enc_ctx, pkt);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 return;
 else if (ret < 0) {
 fprintf(stderr, "Error during encoding\n");
 exit(1);
 }

 printf("Write packet (size=%5d)\n", pkt->pts);
 /* ret = av_interleaved_write_frame(outFormatCtx, pkt);
 if (ret < 0) {
 LOGE2("write frame err=%s", av_err2str(ret));
 break;
 }*/
// printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);
 av_write_frame(outFormatCtx, pkt); // Write the packet to the RTMP stream
 av_packet_unref(pkt);
 }
}

PUSHER_FUNC(int, testPush, jstring yuvPath, jstring outputPath) {
 const char *yvu_path = env->GetStringUTFChars(yuvPath, JNI_FALSE);
 const char *output_path = env->GetStringUTFChars(outputPath, JNI_FALSE);
 const char *rtmp_url = output_path;
 const AVCodec *codec;
 AVCodecContext *codecContext = NULL;
 AVFormatContext *outFormatCtx;
 int ret = 0;
 AVStream *outStream;
 AVFrame *frame;
 AVPacket *pkt;
 int i, x, y;
 avformat_network_init();

 codec = avcodec_find_encoder(AV_CODEC_ID_H264);
// codec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
// codec = avcodec_find_encoder(AV_CODEC_ID_H265);
 if (!codec) {
 LOGE2("JNI Error finding H.264 encoder");
 return -1;
 }
 codecContext = avcodec_alloc_context3(codec);
 if (!codecContext) {
 fprintf(stderr, "Could not allocate video codec context\n");
 return -1;
 }

 /* Allocate the output context */
 outFormatCtx = avformat_alloc_context();
 if (!outFormatCtx) {
 fprintf(stderr, "Could not allocate output context\n");
 return -1;
 }

 /* Open the RTMP output */
 const AVOutputFormat *ofmt = av_guess_format("flv", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mpegts", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mp4", NULL, NULL);
 if (!ofmt) {
 fprintf(stderr, "Could not find output format\n");
 return -1;
 }
 outFormatCtx->oformat = ofmt;
 outFormatCtx->url = av_strdup(rtmp_url);
 /* Add a video stream */
 outStream = avformat_new_stream(outFormatCtx, codec);
 if (!outStream) {
 fprintf(stderr, "Could not allocate stream\n");
 return -1;
 }
 outStream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 outStream->codecpar->codec_id = codec->id;
 outStream->codecpar->width = 352;
 outStream->codecpar->height = 288;

 /* Set the output URL */
 av_dict_set(&outFormatCtx->metadata, "url", rtmp_url, 0);

 pkt = av_packet_alloc();
 if (!pkt)
 return -1;

 /* ... (rest of the setup code) ... */
/* put sample parameters */
 codecContext->bit_rate = 400000;
 /* resolution must be a multiple of two */
 codecContext->width = 352;
 codecContext->height = 288;
 /* frames per second */
 codecContext->time_base = (AVRational) {1, 25};
 codecContext->framerate = (AVRational) {25, 1};

 /* emit one intra frame every ten frames
 * check frame pict_type before passing frame
 * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
 * then gop_size is ignored and the output of encoder
 * will always be I frame irrespective to gop_size
 */
 codecContext->gop_size = 10;
 codecContext->max_b_frames = 1;
 codecContext->pix_fmt = AV_PIX_FMT_YUV420P;

 if (codec->id == AV_CODEC_ID_H264)
 av_opt_set(codecContext->priv_data, "preset", "slow", 0);

 /* open it */
 ret = avcodec_open2(codecContext, codec, NULL);
 if (ret < 0) {
 LOGE2("JNI Error opening codec eer%s", av_err2str(ret));
 return ret;
 }

 avcodec_parameters_to_context(codecContext, outStream->codecpar);

 if (avio_open(&outFormatCtx->pb, rtmp_url, AVIO_FLAG_WRITE)) {
 fprintf(stderr, "Could not open output\n");
 return ret;
 }
 /* Write the header */
 if (avformat_write_header(outFormatCtx, NULL) != 0) {
 fprintf(stderr, "Error occurred when opening output\n");
 return ret;
 }

 frame = av_frame_alloc();
 if (!frame) {
 fprintf(stderr, "Could not allocate video frame\n");
 return -1;
 }
 frame->format = codecContext->pix_fmt;
 frame->format = AV_PIX_FMT_YUV420P;
 frame->format = 0;
 frame->width = codecContext->width;
 frame->height = codecContext->height;

 ret = av_frame_get_buffer(frame, 0);
 if (ret < 0) {
 fprintf(stderr, "Could not allocate the video frame data ,%s\n", av_err2str(ret));
 return ret;
 }

 /* FILE *yuv_file = fopen(yvu_path, "rb");
 if (yuv_file == NULL) {
 LOGE2("cannot open h264 file");
 return -1;
 }*/

 /* encode 1 second of video */
 for (i = 0; i < 25000; i++) {
// for (i = 0; i < 25; i++) {
// fflush(stdout);

 /* make sure the frame data is writable */
 ret = av_frame_make_writable(frame);
 if (ret < 0)
 exit(1);

 /* prepare a dummy image */
 /* Y */
 for (y = 0; y < codecContext->height; y++) {
 for (x = 0; x < codecContext->width; x++) {
 frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
 }
 }

 /* Cb and Cr */
 for (y = 0; y < codecContext->height / 2; y++) {
 for (x = 0; x < codecContext->width / 2; x++) {
 frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
 frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
 }
 }

 frame->pts = i;

 /* encode the image */
 encode(codecContext, frame, pkt, outFormatCtx);
 }

// fclose(yuv_file);

 /* flush the encoder */
 encode(codecContext, NULL, pkt, outFormatCtx);

 /* Write the trailer */
 av_write_trailer(outFormatCtx);

 /* Close the output */
 avformat_free_context(outFormatCtx);

 avcodec_free_context(&codecContext);
 av_frame_free(&frame);
 av_packet_free(&pkt);
}



Win11:


#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libavutil/time.h>

static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
 AVFormatContext *outFormatCtx) {
 int ret;

 /* send the frame to the encoder */
 if (frame)
 printf("Send frame %3"PRId64"\n", frame->pts);

 ret = avcodec_send_frame(enc_ctx, frame);
 if (ret < 0) {
 char errbuf[AV_ERROR_MAX_STRING_SIZE];
 av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
 fprintf(stderr, "Error sending a frame for encoding ,%s\n", errbuf);
 exit(1);
 }

 while (ret >= 0) {
 ret = avcodec_receive_packet(enc_ctx, pkt);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 return;
 else if (ret < 0) {
 fprintf(stderr, "Error during encoding\n");
 exit(1);
 }

 printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);
 av_write_frame(outFormatCtx, pkt); // Write the packet to the RTMP stream
 av_packet_unref(pkt);
 }
}

int main(int argc, char **argv) {
 av_log_set_level(AV_LOG_DEBUG);
 const char *rtmp_url, *codec_name;
 const AVCodec *codec;
 AVCodecContext *codecContext = NULL;
 int i, ret, x, y;
 AVFormatContext *outFormatCtx;
 AVStream *st;
 AVFrame *frame;
 AVPacket *pkt;
 uint8_t endcode[] = {0, 0, 1, 0xb7};

 if (argc <= 3) {
 fprintf(stderr, "Usage: %s <rtmp url="url"> <codec>\n", argv[0]);
 exit(0);
 }
 rtmp_url = argv[1];
 codec_name = argv[2];
 avformat_network_init();
 /* find the mpeg1video encoder */
// codec = avcodec_find_encoder_by_name(codec_name);
// codec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
// codec = avcodec_find_encoder(AV_CODEC_ID_VP9);
// codec = avcodec_find_encoder(AV_CODEC_ID_MPEG2VIDEO);
// codec = avcodec_find_encoder(AV_CODEC_ID_H264);
 codec = avcodec_find_encoder(AV_CODEC_ID_H264);
// codec = avcodec_find_encoder(AV_CODEC_ID_AV1);
// codec = avcodec_find_encoder(AV_CODEC_ID_H265);
 if (!codec) {
 fprintf(stderr, "Codec '%s' not found\n", codec_name);
 exit(1);
 }
 codecContext = avcodec_alloc_context3(codec);
 if (!codecContext) {
 fprintf(stderr, "Could not allocate video codec context\n");
 exit(1);
 }

 /* Allocate the output context */
 outFormatCtx = avformat_alloc_context();
 if (!outFormatCtx) {
 fprintf(stderr, "Could not allocate output context\n");
 exit(1);
 }

 /* Open the RTMP output */
 const AVOutputFormat *ofmt = av_guess_format("flv", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("MKV", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("rtmp", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mpegts", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mp4", NULL, NULL);
 if (!ofmt) {
 fprintf(stderr, "Could not find output format\n");
 exit(1);
 }
 outFormatCtx->oformat = ofmt;
 outFormatCtx->url = av_strdup(rtmp_url);
 /* Add a video stream */
 st = avformat_new_stream(outFormatCtx, codec);
 if (!st) {
 fprintf(stderr, "Could not allocate stream\n");
 exit(1);
 }
 st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 st->codecpar->codec_id = codec->id;
 st->codecpar->width = 352;
 st->codecpar->height = 288;
// st->codecpar = c;
// st->codecpar->format = AV_PIX_FMT_YUV420P;
 // Set video stream parameters
// st->codecpar->framerate = (AVRational){25, 1};

 /* Set the output URL */
 av_dict_set(&outFormatCtx->metadata, "url", rtmp_url, 0);


 pkt = av_packet_alloc();
 if (!pkt)
 exit(1);

 /* ... (rest of the setup code) ... */
/* put sample parameters */
 codecContext->bit_rate = 400000;
 /* resolution must be a multiple of two */
 codecContext->width = 352;
 codecContext->height = 288;
 /* frames per second */
 codecContext->time_base = (AVRational) {1, 25};
 codecContext->framerate = (AVRational) {25, 1};

 /* emit one intra frame every ten frames
 * check frame pict_type before passing frame
 * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
 * then gop_size is ignored and the output of encoder
 * will always be I frame irrespective to gop_size
 */
 codecContext->gop_size = 10;
 codecContext->max_b_frames = 1;
 codecContext->pix_fmt = AV_PIX_FMT_YUV420P;

 if (codec->id == AV_CODEC_ID_H264)
 av_opt_set(codecContext->priv_data, "preset", "slow", 0);

 /* open it */
 ret = avcodec_open2(codecContext, codec, NULL);
 if (ret < 0) {
 fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret));
 exit(1);
 }

 avcodec_parameters_to_context(codecContext, st->codecpar);

 if (avio_open(&outFormatCtx->pb, rtmp_url, AVIO_FLAG_WRITE)) {
 fprintf(stderr, "Could not open output\n");
 exit(1);
 }
 /* Write the header */
 if (avformat_write_header(outFormatCtx, NULL) != 0) {
 fprintf(stderr, "Error occurred when opening output\n");
 exit(1);
 }

 frame = av_frame_alloc();
 if (!frame) {
 fprintf(stderr, "Could not allocate video frame\n");
 exit(1);
 }
// frame->format = c->pix_fmt;
// frame->format = AV_PIX_FMT_YUV420P;
 frame->format = 0;
 frame->width = codecContext->width;
 frame->height = codecContext->height;

 ret = av_frame_get_buffer(frame, 0);
 if (ret < 0) {
 fprintf(stderr, "Could not allocate the video frame data ,%s\n", av_err2str(ret));
 exit(1);
 }

 /* encode 1 second of video */
 for (i = 0; i < 2500; i++) {
 /* ... (rest of the encoding loop) ... */
 fflush(stdout);

 /* make sure the frame data is writable */
 ret = av_frame_make_writable(frame);
 if (ret < 0)
 exit(1);

 /* prepare a dummy image */
 /* Y */
 for (y = 0; y < codecContext->height; y++) {
 for (x = 0; x < codecContext->width; x++) {
 frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
 }
 }

 /* Cb and Cr */
 for (y = 0; y < codecContext->height / 2; y++) {
 for (x = 0; x < codecContext->width / 2; x++) {
 frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
 frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
 }
 }

 frame->pts = i;

 /* encode the image */
 encode(codecContext, frame, pkt, outFormatCtx);
 }

 /* flush the encoder */
 encode(codecContext, NULL, pkt, outFormatCtx);

 /* Write the trailer */
 av_write_trailer(outFormatCtx);

 /* Close the output */
 avformat_free_context(outFormatCtx);

 avcodec_free_context(&codecContext);
 av_frame_free(&frame);
 av_packet_free(&pkt);

 return 0;
}


I suspect it is an issue with the FFmpeg library I compiled, so I searched GitHub for steps to compile FFmpeg and built it again, but the resulting package still has the same problem. I don't know what to do now.
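Since the self-compiled library is the main suspect, one quick runtime check is to print which H.264 encoder that particular build actually resolves to and how it was configured; a build without libx264, or one that falls back to a different H.264 encoder, behaves quite differently from a desktop build. A small diagnostic sketch (function name arbitrary; on Android the printf output would need to be routed to logcat as above):

#include <stdio.h>
#include <libavcodec/avcodec.h>

/* Diagnostic only: report the encoder avcodec_find_encoder() resolves to for
 * H.264 and the configure line of this libavcodec build. */
static void dump_h264_encoder_info(void)
{
    const AVCodec *by_id   = avcodec_find_encoder(AV_CODEC_ID_H264);
    const AVCodec *by_name = avcodec_find_encoder_by_name("libx264");

    printf("libavcodec configuration: %s\n", avcodec_configuration());
    printf("H.264 encoder by id: %s\n", by_id ? by_id->name : "none");
    printf("libx264 by name: %s\n", by_name ? by_name->name : "not built in");
}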


-
Problems with outputting stream format as RTMP via FFmpeg C-API
9 January 2024, by dongrixinyu
I am using FFmpeg's C API to push video streams (rtmp://....) into an SRS server.

The input stream is an MP4 file named juren-30s.mp4.

The output stream is also an MP4 file named juren-30s-5.mp4.

My piece of code (see further down) works fine when used in the following pipeline:
mp4 -> demux -> decode -> rgb images -> encode -> mux -> mp4

Problem:


When I changed the output stream to an online RTMP URL such as rtmp://ip:port/live/stream_nb_23 (just an example, you can change it according to your server and rules), the result was a corrupted stream: mp4 -> rtmp (flv).

What I've tried:


Changing the output format

I changed the output format parameter to flv when initializing the output context with avformat_alloc_output_context2, but this didn't help.
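For reference, this is roughly what forcing the flv muxer looks like with avformat_alloc_output_context2; the helper name is arbitrary and the URL is a placeholder, so treat it as an illustration of the call rather than a fix by itself:

#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavutil/error.h>

/* Allocate an output context explicitly bound to the flv muxer. */
static AVFormatContext *open_flv_output(const char *rtmp_url)
{
    AVFormatContext *out = NULL;
    int ret = avformat_alloc_output_context2(&out, NULL, "flv", rtmp_url);
    if (ret < 0) {
        fprintf(stderr, "avformat_alloc_output_context2 failed: %s\n", av_err2str(ret));
        return NULL;
    }
    return out;
}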

Debugging the output

When I executed ffprobe rtmp://ip:port/live/xxxxxxx, I got the following errors and did not know why:

[h264 @ 0x55a925e3ba80] luma_log2_weight_denom 12 is out of range
[h264 @ 0x55a925e3ba80] Missing reference picture, default is 2
[h264 @ 0x55a925e3ba80] concealing 8003 DC, 8003 AC, 8003 MV errors in P frame
[h264 @ 0x55a925e3ba80] QP 4294966938 out of range
[h264 @ 0x55a925e3ba80] decode_slice_header error
[h264 @ 0x55a925e3ba80] no frame!
[h264 @ 0x55a925e3ba80] luma_log2_weight_denom 21 is out of range
[h264 @ 0x55a925e3ba80] luma_log2_weight_denom 10 is out of range
[h264 @ 0x55a925e3ba80] chroma_log2_weight_denom 12 is out of range
[h264 @ 0x55a925e3ba80] Missing reference picture, default is 0
[h264 @ 0x55a925e3ba80] decode_slice_header error
[h264 @ 0x55a925e3ba80] QP 4294967066 out of range
[h264 @ 0x55a925e3ba80] decode_slice_header error
[h264 @ 0x55a925e3ba80] no frame!
[h264 @ 0x55a925e3ba80] QP 341 out of range
[h264 @ 0x55a925e3ba80] decode_slice_header error



I am confused about what differs between writing to MP4 and to RTMP when using the FFmpeg C API to produce a correctly formatted output stream.
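One detail that is easy to get wrong here (a hedged sketch of the usual recipe, not a verified diagnosis of the code below): both the mp4 and flv muxers set AVFMT_GLOBALHEADER, i.e. they expect the H.264 SPS/PPS in the stream's codecpar extradata. That extradata only exists after the encoder has been opened with AV_CODEC_FLAG_GLOBAL_HEADER set, and it only reaches the muxer if the parameters are copied to the stream after avcodec_open2(); when it is missing, decoder complaints about missing references and "no frame!" are a typical symptom. The variable roles below mirror this post's code:

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Sketch of the usual setup order for muxers that set AVFMT_GLOBALHEADER. */
static int open_encoder_for_stream(AVFormatContext *fmt_ctx_out, AVStream *st,
                                   AVCodecContext *enc_ctx, const AVCodec *encode)
{
    int ret;

    /* Ask the encoder to place SPS/PPS into extradata, not only in-band. */
    if (fmt_ctx_out->oformat->flags & AVFMT_GLOBALHEADER)
        enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    /* Open the encoder first, so extradata actually gets filled in... */
    ret = avcodec_open2(enc_ctx, encode, NULL);
    if (ret < 0)
        return ret;

    /* ...then copy the parameters (including extradata) to the output stream,
     * so the muxer can write the H.264 decoder configuration. */
    return avcodec_parameters_from_context(st->codecpar, enc_ctx);
}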


Besides, I also want to learn how to convert video and audio streams into other formats using the FFmpeg C API, such as flv, ts, rtsp, etc.
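As a naming guide only (the URLs are placeholders, and the rtsp muxer additionally needs a server that accepts ANNOUNCE/RECORD, so this shows muxer selection and nothing more), the container is picked by its short name in the same avformat_alloc_output_context2 call used above:

#include <libavformat/avformat.h>

/* Sketch: the output muxer is selected by its short name, or guessed from the
 * URL/filename when the name is NULL. */
static int alloc_output_by_name(AVFormatContext **out,
                                const char *muxer_name, const char *url)
{
    return avformat_alloc_output_context2(out, NULL, muxer_name, url);
}

/* Typical pairings (placeholders):
 *   alloc_output_by_name(&ctx, "flv",    "rtmp://host/live/stream");
 *   alloc_output_by_name(&ctx, "mpegts", "udp://host:1234");
 *   alloc_output_by_name(&ctx, "rtsp",   "rtsp://host:8554/stream");
 */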

Code to reproduce the problem:


- I have also put the project files (code, video files) on GitHub.
- The C code shown below can be found in Main.c, which is a minimal version that reproduces the problem; it compiles and runs as-is.








So how can I make this code output to RTMP without ending up with an unplayable video?


#include <stdio.h>
#include "libavformat/avformat.h"
int main()
{
 int ret = 0; int err;

 //Open input file
 char filename[] = "juren-30s.mp4";
 AVFormatContext *fmt_ctx = avformat_alloc_context();
 if (!fmt_ctx) {
 printf("error code %d \n",AVERROR(ENOMEM));
 return ENOMEM;
 }
 if((err = avformat_open_input(&fmt_ctx, filename,NULL,NULL)) < 0){
 printf("can not open file %d \n",err);
 return err;
 }

 //Open the decoder
 AVCodecContext *avctx = avcodec_alloc_context3(NULL);
 ret = avcodec_parameters_to_context(avctx, fmt_ctx->streams[0]->codecpar);
 if (ret < 0){
 printf("error code %d \n",ret);
 return ret;
 }
 AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
 if ((ret = avcodec_open2(avctx, codec, NULL)) < 0) {
 printf("open codec faile %d \n",ret);
 return ret;
 }

 //Open the output file container
 char filename_out[] = "juren-30s-5.mp4";
 AVFormatContext *fmt_ctx_out = NULL;
 err = avformat_alloc_output_context2(&fmt_ctx_out, NULL, NULL, filename_out);
 if (!fmt_ctx_out) {
 printf("error code %d \n",AVERROR(ENOMEM));
 return ENOMEM;
 }
 //Add a video stream to the output container context
 AVStream *st = avformat_new_stream(fmt_ctx_out, NULL);
 st->time_base = fmt_ctx->streams[0]->time_base;

 AVCodecContext *enc_ctx = NULL;
 
 AVPacket *pkt = av_packet_alloc();
 AVFrame *frame = av_frame_alloc();
 AVPacket *pkt_out = av_packet_alloc();

 int frame_num = 0; int read_end = 0;
 
 for(;;){
 if( 1 == read_end ){ break;}

 ret = av_read_frame(fmt_ctx, pkt);
 //Skip and do not process audio packets
 if( 1 == pkt->stream_index ){
 av_packet_unref(pkt);
 continue;
 }

 if ( AVERROR_EOF == ret) {
 //After reading the file, the data and size of pkt should be null at this time
 avcodec_send_packet(avctx, NULL);
 }else {
 if( 0 != ret){
 printf("read error code %d \n",ret);
 return ENOMEM;
 }else{
 retry:
 if (avcodec_send_packet(avctx, pkt) == AVERROR(EAGAIN)) {
 printf("Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
 //Here you can consider sleeping for 0.1 seconds and returning EAGAIN. This is usually because there is a bug in ffmpeg's internal API.
 goto retry;
 }
 //Release the encoded data in pkt
 av_packet_unref(pkt);
 }

 }

 //The loop keeps reading data from the decoder until there is no more data to read.
 for(;;){
 //Read AVFrame
 ret = avcodec_receive_frame(avctx, frame);
 /* Release the YUV data in the frame,
 * Since av_frame_unref is called in the avcodec_receive_frame function, the following code can be commented.
 * So we don't need to manually unref this AVFrame
 * */
 //av_frame_unref(frame);

 if( AVERROR(EAGAIN) == ret ){
 //Prompt EAGAIN means the decoder needs more AVPackets
 //Jump out of the first layer of for and let the decoder get more AVPackets
 break;
 }else if( AVERROR_EOF == ret ){
 /* The prompt AVERROR_EOF means that an AVPacket with both data and size NULL has been sent to the decoder before.
 * Sending NULL AVPacket prompts the decoder to flush out all cached frames.
 * Usually a NULL AVPacket is sent only after reading the input file, or when another video stream needs to be decoded with an existing decoder.
 *
 * */

 /* Send null AVFrame to the encoder and let the encoder flush out the remaining data.
 * */
 ret = avcodec_send_frame(enc_ctx, NULL);
 for(;;){
 ret = avcodec_receive_packet(enc_ctx, pkt_out);
 //It is impossible to return EAGAIN here, if there is any, exit directly.
 if (ret == AVERROR(EAGAIN)){
 printf("avcodec_receive_packet error code %d \n",ret);
 return ret;
 }
 
 if ( AVERROR_EOF == ret ){ break; }
 
 //Encode the AVPacket, print some information first, and then write it to the file.
 printf("pkt_out size : %d \n",pkt_out->size);
 //Set the stream_index of AVPacket so that you know which stream it is.
 pkt_out->stream_index = st->index;
 //Convert the time base of AVPacket to the time base of the output stream.
 pkt_out->pts = av_rescale_q_rnd(pkt_out->pts, fmt_ctx->streams[0]->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
 pkt_out->dts = av_rescale_q_rnd(pkt_out->dts, fmt_ctx->streams[0]->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
 pkt_out->duration = av_rescale_q_rnd(pkt_out->duration, fmt_ctx->streams[0]->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);


 ret = av_interleaved_write_frame(fmt_ctx_out, pkt_out);
 if (ret < 0) {
 printf("av_interleaved_write_frame faile %d \n",ret);
 return ret;
 }
 av_packet_unref(pkt_out);
 }
 av_write_trailer(fmt_ctx_out);
 //Jump out of the second layer of for, the file has been decoded.
 read_end = 1;
 break;
 }else if( ret >= 0 ){
 //Only when a frame is decoded can the encoder be initialized.
 if( NULL == enc_ctx ){
 //Open the encoder and set encoding information.
 AVCodec *encode = avcodec_find_encoder(AV_CODEC_ID_H264);
 enc_ctx = avcodec_alloc_context3(encode);
 enc_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
 enc_ctx->bit_rate = 400000;
 enc_ctx->framerate = avctx->framerate;
 enc_ctx->gop_size = 30;
 enc_ctx->max_b_frames = 10;
 enc_ctx->profile = FF_PROFILE_H264_MAIN;
 
 /*
 * In fact, the following information is also available in the container, so you could open the encoder from the container parameters right at the start.
 * I take these encoder parameters from the AVFrame instead, because the information in the container may differ from what is finally decoded:
 * the decoded AVFrame may have passed through a filter that transforms this information, although no filters are used in this article.
 */
 
 //The time base of the encoder should be the time base of AVFrame, because AVFrame is the input. The time base of AVFrame is the time base of the stream.
 enc_ctx->time_base = fmt_ctx->streams[0]->time_base;
 enc_ctx->width = fmt_ctx->streams[0]->codecpar->width;
 enc_ctx->height = fmt_ctx->streams[0]->codecpar->height;
 enc_ctx->sample_aspect_ratio = st->sample_aspect_ratio = frame->sample_aspect_ratio;
 enc_ctx->pix_fmt = frame->format;
 enc_ctx->color_range = frame->color_range;
 enc_ctx->color_primaries = frame->color_primaries;
 enc_ctx->color_trc = frame->color_trc;
 enc_ctx->colorspace = frame->colorspace;
 enc_ctx->chroma_sample_location = frame->chroma_location;

 /* Note that the value of this field_order is different for different videos. I have written it here.
 * Because the video in this article is AV_FIELD_PROGRESSIVE
 * The production environment needs to process different videos
 */
 enc_ctx->field_order = AV_FIELD_PROGRESSIVE;

 /* Now we need to copy the encoder parameters to the stream. When decoding, assign parameters from the stream to the decoder.
 * Now let’s do it in reverse.
 * */
 ret = avcodec_parameters_from_context(st->codecpar,enc_ctx);
 if (ret < 0){
 printf("error code %d \n",ret);
 return ret;
 }
 if ((ret = avcodec_open2(enc_ctx, encode, NULL)) < 0) {
 printf("open codec faile %d \n",ret);
 return ret;
 }

 //Formally open the output file
 if ((ret = avio_open2(&fmt_ctx_out->pb, filename_out, AVIO_FLAG_WRITE,&fmt_ctx_out->interrupt_callback,NULL)) < 0) {
 printf("avio_open2 fail %d \n",ret);
 return ret;
 }

 //Write the file header first.
 ret = avformat_write_header(fmt_ctx_out,NULL);
 if (ret < 0) {
 printf("avformat_write_header fail %d \n",ret);
 return ret;
 }

 }

 //Send AVFrame to the encoder, and then continuously read AVPacket
 ret = avcodec_send_frame(enc_ctx, frame);
 if (ret < 0) {
 printf("avcodec_send_frame fail %d \n",ret);
 return ret;
 }
 for(;;){
 ret = avcodec_receive_packet(enc_ctx, pkt_out);
 if (ret == AVERROR(EAGAIN)){ break; }
 
 if (ret < 0){
 printf("avcodec_receive_packet fail %d \n",ret);
 return ret;
 }
 
 //Encode the AVPacket, print some information first, and then write it to the file.
 printf("pkt_out size : %d \n",pkt_out->size);

 //Set the stream_index of AVPacket so that you know which stream it is.
 pkt_out->stream_index = st->index;
 
 //Convert the time base of AVPacket to the time base of the output stream.
 pkt_out->pts = av_rescale_q_rnd(pkt_out->pts, fmt_ctx->streams[0]->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
 pkt_out->dts = av_rescale_q_rnd(pkt_out->dts, fmt_ctx->streams[0]->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
 pkt_out->duration = av_rescale_q_rnd(pkt_out->duration, fmt_ctx->streams[0]->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);

 ret = av_interleaved_write_frame(fmt_ctx_out, pkt_out);
 if (ret < 0) {
 printf("av_interleaved_write_frame faile %d \n",ret);
 return ret;
 }
 av_packet_unref(pkt_out);
 }

 }
 else{ printf("other fail \n"); return ret;}
 }
 }
 
 av_frame_free(&frame); av_packet_free(&pkt); av_packet_free(&pkt_out);
 
 //Close the encoder and decoder.
 avcodec_close(avctx); avcodec_close(enc_ctx);

 //Release container memory.
 avformat_free_context(fmt_ctx);

 //Must call avio_closep, otherwise the buffered data may not be written out and the file will be 0 KB
 avio_closep(&fmt_ctx_out->pb);
 avformat_free_context(fmt_ctx_out);
 printf("done \n");

 return 0;
}



This problem has been haunting me for about three weeks, and I still have no idea where the key bug is. I would really appreciate it if any FFmpeg expert could help me.


-
-
Problems with outputting stream format as RTMP about FFmpeg C-API
27 November 2023, by dongrixinyu