
Media (2)
-
Example of action buttons for a collaborative collection
27 February 2013
Updated: March 2013
Language: French
Type: Image
-
Example of action buttons for a personal collection
27 February 2013
Updated: February 2013
Language: English
Type: Image
Other articles (58)
-
Use, discuss, criticize
13 April 2011
Talk to people directly involved in MediaSPIP’s development, or to people around you who could use MediaSPIP to share, enhance or develop their creative projects.
The bigger the community, the more MediaSPIP’s potential will be explored and the faster the software will evolve.
A discussion list is available for all exchanges between users.
-
Participate in its translation
10 April 2011
You can help us improve the wording used in the software, or translate it into any new language so that it can reach new linguistic communities.
To do so, we use SPIP’s translation interface, where all of MediaSPIP’s language modules are available. You just need to subscribe to the translators’ discussion list to ask for more information.
MediaSPIP is currently only available in French and (...)
-
MediaSPIP Player: potential problems
22 February 2011
The player does not work on Internet Explorer
On Internet Explorer (at least versions 7 and 8), the plugin uses the Flash player flowplayer to play video and audio. If the player does not seem to work, the cause may be the configuration of Apache’s mod_deflate.
If the configuration of this Apache module contains a line similar to the following, try removing or commenting it out to see whether the player works correctly: (...)
On other sites (12068)
-
Trouble syncing libavformat/ffmpeg with x264 and RTP
26 December 2012, by Jacob Peddicord
I've been working on some streaming software that takes live feeds
from various kinds of cameras and streams over the network using
H.264. To accomplish this, I'm using the x264 encoder directly (with
the "zerolatency" preset) and feeding NALs as they are available to
libavformat to pack into RTP (ultimately RTSP). Ideally, this
application should be as real-time as possible. For the most part,
this has been working well.
Unfortunately, however, there is some sort of synchronization issue:
any video playback on clients seems to show a few smooth frames,
followed by a short pause, then more frames; repeat. Additionally,
there appears to be approximately a 4-second delay. This happens with
every video player I've tried: Totem, VLC, and basic gstreamer pipes.
I've boiled it all down to a somewhat small test case:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <x264.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#define WIDTH 640
#define HEIGHT 480
#define FPS 30
#define BITRATE 400000
#define RTP_ADDRESS "127.0.0.1"
#define RTP_PORT 49990
struct AVFormatContext* avctx;
struct x264_t* encoder;
struct SwsContext* imgctx;
uint8_t test = 0x80;
void create_sample_picture(x264_picture_t* picture)
{
// create a frame to store in
x264_picture_alloc(picture, X264_CSP_I420, WIDTH, HEIGHT);
// fake image generation
// disregard how wrong this is; just writing a quick test
int strides = WIDTH / 8;
uint8_t* data = malloc(WIDTH * HEIGHT * 3);
memset(data, test, WIDTH * HEIGHT * 3);
test = (test << 1) | (test >> (8 - 1));
// scale the image
sws_scale(imgctx, (const uint8_t* const*) &data, &strides, 0, HEIGHT,
picture->img.plane, picture->img.i_stride);
}
int encode_frame(x264_picture_t* picture, x264_nal_t** nals)
{
// encode a frame
x264_picture_t pic_out;
int num_nals;
int frame_size = x264_encoder_encode(encoder, nals, &num_nals, picture, &pic_out);
// ignore bad frames
if (frame_size < 0)
{
return frame_size;
}
return num_nals;
}
void stream_frame(uint8_t* payload, int size)
{
// initalize a packet
AVPacket p;
av_init_packet(&p);
p.data = payload;
p.size = size;
p.stream_index = 0;
p.flags = AV_PKT_FLAG_KEY;
p.pts = AV_NOPTS_VALUE;
p.dts = AV_NOPTS_VALUE;
// send it out
av_interleaved_write_frame(avctx, &p);
}
int main(int argc, char* argv[])
{
// initalize ffmpeg
av_register_all();
// set up image scaler
// (in-width, in-height, in-format, out-width, out-height, out-format, scaling-method, 0, 0, 0)
imgctx = sws_getContext(WIDTH, HEIGHT, PIX_FMT_MONOWHITE,
WIDTH, HEIGHT, PIX_FMT_YUV420P,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
// set up encoder presets
x264_param_t param;
x264_param_default_preset(&param, "ultrafast", "zerolatency");
param.i_threads = 3;
param.i_width = WIDTH;
param.i_height = HEIGHT;
param.i_fps_num = FPS;
param.i_fps_den = 1;
param.i_keyint_max = FPS;
param.b_intra_refresh = 0;
param.rc.i_bitrate = BITRATE;
param.b_repeat_headers = 1; // whether to repeat headers or write just once
param.b_annexb = 1; // place start codes (1) or sizes (0)
// initalize
x264_param_apply_profile(&param, "high");
encoder = x264_encoder_open(&param);
// at this point, x264_encoder_headers can be used, but it has had no effect
// set up streaming context. a lot of error handling has been ommitted
// for brevity, but this should be pretty standard.
avctx = avformat_alloc_context();
struct AVOutputFormat* fmt = av_guess_format("rtp", NULL, NULL);
avctx->oformat = fmt;
snprintf(avctx->filename, sizeof(avctx->filename), "rtp://%s:%d", RTP_ADDRESS, RTP_PORT);
if (url_fopen(&avctx->pb, avctx->filename, URL_WRONLY) < 0)
{
perror("url_fopen failed");
return 1;
}
struct AVStream* stream = av_new_stream(avctx, 1);
// initalize codec
AVCodecContext* c = stream->codec;
c->codec_id = CODEC_ID_H264;
c->codec_type = AVMEDIA_TYPE_VIDEO;
c->flags = CODEC_FLAG_GLOBAL_HEADER;
c->width = WIDTH;
c->height = HEIGHT;
c->time_base.den = FPS;
c->time_base.num = 1;
c->gop_size = FPS;
c->bit_rate = BITRATE;
avctx->flags = AVFMT_FLAG_RTP_HINT;
// write the header
av_write_header(avctx);
// make some frames
for (int frame = 0; frame < 10000; frame++)
{
// create a sample moving frame
x264_picture_t* pic = (x264_picture_t*) malloc(sizeof(x264_picture_t));
create_sample_picture(pic);
// encode the frame
x264_nal_t* nals;
int num_nals = encode_frame(pic, &nals);
if (num_nals < 0)
printf("invalid frame size: %d\n", num_nals);
// send out NALs
for (int i = 0; i < num_nals; i++)
{
stream_frame(nals[i].p_payload, nals[i].i_payload);
}
// free up resources
x264_picture_clean(pic);
free(pic);
// stream at approx 30 fps
printf("frame %d\n", frame);
usleep(33333);
}
return 0;
}
This test shows black lines on a white background that
should move smoothly to the left. It has been written for ffmpeg 0.6.5
but the problem can be reproduced on 0.8 and 0.10 (from what I've tested so far). I've taken some shortcuts in error handling to make this example as short as
possible while still showing the problem, so please excuse some of the
nasty code. I should also note that while an SDP is not used here, I
have tried using that already with similar results. The test can be
compiled with:
gcc -g -std=gnu99 streamtest.c -lswscale -lavformat -lx264 -lm -lpthread -o streamtest
It can be played with gstreamer directly:
gst-launch udpsrc port=49990 ! application/x-rtp,payload=96,clock-rate=90000 ! rtph264depay ! decodebin ! xvimagesink
You should immediately notice the stuttering. One common "fix" I've
seen all over the Internet is to add sync=false to the pipeline:
gst-launch udpsrc port=49990 ! application/x-rtp,payload=96,clock-rate=90000 ! rtph264depay ! decodebin ! xvimagesink sync=false
This causes playback to be smooth (and near-realtime), but is a
non-solution and only works with gstreamer. I'd like to fix the
problem at the source. I've been able to stream with near-identical
parameters using raw ffmpeg and haven't had any issues:
ffmpeg -re -i sample.mp4 -vcodec libx264 -vpre ultrafast -vpre baseline -b 400000 -an -f rtp rtp://127.0.0.1:49990 -an
So clearly I'm doing something wrong. But what is it?
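One detail worth noting, purely as a guess about where the timing could go wrong (it is not established in the question): stream_frame() above writes every packet with pts and dts set to AV_NOPTS_VALUE, so the RTP muxer receives no real timing information. A minimal sketch of a variant that stamps packets explicitly, reusing the globals and macros from the test case, might look like this:

// Hypothetical variant of stream_frame(), not part of the original post:
// give each packet an explicit, monotonically increasing timestamp instead
// of AV_NOPTS_VALUE. frame_index is advanced once per encoded picture, so
// all NALs of one picture share the same timestamp.
static int64_t frame_index = 0;

void stream_frame_timed(uint8_t* payload, int size)
{
    AVPacket p;
    av_init_packet(&p);
    p.data = payload;
    p.size = size;
    p.stream_index = 0;
    p.flags = AV_PKT_FLAG_KEY;

    // one frame lasts 1/FPS seconds; rescale to the output stream's time base
    AVRational frame_tb = { 1, FPS };
    p.pts = av_rescale_q(frame_index, frame_tb, avctx->streams[0]->time_base);
    p.dts = p.pts;

    av_interleaved_write_frame(avctx, &p);
}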
-
Problems with outputting stream format as RTMP via FFmpeg C-API
9 January 2024, by dongrixinyu
I am using FFmpeg's C API to push video streams (rtmp://....) into an SRS server.

The input stream is an MP4 file named juren-30s.mp4.

The output stream is also an MP4 file named juren-30s-5.mp4.

My piece of code (see further down) works fine when used in the following steps:

mp4 -> demux -> decode -> rgb images -> encode -> mux -> mp4.

Problem:


When I changed the output stream to an online RTMP URL, rtmp://ip:port/live/stream_nb_23 (just an example; you can change it according to your server and rules), the result was corrupted output from the mp4 -> rtmp(flv) pipeline.

What I've tried:


Changing the output format

I changed the output format parameter to flv when initializing the context with avformat_alloc_output_context2, but this didn't help (a minimal sketch of that call is shown below).
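For reference, a sketch of what that allocation typically looks like for an RTMP push; this is an illustration, not the original code, and the URL is the placeholder from the question:

#include "libavformat/avformat.h"

// Sketch: name the muxer explicitly, since an "rtmp://" URL alone does not
// imply a container; RTMP carries FLV.
static AVFormatContext *open_rtmp_output(void)
{
    AVFormatContext *out_ctx = NULL;
    int err = avformat_alloc_output_context2(&out_ctx, NULL, "flv",
                                             "rtmp://ip:port/live/stream_nb_23");
    if (err < 0 || !out_ctx)
        return NULL; // invalid URL or unknown muxer
    return out_ctx;
}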

Debugging the output

When I executed ffprobe rtmp://ip:port/live/xxxxxxx, I got the following errors and did not know why:

[h264 @ 0x55a925e3ba80] luma_log2_weight_denom 12 is out of range
[h264 @ 0x55a925e3ba80] Missing reference picture, default is 2
[h264 @ 0x55a925e3ba80] concealing 8003 DC, 8003 AC, 8003 MV errors in P frame
[h264 @ 0x55a925e3ba80] QP 4294966938 out of range
[h264 @ 0x55a925e3ba80] decode_slice_header error
[h264 @ 0x55a925e3ba80] no frame!
[h264 @ 0x55a925e3ba80] luma_log2_weight_denom 21 is out of range
[h264 @ 0x55a925e3ba80] luma_log2_weight_denom 10 is out of range
[h264 @ 0x55a925e3ba80] chroma_log2_weight_denom 12 is out of range
[h264 @ 0x55a925e3ba80] Missing reference picture, default is 0
[h264 @ 0x55a925e3ba80] decode_slice_header error
[h264 @ 0x55a925e3ba80] QP 4294967066 out of range
[h264 @ 0x55a925e3ba80] decode_slice_header error
[h264 @ 0x55a925e3ba80] no frame!
[h264 @ 0x55a925e3ba80] QP 341 out of range
[h264 @ 0x55a925e3ba80] decode_slice_header error



I am confused about what differs between writing an MP4 file and pushing RTMP when using the FFmpeg C API to produce a correct output stream; one difference that may matter is sketched just below.
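One such difference, stated as an assumption rather than a verified diagnosis of this code: the FLV muxer behind RTMP expects the H.264 parameter sets (SPS/PPS) as global extradata, so when the output format requests global headers the encoder is usually told so before avcodec_open2(). In terms of the variables in the code further down, the sketch would be:

// Sketch (assumed, not taken from the original code): if the output muxer
// wants global headers (FLV and MP4 both do), set the flag on the encoder
// before opening it so the SPS/PPS end up in extradata.
if (fmt_ctx_out->oformat->flags & AVFMT_GLOBALHEADER)
    enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;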


Besides, I would also like to learn how to convert video and audio streams into other formats with the FFmpeg C API, such as flv, ts, rtsp, etc. (see the sketch after this paragraph).
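As a rough illustration (the URLs below are placeholders, not from the original code), the container or protocol is selected by the muxer name and URL passed to avformat_alloc_output_context2(); the decode/encode loop itself does not change:

#include "libavformat/avformat.h"

// Sketch: the same call selects other muxers; in real use the contexts
// would be written to before being freed.
static void open_other_outputs(void)
{
    AVFormatContext *ts_ctx = NULL, *rtsp_ctx = NULL;
    avformat_alloc_output_context2(&ts_ctx,   NULL, "mpegts", "udp://127.0.0.1:1234");
    avformat_alloc_output_context2(&rtsp_ctx, NULL, "rtsp",   "rtsp://127.0.0.1:8554/stream");
    avformat_free_context(ts_ctx);
    avformat_free_context(rtsp_ctx);
}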

Code to reproduce the problem:


- I have also put the project files (code, video files) on GitHub.
- The C code shown below can also be found in Main.c, a minimal version that reproduces the problem; it builds and runs successfully.








So how can I make this code output to RTMP without ending up with an unplayable video?


#include <stdio.h>
#include "libavformat/avformat.h"
int main()
{
 int ret = 0; int err;

 //Open input file
 char filename[] = "juren-30s.mp4";
 AVFormatContext *fmt_ctx = avformat_alloc_context();
 if (!fmt_ctx) {
 printf("error code %d \n",AVERROR(ENOMEM));
 return ENOMEM;
 }
 if((err = avformat_open_input(&fmt_ctx, filename,NULL,NULL)) < 0){
 printf("can not open file %d \n",err);
 return err;
 }

 //Open the decoder
 AVCodecContext *avctx = avcodec_alloc_context3(NULL);
 ret = avcodec_parameters_to_context(avctx, fmt_ctx->streams[0]->codecpar);
 if (ret < 0){
 printf("error code %d \n",ret);
 return ret;
 }
 AVCodec *codec = avcodec_find_decoder(avctx->codec_id);
 if ((ret = avcodec_open2(avctx, codec, NULL)) < 0) {
 printf("open codec faile %d \n",ret);
 return ret;
 }

 //Open the output file container
 char filename_out[] = "juren-30s-5.mp4";
 AVFormatContext *fmt_ctx_out = NULL;
 err = avformat_alloc_output_context2(&fmt_ctx_out, NULL, NULL, filename_out);
 if (!fmt_ctx_out) {
 printf("error code %d \n",AVERROR(ENOMEM));
 return ENOMEM;
 }
 //Add a stream to the output container context
 AVStream *st = avformat_new_stream(fmt_ctx_out, NULL);
 st->time_base = fmt_ctx->streams[0]->time_base;

 AVCodecContext *enc_ctx = NULL;
 
 AVPacket *pkt = av_packet_alloc();
 AVFrame *frame = av_frame_alloc();
 AVPacket *pkt_out = av_packet_alloc();

 int frame_num = 0; int read_end = 0;
 
 for(;;){
 if( 1 == read_end ){ break;}

 ret = av_read_frame(fmt_ctx, pkt);
 //Skip and do not process audio packets
 if( 1 == pkt->stream_index ){
 av_packet_unref(pkt);
 continue;
 }

 if ( AVERROR_EOF == ret) {
 //After reading the file, the data and size of pkt should be null at this time
 avcodec_send_packet(avctx, NULL);
 }else {
 if( 0 != ret){
 printf("read error code %d \n",ret);
 return ENOMEM;
 }else{
 retry:
 if (avcodec_send_packet(avctx, pkt) == AVERROR(EAGAIN)) {
 printf("Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
 //Here you can consider sleeping for 0.1 seconds and returning EAGAIN. This is usually because there is a bug in ffmpeg's internal API.
 goto retry;
 }
 //Release the encoded data in pkt
 av_packet_unref(pkt);
 }

 }

 //The loop keeps reading data from the decoder until there is no more data to read.
 for(;;){
 //Read AVFrame
 ret = avcodec_receive_frame(avctx, frame);
 /* Release the YUV data in the frame,
 * Since av_frame_unref is called in the avcodec_receive_frame function, the following code can be commented.
 * So we don't need to manually unref this AVFrame
 * */
 //av_frame_unref(frame);

 if( AVERROR(EAGAIN) == ret ){
 //Prompt EAGAIN means the decoder needs more AVPackets
 //Jump out of the first layer of for and let the decoder get more AVPackets
 break;
 }else if( AVERROR_EOF == ret ){
 /* The prompt AVERROR_EOF means that an AVPacket with both data and size NULL has been sent to the decoder before.
 * Sending NULL AVPacket prompts the decoder to flush out all cached frames.
 * Usually a NULL AVPacket is sent only after reading the input file, or when another video stream needs to be decoded with an existing decoder.
 *
 * */

 /* Send null AVFrame to the encoder and let the encoder flush out the remaining data.
 * */
 ret = avcodec_send_frame(enc_ctx, NULL);
 for(;;){
 ret = avcodec_receive_packet(enc_ctx, pkt_out);
 //It is impossible to return EAGAIN here, if there is any, exit directly.
 if (ret == AVERROR(EAGAIN)){
 printf("avcodec_receive_packet error code %d \n",ret);
 return ret;
 }
 
 if ( AVERROR_EOF == ret ){ break; }
 
 //Encode the AVPacket, print some information first, and then write it to the file.
 printf("pkt_out size : %d \n",pkt_out->size);
 //Set the stream_index of AVPacket so that you know which stream it is.
 pkt_out->stream_index = st->index;
 //Convert the time base of AVPacket to the time base of the output stream.
 pkt_out->pts = av_rescale_q_rnd(pkt_out->pts, fmt_ctx->streams[0]->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
 pkt_out->dts = av_rescale_q_rnd(pkt_out->dts, fmt_ctx->streams[0]->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
 pkt_out->duration = av_rescale_q_rnd(pkt_out->duration, fmt_ctx->streams[0]->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);


 ret = av_interleaved_write_frame(fmt_ctx_out, pkt_out);
 if (ret < 0) {
 printf("av_interleaved_write_frame faile %d \n",ret);
 return ret;
 }
 av_packet_unref(pkt_out);
 }
 av_write_trailer(fmt_ctx_out);
 //Jump out of the second layer of for, the file has been decoded.
 read_end = 1;
 break;
 }else if( ret >= 0 ){
 //Only when a frame is decoded can the encoder be initialized.
 if( NULL == enc_ctx ){
 //Open the encoder and set encoding information.
 AVCodec *encode = avcodec_find_encoder(AV_CODEC_ID_H264);
 enc_ctx = avcodec_alloc_context3(encode);
 enc_ctx->codec_type = AVMEDIA_TYPE_VIDEO;
 enc_ctx->bit_rate = 400000;
 enc_ctx->framerate = avctx->framerate;
 enc_ctx->gop_size = 30;
 enc_ctx->max_b_frames = 10;
 enc_ctx->profile = FF_PROFILE_H264_MAIN;
 
 /*
 * In fact, the following information is also available in the container, so the encoder could be opened
 * from the container parameters right at the start. These encoder parameters are taken from the decoded
 * AVFrame instead, because the frame that actually reaches the encoder is what counts: a decoded AVFrame
 * may pass through a filter that changes this information (although no filters are used in this article).
 */
 
 //The time base of the encoder should be the time base of AVFrame, because AVFrame is the input. The time base of AVFrame is the time base of the stream.
 enc_ctx->time_base = fmt_ctx->streams[0]->time_base;
 enc_ctx->width = fmt_ctx->streams[0]->codecpar->width;
 enc_ctx->height = fmt_ctx->streams[0]->codecpar->height;
 enc_ctx->sample_aspect_ratio = st->sample_aspect_ratio = frame->sample_aspect_ratio;
 enc_ctx->pix_fmt = frame->format;
 enc_ctx->color_range = frame->color_range;
 enc_ctx->color_primaries = frame->color_primaries;
 enc_ctx->color_trc = frame->color_trc;
 enc_ctx->colorspace = frame->colorspace;
 enc_ctx->chroma_sample_location = frame->chroma_location;

 /* Note that the value of this field_order is different for different videos. I have written it here.
 * Because the video in this article is AV_FIELD_PROGRESSIVE
 * The production environment needs to process different videos
 */
 enc_ctx->field_order = AV_FIELD_PROGRESSIVE;

 /* Now we need to copy the encoder parameters to the stream. When decoding, assign parameters from the stream to the decoder.
 * Now let’s do it in reverse.
 * */
 ret = avcodec_parameters_from_context(st->codecpar,enc_ctx);
 if (ret < 0){
 printf("error code %d \n",ret);
 return ret;
 }
 if ((ret = avcodec_open2(enc_ctx, encode, NULL)) < 0) {
 printf("open codec faile %d \n",ret);
 return ret;
 }

 //Formally open the output file
 if ((ret = avio_open2(&fmt_ctx_out->pb, filename_out, AVIO_FLAG_WRITE,&fmt_ctx_out->interrupt_callback,NULL)) < 0) {
 printf("avio_open2 fail %d \n",ret);
 return ret;
 }

 //Write the file header first.
 ret = avformat_write_header(fmt_ctx_out,NULL);
 if (ret < 0) {
 printf("avformat_write_header fail %d \n",ret);
 return ret;
 }

 }

 //Send AVFrame to the encoder, and then continuously read AVPacket
 ret = avcodec_send_frame(enc_ctx, frame);
 if (ret < 0) {
 printf("avcodec_send_frame fail %d \n",ret);
 return ret;
 }
 for(;;){
 ret = avcodec_receive_packet(enc_ctx, pkt_out);
 if (ret == AVERROR(EAGAIN)){ break; }
 
 if (ret < 0){
 printf("avcodec_receive_packet fail %d \n",ret);
 return ret;
 }
 
 //Encode the AVPacket, print some information first, and then write it to the file.
 printf("pkt_out size : %d \n",pkt_out->size);

 //Set the stream_index of AVPacket so that you know which stream it is.
 pkt_out->stream_index = st->index;
 
 //Convert the time base of AVPacket to the time base of the output stream.
 pkt_out->pts = av_rescale_q_rnd(pkt_out->pts, fmt_ctx->streams[0]->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
 pkt_out->dts = av_rescale_q_rnd(pkt_out->dts, fmt_ctx->streams[0]->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
 pkt_out->duration = av_rescale_q_rnd(pkt_out->duration, fmt_ctx->streams[0]->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);

 ret = av_interleaved_write_frame(fmt_ctx_out, pkt_out);
 if (ret < 0) {
 printf("av_interleaved_write_frame faile %d \n",ret);
 return ret;
 }
 av_packet_unref(pkt_out);
 }

 }
 else{ printf("other fail \n"); return ret;}
 }
 }
 
 av_frame_free(&frame); av_packet_free(&pkt); av_packet_free(&pkt_out);
 
 //Close the encoder and decoder.
 avcodec_close(avctx); avcodec_close(enc_ctx);

 //Release container memory.
 avformat_free_context(fmt_ctx);

 //Must call avio_closep, otherwise the data may not be fully written and the file will be 0 KB
 avio_closep(&fmt_ctx_out->pb);
 avformat_free_context(fmt_ctx_out);
 printf("done \n");

 return 0;
}



This problem has been haunting me for about three weeks, and I still have no idea where the key bug is. I would really appreciate it if any FFmpeg expert could help.

