
Recherche avancée
Autres articles (38)
-
Use, discuss, criticize
13 avril 2011, par
Talk to people directly involved in MediaSPIP’s development, or to people around you who could use MediaSPIP to share, enhance or develop their creative projects.
The bigger the community, the more MediaSPIP’s potential will be explored and the faster the software will evolve.
A discussion list is available for all exchanges between users. -
HTML5 audio and video support
13 avril 2011, par
MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
The MediaSPIP player used has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
For older browsers the Flowplayer flash fallback is used.
MediaSPIP allows for media playback on major mobile platforms with the above (...) -
De l’upload à la vidéo finale [version standalone]
31 janvier 2010, par
Le chemin d’un document audio ou vidéo dans SPIPMotion est divisé en trois étapes distinctes.
Upload et récupération d’informations de la vidéo source
Dans un premier temps, il est nécessaire de créer un article SPIP et de lui joindre le document vidéo "source".
Au moment où ce document est joint à l’article, deux actions supplémentaires au comportement normal sont exécutées : La récupération des informations techniques des flux audio et video du fichier ; La génération d’une vignette : extraction d’une (...)
Sur d’autres sites (5600)
-
FFmpeg:A General error in an external library occurred when using FFmpeg6.1's avcodec_send_frame
4 janvier 2024, par MMingYI
I have the same code that can successfully push streams (RTMP) in the Windows environment, but in the Android environment it fails with an error message. The failing call is avcodec_send_frame in FFmpeg 6.1. By the way, I compiled the FFmpeg library for Android myself, while for Win11 I downloaded the official build. I provide the code for Android and Win11 below.


android :


/*
 * Send one frame to the encoder (NULL frame = flush) and write every
 * packet the encoder produces to the output muxer.
 *
 * Fixes vs. the original:
 *  - pkt->pts is int64_t; passing it to "%d"/"%ld" is undefined behavior
 *    on 32-bit Android ABIs, so it is cast to long long and printed with
 *    "%lld".  The old format string also dropped the pts value entirely.
 *  - the return value of av_write_frame() is checked instead of ignored.
 *  - on error we return instead of exit(1): calling exit() inside a JNI
 *    library kills the whole Android VM.
 */
static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
                   AVFormatContext *outFormatCtx) {
    int ret;

    /* send the frame to the encoder */
    if (frame)
        LOGE2("Send frame %lld\n", (long long) frame->pts);

    ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0) {
        char errbuf[AV_ERROR_MAX_STRING_SIZE];
        av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
        LOGE2("Error sending a frame for encoding ,%s\n", errbuf);
        return;
    }

    /* drain all packets available after this frame */
    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;                    /* need more input / fully flushed */
        else if (ret < 0) {
            LOGE2("Error during encoding\n");
            return;
        }

        LOGE2("Write packet %lld (size=%d)\n", (long long) pkt->pts, pkt->size);

        /* Write the packet to the RTMP stream; was silently ignored before. */
        ret = av_write_frame(outFormatCtx, pkt);
        if (ret < 0) {
            LOGE2("write frame err=%s", av_err2str(ret));
            av_packet_unref(pkt);
            return;
        }
        av_packet_unref(pkt);
    }
}

PUSHER_FUNC(int, testPush, jstring yuvPath, jstring outputPath) {
 const char *yvu_path = env->GetStringUTFChars(yuvPath, JNI_FALSE);
 const char *output_path = env->GetStringUTFChars(outputPath, JNI_FALSE);
 const char *rtmp_url = output_path;
 const AVCodec *codec;
 AVCodecContext *codecContext = NULL;
 AVFormatContext *outFormatCtx;
 int ret = 0;
 AVStream *outStream;
 AVFrame *frame;
 AVPacket *pkt;
 int i, x, y;
 avformat_network_init();

 codec = avcodec_find_encoder(AV_CODEC_ID_H264);
// codec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
// codec = avcodec_find_encoder(AV_CODEC_ID_H265);
 if (!codec) {
 LOGE2("JNI Error finding H.264 encoder");
 return -1;
 }
 codecContext = avcodec_alloc_context3(codec);
 if (!codecContext) {
 fprintf(stderr, "Could not allocate video codec context\n");
 return -1;
 }

 /* Allocate the output context */
 outFormatCtx = avformat_alloc_context();
 if (!outFormatCtx) {
 fprintf(stderr, "Could not allocate output context\n");
 return -1;
 }

 /* Open the RTMP output */
 const AVOutputFormat *ofmt = av_guess_format("flv", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mpegts", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mp4", NULL, NULL);
 if (!ofmt) {
 fprintf(stderr, "Could not find output format\n");
 return -1;
 }
 outFormatCtx->oformat = ofmt;
 outFormatCtx->url = av_strdup(rtmp_url);
 /* Add a video stream */
 outStream = avformat_new_stream(outFormatCtx, codec);
 if (!outStream) {
 fprintf(stderr, "Could not allocate stream\n");
 return -1;
 }
 outStream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 outStream->codecpar->codec_id = codec->id;
 outStream->codecpar->width = 352;
 outStream->codecpar->height = 288;

 /* Set the output URL */
 av_dict_set(&outFormatCtx->metadata, "url", rtmp_url, 0);

 pkt = av_packet_alloc();
 if (!pkt)
 return -1;

 /* ... (rest of the setup code) ... */
/* put sample parameters */
 codecContext->bit_rate = 400000;
 /* resolution must be a multiple of two */
 codecContext->width = 352;
 codecContext->height = 288;
 /* frames per second */
 codecContext->time_base = (AVRational) {1, 25};
 codecContext->framerate = (AVRational) {25, 1};

 /* emit one intra frame every ten frames
 * check frame pict_type before passing frame
 * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
 * then gop_size is ignored and the output of encoder
 * will always be I frame irrespective to gop_size
 */
 codecContext->gop_size = 10;
 codecContext->max_b_frames = 1;
 codecContext->pix_fmt = AV_PIX_FMT_YUV420P;

 if (codec->id == AV_CODEC_ID_H264)
 av_opt_set(codecContext->priv_data, "preset", "slow", 0);

 /* open it */
 ret = avcodec_open2(codecContext, codec, NULL);
 if (ret < 0) {
 LOGE2("JNI Error opening codec eer%s", av_err2str(ret));
 return ret;
 }

 avcodec_parameters_to_context(codecContext, outStream->codecpar);

 if (avio_open(&outFormatCtx->pb, rtmp_url, AVIO_FLAG_WRITE)) {
 fprintf(stderr, "Could not open output\n");
 return ret;
 }
 /* Write the header */
 if (avformat_write_header(outFormatCtx, NULL) != 0) {
 fprintf(stderr, "Error occurred when opening output\n");
 return ret;
 }

 frame = av_frame_alloc();
 if (!frame) {
 fprintf(stderr, "Could not allocate video frame\n");
 return -1;
 }
 frame->format = codecContext->pix_fmt;
 frame->format = AV_PIX_FMT_YUV420P;
 frame->format = 0;
 frame->width = codecContext->width;
 frame->height = codecContext->height;

 ret = av_frame_get_buffer(frame, 0);
 if (ret < 0) {
 fprintf(stderr, "Could not allocate the video frame data ,%s\n", av_err2str(ret));
 return ret;
 }

 /* FILE *yuv_file = fopen(yvu_path, "rb");
 if (yuv_file == NULL) {
 LOGE2("cannot open h264 file");
 return -1;
 }*/

 /* encode 1 second of video */
 for (i = 0; i < 25000; i++) {
// for (i = 0; i < 25; i++) {
// fflush(stdout);

 /* make sure the frame data is writable */
 ret = av_frame_make_writable(frame);
 if (ret < 0)
 exit(1);

 /* prepare a dummy image */
 /* Y */
 for (y = 0; y < codecContext->height; y++) {
 for (x = 0; x < codecContext->width; x++) {
 frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
 }
 }

 /* Cb and Cr */
 for (y = 0; y < codecContext->height / 2; y++) {
 for (x = 0; x < codecContext->width / 2; x++) {
 frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
 frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
 }
 }

 frame->pts = i;

 /* encode the image */
 encode(codecContext, frame, pkt, outFormatCtx);
 }

// fclose(yuv_file);

 /* flush the encoder */
 encode(codecContext, NULL, pkt, outFormatCtx);

 /* Write the trailer */
 av_write_trailer(outFormatCtx);

 /* Close the output */
 avformat_free_context(outFormatCtx);

 avcodec_free_context(&codecContext);
 av_frame_free(&frame);
 av_packet_free(&pkt);
}



win11:


#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libavutil/time.h>

/*
 * Send one frame to the encoder (NULL frame = flush) and write every
 * packet the encoder produces to the output muxer.
 *
 * Fix: the return value of av_write_frame() is now checked — a failed
 * network write was silently ignored before.
 */
static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
                   AVFormatContext *outFormatCtx) {
    int ret;

    /* send the frame to the encoder */
    if (frame)
        printf("Send frame %3"PRId64"\n", frame->pts);

    ret = avcodec_send_frame(enc_ctx, frame);
    if (ret < 0) {
        char errbuf[AV_ERROR_MAX_STRING_SIZE];
        av_strerror(ret, errbuf, AV_ERROR_MAX_STRING_SIZE);
        fprintf(stderr, "Error sending a frame for encoding ,%s\n", errbuf);
        exit(1);
    }

    /* drain all packets available after this frame */
    while (ret >= 0) {
        ret = avcodec_receive_packet(enc_ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;                    /* need more input / fully flushed */
        else if (ret < 0) {
            fprintf(stderr, "Error during encoding\n");
            exit(1);
        }

        printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);

        /* Write the packet to the RTMP stream. */
        ret = av_write_frame(outFormatCtx, pkt);
        if (ret < 0) {
            fprintf(stderr, "Error writing packet: %s\n", av_err2str(ret));
            av_packet_unref(pkt);
            exit(1);
        }
        av_packet_unref(pkt);
    }
}

int main(int argc, char **argv) {
 av_log_set_level(AV_LOG_DEBUG);
 const char *rtmp_url, *codec_name;
 const AVCodec *codec;
 AVCodecContext *codecContext = NULL;
 int i, ret, x, y;
 AVFormatContext *outFormatCtx;
 AVStream *st;
 AVFrame *frame;
 AVPacket *pkt;
 uint8_t endcode[] = {0, 0, 1, 0xb7};

 if (argc <= 3) {
 fprintf(stderr, "Usage: %s <rtmp url="url"> <codec>\n", argv[0]);
 exit(0);
 }
 rtmp_url = argv[1];
 codec_name = argv[2];
 avformat_network_init();
 /* find the mpeg1video encoder */
// codec = avcodec_find_encoder_by_name(codec_name);
// codec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
// codec = avcodec_find_encoder(AV_CODEC_ID_VP9);
// codec = avcodec_find_encoder(AV_CODEC_ID_MPEG2VIDEO);
// codec = avcodec_find_encoder(AV_CODEC_ID_H264);
 codec = avcodec_find_encoder(AV_CODEC_ID_H264);
// codec = avcodec_find_encoder(AV_CODEC_ID_AV1);
// codec = avcodec_find_encoder(AV_CODEC_ID_H265);
 if (!codec) {
 fprintf(stderr, "Codec '%s' not found\n", codec_name);
 exit(1);
 }
 codecContext = avcodec_alloc_context3(codec);
 if (!codecContext) {
 fprintf(stderr, "Could not allocate video codec context\n");
 exit(1);
 }

 /* Allocate the output context */
 outFormatCtx = avformat_alloc_context();
 if (!outFormatCtx) {
 fprintf(stderr, "Could not allocate output context\n");
 exit(1);
 }

 /* Open the RTMP output */
 const AVOutputFormat *ofmt = av_guess_format("flv", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("MKV", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("rtmp", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mpegts", NULL, NULL);
// const AVOutputFormat *ofmt = av_guess_format("mp4", NULL, NULL);
 if (!ofmt) {
 fprintf(stderr, "Could not find output format\n");
 exit(1);
 }
 outFormatCtx->oformat = ofmt;
 outFormatCtx->url = av_strdup(rtmp_url);
 /* Add a video stream */
 st = avformat_new_stream(outFormatCtx, codec);
 if (!st) {
 fprintf(stderr, "Could not allocate stream\n");
 exit(1);
 }
 st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
 st->codecpar->codec_id = codec->id;
 st->codecpar->width = 352;
 st->codecpar->height = 288;
// st->codecpar = c;
// st->codecpar->format = AV_PIX_FMT_YUV420P;
 // Set video stream parameters
// st->codecpar->framerate = (AVRational){25, 1};

 /* Set the output URL */
 av_dict_set(&outFormatCtx->metadata, "url", rtmp_url, 0);


 pkt = av_packet_alloc();
 if (!pkt)
 exit(1);

 /* ... (rest of the setup code) ... */
/* put sample parameters */
 codecContext->bit_rate = 400000;
 /* resolution must be a multiple of two */
 codecContext->width = 352;
 codecContext->height = 288;
 /* frames per second */
 codecContext->time_base = (AVRational) {1, 25};
 codecContext->framerate = (AVRational) {25, 1};

 /* emit one intra frame every ten frames
 * check frame pict_type before passing frame
 * to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
 * then gop_size is ignored and the output of encoder
 * will always be I frame irrespective to gop_size
 */
 codecContext->gop_size = 10;
 codecContext->max_b_frames = 1;
 codecContext->pix_fmt = AV_PIX_FMT_YUV420P;

 if (codec->id == AV_CODEC_ID_H264)
 av_opt_set(codecContext->priv_data, "preset", "slow", 0);

 /* open it */
 ret = avcodec_open2(codecContext, codec, NULL);
 if (ret < 0) {
 fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret));
 exit(1);
 }

 avcodec_parameters_to_context(codecContext, st->codecpar);

 if (avio_open(&outFormatCtx->pb, rtmp_url, AVIO_FLAG_WRITE)) {
 fprintf(stderr, "Could not open output\n");
 exit(1);
 }
 /* Write the header */
 if (avformat_write_header(outFormatCtx, NULL) != 0) {
 fprintf(stderr, "Error occurred when opening output\n");
 exit(1);
 }

 frame = av_frame_alloc();
 if (!frame) {
 fprintf(stderr, "Could not allocate video frame\n");
 exit(1);
 }
// frame->format = c->pix_fmt;
// frame->format = AV_PIX_FMT_YUV420P;
 frame->format = 0;
 frame->width = codecContext->width;
 frame->height = codecContext->height;

 ret = av_frame_get_buffer(frame, 0);
 if (ret < 0) {
 fprintf(stderr, "Could not allocate the video frame data ,%s\n", av_err2str(ret));
 exit(1);
 }

 /* encode 1 second of video */
 for (i = 0; i < 2500; i++) {
 /* ... (rest of the encoding loop) ... */
 fflush(stdout);

 /* make sure the frame data is writable */
 ret = av_frame_make_writable(frame);
 if (ret < 0)
 exit(1);

 /* prepare a dummy image */
 /* Y */
 for (y = 0; y < codecContext->height; y++) {
 for (x = 0; x < codecContext->width; x++) {
 frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
 }
 }

 /* Cb and Cr */
 for (y = 0; y < codecContext->height / 2; y++) {
 for (x = 0; x < codecContext->width / 2; x++) {
 frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
 frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
 }
 }

 frame->pts = i;

 /* encode the image */
 encode(codecContext, frame, pkt, outFormatCtx);
 }

 /* flush the encoder */
 encode(codecContext, NULL, pkt, outFormatCtx);

 /* Write the trailer */
 av_write_trailer(outFormatCtx);

 /* Close the output */
 avformat_free_context(outFormatCtx);

 avcodec_free_context(&codecContext);
 av_frame_free(&frame);
 av_packet_free(&pkt);

 return 0;
}
</codec></rtmp>


I suspect it's an issue with the ffmpeg library I compiled, so I searched for a step to compile ffmpeg on GitHub, but the package it compiled still has the same problem. I don't know what to do now.


-
ffserver : Cast time_t value when using it in a format string.
29 décembre 2015, par Carl Eugen Hoyos -
Revision dc2c62eba8 : Cast away Windows warning Subtracting the pointers promoted to a signed type.
19 décembre 2013, par Johann
Changed Paths :
Modify /vp9/decoder/vp9_decodeframe.c
Cast away Windows warning
Subtracting the pointers promoted to a signed type.
Change-Id : Ied0e822a1756ed7b2f514efafcb6dce4efb9b9d6