
Recherche avancée
Médias (1)
-
The Slip - Artworks
26 septembre 2011, par
Mis à jour : Septembre 2011
Langue : English
Type : Texte
Autres articles (39)
-
ANNEXE : Les plugins utilisés spécifiquement pour la ferme
5 mars 2010, parLe site central/maître de la ferme a besoin d’utiliser plusieurs plugins supplémentaires vis à vis des canaux pour son bon fonctionnement. le plugin Gestion de la mutualisation ; le plugin inscription3 pour gérer les inscriptions et les demandes de création d’instance de mutualisation dès l’inscription des utilisateurs ; le plugin verifier qui fournit une API de vérification des champs (utilisé par inscription3) ; le plugin champs extras v2 nécessité par inscription3 (...)
-
Ajouter notes et légendes aux images
7 février 2011, parPour pouvoir ajouter notes et légendes aux images, la première étape est d’installer le plugin "Légendes".
Une fois le plugin activé, vous pouvez le configurer dans l’espace de configuration afin de modifier les droits de création / modification et de suppression des notes. Par défaut seuls les administrateurs du site peuvent ajouter des notes aux images.
Modification lors de l’ajout d’un média
Lors de l’ajout d’un média de type "image" un nouveau bouton apparait au dessus de la prévisualisation (...) -
HTML5 audio and video support
13 avril 2011, parMediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
The MediaSPIP player used has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
For older browsers the Flowplayer flash fallback is used.
MediaSPIP allows for media playback on major mobile platforms with the above (...)
Sur d’autres sites (9772)
-
Failed to use h264_v4l2 codec in ffmpeg to decode video
6 janvier, par wangt13
I am working on an embedded Linux system (kernel-5.10.24) and I want to use
ffmpeg
libraries (ffmpeg-4.4.4) to do video decoding.

The C code is as follows, it uses
h264_v4l2m2m
decoder to decode the video,

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[]) {
 if (argc < 3) {
 printf("Usage: %s \n", argv[0]);
 return -1;
 }

 const char *input_file = argv[1];
 const char *output_file = argv[2];

 AVFormatContext *fmt_ctx = NULL;
 AVCodecContext *codec_ctx = NULL;
 AVCodec *codec = NULL;
 AVPacket pkt;
 AVFrame *frame = NULL;
 AVFrame *rgb_frame = NULL;
 struct SwsContext *sws_ctx = NULL;

 FILE *output = NULL;
 int video_stream_index = -1;

 avformat_network_init();

 if (avformat_open_input(&fmt_ctx, input_file, NULL, NULL) < 0) {
 fprintf(stderr, "Could not open input file %s\n", input_file);
 return -1;
 }

 if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
 fprintf(stderr, "Could not find stream information\n");
 return -1;
 }

 for (int i = 0; i < fmt_ctx->nb_streams; i++) {
 if (fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
 video_stream_index = i;
 break;
 }
 }

 if (video_stream_index == -1) {
 fprintf(stderr, "Could not find video stream\n");
 return -1;
 }

 //// codec = avcodec_find_decoder(fmt_ctx->streams[video_stream_index]->codecpar->codec_id);
 codec = avcodec_find_decoder_by_name("h264_v4l2m2m");
 if (!codec) {
 fprintf(stderr, "Codec not found\n");
 return -1;
 }

 codec_ctx = avcodec_alloc_context3(codec);
 if (!codec_ctx) {
 fprintf(stderr, "Could not allocate codec context\n");
 return -1;
 }

 if (avcodec_parameters_to_context(codec_ctx, fmt_ctx->streams[video_stream_index]->codecpar) < 0) {
 fprintf(stderr, "Failed to copy codec parameters to decoder context\n");
 return -1;
 }

 if (avcodec_open2(codec_ctx, codec, NULL) < 0) {
 fprintf(stderr, "Could not open codec\n");
 return -1;
 }

 output = fopen(output_file, "wb");
 if (!output) {
 fprintf(stderr, "Could not open output file %s\n", output_file);
 return -1;
 }

 frame = av_frame_alloc();
 rgb_frame = av_frame_alloc();
 if (!frame || !rgb_frame) {
 fprintf(stderr, "Could not allocate frames\n");
 return -1;
 }

 int width = codec_ctx->width;
 int height = codec_ctx->height;
 int num_bytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, width, height, 1);
 uint8_t *buffer = (uint8_t *)av_malloc(num_bytes * sizeof(uint8_t));
 av_image_fill_arrays(rgb_frame->data, rgb_frame->linesize, buffer, AV_PIX_FMT_RGB24, width, height, 1);

printf("XXXXXXXXXXXX width: %d, height: %d, fmt: %d\n", width, height, codec_ctx->pix_fmt);
 sws_ctx = sws_getContext(width, height, codec_ctx->pix_fmt,
 width, height, AV_PIX_FMT_RGB24,
 SWS_BILINEAR, NULL, NULL, NULL);
 if (!sws_ctx) {
 fprintf(stderr, "Could not initialize the conversion context\n");
 return -1;
 }

 while (av_read_frame(fmt_ctx, &pkt) >= 0) {
 if (pkt.stream_index == video_stream_index) {
 int ret = avcodec_send_packet(codec_ctx, &pkt);
 if (ret < 0) {
 fprintf(stderr, "Error sending packet for decoding\n");
 return -1;
 }

 while (ret >= 0) {
 ret = avcodec_receive_frame(codec_ctx, frame);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
 break;
 } else if (ret < 0) {
 fprintf(stderr, "Error during decoding\n");
 return -1;
 }

 sws_scale(sws_ctx, (const uint8_t *const *)frame->data, frame->linesize,
 0, height, rgb_frame->data, rgb_frame->linesize);

 fprintf(output, "P6\n%d %d\n255\n", width, height);
 fwrite(rgb_frame->data[0], 1, num_bytes, output);
 }
 }
 av_packet_unref(&pkt);
 }

 fclose(output);
 av_frame_free(&frame);
 av_frame_free(&rgb_frame);
 avcodec_free_context(&codec_ctx);
 avformat_close_input(&fmt_ctx);
 sws_freeContext(sws_ctx);

 return 0;
}



It ran with some error logs from
swscale
as follows,

# ./test_ffmpeg ./test.mp4 /tmp/output
[h264_v4l2m2m @ 0x1d76320] Using device /dev/video0
[h264_v4l2m2m @ 0x1d76320] driver 'mysoc-vdec' on card 'msoc-vdec' in mplane mode
[h264_v4l2m2m @ 0x1d76320] requesting formats: output=H264 capture=NV12
[h264_v4l2m2m @ 0x1d76320] the v4l2 driver does not support end of stream VIDIOC_SUBSCRIBE_EVENT
XXXXXXXXXXXX width: 1280, height: 720, fmt: 0
[swscaler @ 0x1dadaa0] No accelerated colorspace conversion found from yuv420p to rgb24.
[h264_v4l2m2m @ 0x1d76320] VIDIOC_G_SELECTION ioctl
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
[swscaler @ 0x1dadaa0] bad src image pointers
......



And it ran for about 4 seconds, while the
test.mp4
is about 13 seconds.
If I did NOT specify the h264_v4l2m2m
as the decoder, there is NO "bad src image pointers"
error and its run-time is as long as the mp4
file.

What is wrong with above codes using
h264_v4l2m2m
and how to fix it ?

-
Confused about x264 and encoding video frames
26 février 2015, par spartygwI built a test driver for encoding a series of images I have captured. I am using libx264 and based my driver off of this guy’s answer :
In my case I am starting out by reading in a JPG image and converting to YUV and passing that same frame over and over in a loop to the x264 encoder.
My expectation was that since the frame is the same that the output from the encoder would be very small and constant.
Instead I find that the NAL payload is varied from a few bytes to a few KB and also varies highly depending on the frame rate I specify in the encoder parameters.
Obviously I don’t understand video encoding. Why does the output size vary so much ?
int main()
{
Image image(WIDTH, HEIGHT);
image.FromJpeg("frame-1.jpg");
unsigned char *data = image.GetRGB();
x264_param_t param;
x264_param_default_preset(&param, "fast", "zerolatency");
param.i_threads = 1;
param.i_width = WIDTH;
param.i_height = HEIGHT;
param.i_fps_num = FPS;
param.i_fps_den = 1;
// Intra refres:
param.i_keyint_max = FPS;
param.b_intra_refresh = 1;
//Rate control:
param.rc.i_rc_method = X264_RC_CRF;
param.rc.f_rf_constant = FPS-5;
param.rc.f_rf_constant_max = FPS+5;
//For streaming:
param.b_repeat_headers = 1;
param.b_annexb = 1;
x264_param_apply_profile(&param, "baseline");
// initialize the encoder
x264_t* encoder = x264_encoder_open(&param);
x264_picture_t pic_in, pic_out;
x264_picture_alloc(&pic_in, X264_CSP_I420, WIDTH, HEIGHT);
// X264 expects YUV420P data use libswscale
// (from ffmpeg) to convert images to the right format
struct SwsContext* convertCtx =
sws_getContext(WIDTH, HEIGHT, PIX_FMT_RGB24, WIDTH, HEIGHT,
PIX_FMT_YUV420P, SWS_FAST_BILINEAR,
NULL, NULL, NULL);
// encoding is as simple as this then, for each frame do:
// data is a pointer to your RGB structure
int srcstride = WIDTH*3; //RGB stride is just 3*width
sws_scale(convertCtx, &data, &srcstride, 0, HEIGHT,
pic_in.img.plane, pic_in.img.i_stride);
x264_nal_t* nals;
int i_nals;
int frame_size =
x264_encoder_encode(encoder, &nals, &i_nals, &pic_in, &pic_out);
int max_loop=15;
int this_loop=1;
while (frame_size >= 0 && --max_loop)
{
cout << "------------" << this_loop++ << "-----------------\n";
cout << "Frame size = " << frame_size << endl;
cout << "output has " << pic_out.img.i_csp << " colorspace\n";
cout << "output has " << pic_out.img.i_plane << " # img planes\n";
cout << "i_nals = " << i_nals << endl;
for (int n=0; n -
avdevice/decklink_enc : don't take for granted that first frame to decklink output...
3 mars 2023, par Devin Heitmuelleravdevice/decklink_enc : don't take for granted that first frame to decklink output will be PTS 0
The existing code assumed that the first frame received by the decklink
output would always be PTS zero. However if running in other timing
modes than the default of CBR, items such as frame dropping at the
beginning may result in starting at a non-zero PTS.For example, in our setup because we discard probing data and run
with "-vsync 2" the first video frame scheduled to the decklink
output will have a PTS around 170. Scheduling frames too far into
the future will either fail or cause a backlog of frames scheduled
far enough into the future that the entire pipeline will stall.Issue can be reproduced with the following command-line :
./ffmpeg -copyts -i foo.ts -f decklink -vcodec v210 -ac 2 'DeckLink Duo (4)'
Keep track of the PTS of the first frame received, so that when
we enable start playback we can provide that value to the decklink
driver.Thanks to Marton Balint for review and suggestion to use
AV_NOPTS_VALUE rather than zero for the initial value.Signed-off-by : Devin Heitmueller <dheitmueller@ltnglobal.com>
Signed-off-by : Marton Balint <cus@passwd.hu>