
Media (1)
-
Collections - Quick creation form
19 February 2013, by
Updated: February 2013
Language: French
Type: Image
Other articles (111)
-
Publishing on MédiaSpip
13 June 2013
Can I post content from an iPad tablet?
Yes, if your installed Médiaspip is at version 0.2 or higher. If in doubt, contact the administrator of your MédiaSpip to find out. -
Automatic installation script for MediaSPIP
25 April 2011, by
To work around installation difficulties, caused mainly by server-side software dependencies, an all-in-one bash installation script was created to ease this step on a server running a compatible Linux distribution.
You must have SSH access to your server and a "root" account in order to use it, which makes it possible to install the dependencies. Contact your hosting provider if you do not have this.
The documentation on using the installation script (...) -
Adding user-specific information and other author-related behaviour changes
12 April 2011, by
The simplest way to add information to authors is to install the Inscription3 plugin. It also makes it possible to modify certain user-related behaviours (refer to its documentation for more information).
It is also possible to add fields to authors by installing the plugins Champs Extras 2 and Interface pour champs extras.
On other sites (11682)
-
FFmpeg error: Exactly one scaler algorithm must be chosen
28 May 2015, by Dave_Dev
I am currently working on an FFmpeg project. I am trying to convert an RGB image into a YUV image using this code (I found it on the internet last night):
void Decode::video_encode_example(const char *filename, int codec_id)
{
AVCodec *codec;
AVCodecContext *c= NULL;
int i, ret, x, y, got_output;
FILE *f;
AVFrame *frame;
AVPacket pkt;
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
printf("Encode video file %s\n", filename);
/* find the mpeg1 video encoder */
codec = avcodec_find_encoder((enum AVCodecID)codec_id);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(2);
}
/* put sample parameters */
c->bit_rate = 400000;
/* resolution must be a multiple of two */
c->width = 352;
c->height = 288;
/* frames per second */
c->time_base = (AVRational){1,25};
/* emit one intra frame every ten frames
* check frame pict_type before passing frame
* to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
* then gop_size is ignored and the output of the encoder
* will always be an I-frame, irrespective of gop_size
*/
c->gop_size = 10;
c->max_b_frames = 1;
c->pix_fmt = AV_PIX_FMT_YUV420P;
if (codec_id == AV_CODEC_ID_H264)
av_opt_set(c->priv_data, "preset", "slow", 0);
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(3);
}
f = fopen(filename, "wb");
if (!f) {
fprintf(stderr, "Could not open %s\n", filename);
exit(4);
}
frame = avcodec_alloc_frame(); // in more recent versions this is av_frame_alloc
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(5);
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
/* the image can be allocated by any means and av_image_alloc() is
* just the most convenient way if av_malloc() is to be used */
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
c->pix_fmt, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw picture buffer\n");
exit(6);
}
//
// RGB to YUV:
// http://stackoverflow.com/questions/16667687/how-to-convert-rgb-from-yuv420p-for-ffmpeg-encoder
//
// Create some dummy RGB "frame"
uint8_t *rgba32Data = new uint8_t[4*c->width*c->height];
SwsContext * ctx = sws_getContext(c->width, c->height,
AV_PIX_FMT_RGBA, c->width, c->height,
AV_PIX_FMT_YUV420P, 0, 0, 0, 0);
/* encode 1 second of video */
for (i = 0; i < 25; i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
fflush(stdout);
/* prepare a dummy image */
/* Y */
// for (y = 0; y < c->height; y++) {
// for (x = 0; x < c->width; x++) {
// frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
// }
// }
//
// /* Cb and Cr */
// for (y = 0; y < c->height/2; y++) {
// for (x = 0; x < c->width/2; x++) {
// frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
// frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
// }
// }
uint8_t *pos = rgba32Data;
for (y = 0; y < c->height; y++)
{
for (x = 0; x < c->width; x++)
{
pos[0] = i / (float)25 * 255;
pos[1] = 0;
pos[2] = x / (float)(c->width) * 255;
pos[3] = 255;
pos += 4;
}
}
uint8_t * inData[1] = { rgba32Data }; // RGBA32 have one plane
//
// NOTE: In a more general setting, the rows of your input image may
// be padded; that is, the bytes per row may not be 4 * width.
// In such cases, inLinesize should be set to that padded width.
//
int inLinesize[1] = { 4*c->width }; // RGBA stride
sws_scale(ctx, inData, inLinesize, 0, c->height, frame->data, frame->linesize);
frame->pts = i;
/* encode the image */
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(7);
}
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
/* get the delayed frames */
for (got_output = 1; got_output; i++) {
fflush(stdout);
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(8);
}
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
/* add sequence end code to have a real mpeg file */
fwrite(endcode, 1, sizeof(endcode), f);
fclose(f);
avcodec_close(c);
av_free(c);
av_freep(&frame->data[0]);
avcodec_free_frame(&frame); // in more recent versions this is av_frame_free
printf("\n");
}
int main()
{
Decode d;
avcodec_register_all();
d.video_encode_example("/home/Dave/Desktop/test.mpg", AV_CODEC_ID_MPEG2VIDEO);
}
When I run this application, my Linux terminal shows me the following error:
[swscaler @ 0x1e1dc60] Exactly one scaler algorithm must be chosen
Segmentation fault (core dumped)
I do not know what is actually happening. Could you help me please?
Best regards
Dave_Dev
FFmpeg using Intel quicksync
3 March 2016, by KevinA
I'm trying to use FFmpeg with Intel QuickSync (qsv).
Finding the codec works, but when I go to open the codec I get a -40 error.
I've traced it to:
ret = MFXVideoENCODE_GetVideoParam(q->session, &q->param);
Below is my initialization code:
AVCodec* m_codec = ::avcodec_find_encoder_by_name("h264_qsv");
if (!m_codec){
DBGPRINTF("Could not find encoder");
return E_INVALIDARG;
}
AVCodecContext* m_context = ::avcodec_alloc_context3(m_codec);
if (!m_context){
DBGPRINTF("Could not alloc AV context");
return E_INVALIDARG;
}
mfxIMPL impl = MFX_IMPL_AUTO;
mfxVersion ver = { { 1, 1 } };
MFXInit(impl, &ver, &m_qsvContext->session);
m_qsvContext->iopattern = MFX_IOPATTERN_IN_OPAQUE_MEMORY;
m_qsvContext->opaque_alloc = 1;
m_qsvContext->nb_opaque_surfaces = 16;
m_context->hwaccel_context = m_qsvContext;
m_context->profile = FF_PROFILE_H264_BASELINE;
AVRational fps;
AVRational sar;
fps.num = static_cast<int>(m_targetFPS); // <int> restored; AVRational.num is an int
fps.den = 1;
sar.num = m_iHeightOut;
sar.den = m_iWidthOut;
//m_context->bit_rate = 400000;
m_context->width = m_iWidthOut;
m_context->height = m_iHeightOut;
m_context->has_b_frames = 0;
m_context->sample_aspect_ratio = sar;
m_context->time_base = fps;
m_context->gop_size = s_keyFramesMax;
m_context->pix_fmt = AV_PIX_FMT_QSV;
m_context->flags |= AV_CODEC_FLAG_QSCALE;
m_context->flags |= CODEC_FLAG_PASS1;
m_context->thread_count = 1;
m_context->codec_type = AVMEDIA_TYPE_VIDEO;
::av_opt_set(m_context->priv_data, "preset", "fast", 0);
::av_opt_set(m_context->priv_data, "look_ahead", "0", 0);
int ret = avcodec_open2(m_context, m_codec, nullptr);
if (ret < 0) {
DBGPRINTF("Could not open codec h264_qsv with code %d", ret);
return ret;
}I’m obviously missing something, but i’m not sure what. Can someone help point me in the correct direction ?
-
Create MPEG-DASH Initialization segment
5 January 2016, by Mahout
I am looking to convert between HLS and MPEG-DASH. I do not have access to the original fully concatenated video file, only the individual HLS segments.
In doing this transformation to MPEG-DASH I need to supply an initialization segment for the DASH manifest (.mpd) file.
My questions are:
- What is the structure of a DASH video initialization segment?
- How can I generate/create one without needing the original full file?
Perhaps a solution would involve getting MP4Box to convert the '.ts' HLS segments to self-initializing DASH '.m4s' segments, but I am unsure how to go about this. Any ideas are much appreciated.
Many thanks.
UPDATE:
Snippet to stream using the original HLS segments. The video plays all the way through, but it is just black.
<Representation width="426" height="238" frameRate="25" bandwidth="400000">
<SegmentList timescale="25000" duration="112500">
<SegmentURL media="video_0_400000/hls/segment_0.ts"/>
<SegmentURL media="video_0_400000/hls/segment_1.ts"/>
<SegmentURL media="video_0_400000/hls/segment_2.ts"/>
</SegmentList>
</Representation>