
Media (2)

- SPIP - plugins - embed code - Example
  2 September 2013
  Updated: September 2013
  Language: French
  Type: Image

- Publishing an image simply
  13 April 2011
  Updated: February 2012
  Language: French
  Type: Video
Other articles (62)

- Updating from version 0.1 to 0.2
  24 June 2013
  Explanation of the notable changes involved in moving from version 0.1 of MediaSPIP to version 0.3. What's new?
  Regarding software dependencies: use of the latest versions of FFmpeg (>= v1.2.1); installation of the dependencies for Smush; installation of MediaInfo and FFprobe to retrieve metadata; ffmpeg2theora is no longer used; flvtool2 is no longer installed, in favour of flvtool++; ffmpeg-php, which is no longer maintained, is no longer installed (...)

- Configuring language support
  15 November 2010
  Accessing the configuration and adding supported languages
  To enable support for new languages, you need to go to the "Administer" section of the site.
  From there, the navigation menu gives access to a "Language management" section where support for new languages can be enabled.
  Each newly added language can still be disabled as long as no object has been created in that language; once one has, it becomes greyed out in the configuration and (...)

- Websites made with MediaSPIP
  2 May 2011
  This page lists some websites based on MediaSPIP.
On other sites (9123)
- Make 2 videos on top of each other in a 1080×1920 scene
  6 October 2023, by Byte me

I am trying to stack two videos on top of each other. Right now I am scaling both to 1080x960 with ffmpeg and combining them with vstack, but with no success so far. Can anyone help me?


ffmpeg()
  .input('./placeholder.mp4').videoCodec('copy')
  .input("./scaled_YT.mp4").videoCodec('copy')
  .complexFilter([
    `[0:v]scale=1080x960[v0];[1:v]scale=1080x960[v1];[v0][v1]vstack=inputs=2[v]`
  ], ['v'])
  .toFormat('mp4')
  .on('end', () => {
    console.log('Files have been merged!');
  })
  .on('error', (err) => {
    console.error('Error:', err)
  })
  .save(outputPath);



Error: Error: ffmpeg exited with code 1: 
 at ChildProcess.<anonymous> (D:\Discord Bots\TEMP_TEST\done_projects\videoEditor_bot\node_modules\fluent-ffmpeg\lib\processor.js:182:22) 
 at ChildProcess.emit (node:events:513:28)
 at ChildProcess._handle.onexit (node:internal/child_process:291:12)
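
That exit code alone says very little, but the command this builds asks ffmpeg to stream-copy the video (videoCodec('copy')) while also pushing it through -filter_complex, and ffmpeg refuses to filter and stream-copy the same stream. A minimal sketch of one likely fix, assuming the same fluent-ffmpeg setup, input files and outputPath as above: drop the copy codec and re-encode the stacked output.

const ffmpeg = require('fluent-ffmpeg');

ffmpeg()
  .input('./placeholder.mp4')
  .input('./scaled_YT.mp4')
  // Scale each input to 1080x960 and stack them into one 1080x1920 frame.
  .complexFilter(
    '[0:v]scale=1080:960[v0];[1:v]scale=1080:960[v1];[v0][v1]vstack=inputs=2[v]',
    'v' // map the filter output into the file instead of the raw input streams
  )
  // The filtered stream has to be encoded again; stream copy cannot be used here.
  .videoCodec('libx264')
  .toFormat('mp4')
  .on('end', () => console.log('Files have been merged!'))
  // stderr carries ffmpeg's own diagnostics, which explain the bare exit code 1.
  .on('error', (err, stdout, stderr) => console.error('Error:', err.message, stderr))
  .save(outputPath);

Note that mapping only 'v' drops any audio from the inputs, just as in the original attempt.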


- libswscale error Slice Parameters 0, 1080 are invalid
  3 May 2023, by lokit khemka

I am trying to scale a video from 1080p to 480p. For that, I have set up the swscale context as:


encoder_sc->sws_ctx = sws_getContext(1920, 1080, AV_PIX_FMT_YUV420P,
                                     854, 480, AV_PIX_FMT_YUV420P,
                                     SWS_BICUBIC, NULL, NULL, NULL);



I then call the frame scaling function as follows:


sws_scale_frame(encoder->sws_ctx, input_frame, input_frame);



However, when I do that I get the error "Slice parameters 0, 1080 are invalid". I am very new to FFmpeg and video processing in general. I could not find any solution while searching. Any help is greatly appreciated.
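
For what it is worth, sws_scale_frame() (available since FFmpeg 5.0) takes the destination frame first and the source frame second, and the destination is expected to be a separate frame describing the context's output geometry (854x480 here); the call above passes input_frame as both arguments. A minimal sketch of the usual call shape, assuming the 1920x1080 to 854x480 context created above (the scaled variable is made up for illustration):

/* A separate destination frame with the output geometry of the context. */
AVFrame *scaled = av_frame_alloc();
scaled->width  = 854;
scaled->height = 480;
scaled->format = AV_PIX_FMT_YUV420P;
/* The data buffers can be left unallocated: sws_scale_frame() allocates them. */

int ret = sws_scale_frame(encoder_sc->sws_ctx, scaled, input_frame);
if (ret < 0)
    printf("sws_scale_frame failed: %s\n", av_err2str(ret));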

EDIT: I am including the entire source code because I cannot seem to solve the issue.




typedef struct StreamingContext{
 AVFormatContext* avfc;
 AVCodec *video_avc;
 AVCodec *audio_avc;
 AVStream *video_avs;
 AVStream *audio_avs;
 AVCodecContext *video_avcc;
 AVCodecContext *audio_avcc;
 int video_index;
 int audio_index;
 char* filename;
 struct SwsContext *sws_ctx;
}StreamingContext;


typedef struct StreamingParams{
 char copy_video;
 char copy_audio;
 char *output_extension;
 char *muxer_opt_key;
 char *muxer_opt_value;
 char *video_codec;
 char *audio_codec;
 char *codec_priv_key;
 char *codec_priv_value;
}StreamingParams;


int prepare_video_encoder(StreamingContext *encoder_sc, AVCodecContext *decoder_ctx, AVRational input_framerate,
 StreamingParams sp)
{
 encoder_sc->video_avs = avformat_new_stream(encoder_sc->avfc, NULL);
 encoder_sc->video_avc = avcodec_find_encoder_by_name(sp.video_codec);
 if (!encoder_sc->video_avc)
 {
 logging("Cannot find the Codec.");
 return -1;
 }

 encoder_sc->video_avcc = avcodec_alloc_context3(encoder_sc->video_avc);
 if (!encoder_sc->video_avcc)
 {
 logging("Could not allocate memory for Codec Context.");
 return -1;
 }

 av_opt_set(encoder_sc->video_avcc->priv_data, "preset", "fast", 0);
 if (sp.codec_priv_key && sp.codec_priv_value)
 av_opt_set(encoder_sc->video_avcc->priv_data, sp.codec_priv_key, sp.codec_priv_value, 0);

 encoder_sc->video_avcc->height = decoder_ctx->height;
 encoder_sc->video_avcc->width = decoder_ctx->width;
 encoder_sc->video_avcc->sample_aspect_ratio = decoder_ctx->sample_aspect_ratio;

 if (encoder_sc->video_avc->pix_fmts)
 encoder_sc->video_avcc->pix_fmt = encoder_sc->video_avc->pix_fmts[0];
 else
 encoder_sc->video_avcc->pix_fmt = decoder_ctx->pix_fmt;

 encoder_sc->video_avcc->bit_rate = 2 * 1000 * 1000;
 encoder_sc->video_avcc->rc_buffer_size = 4 * 1000 * 1000;
 encoder_sc->video_avcc->rc_max_rate = 2 * 1000 * 1000;
 encoder_sc->video_avcc->rc_min_rate = 2.5 * 1000 * 1000;

 encoder_sc->video_avcc->time_base = av_inv_q(input_framerate);
 encoder_sc->video_avs->time_base = encoder_sc->video_avcc->time_base;

 //Creating Scaling Context
 encoder_sc->sws_ctx = sws_getContext(1920, 1080,
 decoder_ctx->pix_fmt, 
 854, 480, encoder_sc->video_avcc->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL );
 if (!encoder_sc->sws_ctx){logging("Cannot Create Scaling Context."); return -1;}

 if (avcodec_open2(encoder_sc->video_avcc, encoder_sc->video_avc, NULL) < 0)
 {
 logging("Could not open the Codec.");
 return -1;
 }
 avcodec_parameters_from_context(encoder_sc->video_avs->codecpar, encoder_sc->video_avcc);
 return 0;
}



int transcode_video(StreamingContext *decoder, StreamingContext *encoder, AVPacket *input_packet, AVFrame *input_frame, AVFrame *scaled_frame)
{
 int response = avcodec_send_packet(decoder->video_avcc, input_packet);
 if (response < 0)
 {
 logging("Error while sending the Packet to Decoder: %s", av_err2str(response));
 return response;
 }

 while (response >= 0)
 {
 response = avcodec_receive_frame(decoder->video_avcc, input_frame);
 
 if (response == AVERROR(EAGAIN) || response == AVERROR_EOF)
 {
 break;
 }
 else if (response < 0)
 {
 logging("Error while receiving frame from Decoder: %s", av_err2str(response));
 return response;
 }
 if (response >= 0)
 {
 scaled_frame->format = encoder->video_avcc->pix_fmt;
 scaled_frame->width = 854;
 scaled_frame->height = 480;
 sws_scale_frame(encoder->sws_ctx, scaled_frame, input_frame);
 //ERROR is in the scaled_frame
 if (encode_video(decoder, encoder, scaled_frame)) 
 return -1;
 }

 av_frame_unref(input_frame);
 }
 return 0;
}
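
Two things stand out in this listing, although without the input file it is hard to be sure they are the whole story. First, prepare_video_encoder() still gives the encoder the decoder's dimensions (decoder_ctx->width and decoder_ctx->height, i.e. 1920x1080) while the frames handed to encode_video() are 854x480, so the encoder context presumably needs width = 854 and height = 480 as well. Second, scaled_frame is reused on every loop iteration without being reset, and its timing fields are never copied from the decoded frame. A hedged sketch of a per-frame helper along those lines, reusing the StreamingContext from the listing (the function name is made up; only documented libavutil/libswscale calls are used):

#include <libavutil/frame.h>
#include <libswscale/swscale.h>

/* Scale one decoded frame into scaled_frame (854x480, encoder pixel format)
 * and carry the metadata (pts, etc.) over for encoding. */
static int scale_for_encoder(StreamingContext *encoder,
                             AVFrame *scaled_frame, const AVFrame *input_frame)
{
    int ret;

    /* Drop whatever was left in the frame by the previous iteration. */
    av_frame_unref(scaled_frame);

    scaled_frame->width  = 854;
    scaled_frame->height = 480;
    scaled_frame->format = encoder->video_avcc->pix_fmt;

    /* sws_scale_frame() allocates the destination buffers when they are
     * left clear and writes the scaled picture into them. */
    ret = sws_scale_frame(encoder->sws_ctx, scaled_frame, input_frame);
    if (ret < 0)
        return ret;

    /* The scaler does not set pts or other metadata. */
    return av_frame_copy_props(scaled_frame, input_frame);
}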




- Unable to allocate 47.5 MiB for an array with shape (1080, 1920, 3) and data type float64
  21 August 2022, by eragon

I am trying to create a large video (longer than 3 hours) with CompositeVideoClip using MoviePy. The problem is that it takes too much RAM (I have 32 GB): it fills almost all of it (99%) and spawns a bunch of ffmpeg-win64-v4.2.2.exe processes.





After a while it says "Unable to allocate 47.5 MiB for an array with shape (1080, 1920, 3) and data type float64".
Here is my code:


def CombieVideo():
    global curentVideoLengt
    masterVideo = None
    for videoUrl in videoFiles:
        print(videoUrl)
        video = VideoFileClip(videoUrl).fx(vfx.fadein, 1).fx(vfx.fadeout, 1)
        curentVideoLengt += video.duration
        if curentVideoLengt >= (audioLen * 60 * 60):
            break
        if masterVideo is None:
            masterVideo = video
        else:
            masterVideo = CompositeVideoClip([masterVideo, video])

    if curentVideoLengt < (audioLen * 60 * 60):
        videoUrl = random.choice(videoFiles)
        print(videoUrl)
        video = VideoFileClip(videoUrl).fx(vfx.fadein, 1).fx(vfx.fadeout, 1)
        curentVideoLengt = curentVideoLengt + video.duration
        masterVideo = CompositeVideoClip([masterVideo, video])
        CombieVideo()
    else:
        masterVideo.audio = CompositeAudioClip(audios)
        masterVideo.write_videofile('./MasterVideo/output_video.avi', fps=30, threads=4, codec="png")

CombieVideo()
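
A likely contributor to both symptoms is the recursive nesting of CompositeVideoClip: each recursion re-opens every VideoFileClip (hence the pile-up of ffmpeg-win64-v4.2.2.exe reader processes), every rendered frame is pushed through all the nested composites, and the 47.5 MiB that fails to allocate is exactly one 1920x1080 RGB frame in float64. CompositeVideoClip also layers clips that all start at t = 0, so nesting it does not play the files one after another. A sketch of a lower-memory approach with concatenate_videoclips, assuming the videoFiles, audios and audioLen variables from the snippet above (output switched to libx264/.mp4, since codec="png" produces enormous files):

import random

from moviepy.editor import (CompositeAudioClip, VideoFileClip,
                            concatenate_videoclips, vfx)

def combine_videos():
    clips = []
    total = 0.0

    # Take the listed files first, then keep picking random ones until the
    # running time covers the audio length (given in hours, as in the original).
    pending = list(videoFiles)
    while total < audioLen * 60 * 60:
        url = pending.pop(0) if pending else random.choice(videoFiles)
        print(url)
        clip = VideoFileClip(url).fx(vfx.fadein, 1).fx(vfx.fadeout, 1)
        clips.append(clip)
        total += clip.duration

    # Play the clips back to back; frames are rendered one at a time, and each
    # selected source file is opened exactly once.
    master = concatenate_videoclips(clips, method="compose")
    master = master.set_audio(CompositeAudioClip(audios))
    master.write_videofile('./MasterVideo/output_video.mp4',
                           fps=30, threads=4, codec="libx264")

combine_videos()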