
Other articles (19)
-
Customising categories
21 June 2013 — Category creation form
For those who know SPIP well, a category can be thought of as a section (rubrique).
For a document of type category, the fields offered by default are: Text
This form can be modified under:
Administration > Configuration des masques de formulaire.
For a document of type media, the fields not displayed by default are: Quick description
It is also in this configuration section that you can specify the (...)
-
Accepted formats
28 January 2010 — The following commands give information about the formats and codecs supported by the local ffmpeg installation:
ffmpeg -codecs
ffmpeg -formats
Accepted input video formats
This list is not exhaustive; it highlights the main formats in use: h264: H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10; m4v: raw MPEG-4 video format; flv: Flash Video (FLV) / Sorenson Spark / Sorenson H.263; Theora; wmv: (...)
Possible output video formats
To begin with, we (...)
-
Supporting all media types
13 April 2011 — Unlike most software and media-sharing platforms, MediaSPIP aims to manage as many different media types as possible. The following are just a few examples from an ever-expanding list of supported formats: images: png, gif, jpg, bmp and more; audio: MP3, Ogg, Wav and more; video: AVI, MP4, OGV, mpg, mov, wmv and more; text, code and other data: OpenOffice, Microsoft Office (Word, PowerPoint, Excel), web (html, CSS), LaTeX, Google Earth and (...)
On other sites (5909)
-
Can ffmpeg perform difference matte keying?
31 July 2020, by Michael — Is it possible to do difference matte keying with ffmpeg? Specifically, I have three inputs: a static image showing a background, and two video streams. I want to replace every pixel in the first video stream that matches the corresponding pixel in the static image with the pixel from the second video stream.


I have looked through some of the filters and see colorkey and chromakey, but those require a fixed color range in the background for replacement.
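
A rough, untested sketch of one way to try this (background.png, main.mp4, replacement.mp4 and output.mp4 are placeholder names): build the matte yourself by taking the per-pixel difference between the first video and the looped still image with the blend filter, binarising it with lutyuv, and feeding the result to maskedmerge as the mask that chooses between the two videos:

ffmpeg -loop 1 -i background.png -i main.mp4 -i replacement.mp4 -filter_complex \
 "[1:v][0:v]blend=all_mode=difference,lutyuv=y='if(lt(val,24),maxval,0)'[mask]; \
  [1:v][2:v][mask]maskedmerge[out]" \
 -map "[out]" -shortest output.mp4

The threshold (24 here), how the chroma planes of the mask are treated, and maskedmerge's requirement that all three inputs share the same size and pixel format would all need tuning against real footage.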


-
Where can Discord bots find the ffmpeg buildpack on Heroku?
11 August 2020, by Isiah — I'm making a bot which outputs an mp3 file to the voice chat with discord.py. It works locally by using:


vc.play(discord.FFmpegPCMAudio(executable="ffmpeg/bin/ffmpeg.exe", source="noise.mp3"))


However, I'm now hosting it on Heroku. I have the buildpack installed, but how can my code access it in place of the code above?
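
For reference, a minimal sketch of the usual change, assuming the ffmpeg buildpack puts an ffmpeg binary on the dyno's PATH (which the common ffmpeg buildpacks do) and reusing noise.mp3 from the snippet above:

# on Heroku there is no Windows-style path; let discord.py invoke "ffmpeg" from the PATH
vc.play(discord.FFmpegPCMAudio(source="noise.mp3"))
# or name the binary explicitly:
vc.play(discord.FFmpegPCMAudio(executable="ffmpeg", source="noise.mp3"))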


-
Frame sliced in two at an angle. How do I fix it?
26 November 2020, by Алекс Аникей — I'm trying to use the h264 codec in a video chat application, and for some reason the frame is sliced into two triangles (picture below). I send my desktop image to another person, and this is the image that arrives on the other client.


Which settings did I set wrong?
My code:


Init:


VCSession *vc_new_x264(Logger *log, ToxAV *av, uint32_t friend_number, toxav_video_receive_frame_cb *cb, void *cb_data,
 VCSession *vc)
{
 x264_param_t param; // x264 encoder parameter struct (declaration missing from the original snippet)

 if (x264_param_default_preset(&param, "slow", "zerolatency") < 0) {
 // goto fail;
 }

 param.i_csp = X264_CSP_I420;
 param.i_width = 1920;
 param.i_height = 1080;
 vc->h264_enc_width = param.i_width;
 vc->h264_enc_height = param.i_height;

 param.i_keyint_max = 30;

 param.b_vfr_input = 1; /* VFR input. If 1, use timebase and timestamps for ratecontrol purposes.
 * If 0, use fps only. */
 param.i_timebase_num = 1; // 1 ms = timebase units = (1/1000)s
 param.i_timebase_den = 1000; // 1 ms = timebase units = (1/1000)s
 param.b_repeat_headers = 1;
 param.b_annexb = 1;

 param.rc.f_rate_tolerance = VIDEO_F_RATE_TOLERANCE_H264;
 param.rc.i_vbv_buffer_size = 1500;
 param.rc.i_vbv_max_bitrate = VIDEO_BITRATE_INITIAL_VALUE_H264 * 1;

 vc->h264_enc_bitrate = VIDEO_BITRATE_INITIAL_VALUE_H264;

 param.rc.i_qp_min = 13;
 param.rc.i_qp_max = 35; // max quantizer for x264


 param.rc.b_stat_read = 0;
 param.rc.b_stat_write = 0;


 if (x264_param_apply_profile(&param,
 "high") < 0) { // "baseline", "main", "high", "high10", "high422", "high444"
 // goto fail;
 }


 if (x264_picture_alloc(&(vc->h264_in_pic), param.i_csp, param.i_width, param.i_height) < 0) {
 // goto fail;
 }

 vc->h264_encoder = x264_encoder_open(&param);

 vc->h264_decoder = NULL;
 avcodec_register_all(); // deprecated since FFmpeg 4.0, where codec registration is no longer needed

 AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);

 if (!codec) {
 LOGGER_WARNING(log, "codec not found H264 on decoder");
 }

 vc->h264_decoder = avcodec_alloc_context3(codec);

 if (codec->capabilities & AV_CODEC_CAP_TRUNCATED) {
 vc->h264_decoder->flags |= AV_CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
 }


 vc->h264_decoder->delay = 5;

 if (avcodec_open2(vc->h264_decoder, codec, NULL) < 0) {
 LOGGER_WARNING(log, "could not open codec H264 on decoder");
 }


 return vc;
}



Getting a frame and decoding it:


void vc_iterate_x264(VCSession *vc)
{

 if (!vc) {
 return;
 }

 pthread_mutex_lock(vc->queue_mutex);

 struct RTPMessage *p;

 if (!rb_read(vc->vbuf_raw, (void **)&p)) {
 LOGGER_TRACE(vc->log, "no Video frame data available");
 pthread_mutex_unlock(vc->queue_mutex);
 return;
 }

 pthread_mutex_unlock(vc->queue_mutex);
 const struct RTPHeader *const header = &p->header;

 uint32_t full_data_len;

 if (header->flags & RTP_LARGE_FRAME) {
 full_data_len = header->data_length_full;
 LOGGER_WARNING(vc->log, "vc_iterate:001:full_data_len=%d", (int)full_data_len);
 } else {
 full_data_len = p->len;
 if (header->data_length_lower != full_data_len) {
 LOGGER_ERROR(vc->log, "Data length mismatch: %d in header vs %d in packet", header->data_length_lower, full_data_len);
 }
 LOGGER_DEBUG(vc->log, "vc_iterate:002");
 }

 decode_frame_h264(vc, p, full_data_len);
}

void decode_frame_h264(VCSession *vc,
 struct RTPMessage *p,
 uint32_t full_data_len)
{

 AVPacket *compr_data;
 compr_data = av_packet_alloc();


 uint8_t *tmp_buf = calloc(1, full_data_len + FF_INPUT_BUFFER_PADDING_SIZE); // FF_INPUT_BUFFER_PADDING_SIZE is the older name of AV_INPUT_BUFFER_PADDING_SIZE
 memcpy(tmp_buf, p->data, full_data_len);

 compr_data->data = tmp_buf; // p->data;
 compr_data->size = (int)full_data_len; // hmm, "int" again

 avcodec_send_packet(vc->h264_decoder, compr_data);

 int ret_ = 0;
 while (ret_ >= 0) {
 AVFrame *frame = av_frame_alloc();
 ret_ = avcodec_receive_frame(vc->h264_decoder, frame);
 if (ret_ == AVERROR(EAGAIN) || ret_ == AVERROR_EOF) {
 // no more frames available from this packet (not an error)
 av_frame_free(&frame);
 break;
 } else if (ret_ < 0) {
 // error during decoding
 av_frame_free(&frame);
 break;
 } else if (ret_ == 0) {
 vc->vcb(vc->av, vc->friend_number, frame->width, frame->height,
 (const uint8_t *)frame->data[0],
 (const uint8_t *)frame->data[1],
 (const uint8_t *)frame->data[2],
 frame->linesize[0], frame->linesize[1],
 frame->linesize[2], vc->vcb_user_data);
 } else {
 // some other error
 }
 av_frame_free(&frame);
 }
 av_packet_free(&compr_data);
 free(tmp_buf);
 free(p);
}



Sending and encoding a frame:


bool toxav_video_send_frame(ToxAV *av, uint32_t friend_number, uint16_t width, uint16_t height, const uint8_t *y,
 const uint8_t *u, const uint8_t *v, Toxav_Err_Send_Frame *error, int16_t kf_max_dist, vpx_codec_er_flags_t error_resilient,
 unsigned int my_lag_in_frames, uint16_t kf_mode, uint16_t quality_mode)
{
 Toxav_Err_Send_Frame rc = TOXAV_ERR_SEND_FRAME_OK;
 ToxAVCall *call;
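 /* NOTE: how 'call' is looked up for friend_number (and where av->mutex is locked) is omitted from this snippet */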
 uint64_t video_frame_record_timestamp = current_time_monotonic(av->m->mono_time);

 int vpx_encode_flags = 0;

 pthread_mutex_lock(call->mutex_video);
 pthread_mutex_unlock(av->mutex);

 if (y == nullptr || u == nullptr || v == nullptr) {
 pthread_mutex_unlock(call->mutex_video);
 rc = TOXAV_ERR_SEND_FRAME_NULL;
 goto RETURN;
 }


 if (call->video_rtp->ssrc < VIDEO_SEND_X_KEYFRAMES_FIRST) {
 // Key frame flag for first frames
 vpx_encode_flags = VPX_EFLAG_FORCE_KF;
 LOGGER_INFO(av->m->log, "I_FRAME_FLAG:%d only-i-frame mode", call->video_rtp->ssrc);

 ++call->video_rtp->ssrc;
 } else if (call->video_rtp->ssrc == VIDEO_SEND_X_KEYFRAMES_FIRST) {
 // normal keyframe placement
 vpx_encode_flags = 0;
 LOGGER_INFO(av->m->log, "I_FRAME_FLAG:%d normal mode", call->video_rtp->ssrc);

 ++call->video_rtp->ssrc;
 }


 x264_nal_t *nal = NULL;
 int i_frame_size = 0;

 uint32_t result = encode_frame_h264(av, friend_number, width, height,
 y, u, v,
 &video_frame_record_timestamp,
 vpx_encode_flags,
 &nal,
 &i_frame_size);
 if (result != 0) {
 pthread_mutex_unlock(call->mutex_video);
 rc = TOXAV_ERR_SEND_FRAME_INVALID;
 goto RETURN;
 }

 ++call->video->frame_counter;

 rc = send_frames_h264(av, friend_number, width, height,
 y, u, v, call,
 &video_frame_record_timestamp,
 vpx_encode_flags,
 &nal,
 &i_frame_size,
 &rc);

 pthread_mutex_unlock(call->mutex_video);

RETURN:

 if (error) {
 *error = rc;
 }

 return rc == TOXAV_ERR_SEND_FRAME_OK;
}

uint32_t send_frames_h264(ToxAV *av, uint32_t friend_number, uint16_t width, uint16_t height,
 const uint8_t *y,
 const uint8_t *u, const uint8_t *v, ToxAVCall *call,
 uint64_t *video_frame_record_timestamp,
 int vpx_encode_flags,
 x264_nal_t **nal,
 int *i_frame_size,
 TOXAV_ERR_SEND_FRAME *rc)
{

 if (*i_frame_size > 0) {

 // use the record timestamp that was actually used for this frame
 *video_frame_record_timestamp = (uint64_t)call->video->h264_in_pic.i_pts;
 const uint32_t frame_length_in_bytes = *i_frame_size;
 const int keyframe = (int)call->video->h264_out_pic.b_keyframe;

 LOGGER_DEBUG(av->m->log, "video packet record time: %lu", (*video_frame_record_timestamp));

 int res = rtp_send_data
 (
 call->video_rtp,
 (const uint8_t *)((*nal)->p_payload),
 frame_length_in_bytes,
 keyframe,
 *video_frame_record_timestamp,
 av->m->log
 );

 (*video_frame_record_timestamp)++;

 if (res < 0) {
 LOGGER_WARNING(av->m->log, "Could not send video frame: %s", strerror(errno));
 *rc = TOXAV_ERR_SEND_FRAME_RTP_FAILED;
 return 1;
 }

 return 0;
 } else {
 *rc = TOXAV_ERR_SEND_FRAME_RTP_FAILED;
 return 1;
 }

}



I get an image like this: