
Other articles (76)
-
Sites built with MediaSPIP
2 May 2011
This page presents a few of the sites running MediaSPIP. You can of course add your own using the form at the bottom of the page. -
Publishing on MédiaSpip
13 June 2013
Can I post content from an iPad tablet?
Yes, if your MediaSPIP installation is version 0.2 or later. If in doubt, contact your MediaSPIP administrator to find out. -
HTML5 audio and video support
13 April 2011
MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
The player was created specifically for MediaSPIP and can easily be adapted to fit a specific theme.
For older browsers, the Flowplayer Flash fallback is used.
MediaSPIP allows for media playback on major mobile platforms with the above (...)
On other sites (14252)
-
How can I use the SoX library to process audio frames decoded by FFmpeg?
14 June 2023, by bishop
I am using the SoX audio-processing library to process audio frames decoded by FFmpeg:


// Process the decoded audio with SoX
 int sox_count = 9;
 if (StreamType::AUDIO == stream_type_) {
     sox_init();

     sox_signalinfo_t* in_signal = new sox_signalinfo_t();
     in_signal->rate      = frame->sample_rate;
     in_signal->channels  = frame->ch_layout.nb_channels;
     in_signal->length    = frame->nb_samples * in_signal->channels;
     in_signal->precision = av_get_bytes_per_sample(static_cast<AVSampleFormat>(frame->format)) * 8;

     sox_encodinginfo_t* in_encoding = new sox_encodinginfo_t();
     in_encoding->encoding = SOX_ENCODING_SIGN2;
     sox_format_t* tempFormat = sox_open_mem_read(frame->data[0],
                                                  frame->linesize[0],
                                                  in_signal, in_encoding, "raw");

     //sox_write(tempFormat, reinterpret_cast<const sox_sample_t*>(frame->data[0]), frame->nb_samples);

     sox_signalinfo_t* out_signal = in_signal;       // output parameters identical to the input
     //out_signal->rate = new_sample_rate;           // new sample rate; change as needed

     sox_encodinginfo_t* out_encoding = in_encoding; // output encoding identical to the input

     sox_format_t* outputFormat = sox_open_mem_write(frame->data[0],
                                                     frame->linesize[0],
                                                     out_signal, out_encoding, "raw", nullptr);

     // 3. Process the audio data through a SoX effects chain
     sox_effects_chain_t* chain = sox_create_effects_chain(&tempFormat->encoding, &outputFormat->encoding);
     sox_effect_t* effect;
     char* args[10];

     effect = sox_create_effect(sox_find_effect("input"));
     args[0] = (char*)tempFormat;
     assert(sox_effect_options(effect, 1, args) == SOX_SUCCESS);
     assert(sox_add_effect(chain, effect, &tempFormat->signal, &tempFormat->signal) == SOX_SUCCESS);
     free(effect);

     effect = sox_create_effect(sox_find_effect("vol"));
     args[0] = (char*)"200dB";
     assert(sox_effect_options(effect, 1, args) == SOX_SUCCESS);
     /* Add the effect to the end of the effects processing chain: */
     assert(sox_add_effect(chain, effect, &tempFormat->signal, &tempFormat->signal) == SOX_SUCCESS);
     free(effect);

     /*
     effect = sox_create_effect(sox_find_effect("pitch"));
     args[0] = (char*)"50.0"; // pitch-shift amount; change as needed
     assert(sox_effect_options(effect, 1, args) == SOX_SUCCESS);
     assert(sox_add_effect(chain, effect, &tempFormat->signal, &outputFormat->signal) == SOX_SUCCESS);
     free(effect);
     */

     effect = sox_create_effect(sox_find_effect("output"));
     args[0] = (char*)outputFormat;
     assert(sox_effect_options(effect, 1, args) == SOX_SUCCESS);
     if (sox_add_effect(chain, effect, &tempFormat->signal, &outputFormat->signal) == SOX_SUCCESS) {
         std::cout << "true" << std::endl;
     }
     free(effect);

     // 4. Run the effects chain over the audio data
     sox_flow_effects(chain, nullptr, nullptr);

     fflush((FILE*)outputFormat->fp);
     memcpy(frame->data[1], frame->data[0], frame->linesize[0]);

     // Release resources
     sox_delete_effects_chain(chain);
     sox_close(tempFormat);
     sox_close(outputFormat);
     sox_quit();
 }


error :"input : : this handler does not support this data size"


when execute in the line of "sox_flow_effects(chain, nullptr, nullptr) ;"


how can i fixed this problem ?


i have changed the val of "sox_format_t* tempFormat = sox_open_mem_read(frame->data[0],
frame->linesize[0],
in_signal, in_encoding, "raw") ;" buffer_size,but also not right.
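
One thing worth checking (an assumption based on the error text, not a confirmed diagnosis): for packed sample formats, frame->linesize[0] can include alignment padding, so it is not necessarily the exact payload size; and if the decoder produced a planar format such as AV_SAMPLE_FMT_FLTP, data[0] holds only one channel of floats, which would not match SOX_ENCODING_SIGN2 at all. A minimal sketch for computing the exact byte count to pass to SoX (the helper name is hypothetical):

extern "C" {
#include <libavutil/frame.h>
#include <libavutil/samplefmt.h>
}

// Returns the true payload size of a packed-format frame, or 0 if the
// frame is planar and first needs an swr_convert() to a packed format.
static size_t exact_payload_bytes(const AVFrame* frame) {
    const AVSampleFormat fmt = static_cast<AVSampleFormat>(frame->format);

    // Planar formats keep each channel in its own data[] plane.
    if (av_sample_fmt_is_planar(fmt))
        return 0;

    // align = 1 disables padding, unlike linesize[0], which may be
    // rounded up for alignment.
    const int bytes = av_samples_get_buffer_size(
        nullptr, frame->ch_layout.nb_channels, frame->nb_samples, fmt, 1);
    return bytes > 0 ? static_cast<size_t>(bytes) : 0;
}

If exact_payload_bytes() disagrees with linesize[0], passing its result as the buffer size to sox_open_mem_read()/sox_open_mem_write() is the first thing I would try; also make sure in_signal->precision matches signed-integer PCM (what SOX_ENCODING_SIGN2 describes), not a float format.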


-
Decoded YUV shows green edge when rendered with OpenGL
2 February 2023, by Alex
Any idea why the decoded YUV -> RGB (shader conversion) has that extra green edge on the right side?

Almost any 1080x1920 video seems to have this issue.



A screen recording of the issue is uploaded here: https://imgur.com/a/JtUZq4h


Once I manually scale up the texture width, I can see it fills up to the viewport, but it would be nice if I could fix the actual cause. Is it some padding that is part of the YUV colorspace? What else could it be?


My model spans -1 to 1, filling the entire width.

The texture coordinates also range from 0 to 1.

float vertices[] = {
    -1.0f,  1.0f, 0.0f, 0.0f, // top left
     1.0f,  1.0f, 1.0f, 0.0f, // top right
    -1.0f, -1.0f, 0.0f, 1.0f, // bottom left
     1.0f, -1.0f, 1.0f, 1.0f  // bottom right
};



Fragment Shader


#version 330 core

in vec2 TexCoord;

out vec4 FragColor;
precision highp float;
uniform sampler2D textureY;
uniform sampler2D textureU;
uniform sampler2D textureV;
uniform float alpha;
uniform vec2 texScale;


void main()
{
    float y = texture(textureY, TexCoord / texScale).r;
    float u = texture(textureU, TexCoord / texScale).r - 0.5;
    float v = texture(textureV, TexCoord / texScale).r - 0.5;

    vec3 rgb;

    // BT.709 YUV -> RGB: v (Cr) drives red, u (Cb) drives blue;
    // sampled values are normalized floats, so clamp to [0, 1], not [0, 255]
    rgb.r = clamp(y + (1.5748 * v), 0.0, 1.0);
    rgb.g = clamp(y - (0.2126 * 1.5748 / 0.7152) * v - (0.0722 * 1.8556 / 0.7152) * u, 0.0, 1.0);
    rgb.b = clamp(y + (1.8556 * u), 0.0, 1.0);

    FragColor = vec4(rgb, 1.0);
}



Texture Class


class VideoTexture {
  public:
    VideoTexture(Decoder *dec) : decoder(dec) {
        // Bind each texture before setting its parameters; glTexParameteri
        // affects the currently bound texture, not the one just generated.
        glGenTextures(1, &texture1);
        glBindTexture(GL_TEXTURE_2D, texture1);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        // Note: width is linesize[0] (the padded stride), not the visible frame width.
        glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, decoder->frameQueue.first().linesize[0], decoder->frameQueue.first().height, 0, format, GL_UNSIGNED_BYTE, 0);
        glGenerateMipmap(GL_TEXTURE_2D);

        glGenTextures(1, &texture2);
        glBindTexture(GL_TEXTURE_2D, texture2);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, decoder->frameQueue.first().linesize[1], decoder->frameQueue.first().height / 2, 0, format, GL_UNSIGNED_BYTE, 0);
        glGenerateMipmap(GL_TEXTURE_2D);

        glGenTextures(1, &texture3);
        glBindTexture(GL_TEXTURE_2D, texture3);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, decoder->frameQueue.first().linesize[2], decoder->frameQueue.first().height / 2, 0, format, GL_UNSIGNED_BYTE, 0);
        glGenerateMipmap(GL_TEXTURE_2D);
    }

    void Render(Shader *shader, Gui *gui) {
        if (decoder->frameQueue.isEmpty()) {
            return;
        }

        // Bind before uploading; glTexSubImage2D writes to the bound texture.
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, texture1);
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, decoder->frameQueue.first().linesize[0], decoder->frameQueue.first().height, format, GL_UNSIGNED_BYTE, decoder->frameQueue.at(currentFrame).data[0]);
        shader->setInt("textureY", 0);

        glActiveTexture(GL_TEXTURE1);
        glBindTexture(GL_TEXTURE_2D, texture2);
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, decoder->frameQueue.first().linesize[1], decoder->frameQueue.first().height / 2, format, GL_UNSIGNED_BYTE, decoder->frameQueue.at(currentFrame).data[1]);
        shader->setInt("textureU", 1);

        glActiveTexture(GL_TEXTURE2);
        glBindTexture(GL_TEXTURE_2D, texture3);
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, decoder->frameQueue.first().linesize[2], decoder->frameQueue.first().height / 2, format, GL_UNSIGNED_BYTE, decoder->frameQueue.at(currentFrame).data[2]);
        shader->setInt("textureV", 2);
    }

    ~VideoTexture() {
        printf("\nVideo texture destructor");
        glDeleteTextures(1, &texture1);
        glDeleteTextures(1, &texture2);
        glDeleteTextures(1, &texture3);
    }

  private:
    GLuint texture1;
    GLuint texture2;
    GLuint texture3;
    GLint internalFormat = GL_RG8;
    GLint format = GL_RED;
    int currentFrame = 0;
    Decoder *decoder;
};
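
The symptom is consistent with stride padding rather than anything in the YUV colorspace itself: the textures above are allocated linesize[] texels wide, but only width texels of each row carry image data, because FFmpeg pads rows for alignment. The padding bytes are typically zero, and u = v = 0 after the -0.5 shift decodes to green, which would explain the colored band (an inference from the symptoms, not verified against this decoder). A minimal sketch of uploading only the visible pixels, assuming frame is the decoded AVFrame and the textures were instead allocated with frame.width (and frame.width / 2 for chroma):

// Hypothetical upload path: declare the real source stride so GL skips the
// padded tail of each row instead of sampling it.
glBindTexture(GL_TEXTURE_2D, texture1);
glPixelStorei(GL_UNPACK_ROW_LENGTH, frame.linesize[0]); // stride in texels (1 byte per texel here)
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
                frame.width, frame.height,              // visible size, not linesize
                GL_RED, GL_UNSIGNED_BYTE, frame.data[0]);

glBindTexture(GL_TEXTURE_2D, texture2);
glPixelStorei(GL_UNPACK_ROW_LENGTH, frame.linesize[1]);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
                frame.width / 2, frame.height / 2,      // 4:2:0 chroma plane
                GL_RED, GL_UNSIGNED_BYTE, frame.data[1]);

// (same for texture3 / data[2])
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);                 // restore the default

The manual width scaling the question mentions (sampling with texScale = width / linesize) also works; the row-length approach simply keeps the padding out of the textures altogether.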



-
How do I sync 4 videos in a grid to play the same frame at the same time?
28 December 2022, by PirateApp

- 4 of us have recorded ourselves playing a game and want to create a 2 x 2 video grid
- The game has cutscenes at the beginning, followed by each person's unique part for the rest of the video
- I am looking to synchronize the grid so that it starts at the same place in the cutscene for everyone
- Kindly take a look at what is happening currently: the cutscene is off by a few seconds for everyone
- Imagine time offsets a, b, c, d such that when I add each offset to its video, the entire grid is in sync
- How do I find a, b, c, d, and more importantly, how do I apply them in filter_complex?

I used the ffmpeg command below to generate the 2 x 2 video grid, and it seems to work:


ffmpeg
 -i nano_prologue.mkv -i macko_nimble_guardian.mkv -i nano_nimble_guardian.mkv -i ghost_nimble_guardian_subtle_arrow_1.mp4
 -filter_complex "
 nullsrc=size=1920x1080 [base];
 [0:v] setpts=PTS-STARTPTS, scale=960x540 [upperleft];
 [1:v] setpts=PTS-STARTPTS, scale=960x540 [upperright];
 [2:v] setpts=PTS-STARTPTS, scale=960x540 [lowerleft];
 [3:v] setpts=PTS-STARTPTS, scale=960x540 [lowerright];
 [base][upperleft] overlay=shortest=1 [tmp1];
 [tmp1][upperright] overlay=shortest=1:x=960 [tmp2];
 [tmp2][lowerleft] overlay=shortest=1:y=540 [tmp3];
 [tmp3][lowerright] overlay=shortest=1:x=960:y=540
 "
 -c:v libx264 output.mkv



My problem, though, is that since each of us started recording at slightly different times, the cutscenes are out of sync.


As per the screenshot below, you can see that each video has the same scene starting at a slightly different time.


Is there a way to find where the same frame starts in all videos, and then sync each video to start from that frame or 20 seconds before it?




UPDATE 1


I have figured out the offset for each video with millisecond precision using the following technique:


Take a screenshot of the first video at a particular point in the cutscene, save the image as a PNG, and run the command below on the remaining 3 videos to find where this screenshot appears in each of them:


ffmpeg -i "video2.mp4" -r 1 -loop 1 -i screenshot.png -an -filter_complex "blend=difference:shortest=1,blackframe=90:32" -f null -



Use the command above to search every video for the offset of that cutscene.


It gave me this


VIDEO 3 OFFSET


[Parsed_blackframe_1 @ 0x600003af00b0] frame:3144 pblack:92 pts:804861 t:52.399805 type:P last_keyframe:3120

[Parsed_blackframe_1 @ 0x600003af00b0] frame:3145 pblack:96 pts:805117 t:52.416471 type:P last_keyframe:3120



VIDEO 2 OFFSET


[Parsed_blackframe_1 @ 0x6000014dc0b0] frame:3629 pblack:91 pts:60483 t:60.483000 type:P last_keyframe:3500



VIDEO 4 OFFSET


[Parsed_blackframe_1 @ 0x600002f84160] frame:2885 pblack:93 pts:48083 t:48.083000 type:P last_keyframe:2880

[Parsed_blackframe_1 @ 0x600002f84160] frame:2886 pblack:96 pts:48100 t:48.100000 type:P last_keyframe:2880



Now how do I use filter_complex to start each video at either the frame or the timestamp above? I would like to include, say, 10 seconds before that frame in each video so that the cutscene plays from its beginning.


UPDATE 2


This command currently gives me a 100% synced video. How do I make it start 15 seconds before the specified frame numbers, and how do I make it use the audio track from video 2 instead? (See the sketch after the command.)


ffmpeg
 -i v_nimble_guardian.mkv -i macko_nimble_guardian.mkv -i ghost_nimble_guardian_subtle_arrow_1.mp4 -i nano_nimble_guardian.mkv
 -filter_complex "
 nullsrc=size=1920x1080 [base];
 [0:v] trim=start_pts=49117,setpts=PTS-STARTPTS, scale=960x540 [upperleft];
 [1:v] trim=start_pts=50483,setpts=PTS-STARTPTS, scale=960x540 [upperright];
 [2:v] trim=start_pts=795117,setpts=PTS-STARTPTS, scale=960x540 [lowerleft];
 [3:v] trim=start_pts=38100,setpts=PTS-STARTPTS, scale=960x540 [lowerright];
 [base][upperleft] overlay=shortest=1 [tmp1];
 [tmp1][upperright] overlay=shortest=1:x=960 [tmp2];
 [tmp2][lowerleft] overlay=shortest=1:y=540 [tmp3];
 [tmp3][lowerright] overlay=shortest=1:x=960:y=540
 "
 -c:v libx264 output.mkv
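
One way this could look (a sketch under assumptions, not a verified command): trim and atrim also accept start in seconds, which sidesteps per-stream timebase bookkeeping. The seconds below are the start_pts values above converted with an assumed 1/1000 timebase for the mkv inputs and 1/15360 for the mp4 (the ratios the blackframe pts/t pairs suggest), each minus 15 seconds; the [1:a] atrim pulls in the audio of "video 2" (input 1) at the same offset as its video. Treat the exact numbers as placeholders to fine-tune.

ffmpeg
 -i v_nimble_guardian.mkv -i macko_nimble_guardian.mkv -i ghost_nimble_guardian_subtle_arrow_1.mp4 -i nano_nimble_guardian.mkv
 -filter_complex "
 nullsrc=size=1920x1080 [base];
 [0:v] trim=start=34.117,setpts=PTS-STARTPTS, scale=960x540 [upperleft];
 [1:v] trim=start=35.483,setpts=PTS-STARTPTS, scale=960x540 [upperright];
 [2:v] trim=start=36.765,setpts=PTS-STARTPTS, scale=960x540 [lowerleft];
 [3:v] trim=start=23.100,setpts=PTS-STARTPTS, scale=960x540 [lowerright];
 [base][upperleft] overlay=shortest=1 [tmp1];
 [tmp1][upperright] overlay=shortest=1:x=960 [tmp2];
 [tmp2][lowerleft] overlay=shortest=1:y=540 [tmp3];
 [tmp3][lowerright] overlay=shortest=1:x=960:y=540 [vout];
 [1:a] atrim=start=35.483,asetpts=PTS-STARTPTS [aout]
 "
 -map "[vout]" -map "[aout]"
 -c:v libx264 -c:a aac output.mkv

If the audio still drifts against the grid, the atrim start can be nudged independently of the video trims.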