
Media (3)
-
MediaSPIP Simple: a future default graphic theme?
26 September 2013
Updated: October 2013
Language: French
Type: Video
-
GetID3 - File information block
9 April 2013
Updated: May 2013
Language: French
Type: Image
-
GetID3 - Extra buttons
9 April 2013
Updated: April 2013
Language: French
Type: Image
Other articles (69)
-
MediaSPIP v0.2
21 June 2013
MediaSPIP 0.2 is the first stable version of MediaSPIP.
Its official release date is 21 June 2013, announced here.
The zip file provided here contains only the MediaSPIP sources, in standalone form.
As with the previous version, the full set of software dependencies must be installed manually on the server.
If you want to use this archive for a farm-mode installation, further modifications are required as well (...)
-
Permissions overridden by plugins
27 April 2010, by MediaSPIP core
autoriser_auteur_modifier() so that visitors are able to modify their own information on the authors page
-
PHP5-specific configuration
4 February 2011
PHP5 is required; you can install it by following this dedicated tutorial.
It is recommended to disable safe_mode at first; however, if safe_mode is configured correctly and the required binaries are accessible, MediaSPIP should work correctly with it enabled.
Specific modules
Certain PHP modules must be installed, via your distribution's package manager or manually: php5-mysql for connectivity with the (...)
On other sites (10553)
-
Recording voice using HTML5 and processing it with ffmpeg
22 March 2015, by user3789242
I need to use ffmpeg in my JavaScript/HTML5 project, which lets the user select the format the audio should be exported in. I don't know anything about ffmpeg, and despite a lot of research I still don't know how to use it in my project. I found an example, https://github.com/sopel39/audioconverter.js, but the problem is how to get the 8 MB ffmpeg.js into my project. If someone can help me, I'll be very thankful.
Here is my full code. The JavaScript page:
// variables
var leftchannel = [];
var rightchannel = [];
var recorder = null;
var recording = false;
var recordingLength = 0;
var volume = null;
var audioInput = null;
var sampleRate = 44100;
var audioContext = null;
var context = null;
var outputString;
if (!navigator.getUserMedia)
navigator.getUserMedia = navigator.getUserMedia ||
navigator.webkitGetUserMedia ||
navigator.mozGetUserMedia ||
navigator.msGetUserMedia;
if (navigator.getUserMedia){
navigator.getUserMedia({audio:true}, success, function(e) {
alert('Error capturing audio.');
});
} else alert('getUserMedia not supported in this browser.');
function getVal(value)
{
// if R is pressed, we start recording
if ( value == "record"){
recording = true;
// reset the buffers for the new recording
leftchannel.length = rightchannel.length = 0;
recordingLength = 0;
document.getElementById('output').innerHTML="Recording now...";
// if S is pressed, we stop the recording and package the WAV file
} else if ( value == "stop" ){
// we stop recording
recording = false;
document.getElementById('output').innerHTML="Building wav file...";
// we flat the left and right channels down
var leftBuffer = mergeBuffers ( leftchannel, recordingLength );
var rightBuffer = mergeBuffers ( rightchannel, recordingLength );
// we interleave both channels together
var interleaved = interleave ( leftBuffer, rightBuffer );
var buffer = new ArrayBuffer(44 + interleaved.length * 2);
var view = new DataView(buffer);
// RIFF chunk descriptor
writeUTFBytes(view, 0, 'RIFF');
view.setUint32(4, 44 + interleaved.length * 2, true);
writeUTFBytes(view, 8, 'WAVE');
// FMT sub-chunk
writeUTFBytes(view, 12, 'fmt ');
view.setUint32(16, 16, true);
view.setUint16(20, 1, true);
// stereo (2 channels)
view.setUint16(22, 2, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 4, true);
view.setUint16(32, 4, true);
view.setUint16(34, 16, true);
// data sub-chunk
writeUTFBytes(view, 36, 'data');
view.setUint32(40, interleaved.length * 2, true);
var lng = interleaved.length;
var index = 44;
var volume = 1;
for (var i = 0; i < lng; i++){
view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
index += 2;
}
var blob = new Blob ( [ view ], { type : 'audio/wav' } );
// let's save it locally
document.getElementById('output').innerHTML='Handing off the file now...';
var url = (window.URL || window.webkitURL).createObjectURL(blob);
var li = document.createElement('li');
var au = document.createElement('audio');
var hf = document.createElement('a');
au.controls = true;
au.src = url;
hf.href = url;
hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
hf.innerHTML = hf.download;
li.appendChild(au);
li.appendChild(hf);
recordingList.appendChild(li);
}
}
function success(e){
audioContext = window.AudioContext || window.webkitAudioContext;
context = new audioContext();
volume = context.createGain();
// creates an audio node from the microphone incoming stream(source)
source = context.createMediaStreamSource(e);
// connect the stream(source) to the gain node
source.connect(volume);
var bufferSize = 2048;
recorder = context.createScriptProcessor(bufferSize, 2, 2);
//node for the visualizer
analyser = context.createAnalyser();
analyser.smoothingTimeConstant = 0.3;
analyser.fftSize = 512;
splitter = context.createChannelSplitter();
//when recording happens
recorder.onaudioprocess = function(e){
if (!recording) return;
var left = e.inputBuffer.getChannelData (0);
var right = e.inputBuffer.getChannelData (1);
leftchannel.push (new Float32Array (left));
rightchannel.push (new Float32Array (right));
recordingLength += bufferSize;
// get the average for the first channel
var array = new Uint8Array(analyser.frequencyBinCount);
analyser.getByteFrequencyData(array);
var c=document.getElementById("myCanvas");
var ctx = c.getContext("2d");
// clear the current state
ctx.clearRect(0, 0, 1000, 325);
var gradient = ctx.createLinearGradient(0,0,0,300);
gradient.addColorStop(1,'#000000');
gradient.addColorStop(0.75,'#ff0000');
gradient.addColorStop(0.25,'#ffff00');
gradient.addColorStop(0,'#ffffff');
// set the fill style
ctx.fillStyle=gradient;
drawSpectrum(array);
function drawSpectrum(array) {
for ( var i = 0; i < (array.length); i++ ){
var value = array[i];
ctx.fillRect(i*5,325-value,3,325);
}
}
}
function getAverageVolume(array) {
var values = 0;
var average;
var length = array.length;
// get all the frequency amplitudes
for (var i = 0; i < length; i++) {
values += array[i];
}
average = values / length;
return average;
}
// we connect the recorder(node to destination(speakers))
volume.connect(splitter);
splitter.connect(analyser, 0, 0);
analyser.connect(recorder);
recorder.connect(context.destination);
}
function mergeBuffers(channelBuffer, recordingLength){
var result = new Float32Array(recordingLength);
var offset = 0;
var lng = channelBuffer.length;
for (var i = 0; i < lng; i++){
var buffer = channelBuffer[i];
result.set(buffer, offset);
offset += buffer.length;
}
return result;
}
function interleave(leftChannel, rightChannel){
var length = leftChannel.length + rightChannel.length;
var result = new Float32Array(length);
var inputIndex = 0;
for (var index = 0; index < length; ){
result[index++] = leftChannel[inputIndex];
result[index++] = rightChannel[inputIndex];
inputIndex++;
}
return result;
}
function writeUTFBytes(view, offset, string){
var lng = string.length;
for (var i = 0; i < lng; i++){
view.setUint8(offset + i, string.charCodeAt(i));
}
}
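(Not from the original post:) since shipping an 8 MB ffmpeg.js to every visitor is often impractical, one common alternative is to upload the recorded WAV blob and convert it server-side with the ffmpeg CLI. A minimal sketch, assuming a hypothetical /convert endpoint backed by a server with ffmpeg installed:
// Sketch only: '/convert' is a hypothetical endpoint, not part of the
// question's code; the server behind it would run something like
//   ffmpeg -i recording.wav recording.mp3
async function convertRecording(blob, format) {
  const form = new FormData();
  form.append('file', blob, 'recording.wav');
  form.append('format', format); // e.g. 'mp3' or 'ogg'
  const res = await fetch('/convert', { method: 'POST', body: form });
  if (!res.ok) throw new Error('Conversion failed: ' + res.status);
  return res.blob(); // the converted audio, ready for createObjectURL
}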
-
How to send large x264 NAL over RTMP ?
17 September 2017, by samgak
I'm trying to stream video over RTMP using x264 and rtmplib in C++ on Windows.
So far I have managed to encode and stream a test video pattern consisting of animated multi-colored vertical lines that I generate in code. It’s possible to start and stop the stream, and start and stop the player, and it works every time. However, as soon as I modify it to send encoded camera frames instead of the test pattern, the streaming becomes very unreliable. It only starts <20% of the time, and stopping and restarting doesn’t work.
After searching around for answers, I concluded that it must be because the NAL size is too large (my test pattern is mostly flat color, so it encodes to a very small size), and that an Ethernet packet limit of around 1400 bytes affects it. So I tried to make x264 output only NALs under 1200 bytes, by setting i_slice_max_size in my x264 setup:
if (x264_param_default_preset(&param, "veryfast", "zerolatency") < 0)
return false;
param.i_csp = X264_CSP_I420;
param.i_threads = 1;
param.i_width = width; //set frame width
param.i_height = height; //set frame height
param.b_cabac = 0;
param.i_bframe = 0;
param.b_interlaced = 0;
param.rc.i_rc_method = X264_RC_ABR;
param.i_level_idc = 21;
param.rc.i_bitrate = 128;
param.b_intra_refresh = 1;
param.b_annexb = 1;
param.i_keyint_max = 25;
param.i_fps_num = 15;
param.i_fps_den = 1;
param.i_slice_max_size = 1200;
if (x264_param_apply_profile(&param, "baseline") < 0)
return false;
This reduces the NAL size, but it doesn't seem to make any difference to the reliability issues.
I've also tried fragmenting the NALs, using this Java code and RFC 3984 (RTP Payload Format for H.264 Video) as a reference, but it doesn't work at all (code below): the server says "stream has stopped" immediately after it starts. I've tried both including and excluding the NAL header (with the timestamp etc.) in each fragment, and including it only in the first, but it doesn't work for me either way.
I’m pretty sure my issue has to be with the NAL size and not PPS/SPS or anything like that (as in this question) or with my network connection or test server, because everything works fine with the test pattern.
I'm sending NAL_PPS and NAL_SPS (only once), and all NAL_SLICE_IDR and NAL_SLICE packets. I'm ignoring NAL_SEI and not sending it.
One thing that is confusing me is that the source code I can find on the internet that does similar things to what I want doesn't match what the RFC specifies. For example, RFC 3984 section 5.3 defines the NAL octet, which should have the NAL type in the lower 5 bits and the NRI in bits 5 and 6 (bit 7 is zero). The types NAL_SLICE_IDR and NAL_SLICE have values of 5 and 1 respectively, which are the ones in table 7-1 of the document (PDF) referenced by the RFC, and also the ones output by x264. But the code that actually works sets the NAL octet to 39 (0x27) and 23 (0x17), for reasons unknown to me. When implementing fragmented NALs, I've tried both following the spec and using the values copied over from the working code, but neither works.
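A hedged note on that mismatch: 0x27 and 0x17 are not RTP NAL octets at all; they match the first byte of an FLV video tag (frame type in the high nibble, 1 = keyframe, 2 = inter frame; codec ID 7 = AVC in the low nibble), which is the framing RTMP actually carries, whereas the FU-A rules of RFC 3984 apply to RTP. For comparison, a small sketch of the RFC 3984 section 5.8 byte layout that the fragmenting code below attempts:
// FU-A bytes per RFC 3984 section 5.8 (sketch, mirrors the C++ below).
// nalOctet is the first byte of the original NAL unit.
function fuaBytes(nalOctet: number, isStart: boolean, isEnd: boolean): [number, number] {
  const fuIndicator = (nalOctet & 0x60) | 28;  // keep the NRI bits; type 28 = FU-A
  const fuHeader = (isStart ? 0x80 : 0)        // S bit on the first fragment
                 | (isEnd ? 0x40 : 0)          // E bit on the last fragment
                 | (nalOctet & 0x1f);          // original NAL type (5 = IDR, 1 = non-IDR)
  return [fuIndicator, fuHeader];
}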
Any help appreciated.
void sendNAL(unsigned char* buf, int len)
{
Logging::LogNumber("sendNAL", len);
RTMPPacket * packet;
long timeoffset = GetTickCount() - startTime;
if (buf[2] == 0x00) { //00 00 00 01
buf += 4;
len -= 4;
}
else if (buf[2] == 0x01) { //00 00 01
buf += 3;
len -= 3;
}
else
{
Logging::LogStdString("INVALID x264 FRAME!");
}
int type = buf[0] & 0x1f;
int maxNALSize = 1200;
if (len <= maxNALSize)
{
packet = (RTMPPacket *)malloc(RTMP_HEAD_SIZE + len + 9);
memset(packet, 0, RTMP_HEAD_SIZE);
packet->m_body = (char *)packet + RTMP_HEAD_SIZE;
packet->m_nBodySize = len + 9;
unsigned char *body = (unsigned char *)packet->m_body;
memset(body, 0, len + 9);
body[0] = 0x27;
if (type == NAL_SLICE_IDR) {
body[0] = 0x17;
}
body[1] = 0x01; //nal unit
body[2] = 0x00;
body[3] = 0x00;
body[4] = 0x00;
body[5] = (len >> 24) & 0xff;
body[6] = (len >> 16) & 0xff;
body[7] = (len >> 8) & 0xff;
body[8] = (len) & 0xff;
memcpy(&body[9], buf, len);
packet->m_hasAbsTimestamp = 0;
packet->m_packetType = RTMP_PACKET_TYPE_VIDEO;
if (rtmp != NULL) {
packet->m_nInfoField2 = rtmp->m_stream_id;
}
packet->m_nChannel = 0x04;
packet->m_headerType = RTMP_PACKET_SIZE_LARGE;
packet->m_nTimeStamp = timeoffset;
if (rtmp != NULL) {
RTMP_SendPacket(rtmp, packet, QUEUE_RTMP);
}
free(packet);
}
else
{
packet = (RTMPPacket *)malloc(RTMP_HEAD_SIZE + maxNALSize + 90);
memset(packet, 0, RTMP_HEAD_SIZE);
// split large NAL into multiple smaller ones:
int sentBytes = 0;
bool firstFragment = true;
while (sentBytes < len)
{
// decide how many bytes to send in this fragment:
int fragmentSize = maxNALSize;
if (sentBytes + fragmentSize > len)
fragmentSize = len - sentBytes;
bool lastFragment = (sentBytes + fragmentSize) >= len;
packet->m_body = (char *)packet + RTMP_HEAD_SIZE;
int headerBytes = firstFragment ? 10 : 2;
packet->m_nBodySize = fragmentSize + headerBytes;
unsigned char *body = (unsigned char *)packet->m_body;
memset(body, 0, fragmentSize + headerBytes);
//key frame
int NALtype = 0x27;
if (type == NAL_SLICE_IDR) {
NALtype = 0x17;
}
// Set FU-A indicator
body[0] = (byte)((NALtype & 0x60) & 0xFF); // FU indicator NRI
body[0] += 28; // 28 = FU - A (fragmentation unit A) see RFC: https://tools.ietf.org/html/rfc3984
// Set FU-A header
body[1] = (byte)(NALtype & 0x1F); // FU header type
body[1] += (firstFragment ? 0x80 : 0) + (lastFragment ? 0x40 : 0); // Start/End bits
body[2] = 0x01; //nal unit
body[3] = 0x00;
body[4] = 0x00;
body[5] = 0x00;
body[6] = (len >> 24) & 0xff;
body[7] = (len >> 16) & 0xff;
body[8] = (len >> 8) & 0xff;
body[9] = (len) & 0xff;
//copy data
memcpy(&body[headerBytes], buf + sentBytes, fragmentSize);
packet->m_hasAbsTimestamp = 0;
packet->m_packetType = RTMP_PACKET_TYPE_VIDEO;
if (rtmp != NULL) {
packet->m_nInfoField2 = rtmp->m_stream_id;
}
packet->m_nChannel = 0x04;
packet->m_headerType = RTMP_PACKET_SIZE_LARGE;
packet->m_nTimeStamp = timeoffset;
if (rtmp != NULL) {
RTMP_SendPacket(rtmp, packet, TRUE);
}
sentBytes += fragmentSize;
firstFragment = false;
}
free(packet);
}
}
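For reference (an observation, not from the original post): what the single-NAL branch above writes is the body of an FLV video tag, which is what RTMP video messages carry, so the FU-A headers of RFC 3984 have no meaning inside it. A sketch of that layout, assuming the same AVCC framing the question's code uses; note that servers typically also expect an AVC sequence header packet (AVCPacketType 0, carrying SPS/PPS) before the first NALU packet:
// FLV/RTMP AVC video packet body (sketch of the layout the code above builds).
function flvAvcBody(nalOctet: number, nal: Uint8Array): Uint8Array {
  const body = new Uint8Array(9 + nal.length);
  const keyframe = (nalOctet & 0x1f) === 5;  // NAL type 5 = IDR
  body[0] = keyframe ? 0x17 : 0x27;          // (frame type 1 or 2) << 4 | codec ID 7 (AVC)
  body[1] = 0x01;                            // AVCPacketType: 1 = NALU, 0 = sequence header
  body[2] = body[3] = body[4] = 0x00;        // composition time offset (SI24)
  body[5] = (nal.length >>> 24) & 0xff;      // 4-byte big-endian NAL length (AVCC framing)
  body[6] = (nal.length >>> 16) & 0xff;
  body[7] = (nal.length >>> 8) & 0xff;
  body[8] = nal.length & 0xff;
  body.set(nal, 9);                          // the NAL unit itself, without start code
  return body;
}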
-
Video streaming with FFmpeg and Nest.js + Next.js
17 September 2024, by Aizen
Here is my problem: I have one 1080p video src (on the frontend). From the frontend I send this video route to the backend:


const req = async () => {
  try {
    const res = await axios.get('/catalog/item', { params: { SeriesName: seriesName } });
    return { data: res.data };
  } catch (err) {
    console.log(err);
    return false;
  }
};
const fetchedData = await req();
On the backend I return seriesName. Now I can build the full path: what the video is, and where it is. Code:



const videoUrl = 'C:/Users/arMori/Desktop/RedditClone/reddit/public/videos';
console.log('IT VideoURL', videoUrl);



const selectedFile = `${videoUrl}/${fetchedData.data.VideoSource}/${seriesName}-1080p.mp4`
console.log(`ITS'S SELECTED FILE: ${selectedFile}`);



OK, I have my 1080p src; now it's time to send it to the backend:


const response = await axios.post('/videoFormat', { videoUrl: selectedFile });
console.log('Path log: ', response.data);
const videoPaths = response.data;



The backend takes it; FFmpeg produces two renditions, 720p and 480p, saves them to temporary storage on the backend, and then returns the two routes where these videos are stored:


async videoUpload(videoUrl: string) {
  try {
    const tempDir = 'C:/Users/arMori/Desktop/RedditClone/reddit_back/src/video/temp';
    const inputFile = videoUrl;
    console.log('VIDEOURL: ', videoUrl);



const outputFiles = [];
 
 await this.createDirectories(tempDir); 
 outputFiles.push(await this.convertVideo(inputFile, '1280x720', '720p.mp4'));
 outputFiles.push(await this.convertVideo(inputFile, '854x480', '480p.mp4'));
 console.log('OUTUPT FILES SERVICE: ',outputFiles);
 
 return outputFiles;

 }catch(err){
 console.error('VideoFormatterService Error: ',err);
 
 }
}

private convertVideo(inputPath:string,resolution:string,outputFileName:string):Promise<string>{
 const temp = `C:/Users/arMori/Desktop/RedditClone/reddit_back/src/video/temp`;
 return new Promise(async(resolve,reject)=>{
 const height = resolution.split('x')[1];
 console.log('HIEGHT: ',height);
 
 const outputDir = `C:/Users/arMori/Desktop/RedditClone/reddit_back/src/video/temp/${height}p`;
 const outputPath = join(outputDir, outputFileName);
 const isExists = await fs.access(outputPath).then(() => true).catch(() => false);
 if(isExists){ 
 console.log(`File already exists: ${outputPath}`);
 return resolve(outputPath)
 };
 ffmpeg(inputPath)
 .size(`${resolution}`)
 .videoCodec('libx264') // H.264 codec
 .audioCodec('aac') 
 .output(outputPath)
 .on('end',()=>resolve(outputPath))
 .on('error',(err)=>reject(err))
 .run()
 
 })
}

private async createDirectories(temp:string){
 try{
 const dir720p = `${temp}/720p`;
 const dir480p = `${temp}/480p`;
 const dir720pExists = await fs.access(dir720p).then(() => true).catch(() => false);
 const dir480pExists = await fs.access(dir480p).then(() => true).catch(() => false);
 if(dir720pExists && dir480pExists){
 console.log('FILES ALIVE');
 return;
 }
 if (!dir720pExists) {
 await fs.mkdir(dir720p, { recursive: true });
 console.log('720p folder created');
 }
 
 if (!dir480pExists) {
 await fs.mkdir(dir480p, { recursive: true });
 console.log('480p folder created');
 }
 } catch (err) {
 console.error('Error creating directories:', err);
 }
}
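One hedged aside on the service above: fluent-ffmpeg is only a wrapper and needs a real ffmpeg binary it can spawn. If ffmpeg is not already on PATH, one common setup (assuming the ffmpeg-static package is installed) is:
// Point fluent-ffmpeg at a bundled binary (sketch; ffmpeg-static is an assumption).
import ffmpeg from 'fluent-ffmpeg';
import ffmpegStatic from 'ffmpeg-static';

ffmpeg.setFfmpegPath(ffmpegStatic as string);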


The frontend code continues:


let videoPath;

if (quality === '720p') {
 videoPath = videoPaths[0];
} else if (quality === '480p') {
 videoPath = videoPaths[1];
}

if (!videoPath) {
 console.error('Video path not found!');
 return;
}

// Fetch the video by its path
console.log('VIDEOPATH LOG: ',videoPath);
 
const videoRes = await axios.get('/videoFormat/getVideo', { 
 params: { path: videoPath } ,
 headers: { Range: 'bytes=0-' },
 responseType: 'blob'
 });
 console.log('Video fetched: ', videoRes);
 const videoBlob = new Blob([videoRes.data], { type: 'video/mp4' });
 const videoURL = URL.createObjectURL(videoBlob);
 return videoURL;
 /* console.log('Video loaded successfully:', response.data); */
 } catch (error) {
 console.error('Error loading video:', error);
 }
}



Here I just choose one of the routes and make a new GET request (videoRes). Now, in the controller on the backend, I am trying to stream the video:


@Public()
 @Get('/getVideo')
 async getVideo(@Query('path') videoPath:string,@Req() req:Request,@Res() res:Response){
 try {
 console.log('PATH ARGUMENT: ',videoPath);
 console.log('VIDEOPATH IN SERVICE: ',videoPath);
 const videoSize = (await fs.stat(videoPath)).size;
 const CHUNK_SIZE = 10 ** 6;
 const range = req.headers['range'] as string | undefined;
 if (!range) {
 throw new ForbiddenException('Range header not found');
 }
 const start = Number(range.replace(/\D/g,""));
 const end = Math.min(start + CHUNK_SIZE,videoSize - 1);

 const contentLength = end - start + 1;
 const videoStream = fsSync.createReadStream(videoPath, { start, end });
 const headers = {
 'Content-Range':`bytes ${start}-${end}/${videoSize}`,
 'Accept-Ranges':'bytes',
 'Content-Length':contentLength,
 'Content-Type':'video/mp4'
 }
 
 res.writeHead(206,headers);

 // Pipe the stream into the response
 videoStream.pipe(res);
 

 // If an error occurs during streaming, log it
 videoStream.on('error', (error) => {
 console.error('Error reading video:', error);
 res.status(500).send('Error reading video');
 });
 } catch (error) {
 console.error('Error handling the request:', error);
 return res.status(400).json({ message: 'Error handling the getVideo request' });
 }
 }
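A hedged aside on the Range handling above: range.replace(/\D/g,"") concatenates every digit in the header into one number, which happens to work for "bytes=0-" but would turn "bytes=100-200" into 100200. A stricter parse might look like:
// Stricter Range parsing (sketch): handles "bytes=start-" and "bytes=start-end".
function parseRange(range: string, videoSize: number): { start: number; end: number } | null {
  const match = /^bytes=(\d+)-(\d*)$/.exec(range);
  if (!match) return null;
  const start = Number(match[1]);
  const end = match[2] ? Number(match[2]) : videoSize - 1;
  return start <= end && end < videoSize ? { start, end } : null;
}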



Then I send it to the frontend:


res.writeHead(206,headers);



On the frontend, I create a blob URL for the video src and return it:


const videoBlob = new Blob([videoRes.data], { type: 'video/mp4' });
const videoURL = URL.createObjectURL(videoBlob);
return videoURL;



And assign the src to the video:


useVideo(seriesName,quality).then(src => {
 if (src) {
 console.log('ITS VIDEOLOGISC GOIDA!');
 if(!playRef.current) return;
 
 const oldURL = playRef.current.src;
 if (oldURL && oldURL.startsWith('blob:')) {
 URL.revokeObjectURL(oldURL);
 }
 playRef.current.pause();
 playRef.current.src = '';
 setQuality(quality);
 console.log('SRC: ',src);
 
 playRef.current.src = src;
 playRef.current.load();
 console.log('ITS VIDEOURL GOIDA!');
 togglePlayPause();
 }
 })
 .catch(err => console.error('Failed to fetch video', err));



But the problem is:




Vinland-Saga:1 Uncaught (in promise) NotSupportedError: Failed to load because no supported source was found




And I don't know why...


I have tried everything, but I still don't understand why the src is incorrect.
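A hedged observation (not from the original post): the frontend issues a single request with Range: bytes=0-, the controller answers 206 with only the first CHUNK_SIZE bytes, and that single ~1 MB slice is wrapped in a Blob; a truncated MP4 like that is one plausible cause of the NotSupportedError. Pointing the video element directly at the endpoint, so the browser issues its own Range requests, sidesteps the Blob entirely. A minimal sketch using the question's own endpoint:
// Sketch: let the browser stream and range-request the video itself.
const src = '/videoFormat/getVideo?path=' + encodeURIComponent(videoPath);
playRef.current.src = src;   // instead of a blob: URL built from one chunk
playRef.current.load();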