
Media (1)
-
The Pirate Bay from Belgium
1 April 2013
Updated: April 2013
Language: French
Type: Image
Other articles (100)
-
List of compatible distributions
26 April 2011 — The table below lists the Linux distributions compatible with the automated installation script of MediaSPIP.

 Distribution | Version name         | Version number
 Debian       | Squeeze              | 6.x.x
 Debian       | Wheezy               | 7.x.x
 Debian       | Jessie               | 8.x.x
 Ubuntu       | The Precise Pangolin | 12.04 LTS
 Ubuntu       | The Trusty Tahr      | 14.04

If you want to help us improve this list, you can give us access to a machine whose distribution is not mentioned above, or send us the fixes needed to add it (...)
-
Authorizations overridden by plugins
27 April 2010 — MediaSPIP core:
autoriser_auteur_modifier() so that visitors are able to edit their own information on the authors page
-
Enabling visitor registration
12 April 2011 — It is also possible to enable visitor registration, which lets anyone open an account on the channel in question, for instance for open projects.
To do so, go to the site configuration area and choose the "Gestion des utilisateurs" (user management) submenu. The first form shown corresponds to this feature.
By default, MediaSPIP created a menu item in the top menu of the page during its initialization, leading (...)
On other sites (9054)
-
How to add audio using ffmpeg when recording video from the browser and streaming to YouTube/Twitch?
26 July 2021, by Tosh Velaga — I have a web application I am working on that allows the user to stream video from their browser and simultaneously livestream to both YouTube and Twitch using ffmpeg. The application works fine when I don't need to send any of the audio. Currently I am getting the error below when I try to record video and audio together. I am new to using ffmpeg, so any help would be greatly appreciated. Here is also my repo if needed: https://github.com/toshvelaga/livestream


Here is my Node.js server with ffmpeg:


const child_process = require('child_process') // To be used later for running FFmpeg
const express = require('express')
const http = require('http')
const WebSocketServer = require('ws').Server
const NodeMediaServer = require('node-media-server')
const app = express()
const cors = require('cors')
const path = require('path')
const logger = require('morgan')
require('dotenv').config()

app.use(logger('dev'))
app.use(cors())

app.use(express.json({ limit: '200mb', extended: true }))
app.use(
 express.urlencoded({ limit: '200mb', extended: true, parameterLimit: 50000 })
)

var authRouter = require('./routes/auth')
var compareCodeRouter = require('./routes/compareCode')

app.use('/', authRouter)
app.use('/', compareCodeRouter)

if (process.env.NODE_ENV === 'production') {
 // serve static content
 // npm run build
 app.use(express.static(path.join(__dirname, 'client/build')))

 app.get('*', (req, res) => {
 res.sendFile(path.join(__dirname, 'client/build', 'index.html'))
 })
}

const PORT = process.env.PORT || 8080

app.listen(PORT, () => {
 console.log(`Server is starting on port ${PORT}`)
})

const server = http.createServer(app).listen(3000, () => {
 console.log('Listening on PORT 3000...')
})


const wss = new WebSocketServer({
 server: server,
})

wss.on('connection', (ws, req) => {
 const ffmpeg = child_process.spawn('ffmpeg', [
 // works fine when I use this, but problems arise when I need real audio
 // '-f',
 // 'lavfi',
 // '-i',
 // 'anullsrc',

 '-i',
 '-',

 '-f',
 'flv',
 '-c',
 'copy',
 `${process.env.TWITCH_STREAM_ADDRESS}`,
 '-f',
 'flv',
 '-c',
 'copy',
 `${process.env.YOUTUBE_STREAM_ADDRESS}`,
 // '-f',
 // 'flv',
 // '-c',
 // 'copy',
 // `${process.env.FACEBOOK_STREAM_ADDRESS}`,
 ])

 ffmpeg.on('close', (code, signal) => {
 console.log(
 'FFmpeg child process closed, code ' + code + ', signal ' + signal
 )
 ws.terminate()
 })

 ffmpeg.stdin.on('error', (e) => {
 console.log('FFmpeg STDIN Error', e)
 })

 ffmpeg.stderr.on('data', (data) => {
 console.log('FFmpeg STDERR:', data.toString())
 })

 ws.on('message', (msg) => {
 console.log('DATA', msg)
 ffmpeg.stdin.write(msg)
 })

 ws.on('close', (e) => {
 console.log('kill: SIGINT')
 ffmpeg.kill('SIGINT')
 })
})

const config = {
 rtmp: {
 port: 1935,
 chunk_size: 60000,
 gop_cache: true,
 ping: 30,
 ping_timeout: 60,
 },
 http: {
 port: 8000,
 allow_origin: '*',
 },
}

var nms = new NodeMediaServer(config)
nms.run()
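
One way to bring audio into this pipeline, offered as a sketch rather than a confirmed fix: RTMP/FLV (what Twitch and YouTube ingest) cannot carry Opus, which is what MediaRecorder typically produces for WebM audio, so a plain '-c', 'copy' breaks once an audio track is present. Assuming the browser sends a single WebM stream with both tracks on stdin, the spawn arguments could copy the video and transcode only the audio to AAC (the bitrate and sample rate below are illustrative, not requirements):

const ffmpeg = child_process.spawn('ffmpeg', [
 '-i', '-', // muxed WebM (H.264 + Opus) arriving on stdin
 '-map', '0:v', '-map', '0:a', // keep both the video and the audio track
 '-c:v', 'copy', // H.264 can pass straight through into FLV
 '-c:a', 'aac', // FLV cannot carry Opus, so transcode the audio
 '-b:a', '128k', '-ar', '44100',
 '-f', 'flv', `${process.env.TWITCH_STREAM_ADDRESS}`,
])

The same '-f flv' output group can be repeated for the YouTube address, exactly as in the copy-only version above.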



Here is my frontend code that records the video/audio and sends it to the server:


import React, { useState, useEffect, useRef } from 'react'
import Navbar from '../../components/Navbar/Navbar'
import './Dashboard.css'

const CAPTURE_OPTIONS = {
 audio: true,
 video: true,
}

function Dashboard() {
 const [mute, setMute] = useState(false)
 const videoRef = useRef()
 const ws = useRef()
 const mediaStream = useUserMedia(CAPTURE_OPTIONS)

 let liveStream
 let liveStreamRecorder

 if (mediaStream && videoRef.current && !videoRef.current.srcObject) {
 videoRef.current.srcObject = mediaStream
 }

 const handleCanPlay = () => {
 videoRef.current.play()
 }

 useEffect(() => {
 ws.current = new WebSocket(
 window.location.protocol.replace('http', 'ws') +
 '//' + // http: -> ws:, https: -> wss:
 'localhost:3000'
 )

 ws.current.onopen = () => {
 console.log('WebSocket Open')
 }

 return () => {
 ws.current.close()
 }
 }, [])

 const startStream = () => {
 liveStream = videoRef.current.captureStream(30) // 30 FPS
 liveStreamRecorder = new MediaRecorder(liveStream, {
 mimeType: 'video/webm;codecs=h264',
 videoBitsPerSecond: 3 * 1024 * 1024,
 })
 liveStreamRecorder.ondataavailable = (e) => {
 ws.current.send(e.data)
 console.log('send data', e.data)
 }
 // Start recording, and dump data every second
 liveStreamRecorder.start(1000)
 }

 const stopStream = () => {
 liveStreamRecorder.stop()
 ws.current.close()
 }

 const toggleMute = () => {
 setMute(!mute)
 }

 return (
 <>
 <Navbar />
 <div className='main'>
 <div>
 <video ref={videoRef} onCanPlay={handleCanPlay} autoPlay playsInline muted={mute} />
 </div>
 <div className='button-container'>
 <button onClick={startStream}>Go Live</button>
 <button onClick={stopStream}>Stop Recording</button>
 <button>Share Screen</button>
 <button onClick={toggleMute}>Mute</button>
 </div>
 </div>
 </>
 )
}

const useUserMedia = (requestedMedia) => {
 const [mediaStream, setMediaStream] = useState(null)

 useEffect(() => {
 async function enableStream() {
 try {
 const stream = await navigator.mediaDevices.getUserMedia(requestedMedia)
 setMediaStream(stream)
 } catch (err) {
 console.log(err)
 }
 }

 if (!mediaStream) {
 enableStream()
 } else {
 return function cleanup() {
 mediaStream.getVideoTracks().forEach((track) => {
 track.stop()
 })
 }
 }
 }, [mediaStream, requestedMedia])

 return mediaStream
}

export default Dashboard
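
One capture-side detail, offered as an assumption to verify rather than a certain cause: captureStream() re-captures whatever the <video> element is playing, and whether the audio track survives that round trip can vary by browser. A minimal alternative sketch that records the getUserMedia stream (the mediaStream variable above) directly, so its Opus audio track is definitely included:

const startStream = () => {
 // record the camera/microphone stream itself instead of the <video> element
 liveStreamRecorder = new MediaRecorder(mediaStream, {
 mimeType: 'video/webm;codecs=h264,opus', // request both codecs explicitly
 videoBitsPerSecond: 3 * 1024 * 1024,
 })
 liveStreamRecorder.ondataavailable = (e) => ws.current.send(e.data)
 liveStreamRecorder.start(1000) // flush a chunk every second
}

MediaRecorder.isTypeSupported('video/webm;codecs=h264,opus') can be checked first; if it returns false, plain 'video/webm' lets the browser pick its default codecs.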



-
Why can't my ffmpeg-based code keep video time and audio time in sync?
6 July 2021, by ZSpirytus

Background

Recently, I used ffmpeg to write my first Android video player. But the video channel's time runs about 2 to 3 times faster than the audio channel's time.


Code


In short, I use a PacketDispatcher to read AVPackets from an HTTP-FLV source:


void PacketDispatcher::RealDispatch() {
 while (GetStatus() != DISPATCHER_STOP) {
 while (GetStatus() == DISPATCHER_PAUSE) {
 LOGD(TAG, "wait signal");
 pthread_mutex_lock(&mutex);
 pthread_cond_wait(&cond, &mutex);
 pthread_mutex_unlock(&mutex);
 }

 AVPacket *av_packet = av_packet_alloc();
 int ret = av_read_frame(av_format_context, av_packet);
 if (ret) {
 LOGE(TAG, "av_read_frame ret=%d", ret);
 break;
 }

 // PacketDispatcher is the component that reads AVPackets from the HTTP-FLV
 // source and dispatches each one to a decoder keyed by its stream index.
 decoder_map[av_packet->stream_index]->Push(av_packet);
 }
}



Then the Decoder, written with the producer-consumer pattern, maintains a queue that stores all the AVPackets received from the PacketDispatcher. The code looks like this:


// write to the queue
void BaseDecoder::Push(AVPacket *av_packet) {
 pthread_mutex_lock(&av_packet_queue_mutex);
 av_packet_queue.push(av_packet);
 pthread_cond_signal(&av_packet_queue_cond);
 pthread_mutex_unlock(&av_packet_queue_mutex);
}

// real decode logic
void BaseDecoder::RealDecode() {
 SetDecoderState(START);
 LOGI(LogSpec(), "start decode");

 while (true) {
 // 1. check decoder status and queue size to decide if call thread.wait

 // 2. send packet to codec
 AVPacket* av_packet = av_packet_queue.front();
 int ret = avcodec_send_packet(av_codec_ctx, av_packet);

 // 3. read frame from codec
 AVFrame *av_frame = av_frame_alloc();
 ret = avcodec_receive_frame(av_codec_ctx, av_frame);

 if (m_render) {
 // 4. custom decode logic overridden by the child class
 void *decode_result = DecodeFrame(av_frame);
 if (decode_result) {
 // 5. dispatch to render
 m_render->Render(decode_result);
 } else {
 LOGD("BaseDecoder", "decode_result=nullptr");
 }
 }
 }
}



Finally, I do the rendering logic in Render. Render is also written with the producer-consumer pattern; it maintains a queue that stores the AVFrames received from the Decoder. The code looks like this:


// write AVFrame
void BaseRender::Render(void *frame_data) {
 Lock();
 frame_queue.push(frame_data);
 Signal();
 UnLock();
}

// render to surface or OpenSL ES
void BaseRender::RealRender() {
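 // (excerpt: the enclosing loop that pops frame_queue is omitted here, which
 // is why the bare 'continue' below has no visible loop around it)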
 // frame data that contain frame pts and other metadata
 frame_data->pts = av_frame->pts = av_frame->best_effort_timestamp * av_q2d(GetTimeBase());
 // video only
 frame_data->video_extra_delay = av_frame->repeat_pict * 1.0 / fps * 2.0;
 if (m_render_synchronizer && m_render_synchronizer->Sync(frame_data)) {
 continue;
 }
}



The synchronizer then decides how long to sleep, or whether to drop the video frame, according to the frame pts. The frame pts is:


frame_data->pts = av_frame->best_effort_timestamp * av_q2d(GetTimeBase());



Also, the video extra delay is:


frame_data->video_extra_delay = av_frame->repeat_pict * 1.0 / fps * 2.0;
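
To make the units concrete: with a 1/90000 time base and best_effort_timestamp = 270000, the pts works out to 270000 × 1/90000 = 3.0 seconds, so the two pts values compared by the synchronizer are both in seconds. One thing worth double-checking: the conventional repeat_pict correction (as in ffplay) is repeat_pict / (2 × fps), half a frame period per repeat, whereas repeat_pict * 1.0 / fps * 2.0 evaluates to repeat_pict × 2 / fps, four times as large; this only matters if the source actually sets repeat_pict.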



The RenderSynchronizer code looks like this:


bool RenderSynchronizer::Sync(void *frame_data) {
 auto base_frame_data = static_cast<BaseFrameData *>(frame_data);
 if (base_frame_data->media_type == AVMEDIA_TYPE_AUDIO) {
 return ReceiveAudioFrame(static_cast<PCMData *>(frame_data));
 } else if (base_frame_data->media_type == AVMEDIA_TYPE_VIDEO) {
 return ReceiveVideoFrame(static_cast<RGBAData *>(frame_data));
 }
 return false;
}

bool RenderSynchronizer::ReceiveAudioFrame(PCMData *pcm_data) {
 audio_pts = pcm_data->pts;
 return false;
}

bool RenderSynchronizer::ReceiveVideoFrame(RGBAData *rgba_data) {
 video_pts = rgba_data->pts;

 if (audio_pts <= 0 || video_pts <= 0) {
 return false;
 }

 double diff = video_pts - audio_pts;
 if (diff > 0) {
 if (diff > 1) {
 av_usleep((unsigned int) (rgba_data->extra_delay * 1000000.0));
 } else {
 av_usleep((unsigned int) ((diff + rgba_data->extra_delay) * 1000000.0));
 }
 return false;
 } else if (diff < 0) {
 LOGD(TAG, "drop video frame");
 return true;
 } else {
 return false;
 }
}


Why can't my code keep video time and audio time in sync? Thanks for reading, and for any answers.


-
No video above 360p with sound (ytdl-core)
26 June 2021, by Ashish — I am able to download separate YouTube video and audio streams, but the merge is not working: a 0 KB file is downloaded.


let info = await ytdl.getInfo(req.body.videoId);
let formatVideo = ytdl.chooseFormat(info.formats, { quality: req.body.format.itag, filter: 'videoonly' });
let formatAudio = ytdl.chooseFormat(info.formats, { quality: req.body.audioTag });
// res.json({video: formatVideo, audio: formatAudio});
const tracker = {
 start: Date.now(),
 audio: { downloaded: 0, total: Infinity },
 video: { downloaded: 0, total: Infinity },
 merged: { frame: 0, speed: '0x', fps: 0 },
};
res.header('Content-Disposition', `attachment; filename=${fileName}`);

cp.exec(`ffmpeg -i ${videoStream} -i ${audioStream} -c copy ${fileName}`, (error, success) => {
 if (!error) {
 console.log(success);
 } else {
 console.log(error);
 }
});
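// (note: videoStream and audioStream are declared with const much further down,
// so if this is all one scope, referencing them here throws before they are
// initialized; this exec call looks like a leftover from an earlier attempt)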
// Prepare the progress bar
let progressbarHandle = null;
const progressbarInterval = 1000;
const showProgress = () => {
 console.log('test');
 readline.cursorTo(process.stdout, 0);
 const toMB = i => (i / 1024 / 1024).toFixed(2);

 process.stdout.write(`Audio | ${(tracker.audio.downloaded / tracker.audio.total * 100).toFixed(2)}% processed `);
 process.stdout.write(`(${toMB(tracker.audio.downloaded)}MB of ${toMB(tracker.audio.total)}MB).${' '.repeat(10)}\n`);

 process.stdout.write(`Video | ${(tracker.video.downloaded / tracker.video.total * 100).toFixed(2)}% processed `);
 process.stdout.write(`(${toMB(tracker.video.downloaded)}MB of ${toMB(tracker.video.total)}MB).${' '.repeat(10)}\n`);

 process.stdout.write(`Merged | processing frame ${tracker.merged.frame} `);
 process.stdout.write(`(at ${tracker.merged.fps} fps => ${tracker.merged.speed}).${' '.repeat(10)}\n`);

 process.stdout.write(`running for: ${((Date.now() - tracker.start) / 1000 / 60).toFixed(2)} Minutes.`);
 readline.moveCursor(process.stdout, 0, -3);
};

// Start the ffmpeg child process
const ffmpegProcess = cp.spawn(ffmpeg, [
 // Remove ffmpeg's console spamming
 '-loglevel', '8', '-hide_banner',
 // Redirect/Enable progress messages
 '-progress', 'pipe:3',
 // Set inputs
 '-i', 'pipe:4',
 '-i', 'pipe:5',
 // Map audio & video from streams
 '-map', '0:a',
 '-map', '1:v',
 // Keep encoding
 '-c:v', 'copy',
 // Define output file
 `${fileName}`,
], {
 windowsHide: true,
 stdio: [
 /* Standard: stdin, stdout, stderr */
 'inherit', 'inherit', 'inherit',
 /* Custom: pipe:3, pipe:4, pipe:5 */
 'pipe', 'pipe', 'pipe', 'pipe'
 ],
});
ffmpegProcess.on('close', () => {
 console.log('done');
 // Cleanup
 process.stdout.write('\n\n\n\n');
 clearInterval(progressbarHandle);
 console.log(tracker, '146');
});

// Link streams
// FFmpeg creates the transformer streams and we just have to insert / read data
ffmpegProcess.stdio[3].on('data', chunk => {
 console.log(chunk, '152');
 // Start the progress bar
 if (!progressbarHandle) progressbarHandle = setInterval(showProgress, progressbarInterval);
 // Parse the param=value list returned by ffmpeg
 const lines = chunk.toString().trim().split('\n');
 const args = {};
 for (const l of lines) {
 const [key, value] = l.split('=');
 args[key.trim()] = value.trim();
 }
 tracker.merged = args;
 // console.log(tracker.merged, '162');
});
// audio.pipe(ffmpegProcess.stdio[4]);
// video.pipe(ffmpegProcess.stdio[5]);
const audioStream = await requestObj.get(formatAudio.url);
const videoStream = await requestObj.get(formatVideo.url);
audioStream.pipe(ffmpegProcess.stdio[4]);
videoStream.pipe(ffmpegProcess.stdio[5]);
ffmpegProcess.stdio[6].pipe(res);



Here requestObj is the Node.js 'request' module.
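
A plausible explanation for the 0 KB file, offered as a hypothesis rather than a confirmed diagnosis: the spawn arguments direct ffmpeg's output to the file ${fileName} on disk, so ffmpegProcess.stdio[6] is a pipe that ffmpeg never writes to, and piping it into res sends nothing. A minimal sketch that streams the muxed result back over the response instead, assuming Matroska is an acceptable download container (MP4 cannot be written to a non-seekable pipe without extra flags):

const ffmpegProcess = cp.spawn(ffmpeg, [
 '-loglevel', '8', '-hide_banner',
 '-progress', 'pipe:3',
 '-i', 'pipe:4', // audio in
 '-i', 'pipe:5', // video in
 '-map', '0:a', '-map', '1:v',
 '-c', 'copy',
 '-f', 'matroska', 'pipe:6', // send the merged stream to fd 6 instead of a file
], {
 windowsHide: true,
 stdio: ['inherit', 'inherit', 'inherit', 'pipe', 'pipe', 'pipe', 'pipe'],
})

audioStream.pipe(ffmpegProcess.stdio[4])
videoStream.pipe(ffmpegProcess.stdio[5])
ffmpegProcess.stdio[6].pipe(res) // fd 6 now actually carries data

With this change the filename in the Content-Disposition header should end in .mkv so the saved file matches its container.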