
Other articles (100)

  • List of compatible distributions

    26 April 2011

    The table below lists the Linux distributions compatible with MediaSPIP's automated installation script.

    Distribution name   Version name           Version number
    Debian              Squeeze                6.x.x
    Debian              Wheezy                 7.x.x
    Debian              Jessie                 8.x.x
    Ubuntu              The Precise Pangolin   12.04 LTS
    Ubuntu              The Trusty Tahr        14.04

    If you want to help us improve this list, you can give us access to a machine whose distribution is not mentioned above, or send us the fixes needed to add (...)

  • Permissions overridden by plugins

    27 April 2010

    MediaSPIP core
    autoriser_auteur_modifier() so that visitors can edit their own information on the authors page

  • Enabling visitor registration

    12 April 2011

    It is also possible to enable visitor registration, which lets anyone open an account on the channel in question themselves, for example in the context of open projects.
    To do so, go to the site's configuration area and choose the "Gestion des utilisateurs" (user management) submenu. The first form shown corresponds to this feature.
    By default, MediaSPIP created during its initialization a menu item in the top menu of the page leading (...)

On other sites (9054)

  • How to add audio using ffmpeg when recording video from the browser and streaming to YouTube/Twitch?

    26 July 2021, by Tosh Velaga

    I have a web application I am working on that allows the user to stream video from their browser and simultaneously livestream to both YouTube and Twitch using ffmpeg. The application works fine when I don't need to send any of the audio. Currently I am getting the error below when I try to record video and audio. I am new to using ffmpeg, so any help would be greatly appreciated. Here is my repo if needed: https://github.com/toshvelaga/livestream

    Here is my Node.js server with ffmpeg:

    const child_process = require('child_process') // To be used later for running FFmpeg
const express = require('express')
const http = require('http')
const WebSocketServer = require('ws').Server
const NodeMediaServer = require('node-media-server')
const app = express()
const cors = require('cors')
const path = require('path')
const logger = require('morgan')
require('dotenv').config()

app.use(logger('dev'))
app.use(cors())

app.use(express.json({ limit: '200mb', extended: true }))
app.use(
  express.urlencoded({ limit: '200mb', extended: true, parameterLimit: 50000 })
)

var authRouter = require('./routes/auth')
var compareCodeRouter = require('./routes/compareCode')

app.use('/', authRouter)
app.use('/', compareCodeRouter)

if (process.env.NODE_ENV === 'production') {
  // serve static content
  // npm run build
  app.use(express.static(path.join(__dirname, 'client/build')))

  app.get('*', (req, res) => {
    res.sendFile(path.join(__dirname, 'client/build', 'index.html'))
  })
}

const PORT = process.env.PORT || 8080

app.listen(PORT, () => {
  console.log(`Server is starting on port ${PORT}`)
})

const server = http.createServer(app).listen(3000, () => {
  console.log('Listening on PORT 3000...')
})


const wss = new WebSocketServer({
  server: server,
})

wss.on('connection', (ws, req) => {
  const ffmpeg = child_process.spawn('ffmpeg', [
    // works fine when I use this but when I need audio problems arise
    // '-f',
    // 'lavfi',
    // '-i',
    // 'anullsrc',

    '-i',
    '-',

    '-f',
    'flv',
    '-c',
    'copy',
    `${process.env.TWITCH_STREAM_ADDRESS}`,
    '-f',
    'flv',
    '-c',
    'copy',
    `${process.env.YOUTUBE_STREAM_ADDRESS}`,
    // '-f',
    // 'flv',
    // '-c',
    // 'copy',
    // `${process.env.FACEBOOK_STREAM_ADDRESS}`,
  ])

  ffmpeg.on('close', (code, signal) => {
    console.log(
      'FFmpeg child process closed, code ' + code + ', signal ' + signal
    )
    ws.terminate()
  })

  ffmpeg.stdin.on('error', (e) => {
    console.log('FFmpeg STDIN Error', e)
  })

  ffmpeg.stderr.on('data', (data) => {
    console.log('FFmpeg STDERR:', data.toString())
  })

  ws.on('message', (msg) => {
    console.log('DATA', msg)
    ffmpeg.stdin.write(msg)
  })

  ws.on('close', (e) => {
    console.log('kill: SIGINT')
    ffmpeg.kill('SIGINT')
  })
})

const config = {
  rtmp: {
    port: 1935,
    chunk_size: 60000,
    gop_cache: true,
    ping: 30,
    ping_timeout: 60,
  },
  http: {
    port: 8000,
    allow_origin: '*',
  },
}

var nms = new NodeMediaServer(config)
nms.run()

    Here is my frontend code that records the video/audio and sends it to the server:

import React, { useState, useEffect, useRef } from 'react'
import Navbar from '../../components/Navbar/Navbar'
import './Dashboard.css'

const CAPTURE_OPTIONS = {
  audio: true,
  video: true,
}

function Dashboard() {
  const [mute, setMute] = useState(false)
  const videoRef = useRef()
  const ws = useRef()
  const mediaStream = useUserMedia(CAPTURE_OPTIONS)

  let liveStream
  let liveStreamRecorder

  if (mediaStream && videoRef.current && !videoRef.current.srcObject) {
    videoRef.current.srcObject = mediaStream
  }

  const handleCanPlay = () => {
    videoRef.current.play()
  }

  useEffect(() => {
    ws.current = new WebSocket(
      window.location.protocol.replace('http', 'ws') +
        '//' + // http: -> ws:, https: -> wss:
        'localhost:3000'
    )

    ws.current.onopen = () => {
      console.log('WebSocket Open')
    }

    return () => {
      ws.current.close()
    }
  }, [])

  const startStream = () => {
    liveStream = videoRef.current.captureStream(30) // 30 FPS
    liveStreamRecorder = new MediaRecorder(liveStream, {
      mimeType: 'video/webm;codecs=h264',
      videoBitsPerSecond: 3 * 1024 * 1024,
    })
    liveStreamRecorder.ondataavailable = (e) => {
      ws.current.send(e.data)
      console.log('send data', e.data)
    }
    // Start recording, and dump data every second
    liveStreamRecorder.start(1000)
  }

  const stopStream = () => {
    liveStreamRecorder.stop()
    ws.current.close()
  }

  const toggleMute = () => {
    setMute(!mute)
  }

  // NOTE: the JSX below was mangled by HTML extraction; the video element and
  // the onClick handlers are reconstructed from the functions defined above.
  return (
    <>
      <Navbar />
      <div className='main'>
        <div>
          <video ref={videoRef} onCanPlay={handleCanPlay} autoPlay muted={mute} />
        </div>
        <div className='button-container'>
          <button onClick={startStream}>Go Live</button>
          <button onClick={stopStream}>Stop Recording</button>
          <button>Share Screen</button>
          <button onClick={toggleMute}>Mute</button>
        </div>
      </div>
    </>
  )
}

const useUserMedia = (requestedMedia) => {
  const [mediaStream, setMediaStream] = useState(null)

  useEffect(() => {
    async function enableStream() {
      try {
        const stream = await navigator.mediaDevices.getUserMedia(requestedMedia)
        setMediaStream(stream)
      } catch (err) {
        console.log(err)
      }
    }

    if (!mediaStream) {
      enableStream()
    } else {
      return function cleanup() {
        mediaStream.getVideoTracks().forEach((track) => {
          track.stop()
        })
      }
    }
  }, [mediaStream, requestedMedia])

  return mediaStream
}

export default Dashboard

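    A hedged note on the setup above: with audio enabled in the capture options, MediaRecorder typically produces WebM with Opus audio, and the FLV container used for RTMP does not accept Opus, so '-c copy' can fail as soon as an audio track is present. Below is a minimal sketch (not a confirmed fix) of spawn arguments that copy the video but re-encode the audio to AAC; the bitrate and sample rate are assumptions to tune:

// Sketch: same stdin-fed ffmpeg child as above, but audio is transcoded
// to AAC (valid in FLV) instead of being copied.
const ffmpeg = child_process.spawn('ffmpeg', [
  '-i', '-', // read the recorded WebM chunks from stdin

  // Twitch output: copy video, re-encode audio for FLV
  '-c:v', 'copy',
  '-c:a', 'aac',
  '-b:a', '128k', // assumed audio bitrate
  '-ar', '44100', // assumed sample rate
  '-f', 'flv',
  `${process.env.TWITCH_STREAM_ADDRESS}`,

  // YouTube output: same treatment
  '-c:v', 'copy',
  '-c:a', 'aac',
  '-b:a', '128k',
  '-ar', '44100',
  '-f', 'flv',
  `${process.env.YOUTUBE_STREAM_ADDRESS}`,
])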

  • Why can't my code, based on ffmpeg, sync video time and audio time?

    6 July 2021, by ZSpirytus

    Background


    Recently, I used ffmpeg to write my first Android video player, but the video channel's clock runs about 2-3 times faster than the audio channel's.


    Code


    In short, I use PacketDispatcher to read AVPackets from an HTTP FLV source:


void PacketDispatcher::RealDispatch() {
    while (GetStatus() != DISPATCHER_STOP) {
        while (GetStatus() == DISPATCHER_PAUSE) {
            LOGD(TAG, "wait signal");
            pthread_mutex_lock(&mutex);
            pthread_cond_wait(&cond, &mutex);
            pthread_mutex_unlock(&mutex);
        }

        AVPacket *av_packet = av_packet_alloc();
        int ret = av_read_frame(av_format_context, av_packet);
        if (ret) {
            LOGE(TAG, "av_read_frame ret=%d", ret);
            break;
        }

        // PacketDispatcher reads each AVPacket from the HTTP FLV source
        // and dispatches it to a decoder by stream index.
        decoder_map[av_packet->stream_index]->Push(av_packet);
    }
}


    The Decoder is written in the producer-consumer pattern: it maintains a queue that stores the AVPackets received from PacketDispatcher. The code looks like this:


// write to the queue
void BaseDecoder::Push(AVPacket *av_packet) {
    pthread_mutex_lock(&av_packet_queue_mutex);
    av_packet_queue.push(av_packet);
    pthread_cond_signal(&av_packet_queue_cond);
    pthread_mutex_unlock(&av_packet_queue_mutex);
}

// real decode logic
void BaseDecoder::RealDecode() {
    SetDecoderState(START);
    LOGI(LogSpec(), "start decode");

    while (true) {
        // 1. check decoder status and queue size to decide whether to wait

        // 2. send packet to codec
        AVPacket* av_packet = av_packet_queue.front();
        int ret = avcodec_send_packet(av_codec_ctx, av_packet);

        // 3. read frame from codec
        AVFrame *av_frame = av_frame_alloc();
        ret = avcodec_receive_frame(av_codec_ctx, av_frame);

        if (m_render) {
            // 4. custom decode logic overridden by child class
            void *decode_result = DecodeFrame(av_frame);
            if (decode_result) {
                // 5. dispatch to render
                m_render->Render(decode_result);
            } else {
                LOGD("BaseDecoder", "decode_result=nullptr");
            }
        }
    }
}


    Finally, the rendering logic lives in Render, which is also written in the producer-consumer pattern: it maintains a queue that stores the AVFrames received from the Decoder. The code looks like this:


// write AVFrame
void BaseRender::Render(void *frame_data) {
    Lock();
    frame_queue.push(frame_data);
    Signal();
    UnLock();
}

// render to surface or OpenSL
void BaseRender::RealRender() {
    // frame data that contains the frame pts and other metadata
    frame_data->pts = av_frame->pts = av_frame->best_effort_timestamp * av_q2d(GetTimeBase());
    // video only
    frame_data->video_extra_delay = av_frame->repeat_pict * 1.0 / fps * 2.0;
    if (m_render_synchronizer && m_render_synchronizer->Sync(frame_data)) {
        continue;
    }
}


    The synchronizer then decides whether to sleep or to drop the video frame according to the frame pts, which is computed as:


frame_data->pts = av_frame->best_effort_timestamp * av_q2d(GetTimeBase());


    Also, the video extra delay is:


frame_data->video_extra_delay = av_frame->repeat_pict * 1.0 / fps * 2.0;


    The RenderSynchronizer code looks like this:


bool RenderSynchronizer::Sync(void *frame_data) {
    auto base_frame_data = static_cast<BaseFrameData *>(frame_data);
    if (base_frame_data->media_type == AVMEDIA_TYPE_AUDIO) {
        return ReceiveAudioFrame(static_cast<PCMData *>(frame_data));
    } else if (base_frame_data->media_type == AVMEDIA_TYPE_VIDEO) {
        return ReceiveVideoFrame(static_cast<RGBAData *>(frame_data));
    }
    return false;
}

bool RenderSynchronizer::ReceiveAudioFrame(PCMData *pcm_data) {
    audio_pts = pcm_data->pts;
    return false;
}

bool RenderSynchronizer::ReceiveVideoFrame(RGBAData *rgba_data) {
    video_pts = rgba_data->pts;

    if (audio_pts <= 0 || video_pts <= 0) {
        return false;
    }

    double diff = video_pts - audio_pts;
    if (diff > 0) {
        if (diff > 1) {
            av_usleep((unsigned int) (rgba_data->extra_delay * 1000000.0));
        } else {
            av_usleep((unsigned int) ((diff + rgba_data->extra_delay) * 1000000.0));
        }
        return false;
    } else if (diff < 0) {
        LOGD(TAG, "drop video frame");
        return true;
    } else {
        return false;
    }
}


    Why can't my code keep video time and audio time in sync? Thanks for reading and for your answers.

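    For intuition on the pts arithmetic above, here is a small numeric sketch (in JavaScript for illustration; the time-base values are assumptions, not taken from the question). An FFmpeg time base is a rational number of seconds per tick, so a timestamp becomes comparable across streams only after being multiplied by its own stream's time base; if GetTimeBase() returned the wrong stream's time base on the video path, every video pts would be scaled by a constant factor, which would look exactly like one clock running a fixed 2-3x too fast:

// Illustration of FFmpeg's pts conversion with hypothetical values.
// av_q2d(tb) converts a rational time base to seconds per tick.
const av_q2d = (tb) => tb.num / tb.den

const videoTimeBase = { num: 1, den: 90000 } // assumed 90 kHz video time base
const audioTimeBase = { num: 1, den: 44100 } // assumed 44.1 kHz audio time base

const videoTimestamp = 180000 // ticks

// Correct: multiply by the video stream's own time base.
console.log(videoTimestamp * av_q2d(videoTimeBase)) // 2.0 seconds

// Wrong time base: every pts is scaled by 90000/44100 ≈ 2.04,
// so the video clock appears to run about twice as fast.
console.log(videoTimestamp * av_q2d(audioTimeBase)) // ≈ 4.08 seconds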

  • No video with sound above 360p using ytdl-core

    26 June 2021, by Ashish

    I am able to download separate YouTube video and audio streams, but merging them is not working: a 0 KB file is downloaded.


let info = await ytdl.getInfo(req.body.videoId);
let formatVideo = ytdl.chooseFormat(info.formats, { quality: req.body.format.itag, filter: 'videoonly' });
let formatAudio = ytdl.chooseFormat(info.formats, { quality: req.body.audioTag });
// res.json({video: formatVideo, audio: formatAudio});
const tracker = {
  start: Date.now(),
  audio: { downloaded: 0, total: Infinity },
  video: { downloaded: 0, total: Infinity },
  merged: { frame: 0, speed: '0x', fps: 0 },
};
res.header('Content-Disposition', `attachment; filename=${fileName}`);

cp.exec(`ffmpeg -i ${videoStream} -i ${audioStream} -c copy ${fileName}`, (error, success) => {
  if (!error) {
    console.log(success);
  } else {
    console.log(error);
  }
});
// Prepare the progress bar
let progressbarHandle = null;
const progressbarInterval = 1000;
const showProgress = () => {
  console.log('test');
  readline.cursorTo(process.stdout, 0);
  const toMB = i => (i / 1024 / 1024).toFixed(2);

  process.stdout.write(`Audio  | ${(tracker.audio.downloaded / tracker.audio.total * 100).toFixed(2)}% processed `);
  process.stdout.write(`(${toMB(tracker.audio.downloaded)}MB of ${toMB(tracker.audio.total)}MB).${' '.repeat(10)}\n`);

  process.stdout.write(`Video  | ${(tracker.video.downloaded / tracker.video.total * 100).toFixed(2)}% processed `);
  process.stdout.write(`(${toMB(tracker.video.downloaded)}MB of ${toMB(tracker.video.total)}MB).${' '.repeat(10)}\n`);

  process.stdout.write(`Merged | processing frame ${tracker.merged.frame} `);
  process.stdout.write(`(at ${tracker.merged.fps} fps => ${tracker.merged.speed}).${' '.repeat(10)}\n`);

  process.stdout.write(`running for: ${((Date.now() - tracker.start) / 1000 / 60).toFixed(2)} Minutes.`);
  readline.moveCursor(process.stdout, 0, -3);
};

// Start the ffmpeg child process
const ffmpegProcess = cp.spawn(ffmpeg, [
  // Remove ffmpeg's console spamming
  '-loglevel', '8', '-hide_banner',
  // Redirect/Enable progress messages
  '-progress', 'pipe:3',
  // Set inputs
  '-i', 'pipe:4',
  '-i', 'pipe:5',
  // Map audio & video from streams
  '-map', '0:a',
  '-map', '1:v',
  // Keep encoding
  '-c:v', 'copy',
  // Define output file
  `${fileName}`,
], {
  windowsHide: true,
  stdio: [
    /* Standard: stdin, stdout, stderr */
    'inherit', 'inherit', 'inherit',
    /* Custom: pipe:3, pipe:4, pipe:5 */
    'pipe', 'pipe', 'pipe', 'pipe',
  ],
});
ffmpegProcess.on('close', () => {
  console.log('done');
  // Cleanup
  process.stdout.write('\n\n\n\n');
  clearInterval(progressbarHandle);
  console.log(tracker, '146');
});

// Link streams
// FFmpeg creates the transformer streams and we just have to insert / read data
ffmpegProcess.stdio[3].on('data', chunk => {
  console.log(chunk, '152');
  // Start the progress bar
  if (!progressbarHandle) progressbarHandle = setInterval(showProgress, progressbarInterval);
  // Parse the param=value list returned by ffmpeg
  const lines = chunk.toString().trim().split('\n');
  const args = {};
  for (const l of lines) {
    const [key, value] = l.split('=');
    args[key.trim()] = value.trim();
  }
  tracker.merged = args;
  // console.log(tracker.merged, '162');
});
// audio.pipe(ffmpegProcess.stdio[4]);
// video.pipe(ffmpegProcess.stdio[5]);
const audioStream = await requestObj.get(formatAudio.url);
const videoStream = await requestObj.get(formatVideo.url);
audioStream.pipe(ffmpegProcess.stdio[4]);
videoStream.pipe(ffmpegProcess.stdio[5]);
ffmpegProcess.stdio[6].pipe(res);


    Here, requestObj is the Node.js 'request' module.

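    A hedged note on the code above: ffmpeg is told to write the merged output to the file `${fileName}`, yet the code pipes `ffmpegProcess.stdio[6]` to the response, so that pipe never receives any data (and the earlier `cp.exec` call interpolates stream objects into a shell string, which cannot work). A minimal sketch of the argument change, assuming Matroska as a container that can be written to a non-seekable pipe:

// Sketch: direct the merged output to pipe:6 so stdio[6].pipe(res) gets data.
const ffmpegProcess = cp.spawn(ffmpeg, [
  '-loglevel', '8', '-hide_banner',
  '-progress', 'pipe:3',
  '-i', 'pipe:4', // audio input, fed by audioStream below
  '-i', 'pipe:5', // video input, fed by videoStream below
  '-map', '0:a',
  '-map', '1:v',
  '-c:v', 'copy',
  '-f', 'matroska', // MP4 needs a seekable output; Matroska can stream
  'pipe:6',         // write the muxed result to the extra pipe
], {
  windowsHide: true,
  stdio: ['inherit', 'inherit', 'inherit', 'pipe', 'pipe', 'pipe', 'pipe'],
});

audioStream.pipe(ffmpegProcess.stdio[4]);
videoStream.pipe(ffmpegProcess.stdio[5]);
ffmpegProcess.stdio[6].pipe(res); // now actually receives the merged file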