
Recherche avancée
Médias (91)
-
Les Miserables
9 décembre 2019, par
Mis à jour : Décembre 2019
Langue : français
Type : Textuel
-
VideoHandle
8 novembre 2019, par
Mis à jour : Novembre 2019
Langue : français
Type : Video
-
Somos millones 1
21 juillet 2014, par
Mis à jour : Juin 2015
Langue : français
Type : Video
-
Un test - mauritanie
3 avril 2014, par
Mis à jour : Avril 2014
Langue : français
Type : Textuel
-
Pourquoi Obama lit il mes mails ?
4 février 2014, par
Mis à jour : Février 2014
Langue : français
-
IMG 0222
6 octobre 2013, par
Mis à jour : Octobre 2013
Langue : français
Type : Image
Autres articles (43)
-
MediaSPIP : Modification des droits de création d’objets et de publication définitive
11 novembre 2010, par
Par défaut, MediaSPIP permet de créer 5 types d’objets.
Toujours par défaut les droits de création et de publication définitive de ces objets sont réservés aux administrateurs, mais ils sont bien entendu configurables par les webmestres.
Ces droits sont ainsi bloqués pour plusieurs raisons : parce que le fait d’autoriser à publier doit être la volonté du webmestre pas de l’ensemble de la plateforme et donc ne pas être un choix par défaut ; parce qu’avoir un compte peut servir à autre choses également, (...) -
Personnaliser les catégories
21 juin 2013, par
Formulaire de création d’une catégorie
Pour ceux qui connaissent bien SPIP, une catégorie peut être assimilée à une rubrique.
Dans le cas d’un document de type catégorie, les champs proposés par défaut sont : Texte
On peut modifier ce formulaire dans la partie :
Administration > Configuration des masques de formulaire.
Dans le cas d’un document de type média, les champs non affichés par défaut sont : Descriptif rapide
Par ailleurs, c’est dans cette partie configuration qu’on peut indiquer le (...) -
HTML5 audio and video support
13 avril 2011, par
MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
The MediaSPIP player used has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
For older browsers the Flowplayer flash fallback is used.
MediaSPIP allows for media playback on major mobile platforms with the above (...)
Sur d’autres sites (8427)
-
Reasons for "Segmentation fault (core dumped)" when using Python extension and FFmpeg
24 août 2021, par Christian Vorhemus
I want to write a Python C extension that includes a function
convertVideo()
that converts a video from one format to another making use of FFmpeg 3.4.8 (the libav* libaries). The code of the extension is at the end of the question. The extension compiles successfully but whenever I open Python and want to call it (using a simple Python wrapper code that I don't include here), I get

Python 3.7.10 (default, May 2 2021, 18:28:10)
[GCC 9.1.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import myModule
>>> myModule.convert_video("/home/admin/video.h264", "/home/admin/video.mp4")
convert 0
convert 1
Format raw H.264 video, duration -9223372036854775808 us
convert 2
Segmentation fault (core dumped)



The interesting thing is, I wrote a simple helper program
test_convert.cc
that calls convertVideo()
like so

#include 
#include 

int convertVideo(const char *in_filename, const char *out_filename);

/* Minimal standalone harness: run the same conversion the Python wrapper
 * triggers, using the fixed test file paths. */
int main(void)
{
    const char *source = "/home/admin/video.h264";
    const char *target = "/home/admin/video.mp4";
    int rc = convertVideo(source, target);
    (void)rc; /* result deliberately ignored, as in the original harness */
    return 0;
}



and I compiled this program making use of the shared library that Python generates when building the C extension like so


gcc test_convert.cc /usr/lib/python3.7/site-packages/_myModule.cpython-37m-aarch64-linux-gnu.so -o test_convert



And it works ! The output is


root# ./test_convert
convert 0
convert 1
Format raw H.264 video, duration -9223372036854775808 us
convert 2
convert 3
convert 4
convert 5
convert 6
Output #0, mp4, to '/home/admin/video.mp4':
 Stream #0:0: Video: h264 (High), yuv420p(tv, bt470bg, progressive), 1280x720 [SAR 1:1 DAR 16:9], q=2-31
convert 7



The extension code looks like this


#include 

#include 
#include 
#include 
#include 
#include 
#include 
#include 

extern "C"
{
#include "libavformat/avformat.h"
#include "libavutil/imgutils.h"
}

int convertVideo(const char *in_filename, const char *out_filename)
{
 // Input AVFormatContext and Output AVFormatContext
 AVFormatContext *input_format_context = avformat_alloc_context();
 AVPacket pkt;

 int ret, i;
 int frame_index = 0;
 printf("convert 0\n");
 av_register_all();
 printf("convert 1\n");
 // Input
 if ((ret = avformat_open_input(&input_format_context, in_filename, NULL,
 NULL)) < 0)
 {
 printf("Could not open input file.");
 return 1;
 }
 else
 {
 printf("Format %s, duration %lld us\n",
 input_format_context->iformat->long_name,
 input_format_context->duration);
 }
 printf("convert 2\n");
 if ((ret = avformat_find_stream_info(input_format_context, 0)) < 0)
 {
 printf("Failed to retrieve input stream information");
 return 1;
 }
 printf("convert 3\n");
 AVFormatContext *output_format_context = avformat_alloc_context();
 AVPacket packet;
 int stream_index = 0;
 int *streams_list = NULL;
 int number_of_streams = 0;
 int fragmented_mp4_options = 0;
 printf("convert 4\n");
 avformat_alloc_output_context2(&output_format_context, NULL, NULL,
 out_filename);
 if (!output_format_context)
 {
 fprintf(stderr, "Could not create output context\n");
 ret = AVERROR_UNKNOWN;
 return 1;
 }
 printf("convert 5\n");
 AVOutputFormat *fmt = av_guess_format(0, out_filename, 0);
 output_format_context->oformat = fmt;

 number_of_streams = input_format_context->nb_streams;
 streams_list =
 (int *)av_mallocz_array(number_of_streams, sizeof(*streams_list));

 if (!streams_list)
 {
 ret = AVERROR(ENOMEM);
 return 1;
 }
 for (i = 0; i < input_format_context->nb_streams; i++)
 {
 AVStream *out_stream;
 AVStream *in_stream = input_format_context->streams[i];
 AVCodecParameters *in_codecpar = in_stream->codecpar;
 if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
 in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
 in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE)
 {
 streams_list[i] = -1;
 continue;
 }
 streams_list[i] = stream_index++;

 out_stream = avformat_new_stream(output_format_context, NULL);
 if (!out_stream)
 {
 fprintf(stderr, "Failed allocating output stream\n");
 ret = AVERROR_UNKNOWN;
 return 1;
 }
 ret = avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
 if (ret < 0)
 {
 fprintf(stderr, "Failed to copy codec parameters\n");
 return 1;
 }
 }
 printf("convert 6\n");
 av_dump_format(output_format_context, 0, out_filename, 1);
 if (!(output_format_context->oformat->flags & AVFMT_NOFILE))
 {
 ret = avio_open(&output_format_context->pb, out_filename, AVIO_FLAG_WRITE);
 if (ret < 0)
 {
 fprintf(stderr, "Could not open output file '%s'", out_filename);
 return 1;
 }
 }
 AVDictionary *opts = NULL;
 printf("convert 7\n");
 ret = avformat_write_header(output_format_context, &opts);
 if (ret < 0)
 {
 fprintf(stderr, "Error occurred when opening output file\n");
 return 1;
 }
 int n = 0;

 while (1)
 {
 AVStream *in_stream, *out_stream;
 ret = av_read_frame(input_format_context, &packet);
 if (ret < 0)
 break;
 in_stream = input_format_context->streams[packet.stream_index];
 if (packet.stream_index >= number_of_streams ||
 streams_list[packet.stream_index] < 0)
 {
 av_packet_unref(&packet);
 continue;
 }
 packet.stream_index = streams_list[packet.stream_index];

 out_stream = output_format_context->streams[packet.stream_index];

 out_stream->codec->time_base.num = 1;
 out_stream->codec->time_base.den = 30;

 packet.pts = n * 3000;
 packet.dts = n * 3000;
 packet.duration = 3000;

 packet.pos = -1;

 ret = av_interleaved_write_frame(output_format_context, &packet);
 if (ret < 0)
 {
 fprintf(stderr, "Error muxing packet\n");
 break;
 }
 av_packet_unref(&packet);
 n++;
 }

 av_write_trailer(output_format_context);
 avformat_close_input(&input_format_context);
 if (output_format_context &&
 !(output_format_context->oformat->flags & AVFMT_NOFILE))
 avio_closep(&output_format_context->pb);
 avformat_free_context(output_format_context);
 av_freep(&streams_list);
 if (ret < 0 && ret != AVERROR_EOF)
 {
 fprintf(stderr, "Error occurred\n");
 return 1;
 }
 return 0;
}
// PyMethodDef and other orchestration code is skipped



What is the reason that the code works as expected in my test_convert but not within Python ?


-
How to use ffmpeg.js in a ReactJS project
1er septembre 2021, par L3M0L
My react app is using ffmpeg.wasm (https://github.com/ffmpegwasm/ffmpeg.wasm) but because of the "recent" issues with
SharedArrayBuffer
I have to move the project to ffmpeg.js (https://github.com/Kagami/ffmpeg.js).

Here is my problem. I installed the library (
npm i ffmpeg.js
) and tried to use the simple code provided on the github page for the web workers to test if it's working :

// Boot the ffmpeg.js web worker and wire up its four-message protocol.
const worker = new Worker("ffmpeg-worker-webm.js");

// One handler per message type: "ready" kicks off a `-version` run,
// the other three echo the worker's output to the console.
const handlers = {
  ready: () => worker.postMessage({ type: "run", arguments: ["-version"] }),
  stdout: (data) => console.log(data),
  stderr: (data) => console.log(data),
  done: (data) => console.log(data),
};

worker.onmessage = function (event) {
  const msg = event.data;
  const handle = handlers[msg.type];
  if (handle) {
    handle(msg.data); // unknown message types are ignored, as before
  }
};



but the
onmessage
method never gets called, instead I get

GET https://localhost:3000/ffmpeg-worker-webm.js 403 (Forbidden)



I'm new to the web worker topic and I could not find enough articles about this problem to wrap my head around (in fact, most of the articles use the exact same code as I do and apparently it works for them). Is the problem localhost specific or is it a ReactJS problem and I'm not able to use the ffmpeg.js library at all ? Can someone guide me on how to solve this issue ?


-
how to add audio using ffmpeg when recording video from browser and streaming to Youtube/Twitch ?
26 juillet 2021, par Tosh Velaga
I have a web application I am working on that allows the user to stream video from their browser and simultaneously livestream to both Youtube and Twitch using ffmpeg. The application works fine when I don't need to send any of the audio. Currently I am getting the error below when I try to record video and audio. I am new to using ffmpeg and so any help would be greatly appreciated. Here is also my repo if needed : https://github.com/toshvelaga/livestream


Here is my node.js server with ffmpeg


// Express + WebSocket relay: receives MediaRecorder chunks from the browser
// over a WebSocket and pipes them into an ffmpeg child process that
// restreams them to Twitch and YouTube over RTMP. Also boots a
// NodeMediaServer instance for RTMP/HTTP media serving.
const child_process = require('child_process') // To be used later for running FFmpeg
const express = require('express')
const http = require('http')
const WebSocketServer = require('ws').Server
const NodeMediaServer = require('node-media-server')
const app = express()
const cors = require('cors')
const path = require('path')
const logger = require('morgan')
require('dotenv').config()

app.use(logger('dev'))
app.use(cors())

// Very large body limits: media payloads may arrive through HTTP routes.
app.use(express.json({ limit: '200mb', extended: true }))
app.use(
 express.urlencoded({ limit: '200mb', extended: true, parameterLimit: 50000 })
)

var authRouter = require('./routes/auth')
var compareCodeRouter = require('./routes/compareCode')

app.use('/', authRouter)
app.use('/', compareCodeRouter)

// In production, serve the built React client and fall back to index.html
// for client-side routing.
if (process.env.NODE_ENV === 'production') {
 // serve static content
 // npm run build
 app.use(express.static(path.join(__dirname, 'client/build')))

 app.get('*', (req, res) => {
 res.sendFile(path.join(__dirname, 'client/build', 'index.html'))
 })
}

const PORT = process.env.PORT || 8080

// NOTE(review): two HTTP listeners are created — the Express API on PORT
// (default 8080) and a second server on 3000 that only carries the
// WebSocket below. The frontend connects to port 3000.
app.listen(PORT, () => {
 console.log(`Server is starting on port ${PORT}`)
})

const server = http.createServer(app).listen(3000, () => {
 console.log('Listening on PORT 3000...')
})


const wss = new WebSocketServer({
 server: server,
})

// For every browser connection, spawn one ffmpeg that reads the incoming
// webm chunks from stdin ('-i -') and copies them into two FLV/RTMP
// outputs (Twitch and YouTube). The commented lines were an attempt to
// inject silent audio via lavfi/anullsrc.
wss.on('connection', (ws, req) => {
 const ffmpeg = child_process.spawn('ffmpeg', [
 // works fine when I use this but when I need audio problems arise
 // '-f',
 // 'lavfi',
 // '-i',
 // 'anullsrc',

 '-i',
 '-',

 '-f',
 'flv',
 '-c',
 'copy',
 `${process.env.TWITCH_STREAM_ADDRESS}`,
 '-f',
 'flv',
 '-c',
 'copy',
 `${process.env.YOUTUBE_STREAM_ADDRESS}`,
 // '-f',
 // 'flv',
 // '-c',
 // 'copy',
 // `${process.env.FACEBOOK_STREAM_ADDRESS}`,
 ])

 // When ffmpeg exits, drop the client connection too.
 ffmpeg.on('close', (code, signal) => {
 console.log(
 'FFmpeg child process closed, code ' + code + ', signal ' + signal
 )
 ws.terminate()
 })

 ffmpeg.stdin.on('error', (e) => {
 console.log('FFmpeg STDIN Error', e)
 })

 // ffmpeg logs its progress/diagnostics on stderr.
 ffmpeg.stderr.on('data', (data) => {
 console.log('FFmpeg STDERR:', data.toString())
 })

 // Each WebSocket message is one recorded media chunk; forward to ffmpeg.
 ws.on('message', (msg) => {
 console.log('DATA', msg)
 ffmpeg.stdin.write(msg)
 })

 // Client went away: ask ffmpeg to shut down gracefully.
 ws.on('close', (e) => {
 console.log('kill: SIGINT')
 ffmpeg.kill('SIGINT')
 })
})

// NodeMediaServer: RTMP ingest on 1935, HTTP playback on 8000.
const config = {
 rtmp: {
 port: 1935,
 chunk_size: 60000,
 gop_cache: true,
 ping: 30,
 ping_timeout: 60,
 },
 http: {
 port: 8000,
 allow_origin: '*',
 },
}

var nms = new NodeMediaServer(config)
nms.run()



Here is my frontend code that records the video/audio and sends to server :


import React, { useState, useEffect, useRef } from 'react'
import Navbar from '../../components/Navbar/Navbar'
import './Dashboard.css'

// getUserMedia constraints: capture both microphone and camera.
const CAPTURE_OPTIONS = { audio: true, video: true }

// Dashboard: previews the user's webcam+mic in a <video> element and, once
// "Go Live" is pressed, records it with MediaRecorder and ships one chunk
// per second to the backend WebSocket on localhost:3000.
function Dashboard() {
 const [mute, setMute] = useState(false)
 const videoRef = useRef()
 const ws = useRef()
 const mediaStream = useUserMedia(CAPTURE_OPTIONS)

 let liveStream
 let liveStreamRecorder

 // Attach the captured stream to the preview element exactly once.
 if (mediaStream && videoRef.current && !videoRef.current.srcObject) {
 videoRef.current.srcObject = mediaStream
 }

 const handleCanPlay = () => {
 videoRef.current.play()
 }

 // Open the WebSocket on mount; close it on unmount.
 useEffect(() => {
 ws.current = new WebSocket(
 window.location.protocol.replace('http', 'ws') +
 '//' + // http: -> ws:, https: -> wss:
 'localhost:3000'
 )

 ws.current.onopen = () => {
 console.log('WebSocket Open')
 }

 return () => {
 ws.current.close()
 }
 }, [])

 // Record the preview element's stream (30 FPS, h264-in-webm) and forward
 // each 1-second chunk over the WebSocket.
 const startStream = () => {
 liveStream = videoRef.current.captureStream(30) // 30 FPS
 liveStreamRecorder = new MediaRecorder(liveStream, {
 mimeType: 'video/webm;codecs=h264',
 videoBitsPerSecond: 3 * 1024 * 1024,
 })
 liveStreamRecorder.ondataavailable = (e) => {
 ws.current.send(e.data)
 console.log('send data', e.data)
 }
 // Start recording, and dump data every second
 liveStreamRecorder.start(1000)
 }

 const stopStream = () => {
 liveStreamRecorder.stop()
 ws.current.close()
 }

 const toggleMute = () => {
 setMute(!mute)
 }

 // NOTE(review): the JSX below looks mangled by the page scrape
 // (style="{{", classname=, stray trailing ">"); left byte-identical.
 return (
 <>
 <navbar></navbar>
 <div style="{{" classname="'main'">
 <div>
 
 </div>
 <div classname="'button-container'">
 <button>Go Live</button>
 <button>Stop Recording</button>
 <button>Share Screen</button>
 <button>Mute</button>
 </div>
 </div>
 >
 )
}

// Hook: requests a MediaStream matching `requestedMedia` (getUserMedia
// constraints) and returns it; null until the user grants access.
// Fix: cleanup now stops EVERY track via getTracks() — the original used
// getVideoTracks() only, which left the microphone live after unmount
// even though audio capture was requested.
const useUserMedia = (requestedMedia) => {
 const [mediaStream, setMediaStream] = useState(null)

 useEffect(() => {
 async function enableStream() {
 try {
 const stream = await navigator.mediaDevices.getUserMedia(requestedMedia)
 setMediaStream(stream)
 } catch (err) {
 // Permission denied or no device available; mediaStream stays null.
 console.log(err)
 }
 }

 if (!mediaStream) {
 enableStream()
 } else {
 // Effect re-ran with a live stream: register cleanup for unmount.
 return function cleanup() {
 mediaStream.getTracks().forEach((track) => {
 track.stop()
 })
 }
 }
 }, [mediaStream, requestedMedia])

 return mediaStream
}

export default Dashboard