
Media (91)
-
Head down (wav version)
26 September 2011, by
Updated: April 2013
Language: English
Type: Audio
-
Echoplex (wav version)
26 September 2011, by
Updated: April 2013
Language: English
Type: Audio
-
Discipline (wav version)
26 September 2011, by
Updated: April 2013
Language: English
Type: Audio
-
Letting you (wav version)
26 September 2011, by
Updated: April 2013
Language: English
Type: Audio
-
1 000 000 (wav version)
26 September 2011, by
Updated: April 2013
Language: English
Type: Audio
-
999 999 (wav version)
26 September 2011, by
Updated: April 2013
Language: English
Type: Audio
Other articles (5)
-
Permissions overridden by plugins
27 April 2010, by Mediaspip core
autoriser_auteur_modifier() so that visitors are able to modify their information on the authors page
-
Farm management
2 March 2010, by
The farm as a whole is managed by "super admins".
Some settings can be adjusted to regulate the needs of the different channels.
As a first step, it uses the "Gestion de mutualisation" plugin
-
The farm's regular Cron tasks
1 December 2010, by
Managing the farm relies on running several repetitive tasks, known as Cron tasks, at regular intervals.
The super Cron (gestion_mutu_super_cron)
This task, scheduled every minute, simply calls the Cron of all the instances of the shared hosting on a regular basis. Combined with a system Cron on the central site of the shared hosting, this generates regular visits to the various sites and prevents the tasks of rarely visited sites from being too (...)
On other sites (2046)
-
Get total time/progress from multiple FFmpeg terminal commands
24 September 2018, by kataroty
I have a class that executes multiple FFmpeg commands in Android. The problem is that I have no idea how to get the total time and then update the progress as the commands run. Since there are so many, I am not even sure it is possible.
So here is my question: is it possible to get the total time/progress, or at least an estimated time/progress, and then update it in onProgress?
And here is my class:
public class AudioProcessor {

    private Context context;
    private FFmpeg ffmpeg;
    private AudioProcessorListener listener;

    private File micPcmFile;
    private File backgroundMp3File;
    private File pcmtowavTempFile;
    private File mp3towavTempFile;
    private File combinedwavTempFile;
    private File outputFile;
    private File volumeChangedTempFile;

    public AudioProcessor(Context context) {
        ffmpeg = FFmpeg.getInstance(context);
        this.context = context;
    }

    /**
     * Program main method. Starts the processing chain.
     * @throws Exception if FFmpeg is unsupported or the input files are not set
     */
    public void process() throws Exception {
        if (!ffmpeg.isSupported()) {
            Log.e("AudioProcessor", "FFmpeg not supported! Cannot convert audio!");
            throw new Exception("FFmpeg has to be supported");
        }
        if (!checkIfAllFilesPresent()) {
            Log.e("AudioProcessor", "Not all files are set yet. Please set the files first");
            throw new Exception("Files are not set!");
        }
        listener.onStart();
        prepare();
        convertPCMToWav();
    }

    /**
     * Prepares the program.
     */
    private void prepare() {
        prepareTempFiles();
    }

    /**
     * Converts the PCM file to a WAV file. Automatically creates the new file.
     */
    private void convertPCMToWav() {
        Log.d("AudioProcessor", "Converting PCM to WAV");
        // ffmpeg -f s16le -ar 44.1k -ac 2 -i file.pcm file.wav
        String[] cmd = { "-f", "s16le", "-ar", "44.1k", "-ac", "2", "-i",
                micPcmFile.toString(), pcmtowavTempFile.toString() };
        ffmpeg.execute(cmd, new ExecuteBinaryResponseHandler() {
            @Override
            public void onSuccess(String message) {
                super.onSuccess(message);
                convertMP3ToWav();
            }

            @Override
            public void onFailure(String message) {
                super.onFailure(message);
                onError(message);
            }
        });
    }

    /**
     * Converts the MP3 file to a WAV file. Automatically creates the WAV file.
     */
    private void convertMP3ToWav() {
        // ffmpeg -i file.mp3 file.wav
        String[] cmd = { "-i", backgroundMp3File.toString(), mp3towavTempFile.toString() };
        ffmpeg.execute(cmd, new ExecuteBinaryResponseHandler() {
            @Override
            public void onSuccess(String message) {
                super.onSuccess(message);
                changeMicAudio();
            }

            @Override
            public void onFailure(String message) {
                super.onFailure(message);
                onError(message);
            }
        });
    }

    /**
     * Combines the two WAV files into one WAV file by overlaying the audio.
     */
    private void combineWavs() {
        // ffmpeg -i _pcm.wav -i _volumeChanged.wav -filter_complex amix=inputs=2:duration=first:dropout_transition=3 out.wav
        String[] cmd = { "-i", pcmtowavTempFile.toString(), "-i", volumeChangedTempFile.toString(),
                "-filter_complex", "amix=inputs=2:duration=first:dropout_transition=3",
                combinedwavTempFile.toString() };
        ffmpeg.execute(cmd, new ExecuteBinaryResponseHandler() {
            @Override
            public void onSuccess(String message) {
                super.onSuccess(message);
                encodeWavToAAC();
            }

            @Override
            public void onFailure(String message) {
                super.onFailure(message);
                onError(message);
            }
        });
    }

    /**
     * Lowers the volume of the background (MP3-derived) WAV before mixing.
     */
    private void changeMicAudio() {
        // ffmpeg -i input.wav -filter:a "volume=0.9" output.wav
        String[] cmd = { "-i", mp3towavTempFile.toString(), "-af", "volume=0.9",
                volumeChangedTempFile.toString() };
        ffmpeg.execute(cmd, new ExecuteBinaryResponseHandler() {
            @Override
            public void onSuccess(String message) {
                super.onSuccess(message);
                combineWavs();
            }

            @Override
            public void onFailure(String message) {
                super.onFailure(message);
                onError(message);
            }
        });
    }

    /**
     * Handles an error: releases program data (deletes temp files) and notifies the listener.
     * @param message the FFmpeg failure message
     */
    private void onError(String message) {
        release();
        if (listener != null) {
            listener.onError(message);
        }
    }

    /**
     * Encodes the combined WAV to AAC.
     */
    private void encodeWavToAAC() {
        // ffmpeg -i file.wav -c:a aac -b:a 128k -f adts output.m4a
        String[] cmd = { "-i", combinedwavTempFile.toString(), "-c:a", "aac",
                "-b:a", "128k", "-f", "adts", outputFile.toString() };
        ffmpeg.execute(cmd, new ExecuteBinaryResponseHandler() {
            @Override
            public void onSuccess(String message) {
                super.onSuccess(message);
                if (listener != null) {
                    listener.onSuccess(outputFile);
                }
                release();
            }

            @Override
            public void onFailure(String message) {
                super.onFailure(message);
                onError(message);
            }
        });
    }

    /**
     * Releases the class: notifies the listener and deletes the temp files.
     */
    private void release() {
        if (listener != null) {
            listener.onFinish();
        }
        destroyTempFiles();
    }

    /**
     * Prepares the required temp files by deleting them if they exist.
     * The files must not exist before FFmpeg runs; FFmpeg creates them itself.
     */
    private void prepareTempFiles() {
        pcmtowavTempFile = new File(context.getFilesDir() + Common.TEMP_LOCAL_DIR + "/" + "_pcm.wav");
        mp3towavTempFile = new File(context.getFilesDir() + Common.TEMP_LOCAL_DIR + "/" + "_mp3.wav");
        combinedwavTempFile = new File(context.getFilesDir() + Common.TEMP_LOCAL_DIR + "/" + "_combined.wav");
        volumeChangedTempFile = new File(context.getFilesDir() + Common.TEMP_LOCAL_DIR + "/" + "_volumeChanged.wav");
        if (pcmtowavTempFile.exists()) {
            destroyTempFiles();
        }
    }

    /**
     * Deletes the temp files.
     */
    private void destroyTempFiles() {
        pcmtowavTempFile.delete();
        mp3towavTempFile.delete();
        combinedwavTempFile.delete();
        volumeChangedTempFile.delete();
    }

    /**
     * Checks whether all input files are set, so we can process them.
     * @return true if all files are ready
     */
    private boolean checkIfAllFilesPresent() {
        if (micPcmFile == null || backgroundMp3File == null || outputFile == null) {
            Log.e("AudioProcessor", "Not all files are set! Set all files first!");
            return false;
        }
        return true;
    }

    public void setOutputFile(File outputFile) {
        this.outputFile = outputFile;
    }

    public void setListener(AudioProcessorListener listener) {
        this.listener = listener;
    }

    public void setMicPcmFile(File micPcmFile) {
        this.micPcmFile = micPcmFile;
    }

    public void setBackgroundMp3File(File backgroundMp3File) {
        this.backgroundMp3File = backgroundMp3File;
    }

    public interface AudioProcessorListener {
        void onStart();
        void onSuccess(File output);
        void onError(String message);
        void onFinish();
    }
}

Since the whole thing is taking a long time, it would also help if someone could recommend something that takes less time. A 2-minute video usually takes around 50 seconds.
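A note on the progress question: ffmpeg reports everything needed in its log output, which the library used above delivers to ExecuteBinaryResponseHandler.onProgress(String message). Before a run starts, ffmpeg prints a `Duration: HH:MM:SS.cc` line for the input, and while encoding it prints `time=HH:MM:SS.cc` lines; dividing the latter by the former gives per-command progress, and with six chained commands each one can be weighted as a share of the total. A minimal sketch of the parsing arithmetic, written as a standalone Node script so it is runnable as-is (`input.wav`/`output.aac` are hypothetical; the same two regexes apply unchanged to the strings arriving in onProgress):

const { spawn } = require("child_process");

const DURATION_RE = /Duration:\s*(\d+):(\d+):(\d+\.\d+)/;
const TIME_RE = /time=(\d+):(\d+):(\d+\.\d+)/;
const toSeconds = (h, m, s) => Number(h) * 3600 + Number(m) * 60 + Number(s);

let totalSeconds = 0;

// ffmpeg writes both the Duration line and the time= progress lines to stderr.
const proc = spawn("ffmpeg", ["-i", "input.wav", "output.aac"]);
proc.stderr.on("data", (data) => {
  const text = data.toString();
  const dur = text.match(DURATION_RE);
  if (dur) totalSeconds = toSeconds(dur[1], dur[2], dur[3]); // printed once, before encoding
  const t = text.match(TIME_RE);
  if (t && totalSeconds > 0) {
    const done = toSeconds(t[1], t[2], t[3]);
    const pct = Math.min(100, (100 * done) / totalSeconds);
    console.log(`progress: ${pct.toFixed(1)}%`); // per-command progress
  }
});

For an overall figure, give each command a fixed weight (e.g. one sixth each) and report the completed weights plus the current command's percentage scaled by its weight. On the speed concern: most of the time goes into writing and re-reading intermediate WAV files, and the chain can in principle be collapsed into a single invocation such as `-f s16le -ar 44.1k -ac 2 -i mic.pcm -i bg.mp3 -filter_complex "[1:a]volume=0.9[bg];[0:a][bg]amix=inputs=2:duration=first:dropout_transition=3" -c:a aac -b:a 128k -f adts out.m4a` (a sketch with hypothetical file names), which also leaves only one command to track.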
-
Raspberry Pi and FFmpeg live streaming video to a backend Node.js server, but how do I deliver it to the front end?
2 August 2023, by qwet142
I'm attempting this setup to live stream video from a Raspberry Pi to a publicly available website. From the backend server, using Node.js and Express, I would like to serve it to the front end with minimal latency to many viewers (the front end will be hosted somewhere like Netlify to handle distribution).


WebRTC is too complex for my use case and timeframe. HLS appears to have too high a latency, as I need <4 s consistently. I would like something compatible with Node.js. I am a beginner in this domain, familiar only with general programming and web development.
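One pattern that fits these constraints, sketched below under stated assumptions, is to have ffmpeg on the Pi push an MPEG-TS stream over plain HTTP to the Node server, which relays the raw chunks to browsers over WebSockets, where a client-side decoder such as JSMpeg renders them; latency is typically well under a second because nothing is segmented. The sketch assumes `npm install express ws` and an encoder command on the Pi along the lines of `ffmpeg -i <camera source> -f mpegts -codec:v mpeg1video http://<server>:3000/ingest/secret` (JSMpeg decodes MPEG-1 video in MPEG-TS):

// Minimal HTTP-ingest / WebSocket-fanout relay (a sketch, not production code).
const express = require("express");
const http = require("http");
const WebSocket = require("ws");

const app = express();
const server = http.createServer(app);
const wss = new WebSocket.Server({ server, path: "/stream" });

const INGEST_KEY = "secret"; // hypothetical shared secret for the encoder

app.post(`/ingest/${INGEST_KEY}`, (req, res) => {
  console.log("Encoder connected from", req.socket.remoteAddress);
  // Relay every incoming MPEG-TS chunk to all connected viewers.
  req.on("data", (chunk) => {
    wss.clients.forEach((client) => {
      if (client.readyState === WebSocket.OPEN) client.send(chunk);
    });
  });
  req.on("end", () => res.end());
});

server.listen(3000, () => console.log("Relay listening on port 3000"));

The trade-off versus HLS is codec quality (MPEG-1) and client-side CPU decoding, but it stays entirely inside Node.js and comfortably beats a <4 s latency target.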


-
Problems decoding a GStreamer pipeline into images using Node.js
18 September 2023, by JK2018
I have this GStreamer pipeline that generates a test video, encodes it in H.264, and sends it via UDP to localhost port 5000.


gst-launch-1.0 -v videotestsrc ! videoconvert ! x264enc tune=zerolatency ! h264parse ! queue ! rtph264pay ! udpsink host=127.0.0.1 port=5000



I run this pipeline in my terminal.


Then I have a minimal Node.js server that is supposed to receive the UDP stream coming from the GStreamer pipeline and decode the video into images.
Finally, I emit each image (not a fragment of an image) over a socket.


I have tried several approaches, without success.
My first attempt used a Node GStreamer library to decode the UDP stream and re-encode it as images.


My second attempt used the fluent-ffmpeg library.
Here is the code:


const express = require("express");
const http = require("http");
const socketIO = require("socket.io");
const cors = require("cors");
const app = express();
const server = http.createServer(app);
const dgram = require("dgram");
const ffmpeg = require("fluent-ffmpeg");

const io = socketIO(server, {
  cors: {
    origin: "http://localhost:3001",
    methods: ["GET", "POST"],
  },
});

app.use(
  cors({
    origin: "http://localhost:3001",
    methods: ["GET", "POST"],
  })
);

const udpServer = dgram.createSocket("udp4");

io.on("connection", (socket) => {
  console.log("A client connected");

  socket.on("disconnect", () => {
    console.log("A client disconnected");
  });
});

// Function to decode an H.264 video frame
function decodeH264Video(inputData, callback) {
  const command = ffmpeg()
    .input(inputData)
    .inputFormat("h264")
    .inputOptions(["-c:v h264"])
    .toFormat("image2")
    .on("end", () => {
      console.log("Decoding complete");
    })
    .on("error", (err) => {
      console.error("Error decoding video:", err);
    })
    .pipe();

  callback(command);
}

// Function to convert a decoded video frame to an image (JPEG format)
function convertVideoFrameToImage(decodedData, callback) {
  const imageStream = decodedData.pipe();
  const imageBuffer = [];

  imageStream.on("data", (chunk) => {
    imageBuffer.push(chunk);
  });

  imageStream.on("end", () => {
    const imageData = Buffer.concat(imageBuffer);
    callback(imageData);
  });
}

udpServer.on("message", (message) => {
  // Decode the UDP packet containing H.264 encoded video
  decodeH264Video(message, (decodedVideo) => {
    // Process the decoded video frame and convert it to an image
    convertVideoFrameToImage(decodedVideo, (imageData) => {
      // Send the image data to connected clients
      io.sockets.emit("image", { imageData: imageData.toString("base64") });
    });
  });
});

udpServer.bind(5000);

server.listen(3000, () => {
  console.log("Server is running on port 3000");
});



Any help is more than welcome.
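One issue with the approach above is that each UDP datagram is an RTP packet carrying only a fragment of the H.264 stream, so decoding a single `message` as a standalone H.264 input cannot work. A sketch of an alternative, under stated assumptions: let ffmpeg itself receive and depacketize the RTP (described by a small SDP file), emit an MJPEG stream on stdout, and split complete JPEGs on their start/end markers before emitting them over socket.io. This assumes ffmpeg is installed, reuses the `io` instance from the code above, and replaces the `dgram` socket entirely (ffmpeg binds UDP port 5000 itself):

const { spawn } = require("child_process");

// stream.sdp (hypothetical file) would describe the GStreamer sender, roughly:
//   v=0
//   o=- 0 0 IN IP4 127.0.0.1
//   s=GStreamer
//   c=IN IP4 127.0.0.1
//   t=0 0
//   m=video 5000 RTP/AVP 96
//   a=rtpmap:96 H264/90000
const ffmpegProc = spawn("ffmpeg", [
  "-protocol_whitelist", "file,udp,rtp",
  "-i", "stream.sdp",
  "-f", "image2pipe",
  "-c:v", "mjpeg",
  "-q:v", "5",
  "pipe:1",
]);

let buffer = Buffer.alloc(0);
const SOI = Buffer.from([0xff, 0xd8]); // JPEG start-of-image marker
const EOI = Buffer.from([0xff, 0xd9]); // JPEG end-of-image marker

ffmpegProc.stdout.on("data", (chunk) => {
  buffer = Buffer.concat([buffer, chunk]);
  // Extract every complete JPEG currently in the buffer.
  let start = buffer.indexOf(SOI);
  let end = buffer.indexOf(EOI, start + 2);
  while (start !== -1 && end !== -1) {
    const frame = buffer.subarray(start, end + 2);
    io.sockets.emit("image", { imageData: frame.toString("base64") });
    buffer = buffer.subarray(end + 2);
    start = buffer.indexOf(SOI);
    end = buffer.indexOf(EOI, start + 2);
  }
});

ffmpegProc.stderr.on("data", (d) => console.error(d.toString()));

Scanning for the 0xFFD8/0xFFD9 markers works here because image2pipe writes each JPEG back-to-back with no framing of its own.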