
Media (1)
-
Bee video in portrait
14 May 2011, by
Updated: February 2012
Language: French
Type: Video
Other articles (88)
-
List of compatible distributions
26 April 2011, by
The table below lists the Linux distributions compatible with the automated installation script of MediaSPIP.

Distribution name    Version name            Version number
Debian               Squeeze                 6.x.x
Debian               Wheezy                  7.x.x
Debian               Jessie                  8.x.x
Ubuntu               The Precise Pangolin    12.04 LTS
Ubuntu               The Trusty Tahr         14.04
If you want to help us improve this list, you can give us access to a machine whose distribution is not mentioned above, or send us the necessary fixes to add (...) -
Participating in its translation
10 April 2011
You can help us improve the wording used in the software, or translate it into any new language so that it can reach new linguistic communities.
To do so, we use SPIP's translation interface, where all of MediaSPIP's language modules are available. You just need to sign up to the translators' mailing list to ask for more information.
At the moment, MediaSPIP is only available in French and (...) -
Managing creation and editing rights for objects
8 February 2011, by
By default, many features are restricted to administrators, but each can be configured independently to change the minimum status required to use it, in particular: writing content on the site, configurable in the form-template management; adding notes to articles; adding captions and annotations to images;
On other sites (12059)
-
librosa can't load wav file in aws lambda docker
30 November 2022, by Luka Savic
I have an AWS Lambda function created using Docker.
I have librosa installed, and ffmpeg installed using the solution from this question: install ffmpeg on amazon ecr linux python


I checked in the Lambda function with
os.system("ffmpeg -version")
and got valid output listing the version and the different parts of ffmpeg.

The problem is that when I call
librosa.load(wav_file)
it gives the following warning:

/your/path/.venv/lib/python3.9/site-packages/librosa/util/decorators.py:88: UserWarning: PySoundFile failed. Trying audioread instead.
 return f(*args, **kwargs) 



From what I've read, librosa should support .wav files natively, even without ffmpeg; yet even with ffmpeg installed, it doesn't work.


One more detail: the .wav file was downloaded, played, and loaded with librosa on my local PC without any problems. I also tried different wav and mp3 files, and the problem was still there.
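
The UserWarning above means that librosa's preferred backend, soundfile (a wrapper around the libsndfile C library), could not open the file, so librosa fell back to audioread. As a minimal diagnostic sketch (check_wav is a hypothetical helper, assuming the same Docker image and the same .wav file), one could call soundfile directly and then librosa, to surface the real underlying error instead of the generic warning:

import librosa

def check_wav(wav_file):
    # Try the soundfile backend directly; this is what librosa attempts first.
    try:
        import soundfile as sf
        data, sr = sf.read(wav_file)
        print("soundfile OK:", data.shape, sr)
    except Exception as exc:
        # A missing libsndfile shared library or an unsupported encoding ends up here.
        print("soundfile failed:", repr(exc))

    # Then try librosa itself; sr=None keeps the native sample rate (no resampling).
    try:
        y, sr = librosa.load(wav_file, sr=None)
        print("librosa OK:", y.shape, sr)
    except Exception as exc:
        print("librosa failed:", repr(exc))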


-
AWS Lambda in Node JS with FFMPEG Lambda Layer
29 March 2023, by mwcwge23
I'm trying to make a Lambda that takes a video and puts a watermark image on it.
I'm using Lambda with Node.js and the FFMPEG Lambda Layer I took from here:
https://serverlessrepo.aws.amazon.com/applications/us-east-1/145266761615/ffmpeg-lambda-layer


I got these two errors and I don't have a clue what I did wrong:
[screenshot: errors]


Please help me :)


(by the way, if you have an easier solution for putting a watermark image on a video, that would also be great)


That's my code (trying to put a watermark image on a video file):


const express = require("express");
const childProcess = require("child_process");
const path = require("path");
const fs = require("fs");
const util = require("util");
const os = require("os");
const { fileURLToPath } = require("url");
const { v4: uuidv4 } = require("uuid");
const bodyParser = require("body-parser");
const awsServerlessExpressMiddleware = require("aws-serverless-express/middleware");
const AWS = require("aws-sdk");
const workdir = os.tmpdir();

const s3 = new AWS.S3();

// declare a new express app
const app = express();
app.use(bodyParser.json());
app.use(awsServerlessExpressMiddleware.eventContext());

// Enable CORS for all methods
app.use(function (req, res, next) {
 res.header("Access-Control-Allow-Origin", "*");
 res.header("Access-Control-Allow-Headers", "*");
 next();
});

const downloadFileFromS3 = function (bucket, fileKey, filePath) {
 "use strict";
 console.log("downloading", bucket, fileKey, filePath);
 return new Promise(function (resolve, reject) {
 const file = fs.createWriteStream(filePath),
 stream = s3
 .getObject({
 Bucket: bucket,
 Key: fileKey,
 })
 .createReadStream();
 stream.on("error", reject);
 file.on("error", reject);
 file.on("finish", function () {
 console.log("downloaded", bucket, fileKey);
 resolve(filePath);
 });
 stream.pipe(file);
 });
};

const uploadFileToS3 = function (bucket, fileKey, filePath, contentType) {
 "use strict";
 console.log("uploading", bucket, fileKey, filePath);
 return s3
 .upload({
 Bucket: bucket,
 Key: fileKey,
 Body: fs.createReadStream(filePath),
 ACL: "private",
 ContentType: contentType,
 })
 .promise();
};

const spawnPromise = function (command, argsarray, envOptions) {
 return new Promise((resolve, reject) => {
 console.log("executing", command, argsarray.join(" "));
 const childProc = childProcess.spawn(
 command,
 argsarray,
 envOptions || { env: process.env, cwd: process.cwd() }
 ),
 resultBuffers = [];
 childProc.stdout.on("data", (buffer) => {
 console.log(buffer.toString());
 resultBuffers.push(buffer);
 });
 childProc.stderr.on("data", (buffer) => console.error(buffer.toString()));
 childProc.on("exit", (code, signal) => {
 console.log(`${command} completed with ${code}:${signal}`);
 if (code || signal) {
 reject(`${command} failed with ${code || signal}`);
 } else {
 resolve(Buffer.concat(resultBuffers).toString().trim());
 }
 });
 });
};

app.post("/api/addWatermark", async (req, res) => {
 try {
 const bucketName = "bucketName ";
 const uniqeName = uuidv4() + Date.now();
 const outputPath = path.join(workdir, uniqeName + ".mp4");
 const key = "file_example_MP4_480_1_5MG.mp4";
 const localFilePath = path.join(workdir, key);
 const watermarkPngKey = "watermark.png";
 const watermarkLocalFilePath = path.join(workdir, watermarkPngKey);

 downloadFileFromS3(bucketName, key, localFilePath)
 .then(() => {
 downloadFileFromS3(bucketName, watermarkPngKey, watermarkLocalFilePath)
 .then(() => {
 fs.readFile(localFilePath, (err, data) => {
 if (!err && data) {
 console.log("successsss111");
 }
 });
 fs.readFile(watermarkLocalFilePath, (err, data) => {
 if (!err && data) {
 console.log("successsss222");
 }
 });

 fs.readFile(outputPath, (err, data) => {
 if (!err && data) {
 console.log("successsss3333");
 }
 });

 spawnPromise(
 "/opt/bin/ffmpeg",
 [
 "-i",
 localFilePath,
 "-i",
 watermarkLocalFilePath,
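 // The filter graph below lowers the watermark's alpha to 50%
 // (format=rgba,colorchannelmixer=aa=0.5) and overlays it 5 px from the left
 // and bottom edges of the video (overlay=5:H-h-5); the final format=yuv420p
 // keeps the output widely playable.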
 "-filter_complex",
 `[1]format=rgba,colorchannelmixer=aa=0.5[logo];[0][logo]overlay=5:H-h-5:format=auto,format=yuv420p`,
 "-c:a",
 "copy",
 outputPath,
 ],
 { env: process.env, cwd: workdir }
 )
 .then(() => {
 uploadFileToS3(
 bucketName,
 uniqeName + ".mp4",
 outputPath,
 "mp4"
 );
 });
 });
 });
 } catch (err) {
 console.log({ err });
 res.json({ err });
 }
});

app.listen(8136, function () {
 console.log("App started");
});

module.exports = app;




-
Encoding audio_common messages to OPUS
14 June 2023, by djangbahevans

I am trying to stream microphone and camera data to Amazon KVS WebRTC. I'm able to make video work using this package (adapted for noetic), but I am struggling to make audio work. I'm using the audio_capture package to get mp3 frames, and I'm trying to convert them to OPUS frames before streaming to KVS, but I'm unsure how to do this. I wrote this bit of code based on the few resources I could find on using ffmpeg, but it's not working.
avcodec_fill_audio_frame
is returning -22.

#include "opus_encoder.h"

OPUSEncoder::OPUSEncoder() {
 av_register_all();
 codecContext = nullptr;
}

OPUSEncoder::~OPUSEncoder() {
 if (codecContext != nullptr) {
 avcodec_free_context(&codecContext);
 }
}

int OPUSEncoder::Initialize(int Fs, int channels) {
 AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_OPUS);
 if (!codec) {
 printf("Codec not found\n");
 return -1;
 }

 codecContext = avcodec_alloc_context3(codec);
 if (!codecContext) {
 printf("Could not allocate audio codec context\n");
 return -1;
 }

 codecContext->sample_fmt = AV_SAMPLE_FMT_S16;
 codecContext->bit_rate = 128000;
 codecContext->sample_rate = Fs;
 codecContext->channel_layout = av_get_default_channel_layout(channels);
 codecContext->channels = channels;
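 // Note: the encoder returned for AV_CODEC_ID_OPUS (libopus in most builds)
 // only accepts 8, 12, 16, 24 or 48 kHz input, and once avcodec_open2()
 // succeeds, codecContext->frame_size gives the number of samples per channel
 // that each AVFrame sent to the encoder must contain (Opus frames are
 // 2.5-60 ms long).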

 if (avcodec_open2(codecContext, codec, nullptr) < 0) {
 printf("Could not open codec\n");
 return -1;
 }

 return 0;
}

int OPUSEncoder::Encode(const uint8_t *audio_data, int frameSize,
 uint8_t *out) {
 AVPacket pkt;
 av_init_packet(&pkt);
 pkt.data = nullptr;
 pkt.size = 0;

 AVFrame *frame = av_frame_alloc();
 frame->nb_samples = frameSize;
 frame->format = codecContext->sample_fmt;
 frame->channel_layout = codecContext->channel_layout;
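 // Note: avcodec_fill_audio_frame() checks that buf_size covers all channels,
 // i.e. nb_samples * channels * bytes_per_sample for interleaved S16. The
 // frameSize * 2 passed below only accounts for a single 16-bit channel, so
 // with more than one channel the call returns AVERROR(EINVAL) (-22), which is
 // a likely cause of the error described above.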

 int ret = avcodec_fill_audio_frame(frame, codecContext->channels,
 codecContext->sample_fmt, audio_data,
 frameSize * 2, 0);
 if (ret < 0) {
 printf("Error filling audio frame: %d\n", ret);
 return -1;
 }

 ret = avcodec_send_frame(codecContext, frame);
 if (ret < 0) {
 printf("Error sending the frame to the encoder\n");
 return -1;
 }

 while (ret >= 0) {
 ret = avcodec_receive_packet(codecContext, &pkt);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
 return 0;
 } else if (ret < 0) {
 printf("Error encoding audio frame\n");
 return -1;
 }

 memcpy(out, pkt.data, pkt.size);
 out += pkt.size;
 av_packet_unref(&pkt);
 }

 av_frame_free(&frame);

 return 0;
}