
Recherche avancée
Médias (17)
-
Matmos - Action at a Distance
15 septembre 2011, par
Mis à jour : Septembre 2011
Langue : English
Type : Audio
-
DJ Dolores - Oslodum 2004 (includes (cc) sample of “Oslodum” by Gilberto Gil)
15 septembre 2011, par
Mis à jour : Septembre 2011
Langue : English
Type : Audio
-
Danger Mouse & Jemini - What U Sittin’ On ? (starring Cee Lo and Tha Alkaholiks)
15 septembre 2011, par
Mis à jour : Septembre 2011
Langue : English
Type : Audio
-
Cornelius - Wataridori 2
15 septembre 2011, par
Mis à jour : Septembre 2011
Langue : English
Type : Audio
-
The Rapture - Sister Saviour (Blackstrobe Remix)
15 septembre 2011, par
Mis à jour : Septembre 2011
Langue : English
Type : Audio
-
Chuck D with Fine Arts Militia - No Meaning No
15 septembre 2011, par
Mis à jour : Septembre 2011
Langue : English
Type : Audio
Autres articles (100)
-
ANNEXE : Les plugins utilisés spécifiquement pour la ferme
5 mars 2010, par — Le site central/maître de la ferme a besoin d’utiliser plusieurs plugins supplémentaires vis-à-vis des canaux pour son bon fonctionnement. le plugin Gestion de la mutualisation ; le plugin inscription3 pour gérer les inscriptions et les demandes de création d’instance de mutualisation dès l’inscription des utilisateurs ; le plugin verifier qui fournit une API de vérification des champs (utilisé par inscription3) ; le plugin champs extras v2 nécessité par inscription3 (...)
-
Problèmes fréquents
10 mars 2010, parPHP et safe_mode activé
Une des principales sources de problèmes relève de la configuration de PHP et notamment de l’activation du safe_mode
La solution consisterait à soit désactiver le safe_mode soit placer le script dans un répertoire accessible par apache pour le site -
Support de tous types de médias
10 avril 2011Contrairement à beaucoup de logiciels et autres plate-formes modernes de partage de documents, MediaSPIP a l’ambition de gérer un maximum de formats de documents différents qu’ils soient de type : images (png, gif, jpg, bmp et autres...) ; audio (MP3, Ogg, Wav et autres...) ; vidéo (Avi, MP4, Ogv, mpg, mov, wmv et autres...) ; contenu textuel, code ou autres (open office, microsoft office (tableur, présentation), web (html, css), LaTeX, Google Earth) (...)
Sur d’autres sites (11057)
-
Are there any alternatives to SharedArrayBuffer, or methods for video editing in a web browser ?
26 juillet 2023, par Govinda Regmi — I'm working on a web-based video editing application using ffmpeg that heavily relies on SharedArrayBuffer. Unfortunately, I've encountered a roadblock with the "Cross-Origin-Embedder-Policy : require-corp | credentialless" and "Cross-Origin-Opener-Policy : same-origin" headers. While these headers allow the usage of SharedArrayBuffer, they restrict other essential features, such as rendering images from an S3 bucket and scripts of the TinyMCE text editor.


I am trying to achieve
video editor like this


I am using "next" : "12.1.6" and
I tried to implement ffmpeg like this :


import { useEffect, useState } from "react";

import { useDebounce } from "use-debounce";
import { createFFmpeg, fetchFile } from "@ffmpeg/ffmpeg";

import styles from "../videoEditor.module.scss";
import RangeInput from "../range-input/RangeInput";
import * as helpers from "../../../../utils/videoHelpers";

// Shared ffmpeg.wasm instance for this module; `log: true` echoes ffmpeg
// output to the console and `corePath` pins the wasm core build.
const FF = createFFmpeg({
  log: true,
  corePath: "https://unpkg.com/@ffmpeg/core@0.10.0/dist/ffmpeg-core.js",
});

// Eagerly start loading the wasm core at module-import time so it is
// (usually) ready before the component first needs it. The promise must not
// be left floating: a load failure (e.g. missing COOP/COEP headers, so no
// SharedArrayBuffer) would otherwise surface as an unhandled rejection.
(async function () {
  try {
    await FF.load();
  } catch (err) {
    // Best-effort preload: callers re-check FF.isLoaded() and retry the
    // load before use, so log rather than crash the module.
    console.error("ffmpeg core failed to preload", err);
  }
})();

// Video-trimming component built on the module-level ffmpeg.wasm instance
// (FF): loads the incoming video file, renders a strip of preview
// thumbnails, and trims the selected [rStart, rEnd] percentage range.
// NOTE(review): the JSX in the return statement appears to have been
// mangled in transit (empty ternary branches, a stray ">") — the original
// markup must be restored before this component can compile.
export const VideoTrimmer = ({
 videoFile,
 trimmedVideoFile,
 isConfirmClicked,
 setTrimmedVideoFile,
 onConfirmClickHandler,
}) => {
 // Source for the player element — presumably a base64 data-URI (see
 // readFileAsBase64 below); despite the name this is not an object URL,
 // and it shadows the global URL constructor inside this component.
 const [URL, setURL] = useState([]);
 const [thumbNails, setThumbNails] = useState([]);
 const [videoMeta, setVideoMeta] = useState(null);
 const [inputVideoFile, setInputVideoFile] = useState(null);
 const [thumbnailIsProcessing, setThumbnailIsProcessing] = useState(false);

 // Trim handles expressed as a percentage of the duration; the debounced
 // copies (500 ms) gate the expensive ffmpeg trim in the effect below.
 const [rStart, setRstart] = useState(0);
 const [debouncedRstart] = useDebounce(rStart, 500);

 const [rEnd, setRend] = useState(10);
 const [debouncedRend] = useDebounce(rEnd, 500);

 // onLoadedData handler for the video element: capture the duration, then
 // build the thumbnail filmstrip. videoWidth/videoHeight are hard-coded.
 const handleLoadedData = async (e) => {
 const el = e.target;
 const meta = {
 name: inputVideoFile.name,
 duration: el.duration,
 videoWidth: 50,
 videoHeight: 50,
 };
 setVideoMeta(meta);
 const thumbNails = await getThumbnails(meta);
 setThumbNails(thumbNails);
 };

 // Extract up to 15 evenly spaced PNG frames (scaled to 150px wide) from
 // the loaded file and return them as an array of data URIs.
 const getThumbnails = async ({ duration }) => {
 if (!FF.isLoaded()) await FF.load();
 setThumbnailIsProcessing(true);
 let MAX_NUMBER_OF_IMAGES = 15;
 let NUMBER_OF_IMAGES = duration < MAX_NUMBER_OF_IMAGES ? duration : 15;
 let offset =
 duration === MAX_NUMBER_OF_IMAGES ? 1 : duration / NUMBER_OF_IMAGES;

 const arrayOfImageURIs = [];
 FF.FS("writeFile", inputVideoFile.name, await fetchFile(inputVideoFile));

 for (let i = 0; i < NUMBER_OF_IMAGES; i++) {
 let startTimeInSecs = helpers.toTimeString(Math.round(i * offset));

 try {
 // Grab one frame at the computed offset: seek (-ss), read 1s (-t),
 // scale to 150px wide preserving aspect ratio (-vf scale=150:-1).
 await FF.run(
 "-ss",
 startTimeInSecs,
 "-i",
 inputVideoFile.name,
 "-t",
 "00:00:1.000",
 "-vf",
 `scale=150:-1`,
 `img${i}.png`,
 );
 const data = FF.FS("readFile", `img${i}.png`);

 let blob = new Blob([data.buffer], { type: "image/png" });
 let dataURI = await helpers.readFileAsBase64(blob);
 FF.FS("unlink", `img${i}.png`);
 arrayOfImageURIs.push(dataURI);
 } catch (error) {
 // Best-effort: a frame that fails to extract is skipped silently.
 // console.log({ message: error });
 }
 }
 setThumbnailIsProcessing(false);

 return arrayOfImageURIs;
 };
 // Trim the [rStart%, rEnd%] window using stream copy ("-c copy", no
 // re-encode) and hand the result to the parent via setTrimmedVideoFile.
 const handleTrim = async () => {
 // setTrimIsProcessing(true);
 let startTime = ((rStart / 100) * videoMeta.duration).toFixed(2);
 let offset = ((rEnd / 100) * videoMeta.duration - startTime).toFixed(2);
 try {
 FF.FS("writeFile", inputVideoFile.name, await fetchFile(inputVideoFile));
 await FF.run(
 "-ss",
 helpers.toTimeString(startTime),
 "-i",
 inputVideoFile.name,
 "-t",
 helpers.toTimeString(offset),
 "-c",
 "copy",
 "ping.mp4",
 );
 const data = FF.FS("readFile", "ping.mp4");
 const dataURL = await helpers.readFileAsBase64(
 new Blob([data.buffer], { type: "video/mp4" }),
 );

 setTrimmedVideoFile(dataURL);
 } catch (error) {
 // NOTE(review): errors are swallowed here; consider surfacing them.
 // console.log(error);
 } finally {
 // setTrimIsProcessing(false);
 }
 };

 // Keep the selected window at most `limit` percent (~120 s) wide: moving
 // one handle past the limit drags the other handle along with it.
 const handleRangeChange = (type, event) => {
 const limit = parseInt((120 / videoMeta.duration) * 100);
 if (type === "start") {
 if (rEnd - rStart > limit) {
 setRend(parseInt(event.target.value) + limit);
 setRstart(parseInt(event.target.value));
 } else {
 setRstart(parseInt(event.target.value));
 }
 } else if (type === "end") {
 if (rEnd - rStart > limit) {
 setRstart(parseInt(event.target.value) - limit);
 setRend(parseInt(event.target.value));
 } else {
 setRend(parseInt(event.target.value));
 }
 }
 };

 // Clamp the initial end handle for videos longer than 2 minutes.
 useEffect(() => {
 if (videoMeta?.duration > 120) {
 const limit = parseInt((120 / videoMeta.duration) * 100);
 setRend(limit);
 }
 }, [videoMeta?.duration]);

 // On mount: adopt the incoming file and decode it for the player.
 // NOTE(review): videoFormData is built but never sent — looks like a
 // leftover from an upload path; confirm before removing.
 useEffect(() => {
 const videoFormData = new FormData();
 if (videoFile) {
 videoFormData.append("file", videoFile);
 const handleChange = async () => {
 setInputVideoFile(videoFile);
 setURL(await helpers.readFileAsBase64(videoFile));
 };
 handleChange();
 }
 }, []);

 // Hand the trim action to the parent each time "confirm" is clicked.
 useEffect(() => {
 if (videoMeta) {
 onConfirmClickHandler(handleTrim);
 }
 }, [isConfirmClicked]);

 // Auto-trim once both debounced handles have settled on the live values.
 useEffect(() => {
 if (debouncedRend == rEnd && debouncedRstart == rStart && videoMeta) {
 handleTrim();
 }
 }, [debouncedRend, debouncedRstart, videoMeta]);

 return (
 <>
 <article classname="grid_txt_2">
 
 {trimmedVideoFile ? (
 
 ) : (
 
 )}
 
 </article>
 
 >
 );
};



next.config.js


// Next.js configuration: attach cross-origin-isolation headers to every
// route so SharedArrayBuffer (required by ffmpeg.wasm) is available.
const nextConfig = {
  async headers() {
    // COOP + COEP together opt the page into cross-origin isolation.
    const isolationHeaders = [
      { key: "Cross-Origin-Opener-Policy", value: "same-origin" },
      { key: "Cross-Origin-Embedder-Policy", value: "credentialless" },
    ];
    return [{ source: "/(.*)", headers: isolationHeaders }];
  },
};



This works seamlessly in Chrome and Edge, but it encounters issues (SharedArrayBuffer is not defined) in Firefox and Safari. How can we ensure it functions impeccably across all major browsers ?


When utilizing key : "Cross-Origin-Embedder-Policy", value : "require-corp", I encounter an error while fetching images/scripts from cross-origin sources, resulting in "net::ERR_BLOCKED_BY_RESPONSE.NotSameOriginAfterDefaultedToSameOriginByCoep 200 (OK)". Can you suggest how I can resolve this issue ?


-
Converting a voice recording into an mp3
21 juillet 2023, par Raphael MFor a vue.js messaging project, I'm using the wavesurfer.js library to record voice messages. However Google chrome gives me an audio/webm blob and Safari gives me an audio/mp4 blob.


I'm trying to find a solution to transcode the blob into audio/mp3. I've tried several methods, including ffmpeg. However, ffmpeg gives me an error when compiling "npm run dev" : "Can't resolve '/node_modules/@ffmpeg/core/dist/ffmpeg-core.js'".


"@ffmpeg/core": "^0.11.0",
"@ffmpeg/ffmpeg": "^0.11.6"



I tried to downgrade ffmpeg


"@ffmpeg/core": "^0.9.0",
"@ffmpeg/ffmpeg": "^0.9.8"



I no longer get the error message when compiling, but when I want to convert my audio stream, the console displays a problem with SharedBuffer : "Uncaught (in promise) ReferenceError : SharedArrayBuffer is not defined".


Here's my complete code below.
Is there a reliable way of transcoding the audio stream into mp3 ?


Can you give me an example ?


Thanks


<!-- Vue messaging view: left panel is static intro copy; right panel hosts
     the chat component plus the audio-recording footer (record/stop/send
     buttons, waveform visualizer and timer).
     NOTE(review): image src URLs were rewritten to stackoverflow.com by the
     scraper — restore the original asset paths before use. -->
<template>
 <div class="left-panel">
 <header class="radial-blue">
 <div class="container">
 <h1 class="mb-30">Posez votre première question à nos thérapeutes</h1>
 <p><b>Attention</b>, vous disposez seulement de 2 messages. Veillez à les utiliser de manière judicieuse !</p>
 <div class="available-messages">
 <div class="item disabled">
 <span>Message 1</span>
 </div>
 <div class="item">
 <span>Message 2</span>
 </div>
 </div>
 </div>
 </header>
 </div>
 <div class="right-panel">
 <div class="messagerie bg-light">
 <messaging ref="messagingComponent"></messaging>
 <footer>
 <button type="button"><img src="http://stackoverflow.com/assets/backoffice/images/record-start.svg" style='max-width: 300px; max-height: 300px' /></button>
 <div class="loading-animation">
 <img src="http://stackoverflow.com/assets/backoffice/images/record-loading.svg" style='max-width: 300px; max-height: 300px' />
 </div>
 <button type="button"><img src="http://stackoverflow.com/assets/backoffice/images/record-stop.svg" style='max-width: 300px; max-height: 300px' /></button>
 <div class="textarea gradient text-dark">
 <textarea placeholder="Posez votre question"></textarea>
 </div>
 <div class="loading-text">Chargement de votre microphone en cours...</div>
 <div class="loading-text">Envoi de votre message en cours...</div>
 <!-- Waveform container used by wavesurfer.js in the component script. -->
 <div ref="visualizer"></div>
 <button type="button"><img src="http://stackoverflow.com/assets/backoffice/images/send.svg" style='max-width: 300px; max-height: 300px' /></button>
 <div>
 {{ formatTimer() }}
 </div>
 </footer>
 </div>
 </div>
</template>

<code class="echappe-js"><script>&#xA;import Messaging from "./Messaging.vue";&#xA;import { createFFmpeg, fetchFile } from &#x27;@ffmpeg/ffmpeg&#x27;;&#xA;&#xA;export default {&#xA; data() {&#xA; return {&#xA; isMicrophoneLoading: false,&#xA; isSubmitLoading: false,&#xA; isMobile: false,&#xA; isMessagerie: false,&#xA; isRecording: false,&#xA; audioUrl: &#x27;&#x27;,&#xA; messageText: &#x27;&#x27;,&#xA; message:null,&#xA; wavesurfer: null,&#xA; access:(this.isMobile?&#x27;denied&#x27;:&#x27;granted&#x27;),&#xA; maxMinutes: 5,&#xA; orangeTimer: 3,&#xA; redTimer: 4,&#xA; timer: 0,&#xA; timerInterval: null,&#xA; ffmpeg: null,&#xA; };&#xA; },&#xA; components: {&#xA; Messaging,&#xA; },&#xA; mounted() {&#xA; this.checkScreenSize();&#xA; window.addEventListener(&#x27;resize&#x27;, this.checkScreenSize);&#xA;&#xA; if(!this.isMobile)&#xA; {&#xA; this.$moment.locale(&#x27;fr&#x27;);&#xA; window.addEventListener(&#x27;beforeunload&#x27;, (event) => {&#xA; if (this.isMessagerie) {&#xA; event.preventDefault();&#xA; event.returnValue = &#x27;&#x27;;&#xA; }&#xA; });&#xA;&#xA; this.initializeWaveSurfer();&#xA; }&#xA; },&#xA; beforeUnmount() {&#xA; window.removeEventListener(&#x27;resize&#x27;, this.checkScreenSize);&#xA; },&#xA; methods: {&#xA; checkScreenSize() {&#xA; this.isMobile = window.innerWidth < 1200;&#xA;&#xA; const windowHeight = window.innerHeight;&#xA; const navbarHeight = this.$navbarHeight;&#xA; let padding = parseInt(navbarHeight &#x2B;181);&#xA;&#xA; const messageListHeight = windowHeight - padding;&#xA; this.$refs.messagingComponent.$refs.messageList.style.height = messageListHeight &#x2B; &#x27;px&#x27;;&#xA; },&#xA; showMessagerie() {&#xA; this.isMessagerie = true;&#xA; this.$refs.messagingComponent.scrollToBottom();&#xA; },&#xA; checkMicrophoneAccess() {&#xA; if (navigator.mediaDevices &amp;&amp; navigator.mediaDevices.getUserMedia) {&#xA;&#xA; return navigator.mediaDevices.getUserMedia({audio: true})&#xA; .then(function (stream) {&#xA; 
stream.getTracks().forEach(function (track) {&#xA; track.stop();&#xA; });&#xA; return true;&#xA; })&#xA; .catch(function (error) {&#xA; console.error(&#x27;Erreur lors de la demande d\&#x27;acc&#xE8;s au microphone:&#x27;, error);&#xA; return false;&#xA; });&#xA; } else {&#xA; console.error(&#x27;getUserMedia n\&#x27;est pas support&#xE9; par votre navigateur.&#x27;);&#xA; return false;&#xA; }&#xA; },&#xA; initializeWaveSurfer() {&#xA; this.wavesurfer = this.$wavesurfer.create({&#xA; container: &#x27;#visualizer&#x27;,&#xA; barWidth: 3,&#xA; barHeight: 1.5,&#xA; height: 46,&#xA; responsive: true,&#xA; waveColor: &#x27;rgba(108,115,202,0.3)&#x27;,&#xA; progressColor: &#x27;rgba(108,115,202,1)&#x27;,&#xA; cursorColor: &#x27;transparent&#x27;&#xA; });&#xA;&#xA; this.record = this.wavesurfer.registerPlugin(this.$recordPlugin.create());&#xA; },&#xA; startRecording() {&#xA; const _this = this;&#xA; this.isMicrophoneLoading = true;&#xA;&#xA; setTimeout(() =>&#xA; {&#xA; _this.checkMicrophoneAccess().then(function (accessible)&#xA; {&#xA; if (accessible) {&#xA; _this.record.startRecording();&#xA;&#xA; _this.record.once(&#x27;startRecording&#x27;, () => {&#xA; _this.isMicrophoneLoading = false;&#xA; _this.isRecording = true;&#xA; _this.updateChildMessage( &#x27;server&#x27;, &#x27;Allez-y ! Vous pouvez enregistrer votre message audio maintenant. 
La dur&#xE9;e maximale autoris&#xE9;e pour votre enregistrement est de 5 minutes.&#x27;, &#x27;text&#x27;, &#x27;&#x27;, &#x27;Message automatique&#x27;);&#xA; _this.startTimer();&#xA; });&#xA; } else {&#xA; _this.isRecording = false;&#xA; _this.isMicrophoneLoading = false;&#xA; _this.$swal.fire({&#xA; title: &#x27;Microphone non d&#xE9;tect&#xE9;&#x27;,&#xA; html: &#x27;<p>Le microphone de votre appareil est inaccessible ou l\&#x27;acc&#xE8;s a &#xE9;t&#xE9; refus&#xE9;.</p><p>Merci de v&#xE9;rifier les param&#xE8;tres de votre navigateur afin de v&#xE9;rifier les autorisations de votre microphone.</p>&#x27;,&#xA; footer: &#x27;<a href='http://stackoverflow.com/contact'>Vous avez besoin d\&#x27;aide ?</a>&#x27;,&#xA; });&#xA; }&#xA; });&#xA; }, 100);&#xA; },&#xA; stopRecording() {&#xA; this.stopTimer();&#xA; this.isRecording = false;&#xA; this.isSubmitLoading = true;&#xA; this.record.stopRecording();&#xA;&#xA; this.record.once(&#x27;stopRecording&#x27;, () => {&#xA; const blobUrl = this.record.getRecordedUrl();&#xA; fetch(blobUrl).then(response => response.blob()).then(blob => {&#xA; this.uploadAudio(blob);&#xA; });&#xA; });&#xA; },&#xA; startTimer() {&#xA; this.timerInterval = setInterval(() => {&#xA; this.timer&#x2B;&#x2B;;&#xA; if (this.timer === this.maxMinutes * 60) {&#xA; this.stopRecording();&#xA; }&#xA; }, 1000);&#xA; },&#xA; stopTimer() {&#xA; clearInterval(this.timerInterval);&#xA; this.timer = 0;&#xA; },&#xA; formatTimer() {&#xA; const minutes = Math.floor(this.timer / 60);&#xA; const seconds = this.timer % 60;&#xA; const formattedMinutes = minutes < 10 ? `0${minutes}` : minutes;&#xA; const formattedSeconds = seconds < 10 ? `0${seconds}` : seconds;&#xA; return `${formattedMinutes}:${formattedSeconds}`;&#xA; },&#xA; async uploadAudio(blob)&#xA; {&#xA; const format = blob.type === &#x27;audio/webm&#x27; ? 
&#x27;webm&#x27; : &#x27;mp4&#x27;;&#xA;&#xA; // Convert the blob to MP3&#xA; const mp3Blob = await this.convertToMp3(blob, format);&#xA;&#xA; const s3 = new this.$AWS.S3({&#xA; accessKeyId: &#x27;xxx&#x27;,&#xA; secretAccessKey: &#x27;xxx&#x27;,&#xA; region: &#x27;eu-west-1&#x27;&#xA; });&#xA;&#xA; var currentDate = new Date();&#xA; var filename = currentDate.getDate().toString() &#x2B; &#x27;-&#x27; &#x2B; currentDate.getMonth().toString() &#x2B; &#x27;-&#x27; &#x2B; currentDate.getFullYear().toString() &#x2B; &#x27;--&#x27; &#x2B; currentDate.getHours().toString() &#x2B; &#x27;-&#x27; &#x2B; currentDate.getMinutes().toString() &#x2B; &#x27;.mp4&#x27;;&#xA;&#xA; const params = {&#xA; Bucket: &#x27;xxx/audio&#x27;,&#xA; Key: filename,&#xA; Body: mp3Blob,&#xA; ACL: &#x27;public-read&#x27;,&#xA; ContentType: &#x27;audio/mp3&#x27;&#xA; }&#xA;&#xA; s3.upload(params, (err, data) => {&#xA; if (err) {&#xA; console.error(&#x27;Error uploading audio:&#x27;, err)&#xA; } else {&#xA; const currentDate = this.$moment();&#xA; const timestamp = currentDate.format(&#x27;dddd DD MMMM YYYY HH:mm&#x27;);&#xA;&#xA; this.updateChildMessage( &#x27;client&#x27;, &#x27;&#x27;, &#x27;audio&#x27;, mp3Blob, timestamp);&#xA; this.isSubmitLoading = false;&#xA; }&#xA; });&#xA; },&#xA; async convertToMp3(blob, format) {&#xA; const ffmpeg = createFFmpeg({ log: true });&#xA; await ffmpeg.load();&#xA;&#xA; const inputPath = &#x27;input.&#x27; &#x2B; format;&#xA; const outputPath = &#x27;output.mp3&#x27;;&#xA;&#xA; ffmpeg.FS(&#x27;writeFile&#x27;, inputPath, await fetchFile(blob));&#xA;&#xA; await ffmpeg.run(&#x27;-i&#x27;, inputPath, &#x27;-acodec&#x27;, &#x27;libmp3lame&#x27;, outputPath);&#xA;&#xA; const mp3Data = ffmpeg.FS(&#x27;readFile&#x27;, outputPath);&#xA; const mp3Blob = new Blob([mp3Data.buffer], { type: &#x27;audio/mp3&#x27; });&#xA;&#xA; ffmpeg.FS(&#x27;unlink&#x27;, inputPath);&#xA; ffmpeg.FS(&#x27;unlink&#x27;, outputPath);&#xA;&#xA; return mp3Blob;&#xA; },&#xA; sendMessage() {&#xA; 
this.isSubmitLoading = true;&#xA; if (this.messageText.trim() !== &#x27;&#x27;) {&#xA; const emmet = &#x27;client&#x27;;&#xA; const text = this.escapeHTML(this.messageText)&#xA; .replace(/\n/g, &#x27;<br>&#x27;);&#xA;&#xA; const currentDate = this.$moment();&#xA; const timestamp = currentDate.format(&#x27;dddd DD MMMM YYYY HH:mm&#x27;);&#xA;&#xA; this.$nextTick(() => {&#xA; this.messageText = &#x27;&#x27;;&#xA;&#xA; const textarea = document.getElementById(&#x27;messageTextarea&#x27;);&#xA; if (textarea) {&#xA; textarea.scrollTop = 0;&#xA; textarea.scrollLeft = 0;&#xA; }&#xA; });&#xA;&#xA; this.updateChildMessage(emmet, text, &#x27;text&#x27;, &#x27;&#x27;, timestamp);&#xA; this.isSubmitLoading = false;&#xA; }&#xA; },&#xA; escapeHTML(text) {&#xA; const map = {&#xA; &#x27;&amp;&#x27;: &#x27;&amp;amp;&#x27;,&#xA; &#x27;<&#x27;: &#x27;&amp;lt;&#x27;,&#xA; &#x27;>&#x27;: &#x27;&amp;gt;&#x27;,&#xA; &#x27;"&#x27;: &#x27;&amp;quot;&#x27;,&#xA; "&#x27;": &#x27;&amp;#039;&#x27;,&#xA; "`": &#x27;&amp;#x60;&#x27;,&#xA; "/": &#x27;&amp;#x2F;&#x27;&#xA; };&#xA; return text.replace(/[&amp;<>"&#x27;`/]/g, (match) => map[match]);&#xA; },&#xA; updateChildMessage(emmet, text, type, blob, timestamp) {&#xA; const newMessage = {&#xA; id: this.$refs.messagingComponent.lastMessageId &#x2B; 1,&#xA; emmet: emmet,&#xA; text: text,&#xA; type: type,&#xA; blob: blob,&#xA; timestamp: timestamp&#xA; };&#xA;&#xA; this.$refs.messagingComponent.updateMessages(newMessage);&#xA; }&#xA; },&#xA;};&#xA;</script>



-
Is it feasible to create FFmpegFrameGrabber one by one for single FFmpegFrameRecorder and maintain video stream keep alive ?
12 juillet 2023, par zhoutian — The reason I ask this question is that I receive byte[] chunks of container data (format name "dhav") one by one, and I need to push that data continuously to an RTMP stream for playback.


What's the current progress I made ?


For now, I can push data to RTMP and play the RTMP stream in VLC for just a few seconds; then the RTMP stream ends.


Because the grabber created from the InputStream only contains a small part of the data coming from the ByteBuffer, the RTMP stream is closed as soon as that InputStream ends.


// Excerpt of the consumer loop (the full class appears further below):
// once the shared ByteBuffer has been flagged full, drain it under its
// monitor and push the copied bytes to the RTMP endpoint.
synchronized (buffer) {
 buffer.flip();
 // Copy out everything buffered so far, then reset for the producer.
 byte[] bytes = new byte[buffer.remaining()];
 buffer.get(bytes);
 buffer.clear();
 isByteBufferFull[0] = false;
 try {
 grabAndPush(bytes, SRS_PUSH_ADDRESS);
 } catch (Exception e) {
 // NOTE(review): failure is swallowed so the loop keeps consuming.
 //throw new RuntimeException(e);
 }

 }



// Abridged excerpt of grabAndPush (full implementation further below):
// wraps one in-memory chunk in a grabber; the elided part re-muxes the
// chunk and pushes it to RTMP.
private static synchronized void grabAndPush(byte[] bytes, String pushAddress) throws Exception {
 avutil.av_log_set_level(avutil.AV_LOG_INFO);
 FFmpegLogCallback.set();

 // The grabber reads only this chunk — when the ByteArrayInputStream is
 // exhausted, the grab loop (elided) ends and the RTMP push stops.
 FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(new ByteArrayInputStream(bytes));
...
}



So can anyone tell me how to keep the RTMP stream always alive with FFmpegFrameGrabber and FFmpegFrameRecorder when the source data arrives chunk by chunk.
very appreciate 😃


this is my code :


import lombok.extern.slf4j.Slf4j;
import org.bytedeco.ffmpeg.avcodec.AVCodecParameters;
import org.bytedeco.ffmpeg.avformat.AVFormatContext;
import org.bytedeco.ffmpeg.avformat.AVStream;
import org.bytedeco.ffmpeg.global.avcodec;
import org.bytedeco.ffmpeg.global.avutil;
import org.bytedeco.javacv.FFmpegFrameGrabber;
import org.bytedeco.javacv.FFmpegFrameRecorder;
import org.bytedeco.javacv.FFmpegLogCallback;
import org.bytedeco.javacv.Frame;
import org.jfjy.ch2ji.ecctv.dh.api.ApiService;
import org.jfjy.ch2ji.ecctv.dh.callback.RealPlayCallback;

import java.io.ByteArrayInputStream;
import java.nio.ByteBuffer;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

@Slf4j
public class GetBytes2PushRTMPNew2 {

 private static final String SRS_PUSH_ADDRESS = "rtmp://127.0.0.1:1935/live/livestream";

 static int BUFFER_CAPACITY = 1 * 1024 * 1024;

 public static void main(String[] args) throws Exception {
 FFmpegLogCallback.set();
 ApiService apiService = new ApiService();
 Long login = apiService.login("10.3.0.54", 8801, "admin", "xxxx");
 ByteBuffer buffer = ByteBuffer.allocate(BUFFER_CAPACITY);
 final boolean[] isByteBufferFull = {false};
 apiService.startRealPlay(new RealPlayCallback() {
 @Override
 public void apply(Long aLong, Integer integer, byte[] bytes) {
 try {
 //push data to bytebuffer
 synchronized (buffer) {
 if (buffer.remaining() > bytes.length) {
 buffer.put(bytes);
 } else {
 isByteBufferFull[0] = true;
 }
 }
 } catch (Exception e) {
 throw new RuntimeException(e);
 }
 }
 }, 0, 0);

 ExecutorService executorService = Executors.newFixedThreadPool(1);
 executorService.execute(new Runnable() {
 @Override
 public void run() {
 while (true) {
 //get data from bytebuffer when buffer is full
 synchronized (isByteBufferFull) {
 if (isByteBufferFull[0]) {
 synchronized (buffer) {
 buffer.flip();
 byte[] bytes = new byte[buffer.remaining()];
 buffer.get(bytes);
 buffer.clear();
 isByteBufferFull[0] = false;
 try {
 //using grabber and recorder to push RTMP
 grabAndPush(bytes, SRS_PUSH_ADDRESS);
 } catch (Exception e) {
 //throw new RuntimeException(e);
 }

 }
 }
 }
 try {
 Thread.sleep(500);
 } catch (InterruptedException e) {
 throw new RuntimeException(e);
 }
 }

 }
 });
 while (true) {

 }
 }

 private static synchronized void grabAndPush(byte[] bytes, String pushAddress) throws Exception {
 avutil.av_log_set_level(avutil.AV_LOG_INFO);
 FFmpegLogCallback.set();

 FFmpegFrameGrabber grabber = new FFmpegFrameGrabber(new ByteArrayInputStream(bytes));


 grabber.setFormat("dhav");
 grabber.start();

 AVFormatContext avFormatContext = grabber.getFormatContext();

 int streamNum = avFormatContext.nb_streams();

 if (streamNum < 1) {
 log.error("no media!");
 return;
 }

 int frameRate = (int) grabber.getVideoFrameRate();
 if (0 == frameRate) {
 frameRate = 15;
 }
 log.info("frameRate[{}],duration[{}]秒,nb_streams[{}]",
 frameRate,
 avFormatContext.duration() / 1000000,
 avFormatContext.nb_streams());

 for (int i = 0; i < streamNum; i++) {
 AVStream avStream = avFormatContext.streams(i);
 AVCodecParameters avCodecParameters = avStream.codecpar();
 log.info("stream index[{}],codec type[{}],codec ID[{}]", i, avCodecParameters.codec_type(), avCodecParameters.codec_id());
 }

 int frameWidth = grabber.getImageWidth();
 int frameHeight = grabber.getImageHeight();
 int audioChannels = grabber.getAudioChannels();

 log.info("frameWidth[{}],frameHeight[{}],audioChannels[{}]",
 frameWidth,
 frameHeight,
 audioChannels);

 FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(pushAddress,
 frameWidth,
 frameHeight,
 audioChannels);

 recorder.setVideoCodec(avcodec.AV_CODEC_ID_H264);
 recorder.setInterleaved(true);

 recorder.setFormat("flv");

 recorder.setFrameRate(frameRate);

 recorder.setGopSize(frameRate);

 recorder.setAudioChannels(grabber.getAudioChannels());


 recorder.start();


 Frame frame;


 log.info("start push");

 int videoFrameNum = 0;
 int audioFrameNum = 0;
 int dataFrameNum = 0;

 int interVal = 1000 / frameRate;
 interVal /= 8;

 while (null != (frame = grabber.grab())) {

 if (null != frame.image) {
 videoFrameNum++;
 }

 if (null != frame.samples) {
 audioFrameNum++;
 }

 if (null != frame.data) {
 dataFrameNum++;
 }

 recorder.record(frame);

 Thread.sleep(interVal);
 }

 log.info("push complete,videoFrameNum[{}],audioFrameNum[{}],dataFrameNum[{}]",
 videoFrameNum,
 audioFrameNum,
 dataFrameNum);

 recorder.close();
 grabber.close();
 }


}