
Media (2)
-
Rennes Emotion Map 2010-11
19 October 2011, by
Updated: July 2013
Language: French
Type: Text
-
Map of Schillerkiez
13 May 2011, by
Updated: September 2011
Language: English
Type: Text
Other articles (64)
-
MediaSPIP version 0.1 Beta
16 April 2011, by
MediaSPIP 0.1 beta is the first version of MediaSPIP declared "usable".
The zip file provided here contains only the sources of MediaSPIP in its standalone version.
To get a working installation, you must manually install all the software dependencies on the server.
If you want to use this archive for a "farm mode" installation, you will also need to make further modifications (...)
-
MediaSPIP 0.1 Beta version
25 April 2011, by
MediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed "usable".
The zip file provided here only contains the sources of MediaSPIP in its standalone version.
To get a working installation, you must manually install all software dependencies on the server.
If you want to use this archive for an installation in "farm mode", you will also need to proceed to other manual (...)
-
Personnaliser en ajoutant son logo, sa bannière ou son image de fond
5 September 2013, by
Some themes take three customization elements into account: adding a logo; adding a banner; adding a background image.
On other sites (11189)
-
ffmpeg AudioSegment error when getting audio chunks in a Socket.IO server in Python
26 January 2024, by a_crszkvc30Last_NameCol
I want to send each audio chunk every minute.
This is the test code; I want to save the full audio file and each audio chunk file, then combine the two audio files. The stop button worked correctly, but the timed sending does not work on the Python server.
Here is the Python server code with Socket.IO:


import os
from io import BytesIO

from pydub import AudioSegment


def handle_voice(sid, data):  # data arrives as a blob
    # Load the audio data in memory with BytesIO
    audio_segment = AudioSegment.from_file(BytesIO(data), format="webm")
    directory = "dddd"
    #directory = str(names_sid.get(sid))
    if not os.path.exists(directory):
        os.makedirs(directory)

    # Save as an audio file
    file_path = os.path.join(directory, f'{sid}.wav')
    audio_segment.export(file_path, format='wav')
    print('Audio file saved')
 



And here is the client:






 
 
<script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/4.5.2/socket.io.js"></script>

<script>
  var socket = io('http://127.0.0.1:5000');
  const record = document.getElementById("record")
  const stop = document.getElementById("stop")
  const soundClips = document.getElementById("sound-clips")
  const chkHearMic = document.getElementById("chk-hear-mic")

  const audioCtx = new (window.AudioContext || window.webkitAudioContext)() // define the audio context

  const analyser = audioCtx.createAnalyser()
  // const distortion = audioCtx.createWaveShaper()
  // const gainNode = audioCtx.createGain()
  // const biquadFilter = audioCtx.createBiquadFilter()

  function makeSound(stream) {
    const source = audioCtx.createMediaStreamSource(stream)
    socket.connect()
    source.connect(analyser)
    // analyser.connect(distortion)
    // distortion.connect(biquadFilter)
    // biquadFilter.connect(gainNode)
    // gainNode.connect(audioCtx.destination) // connecting the different audio graph nodes together
    analyser.connect(audioCtx.destination)
  }

  if (navigator.mediaDevices) {
    console.log('getUserMedia supported.')

    const constraints = { audio: true }
    let chunks = []

    navigator.mediaDevices.getUserMedia(constraints)
      .then(stream => {
        const mediaRecorder = new MediaRecorder(stream)

        chkHearMic.onchange = e => {
          if (e.target.checked == true) {
            audioCtx.resume()
            makeSound(stream)
          } else {
            audioCtx.suspend()
          }
        }

        record.onclick = () => {
          mediaRecorder.start(1000)
          console.log(mediaRecorder.state)
          console.log("recorder started")
          record.style.background = "red"
          record.style.color = "black"
        }

        stop.onclick = () => {
          mediaRecorder.stop()
          console.log(mediaRecorder.state)
          console.log("recorder stopped")
          record.style.background = ""
          record.style.color = ""
        }

        mediaRecorder.onstop = e => {
          console.log("data available after MediaRecorder.stop() called.")
          const bb = new Blob(chunks, { 'type': 'audio/wav' })
          socket.emit('voice', bb)
          const clipName = prompt("Enter a title for the audio file.", new Date())

          const clipContainer = document.createElement('article')
          const clipLabel = document.createElement('p')
          const audio = document.createElement('audio')
          const deleteButton = document.createElement('button')

          clipContainer.classList.add('clip')
          audio.setAttribute('controls', '')
          deleteButton.innerHTML = "Delete"
          clipLabel.innerHTML = clipName

          clipContainer.appendChild(audio)
          clipContainer.appendChild(clipLabel)
          clipContainer.appendChild(deleteButton)
          soundClips.appendChild(clipContainer)

          audio.controls = true
          const blob = new Blob(chunks, { 'type': 'audio/ogg codecs=opus' })

          chunks = []
          const audioURL = URL.createObjectURL(blob)
          audio.src = audioURL
          console.log("recorder stopped")

          deleteButton.onclick = e => {
            evtTgt = e.target
            evtTgt.parentNode.parentNode.removeChild(evtTgt.parentNode)
          }
        }

        mediaRecorder.ondataavailable = function(e) {
          chunks.push(e.data)
          if (chunks.length >= 5) {
            const bloddb = new Blob(chunks, { 'type': 'audio/wav' })
            socket.emit('voice', bloddb)
            chunks = []
          }

          mediaRecorder.sendData = function(buffer) {
            const bloddb = new Blob(buffer, { 'type': 'audio/wav' })
            socket.emit('voice', bloddb)
          }
        };
      })
      .catch(err => {
        console.log('The following error occurred: ' + err)
      })
  }
</script>




Task exception was never retrieved
future: <Task finished coro=<InstrumentedAsyncServer._handle_event_internal() defined at ...> exception=CouldntDecodeError('Decoding failed. ffmpeg returned error code: 3199971767 ...')>
Traceback (most recent call last):
 File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\socketio\async_admin.py", line 276, in _handle_event_internal
 ret = await self.sio.__handle_event_internal(server, sid, eio_sid,
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\socketio\async_server.py", line 597, in _handle_event_internal
 r = await server._trigger_event(data[0], namespace, sid, *data[1:])
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\socketio\async_server.py", line 635, in _trigger_event
 ret = handler(*args)
 ^^^^^^^^^^^^^^
 File "f:\fastapi-socketio-wb38\Python-Javascript-Websocket-Video-Streaming--main\poom2.py", line 153, in handle_voice
 audio_segment = AudioSegment.from_file(BytesIO(data), format="webm")
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\pydub\audio_segment.py", line 773, in from_file
 raise CouldntDecodeError(
pydub.exceptions.CouldntDecodeError: Decoding failed. ffmpeg returned error code: 3199971767

Output from ffmpeg/avlib:

ffmpeg version 6.1.1-full_build-www.gyan.dev Copyright (c) 2000-2023 the FFmpeg developers
 built with gcc 12.2.0 (Rev10, Built by MSYS2 project)
 configuration: --enable-gpl --enable-version3 --enable-static --pkg-config=pkgconf --disable-w32threads --disable-autodetect --enable-fontconfig --enable-iconv --enable-gnutls --enable-libxml2 --enable-gmp --enable-bzlib --enable-lzma --enable-libsnappy --enable-zlib --enable-librist --enable-libsrt --enable-libssh --enable-libzmq --enable-avisynth --enable-libbluray --enable-libcaca --enable-sdl2 --enable-libaribb24 --enable-libaribcaption --enable-libdav1d --enable-libdavs2 --enable-libuavs3d --enable-libzvbi --enable-librav1e --enable-libsvtav1 --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs2 --enable-libxvid --enable-libaom --enable-libjxl --enable-libopenjpeg --enable-libvpx --enable-mediafoundation --enable-libass --enable-frei0r --enable-libfreetype --enable-libfribidi --enable-libharfbuzz --enable-liblensfun --enable-libvidstab --enable-libvmaf --enable-libzimg --enable-amf --enable-cuda-llvm --enable-cuvid --enable-ffnvcodec --enable-nvdec --enable-nvenc --enable-dxva2 --enable-d3d11va --enable-libvpl --enable-libshaderc --enable-vulkan --enable-libplacebo --enable-opencl --enable-libcdio --enable-libgme --enable-libmodplug --enable-libopenmpt --enable-libopencore-amrwb --enable-libmp3lame --enable-libshine --enable-libtheora --enable-libtwolame --enable-libvo-amrwbenc --enable-libcodec2 --enable-libilbc --enable-libgsm --enable-libopencore-amrnb --enable-libopus --enable-libspeex --enable-libvorbis --enable-ladspa --enable-libbs2b --enable-libflite --enable-libmysofa --enable-librubberband --enable-libsoxr --enable-chromaprint
 libavutil 58. 29.100 / 58. 29.100
 libavcodec 60. 31.102 / 60. 31.102
 libavformat 60. 16.100 / 60. 16.100
 libavdevice 60. 3.100 / 60. 3.100
 libavfilter 9. 12.100 / 9. 12.100
 libswscale 7. 5.100 / 7. 5.100
 libswresample 4. 12.100 / 4. 12.100
 libpostproc 57. 3.100 / 57. 3.100
[cache @ 000001d9828efe40] Inner protocol failed to seekback end : -40
[matroska,webm @ 000001d9828efa00] EBML header parsing failed
[cache @ 000001d9828efe40] Statistics, cache hits:0 cache misses:3
[in#0 @ 000001d9828da3c0] Error opening input: Invalid data found when processing input
Error opening input file cache:pipe:0.
Error opening input files: Invalid data found when processing input


I'm using ffmpeg-6.1.1-full_build.
I don't know why this error occurs: the stop button sends its event correctly, but the chunk data does not work correctly on the Python server.
Sorry for my bad English.
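
A likely cause, judging from the "EBML header parsing failed" line in the trace above: with mediaRecorder.start(1000), only the first dataavailable blob carries the WebM/EBML header, and later chunks are headerless continuations of the same stream, so a Blob built only from later chunks can never be decoded on its own. A minimal client-side sketch of one workaround (an assumption, not the poster's code; the one-minute cadence and the existing socket are taken from the question): restart the recorder each interval so every emitted blob is a complete, independently decodable WebM file.

const recorder = new MediaRecorder(stream);
// Every stop() flushes one self-contained WebM blob, header included.
recorder.ondataavailable = e => socket.emit('voice', e.data);
recorder.start();
setInterval(() => {
    if (recorder.state === 'recording') {
        recorder.stop();  // emit the finished minute as a complete file
        recorder.start(); // immediately begin recording the next minute
    }
}, 60000); // hypothetical one-minute send interval

Each blob received by handle_voice is then a complete WebM file that pydub/ffmpeg can decode.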


-
How to stop ffmpeg when there's no incoming rtmp stream
5 July 2016, by M. Irich
I use ffmpeg together with nginx-rtmp.
The thing is, ffmpeg doesn't finish the process when the stream has finished. I use the following command:
ffmpeg -i 'rtmp://localhost:443/live/test' -loglevel debug -c:a libfdk_aac -b:a 192k -c:v libx264 -profile baseline -preset superfast -tune zerolatency -b:v 2500k -maxrate 4500k -minrate 1500k -bufsize 9000k -keyint_min 15 -g 15 -f dash -use_timeline 1 -use_template 1 -min_seg_duration 5000 -y /tmp/dash/test/test.mpd
but even when the stream is not running, ffmpeg still doesn't finish the process and keeps waiting for the RTMP stream:
Successfully parsed a group of options.
Opening an input file: rtmp://localhost:443/live/test.
[rtmp @ 0x2ba2160] No default whitelist set
[tcp @ 0x2ba2720] No default whitelist set
[rtmp @ 0x2ba2160] Handshaking...
[rtmp @ 0x2ba2160] Type answer 3
[rtmp @ 0x2ba2160] Server version 13.14.10.13
[rtmp @ 0x2ba2160] Proto = rtmp, path = /live/test, app = live, fname = test
[rtmp @ 0x2ba2160] Server bandwidth = 5000000
[rtmp @ 0x2ba2160] Client bandwidth = 5000000
[rtmp @ 0x2ba2160] New incoming chunk size = 4096
[rtmp @ 0x2ba2160] Creating stream...
[rtmp @ 0x2ba2160] Sending play command for 'test'

Is it possible to limit the waiting time to several seconds?
Sorry for any possible mistakes - English’s not my native language.
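
An aside with a possible handle on this (assumptions flagged): newer ffmpeg builds expose a generic -rw_timeout protocol option, in microseconds, which makes a stalled network input abort instead of waiting forever; whether a 2016-era build supports it is uncertain. Placed before the input, it would look like:

ffmpeg -rw_timeout 10000000 -i 'rtmp://localhost:443/live/test' ...

A build-independent fallback is to wrap the whole command in coreutils timeout (e.g. timeout 3600 ffmpeg ...), at the cost of also killing healthy long-running streams.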
-
How to Stream RTP (IP camera) Into React App setup
10 November 2024, by sharon2469
I am trying to transfer a live broadcast from an IP camera, or any other broadcast coming from an RTP/RTSP source, to my React application. BUT IT MUST BE LIVE.


My setup at the moment is :


IP Camera -> (RTP) -> FFmpeg -> (udp) -> Server(nodeJs) -> (WebRTC) -> React app


In the current situation there is almost no delay, but there are some things here that I can't avoid and can't understand why, so here are my questions:


1) First, is the SETUP even correct, and is this the only way to stream RTP video into a web app?


2) Is it possible to avoid re-encoding the stream? The RTP transmission necessarily comes in H.264, hence I don't really need to execute the following command:


return spawn('ffmpeg', [
    '-re',                         // Read input at its native frame rate; important for live-streaming
    '-probesize', '32',            // Set probing size to 32 bytes (32 is the minimum)
    '-analyzeduration', '1000000', // An input analysis duration of 1 second
    '-c:v', 'h264',                // Video codec of the input video
    '-i', 'rtp://238.0.0.2:48888', // Input stream URL
    '-map', '0:v?',                // Select video from the input stream
    '-c:v', 'libx264',             // Video codec of the output stream
    '-preset', 'ultrafast',        // Faster encoding for lower latency
    '-tune', 'zerolatency',        // Optimize for zero latency
    // '-s', '768x480',            // Adjust the resolution (experiment with values)
    '-f', 'rtp', `rtp://127.0.0.1:${udpPort}` // Output stream URL
]);



As you can see, in this command I re-encode to libx264. But if I give FFmpeg the parameter '-c:v', 'copy' instead of '-c:v', 'libx264', then FFmpeg throws an error saying that it doesn't know how to encode h264 and only knows libx264. Basically, I want to stop the re-encoding because there is really no need for it: the stream is already encoded as H.264. Are there any recommendations?
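
A hedged sketch of the no-re-encode variant (untested, an interpretation rather than a confirmed fix): in ffmpeg, 'h264' names only a decoder; its H.264 encoders are libx264, h264_nvenc and friends, which is why '-c:v h264' is accepted before '-i' as an input decoder hint but rejected as an output codec. Keeping the input hint and stream-copying the output avoids the encode entirely:

return spawn('ffmpeg', [
    '-re',                         // read input at its native frame rate (as in the original)
    '-probesize', '32',            // minimal probing for lower startup latency
    '-analyzeduration', '1000000', // analyze at most 1 second of input
    '-c:v', 'h264',                // input-side decoder hint (unchanged)
    '-i', 'rtp://238.0.0.2:48888', // input stream URL (from the post)
    '-map', '0:v?',                // select video from the input, if present
    '-c:v', 'copy',                // pass the H.264 bitstream through untouched
    '-f', 'rtp', `rtp://127.0.0.1:${udpPort}` // output stream URL
]);

Note that '-preset' and '-tune' are dropped here: they are libx264 encoder options and have no effect with stream copy.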


3) I thought about giving up FFmpeg completely, but the RTP packets arrive at sizes of 1200+ bytes, while WebRTC is limited to about 1280 bytes. Is there a way to manage this without damaging the video, and is it worth entering this world? I guess the whole jitter-buffer story comes into play here.
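
One small, hypothetical knob related to this (not from the original post): ffmpeg's RTP output accepts a pkt_size URL parameter that caps the packet size in bytes, so the output line of the sketch above could become:

    '-f', 'rtp', `rtp://127.0.0.1:${udpPort}?pkt_size=1200` // cap RTP packets at ~1200 bytes for WebRTC

which keeps packets inside the budget WebRTC implementations typically assume, at the cost of sending more of them.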


This is my server side code (THIS IS JUST A TEST CODE)


import {
 MediaStreamTrack,
 randomPort,
 RTCPeerConnection,
 RTCRtpCodecParameters,
 RtpPacket,
} from 'werift'
import {Server} from "ws";
import {createSocket} from "dgram";
import {spawn} from "child_process";
import LoggerFactory from "./logger/loggerFactory";

//

const log = LoggerFactory.getLogger('ServerMedia')

// Websocket server -> WebRTC
const serverPort = 8888
const server = new Server({port: serverPort});
log.info(`Server Media started on port: ${serverPort}`);

// UDP server -> ffmpeg
const udpPort = 48888
const udp = createSocket("udp4");
// udp.bind(udpPort, () => {
// udp.addMembership("238.0.0.2");
// })
udp.bind(udpPort)
log.info(`UDP port: ${udpPort}`)


const createFFmpegProcess = () => {
    log.info(`Start ffmpeg process`)
    return spawn('ffmpeg', [
        '-re',                         // Read input at its native frame rate; important for live-streaming
        '-probesize', '32',            // Set probing size to 32 bytes (32 is the minimum)
        '-analyzeduration', '1000000', // An input analysis duration of 1 second
        '-c:v', 'h264',                // Video codec of the input video
        '-i', 'rtp://238.0.0.2:48888', // Input stream URL
        '-map', '0:v?',                // Select video from the input stream
        '-c:v', 'libx264',             // Video codec of the output stream
        '-preset', 'ultrafast',        // Faster encoding for lower latency
        '-tune', 'zerolatency',        // Optimize for zero latency
        // '-s', '768x480',            // Adjust the resolution (experiment with values)
        '-f', 'rtp', `rtp://127.0.0.1:${udpPort}` // Output stream URL
    ]);
}

let ffmpegProcess = createFFmpegProcess();


const attachFFmpegListeners = () => {
    // Capture standard output and print it
    ffmpegProcess.stdout.on('data', (data) => {
        log.info(`FFMPEG process stdout: ${data}`);
    });

    // Capture standard error and print it
    ffmpegProcess.stderr.on('data', (data) => {
        console.error(`ffmpeg stderr: ${data}`);
    });

    // Listen for the exit event
    ffmpegProcess.on('exit', (code, signal) => {
        if (code !== null) {
            log.info(`ffmpeg process exited with code ${code}`);
        } else if (signal !== null) {
            log.info(`ffmpeg process killed with signal ${signal}`);
        }
    });
};


attachFFmpegListeners();


server.on("connection", async (socket) => {
    const payloadType = 96; // A numerical value assigned to each codec in the SDP offer/answer exchange -> for H264
    // Create a peer connection with the codec parameters set in advance.
    const pc = new RTCPeerConnection({
        codecs: {
            audio: [],
            video: [
                new RTCRtpCodecParameters({
                    mimeType: "video/H264",
                    clockRate: 90000, // 90000 is the default value for H264
                    payloadType: payloadType,
                }),
            ],
        },
    });

    const track = new MediaStreamTrack({kind: "video"});

    udp.on("message", (data) => {
        console.log(data)
        const rtp = RtpPacket.deSerialize(data);
        rtp.header.payloadType = payloadType;
        track.writeRtp(rtp);
    });

    udp.on("error", (err) => {
        console.log(err)
    });

    udp.on("close", () => {
        console.log("close")
    });

    pc.addTransceiver(track, {direction: "sendonly"});

    await pc.setLocalDescription(await pc.createOffer());
    const sdp = JSON.stringify(pc.localDescription);
    socket.send(sdp);

    socket.on("message", (data: any) => {
        if (data.toString() === 'resetFFMPEG') {
            ffmpegProcess.kill('SIGINT');
            log.info(`FFMPEG process killed`)
            setTimeout(() => {
                ffmpegProcess = createFFmpegProcess();
                attachFFmpegListeners();
            }, 5000)
        } else {
            pc.setRemoteDescription(JSON.parse(data));
        }
    });
});



And this is the frontend:





 
 
<script crossorigin src="https://unpkg.com/react@16/umd/react.development.js"></script>
<script crossorigin src="https://unpkg.com/react-dom@16/umd/react-dom.development.js"></script>
<script crossorigin src="https://cdnjs.cloudflare.com/ajax/libs/babel-core/5.8.34/browser.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/babel-regenerator-runtime@6.5.0/runtime.min.js"></script>

<script type="text/babel">
  let rtc;

  const App = () => {
    const [log, setLog] = React.useState([]);
    const videoRef = React.useRef();
    const socket = new WebSocket("ws://localhost:8888");
    const [peer, setPeer] = React.useState(null); // Add state to keep track of the peer connection

    React.useEffect(() => {
      (async () => {
        await new Promise((r) => (socket.onopen = r));
        console.log("open websocket");

        const handleOffer = async (offer) => {
          console.log("new offer", offer.sdp);

          const updatedPeer = new RTCPeerConnection({
            iceServers: [],
            sdpSemantics: "unified-plan",
          });

          updatedPeer.onicecandidate = ({ candidate }) => {
            if (!candidate) {
              const sdp = JSON.stringify(updatedPeer.localDescription);
              console.log(sdp);
              socket.send(sdp);
            }
          };

          updatedPeer.oniceconnectionstatechange = () => {
            console.log("oniceconnectionstatechange", updatedPeer.iceConnectionState);
          };

          updatedPeer.ontrack = (e) => {
            console.log("ontrack", e);
            videoRef.current.srcObject = e.streams[0];
          };

          await updatedPeer.setRemoteDescription(offer);
          const answer = await updatedPeer.createAnswer();
          await updatedPeer.setLocalDescription(answer);

          setPeer(updatedPeer);
        };

        socket.onmessage = (ev) => {
          const data = JSON.parse(ev.data);
          if (data.type === "offer") {
            handleOffer(data);
          } else if (data.type === "resetFFMPEG") {
            // Handle the resetFFMPEG message
            console.log("FFmpeg reset requested");
          }
        };
      })();
    }, []); // Added socket as a dependency to the useEffect hook

    const sendRequestToResetFFmpeg = () => {
      socket.send("resetFFMPEG");
    };

    return (
      <div>
        Video:
        <video ref={videoRef} autoPlay muted />
        <button onClick={() => sendRequestToResetFFmpeg()}>Reset FFMPEG</button>
      </div>
    );
  };

  ReactDOM.render(<App />, document.getElementById("app1"));
</script>