Other articles (64)

  • MediaSPIP version 0.1 Beta

    16 April 2011, by

    MediaSPIP 0.1 beta is the first version of MediaSPIP declared "usable".
    The zip file available here contains only the MediaSPIP sources, in the standalone version.
    To get a working installation, you need to install all of the software dependencies on the server manually.
    If you want to use this archive for an installation in "farm mode", you will also need to make other modifications (...)

  • MediaSPIP 0.1 Beta version

    25 April 2011, by

    MediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
    The zip file provided here only contains the sources of MediaSPIP in its standalone version.
    To get a working installation, you must manually install all the software dependencies on the server.
    If you want to use this archive for an installation in "farm mode", you will also need to make other manual (...)

  • Customising by adding your logo, banner or background image

    5 September 2013, by

    Some themes support three customisation elements: adding a logo; adding a banner; adding a background image.

On other sites (11189)

  • ffmpeg AudioSegment error when getting audio chunks in a Socket.IO server in Python

    26 January 2024, by a_crszkvc30Last_NameCol

    I want to send an audio chunk every minute. This is my test code: I want to save the full audio file and the audio chunk files, then combine the two audio files. The stop button works correctly, but sending the chunks on a timer does not work on the Python server side. Here is the Python server code, using Socket.IO:

    import os
    from io import BytesIO

    from pydub import AudioSegment

    # Socket.IO event handler for the client's 'voice' event
    def handle_voice(sid, data):  # data arrives as a blob
        # Load the audio data in memory using BytesIO
        audio_segment = AudioSegment.from_file(BytesIO(data), format="webm")

        directory = "dddd"
        # directory = str(names_sid.get(sid))
        if not os.path.exists(directory):
            os.makedirs(directory)

        # Save as an audio file
        file_path = os.path.join(directory, f'{sid}.wav')
        audio_segment.export(file_path, format='wav')
        print('audio file saved')


    And here is the client:

    <script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/4.5.2/socket.io.js"></script>

    <script>
        var socket = io('http://127.0.0.1:5000');
        const record = document.getElementById("record")
        const stop = document.getElementById("stop")
        const soundClips = document.getElementById("sound-clips")
        const chkHearMic = document.getElementById("chk-hear-mic")

        const audioCtx = new(window.AudioContext || window.webkitAudioContext)() // define the audio context

        const analyser = audioCtx.createAnalyser()
        // const distortion = audioCtx.createWaveShaper()
        // const gainNode = audioCtx.createGain()
        // const biquadFilter = audioCtx.createBiquadFilter()

        function makeSound(stream) {
            const source = audioCtx.createMediaStreamSource(stream)
            socket.connect()
            source.connect(analyser)
            // analyser.connect(distortion)
            // distortion.connect(biquadFilter)
            // biquadFilter.connect(gainNode)
            // gainNode.connect(audioCtx.destination) // connecting the different audio graph nodes together
            analyser.connect(audioCtx.destination)
        }

        if (navigator.mediaDevices) {
            console.log('getUserMedia supported.')

            const constraints = {
                audio: true
            }
            let chunks = []

            navigator.mediaDevices.getUserMedia(constraints)
                .then(stream => {
                    const mediaRecorder = new MediaRecorder(stream)

                    chkHearMic.onchange = e => {
                        if (e.target.checked == true) {
                            audioCtx.resume()
                            makeSound(stream)
                        } else {
                            audioCtx.suspend()
                        }
                    }

                    record.onclick = () => {
                        mediaRecorder.start(1000)
                        console.log(mediaRecorder.state)
                        console.log("recorder started")
                        record.style.background = "red"
                        record.style.color = "black"
                    }

                    stop.onclick = () => {
                        mediaRecorder.stop()
                        console.log(mediaRecorder.state)
                        console.log("recorder stopped")
                        record.style.background = ""
                        record.style.color = ""
                    }

                    mediaRecorder.onstop = e => {
                        console.log("data available after MediaRecorder.stop() called.")
                        const bb = new Blob(chunks, { 'type': 'audio/wav' })
                        socket.emit('voice', bb)
                        const clipName = prompt("Enter a title for the audio clip.", new Date())

                        const clipContainer = document.createElement('article')
                        const clipLabel = document.createElement('p')
                        const audio = document.createElement('audio')
                        const deleteButton = document.createElement('button')

                        clipContainer.classList.add('clip')
                        audio.setAttribute('controls', '')
                        deleteButton.innerHTML = "Delete"
                        clipLabel.innerHTML = clipName

                        clipContainer.appendChild(audio)
                        clipContainer.appendChild(clipLabel)
                        clipContainer.appendChild(deleteButton)
                        soundClips.appendChild(clipContainer)

                        audio.controls = true
                        const blob = new Blob(chunks, {
                            'type': 'audio/ogg codecs=opus'
                        })

                        chunks = []
                        const audioURL = URL.createObjectURL(blob)
                        audio.src = audioURL
                        console.log("recorder stopped")

                        deleteButton.onclick = e => {
                            evtTgt = e.target
                            evtTgt.parentNode.parentNode.removeChild(evtTgt.parentNode)
                        }
                    }

                    mediaRecorder.ondataavailable = function (e) {
                        chunks.push(e.data)
                        if (chunks.length >= 5) {
                            const bloddb = new Blob(chunks, { 'type': 'audio/wav' })
                            socket.emit('voice', bloddb)

                            chunks = []
                        }
                        mediaRecorder.sendData = function (buffer) {
                            const bloddb = new Blob(buffer, { 'type': 'audio/wav' })
                            socket.emit('voice', bloddb)
                        }
                    };
                })
                .catch(err => {
                    console.log('The following error occurred: ' + err)
                })
        }
    </script>

    Task exception was never retrieved
    future: <Task finished coro=<InstrumentedAsyncServer._handle_event_internal() defined at ...> exception=CouldntDecodeError(...)>
    Traceback (most recent call last):
      File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\socketio\async_admin.py", line 276, in _handle_event_internal
        ret = await self.sio.__handle_event_internal(server, sid, eio_sid,
              ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\socketio\async_server.py", line 597, in _handle_event_internal
        r = await server._trigger_event(data[0], namespace, sid, *data[1:])
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\socketio\async_server.py", line 635, in _trigger_event
        ret = handler(*args)
              ^^^^^^^^^^^^^^
      File "f:\fastapi-socketio-wb38\Python-Javascript-Websocket-Video-Streaming--main\poom2.py", line 153, in handle_voice
        audio_segment = AudioSegment.from_file(BytesIO(data), format="webm")
                        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
      File "f:\fastapi-socketio-wb38\.vent\Lib\site-packages\pydub\audio_segment.py", line 773, in from_file
        raise CouldntDecodeError(
    pydub.exceptions.CouldntDecodeError: Decoding failed. ffmpeg returned error code: 3199971767

    Output from ffmpeg/avlib:

    ffmpeg version 6.1.1-full_build-www.gyan.dev Copyright (c) 2000-2023 the FFmpeg developers
      built with gcc 12.2.0 (Rev10, Built by MSYS2 project)
      configuration: --enable-gpl --enable-version3 --enable-static --pkg-config=pkgconf --disable-w32threads --disable-autodetect --enable-fontconfig --enable-iconv --enable-gnutls --enable-libxml2 --enable-gmp --enable-bzlib --enable-lzma --enable-libsnappy --enable-zlib --enable-librist --enable-libsrt --enable-libssh --enable-libzmq --enable-avisynth --enable-libbluray --enable-libcaca --enable-sdl2 --enable-libaribb24 --enable-libaribcaption --enable-libdav1d --enable-libdavs2 --enable-libuavs3d --enable-libzvbi --enable-librav1e --enable-libsvtav1 --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs2 --enable-libxvid --enable-libaom --enable-libjxl --enable-libopenjpeg --enable-libvpx --enable-mediafoundation --enable-libass --enable-frei0r --enable-libfreetype --enable-libfribidi --enable-libharfbuzz --enable-liblensfun --enable-libvidstab --enable-libvmaf --enable-libzimg --enable-amf --enable-cuda-llvm --enable-cuvid --enable-ffnvcodec --enable-nvdec --enable-nvenc --enable-dxva2 --enable-d3d11va --enable-libvpl --enable-libshaderc --enable-vulkan --enable-libplacebo --enable-opencl --enable-libcdio --enable-libgme --enable-libmodplug --enable-libopenmpt --enable-libopencore-amrwb --enable-libmp3lame --enable-libshine --enable-libtheora --enable-libtwolame --enable-libvo-amrwbenc --enable-libcodec2 --enable-libilbc --enable-libgsm --enable-libopencore-amrnb --enable-libopus --enable-libspeex --enable-libvorbis --enable-ladspa --enable-libbs2b --enable-libflite --enable-libmysofa --enable-librubberband --enable-libsoxr --enable-chromaprint
      libavutil      58. 29.100 / 58. 29.100
      libavcodec     60. 31.102 / 60. 31.102
      libavformat    60. 16.100 / 60. 16.100
      libavdevice    60.  3.100 / 60.  3.100
      libavfilter     9. 12.100 /  9. 12.100
      libswscale      7.  5.100 /  7.  5.100
      libswresample   4. 12.100 /  4. 12.100
      libpostproc    57.  3.100 / 57.  3.100
    [cache @ 000001d9828efe40] Inner protocol failed to seekback end : -40
    [matroska,webm @ 000001d9828efa00] EBML header parsing failed
    [cache @ 000001d9828efe40] Statistics, cache hits:0 cache misses:3
    [in#0 @ 000001d9828da3c0] Error opening input: Invalid data found when processing input
    Error opening input file cache:pipe:0.
    Error opening input files: Invalid data found when processing input


    I'm using ffmpeg-6.1.1-full_build. I don't know why this error occurs: the stop button event is sent correctly, but the chunk data is not handled correctly by the Python server. Sorry for my bad English.

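    A plausible reading of the `EBML header parsing failed` line, offered as an interpretation rather than something stated in the original post: MediaRecorder produces one continuous WebM stream, and only the first blob carries the EBML container header, so a batch of later chunks is not a decodable file on its own. Below is a minimal sketch of the usual workaround, keeping the first fragment and prepending it to later batches; the `first_fragment` store and the event wiring are hypothetical, not from the original code:

    import os
    from io import BytesIO

    import socketio
    from pydub import AudioSegment

    # Hypothetical per-session store for the first MediaRecorder blob,
    # the only one that carries the WebM/EBML header.
    first_fragment = {}

    def handle_voice(sid, data):
        if sid not in first_fragment:
            first_fragment[sid] = data   # the first batch decodes as-is
            blob = data
        else:
            # Later batches lack the container header; prepend the first
            # fragment so ffmpeg can parse them (its audio overlaps the
            # start, so the decoded result may need trimming).
            blob = first_fragment[sid] + data

        audio_segment = AudioSegment.from_file(BytesIO(blob), format="webm")
        os.makedirs("chunks", exist_ok=True)
        audio_segment.export(os.path.join("chunks", f"{sid}.wav"), format="wav")

    # Hypothetical wiring: register the handler for the client's 'voice' event.
    sio = socketio.AsyncServer(async_mode='asgi')
    sio.on('voice', handle_voice)
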

  • How to stop ffmpeg when there's no incoming rtmp stream

    5 July 2016, by M. Irich

    I use ffmpeg together with nginx-rtmp.
    The problem is that ffmpeg doesn't exit once the stream has finished.

    I use the following command:

    ffmpeg  -i 'rtmp://localhost:443/live/test' -loglevel debug  -c:a libfdk_aac -b:a 192k -c:v libx264 -profile baseline -preset superfast -tune zerolatency -b:v 2500k -maxrate 4500k -minrate 1500k -bufsize 9000k -keyint_min 15 -g 15 -f dash -use_timeline 1 -use_template 1 -min_seg_duration 5000 -y /tmp/dash/test/test.mpd

    but even when the stream is not running, ffmpeg still doesn't finish the process and keeps waiting for the rtmp stream:

    Successfully parsed a group of options.
    Opening an input file: rtmp://localhost:443/live/test.
    [rtmp @ 0x2ba2160] No default whitelist set
    [tcp @ 0x2ba2720] No default whitelist set
    [rtmp @ 0x2ba2160] Handshaking...
    [rtmp @ 0x2ba2160] Type answer 3
    [rtmp @ 0x2ba2160] Server version 13.14.10.13
    [rtmp @ 0x2ba2160] Proto = rtmp, path = /live/test, app = live, fname = test
    [rtmp @ 0x2ba2160] Server bandwidth = 5000000
    [rtmp @ 0x2ba2160] Client bandwidth = 5000000
    [rtmp @ 0x2ba2160] New incoming chunk size = 4096
    [rtmp @ 0x2ba2160] Creating stream...
    [rtmp @ 0x2ba2160] Sending play command for 'test'

    Is it possible to limit the waiting time to a few seconds?

    Sorry for any possible mistakes; English is not my native language.
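
    A hedged note, not part of the original question: ffmpeg has a generic `-rw_timeout` protocol option (in microseconds) that aborts a network read once it stalls longer than the given time, which should make the process exit when the RTMP server stops delivering data. Support varies by ffmpeg version and protocol, so treat it as a sketch; for example, to give up after 10 seconds without data:

    ffmpeg -rw_timeout 10000000 -i 'rtmp://localhost:443/live/test' ... (remaining options as in the command above)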

  • How to Stream RTP (IP camera) Into React App setup

    10 November 2024, by sharon2469

    I am trying to transfer a live broadcast from an IP camera, or any other RTP/RTSP source, to my React application. BUT it MUST BE LIVE.

    My setup at the moment is:

    IP Camera -> (RTP) -> FFmpeg -> (UDP) -> Server (Node.js) -> (WebRTC) -> React app


    In the current situation there is almost no delay, but there are a few things here that I can't avoid and don't understand why, so here are my questions:

    1) First, is this setup even correct, and is it the only way to stream RTP video into a web app?

    2) Is it possible to avoid re-encoding the stream? The RTP transmission already arrives as H.264, so I don't really need to execute the following command:

    return spawn('ffmpeg', [
        '-re',                              // Read input at its native frame rate Important for live-streaming
        '-probesize', '32',                 // Set probing size to 32 bytes (32 is minimum)
        '-analyzeduration', '1000000',      // An input duration of 1 second
        '-c:v', 'h264',                     // Video codec of input video
        '-i', 'rtp://238.0.0.2:48888',      // Input stream URL
        '-map', '0:v?',                     // Select video from input stream
        '-c:v', 'libx264',                  // Video codec of output stream
        '-preset', 'ultrafast',             // Faster encoding for lower latency
        '-tune', 'zerolatency',             // Optimize for zero latency
        // '-s', '768x480',                 // Adjust the resolution (experiment with values)
        '-f', 'rtp', `rtp://127.0.0.1:${udpPort}` // Output stream URL
    ]);


    As you can see, this command re-encodes to libx264. But if I give FFmpeg the parameter '-c:v', 'copy' instead of '-c:v', 'libx264', FFmpeg throws an error saying that it doesn't know how to encode h264 and only knows libx264. Basically, I want to stop the re-encoding because there is really no need for it: the stream is already encoded as H.264. Are there any recommendations?

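    A hedged sketch, not from the original post: the error described usually means `h264` ended up selected as an encoder; ffmpeg ships an `h264` decoder, but its only built-in H.264 encoder is `libx264`. With stream copy no encoder is involved, so the output codec must be `copy`, placed after `-i` (`-preset` and `-tune` then no longer apply, and `-re` is usually unnecessary for a live input). As a plain command line with the same addresses as above:

    ffmpeg -probesize 32 -analyzeduration 1000000 -i rtp://238.0.0.2:48888 -map 0:v? -c:v copy -f rtp rtp://127.0.0.1:48888
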

    3) I thought about giving up FFmpeg completely, but the RTP packets arrive at sizes of 1200+ bytes while WebRTC is limited to packets of up to 1280 bytes. Is there a way to manage this without damaging the video, and is it worth entering that world? I guess the whole jitter-buffer story comes into play here.

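    A hedged aside on the packet sizes, not from the original post: if FFmpeg stays in the pipeline, its RTP/UDP outputs accept a `pkt_size` URL option that caps the datagram size, which can help keep packets under a WebRTC-friendly MTU; the exact value is something to experiment with, e.g.:

    ... -f rtp 'rtp://127.0.0.1:48888?pkt_size=1200'
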

    This is my server-side code (THIS IS JUST TEST CODE):

    import {
        MediaStreamTrack,
        randomPort,
        RTCPeerConnection,
        RTCRtpCodecParameters,
        RtpPacket,
    } from 'werift'
    import { Server } from "ws";
    import { createSocket } from "dgram";
    import { spawn } from "child_process";
    import LoggerFactory from "./logger/loggerFactory";

    const log = LoggerFactory.getLogger('ServerMedia')

    // Websocket server -> WebRTC
    const serverPort = 8888
    const server = new Server({ port: serverPort });
    log.info(`Server Media start om port: ${serverPort}`);

    // UDP server -> ffmpeg
    const udpPort = 48888
    const udp = createSocket("udp4");
    // udp.bind(udpPort, () => {
    //     udp.addMembership("238.0.0.2");
    // })
    udp.bind(udpPort)
    log.info(`UDP port: ${udpPort}`)

    const createFFmpegProcess = () => {
        log.info(`Start ffmpeg process`)
        return spawn('ffmpeg', [
            '-re',                              // Read input at its native frame rate Important for live-streaming
            '-probesize', '32',                 // Set probing size to 32 bytes (32 is minimum)
            '-analyzeduration', '1000000',      // An input duration of 1 second
            '-c:v', 'h264',                     // Video codec of input video
            '-i', 'rtp://238.0.0.2:48888',      // Input stream URL
            '-map', '0:v?',                     // Select video from input stream
            '-c:v', 'libx264',                  // Video codec of output stream
            '-preset', 'ultrafast',             // Faster encoding for lower latency
            '-tune', 'zerolatency',             // Optimize for zero latency
            // '-s', '768x480',                 // Adjust the resolution (experiment with values)
            '-f', 'rtp', `rtp://127.0.0.1:${udpPort}` // Output stream URL
        ]);
    }

    let ffmpegProcess = createFFmpegProcess();

    const attachFFmpegListeners = () => {
        // Capture standard output and print it
        ffmpegProcess.stdout.on('data', (data) => {
            log.info(`FFMPEG process stdout: ${data}`);
        });

        // Capture standard error and print it
        ffmpegProcess.stderr.on('data', (data) => {
            console.error(`ffmpeg stderr: ${data}`);
        });

        // Listen for the exit event
        ffmpegProcess.on('exit', (code, signal) => {
            if (code !== null) {
                log.info(`ffmpeg process exited with code ${code}`);
            } else if (signal !== null) {
                log.info(`ffmpeg process killed with signal ${signal}`);
            }
        });
    };

    attachFFmpegListeners();

    server.on("connection", async (socket) => {
        const payloadType = 96; // Numerical value assigned to the codec in the SDP offer/answer exchange -> for H264
        // Create a peer connection with the codec parameters set in advance.
        const pc = new RTCPeerConnection({
            codecs: {
                audio: [],
                video: [
                    new RTCRtpCodecParameters({
                        mimeType: "video/H264",
                        clockRate: 90000, // 90000 is the default value for H264
                        payloadType: payloadType,
                    }),
                ],
            },
        });

        const track = new MediaStreamTrack({ kind: "video" });

        udp.on("message", (data) => {
            console.log(data)
            const rtp = RtpPacket.deSerialize(data);
            rtp.header.payloadType = payloadType;
            track.writeRtp(rtp);
        });

        udp.on("error", (err) => {
            console.log(err)
        });

        udp.on("close", () => {
            console.log("close")
        });

        pc.addTransceiver(track, { direction: "sendonly" });

        await pc.setLocalDescription(await pc.createOffer());
        const sdp = JSON.stringify(pc.localDescription);
        socket.send(sdp);

        socket.on("message", (data: any) => {
            if (data.toString() === 'resetFFMPEG') {
                ffmpegProcess.kill('SIGINT');
                log.info(`FFMPEG process killed`)
                setTimeout(() => {
                    ffmpegProcess = createFFmpegProcess();
                    attachFFmpegListeners();
                }, 5000)
            } else {
                pc.setRemoteDescription(JSON.parse(data));
            }
        });
    });


    And this is the frontend:


    <script crossorigin src="https://unpkg.com/react@16/umd/react.development.js"></script>
    <script crossorigin src="https://unpkg.com/react-dom@16/umd/react-dom.development.js"></script>
    <script crossorigin src="https://cdnjs.cloudflare.com/ajax/libs/babel-core/5.8.34/browser.min.js"></script>
    <script src="https://cdn.jsdelivr.net/npm/babel-regenerator-runtime@6.5.0/runtime.min.js"></script>

    <script type="text/babel">
        let rtc;

        const App = () => {
            const [log, setLog] = React.useState([]);
            const videoRef = React.useRef();
            const socket = new WebSocket("ws://localhost:8888");
            const [peer, setPeer] = React.useState(null); // Add state to keep track of the peer connection

            React.useEffect(() => {
                (async () => {
                    await new Promise((r) => (socket.onopen = r));
                    console.log("open websocket");

                    const handleOffer = async (offer) => {
                        console.log("new offer", offer.sdp);

                        const updatedPeer = new RTCPeerConnection({
                            iceServers: [],
                            sdpSemantics: "unified-plan",
                        });

                        updatedPeer.onicecandidate = ({ candidate }) => {
                            if (!candidate) {
                                const sdp = JSON.stringify(updatedPeer.localDescription);
                                console.log(sdp);
                                socket.send(sdp);
                            }
                        };

                        updatedPeer.oniceconnectionstatechange = () => {
                            console.log(
                                "oniceconnectionstatechange",
                                updatedPeer.iceConnectionState
                            );
                        };

                        updatedPeer.ontrack = (e) => {
                            console.log("ontrack", e);
                            videoRef.current.srcObject = e.streams[0];
                        };

                        await updatedPeer.setRemoteDescription(offer);
                        const answer = await updatedPeer.createAnswer();
                        await updatedPeer.setLocalDescription(answer);

                        setPeer(updatedPeer);
                    };

                    socket.onmessage = (ev) => {
                        const data = JSON.parse(ev.data);
                        if (data.type === "offer") {
                            handleOffer(data);
                        } else if (data.type === "resetFFMPEG") {
                            // Handle the resetFFMPEG message
                            console.log("FFmpeg reset requested");
                        }
                    };
                })();
            }, []); // Added socket as a dependency to the useEffect hook

            const sendRequestToResetFFmpeg = () => {
                socket.send("resetFFMPEG");
            };

            return (
                <div>
                    Video:
                    <video ref={videoRef} autoPlay muted />
                    <button onClick={() => sendRequestToResetFFmpeg()}>Reset FFMPEG</button>
                </div>
            );
        };

        ReactDOM.render(<App />, document.getElementById("app1"));
    </script>
