

Other articles (97)

  • APPENDIX: Plugins used specifically for the farm

    5 March 2010

    The central/master site of the farm needs several additional plugins, compared with the channel sites, to work properly: the Gestion de la mutualisation plugin; the inscription3 plugin, to manage registrations and requests for the creation of a mutualisation instance right from user sign-up; the verifier plugin, which provides a field-validation API (used by inscription3); the champs extras v2 plugin, required by inscription3 (...)

  • Enhancing it visually

    10 April 2011

    MediaSPIP is based on a system of themes and squelettes (templates). The squelettes define how information is laid out on the page, defining a specific use of the platform, while the themes define the overall graphic design.
    Anyone can propose a new graphic theme or a squelette and make it available to the community.

  • Authorizations overridden by plugins

    27 April 2010

    Mediaspip core
    autoriser_auteur_modifier() so that visitors are able to modify their information on the authors page

On other sites (8105)

  • Main process is held by ffmpeg command

    6 October 2024, by Michael Lopez

    I created a Python program to handle my Arlo cameras. To do that, I have been using the pyaarlo library (https://github.com/twrecked/pyaarlo) to catch the cameras' events.
The goal is to monitor whether there is an active stream on the cameras, get the RTSP stream URL and restream it to an HLS playlist for local usage.

    


    Here is the Python code:

    


import asyncio
from decouple import config
import logging
from my_pyaarlo import PyArlo
import urllib.parse
from queue import Queue
import signal

# Read config from ENV (unchanged)
ARLO_USER = config('ARLO_USER')
ARLO_PASS = config('ARLO_PASS')
IMAP_HOST = config('IMAP_HOST')
IMAP_USER = config('IMAP_USER')
IMAP_PASS = config('IMAP_PASS')
DEBUG = config('DEBUG', default=False, cast=bool)
PYAARLO_BACKEND = config('PYAARLO_BACKEND', default=None)
PYAARLO_REFRESH_DEVICES = config('PYAARLO_REFRESH_DEVICES', default=0, cast=int)
PYAARLO_STREAM_TIMEOUT = config('PYAARLO_STREAM_TIMEOUT', default=0, cast=int)
PYAARLO_STORAGE_DIR = config('PYAARLO_STORAGE_DIR', default=None)
PYAARLO_ECDH_CURVE = config('PYAARLO_ECDH_CURVE', default=None)

# Initialize logging
logging.basicConfig(
    level=logging.DEBUG if DEBUG else logging.INFO,
    format='%(asctime)s [%(levelname)s] %(name)s: %(message)s'
)
logger = logging.getLogger(__name__)

ffmpeg_processes = {}
event_queue = Queue()
shutdown_event = asyncio.Event()

async def handle_idle_event(camera):
    logger.info(f"Idle event detected for camera: {camera.name}")
    await stop_ffmpeg_stream(camera.name)

async def get_stream_url(camera):
    try:
        # Attempt to get the stream URL
        stream_url = await asyncio.to_thread(camera.get_stream)
        if stream_url:
            return stream_url
        else:
            logger.warning(f"Unable to get stream URL for {camera.name}. Stream might not be active.")
            return None
    except Exception as e:
        logger.error(f"Error getting stream URL for {camera.name}: {e}")
        return None

async def handle_user_stream_active_event(camera):
    logger.info(f"User stream active event detected for camera: {camera.name}")

    # Get the stream URL
    stream_url = await get_stream_url(camera)
    if stream_url:
        logger.info(f"Stream URL for {camera.name}: {stream_url}")
        await start_ffmpeg_stream(camera.name, stream_url)
    else:
        logger.warning(f"No stream URL available for {camera.name}")

async def event_handler(device, attr, value):
    logger.debug(f"Event: {device.name}, Attribute: {attr}, Value: {value}")
    if attr == 'activityState':
        if value == 'idle':
            await handle_idle_event(device)
        elif value in ['userStreamActive']:
            await handle_user_stream_active_event(device)
    elif attr == 'mediaUploadNotification':
        logger.info(f"Media uploaded for camera: {device.name}")

def sync_event_handler(device, attr, value):
    # This function will be called by PyArlo's synchronous callbacks
    event_queue.put((device, attr, value))

async def process_event_queue():
    while not shutdown_event.is_set():
        try:
            if not event_queue.empty():
                device, attr, value = event_queue.get()
                await event_handler(device, attr, value)
            await asyncio.sleep(0.1)  # Small delay to prevent busy-waiting
        except asyncio.CancelledError:
            break
        except Exception as e:
            logger.error(f"Error processing event: {e}")

async def display_status(arlo):
    while not shutdown_event.is_set():
        print("\n--- Camera Statuses ---")
        for camera in arlo.cameras:
            print(f"{camera.name}: {camera.state}")
        print("------------------------")
        await asyncio.sleep(5)

async def start_ffmpeg_stream(camera_name, stream_url):
    if camera_name not in ffmpeg_processes:
        output_hls = f"/tmp/{camera_name}.m3u8"

        try:
            new_url = urllib.parse.quote(stream_url.encode(), safe=':/?&=')
            logger.info(f"NEW_URL: {new_url}")

            ffmpeg_cmd = [
                "ffmpeg", "-hide_banner", "-loglevel", "quiet", "-nostats", "-nostdin", "-y", "-re",
                "-i", new_url,
                "-c:v", "libx264", "-preset", "veryfast",
                "-an", "-sn",
                "-f", "hls", "-hls_time", "4", "-hls_list_size", "10",
                "-hls_flags", "delete_segments", output_hls,
            ]
            logger.info(f"Starting FFmpeg command: {ffmpeg_cmd}")
            
            process = await asyncio.create_subprocess_exec(
                *ffmpeg_cmd,
                stdout=asyncio.subprocess.DEVNULL,
                stderr=asyncio.subprocess.DEVNULL
            )
            ffmpeg_processes[camera_name] = process
            logger.info(f"Started ffmpeg process with PID: {process.pid}")

        except Exception as e:
            logger.error(f"Error starting FFmpeg for {camera_name}: {e}")

async def stop_ffmpeg_stream(camera_name):
    logger.info(f"Stopping ffmpeg process for {camera_name}")
    ffmpeg_process = ffmpeg_processes.pop(camera_name, None)
    if ffmpeg_process:
        ffmpeg_process.terminate()

        try:
            await ffmpeg_process.wait()
            logger.info(f"{camera_name} stopped successfully")
        except Exception as e:
            print(f"FFMPEG Process didn't stop in time, forcefully terminating: {e}")
            ffmpeg_process.kill()
    else:
        logger.info(f"FFmpeg process for {camera_name} already stopped")

async def shutdown(signal, loop):
    logger.info(f"Received exit signal {signal.name}...")
    shutdown_event.set()
    tasks = [t for t in asyncio.all_tasks() if t is not asyncio.current_task()]
    [task.cancel() for task in tasks]
    logger.info(f"Cancelling {len(tasks)} outstanding tasks")
    await asyncio.gather(*tasks, return_exceptions=True)
    loop.stop()

async def main():
    # Initialize PyArlo
    arlo_args = {
        'username': ARLO_USER,
        'password': ARLO_PASS,
        'tfa_source': 'imap',
        'tfa_type': 'email',
        'tfa_host': IMAP_HOST,
        'tfa_username': IMAP_USER,
        'tfa_password': IMAP_PASS,
        'save_session': True,
        'verbose_debug': DEBUG
    }

    # Add optional arguments
    for arg, value in [
        ('refresh_devices_every', PYAARLO_REFRESH_DEVICES),
        ('stream_timeout', PYAARLO_STREAM_TIMEOUT),
        ('backend', PYAARLO_BACKEND),
        ('storage_dir', PYAARLO_STORAGE_DIR),
        ('ecdh_curve', PYAARLO_ECDH_CURVE)
    ]:
        if value:
            arlo_args[arg] = value
    
    try:
        arlo = await asyncio.to_thread(PyArlo, **arlo_args)
    except Exception as e:
        logger.error(f"Failed to initialize PyArlo: {e}")
        return

    logger.info("Connected to Arlo. Monitoring events...")

    # Register event handlers for each camera
    for camera in arlo.cameras:
        camera.add_attr_callback('*', sync_event_handler)

    # Start the status display task
    status_task = asyncio.create_task(display_status(arlo))

    # Start the event processing task
    event_processing_task = asyncio.create_task(process_event_queue())

    # Set up signal handlers
    loop = asyncio.get_running_loop()
    for s in (signal.SIGHUP, signal.SIGTERM, signal.SIGINT):
        loop.add_signal_handler(
            s, lambda s=s: asyncio.create_task(shutdown(s, loop)))

    try:
        # Keep the main coroutine running
        while not shutdown_event.is_set():
            try:
                await asyncio.sleep(1)
            except asyncio.CancelledError:
                break
    except Exception as e:
        logger.error(f"Unexpected error in main loop: {e}")
    finally:
        logger.info("Shutting down...")
        for camera_name in list(ffmpeg_processes.keys()):
            await stop_ffmpeg_stream(camera_name)
        
        # Cancel and wait for all tasks
        tasks = [status_task, event_processing_task]
        for task in tasks:
            if not task.done():
                task.cancel()
        await asyncio.gather(*tasks, return_exceptions=True)
        
        logger.info("Program terminated.")

if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        logger.info("Keyboard interrupt received. Exiting.")
    except Exception as e:
        logger.error(f"Unhandled exception: {e}")
    finally:
        logger.info("Program exit complete.")


    


    My issue is that the ffmpeg command holds the main process (or the event loop) while it runs, blocking the events coming from the pyaarlo library. The camera state, however, keeps updating with the correct information.
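
    One way to check whether the subprocess itself is what blocks the loop is a minimal heartbeat test next to the same kind of ffmpeg call (a sketch only; RTSP_URL is a placeholder, not a real camera URL):

import asyncio

RTSP_URL = "rtsp://example.invalid/stream"  # placeholder URL

async def heartbeat():
    # If this stops printing while ffmpeg runs, the loop itself is blocked.
    while True:
        print("event loop is alive")
        await asyncio.sleep(1)

async def check():
    hb = asyncio.create_task(heartbeat())
    proc = await asyncio.create_subprocess_exec(
        "ffmpeg", "-hide_banner", "-loglevel", "quiet", "-re",
        "-i", RTSP_URL,
        "-c:v", "libx264", "-preset", "veryfast",
        "-f", "hls", "-hls_time", "4", "/tmp/heartbeat_test.m3u8",
        stdout=asyncio.subprocess.DEVNULL,
        stderr=asyncio.subprocess.DEVNULL,
    )
    await asyncio.sleep(15)   # the heartbeat should keep printing during this window
    proc.terminate()
    await proc.wait()
    hb.cancel()

asyncio.run(check())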

    


    I tried a lot of things: without asyncio, with multiprocessing, with subprocess, ... the behavior is always the same. In some cases, I received the idle event only after the keyboard interrupt.
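
    One variant of the multiprocessing approach is to isolate ffmpeg in a separate OS process, so the asyncio loop in the main program never waits on it; a sketch (the function names, URL and paths are illustrative only):

import multiprocessing as mp
import subprocess

def _run_ffmpeg(url, output_hls, stop_event):
    # Runs in a child process; anything that blocks here never touches the parent's loop.
    proc = subprocess.Popen(
        ["ffmpeg", "-hide_banner", "-loglevel", "quiet", "-re",
         "-i", url,
         "-c:v", "libx264", "-preset", "veryfast",
         "-an", "-sn",
         "-f", "hls", "-hls_time", "4", "-hls_list_size", "10",
         "-hls_flags", "delete_segments", output_hls],
        stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    stop_event.wait()          # block in the child until asked to stop
    proc.terminate()
    proc.wait()

def start_stream(url, output_hls):
    stop_event = mp.Event()
    worker = mp.Process(target=_run_ffmpeg, args=(url, output_hls, stop_event), daemon=True)
    worker.start()
    return worker, stop_event

def stop_stream(worker, stop_event):
    stop_event.set()
    worker.join(timeout=10)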

    


    Some additional information:

    • When I stop the active stream, the event is not received; but when I start the stream again right after, that event is received.

    • When I run the same ffmpeg command with a long local video file, everything is OK, which is why I suspect the ffmpeg command is impacting the main process.
    


    I succeeded in running the ffmpeg command with an RTSP stream URL, but without the event loop monitoring:

    


import asyncio
import signal
import sys
import os

async def run_infinite_command():
    # Start ffmpeg as our "infinite" command
    url = "rtsp://localhost:8554/camera1/stream"  # it is a fake url
    ffmpeg_cmd = [
        "ffmpeg", "-re", "-i", url,
        "-c:v", "libx264", "-preset", "veryfast",
        "-c:a", "copy",
        "-f", "hls", "-hls_time", "4", "-hls_list_size", "10",
        "-hls_flags", "delete_segments", "/tmp/output.m3u8"
    ]
    
    process = await asyncio.create_subprocess_exec(
        *ffmpeg_cmd,
        stdout=asyncio.subprocess.DEVNULL,
        stderr=asyncio.subprocess.DEVNULL
    )
    
    print(f"Started ffmpeg with PID: {process.pid}")
    return process

async def main():
    # Start the infinite command
    process = await run_infinite_command()

    # Run the main loop for a few seconds
    for i in range(10):
        print(f"Main loop iteration {i+1}")
        await asyncio.sleep(1)

    # Stop the infinite command
    print("Stopping ffmpeg...")
    if sys.platform == "win32":
        # On Windows, we need to use CTRL_C_EVENT
        os.kill(process.pid, signal.CTRL_C_EVENT)
    else:
        # On Unix-like systems, we can use SIGTERM
        process.send_signal(signal.SIGTERM)

    # Wait for the process to finish
    try:
        await asyncio.wait_for(process.wait(), timeout=5.0)
        print("ffmpeg stopped successfully")
    except asyncio.TimeoutError:
        print("ffmpeg didn't stop in time, forcefully terminating")
        process.kill()

    print("Program finished")

if __name__ == "__main__":
    asyncio.run(main())


    


    With this script, the ffmpeg command is correctly launched and terminated after the for loop.

    


    Could you help?

    


  • FFmpeg: what is the correct way to manually write silence through pipe:0?

    19 July 2023, by Bohdan Petrenko

    I have an ffmpeg process running with these parameters:

    


    ffmpeg -y -f s16le -ac {Channels} -ar 48000 -re -use_wallclock_as_timestamps true -i pipe:0 -f segment -segment_time {_segmentSize} -segment_list \"{_segmentListPath}\" -segment_format mp3 -segment_wrap 2 -reset_timestamps 0 -af aresample=async=1 \"{_filePath}\"


    


    I also have a DateTimeOffset which represents the time when the recording was started. When an FFmpeg process is created, I need to add an amount of silence equal to the delay between the current time and the time the recording started. This delay may be bigger than the ffmpeg segments, so I calculate it relative to the time when the last ffmpeg segment should begin.
I store the silence in a static byte array whose length equals two ffmpeg segments:

    


    _silenceBuffer ??= new byte[_segmentSize * 2 * Channels * SampleRate * 2];
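
    For reference, the same byte math as a small Python sketch (2 bytes per sample for s16le; the Channels and _segmentSize values below are examples, not taken from the post):

BYTES_PER_SAMPLE = 2            # s16le
CHANNELS = 2                    # example value for {Channels}
SAMPLE_RATE = 48000
SEGMENT_SIZE = 10               # example value for {_segmentSize}, in seconds

# Buffer sized for two segments, as in the allocation above:
silence_buffer_len = SEGMENT_SIZE * 2 * CHANNELS * SAMPLE_RATE * BYTES_PER_SAMPLE

# Bytes of silence corresponding to a delay given in milliseconds:
def silence_bytes(delay_ms):
    return int(delay_ms * CHANNELS * SAMPLE_RATE * BYTES_PER_SAMPLE / 1000)

print(silence_buffer_len)       # 3840000 bytes for 2 x 10 s of 48 kHz stereo
print(silence_bytes(2500))      # 480000 bytes for a 2.5 s delay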


    


    I tried two ways of writing the silence:

    


    The first code I tried is this:

    


var delay = DateTimeOffset.UtcNow - RecordingStartDateTime;

var time = CalculateRelativeMilliseconds(delay.TotalMilliseconds); // this returns time based on current segment. It works fine.

var amount = (int)(time * 2 * Channels * SampleRate / 1000);

WriterStream.Write(_silenceBuffer, 0, amount);


    


    As a result, I get very loud noise everywhere in the ffmpeg output. It breaks the audio, so this approach doesn't work for me.

    


    The second code I tried is this:

    


var delay = DateTimeOffset.UtcNow - RecordingStartDateTime;

var time = CalculateRelativeMilliseconds(delay.TotalMilliseconds); // this returns time based on current segment. It works fine.

var amount = (int)time * 2 * Channels * SampleRate / 1000;

WriterStream.Write(_silenceBuffer, 0, amount);


    


    The difference between the first and the second code is that I now cast only time to int, not the result of the whole expression. But it also doesn't work: this time there is no silence at the beginning, and the recording begins directly with the voice data I piped after writing the silence. But if I use this ffmpeg command:

    


    ffmpeg -y -f s16le -ac {Channels} -ar 48000 -i pipe:0 -f segment -segment_time {_segmentSize} -segment_list \"{_segmentListPath}\" -segment_format mp3 -segment_wrap 2 -reset_timestamps 0 \"{_filePath}\"


    


    Then it works as expected: the recording begins with the silence I need, followed by the voice data I piped.
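
    The two casts do produce slightly different byte counts; a quick Python sketch of the arithmetic (the frame-boundary check at the end is an added observation, not something established above):

CHANNELS = 2                 # example value for {Channels}
SAMPLE_RATE = 48000
time_ms = 1234.56            # example delay in milliseconds

cast_result = int(time_ms * 2 * CHANNELS * SAMPLE_RATE / 1000)   # first attempt
cast_time = int(time_ms) * 2 * CHANNELS * SAMPLE_RATE // 1000    # second attempt

frame = CHANNELS * 2         # bytes per interleaved s16le frame
print(cast_result, cast_result % frame)   # 237035, 3 -> not on a frame boundary
print(cast_time, cast_time % frame)       # 236928, 0 -> whole frames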

    


    So, how can I manually calculate and write silence to my ffmpeg instance? Is there some universal way of calculating and writing silence that will work with any ffmpeg command? I don't want to use filters or additional ffmpeg instances to offset the piped voice data, because I only do this once per session. I think I can write the silence with byte arrays. I look forward to any suggestions.

    


  • avformat_seek_file timestamps not using the correct time base

    19 June 2021, by Charlie

    I am in the process of creating a memory loader for ffmpeg to add more functionality. I have audio playing and working, but I am having an issue with the avformat_seek_file timestamps using the wrong time base.

    


    avformat.avformat_seek_file(file.context, -1, 0, timestamp, timestamp, 0)


    


    From looking at the docs, if the stream index is -1 the timestamp should be based on AV_TIME_BASE. When I load the file through avformat_open_input with a null AVFormatContext and a filename, this works as expected.
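
    For reference, AV_TIME_BASE is 1,000,000 in FFmpeg, so a position in seconds has to be scaled before the call; a small sketch reusing the avformat_seek_file wrapper shown above (the helper and its arguments are illustrative only):

AV_TIME_BASE = 1_000_000     # FFmpeg's internal time base (microseconds)

def seek_seconds(ctx, seconds, stream_index=-1, stream_time_base=None):
    if stream_index == -1:
        ts = int(seconds * AV_TIME_BASE)      # units of AV_TIME_BASE
    else:
        num, den = stream_time_base           # that stream's time_base, e.g. (1, 44100)
        ts = int(seconds * den / num)         # units of the stream's time base
    return avformat.avformat_seek_file(ctx, stream_index, 0, ts, ts, 0)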

    


    However, when I create my own AVIOContext and AVFormatContext through avio_alloc_context and avformat_alloc_context respectively, the timestamps are no longer based on AV_TIME_BASE. When testing, I got an access violation the first time I tried seeking, and upon investigating, it seems that the timestamps are now based on actual seconds. How can I make these custom contexts use timestamps based on AV_TIME_BASE?

    


    The only difference between the two is the custom loading of the AVIOContext and AVFormatContext:

    


    data = fileobject.read()

    ld = len(data)

    buf = libavutil.avutil.av_malloc(ld)
    ptr_buf = cast(buf, c_char_p)

    ptr = ctypes.create_string_buffer(ld)
    memmove(ptr, data, ld)

    seeker = libavformat.ffmpeg_seek_func(seek_data)
    reader = libavformat.ffmpeg_read_func(read_data)
    writer = libavformat.ffmpeg_read_func(write_data)

    format = libavformat.avformat.avio_alloc_context(ptr_buf, buf_size, 0,
                                                     ptr_data,
                                                     reader,
                                                     writer,
                                                     seeker
                                                     )

    file.context = libavformat.avformat.avformat_alloc_context()
    file.context.contents.pb = format
    file.context.contents.flags |= AVFMT_FLAG_CUSTOM_IO

    result = avformat.avformat_open_input(byref(file.context),
                                          b"",
                                          None,
                                          None)

    if result != 0:
        raise FFmpegException('avformat_open_input in ffmpeg_open_filename returned an error opening file '
                              + filename.decode("utf8")
                              + ' Error code: ' + str(result))

    result = avformat.avformat_find_stream_info(file.context, None)
    if result < 0:
        raise FFmpegException('Could not find stream info')

    return file



    


    Here is the filename code that does work:

    


    result = avformat.avformat_open_input(byref(file.context),
                                          filename,
                                          None,
                                          None)
    if result != 0:
        raise FFmpegException('avformat_open_input in ffmpeg_open_filename returned an error opening file '
                              + filename.decode("utf8")
                              + ' Error code: ' + str(result))

    result = avformat.avformat_find_stream_info(file.context, None)
    if result < 0:
        raise FFmpegException('Could not find stream info')

    return file


    


    I am new to ffmpeg, but any help fixing this discrepancy is greatly appreciated.