
Other articles (59)
-
List of compatible distributions
26 April 2011 — The table below lists the Linux distributions compatible with the automated installation script of MediaSPIP.
Distribution name | Version name | Version number
Debian | Squeeze | 6.x.x
Debian | Wheezy | 7.x.x
Debian | Jessie | 8.x.x
Ubuntu | The Precise Pangolin | 12.04 LTS
Ubuntu | The Trusty Tahr | 14.04
If you want to help us improve this list, you can give us access to a machine whose distribution is not mentioned above, or send us the necessary fixes to add (...) -
Publish on MédiaSpip
13 June 2013 — Can I post content from an iPad tablet?
Yes, if your installed Médiaspip is at version 0.2 or higher. If needed, contact the administrator of your MédiaSpip to find out. -
Submit enhancements and plugins
13 April 2011 — If you have developed a new extension that adds one or more useful features to MediaSPIP, let us know, and its integration into the core MediaSPIP functionality will be considered.
You can use the development discussion list to request help with creating a plugin. Since MediaSPIP is based on SPIP, you can also use the SPIP discussion list, SPIP-Zone.
On other sites (7769)
-
ffmpeg pipe process ends right after writing first buffer data to input stream and does not keep running
6 May, by Taketo Matsunaga — I have been trying to convert 16-bit PCM (s16le) audio data to WebM using ffmpeg in C#.
But the process ends right after writing the first buffer of data to standard input.
It exits with status 0, meaning success, but I do not know why.
Could anyone tell me why?


I would appreciate it if you could support me.


public class SpeechService : ISpeechService
 {
 
 /// <summary>
 /// Defines the _audioInputStream
 /// </summary>
 private readonly MemoryStream _audioInputStream = new MemoryStream();

 public async Task SendPcmAsWebmViaWebSocketAsync(
 MemoryStream pcmAudioStream,
 int sampleRate,
 int channels) 
 {
 string inputFormat = "s16le";

 var ffmpegProcessInfo = new ProcessStartInfo
 {
 FileName = _ffmpegPath,
 Arguments =
 $"-f {inputFormat} -ar {sampleRate} -ac {channels} -i pipe:0 " +
 $"-f webm pipe:1",
 RedirectStandardInput = true,
 RedirectStandardOutput = true,
 RedirectStandardError = true,
 UseShellExecute = false,
 CreateNoWindow = true,
 };

 _ffmpegProcess = new Process { StartInfo = ffmpegProcessInfo };

 Console.WriteLine("Starting FFmpeg process...");
 try
 {

 if (!await Task.Run(() => _ffmpegProcess.Start()))
 {
 Console.Error.WriteLine("Failed to start FFmpeg process.");
 return;
 }
 Console.WriteLine("FFmpeg process started.");

 }
 catch (Exception ex)
 {
 Console.Error.WriteLine($"Error starting FFmpeg process: {ex.Message}");
 throw;
 }

 var encodeAndSendTask = Task.Run(async () =>
 {
 try
 {
 using var ffmpegOutputStream = _ffmpegProcess.StandardOutput.BaseStream;
 byte[] buffer = new byte[8192]; // Temporary buffer to read data
 byte[] sendBuffer = new byte[8192]; // Buffer to accumulate data for sending
 int sendBufferIndex = 0; // Tracks the current size of sendBuffer
 int bytesRead;

 Console.WriteLine("Reading WebM output from FFmpeg and sending via WebSocket...");
 while (true)
 {
 if ((bytesRead = await ffmpegOutputStream.ReadAsync(buffer, 0, buffer.Length)) > 0)
 {
 // Copy data to sendBuffer
 Array.Copy(buffer, 0, sendBuffer, sendBufferIndex, bytesRead);
 sendBufferIndex += bytesRead;

 // If sendBuffer is full, send it via WebSocket
 if (sendBufferIndex >= sendBuffer.Length)
 {
 var segment = new ArraySegment<byte>(sendBuffer, 0, sendBuffer.Length);
 _ws.SendMessage(segment);
 sendBufferIndex = 0; // Reset the index after sending
 }
 }
 }
 }
 catch (OperationCanceledException)
 {
 Console.WriteLine("Encode/Send operation cancelled.");
 }
 catch (IOException ex) when (ex.InnerException is ObjectDisposedException)
 {
 Console.WriteLine("Stream was closed, likely due to process exit or cancellation.");
 }
 catch (Exception ex)
 {
 Console.Error.WriteLine($"Error during encoding/sending: {ex}");
 }
 });

 var errorReadTask = Task.Run(async () =>
 {
 Console.WriteLine("Starting to read FFmpeg stderr...");
 using var errorReader = _ffmpegProcess.StandardError;
 try
 {
 string? line;
 while ((line = await errorReader.ReadLineAsync()) != null) 
 {
 Console.WriteLine($"[FFmpeg stderr] {line}");
 }
 }
 catch (OperationCanceledException) { Console.WriteLine("FFmpeg stderr reading cancelled."); }
 catch (TimeoutException) { Console.WriteLine("FFmpeg stderr reading timed out (due to cancellation)."); }
 catch (Exception ex) { Console.Error.WriteLine($"Error reading FFmpeg stderr: {ex.Message}"); }
 Console.WriteLine("Finished reading FFmpeg stderr.");
 });

 }

 public async Task AppendAudioBuffer(AudioMediaBuffer audioBuffer)
 {
 try
 {
 // audio for a 1:1 call
 var bufferLength = audioBuffer.Length;
 if (bufferLength > 0)
 {
 var buffer = new byte[bufferLength];
 Marshal.Copy(audioBuffer.Data, buffer, 0, (int)bufferLength);

 _logger.Info("_ffmpegProcess.HasExited:" + _ffmpegProcess.HasExited);
 using var ffmpegInputStream = _ffmpegProcess.StandardInput.BaseStream;
 await ffmpegInputStream.WriteAsync(buffer, 0, buffer.Length);
 await ffmpegInputStream.FlushAsync(); // Flush the buffer
 _logger.Info("Wrote buffer data.");

 }
 }
 catch (Exception e)
 {
 _logger.Error(e, "Exception happened writing to input stream");
 }
 }

}


Starting FFmpeg process...
FFmpeg process started.
Starting to read FFmpeg stderr...
Reading WebM output from FFmpeg and sending via WebSocket...
[FFmpeg stderr] ffmpeg version 7.1.1-essentials_build-www.gyan.dev Copyright (c) 2000-2025 the FFmpeg developers
[FFmpeg stderr] built with gcc 14.2.0 (Rev1, Built by MSYS2 project)
[FFmpeg stderr] configuration: --enable-gpl --enable-version3 --enable-static --disable-w32threads --disable-autodetect --enable-fontconfig --enable-iconv --enable-gnutls --enable-libxml2 --enable-gmp --enable-bzlib --enable-lzma --enable-zlib --enable-libsrt --enable-libssh --enable-libzmq --enable-avisynth --enable-sdl2 --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxvid --enable-libaom --enable-libopenjpeg --enable-libvpx --enable-mediafoundation --enable-libass --enable-libfreetype --enable-libfribidi --enable-libharfbuzz --enable-libvidstab --enable-libvmaf --enable-libzimg --enable-amf --enable-cuda-llvm --enable-cuvid --enable-dxva2 --enable-d3d11va --enable-d3d12va --enable-ffnvcodec --enable-libvpl --enable-nvdec --enable-nvenc --enable-vaapi --enable-libgme --enable-libopenmpt --enable-libopencore-amrwb --enable-libmp3lame --enable-libtheora --enable-libvo-amrwbenc --enable-libgsm --enable-libopencore-amrnb --enable-libopus --enable-libspeex --enable-libvorbis --enable-librubberband
[FFmpeg stderr] libavutil 59. 39.100 / 59. 39.100
[FFmpeg stderr] libavcodec 61. 19.101 / 61. 19.101
[FFmpeg stderr] libavformat 61. 7.100 / 61. 7.100
[FFmpeg stderr] libavdevice 61. 3.100 / 61. 3.100
[FFmpeg stderr] libavfilter 10. 4.100 / 10. 4.100
[FFmpeg stderr] libswscale 8. 3.100 / 8. 3.100
[FFmpeg stderr] libswresample 5. 3.100 / 5. 3.100
[FFmpeg stderr] libpostproc 58. 3.100 / 58. 3.100

[2025-05-06 15:44:43,598][INFO][XbLogger.cs:85] _ffmpegProcess.HasExited:False
[2025-05-06 15:44:43,613][INFO][XbLogger.cs:85] Wrote buffer data.
[2025-05-06 15:44:43,613][INFO][XbLogger.cs:85] Wrote buffer data.
[FFmpeg stderr] [aist#0:0/pcm_s16le @ 0000025ec8d36040] Guessed Channel Layout: mono
[FFmpeg stderr] Input #0, s16le, from 'pipe:0':
[FFmpeg stderr] Duration: N/A, bitrate: 256 kb/s
[FFmpeg stderr] Stream #0:0: Audio: pcm_s16le, 16000 Hz, mono, s16, 256 kb/s
[FFmpeg stderr] Stream mapping:
[FFmpeg stderr] Stream #0:0 -> #0:0 (pcm_s16le (native) -> opus (libopus))
[FFmpeg stderr] [libopus @ 0000025ec8d317c0] No bit rate set. Defaulting to 64000 bps.
[FFmpeg stderr] Output #0, webm, to 'pipe:1':
[FFmpeg stderr] Metadata:
[FFmpeg stderr] encoder : Lavf61.7.100
[FFmpeg stderr] Stream #0:0: Audio: opus, 16000 Hz, mono, s16, 64 kb/s
[FFmpeg stderr] Metadata:
[FFmpeg stderr] encoder : Lavc61.19.101 libopus
[FFmpeg stderr] [out#0/webm @ 0000025ec8d36200] video:0KiB audio:1KiB subtitle:0KiB other streams:0KiB global headers:0KiB muxing overhead: 67.493113%
[FFmpeg stderr] size= 1KiB time=00:00:00.04 bitrate= 243.2kbits/s speed=2.81x
Finished reading FFmpeg stderr.
[2025-05-06 15:44:44,101][INFO][XbLogger.cs:85] _ffmpegProcess.HasExited:True
[2025-05-06 15:44:44,132][ERROR][XbLogger.cs:67] Exception happened writing to input stream
System.ObjectDisposedException: Cannot access a closed file.
 at System.IO.FileStream.WriteAsync(Byte[] buffer, Int32 offset, Int32 count, CancellationToken cancellationToken)
 at System.IO.Stream.WriteAsync(Byte[] buffer, Int32 offset, Int32 count)
 at EchoBot.Media.SpeechService.AppendAudioBuffer(AudioMediaBuffer audioBuffer) in C:\Users\tm068\Documents\workspace\myprj\xbridge-teams-bot\src\EchoBot\Media\SpeechService.cs:line 242



I am expecting the ffmpeg process to keep running.
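
For reference, here is a minimal sketch of the pipe contract ffmpeg expects, written in Python only for brevity (the behaviour does not depend on the calling language). When ffmpeg reads from -i pipe:0, it keeps encoding for as long as its standard input stays open; once stdin is closed it treats that as end of input, finalizes the WebM stream, and exits with status 0. In the C# code above, the using declaration on _ffmpegProcess.StandardInput.BaseStream disposes the stream as soon as AppendAudioBuffer returns, which is the most likely reason the process ends after the first buffer. The sample rate, channel count, and silence payload below are placeholders chosen to match the log output, not values taken from the original post.

# Minimal sketch, not a drop-in replacement for the C# service above.
# Assumptions: ffmpeg is on PATH; input is 16 kHz mono s16le, as in the log.
import subprocess
import threading

ffmpeg = subprocess.Popen(
    ["ffmpeg", "-f", "s16le", "-ar", "16000", "-ac", "1", "-i", "pipe:0",
     "-f", "webm", "pipe:1"],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    stderr=subprocess.DEVNULL,
)

def drain_output(proc):
    # Read encoded WebM chunks as they become available (in the real service
    # these would be forwarded over the WebSocket); here they are only counted.
    total = 0
    while True:
        chunk = proc.stdout.read(8192)
        if not chunk:
            break
        total += len(chunk)
    print(f"received {total} bytes of WebM")

reader = threading.Thread(target=drain_output, args=(ffmpeg,))
reader.start()

# Write many PCM buffers through the SAME open stdin; do not close or dispose
# it between writes. (Silence is used as stand-in audio data.)
for _ in range(50):
    ffmpeg.stdin.write(b"\x00\x00" * 1600)  # 100 ms of 16 kHz mono silence
    ffmpeg.stdin.flush()

ffmpeg.stdin.close()   # only now does ffmpeg see EOF and finalize the WebM
reader.join()
ffmpeg.wait()

As long as stdin stays open, ffmpeg keeps running and keeps emitting WebM data on stdout; the equivalent in the C# service would be to open StandardInput.BaseStream once, reuse it across AppendAudioBuffer calls, and close it only when the audio stream is finished.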


-
When adding a new module to nginx, an error occurred during the build
16 October 2019, by Ambasador — On CentOS 6, I am rebuilding nginx to add the "nginx-rtmp-module-master" module:
./configure --prefix=/etc/nginx --sbin-path=/usr/sbin/nginx
--conf-path=/etc/nginx/nginx.conf
--error-log-path=/var/log/nginx/error.log
--http-log-path=/var/log/nginx/access.log --pid-path=/var/run/nginx.pid
--lock-path=/var/run/nginx.lock
--http-client-body-temp-path=/var/cache/nginx/client_temp
--http-proxy-temp-path=/var/cache/nginx/proxy_temp
--http-fastcgi-temp-path=/var/cache/nginx/fastcgi_temp
--http-uwsgi-temp-path=/var/cache/nginx/uwsgi_temp
--http-scgi-temp-path=/var/cache/nginx/scgi_temp --user=nginx
--group=nginx
--with-openssl=/builddir/build/BUILD/bx-nginx-1.16.1/openssl-1.1.1c
--with-openssl-opt=enable-tls1_3 --with-http_ssl_module
--with-http_realip_module --with-http_addition_module
--with-http_sub_module --with-http_dav_module --with-http_flv_module
--with-http_mp4_module --with-http_gunzip_module
--with-http_gzip_static_module --with-http_random_index_module
--with-http_secure_link_module --with-http_stub_status_module
--with-http_auth_request_module --with-http_v2_module --with-mail
--with-mail_ssl_module --with-file-aio --with-ipv6
--add-module=../nginx-rtmp-module-master
After the configuration completes, the console shows the following:
....
configuring additional modules
adding module in ../nginx-rtmp-module-master
+ ngx_rtmp_module was configured
checking for PCRE library ... found
checking for PCRE JIT support ... not found
checking for zlib library ... found
creating objs/Makefile
Configuration summary
+ using system PCRE library
+ using OpenSSL library:
/builddir/build/BUILD/bx-nginx-1.16.1/openssl-1.1.1c
+ using system zlib library
nginx path prefix: "/etc/nginx"
nginx binary file: "/usr/sbin/nginx"
nginx modules path: "/etc/nginx/modules"
nginx configuration prefix: "/etc/nginx"
nginx configuration file: "/etc/nginx/nginx.conf"
nginx pid file: "/var/run/nginx.pid"
nginx error log file: "/var/log/nginx/error.log"
nginx http access log file: "/var/log/nginx/access.log"
nginx http client request body temporary files:
"/var/cache/nginx/client_temp"
nginx http proxy temporary files: "/var/cache/nginx/proxy_temp"
nginx http fastcgi temporary files: "/var/cache/nginx/fastcgi_temp"
nginx http uwsgi temporary files: "/var/cache/nginx/uwsgi_temp"
nginx http scgi temporary files: "/var/cache/nginx/scgi_temp"
./configure: warning: the "--with-ipv6" option is deprecated
After trying to build with the make command, I get an error:
[root@216501 nginx-1.16.1]# make
make -f objs/Makefile
make[1]: Entering directory `/root/nginx-1.16.1'
cd /builddir/build/BUILD/bx-nginx-1.16.1/openssl-1.1.1c \
&& if [ -f Makefile ]; then make clean; fi \
&& ./config
--prefix=/builddir/build/BUILD/bx-nginx-1.16.1/openssl-1.1.1c/.openssl
no-shared no-threads enable-tls1_3 \
&& make \
&& make install_sw LIBDIR=lib
/bin/sh: line 0: cd:
/builddir/build/BUILD/bx-nginx-1.16.1/openssl-1.1.1c: No such file or
directory
make[1]: ***
[/builddir/build/BUILD/bx-nginx-1.16.1/openssl-1.1.1c/.openssl/include/openssl/ssl.h]
Error 1
make[1]: Leaving directory `/root/nginx-1.16.1'
make: *** [build] Error 2
How can I fix this error so that the module I need is installed correctly?
-
Tensorflow Video Parsing: ValueError: Convert None with Unsupported Type of class 'NoneType'
17 February 2024, by John Siddarth — I am trying to learn how to use classification on video data. I tried to run an example from TensorFlow.


I installed the related tools using these commands:


# The way this tutorial uses the `TimeDistributed` layer requires TF>=2.10
pip install -U "tensorflow>=2.10.0"

pip install remotezip tqdm opencv-python
pip install -q git+https://github.com/tensorflow/docs



I downloaded the video file with:
curl -O https://upload.wikimedia.org/wikipedia/commons/8/86/End_of_a_jam.ogv

The code:


import tqdm
import random
import pathlib
import itertools
import collections

import os
import cv2
import numpy as np
import remotezip as rz

import tensorflow as tf

# Some modules to display an animation using imageio.
import imageio
from IPython import display
from urllib import request
from tensorflow_docs.vis import embed

def format_frames(frame, output_size):
  """
  Pad and resize an image from a video.

  Args:
    frame: Image that needs to be resized and padded.
    output_size: Pixel size of the output frame image.

  Return:
    Formatted frame with padding of specified output size.
  """
  frame = tf.image.convert_image_dtype(frame, tf.float32)
  frame = tf.image.resize_with_pad(frame, *output_size)
  return frame

def frames_from_video_file(video_path, n_frames, output_size = (224,224), frame_step = 15):
  """
  Creates frames from each video file present for each category.

  Args:
    video_path: File path to the video.
    n_frames: Number of frames to be created per video file.
    output_size: Pixel size of the output frame image.

  Return:
    A NumPy array of frames in the shape of (n_frames, height, width, channels).
  """
  # Read each video frame by frame
  result = []
  src = cv2.VideoCapture(str(video_path))

  video_length = src.get(cv2.CAP_PROP_FRAME_COUNT)

  need_length = 1 + (n_frames - 1) * frame_step

  if need_length > video_length:
    start = 0
  else:
    max_start = video_length - need_length
    start = random.randint(0, max_start + 1)

  src.set(cv2.CAP_PROP_POS_FRAMES, start)
  # ret is a boolean indicating whether read was successful, frame is the image itself
  ret, frame = src.read()
  result.append(format_frames(frame, output_size))

  for _ in range(n_frames - 1):
    for _ in range(frame_step):
      ret, frame = src.read()
    if ret:
      frame = format_frames(frame, output_size)
      result.append(frame)
    else:
      result.append(np.zeros_like(result[0]))
  src.release()
  result = np.array(result)[..., [2, 1, 0]]

  return result

video_path = "End_of_a_jam.ogv"

sample_video = frames_from_video_file(video_path, n_frames = 10)
sample_video.shape



Here is a summary of the troubleshooting I did:


- Check File Path: I verified that the file path to the video was correct and accessible from the Python environment.
- Verify File Permissions: I ensured that the video file had the necessary permissions to be read by the Python code.
- Test with Absolute Path: I attempted to use an absolute file path to access the video file to eliminate any ambiguity in the file's location.
- Check File Format and Encoding: I examined the video file to ensure it was in a supported format and encoded properly for reading by OpenCV.

What could be the cause?
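
The error in the title usually means that cv2.VideoCapture could not open or decode the file, so the first src.read() returned (False, None) and None was passed to tf.image.convert_image_dtype inside format_frames. Below is a hedged sketch of a defensive first read; the helper name and the guards are mine, not part of the TensorFlow tutorial.

import cv2

def first_frame_or_fail(video_path, start_frame=0):
    """Open a video with OpenCV and return one frame, failing loudly if the
    file cannot be opened or decoded instead of passing None downstream."""
    src = cv2.VideoCapture(str(video_path))
    if not src.isOpened():
        # Typical causes: a wrong path, an incomplete download, or an OpenCV
        # build without support for the container/codec of the .ogv file.
        raise RuntimeError(f"OpenCV could not open {video_path}")
    src.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
    ret, frame = src.read()
    src.release()
    if not ret or frame is None:
        raise RuntimeError(
            f"OpenCV opened {video_path} but could not decode a frame; "
            "check the codec or re-encode the clip (for example to MP4)."
        )
    return frame

# Usage sketch: run this before frames_from_video_file to confirm whether the
# ValueError is really a decoding problem rather than a TensorFlow one.
frame = first_frame_or_fail("End_of_a_jam.ogv")
print(frame.shape)

If this helper raises, the problem lies with the video file or the OpenCV installation rather than the TensorFlow code; if it returns a frame, the failure is more likely in how the later reads are handled.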


-