
Media (91)
-
Spitfire Parade - Crisis
15 May 2011, by
Updated: September 2011
Language: English
Type: Audio
-
Wired NextMusic
14 May 2011, by
Updated: February 2012
Language: English
Type: Video
-
Bee video in portrait format
14 May 2011, by
Updated: February 2012
Language: French
Type: Video
-
Sintel MP4 Surround 5.1 Full
13 May 2011, by
Updated: February 2012
Language: English
Type: Video
-
Map of Schillerkiez
13 May 2011, by
Updated: September 2011
Language: English
Type: Text
-
Publishing an image simply
13 April 2011, by
Updated: February 2012
Language: French
Type: Video
Other articles (78)
-
Publishing on MédiaSpip
13 June 2013
Can I post content from an iPad tablet?
Yes, if your installed MediaSPIP is at version 0.2 or higher. If needed, contact the administrator of your MédiaSpip site to find out.
-
Requesting the creation of a channel
12 March 2010, by
Depending on how the platform is configured, the user may have two different ways of requesting the creation of a channel. The first is at the moment of registration; the second, after registration, by filling in a request form.
Both methods ask for the same information and work in roughly the same way: the prospective user must fill in a series of form fields that first of all give the administrators information about (...)
-
Diogene: creating specific masks for content editing forms
26 October 2010, by
Diogene is one of the SPIP plugins enabled by default (as an extension) when MediaSPIP is initialised.
What this plugin is for
Creating form masks
The Diogène plugin makes it possible to create sector-specific form masks for the three SPIP objects: articles, sections (rubriques) and sites.
It can thus define, for a given sector, one form mask per object, adding or removing fields in order to make the form (...)
On other sites (7668)
-
How to get .mp4 videos from motion on a Raspberry Pi?
3 November 2017, by Maarti
I use motion on my laptop and it works perfectly in any format. But when I use it on my Raspberry Pi 3 (Raspbian Jessie) with the Raspberry Camera V2, the only formats that work are .avi and .swf. When I choose any other format, the output video is a "0 sec video" that is played and closed instantly.
I would like to have .mp4 or .ogg output so I can read it easily with HTML5. Here is the motion codec documentation.
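A possible workaround (not mentioned in the original question, and only a sketch): assuming ffmpeg is installed on the Pi, keep motion writing its working .avi files and convert each finished recording to an HTML5-friendly MP4 afterwards. The file names below are placeholders; if the build supports it, motion's on_movie_end hook (where %f expands to the movie path) is one place such a command could be attached.
ffmpeg -i event.avi -c:v libx264 -pix_fmt yuv420p -movflags +faststart event.mp4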
Here is my config file:
############################################################
# Daemon
############################################################
# Start in daemon (background) mode and release terminal (default: off)
daemon on
# File to store the process ID, also called pid file. (default: not defined)
process_id_file /var/run/motion/motion.pid
############################################################
# Basic Setup Mode
############################################################
# Start in Setup-Mode, daemon disabled. (default: off)
setup_mode off
# Use a file to save logs messages, if not defined stderr and syslog is used. (default: not defined)
#logfile /mnt/camshare/Cam1/motion.log
logfile /tmp/motion.log
# Level of log messages [1..9] (EMR, ALR, CRT, ERR, WRN, NTC, INF, DBG, ALL). (default: 6 / NTC)
log_level 2
# Filter to log messages by type (COR, STR, ENC, NET, DBL, EVT, TRK, VID, ALL). (default: ALL)
log_type all
###########################################################
# Capture device options
############################################################
# Videodevice to be used for capturing (default /dev/video0)
# for FreeBSD default is /dev/bktr0
#videodevice /dev/video0
# v4l2_palette allows you to choose the preferred palette to be used by motion
# to capture from those supported by your videodevice. (default: 17)
# E.g. if your videodevice supports both V4L2_PIX_FMT_SBGGR8 and
# V4L2_PIX_FMT_MJPEG then motion will by default use V4L2_PIX_FMT_MJPEG.
# Setting v4l2_palette to 2 forces motion to use V4L2_PIX_FMT_SBGGR8
# instead.
#
# Values :
# V4L2_PIX_FMT_SN9C10X : 0 'S910'
# V4L2_PIX_FMT_SBGGR16 : 1 'BYR2'
# V4L2_PIX_FMT_SBGGR8 : 2 'BA81'
# V4L2_PIX_FMT_SPCA561 : 3 'S561'
# V4L2_PIX_FMT_SGBRG8 : 4 'GBRG'
# V4L2_PIX_FMT_SGRBG8 : 5 'GRBG'
# V4L2_PIX_FMT_PAC207 : 6 'P207'
# V4L2_PIX_FMT_PJPG : 7 'PJPG'
# V4L2_PIX_FMT_MJPEG : 8 'MJPEG'
# V4L2_PIX_FMT_JPEG : 9 'JPEG'
# V4L2_PIX_FMT_RGB24 : 10 'RGB3'
# V4L2_PIX_FMT_SPCA501 : 11 'S501'
# V4L2_PIX_FMT_SPCA505 : 12 'S505'
# V4L2_PIX_FMT_SPCA508 : 13 'S508'
# V4L2_PIX_FMT_UYVY : 14 'UYVY'
# V4L2_PIX_FMT_YUYV : 15 'YUYV'
# V4L2_PIX_FMT_YUV422P : 16 '422P'
# V4L2_PIX_FMT_YUV420 : 17 'YU12'
#
v4l2_palette 7
# Tuner device to be used for capturing using tuner as source (default /dev/tuner0)
# This is ONLY used for FreeBSD. Leave it commented out for Linux
; tunerdevice /dev/tuner0
# The video input to be used (default: -1)
# Should normally be set to 0 or 1 for video/TV cards, and -1 for USB cameras
input -1
# The video norm to use (only for video capture and TV tuner cards)
# Values: 0 (PAL), 1 (NTSC), 2 (SECAM), 3 (PAL NC no colour). Default: 0 (PAL)
norm 0
# The frequency to set the tuner to (kHz) (only for TV tuner cards) (default: 0)
frequency 0
# Rotate image this number of degrees. The rotation affects all saved images as
# well as movies. Valid values: 0 (default = no rotation), 90, 180 and 270.
rotate 0
# Image width (pixels). Valid range: Camera dependent, default: 352
#width 1024
width 640
# Image height (pixels). Valid range: Camera dependent, default: 288
#height 576
height 480
# Maximum number of frames to be captured per second.
# Valid range: 2-100. Default: 100 (almost no limit).
framerate 15
# Minimum time in seconds between capturing picture frames from the camera.
# Default: 0 = disabled - the capture rate is given by the camera framerate.
# This option is used when you want to capture images at a rate lower than 2 per second.
minimum_frame_time 0
# URL to use if you are using a network camera, size will be autodetected (incl http:// ftp:// mjpg:// or file:///)
# Must be a URL that returns single jpeg pictures or a raw mjpeg stream. Default: Not defined
;netcam_url http://127.0.0.1/cgi-bin/raspicam.sh
# Username and password for network camera (only if required). Default: not defined
# Syntax is user:password
; netcam_userpass value
# The setting for keep-alive of network socket, should improve performance on compatible net cameras.
# off: The historical implementation using HTTP/1.0, closing the socket after each http request.
# force: Use HTTP/1.0 requests with keep alive header to reuse the same connection.
# on: Use HTTP/1.1 requests that support keep alive as default.
# Default: off
netcam_keepalive off
# URL to use for a netcam proxy server, if required, e.g. "http://myproxy".
# If a port number other than 80 is needed, use "http://myproxy:1234".
# Default: not defined
; netcam_proxy value
# Set less strict jpeg checks for network cameras with a poor/buggy firmware.
# Default: off
netcam_tolerant_check off
# Let motion regulate the brightness of a video device (default: off).
# The auto_brightness feature uses the brightness option as its target value.
# If brightness is zero auto_brightness will adjust to average brightness value 128.
# Only recommended for cameras without auto brightness
auto_brightness off
# Set the initial brightness of a video device.
# If auto_brightness is enabled, this value defines the average brightness level
# which Motion will try and adjust to.
# Valid range 0-255, default 0 = disabled
brightness 0
# Set the contrast of a video device.
# Valid range 0-255, default 0 = disabled
contrast 0
# Set the saturation of a video device.
# Valid range 0-255, default 0 = disabled
saturation 0
# Set the hue of a video device (NTSC feature).
# Valid range 0-255, default 0 = disabled
hue 0
############################################################
# File "camera" support - read raw YUV data from a file
############################################################
#filecam_path /home/pi/test-cap/motion-mmal.capture
############################################################
# OpenMax/MMAL camera support for Raspberry Pi
############################################################
mmalcam_name vc.ril.camera
#mmalcam_control_params
#mmalcam_raw_capture_file /home/pi/motion-mmal.capture
# Switch this setting to "on" to use the still image mode of the Pi's camera
# instead of video. This gives a wider field of view, but requires
# a much slower frame-rate to achieve exposure stability
# (e.g. 0.25 fps or slower). You can use the minimum_frame_time
# parameter above to achieve this
mmalcam_use_still off
############################################################
# Round Robin (multiple inputs on same video device name)
############################################################
# Number of frames to capture in each roundrobin step (default: 1)
roundrobin_frames 1
# Number of frames to skip before each roundrobin step (default: 1)
roundrobin_skip 1
# Try to filter out noise generated by roundrobin (default: off)
switchfilter off
############################################################
# Motion Detection Settings:
############################################################
# Threshold for number of changed pixels in an image that
# triggers motion detection (default: 1500)
threshold 1500
# Automatically tune the threshold down if possible (default: off)
threshold_tune off
# Noise threshold for the motion detection (default: 32)
noise_level 32
# Automatically tune the noise threshold (default: on)
noise_tune on
# Despeckle motion image using (e)rode or (d)ilate or (l)abel (Default: not defined)
# Recommended value is EedDl. Any combination (and number of) of E, e, d, and D is valid.
# (l)abeling must only be used once and the 'l' must be the last letter.
# Comment out to disable
despeckle_filter EedDl
# Detect motion in predefined areas (1 - 9). Areas are numbered like that: 1 2 3
# A script (on_area_detected) is started immediately when motion is 4 5 6
# detected in one of the given areas, but only once during an event. 7 8 9
# One or more areas can be specified with this option. Take care: This option
# does NOT restrict detection to these areas! (Default: not defined)
; area_detect value
# PGM file to use as a sensitivity mask.
# Full path name to. (Default: not defined)
; mask_file value
# Dynamically create a mask file during operation (default: 0)
# Adjust speed of mask changes from 0 (off) to 10 (fast)
smart_mask_speed 0
# Ignore sudden massive light intensity changes given as a percentage of the picture
# area that changed intensity. Valid range: 0 - 100 , default: 0 = disabled
lightswitch 0
# Picture frames must contain motion at least the specified number of frames
# in a row before they are detected as true motion. At the default of 1, all
# motion is detected. Valid range: 1 to thousands, recommended 1-5
minimum_motion_frames 1
# Specifies the number of pre-captured (buffered) pictures from before motion
# was detected that will be output at motion detection.
# Recommended range: 0 to 5 (default: 0)
# Do not use large values! Large values will cause Motion to skip video frames and
# cause unsmooth movies. To smooth movies use larger values of post_capture instead.
pre_capture 2
# Number of frames to capture after motion is no longer detected (default: 0)
post_capture 2
# Event Gap is the seconds of no motion detection that triggers the end of an event.
# An event is defined as a series of motion images taken within a short timeframe.
# Recommended value is 60 seconds (Default). The value -1 is allowed and disables
# events causing all Motion to be written to one single movie file and no pre_capture.
# If set to 0, motion is running in gapless mode. Movies don't have gaps anymore. An
# event ends right after no more motion is detected and post_capture is over.
event_gap 60
# Maximum length in seconds of an mpeg movie
# When value is exceeded a new movie file is created. (Default: 0 = infinite)
# ATTENTION: when you're not using the motion build from the tutorial, it might fail with error 'Unknown config option "max_mpeg_time"'
# then use this line instead:
# max_movie_time 60
max_movie_time 60
# Always save images even if there was no motion (default: off)
emulate_motion off
############################################################
# Image File Output
############################################################
# Output 'normal' pictures when motion is detected (default: on)
# Valid values: on, off, first, best, center
# When set to 'first', only the first picture of an event is saved.
# Picture with most motion of an event is saved when set to 'best'.
# Picture with motion nearest center of picture is saved when set to 'center'.
# Can be used as preview shot for the corresponding movie.
output_pictures best
# Output pictures with only the pixels moving object (ghost images) (default: off)
output_debug_pictures off
# The quality (in percent) to be used by the jpeg compression (default: 75)
quality 75
# Type of output images
# Valid values: jpeg, ppm (default: jpeg)
picture_type jpeg
############################################################
# FFMPEG related options
# Film (movies) file output, and deinterlacing of the video input
# The options movie_filename and timelapse_filename are also used
# by the ffmpeg feature
############################################################
# Use ffmpeg to encode movies in realtime (default: off)
ffmpeg_output_movies on
# Use ffmpeg to make movies with only the pixels moving
# object (ghost images) (default: off)
ffmpeg_output_debug_movies off
# Use ffmpeg to encode a timelapse movie
# Default value 0 = off - else save frame every Nth second
ffmpeg_timelapse 0
# The file rollover mode of the timelapse video
# Valid values: hourly, daily (default), weekly-sunday, weekly-monday, monthly, manual
ffmpeg_timelapse_mode daily
# Bitrate to be used by the ffmpeg encoder (default: 400000)
# This option is ignored if ffmpeg_variable_bitrate is not 0 (disabled)
ffmpeg_bps 500000
# Enables and defines variable bitrate for the ffmpeg encoder.
# ffmpeg_bps is ignored if variable bitrate is enabled.
# Valid values: 0 (default) = fixed bitrate defined by ffmpeg_bps,
# or the range 2 - 31 where 2 means best quality and 31 is worst.
ffmpeg_variable_bitrate 5
# Codec to used by ffmpeg for the video compression.
# Timelapse mpegs are always made in mpeg1 format independent from this option.
# Supported formats are: mpeg1 (ffmpeg-0.4.8 only), mpeg4 (default), and msmpeg4.
# mpeg1 - gives you files with extension .mpg
# mpeg4 or msmpeg4 - gives you files with extension .avi
# msmpeg4 is recommended for use with Windows Media Player because
# it requires no installation of codec on the Windows client.
# swf - gives you a flash film with extension .swf
# flv - gives you a flash video with extension .flv
# ffv1 - FF video codec 1 for Lossless Encoding ( experimental )
# mov - QuickTime ( testing )
# ogg - Ogg/Theora ( testing )
#ffmpeg_video_codec msmpeg4
ffmpeg_video_codec mp4
# Use ffmpeg to deinterlace video. Necessary if you use an analog camera
# and see horizontal combing on moving objects in video or pictures.
# (default: off)
ffmpeg_deinterlace off
############################################################
# SDL Window
############################################################
# Number of motion thread to show in SDL Window (default: 0 = disabled)
#sdl_threadnr 0
############################################################
# External pipe to video encoder
# Replacement for FFMPEG builtin encoder for ffmpeg_output_movies only.
# The options movie_filename and timelapse_filename are also used
# by the ffmpeg feature
#############################################################
# Bool to enable or disable extpipe (default: off)
use_extpipe off
# External program (full path and opts) to pipe raw video to
# Generally, use '-' for STDIN...
;extpipe mencoder -demuxer rawvideo -rawvideo w=320:h=240:i420 -ovc x264 -x264encopts bframes=4:frameref=1:subq=1:scenecut=-1:nob_adapt:threads=1:keyint=1000:8x8dct:vbv_bufsize=4000:crf=24:partitions=i8x8,i4x4:vbv_maxrate=800:no-chroma-me -vf denoise3d=16:12:48:4,pp=lb -of avi -o %f.avi - -fps %fps
############################################################
# Snapshots (Traditional Periodic Webcam File Output)
############################################################
# Make automated snapshot every N seconds (default: 0 = disabled)
snapshot_interval 0
############################################################
# Text Display
# %Y = year, %m = month, %d = date,
# %H = hour, %M = minute, %S = second, %T = HH:MM:SS,
# %v = event, %q = frame number, %t = thread (camera) number,
# %D = changed pixels, %N = noise level, \n = new line,
# %i and %J = width and height of motion area,
# %K and %L = X and Y coordinates of motion center
# %C = value defined by text_event - do not use with text_event!
# You can put quotation marks around the text to allow
# leading spaces
############################################################
# Locate and draw a box around the moving object.
# Valid values: on, off, preview (default: off)
# Set to 'preview' will only draw a box in preview_shot pictures.
locate_motion_mode off
# Set the look and style of the locate box if enabled.
# Valid values: box, redbox, cross, redcross (default: box)
# Set to 'box' will draw the traditional box.
# Set to 'redbox' will draw a red box.
# Set to 'cross' will draw a little cross to mark center.
# Set to 'redcross' will draw a little red cross to mark center.
locate_motion_style box
# Draws the timestamp using same options as C function strftime(3)
# Default: %Y-%m-%d\n%T = date in ISO format and time in 24 hour clock
# Text is placed in lower right corner
text_right %d.%m.%Y\n%T
# Draw a user defined text on the images using same options as C function strftime(3)
# Default: Not defined = no text
# Text is placed in lower left corner
; text_left CAMERA %t
text_left HofCam
# Draw the number of changed pixed on the images (default: off)
# Will normally be set to off except when you setup and adjust the motion settings
# Text is placed in upper right corner
text_changes off
# This option defines the value of the special event conversion specifier %C
# You can use any conversion specifier in this option except %C. Date and time
# values are from the timestamp of the first image in the current event.
# Default: %Y%m%d%H%M%S
# The idea is that %C can be used filenames and text_left/right for creating
# a unique identifier for each event.
text_event %Y%m%d%H%M%S
# Draw characters at twice normal size on images. (default: off)
text_double on
# Text to include in a JPEG EXIF comment
# May be any text, including conversion specifiers.
# The EXIF timestamp is included independent of this text.
;exif_text %i%J/%K%L
############################################################
# Target Directories and filenames For Images And Films
# For the options snapshot_, picture_, movie_ and timelapse_filename
# you can use conversion specifiers
# %Y = year, %m = month, %d = date,
# %H = hour, %M = minute, %S = second,
# %v = event, %q = frame number, %t = thread (camera) number,
# %D = changed pixels, %N = noise level,
# %i and %J = width and height of motion area,
# %K and %L = X and Y coordinates of motion center
# %C = value defined by text_event
# Quotation marks round string are allowed.
############################################################
# Target base directory for pictures and films
# Recommended to use absolute path. (Default: current working directory)
target_dir /home/pi
# File path for snapshots (jpeg or ppm) relative to target_dir
# Default: %v-%Y%m%d%H%M%S-snapshot
# Default value is equivalent to legacy oldlayout option
# For Motion 3.0 compatible mode choose: %Y/%m/%d/%H/%M/%S-snapshot
# File extension .jpg or .ppm is automatically added so do not include this.
# Note: A symbolic link called lastsnap.jpg created in the target_dir will always
# point to the latest snapshot, unless snapshot_filename is exactly 'lastsnap'
snapshot_filename %v-%Y%m%d%H%M%S-snapshot
# File path for motion triggered images (jpeg or ppm) relative to target_dir
# Default: %v-%Y%m%d%H%M%S-%q
# Default value is equivalent to legacy oldlayout option
# For Motion 3.0 compatible mode choose: %Y/%m/%d/%H/%M/%S-%q
# File extension .jpg or .ppm is automatically added so do not include this
# Set to 'preview' together with best-preview feature enables special naming
# convention for preview shots. See motion guide for details
picture_filename %v-%Y%m%d%H%M%S-%q
# File path for motion triggered ffmpeg films (movies) relative to target_dir
# Default: %v-%Y%m%d%H%M%S
# Default value is equivalent to legacy oldlayout option
# For Motion 3.0 compatible mode choose: %Y/%m/%d/%H%M%S
# File extension .mpg or .avi is automatically added so do not include this
# This option was previously called ffmpeg_filename
movie_filename %v-%Y%m%d%H%M%S
# File path for timelapse movies relative to target_dir
# Default: %Y%m%d-timelapse
# Default value is near equivalent to legacy oldlayout option
# For Motion 3.0 compatible mode choose: %Y/%m/%d-timelapse
# File extension .mpg is automatically added so do not include this
timelapse_filename %Y%m%d-timelapse
############################################################
# Global Network Options
############################################################
# Enable or disable IPV6 for http control and stream (default: off )
ipv6_enabled off
############################################################
# Live Stream Server
############################################################
# The mini-http server listens to this port for requests (default: 0 = disabled)
stream_port 8080
# Quality of the jpeg (in percent) images produced (default: 50)
stream_quality 50
# Output frames at 1 fps when no motion is detected and increase to the
# rate given by stream_maxrate when motion is detected (default: off)
stream_motion on
# Maximum framerate for stream streams (default: 1)
stream_maxrate 4
# Restrict stream connections to localhost only (default: on)
stream_localhost off
# Limits the number of images per connection (default: 0 = unlimited)
# Number can be defined by multiplying actual stream rate by desired number of seconds
# Actual stream rate is the smallest of the numbers framerate and stream_maxrate
stream_limit 0
# Set the authentication method (default: 0)
# 0 = disabled
# 1 = Basic authentication
# 2 = MD5 digest (the safer authentication)
stream_auth_method 0
# Authentication for the stream. Syntax username:password
# Default: not defined (Disabled)
; stream_authentication username:password
############################################################
# HTTP Based Control
############################################################
# TCP/IP port for the http server to listen on (default: 0 = disabled)
webcontrol_port 8081
# Restrict control connections to localhost only (default: on)
webcontrol_localhost off
# Output for http server, select off to choose raw text plain (default: on)
webcontrol_html_output on
# Authentication for the http based control. Syntax username:password
# Default: not defined (Disabled)
; webcontrol_authentication username:password
############################################################
# Tracking (Pan/Tilt)
#############################################################
# Type of tracker (0=none (default), 1=stepper, 2=iomojo, 3=pwc, 4=generic, 5=uvcvideo, 6=servo)
# The generic type enables the definition of motion center and motion size to
# be used with the conversion specifiers for options like on_motion_detected
track_type 0
# Enable auto tracking (default: off)
track_auto off
# Serial port of motor (default: none)
;track_port /dev/ttyS0
# Motor number for x-axis (default: 0)
;track_motorx 0
# Set motorx reverse (default: 0)
;track_motorx_reverse 0
# Motor number for y-axis (default: 0)
;track_motory 1
# Set motory reverse (default: 0)
;track_motory_reverse 0
# Maximum value on x-axis (default: 0)
;track_maxx 200
# Minimum value on x-axis (default: 0)
;track_minx 50
# Maximum value on y-axis (default: 0)
;track_maxy 200
# Minimum value on y-axis (default: 0)
;track_miny 50
# Center value on x-axis (default: 0)
;track_homex 128
# Center value on y-axis (default: 0)
;track_homey 128
# ID of an iomojo camera if used (default: 0)
track_iomojo_id 0
# Angle in degrees the camera moves per step on the X-axis
# with auto-track (default: 10)
# Currently only used with pwc type cameras
track_step_angle_x 10
[...] -
FFMPEG not enough data (x y), trying to decode anyway
7 June 2016, by Forest J. Handford
I’m trying to make videos of Direct3D games using a C# app. For non-Direct3D games I stream images from Graphics.CopyFromScreen, which works. When I copy the screen from Direct3D and stream it to FFMPEG I get:
[bmp @ 00000276b0b9c280] not enough data (5070 < 129654), trying to decode anyway
An MP4 file is created, but it is always 0 bytes.
To get screenshots from Direct3D, I am using Justin Stenning’s Direct3DHook. This produces images MUCH bigger than when I get images from Graphics.CopyFromScreen (8 MB vs 136 KB). I’ve tried increasing the buffer (-bufsize) but the number on the left of the error is not impacted.
I’ve tried resizing the image to 1/6th the original. That reduces the number on the right, but does not eliminate it. Even when the number on the right is close to what I have for Graphics.CopyFromScreen, I get an error. Here is a sample of the current code:
using System;
using System.Diagnostics;
using System.Threading;
using System.Drawing;
using Capture.Hook;
using Capture.Interface;
using Capture;
using System.IO;
namespace GameRecord
{
public class Video
{
private const int VID_FRAME_FPS = 8;
private const int SIZE_MODIFIER = 6;
private const double FRAMES_PER_MS = VID_FRAME_FPS * 0.001;
private const int SLEEP_INTERVAL = 2;
private const int CONSTANT_RATE_FACTOR = 18; // Lower crf = Higher Quality https://trac.ffmpeg.org/wiki/Encode/H.264
private Image image;
private Capture captureScreen;
private int processId = 0;
private Process process;
private CaptureProcess captureProcess;
private Process launchingFFMPEG;
private string arg;
private int frame = 0;
private Size? resize = null;
/// <summary>
/// Generates the Videos by gathering frames and processing via FFMPEG.
/// </summary>
public void RecordScreenTillGameEnd(string exe, OutputDirectory outputDirectory, CustomMessageBox alertBox, Thread workerThread)
{
AttachProcess(exe);
RequestD3DScreenShot();
while (image == null) ;
Logger.log.Info("Launching FFMPEG ....");
resize = new Size(image.Width / SIZE_MODIFIER, image.Height / SIZE_MODIFIER);
// H.264 can let us do 8 FPS in high res . . . but must be licensed for commercial use.
arg = "-f image2pipe -framerate " + VID_FRAME_FPS + " -i pipe:.bmp -pix_fmt yuv420p -crf " +
CONSTANT_RATE_FACTOR + " -preset ultrafast -s " + resize.Value.Width + "x" +
resize.Value.Height + " -vcodec libx264 -bufsize 30000k -y \"" +
outputDirectory.pathToVideo + "\"";
launchingFFMPEG = new Process
{
StartInfo = new ProcessStartInfo
{
FileName = "ffmpeg",
Arguments = arg,
UseShellExecute = false,
CreateNoWindow = true,
RedirectStandardInput = true,
RedirectStandardError = true
}
};
launchingFFMPEG.Start();
Stopwatch stopWatch = Stopwatch.StartNew(); //creates and start the instance of Stopwatch
do
{
Thread.Sleep(SLEEP_INTERVAL);
} while (workerThread.IsAlive);
Logger.log.Info("Total frames: " + frame + " Expected frames: " + (ExpectedFrames(stopWatch.ElapsedMilliseconds) - 1));
launchingFFMPEG.StandardInput.Close();
#if DEBUG
string line;
while ((line = launchingFFMPEG.StandardError.ReadLine()) != null)
{
Logger.log.Debug(line);
}
#endif
launchingFFMPEG.Close();
alertBox.Show();
}
void RequestD3DScreenShot()
{
captureProcess.CaptureInterface.BeginGetScreenshot(new Rectangle(0, 0, 0, 0), new TimeSpan(0, 0, 2), Callback, resize, (ImageFormat)Enum.Parse(typeof(ImageFormat), "Bitmap"));
}
private void AttachProcess(string exe)
{
Thread.Sleep(300);
Process[] processes = Process.GetProcessesByName(Path.GetFileNameWithoutExtension(exe));
foreach (Process currProcess in processes)
{
// Simply attach to the first one found.
// If the process doesn't have a mainwindowhandle yet, skip it (we need to be able to get the hwnd to set foreground etc)
if (currProcess.MainWindowHandle == IntPtr.Zero)
{
continue;
}
// Skip if the process is already hooked (and we want to hook multiple applications)
if (HookManager.IsHooked(currProcess.Id))
{
continue;
}
Direct3DVersion direct3DVersion = Direct3DVersion.AutoDetect;
CaptureConfig cc = new CaptureConfig()
{
Direct3DVersion = direct3DVersion,
ShowOverlay = false
};
processId = currProcess.Id;
process = currProcess;
var captureInterface = new CaptureInterface();
captureInterface.RemoteMessage += new MessageReceivedEvent(CaptureInterface_RemoteMessage);
captureProcess = new CaptureProcess(process, cc, captureInterface);
break;
}
Thread.Sleep(10);
if (captureProcess == null)
{
ShowUser.Exception("No executable found matching: '" + exe + "'");
}
}
/// <summary>
/// The callback for when the screenshot has been taken
/// </summary>
///
///
///
void Callback(IAsyncResult result)
{
using (Screenshot screenshot = captureProcess.CaptureInterface.EndGetScreenshot(result))
if (screenshot != null && screenshot.Data != null && arg != null)
{
if (image != null)
{
image.Dispose();
}
image = screenshot.ToBitmap();
// image.Save("D3DImageTest.bmp");
image.Save(launchingFFMPEG.StandardInput.BaseStream, System.Drawing.Imaging.ImageFormat.Bmp);
launchingFFMPEG.StandardInput.Flush();
frame++;
}
if (frame < 5)
{
Thread t = new Thread(new ThreadStart(RequestD3DScreenShot));
t.Start();
}
else
{
Logger.log.Info("Done getting shots from D3D.");
}
}
/// <summary>
/// Display messages from the target process
/// </summary>
///
private void CaptureInterface_RemoteMessage(MessageReceivedEventArgs message)
{
Logger.log.Info(message);
}
}
}
When I search the internet for the error, all I get is the FFMPEG source code, which has not proven to be illuminating. I have been able to save the image directly to disk, which makes me feel like it is not an issue with disposing the data. I have also tried only grabbing one frame, but that produces the same error, which suggests to me it is not a threading issue.
Here is the full sample of stderr:
2016-06-02 18:29:38,046 === ffmpeg version N-79143-g8ff0f6a Copyright (c) 2000-2016 the FFmpeg developers
2016-06-02 18:29:38,047 === built with gcc 5.3.0 (GCC)
2016-06-02 18:29:38,048 === configuration: --enable-gpl
--enable-version3 --disable-w32threads --enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libdcadec --enable-libfreetype --enable-libgme --enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmfx --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libschroedinger --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs --enable-libxvid --enable-libzimg --enable-lzma --enable-decklink --enable-zlib
2016-06-02 18:29:38,062 === libavutil 55. 19.100 / 55. 19.100
2016-06-02 18:29:38,063 === libavcodec 57. 30.100 / 57. 30.100
2016-06-02 18:29:38,064 === libavformat 57. 29.101 / 57. 29.101
2016-06-02 18:29:38,064 === libavdevice 57. 0.101 / 57. 0.101
2016-06-02 18:29:38,065 === libavfilter 6. 40.102 / 6. 40.102
2016-06-02 18:29:38,066 === libswscale 4. 0.100 / 4. 0.100
2016-06-02 18:29:38,067 === libswresample 2. 0.101 / 2. 0.101
2016-06-02 18:29:38,068 === libpostproc 54. 0.100 / 54. 0.100
2016-06-02 18:29:38,068 === [bmp @ 000002cd7e5cc280] not enough data (13070 < 8294454), trying to decode anyway
2016-06-02 18:29:38,069 === [bmp @ 000002cd7e5cc280] not enough data (13016 < 8294400)
2016-06-02 18:29:38,069 === Input #0, image2pipe, from 'pipe:.bmp':
2016-06-02 18:29:38,262 === Duration: N/A, bitrate: N/A
2016-06-02 18:29:38,262 === Stream #0:0: Video: bmp, bgra, 1920x1080, 8 tbr, 8 tbn, 8 tbc
2016-06-02 18:29:38,263 === [libx264 @ 000002cd7e5d59a0] VBV bufsize set but maxrate unspecified, ignored
2016-06-02 18:29:38,264 === [libx264 @ 000002cd7e5d59a0] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 AVX2 LZCNT BMI2
2016-06-02 18:29:38,265 === [libx264 @ 000002cd7e5d59a0] profile Constrained Baseline, level 1.1
2016-06-02 18:29:38,266 === [libx264 @ 000002cd7e5d59a0] 264 - core 148 r2665 a01e339 - H.264/MPEG-4 AVC codec - Copyleft 2003-2016 - http://www.videolan.org/x264.html - options: cabac=0 ref=1 deblock=0:0:0 analyse=0:0 me=dia subme=0 psy=1 psy_rd=1.00:0.00 mixed_ref=0 me_range=16 chroma_me=1 trellis=0 8x8dct=0 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=0 threads=6 lookahead_threads=1 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=0 weightp=0 keyint=250 keyint_min=8 scenecut=0 intra_refresh=0 rc=crf mbtree=0 crf=18.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=0
2016-06-02 18:29:38,463 === Output #0, mp4, to 'C:\Users\fores\AppData\Roaming\Affectiva\n_Artifacts_20160602_182857\GameplayVidOut.mp4':
2016-06-02 18:29:38,464 === Metadata:
2016-06-02 18:29:38,465 === encoder : Lavf57.29.101
2016-06-02 18:29:38,469 === Stream #0:0: Video: h264 (libx264) ([33][0][0][0] / 0x0021), yuv420p, 320x180, q=-1--1, 8 fps, 16384 tbn, 8 tbc
2016-06-02 18:29:38,470 === Metadata:
2016-06-02 18:29:38,472 === encoder : Lavc57.30.100 libx264
2016-06-02 18:29:38,474 === Side data:
2016-06-02 18:29:38,475 === cpb: bitrate max/min/avg: 0/0/0 buffer size: 30000000 vbv_delay: -1
2016-06-02 18:29:38,476 === Stream mapping:
2016-06-02 18:29:38,477 === Stream #0:0 -> #0:0 (bmp (native) -> h264 (libx264))
2016-06-02 18:29:38,480 === [bmp @ 000002cd7e5cc9a0] not enough data (13070 < 8294454), trying to decode anyway
2016-06-02 18:29:38,662 === [bmp @ 000002cd7e5cc9a0] not enough data (13016 < 8294400)
2016-06-02 18:29:38,662 === Error while decoding stream #0:0: Invalid data found when processing input
2016-06-02 18:29:38,663 === frame= 0 fps=0.0 q=0.0 Lsize= 0kB time=00:00:00.00 bitrate=N/A speed= 0x
2016-06-02 18:29:38,663 === video:0kB audio:0kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: unknown
2016-06-02 18:29:38,664 === Conversion failed!
In memory, the current image is 320 pixels wide and 180 pixels high. The pixel format is Format32bppRgb. The horizontal and vertical resolutions seem odd; they are both 96.01199. When saved to disk, here is the ffprobe output for the file:
ffprobe version N-79143-g8ff0f6a Copyright (c) 2007-2016 the FFmpeg developers
built with gcc 5.3.0 (GCC)
configuration: --enable-gpl --enable-version3 --disable-w32threads --enable-avisynth --enable-bzlib --enable-fontconfig --enable-frei0r --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libdcadec --enable-libfreetype --enable-libgme --enable-libgsm --enable-libilbc --enable-libmodplug --enable-libmfx --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libschroedinger --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxavs --enable-libxvid --enable-libzimg --enable-lzma --enable-decklink --enable-zlib
libavutil 55. 19.100 / 55. 19.100
libavcodec 57. 30.100 / 57. 30.100
libavformat 57. 29.101 / 57. 29.101
libavdevice 57. 0.101 / 57. 0.101
libavfilter 6. 40.102 / 6. 40.102
libswscale 4. 0.100 / 4. 0.100
libswresample 2. 0.101 / 2. 0.101
libpostproc 54. 0.100 / 54. 0.100
Input #0, png_pipe, from 'C:\Users\fores\git\game-playtest-tool\GamePlayTest\bin\x64\Debug\D3DFromCapture.bmp':
Duration: N/A, bitrate: N/A
Stream #0:0: Video: png, rgba(pc), 1920x1080 [SAR 3779:3779 DAR 16:9], 25 tbr, 25 tbn, 25 tbc
Here is a PNG version of an example screenshot from the current code (playing Portal 2):
Any ideas would be greatly appreciated. My current workaround is to save the files to the HDD and compile the video after gameplay, but it’s a far less performant option. Thank you!
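One direction worth checking (not from the original post, and only a sketch): the bmp error means ffmpeg saw a BMP header announcing far more pixel data than actually arrived before the next header, so it can help to serialize each frame to a memory buffer first and push the complete BMP to ffmpeg's stdin in a single write. The helper below is hypothetical; frame would be the screenshot.ToBitmap() result and ffmpegStdin the launchingFFMPEG.StandardInput.BaseStream from the code above.
using System.Drawing;
using System.Drawing.Imaging;
using System.IO;
// Hypothetical helper: write one frame as a single, complete BMP.
static void WriteFrame(Image frame, Stream ffmpegStdin)
{
    using (var ms = new MemoryStream())
    {
        frame.Save(ms, ImageFormat.Bmp);        // full BMP: header + pixel data
        byte[] bmp = ms.ToArray();
        ffmpegStdin.Write(bmp, 0, bmp.Length);  // one contiguous write per frame
        ffmpegStdin.Flush();
    }
}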
-
Updating SDL YUV Texture
15 June 2015, by madprogrammer2015
I am receiving an H.264 video stream and successfully decoding it with FFMPEG. It can display the first frame of data, but after that the screen never updates; it just appears to become a static image. I am using the YUV pixel format, and I am receiving the stream in that format as well. I am also using SDL_UpdateYUVTexture().
Here is my code:
int main()
{
WORD wVersionRequested;
WSADATA wsaData;
int wsaerr;
if (SDL_Init(SDL_INIT_EVERYTHING)) {
fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
exit(1);
}
// Using MAKEWORD macro, Winsock version request 2.2
wVersionRequested = MAKEWORD(2, 2);
wsaerr = WSAStartup(wVersionRequested, &wsaData);
if (wsaerr != 0)
{
/* Tell the user that we could not find a usable */
/* WinSock DLL.*/
printf("The Winsock dll not found!\n");
return 0;
}
else
{
printf("The Winsock dll found!\n");
printf("The status: %s.\n", wsaData.szSystemStatus);
}
/* Confirm that the WinSock DLL supports 2.2.*/
/* Note that if the DLL supports versions greater */
/* than 2.2 in addition to 2.2, it will still return */
/* 2.2 in wVersion since that is the version we */
/* requested. */
if (LOBYTE(wsaData.wVersion) != 2 || HIBYTE(wsaData.wVersion) != 2)
{
/* Tell the user that we could not find a usable */
/* WinSock DLL.*/
printf("The dll do not support the Winsock version %u.%u!\n", LOBYTE(wsaData.wVersion), HIBYTE(wsaData.wVersion));
WSACleanup();
return 0;
}
else
{
printf("The dll supports the Winsock version %u.%u!\n", LOBYTE(wsaData.wVersion), HIBYTE(wsaData.wVersion));
printf("The highest version this dll can support: %u.%u\n", LOBYTE(wsaData.wHighVersion), HIBYTE(wsaData.wHighVersion));
}
ULONG localif;
/*INT Ret;
HANDLE ThreadHandle;
DWORD ThreadId;
WSAEVENT AcceptEvent;
char buf[1024];
int buflen = 1024, rc, err;*/
SOCKET s;
SOCKET ns;
SOCKADDR_IN multi, safrom;
int fromlen;
int totalSize = 0;
AVCodec *codec;
AVCodecContext *codecContext;
int frame;
int got_picture;
AVFrame *picture;
AVPacket packet;
SwsContext* convertContext;
uint16_t i = 1;
//std::queue<MadProto> queue;
//std::list<MadProto> list;
AVCodecParserContext *parser;
std::vector<uint8_t> buffer;
//moodycamel::ConcurrentQueue<MadProto> protoQueue;
SDL_Window *window;
SDL_Renderer *renderer;
SDL_Texture *bmp;
SDL_Rect rect;
file.open("log.txt");
s = socket(AF_INET, SOCK_STREAM, IPPROTO_RM);
multi.sin_family = AF_INET;
multi.sin_port = htons(5150);
multi.sin_addr.s_addr = inet_addr("234.5.6.7");
int bindResult = bind(s, (PSOCKADDR)&multi, sizeof(multi));
if (bindResult < 0)
{
std::cout << "bindResult: " << WSAGetLastError() << std::endl;
}
listen(s, 10);
//if ((AcceptEvent = WSACreateEvent()) == WSA_INVALID_EVENT)
//{
// printf("WSACreateEvent() failed with error %d\n", WSAGetLastError());
// return 1;
//}
//else
// printf("WSACreateEvent() is OK!\n");
//// Create a worker thread to service completed I/O requests
//if ((ThreadHandle = CreateThread(NULL, 0, WorkerThread, (LPVOID)AcceptEvent, 0, &ThreadId)) == NULL)
//{
// printf("CreateThread() failed with error %d\n", GetLastError());
// return 1;
//}
//else
// printf("CreateThread() should be fine!\n");
localif = inet_addr("192.168.1.2");
setsockopt(s, IPPROTO_RM, RM_ADD_RECEIVE_IF, (char *)&localif, sizeof(localif));
fromlen = sizeof(safrom);
ns = accept(s, (SOCKADDR *)&safrom, &fromlen);
closesocket(s); // Don't need to listen anymore
std::string received;
av_register_all();
int horizontal = 0;
int vertical = 0;
GetDesktopResolution(horizontal, vertical);
codec = avcodec_find_decoder(CODEC_ID_H264);
if (!codec) {
std::cout << "codec not found" << std::endl;
std::cin.get();
}
codecContext = avcodec_alloc_context3(codec);
/*if (codec->capabilities & CODEC_CAP_TRUNCATED)
codecContext->flags |= CODEC_FLAG_TRUNCATED;*/
//codecContext->flags |= CODEC_FLAG_LOW_DELAY;
codecContext->flags2 |= CODEC_FLAG2_CHUNKS;
codecContext->width = horizontal;
codecContext->height = vertical;
codecContext->codec_id = CODEC_ID_H264;
codecContext->codec_type = AVMEDIA_TYPE_VIDEO;
codecContext->pix_fmt = PIX_FMT_YUV420P;
codecContext->thread_type = 0;
if (avcodec_open2(codecContext, codec, NULL) < 0) {
std::cout << "could not open codec" << std::endl;
std::cin.get();
}
convertContext = sws_getContext(
codecContext->width,
codecContext->height,
PIX_FMT_RGB32,
codecContext->width,
codecContext->height,
PIX_FMT_YUV420P,
SWS_BICUBIC,
NULL,
NULL,
NULL
);
parser = av_parser_init(CODEC_ID_H264);
picture = av_frame_alloc();
if (ns == INVALID_SOCKET)
{
std::cout << "accept didn't work!" << std::endl;
std::cin.get();
}
/*if (WSASetEvent(AcceptEvent) == FALSE)
{
printf("WSASetEvent() failed with error %d\n", WSAGetLastError());
return 1;
}
else
printf("WSASetEvent() should be working!\n");*/
window = SDL_CreateWindow("YUV", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, codecContext->width, codecContext->height, SDL_WINDOW_SHOWN);
renderer = SDL_CreateRenderer(window, -1, 0);
bmp = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, codecContext->width, codecContext->height);
//receive = SDL_CreateThread(receiveThread, "ReceiveThread", (void *)NULL);
bool quit = false;
rect.x = 0;
rect.y = 0;
rect.w = codecContext->width;
rect.h = codecContext->height;
while (!quit)
{
while (true)
{
MadProto proto;
int result = recvfrom(ns, (char *)&proto, sizeof(MadProto), 0, (struct sockaddr *)&multi, &fromlen);
if (result < 0)
{
std::cout << "receive failed! error: " << WSAGetLastError() << std::endl;
break;
}
else
{
std::cout << "receive successful, received " << result << " bytes" << std::endl;
if (ntohs(proto.frame_end) == 1)
{
uint8_t *outbuffer = NULL;
int outBufSize = 0;
int rc = av_parser_parse2(parser, codecContext, &outbuffer, &outBufSize, buffer.data(), buffer.size(), 0, 0, 0);
if (outBufSize <= 0)
{
std::cout << "parsing failed!" << std::endl;
std::cout << "outBufSize: " << outBufSize << std::endl;
break;
}
if (rc)
{
std::cout << "rc: " << rc << std::endl;
std::cout << "parsing successful!" << std::endl;
//std::cin.get();
av_init_packet(&packet);
packet.size = outBufSize;
packet.data = outbuffer;
frame = avcodec_decode_video2(codecContext, picture, &got_picture, &packet);
if (frame < 0)
{
std::cout << "decoding was unsuccessful!" << std::endl;
break;
}
if (got_picture)
{
std::cout << "decoding was successful!" << std::endl;
std::cout << "decoded length was: " << frame << std::endl;
buffer.empty();
//std::cin.get();
int code = SDL_UpdateYUVTexture(bmp, NULL, picture->data[0], picture->linesize[0],
picture->data[1], picture->linesize[1],
picture->data[2], picture->linesize[2]);
if (code < 0)
{
std::cout << "unable to update texture " << SDL_GetError() << std::endl;
std::cin.get();
}
code = SDL_RenderClear(renderer);
if (code < 0)
{
std::cout << "renderer clear failed " << SDL_GetError() << std::endl;
std::cin.get();
}
code = SDL_RenderCopy(renderer, bmp, NULL, &rect);
if (code < 0)
{
std::cout << "renderer copy failed " << SDL_GetError() << std::endl;
std::cin.get();
}
SDL_RenderPresent(renderer);
SDL_Delay(40);
}
av_free_packet(&packet);
}
}
else
{
std::copy(proto.payload, proto.payload + ntohs(proto.nal_length), std::back_inserter(buffer));
std::cout << "frame is continuing!" << std::endl;
//queue.push(proto);
//list.push_front(proto);
}
}
}
SDL_WaitEvent(&event);
switch (event.type)
{
case SDL_QUIT:
quit = true;
break;
}
}
std::cout << "closing everything!" << std::endl;
av_frame_free(&picture);
closesocket(ns);
fclose(f);
std::cin.get();
return 0;
}
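Two details in the code above are worth flagging (an observation added here, not part of the original post), sketched against the same variable names (buffer, quit):
// 1) std::vector::empty() only tests whether the vector is empty; it never
//    removes anything, so the bytes of every previous frame stay in `buffer`
//    and are re-parsed on each new frame. Clearing is done with:
buffer.clear();   // instead of buffer.empty();
// 2) SDL_WaitEvent() is only reached once recvfrom() fails. Polling events
//    inside the receive loop keeps the window responsive, e.g.:
SDL_Event event;
while (SDL_PollEvent(&event)) {
    if (event.type == SDL_QUIT) {
        quit = true;
    }
}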