
Other articles (53)
-
Websites made with MediaSPIP
2 May 2011. This page lists some websites based on MediaSPIP.
-
Creating farms of unique websites
13 April 2011. MediaSPIP platforms can be installed as a farm, with a single "core" hosted on a dedicated server and used by multiple websites.
This allows (among other things): implementation costs to be shared between several different projects/individuals, rapid deployment of multiple unique sites, and creation of groups of like-minded sites, making it possible to browse media in a more controlled and selective environment than the major "open" (...)
-
Contribute to a better visual interface
13 April 2011. MediaSPIP is based on a system of themes and templates. Templates define the placement of information on the page and can be adapted to a wide range of uses. Themes define the overall graphic appearance of the site.
Anyone can submit a new graphic theme or template and make it available to the MediaSPIP community.
On other sites (8682)
-
Cutting a live stream into separate mp4 files
9 June 2017, by Fearhunter. I am doing research on cutting a live stream into pieces and saving them as mp4 files. I am using this source for the proof of concept:
And this is the example code I use:
using System;
using System.Collections.Generic;
using System.Configuration;
using System.IO;
using System.Linq;
using System.Net;
using System.Security.Cryptography;
using System.Text;
using System.Threading.Tasks;
using Microsoft.WindowsAzure.MediaServices.Client;
using Newtonsoft.Json.Linq;
namespace AMSLiveTest
{
class Program
{
private const string StreamingEndpointName = "streamingendpoint001";
private const string ChannelName = "channel001";
private const string AssetlName = "asset001";
private const string ProgramlName = "program001";
// Read values from the App.config file.
private static readonly string _mediaServicesAccountName =
ConfigurationManager.AppSettings["MediaServicesAccountName"];
private static readonly string _mediaServicesAccountKey =
ConfigurationManager.AppSettings["MediaServicesAccountKey"];
// Field for service context.
private static CloudMediaContext _context = null;
private static MediaServicesCredentials _cachedCredentials = null;
static void Main(string[] args)
{
// Create and cache the Media Services credentials in a static class variable.
_cachedCredentials = new MediaServicesCredentials(
_mediaServicesAccountName,
_mediaServicesAccountKey);
// Use the cached credentials to create CloudMediaContext.
_context = new CloudMediaContext(_cachedCredentials);
IChannel channel = CreateAndStartChannel();
// Set the Live Encoder to point to the channel's input endpoint:
string ingestUrl = channel.Input.Endpoints.FirstOrDefault().Url.ToString();
// Use the previewEndpoint to preview and verify
// that the input from the encoder is actually reaching the Channel.
string previewEndpoint = channel.Preview.Endpoints.FirstOrDefault().Url.ToString();
IProgram program = CreateAndStartProgram(channel);
ILocator locator = CreateLocatorForAsset(program.Asset, program.ArchiveWindowLength);
IStreamingEndpoint streamingEndpoint = CreateAndStartStreamingEndpoint();
GetLocatorsInAllStreamingEndpoints(program.Asset);
// Once you are done streaming, clean up your resources.
Cleanup(streamingEndpoint, channel);
}
public static IChannel CreateAndStartChannel()
{
//If you want to change the Smooth fragments to HLS segment ratio, you would set the ChannelCreationOptions’s Output property.
IChannel channel = _context.Channels.Create(
new ChannelCreationOptions
{
Name = ChannelName,
Input = CreateChannelInput(),
Preview = CreateChannelPreview()
});
//Starting and stopping Channels can take some time to execute. To determine the state of operations after calling Start or Stop, query the IChannel.State .
channel.Start();
return channel;
}
private static ChannelInput CreateChannelInput()
{
return new ChannelInput
{
StreamingProtocol = StreamingProtocol.RTMP,
AccessControl = new ChannelAccessControl
{
IPAllowList = new List<IPRange>
{
new IPRange
{
Name = "TestChannelInput001",
// Setting 0.0.0.0 for Address and 0 for SubnetPrefixLength
// will allow access to all IP addresses.
Address = IPAddress.Parse("0.0.0.0"),
SubnetPrefixLength = 0
}
}
}
};
}
private static ChannelPreview CreateChannelPreview()
{
return new ChannelPreview
{
AccessControl = new ChannelAccessControl
{
IPAllowList = new List<IPRange>
{
new IPRange
{
Name = "TestChannelPreview001",
// Setting 0.0.0.0 for Address and 0 for SubnetPrefixLength
// will allow access to all IP addresses.
Address = IPAddress.Parse("0.0.0.0"),
SubnetPrefixLength = 0
}
}
}
};
}
public static void UpdateCrossSiteAccessPoliciesForChannel(IChannel channel)
{
var clientPolicy =
@"<?xml version=""1.0"" encoding=""utf-8""?>
<access-policy>
<cross-domain-access>
<policy>
<allow-from http-request-headers=""*"" http-methods=""*"">
<domain uri=""*""/>
</allow-from>
<grant-to>
<resource path=""/"" include-subpaths=""true""/>
</grant-to>
</policy>
</cross-domain-access>
</access-policy>";
var xdomainPolicy =
@"<?xml version=""1.0"" ?>
<cross-domain-policy>
<allow-access-from domain=""*"" />
</cross-domain-policy>";
channel.CrossSiteAccessPolicies.ClientAccessPolicy = clientPolicy;
channel.CrossSiteAccessPolicies.CrossDomainPolicy = xdomainPolicy;
channel.Update();
}
public static IProgram CreateAndStartProgram(IChannel channel)
{
IAsset asset = _context.Assets.Create(AssetlName, AssetCreationOptions.None);
// Create a Program on the Channel. You can have multiple Programs that overlap or are sequential;
// however each Program must have a unique name within your Media Services account.
IProgram program = channel.Programs.Create(ProgramlName, TimeSpan.FromHours(3), asset.Id);
program.Start();
return program;
}
public static ILocator CreateLocatorForAsset(IAsset asset, TimeSpan ArchiveWindowLength)
{
// You cannot create a streaming locator using an AccessPolicy that includes write or delete permissions.
var locator = _context.Locators.CreateLocator
(
LocatorType.OnDemandOrigin,
asset,
_context.AccessPolicies.Create
(
"Live Stream Policy",
ArchiveWindowLength,
AccessPermissions.Read
)
);
return locator;
}
public static IStreamingEndpoint CreateAndStartStreamingEndpoint()
{
var options = new StreamingEndpointCreationOptions
{
Name = StreamingEndpointName,
ScaleUnits = 1,
AccessControl = GetAccessControl(),
CacheControl = GetCacheControl()
};
IStreamingEndpoint streamingEndpoint = _context.StreamingEndpoints.Create(options);
streamingEndpoint.Start();
return streamingEndpoint;
}
private static StreamingEndpointAccessControl GetAccessControl()
{
return new StreamingEndpointAccessControl
{
IPAllowList = new List<IPRange>
{
new IPRange
{
Name = "Allow all",
Address = IPAddress.Parse("0.0.0.0"),
SubnetPrefixLength = 0
}
},
AkamaiSignatureHeaderAuthenticationKeyList = new List<AkamaiSignatureHeaderAuthenticationKey>
{
new AkamaiSignatureHeaderAuthenticationKey
{
Identifier = "My key",
Expiration = DateTime.UtcNow + TimeSpan.FromDays(365),
Base64Key = Convert.ToBase64String(GenerateRandomBytes(16))
}
}
};
}
private static byte[] GenerateRandomBytes(int length)
{
var bytes = new byte[length];
using (var rng = new RNGCryptoServiceProvider())
{
rng.GetBytes(bytes);
}
return bytes;
}
private static StreamingEndpointCacheControl GetCacheControl()
{
return new StreamingEndpointCacheControl
{
MaxAge = TimeSpan.FromSeconds(1000)
};
}
public static void UpdateCrossSiteAccessPoliciesForStreamingEndpoint(IStreamingEndpoint streamingEndpoint)
{
var clientPolicy =
@"<?xml version=""1.0"" encoding=""utf-8""?>
<access-policy>
<cross-domain-access>
<policy>
<allow-from http-request-headers=""*"" http-methods=""*"">
<domain uri=""*""/>
</allow-from>
<grant-to>
<resource path=""/"" include-subpaths=""true""/>
</grant-to>
</policy>
</cross-domain-access>
</access-policy>";
var xdomainPolicy =
@"<?xml version=""1.0"" ?>
<cross-domain-policy>
<allow-access-from domain=""*"" />
</cross-domain-policy>";
streamingEndpoint.CrossSiteAccessPolicies.ClientAccessPolicy = clientPolicy;
streamingEndpoint.CrossSiteAccessPolicies.CrossDomainPolicy = xdomainPolicy;
streamingEndpoint.Update();
}
public static void GetLocatorsInAllStreamingEndpoints(IAsset asset)
{
var locators = asset.Locators.Where(l => l.Type == LocatorType.OnDemandOrigin);
var ismFile = asset.AssetFiles.AsEnumerable().FirstOrDefault(a => a.Name.EndsWith(".ism"));
var template = new UriTemplate("{contentAccessComponent}/{ismFileName}/manifest");
var urls = locators.SelectMany(l =>
_context
.StreamingEndpoints
.AsEnumerable()
.Where(se => se.State == StreamingEndpointState.Running)
.Select(
se =>
template.BindByPosition(new Uri("http://" + se.HostName),
l.ContentAccessComponent,
ismFile.Name)))
.ToArray();
}
public static void Cleanup(IStreamingEndpoint streamingEndpoint,
IChannel channel)
{
if (streamingEndpoint != null)
{
streamingEndpoint.Stop();
streamingEndpoint.Delete();
}
IAsset asset;
if (channel != null)
{
foreach (var program in channel.Programs)
{
asset = _context.Assets.Where(se => se.Id == program.AssetId)
.FirstOrDefault();
program.Stop();
program.Delete();
if (asset != null)
{
foreach (var l in asset.Locators)
l.Delete();
asset.Delete();
}
}
channel.Stop();
channel.Delete();
}
}
}
}
Now I want to make a method that cuts the live stream, for example every 15 minutes, and saves each piece as an mp4, but I don't know where to start.
Can someone point me in the right direction?
Kind regards
UPDATE :
I want to save the mp4 files on my hard disk.
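One possible direction, independent of the Azure Media Services SDK code above, is to let ffmpeg read the published live stream and slice it with its segment muxer; the manifest URL below is only a placeholder for whatever streaming/archive URL the locator exposes, and 900 seconds corresponds to 15 minutes:
# hypothetical sketch: pull the live stream and write 15-minute mp4 chunks to the local disk
ffmpeg -i "http://<your-streaming-endpoint>/<asset>/manifest(format=m3u8-aapl)" -c copy -f segment -segment_time 900 -reset_timestamps 1 -segment_format mp4 chunk_%03d.mp4
Whether stream copy (-c copy) is possible depends on the source codecs; re-encoding with -c:v libx264 -c:a aac is the fallback.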
-
Using ffmpeg and ffserver to create a 2x1 live stream fails with unconnected output error
31 January 2021, by weevilknievel. I want to combine 2 RTSP streams (CCTV cameras) into a horizontal 2x1 strip and convert them to webm for use in an HTML5 webpage.



I am able to convert the streams into an mpeg or avi file easily, but as soon as I try to post to an ffserver ffm feed, I hit a wall.



Here is my working ffmpeg command, which writes out to a file:



ffmpeg -rtsp_transport tcp -thread_queue_size 12 -i "rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=1&stream=1.sdp" -i "rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=2&stream=1.sdp" -filter_complex "[0:v][1:v] hstack=inputs=2 [v]" -map "[v]" -r 30 output.mpg




It seems the ffmpeg process is outputting two streams and one of them is MPEG? In the above command I had to put a frame rate of "-r 30" into the command, otherwise I got an error saying that MPEG-1/2 does not support a framerate of 5/1.
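For context, this is expected: the mpeg1video/mpeg2video encoders only accept the frame rates defined by the MPEG-1/2 standards (23.976, 24, 25, 29.97, 30, 50, 59.94 and 60), so the 6 fps camera input has to be resampled. A hedged variant of the same command that makes the resampling and codec choice explicit (25 fps is picked arbitrarily):
# same filtergraph, explicit frame rate and MPEG-2 video codec
ffmpeg -rtsp_transport tcp -thread_queue_size 12 -i "rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=1&stream=1.sdp" -i "rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=2&stream=1.sdp" -filter_complex "[0:v][1:v] hstack=inputs=2 [v]" -map "[v]" -r 25 -c:v mpeg2video output.mpg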



As soon as I try to stream to ffserver, I get an error:



ffmpeg -rtsp_transport tcp -thread_queue_size 12 -i "rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=1&stream=1.sdp" -i "rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=2&stream=1.sdp" -filter_complex "[0:v][1:v] hstack=inputs=2 [v]" -map "[v]" -r 30 http://localhost:8090/feed5.ffm




The error I get is "Filter hstack has an unconnected output":



ffmpeg version N-86111-ga441aa90e8-static http://johnvansickle.com/ffmpeg/ Copyright (c) 2000-2017 the FFmpeg developers
 built with gcc 5.4.1 (Debian 5.4.1-8) 20170304
 configuration: --enable-gpl --enable-version3 --enable-static --disable-debug --disable-ffplay --disable-indev=sndio --disable-outdev=sndio --cc=gcc-5 --enable-fontconfig --enable-frei0r --enable-gnutls --enable-gray --enable-libass --enable-libfreetype --enable-libfribidi --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libsoxr --enable-libspeex --enable-libtheora --enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxvid --enable-libzimg
 libavutil 55. 63.100 / 55. 63.100
 libavcodec 57. 96.101 / 57. 96.101
 libavformat 57. 72.101 / 57. 72.101
 libavdevice 57. 7.100 / 57. 7.100
 libavfilter 6. 89.101 / 6. 89.101
 libswscale 4. 7.101 / 4. 7.101
 libswresample 2. 8.100 / 2. 8.100
 libpostproc 54. 6.100 / 54. 6.100
Input #0, rtsp, from 'rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=1&stream=1.sdp':
 Duration: N/A, start: 0.166667, bitrate: N/A
 Stream #0:0: Video: h264 (High), yuv420p(progressive), 352x288, 6 fps, 6 tbr, 90k tbn, 180k tbc
Input #1, rtsp, from 'rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=2&stream=1.sdp':
 Duration: N/A, start: 0.166667, bitrate: N/A
 Stream #1:0: Video: h264 (High), yuv420p(progressive), 352x288, 6 fps, 6 tbr, 90k tbn, 180k tbc
Filter hstack has an unconnected output




My ffserver conf is very basic :



HTTPPort 8090
HTTPBindAddress 0.0.0.0
MaxHTTPConnections 2000
MaxClients 1000
MaxBandwidth 100000
CustomLog -

<feed>
File /mnt/ramdisk/feed5.ffm
Truncate
FileMaxSize 5M
ACL allow 127.0.0.1
</feed>

<stream>
 Format webm
 Feed feed5.ffm
 VideoCodec libvpx
 VideoSize 640x360
 NoAudio
 AVOptionVideo quality realtime
 StartSendOnKey
 VideoBitRate 140
</stream>

<stream>
Format status
ACL allow localhost
ACL allow 192.168.0.0 192.168.255.255
</stream>
<redirect>
URL http://www.ffmpeg.org/
</redirect>




Edit :



Here is the debug output from the ffmpeg command above :



ffmpeg -v debug -rtsp_transport tcp -thread_queue_size 12 -i "rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=1&stream=1.sdp" -i "rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=2&stream=1.sdp" -filter_complex "[0:v][1:v] hstack=inputs=2 [v]" -map "[v]" -r 30 http://localhost:8090/feed5.ffm
ffmpeg version N-86111-ga441aa90e8-static http://johnvansickle.com/ffmpeg/ Copyright (c) 2000-2017 the FFmpeg developers
 built with gcc 5.4.1 (Debian 5.4.1-8) 20170304
 configuration: --enable-gpl --enable-version3 --enable-static --disable-debug --disable-ffplay --disable-indev=sndio --disable-outdev=sndio --cc=gcc-5 --enable-fontconfig --enable-frei0r --enable-gnutls --enable-gray --enable-libass --enable-libfreetype --enable-libfribidi --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enable-librtmp --enable-libsoxr --enable-libspeex --enable-libtheora --enable-libvidstab --enable-libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxvid --enable-libzimg
 libavutil 55. 63.100 / 55. 63.100
 libavcodec 57. 96.101 / 57. 96.101
 libavformat 57. 72.101 / 57. 72.101
 libavdevice 57. 7.100 / 57. 7.100
 libavfilter 6. 89.101 / 6. 89.101
 libswscale 4. 7.101 / 4. 7.101
 libswresample 2. 8.100 / 2. 8.100
 libpostproc 54. 6.100 / 54. 6.100
Splitting the commandline.
Reading option '-v' ... matched as option 'v' (set logging level) with argument 'debug'.
Reading option '-rtsp_transport' ... matched as AVOption 'rtsp_transport' with argument 'tcp'.
Reading option '-thread_queue_size' ... matched as option 'thread_queue_size' (set the maximum number of queued packets from the demuxer) with argument '12'.
Reading option '-i' ... matched as input url with argument 'rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=1&stream=1.sdp'.
Reading option '-i' ... matched as input url with argument 'rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=2&stream=1.sdp'.
Reading option '-filter_complex' ... matched as option 'filter_complex' (create a complex filtergraph) with argument '[0:v][1:v] hstack=inputs=2 [v]'.
Reading option '-map' ... matched as option 'map' (set input stream mapping) with argument '[v]'.
Reading option '-r' ... matched as option 'r' (set frame rate (Hz value, fraction or abbreviation)) with argument '30'.
Reading option 'http://localhost:8090/feed5.ffm' ... matched as output url.
Finished splitting the commandline.
Parsing a group of options: global .
Applying option v (set logging level) with argument debug.
Applying option filter_complex (create a complex filtergraph) with argument [0:v][1:v] hstack=inputs=2 [v].
Successfully parsed a group of options.
Parsing a group of options: input url rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=1&stream=1.sdp.
Applying option thread_queue_size (set the maximum number of queued packets from the demuxer) with argument 12.
Successfully parsed a group of options.
Opening an input file: rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=1&stream=1.sdp.
[tcp @ 0x58fb5e0] No default whitelist set
[rtsp @ 0x58f9700] SDP:
v=0
o=- 38990265062388 38990265062388 IN IP4 192.168.1.132
a=range:npt=0-
m=video 0 RTP/AVP 96
c=IN IP4 0.0.0.0
a=rtpmap:96 H264/90000
a=framerate:0S
a=fmtp:96 profile-level-id=640014; packetization-mode=1; sprop-parameter-sets=Z2QAFK2EAQwgCGEAQwgCGEAQwgCEK1CwSyA=,aO48sA==
a=control:trackID=3

Failed to parse interval end specification ''
[rtsp @ 0x58f9700] video codec set to: h264
[rtsp @ 0x58f9700] RTP Profile IDC: 64 Profile IOP: 0 Level: 14
[rtsp @ 0x58f9700] RTP Packetization Mode: 1
[rtsp @ 0x58f9700] Extradata set to 0x58fb940 (size: 38)
[rtsp @ 0x58f9700] setting jitter buffer size to 0
[rtsp @ 0x58f9700] hello state=0
[h264 @ 0x58fca00] nal_unit_type: 7, nal_ref_idc: 3
[h264 @ 0x58fca00] nal_unit_type: 8, nal_ref_idc: 3
[h264 @ 0x58fca00] nal_unit_type: 7, nal_ref_idc: 3
[h264 @ 0x58fca00] nal_unit_type: 8, nal_ref_idc: 3
[h264 @ 0x58fca00] unknown SEI type 229
[h264 @ 0x58fca00] nal_unit_type: 7, nal_ref_idc: 3
[h264 @ 0x58fca00] nal_unit_type: 8, nal_ref_idc: 3
[h264 @ 0x58fca00] nal_unit_type: 6, nal_ref_idc: 0
[h264 @ 0x58fca00] nal_unit_type: 5, nal_ref_idc: 3
[h264 @ 0x58fca00] unknown SEI type 229
[h264 @ 0x58fca00] Reinit context to 352x288, pix_fmt: yuv420p
[h264 @ 0x58fca00] nal_unit_type: 1, nal_ref_idc: 3
 Last message repeated 5 times
[h264 @ 0x58fca00] unknown SEI type 229
 Last message repeated 1 times
[rtsp @ 0x58f9700] All info found
[rtsp @ 0x58f9700] Setting avg frame rate based on r frame rate
Input #0, rtsp, from 'rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=1&stream=1.sdp':
 Duration: N/A, start: 0.166667, bitrate: N/A
 Stream #0:0, 28, 1/90000: Video: h264 (High), 1 reference frame, yuv420p(progressive, left), 352x288, 0/1, 6 fps, 6 tbr, 90k tbn, 180k tbc
Successfully opened the file.
Parsing a group of options: input url rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=2&stream=1.sdp.
Successfully parsed a group of options.
Opening an input file: rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=2&stream=1.sdp.
[tcp @ 0x59f4280] No default whitelist set
[rtsp @ 0x59f7620] SDP:
v=0
o=- 38990265062388 38990265062388 IN IP4 192.168.1.132
a=range:npt=0-
m=video 0 RTP/AVP 96
c=IN IP4 0.0.0.0
a=rtpmap:96 H264/90000
a=framerate:0S
a=fmtp:96 profile-level-id=640014; packetization-mode=1; sprop-parameter-sets=Z2QAFK2EAQwgCGEAQwgCGEAQwgCEK1CwSyA=,aO48sA==
a=control:trackID=3

Failed to parse interval end specification ''
[rtsp @ 0x59f7620] video codec set to: h264
[rtsp @ 0x59f7620] RTP Profile IDC: 64 Profile IOP: 0 Level: 14
[rtsp @ 0x59f7620] RTP Packetization Mode: 1
[rtsp @ 0x59f7620] Extradata set to 0x59f3670 (size: 38)
[rtp @ 0x59f62a0] No default whitelist set
[udp @ 0x58fb6a0] No default whitelist set
[udp @ 0x58fb6a0] end receive buffer size reported is 131072
[udp @ 0x591b940] No default whitelist set
[udp @ 0x591b940] end receive buffer size reported is 131072
[rtsp @ 0x59f7620] setting jitter buffer size to 500
[rtsp @ 0x59f7620] hello state=0
[h264 @ 0x59e4b20] nal_unit_type: 7, nal_ref_idc: 3
[h264 @ 0x59e4b20] nal_unit_type: 8, nal_ref_idc: 3
[h264 @ 0x59e4b20] nal_unit_type: 7, nal_ref_idc: 3
[h264 @ 0x59e4b20] nal_unit_type: 8, nal_ref_idc: 3
[h264 @ 0x59e4b20] unknown SEI type 229
[h264 @ 0x59e4b20] nal_unit_type: 7, nal_ref_idc: 3
[h264 @ 0x59e4b20] nal_unit_type: 8, nal_ref_idc: 3
[h264 @ 0x59e4b20] nal_unit_type: 6, nal_ref_idc: 0
[h264 @ 0x59e4b20] nal_unit_type: 5, nal_ref_idc: 3
[h264 @ 0x59e4b20] unknown SEI type 229
[h264 @ 0x59e4b20] Reinit context to 352x288, pix_fmt: yuv420p
[h264 @ 0x59e4b20] nal_unit_type: 1, nal_ref_idc: 3
 Last message repeated 5 times
[h264 @ 0x59e4b20] unknown SEI type 229
 Last message repeated 1 times
[rtsp @ 0x59f7620] All info found
[rtsp @ 0x59f7620] Setting avg frame rate based on r frame rate
Input #1, rtsp, from 'rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=2&stream=1.sdp':
 Duration: N/A, start: 0.166667, bitrate: N/A
 Stream #1:0, 28, 1/90000: Video: h264 (High), 1 reference frame, yuv420p(progressive, left), 352x288, 0/1, 6 fps, 6 tbr, 90k tbn, 180k tbc
Successfully opened the file.
detected 2 logical cores
[Parsed_hstack_0 @ 0x59f7cc0] Setting 'inputs' to value '2'
Parsing a group of options: output url http://localhost:8090/feed5.ffm.
Applying option map (set input stream mapping) with argument [v].
Applying option r (set frame rate (Hz value, fraction or abbreviation)) with argument 30.
Successfully parsed a group of options.
Opening an output file: http://localhost:8090/feed5.ffm.
[http @ 0x59aa700] Setting default whitelist 'http,https,tls,rtp,tcp,udp,crypto,httpproxy'
[http @ 0x59aa700] request: GET /feed5.ffm HTTP/1.1
User-Agent: Lavf/57.72.101
Accept: */*
Range: bytes=0-
Connection: close
Host: localhost:8090
Icy-MetaData: 1


[ffm @ 0x5917d00] Format ffm probed with size=2048 and score=101
[NULL @ 0x5bce3e0] Setting entry with key 'video_size' to value '640x360'
[NULL @ 0x5bce3e0] Setting entry with key 'b' to value '140000'
[NULL @ 0x5bce3e0] Setting entry with key 'time_base' to value '1/5'
[NULL @ 0x5bce3e0] Setting entry with key 'bt' to value '35000'
[NULL @ 0x5bce3e0] Setting entry with key 'rc_eq' to value 'tex^qComp'
[NULL @ 0x5bce3e0] Setting entry with key 'maxrate' to value '280000'
[NULL @ 0x5bce3e0] Setting entry with key 'bufsize' to value '280000'
[AVIOContext @ 0x5915b80] Statistics: 4096 bytes read, 0 seeks
[http @ 0x5915f00] Setting default whitelist 'http,https,tls,rtp,tcp,udp,crypto,httpproxy'
[http @ 0x5915f00] request: POST /feed5.ffm HTTP/1.1
Transfer-Encoding: chunked
User-Agent: Lavf/57.72.101
Accept: */*
Connection: close
Host: localhost:8090
Icy-MetaData: 1


Successfully opened the file.
Filter hstack has an unconnected output
[AVIOContext @ 0x59ab080] Statistics: 0 seeks, 0 writeouts




I am stumped - any clues?



Thanks in advance.
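One way to narrow this down is to run the same hstack filtergraph to a local webm file first: if that works, the filtergraph itself is fine and the problem is specific to the ffm output, which (as far as I understand ffserver) takes its encoding parameters from ffserver.conf rather than from the command line. A sketch, with the bitrate and output filename chosen arbitrarily:
# hedged test: same filtergraph, local webm output instead of the ffm feed
ffmpeg -rtsp_transport tcp -thread_queue_size 12 -i "rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=1&stream=1.sdp" -i "rtsp://192.168.1.132:554/user=admin&password=mypassword&channel=2&stream=1.sdp" -filter_complex "[0:v][1:v] hstack=inputs=2 [v]" -map "[v]" -r 25 -c:v libvpx -b:v 1M -f webm stacked_test.webm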


-
ADD Image overlay to ffmpeg video stream
1 July 2017, by Chris. I am new to ffmpeg and want to add a HUD to the video stream, so a few questions:
- What file do I need to edit?
- What do I need to do to achieve this?
Thanks in advance. Also, I am VERY new to all of this, so I will need step-by-step instructions.
I saw other questions saying to add this:
ffmpeg -n -i video.mp4 -i logo.png -filter_complex "[0:v]setsar=sar=1[v];[v][1]blend=all_mode='overlay':all_opacity=0.7" -movflags +faststart tmb/video.mp4
But I don't know where to put it; I entered it in the terminal and got this:
pi@raspberrypi:~ $ ffmpeg -n -i video.mp4 -i logo.png -filter_complex "[0:v]setsar=sar=1[v];[v][1]blend=all_mode='overlay':all_opacity=0.7" -movflags +faststart tmb/video.mp4
ffmpeg version N-86215-gb5228e4 Copyright (c) 2000-2017 the FFmpeg developers
built with gcc 4.9.2 (Raspbian 4.9.2-10)
configuration: --arch=armel --target-os=linux --enable-gpl --enable-libx264 --enable-nonfree --extra-libs=-ldl
libavutil 55. 63.100 / 55. 63.100
libavcodec 57. 96.101 / 57. 96.101
libavformat 57. 72.101 / 57. 72.101
libavdevice 57. 7.100 / 57. 7.100
libavfilter 6. 90.100 / 6. 90.100
libswscale 4. 7.101 / 4. 7.101
libswresample 2. 8.100 / 2. 8.100
libpostproc 54. 6.100 / 54. 6.100
video.mp4: No such file or directory
I don't understand what I am supposed to do with the video.mp4?
HERE IS THE SCRIPT THAT SENDS THE VIDEO.
import subprocess
import shlex
import re
import os
import time
import urllib2
import platform
import json
import sys
import base64
import random
import argparse
parser = argparse.ArgumentParser(description='robot control')
parser.add_argument('camera_id')
parser.add_argument('video_device_number', default=0, type=int)
parser.add_argument('--kbps', default=450, type=int)
parser.add_argument('--brightness', default=75, type=int, help='camera brightness')
parser.add_argument('--contrast', default=75, type=int, help='camera contrast')
parser.add_argument('--saturation', default=15, type=int, help='camera saturation')
parser.add_argument('--rotate180', default=False, type=bool, help='rotate image 180 degrees')
parser.add_argument('--env', default="prod")
args = parser.parse_args()
server = "runmyrobot.com"
#server = "52.52.213.92"
from socketIO_client import SocketIO, LoggingNamespace
# enable raspicam driver in case a raspicam is being used
os.system("sudo modprobe bcm2835-v4l2")
if args.env == "dev":
print "using dev port 8122"
port = 8122
elif args.env == "prod":
print "using prod port 8022"
port = 8022
else:
print "invalid environment"
sys.exit(0)
print "initializing socket io"
print "server:", server
print "port:", port
socketIO = SocketIO(server, port, LoggingNamespace)
print "finished initializing socket io"
#ffmpeg -f qtkit -i 0 -f mpeg1video -b 400k -r 30 -s 320x240 http://52.8.81.124:8082/hello/320/240/
def onHandleCameraCommand(*args):
#thread.start_new_thread(handle_command, args)
print args
socketIO.on('command_to_camera', onHandleCameraCommand)
def onHandleTakeSnapshotCommand(*args):
print "taking snapshot"
inputDeviceID = streamProcessDict['device_answer']
snapShot(platform.system(), inputDeviceID)
with open ("snapshot.jpg", 'rb') as f:
data = f.read()
print "emit"
socketIO.emit('snapshot', {'image':base64.b64encode(data)})
socketIO.on('take_snapshot_command', onHandleTakeSnapshotCommand)
def randomSleep():
"""A short wait is good for quick recovery, but sometimes a longer delay is needed or it will just keep trying and failing short intervals, like because the system thinks the port is still in use and every retry makes the system think it's still in use. So, this has a high likelihood of picking a short interval, but will pick a long one sometimes."""
timeToWait = random.choice((0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 5))
print "sleeping", timeToWait
time.sleep(timeToWait)
def getVideoPort():
url = 'http://%s/get_video_port/%s' % (server, cameraIDAnswer)
for retryNumber in range(2000):
try:
print "GET", url
response = urllib2.urlopen(url).read()
break
except:
print "could not open url ", url
time.sleep(2)
return json.loads(response)['mpeg_stream_port']
def getAudioPort():
url = 'http://%s/get_audio_port/%s' % (server, cameraIDAnswer)
for retryNumber in range(2000):
try:
print "GET", url
response = urllib2.urlopen(url).read()
break
except:
print "could not open url ", url
time.sleep(2)
return json.loads(response)['audio_stream_port']
def runFfmpeg(commandLine):
print commandLine
ffmpegProcess = subprocess.Popen(shlex.split(commandLine))
print "command started"
return ffmpegProcess
def handleDarwin(deviceNumber, videoPort, audioPort):
p = subprocess.Popen(["ffmpeg", "-list_devices", "true", "-f", "qtkit", "-i", "dummy"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
print err
deviceAnswer = raw_input("Enter the number of the camera device for your robot from the list above: ")
commandLine = 'ffmpeg -f qtkit -i %s -f mpeg1video -b 400k -r 30 -s 320x240 http://%s:%s/hello/320/240/' % (deviceAnswer, server, videoPort)
process = runFfmpeg(commandLine)
return {'process': process, 'device_answer': deviceAnswer}
def handleLinux(deviceNumber, videoPort, audioPort):
print "sleeping to give the camera time to start working"
randomSleep()
print "finished sleeping"
#p = subprocess.Popen(["ffmpeg", "-list_devices", "true", "-f", "qtkit", "-i", "dummy"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#out, err = p.communicate()
#print err
os.system("v4l2-ctl -c brightness={brightness} -c contrast={contrast} -c saturation={saturation}".format(brightness=args.brightness,
contrast=args.contrast,
saturation=args.saturation))
if deviceNumber is None:
deviceAnswer = raw_input("Enter the number of the camera device for your robot: ")
else:
deviceAnswer = str(deviceNumber)
#commandLine = '/usr/local/bin/ffmpeg -s 320x240 -f video4linux2 -i /dev/video%s -f mpeg1video -b 1k -r 20 http://runmyrobot.com:%s/hello/320/240/' % (deviceAnswer, videoPort)
#commandLine = '/usr/local/bin/ffmpeg -s 640x480 -f video4linux2 -i /dev/video%s -f mpeg1video -b 150k -r 20 http://%s:%s/hello/640/480/' % (deviceAnswer, server, videoPort)
# For new JSMpeg
#commandLine = '/usr/local/bin/ffmpeg -f v4l2 -framerate 25 -video_size 640x480 -i /dev/video%s -f mpegts -codec:v mpeg1video -s 640x480 -b:v 250k -bf 0 http://%s:%s/hello/640/480/' % (deviceAnswer, server, videoPort) # ClawDaddy
#commandLine = '/usr/local/bin/ffmpeg -s 1280x720 -f video4linux2 -i /dev/video%s -f mpeg1video -b 1k -r 20 http://runmyrobot.com:%s/hello/1280/720/' % (deviceAnswer, videoPort)
if args.rotate180:
rotationOption = "-vf transpose=2,transpose=2"
else:
rotationOption = ""
# video with audio
videoCommandLine = '/usr/local/bin/ffmpeg -f v4l2 -framerate 25 -video_size 640x480 -i /dev/video%s %s -f mpegts -codec:v mpeg1video -s 640x480 -b:v %dk -bf 0 -muxdelay 0.001 http://%s:%s/hello/640/480/' % (deviceAnswer, rotationOption, args.kbps, server, videoPort)
audioCommandLine = '/usr/local/bin/ffmpeg -f alsa -ar 44100 -ac 1 -i hw:1 -f mpegts -codec:a mp2 -b:a 32k -muxdelay 0.001 http://%s:%s/hello/640/480/' % (server, audioPort)
print videoCommandLine
print audioCommandLine
videoProcess = runFfmpeg(videoCommandLine)
audioProcess = runFfmpeg(audioCommandLine)
return {'video_process': videoProcess, 'audioProcess': audioProcess, 'device_answer': deviceAnswer}
def handleWindows(deviceNumber, videoPort):
p = subprocess.Popen(["ffmpeg", "-list_devices", "true", "-f", "dshow", "-i", "dummy"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
lines = err.split('\n')
count = 0
devices = []
for line in lines:
#if "] \"" in line:
# print "line:", line
m = re.search('.*\\"(.*)\\"', line)
if m != None:
#print line
if m.group(1)[0:1] != '@':
print count, m.group(1)
devices.append(m.group(1))
count += 1
if deviceNumber is None:
deviceAnswer = raw_input("Enter the number of the camera device for your robot from the list above: ")
else:
deviceAnswer = str(deviceNumber)
device = devices[int(deviceAnswer)]
commandLine = 'ffmpeg -s 640x480 -f dshow -i video="%s" -f mpegts -codec:v mpeg1video -b 200k -r 20 http://%s:%s/hello/640/480/' % (device, server, videoPort)
process = runFfmpeg(commandLine)
return {'process': process, 'device_answer': device}
def handleWindowsScreenCapture(deviceNumber, videoPort):
p = subprocess.Popen(["ffmpeg", "-list_devices", "true", "-f", "dshow", "-i", "dummy"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
lines = err.split('\n')
count = 0
devices = []
for line in lines:
#if "] \"" in line:
# print "line:", line
m = re.search('.*\\"(.*)\\"', line)
if m != None:
#print line
if m.group(1)[0:1] != '@':
print count, m.group(1)
devices.append(m.group(1))
count += 1
if deviceNumber is None:
deviceAnswer = raw_input("Enter the number of the camera device for your robot from the list above: ")
else:
deviceAnswer = str(deviceNumber)
device = devices[int(deviceAnswer)]
commandLine = 'ffmpeg -f dshow -i video="screen-capture-recorder" -vf "scale=640:480" -f mpeg1video -b 50k -r 20 http://%s:%s/hello/640/480/' % (server, videoPort)
print "command line:", commandLine
process = runFfmpeg(commandLine)
return {'process': process, 'device_answer': device}
def snapShot(operatingSystem, inputDeviceID, filename="snapshot.jpg"):
try:
os.remove('snapshot.jpg')
except:
print "did not remove file"
commandLineDict = {
'Darwin': 'ffmpeg -y -f qtkit -i %s -vframes 1 %s' % (inputDeviceID, filename),
'Linux': '/usr/local/bin/ffmpeg -y -f video4linux2 -i /dev/video%s -vframes 1 -q:v 1000 -vf scale=320:240 %s' % (inputDeviceID, filename),
'Windows': 'ffmpeg -y -s 320x240 -f dshow -i video="%s" -vframes 1 %s' % (inputDeviceID, filename)}
print commandLineDict[operatingSystem]
os.system(commandLineDict[operatingSystem])
def startVideoCapture():
videoPort = getVideoPort()
audioPort = getAudioPort()
print "video port:", videoPort
print "audio port:", audioPort
#if len(sys.argv) >= 3:
# deviceNumber = sys.argv[2]
#else:
# deviceNumber = None
deviceNumber = args.video_device_number
result = None
if platform.system() == 'Darwin':
result = handleDarwin(deviceNumber, videoPort, audioPort)
elif platform.system() == 'Linux':
result = handleLinux(deviceNumber, videoPort, audioPort)
elif platform.system() == 'Windows':
#result = handleWindowsScreenCapture(deviceNumber, videoPort)
result = handleWindows(deviceNumber, videoPort, audioPort)
else:
print "unknown platform", platform.system()
return result
def timeInMilliseconds():
return int(round(time.time() * 1000))
def main():
print "main"
streamProcessDict = None
twitterSnapCount = 0
while True:
socketIO.emit('send_video_status', {'send_video_process_exists': True,
'camera_id':cameraIDAnswer})
if streamProcessDict is not None:
print "stopping previously running ffmpeg (needs to happen if this is not the first iteration)"
streamProcessDict['process'].kill()
print "starting process just to get device result" # this should be a separate function so you don't have to do this
streamProcessDict = startVideoCapture()
inputDeviceID = streamProcessDict['device_answer']
print "stopping video capture"
streamProcessDict['process'].kill()
#print "sleeping"
#time.sleep(3)
#frameCount = int(round(time.time() * 1000))
videoWithSnapshots = False
while videoWithSnapshots:
frameCount = timeInMilliseconds()
print "taking single frame image"
snapShot(platform.system(), inputDeviceID, filename="single_frame_image.jpg")
with open ("single_frame_image.jpg", 'rb') as f:
# every so many frames, post a snapshot to twitter
#if frameCount % 450 == 0:
if frameCount % 6000 == 0:
data = f.read()
print "emit"
socketIO.emit('snapshot', {'frame_count':frameCount, 'image':base64.b64encode(data)})
data = f.read()
print "emit"
socketIO.emit('single_frame_image', {'frame_count':frameCount, 'image':base64.b64encode(data)})
time.sleep(0)
#frameCount += 1
if False:
if platform.system() != 'Windows':
print "taking snapshot"
snapShot(platform.system(), inputDeviceID)
with open ("snapshot.jpg", 'rb') as f:
data = f.read()
print "emit"
# skip sending the first image because it's mostly black, maybe completely black
#todo: should find out why this black image happens
if twitterSnapCount > 0:
socketIO.emit('snapshot', {'image':base64.b64encode(data)})
print "starting video capture"
streamProcessDict = startVideoCapture()
# This loop counts out a delay that occurs between twitter snapshots.
# Every 50 seconds, it kills and restarts ffmpeg.
# Every 40 seconds, it sends a signal to the server indicating status of processes.
period = 2*60*60 # period in seconds between snaps
for count in range(period):
time.sleep(1)
if count % 20 == 0:
socketIO.emit('send_video_status', {'send_video_process_exists': True,
'camera_id':cameraIDAnswer})
if count % 40 == 30:
print "stopping video capture just in case it has reached a state where it's looping forever, not sending video, and not dying as a process, which can happen"
streamProcessDict['video_process'].kill()
streamProcessDict['audio_process'].kill()
time.sleep(1)
if count % 80 == 75:
print "send status about this process and its child process ffmpeg"
ffmpegProcessExists = streamProcessDict['process'].poll() is None
socketIO.emit('send_video_status', {'send_video_process_exists': True,
'ffmpeg_process_exists': ffmpegProcessExists,
'camera_id':cameraIDAnswer})
#if count % 190 == 180:
# print "reboot system in case the webcam is not working"
# os.system("sudo reboot")
# if the video stream process dies, restart it
if streamProcessDict['video_process'].poll() is not None or streamProcessDict['audio_process'].poll():
# wait before trying to start ffmpeg
print "ffmpeg process is dead, waiting before trying to restart"
randomSleep()
streamProcessDict = startVideoCapture()
twitterSnapCount += 1
if __name__ == "__main__":
#if len(sys.argv) > 1:
# cameraIDAnswer = sys.argv[1]
#else:
# cameraIDAnswer = raw_input("Enter the Camera ID for your robot, you can get it by pointing a browser to the runmyrobot server %s: " % server)
cameraIDAnswer = args.camera_id
main()
ERROR:
ffmpeg -n -f mpegts -i http://54.183.232.63:12221 -i logo.png -filter_complex "[0:v]setsar=sar=1[v];[v][1]blend=all_mode='overlay':all_opacity=0.7" -movflags +faststart tmb/video.mp4
ffmpeg version N-86215-gb5228e4 Copyright (c) 2000-2017 the FFmpeg developers
built with gcc 4.9.2 (Raspbian 4.9.2-10)
configuration: --arch=armel --target-os=linux --enable-gpl --enable-libx264 --enable-nonfree --extra-libs=-ldl
libavutil 55. 63.100 / 55. 63.100
libavcodec 57. 96.101 / 57. 96.101
libavformat 57. 72.101 / 57. 72.101
libavdevice 57. 7.100 / 57. 7.100
libavfilter 6. 90.100 / 6. 90.100
libswscale 4. 7.101 / 4. 7.101
libswresample 2. 8.100 / 2. 8.100
libpostproc 54. 6.100 / 54. 6.100
[mpegts @ 0x1a57390] Could not detect TS packet size, defaulting to non-FEC/DVHS
http://54.183.232.63:12221: could not find codec parameters
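To burn the HUD into the outgoing stream itself, the natural place to change is the ffmpeg command the script builds (videoCommandLine in handleLinux), rather than running a second command on an mp4 file afterwards. A hedged sketch of what that command could look like, where /dev/video0, the logo.png path, the overlay position 10:10 and the runmyrobot port are all stand-ins for the values the script fills in at runtime:
# hypothetical variant of videoCommandLine with a logo overlaid in the top-left corner
/usr/local/bin/ffmpeg -f v4l2 -framerate 25 -video_size 640x480 -i /dev/video0 -i logo.png -filter_complex "[0:v][1:v]overlay=10:10[v]" -map "[v]" -f mpegts -codec:v mpeg1video -s 640x480 -b:v 450k -bf 0 -muxdelay 0.001 http://runmyrobot.com:VIDEO_PORT/hello/640/480/
The earlier "video.mp4: No such file or directory" message simply means that the command copied from other answers expects an existing input file named video.mp4 in the current directory, which does not apply to a live camera stream.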