
Other articles (46)
- Keeping control of your media in your hands
13 April 2011 — The vocabulary used on this site and around MediaSPIP in general aims to avoid references to Web 2.0 and to the companies that profit from media sharing.
While using MediaSPIP, you are invited to avoid using words like "Brand", "Cloud" and "Market".
MediaSPIP is designed to facilitate the sharing of creative media online, while allowing authors to retain complete control of their work.
MediaSPIP aims to be accessible to as many people as possible, and development is based on expanding the (...)

- Submit bugs and patches
13 April 2011 — Unfortunately, software is never perfect.
If you think you have found a bug, report it using our ticket system. To help us fix it, please provide the following information:
- the browser you are using, including the exact version
- as precise an explanation of the problem as possible
- if possible, the steps taken that result in the problem
- a link to the site / page in question
If you think you have solved the bug, fill in a ticket and attach a corrective patch to it.
You may also (...)

- Adding notes and captions to images
7 February 2011 — To add notes and captions to images, the first step is to install the "Légendes" plugin.
Once the plugin is activated, you can configure it in the configuration area to change the rights for creating, modifying and deleting notes. By default, only site administrators can add notes to images.
Changes when adding a media item
When adding a media item of type "image", a new button appears above the preview (...)
On other sites (5335)

- libavformat : calling avformat_open_input 2 times results in decoding white frames
25 April 2017, by explodus
- prebuilt ffmpeg libs (format/util/scale)
- version 57.56.101
- don't use any deprecated functions
- use the current API style
- av_read_frame -> avcodec_send_packet -> avcodec_receive_frame -> sws_scale (a minimal sketch of this chain follows)
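For readers unfamiliar with that call chain, here is a minimal illustrative sketch of the send/receive decoding loop. The names fmt, dec, vid_idx and frm are hypothetical placeholders (a previously opened format context, an opened codec context, the selected video stream index and an allocated frame), not identifiers from the question.

AVPacket pkt;
while (av_read_frame(fmt, &pkt) >= 0) {
    if (pkt.stream_index == vid_idx) {
        if (avcodec_send_packet(dec, &pkt) >= 0) {
            int r;
            while ((r = avcodec_receive_frame(dec, frm)) >= 0) {
                // frm now holds a decoded picture; convert it with sws_scale() here
            }
            // r == AVERROR(EAGAIN): the decoder needs more input before it can emit a frame
            // r == AVERROR_EOF: the decoder has been fully drained
        }
    }
    av_packet_unref(&pkt);  // always release the packet filled by av_read_frame()
}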
Everything is fine on the first run, but when I want to load/open another file I only get white frames.
void video::app::flush_cached_frames() {
    if (nullptr == avcontext)
        return;
    if (nullptr == avpicture)
        return;

    // send an empty packet which instructs the codec to start flushing
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    avcodec_send_packet(avcontext, &pkt);

    // drain the codec
    while (true) {
        int r = avcodec_receive_frame(avcontext, avpicture);
        if (r != 0)
            break;
    }
}
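As a side note (not part of the original question): once the drain loop above has returned AVERROR_EOF, the decoder stays in draining mode, and the libavcodec documentation names avcodec_flush_buffers() as the call that resets it so the same context can decode again without being reallocated. A one-line sketch assuming the same avcontext member:

// exit draining mode so avcontext can accept new packets again
avcodec_flush_buffers(avcontext);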
void video::app::close_avi() {
    flush_cached_frames();

    if (avformat && avstream)
        seek_to_frame(0);
    avstream = nullptr;

    if (avfile)
        fclose(avfile);
    avfile = nullptr;

    if (avcontext)
        avcodec_close(avcontext);
    avcontext = nullptr;

    if (avformat)
        avformat_free_context(avformat);
    avformat = nullptr;

    if (sws_ctx)
        sws_freeContext(sws_ctx);
    sws_ctx = nullptr;

    if (avparser)
        av_parser_close(avparser);
    avparser = nullptr;

    if (avinbuf)
        av_free(avinbuf);
    avinbuf = nullptr;
}

I think I close everything properly. Does anyone have an idea?
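For comparison only (a sketch, not a confirmed fix for the white frames): the documented counterparts of the setup calls used in load() below are avformat_close_input() for a context opened with avformat_open_input(), and avcodec_free_context() for a context allocated with avcodec_alloc_context3(). A teardown written around those, reusing the member names from the question, might look like this:

void video::app::close_avi() {
    flush_cached_frames();

    if (avcontext)
        avcodec_free_context(&avcontext);   // closes the decoder, frees the context, sets the pointer to nullptr

    if (avformat)
        avformat_close_input(&avformat);    // closes the input, frees the format context, sets the pointer to nullptr

    if (sws_ctx)
        sws_freeContext(sws_ctx);
    sws_ctx = nullptr;

    if (avparser)
        av_parser_close(avparser);
    avparser = nullptr;

    if (avinbuf)
        av_freep(&avinbuf);                 // frees the buffer and nulls the pointer

    if (avfile)
        fclose(avfile);                     // the FILE* opened manually; avformat_open_input() opens the file itself
    avfile = nullptr;
}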
Edit 1: init/load
unsigned video::app::load(const std::string& name_) {
    _file = name_;
    close_avi();

    av_register_all();
    avcodec_register_all();
    av_init_packet(&avpkt);

    AVCodecID codec_id = AV_CODEC_ID_H264;
    int64_t duration = 0;
    double fps = .0;
    int ret = 0;
    {
        av_log_set_level(1);
        avfile = fopen(name_.c_str(), "rb");
        avformat = avformat_alloc_context();
        ret = avformat_open_input(&avformat, name_.c_str(), nullptr, nullptr);
        ret = avformat_find_stream_info(avformat, nullptr);
        duration = avformat->duration;

        avstream = nullptr;
        if (avformat->nb_streams == 1) {
            avstream = avformat->streams[0];
        } else {
            avstream = avformat->streams[av_find_default_stream_index(avformat)];
        }
        if (avstream) {
            fps = (double(avstream->avg_frame_rate.num) / double(avstream->avg_frame_rate.den));
            codec_id = avstream->codecpar->codec_id;
            duration = avstream->duration;
            _vid.v_width = avstream->codecpar->width;
            _vid.v_height = avstream->codecpar->height;
            _vid.lastframe = duration / fps;
            _vid.lastframe = avstream->nb_frames;
        }

        avcodec = avcodec_find_decoder(avstream->codecpar->codec_id);
        avparser = av_parser_init(avcodec->id);
        avcontext = avcodec_alloc_context3(avcodec);
        avcontext->flags |= AVFMT_FLAG_NONBLOCK;
        avcontext->flags |= AVFMT_FLAG_FLUSH_PACKETS;
        avcontext->flags |= AVFMT_FLAG_DISCARD_CORRUPT;
        avcontext->flags |= AVFMT_FLAG_NOBUFFER;
        ret = avcodec_parameters_to_context(avcontext, avstream->codecpar);
        ret = avcodec_open2(avcontext, avcodec, nullptr);

        // Determine required buffer size and allocate buffer
        auto numBytes = av_image_get_buffer_size(
            AV_PIX_FMT_BGRA
            , avcontext->width
            , avcontext->height
            , 1);
        if (avinbuf)
            av_free(avinbuf);
        avinbuf = nullptr;
        avinbuf = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
        ret = av_image_fill_arrays(
            avrgb->data
            , avrgb->linesize
            , avinbuf
            , AV_PIX_FMT_BGRA
            , avcontext->width
            , avcontext->height
            , 1);

        sws_ctx = sws_getContext(
            avcontext->width
            , avcontext->height
            , avcontext->pix_fmt
            , avcontext->width
            , avcontext->height
            , AV_PIX_FMT_BGRA
            , SWS_BILINEAR
            , nullptr
            , nullptr
            , nullptr
        );
    }
    int err = (sws_ctx && avcontext && avformat) ? 0 : 1;
    // ...
}

Getting the frame:
uint8_t * video::app::get_frame(uint32_t frame) {
    if (!avcontext)
        return nullptr;
    if (!avformat)
        return nullptr;
    if (!avpicture)
        return nullptr;
    if (!avfile)
        return nullptr;

    try {
        int ret = 0;
        if (avpicture->data)
            av_frame_unref(avpicture);

        while (true) {
            if ((ret = av_read_frame(avformat, &avpkt)) < 0)
                break;
            if (avpkt.stream_index == avstream->index) {
                ret = avcodec_send_packet(avcontext, &avpkt);
                if (ret < 0)
                    break;
                while (ret >= 0) {
                    ret = avcodec_receive_frame(avcontext, avpicture);
                    if (ret == AVERROR_EOF) {
                        return nullptr;
                    } else if (ret == -11) {
                        avpkt.data = nullptr;
                        avpkt.size = 0;
                        break;
                    } else if (ret < 0) {
                        return nullptr;
                    }
                    if (ret == AVERROR(EAGAIN)) {
                        avpkt.data = nullptr;
                        avpkt.size = 0;
                        break;
                    }
                    if (ret >= 0) {
                        int linesize[AV_NUM_DATA_POINTERS] = {
                            avpicture->linesize[0]
                            , avpicture->linesize[1]
                            , avpicture->linesize[2]
                            , avpicture->linesize[3]
                            , avpicture->linesize[4]
                            , avpicture->linesize[5]
                            , avpicture->linesize[6]
                            , avpicture->linesize[7]
                        };
                        uint8_t * data[AV_NUM_DATA_POINTERS] = {
                            avpicture->data[0]
                            , avpicture->data[1]
                            , avpicture->data[2]
                            , avpicture->data[3]
                            , avpicture->data[4]
                            , avpicture->data[5]
                            , avpicture->data[6]
                            , avpicture->data[7]
                        };
                        {
                            // flip the frame, never ever touch this thing again!
                            // If the planes in the image are unequal size (e.g. YUV420) you need to adapt the height.
                            auto h = avcontext->height;
                            for (int i = 0; i < 4; i++) {
                                if (i)
                                    data[i] += linesize[i] * ((h >> 1) - 1);
                                else
                                    data[i] += linesize[i] * (h - 1);
                                linesize[i] = -linesize[i];
                            }
                        }
                        ret = sws_scale(
                            sws_ctx
                            , (uint8_t const * const *)data
                            , linesize
                            , 0
                            , avcontext->height
                            , avrgb->data
                            , avrgb->linesize);
                        av_packet_unref(&avpkt);
                        currPts = avpkt.dts;
                        currPts *= av_q2d(avstream->time_base);
                        usleep(1000000 * (currPts - prevPts));
                        prevPts = currPts;
                        return avrgb->data[0];
                    }
                }
            }
            av_packet_unref(&avpkt);
        }
    } catch (...) {
    }
    return nullptr;
}

- Cutting a live stream into separate mp4 files
9 June 2017, by Fearhunter — I am doing research on cutting a live stream into pieces and saving them as MP4 files. I am using this source for the proof of concept:
And this is the example code I use:
using System;
using System.Collections.Generic;
using System.Configuration;
using System.IO;
using System.Linq;
using System.Net;
using System.Security.Cryptography;
using System.Text;
using System.Threading.Tasks;
using Microsoft.WindowsAzure.MediaServices.Client;
using Newtonsoft.Json.Linq;
namespace AMSLiveTest
{
class Program
{
private const string StreamingEndpointName = "streamingendpoint001";
private const string ChannelName = "channel001";
private const string AssetlName = "asset001";
private const string ProgramlName = "program001";
// Read values from the App.config file.
private static readonly string _mediaServicesAccountName =
ConfigurationManager.AppSettings["MediaServicesAccountName"];
private static readonly string _mediaServicesAccountKey =
ConfigurationManager.AppSettings["MediaServicesAccountKey"];
// Field for service context.
private static CloudMediaContext _context = null;
private static MediaServicesCredentials _cachedCredentials = null;
static void Main(string[] args)
{
// Create and cache the Media Services credentials in a static class variable.
_cachedCredentials = new MediaServicesCredentials(
_mediaServicesAccountName,
_mediaServicesAccountKey);
// Used the cached credentials to create CloudMediaContext.
_context = new CloudMediaContext(_cachedCredentials);
IChannel channel = CreateAndStartChannel();
// Set the Live Encoder to point to the channel's input endpoint:
string ingestUrl = channel.Input.Endpoints.FirstOrDefault().Url.ToString();
// Use the previewEndpoint to preview and verify
// that the input from the encoder is actually reaching the Channel.
string previewEndpoint = channel.Preview.Endpoints.FirstOrDefault().Url.ToString();
IProgram program = CreateAndStartProgram(channel);
ILocator locator = CreateLocatorForAsset(program.Asset, program.ArchiveWindowLength);
IStreamingEndpoint streamingEndpoint = CreateAndStartStreamingEndpoint();
GetLocatorsInAllStreamingEndpoints(program.Asset);
// Once you are done streaming, clean up your resources.
Cleanup(streamingEndpoint, channel);
}
public static IChannel CreateAndStartChannel()
{
//If you want to change the Smooth fragments to HLS segment ratio, you would set the ChannelCreationOptions’s Output property.
IChannel channel = _context.Channels.Create(
new ChannelCreationOptions
{
Name = ChannelName,
Input = CreateChannelInput(),
Preview = CreateChannelPreview()
});
//Starting and stopping Channels can take some time to execute. To determine the state of operations after calling Start or Stop, query the IChannel.State .
channel.Start();
return channel;
}
private static ChannelInput CreateChannelInput()
{
return new ChannelInput
{
StreamingProtocol = StreamingProtocol.RTMP,
AccessControl = new ChannelAccessControl
{
IPAllowList = new List<IPRange>
{
new IPRange
{
Name = "TestChannelInput001",
// Setting 0.0.0.0 for Address and 0 for SubnetPrefixLength
// will allow access to all IP addresses.
Address = IPAddress.Parse("0.0.0.0"),
SubnetPrefixLength = 0
}
}
}
};
}
private static ChannelPreview CreateChannelPreview()
{
return new ChannelPreview
{
AccessControl = new ChannelAccessControl
{
IPAllowList = new List<IPRange>
{
new IPRange
{
Name = "TestChannelPreview001",
// Setting 0.0.0.0 for Address and 0 for SubnetPrefixLength
// will allow access to all IP addresses.
Address = IPAddress.Parse("0.0.0.0"),
SubnetPrefixLength = 0
}
}
}
};
}
public static void UpdateCrossSiteAccessPoliciesForChannel(IChannel channel)
{
var clientPolicy =
    @"<?xml version=""1.0"" encoding=""utf-8""?>
<access-policy>
  <cross-domain-access>
    <policy>
      <allow-from http-request-headers=""*"" http-methods=""*"">
        <domain uri=""*""/>
      </allow-from>
      <grant-to>
        <resource path=""/"" include-subpaths=""true""/>
      </grant-to>
    </policy>
  </cross-domain-access>
</access-policy>";
var xdomainPolicy =
    @"<?xml version=""1.0"" ?>
<cross-domain-policy>
  <allow-access-from domain=""*"" />
</cross-domain-policy>";
channel.CrossSiteAccessPolicies.ClientAccessPolicy = clientPolicy;
channel.CrossSiteAccessPolicies.CrossDomainPolicy = xdomainPolicy;
channel.Update();
}
public static IProgram CreateAndStartProgram(IChannel channel)
{
IAsset asset = _context.Assets.Create(AssetlName, AssetCreationOptions.None);
// Create a Program on the Channel. You can have multiple Programs that overlap or are sequential;
// however each Program must have a unique name within your Media Services account.
IProgram program = channel.Programs.Create(ProgramlName, TimeSpan.FromHours(3), asset.Id);
program.Start();
return program;
}
public static ILocator CreateLocatorForAsset(IAsset asset, TimeSpan ArchiveWindowLength)
{
// You cannot create a streaming locator using an AccessPolicy that includes write or delete permissions.
var locator = _context.Locators.CreateLocator
(
LocatorType.OnDemandOrigin,
asset,
_context.AccessPolicies.Create
(
"Live Stream Policy",
ArchiveWindowLength,
AccessPermissions.Read
)
);
return locator;
}
public static IStreamingEndpoint CreateAndStartStreamingEndpoint()
{
var options = new StreamingEndpointCreationOptions
{
Name = StreamingEndpointName,
ScaleUnits = 1,
AccessControl = GetAccessControl(),
CacheControl = GetCacheControl()
};
IStreamingEndpoint streamingEndpoint = _context.StreamingEndpoints.Create(options);
streamingEndpoint.Start();
return streamingEndpoint;
}
private static StreamingEndpointAccessControl GetAccessControl()
{
return new StreamingEndpointAccessControl
{
IPAllowList = new List<IPRange>
{
new IPRange
{
Name = "Allow all",
Address = IPAddress.Parse("0.0.0.0"),
SubnetPrefixLength = 0
}
},
AkamaiSignatureHeaderAuthenticationKeyList = new List<AkamaiSignatureHeaderAuthenticationKey>
{
new AkamaiSignatureHeaderAuthenticationKey
{
Identifier = "My key",
Expiration = DateTime.UtcNow + TimeSpan.FromDays(365),
Base64Key = Convert.ToBase64String(GenerateRandomBytes(16))
}
}
};
}
private static byte[] GenerateRandomBytes(int length)
{
var bytes = new byte[length];
using (var rng = new RNGCryptoServiceProvider())
{
rng.GetBytes(bytes);
}
return bytes;
}
private static StreamingEndpointCacheControl GetCacheControl()
{
return new StreamingEndpointCacheControl
{
MaxAge = TimeSpan.FromSeconds(1000)
};
}
public static void UpdateCrossSiteAccessPoliciesForStreamingEndpoint(IStreamingEndpoint streamingEndpoint)
{
var clientPolicy =
    @"<?xml version=""1.0"" encoding=""utf-8""?>
<access-policy>
  <cross-domain-access>
    <policy>
      <allow-from http-request-headers=""*"" http-methods=""*"">
        <domain uri=""*""/>
      </allow-from>
      <grant-to>
        <resource path=""/"" include-subpaths=""true""/>
      </grant-to>
    </policy>
  </cross-domain-access>
</access-policy>";
var xdomainPolicy =
    @"<?xml version=""1.0"" ?>
<cross-domain-policy>
  <allow-access-from domain=""*"" />
</cross-domain-policy>";
streamingEndpoint.CrossSiteAccessPolicies.ClientAccessPolicy = clientPolicy;
streamingEndpoint.CrossSiteAccessPolicies.CrossDomainPolicy = xdomainPolicy;
streamingEndpoint.Update();
}
public static void GetLocatorsInAllStreamingEndpoints(IAsset asset)
{
var locators = asset.Locators.Where(l => l.Type == LocatorType.OnDemandOrigin);
var ismFile = asset.AssetFiles.AsEnumerable().FirstOrDefault(a => a.Name.EndsWith(".ism"));
var template = new UriTemplate("{contentAccessComponent}/{ismFileName}/manifest");
var urls = locators.SelectMany(l =>
_context
.StreamingEndpoints
.AsEnumerable()
.Where(se => se.State == StreamingEndpointState.Running)
.Select(
se =>
template.BindByPosition(new Uri("http://" + se.HostName),
l.ContentAccessComponent,
ismFile.Name)))
.ToArray();
}
public static void Cleanup(IStreamingEndpoint streamingEndpoint,
IChannel channel)
{
if (streamingEndpoint != null)
{
streamingEndpoint.Stop();
streamingEndpoint.Delete();
}
IAsset asset;
if (channel != null)
{
foreach (var program in channel.Programs)
{
asset = _context.Assets.Where(se => se.Id == program.AssetId)
.FirstOrDefault();
program.Stop();
program.Delete();
if (asset != null)
{
foreach (var l in asset.Locators)
l.Delete();
asset.Delete();
}
}
channel.Stop();
channel.Delete();
}
}
}
}
Now I want to make a method to cut the live stream into pieces, for example every 15 minutes, and save each piece as an MP4 file, but I don't know where to start.
Can someone point me in the right direction?
Kind regards
UPDATE:
I want to save the MP4 files on my hard disk.
- FFMpeg estimated execution time
14 June 2017, by Juvi — I'm using the FFmpegAndroid library in my project to overlay a video.
The ffmpeg process runs inside a service, and I want to show the user a notification with a progress bar for the process.
I've gone through ffmpeg's output, but there's nothing that specifies the estimated execution time.
Maybe it's possible to calculate it from other parameters shown in the output, such as fps, bitrate or speed, but I have no clue. Any ideas?
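One common, library-agnostic approach (sketched here, not specific to FFmpegAndroid) is to parse the time= field that ffmpeg prints in its progress lines on stderr and relate it to the input duration obtained beforehand (e.g. with ffprobe or Android's MediaMetadataRetriever); the elapsed wall-clock time then scales the completion fraction into an estimate. The helper names below are illustrative only.

#include <cstdio>
#include <string>

// Parse "time=HH:MM:SS.xx" out of one ffmpeg progress line.
// Returns the processed position in seconds, or a negative value if the field is absent.
static double parse_progress_seconds(const std::string& line) {
    std::size_t pos = line.find("time=");
    if (pos == std::string::npos)
        return -1.0;
    int h = 0, m = 0;
    double s = 0.0;
    if (std::sscanf(line.c_str() + pos, "time=%d:%d:%lf", &h, &m, &s) != 3)
        return -1.0;
    return h * 3600.0 + m * 60.0 + s;
}

// Estimate the remaining time from the completion fraction and the elapsed wall-clock time.
static double estimate_remaining_seconds(double processed_s, double total_s, double elapsed_s) {
    if (processed_s <= 0.0 || total_s <= 0.0)
        return -1.0;                               // not enough information yet
    double fraction = processed_s / total_s;       // also usable directly as the notification progress value
    return elapsed_s * (1.0 - fraction) / fraction;
}

Alternatively, the speed= value printed on the same progress line gives remaining ≈ (total − processed) / speed without tracking wall-clock time.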