
Media (1)
-
SWFUpload Process
6 September 2011
Updated: September 2011
Language: French
Type: Text
Other articles (49)
-
Websites made with MediaSPIP
2 May 2011. This page lists some websites based on MediaSPIP.
-
Creating farms of unique websites
13 April 2011. MediaSPIP platforms can be installed as a farm, with a single "core" hosted on a dedicated server and used by multiple websites.
This allows (among other things): implementation costs to be shared between several different projects/individuals; rapid deployment of multiple unique sites; creation of groups of like-minded sites, making it possible to browse media in a more controlled and selective environment than the major "open" (...)
-
Les autorisations surchargées par les plugins (permissions overridden by plugins)
27 April 2010. MediaSPIP core:
autoriser_auteur_modifier(), so that visitors are able to modify their information on the authors page.
On other sites (7660)
-
FFMPEG audio transcoding using libav* libraries
10 February 2014, by vinvinod
I am writing an audio transcoding application using the ffmpeg libraries. Here is my code:
/*
* File: main.cpp
* Author: vinod
* Compile with "g++ -std=c++11 -o audiotranscode main.cpp -lavformat -lavcodec -lavutil -lavfilter"
*
*/
#if !defined PRId64 || PRI_MACROS_BROKEN
#undef PRId64
#define PRId64 "lld"
#endif
#define __STDC_FORMAT_MACROS
#ifdef __cplusplus
extern "C" {
#endif
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <inttypes.h>
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavutil/frame.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfilter.h>
#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
#ifdef __cplusplus
}
#endif
#include <iostream>
using namespace std;
int select_stream, got_frame, got_packet;
AVFormatContext *in_fmt_ctx = NULL, *out_fmt_ctx = NULL;
AVCodec *dec_codec = NULL, * enc_codec = NULL;
AVStream *audio_st = NULL;
AVCodecContext *enc_ctx = NULL, *dec_ctx = NULL;
AVFrame *pFrame = NULL, * pFrameFiltered = NULL;
AVFilterGraph *filter_graph = NULL;
AVFilterContext *buffersrc_ctx = NULL;
AVFilterContext *buffersink_ctx = NULL;
AVPacket packet;
string inFileName = "/home/vinod/vinod/Media/univac.webm";
string outFileName = "audio_extracted.m4a";
int target_bit_rate = 128000,
sample_rate = 22050,
channels = 1;
AVSampleFormat sample_fmt = AV_SAMPLE_FMT_S16;
string filter_description = "aresample=22050,aformat=sample_fmts=s16:channel_layouts=mono";
int log_averror(int errcode)
{
char *errbuf = (char *) calloc(AV_ERROR_MAX_STRING_SIZE, sizeof(char));
av_strerror(errcode, errbuf, AV_ERROR_MAX_STRING_SIZE);
std::cout << "Error - " << errbuf << std::endl;
free(errbuf); /* allocated with calloc, so release with free rather than delete[] */
return -1;
}
/**
* Initialize conversion filter */
int initialize_audio_filter()
{
char args[512];
int ret;
AVFilter *buffersrc = avfilter_get_by_name("abuffer");
AVFilter *buffersink = avfilter_get_by_name("abuffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
filter_graph = avfilter_graph_alloc();
const enum AVSampleFormat out_sample_fmts[] = {sample_fmt, AV_SAMPLE_FMT_NONE};
const int64_t out_channel_layouts[] = {av_get_default_channel_layout(out_fmt_ctx -> streams[0] -> codec -> channels), -1};
const int out_sample_rates[] = {out_fmt_ctx -> streams[0] -> codec -> sample_rate, -1};
if (!dec_ctx->channel_layout)
dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%" PRIx64,
in_fmt_ctx -> streams[select_stream] -> time_base.num, in_fmt_ctx -> streams[select_stream] -> time_base.den,
dec_ctx->sample_rate,
av_get_sample_fmt_name(dec_ctx->sample_fmt),
dec_ctx->channel_layout);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in", args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
return -1;
}
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out", NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
return ret;
}
ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
return ret;
}
ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
return ret;
}
ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
return ret;
}
/* Endpoints for the filter graph. */
outputs -> name = av_strdup("in");
outputs -> filter_ctx = buffersrc_ctx;
outputs -> pad_idx = 0;
outputs -> next = NULL;
/* Endpoints for the filter graph. */
inputs -> name = av_strdup("out");
inputs -> filter_ctx = buffersink_ctx;
inputs -> pad_idx = 0;
inputs -> next = NULL;
string filter_desc = filter_description;
if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_desc.c_str(), &inputs, &outputs, NULL)) < 0) {
log_averror(ret);
exit(1);
}
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) {
log_averror(ret);
exit(1);
}
/* Print summary of the sink buffer
* Note: args buffer is reused to store channel layout string */
AVFilterLink *outlink = buffersink_ctx->inputs[0];
av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
(int) outlink->sample_rate,
(char *) av_x_if_null(av_get_sample_fmt_name((AVSampleFormat) outlink->format), "?"),
args);
return 0;
}
/*
*
*/
int main(int argc, char **argv)
{
int ret;
cout << "Hello World" << endl;
printf("abcd");
avcodec_register_all();
av_register_all();
avfilter_register_all();
/* open input file, and allocate format context */
if (avformat_open_input(&in_fmt_ctx, inFileName.c_str(), NULL, NULL) < 0) {
std::cout << "error opening input file - " << inFileName << std::endl;
return -1;
}
/* retrieve stream information */
if (avformat_find_stream_info(in_fmt_ctx, NULL) < 0) {
std::cerr << "Could not find stream information in the input file " << inFileName << std::endl;
}
/* Dump format details */
printf("\n ---------------------------------------------------------------------- \n");
av_dump_format(in_fmt_ctx, 0, inFileName.c_str(), 0);
printf("\n ---------------------------------------------------------------------- \n");
/* Choose a audio stream */
select_stream = av_find_best_stream(in_fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec_codec, 0);
if (select_stream == AVERROR_STREAM_NOT_FOUND) {
std::cerr << "No audio stream found" << std::endl;
return -1;
}
if (select_stream == AVERROR_DECODER_NOT_FOUND) {
std::cerr << "No suitable decoder found" << std::endl;
return -1;
}
dec_ctx = in_fmt_ctx -> streams[ select_stream] -> codec;
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
/* init the audio decoder */
if ((ret = avcodec_open2(dec_ctx, dec_codec, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
return ret;
}
/* allocate output context */
ret = avformat_alloc_output_context2(&out_fmt_ctx, NULL, NULL,
outFileName.c_str());
if (ret < 0) {
std::cerr << "Could not create output context for the file " << outFileName << std::endl;
return -1;
}
/* find the encoder */
enum AVCodecID codec_id = out_fmt_ctx -> oformat -> audio_codec;
enc_codec = avcodec_find_encoder(codec_id);
if (!(enc_codec)) {
std::cerr << "Could not find encoder for - " << avcodec_get_name(codec_id) << std::endl;
return -1;
}
/* add a new stream */
audio_st = avformat_new_stream(out_fmt_ctx, enc_codec);
if (!audio_st) {
std::cerr << "Could not add audio stream - " << std::endl;
}
/* Initialise audio codec */
audio_st -> id = out_fmt_ctx -> nb_streams - 1;
enc_ctx = audio_st -> codec;
enc_ctx -> codec_id = codec_id;
enc_ctx -> codec_type = AVMEDIA_TYPE_AUDIO;
enc_ctx -> bit_rate = target_bit_rate;
enc_ctx -> sample_rate = sample_rate;
enc_ctx -> sample_fmt = sample_fmt;
enc_ctx -> channels = channels;
enc_ctx -> channel_layout = av_get_default_channel_layout(enc_ctx -> channels);
/* Some formats want stream headers to be separate. */
if (out_fmt_ctx -> oformat -> flags & AVFMT_GLOBALHEADER) {
enc_ctx -> flags |= CODEC_FLAG_GLOBAL_HEADER;
}
ret = avcodec_open2(out_fmt_ctx -> streams[0] -> codec, enc_codec, NULL);
if (ret < 0) {
std::cerr << "Could not create codec context for the file " << outFileName << std::endl;
return -1;
}
/* Initialize filter */
initialize_audio_filter();
if (!(out_fmt_ctx -> oformat -> flags & AVFMT_NOFILE)) {
int ret = avio_open(& out_fmt_ctx -> pb, outFileName.c_str(),
AVIO_FLAG_WRITE);
if (ret < 0) {
log_averror(ret);
return -1;
}
}
/* Write header */
ret = avformat_write_header(out_fmt_ctx, NULL);
if (ret < 0) {
log_averror(ret);
return -1;
}
/* Allocate frame */
pFrame = av_frame_alloc();
if (!pFrame) {
std::cerr << "Could not allocate frame\n";
return -1;
}
pFrameFiltered = av_frame_alloc();
if (!pFrameFiltered) {
std::cerr << "Could not allocate frame\n";
return -1;
}
av_init_packet(&packet);
packet.data = NULL;
packet.size = 0;
/* Read packet from the stream */
while (av_read_frame(in_fmt_ctx, &packet) >= 0) {
if (packet.stream_index == select_stream) {
avcodec_get_frame_defaults(pFrame);
ret = avcodec_decode_audio4(dec_ctx, pFrame, &got_frame, &packet);
if (ret < 0) {
log_averror(ret);
return ret;
}
printf("Decoded packet pts : %ld ", packet.pts);
printf("Frame Best Effort pts : %ld \n", pFrame->best_effort_timestamp);
/* Set frame pts */
pFrame -> pts = av_frame_get_best_effort_timestamp(pFrame);
if (got_frame) {
/* push the decoded frame into the filtergraph */
ret = av_buffersrc_add_frame_flags(buffersrc_ctx, pFrame, AV_BUFFERSRC_FLAG_KEEP_REF);
if (ret < 0) {
log_averror(ret);
return ret;
}
/* pull filtered frames from the filtergraph */
while (1) {
ret = av_buffersink_get_frame(buffersink_ctx, pFrameFiltered);
if ((ret == AVERROR(EAGAIN)) || (ret == AVERROR_EOF)) {
break;
}
if (ret < 0) {
printf("Error while getting filtered frames from filtergraph\n");
log_averror(ret);
return -1;
}
/* Initialize the packets */
AVPacket encodedPacket = {0};
av_init_packet(&encodedPacket);
ret = avcodec_encode_audio2(out_fmt_ctx -> streams[0] -> codec, &encodedPacket, pFrameFiltered, &got_packet);
if (!ret && got_packet && encodedPacket.size) {
/* Set correct pts and dts */
if (encodedPacket.pts != AV_NOPTS_VALUE) {
encodedPacket.pts = av_rescale_q(encodedPacket.pts, buffersink_ctx -> inputs[0] -> time_base,
out_fmt_ctx -> streams[0] -> time_base);
}
if (encodedPacket.dts != AV_NOPTS_VALUE) {
encodedPacket.dts = av_rescale_q(encodedPacket.dts, buffersink_ctx -> inputs[0] -> time_base,
out_fmt_ctx -> streams[0] -> time_base);
}
printf("Encoded packet pts %ld\n", encodedPacket.pts);
/* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(out_fmt_ctx, &encodedPacket);
if (ret < 0) {
log_averror(ret);
return -1;
}
} else if (ret < 0) {
log_averror(ret);
return -1;
}
av_frame_unref(pFrameFiltered);
}
av_frame_unref(pFrame);
}
}
}
/* Flush delayed frames from encoder*/
got_packet=1;
while (got_packet) {
AVPacket encodedPacket = {0};
av_init_packet(&encodedPacket);
ret = avcodec_encode_audio2(out_fmt_ctx -> streams[0] -> codec, &encodedPacket, NULL, &got_packet);
if (!ret && got_packet && encodedPacket.size) {
/* Set correct pts and dts */
if (encodedPacket.pts != AV_NOPTS_VALUE) {
encodedPacket.pts = av_rescale_q(encodedPacket.pts, buffersink_ctx -> inputs[0] -> time_base,
out_fmt_ctx -> streams[0] -> time_base);
}
if (encodedPacket.dts != AV_NOPTS_VALUE) {
encodedPacket.dts = av_rescale_q(encodedPacket.dts, buffersink_ctx -> inputs[0] -> time_base,
out_fmt_ctx -> streams[0] -> time_base);
}
printf("Encoded packet pts %ld\n", encodedPacket.pts);
/* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(out_fmt_ctx, &encodedPacket);
if (ret < 0) {
log_averror(ret);
return -1;
}
} else if (ret < 0) {
log_averror(ret);
return -1;
}
}
/* Write Trailer */
av_write_trailer(out_fmt_ctx);
avfilter_graph_free(&filter_graph);
if (dec_ctx)
avcodec_close(dec_ctx);
avformat_close_input(&in_fmt_ctx);
av_frame_free(&pFrame);
av_frame_free(&pFrameFiltered);
if (!(out_fmt_ctx -> oformat -> flags & AVFMT_NOFILE))
avio_close(out_fmt_ctx -> pb);
avcodec_close(out_fmt_ctx->streams[0]->codec);
avformat_free_context(out_fmt_ctx);
return 0;
}
The audio file after transcoding has the same duration as the input, but it is completely noisy. Can somebody tell me what I am doing wrong here?
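One thing worth checking in a decode / filter / encode pipeline like this (an assumption on my part, not something established in the post) is that many audio encoders, including the AAC encoder typically selected for an .m4a output, only accept input frames of exactly enc_ctx->frame_size samples, while the buffersink above hands back frames sized however the decoder produced them. A common way to handle this with the buffersink API, placed right after avfilter_graph_config() in initialize_audio_filter(), is roughly:

/* Sketch only: force the sink to deliver frames of the size the encoder
 * expects, unless the encoder supports variable frame sizes. */
if (!(enc_codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
av_buffersink_set_frame_size(buffersink_ctx, enc_ctx->frame_size);

Whether that is the actual cause of the noise here is untested; it is simply the most common mismatch in this kind of code.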
-
ffmpeg takes screenshots all at once
19 December 2020, by oo92
I am trying to use ffmpeg to grab screenshots of Twitch streams. This is my command:


ffmpeg -i https://stream-link.m3u8 \
    -ss 30 \
    -frames:v 10 \
    -r 1 \
    -f image2 \
    /home/me/Desktop/images/image-dataset/my-dataset/World-of-Warcraft/filename.jpg &



My goal is to take 1 screenshot per second, for a total of 10 screenshots. I've added the & at the end, after filename.jpg, to take these screenshots concurrently. However, the screenshots I get are all the same thing: no difference, the exact same moment in all 10 images. My take is that ffmpeg is not listening to -frames:v 10 and instead takes 10 screenshots all at once.

I tried to run it sequentially and the same problem happened. So the issue isn't concurrency.


Is there a way I can fix this?


Output:


ffmpeg version 4.1.3-0ppa1~18.04 Copyright (c) 2000-2019 the FFmpeg developers
 built with gcc 7 (Ubuntu 7.3.0-27ubuntu1~18.04)
 configuration: --prefix=/usr --extra-version='0ppa1~18.04' --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --arch=amd64 --enable-gpl --disable-stripping --enable-avresample --disable-filter=resample --enable-avisynth --enable-gnutls --enable-ladspa --enable-libaom --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libcodec2 --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libjack --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librsvg --enable-librubberband --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzmq --enable-libzvbi --enable-lv2 --enable-omx --enable-openal --enable-opengl --enable-sdl2 --enable-nonfree --enable-libfdk-aac --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libx264 --enable-shared
 libavutil 56. 22.100 / 56. 22.100
 libavcodec 58. 35.100 / 58. 35.100
 libavformat 58. 20.100 / 58. 20.100
 libavdevice 58. 5.100 / 58. 5.100
 libavfilter 7. 40.101 / 7. 40.101
 libavresample 4. 0. 0 / 4. 0. 0
 libswscale 5. 3.100 / 5. 3.100
 libswresample 3. 3.100 / 3. 3.100
 libpostproc 55. 3.100 / 55. 3.100
[https @ 0x560f4b435740] Opening 'https://video-edge-c2b56c.yto01.abs.hls.ttvnw.net/v1/segment/CvEEnTLMdXdKmicT1KsEeYg1j5_ppzV6USTh15nprTzVGZJwL0MnMggHmPmKVCVuec2AugqeIOO45aJMEiH7dUQH1yME6GtL5yH3YDONOR-lj-un-kd99iQtu_DePSfgU47MbqXvI3iw1JKfhgAZVMPQYv_OrwIvXlYdFBTpRQZpFUPVa8dlISmsaT_ypAGJwZYEfcmhN9Ar-yLcPyhTEt4NsP5wzshGwq3IgwY6givcSiKeHRAQ6RzyyYO2ZmnnWXavHM1McEmCbpMk55UU9-NXmtfMPE7Q0jOrkHfPqD8qr4t3-tzRUbNhO7PXa6DgsQRAYICiRxZ0aq5vGIHZD75mOVcl2t9AVNS6R2z5KpvjT24rryxb5mEvebzlq_QMHw83GJtbXv4eCmUnxVObI3idGbYbYkInccc07Yv6oOlUYg804gA6abt_jLFP9jC0tiBjEJIwwb0NJAeuX1YE-b-nL2-qlDWeHY_ZtOvZFCcaeiL-3j2FXIW-mn90bccZMHy1vb1V2tFJEOs5C7YjJ57jh4exm01PGp9ErfFDz--oGc5ZZgvLas3nWz6Imox6yYt0NQ7rZtXyA_6PWtuJBXcqGF7Q5OT4-Eg_m20lc2XuK-hzEZIWU_Pofm33-mXvtjNtXg_e-fkJzIUr4VDNx59cJpbEhS8DhWkqKZgAvQ39rahnpXEkRwt6Se8LH8X4zUU_Fx0k4ApDSGoJGPOJIWlnd8jp_c_8ELQB3lsqx58ppCkiqWtEIZqnkERMNdVJMOPfiJO-wFcStxw214rGr1UgPn-23LlBDOTU7dUnPgN2AYHnL7eggyTliBxGxizGcTjtjRIQpNzpVE-1SqRZ9wgremNhGhoMwxLy-8hlWMju1_Jt.ts' for reading
[https @ 0x55a8b4055d80] Opening 'https://video-weaver.yto01.hls.ttvnw.net/v1/playlist/CvoDrRxumiLM_XBX6zGlLKqZTlxcWMGRJwMy5c0gn038EHoSxU9uBxeTnSk7l8iZ3NDmmtNp2hexUJYQbXh06ClRvm8Vz41PPd2N-k-5GKNIse6MrFrNloxt7p9nEXdXx-8Dw2TofURJX0PloMSiGci_hCnrDJ38BdWg_78f8WFYjZrm5TYnVSDgZw37cFG8FeBJJV2PxF7gEGltrKs7MQ5kbDApHrRFSrYVkbvACCuxbSKZyRB7I6NmKrGVwBZfGLqh9vMXw0_jJAU21qPB9ku459OibJS4JDBTVt4l2MlASp8dhcbTICmLnEBBOIDe7RciSSpqYhq4Hil8TMunMRFc2UdDJS3GjXtw0BApLZMxOf-EXhHJf5f6-ALKEW7FRPsO6JCYQGpspLq2snUKuWqj9UBcLHnGjjY5cUHjvui81aalyz5fcDZK-pxUFhCZucpOxCDDYhfSLABqmM3eIentP8hlcwZg0ydy419Tj2lqI08ug9PUFBlMJWWcuwck5uIGIOshQVNEDIXJt_PJKao5dSgMGZzsIEUPi6Rlf2yUk7z-adjtADJjOzBzNV6QKtS00BiFTvbbYjiLydJffAEgeerVOG0_tkHZopfYmpJsp43Sr-hXTAk2cYSMIN__NqO7qAJzTF1rWXkeAo3wuWPz2mICJOSdq6SfSL0SEKb7mHVzjUerNSPqt7ZHWBYaDAeIk7sXJ_-6ZP1wyQ.m3u8' for reading
[hls,applehttp @ 0x5559d4ab0940] Opening 'https://video-edge-c2ab34.yto01.abs.hls.ttvnw.net/v1/segment/CtwE4wYV6Jd9ji9SF4DQupaPzeu0nawqExYayXT0VS3D-AgoiiuciSxoAUXEKj1TsevSSJHWs3AdkMaTZs_rYrXkKhvA8xgq_Ojp7MmrYrgDZi_vZSfoIWBLdpNN8RN8ISUtPOXWpRM8hZvgQOI00EKSlvcZoJ44OoZfvVQaN16IkjiP1K7TdrbiG_W43yYo8W3wrprTrI-gSK0yQdDYK58ROKuXwsWra6zJp1DTJLTV8L4u3ClJoDlBaoYSMEwnEVs_p7gBpNw9rn66mqtsGn_vCMBfoqrsUfCEem7cFvC3ph1iqfubbIc5LSkRdHekoYHDualvn-ScmkCBLMXn6U4SeFdSRrElMz2mgJM0t7DA6PEzz_SEh1harsM19_Z9REoIwWr1tzHWFU4WgTWrDbylsV2i11rBqAHWlk5_V8fP4CIFhiWyfxp8G_j1nmy1z-tC8Ttln6KkMFcGlvWR6tsPr4hUtPvrY6m05FMYfsmvDJVJhW8fFAppcVfPyGFMqQ6GRPo0CMrENqkIENTvMjQJt2GEyw8GOECucYWIbuhCOLLW0dyz-iJlVNYvObeGue18U79sneEAkjoxJuxDI61r5hOMwf3HXpSIUpk2sOzfjcded4KbvMjgIQxK9M-2qnmTkyfV756rGaT_9SrugKiZUdh_vOy9lDwFPN2GYq6P6Vcy4HKpM0ZCR2AMTe3PFXreDraCzfrmzwRULBG9AiNxs-JhYJ3_w-OSBhzbINJng-PwFnL9Dsm87NIYvc_CRUWT5jkJHws8BpzSJtZCXcZ3XgYGDe49balTzhB8VBIQIFWcbgO26YM0po0CxgZLiBoMoN25nK5P9U3FecXL.ts' for reading
[https @ 0x5602e5468a40] Opening 'https://video-weaver.yto01.hls.ttvnw.net/v1/playlist/Cv0DvhMhaY8nQyvgCVb8Knpnq4LS_QdntEu9nstdrurt6Vtc57dIoii04-Kau7quO62wCXKso-x4719ws7inztepZcc7sWrfbDuDCqL_D41hxTa1fJnZp8bdtEPU7dKIk7coJvUw7foVkVkoA6KkZF5BTSpzOL-9BAWdsIFAZ82dC9LziZCno-7j4_77jbCfSOdZdliNsHjwJkbeal9sp-JJB7sesAvRhT8Jdb6-6m_I1kOawK_NxzS4NBP7SlXD5wSNnSD_Y9TvInwhqC1Q09EqcpYP3JmDZwVTX0ld7vzcW2jsiLA48EOv62ji2Q99PdosWX23ZTH335v1AdQ0iX2nX4G0rNMdGJ5dYzAUezfiH0gFaIiQJmBo1WlFh45BrOqlB4VMQuhDZ47hJTHDOv2qcn9PmvO6N6ENj5A5FJuRbvjVfe7mUrNjaKfb5za7qmQ1usdRhxxl7srRF_Y1fnMLfIMuQw1gMRQAkISYP1Ism4pv4blEu-5td3C2JteGadUaczx8ZLSnWTJQwTJ_o8PzzYaVW900EU0kZuiQ6RgdpJmc3UYrLQ-OUxoudkt4GgjuoSHl8qCiP8nIwSvXnbGg1CH_mCBv_kiuWygbLrY9YVGz3wS67Dn7D6hNJ1tKmK4FP2I5UKasBX4qvqdGAnMhKKqbsITunYuSqgeGNxoSEEuS_9Sp0pxjnKpSOikkMZQaDL8_ZLSngkCEdWFBsg.m3u8' for reading
[https @ 0x561509d623c0] Opening 'https://video-edge-c2b644.yto01.abs.hls.ttvnw.net/v1/segment/Cu8ENRC9AI5iVzpgoEk2vmbOSFnED42oE5UOg4w4bYv_9bGEfbiubVy80d6REOQe0pV8xCdDMNxUGEpzMh_utuxCuvoCDIA5GwCPBI-wNINQ0yaERpzyMLzLzI8X5Qw6ps2MO_G5FeX08aPup3-ngBd-yGSZDzuQgBvMoxGI3ANZ-VEy-EmHZx5P7MbpYQUZ7jXO5ZzMZH5dcc2fCtU8Fhn3rudz2Z17a7PISmdN7IqGpWIgk2XZ7k0RG4Xq71wNZzfiKu78MAs-DkdBa0L3fBvIqFNS47tEU7YJ257Dk4K4tHnoMmVprTpleM5HdkBs9b6KA9n8YOfMSi3SLLaA49fixyoYlAGPu5lXSDflXKc4ffChFnPOUHz0Pcb-hHwiYy7KJtMG_ItInI_LZt3Q-qiKOnO_PFTkBjKUjYoZyRvvD8yb2gd8BbwaF7xpOG206QO_nNASoWyqDk7B4fyyARVUD84pPuo3E06FjYDeioH2FhGU0Yyd8MXhWTawa5KKgA0TqoP9Dt_TRz9xHuPFMJWBo39GuDGqofa0yCriDsPZQJg4evm49d-1yAd4NePMmHuC3mY76ojx9Tc4zW3tSo1c-8FdGoy20X4DK911JgMZZcAjj3fbudnk1Gpg52qixw2PTo_81xj6dO6r0EOddK9o-BDDQ9u-MoRVtodGOmdtjNlurC4nXYjyJxomkqgBSh3RrYcc0ZotEYPbBN6oW2HjOLCMtkajuvov_Z78wnO_0-VTn6Z4QIJHhzhFdwpRv7sBrwCQoZJf12lg9mTyDvn-zvmYpEqlpSx2zUAZJf1RY1nHVpSjsWgKb47sVZHPnpMSEHa5MFbY5k0uJLxysEuJjCQaDEvCUfXIcwtNL3l9JA.ts' for reading
 Last message repeated 1 times
[https @ 0x55ce2ebdf1c0] Opening 'https://video-edge-c2a4ec.yto01.abs.hls.ttvnw.net/v1/segment/CtUEspzXywhKD--is2mZ6lL0j5XqcvMtesaiCc06h1S2IcnmlUxRP1Rg9qT-wcqbftcpAr5DqtE0o-qFAr-bxrl8TRjAbNT-2qzZ7BJLAXQ-Q6Pl5Otv1haFvbdYN7H7Dok0SFJoE7rKXIwXyTy_KIIp7AsBdvlLN2vxG-mc-v1SynWjaXQ-ehgiG_5Tde_dVQo9wWBA44SuM7yGh-eQwl4KoM7R6XrwdZ2-xnsw-9cVsZvZNpcBAIJecAUqHcSkNsjTy3UGtN2lJHM-Ryfy9oT4Q-fOnuWj98DqkWIVVGvM3pHdN6RXWpc7GG-zCbuE-4EohTGfgCwvWeWPps4HDpOuJjwOYTRuBe6SnN6TT-nT-c5-QaZHhnKulixMLjzDQQD-MJGYKjFhv7DwfWL183RYOh20PirZnmMxxfbDmZC8PFHGOKqZ5K-1Fnsm6PyHtNOutlB6-kENTuXIEpYMrD30AV2LKFctHb3a_HY3lkAPqCbeYCtP2GZn6sV9cnPNZqN1YReu0BPvcVvMxM2tLo-iYl2dVksTho1QfIHZY7HYhYfUXk5Z_lw6M3XSgCFwC853tfbEIdPF2grF9FsJJzxhiIwl83rxsRAKRSNa2ikYYKzwOyQ0O4i-gcCXhAe-OrO9ZM8kd5OcVAe8XMWwRdEwYg0qTrerebQzoSm2Xy88cL0unGDZhZWRWxLjxNWAfLOYPT5Kp2QKK44YkwdKJxWgXTAXiNe2u89StyfyeFxHHdVrh77W-jLHKtOJs_TqtYVavu8-i_ltjstw_Q64kckKMFXCKBlSEhCCynQIIIrDLZki-ratekOHGgwhQO0qQG1pCWiaesQ.ts' for reading
[https @ 0x5602e4b9e1c0] Opening 'https://video-edge-c2b1f4.yto01.abs.hls.ttvnw.net/v1/segment/CsMEmSWiHUi9qiWeeVziTCwieY_8ABpoaUI2uCB_Y48jHDMEB9OH_mmlfnR9-gHHPyF6dZCdtj4xqgCrgu3Rx4GtZ5UWEPd1VPv-WBUPL-YxskwN7ZpwwiJ-OT_8A5ty6K3s57s0piPK97044lYfE_sKJtelKHix26WOr6qcbkt2voyPWRH_jok7eQuG-y2xfJOmIu5-1TqIaG0IepiqVvfDAaAReHOu_jId0roBGse2q6vcWMrB4MX6TCH3foLxm_1XcQnD4G4mwRN1BdSbhdOKkOv93kf8u-KYS1wCTb_vz05EBmINqZbtOYgkWd0NRuRyMwVg8pnI7qMQZzO-NZuxmAIRR1aEZvaciughFgcGgJzczkqQ8lo_5m8MMzojjuUbPvwUAOynjmpKBPTpry2Fp1O4R-vPQJUqrPvIRMwTFevQVg_mcqAVSoH2uKprkatLWJs_Km9WBR3pDkcGK8J_foVe0Sk-2N4X-IUzBRpKt6RZltl_SF5hzX2ciZoMQdlZN0srlcZg8oiW5B8ou0liBPD_fLoVxj0PG9cj5j9cEWxkDdwx54u0hE0fW-HKk5CXcIuXpD93iruqdLyG6tcPyp247S1rJ0XSq0GwEmAqZSY5s-YIGu5o2HZuQmL214HgnCN1plLZqZ4zdFJHSW9t61C3_tM_-IiFrQ1_BIltiqYsPTW_EFmaNvty9FvHQFx0XdGbBq1bv9pp6Bj_EtT0qukTSkEH8EBDhJGYFYWDfvG7Tn2-zW2HXIIarIgR7xRscE4fEhCVgyfim5444ACROhoPPx_rGgwlXw1up2iUzAYgbaw.ts' for reading
[https @ 0x5602e4b3b9c0] Opening 'https://video-edge-c2b1f4.yto01.abs.hls.ttvnw.net/v1/segment/CvIEWgSaqH8uzWG7t10EwAuefWhmLnNnXtN7Pw6-8eiisUARX4pH-v1I3oLUhOukV3svmhCp1XxQN-UkZx8_m6sjxa3ntRmA6ImIDlbBEVuxZ4lGSMlw4G-cuC44L9xlgCjMSTZcEZa6WYCBRToWx6g2AaA2u3Gf8rZ0JpFmWAee8X0K2s9OH1CcQvfR1RofszXRu4aQSd-v6cniMyIpPpjAZixHLQf_Adyf9Iuvws8ybtiV2llhT6x8dRvvh6S5n2XzlsxC9sihjOnNc_dhav4jlSeHHUJPqh36n0Ea-asPjzNBOCJrnNxLcNgItgeI6qVs9iD8shU50RlLoBNtgNycRy_ZgQ6vBR0k9hApi02qb1BKFOhIsWGvCi7NSknqnr3vhObWvl-fIGWKRNAsU7uc9aZet2btaUb900j2Vaau-77AlbvnyAqpL0Di2KR9ybHDNpUZsVzZh-reW3uKu-pAkIEKkzI5rDP7-H9QaK73nnB3XuSK_H-x_WF7qrcgAd_0XTqcrnEtPLDzD_2H4aZxdglqDQyxanhq3cFXf-qa0ux3ZilLQMBkzjZlFsNOT8ms9kfpC7HG7B_h7AIiDnu8EoIDKBQlxfzYG6ruQACGZoVFE3MiYEqgnpFzKsAWx1jujCV_h5Jogv4kxnrvzDtzwxWXsyrwo3gUTeTrEu62Mge6T9GRGE8Ny7q7L8Ng5d0FDAM1mSDFsuwaleGQDndIMa3J6TODcfI4gL1Tu1aEUSen1gWSRcUy1uQyQQ9zJSmc7Uaw8uMLyP3BzadJm51c3MoNTzLBsvktjhd3B2KHbWY1lXEh2AHA16VGWXpq7CD0ELMSEN1fF4x7RWpFxubjbxVNVv4aDCwwPCpEeEggW8Rfpw.ts' for reading
[hls,applehttp @ 0x5559d4ab0940] Opening 'https://video-edge-c2ab34.yto01.abs.hls.ttvnw.net/v1/segment/Ct4EMOLp6yFfEv2tgPtzLuzFmDFWtPkvqlBMyma5AlNsAFNWM3-m0d5Wm_DmdvMETEeVjPcGk1XEOcnfkHTFnGKsPn9rmqNNyWmXQp8jH1k5LGjiHTYVO8pa-6CuCMb16fGy9Fp1Xm0nXqLe4ZL0Fs6EV1-U2UfVyOqNFjcvwoAiE9cQyBUxmj9h1Tlo5kHsVd1TxtYRrHZs3sQepHkwLIDVSThuy-RKU-9oARCTfOP6USEgDpxkbWjtB-DGTUorX8x95mlCLwDMkmkM2-m9pUYdeP-PslZV1-IJPOVv4-G97PsV-TqQy1szQh_k9jiQzp_Bu8wB5RrVqdpMqli5YRMoqOmGCQo0i41g8y1Y-JZVF3JmK7G2SJ7MgdLEFaEaUBNz4XW6aGV1kL16E3PMRKqaYKoS21vwQP0E808fYr_0KrUhpQzzxPHKMKNmhX63S3O80FcH3GVmzlm0dXdVSEXV1TepsZG5NHqR7xgQt_WsjV21EuWCzGTpbrqMfCIH0nrlyowYpZm1ew3qREq1SfKPt4TophGYJRHjMD7B5loycfp2XjCyDmOX6cTc8RK7lQg1F6uukU_aW_Z8hbBuVbY8sqroms8ajmZETP3uo9VG6JFoJaNxDfMtplI10JMFHbBp0VZdRtaxxaPCQ3BBmWgl8vLckHjcY44JE_PDALwIHkQqecZo2GQoz2NhymilntSt_VFMm3Ib6yF_fujjwtafU7Xh5Cf9MBnk-IYHg2WZYLESPVB2Ia8jhkqFiHgYR0YLLLl_mflwBh6mFzSozfT4mw4_FK_esvspE1Zq80CEEhDxl0J0i39gKAbxRh87tVrkGgy0aNClJH41Wuni7yE.ts' for reading
[https @ 0x560f4b3d29c0] Opening 'https://video-edge-c2b56c.yto01.abs.hls.ttvnw.net/v1/segment/CsUEh-QaugK4bHVvi-wL8bzgE5-MsFiYcWoTXcwI-gzFEPycSDGrrqCPSAyov1gQsy2KYKm6Odcenh_UHBDJgG0dfHGOB9izlOatI3dONn14Z5BzKoZ84JntuWvM6ZfbaCRHZQFvHDJLwUAB_RZweqDHLnCwZr52Ma--xNhWaPoHfBBz_fGb23iSRFigxMjPEe-48lpQaF-XD9UPwjNx9Jkn6d0xNto7yCkLnPHVAMc08rI6ciMMFvU3Ul2p64VBhCzQhmweVuMmymRsw92BCEk3gAUYL7i_cYbJk1ws9kI6OHJlrZJmSgELoM7Zr6HKTxyWRsS0CfPqEUz0ZNgkxULIj9pjcPmuTZOWPtRhRGl1T0xZzo-OMcTOaVLV88rpiSVugoBVD8gBLCOxfAp0vBrS4_4daWeS_9RpmkKnwwfJk2yr6vvty2lEMKRE7oTDqy_ldgPequXbA89hX29SX8rxqUQr8OTtS89J5nhgPVQ3U6kcXsAMY1wGfZWMK2x2OO7drkKby3W2enhmMTcSzki7h2gJGVVSsfhs0OH2brSM0GCsYyJXMog_6bdP_CnusgyE2_aDW6sWCp-EZuKxSsLnTJohcNzlSqAxmR-xEMZwESiH_kUs1G-4rWZYgn2-fDAcRx8sSUkcmXKHlf-cU4WRa6KMi6XWi14ag_u4QeovJH5CB5kLnLmW0SyERfvwYlNOrsx5pD4AV_Rs3ENQU0S3pLkF0yhP8yZRwwhaAgcmfZQd2r0q_idRWFNl91pV_mJjp9tXgoYSEIWei-BAQVPqD4MP5qlkY4IaDGIUgwHWb2hkYmELMw.ts' for reading
http://twitch.tv/fleeceking
Input #0, hls,applehttp, from 'https://video-weaver.yto01.hls.ttvnw.net/v1/playlist/CuwDB7kg-rOx72I_T4TigQrbopLHvPc81zIoRat4x_gQcFkNXwclpj4t85zVFcLvU1InuO74I__WzB0KJjTt-5Zz0-tYb4sjEKTP_KvAbYVJQlEdWoYLqI8qiQatuZ1xR452s0-SNKWZNElN-lQmsf6v0woji7tmMrTcNCbD5lkeUGbZGHREtJyY5POK7fjEQ7sBTXxdU3zZSOTXemXHa0OMMzkOJ4NDnebAbpS9VIwG5Rmp8YFwGgwnA3qzF3dOulpeMHcdfUzuaiv96ySbnCoC3tdDWx3J7f862vMQ1O6yUbMiIa6n3id0xUr6oU63zM2QvxHYJAGZPvaXHEjwV4W8QX3db5RHHfPDVUOyQm8MKVsPlpt9nGhZ_4fPggOfBtJhuFS1apLRduVVu2w3jcoKbtubgwCUKBMQ5TS4cjQs52Ii83tPtZRVrmThN6MPlLlmwh_1CUS0qxZBlrpxLjGJjTfJJGTXLlR3DeO4J-5jNQq4yH3gzbKpmG1GwlQA9ulbBm2oexGUw03UCLjsroh32vyZbM7yDux9_w0v0FoXp8oo-beCssfLdS4OPkMe1lb40k42bDjB-FhwuI5ssd9GmXu1cjNen9GzbQI0HegaxOpZy54FZa3K1HB0TZbWsKIZ06F9rUlDbEF5Dt5xEhC2FzGsH16e1iZMLqrjwH3KGgy31QJgMBmysrogh4A.m3u8':
 Duration: N/A, start: 60.000000, bitrate: N/A
 Program 0 
 Metadata:
 variant_bitrate : 0
 Stream #0:0: Audio: aac (LC) ([15][0][0][0] / 0x000F), 44100 Hz, stereo, fltp
 Metadata:
 variant_bitrate : 0
 Stream #0:1: Video: h264 (Main) ([27][0][0][0] / 0x001B), yuv420p(tv, unknown/bt709/unknown), 1920x1080, 29.97 tbr, 90k tbn, 2k tbc
 Metadata:
 variant_bitrate : 0
 Stream #0:2: Data: timed_id3 (ID3 / 0x20334449)
 Metadata:
 variant_bitrate : 0
Stream mapping:
 Stream #0:1 -> #0:0 (h264 (native) -> mjpeg (native))
Press [q] to stop, [?] for help
ffmpeg version 4.1.3-0ppa1~18.04 Copyright (c) 2000-2019 the FFmpeg developers
 built with gcc 7 (Ubuntu 7.3.0-27ubuntu1~18.04)
 configuration: --prefix=/usr --extra-version='0ppa1~18.04' --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --arch=amd64 --enable-gpl --disable-stripping --enable-avresample --disable-filter=resample --enable-avisynth --enable-gnutls --enable-ladspa --enable-libaom --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libcodec2 --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libjack --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librsvg --enable-librubberband --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzmq --enable-libzvbi --enable-lv2 --enable-omx --enable-openal --enable-opengl --enable-sdl2 --enable-nonfree --enable-libfdk-aac --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libx264 --enable-shared
 libavutil 56. 22.100 / 56. 22.100
 libavcodec 58. 35.100 / 58. 35.100
 libavformat 58. 20.100 / 58. 20.100
 libavdevice 58. 5.100 / 58. 5.100
 libavfilter 7. 40.101 / 7. 40.101
 libavresample 4. 0. 0 / 4. 0. 0
 libswscale 5. 3.100 / 5. 3.100
 libswresample 3. 3.100 / 3. 3.100
 libpostproc 55. 3.100 / 55. 3.100
[swscaler @ 0x55ce30ae93c0] deprecated pixel format used, make sure you did set range correctly
[swscaler @ 0x5559d60827c0] deprecated pixel format used, make sure you did set range correctly
Output #0, image2, to '/home/onur/Desktop/stream-hatchet/streamhatchet-logorec/logorec-dataset/Hearthstone/39909606413_1608336776_%03d.jpg':
 Metadata:
 encoder : Lavf58.20.100
 Stream #0:0: Video: mjpeg, yuvj420p(pc), 1920x1080, q=2-31, 200 kb/s, 0.50 fps, 0.50 tbn, 0.50 tbc
 Metadata:
 variant_bitrate : 0
 encoder : Lavc58.35.100 mjpeg
 Side data:
 cpb: bitrate max/min/avg: 0/0/200000 buffer size: 0 vbv_delay: -1
frame= 10 fps=0.7 q=15.9 Lsize=N/A time=00:00:20.00 bitrate=N/A dup=10 drop=1 speed=1.36x 
video:1108kB audio:0kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: unknown
[hls,applehttp @ 0x55cf64902940] Opening 'https://video-edge-c2b1ac.yto01.abs.hls.ttvnw.net/v1/segment/CuwEUt6AwX7y0HJIK0gOgU5pOMXzfQyF3Pc26fGLIUKOngj0g1IFk_XeZucFgCMpYYYDlwf7lEN_XzEnaUN5-9qXva6W4DHtPAlRgWK73aytjJWh82d4qhfMVdqwiwdtHkNBU1WhEeVbWtaAkTcMbXMeghf8WFdcGmHXs2ZGgexZxMS7VVxS39WETlXWLDd7MFxXIu-9upX2NRiu4CXW-XKK4OIMJ86lFy1VARNMG9JUPMGFCZsoNOh5dNe8uFNAnBub1KPP30z3va-IrmhDBi49Ab7pcYhhnXkLUoSwETFq_ZdTHbPLLxR21kXxR6KrWEPrHFPH4xW-zfiNGDmzNS9s91mSDj4UiqhwU00jThtmSBjOlfJzXeKuE80GfnfBneYpsMpSmPQj8mN_xQ771-aQBPlnL3XUdMFk-pVvjaiFUPOnqRiLsU6wuKldwSPhEntkmufP8VgtFYZOWDDlD4-s0tgHC8tu5eKf9zaQLIe_lmKv6lOgwaO2XJsqERijJQ1wOwq9otQYF4kBRp_I-yaJS3azB0JDqFrEZvRAQqP9xtiEzkeJWBvFrwsneuKdMNMX8IXqSA1pM0zyJ_kd4IfZMdl5-4Hs7bzLgyqPlbphh2bkOQ7OnIXt1Hx7P4MkucOvFcWVA0jf89BGMGzrGKqjmIRuLtyM1wRVxWFXj3EakEIIx3yQoqCrd_WBAN-V5Xn4gvRn1MKyn_xQCFrpw4kO7eIYRFzO63askrPWo3GqyMPR5CYq0x8KuydUVlI19BzUCGiWoAlQhqLWAjG34EtIwlWhTaDRWVzUnmsG2S5XYFGCDWyCnAIZaRih3iwSEF7dzzIbAkAaukPxE1UWqNIaDFdi5FCo6z74AhwmPQ.ts' for reading
[https @ 0x5602e545fac0] Opening 'https://video-weaver.yto01.hls.ttvnw.net/v1/playlist/Cv0DvhMhaY8nQyvgCVb8Knpnq4LS_QdntEu9nstdrurt6Vtc57dIoii04-Kau7quO62wCXKso-x4719ws7inztepZcc7sWrfbDuDCqL_D41hxTa1fJnZp8bdtEPU7dKIk7coJvUw7foVkVkoA6KkZF5BTSpzOL-9BAWdsIFAZ82dC9LziZCno-7j4_77jbCfSOdZdliNsHjwJkbeal9sp-JJB7sesAvRhT8Jdb6-6m_I1kOawK_NxzS4NBP7SlXD5wSNnSD_Y9TvInwhqC1Q09EqcpYP3JmDZwVTX0ld7vzcW2jsiLA48EOv62ji2Q99PdosWX23ZTH335v1AdQ0iX2nX4G0rNMdGJ5dYzAUezfiH0gFaIiQJmBo1WlFh45BrOqlB4VMQuhDZ47hJTHDOv2qcn9PmvO6N6ENj5A5FJuRbvjVfe7mUrNjaKfb5za7qmQ1usdRhxxl7srRF_Y1fnMLfIMuQw1gMRQAkISYP1Ism4pv4blEu-5td3C2JteGadUaczx8ZLSnWTJQwTJ_o8PzzYaVW900EU0kZuiQ6RgdpJmc3UYrLQ-OUxoudkt4GgjuoSHl8qCiP8nIwSvXnbGg1CH_mCBv_kiuWygbLrY9YVGz3wS67Dn7D6hNJ1tKmK4FP2I5UKasBX4qvqdGAnMhKKqbsITunYuSqgeGNxoSEEuS_9Sp0pxjnKpSOikkMZQaDL8_ZLSngkCEdWFBsg.m3u8' for reading
frame= 0 fps=0.0 q=0.0 size=N/A time=00:00:00.00 bitrate=N/A speed= 0x 
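For what it is worth, the dup=10 in the progress line above suggests the ten output frames were produced by duplicating decoded frames rather than by sampling ten distinct moments. A variant commonly used for the one-frame-per-second case applies an fps filter and writes to a numbered output pattern; the command below is only a sketch along those lines (stream URL and output path are placeholders), not something taken from the post:

ffmpeg -i https://stream-link.m3u8 -ss 30 -vf fps=1 -frames:v 10 /path/to/output/filename-%03d.jpg

Whether that resolves the behaviour on this particular live stream is untested here.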



-
WebVTT Discussions at FOMS
1 January 2014, by silvia
At the recent FOMS (Foundations of Open Media Software and Standards) Developer Workshop, we had a massive focus on WebVTT and the state of its feature set. You will find links to summaries of the individual discussions in the FOMS Schedule page. Here are some of the key results I went away with.
1. WebVTT Regions
The key driving force for improvements to WebVTT continues to be the accurate representation of CEA608/708 captioning. As part of that drive, we’ve introduced regions (the CEA708 “window” concept) to WebVTT. WebVTT regions satisfy multiple requirements of CEA608/708 captions:
- support for rollup captions
- support for background color and border color on a group of cues independent of the background color of the individual cue
- possibility to move a group of cues from one location on screen to a different one
- support to specify an anchor point and a growth direction for cues when their text size changes
- support for specifying a fixed number of lines to be rendered
- possibility to specify which region is rendered in front of which other one when regions overlap
While WebVTT regions enable us to satisfy all of the above points, the specification isn’t actually complete yet and some of the above needs aren’t satisfied yet.
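As a purely illustrative sketch of how this hangs together (the identifier and numbers are invented, and the exact header syntax was still in flux at the time of these discussions), a region definition plus a cue assigned to it looks roughly like this:

WEBVTT

REGION
id:speaker
width:40%
lines:3
regionanchor:0%,100%
viewportanchor:10%,90%
scroll:up

00:00:03.000 --> 00:00:06.500 region:speaker
Rollup caption text rendered inside the region

The scroll:up setting is what gives rollup behaviour, and lines fixes the number of rendered lines, matching the rollup and fixed-line-count requirements above.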
We have an open bug to move a region elsewhere. A first discussion at FOMS seemed to indicate that we’ll have to add syntax for updating a region at a particular time and thus give region definitions a way to be valid only for a certain time frame. I can imagine that the region definitions that we have in the header of the WebVTT file now would have an implicitly defined time frame from the start to the end of the file, but can be overruled by a re-definition anywhere within the WebVTT file. That redefinition needs to provide a start and end time.
We registered a bug to add specifying the width and height of regions (and possibly of cues) by em (i.e. by multiples of the largest character in a font). This should allow us to have the region grow/shrink around the region anchor point with a change of font size by script or a user. em specifications should also be applied to cues – that matches the column count of CEA708/608 better.
When regions overlap, the original region extension spec already suggested a “layer” cue setting. It will be easy to add it.
Another change that we will ultimately need is the “scroll” setting: we will need to introduce support for scrolling text down or from left-to-right or right-to-left, e.g. vertical scrolling text seems to be used in some Chinese caption use cases.
2. Unify Rendering Approach
The introduction of regions created a second code path in the rendering spec with some duplication. At FOMS we discussed if it was possible to unify that. The suggestion is to render all cues into a region. Those that are not part of a region would be rendered into an anonymous region that covers the complete viewport. There may be some consequences to this, e.g. cue settings should be usable across all cues, no matter whether or not part of a region, and avoiding cue overlap may need to be done within regions.
Here’s a rough outline of the new rendering algorithm:
(1) Render the regions:
For a specified region, render the values as given: width, lines, regionanchor, viewportanchor, scroll.
For the anonymous region, render the following values instead:
- width: 100%
- lines: videoheight/lineheight
- regionanchor: 0,0
- viewportanchor: 0,0
- scroll: none
(2) Render the cues:
- Create a cue box and put it in its region (anonymous if none given).
- Calculate position & size of cue box from cue settings (position, line, size).
- Calculate position of cue text inside cue box from remaining cue settings (vertical, align).
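As a purely illustrative example (timestamps and values invented), a cue carrying the settings referred to in these steps could look like this in a WebVTT file:

00:00:05.000 --> 00:00:10.000 region:speaker line:90% position:50% size:80% align:middle
Example cue text placed via its cue settings

Here region:speaker would assign the cue to a region declared earlier in the file; line, position and size drive the placement of the cue box, and align drives the placement of the text inside the box.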
3. Vertical Features
WebVTT includes vertical rendering, both right-to-left and left-to-right. However, regions are not defined for vertical. Eventually, we’re going to have to look at the vertical features of WebVTT in more detail and figure out whether the spec works for them and what real-world requirements we have missed. We hope we can get some help from users in countries where vertically rendered captions/subtitles are the norm.
4. Best Practices
Some of the WebVTT users at FOMS suggested it would be advantageous to start a list of “best practices” for how to author captions with WebVTT. Example recommendations are:
- Use line numbers only to position cues from the top or bottom of the viewport. Don’t use them otherwise.
- Note that when the user increases the font size in rollup captions and thus introduces new line breaks, your cues will roll by faster, because the number of lines of a rollup is fixed.
- Make sure to use &lrm; and &rlm; UTF-8 markers to control the directionality of your text.
It would be nice if somebody started such a document.
5. Non-caption use cases
Instead of continuing to look back and improve our support of captions/subtitles in WebVTT, one session at FOMS also went ahead and looked forward to other use cases. The following requirements came out of this :
5.1 Preview Thumbnails
A common use case for timed data is the use of preview thumbnails on the navigation bar of videos. A native implementation of preview thumbnails would allow crawlers and search engines to have a standardised way of extracting timed images for media files, so introduction of a new @kind value “thumbnails” was suggested.
The content of a “thumbnails” cue could be any of:
- an image URL
- a sprite URL to a single image
- a spatial & temporal media fragment URL to a media resource
- base64 encoded image (data URI)
- an iframe offset to the media resource
The suggestion is to allow anything that would work in an img @src attribute as the value in a cue of @kind=”thumbnails”. Responsive images might also be useful for a track of @kind=”thumbnails”. It may even be possible to define an inband thumbnail track based on the track of @kind=”thumbnails”. Such cues should also work in the JavaScript track API.
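To make this concrete, here is a purely invented sketch (file name, timings and sprite coordinates are made up, and @kind=”thumbnails” itself was only a suggestion at this point) of what the VTT file referenced from such a track might contain, using spatial media fragments into a sprite sheet:

WEBVTT

00:00:00.000 --> 00:00:10.000
sprite.jpg#xywh=0,0,160,90

00:00:10.000 --> 00:00:20.000
sprite.jpg#xywh=160,0,160,90

Each cue payload is simply something a player (or a crawler) could treat like an img @src value for that time range.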
5.2 Chapter markers
There is interest to put richer content than just a chapter title into chapter cues. Often, chapters consist of a title, text and an image. The text is not so important, but the image is used almost everywhere that chapters are used. There may be a need to extend chapter cue content with images, similar to what a @kind=”thumbnails” track offers.
The conclusion that we arrived at was that we need to make @kind=”thumbnails” work first and then look at using the learnings from that to extend @kind=”chapters”.
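For reference, a plain chapter track as it exists today carries just a title per cue; the snippet below is an invented baseline example, before any of the image extensions discussed here:

WEBVTT

chapter-1
00:00:00.000 --> 00:04:30.000
Introduction

chapter-2
00:04:30.000 --> 00:12:00.000
First demo

The discussion above is about letting such cues additionally reference an image, much like a @kind=”thumbnails” cue would.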
5.3 Inband tracks for live video
A difficult topic was opened with the question of how to transport text tracks in live video. In live captioning, end times are never created for cues, but are implied by the start time of the next cue. This is a use case that hasn’t been addressed in HTML5/WebVTT yet. An old proposal to allow a special end time value of “NEXT” was discussed and recommended for adoption. There was also support for a spec change so that loading of a VTT file no longer blocks until all cues have been loaded.
5.4 Cross-domain VTT loading
A brief discussion centered around the fact that the spec disallows cross-domain loading of WebVTT files, but that no browser implements this. This needs to be discussed at the HTML WG level.
6. Regions in live captioning
The final topic that we discussed was how we could provide support for regions in live captioning.
- The currently active region definitions will need to become part of the header of every VTT file segment that HLS uses, so they are available in case the cues in the segment file reference them.
- “NEXT” in end time markers would make authoring of live captioned VTT files easier.
- If the application wants to use 1 word at a time and doesn’t want to delay sending the word until the full cue is authored (e.g. in a Hangout type environment), we will need to introduce the concept of “cue continuation markers”, so we know that a cue could be extended with the next VTT file fragment.
This is an extensive and impressive amount of discussion around WebVTT and a lot of new work to be performed in the future. I’m very grateful for all the people who have contributed to these discussions at FOMS and will hopefully continue to help get the specifications right.