
Media (1)
-
Revolution of Open-source and film making towards open film making
6 October 2011
Updated: July 2013
Language: English
Type: Text
Other articles (39)
-
Making files available
14 April 2011. By default, when it is first initialised, MediaSPIP does not allow visitors to download files, whether they are originals or the result of their transformation or encoding; it only allows them to be viewed.
However, it is possible and easy to give visitors access to these documents in various forms.
All of this happens on the template configuration page. You need to go to the channel's administration area and choose in the navigation (...) -
Permissions overridden by plugins
27 April 2010. MediaSPIP core:
autoriser_auteur_modifier() so that visitors are able to edit their own information on the authors page -
Retrieving information from the master site when installing an instance
26 November 2010. Purpose:
On the main site, a shared-hosting instance is defined by several things: the data in the spip_mutus table; its logo; its main author (the id_admin in the spip_mutus table, corresponding to an id_auteur in the spip_auteurs table), who will be the only one able to definitively create the shared instance;
It can therefore be quite sensible to retrieve some of this information in order to complete the installation of an instance, for example to retrieve the (...)
On other sites (4968)
-
How to use the FFmpeg API to make a filter overlay watermark?
6 September 2022, by Leon Lee
OS: Ubuntu 20.04


FFmpeg: 4.4.0


Test video:


Input #0, hevc, from './videos/akiyo_352x288p25.265':
  Duration: N/A, bitrate: N/A
  Stream #0:0: Video: hevc (Main), yuv420p(tv), 352x288, 25 fps, 25 tbr, 1200k tbn, 25 tbc


Test watermark:


200*200.png


I copied the official FFmpeg example.


It compiles without errors and runs without errors, but I can't see the watermark being added.


Here is my code


#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
int open_input_file(AVFormatContext *fmt, AVCodecContext **codecctx, AVCodec *codec, const char *filename, int index)
{
    int ret = 0;
    char msg[500];

    *codecctx = avcodec_alloc_context3(codec);
    ret = avcodec_parameters_to_context(*codecctx, fmt->streams[index]->codecpar);
    if (ret < 0)
    {
        printf("avcodec_parameters_to_context error,ret:%d\n", ret);
        return -1;
    }

    // open the decoder
    ret = avcodec_open2(*codecctx, codec, NULL);
    if (ret < 0)
    {
        printf("avcodec_open2 error,ret:%d\n", ret);
        return -2;
    }
    printf("pix:%d\n", (*codecctx)->pix_fmt);
    return ret;
}

int init_filter(AVFilterContext **buffersrc_ctx, AVFilterContext **buffersink_ctx, AVFilterGraph **filter_graph, AVStream *stream, AVCodecContext *codecctx, const char *filter_desc)
{
    int ret = -1;
    char args[512];
    char msg[500];
    const AVFilter *buffersrc = avfilter_get_by_name("buffer");
    const AVFilter *buffersink = avfilter_get_by_name("buffersink");

    AVFilterInOut *input = avfilter_inout_alloc();
    AVFilterInOut *output = avfilter_inout_alloc();

    AVRational time_base = stream->time_base;
    enum AVPixelFormat pix_fmts[] = {AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE};

    if (!output || !input || !filter_graph)
    {
        ret = -1;
        printf("avfilter_graph_alloc/avfilter_inout_alloc error,ret:%d\n", ret);
        goto end;
    }

    snprintf(args, sizeof(args), "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             codecctx->width, codecctx->height, codecctx->pix_fmt,
             stream->time_base.num, stream->time_base.den,
             codecctx->sample_aspect_ratio.num, codecctx->sample_aspect_ratio.den);
    ret = avfilter_graph_create_filter(buffersrc_ctx, buffersrc, "in", args, NULL, *filter_graph);
    if (ret < 0)
    {
        printf("avfilter_graph_create_filter buffersrc error,ret:%d\n", ret);
        goto end;
    }

    ret = avfilter_graph_create_filter(buffersink_ctx, buffersink, "out", NULL, NULL, *filter_graph);
    if (ret < 0)
    {
        printf("avfilter_graph_create_filter buffersink error,ret:%d\n", ret);
        goto end;
    }

    ret = av_opt_set_int_list(*buffersink_ctx, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
    {
        printf("av_opt_set_int_list error,ret:%d\n", ret);
        goto end;
    }

    /*
     * The buffer source output must be connected to the input pad of
     * the first filter described by filters_descr; since the first
     * filter input label is not specified, it is set to "in" by
     * default.
     */
    output->name = av_strdup("in");
    output->filter_ctx = *buffersrc_ctx;
    output->pad_idx = 0;
    output->next = NULL;

    /*
     * The buffer sink input must be connected to the output pad of
     * the last filter described by filters_descr; since the last
     * filter output label is not specified, it is set to "out" by
     * default.
     */
    input->name = av_strdup("out");
    input->filter_ctx = *buffersink_ctx;
    input->pad_idx = 0;
    input->next = NULL;

    if ((ret = avfilter_graph_parse_ptr(*filter_graph, filter_desc, &input, &output, NULL)) < 0)
    {
        printf("avfilter_graph_parse_ptr error,ret:%d\n", ret);
        goto end;
    }

    if ((ret = avfilter_graph_config(*filter_graph, NULL)) < 0)
    {
        printf("avfilter_graph_config error,ret:%d\n", ret);
        goto end;
    }

end:
    avfilter_inout_free(&input);
    avfilter_inout_free(&output);
    return ret;
}

int main(int argc, char **argv)
{
    int ret;
    char msg[500];
    const char *filter_descr = "drawbox=x=100:y=100:w=100:h=100:color=pink@0.5"; // OK
    //const char *filter_descr = "movie=200.png[wm];[in][wm]overlay=10:10[out]"; //Test
    //const char *filter_descr = "scale=640:360,transpose=cclock";
    AVFormatContext *pFormatCtx = NULL;
    AVCodecContext *pCodecCtx;
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;
    AVCodec *codec;
    int video_stream_index = -1;

    AVPacket packet;
    AVFrame *pFrame;
    AVFrame *pFrame_out;

    filter_graph = avfilter_graph_alloc();
    FILE *fp_yuv = fopen("test.yuv", "wb+");

    ret = avformat_open_input(&pFormatCtx, argv[1], NULL, NULL);
    if (ret < 0)
    {
        printf("avformat_open_input error,ret:%d\n", ret);
        return -1;
    }

    ret = avformat_find_stream_info(pFormatCtx, NULL);
    if (ret < 0)
    {
        printf("avformat_find_stream_info error,ret:%d\n", ret);
        return -2;
    }

    ret = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &codec, 0);
    if (ret < 0)
    {
        printf("av_find_best_stream error,ret:%d\n", ret);
        return -3;
    }
    // remember the video stream index
    video_stream_index = ret;

    av_dump_format(pFormatCtx, 0, argv[1], 0);
    if ((ret = open_input_file(pFormatCtx, &pCodecCtx, codec, argv[1], video_stream_index)) < 0)
    {
        ret = -1;
        printf("open_input_file error,ret:%d\n", ret);
        goto end;
    }

    if ((ret = init_filter(&buffersrc_ctx, &buffersink_ctx, &filter_graph, pFormatCtx->streams[video_stream_index], pCodecCtx, filter_descr)) < 0)
    {
        ret = -2;
        printf("init_filter error,ret:%d\n", ret);
        goto end;
    }

    pFrame = av_frame_alloc();
    pFrame_out = av_frame_alloc();

    while (1)
    {
        if ((ret = av_read_frame(pFormatCtx, &packet)) < 0)
            break;

        if (packet.stream_index == video_stream_index)
        {
            ret = avcodec_send_packet(pCodecCtx, &packet);
            if (ret < 0)
            {
                printf("avcodec_send_packet error,ret:%d\n", ret);
                break;
            }

            while (ret >= 0)
            {
                ret = avcodec_receive_frame(pCodecCtx, pFrame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                {
                    break;
                }
                else if (ret < 0)
                {
                    printf("avcodec_receive_frame error,ret:%d\n", ret);
                    goto end;
                }

                pFrame->pts = pFrame->best_effort_timestamp;

                /* push the decoded frame into the filtergraph */
                ret = av_buffersrc_add_frame_flags(buffersrc_ctx, pFrame, AV_BUFFERSRC_FLAG_KEEP_REF);
                if (ret < 0)
                {
                    printf("av_buffersrc_add_frame_flags error,ret:%d\n", ret);
                    break;
                }

                /* pull filtered frames from the filtergraph */
                while (1)
                {
                    ret = av_buffersink_get_frame(buffersink_ctx, pFrame_out);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;
                    if (pFrame_out->format == AV_PIX_FMT_YUV420P)
                    {
                        // write the Y, U and V planes line by line, respecting each plane's stride
                        for (int i = 0; i < pFrame_out->height; i++)
                        {
                            fwrite(pFrame_out->data[0] + pFrame_out->linesize[0] * i, 1, pFrame_out->width, fp_yuv);
                        }
                        for (int i = 0; i < pFrame_out->height / 2; i++)
                        {
                            fwrite(pFrame_out->data[1] + pFrame_out->linesize[1] * i, 1, pFrame_out->width / 2, fp_yuv);
                        }
                        for (int i = 0; i < pFrame_out->height / 2; i++)
                        {
                            fwrite(pFrame_out->data[2] + pFrame_out->linesize[2] * i, 1, pFrame_out->width / 2, fp_yuv);
                        }
                    }
                    av_frame_unref(pFrame_out);
                }
                av_frame_unref(pFrame);
            }
        }
        av_packet_unref(&packet);
    }

end:
    avcodec_free_context(&pCodecCtx);
    fclose(fp_yuv);
}
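
One thing that stands out in the code above: the filter description that actually runs is the drawbox one, while the movie/overlay line is commented out and points at 200.png rather than the 200*200.png test watermark mentioned earlier, so the PNG is never read. As a rough sketch only (the file name and path are assumptions taken from the question, not verified), an overlay-based description for the same buffer/buffersink graph could look like this:


/* Sketch, not the original code: the movie source decodes the watermark
 * image and the overlay filter composites it 10 px from the top-left
 * corner of each video frame. Adjust the path to wherever the PNG lives. */
const char *filter_descr = "movie=200*200.png[wm];[in][wm]overlay=10:10[out]";


If the movie source cannot open its file, avfilter_graph_parse_ptr() or avfilter_graph_config() fails, so checking their return values (as the code already does) should show whether the watermark graph was built at all.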



-
ffmpeg xfade audio out of sync
1 December 2020, by Richard
Hi, I am using ffmpeg xfade to concatenate two clips. The transition looks fine, but the second clip has no audio in the output. I couldn't find more information about this in https://trac.ffmpeg.org/wiki/Xfade or the official documentation. How can I keep the audio of both clips in sync?


I'm putting the complete log below; it's massive.


ffmpeg -i teacher1.mp4 -i teacher2.mp4 -filter_complex xfade=transition=fadewhite:duration=1:offset=14.5 output.mp4
ffmpeg version 4.3.1-static https://johnvansickle.com/ffmpeg/ Copyright (c) 2000-2020 the FFmpeg developers
 built with gcc 8 (Debian 8.3.0-6)
 configuration: --enable-gpl --enable-version3 --enable-static --disable-debug --disable-ffplay --disable-indev=sndio --disable-outdev=sndio --cc=gcc --enable-fontconfig --enable-frei0r --enable-gnutls --enable-gmp --enable-libgme --enable-gray --enable-libaom --enable-libfribidi --enable-libass --enable-libvmaf --enable-libfreetype --enable-libmp3lame --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-librubberband --enable-libsoxr --enable-libspeex --enable-libsrt --enable-libvorbis --enable-libopus --enable-libtheora --enable-libvidstab --enable-libvo-amrwbenc --enable-libvpx --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libdav1d --enable-libxvid --enable-libzvbi --enable-libzimg
 libavutil 56. 51.100 / 56. 51.100
 libavcodec 58. 91.100 / 58. 91.100
 libavformat 58. 45.100 / 58. 45.100
 libavdevice 58. 10.100 / 58. 10.100
 libavfilter 7. 85.100 / 7. 85.100
 libswscale 5. 7.100 / 5. 7.100
 libswresample 3. 7.100 / 3. 7.100
 libpostproc 55. 7.100 / 55. 7.100
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'teacher1.mp4':
 Metadata:
 major_brand : isom
 minor_version : 512
 compatible_brands: isomiso2avc1mp41
 encoder : Lavf58.29.100
 Duration: 00:00:15.02, start: 0.000000, bitrate: 2447 kb/s
 Stream #0:0(und): Video: h264 (High) (avc1 / 0x31637661), yuv420p, 1920x1080, 2315 kb/s, 25 fps, 25 tbr, 12800 tbn, 50 tbc (default)
 Metadata:
 handler_name : VideoHandler
 Stream #0:1(und): Audio: aac (LC) (mp4a / 0x6134706D), 44100 Hz, stereo, fltp, 128 kb/s (default)
 Metadata:
 handler_name : SoundHandler
Input #1, mov,mp4,m4a,3gp,3g2,mj2, from 'teacher2.mp4':
 Metadata:
 major_brand : isom
 minor_version : 512
 compatible_brands: isomiso2avc1mp41
 encoder : Lavf58.29.100
 Duration: 00:00:15.02, start: 0.000000, bitrate: 1643 kb/s
 Stream #1:0(und): Video: h264 (High) (avc1 / 0x31637661), yuv420p, 1920x1080, 1509 kb/s, 25 fps, 25 tbr, 12800 tbn, 50 tbc (default)
 Metadata:
 handler_name : VideoHandler
 Stream #1:1(und): Audio: aac (LC) (mp4a / 0x6134706D), 44100 Hz, stereo, fltp, 128 kb/s (default)
 Metadata:
 handler_name : SoundHandler
Stream mapping:
 Stream #0:0 (h264) -> xfade:main (graph 0)
 Stream #1:0 (h264) -> xfade:xfade (graph 0)
 xfade (graph 0) -> Stream #0:0 (libx264)
 Stream #0:1 -> #0:1 (aac (native) -> aac (native))
Press [q] to stop, [?] for help
[libx264 @ 0x64603c0] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2
[libx264 @ 0x64603c0] profile High 4:4:4 Predictive, level 4.0, 4:4:4, 8-bit
[libx264 @ 0x64603c0] 264 - core 161 r3018 db0d417 - H.264/MPEG-4 AVC codec - Copyleft 2003-2020 - http://www.videolan.org/x264.html - options: cabac=1 ref=3 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=1 me_range=16 chroma_me=1 trellis=1 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=4 threads=6 lookahead_threads=1 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=3 b_pyramid=2 b_adapt=1 b_bias=0 direct=1 weightb=1 open_gop=0 weightp=2 keyint=250 keyint_min=25 scenecut=40 intra_refresh=0 rc_lookahead=40 rc=crf mbtree=1 crf=23.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
Output #0, mp4, to 'output.mp4':
 Metadata:
 major_brand : isom
 minor_version : 512
 compatible_brands: isomiso2avc1mp41
 encoder : Lavf58.45.100
 Stream #0:0: Video: h264 (libx264) (avc1 / 0x31637661), yuv444p, 1920x1080, q=-1--1, 25 fps, 12800 tbn, 25 tbc (default)
 Metadata:
 encoder : Lavc58.91.100 libx264
 Side data:
 cpb: bitrate max/min/avg: 0/0/0 buffer size: 0 vbv_delay: N/A
 Stream #0:1(und): Audio: aac (LC) (mp4a / 0x6134706D), 44100 Hz, stereo, fltp, 128 kb/s (default)
 Metadata:
 handler_name : SoundHandler
 encoder : Lavc58.91.100 aac
frame= 738 fps= 25 q=-1.0 Lsize= 7317kB time=00:00:29.40 bitrate=2038.9kbits/s speed=1.01x 
video:7064kB audio:236kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 0.241368%
[libx264 @ 0x64603c0] frame I:8 Avg QP:16.56 size:100307
[libx264 @ 0x64603c0] frame P:332 Avg QP:19.50 size: 15211
[libx264 @ 0x64603c0] frame B:398 Avg QP:24.35 size: 3467
[libx264 @ 0x64603c0] consecutive B-frames: 6.2% 62.6% 8.9% 22.2%
[libx264 @ 0x64603c0] mb I I16..4: 18.7% 60.5% 20.9%
[libx264 @ 0x64603c0] mb P I16..4: 4.5% 7.6% 0.6% P16..4: 24.1% 5.6% 1.9% 0.0% 0.0% skip:55.7%
[libx264 @ 0x64603c0] mb B I16..4: 0.4% 0.5% 0.1% B16..8: 14.3% 0.9% 0.1% direct: 0.4% skip:83.3% L0:42.9% L1:53.9% BI: 3.3%
[libx264 @ 0x64603c0] 8x8 transform intra:59.2% inter:84.3%
[libx264 @ 0x64603c0] coded y,u,v intra: 25.3% 11.9% 13.0% inter: 3.2% 1.7% 1.8%
[libx264 @ 0x64603c0] i16 v,h,dc,p: 49% 25% 15% 12%
[libx264 @ 0x64603c0] i8 v,h,dc,ddl,ddr,vr,hd,vl,hu: 31% 17% 37% 2% 2% 3% 2% 3% 2%
[libx264 @ 0x64603c0] i4 v,h,dc,ddl,ddr,vr,hd,vl,hu: 28% 19% 15% 6% 7% 7% 6% 6% 5%
[libx264 @ 0x64603c0] Weighted P-Frames: Y:3.3% UV:1.5%
[libx264 @ 0x64603c0] ref P L0: 66.0% 8.0% 18.5% 7.5% 0.0%
[libx264 @ 0x64603c0] ref B L0: 84.9% 13.1% 2.0%
[libx264 @ 0x64603c0] ref B L1: 98.8% 1.2%
[libx264 @ 0x64603c0] kb/s:1960.04
[aac @ 0x64d1280] Qavg: 171.394
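
Not a confirmed fix, just a sketch of one common approach, assuming both inputs carry AAC stereo audio as the log above shows: let xfade blend the two video streams and acrossfade blend the two audio streams in the same filter graph, then map both labelled outputs explicitly. Note that acrossfade applies its fade over the end of the first audio stream, so with a 1 second fade it only roughly lines up with the video transition at offset 14.5 on a 15 second clip.


ffmpeg -i teacher1.mp4 -i teacher2.mp4 \
 -filter_complex "[0:v][1:v]xfade=transition=fadewhite:duration=1:offset=14.5[v];[0:a][1:a]acrossfade=d=1[a]" \
 -map "[v]" -map "[a]" output.mp4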



-
How to enable LHLS in FFmpeg 4.1?
27 December 2020, by mehdi.r
I am trying to create a low-latency CMAF video stream using FFmpeg.
To do so, I would like to enable the lhls option in FFmpeg in order to have the #EXT-X-PREFETCH tag written in the HLS manifest.


From the FFmpeg doc:



https://www.ffmpeg.org/ffmpeg-all.html





Enable Low-latency HLS (LHLS). Adds #EXT-X-PREFETCH tag with current segment's URI. Apple doesn't have an official spec for LHLS. Meanwhile hls.js player folks are trying to standardize an open LHLS spec. The draft spec is available in https://github.com/video-dev/hlsjs-rfcs/blob/lhls-spec/proposals/0001-lhls.md. This option will also try to comply with the above open spec, till Apple's spec officially supports it. Applicable only when streaming and hls_playlist options are enabled. This is an experimental feature.





I am using the following command with FFmpeg 4.1:



ffmpeg -re -i ~/Documents/videos/BigBuckBunny.mp4 \
 -map 0 -map 0 -map 0 -c:a aac -c:v libx264 -tune zerolatency \
 -b:v:0 2000k -s:v:0 1280x720 -profile:v:0 high \
 -b:v:1 1500k -s:v:1 640x340 -profile:v:1 main \
 -b:v:2 500k -s:v:2 320x170 -profile:v:2 baseline \
 -bf 1 \
 -keyint_min 24 -g 24 -sc_threshold 0 -b_strategy 0 -ar:a:1 22050 -use_timeline 1 -use_template 1 \
 -window_size 5 -adaptation_sets "id=0,streams=v id=1,streams=a" \
 -hls_playlist 1 -seg_duration 1 -streaming 1 -strict experimental -lhls 1 -remove_at_exit 1 \
 -f dash manifest.mpd





The kind of HLS manifest I obtained for a specific resolution:



#EXTM3U
#EXT-X-VERSION:6
#EXT-X-TARGETDURATION:1
#EXT-X-MEDIA-SEQUENCE:8
#EXT-X-MAP:URI="init-stream0.mp4"
#EXTINF:0.998458,
#EXT-X-PROGRAM-DATE-TIME:2019-06-21T18:13:56.966+0900
chunk-stream0-00008.mp4
#EXTINF:0.998458,
#EXT-X-PROGRAM-DATE-TIME:2019-06-21T18:13:57.964+0900
chunk-stream0-00009.mp4
#EXTINF:0.998458,
#EXT-X-PROGRAM-DATE-TIME:2019-06-21T18:13:58.963+0900
chunk-stream0-00010.mp4
#EXTINF:0.998458,
#EXT-X-PROGRAM-DATE-TIME:2019-06-21T18:13:59.961+0900
chunk-stream0-00011.mp4
#EXTINF:1.021678,
#EXT-X-PROGRAM-DATE-TIME:2019-06-21T18:14:00.960+0900
chunk-stream0-00012.mp4
...





As you can see, the #EXT-X-PREFETCH tag is missing.


Any help would be highly appreciated.



Edit



I also compiled FFmpeg from its master branch by doing the following:



nasm



sudo apt-get install nasm mingw-w64




Codecs



sudo apt-get install libx265-dev libnuma-dev libx264-dev libvpx-dev libfdk-aac-dev libmp3lame-dev libopus-dev




FFmpeg



mkdir lhls
cd lhls 
git init 
git clone https://github.com/FFmpeg/FFmpeg.git
cd FFmpeg 
git checkout master




AOM (inside FFmpeg dir)



git -C aom pull 2> /dev/null || git clone --depth 1 https://aomedia.googlesource.com/aom && \
mkdir -p aom_build && \
cd aom_build && \
PATH="$HOME/bin:$PATH" cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX="$HOME/ffmpeg_build" -DENABLE_SHARED=off -DENABLE_NASM=on ../aom && \
PATH="$HOME/bin:$PATH" make && \
make install
cd ..




Compiling



PATH="$HOME/bin:$PATH" PKG_CONFIG_PATH="$HOME/ffmpeg_build/lib/pkgconfig" ./configure \
 --prefix="$HOME/ffmpeg_build" \
 --pkg-config-flags="--static" \
 --extra-cflags="-I$HOME/ffmpeg_build/include" \
 --extra-ldflags="-L$HOME/ffmpeg_build/lib" \
 --extra-libs="-lpthread -lm" \
 --bindir="$HOME/bin" \
 --enable-gpl \
 --enable-libaom \
 --enable-libass \
 --enable-libfdk-aac \
 --enable-libfreetype \
 --enable-libmp3lame \
 --enable-libopus \
 --enable-libvorbis \
 --enable-libvpx \
 --enable-libx264 \
 --enable-libx265 \
 --enable-nonfree && \
PATH="$HOME/bin:$PATH" make 




Unfortunately, the #EXT-X-PREFETCH tag is still missing in the HLS manifest.


I also tried nightly builds from https://ffmpeg.zeranoe.com/builds/ , same result.



Any help would be highly appreciated.



EDIT 2: resolved



Thanks to @aergistal and @Gyan, the #EXT-X-PREFETCH tag is now present in my HLS manifest.


Here is the FFmpeg command I am using:



./ffmpeg -re -i ~/videos/BigBuckBunny.mp4 -loglevel debug \
 -map 0 -map 0 -map 0 -c:a aac -c:v libx264 -tune zerolatency \
 -b:v:0 2000k -s:v:0 1280x720 -profile:v:0 high -b:v:1 1500k -s:v:1 640x340 -profile:v:1 main -b:v:2 500k -s:v:2 320x170 -profile:v:2 baseline -bf 1 \
 -keyint_min 24 -g 24 -sc_threshold 0 -b_strategy 0 -ar:a:1 22050 -use_timeline 1 -use_template 1 -window_size 5 \
 -adaptation_sets "id=0,streams=v id=1,streams=a" -hls_playlist 1 -seg_duration 3 -streaming 1 \
 -strict experimental -lhls 1 -remove_at_exit 0 -master_m3u8_publish_rate 3 \
 -f dash -method PUT -http_persistent 1 https://example.com/manifest.mpd




Apparently the MIME types are not passed to the server, and FFmpeg seems to ignore the -headers option.