
Media (2)
-
Rennes Emotion Map 2010-11
19 October 2011
Updated: July 2013
Language: French
Type: Text
-
Map of Schillerkiez
13 May 2011
Updated: September 2011
Language: English
Type: Text
Other articles (104)
-
Managing creation and editing rights for objects
8 February 2011 — By default, many features are restricted to administrators, but each one can be configured independently to change the minimum status required to use it, notably: writing content on the site, configurable through the management of form templates; adding notes to articles; adding captions and annotations to images;
-
Supporting all media types
13 April 2011 — Unlike most software and media-sharing platforms, MediaSPIP aims to manage as many different media types as possible. The following are just a few examples from an ever-expanding list of supported formats: images: png, gif, jpg, bmp and more; audio: MP3, Ogg, Wav and more; video: AVI, MP4, OGV, mpg, mov, wmv and more; text, code and other data: OpenOffice, Microsoft Office (Word, PowerPoint, Excel), web (HTML, CSS), LaTeX, Google Earth and (...)
-
Keeping control of your media in your hands
13 April 2011 — The vocabulary used on this site, and around MediaSPIP in general, aims to avoid references to Web 2.0 and the companies that profit from media sharing.
While using MediaSPIP, you are invited to avoid using words like "Brand", "Cloud" and "Market".
MediaSPIP is designed to facilitate the sharing of creative media online, while allowing authors to retain complete control of their work.
MediaSPIP aims to be accessible to as many people as possible and development is based on expanding the (...)
On other sites (11224)
-
How can I take an audio file (mp3) and a video file (mp4) as input in Java Spring?
29 July 2022, by Jawad Un Islam Abir — I am using this code to read the audio file, but I don't know how to take user input.


InputStream inputStream = new FileInputStream("example.ogg");
FFmpegInput input = new FFmpegInput(inputStream);
FFmpegSourceStream stream = input.open(inputFormat);

stream.registerStreams();

AudioSourceSubstream audioSourceSubstream = null;
for(MediaSourceSubstream substream : stream.getSubstreams()) {
 if (substream.getMediaType() != MediaType.AUDIO) continue;

 audioSourceSubstream = (AudioSourceSubstream) substream;
}

if (audioSourceSubstream == null) throw new NullPointerException();
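A minimal sketch of how the same reading code could consume an uploaded file in Spring, assuming Spring Web with multipart support is on the classpath; the controller class, endpoint path and parameter name below are hypothetical, and FFmpegInput is simply the class already used in the snippet above (its import is omitted):

import java.io.InputStream;

import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.multipart.MultipartFile;

@RestController
public class MediaUploadController {

    // Hypothetical endpoint: the client POSTs the mp3/mp4 as multipart form data.
    @PostMapping("/media")
    public String handleUpload(@RequestParam("file") MultipartFile file) throws Exception {
        // MultipartFile exposes the uploaded bytes as an InputStream, which can
        // replace the hard-coded FileInputStream("example.ogg") used above.
        try (InputStream inputStream = file.getInputStream()) {
            FFmpegInput input = new FFmpegInput(inputStream);
            // ... open the source stream and pick the audio substream exactly as above
        }
        return "received " + file.getOriginalFilename();
    }
}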



-
How to minimize latency in an ffmpeg stream in Java?
13 July 2022, by Taavi Sõerd — I need to stream an ffmpeg video feed in Android Studio with minimal latency. The code below achieves that when playing on a Galaxy S21 Ultra, but when I play it on a Galaxy Tab it runs as if in slow motion. When I set the buffer size to 0 I get minimal latency, but then I can't actually see the video at all, as it's completely corrupted (grey and coloured noise).


public class Decode implements Runnable {
public Activity activity;
AVFrame pFrameRGB;
SwsContext sws_ctx;
ByteBuffer bitmapBuffer;
Bitmap bmp;
byte[] array;
int imageViewWidth = 0;
int imageViewHeight = 0;
boolean imageChanged = true;
int v_stream_idx = -1;
int klv_stream_idx = -1;

boolean imageDrawMutex = false;

boolean imageIsSet = false;
ImageView imageView = MainActivity.getmInstanceActivity().findViewById(R.id.imageView);

String mFilename = "udp://@" + MainActivity.connectionIP;
UasDatalinkLocalSet mLatestDls;

public Decode(Activity _activity) {
 this.activity = _activity;
}

public void create_decoder(AVCodecContext codec_ctx) {
 imageChanged = true;

 // Determine required buffer size and allocate buffer
 int numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGBA, codec_ctx.width(),
 codec_ctx.height(), 1);
 BytePointer buffer = new BytePointer(av_malloc(numBytes));

 bmp = Bitmap.createBitmap(codec_ctx.width(), codec_ctx.height(), Bitmap.Config.ARGB_8888);

 array = new byte[codec_ctx.width() * codec_ctx.height() * 4];
 bitmapBuffer = ByteBuffer.wrap(array);

 sws_ctx = sws_getContext(
 codec_ctx.width(),
 codec_ctx.height(),
 codec_ctx.pix_fmt(),
 codec_ctx.width(),
 codec_ctx.height(),
 AV_PIX_FMT_RGBA,
 SWS_POINT,
 null,
 null,
 (DoublePointer) null
 );

 if (sws_ctx == null) {
 Log.d("app", "Can not use sws");
 throw new IllegalStateException();
 }

 av_image_fill_arrays(pFrameRGB.data(), pFrameRGB.linesize(),
 buffer, AV_PIX_FMT_RGBA, codec_ctx.width(), codec_ctx.height(), 1);
}

@Override
public void run() {
 Log.d("app", "Start decoder");

 int ret = -1, i = 0;
 String vf_path = mFilename;

 AVFormatContext fmt_ctx = new AVFormatContext(null);
 AVPacket pkt = new AVPacket();


 AVDictionary multicastDict = new AVDictionary();

 av_dict_set(multicastDict, "rtsp_transport", "udp_multicast", 0);

 av_dict_set(multicastDict, "localaddr", getIPAddress(true), 0);
 av_dict_set(multicastDict, "reuse", "1", 0);

 av_dict_set(multicastDict, "buffer_size", "0.115M", 0);

 ret = avformat_open_input(fmt_ctx, vf_path, null, multicastDict);
 if (ret < 0) {
 Log.d("app", String.format("Open video file %s failed \n", vf_path));
 byte[] error_message = new byte[1024];
 int elen = av_strerror(ret, error_message, 1024);
 String s = new String(error_message, 0, 20);
 Log.d("app", String.format("Return: %d", ret));
 Log.d("app", String.format("Message: %s", s));
 throw new IllegalStateException();
 }
 
 if (avformat_find_stream_info(fmt_ctx, (PointerPointer) null) < 0) {
 //System.exit(-1);
 Log.d("app", "Stream info not found");
 }


 avformat.av_dump_format(fmt_ctx, 0, mFilename, 0);

 int nstreams = fmt_ctx.nb_streams();

 for (i = 0; i < fmt_ctx.nb_streams(); i++) {
 if (fmt_ctx.streams(i).codecpar().codec_type() == AVMEDIA_TYPE_VIDEO) {
 v_stream_idx = i;
 }
 if (fmt_ctx.streams(i).codecpar().codec_type() == AVMEDIA_TYPE_DATA) {
 klv_stream_idx = i;
 }
 }
 if (v_stream_idx == -1) {
 Log.d("app", "Cannot find video stream");
 throw new IllegalStateException();
 } else {
 Log.d("app", String.format("Video stream %d with resolution %dx%d\n", v_stream_idx,
 fmt_ctx.streams(v_stream_idx).codecpar().width(),
 fmt_ctx.streams(v_stream_idx).codecpar().height()));
 }

 AVCodecContext codec_ctx = avcodec_alloc_context3(null);
 avcodec_parameters_to_context(codec_ctx, fmt_ctx.streams(v_stream_idx).codecpar());


 AVCodec codec = avcodec_find_decoder(codec_ctx.codec_id());


 AVDictionary avDictionary = new AVDictionary();

 av_dict_set(avDictionary, "fflags", "nobuffer", 0);


 if (codec == null) {
 Log.d("app", "Unsupported codec for video file");
 throw new IllegalStateException();
 }
 ret = avcodec_open2(codec_ctx, codec, avDictionary);
 if (ret < 0) {
 Log.d("app", "Can not open codec");
 throw new IllegalStateException();
 }

 AVFrame frm = av_frame_alloc();

 // Allocate an AVFrame structure
 pFrameRGB = av_frame_alloc();
 if (pFrameRGB == null) {
 //System.exit(-1);
 Log.d("app", "unable to init pframergb");
 }

 create_decoder(codec_ctx);

 int width = codec_ctx.width();
 int height = codec_ctx.height();

 double fps = 15;
 

 while (true) {
 try {
 Thread.sleep(1);
 } catch (Exception e) {

 }

 try {
 if (av_read_frame(fmt_ctx, pkt) >= 0) {
 if (pkt.stream_index() == v_stream_idx) {
 avcodec_send_packet(codec_ctx, pkt);

 if (codec_ctx.width() != width || codec_ctx.height() != height) {
 create_decoder(codec_ctx);
 width = codec_ctx.width();
 height = codec_ctx.height();
 }
 }

 if (pkt.stream_index() == klv_stream_idx) {

 byte[] klvDataBuffer = new byte[pkt.size()];

 for (int j = 0; j < pkt.size(); j++) {
 klvDataBuffer[j] = pkt.data().get(j);
 }

 try {
 KLV k = new KLV(klvDataBuffer, KLV.KeyLength.SixteenBytes, KLV.LengthEncoding.BER);
 byte[] main_payload = k.getValue();

 // decode the Uas Datalink Local Set from main_payload binary blob.
 mLatestDls = new UasDatalinkLocalSet(main_payload);

 if (mLatestDls != null) {

 MainActivity.getmInstanceActivity().runOnUiThread(new Runnable() {
 @RequiresApi(api = Build.VERSION_CODES.Q)
 @Override
 public void run() {
 MainActivity.getmInstanceActivity().updateKlv(mLatestDls);
 }
 });
 }
 } catch (Exception e) {
 e.printStackTrace();
 }
 
 }

 int wasFrameDecoded = 0;
 while (wasFrameDecoded >= 0) {
 wasFrameDecoded = avcodec_receive_frame(codec_ctx, frm);

 if (wasFrameDecoded >= 0) {
 // get clip fps
 fps = 15; //av_q2d(fmt_ctx.streams(v_stream_idx).r_frame_rate());

 sws_scale(
 sws_ctx,
 frm.data(),
 frm.linesize(),
 0,
 codec_ctx.height(),
 pFrameRGB.data(),
 pFrameRGB.linesize()
 );

 if(!imageDrawMutex) {
 MainActivity.getmInstanceActivity().runOnUiThread(new Runnable() {
 @Override
 public void run() {
 if (imageIsSet) {
 imageDrawMutex = true;
 pFrameRGB.data(0).position(0).get(array);
 bitmapBuffer.rewind();
 bmp.copyPixelsFromBuffer(bitmapBuffer);

 if (imageChanged) {
 (imageView).setImageBitmap(bmp);
 imageChanged = false;
 }

 (imageView).invalidate();
 imageDrawMutex = false;
 } else {
 (imageView).setImageBitmap(bmp);
 imageIsSet = true;
 }
 }
 });
 }
 }
 }
 av_packet_unref(pkt);

 }
 } catch (Exception e) {
 e.printStackTrace();
 }

 if (false) {
 Log.d("threads", "false");

 av_frame_free(frm);

 avcodec_close(codec_ctx);
 avcodec_free_context(codec_ctx);

 avformat_close_input(fmt_ctx);
 }
 }
}



This code is running in Android Studio with Java. I'm quite new to this topic, so I'm not really sure where to start.
What could be the cause of that?
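For reference, below is a hedged sketch of standard FFmpeg open options that trade buffering for latency. The option names are regular FFmpeg format, protocol and decoder options, but the concrete values are assumptions and not a verified fix for the slower tablet:

// Format/protocol options, passed when opening the input:
AVDictionary inputOpts = new AVDictionary();
av_dict_set(inputOpts, "fflags", "nobuffer", 0);        // do not buffer input frames
av_dict_set(inputOpts, "probesize", "32768", 0);        // probe less data before playback starts
av_dict_set(inputOpts, "analyzeduration", "0", 0);      // skip the long initial stream analysis
av_dict_set(inputOpts, "buffer_size", "65536", 0);      // small but non-zero UDP buffer, in bytes
ret = avformat_open_input(fmt_ctx, vf_path, null, inputOpts);

// Decoder options, passed when opening the codec ("fflags" is a format option,
// so it has no effect in avcodec_open2; "flags low_delay" is the decoder-side hint):
AVDictionary decoderOpts = new AVDictionary();
av_dict_set(decoderOpts, "flags", "low_delay", 0);
ret = avcodec_open2(codec_ctx, codec, decoderOpts);

Setting buffer_size to 0 most likely makes the UDP socket drop packets, which would explain the grey and coloured noise; keeping a small but non-zero buffer is usually the better trade-off, and the Galaxy Tab may simply need a larger one than the S21 Ultra.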


-
Error when making an animation through ffmpeg (Python 3.9)
20 April 2024, by Taehyung Ghim — When I try to make a 2D animation map for cow tracking (matching two camera views) through ffmpeg, the following error occurs.



raise subprocess.CalledProcessError(subprocess.CalledProcessError: Command '['ffmpeg', '-f', 'rawvideo', '-vcodec', 'rawvideo', '-s', '4000x4000', '-pix_fmt', 'rgba', '-r', '5', '-loglevel', 'error', '-i', 'pipe:', '-vcodec', 'h264', '-pix_fmt', 'yuv420p', '-metadata', 'artist=Me', '-y', '../out_detect/run7/TRACKS_ANIMATION_fused.mp4']' returned non-zero exit status 1.




The following is the full error:



Plotting the last 1800.9391813674797 frames.
INFO:Animation.save using <class>
INFO:MovieWriter._run: running command: ffmpeg -f rawvideo -vcodec rawvideo -s 4000x4000 -pix_fmt rgba -r 5 -loglevel error -i pipe: -vcodec h264 -pix_fmt yuv420p -metadata artist=Me -y ../out_detect/run7/TRACKS_ANIMATION_fused.mp4
WARNING:MovieWriter stderr:
[libopenh264 @ 0x55b93df81fc0] [OpenH264] this = 0x0x55b93df8ef10, Error:ParamValidationExt(), width > 0, height > 0, width * height <= 9437184, invalid 4000 x 4000 in dependency layer settings!
[libopenh264 @ 0x55b93df81fc0] [OpenH264] this = 0x0x55b93df8ef10, Error:WelsInitEncoderExt(), ParamValidationExt failed return 2.
[libopenh264 @ 0x55b93df81fc0] [OpenH264] this = 0x0x55b93df8ef10, Error:CWelsH264SVCEncoder::Initialize(), WelsInitEncoderExt failed.
[libopenh264 @ 0x55b93df81fc0] Initialize failed
Error initializing output stream 0:0 -- Error while opening encoder for output stream #0:0 - maybe incorrect parameters such as bit_rate, rate, width or height
 

 Traceback (most recent call last):
 File "/home/rom/anaconda3/envs/cow_tracking_env/lib/python3.9/site-packages/matplotlib/animation.py", line 236, in saving
 yield self
 File "/home/rom/anaconda3/envs/cow_tracking_env/lib/python3.9/site-packages/matplotlib/animation.py", line 1095, in save
 writer.grab_frame(**savefig_kwargs)
 File "/home/rom/anaconda3/envs/cow_tracking_env/lib/python3.9/site-packages/matplotlib/animation.py", line 353, in grab_frame
 self.fig.savefig(self._proc.stdin, format=self.frame_format,
 File "/home/rom/anaconda3/envs/cow_tracking_env/lib/python3.9/site-packages/matplotlib/figure.py", line 3012, in savefig
 self.canvas.print_figure(fname, **kwargs)
 File "/home/rom/anaconda3/envs/cow_tracking_env/lib/python3.9/site-packages/matplotlib/backend_bases.py", line 2314, in print_figure
 result = print_method(
 File "/home/rom/anaconda3/envs/cow_tracking_env/lib/python3.9/site-packages/matplotlib/backend_bases.py", line 1643, in wrapper
 return func(*args, **kwargs)
 File "/home/rom/anaconda3/envs/cow_tracking_env/lib/python3.9/site-packages/matplotlib/_api/deprecation.py", line 412, in wrapper
 return func(*inner_args, **inner_kwargs)
 File "/home/rom/anaconda3/envs/cow_tracking_env/lib/python3.9/site-packages/matplotlib/backends/backend_agg.py", line 486, in print_raw
 fh.write(renderer.buffer_rgba())
 BrokenPipeError: [Errno 32] Broken pipe
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
 File "/home/rom/PycharmProjects/cow_tracking_package/tracking-cows/main.py", line 330, in <module>
 inference_tracking_video(opt=args, device=dev, detector=detector, keypoint_tfm=keypoint_tfm,
 File "/home/rom/PycharmProjects/cow_tracking_package/tracking-cows/tracking.py", line 325, in inference_tracking_video
 postprocess_tracking_results(track_args=track_args, cfg_postprocess=cfg_matching_parameters.POSTPROCESS,
 File "/home/rom/PycharmProjects/cow_tracking_package/tracking-cows/postprocessing/postprocess_results.py", line 90, in postprocess_tracking_results
 postprocess_trajectories(track_args=track_args, analysis_matching_cfg=cfg_analysis)
 File "/home/rom/PycharmProjects/cow_tracking_package/tracking-cows/postprocessing/postprocess_results.py", line 58, in postprocess_trajectories
 analyse_trajectories(analysis_arguments, full_width, full_height, video_fps, frame_rate_animation)
 File "/home/rom/PycharmProjects/cow_tracking_package/tracking-cows/postprocessing/trajectory_postprocess.py", line 115, in analyse_trajectories
 create_virtual_map_animation_final(opt.save_dir, final_matching_both_cams, color_dict3, full_width,
 File "/home/rom/PycharmProjects/cow_tracking_package/tracking-cows/output/output_plot_fused_trajectory_animation.py", line 236, in create_virtual_map_animation_final
 virtual_map_animation.save(traj_file_path, writer=writer)
 File "/home/rom/anaconda3/envs/cow_tracking_env/lib/python3.9/site-packages/matplotlib/animation.py", line 1095, in save
 writer.grab_frame(**savefig_kwargs)
 File "/home/rom/anaconda3/envs/cow_tracking_env/lib/python3.9/contextlib.py", line 137, in __exit__
 self.gen.throw(typ, value, traceback)
 File "/home/rom/anaconda3/envs/cow_tracking_env/lib/python3.9/site-packages/matplotlib/animation.py", line 238, in saving
 self.finish()
 File "/home/rom/anaconda3/envs/cow_tracking_env/lib/python3.9/site-packages/matplotlib/animation.py", line 344, in finish
 self._cleanup() # Inline _cleanup() once cleanup() is removed.
 File "/home/rom/anaconda3/envs/cow_tracking_env/lib/python3.9/site-packages/matplotlib/animation.py", line 375, in _cleanup
 raise subprocess.CalledProcessError(
subprocess.CalledProcessError: Command '['ffmpeg', '-f', 'rawvideo', '-vcodec', 'rawvideo', '-s', '4000x4000', '-pix_fmt', 'rgba', '-r', '5', '-loglevel', 'error', '-i', 'pipe:', '-vcodec', 'h264', '-pix_fmt', 'yuv420p', '-metadata', 'artist=Me', '-y', '../out_detect/run7/TRACKS_ANIMATION_fused.mp4']' returned non-zero exit status 1.



The ffmpeg version is 4.3, built with gcc 7.3.0, the OS is Ubuntu 20.04, and my conda env is below.


channels:
 - pytorch
 - defaults
dependencies:
 - _libgcc_mutex=0.1=main
 - _openmp_mutex=4.5=1_gnu
 - blas=1.0=mkl
 - bzip2=1.0.8=h7b6447c_0
 - ca-certificates=2021.10.26=h06a4308_2
 - certifi=2021.10.8=py39h06a4308_2
 - cudatoolkit=11.3.1=h2bc3f7f_2
 - ffmpeg=4.3=hf484d3e_0
 - freetype=2.11.0=h70c0345_0
 - giflib=5.2.1=h7b6447c_0
 - gmp=6.2.1=h2531618_2
 - gnutls=3.6.15=he1e5248_0
 - intel-openmp=2021.4.0=h06a4308_3561
 - jpeg=9d=h7f8727e_0
 - lame=3.100=h7b6447c_0
 - lcms2=2.12=h3be6417_0
 - ld_impl_linux-64=2.35.1=h7274673_9
 - libffi=3.3=he6710b0_2
 - libgcc-ng=9.3.0=h5101ec6_17
 - libgomp=9.3.0=h5101ec6_17
 - libiconv=1.15=h63c8f33_5
 - libidn2=2.3.2=h7f8727e_0
 - libpng=1.6.37=hbc83047_0
 - libstdcxx-ng=9.3.0=hd4cf53a_17
 - libtasn1=4.16.0=h27cfd23_0
 - libtiff=4.2.0=h85742a9_0
 - libunistring=0.9.10=h27cfd23_0
 - libuv=1.40.0=h7b6447c_0
 - libwebp=1.2.0=h89dd481_0
 - libwebp-base=1.2.0=h27cfd23_0
 - lz4-c=1.9.3=h295c915_1
 - mkl=2021.4.0=h06a4308_640
 - mkl-service=2.4.0=py39h7f8727e_0
 - mkl_fft=1.3.1=py39hd3c417c_0
 - mkl_random=1.2.2=py39h51133e4_0
 - ncurses=6.3=h7f8727e_2
 - nettle=3.7.3=hbbd107a_1
 - numpy=1.21.2=py39h20f2e39_0
 - numpy-base=1.21.2=py39h79a1101_0
 - olefile=0.46=pyhd3eb1b0_0
 - openh264=2.1.0=hd408876_0
 - openssl=1.1.1m=h7f8727e_0
 - pillow=8.4.0=py39h5aabda8_0
 - pip=21.2.4=py39h06a4308_0
 - python=3.9.7=h12debd9_1
 - pytorch=1.10.0=py3.9_cuda11.3_cudnn8.2.0_0
 - pytorch-mutex=1.0=cuda
 - readline=8.1=h27cfd23_0
 - setuptools=58.0.4=py39h06a4308_0
 - six=1.16.0=pyhd3eb1b0_0
 - sqlite=3.36.0=hc218d9a_0
 - tk=8.6.11=h1ccaba5_0
 - torchaudio=0.10.0=py39_cu113
 - torchvision=0.11.1=py39_cu113
 - typing_extensions=3.10.0.2=pyh06a4308_0
 - wheel=0.37.0=pyhd3eb1b0_1
 - xz=5.2.5=h7b6447c_0
 - zlib=1.2.11=h7b6447c_3
 - zstd=1.4.9=haebb681_0
 - pip:
 - absl-py==1.0.0
 - addict==2.4.0
 - cachetools==4.2.4
 - charset-normalizer==2.0.8
 - cloudpickle==2.0.0
 - cycler==0.11.0
 - cython==0.29.24
 - docutils==0.18.1
 - easydict==1.9
 - filterpy==1.4.5
 - fonttools==4.28.2
 - geohash2==1.1
 - google-auth==2.3.3
 - google-auth-oauthlib==0.4.6
 - grpcio==1.42.0
 - idna==3.3
 - imageio==2.13.5
 - importlib-metadata==4.8.2
 - joblib==1.1.0
 - kiwisolver==1.3.2
 - loguru==0.6.0
 - markdown==3.3.6
 - matplotlib==3.5.0
 - natsort==8.0.2
 - networkx==2.6.3
 - oauthlib==3.1.1
 - opencv-python==4.5.4.60
 - packaging==21.3
 - pandas==1.3.4
 - protobuf==3.19.1
 - pyasn1==0.4.8
 - pyasn1-modules==0.2.8
 - pycocotools==2.0.4
 - pyparsing==3.0.6
 - pyqt5==5.15.6
 - pyqt5-qt5==5.15.2
 - pyqt5-sip==12.9.0
 - python-dateutil==2.8.2
 - pytz==2021.3
 - pytz-deprecation-shim==0.1.0.post0
 - pywavelets==1.2.0
 - pyyaml==6.0
 - requests==2.26.0
 - requests-oauthlib==1.3.0
 - rsa==4.8
 - scikit-image==0.19.1
 - scikit-learn==1.0.2
 - scipy==1.7.3
 - seaborn==0.11.2
 - setuptools-scm==6.3.2
 - shapely==1.8.0
 - sklearn==0.0
 - split-folders==0.4.3
 - tabulate==0.8.9
 - tensorboard==2.7.0
 - tensorboard-data-server==0.6.1
 - tensorboard-plugin-wit==1.8.0
 - terminaltables==3.1.10
 - thop==0.0.31-2005241907
 - threadpoolctl==3.1.0
 - tifffile==2021.11.2
 - timm==0.4.12
 - tomli==1.2.2
 - tqdm==4.62.3
 - traja==0.2.8
 - tzdata==2021.5
 - tzlocal==4.1
 - urllib3==1.26.7
 - werkzeug==2.0.2
 - yacs==0.1.8
 - yapf==0.32.0
 - zipp==3.6.0



I also installed ffmpy through conda.


I would be very grateful if anyone could help me.
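The OpenH264 lines in the log point at the cause: this build's libopenh264 encoder rejects frames whose width x height exceeds 9437184 pixels, and 4000x4000 (16 million pixels) is well over that limit. Below is a minimal sketch of one workaround, rendering the animation at a smaller pixel size through matplotlib's FFMpegWriter; the figure size, dpi and writer arguments are illustrative assumptions, not the project's actual settings:

import matplotlib.pyplot as plt
from matplotlib.animation import FFMpegWriter

fig = plt.gcf()                              # the figure the animation draws into
fig.set_size_inches(20, 20)                  # 20 in x 150 dpi = 3000 px per side, i.e. 9 Mpx (under the limit)

writer = FFMpegWriter(
    fps=5,                                   # same frame rate as the failing command
    metadata={"artist": "Me"},
    extra_args=["-pix_fmt", "yuv420p"],      # keep the pixel format used in the failing command
)

# virtual_map_animation.save(traj_file_path, writer=writer, dpi=150)

Alternatively, an ffmpeg build that includes libx264 would accept 4000x4000 frames; the conda ffmpeg=4.3 package listed above appears to be built against openh264 only.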