
Media (1)
-
Rennes Emotion Map 2010-11
19 October 2011, by
Updated: July 2013
Language: French
Type: Text
Other articles (53)
-
Installation in standalone mode
4 February 2011, by
Installing the MediaSPIP distribution takes several steps: retrieving the necessary files, at which point two methods are possible: installing the ZIP archive containing the whole distribution, or fetching the sources of each module separately via SVN; preconfiguration; final installation;
[mediaspip_zip]Installing the MediaSPIP ZIP archive
This installation mode is the simplest way to install the whole distribution (...) -
MediaSPIP in private mode (Intranet)
17 September 2013, by
As of version 0.3, a MediaSPIP channel can be made private, blocked to anyone who is not logged in, thanks to the "Intranet/extranet" plugin.
When enabled, the Intranet/extranet plugin blocks access to the channel for any unidentified visitor, keeping them away from the content by systematically redirecting them to the login form.
This system can be particularly useful for certain uses, such as: a workshop with children whose content must not (...) -
Contribute to documentation
13 April 2011
Documentation is vital to the development of improved technical capabilities.
MediaSPIP welcomes documentation by users as well as developers, including: critique of existing features and functions; articles contributed by developers, administrators, content producers and editors; screenshots to illustrate the above; translations of existing documentation into other languages.
To contribute, register to the project users’ mailing (...)
On other sites (7620)
-
Cannot add tmcd stream using libavcodec to replicate behavior of ffmpeg -timecode option
2 August, by Sailor Jerry
I'm trying to replicate the command-line option ffmpeg -timecode in my C/C++ code. For some reason the tmcd stream is not written to the output file, even though av_dump_format shows it at run time.


Here is my minimal test


#include <iostream>
#include <cstring> // for memset
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/avutil.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
}
bool checkProResAvailability() {
 const AVCodec* codec = avcodec_find_encoder_by_name("prores_ks");
 if (!codec) {
 std::cerr << "ProRes codec not available. Please install FFmpeg with ProRes support." << std::endl;
 return false;
 }
 return true;
}

int main(){
 av_log_set_level(AV_LOG_INFO);

 const char* outputFileName = "test_tmcd.mov";
 AVFormatContext* formatContext = nullptr;
 AVCodecContext* videoCodecContext = nullptr;

 if (!checkProResAvailability()) {
 return -1;
 }

 std::cout << "Creating test file with tmcd stream: " << outputFileName << std::endl;

 // Allocate the output format context
 if (avformat_alloc_output_context2(&formatContext, nullptr, "mov", outputFileName) < 0) {
 std::cerr << "Failed to allocate output context!" << std::endl;
 return -1;
 }

 if (avio_open(&formatContext->pb, outputFileName, AVIO_FLAG_WRITE) < 0) {
 std::cerr << "Failed to open output file!" << std::endl;
 avformat_free_context(formatContext);
 return -1;
 }

 // Find ProRes encoder
 const AVCodec* videoCodec = avcodec_find_encoder_by_name("prores_ks");
 if (!videoCodec) {
 std::cerr << "Failed to find the ProRes encoder!" << std::endl;
 avio_close(formatContext->pb);
 avformat_free_context(formatContext);
 return -1;
 }

 // Video stream setup
 AVStream* videoStream = avformat_new_stream(formatContext, nullptr);
 if (!videoStream) {
 std::cerr << "Failed to create video stream!" << std::endl;
 avio_close(formatContext->pb);
 avformat_free_context(formatContext);
 return -1;
 }

 videoCodecContext = avcodec_alloc_context3(videoCodec);
 if (!videoCodecContext) {
 std::cerr << "Failed to allocate video codec context!" << std::endl;
 avio_close(formatContext->pb);
 avformat_free_context(formatContext);
 return -1;
 }

 videoCodecContext->width = 1920;
 videoCodecContext->height = 1080;
 videoCodecContext->pix_fmt = AV_PIX_FMT_YUV422P10;
 videoCodecContext->time_base = (AVRational){1, 30}; // Set FPS: 30
 videoCodecContext->bit_rate = 2000000;

 if (avcodec_open2(videoCodecContext, videoCodec, nullptr) < 0) {
 std::cerr << "Failed to open ProRes codec!" << std::endl;
 avcodec_free_context(&videoCodecContext);
 avio_close(formatContext->pb);
 avformat_free_context(formatContext);
 return -1;
 }

 if (avcodec_parameters_from_context(videoStream->codecpar, videoCodecContext) < 0) {
 std::cerr << "Failed to copy codec parameters to video stream!" << std::endl;
 avcodec_free_context(&videoCodecContext);
 avio_close(formatContext->pb);
 avformat_free_context(formatContext);
 return -1;
 }

 videoStream->time_base = videoCodecContext->time_base;

 // Timecode stream setup
 AVStream* timecodeStream = avformat_new_stream(formatContext, nullptr);
 if (!timecodeStream) {
 std::cerr << "Failed to create timecode stream!" << std::endl;
 avcodec_free_context(&videoCodecContext);
 avio_close(formatContext->pb);
 avformat_free_context(formatContext);
 return -1;
 }

 timecodeStream->codecpar->codec_type = AVMEDIA_TYPE_DATA;
 timecodeStream->codecpar->codec_id = AV_CODEC_ID_TIMED_ID3;
 timecodeStream->codecpar->codec_tag = MKTAG('t', 'm', 'c', 'd'); // Timecode tag
 timecodeStream->time_base = (AVRational){1, 30}; // FPS: 30

 if (av_dict_set(&timecodeStream->metadata, "timecode", "00:00:30:00", 0) < 0) {
 std::cerr << "Failed to set timecode metadata!" << std::endl;
 avcodec_free_context(&videoCodecContext);
 avio_close(formatContext->pb);
 avformat_free_context(formatContext);
 return -1;
 }

 // Write container header
 if (avformat_write_header(formatContext, nullptr) < 0) {
 std::cerr << "Failed to write file header!" << std::endl;
 avcodec_free_context(&videoCodecContext);
 avio_close(formatContext->pb);
 avformat_free_context(formatContext);
 return -1;
 }

 // Encode a dummy video frame
 AVFrame* frame = av_frame_alloc();
 if (!frame) {
 std::cerr << "Failed to allocate video frame!" << std::endl;
 avcodec_free_context(&videoCodecContext);
 avio_close(formatContext->pb);
 avformat_free_context(formatContext);
 return -1;
 }

 frame->format = videoCodecContext->pix_fmt;
 frame->width = videoCodecContext->width;
 frame->height = videoCodecContext->height;

 if (av_image_alloc(frame->data, frame->linesize, frame->width, frame->height, videoCodecContext->pix_fmt, 32) < 0) {
 std::cerr << "Failed to allocate frame buffer!" << std::endl;
 av_frame_free(&frame);
 avcodec_free_context(&videoCodecContext);
 avio_close(formatContext->pb);
 avformat_free_context(formatContext);
 return -1;
 }

 // Fill frame with black
 memset(frame->data[0], 0, frame->linesize[0] * frame->height); // Y plane
 memset(frame->data[1], 128, frame->linesize[1] * frame->height / 2); // U plane
 memset(frame->data[2], 128, frame->linesize[2] * frame->height / 2); // V plane

 // Encode the frame
 AVPacket packet;
 av_init_packet(&packet);
 packet.data = nullptr;
 packet.size = 0;

 if (avcodec_send_frame(videoCodecContext, frame) == 0) {
 if (avcodec_receive_packet(videoCodecContext, &packet) == 0) {
 packet.stream_index = videoStream->index;
 av_interleaved_write_frame(formatContext, &packet);
 av_packet_unref(&packet);
 }
 }

 av_frame_free(&frame);

 // Write a dummy packet for the timecode stream
 AVPacket tmcdPacket;
 av_init_packet(&tmcdPacket);
 tmcdPacket.stream_index = timecodeStream->index;
 tmcdPacket.flags |= AV_PKT_FLAG_KEY;
 tmcdPacket.data = nullptr; // Empty packet for timecode
 tmcdPacket.size = 0;
 tmcdPacket.pts = 0; // Set necessary PTS
 tmcdPacket.dts = 0;
 av_interleaved_write_frame(formatContext, &tmcdPacket);

 // Write trailer
 if (av_write_trailer(formatContext) < 0) {
 std::cerr << "Failed to write file trailer!" << std::endl;
 }

 av_dump_format(formatContext, 0, "test.mov", 1);

 // Cleanup
 avcodec_free_context(&videoCodecContext);
 avio_close(formatContext->pb);
 avformat_free_context(formatContext);

 std::cout << "Test file with timecode created successfully: " << outputFileName << std::endl;

 return 0;
}


The code output is:


Creating test file with tmcd stream: test_tmcd.mov
[prores_ks @ 0x11ce05790] Autoselected HQ profile to keep best quality. It can be overridden through -profile option.
[mov @ 0x11ce04f20] Timestamps are unset in a packet for stream 0. This is deprecated and will stop working in the future. Fix your code to set the timestamps properly
[mov @ 0x11ce04f20] Encoder did not produce proper pts, making some up.
Output #0, mov, to 'test.mov':
 Metadata:
 encoder : Lavf61.7.100
 Stream #0:0: Video: prores (HQ) (apch / 0x68637061), yuv422p10le, 1920x1080, q=2-31, 2000 kb/s, 15360 tbn
 Stream #0:1: Data: timed_id3 (tmcd / 0x64636D74)
 Metadata:
 timecode : 00:00:30:00
Test file with timecode created successfully: test_tmcd.mov



The ffprobe output is:


$ ffprobe test_tmcd.mov
ffprobe version 7.1.1 Copyright (c) 2007-2025 the FFmpeg developers
 built with Apple clang version 16.0.0 (clang-1600.0.26.6)
 configuration: --prefix=/opt/homebrew/Cellar/ffmpeg/7.1.1_3 --enable-shared --enable-pthreads --enable-version3 --cc=clang --host-cflags= --host-ldflags='-Wl,-ld_classic' --enable-ffplay --enable-gnutls --enable-gpl --enable-libaom --enable-libaribb24 --enable-libbluray --enable-libdav1d --enable-libharfbuzz --enable-libjxl --enable-libmp3lame --enable-libopus --enable-librav1e --enable-librist --enable-librubberband --enable-libsnappy --enable-libsrt --enable-libssh --enable-libsvtav1 --enable-libtesseract --enable-libtheora --enable-libvidstab --enable-libvmaf --enable-libvorbis --enable-libvpx --enable-libwebp --enable-libx264 --enable-libx265 --enable-libxml2 --enable-libxvid --enable-lzma --enable-libfontconfig --enable-libfreetype --enable-frei0r --enable-libass --enable-libopencore-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libspeex --enable-libsoxr --enable-libzmq --enable-libzimg --disable-libjack --disable-indev=jack --enable-videotoolbox --enable-audiotoolbox --enable-neon
 libavutil 59. 39.100 / 59. 39.100
 libavcodec 61. 19.101 / 61. 19.101
 libavformat 61. 7.100 / 61. 7.100
 libavdevice 61. 3.100 / 61. 3.100
 libavfilter 10. 4.100 / 10. 4.100
 libswscale 8. 3.100 / 8. 3.100
 libswresample 5. 3.100 / 5. 3.100
 libpostproc 58. 3.100 / 58. 3.100
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'test_tmcd.mov':
 Metadata:
 major_brand : qt 
 minor_version : 512
 compatible_brands: qt 
 encoder : Lavf61.7.100
 Duration: N/A, start: 0.000000, bitrate: N/A
 Stream #0:0[0x1]: Video: prores (HQ) (apch / 0x68637061), yuv422p10le, 1920x1080, 15360 tbn (default)
 Metadata:
 handler_name : VideoHandler
 vendor_id : FFMP
$ 




Spent hours with all the AI models, no help. Appealing to human intelligence now.
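
For context: as far as I can tell, the CLI's -timecode option never adds a data stream by hand. It sets a "timecode" metadata entry, and the mov muxer then creates the tmcd track (and writes its single packet, the starting frame number) by itself inside avformat_write_header(). Below is a minimal, untested sketch of that approach; it assumes the muxer consults the stream or container metadata (so it sets both), and it reuses the dimensions and frame rate from the question. The output name tmcd_meta.mov is arbitrary.


extern "C" {
#include <libavcodec/codec_id.h>
#include <libavformat/avformat.h>
#include <libavutil/dict.h>
#include <libavutil/pixfmt.h>
}

int main() {
    AVFormatContext* fmt = nullptr;
    if (avformat_alloc_output_context2(&fmt, nullptr, "mov", "tmcd_meta.mov") < 0)
        return -1;

    // One ordinary video stream; no hand-made tmcd stream at all.
    AVStream* video = avformat_new_stream(fmt, nullptr);
    if (!video)
        return -1;
    video->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    video->codecpar->codec_id   = AV_CODEC_ID_PRORES;
    video->codecpar->width      = 1920;
    video->codecpar->height     = 1080;
    video->codecpar->format     = AV_PIX_FMT_YUV422P10;
    video->time_base            = (AVRational){1, 30};
    video->avg_frame_rate       = (AVRational){30, 1}; // the muxer needs a frame rate for tmcd

    // The key step: "timecode" metadata, set BEFORE avformat_write_header().
    // Set on both the stream and the container, since I am not certain which
    // one the mov muxer consults first.
    av_dict_set(&video->metadata, "timecode", "00:00:30:00", 0);
    av_dict_set(&fmt->metadata,   "timecode", "00:00:30:00", 0);

    if (avio_open(&fmt->pb, "tmcd_meta.mov", AVIO_FLAG_WRITE) < 0)
        return -1;
    if (avformat_write_header(fmt, nullptr) < 0) // tmcd track expected to be created here
        return -1;

    // ... encode and interleave ProRes packets exactly as in the question ...

    av_write_trailer(fmt);
    avio_closep(&fmt->pb);
    avformat_free_context(fmt);
    return 0;
}


If that is indeed the mechanism, it would also explain the symptom above: the hand-made AVMEDIA_TYPE_DATA stream shows up in av_dump_format, which reads the in-memory context, but not in ffprobe, which reads what the muxer actually wrote to the file.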


-
NumPy array of a video changes from the original after writing into the same video
29 March 2021, by Rashiq
I have a video (test.mkv) that I have converted into a 4D NumPy array: (frame, height, width, color_channel). I have even managed to convert that array back into the same video (test_2.mkv) without altering anything. However, after reading this new test_2.mkv back into a new NumPy array, the array of the first video is different from the second video's array, i.e. their hashes don't match and the numpy.array_equal() function returns False. I have tried using both python-ffmpeg and scikit-video but cannot get the arrays to match.

Python-ffmpeg attempt:


import ffmpeg
import numpy as np
import hashlib

file_name = 'test.mkv'

# Get video dimensions and framerate
probe = ffmpeg.probe(file_name)
video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None)
width = int(video_stream['width'])
height = int(video_stream['height'])
frame_rate = video_stream['avg_frame_rate']

# Read video into buffer
out, error = (
 ffmpeg
 .input(file_name, threads=120)
 .output("pipe:", format='rawvideo', pix_fmt='rgb24')
 .run(capture_stdout=True)
)

# Convert video buffer to array
video = (
 np
 .frombuffer(out, np.uint8)
 .reshape([-1, height, width, 3])
)

# Convert array to buffer
video_buffer = (
 np.ndarray
 .flatten(video)
 .tobytes()
)

# Write buffer back into a video
process = (
 ffmpeg
 .input('pipe:', format='rawvideo', s='{}x{}'.format(width, height))
 .output("test_2.mkv", r=frame_rate)
 .overwrite_output()
 .run_async(pipe_stdin=True)
)
process.communicate(input=video_buffer)

# Read the newly written video
out_2, error = (
 ffmpeg
 .input("test_2.mkv", threads=40)
 .output("pipe:", format='rawvideo', pix_fmt='rgb24')
 .run(capture_stdout=True)
)

# Convert new video into array
video_2 = (
 np
 .frombuffer(out_2, np.uint8)
 .reshape([-1, height, width, 3])
)

# Video dimensions change
print(f'{video.shape} vs {video_2.shape}') # (844, 1080, 608, 3) vs (2025, 1080, 608, 3)
print(f'{np.array_equal(video, video_2)}') # False

# Hashes don't match
print(hashlib.sha256(bytes(video_2)).digest()) # b'\x88\x00\xc8\x0ed\x84!\x01\x9e\x08 \xd0U\x9a(\x02\x0b-\xeeA\xecU\xf7\xad0xa\x9e\\\xbck\xc3'
print(hashlib.sha256(bytes(video)).digest()) # b'\x9d\xc1\x07xh\x1b\x04I\xed\x906\xe57\xba\xf3\xf1k\x08\xfa\xf1\xfaM\x9a\xcf\xa9\t8\xf0\xc9\t\xa9\xb7'



Scikit-video attempt:


import skvideo.io as sk
import numpy as np
import hashlib

video_data = sk.vread('test.mkv')

sk.vwrite('test_2_ski.mkv', video_data)

video_data_2 = sk.vread('test_2_ski.mkv')

# Dimensions match but...
print(video_data.shape) # (844, 1080, 608, 3)
print(video_data_2.shape) # (844, 1080, 608, 3)

# ...array elements don't
print(np.array_equal(video_data, video_data_2)) # False

# Hashes don't match either
print(hashlib.sha256(bytes(video_data_2)).digest()) # b'\x8b?]\x8epD:\xd9B\x14\xc7\xba\xect\x15G\xfaRP\xde\xad&EC\x15\xc3\x07\n{a[\x80'
print(hashlib.sha256(bytes(video_data)).digest()) # b'\x9d\xc1\x07xh\x1b\x04I\xed\x906\xe57\xba\xf3\xf1k\x08\xfa\xf1\xfaM\x9a\xcf\xa9\t8\xf0\xc9\t\xa9\xb7'



I don't understand where I'm going wrong, and neither library's documentation covers this particular task. Any help is appreciated. Thank you.
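
Two guesses, offered as a hedged diagnosis rather than a verified one, would explain both symptoms. First, the write pipe never declares its input pixel format or frame rate: a rawvideo pipe defaults to 25 fps, while the output is forced to r=frame_rate (the source's avg_frame_rate, seemingly around 60 fps given the numbers), so ffmpeg duplicates frames to bridge the gap. That alone accounts for the frame-count change:

$$ 844 \times \frac{60}{25} = 2025.6 \approx 2025 \text{ frames} $$

Second, even when the counts match (the scikit-video attempt), both writers appear to default to a lossy H.264 encode with an RGB to YUV 4:2:0 conversion, so the decoded pixels cannot be bit-identical. The usual cure is to declare pix_fmt='rgb24' and the frame rate on the rawvideo input, and to pick a lossless RGB codec on the output (libx264rgb with crf=0, or ffv1); those are standard ffmpeg options, though the exact ffmpeg-python keyword spellings should be double-checked.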


-
FFmpeg get frame rate
22 September 2021, by zhin dins
I have several images and I am displaying each one for 78.7 ms, creating something like an 80s video effect. But I am unable to find the correct per-image time in ms, and these images are out of sync with the original video.


I dumped the video to images using this command => ffmpeg -i *.mp4 the80effect/img-%d.jpg And now I have 48622 frames. The video FPS is 24.


So, 48622/24 ≈ 2025. I cannot use 2025 ms since those images will load very slowly, and the approximate value is 78.7 ms per frame/image.


How can I find the correct value? The video duration in seconds is 2026. I have tried all the math to find this but I'm failing. How many images (one frame) per ms? Could you help me? Thank you.
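
For what it's worth, the arithmetic can be checked from the numbers quoted above: 48622/24 ≈ 2026 is the clip duration in seconds, not a per-image delay. The per-image display time is the reciprocal of the frame rate:

$$ \frac{48622 \text{ frames}}{2026 \text{ s}} \approx 24 \text{ fps}, \qquad \frac{1000 \text{ ms}}{24} \approx 41.7 \text{ ms per image} $$

So each image should stay on screen for about 41.7 ms; if loading every image is too slow, showing every second image for 83.3 ms keeps the same overall timing, which is why a value near 78.7 ms slowly drifts out of sync.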