
Recherche avancée
Autres articles (112)
-
MediaSPIP version 0.1 Beta
16 avril 2011, par —
MediaSPIP 0.1 beta est la première version de MediaSPIP décrétée comme "utilisable".
Le fichier zip ici présent contient uniquement les sources de MediaSPIP en version standalone.
Pour avoir une installation fonctionnelle, il est nécessaire d’installer manuellement l’ensemble des dépendances logicielles sur le serveur.
Si vous souhaitez utiliser cette archive pour une installation en mode ferme, il vous faudra également procéder à d’autres modifications (...) -
MediaSPIP 0.1 Beta version
25 avril 2011, par —
MediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
The zip file provided here only contains the sources of MediaSPIP in its standalone version.
To get a working installation, you must manually install all software dependencies on the server.
If you want to use this archive for an installation in "farm mode", you will also need to proceed to other manual (...) -
Websites made with MediaSPIP
2 mai 2011, par —
This page lists some websites based on MediaSPIP.
Sur d’autres sites (15442)
-
building a voice recorder using html5 and javascript
16 juillet 2014, par lama —
I want to build a voice recorder using HTML5, the same as the one found in the GitHub JSSoundRecorder, but what I want is for the user to be able to choose the file format before recording the voice. I can do this using ffmpeg. In other words, the user must be able to select the audio format by check box (mp3, wma, pcm), and in the background code the .wav file usually created by the program should — instead of being displayed — be converted to the selected format and then displayed in the new format. This is the ffmpeg code we can use, but I don't know how to get the .wav audio file to convert it and show it. Please, if someone has ideas, or can find demos — I have been looking for weeks. This is the ffmpeg code:
var fileName;
var fileBuffer;
function timeToSeconds(time) {
var parts = time.split(":");
return parseFloat(parts[0]) * 60 * 60 + parseFloat(parts[1]) * 60 + parseFloat(parts[2]) + parseFloat("0." + parts[3]);
}
// create ffmpeg worker
function getFFMPEGWorker() {
// regexps for extracting time from ffmpeg logs
var durationRegexp = /Duration: (.*?), /
var timeRegexp = /time=(.*?) /;
var duration;
var ffmpegWorker = new Worker('worker.js');
var durationLine;
ffmpegWorker.addEventListener('message', function(event) {
var message = event.data;
console.log(message.type);
if (message.type === "ready" && window.File && window.FileList && window.FileReader) {
// script loaded, hide loader
$('#loading').hide();
} else if (message.type == "stdout") {
console.log(message.data);
} else if (message.type == "stderr") {
console.log(message.data);
// try to extract duration
if (durationRegexp.exec(message.data)) {
duration = timeToSeconds(durationRegexp.exec(message.data)[1]);
}
// try to extract time
if (timeRegexp.exec(message.data)) {
var time = timeToSeconds(timeRegexp.exec(message.data)[1]);
if (duration) {
$("#progress").text("Progress: " + Math.floor(time / duration * 100) + "%");
$("#progress").show();
}
}
} else if (message.type == "done") {
var code = message.data.code;
console.log(message.data);
var outFileNames = Object.keys(message.data.outputFiles);
console.log(outFileNames);
if (code == 0 && outFileNames.length) {
var outFileName = outFileNames[0];
var outFileBuffer = message.data.outputFiles[outFileName];
var src = window.URL.createObjectURL(new Blob([outFileBuffer]));
$("#downloadLink").attr('href', src);
$("#download").show();
} else {
$("#error").show();
}
$("#converting").hide();
$("#progress").hide();
}
}, false);
return ffmpegWorker;
}
// create ffmpeg worker
var ffmpegWorker = getFFMPEGWorker();
var ffmpegRunning = false;
$('#convert').click(function() {
// terminate existing worker
if (ffmpegRunning) {
ffmpegWorker.terminate();
ffmpegWorker = getFFMPEGWorker();
}
ffmpegRunning = true;
// display converting animation
$("#converting").show();
$("#error").hide();
// hide download div
$("#download").hide();
// change download file name
var fileNameExt = fileName.substr(fileName.lastIndexOf('.') + 1);
var outFileName = fileName.substr(0, fileName.lastIndexOf('.')) + "." + getOutFormat();
$("#downloadLink").attr("download", outFileName);
$("#downloadLink").text(outFileName);
var arguments = [];
arguments.push("-i");
arguments.push(fileName);
arguments.push("-b:a");
arguments.push(getBitrate());
switch (getOutFormat()) {
case "mp3":
arguments.push("-acodec");
arguments.push("libmp3lame");
arguments.push("out.mp3");
break;
case "wma":
arguments.push("-acodec");
arguments.push("wmav1");
arguments.push("out.asf");
break;
case "pcm":
arguments.push("-f");
arguments.push("s16le");
arguments.push("-acodec");
arguments.push("pcm_s16le");
arguments.push("out.pcm");
}
ffmpegWorker.postMessage({
type: "command",
arguments: arguments,
files: [
{
"name": fileName,
"buffer": fileBuffer
}
]
});
});
function getOutFormat() {
return $('input[name=format]:checked').val();
}
function getBitrate() {
return $('input[name=bitrate]:checked').val();
}
// disable conversion at start
$('#convert').attr('disabled', 'true');
function readInputFile(file) {
// disable conversion for the time of file loading
$('#convert').attr('disabled', 'true');
// load file content
var reader = new FileReader();
reader.onload = function(e) {
$('#convert').removeAttr('disabled');
fileName = file.name;
console.log(fileName);
fileBuffer = e.target.result;
}
reader.readAsArrayBuffer(file);
}
// reset file selector at start
function resetInputFile() {
$("#inFile").wrap('<form>').closest('form').get(0).reset();
$("#inFile").unwrap();
}
resetInputFile();
function handleFileSelect(event) {
var files = event.target.files; // FileList object
console.log(files);
// files is a FileList of File objects. display first file name
file = files[0];
console.log(file);
if (file) {
$("#drop").text("Drop file here");
readInputFile(file);
}
}
// setup input file listeners
document.getElementById('inFile').addEventListener('change', handleFileSelect, false);
</form> -
While using skvideo.io.FFmpegReader and skvideo.io.FFmpegWriter for video throughput the input video and output video length differ
28 juin 2024, par Kaesebrotus Anonymous —
I have an h264 encoded mp4 video of about 27.5 minutes length and I am trying to create a copy of the video which excludes the first 5 frames. I am using scikit-video and ffmpeg in python for this purpose. I do not have a GPU, so I am using libx264 codec for the output video.


It generally works and the output video excludes the first 5 frames. Somehow, the output video results in a length of about 22 minutes. When visually checking the videos, the shorter video does seem slightly faster and I can identify the same frames at different timestamps. In windows explorer, when clicking properties and then details, both videos' frame rates show as 20.00 fps.


So, my goal is to have both videos of the same length, except for the loss of the first 5 frames which should result in a duration difference of 0.25 seconds, and use the same (almost same) codec and not lose quality.


Can anyone explain why this apparent loss of frames is happening ?


Thank you for your interest in helping me, please find the details below.


Here is a minimal example of what I have done.


framerate = str(20)
reader = skvideo.io.FFmpegReader(inputvideo.mp4, inputdict={'-r': framerate})
writer = skvideo.io.FFmpegWriter(outputvideo.mp4, outputdict={'-vcodec': 'libx264', '-r': framerate})

for idx,frame in enumerate(reader.nextFrame()):
 if idx < 5:
 continue
 writer.writeFrame(frame)

reader.close()
writer.close()



When I read the output video again using FFmpegReader and check the .probeInfo, I can see that the output video has less frames in total. I have also managed to replicate the same problem for shorter videos (now not excluding the first 5 frames, but only throughputting a video), e.g. 10 seconds input turns to 8 seconds output with less frames. I have also tried playing around with further parameters of the outputdict, e.g. -pix_fmt, -b. I have tried to set -time_base in the output dict to the same value as in the inputdict, but that did not seem to have the desired effect. I am not sure if the name of the parameter is right.


For additional info, I am providing the .probeInfo of the input video, of which I used 10 seconds, and the .probeInfo of the 8 second output video it produced.


**input video** .probeInfo:
input dict

{'video': OrderedDict([('@index', '0'),
 ('@codec_name', 'h264'),
 ('@codec_long_name',
 'H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10'),
 ('@profile', 'High 4:4:4 Predictive'),
 ('@codec_type', 'video'),
 ('@codec_tag_string', 'avc1'),
 ('@codec_tag', '0x31637661'),
 ('@width', '4096'),
 ('@height', '3000'),
 ('@coded_width', '4096'),
 ('@coded_height', '3000'),
 ('@closed_captions', '0'),
 ('@film_grain', '0'),
 ('@has_b_frames', '0'),
 ('@sample_aspect_ratio', '1:1'),
 ('@display_aspect_ratio', '512:375'),
 ('@pix_fmt', 'yuv420p'),
 ('@level', '60'),
 ('@chroma_location', 'left'),
 ('@field_order', 'progressive'),
 ('@refs', '1'),
 ('@is_avc', 'true'),
 ('@nal_length_size', '4'),
 ('@id', '0x1'),
 ('@r_frame_rate', '20/1'),
 ('@avg_frame_rate', '20/1'),
 ('@time_base', '1/1200000'),
 ('@start_pts', '0'),
 ('@start_time', '0.000000'),
 ('@duration_ts', '1984740000'),
 ('@duration', '1653.950000'),
 ('@bit_rate', '3788971'),
 ('@bits_per_raw_sample', '8'),
 ('@nb_frames', '33079'),
 ('@extradata_size', '43'),
 ('disposition',
 OrderedDict([('@default', '1'),
 ('@dub', '0'),
 ('@original', '0'),
 ('@comment', '0'),
 ('@lyrics', '0'),
 ('@karaoke', '0'),
 ('@forced', '0'),
 ('@hearing_impaired', '0'),
 ('@visual_impaired', '0'),
 ('@clean_effects', '0'),
 ('@attached_pic', '0'),
 ('@timed_thumbnails', '0'),
 ('@non_diegetic', '0'),
 ('@captions', '0'),
 ('@descriptions', '0'),
 ('@metadata', '0'),
 ('@dependent', '0'),
 ('@still_image', '0')])),
 ('tags',
 OrderedDict([('tag',
 [OrderedDict([('@key', 'language'),
 ('@value', 'und')]),
 OrderedDict([('@key', 'handler_name'),
 ('@value', 'VideoHandler')]),
 OrderedDict([('@key', 'vendor_id'),
 ('@value', '[0][0][0][0]')])])]))])}

**output video** .probeInfo:
{'video': OrderedDict([('@index', '0'),
 ('@codec_name', 'h264'),
 ('@codec_long_name',
 'H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10'),
 ('@profile', 'High'),
 ('@codec_type', 'video'),
 ('@codec_tag_string', 'avc1'),
 ('@codec_tag', '0x31637661'),
 ('@width', '4096'),
 ('@height', '3000'),
 ('@coded_width', '4096'),
 ('@coded_height', '3000'),
 ('@closed_captions', '0'),
 ('@film_grain', '0'),
 ('@has_b_frames', '2'),
 ('@pix_fmt', 'yuv420p'),
 ('@level', '60'),
 ('@chroma_location', 'left'),
 ('@field_order', 'progressive'),
 ('@refs', '1'),
 ('@is_avc', 'true'),
 ('@nal_length_size', '4'),
 ('@id', '0x1'),
 ('@r_frame_rate', '20/1'),
 ('@avg_frame_rate', '20/1'),
 ('@time_base', '1/10240'),
 ('@start_pts', '0'),
 ('@start_time', '0.000000'),
 ('@duration_ts', '82944'),
 ('@duration', '8.100000'),
 ('@bit_rate', '3444755'),
 ('@bits_per_raw_sample', '8'),
 ('@nb_frames', '162'),
 ('@extradata_size', '47'),
 ('disposition',
 OrderedDict([('@default', '1'),
 ('@dub', '0'),
 ('@original', '0'),
 ('@comment', '0'),
 ('@lyrics', '0'),
 ('@karaoke', '0'),
 ('@forced', '0'),
 ('@hearing_impaired', '0'),
 ('@visual_impaired', '0'),
 ('@clean_effects', '0'),
 ('@attached_pic', '0'),
 ('@timed_thumbnails', '0'),
 ('@non_diegetic', '0'),
 ('@captions', '0'),
 ('@descriptions', '0'),
 ('@metadata', '0'),
 ('@dependent', '0'),
 ('@still_image', '0')])),
 ('tags',
 OrderedDict([('tag',
 [OrderedDict([('@key', 'language'),
 ('@value', 'und')]),
 OrderedDict([('@key', 'handler_name'),
 ('@value', 'VideoHandler')]),
 OrderedDict([('@key', 'vendor_id'),
 ('@value', '[0][0][0][0]')]),
 OrderedDict([('@key', 'encoder'),
 ('@value',
 'Lavc61.8.100 libx264')])])]))])}



I used 10 seconds by adding this to the bottom of the loop shown above :


if idx >= 200:
 break



-
webp : add a special case for a huffman table with only 1 symbol
4 décembre 2013, par Justin Ruggles —
webp: add a special case for a huffman table with only 1 symbol
The vlc reader cannot handle 0-bit huffman codes. For most
situations WebP uses the "simple" huffman coding for this case,
but that will only handle symbols up to 255. For the LZ77 distance
codes, larger symbol values are needed, so it can happen in rare
cases that a normal huffman table is used that only has a single
symbol.