
Advanced search
Media (1)
-
Bee video in portrait
14 May 2011, by
Updated: February 2012
Language: French
Type: Video
Other articles (89)
-
Organising by category
17 May 2013, by
In MediaSPIP, a section has two names: category and rubrique.
The various documents stored in MediaSPIP can be filed in different categories. You can create a category by clicking on "publish a category" in the "publish" menu at the top right (after logging in). A category can itself be filed inside another category, so you can build a tree of categories.
The next time a document is published, the newly created category will be offered (...)
Retrieving information from the master site when installing an instance
26 November 2010, by
Usefulness
On the main site, a mutualisation instance is defined by several things: its data in the spip_mutus table; its logo; its main author (the id_admin in the spip_mutus table, corresponding to an id_auteur in the spip_auteurs table), who will be the only one able to definitively create the mutualisation instance;
It can therefore be quite sensible to want to retrieve some of this information in order to complete the installation of an instance, for example to retrieve the (...)
Customising categories
21 June 2013, by
The category creation form
For those who know SPIP well, a category can be likened to a rubrique (section).
For a document of type "category", the fields offered by default are: Text
This form can be modified under:
Administration > Configuration des masques de formulaire.
For a document of type "media", the fields not shown by default are: Quick description
It is also in this configuration section that you can specify the (...)
On other sites (10249)
-
Recording voice using HTML5 and processing it with ffmpeg
22 March 2015, by user3789242
I need to use ffmpeg in my JavaScript/HTML5 project, which lets the user select the format he wants the audio to be saved in. I don't know anything about ffmpeg, and despite doing lots of research I still don't know how to use it in my project. I found an example, https://github.com/sopel39/audioconverter.js, but the problem is how to add ffmpeg.js, which is 8 MB, to my project. Please, if someone can help me, I'll be very thankful.
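A possible workaround, sketched under the assumption that the recorded WAV can be uploaded to a server (or handed to a native helper) with a regular ffmpeg build: the format choice then reduces to a one-line conversion. File names here are illustrative:

ffmpeg -i recording.wav -codec:a libmp3lame -q:a 2 recording.mp3
ffmpeg -i recording.wav -codec:a libvorbis recording.ogg

ffmpeg.js exposes roughly the same conversion in the browser, at the cost of shipping the large script mentioned above.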
Here is my full code. The JavaScript page:
// variables
var leftchannel = [];
var rightchannel = [];
var recorder = null;
var recording = false;
var recordingLength = 0;
var volume = null;
var audioInput = null;
var sampleRate = 44100;
var audioContext = null;
var context = null;
var outputString;

if (!navigator.getUserMedia)
    navigator.getUserMedia = navigator.getUserMedia ||
        navigator.webkitGetUserMedia ||
        navigator.mozGetUserMedia ||
        navigator.msGetUserMedia;

if (navigator.getUserMedia) {
    navigator.getUserMedia({audio: true}, success, function (e) {
        alert('Error capturing audio.');
    });
} else {
    alert('getUserMedia not supported in this browser.');
}
function getVal(value) {
    // if R is pressed, we start recording
    if (value == "record") {
        recording = true;
        // reset the buffers for the new recording
        leftchannel.length = rightchannel.length = 0;
        recordingLength = 0;
        document.getElementById('output').innerHTML = "Recording now...";
    // if S is pressed, we stop the recording and package the WAV file
    } else if (value == "stop") {
        // we stop recording
        recording = false;
        document.getElementById('output').innerHTML = "Building wav file...";
        // we flatten the left and right channels down
        var leftBuffer = mergeBuffers(leftchannel, recordingLength);
        var rightBuffer = mergeBuffers(rightchannel, recordingLength);
        // we interleave both channels together
        var interleaved = interleave(leftBuffer, rightBuffer);
        var buffer = new ArrayBuffer(44 + interleaved.length * 2);
        var view = new DataView(buffer);
        // RIFF chunk descriptor
        writeUTFBytes(view, 0, 'RIFF');
        view.setUint32(4, 44 + interleaved.length * 2, true);
        writeUTFBytes(view, 8, 'WAVE');
        // FMT sub-chunk
        writeUTFBytes(view, 12, 'fmt ');
        view.setUint32(16, 16, true);
        view.setUint16(20, 1, true);
        // stereo (2 channels)
        view.setUint16(22, 2, true);
        view.setUint32(24, sampleRate, true);
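        // byte rate (offset 28) = sampleRate * 2 channels * 2 bytes per sample; block align (offset 32) = 4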
        view.setUint32(28, sampleRate * 4, true);
        view.setUint16(32, 4, true);
        view.setUint16(34, 16, true);
        // data sub-chunk
        writeUTFBytes(view, 36, 'data');
        view.setUint32(40, interleaved.length * 2, true);
        var lng = interleaved.length;
        var index = 44;
        var volume = 1;
        for (var i = 0; i < lng; i++) {
            view.setInt16(index, interleaved[i] * (0x7FFF * volume), true);
            index += 2;
        }
        var blob = new Blob([view], {type: 'audio/wav'});
        // let's save it locally
        document.getElementById('output').innerHTML = 'Handing off the file now...';
        var url = (window.URL || window.webkitURL).createObjectURL(blob);
        var li = document.createElement('li');
        var au = document.createElement('audio');
        var hf = document.createElement('a');
        au.controls = true;
        au.src = url;
        hf.href = url;
        hf.download = 'audio_recording_' + new Date().getTime() + '.wav';
        hf.innerHTML = hf.download;
        li.appendChild(au);
        li.appendChild(hf);
        recordingList.appendChild(li);
    }
}
function success(e) {
    audioContext = window.AudioContext || window.webkitAudioContext;
    context = new audioContext();
    volume = context.createGain();
    // creates an audio node from the microphone incoming stream (source)
    source = context.createMediaStreamSource(e);
    // connect the stream (source) to the gain node
    source.connect(volume);
    var bufferSize = 2048;
    recorder = context.createScriptProcessor(bufferSize, 2, 2);
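    // (ScriptProcessorNode is deprecated in modern browsers in favour of AudioWorklet)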
    // node for the visualizer
    analyser = context.createAnalyser();
    analyser.smoothingTimeConstant = 0.3;
    analyser.fftSize = 512;
    splitter = context.createChannelSplitter();
    // when recording happens
    recorder.onaudioprocess = function (e) {
        if (!recording) return;
        var left = e.inputBuffer.getChannelData(0);
        var right = e.inputBuffer.getChannelData(1);
        leftchannel.push(new Float32Array(left));
        rightchannel.push(new Float32Array(right));
        recordingLength += bufferSize;
        // get the average for the first channel
        var array = new Uint8Array(analyser.frequencyBinCount);
        analyser.getByteFrequencyData(array);
        var c = document.getElementById("myCanvas");
        var ctx = c.getContext("2d");
        // clear the current state
        ctx.clearRect(0, 0, 1000, 325);
        var gradient = ctx.createLinearGradient(0, 0, 0, 300);
        gradient.addColorStop(1, '#000000');
        gradient.addColorStop(0.75, '#ff0000');
        gradient.addColorStop(0.25, '#ffff00');
        gradient.addColorStop(0, '#ffffff');
        // set the fill style
        ctx.fillStyle = gradient;
        drawSpectrum(array);
        function drawSpectrum(array) {
            for (var i = 0; i < array.length; i++) {
                var value = array[i];
                ctx.fillRect(i * 5, 325 - value, 3, 325);
            }
        }
    };
    function getAverageVolume(array) {
        var values = 0;
        var average;
        var length = array.length;
        // get all the frequency amplitudes
        for (var i = 0; i < length; i++) {
            values += array[i];
        }
        average = values / length;
        return average;
    }
    // we connect the recorder (node) to the destination (speakers)
    volume.connect(splitter);
    splitter.connect(analyser, 0, 0);
    analyser.connect(recorder);
    recorder.connect(context.destination);
}
function mergeBuffers(channelBuffer, recordingLength) {
    var result = new Float32Array(recordingLength);
    var offset = 0;
    var lng = channelBuffer.length;
    for (var i = 0; i < lng; i++) {
        var buffer = channelBuffer[i];
        result.set(buffer, offset);
        offset += buffer.length;
    }
    return result;
}

function interleave(leftChannel, rightChannel) {
    var length = leftChannel.length + rightChannel.length;
    var result = new Float32Array(length);
    var inputIndex = 0;
    for (var index = 0; index < length;) {
        result[index++] = leftChannel[inputIndex];
        result[index++] = rightChannel[inputIndex];
        inputIndex++;
    }
    return result;
}

function writeUTFBytes(view, offset, string) {
    var lng = string.length;
    for (var i = 0; i < lng; i++) {
        view.setUint8(offset + i, string.charCodeAt(i));
    }
}

And here is the HTML code:
-
FFMPEG - Multi Track, Multi Channel file to discrete mono files
26 November 2020, by vade
I have files which are multi-track and multi-channel (i.e. track 1 may be 5.1, track 2 may be stereo, track 3 may be stereo, etc.).


I am looking to output every channel from every track into its own 'unrolled' discrete mono file.


Example media:


ffprobe version 4.3.1-0york0~18.04 Copyright (c) 2007-2020 the FFmpeg developers
 built with gcc 7 (Ubuntu 7.5.0-3ubuntu1~18.04)
 configuration: --prefix=/usr --extra-version='0york0~18.04' --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --arch=amd64 --enable-gpl --disable-stripping --enable-avresample --disable-filter=resample --enable-gnutls --enable-ladspa --enable-libaom --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libcodec2 --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libjack --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librabbitmq --enable-librsvg --enable-librubberband --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libsrt --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzmq --enable-libzvbi --enable-lv2 --enable-omx --enable-openal --enable-opencl --enable-opengl --enable-sdl2 --enable-libzimg --enable-pocketsphinx --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libx264 --enable-shared
 libavutil 56. 51.100 / 56. 51.100
 libavcodec 58. 91.100 / 58. 91.100
 libavformat 58. 45.100 / 58. 45.100
 libavdevice 58. 10.100 / 58. 10.100
 libavfilter 7. 85.100 / 7. 85.100
 libavresample 4. 0. 0 / 4. 0. 0
 libswscale 5. 7.100 / 5. 7.100
 libswresample 3. 7.100 / 3. 7.100
 libpostproc 55. 7.100 / 55. 7.100
[mxf @ 0x55d3e7fc2680] wrapping of stream 0 is unknown
[jpeg2000 @ 0x55d3e805ce00] End mismatch 1
 Last message repeated 1 times
Input #0, mxf, from 'redacted.mxf':
 Metadata:
 operational_pattern_ul: 060e2b34.04010101.0d010201.01010900
 modification_date: 2019-10-03T09:58:16.368000Z
 uid : f6267ae2-680e-4357-9b1d-c77c045d3cd7
 generation_uid : e7e6f5a1-6f15-4df5-aea8-a41f3ef535d6
 company_name : redacted
 product_name : redacted
 product_version : 11.6.1.5.301404
 product_uid : 84ae5ffc-4710-11dd-a6fe-0010c629ec73
 application_platform: 4KICR1
 material_package_umid: 0x060A2B340101010501010D2013000000BE3608F3135E48AD99E4340643E47F22
 timecode : 00:59:20:00
 Duration: 00:26:16.07, start: 0.000000, bitrate: 139194 kb/s
 Stream #0:0: Video: jpeg2000, yuv422p10le(progressive), 1920x1080, SAR 1:1 DAR 16:9, 23.98 tbr, 23.98 tbn, 23.98 tbc
 Metadata:
 file_package_umid: 0x060A2B340101010501010D201300000091A43E578B86490698045924FA9EECC5
 track_name : Picture
 Stream #0:1: Audio: pcm_s24le, 48000 Hz, 6 channels, s32 (24 bit), 6912 kb/s
 Metadata:
 file_package_umid: 0x060A2B340101010501010D201300000091A43E578B86490698045924FA9EECC5
 track_name : Sound
 Stream #0:2: Audio: pcm_s24le, 48000 Hz, 2 channels, s32 (24 bit), 2304 kb/s
 Metadata:
 file_package_umid: 0x060A2B340101010501010D201300000091A43E578B86490698045924FA9EECC5
 track_name : Sound
 Stream #0:3: Audio: pcm_s24le, 48000 Hz, 2 channels, s32 (24 bit), 2304 kb/s
 Metadata:
 file_package_umid: 0x060A2B340101010501010D201300000091A43E578B86490698045924FA9EECC5
 track_name : Sound
 Stream #0:4: Audio: pcm_s24le, 48000 Hz, 2 channels, s32 (24 bit), 2304 kb/s
 Metadata:
 file_package_umid: 0x060A2B340101010501010D201300000091A43E578B86490698045924FA9EECC5
 track_name : Sound
 Stream #0:5: Data: none
 Metadata:
 file_package_umid: 0x060A2B340101010501010D201300000091A43E578B86490698045924FA9EECC5
 track_name : Auxiliary Data
 data_type : vbi_vanc_smpte_436M
Unsupported codec with id 0 for input stream 5



These files are vendor-qualified masters, and the track/channel combinations vary between vendors: some might be in stereo, 5.1, 7.1 order; some might be all discrete mono already; some might be discrete stereo, 5.1, and mono tracks. It's all a mix, so I'm looking for a general strategy that gracefully handles all channels from all tracks.
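One general approach, sketched under the assumption that a scripting step is acceptable: enumerate the audio streams and their channel counts with ffprobe first, then generate the per-channel extraction commands from that listing:

ffprobe -v error -select_streams a -show_entries stream=index,channels -of csv=p=0 redacted.mxf

Each output line is a stream index plus its channel count, which is enough to drive a loop over every track/channel pair.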


Now I have seen various strategies documented for discretizing audio in the ffmpeg docs, but none of them seem to show how to target different channels from different tracks. I'm sure it's a PEBKAC error, but I'd love some guidance.

I have tried both a -map_channel approach and a -filter_complex channelsplit approach.

ffmpeg -i redacted.mxf -ss 60 \
-map_channel 0.1.0 -t 10 track_1_0.wav \
-map_channel 0.1.1 -t 10 track_1_1.wav \
-map_channel 0.1.2 -t 10 track_1_2.wav \
-map_channel 0.1.3 -t 10 track_1_3.wav \
-map_channel 0.1.4 -t 10 track_1_4.wav \
-map_channel 0.1.5 -t 10 track_1_5.wav \
-map_channel 0.2.0 -t 10 track_2_0.wav \
-map_channel 0.2.1 -t 10 track_2_1.wav \
-map_channel 0.3.0 -t 10 track_3_0.wav \
-map_channel 0.3.1 -t 10 track_3_1.wav \
-map_channel 0.4.0 -t 10 track_4_0.wav \
-map_channel 0.4.1 -t 10 track_4_1.wav 



However, the output files are not all mono; some are marked as 5.1. I don't believe they are inheriting a sane/correct (mono) channel layout, and the output files marked 5.1 are nonsensical, as they are all sourced from stereo tracks (i.e. track_2_0.wav, track_2_1.wav, track_3_0.wav, track_3_1.wav, track_4_0.wav, track_4_1.wav), which seems odd. track_1_0 from the above command outputs sane media info:


File size : 938 KiB
Duration : 10s 0ms
Overall bit rate mode : Constant
Overall bit rate : 768 Kbps
Writing application : Lavf58.45.100

Audio
Format : PCM
Format settings : Little / Signed
Codec ID : 1
Duration : 10s 0ms
Bit rate mode : Constant
Bit rate : 768 Kbps
Channel(s) : 1 channel
Sampling rate : 48.0 KHz
Bit depth : 16 bits
Stream size : 938 KiB (100%)




However, the second and third tracks have the wrong channel layout and an unexpected codec ID:


Format : Wave
File size : 5.49 MiB
Duration : 10s 0ms
Overall bit rate mode : Constant
Overall bit rate : 4 608 Kbps
Writing application : Lavf58.45.100

Audio
Format : PCM
Format settings : Little / Signed
Codec ID : 00000001-0000-0010-8000-00AA00389B71
Duration : 10s 0ms
Bit rate mode : Constant
Bit rate : 4 608 Kbps
Channel(s) : 6 channels
Channel layout : L R C LFE Lb Rb
Sampling rate : 48.0 KHz
Bit depth : 16 bits
Stream size : 5.49 MiB (100%)




Additionally, regarding map_channel, there are some docs that cast doubt on whether it's the right approach:




Note that currently each output stream can only contain channels from a single input stream; you can't for example use "-map_channel" to pick multiple input audio channels contained in different streams (from the same or different files) and merge them into a single output stream. It is therefore not currently possible, for example, to turn two separate mono streams into a single stereo stream. However splitting a stereo stream into two single channel mono streams is possible.




Using filter_complex, the docs/bug tracker have an example of discretizing 5.1 and marking the outputs mono. I can target the tracks I want, and I get a valid filter chain (as seen in the debug log), but I only get audio for the first track:


ffmpeg -y -v 40 -i redacted.mxf -ss 60 \
 -disposition:a default \
 -filter_complex \
 "[0:a:0]channelsplit=channel_layout=5.1[c1][c2][c3][c4][c5][c6],\
 [c1]aformat=channel_layouts=mono[c1],\
 [c2]aformat=channel_layouts=mono[c2],\
 [c3]aformat=channel_layouts=mono[c3],\
 [c4]aformat=channel_layouts=mono[c4],\
 [c5]aformat=channel_layouts=mono[c5],\
 [c6]aformat=channel_layouts=mono[c6],\
 [0:a:1]channelsplit=channel_layout=stereo[c7][c8],\
 [c7]aformat=channel_layouts=mono[c7],\
 [c8]aformat=channel_layouts=mono[c8],\
 [0:a:2]channelsplit=channel_layout=stereo[c9][c10],\
 [c9]aformat=channel_layouts=mono[c9],\
 [c10]aformat=channel_layouts=mono[c10],\
 [0:a:3]channelsplit=channel_layout=stereo[c11][c12],\
 [c11]aformat=channel_layouts=mono[c11],\
 [c12]aformat=channel_layouts=mono[c12]"\
 -map "[c1]" -t 10 1.wav\
 -map "[c2]" -t 10 2.wav\
 -map "[c3]" -t 10 3.wav\
 -map "[c4]" -t 10 4.wav\
 -map "[c5]" -t 10 5.wav\
 -map "[c6]" -t 10 6.wav\
 -map "[c7]" -t 10 7.wav\
 -map "[c8]" -t 10 8.wav\
 -map "[c9]" -t 10 9.wav\
 -map "[c10]" -t 10 10.wav\
 -map "[c11]" -t 10 11.wav\
 -map "[c12]" -t 10 12.wav



TL;DR:


In short, how does one export every channel of every track as a discrete mono audio file, regardless of the channel layouts?
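One candidate answer, sketched rather than definitive: use one pan filter per output channel instead of channelsplit. pan=mono|c0=cN extracts a single channel from any source layout and labels the result mono explicitly, so it does not require knowing up front whether a track is stereo, 5.1 or 7.1. The labels and the two streams shown are illustrative; the full command would be generated from an ffprobe listing such as the one above:

ffmpeg -i redacted.mxf \
 -filter_complex "[0:a:0]pan=mono|c0=c0[a0c0];[0:a:0]pan=mono|c0=c1[a0c1];[0:a:1]pan=mono|c0=c0[a1c0];[0:a:1]pan=mono|c0=c1[a1c1]" \
 -map "[a0c0]" track_1_0.wav -map "[a0c1]" track_1_1.wav \
 -map "[a1c0]" track_2_0.wav -map "[a1c1]" track_2_1.wav

Since every pan output carries an explicit mono layout, the WAV muxer should not inherit a 5.1 channel mask from the source stream.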


Thank you!


-
Screeching white sound coming while playing audio as a raw stream
27 April 2020, by Sri Nithya Sharabheshwarananda

I. Background

- I am trying to make an application which helps match subtitles to the audio waveform very accurately: at the waveform level, at the word level, or even at the character level.
- The audio is expected to be Sanskrit chants (yoga, rituals, etc.), which are extremely long compound words [example: aṅganyā-sokta-mātaro-bījam is traditionally one word, broken only to assist reading].
- The input transcripts/subtitles might be roughly in sync at the sentence/verse level, but surely will not be in sync at the word level.
- The application should be able to figure out the points of silence in the audio waveform, so that it can guess the start and end point of each word (or even of each letter/consonant/vowel in a word). The chanted audio and the visual subtitle should then match perfectly at the word (or even letter/consonant/vowel) level: the UI highlights or animates the exact word (or letter) being chanted at that moment, and also shows it in a bigger font. This app's purpose is to assist learning Sanskrit chanting.
- It is not expected to be a 100% automated process, nor 100% manual, but a mix where the application assists the human as much as possible.

II. Following is the first code I wrote for this purpose, wherein:

- First I open an mp3 (or any audio format) file,
- Seek to some arbitrary point in the timeline of the audio file // as of now playing from zero offset
- Get the audio data in raw format, for 2 purposes: (1) playing it and (2) drawing the waveform,
- Play the raw audio data using the standard Java audio libraries (a command-line sketch of this pipeline follows the list).
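
For reference, a rough command-line equivalent of what the Jaffree pipeline in section IV asks ffmpeg to do (the file name is the one from the code; pipe:1 stands for the stdout pipe that Jaffree consumes):

ffmpeg -i "03_01_Devi Kavacham.mp3" -vn -ar 44100 -ac 1 -f s16be pipe:1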

III. The problem I am facing is that between every cycle there is a screeching sound.

- Probably I need to close the line between cycles? Sounds simple, I can try.
- But I am also wondering if this overall approach itself is correct? Any tip, guide, suggestion, or link would be really helpful.
- Also, I just hard-coded the sample rate etc. (44100 Hz etc.); are these good to set as default presets, or should they depend on the input format? (See the note after this list.)
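
One detail worth checking in the code below: the sample format requested from ffmpeg and the Java AudioFormat must agree on endianness. The code asks ffmpeg for "s16be" (big-endian PCM) while the SourceDataLine is opened with bigEnd = false (little-endian); a mismatch like that is a classic cause of screeching playback. A consistent little-endian pairing would be:

ffmpeg -i "03_01_Devi Kavacham.mp3" -vn -ar 44100 -ac 1 -f s16le pipe:1

together with new AudioFormat(44100, 16, 1, true, false) on the Java side.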

IV. Here is the code:



import com.github.kokorin.jaffree.StreamType;
import com.github.kokorin.jaffree.ffmpeg.FFmpeg;
import com.github.kokorin.jaffree.ffmpeg.FFmpegProgress;
import com.github.kokorin.jaffree.ffmpeg.FFmpegResult;
import com.github.kokorin.jaffree.ffmpeg.NullOutput;
import com.github.kokorin.jaffree.ffmpeg.PipeOutput;
import com.github.kokorin.jaffree.ffmpeg.ProgressListener;
import com.github.kokorin.jaffree.ffprobe.Stream;
import com.github.kokorin.jaffree.ffmpeg.UrlInput;
import com.github.kokorin.jaffree.ffprobe.FFprobe;
import com.github.kokorin.jaffree.ffprobe.FFprobeResult;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.SourceDataLine;


public class FFMpegToRaw {
 Path BIN = Paths.get("f:\\utilities\\ffmpeg-20190413-0ad0533-win64-static\\bin");
 String VIDEO_MP4 = "f:\\org\\TEMPLE\\DeviMahatmyamRecitationAudio\\03_01_Devi Kavacham.mp3";
 FFprobe ffprobe;
 FFmpeg ffmpeg;

 public void basicCheck() throws Exception {
 if (BIN != null) {
 ffprobe = FFprobe.atPath(BIN);
 } else {
 ffprobe = FFprobe.atPath();
 }
 FFprobeResult result = ffprobe
 .setShowStreams(true)
 .setInput(VIDEO_MP4)
 .execute();

 for (Stream stream : result.getStreams()) {
 System.out.println("Stream " + stream.getIndex()
 + " type " + stream.getCodecType()
 + " duration " + stream.getDuration(TimeUnit.SECONDS));
 } 
 if (BIN != null) {
 ffmpeg = FFmpeg.atPath(BIN);
 } else {
 ffmpeg = FFmpeg.atPath();
 }

 //Sometimes ffprobe can't show the exact duration; use ffmpeg transcoding to NULL output to get it
 final AtomicLong durationMillis = new AtomicLong();
 FFmpegResult fFmpegResult = ffmpeg
 .addInput(
 UrlInput.fromUrl(VIDEO_MP4)
 )
 .addOutput(new NullOutput())
 .setProgressListener(new ProgressListener() {
 @Override
 public void onProgress(FFmpegProgress progress) {
 durationMillis.set(progress.getTimeMillis());
 }
 })
 .execute();
 System.out.println("audio size - "+fFmpegResult.getAudioSize());
 System.out.println("Exact duration: " + durationMillis.get() + " milliseconds");
 }

 public void toRawAndPlay() throws Exception {
 ProgressListener listener = new ProgressListener() {
 @Override
 public void onProgress(FFmpegProgress progress) {
 System.out.println(progress.getFrame());
 }
 };

 // code derived from : https://stackoverflow.com/questions/32873596/play-raw-pcm-audio-received-in-udp-packets

 int sampleRate = 44100;//24000;//Hz
 int sampleSize = 16;//Bits
 int channels = 1;
 boolean signed = true;
 boolean bigEnd = false;
 String format = "s16be"; //"f32le"
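 // note: "s16be" is big-endian PCM, but bigEnd above is false (little-endian); the two must agree (e.g. use "s16le")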

 //https://trac.ffmpeg.org/wiki/audio types
 final AudioFormat af = new AudioFormat(sampleRate, sampleSize, channels, signed, bigEnd);
 final DataLine.Info info = new DataLine.Info(SourceDataLine.class, af);
 final SourceDataLine line = (SourceDataLine) AudioSystem.getLine(info);

 line.open(af, 4096); // format , buffer size
 line.start();

 OutputStream destination = new OutputStream() {
 @Override public void write(int b) throws IOException {
 throw new UnsupportedOperationException("Nobody uses this.");
 }
 @Override public void write(byte[] b, int off, int len) throws IOException {
 String o = new String(b);
 boolean showString = false;
 System.out.println("New output ("+ len
 + ", off="+off + ") -> "+(showString?o:"")); 
 // output wave form repeatedly

 if(len%2!=0) {
 len -= 1;
 System.out.println("");
 }
 line.write(b, off, len);
 System.out.println("done round");
 }
 };

 // src : http://blog.wudilabs.org/entry/c3d357ed/?lang=en-US
 FFmpegResult result = FFmpeg.atPath(BIN).
 addInput(UrlInput.fromPath(Paths.get(VIDEO_MP4))).
 addOutput(PipeOutput.pumpTo(destination).
 disableStream(StreamType.VIDEO). //.addArgument("-vn")
 setFrameRate(sampleRate). //.addArguments("-ar", sampleRate)
 addArguments("-ac", "1").
 setFormat(format) //.addArguments("-f", format)
 ).
 setProgressListener(listener).
 execute();

 // shut down audio
 line.drain();
 line.stop();
 line.close();

 System.out.println("result = "+result.toString());
 }

 public static void main(String[] args) throws Exception {
 FFMpegToRaw raw = new FFMpegToRaw();
 raw.basicCheck();
 raw.toRawAndPlay();
 }
}





Thank You