
Recherche avancée
Médias (9)
-
Stereo master soundtrack
17 octobre 2011, par
Mis à jour : Octobre 2011
Langue : English
Type : Audio
-
Elephants Dream - Cover of the soundtrack
17 octobre 2011, par
Mis à jour : Octobre 2011
Langue : English
Type : Image
-
#7 Ambience
16 octobre 2011, par
Mis à jour : Juin 2015
Langue : English
Type : Audio
-
#6 Teaser Music
16 octobre 2011, par
Mis à jour : Février 2013
Langue : English
Type : Audio
-
#5 End Title
16 octobre 2011, par
Mis à jour : Février 2013
Langue : English
Type : Audio
-
#3 The Safest Place
16 octobre 2011, par
Mis à jour : Février 2013
Langue : English
Type : Audio
Autres articles (36)
-
Mise à jour de la version 0.1 vers 0.2
24 juin 2013, par — Explications des différents changements notables lors du passage de la version 0.1 de MediaSPIP à la version 0.3. Quelles sont les nouveautés
Au niveau des dépendances logicielles Utilisation des dernières versions de FFMpeg (>= v1.2.1) ; Installation des dépendances pour Smush ; Installation de MediaInfo et FFprobe pour la récupération des métadonnées ; On n’utilise plus ffmpeg2theora ; On n’installe plus flvtool2 au profit de flvtool++ ; On n’installe plus ffmpeg-php qui n’est plus maintenu au (...) -
Les autorisations surchargées par les plugins
27 avril 2010, par — Mediaspip core
autoriser_auteur_modifier() afin que les visiteurs soient capables de modifier leurs informations sur la page d’auteurs -
HTML5 audio and video support
13 avril 2011, par — MediaSPIP uses HTML5 video and audio tags to play multimedia files, taking advantage of the latest W3C innovations supported by modern browsers.
The MediaSPIP player used has been created specifically for MediaSPIP and can be easily adapted to fit in with a specific theme.
For older browsers the Flowplayer flash fallback is used.
MediaSPIP allows for media playback on major mobile platforms with the above (...)
Sur d’autres sites (6869)
-
how to play audio from a video file in c#
10 août 2014, par Ivan Lisovich — To read the video file I use the ffmpeg libraries (http://ffmpeg.zeranoe.com/builds/), build ffmpeg-2.2.3-win32-dev.7z.
manage c++ code for read video file :void VideoFileReader::Read( String^ fileName, System::Collections::Generic::List^ imageData, System::Collections::Generic::List^>^ audioData )
{
char *nativeFileName = ManagedStringToUnmanagedUTF8Char(fileName);
libffmpeg::AVFormatContext *pFormatCtx = NULL;
libffmpeg::AVCodec *pCodec = NULL;
libffmpeg::AVCodec *aCodec = NULL;
libffmpeg::av_register_all();
if(libffmpeg::avformat_open_input(&pFormatCtx, nativeFileName, NULL, NULL) != 0)
{
throw gcnew System::Exception( "Couldn't open file" );
}
if(libffmpeg::avformat_find_stream_info(pFormatCtx, NULL) < 0)
{
throw gcnew System::Exception( "Couldn't find stream information" );
}
libffmpeg::av_dump_format(pFormatCtx, 0, nativeFileName, 0);
int videoStream = libffmpeg::av_find_best_stream(pFormatCtx, libffmpeg::AVMEDIA_TYPE_VIDEO, -1, -1, &pCodec, 0);
int audioStream = libffmpeg::av_find_best_stream(pFormatCtx, libffmpeg::AVMEDIA_TYPE_AUDIO, -1, -1, &aCodec, 0);
if(videoStream == -1)
{
throw gcnew System::Exception( "Didn't find a video stream" );
}
if(audioStream == -1)
{
throw gcnew System::Exception( "Didn't find a audio stream" );
}
libffmpeg::AVCodecContext *aCodecCtx = pFormatCtx->streams[audioStream]->codec;
libffmpeg::avcodec_open2(aCodecCtx, aCodec, NULL);
m_channels = aCodecCtx->channels;
m_sampleRate = aCodecCtx->sample_rate;
m_bitsPerSample = aCodecCtx->bits_per_coded_sample;
libffmpeg::AVCodecContext *pCodecCtx = pFormatCtx->streams[videoStream]->codec;
if(libffmpeg::avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{
throw gcnew System::Exception( "Could not open codec" );
}
m_width = pCodecCtx->width;
m_height = pCodecCtx->height;
m_framesCount = pFormatCtx->streams[videoStream]->nb_frames;
if (pFormatCtx->streams[videoStream]->r_frame_rate.den == 0)
{
m_frameRate = 25;
}
else
{
m_frameRate = pFormatCtx->streams[videoStream]->r_frame_rate.num / pFormatCtx->streams[videoStream]->r_frame_rate.den;
if (m_frameRate == 0)
{
m_frameRate = 25;
}
}
libffmpeg::AVFrame *pFrame = libffmpeg::av_frame_alloc();
int numBytes = libffmpeg::avpicture_get_size(libffmpeg::PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
libffmpeg::uint8_t *buffer = (libffmpeg::uint8_t *)libffmpeg::av_malloc(numBytes*sizeof(libffmpeg::uint8_t));
struct libffmpeg::SwsContext *sws_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, libffmpeg::PIX_FMT_RGB24, SWS_BILINEAR, NULL, NULL, NULL);
libffmpeg::AVPacket packet;
libffmpeg::AVFrame *filt_frame = libffmpeg::av_frame_alloc();
while(av_read_frame(pFormatCtx, &packet) >= 0)
{
if(packet.stream_index == videoStream)
{
System::Drawing::Bitmap ^bitmap = nullptr;
int frameFinished;
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
if(frameFinished)
{
bitmap = gcnew System::Drawing::Bitmap( pCodecCtx->width, pCodecCtx->height, System::Drawing::Imaging::PixelFormat::Format24bppRgb );
System::Drawing::Imaging::BitmapData^ bitmapData = bitmap->LockBits( System::Drawing::Rectangle( 0, 0, pCodecCtx->width, pCodecCtx->height ), System::Drawing::Imaging::ImageLockMode::ReadOnly, System::Drawing::Imaging::PixelFormat::Format24bppRgb );
libffmpeg::uint8_t* ptr = reinterpret_cast( static_cast( bitmapData->Scan0 ) );
libffmpeg::uint8_t* srcData[4] = { ptr, NULL, NULL, NULL };
int srcLinesize[4] = { bitmapData->Stride, 0, 0, 0 };
libffmpeg::sws_scale( sws_ctx, (libffmpeg::uint8_t const * const *)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, srcData, srcLinesize );
bitmap->UnlockBits( bitmapData );
}
imageData->Add(bitmap);
}
else if(packet.stream_index == audioStream)
{
int b = av_dup_packet(&packet);
if(b >= 0) {
int audio_pkt_size = packet.size;
libffmpeg::uint8_t* audio_pkt_data = packet.data;
while(audio_pkt_size > 0)
{
int got_frame = 0;
int len1 = libffmpeg::avcodec_decode_audio4(aCodecCtx, pFrame, &got_frame, &packet);
if(len1 < 0)
{
audio_pkt_size = 0;
break;
}
audio_pkt_data += len1;
audio_pkt_size -= len1;
if (got_frame)
{
int data_size = libffmpeg::av_samples_get_buffer_size ( NULL, aCodecCtx->channels, pFrame->nb_samples, aCodecCtx->sample_fmt, 1 );
array<byte>^ managedBuf = gcnew array<byte>(data_size);
System::IntPtr iptr = System::IntPtr( pFrame->data[0] );
System::Runtime::InteropServices::Marshal::Copy( iptr, managedBuf, 0, data_size );
audioData->Add(managedBuf);
}
}
}
}
libffmpeg::av_free_packet(&packet);
}
libffmpeg::av_free(buffer);
libffmpeg::av_free(pFrame);
libffmpeg::avcodec_close(pCodecCtx);
libffmpeg::avformat_close_input(&pFormatCtx);
delete [] nativeFileName;
}
This function returns my images in the imageData list and the audio in the audioData list.
I can draw the images in my C# code without problems, but I cannot play the audio data.
I tried playing the audio with the NAudio library, but I hear crackle from the speakers instead of sound.
Code in c# playing audio :var WaveFormat = new WaveFormat(m_sampleRate, 16, m_channels)
var _waveProvider = new BufferedWaveProvider(WaveFormat) { DiscardOnBufferOverflow = true, BufferDuration = TimeSpan.FromMilliseconds(_fileReader.Length) };
var _waveOut = new DirectSoundOut();
_waveOut.Init(_waveProvider);
_waveOut.Play();
foreach (var data in audioData)
{
_waveProvider.AddSamples(data, 0, data.Length);
}What am I doing wrong ?
-
Is there a way to extract frames from a video file using ffmpeg to memory and make some manipulation on each frame ?
28 octobre 2022, par Rojer Brief — The goal is to extract one frame at a time from the video file, compute a histogram from the image, and then move on to the next frame — and so on for all the frames.


The frame extraction and the histogram manipulation work fine when the frames have been saved as images on the hard disk, but now I want to do it all in memory.


to extract the frames i'm using ffmpeg because i think it's fast enough :


ffmpeg -r 1 -i MyVid.mp4 -r 1 "$filename%03d.png"



for now i'm using the ffmpeg in command prompt window.


with this command it will save on the hard disk over 65000 images(frames).
but instead of saving them on the hard disk, I wonder if I can apply the histogram manipulation to each frame in memory, rather than saving all 65000 frames to the hard disk.


then i want to find specific images using the histogram and save to the hard disk this frames.


the histogram part for now is also using files from the hard disk and not from the memory :


private void btnLoadHistogram_Click(object sender, System.EventArgs e)
 {
 string[] files = Directory.GetFiles(@"d:\screenshots\", "*.jpg");

 for (int i = 0; i < files.Length; i++)
 {
 sbInfo.Text = "Loading image";
 if (pbImage.Image != null)
 pbImage.Image.Dispose();

 pbImage.Image = Image.FromFile(files[i]);//txtFileName.Text);

 Application.DoEvents();

 sbInfo.Text = "Computing histogram";
 long[] myValues = GetHistogram(new Bitmap(pbImage.Image));

 Histogram.DrawHistogram(myValues);

 sbInfo.Text = ""; 
 } 
 }

 public long[] GetHistogram(System.Drawing.Bitmap picture)
 {
 long[] myHistogram = new long[256];

 for (int i=0;i3;
 myHistogram[Temp]++;
 }

 return myHistogram;
 }



and the code of the class of the constrol HistogramaDesenat :


using System;
using System.Collections;
using System.ComponentModel;
using System.Drawing;
using System.Data;
using System.Windows.Forms;

namespace Histograma
{
 /// <summary>
 /// Summary description for HistogramaDesenat.
 /// </summary>
 public class HistogramaDesenat : System.Windows.Forms.UserControl
 {
 /// <summary> 
 /// Required designer variable.
 /// </summary>
 private System.ComponentModel.Container components = null;

 public HistogramaDesenat()
 {
 // This call is required by the Windows.Forms Form Designer.
 InitializeComponent();

 // TODO: Add any initialization after the InitializeComponent call

 this.Paint += new PaintEventHandler(HistogramaDesenat_Paint);
 this.Resize+=new EventHandler(HistogramaDesenat_Resize);
 }

 /// <summary> 
 /// Clean up any resources being used.
 /// </summary>
 protected override void Dispose( bool disposing )
 {
 if( disposing )
 {
 if(components != null)
 {
 components.Dispose();
 }
 }
 base.Dispose( disposing );
 }

 #region Component Designer generated code
 /// <summary> 
 /// Required method for Designer support - do not modify 
 /// the contents of this method with the code editor.
 /// </summary>
 private void InitializeComponent()
 {
 // 
 // HistogramaDesenat
 // 
 this.Font = new System.Drawing.Font("Tahoma", 8.25F, System.Drawing.FontStyle.Regular, System.Drawing.GraphicsUnit.Point, ((System.Byte)(0)));
 this.Name = "HistogramaDesenat";
 this.Size = new System.Drawing.Size(208, 176);
 }
 #endregion

 private void HistogramaDesenat_Paint(object sender, PaintEventArgs e)
 {
 if (myIsDrawing)
 {

 Graphics g = e.Graphics;
 Pen myPen = new Pen(new SolidBrush(myColor),myXUnit);
 //The width of the pen is given by the XUnit for the control.
 for (int i=0;i/We draw each line 
 g.DrawLine(myPen,
 new PointF(myOffset + (i*myXUnit), this.Height - myOffset), 
 new PointF(myOffset + (i*myXUnit), this.Height - myOffset - myValues[i] * myYUnit));

 //We plot the coresponding index for the maximum value.
 if (myValues[i]==myMaxValue)
 {
 SizeF mySize = g.MeasureString(i.ToString(),myFont);

 g.DrawString(i.ToString(),myFont,new SolidBrush(myColor),
 new PointF(myOffset + (i*myXUnit) - (mySize.Width/2), this.Height - myFont.Height ),
 System.Drawing.StringFormat.GenericDefault);
 }
 }

 //We draw the indexes for 0 and for the length of the array beeing plotted
 g.DrawString("0",myFont, new SolidBrush(myColor),new PointF(myOffset,this.Height - myFont.Height),System.Drawing.StringFormat.GenericDefault);
 g.DrawString((myValues.Length-1).ToString(),myFont, 
 new SolidBrush(myColor),
 new PointF(myOffset + (myValues.Length * myXUnit) - g.MeasureString((myValues.Length-1).ToString(),myFont).Width,
 this.Height - myFont.Height),
 System.Drawing.StringFormat.GenericDefault);

 //We draw a rectangle surrounding the control.
 g.DrawRectangle(new System.Drawing.Pen(new SolidBrush(Color.Black),1),0,0,this.Width-1,this.Height-1);
 }

 }

 long myMaxValue;
 private long[] myValues;
 private bool myIsDrawing;

 private float myYUnit; //this gives the vertical unit used to scale our values
 private float myXUnit; //this gives the horizontal unit used to scale our values
 private int myOffset = 20; //the offset, in pixels, from the control margins.

 private Color myColor = Color.Black;
 private Font myFont = new Font("Tahoma",10);

 [Category("Histogram Options")]
 [Description ("The distance from the margins for the histogram")]
 public int Offset
 {
 set
 {
 if (value>0)
 myOffset= value;
 }
 get
 {
 return myOffset;
 }
 }

 [Category("Histogram Options")]
 [Description ("The color used within the control")]
 public Color DisplayColor
 {
 set
 {
 myColor = value;
 }
 get
 {
 return myColor;
 }
 }

 /// <summary>
 /// We draw the histogram on the control
 /// </summary>
 /// The values beeing draw
 public void DrawHistogram(long[] Values)
 {
 myValues = new long[Values.Length];
 Values.CopyTo(myValues,0);

 myIsDrawing = true;
 myMaxValue = getMaxim(myValues);

 ComputeXYUnitValues();

 this.Refresh();
 }

 /// <summary>
 /// We get the highest value from the array
 /// </summary>
 /// The array of values in which we look
 /// <returns>The maximum value</returns>
 private long getMaxim(long[] Vals)
 {
 if (myIsDrawing)
 {
 long max = 0;
 for (int i=0;i max)
 max = Vals[i];
 }
 return max;
 }
 return 1;
 }

 private void HistogramaDesenat_Resize(object sender, EventArgs e)
 {
 if (myIsDrawing)
 {
 ComputeXYUnitValues();
 }
 this.Refresh();
 }

 private void ComputeXYUnitValues()
 {
 myYUnit = (float) (this.Height - (2 * myOffset)) / myMaxValue;
 myXUnit = (float) (this.Width - (2 * myOffset)) / (myValues.Length-1);
 }
 }
}



so in the end this is what i want to do :


- 

-
extract the frames from the video file in memory using the ffmpeg.


-
instead using Directory.GetFiles i want to make the histogram manipulation on each frame from the memory that is extracted by the ffmpeg.


-
each extracted frame image to use the histogram to find if there is a lightning(weather lightning) in the image.


-
if there is a lightning save the frame image to the hard disk.












-
-
ffmpeg/c++ Encode additional information of video frame with ffmpeg
29 janvier 2018, par 8793 — I am new to ffmpeg & video encoding; after looking at some related questions on this page, I found this post, which is very useful for understanding the overall ffmpeg process.
However, my work not only needs to manipulate with Mat frame, after extract important information from video (extract edge, position of edge block, type of each edge block, block number, motion vector), I have to encode and send them to client. I tried to find an example code for this part but it seems nobody have done it before.
My problem is how to encode this additional information along with the video frames, and send both to the client. I read about Huffman coding, which provides lossless compression, but is it possible to encode the edge & motion data using Huffman coding while encoding the video frames using ffmpeg ? I’m experimenting with the udp protocol.
I can not find any information about this.
I read about metadata & side information in ffmpeg, but it’s not what I want to do. I hope you can give me advice or a direction to research in this area, so I can understand it and try to implement it. If there is any example code for this case, I would be very grateful for your sharing.
Thank you so much.
Below is encoder part on server side :
int encode(Mat& input_frame, EncodedCallback callback, void* userdata = nullptr) {
AVPacket pkt;
/* encode 1 second of video */
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
int size = 0;
fflush(stdout);
cvtFrame2AVFrameYUV420(input_frame, &frame);
static int time;
frame->pts = time++;
/* encode the image */
ret = avcodec_send_frame(c, frame);
if (ret < 0) {
fprintf(stderr, "Error avcodec_send_frame\n");
exit(1);
}
nbFramesEncoded++;
ret = avcodec_receive_packet(c, &pkt);
if (!isFirstFrameEmmited) {
nbNeededFramesInBuffer++;
printf("nbNeededFramesInBuffer: %d\n", nbNeededFramesInBuffer);
}
if (ret < 0) {
if (ret == -EAGAIN) {
//output is not available, we must send more input
} else {
fprintf(stderr, "Error avcodec_receive_packet %d\n", ret);
exit(1);
}
} else {
if (callback) {
callback(pkt, userdata);
}
size = pkt.size + 4;
av_packet_unref(&pkt);
}
return size;
}Below is code to handle frame processing (presently we check & send motioned block to client)
void updateFrame(Mat& frame) {
//Get all Streams ready
bool isReady = true;
if (!frameStreamer->encoder->isFirstFrameEmmited) {
frameStreamer->sendFrame(frame);
isReady = false;
}
for (int yidx = 0; yidx < gridSize.height; yidx++) {
for (int xidx = 0; xidx < gridSize.width; xidx++) {
StreamPtr& stream = streamGrid[yidx][xidx];
if (!stream->encoder->isFirstFrameEmmited) {
Mat block = frame(stream->irect);
stream->sendFrame(block);
isReady = false;
}
}
}
if (isReady == false) {
return;
}
if (pGray.empty()) {
frameStreamer->sendFrame(frame);
frameStreamer->sendFrame(frame);
cvtColor(frame, pGray, CV_BGR2GRAY);
return;
}
//Motion Detection
Mat gray;
cvtColor(frame, gray, CV_BGR2GRAY);
Mat diff;
absdiff(gray, pGray, diff);
threshold(diff, diff, NOISE_THRESHOLD, 255, CV_THRESH_BINARY);
if (HEAT_IMAGE) {
gray.copyTo(diff, diff);
imshow("Gray", gray);
threshold(diff, diff, HEAT_THRESH, 255, CV_THRESH_TOZERO);
}
if (USE_MORPH_NOISE) {
Morph_Noise(diff);
}
Mat motionImg = Mat::zeros(frameSize, CV_8UC3);
//Block Classification
int nbModifiedBlocks = 0;
for (int yidx = 0; yidx < gridSize.height; yidx++) {
for (int xidx = 0; xidx < gridSize.width; xidx++) {
Rect irect(xidx * blockSize.width, yidx * blockSize.height,
blockSize.width, blockSize.height);
int blockDiff = sum(diff(irect))[0];
if (blockDiff > BLOCK_THRESHOLD * 255) {
this->blockCls.at<uchar>(yidx, xidx) = MODI_BLOCK;
nbModifiedBlocks++;
} else {
this->blockCls.at<uchar>(yidx, xidx) = SKIP_BLOCK;
}
}
}
//Send
if (nbModifiedBlocks > this->nbBlocksThresh) {
nbSentBytes += this->frameStreamer->sendFrame(frame);
} else {
for (int yidx = 0; yidx < gridSize.height; yidx++) {
for (int xidx = 0; xidx < gridSize.width; xidx++) {
uchar cls = this->blockCls.at<uchar>(yidx, xidx);
StreamPtr& stream = streamGrid[yidx][xidx];
bool send = false;
if (cls == MODI_BLOCK) {
if (DEBUG_NETWORK) {
printf("Normal (%d, %d): ", xidx, yidx);
}
send = true;
stream->encoder->nbFramesBuffered = stream->encoder->nbNeededFramesInBuffer;
rectangle(motionImg, stream->irect, Scalar(0, 0, 255), CV_FILLED);
} else if (stream->encoder->nbFramesBuffered > 0) {
if (DEBUG_NETWORK) {
printf("Extra (%d, %d): ", xidx, yidx);
}
send = true;
stream->encoder->nbFramesBuffered--;
stream->encoder->nbFlushFrames++;
rectangle(motionImg, stream->irect, Scalar(0, 255, 0), CV_FILLED);
}
if (send) {
Mat block = frame(stream->irect);
nbSentBytes += stream->sendFrame(block);
gray(stream->irect).copyTo(pGray(stream->irect));
}
}
}
}
</uchar></uchar></uchar>}