
Other articles (52)
-
Permissions overridden by plugins
27 April 2010, by Mediaspip core
autoriser_auteur_modifier() so that visitors can edit their information on the authors page
-
Publishing on MédiaSpip
13 June 2013
Can I post content from an iPad tablet?
Yes, if your MédiaSpip installation is at version 0.2 or higher. If needed, contact your MédiaSpip administrator to find out.
-
Sites built with MediaSPIP
2 May 2011
This page presents some of the sites running MediaSPIP.
You can of course add your own via the form at the bottom of the page.
On other sites (9970)
-
Installation of ffmpeg-php on Amazon EC2 Linux AMI
1 December 2015, by Tanny
I am about two days into attempting to install FFMPEG-php with its dependencies on an AWS EC2 instance running the Amazon Linux AMI. I have installed FFMPEG and have read reportedly successful instructions for installing on Red Hat/Fedora. I have followed a number of tutorials and forum articles, but have had no luck yet. As far as I can tell, the main problems are as follows:
I have installed all the dependencies for ffmpeg-php and run the following commands successfully:
$wget http://downloads.sourceforge.net/project/ffmpeg-php/ffmpeg-php/0.6.0/ffmpeg-php-0.6.0.tbz2
$tar xvfj ffmpeg-php-0.6.0.tbz2
$phpize
But when I run the following command, it throws the error below:
$sudo ./configure
configure: error: ffmpeg shared libraries not found. Make sure ffmpeg is compiled as shared libraries using the --enable-shared option
I have tried the --enable-shared option, but it throws the same error.
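For what it's worth, that error is about how ffmpeg itself was built, not about ffmpeg-php's configure flags: ffmpeg must be recompiled as shared libraries. A typical rebuild looks roughly like this (the source directory and install paths are illustrative):
$cd ~/ffmpeg
$./configure --enable-shared
$make
$sudo make install
$sudo ldconfig
After installing, ldconfig refreshes the linker cache (if the libraries land in /usr/local/lib, that directory may also need to be listed in /etc/ld.so.conf) so that ffmpeg-php's ./configure can find the shared libraries.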
On to my question: Has anyone successfully installed FFMPEG-php on Amazon Linux? Is there a fundamental incompatibility? If anyone could share specific instructions for installing ffmpeg-php on Amazon Linux, I would be very grateful. Any other insights or experiences would also be appreciated.
-
ffmpeg C++/CLI wrapper for use in C#: AccessViolationException after calling a DLL function through its pointer
25 July 2015, by skynet_v
My goal is to write a C++/CLI wrapper around the ffmpeg library by importing ffmpeg functions from its DLL modules.
Later I will use this interface in C#.
This is my challenge, don't ask me why)) So I've implemented a Wrap class, listed below:
namespace FFMpegWrapLib
{
public class Wrap
{
private:
public:
//wstring libavcodecDllName = "avcodec-56.dll";
//wstring libavformatDllName = "avformat-56.dll";
//wstring libswscaleDllName = "swscale-3.dll";
//wstring libavutilDllName = "avutil-54.dll";
HMODULE libavcodecDLL;
HMODULE libavformatDLL;
HMODULE libswsscaleDLL;
HMODULE libavutilDLL;
AVFormatContext **pFormatCtx = nullptr;
AVCodecContext *pCodecCtxOrig = nullptr;
AVCodecContext *pCodecCtx = nullptr;
AVCodec **pCodec = nullptr;
AVFrame **pFrame = nullptr;
AVFrame **pFrameRGB = nullptr;
AVPacket *packet = nullptr;
int *frameFinished;
int numBytes;
uint8_t *buffer = nullptr;
struct SwsContext *sws_ctx = nullptr;
void Init();
void AVRegisterAll();
void Release();
bool SaveFrame(const char *pFileName, AVFrame * frame, int w, int h);
bool GetStreamInfo();
int FindVideoStream();
bool OpenInput(const char* file);
AVCodec* FindDecoder();
AVCodecContext* AllocContext3();
bool CopyContext();
bool OpenCodec2();
AVFrame* AllocFrame();
int PictureGetSize();
void* Alloc(size_t size);
int PictureFill(AVPicture *, const uint8_t *, enum AVPixelFormat, int, int);
SwsContext* GetSwsContext(int, int, enum AVPixelFormat, int, int, enum AVPixelFormat, int, SwsFilter *, SwsFilter *, const double *);
int ReadFrame(AVFormatContext *s, AVPacket *pkt);
int DecodeVideo2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt);
int SwsScale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[]);
void PacketFree(AVPacket *pkt);
void BufferFree(void *ptr);
void FrameFree(AVFrame **frame);
int CodecClose(AVCodecContext *);
void CloseInput(AVFormatContext **);
bool SeekFrame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags);
Wrap();
~Wrap();
bool GetVideoFrame(char* str_in_file, char* str_out_img, uint64_t time);
};
public ref class managedWrap
{
public:
managedWrap(){}
~managedWrap(){ delete unmanagedWrap; }
bool GetVideoFrameToFile(char* str_in_file, char* str_out_img, uint64_t time)
{
return unmanagedWrap->GetVideoFrame(str_in_file, str_out_img, time);
}
static Wrap* unmanagedWrap = new Wrap();
};
}
So the imports from libavcodec and the other modules are successful.
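Presumably Init() loads the modules along these lines (a sketch only, reconstructed from the commented-out DLL names above, not the asker's actual code; avutil should come first, since the other libraries depend on it):
void FFMpegWrapLib::Wrap::Init()
{
    // Load in dependency order: avutil first, then swscale, avcodec, avformat.
    libavutilDLL = LoadLibraryA("avutil-54.dll");
    libswsscaleDLL = LoadLibraryA("swscale-3.dll");
    libavcodecDLL = LoadLibraryA("avcodec-56.dll");
    libavformatDLL = LoadLibraryA("avformat-56.dll");
    if (!libavutilDLL || !libswsscaleDLL || !libavcodecDLL || !libavformatDLL)
        throw exception("Failed to load one of the ffmpeg DLL modules");
}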
The problem is an AccessViolationException when calling a DLL function through its pointer, for example in OpenInput (i.e. avformat_open_input in the native ffmpeg library). The OpenInput code is below:
bool FFMpegWrapLib::Wrap::OpenInput(const char* file)
{
typedef int avformat_open_input(AVFormatContext **, const char *, AVInputFormat *, AVDictionary **);
avformat_open_input* pavformat_open_input = (avformat_open_input *)GetProcAddress(libavformatDLL, "avformat_open_input");
if (pavformat_open_input == nullptr)
{
throw exception("Unable to find avformat_open_input function address in libavformat module");
return false;
}
//pin_ptr<AVFormatContext> pinFormatContext = &(new interior_ptr<AVFormatContext>(pCodecCtx));
pFormatCtx = new AVFormatContext*;
//*pFormatCtx = new AVFormatContext;
int ret = pavformat_open_input(pFormatCtx, file, NULL, NULL); // here it fails
return ret == 0;
}
So the problem, I think, is that the class fields of the Wrap class live in managed (CLR) memory, while ffmpeg works with native memory and initialises the pFormatCtx variable through its address.
Can I avoid this, or is it impossible?
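A likely cause, offered as an assumption rather than a verified diagnosis: pFormatCtx = new AVFormatContext*; leaves the pointed-to pointer uninitialized, and avformat_open_input() requires *ps to be either NULL (in which case it allocates the context itself) or a context obtained from avformat_alloc_context(). A garbage value makes the library dereference a random address, which shows up exactly as an AccessViolationException. A minimal corrected sketch, which also spells out the cdecl calling convention in the typedef:
bool FFMpegWrapLib::Wrap::OpenInput(const char* file)
{
    // ffmpeg exports use the C (cdecl) calling convention; state it explicitly.
    typedef int (__cdecl *avformat_open_input_t)(AVFormatContext**, const char*, AVInputFormat*, AVDictionary**);
    avformat_open_input_t pavformat_open_input = (avformat_open_input_t)GetProcAddress(libavformatDLL, "avformat_open_input");
    if (pavformat_open_input == nullptr)
        throw exception("Unable to find avformat_open_input function address in libavformat module");
    pFormatCtx = new AVFormatContext*;
    *pFormatCtx = nullptr; // required: avformat_open_input allocates only when *ps is NULL
    int ret = pavformat_open_input(pFormatCtx, file, NULL, NULL);
    return ret == 0;
}
Note also that av_register_all() (resolved from the avformat DLL in the same way) must have been called once before opening any input on the ffmpeg versions named above.
-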
Mixing video and audio into an mp4 file with ffmpeg, but the audio doesn't keep in step with the video on playback
28 July 2015, by dragonfly
I managed to write a program to record video (h264/aac) on Android with ffmpeg. The details are as follows:
-
Implement android.hardware.Camera.PreviewCallback to capture every frame from the camera (a YUV image) and send it to ffmpeg in the JNI layer.
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
// Log.d(TAG, "onPreviewFrame");
if (mIsRecording) {
// Log.d(TAG, "handlePreviewFrame");
Parameters param = camera.getParameters();
Size s = param.getPreviewSize();
handlePreviewFrame(data, s.width, s.height, mBufSize);
}
camera.addCallbackBuffer(mPreviewBuffer);
}
private void handlePreviewFrame(byte[] data, int width, int height, int size) {
if (mFormats == ImageFormat.NV21) {
//process the yuv data
}
synchronized (mMuxer) {
//jni api
mMuxer.putAudioVisualData(mYuvData, size, 0);
}
}
-
Use android.media.AudioRecord to read the PCM data from the microphone and write it to ffmpeg in the JNI layer in a loop.
while (this.isRecording) {
int ret = audioRecord.read(tempBuffer, 0, 1024);
if (ret == AudioRecord.ERROR_INVALID_OPERATION) {
throw new IllegalStateException(
"read() returned AudioRecord.ERROR_INVALID_OPERATION");
} else if (ret == AudioRecord.ERROR_BAD_VALUE) {
throw new IllegalStateException("read() returned AudioRecord.ERROR_BAD_VALUE");
}
// process the data
handleAudioData(tempBuffer, ret);
}
private void handleAudioData(short[] data, int size)
{
// convert to byte[]
//Log.d("VideoCaptureActivity", "handleAudioData");
ByteBuffer buffer = ByteBuffer.allocate(data.length * 2);
buffer.order(ByteOrder.LITTLE_ENDIAN);
buffer.asShortBuffer().put(data);
buffer.limit(size * 2);
byte[] bytes = buffer.array();
synchronized (muxing) {
Log.d(TAG, "putAudio Data :" + size*2);
muxing.putAudioVisualData(bytes, size * 2, 1);
}
}
-
Mix the audio and video data in the JNI layer. I refer to this example: https://ffmpeg.org/doxygen/trunk/muxing_8c-source.html
The problem is that the example demonstrates audio and video encoding from dummy source data generated on the fly, whereas I need to encode audio from the microphone and video from the camera. I think the reason for my failure is that the pts handling in the example is not applicable to my situation. My AV function code is as follows:
static int write_video_frame(AVFormatContext *oc, OutputStream *ost, char *data,
int size) {
int ret;
AVCodecContext *c;
int got_packet = 0;
c = ost->st->codec;
AVPacket pkt = { 0 };
av_init_packet(&pkt);
if (!video_st.hwcodec) {
if (ost->zoom) {
zoom(oc, ost, data);
} else {
avpicture_fill((AVPicture*) ost->frame, (const uint8_t *) data,
c->pix_fmt, c->width, c->height);
}
av_frame_make_writable(ost->frame);
//ost->frame->pts = ost->next_pts++;
ost->frame->pts = frame_count;
/* encode the image */
//ALOGI("avcodec_encode_video2 start");
ret = avcodec_encode_video2(c, &pkt, ost->frame, &got_packet);
//ALOGI("avcodec_encode_video2 end");
if (ret < 0) {
ALOGE("Error encoding video frame: %s", av_err2str(ret));
return -1;
}
} else {
if (size != 0) {
pkt.data = (uint8_t *) data;
pkt.size = size;
pkt.pts = pkt.dts = ost->next_pts++;
got_packet = 1;
}
}
if (got_packet) {
//ALOGI("video write_frame start");
//pkt.pts = (int) timestamp;
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
//ALOGI("video write_frame end");
if (ret < 0) {
ALOGE("Error while writing video frame: %s", av_err2str(ret));
return -1;
}
}
frame_count++;
return 0;
}
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost, char *data) {
AVCodecContext *c;
AVPacket pkt = { 0 }; // data and size must be 0;
AVFrame *frame;
int ret;
int got_packet;
int dst_nb_samples;
av_init_packet(&pkt);
c = ost->st->codec;
if (audio_st.speex_echo_cancellation == 1
&& g_audio_echo_play_queue->start_flag == 1) {
//ALOGI("encode_audio_handler in echo_cancel");
QUEUE_ITEM* item = Get_Queue_Item(g_audio_echo_play_queue);
if (item) {
speex_dsp_echo_play_back((spx_int16_t *) item->data);
//ALOGI("encode_audio_handler echo_play begin speex_echo_play_back");
short *echo_processed = (short *) av_malloc(160 * sizeof(short));
speex_dsp_echo_capture((spx_int16_t *) data, echo_processed);
memcpy(data, (uint8_t *) echo_processed, 160);
av_free(echo_processed);
Free_Queue_Item(item, 1);
}
}
frame = ost->tmp_frame;
//update pts
//frame->pts = ost->next_pts;
//ost->next_pts += frame->nb_samples;
if (frame) {
/* convert samples from native format to destination codec format, using the resampler */
/* compute destination number of samples */
dst_nb_samples = av_rescale_rnd(
swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
c->sample_rate, c->sample_rate, AV_ROUND_UP);
memcpy(frame->data[0], data, frame->nb_samples * 2);
//frame->data[0] = data;
/* when we pass a frame to the encoder, it may keep a reference to it
* internally;
* make sure we do not overwrite it here
*/
ret = av_frame_make_writable(ost->frame);
if (ret < 0) {
ALOGE("write_audio_frame av_frame_make_writable ERROR %s",
av_err2str(ret));
return -1;
}
/* convert to destination format */
ret = swr_convert(ost->swr_ctx, ost->frame->data, dst_nb_samples,
(const uint8_t **) frame->data, frame->nb_samples);
if (ret < 0) {
ALOGI("Error while converting %s", av_err2str(ret));
return -1;
}
frame = ost->frame;
frame->pts = av_rescale_q(ost->samples_count,
(AVRational ) { 1, c->sample_rate }, c->time_base);
ost->samples_count += dst_nb_samples;
}
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
if (ret < 0) {
ALOGE("Error encoding audio frame: %s", av_err2str(ret));
return -1;
}
if (got_packet) {
//pkt.pts = (int) timestamp;
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
if (ret < 0) {
ALOGE("Error while writing audio frame: %s", av_err2str(ret));
return -1;
}
}
return (frame || got_packet) ? 0 : 1;
}
How do I deal with the pts of the video and audio streams in my situation? Can anyone give me some advice?
Can I ignore the pts provided by ffmpeg, calculate the pts myself in the Java layer, and pass it down to ffmpeg?
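One workable approach, as a sketch under assumptions rather than a definitive fix: yes, the pts can be computed from capture timestamps taken in the Java layer. Timestamp each video frame and each audio buffer with the same monotonic clock (e.g. System.nanoTime()), pass the value through JNI, and rescale it into the stream time_base instead of using the bare frame_count counter, which assumes a perfectly constant camera frame rate. The helper below is hypothetical:
#include <libavutil/mathematics.h>

/* Convert a capture timestamp in microseconds (e.g. System.nanoTime() / 1000
 * passed down through JNI) into a pts in the given time_base.
 * first_capture_us is the timestamp of the very first frame, taken as t = 0. */
static int64_t capture_time_to_pts(int64_t capture_us, int64_t first_capture_us, AVRational time_base)
{
    int64_t elapsed_us = capture_us - first_capture_us;
    /* microseconds -> time_base units */
    return av_rescale_q(elapsed_us, (AVRational) { 1, 1000000 }, time_base);
}
write_video_frame would then set ost->frame->pts = capture_time_to_pts(...) before encoding; the audio side can either do the same or keep counting samples as write_audio_frame already does. Since both timestamps come from one clock, the two streams should stay in step.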
-