
Other articles (47)
-
General document management
13 May 2011
MédiaSPIP never modifies the original document that is uploaded.
For each uploaded document it performs two successive operations: it creates an additional version that can easily be viewed online, while keeping the original downloadable in case the original document cannot be read in a web browser; and it retrieves the metadata of the original document to describe the file textually.
The tables below explain what MédiaSPIP can do (...)
-
Les autorisations surchargées par les plugins
27 April 2010, by Mediaspip core
autoriser_auteur_modifier() so that visitors are able to edit their information on the authors page
-
Use, discuss, criticize
13 April 2011
Talk to people directly involved in MediaSPIP’s development, or to people around you who could use MediaSPIP to share, enhance or develop their creative projects.
The bigger the community, the more MediaSPIP’s potential will be explored and the faster the software will evolve.
A discussion list is available for all exchanges between users.
On other sites (8965)
-
android ffmpeg bad video output
20 August 2014, by Sujith Manjavana
I’m following this tutorial to create my first ffmpeg app. I have successfully built the shared libs and compiled the project without any errors. But when I run the app on my Nexus 5 the output is broken.
Here is the native code
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavutil/pixfmt.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <wchar.h>
#include <pthread.h>
#include <jni.h>
#include <android/log.h>
#include <android/bitmap.h>
#include <android/native_window.h>
#include <android/native_window_jni.h>
#define LOG_TAG "android-ffmpeg-tutorial02"
#define LOGI(...) __android_log_print(4, LOG_TAG, __VA_ARGS__);
#define LOGE(...) __android_log_print(6, LOG_TAG, __VA_ARGS__);
ANativeWindow* window;
char *videoFileName;
AVFormatContext *formatCtx = NULL;
int videoStream;
AVCodecContext *codecCtx = NULL;
AVFrame *decodedFrame = NULL;
AVFrame *frameRGBA = NULL;
jobject bitmap;
void* buffer;
struct SwsContext *sws_ctx = NULL;
int width;
int height;
int stop;
jint naInit(JNIEnv *pEnv, jobject pObj, jstring pFileName) {
AVCodec *pCodec = NULL;
int i;
AVDictionary *optionsDict = NULL;
videoFileName = (char *)(*pEnv)->GetStringUTFChars(pEnv, pFileName, NULL);
LOGI("video file name is %s", videoFileName);
// Register all formats and codecs
av_register_all();
// Open video file
if(avformat_open_input(&formatCtx, videoFileName, NULL, NULL)!=0)
return -1; // Couldn't open file
// Retrieve stream information
if(avformat_find_stream_info(formatCtx, NULL)<0)
return -1; // Couldn't find stream information
// Dump information about file onto standard error
av_dump_format(formatCtx, 0, videoFileName, 0);
// Find the first video stream
videoStream=-1;
for(i=0; i < formatCtx->nb_streams; i++) {
if(formatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
videoStream=i;
break;
}
}
if(videoStream==-1)
return -1; // Didn't find a video stream
// Get a pointer to the codec context for the video stream
codecCtx=formatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(codecCtx->codec_id);
if(pCodec==NULL) {
fprintf(stderr, "Unsupported codec!\n");
return -1; // Codec not found
}
// Open codec
if(avcodec_open2(codecCtx, pCodec, &optionsDict)<0)
return -1; // Could not open codec
// Allocate video frame
decodedFrame=avcodec_alloc_frame();
// Allocate an AVFrame structure
frameRGBA=avcodec_alloc_frame();
if(frameRGBA==NULL)
return -1;
return 0;
}
jobject createBitmap(JNIEnv *pEnv, int pWidth, int pHeight) {
int i;
//get Bitmap class and createBitmap method ID
jclass javaBitmapClass = (jclass)(*pEnv)->FindClass(pEnv, "android/graphics/Bitmap");
jmethodID mid = (*pEnv)->GetStaticMethodID(pEnv, javaBitmapClass, "createBitmap", "(IILandroid/graphics/Bitmap$Config;)Landroid/graphics/Bitmap;");
//create Bitmap.Config
//reference: https://forums.oracle.com/thread/1548728
const wchar_t* configName = L"ARGB_8888";
int len = wcslen(configName);
jstring jConfigName;
if (sizeof(wchar_t) != sizeof(jchar)) {
//wchar_t is defined as different length than jchar(2 bytes)
jchar* str = (jchar*)malloc((len+1)*sizeof(jchar));
for (i = 0; i < len; ++i) {
str[i] = (jchar)configName[i];
}
str[len] = 0;
jConfigName = (*pEnv)->NewString(pEnv, (const jchar*)str, len);
} else {
//wchar_t is defined same length as jchar(2 bytes)
jConfigName = (*pEnv)->NewString(pEnv, (const jchar*)configName, len);
}
jclass bitmapConfigClass = (*pEnv)->FindClass(pEnv, "android/graphics/Bitmap$Config");
jobject javaBitmapConfig = (*pEnv)->CallStaticObjectMethod(pEnv, bitmapConfigClass,
(*pEnv)->GetStaticMethodID(pEnv, bitmapConfigClass, "valueOf", "(Ljava/lang/String;)Landroid/graphics/Bitmap$Config;"), jConfigName);
//create the bitmap
return (*pEnv)->CallStaticObjectMethod(pEnv, javaBitmapClass, mid, pWidth, pHeight, javaBitmapConfig);
}
jintArray naGetVideoRes(JNIEnv *pEnv, jobject pObj) {
jintArray lRes;
if (NULL == codecCtx) {
return NULL;
}
lRes = (*pEnv)->NewIntArray(pEnv, 2);
if (lRes == NULL) {
LOGI("cannot allocate memory for video size");
return NULL;
}
jint lVideoRes[2];
lVideoRes[0] = codecCtx->width;
lVideoRes[1] = codecCtx->height;
(*pEnv)->SetIntArrayRegion(pEnv, lRes, 0, 2, lVideoRes);
return lRes;
}
void naSetSurface(JNIEnv *pEnv, jobject pObj, jobject pSurface) {
if (0 != pSurface) {
// get the native window reference
window = ANativeWindow_fromSurface(pEnv, pSurface);
// set format and size of window buffer
ANativeWindow_setBuffersGeometry(window, 0, 0, WINDOW_FORMAT_RGBA_8888);
} else {
// release the native window
ANativeWindow_release(window);
}
}
jint naSetup(JNIEnv *pEnv, jobject pObj, int pWidth, int pHeight) {
width = pWidth;
height = pHeight;
//create a bitmap as the buffer for frameRGBA
bitmap = createBitmap(pEnv, pWidth, pHeight);
if (AndroidBitmap_lockPixels(pEnv, bitmap, &buffer) < 0)
return -1;
//get the scaling context
sws_ctx = sws_getContext (
codecCtx->width,
codecCtx->height,
codecCtx->pix_fmt,
pWidth,
pHeight,
AV_PIX_FMT_RGBA,
SWS_BILINEAR,
NULL,
NULL,
NULL
);
// Assign appropriate parts of bitmap to image planes in pFrameRGBA
// Note that pFrameRGBA is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)frameRGBA, buffer, AV_PIX_FMT_RGBA,
pWidth, pHeight);
return 0;
}
void finish(JNIEnv *pEnv) {
//unlock the bitmap
AndroidBitmap_unlockPixels(pEnv, bitmap);
av_free(buffer);
// Free the RGB image
av_free(frameRGBA);
// Free the YUV frame
av_free(decodedFrame);
// Close the codec
avcodec_close(codecCtx);
// Close the video file
avformat_close_input(&formatCtx);
}
void decodeAndRender(JNIEnv *pEnv) {
ANativeWindow_Buffer windowBuffer;
AVPacket packet;
int i=0;
int frameFinished;
int lineCnt;
while(av_read_frame(formatCtx, &packet)>=0 && !stop) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video2(codecCtx, decodedFrame, &frameFinished,
&packet);
// Did we get a video frame?
if(frameFinished) {
// Convert the image from its native format to RGBA
sws_scale
(
sws_ctx,
(uint8_t const * const *)decodedFrame->data,
decodedFrame->linesize,
0,
codecCtx->height,
frameRGBA->data,
frameRGBA->linesize
);
// lock the window buffer
if (ANativeWindow_lock(window, &windowBuffer, NULL) < 0) {
LOGE("cannot lock window");
} else {
// draw the frame on buffer
LOGI("copy buffer %d:%d:%d", width, height, width*height*4);
LOGI("window buffer: %d:%d:%d", windowBuffer.width,
windowBuffer.height, windowBuffer.stride);
memcpy(windowBuffer.bits, buffer, width * height * 4);
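// NOTE (added): this flat copy assumes windowBuffer.stride == width; on
// devices where the lock returns a larger stride, rows must be copied one
// at a time (see the stride-aware sketch after this question).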
// unlock the window buffer and post it to display
ANativeWindow_unlockAndPost(window);
// count number of frames
++i;
}
}
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
LOGI("total No. of frames decoded and rendered %d", i);
finish(pEnv);
}
/**
* start the video playback
*/
void naPlay(JNIEnv *pEnv, jobject pObj) {
//create a new thread for video decode and render
pthread_t decodeThread;
stop = 0;
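// NOTE (added): a JNIEnv* is only valid on the thread that owns it, and
// NULL is passed as the thread argument here; the decode thread must not
// reuse this thread's env (see the note after this question).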
pthread_create(&decodeThread, NULL, (void *(*)(void *)) decodeAndRender, NULL);
}
/**
* stop the video playback
*/
void naStop(JNIEnv *pEnv, jobject pObj) {
stop = 1;
}
jint JNI_OnLoad(JavaVM* pVm, void* reserved) {
JNIEnv* env;
if ((*pVm)->GetEnv(pVm, (void **)&env, JNI_VERSION_1_6) != JNI_OK) {
return -1;
}
JNINativeMethod nm[6];
nm[0].name = "naInit";
nm[0].signature = "(Ljava/lang/String;)I";
nm[0].fnPtr = (void*)naInit;
nm[1].name = "naSetSurface";
nm[1].signature = "(Landroid/view/Surface;)V";
nm[1].fnPtr = (void*)naSetSurface;
nm[2].name = "naGetVideoRes";
nm[2].signature = "()[I";
nm[2].fnPtr = (void*)naGetVideoRes;
nm[3].name = "naSetup";
nm[3].signature = "(II)I";
nm[3].fnPtr = (void*)naSetup;
nm[4].name = "naPlay";
nm[4].signature = "()V";
nm[4].fnPtr = (void*)naPlay;
nm[5].name = "naStop";
nm[5].signature = "()V";
nm[5].fnPtr = (void*)naStop;
jclass cls = (*env)->FindClass(env, "roman10/tutorial/android_ffmpeg_tutorial02/MainActivity");
//Register methods with env->RegisterNatives.
(*env)->RegisterNatives(env, cls, nm, 6);
return JNI_VERSION_1_6;
}
Here is the build.sh:
#!/bin/bash
NDK=$HOME/Desktop/adt/android-ndk-r9
SYSROOT=$NDK/platforms/android-9/arch-arm/
TOOLCHAIN=$NDK/toolchains/arm-linux-androideabi-4.8/prebuilt/linux-x86_64
function build_one
{
./configure \
--prefix=$PREFIX \
--enable-shared \
--disable-static \
--disable-doc \
--disable-ffmpeg \
--disable-ffplay \
--disable-ffprobe \
--disable-ffserver \
--disable-avdevice \
--disable-doc \
--disable-symver \
--cross-prefix=$TOOLCHAIN/bin/arm-linux-androideabi- \
--target-os=linux \
--arch=arm \
--enable-cross-compile \
--sysroot=$SYSROOT \
--extra-cflags="-Os -fpic $ADDI_CFLAGS" \
--extra-ldflags="$ADDI_LDFLAGS" \
$ADDITIONAL_CONFIGURE_FLAG
make clean
make
make install
}
CPU=arm
PREFIX=$(pwd)/android/$CPU
ADDI_CFLAGS="-marm"
build_one
It works on the Galaxy Tab 2. What can I do to make it work on all devices? Please help.
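One plausible explanation for code that renders correctly on a Galaxy Tab 2 but not on a Nexus 5 is the flat memcpy in decodeAndRender: it copies width * height * 4 bytes, which silently assumes that windowBuffer.stride equals width. ANativeWindow_lock is free to return a buffer with a larger stride (measured in pixels), and when it does, every row of the frame lands shifted, producing garbled output on some devices and clean output on others. A minimal, untested sketch of a stride-aware copy, reusing the globals from the code above:

// Replace the flat memcpy with a per-row copy that honors the stride.
// windowBuffer.stride is measured in pixels; RGBA_8888 is 4 bytes/pixel.
if (ANativeWindow_lock(window, &windowBuffer, NULL) >= 0) {
    uint8_t *dst = (uint8_t *) windowBuffer.bits;
    const uint8_t *src = (const uint8_t *) buffer;
    int row;
    for (row = 0; row < height; row++) {
        memcpy(dst + row * windowBuffer.stride * 4,
               src + row * width * 4,
               width * 4);
    }
    ANativeWindow_unlockAndPost(window);
}

A second, unrelated hazard in the code above: decodeAndRender is declared to take a JNIEnv *, but naPlay passes NULL as the thread argument. A JNIEnv is only valid on the thread that owns it, so the decode thread should keep the JavaVM * from JNI_OnLoad and call (*vm)->AttachCurrentThread(vm, &env, NULL) to get its own environment before touching any JNI function such as AndroidBitmap_unlockPixels.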
-
Encoding raw YUV420P to h264 with AVCodec on iOS
4 January 2013, by Wade
I am trying to encode a single YUV420P image gathered from a CMSampleBuffer to an AVPacket so that I can send h264 video over the network with RTMP. The posted code example seems to work, as avcodec_encode_video2 returns 0 (success); however, got_output is also 0 (the AVPacket is empty). Does anyone have any experience with encoding video on iOS devices that might know what I am doing wrong?
- (void) captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection {
// sampleBuffer now contains an individual frame of raw video frames
CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CVPixelBufferLockBaseAddress(pixelBuffer, 0);
// access the data
int width = CVPixelBufferGetWidth(pixelBuffer);
int height = CVPixelBufferGetHeight(pixelBuffer);
int bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
unsigned char *rawPixelBase = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
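// NOTE (added): this grabs the base address of plane 0 only. Unless the
// capture session is explicitly configured for a planar format, iOS hands
// back bi-planar (NV12-style) buffers, so treating this pointer as a full
// contiguous YUV420P image in avpicture_fill() below is itself suspect.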
// Convert the raw pixel base to h.264 format
AVCodec *codec = 0;
AVCodecContext *context = 0;
AVFrame *frame = 0;
AVPacket packet;
//avcodec_init();
avcodec_register_all();
codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (codec == 0) {
NSLog(@"Codec not found!!");
return;
}
context = avcodec_alloc_context3(codec);
if (!context) {
NSLog(@"Context no bueno.");
return;
}
// Bit rate
context->bit_rate = 400000; // HARD CODE
context->bit_rate_tolerance = 10;
// Resolution
context->width = width;
context->height = height;
// Frames Per Second
context->time_base = (AVRational) {1,25};
context->gop_size = 1;
//context->max_b_frames = 1;
context->pix_fmt = PIX_FMT_YUV420P;
// Open the codec
if (avcodec_open2(context, codec, 0) < 0) {
NSLog(@"Unable to open codec");
return;
}
// Create the frame
frame = avcodec_alloc_frame();
if (!frame) {
NSLog(@"Unable to alloc frame");
return;
}
frame->format = context->pix_fmt;
frame->width = context->width;
frame->height = context->height;
avpicture_fill((AVPicture *) frame, rawPixelBase, context->pix_fmt, frame->width, frame->height);
int got_output = 0;
av_init_packet(&packet);
avcodec_encode_video2(context, &packet, frame, &got_output);
// Unlock the pixel data
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
// Send the data over the network
[self uploadData:[NSData dataWithBytes:packet.data length:packet.size] toRTMP:self.rtmp_OutVideoStream];
}
Note: It is known that this code has memory leaks because I am not freeing the memory that is dynamically allocated.
UPDATE
I updated my code to use @pogorskiy’s method. I only try to upload the frame if got_output returns 1, and I clear the buffer once I am done encoding video frames.
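For context on why got_output can legitimately be 0: an H.264 encoder buffers frames internally for lookahead and reordering, so the first call(s) to avcodec_encode_video2 may consume a frame and produce no packet; this is documented behavior, not an error. Since the callback above allocates a brand-new codec context and feeds it exactly one frame, an empty packet on the first call is the expected outcome. Whatever the encoder is still holding has to be drained by passing a NULL frame until got_output stays 0. A rough, untested sketch of that draining loop, assuming the context variable from the code above:

// Drain packets the encoder is still holding after the last input frame.
AVPacket packet;
int got_output = 1;
while (got_output) {
    av_init_packet(&packet);
    packet.data = NULL;  // let the encoder allocate the payload
    packet.size = 0;
    if (avcodec_encode_video2(context, &packet, NULL, &got_output) < 0)
        break;           // encoder error
    if (got_output) {
        // hand packet.data / packet.size to the RTMP upload here
        av_free_packet(&packet);
    }
}

Reusing one long-lived encoder context across callbacks, instead of allocating one per frame, also avoids paying this startup delay on every single frame.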
-
How to combine all flv files and images into a single video file using ffmpeg and PHP?
22 October 2012, by Kannan
How to combine all flv files and images into a single video file using ffmpeg and PHP?
Details: I have a list of images and videos. I have already created a video from the images in that list using the following command, but I don’t know how to combine the other video files that sit in the same directory with the video it produces:
$command = "ffmpeg -r 0.3 -qscale 2 -i ".$imagesFolder.$vFolder."%d.jpg ".$imagesFolder.$vFolder."raw.flv";
passthru($command. ' 2>&1');
Note: my directory contains 01.jpg, 02.jpg, 03.jpg, 04.flv, 05.jpg, 06.flv, etc. I created the video file from the images, but I don’t know how to combine the other video files with it.
Update: I created the video file from the images; I am stuck on mixing the other video files (the flvs) with the generated one. There are many of them, not just two or three, so I am asking whether there is a technique for combining video files in sequence, just as there is for images.