Advanced search

Media (0)

Word: - Tags -/gis

No media matching your criteria is available on the site.

Other articles (47)

  • Permissions overridden by plugins

    27 April 2010, by

    Mediaspip core
    autoriser_auteur_modifier() so that visitors are able to modify their information on the authors page

  • Requesting the creation of a channel

    12 March 2010, by

    Depending on how the platform is configured, the user may have two different ways of requesting the creation of a channel. The first is at the moment of registration; the second, after registration, by filling in a request form.
    Both methods ask for the same things and work in much the same way: the prospective user must fill in a series of form fields that first of all give the administrators information about (...)

  • Other interesting software

    13 April 2011, by

    We don’t claim to be the only ones doing what we do, and we certainly don’t claim to be the best; we just try to do it well and to keep getting better.
    The following list covers software that is more or less similar to MediaSPIP, or that MediaSPIP more or less tries to do the same things as.
    We don’t know them and we haven’t tried them, but you can take a peek.
    Videopress
    Website : http://videopress.com/
    License : GNU/GPL v2
    Source code : (...)

On other sites (3848)

  • FFmpeg on android is crashing in avcodec_decode_video2 function

    6 June 2015, by Matt Wolfe

    FFmpeg is crashing in libavcodec/utils.c, in avcodec_decode_video2(), around line 2400:

    ret = avctx->codec->decode(avctx, picture, got_picture_ptr, &tmp);

    So I’ve compiled ffmpeg on Android using the following configure script (based on the one from here):

    prefix=${src_root}/ffmpeg/android/arm

    addi_cflags="-marm -Os -fpic"
    addi_ldflags=""

    ./configure \
    --prefix=${prefix} \
    --target-os=linux \
    --arch=arm \
    --enable-shared \
    --disable-doc \
    --disable-programs \
    --disable-symver \
    --cross-prefix=${TOOLCHAIN}/bin/arm-linux-androideabi- \
    --enable-cross-compile \
    --enable-decoder=aac \
    --enable-decoder=mpeg4 \
    --enable-decoder=h263 \
    --enable-decoder=flv \
    --enable-decoder=mpegvideo \
    --enable-decoder=mpeg2video \
    --sysroot=${SYSROOT} \
    --extra-cflags="${addi_cflags}" \
    --pkg-config=$(which pkg-config) >> ${build_log} 2>&1 || die "Couldn't configure ffmpeg"

    The *.so files get copied over into my project, which I reference from my Android.mk script:

    LOCAL_PATH := $(call my-dir)
    FFMPEG_PATH=/path/to/android-ffmpeg-with-rtmp/build/dist

    include $(CLEAR_VARS)
    LOCAL_MODULE := libavcodec
    LOCAL_SRC_FILES :=$(FFMPEG_PATH)/lib/libavcodec-56.so
    include $(PREBUILT_SHARED_LIBRARY)

    include $(CLEAR_VARS)
    LOCAL_MODULE := libavdevice
    LOCAL_SRC_FILES :=$(FFMPEG_PATH)/lib/libavdevice-56.so
    include $(PREBUILT_SHARED_LIBRARY)

    include $(CLEAR_VARS)
    LOCAL_MODULE := libavfilter
    LOCAL_SRC_FILES :=$(FFMPEG_PATH)/lib/libavfilter-5.so
    include $(PREBUILT_SHARED_LIBRARY)

    include $(CLEAR_VARS)
    LOCAL_MODULE := libavformat
    LOCAL_SRC_FILES :=$(FFMPEG_PATH)/lib/libavformat-56.so
    include $(PREBUILT_SHARED_LIBRARY)

    include $(CLEAR_VARS)
    LOCAL_MODULE := libavutil
    LOCAL_SRC_FILES :=$(FFMPEG_PATH)/lib/libavutil-54.so
    include $(PREBUILT_SHARED_LIBRARY)

    include $(CLEAR_VARS)
    LOCAL_MODULE := libswresample
    LOCAL_SRC_FILES :=$(FFMPEG_PATH)/lib/libswresample-1.so
    include $(PREBUILT_SHARED_LIBRARY)

    include $(CLEAR_VARS)
    LOCAL_MODULE := libswscale
    LOCAL_SRC_FILES :=$(FFMPEG_PATH)/lib/libswscale-3.so
    include $(PREBUILT_SHARED_LIBRARY)

    include $(CLEAR_VARS)
    LOCAL_LDLIBS := -llog
    LOCAL_C_INCLUDES := $(FFMPEG_PATH)/include
    #LOCAL_PRELINK_MODULE := false
    LOCAL_MODULE    := axonffmpeg
    LOCAL_SRC_FILES := libffmpeg.c
    LOCAL_CFLAGS := -g
    LOCAL_SHARED_LIBRARIES := libavcodec libavdevice libavfilter libavformat libavutil libswresample libswscale
    include $(BUILD_SHARED_LIBRARY)

    I’m building a little wrapper to decode frames (MPEG-4 part 2 video, simple profile) that come from an external camera:

    #include <jni.h>
    #include <stdarg.h>
    #include <android/log.h>

    #include <libavutil/opt.h>
    #include <libavcodec/avcodec.h>
    #include <libavutil/channel_layout.h>
    #include <libavutil/common.h>
    #include <libavutil/imgutils.h>
    #include <libavutil/mathematics.h>
    #include <libavutil/samplefmt.h>

    #define DEBUG_TAG "LibFFMpeg:NDK"

    AVCodec *codec;
    AVFrame *current_frame;
    AVCodecContext *context;

    int resWidth, resHeight, bitRate;

    void my_log_callback(void *ptr, int level, const char *fmt, va_list vargs);

    jint Java_com_mycompany_axonv2_LibFFMpeg_initDecoder(JNIEnv * env, jobject this,
     jint _resWidth, jint _resHeight, jint _bitRate)
    {
        __android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "initDecoder called");

       int len;

       resWidth = _resWidth;
       resHeight = _resHeight;
       bitRate = _bitRate;
       av_log_set_callback(my_log_callback);
       av_log_set_level(AV_LOG_VERBOSE);
       avcodec_register_all();
       codec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
       if (!codec) {
         __android_log_print(ANDROID_LOG_ERROR, DEBUG_TAG, "codec %d not found", AV_CODEC_ID_MPEG4);
         return -1;
       }
       context = avcodec_alloc_context3(codec);    
       if (!context) {
         __android_log_print(ANDROID_LOG_ERROR, DEBUG_TAG,  "Could not allocate codec context");
         return -1;
       }

       context->width = resWidth;
       context->height = resHeight;
       context->bit_rate = bitRate;
       context->pix_fmt = AV_PIX_FMT_YUV420P;
       context->time_base.den = 6;
       context->time_base.num = 1;
       int openRet = avcodec_open2(context, codec, NULL);
       if (openRet < 0) {
         __android_log_print(ANDROID_LOG_ERROR, DEBUG_TAG,  "Could not open codec, error:%d", openRet);
         return -1;
       }
       current_frame = av_frame_alloc();    
       if (!current_frame) {
         __android_log_print(ANDROID_LOG_ERROR, DEBUG_TAG,  "Could not allocate video frame");
         return -1;
       }    
       return 0;    
    }


    void my_log_callback(void *ptr, int level, const char *fmt, va_list vargs) {
       /* use the vprint variant: passing a va_list to the variadic
          __android_log_print would read it as a single pointer argument */
       __android_log_vprint(ANDROID_LOG_DEBUG, DEBUG_TAG, fmt, vargs);
    }

    jint Java_com_mycompany_axonv2_LibFFMpeg_queueFrameForDecoding(JNIEnv * env, jobject this,
     jlong pts, jbyteArray jBuffer)
    {

       __android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "queueFrameForDecoding called");

       AVPacket avpkt;
       av_init_packet(&avpkt);
       int buffer_len = (*env)->GetArrayLength(env, jBuffer);
       uint8_t* buffer = (uint8_t *) (*env)->GetByteArrayElements(env, jBuffer,0);
       int got_frame = 0;
       __android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "copied %d bytes into uint8_t* buffer", buffer_len);

       av_packet_from_data(&avpkt, buffer, buffer_len);
       __android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "av_packet_from_data called");

       avpkt.pts = pts;
       int ret = avcodec_decode_video2(context, current_frame, &got_frame, &avpkt);

       __android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "avcodec_decode_video2 returned %d" , ret);

       (*env)->ReleaseByteArrayElements(env, jBuffer, (jbyte*) buffer, 0);
       __android_log_print(ANDROID_LOG_DEBUG, DEBUG_TAG, "ReleaseByteArrayElements()");

       return 0;
    }
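
    One thing worth flagging in the code above: av_packet_from_data() makes the packet take ownership of its buffer, and the documentation expects that buffer to come from av_malloc(), whereas here it points at JNI-pinned memory that is later released back to Java. A minimal sketch of what I believe the safer pattern is (copying into an av_malloc()ed buffer first; FF_INPUT_BUFFER_PADDING_SIZE is the padding constant in libavcodec 56):

       // hypothetical replacement for the av_packet_from_data() call above
       uint8_t *pkt_buf = av_malloc(buffer_len + FF_INPUT_BUFFER_PADDING_SIZE);
       if (pkt_buf) {
           memcpy(pkt_buf, buffer, buffer_len);                           // copy out of JNI memory
           memset(pkt_buf + buffer_len, 0, FF_INPUT_BUFFER_PADDING_SIZE); // required zero padding
           av_packet_from_data(&avpkt, pkt_buf, buffer_len);              // the packet now owns pkt_buf
       }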

    Alright, so the init function above works fine, and queueFrameForDecoding works up until the avcodec_decode_video2 function. I’m not expecting it to fully work just yet; however, since I’ve been logging output to see how far we get in that function, I’ve found that there is a call (in libavcodec/utils.c, around line 2400 in the latest code):

    avcodec_decode_video2(...) {
      ....
       ret = avctx->codec->decode(avctx, picture, got_picture_ptr, &tmp);

    init runs fine and finds the codec. Everything works great up until the avcodec_decode_video2 call:

    *** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***
    Build fingerprint: 'samsung/klteuc/klteatt:4.4.2/KOT49H/G900AUCU2ANG3:user/release-keys'
    Revision: '14'
    pid: 19355, tid: 22584, name: BluetoothReadTh  >>> com.mycompany.axonv2 <<<
    signal 11 (SIGSEGV), code 1 (SEGV_MAPERR), fault addr 00000000
    r0 79308400  r1 79491710  r2 7b0b4a70  r3 7b0b49e8
    r4 79308400  r5 79491710  r6 00000000  r7 7b0b49e8
    r8 7b0b4a70  r9 7b0b4a80  sl 795106d8  fp 00000000
    ip 00000000  sp 7b0b49b8  lr 7ba05c18  pc 00000000  cpsr 600f0010
    d0  206c616768616c62  d1  6564206365646f63
    d2  756f722065646f63  d3  20736920656e6974
    d4  0b0a01000a0a0a0b  d5  0a630a01000a0a0a
    d6  0a630a011a00f80a  d7  0b130a011a00f90a
    d8  0000000000000000  d9  0000000000000000
    d10 0000000000000000  d11 0000000000000000
    d12 0000000000000000  d13 0000000000000000
    d14 0000000000000000  d15 0000000000000000
    d16 6369705f746f6720  d17 7274705f65727574
    d18 8000000000000000  d19 00000b9e42bd5730
    d20 0000000000000000  d21 0000000000000000
    d22 7b4fd10400000000  d23 773b894877483b68
    d24 0000000000000000  d25 3fc2f112df3e5244
    d26 40026bb1bbb55516  d27 0000000000000000
    d28 0000000000000000  d29 0000000000000000
    d30 0000000000000000  d31 0000000000000000
    scr 60000010
    backtrace:
    #00  pc 00000000  <unknown>
    #01  pc 00635c14  /data/app-lib/com.mycompany.axonv2-6/libavcodec-56.so (avcodec_decode_video2+1128)

    I don’t understand why it’s crashing when trying to call the decode function (pc 00000000 and a fault address of 00000000 suggest a jump through a NULL function pointer). I’ve looked into the codec function pointer list, and this should be calling ff_h263_decode_frame (source: libavcodec/mpeg4videodec.c):

    AVCodec ff_mpeg4_decoder = {
       .name                  = "mpeg4",
       .long_name             = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
       .type                  = AVMEDIA_TYPE_VIDEO,
       .id                    = AV_CODEC_ID_MPEG4,
       .priv_data_size        = sizeof(Mpeg4DecContext),
       .init                  = decode_init,
       .close                 = ff_h263_decode_end,
       .decode                = ff_h263_decode_frame,
       .capabilities          = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
                                CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
                                CODEC_CAP_FRAME_THREADS,
       .flush                 = ff_mpeg_flush,
       .max_lowres            = 3,
       .pix_fmts              = ff_h263_hwaccel_pixfmt_list_420,
       .profiles              = NULL_IF_CONFIG_SMALL(mpeg4_video_profiles),
       .update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg4_update_thread_context),
       .priv_class            = &mpeg4_class,
    };

    I know that the ff_h263_decode_frame function isn’t being called, because I added logging to it and none of that gets printed.
    However, if I just call ff_h263_decode_frame directly from avcodec_decode_video2, then my logging gets output. I don’t want to call this function directly, though; I would rather get the ffmpeg framework working correctly. Is there something wrong with how I’ve configured ffmpeg? I have added mpegvideo, mpeg2video, flv, and h263 to the configure script, but none of them have helped (they should be included automatically by --enable-decoder=mpeg4). A sanity check on the codec lookup itself is sketched below.
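
    As that sanity check, a minimal sketch (assuming nothing beyond the initDecoder() code above): avcodec_find_encoder() returns an encoder, and an encoder’s AVCodec.decode pointer may legitimately be NULL, which would match the jump to pc 00000000. Verifying that the looked-up codec really is a decoder would rule this out:

       // hypothetical check for the codec lookup in initDecoder()
       codec = avcodec_find_decoder(AV_CODEC_ID_MPEG4);  // decoder lookup, not avcodec_find_encoder()
       if (!codec || !av_codec_is_decoder(codec)) {
           __android_log_print(ANDROID_LOG_ERROR, DEBUG_TAG,
                               "mpeg4 decoder not available in this build");
           return -1;
       }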

    Any help would be greatly appreciated.

  • Files created with “ffmpeg hevc_nvenc” do not play on TV (with NVIDIA Video Codec SDK 9.1)

    29 January 2020, by Dashhh

    Problem

    • Files created with hevc_nvenc do not play on TV (Samsung smart TV, model unknown).
      My ffmpeg build configuration is below.

    FFmpeg build conf

    $ ffmpeg -buildconf
       --enable-cuda
       --enable-cuvid
       --enable-nvenc
       --enable-nonfree
       --enable-libnpp
       --extra-cflags=-I/path/cuda/include
       --extra-ldflags=-L/path/cuda/lib64
       --prefix=/prefix/ffmpeg_build
       --pkg-config-flags=--static
       --extra-libs='-lpthread -lm'
       --extra-cflags=-I/prefix/ffmpeg_build/include
       --extra-ldflags=-L/prefix/ffmpeg_build/lib
       --enable-gpl
       --enable-nonfree
       --enable-version3
       --disable-stripping
       --enable-avisynth
       --enable-libass
       --enable-libfontconfig
       --enable-libfreetype
       --enable-libfribidi
       --enable-libgme
       --enable-libgsm
       --enable-librubberband
       --enable-libshine
       --enable-libsnappy
       --enable-libssh
       --enable-libtwolame
       --enable-libwavpack
       --enable-libzvbi
       --enable-openal
       --enable-sdl2
       --enable-libdrm
       --enable-frei0r
       --enable-ladspa
       --enable-libpulse
       --enable-libsoxr
       --enable-libspeex
       --enable-avfilter
       --enable-postproc
       --enable-pthreads
       --enable-libfdk-aac
       --enable-libmp3lame
       --enable-libopus
       --enable-libtheora
       --enable-libvorbis
       --enable-libvpx
       --enable-libx264
       --enable-libx265
       --disable-ffplay
       --enable-libopenjpeg
       --enable-libwebp
       --enable-libxvid
       --enable-libvidstab
       --enable-libopenh264
       --enable-zlib
       --enable-openssl

    ffmpeg Command

    • The FFmpeg encoding command:
    ffmpeg -ss 1800 -vsync 0 -hwaccel cuvid -hwaccel_device 0 \
    -c:v h264_cuvid -i /data/input.mp4 -t 10 \
    -filter_complex "\
    [0:v]hwdownload,format=nv12,format=yuv420p,\
    scale=iw*2:ih*2" -gpu 0 -c:v hevc_nvenc -pix_fmt yuv444p16le -preset slow -rc cbr_hq -b:v 5000k -maxrate 7000k -bufsize 1000k -acodec aac -ac 2 -dts_delta_threshold 1000 -ab 128k -flags global_header ./makevideo_nvenc_hevc.mp4

    Full log for this command: check this full log

    The reason for adding the “-color_ ” options in the command is as follows.

    • To get HDR video after creating a bt2020 + smpte2084 video using the nvidia hardware accelerator. (I’m studying how to make HDR videos; I’m not sure if this is right.) A sketch of what I mean is below.
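
    As that sketch, here is what the “-color_ ” options look like on a plain hevc_nvenc encode (input.mp4 and output_hdr_tagged.mp4 are placeholder names; whether hevc_nvenc actually writes these values into the VUI is exactly what is in question):

    $ ffmpeg -i input.mp4 -c:v hevc_nvenc \
    -color_primaries bt2020 -color_trc smpte2084 -colorspace bt2020nc -color_range tv \
    output_hdr_tagged.mp4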

    How can I make a video using ffmpeg hevc_nvenc and have it play on TV?


    Things i’ve done

    Here’s what I’ve researched about why it doesn’t work.
    - The header information is not properly included in the resulting video file. So I used a program called nvhsp to add SEI and VUI information inside the video. See below for the commands and logs used.

    nvhsp is an open-source tool for writing VUI and SEI bitstrings into raw video. nvhsp link

    # make rawvideo for nvhsp
    $  ffmpeg -vsync 0 -hwaccel cuvid -hwaccel_device 0 -c:v h264_cuvid \
    -i /data/input.mp4 -t 10 \
    -filter_complex "[0:v]hwdownload,format=nv12,\
    format=yuv420p,scale=iw*2:ih*2" \
    -gpu 0 -c:v hevc_nvenc -f rawvideo output_for_nvhsp.265

    # use nvhsp
    $ python nvhsp.py ./output_for_nvhsp.265 -colorprim bt2020 \
    -transfer smpte-st-2084 -colormatrix bt2020nc \
    -maxcll "1000,300" -videoformat ntsc -full_range tv \
    -masterdisplay "G (13250,34500) B (7500,3000 ) R (34000,16000) WP (15635,16450) L (10000000,1)" \
    ./after_nvhsp_proc_output.265

    Parsing the infile:

    ==========================

    Prepending SEI data
    Starting new SEI NALu ...
    SEI message with MaxCLL = 1000 and MaxFall = 300 created in SEI NAL
    SEI message Mastering Display Data G (13250,34500) B (7500,3000) R (34000,16000) WP (15635,16450) L (10000000,1) created in SEI NAL
    Looking for SPS ......... [232, 22703552]
    SPS_Nals_addresses [232, 22703552]
    SPS NAL Size 488
    Starting reading SPS NAL contents
    Reading of SPS NAL finished. Read 448 of SPS NALu data.

    Making modified SPS NALu ...
    Made modified SPS NALu-OK
    New SEI prepended
    Writing new stream ...
    Progress: 100%
    =====================
    Done!

    File nvhsp_after_output.mp4 created.

    # after process
    $ ffmpeg -y -f rawvideo -r 25 -s 3840x2160 -pix_fmt yuv444p16le -color_primaries bt2020 -color_trc smpte2084  -colorspace bt2020nc -color_range tv -i ./1/after_nvhsp_proc_output.265 -vcodec copy  ./1/result.mp4 -hide_banner

    Truncating packet of size 49766400 to 3260044
    [rawvideo @ 0x40a6400] Estimating duration from bitrate, this may be inaccurate
    Input #0, rawvideo, from './1/nvhsp_after_output.265':
     Duration: N/A, start: 0.000000, bitrate: 9953280 kb/s
       Stream #0:0: Video: rawvideo (Y3[0][16] / 0x10003359), yuv444p16le(tv, bt2020nc/bt2020/smpte2084), 3840x2160, 9953280 kb/s, 25 tbr, 25 tbn, 25 tbc
    [mp4 @ 0x40b0440] Could not find tag for codec rawvideo in stream #0, codec not currently supported in container
    Could not write header for output file #0 (incorrect codec parameters ?): Invalid argument
    Stream mapping:
     Stream #0:0 -> #0:0 (copy)
       Last message repeated 1 times
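
    One observation on this last step: 49766400 bytes is exactly one uncompressed 3840x2160 yuv444p16le frame, so -f rawvideo is treating the compressed .265 bitstream as raw frames, which rawvideo then cannot stream-copy into mp4. A sketch of the remux, assuming after_nvhsp_proc_output.265 is a raw Annex-B HEVC elementary stream, would use the hevc demuxer instead:

    $ ffmpeg -y -f hevc -framerate 25 -i ./1/after_nvhsp_proc_output.265 -c:v copy ./1/result.mp4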

    Goal

    • I want the metadata to be generated normally when encoding a video through hevc_nvenc.

    • I want to create a video through hevc_nvenc and play HDR video on a smart TV that supports 10-bit color depth.


    Additional

    • Is it normal for ffmpeg hevc_nvenc not to generate metadata in the resulting video file, or is it a bug?

    • Please refer to the image below. (’알 수 없음’ means ’unknown’)

      • If you need more detailed file info, check this Gist Link (by ffprobe):
        hevc_nvenc metadata
    • However, if you encode a file in libx265, the attribute information is entered correctly as shown below.

      • If you need more detailed file info, check this Gist Link:
        libx265 metadata

    However, when using hevc_nvenc, all information is missing.

    • I used the options -show_streams -show_programs -show_format -show_data -of json -show_frames -show_log 56 with ffprobe; a narrower query for just the color fields is sketched below.
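
      As that sketch (assuming the output file from the encoding command above), the color metadata can be queried directly:

      $ ffprobe -v error -select_streams v:0 -show_entries stream=color_primaries,color_transfer,color_space,color_range ./makevideo_nvenc_hevc.mp4
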
  • android ffmpeg bad video output

    20 August 2014, by Sujith Manjavana

    I’m following this tutorial to create my first ffmpeg app. I have successfully built the shared libs and compiled the project without any errors. But when I run the app on my Nexus 5, the output is this

    Here is the native code

    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libswscale/swscale.h>
    #include <libavutil/pixfmt.h>

    #include <jni.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <wchar.h>
    #include <pthread.h>

    #include <android/log.h>
    #include <android/bitmap.h>
    #include <android/native_window.h>
    #include <android/native_window_jni.h>

    #define LOG_TAG "android-ffmpeg-tutorial02"
    #define LOGI(...) __android_log_print(4, LOG_TAG, __VA_ARGS__);
    #define LOGE(...) __android_log_print(6, LOG_TAG, __VA_ARGS__);

    ANativeWindow*      window;
    char                *videoFileName;
    AVFormatContext     *formatCtx = NULL;
    int                 videoStream;
    AVCodecContext      *codecCtx = NULL;
    AVFrame             *decodedFrame = NULL;
    AVFrame             *frameRGBA = NULL;
    jobject             bitmap;
    void*               buffer;
    struct SwsContext   *sws_ctx = NULL;
    int                 width;
    int                 height;
    int                 stop;

    jint naInit(JNIEnv *pEnv, jobject pObj, jstring pFileName) {
       AVCodec         *pCodec = NULL;
       int             i;
       AVDictionary    *optionsDict = NULL;

       videoFileName = (char *)(*pEnv)->GetStringUTFChars(pEnv, pFileName, NULL);
       LOGI("video file name is %s", videoFileName);
       // Register all formats and codecs
       av_register_all();
       // Open video file
       if(avformat_open_input(&formatCtx, videoFileName, NULL, NULL)!=0)
           return -1; // Couldn't open file
       // Retrieve stream information
       if(avformat_find_stream_info(formatCtx, NULL)<0)
           return -1; // Couldn't find stream information
       // Dump information about file onto standard error
       av_dump_format(formatCtx, 0, videoFileName, 0);
       // Find the first video stream
       videoStream=-1;
       for(i=0; i<formatCtx->nb_streams; i++) {
           if(formatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
               videoStream=i;
               break;
           }
       }
       if(videoStream==-1)
           return -1; // Didn't find a video stream
       // Get a pointer to the codec context for the video stream
       codecCtx=formatCtx->streams[videoStream]->codec;
       // Find the decoder for the video stream
       pCodec=avcodec_find_decoder(codecCtx->codec_id);
       if(pCodec==NULL) {
           fprintf(stderr, "Unsupported codec!\n");
           return -1; // Codec not found
       }
       // Open codec
       if(avcodec_open2(codecCtx, pCodec, &optionsDict)<0)
           return -1; // Could not open codec
       // Allocate video frame
       decodedFrame=avcodec_alloc_frame();
       // Allocate an AVFrame structure
       frameRGBA=avcodec_alloc_frame();
       if(frameRGBA==NULL)
           return -1;
       return 0;
    }

    jobject createBitmap(JNIEnv *pEnv, int pWidth, int pHeight) {
       int i;
       //get Bitmap class and createBitmap method ID
       jclass javaBitmapClass = (jclass)(*pEnv)->FindClass(pEnv, "android/graphics/Bitmap");
       jmethodID mid = (*pEnv)->GetStaticMethodID(pEnv, javaBitmapClass, "createBitmap", "(IILandroid/graphics/Bitmap$Config;)Landroid/graphics/Bitmap;");
       //create Bitmap.Config
       //reference: https://forums.oracle.com/thread/1548728
       const wchar_t* configName = L"ARGB_8888";
       int len = wcslen(configName);
       jstring jConfigName;
       if (sizeof(wchar_t) != sizeof(jchar)) {
           //wchar_t is defined as different length than jchar(2 bytes)
           jchar* str = (jchar*)malloc((len+1)*sizeof(jchar));
           for (i = 0; i < len; ++i) {
               str[i] = (jchar)configName[i];
           }
           str[len] = 0;
           jConfigName = (*pEnv)->NewString(pEnv, (const jchar*)str, len);
       } else {
           //wchar_t is defined same length as jchar(2 bytes)
           jConfigName = (*pEnv)->NewString(pEnv, (const jchar*)configName, len);
       }
       jclass bitmapConfigClass = (*pEnv)->FindClass(pEnv, "android/graphics/Bitmap$Config");
       jobject javaBitmapConfig = (*pEnv)->CallStaticObjectMethod(pEnv, bitmapConfigClass,
               (*pEnv)->GetStaticMethodID(pEnv, bitmapConfigClass, "valueOf", "(Ljava/lang/String;)Landroid/graphics/Bitmap$Config;"), jConfigName);
       //create the bitmap
       return (*pEnv)->CallStaticObjectMethod(pEnv, javaBitmapClass, mid, pWidth, pHeight, javaBitmapConfig);
    }

    jintArray naGetVideoRes(JNIEnv *pEnv, jobject pObj) {
       jintArray lRes;
       if (NULL == codecCtx) {
           return NULL;
       }
       lRes = (*pEnv)->NewIntArray(pEnv, 2);
       if (lRes == NULL) {
           LOGI(1, "cannot allocate memory for video size");
           return NULL;
       }
       jint lVideoRes[2];
       lVideoRes[0] = codecCtx->width;
       lVideoRes[1] = codecCtx->height;
       (*pEnv)->SetIntArrayRegion(pEnv, lRes, 0, 2, lVideoRes);
       return lRes;
    }

    void naSetSurface(JNIEnv *pEnv, jobject pObj, jobject pSurface) {
       if (0 != pSurface) {
           // get the native window reference
           window = ANativeWindow_fromSurface(pEnv, pSurface);
           // set format and size of window buffer
           ANativeWindow_setBuffersGeometry(window, 0, 0, WINDOW_FORMAT_RGBA_8888);
       } else {
           // release the native window
           ANativeWindow_release(window);
       }
    }

    jint naSetup(JNIEnv *pEnv, jobject pObj, int pWidth, int pHeight) {
       width = pWidth;
       height = pHeight;
       //create a bitmap as the buffer for frameRGBA
       bitmap = createBitmap(pEnv, pWidth, pHeight);
       if (AndroidBitmap_lockPixels(pEnv, bitmap, &buffer) < 0)
           return -1;
       //get the scaling context
       sws_ctx = sws_getContext (
               codecCtx->width,
               codecCtx->height,
               codecCtx->pix_fmt,
               pWidth,
               pHeight,
               AV_PIX_FMT_RGBA,
               SWS_BILINEAR,
               NULL,
               NULL,
               NULL
       );
       // Assign appropriate parts of bitmap to image planes in pFrameRGBA
       // Note that pFrameRGBA is an AVFrame, but AVFrame is a superset
       // of AVPicture
       avpicture_fill((AVPicture *)frameRGBA, buffer, AV_PIX_FMT_RGBA,
               pWidth, pHeight);
       return 0;
    }

    void finish(JNIEnv *pEnv) {
       //unlock the bitmap
       AndroidBitmap_unlockPixels(pEnv, bitmap);
       av_free(buffer);
       // Free the RGB image
       av_free(frameRGBA);
       // Free the YUV frame
       av_free(decodedFrame);
       // Close the codec
       avcodec_close(codecCtx);
       // Close the video file
       avformat_close_input(&formatCtx);
    }

    void decodeAndRender(JNIEnv *pEnv) {
       ANativeWindow_Buffer    windowBuffer;
       AVPacket                packet;
       int                     i=0;
       int                     frameFinished;
       int                     lineCnt;
       while(av_read_frame(formatCtx, &packet)>=0 && !stop) {
           // Is this a packet from the video stream?
           if(packet.stream_index==videoStream) {
               // Decode video frame
               avcodec_decode_video2(codecCtx, decodedFrame, &frameFinished,
                  &packet);
               // Did we get a video frame?
               if(frameFinished) {
                   // Convert the image from its native format to RGBA
                   sws_scale
                   (
                       sws_ctx,
                       (uint8_t const * const *)decodedFrame->data,
                       decodedFrame->linesize,
                       0,
                       codecCtx->height,
                       frameRGBA->data,
                       frameRGBA->linesize
                   );
                   // lock the window buffer
                    if (ANativeWindow_lock(window, &windowBuffer, NULL) < 0) {
                       LOGE("cannot lock window");
                   } else {
                       // draw the frame on buffer
                       LOGI("copy buffer %d:%d:%d", width, height, width*height*4);
                       LOGI("window buffer: %d:%d:%d", windowBuffer.width,
                               windowBuffer.height, windowBuffer.stride);
                       memcpy(windowBuffer.bits, buffer,  width * height * 4);
                       // unlock the window buffer and post it to display
                       ANativeWindow_unlockAndPost(window);
                       // count number of frames
                       ++i;
                   }
               }
           }
           // Free the packet that was allocated by av_read_frame
           av_free_packet(&packet);
       }
       LOGI("total No. of frames decoded and rendered %d", i);
       finish(pEnv);
    }

    /**
    * start the video playback
    */
    void naPlay(JNIEnv *pEnv, jobject pObj) {
       //create a new thread for video decode and render
       pthread_t decodeThread;
       stop = 0;
       pthread_create(&decodeThread, NULL, decodeAndRender, NULL);
    }

    /**
    * stop the video playback
    */
    void naStop(JNIEnv *pEnv, jobject pObj) {
       stop = 1;
    }

    jint JNI_OnLoad(JavaVM* pVm, void* reserved) {
       JNIEnv* env;
       if ((*pVm)->GetEnv(pVm, (void **)&env, JNI_VERSION_1_6) != JNI_OK) {
            return -1;
       }
       JNINativeMethod nm[8];
       nm[0].name = "naInit";
       nm[0].signature = "(Ljava/lang/String;)I";
       nm[0].fnPtr = (void*)naInit;

       nm[1].name = "naSetSurface";
       nm[1].signature = "(Landroid/view/Surface;)V";
       nm[1].fnPtr = (void*)naSetSurface;

       nm[2].name = "naGetVideoRes";
       nm[2].signature = "()[I";
       nm[2].fnPtr = (void*)naGetVideoRes;

       nm[3].name = "naSetup";
       nm[3].signature = "(II)I";
       nm[3].fnPtr = (void*)naSetup;

       nm[4].name = "naPlay";
       nm[4].signature = "()V";
       nm[4].fnPtr = (void*)naPlay;

       nm[5].name = "naStop";
       nm[5].signature = "()V";
       nm[5].fnPtr = (void*)naStop;

       jclass cls = (*env)->FindClass(env, "roman10/tutorial/android_ffmpeg_tutorial02/MainActivity");
       //Register methods with env->RegisterNatives.
       (*env)->RegisterNatives(env, cls, nm, 6);
       return JNI_VERSION_1_6;
    }

    Here is the build.sh

    #!/bin/bash
    NDK=$HOME/Desktop/adt/android-ndk-r9
    SYSROOT=$NDK/platforms/android-9/arch-arm/
    TOOLCHAIN=$NDK/toolchains/arm-linux-androideabi-4.8/prebuilt/linux-x86_64
    function build_one
    {
    ./configure \
       --prefix=$PREFIX \
       --enable-shared \
       --disable-static \
       --disable-doc \
       --disable-ffmpeg \
       --disable-ffplay \
       --disable-ffprobe \
       --disable-ffserver \
       --disable-avdevice \
       --disable-doc \
       --disable-symver \
       --cross-prefix=$TOOLCHAIN/bin/arm-linux-androideabi- \
       --target-os=linux \
       --arch=arm \
       --enable-cross-compile \
       --sysroot=$SYSROOT \
       --extra-cflags="-Os -fpic $ADDI_CFLAGS" \
       --extra-ldflags="$ADDI_LDFLAGS" \
       $ADDITIONAL_CONFIGURE_FLAG
    make clean
    make
    make install
    }
    CPU=arm
    PREFIX=$(pwd)/android/$CPU
    ADDI_CFLAGS="-marm"
    build_one

    It works on the Galaxy Tab 2. What can I do to make it work on all devices? Please help me. (One suspicion is sketched below.)
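
    One device-dependent detail in the code above: decodeAndRender() copies each frame with memcpy(windowBuffer.bits, buffer, width * height * 4), which ignores windowBuffer.stride. On devices where the window’s stride happens to equal its width this works; where it doesn’t, every row lands at the wrong offset, which would explain the garbled output. A minimal sketch of a stride-aware copy (assuming the RGBA buffer filled in naSetup() above):

       // hypothetical replacement for the single memcpy in decodeAndRender();
       // ANativeWindow_Buffer.stride is in pixels, and RGBA_8888 is 4 bytes per pixel
       uint8_t *dst = (uint8_t *) windowBuffer.bits;
       uint8_t *src = (uint8_t *) buffer;
       int y;
       for (y = 0; y < height; y++) {
           memcpy(dst + y * windowBuffer.stride * 4,  // destination row start
                  src + y * width * 4,                // source row start
                  width * 4);                         // one row of RGBA pixels
       }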