
Media (91)
-
MediaSPIP Simple: future default graphic theme?
26 September 2013, by
Updated: October 2013
Language: French
Type: Video
-
with chosen
13 September 2013, by
Updated: September 2013
Language: French
Type: Image
-
without chosen
13 September 2013, by
Updated: September 2013
Language: French
Type: Image
-
chosen config
13 September 2013, by
Updated: September 2013
Language: French
Type: Image
-
SPIP - plugins - embed code - Example
2 September 2013, by
Updated: September 2013
Language: French
Type: Image
-
GetID3 - File information block
9 April 2013, by
Updated: May 2013
Language: French
Type: Image
Other articles (87)
-
From upload to the final video [standalone version]
31 January 2010, by
The path of an audio or video document through SPIPMotion is divided into three distinct stages.
Upload and retrieval of information about the source video
First of all, you need to create a SPIP article and attach the "source" video document to it.
When this document is attached to the article, two actions are carried out on top of the normal behaviour: retrieval of the technical information of the file's audio and video streams; generation of a thumbnail: extraction of a (...) -
List of compatible distributions
26 April 2011, by
The table below lists the Linux distributions compatible with the automated installation script of MediaSPIP.
Distribution name | Version name | Version number
Debian | Squeeze | 6.x.x
Debian | Wheezy | 7.x.x
Debian | Jessie | 8.x.x
Ubuntu | The Precise Pangolin | 12.04 LTS
Ubuntu | The Trusty Tahr | 14.04
If you want to help us improve this list, you can provide us access to a machine whose distribution is not mentioned above or send the necessary fixes to add (...) -
Libraries and binaries specific to video and audio processing
31 January 2010, by
The following software and libraries are used by SPIPmotion in one way or another.
Required binaries: FFMpeg: the main encoder, able to transcode almost any type of video or audio file into formats playable on the Internet (see this tutorial for its installation); Oggz-tools: tools for inspecting ogg files; Mediainfo: retrieves information from most video and audio formats;
Additional, optional binaries: flvtool2: (...)
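As an aside, the kind of stream-information retrieval described above can be sketched with ffprobe (shipped with FFmpeg); a minimal Python example, assuming ffprobe is on the PATH and using a placeholder file name:

import json
import subprocess

# Ask ffprobe for machine-readable stream metadata ("example.mp4" is a placeholder).
result = subprocess.run(
    ["ffprobe", "-v", "error", "-print_format", "json", "-show_streams", "example.mp4"],
    capture_output=True, text=True, check=True)
for stream in json.loads(result.stdout)["streams"]:
    print(stream["codec_type"], stream.get("codec_name"))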
On other sites (11793)
-
Resampling audio using libswresample leaves a small amount of noise after resampling
20 July 2020, by Milo
I'm trying to resample audio from 44 kHz to 48 kHz and I'm getting a small, light noise after resampling, as if someone were gently tapping the mic. This happens both ways, from 48 kHz to 44 kHz and vice versa.


I've read that this can happen because the SwrContext still has some data left over and that I should flush the context before resampling the next frame. And although this helps a little (the noise is less noticeable), it's still present.


I've tried using FFmpeg's resample filter instead, but the output is just loud, incoherent noise. I'm fairly sure libswresample should not introduce noise on resampling, which means I just don't know how to use it well and I'm missing some options.
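For reference, the same conversion can be run through ffmpeg's command-line resampler to check whether the noise is already present in the source; a minimal Python sketch, assuming ffmpeg is on the PATH ("input.wav" and "output_48k.wav" are placeholder names):

import subprocess

# Resample a test file to 48 kHz with ffmpeg's own resampling path,
# to compare against the swr_convert() output produced by the code below.
subprocess.run(
    ["ffmpeg", "-y", "-i", "input.wav", "-ar", "48000", "output_48k.wav"],
    check=True)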


This is the code for the resampler.


int ResampleFrame(VideoState *videoState, AVFrame *decoded_audio_frame, enum AVSampleFormat out_sample_fmt, uint8_t *out_buf)
{
    int in_sample_rate = videoState->audio->ptrAudioCodecCtx_->sample_rate;
    int out_sample_rate = SAMPLE_RATE;

    // get an instance of the AudioResamplingState struct, create if NULL
    AudioResamplingState *arState = getAudioResampling(videoState->audio->ptrAudioCodecCtx_->channel_layout);

    if (!arState->swr_ctx)
    {
        printf("swr_alloc error.\n");
        return -1;
    }

    // get input audio channels
    arState->in_channel_layout = (videoState->audio->ptrAudioCodecCtx_->channels ==
                                  av_get_channel_layout_nb_channels(videoState->audio->ptrAudioCodecCtx_->channel_layout)) ?
                                 videoState->audio->ptrAudioCodecCtx_->channel_layout :
                                 av_get_default_channel_layout(videoState->audio->ptrAudioCodecCtx_->channels);

    // check input audio channels correctly retrieved
    if (arState->in_channel_layout <= 0)
    {
        printf("in_channel_layout error.\n");
        return -1;
    }

    arState->out_channel_layout = AV_CH_LAYOUT_STEREO;

    // retrieve number of audio samples (per channel)
    arState->in_nb_samples = decoded_audio_frame->nb_samples;
    if (arState->in_nb_samples <= 0)
    {
        printf("in_nb_samples error.\n");
        return -1;
    }

    // Set SwrContext parameters for resampling (input side)
    av_opt_set_int(arState->swr_ctx, "in_channel_layout", arState->in_channel_layout, 0);
    av_opt_set_int(arState->swr_ctx, "in_sample_rate", in_sample_rate, 0);
    av_opt_set_sample_fmt(arState->swr_ctx, "in_sample_fmt", videoState->audio->ptrAudioCodecCtx_->sample_fmt, 0);

    // Set SwrContext parameters for resampling (output side)
    av_opt_set_int(arState->swr_ctx, "out_channel_layout", arState->out_channel_layout, 0);
    av_opt_set_int(arState->swr_ctx, "out_sample_rate", out_sample_rate, 0);
    av_opt_set_sample_fmt(arState->swr_ctx, "out_sample_fmt", out_sample_fmt, 0);

    // initialize SWR context after user parameters have been set
    int ret = swr_init(arState->swr_ctx);
    if (ret < 0)
    {
        printf("Failed to initialize the resampling context.\n");
        return -1;
    }

    // retrieve output samples number taking into account the progressive delay
    int64_t delay = swr_get_delay(arState->swr_ctx, videoState->audio->ptrAudioCodecCtx_->sample_rate) + arState->in_nb_samples;
    arState->out_nb_samples = av_rescale_rnd(delay, out_sample_rate, in_sample_rate, AV_ROUND_UP);

    // check output samples number was correctly rescaled
    if (arState->out_nb_samples <= 0)
    {
        printf("av_rescale_rnd error\n");
        return -1;
    }

    // get number of output audio channels
    arState->out_nb_channels = av_get_channel_layout_nb_channels(arState->out_channel_layout);

    // allocate data pointers array for arState->resampled_data and fill data
    // pointers and linesize accordingly
    ret = av_samples_alloc_array_and_samples(&arState->resampled_data, &arState->out_linesize, arState->out_nb_channels, arState->out_nb_samples, out_sample_fmt, 0);

    // check memory allocation for the resampled data was successful
    if (ret < 0)
    {
        printf("av_samples_alloc_array_and_samples() error: Could not allocate destination samples.\n");
        return -1;
    }

    if (arState->swr_ctx)
    {
        // do the actual audio data resampling
        int ret_num_samples = swr_convert(arState->swr_ctx, arState->resampled_data, arState->out_nb_samples, (const uint8_t **)decoded_audio_frame->data, decoded_audio_frame->nb_samples);
        //int ret_num_samples = swr_convert_frame(arState->swr_ctx, arState->resampled_data, arState->out_nb_samples, (const uint8_t **)decoded_audio_frame->data, decoded_audio_frame->nb_samples);

        // check audio conversion was successful
        if (ret_num_samples < 0)
        {
            printf("swr_convert_error.\n");
            return -1;
        }

        // get the required buffer size for the given audio parameters
        arState->resampled_data_size = av_samples_get_buffer_size(&arState->out_linesize, arState->out_nb_channels, ret_num_samples, out_sample_fmt, 1);

        // check audio buffer size
        if (arState->resampled_data_size < 0)
        {
            printf("av_samples_get_buffer_size error.\n");
            return -1;
        }
    }
    else
    {
        printf("swr_ctx null error.\n");
        return -1;
    }

    // copy the resampled data to the output buffer
    memcpy(out_buf, arState->resampled_data[0], arState->resampled_data_size);

    // flush the swr context
    int delayed = swr_convert(arState->swr_ctx, arState->resampled_data, arState->out_nb_samples, NULL, 0);

    if (arState->resampled_data)
    {
        av_freep(&arState->resampled_data[0]);
    }

    av_freep(&arState->resampled_data);
    arState->resampled_data = NULL;

    int ret_data_size = arState->resampled_data_size;

    return ret_data_size;
}



I also tried using the filter as shown here, but my output is just noise.


This is my filter code


int ResampleFrame(AVFrame *frame, uint8_t *out_buf)
{
    /* Push the decoded frame into the filtergraph */
    qint32 ret;
    ret = av_buffersrc_add_frame_flags(buffersrc_ctx1, frame, AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret < 0)
    {
        printf("ResampleFrame: Error adding frame to buffer\n");
        // Drop the input frame and signal zero output bytes
        av_frame_unref(frame);
        return 0;
    }

    //printf("resampling\n");
    AVFrame *resampled_frame = av_frame_alloc();

    /* Pull filtered frames from the filtergraph */
    ret = av_buffersink_get_frame(buffersink_ctx1, resampled_frame);

    /* Set the timestamp on the resampled frame */
    resampled_frame->best_effort_timestamp = resampled_frame->pts;

    if (ret < 0)
    {
        av_frame_unref(frame);
        av_frame_unref(resampled_frame);
        return 0;
    }

    int buffer_size = av_samples_get_buffer_size(NULL, 2, resampled_frame->nb_samples, AV_SAMPLE_FMT_S16, 1);

    memcpy(out_buf, resampled_frame->data, buffer_size);

    //av_frame_unref(frame);
    av_frame_unref(resampled_frame);
    return buffer_size;
}





QString filter_description1 = "aresample=48000,aformat=sample_fmts=s16:channel_layouts=stereo,asetnsamples=n=1024:p=0";

int InitAudioFilter(AVStream *inputStream)
{
    char args[512];
    int ret;
    const AVFilter *buffersrc = avfilter_get_by_name("abuffer");
    const AVFilter *buffersink = avfilter_get_by_name("abuffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs = avfilter_inout_alloc();
    filter_graph = avfilter_graph_alloc();

    const enum AVSampleFormat out_sample_fmts[] = {AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE};
    const int64_t out_channel_layouts[] = {AV_CH_LAYOUT_STEREO, -1};
    const int out_sample_rates[] = {48000, -1};

    snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%" PRIx64,
             inputStream->codec->time_base.num, inputStream->codec->time_base.den,
             inputStream->codec->sample_rate,
             av_get_sample_fmt_name(inputStream->codec->sample_fmt),
             inputStream->codec->channel_layout);

    ret = avfilter_graph_create_filter(&buffersrc_ctx1, buffersrc, "in", args, NULL, filter_graph);
    if (ret < 0)
    {
        printf("InitAudioFilter: Unable to create buffersrc\n");
        return -1;
    }

    ret = avfilter_graph_create_filter(&buffersink_ctx1, buffersink, "out", NULL, NULL, filter_graph);
    if (ret < 0)
    {
        printf("InitAudioFilter: Unable to create buffersink\n");
        return ret;
    }

    // set opt SAMPLE FORMATS
    ret = av_opt_set_int_list(buffersink_ctx1, "sample_fmts", out_sample_fmts, -1, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
    {
        printf("InitAudioFilter: Cannot set output sample format\n");
        return ret;
    }

    // set opt CHANNEL LAYOUTS
    ret = av_opt_set_int_list(buffersink_ctx1, "channel_layouts", out_channel_layouts, -1, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
    {
        printf("InitAudioFilter: Cannot set output channel layout\n");
        return ret;
    }

    // set opt OUT SAMPLE RATES
    ret = av_opt_set_int_list(buffersink_ctx1, "sample_rates", out_sample_rates, -1, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0)
    {
        printf("InitAudioFilter: Cannot set output sample rate\n");
        return ret;
    }

    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx1;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    /* Endpoints for the filter graph. */
    inputs->name = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx1;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_description1.toStdString().c_str(), &inputs, &outputs, NULL)) < 0)
    {
        printf("InitAudioFilter: Could not add the filter to graph\n");
    }

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
    {
        printf("InitAudioFilter: Could not configure the graph\n");
    }

    /* Print summary of the sink buffer
     * Note: args buffer is reused to store channel layout string */
    AVFilterLink *outlink = buffersink_ctx1->inputs[0];
    av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);

    QString str = args;
    printf("Output: srate:%dHz fmt:%s chlayout: %s\n", (int) outlink->sample_rate,
           av_get_sample_fmt_name((AVSampleFormat) outlink->format),
           str.toStdString().c_str());

    filterGraphInitialized_ = true;
    return ret; // the function is declared int, so return the last status
}



Since I don't have much experience with filters, or audio for that matter, I'm probably missing something here too, but I can't figure out what.


Thanks


-
Pipe video frames from ffmpeg to numpy array without loading whole movie into memory
2 May 2021, by marcman
I'm not sure whether what I'm asking is feasible or functional, but I'm experimenting with trying to load frames from a video in an ordered, but "on-demand," fashion.


Basically what I have now is to read the entire uncompressed video into a buffer by piping through stdout, e.g.:

import subprocess

import numpy as np

H, W = 1080, 1920            # video dimensions
video = '/path/to/video.mp4' # path to video

# ffmpeg command
command = ["ffmpeg",
           '-i', video,
           '-pix_fmt', 'rgb24',
           '-f', 'rawvideo',
           'pipe:1']

# run ffmpeg and load all frames into numpy array (num_frames, H, W, 3)
pipe = subprocess.run(command, stdout=subprocess.PIPE, bufsize=10**8)
video = np.frombuffer(pipe.stdout, dtype=np.uint8).reshape(-1, H, W, 3)

# or alternatively load individual frames in a loop
nb_img = H * W * 3  # H * W * 3 channels * 1 byte/channel
for i in range(0, len(pipe.stdout), nb_img):
    img = np.frombuffer(pipe.stdout, dtype=np.uint8, count=nb_img, offset=i).reshape(H, W, 3)



I'm wondering if it's possible to do this same process in Python, but without first loading the entire video into memory. In my mind, I'm picturing something like the following (a rough sketch follows the list):


- open a buffer
- seek to memory locations on demand
- load frames into numpy arrays
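A minimal sketch of what I have in mind; it assumes the same H, W and ffmpeg command as above, and it only streams sequentially (true random seeking would still need something like ffmpeg's -ss option):

import subprocess

import numpy as np

H, W = 1080, 1920
nb_img = H * W * 3  # bytes per rgb24 frame

command = ["ffmpeg",
           "-i", "/path/to/video.mp4",
           "-pix_fmt", "rgb24",
           "-f", "rawvideo",
           "pipe:1"]

# Popen keeps the pipe open, so frames can be consumed one at a time
# instead of buffering the whole movie in memory.
proc = subprocess.Popen(command, stdout=subprocess.PIPE)
while True:
    raw = proc.stdout.read(nb_img)  # exactly one frame's worth of bytes
    if len(raw) < nb_img:           # end of stream
        break
    frame = np.frombuffer(raw, dtype=np.uint8).reshape(H, W, 3)
    # ... process frame here ...
proc.stdout.close()
proc.wait()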








I know there are other libraries, like OpenCV for example, that enable this same sort of behavior, but I'm wondering:

- Is it possible to do this operation efficiently using this sort of ffmpeg-pipe-to-numpy-array approach?
- Does this defeat the speed-up benefit of using ffmpeg directly, rather than seeking/loading through OpenCV or first extracting frames and then loading individual files?






-
Encoding images into a movie file
5 April 2014, by RuAware
I am trying to save JPGs into a movie. I have tried jcodec, and although my S3 plays it fine, other devices do not, including VLC and Windows Media.
I have just spent most of the day playing with MediaCodec; although the required SDK level is high, it will help people on Jelly Bean and above. But I cannot work out how to get the files to the encoder and then write the file.
Ideally I want to support down to SDK 9/8.
Has anyone got any code they can share, either to get MediaCodec to work or another option? If you say ffmpeg, I'd love to, but my JNI knowledge is non-existent and I will need a very good guide.
Code for MediaCodec so far:
public class EncodeAndMux extends AsyncTask {
    private static int bitRate = 2000000;
    private static int MAX_INPUT = 100000;
    private static String mimeType = "video/avc";
    private int frameRate = 15;
    private int colorFormat;
    private int stride = 1;
    private int sliceHeight = 2;
    private MediaCodec encoder = null;
    private MediaFormat inputFormat;
    private MediaCodecInfo codecInfo = null;
    private MediaMuxer muxer;
    private boolean mMuxerStarted = false;
    private int mTrackIndex = 0;
    private long presentationTime = 0;
    private Paint bmpPaint;
    private static int WAITTIME = 10000;
    private static String TAG = "ENCODE";
    private ArrayList<String> mFilePaths;
    private String mPath;
    private EncodeListener mListener;
    private int width = 320;
    private int height = 240;
    private double mSpeed = 1;

    public EncodeAndMux(ArrayList<String> filePaths, String savePath) {
        mFilePaths = filePaths;
        mPath = savePath;

        // Create paint to draw BMP
        bmpPaint = new Paint();
        bmpPaint.setAntiAlias(true);
        bmpPaint.setFilterBitmap(true);
        bmpPaint.setDither(true);
    }

    public void setListner(EncodeListener listener) {
        mListener = listener;
    }

    // set the speed, how many frames a second
    public void setSpead(int speed) {
        mSpeed = speed;
    }

    public double getSpeed() {
        return mSpeed;
    }

    private long computePresentationTime(int frameIndex) {
        final long ONE_SECOND = 1000000;
        return (long) (frameIndex * (ONE_SECOND / mSpeed));
    }

    public interface EncodeListener {
        public void finished();
        public void errored();
    }

    @TargetApi(Build.VERSION_CODES.JELLY_BEAN_MR2)
    @Override
    protected Boolean doInBackground(Integer... params) {
        try {
            muxer = new MediaMuxer(mPath, OutputFormat.MUXER_OUTPUT_MPEG_4);
        } catch (Exception e) {
            e.printStackTrace();
        }

        // Find a codec that supports the mime type
        int numCodecs = MediaCodecList.getCodecCount();
        for (int i = 0; i < numCodecs && codecInfo == null; i++) {
            MediaCodecInfo info = MediaCodecList.getCodecInfoAt(i);
            if (!info.isEncoder()) {
                continue;
            }
            String[] types = info.getSupportedTypes();
            boolean found = false;
            for (int j = 0; j < types.length && !found; j++) {
                if (types[j].equals(mimeType))
                    found = true;
            }
            if (!found)
                continue;
            codecInfo = info;
        }

        for (int i = 0; i < MediaCodecList.getCodecCount(); i++) {
            MediaCodecInfo info = MediaCodecList.getCodecInfoAt(i);
            if (!info.isEncoder()) {
                continue;
            }
            String[] types = info.getSupportedTypes();
            for (int j = 0; j < types.length; ++j) {
                if (!types[j].equals(mimeType))
                    continue;
                MediaCodecInfo.CodecCapabilities caps = info.getCapabilitiesForType(types[j]);
                for (int k = 0; k < caps.profileLevels.length; k++) {
                    if (caps.profileLevels[k].profile == MediaCodecInfo.CodecProfileLevel.AVCProfileHigh && caps.profileLevels[k].level == MediaCodecInfo.CodecProfileLevel.AVCLevel4) {
                        codecInfo = info;
                    }
                }
            }
        }
        Log.d(TAG, "Found " + codecInfo.getName() + " supporting " + mimeType);

        MediaCodecInfo.CodecCapabilities capabilities = codecInfo.getCapabilitiesForType(mimeType);
        for (int i = 0; i < capabilities.colorFormats.length && colorFormat == 0; i++) {
            int format = capabilities.colorFormats[i];
            switch (format) {
                case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420Planar:
                case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420PackedPlanar:
                case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420SemiPlanar:
                case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420PackedSemiPlanar:
                case MediaCodecInfo.CodecCapabilities.COLOR_TI_FormatYUV420PackedSemiPlanar:
                    colorFormat = format;
                    break;
            }
        }
        Log.d(TAG, "Using color format " + colorFormat);

        // Determine width, height and slice sizes
        if (codecInfo.getName().equals("OMX.TI.DUCATI1.VIDEO.H264E")) {
            // This codec doesn't support a width not a multiple of 16,
            // so round down.
            width &= ~15;
        }
        stride = width;
        sliceHeight = height;
        if (codecInfo.getName().startsWith("OMX.Nvidia.")) {
            stride = (stride + 15) / 16 * 16;
            sliceHeight = (sliceHeight + 15) / 16 * 16;
        }

        inputFormat = MediaFormat.createVideoFormat(mimeType, width, height);
        inputFormat.setInteger(MediaFormat.KEY_BIT_RATE, bitRate);
        inputFormat.setInteger(MediaFormat.KEY_FRAME_RATE, frameRate);
        inputFormat.setInteger(MediaFormat.KEY_COLOR_FORMAT, colorFormat);
        inputFormat.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 5);
        // inputFormat.setInteger("stride", stride);
        // inputFormat.setInteger("slice-height", sliceHeight);
        inputFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, MAX_INPUT);

        encoder = MediaCodec.createByCodecName(codecInfo.getName());
        encoder.configure(inputFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        encoder.start();

        ByteBuffer[] inputBuffers = encoder.getInputBuffers();
        ByteBuffer[] outputBuffers = encoder.getOutputBuffers();
        int inputBufferIndex = -1, outputBufferIndex = -1;
        BufferInfo info = new BufferInfo();

        for (int i = 0; i < mFilePaths.size(); i++) {
            // use decode sample to calculate inSample size and then resize
            Bitmap bitmapIn = Images.decodeSampledBitmapFromPath(mFilePaths.get(i), width, height);

            // Create blank bitmap
            Bitmap bitmap = Bitmap.createBitmap(width, height, Config.ARGB_8888);

            // Center scaled image
            Canvas canvas = new Canvas(bitmap);
            canvas.drawBitmap(bitmapIn, (bitmap.getWidth() / 2) - (bitmapIn.getWidth() / 2), (bitmap.getHeight() / 2) - (bitmapIn.getHeight() / 2), bmpPaint);
            Log.d(TAG, "Bitmap width: " + bitmapIn.getWidth() + " height: " + bitmapIn.getHeight() + " WIDTH: " + width + " HEIGHT: " + height);

            byte[] dat = getNV12(width, height, bitmap);
            bitmap.recycle();

            // Exception occurred on this below line in Emulator, LINE No. 182//**
            inputBufferIndex = encoder.dequeueInputBuffer(WAITTIME);
            Log.i("DAT", "Size= " + dat.length);

            if (inputBufferIndex >= 0) {
                int samplesiz = dat.length;
                inputBuffers[inputBufferIndex].put(dat);
                presentationTime = computePresentationTime(i);
                if (i == mFilePaths.size() - 1) {
                    encoder.queueInputBuffer(inputBufferIndex, 0, samplesiz, presentationTime, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
                    Log.i(TAG, "Last Frame");
                } else {
                    encoder.queueInputBuffer(inputBufferIndex, 0, samplesiz, presentationTime, 0);
                }

                while (true) {
                    outputBufferIndex = encoder.dequeueOutputBuffer(info, WAITTIME);
                    Log.i("BATA", "outputBufferIndex=" + outputBufferIndex);
                    if (outputBufferIndex >= 0) {
                        ByteBuffer encodedData = outputBuffers[outputBufferIndex];
                        if (encodedData == null) {
                            throw new RuntimeException("encoderOutputBuffer " + outputBufferIndex + " was null");
                        }
                        if ((info.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
                            // The codec config data was pulled out and fed to the muxer when we got
                            // the INFO_OUTPUT_FORMAT_CHANGED status. Ignore it.
                            Log.d(TAG, "ignoring BUFFER_FLAG_CODEC_CONFIG");
                            info.size = 0;
                        }
                        if (info.size != 0) {
                            if (!mMuxerStarted) {
                                throw new RuntimeException("muxer hasn't started");
                            }
                            // adjust the ByteBuffer values to match BufferInfo (not needed?)
                            encodedData.position(info.offset);
                            encodedData.limit(info.offset + info.size);
                            muxer.writeSampleData(mTrackIndex, encodedData, info);
                            Log.d(TAG, "sent " + info.size + " bytes to muxer");
                        }
                        encoder.releaseOutputBuffer(outputBufferIndex, false);
                        inputBuffers[inputBufferIndex].clear();
                        outputBuffers[outputBufferIndex].clear();
                        if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
                            break; // out of while
                        }
                    } else if (outputBufferIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
                        // Subsequent data will conform to new format.
                        MediaFormat opmediaformat = encoder.getOutputFormat();
                        if (!mMuxerStarted) {
                            mTrackIndex = muxer.addTrack(opmediaformat);
                            muxer.start();
                            mMuxerStarted = true;
                        }
                        Log.i(TAG, "op_buf_format_changed: " + opmediaformat);
                    } else if (outputBufferIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
                        outputBuffers = encoder.getOutputBuffers();
                        Log.d(TAG, "Output Buffer changed " + outputBuffers);
                    } else if (outputBufferIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
                        // No Data, break out
                        break;
                    } else {
                        // Unexpected State, ignore it
                        Log.d(TAG, "Unexpected State " + outputBufferIndex);
                    }
                }
            }
        }

        if (encoder != null) {
            encoder.flush();
            encoder.stop();
            encoder.release();
            encoder = null;
        }

        if (muxer != null) {
            muxer.stop();
            muxer.release();
            muxer = null;
        }

        return true;
    }

    @Override
    protected void onPostExecute(Boolean result) {
        if (result) {
            if (mListener != null)
                mListener.finished();
        } else {
            if (mListener != null)
                mListener.errored();
        }
        super.onPostExecute(result);
    }

    byte[] getNV12(int inputWidth, int inputHeight, Bitmap scaled) {
        int[] argb = new int[inputWidth * inputHeight];
        scaled.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
        byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
        encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
        scaled.recycle();
        return yuv;
    }

    void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
        final int frameSize = width * height;
        int yIndex = 0;
        int uvIndex = frameSize;
        int a, R, G, B, Y, U, V;
        int index = 0;
        for (int j = 0; j < height; j++) {
            for (int i = 0; i < width; i++) {
                a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
                R = (argb[index] & 0xff0000) >> 16;
                G = (argb[index] & 0xff00) >> 8;
                B = (argb[index] & 0xff) >> 0;

                // well known RGB to YVU algorithm
                Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
                V = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
                U = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;

                yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
                if (j % 2 == 0 && index % 2 == 0) {
                    yuv420sp[uvIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V));
                    yuv420sp[uvIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U));
                }
                index++;
            }
        }
    }
}
This has now been tested on 4 of my devices and works fine. Is there a way to:
1/ Calculate MAX_INPUT (too high and it crashes on the N7 II; I don't want that happening once released; see the rough check below)
2/ Offer an API 16 solution?
3/ Do I need stride and slice height?
Thanks
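As a footnote to question 1, a back-of-the-envelope check using the values from the code above; a quick Python sketch (the exact bound MediaCodec requires is device-dependent, so this is only an estimate):

# One YUV420 frame, as built by getNV12(), is width * height bytes of luma
# plus width * height / 2 bytes of chroma.
width, height = 320, 240
frame_bytes = width * height * 3 // 2
print(frame_bytes)            # 115200
print(frame_bytes > 100000)   # True: larger than the MAX_INPUT used above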