
Other articles (104)
-
MediaSPIP 0.1 Beta version
25 April 2011. MediaSPIP 0.1 beta is the first version of MediaSPIP declared "usable".
The zip file provided here only contains the sources of MediaSPIP in its standalone version.
To get a working installation, you must manually install all software dependencies on the server.
If you want to use this archive for an installation in "farm mode", you will also need to carry out other manual (...) -
Multilang: improving the interface for multilingual blocks
18 February 2011. Multilang is an additional plugin that is not enabled by default when MediaSPIP is initialised.
Once activated, a preconfiguration is applied automatically by MediaSPIP init so that the new feature is immediately operational. No configuration step is therefore required for this. -
Websites made with MediaSPIP
2 May 2011. This page lists some websites based on MediaSPIP.
On other sites (12725)
-
C - Transcoding to UDP using FFmpeg?
30 April 2013, by golmschenk. I'm trying to use the FFmpeg libraries to take an existing video file and stream it over a UDP connection. Specifically, I've been looking at the muxing.c and demuxing.c example files in the doc/examples directory of the FFmpeg source. The demuxing file shows how to split an input video into its video and audio streams. The muxing file generates dummy data and can already write to a UDP connection, as I would like. I've begun combining the two. Below is my code, which is essentially a copy of the muxing file with some parts replaced or appended with parts of the demuxing file. Unfortunately I'm running into plenty of complications with this approach. Is there an existing source code example that does the transcoding I'm looking for? Or at least a tutorial on how one might create this? If not, a few pointers directing my work in combining the two files would be helpful. Specifically, I'm getting the error:
[NULL @ 0x23b4040] Unable to find a suitable output format for 'udp://localhost:7777'
Could not deduce output format from file extension: using MPEG.
Output #0, mpeg, to 'udp://localhost:7777':

This happens even though the muxing example already accepts a UDP target. Any suggestions? Thank you very much!
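For reference, the error above typically appears because libavformat cannot guess a muxer from a udp:// URL, since there is no file extension to inspect. A minimal sketch of naming the container explicitly, assuming an MPEG-TS stream is acceptable for the UDP target (open_udp_output is an illustrative name, not part of the question's code):

#include <libavformat/avformat.h>

/* Sketch only: force a concrete muxer instead of letting libavformat
 * guess one from the URL. "mpegts" is an assumption; any streamable
 * container the receiver understands would do. */
static AVFormatContext *open_udp_output(const char *url)
{
    AVFormatContext *oc = NULL;
    av_register_all();
    if (avformat_alloc_output_context2(&oc, NULL, "mpegts", url) < 0 || !oc)
        return NULL;
    return oc;
}

Called with "udp://localhost:7777", this returns a context whose output format is already set, so the "Could not deduce output format" fallback never triggers. The full listing from the question follows.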
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <libavutil/mathematics.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
/* 200 seconds stream duration */
#define STREAM_DURATION 200.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
//FROM DE
static AVFormatContext *fmt_ctx = NULL;
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
static AVStream *video_stream = NULL, *audio_stream = NULL;
static const char *src_filename = NULL;
static const char *video_dst_filename = NULL;
static const char *audio_dst_filename = NULL;
static FILE *video_dst_file = NULL;
static FILE *audio_dst_file = NULL;
static uint8_t *video_dst_data[4] = {NULL};
static int video_dst_linesize[4];
static int video_dst_bufsize;
static uint8_t **audio_dst_data = NULL;
static int audio_dst_linesize;
static int audio_dst_bufsize;
static int video_stream_idx = -1, audio_stream_idx = -1;
static AVFrame *frame = NULL;
static AVPacket pkt;
static int video_frame_count = 0;
static int audio_frame_count = 0;
//END DE
static int sws_flags = SWS_BICUBIC;
/**************************************************************/
/* audio output */
static float t, tincr, tincr2;
static int16_t *samples;
static int audio_input_frame_size;
/* Add an output stream. */
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
enum AVCodecID codec_id)
{
AVCodecContext *c;
AVStream *st;
/* find the encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n",
avcodec_get_name(codec_id));
exit(1);
}
st = avformat_new_stream(oc, *codec);
if (!st) {
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
st->id = oc->nb_streams-1;
c = st->codec;
switch ((*codec)->type) {
case AVMEDIA_TYPE_AUDIO:
st->id = 1;
c->sample_fmt = AV_SAMPLE_FMT_S16;
c->bit_rate = 64000;
c->sample_rate = 44100;
c->channels = 2;
break;
case AVMEDIA_TYPE_VIDEO:
c->codec_id = codec_id;
c->bit_rate = 400000;
/* Resolution must be a multiple of two. */
c->width = 352;
c->height = 288;
/* timebase: This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
* identical to 1. */
c->time_base.den = STREAM_FRAME_RATE;
c->time_base.num = 1;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B frames */
c->max_b_frames = 2;
}
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
* This does not happen with normal video, it just happens here as
* the motion of the chroma plane does not match the luma plane. */
c->mb_decision = 2;
}
break;
default:
break;
}
/* Some formats want stream headers to be separate. */
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
return st;
}
/**************************************************************/
/* audio output */
static float t, tincr, tincr2;
static int16_t *samples;
static int audio_input_frame_size;
static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
AVCodecContext *c;
int ret;
c = st->codec;
/* open it */
ret = avcodec_open2(c, codec, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
exit(1);
}
/* init signal generator */
t = 0;
tincr = 2 * M_PI * 110.0 / c->sample_rate;
/* increment frequency by 110 Hz per second */
tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
audio_input_frame_size = 10000;
else
audio_input_frame_size = c->frame_size;
samples = av_malloc(audio_input_frame_size *
av_get_bytes_per_sample(c->sample_fmt) *
c->channels);
if (!samples) {
fprintf(stderr, "Could not allocate audio samples buffer\n");
exit(1);
}
}
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
* 'nb_channels' channels. */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
int j, i, v;
int16_t *q;
q = samples;
for (j = 0; j < frame_size; j++) {
v = (int)(sin(t) * 10000);
for (i = 0; i < nb_channels; i++)
*q++ = v;
t += tincr;
tincr += tincr2;
}
}
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
AVCodecContext *c;
AVPacket pkt = { 0 }; // data and size must be 0;
AVFrame *frame = avcodec_alloc_frame();
int got_packet, ret;
av_init_packet(&pkt);
c = st->codec;
get_audio_frame(samples, audio_input_frame_size, c->channels);
frame->nb_samples = audio_input_frame_size;
avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
(uint8_t *)samples,
audio_input_frame_size *
av_get_bytes_per_sample(c->sample_fmt) *
c->channels, 1);
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
exit(1);
}
if (!got_packet)
return;
pkt.stream_index = st->index;
/* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(oc, &pkt);
if (ret != 0) {
fprintf(stderr, "Error while writing audio frame: %s\n",
av_err2str(ret));
exit(1);
}
avcodec_free_frame(&frame);
}
static void close_audio(AVFormatContext *oc, AVStream *st)
{
avcodec_close(st->codec);
av_free(samples);
}
/**************************************************************/
/* video output */
static AVFrame *frame;
static AVPicture src_picture, dst_picture;
static int frame_count;
static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
{
int ret;
AVCodecContext *c = st->codec;
/* open the codec */
ret = avcodec_open2(c, codec, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
exit(1);
}
/* allocate and init a re-usable frame */
frame = avcodec_alloc_frame();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
/* Allocate the encoded raw picture. */
ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
if (ret < 0) {
fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
exit(1);
}
/* If the output format is not YUV420P, then a temporary YUV420P
* picture is needed too. It is then converted to the required
* output format. */
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
if (ret < 0) {
fprintf(stderr, "Could not allocate temporary picture: %s\n",
av_err2str(ret));
exit(1);
}
}
/* copy data and linesize picture pointers to frame */
*((AVPicture *)frame) = dst_picture;
}
/* Prepare a dummy image. */
static void fill_yuv_image(AVPicture *pict, int frame_index,
int width, int height)
{
int x, y, i;
i = frame_index;
/* Y */
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
}
}
}
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
int ret;
static struct SwsContext *sws_ctx;
AVCodecContext *c = st->codec;
if (frame_count >= STREAM_NB_FRAMES) {
/* No more frames to compress. The codec has a latency of a few
* frames if using B-frames, so we get the last frames by
* passing the same picture again. */
} else {
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
/* as we only generate a YUV420P picture, we must convert it
* to the codec pixel format if needed */
if (!sws_ctx) {
sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
c->width, c->height, c->pix_fmt,
sws_flags, NULL, NULL, NULL);
if (!sws_ctx) {
fprintf(stderr,
"Could not initialize the conversion context\n");
exit(1);
}
}
fill_yuv_image(&src_picture, frame_count, c->width, c->height);
sws_scale(sws_ctx,
(const uint8_t * const *)src_picture.data, src_picture.linesize,
0, c->height, dst_picture.data, dst_picture.linesize);
} else {
fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
}
}
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* Raw video case - directly store the picture in the packet */
AVPacket pkt;
av_init_packet(&pkt);
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
pkt.data = dst_picture.data[0];
pkt.size = sizeof(AVPicture);
ret = av_interleaved_write_frame(oc, &pkt);
} else {
AVPacket pkt = { 0 };
int got_packet;
av_init_packet(&pkt);
/* encode the image */
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
exit(1);
}
/* If size is zero, it means the image was buffered. */
if (!ret && got_packet && pkt.size) {
pkt.stream_index = st->index;
/* Write the compressed frame to the media file. */
ret = av_interleaved_write_frame(oc, &pkt);
} else {
ret = 0;
}
}
if (ret != 0) {
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
exit(1);
}
frame_count++;
}
static void close_video(AVFormatContext *oc, AVStream *st)
{
avcodec_close(st->codec);
av_free(src_picture.data[0]);
av_free(dst_picture.data[0]);
av_free(frame);
}
static int open_codec_context(int *stream_idx,
AVFormatContext *fmt_ctx, enum AVMediaType type)
{
int ret;
AVStream *st;
AVCodecContext *dec_ctx = NULL;
AVCodec *dec = NULL;
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
if (ret < 0) {
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
av_get_media_type_string(type), src_filename);
return ret;
} else {
*stream_idx = ret;
st = fmt_ctx->streams[*stream_idx];
/* find decoder for the stream */
dec_ctx = st->codec;
dec = avcodec_find_decoder(dec_ctx->codec_id);
if (!dec) {
fprintf(stderr, "Failed to find %s codec\n",
av_get_media_type_string(type));
return ret;
}
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
fprintf(stderr, "Failed to open %s codec\n",
av_get_media_type_string(type));
return ret;
}
}
return 0;
}
/**************************************************************/
/* media file output */
int main(int argc, char **argv)
{
const char *filename;
AVOutputFormat *fmt;
AVFormatContext *oc;
AVStream *audio_st, *video_st;
AVCodec *audio_codec, *video_codec;
double audio_pts, video_pts;
int ret = 0, got_frame;
/* Initialize libavcodec, and register all codecs and formats. */
av_register_all();
if (argc != 3) {
printf("usage: %s input_file output_file\n"
"\n", argv[0]);
return 1;
}
src_filename = argv[1];
filename = argv[2];
/* allocate the output media context */
avformat_alloc_output_context2(&oc, NULL, NULL, filename);
if (!oc) {
printf("Could not deduce output format from file extension: using MPEG.\n");
avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
}
if (!oc) {
return 1;
}
fmt = oc->oformat;
/* Add the audio and video streams using the default format codecs
* and initialize the codecs. */
video_stream = NULL;
audio_stream = NULL;
//FROM DE
/* open input file, and allocate format context */
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
fprintf(stderr, "Could not open source file %s\n", src_filename);
exit(1);
}
/* retrieve stream information */
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
fprintf(stderr, "Could not find stream information\n");
exit(1);
}
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
video_stream = fmt_ctx->streams[video_stream_idx];
video_dec_ctx = video_stream->codec;
/* allocate image where the decoded image will be put */
ret = av_image_alloc(video_dst_data, video_dst_linesize,
video_dec_ctx->width, video_dec_ctx->height,
video_dec_ctx->pix_fmt, 1);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw video buffer\n");
goto end;
}
video_dst_bufsize = ret;
}
if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
int nb_planes;
audio_stream = fmt_ctx->streams[audio_stream_idx];
audio_dec_ctx = audio_stream->codec;
nb_planes = av_sample_fmt_is_planar(audio_dec_ctx->sample_fmt) ?
audio_dec_ctx->channels : 1;
audio_dst_data = av_mallocz(sizeof(uint8_t *) * nb_planes);
if (!audio_dst_data) {
fprintf(stderr, "Could not allocate audio data buffers\n");
ret = AVERROR(ENOMEM);
goto end;
}
}
//END DE
/* Now that all the parameters are set, we can open the audio and
* video codecs and allocate the necessary encode buffers. */
if (video_stream)
open_video(oc, video_codec, video_stream);
if (audio_stream)
open_audio(oc, audio_codec, audio_stream);
av_dump_format(oc, 0, filename, 1);
/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open '%s': %s\n", filename,
av_err2str(ret));
return 1;
}
}
/* Write the stream header, if any. */
ret = avformat_write_header(oc, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file: %s\n",
av_err2str(ret));
return 1;
}
if (frame)
frame->pts = 0;
for (;;) {
/* Compute current audio and video time. */
if (audio_stream)
audio_pts = (double)audio_stream->pts.val * audio_stream->time_base.num / audio_stream->time_base.den;
else
audio_pts = 0.0;
if (video_stream)
video_pts = (double)video_stream->pts.val * video_stream->time_base.num /
video_stream->time_base.den;
else
video_pts = 0.0;
if ((!audio_stream || audio_pts >= STREAM_DURATION) &&
(!video_stream || video_pts >= STREAM_DURATION))
break;
/* write interleaved audio and video frames */
if (!video_stream || (video_stream && audio_st && audio_pts < video_pts)) {
write_audio_frame(oc, audio_stream);
} else {
write_video_frame(oc, video_stream);
frame->pts += av_rescale_q(1, video_stream->codec->time_base, video_stream->time_base);
}
}
/* Write the trailer, if any. The trailer must be written before you
* close the CodecContexts open when you wrote the header; otherwise
* av_write_trailer() may try to use memory that was freed on
* av_codec_close(). */
av_write_trailer(oc);
/* Close each codec. */
if (video_st)
close_video(oc, video_st);
if (audio_st)
close_audio(oc, audio_st);
if (!(fmt->flags & AVFMT_NOFILE))
/* Close the output file. */
avio_close(oc->pb);
/* free the stream */
avformat_free_context(oc);
end:
if (video_dec_ctx)
avcodec_close(video_dec_ctx);
if (audio_dec_ctx)
avcodec_close(audio_dec_ctx);
avformat_close_input(&fmt_ctx);
if (video_dst_file)
fclose(video_dst_file);
if (audio_dst_file)
fclose(audio_dst_file);
av_free(frame);
av_free(video_dst_data[0]);
av_free(audio_dst_data);
return 0;
}
-
Video created using H263 codec and FFmpeg does not play on Android device [closed]
21 March 2013, by susheel tickoo. I have created a video using FFmpeg and the H263 codec, but when I play the video on an Android device the player is unable to play it. I have used both the .mp4 and .3gp extensions.
void generate(JNIEnv *pEnv, jobject pObj,jobjectArray stringArray,int famerate,int width,int height,jstring videoFilename)
{
AVCodec *codec;
AVCodecContext *c= NULL;
//int framesnum=5;
int i,looper, out_size, size, x, y,encodecbuffsize,j;
__android_log_write(ANDROID_LOG_INFO, "record","************into generate************");
int imagecount= (*pEnv)->GetArrayLength(pEnv, stringArray);
__android_log_write(ANDROID_LOG_INFO, "record","************got magecount************");
int retval=-10;
FILE *f;
AVFrame *picture,*encoded_avframe;
uint8_t *encodedbuffer;
jbyte *raw_record;
char logdatadata[100];
int returnvalue = -1,numBytes =-1;
const char *gVideoFileName = (char *)(*pEnv)->GetStringUTFChars(pEnv, videoFilename, NULL);
__android_log_write(ANDROID_LOG_INFO, "record","************got video file name************");
/* find the video encoder */
codec = avcodec_find_encoder(CODEC_ID_H264);
if (!codec) {
__android_log_write(ANDROID_LOG_INFO, "record","codec not found");
exit(1);
}
c= avcodec_alloc_context();
/*c->bit_rate = 400000;
c->width = width;
c->height = height;
c->time_base= (AVRational){1,famerate};
c->gop_size = 12; // emit one intra frame every ten frames
c->max_b_frames=0;
c->pix_fmt = PIX_FMT_YUV420P;
c->codec_type = AVMEDIA_TYPE_VIDEO;
c->codec_id = CODEC_ID_H263;*/
c->bit_rate = 400000;
// resolution must be a multiple of two
c->width = 176;
c->height = 144;
c->pix_fmt = PIX_FMT_YUV420P;
c->qcompress = 0.0;
c->qblur = 0.0;
c->gop_size = 20; //or 1
c->sub_id = 1;
c->workaround_bugs = FF_BUG_AUTODETECT;
//pFFmpeg->c->time_base = (AVRational){1,25};
c->time_base.num = 1;
c->time_base.den = famerate;
c->max_b_frames = 0; //pas de B frame en H263
// c->opaque = opaque;
c->dct_algo = FF_DCT_AUTO;
c->idct_algo = FF_IDCT_AUTO;
//lc->rtp_mode = 0;
c->rtp_payload_size = 1000;
c->rtp_callback = 0; // ffmpeg_rtp_callback;
c->flags |= CODEC_FLAG_QSCALE;
c->mb_decision = FF_MB_DECISION_RD;
c->thread_count = 1;
#define DEFAULT_RATE (16 * 8 * 1024)
c->rc_min_rate = DEFAULT_RATE;
c->rc_max_rate = DEFAULT_RATE;
c->rc_buffer_size = DEFAULT_RATE * 64;
c->bit_rate = DEFAULT_RATE;
sprintf(logdatadata, "------width from c ---- = %d",width);
__android_log_write(ANDROID_LOG_INFO, "record",logdatadata);
sprintf(logdatadata, "------height from c ---- = %d",height);
__android_log_write(ANDROID_LOG_INFO, "record",logdatadata);
__android_log_write(ANDROID_LOG_INFO, "record","************Found codec and now opening it************");
/* open it */
retval = avcodec_open(c, codec);
if ( retval < 0)
{
sprintf(logdatadata, "------avcodec_open ---- retval = %d",retval);
__android_log_write(ANDROID_LOG_INFO, "record",logdatadata);
__android_log_write(ANDROID_LOG_INFO, "record","could not open codec");
exit(1);
}
__android_log_write(ANDROID_LOG_INFO, "record","statement 5");
f = fopen(gVideoFileName, "ab");
if (!f) {
__android_log_write(ANDROID_LOG_INFO, "record","could not open video file");
exit(1);
}
__android_log_write(ANDROID_LOG_INFO, "record", "***************Allocating encodedbuffer*********\n");
encodecbuffsize = avpicture_get_size(PIX_FMT_RGB24, c->width, c->height);
sprintf(logdatadata, "encodecbuffsize = %d",encodecbuffsize);
__android_log_write(ANDROID_LOG_INFO, "record",logdatadata);
encodedbuffer = malloc(encodecbuffsize);
jclass cls = (*pEnv)->FindClass(pEnv, "com/canvasm/mediclinic/VideoGenerator");
jmethodID mid = (*pEnv)->GetMethodID(pEnv, cls, "videoProgress", "(Ljava/lang/String;)Ljava/lang/String;");
jmethodID mid_delete = (*pEnv)->GetMethodID(pEnv, cls, "deleteTempFile", "(Ljava/lang/String;)Ljava/lang/String;");
if (mid == 0)
return;
__android_log_write(ANDROID_LOG_INFO, "native","got method id");
for(i=0;i<=imagecount;i++) {
jboolean isCp;
int progress = 0;
float temp;
jstring string;
if(i==imagecount)
string = (jstring) (*pEnv)->GetObjectArrayElement(pEnv, stringArray, imagecount-1);
else
string = (jstring) (*pEnv)->GetObjectArrayElement(pEnv, stringArray, i);
const char *rawString = (*pEnv)->GetStringUTFChars(pEnv, string, &isCp);
__android_log_write(ANDROID_LOG_INFO, "record",rawString);
picture = OpenImage(rawString,width,height);
//WriteJPEG(c,picture,i);
// encode video
memset(encodedbuffer,0,encodecbuffsize);
//do{
for(looper=0;looper<5;looper++)
{
memset(encodedbuffer,0,encodecbuffsize);
out_size = avcodec_encode_video(c, encodedbuffer, encodecbuffsize, picture);
sprintf(logdatadata, "avcodec_encode_video ----- out_size = %d \n",out_size );
__android_log_write(ANDROID_LOG_INFO, "record",logdatadata);
if(out_size>0)
break;
}
__android_log_write(ANDROID_LOG_INFO, "record","*************Start looping for same image*******");
returnvalue = fwrite(encodedbuffer, 1, out_size, f);
sprintf(logdatadata, "fwrite ----- returnvalue = %d \n",returnvalue );
__android_log_write(ANDROID_LOG_INFO, "record",logdatadata);
__android_log_write(ANDROID_LOG_INFO, "record","*************End looping for same image*******");
// publishing progress
progress = ((i*100)/(imagecount+1))+15;//+1 is for last frame duplicated entry
if(progress<20 )
progress =20;
if(progress>=95 )
progress =95;
sprintf(logdatadata, "%d",progress );
jstring jstrBuf = (*pEnv)->NewStringUTF(pEnv, logdatadata);
(*pEnv)->CallObjectMethod(pEnv, pObj, mid,jstrBuf);
if(i>0)
(*pEnv)->CallObjectMethod(pEnv, pObj, mid_delete,string);
}
/* get the delayed frames */
for(; out_size; i++) {
fflush(stdout);
out_size = avcodec_encode_video(c, encodedbuffer, encodecbuffsize, NULL);
fwrite(encodedbuffer, 20, out_size, f);
}
/* add sequence end code to have a real mpeg file */
encodedbuffer[0] = 0x00;
encodedbuffer[1] = 0x00;
encodedbuffer[2] = 0x01;
encodedbuffer[3] = 0xb7;
fwrite(encodedbuffer, 1, 4, f);
fclose(f);
free(encodedbuffer);
avcodec_close(c);
av_free(c);
__android_log_write(ANDROID_LOG_INFO, "record","Video created ");
// last updation of 100%
sprintf(logdatadata, "%d",100 );
jstring jstrBuf = (*pEnv)->NewStringUTF(pEnv, logdatadata);
(*pEnv)->CallObjectMethod(pEnv, pObj, mid,jstrBuf);
}
AVFrame* OpenImage(const char* imageFileName,int w,int h)
{
AVFrame *pFrame;
AVCodec *pCodec ;
AVFormatContext *pFormatCtx;
AVCodecContext *pCodecCtx;
uint8_t *buffer;
int frameFinished,framesNumber = 0,retval = -1,numBytes=0;
AVPacket packet;
char logdatadata[100];
//__android_log_write(ANDROID_LOG_INFO, "OpenImage",imageFileName);
if(av_open_input_file(&pFormatCtx, imageFileName, NULL, 0, NULL)!=0)
//if(avformat_open_input(&pFormatCtx,imageFileName,NULL,NULL)!=0)
{
__android_log_write(ANDROID_LOG_INFO, "record",
"Can't open image file ");
return NULL;
}
pCodecCtx = pFormatCtx->streams[0]->codec;
pCodecCtx->width = w;
pCodecCtx->height = h;
pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
// Find the decoder for the video stream
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (!pCodec)
{
__android_log_write(ANDROID_LOG_INFO, "record",
"Can't open image file ");
return NULL;
}
pFrame = avcodec_alloc_frame();
numBytes = avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
sprintf(logdatadata, "numBytes = %d",numBytes);
__android_log_write(ANDROID_LOG_INFO, "record",logdatadata);
retval = avpicture_fill((AVPicture *) pFrame, buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
// Open codec
if(avcodec_open(pCodecCtx, pCodec)<0)
{
__android_log_write(ANDROID_LOG_INFO, "record","Could not open codec");
return NULL;
}
if (!pFrame)
{
__android_log_write(ANDROID_LOG_INFO, "record","Can't allocate memory for AVFrame\n");
return NULL;
}
int readval = -5;
while ((readval = av_read_frame(pFormatCtx, &packet)) >= 0)
{
if(packet.stream_index != 0)
continue;
int ret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
sprintf(logdatadata, "avcodec_decode_video2 ret = %d",ret);
__android_log_write(ANDROID_LOG_INFO, "record",logdatadata);
if (ret > 0)
{
__android_log_write(ANDROID_LOG_INFO, "record","Frame is decoded\n");
pFrame->quality = 4;
av_free_packet(&packet);
av_close_input_file(pFormatCtx);
return pFrame;
}
else
{
__android_log_write(ANDROID_LOG_INFO, "record","error while decoding frame \n");
}
}
sprintf(logdatadata, "readval = %d",readval);
__android_log_write(ANDROID_LOG_INFO, "record",logdatadata);
}

The generate method takes a list of strings (paths to images) and converts them into a video; the OpenImage method is responsible for converting a single image into an AVFrame.
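One detail worth noting about generate: it writes the encoder output straight to the file with fwrite, so the result is a raw elementary stream rather than a genuine .mp4/.3gp container, and most Android players will reject it. Below is a minimal sketch of wrapping the encoded packets in a container through libavformat, using the same era of the API; start_container and write_encoded_frame are illustrative names, not part of the original code.

#include <libavformat/avformat.h>

/* Sketch only: mux already-encoded frames into a real .3gp/.mp4 file
 * instead of fwrite()-ing the raw bitstream. 'enc' is assumed to be the
 * encoder context configured as in generate(). */
static AVFormatContext *start_container(const char *path, AVCodecContext *enc,
                                        AVStream **out_st)
{
    AVFormatContext *oc = NULL;
    AVStream *st;

    av_register_all();
    /* the muxer is guessed from the extension, e.g. ".3gp" or ".mp4" */
    if (avformat_alloc_output_context2(&oc, NULL, NULL, path) < 0 || !oc)
        return NULL;
    st = avformat_new_stream(oc, NULL);
    if (!st)
        return NULL;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = enc->codec_id;   /* e.g. CODEC_ID_H263 */
    st->codec->width      = enc->width;
    st->codec->height     = enc->height;
    st->codec->time_base  = enc->time_base;
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    if (avio_open(&oc->pb, path, AVIO_FLAG_WRITE) < 0 ||
        avformat_write_header(oc, NULL) < 0)
        return NULL;
    *out_st = st;
    return oc;
}

/* Wrap each avcodec_encode_video() result in a packet; a real muxer also
 * needs monotonically increasing pkt.pts/dts in st->time_base units. */
static int write_encoded_frame(AVFormatContext *oc, AVStream *st,
                               uint8_t *buf, int size, int64_t pts)
{
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.stream_index = st->index;
    pkt.data = buf;
    pkt.size = size;
    pkt.pts  = pts;
    return av_interleaved_write_frame(oc, &pkt);
}

After the last packet, av_write_trailer(oc) and avio_close(oc->pb) would finalize the file.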
-
Android AudioRecord to FFMPEG encode native AAC
8 March 2013, by Curtis Kiu. I am doing video chat on Android and I would like to port FFmpeg to stream over RTSP or RTMP; for now I am trying RTSP first.
The problem is that av_write_frame or av_interleaved_write_frame fails to work or simply crashes. Maybe:
the AudioRecord sample format does not match the FFmpeg settings;
the frames received are not what the encoder expects.
The code follows. AudioRecorder:
http://pastebin.com/iWtB3Jhy
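Regarding the first guess: the Java side records with AudioFormat.ENCODING_PCM_16BIT, while the JNI code below configures the AAC encoder for AV_SAMPLE_FMT_FLT, so the recorded shorts would have to be converted before avcodec_encode_audio(). A minimal sketch of that conversion, with an illustrative helper name (pcm16_to_float is not part of the original code):

#include <stdint.h>

/* Sketch only: convert interleaved 16-bit PCM from AudioRecord into
 * interleaved floats in [-1.0, 1.0), the layout AV_SAMPLE_FMT_FLT expects.
 * The caller owns both buffers and sizes them to nb_samples elements. */
static void pcm16_to_float(const int16_t *in, float *out, int nb_samples)
{
    int i;
    for (i = 0; i < nb_samples; i++)
        out[i] = in[i] / 32768.0f;
}

Alternatively, configuring the encoder for AV_SAMPLE_FMT_S16, where the AAC encoder in use supports it, would avoid the conversion entirely.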
package com.curtis.broadcaster.Publisher;

import android.app.Activity;
import android.graphics.Bitmap;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.AudioRecord.OnRecordPositionUpdateListener;
import android.media.MediaRecorder;
import android.os.Bundle;
import android.util.Log;
public class Publisher extends Activity {
private int mAudioBufferSize;
private int mAudioBufferSampleSize;
private AudioRecord mAudioRecord;
private boolean inRecordMode = false;
private short[] audioBuffer;
private String Tag = "Publisher/Publisher.java";
public void onCreate(Bundle savedInstanceState) {
Log.i(Tag, "|| onCreate()");
super.onCreate(savedInstanceState);
initAudioRecord();
Log.i(Tag, "-- End onCreate()");
}
@Override
public void onResume() {
Log.i(Tag, "|| onResume()");
super.onResume();
inRecordMode = true;
Thread t = new Thread(new Runnable() {
public void run() {
Log.i(Tag, "|| Run Threat t");
getSamples();
Log.i(Tag, "-- End Threat t");
}
});
t.start();
Log.i(Tag, "-- End onResume()");
}
protected void onPause() {
Log.i(Tag, "|| Run onPause()");
inRecordMode = false;
super.onPause();
Log.i(Tag, "-- End onPause()");
}
@Override
protected void onDestroy() {
Log.i(Tag, "|| Run onDestroy()");
if (mAudioRecord != null) {
mAudioRecord.release();
Log.i(Tag + " onDestroy", "mAudioRecord.release()");
}
jniStopAll();
super.onDestroy();
android.os.Process.killProcess(android.os.Process.myPid());
Log.i(Tag, "-- End onDestroy()");
}
public OnRecordPositionUpdateListener mListener = new OnRecordPositionUpdateListener() {
public void onPeriodicNotification(AudioRecord recorder) {
Log.i(Tag + " mListener(onPeriodicNotification)", "time is "
+ System.currentTimeMillis());
jniSetAudioSample(audioBuffer);
// audioBuffer = new short[mAudioBufferSampleSize];
}
public void onMarkerReached(AudioRecord recorder) {
Log.i(Tag + " mListener(onMarkerReached)",
"time is " + System.currentTimeMillis());
inRecordMode = false;
recorder.stop();
Log.i(Tag, "recorder.stop()");
}
};
private void initAudioRecord() {
try {
jniCheck();
int sampleRate = 44100;
int channelConfig = AudioFormat.CHANNEL_IN_MONO;
int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
mAudioBufferSize = 2 * AudioRecord.getMinBufferSize(sampleRate,
channelConfig, audioFormat);
mAudioBufferSampleSize = mAudioBufferSize / 2;
Log.i(Tag, "Buffer Size " + mAudioBufferSize);
Log.i(Tag, "new AudioRecord begin");
mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,
sampleRate, channelConfig, audioFormat, mAudioBufferSize);
Log.i(Tag, "new AudioRecord end");
jniInitFFMpeg();
} catch (IllegalArgumentException e) {
Log.i(Tag, "initAudioRecord go Errors");
e.printStackTrace();
}
// mAudioRecord.setNotificationMarkerPosition(10000);
mAudioRecord.setPositionNotificationPeriod(1024);
mAudioRecord.setRecordPositionUpdateListener(mListener);
int audioRecordState = mAudioRecord.getState();
if (audioRecordState != AudioRecord.STATE_INITIALIZED) {
finish();
}
}
private void getSamples() {
Log.i(Tag, "|| getSamples()");
if (mAudioRecord == null)
return;
audioBuffer = new short[mAudioBufferSampleSize];
mAudioRecord.startRecording();
int audioRecordingState = mAudioRecord.getRecordingState();
if (audioRecordingState != AudioRecord.RECORDSTATE_RECORDING) {
finish();
}
while (inRecordMode) {
int samplesRead = mAudioRecord.read(audioBuffer, 0,
mAudioBufferSampleSize);
Log.i(Tag, "getSamples >>SamplesRead : " + samplesRead);
}
mAudioRecord.stop();
Log.i(Tag, "mAudioRecord.stop()");
}
private native void jniCheck();
private native void jniInitFFMpeg();
private native void jniSetAudioSample(short[] audioBuffer);
private native void jniStopAll();
static {
System.loadLibrary("ffmpeg");
System.loadLibrary("testerv4");
}
}

FFMPEG JNI: http://pastebin.com/hgPva35b
#include <jni.h>
#include <android/log.h>
#include <android/bitmap.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include "libavformat/rtsp.h"
#include <libavutil/mathematics.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#undef exit
/* Log System */
#define LOG_TAG "FFMPEGSample - v4a"
#define DEBUG_TAG "FFMPEG-AUDIO PART"
#define LOGI(...) __android_log_print(ANDROID_LOG_INFO,LOG_TAG,__VA_ARGS__)
#define LOGE(...) __android_log_print(ANDROID_LOG_ERROR,LOG_TAG,__VA_ARGS__)
/* 5 seconds stream duration */
#define STREAM_DURATION 5.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
#define VIDEO_CODEC_ID CODEC_ID_FLV1
#define AUDIO_CODEC_ID CODEC_ID_AAC
static int sws_flags = SWS_BICUBIC;
int mode = 1; //1 = only audio, 2 = only video, 3 = both video and audio
AVFormatContext *avForCtx;
//AVFormatContext *oc;
AVStream *audio_st, *video_st;
double audio_pts, video_pts;
int frameCount, audioFrameCount, start;
char *url;
/*Audio Declare*/
float t, tincr, tincr2;
int16_t *samples;
uint8_t *audio_outbuf;
int audio_outbuf_size;
int audio_input_frame_size;
AVFormatContext *createAVFormatContext();
AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id);
void open_video(AVFormatContext *oc, AVStream *st);
void open_audio(AVFormatContext *oc, AVStream *st);
AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id);
void write_audio_frame(AVFormatContext *oc, AVStream *st);
void write_video_frame(AVFormatContext *oc, AVStream *st);
void init();
void setAudioSample(unsigned char *inSample[]);
void stopAll();
/*/////////////////////////////////JNI Bridge////////////////////////////////////// */
void Java_com_curtis_broadcaster_Publisher_Publisher_jniCheck(JNIEnv* env,
jobject this) {
LOGI("-@ JNI work fine @-");
}
void Java_com_curtis_broadcaster_Publisher_Publisher_jniInitFFMpeg(JNIEnv* env,
jobject this) {
LOGI("-@ Init Encorder @-");
/* initialize libavcodec, and register all codecs and formats */
avcodec_init();
avcodec_register_all();
av_register_all();
avformat_network_init(); //ERROR
/* allocate the output media context */
avForCtx = createAVFormatContext();
frameCount = 1;
audioFrameCount = 1;
start = 0;
/* add the audio and video streams using the default format codecs
and initialize the codecs */
video_st = NULL;
audio_st = NULL;
if (mode == 1 || mode == 3) {
audio_st = add_audio_stream(avForCtx, AUDIO_CODEC_ID);
LOGI("(Init Encorder) - addAudioStream");
}
if (mode == 2 || mode == 3) {
video_st = add_video_stream(avForCtx, VIDEO_CODEC_ID);
LOGI("(Init Encorder) - addVideoStream");
}
// av_dump_format(avForCtx, 0, "rtsp://192.168.1.104/live/live", 1);
LOGI("(Init Encorder) - Waiting to call open_*");
if (audio_st) {
open_audio(avForCtx, audio_st);
LOGI("(Init Encorder) - open_audio");
}
if (video_st) {
open_video(avForCtx, video_st);
LOGI("(Init Encorder) - open_video");
}
av_write_header(avForCtx);
LOGI("-@ Finish Init Encorder @-");
}
void Java_com_curtis_broadcaster_Publisher_Publisher_jniSetAudioSample(
JNIEnv* env, jobject this, unsigned char *inSample[]) {
if (audio_st) {
LOGI("-@ Start setAudioSample @-");
samples = (int16_t *) inSample;
write_audio_frame(avForCtx, audio_st);
LOGI("-@ Finish setAudioSample @-");
}
}
void Java_com_curtis_broadcaster_Publisher_Publisher_jniStopAll(JNIEnv* env,
jobject this) {
LOGI("-@ Stopping All @-");
//close_audio(avForCtx, audio_st);
//close_video(avForCtx, video_st);
LOGI("-@ Stopped All @-");
}
/*/////////////////////////////END JNI Bridge////////////////////////////////////// */
/* New Added Coding */
AVFormatContext *createAVFormatContext() {
LOGI("-@OPEN - createAVFormatContext@-");
AVFormatContext *ctx = avformat_alloc_context();
// ctx->oformat = av_guess_format("flv", "rtmp://192.168.1.104/live/live",
// NULL);
// ctx->oformat = av_guess_format("flv", NULL, NULL);
//if (!av_guess_format("flv", NULL, NULL)) {
//LOGI("-flv Can not Guess Format-");
//}
ctx->oformat = av_guess_format("rtsp", NULL, NULL);
if (!av_guess_format("rtsp", NULL, NULL)) {
LOGI("-flv Can not Guess Format-");
}
/*
LOGI("%d",avformat_alloc_output_context2(&ctx, ctx->oformat, "flv",
"rtmp://192.168.1.104/live/live"));
if (!ctx) {
LOGI("-@avformat_alloc_output_context2 fail@-");
}*/
// LOGI("flv %d",avformat_alloc_output_context2(&ctx, ctx->oformat, "flv",
// "rtmp://192.168.1.104/live/live"));
// LOGI("rtmp %d",avformat_alloc_output_context2(&ctx, ctx->oformat, "rtmp",
// "rtmp://192.168.1.104/live/live"));
// LOGI("mpeg4 %d",avformat_alloc_output_context2(&ctx, ctx->oformat, "mpeg4",
// "rtmp://192.168.1.104/live/live"));
// LOGI("NULL %d",avformat_alloc_output_context2(&ctx, ctx->oformat, NULL,
// "rtmp://192.168.1.104/live/live"));
avformat_alloc_output_context2(&ctx, ctx->oformat, "sdp",
"rtsp://192.168.1.104:1935/live/live");
if (!ctx) {
LOGI("-@avformat_alloc_output_context2 fail@-");
}
LOGI("-@CLOSE - createAVFormatContext@-");
return ctx;
}
/**************************************************************/
/* audio output */
/*
* add an audio output stream
*/
AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id) {
LOGI("-@OPEN - add_audio_stream@-");
AVCodecContext *c;
AVStream *st = avformat_new_stream(oc, avcodec_find_encoder(codec_id));
if (!st) {
LOGI("-@add_audio_stream - Could not alloc stream@-");
exit(1);
}
st->id = 1;
c = st->codec;
c->codec_id = AUDIO_CODEC_ID;
c->codec_type = AVMEDIA_TYPE_AUDIO;
/* put sample parameters */
c->sample_fmt = AV_SAMPLE_FMT_FLT;
//c->sample_fmt = AV_SAMPLE_FMT_S16;
c->bit_rate = 100000;
c->sample_rate = 44100;
c->channels = 1;
// some formats want stream headers to be separate
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
LOGI("-@Close - add_audio_stream@-");
return st;
}
void open_audio(AVFormatContext *oc, AVStream *st) {
LOGI("@- open_audio -@");
AVCodecContext *c;
AVCodec *codec;
c = st->codec;
c->strict_std_compliance = -2;
/* find the audio encoder */
codec = avcodec_find_encoder(c->codec_id);
if (!codec) {
LOGI("@- open_audio E:codec not found-@");
exit(1);
}
/* open it */
if (avcodec_open(c, codec) < 0) {
LOGI("%d",avcodec_open(c, codec));
LOGI("@- open_audio E:could not open codec-@");
exit(1);
}
/* init signal generator */
t = 0;
tincr = 2 * M_PI * 110.0 / c->sample_rate;
/* increment frequency by 110 Hz per second */
tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
audio_outbuf_size = 10000;
audio_outbuf = av_malloc(audio_outbuf_size);
/* ugly hack for PCM codecs (will be removed ASAP with new PCM
support to compute the input frame size in samples */
if (c->frame_size <= 1) {
audio_input_frame_size = audio_outbuf_size / c->channels;
switch (st->codec->codec_id) {
case CODEC_ID_PCM_S16LE:
case CODEC_ID_PCM_S16BE:
case CODEC_ID_PCM_U16LE:
case CODEC_ID_PCM_U16BE:
audio_input_frame_size >>= 1;
break;
default:
break;
}
} else {
audio_input_frame_size = c->frame_size;
}
LOGI("audio_input_frame_size : %d",audio_input_frame_size);
samples = av_malloc(audio_input_frame_size * 2 * c->channels);
LOGI("@- Close open_audio -@");
}
/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
'nb_channels' channels */
void get_audio_frame(int16_t *samples, int frame_size, int nb_channels) {
LOGI("@- get_audio_frame -@");
int j, i, v;
int16_t *q;
q = samples;
for (j = 0; j < frame_size; j++) {
v = (int) (sin(t) * 10000);
for (i = 0; i < nb_channels; i++)
*q++ = v;
t += tincr;
tincr += tincr2;
LOGI("@- audio_frame Looping -@");
}
LOGI("@- CLOSE get_audio_frame -@");
}
void write_audio_frame(AVFormatContext *oc, AVStream *st) {
LOGI("@- write_audio_frame -@");
AVCodecContext *c;
AVPacket pkt;
av_init_packet(&pkt);
c = st->codec;
//get_audio_frame(samples, audio_input_frame_size, c->channels);
LOGI("@- write_audio_frame : got frame from get_audio_frame -@");
pkt.size
= avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
LOGI("%d",pkt.size);
if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts
= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
LOGI("%d",pkt.pts);
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
pkt.data = audio_outbuf;
LOGI("Finish PKT");
/* write the compressed frame in the media file */
// if (av_interleaved_write_frame(oc, &pkt) != 0) {
// LOGI("@- write_audio_frame E:Error while writing audio frame -@");
// exit(1);
// }
if (av_interleaved_write_frame(oc, &pkt) != 0) {
LOGI("Error while writing audio frame %d\n", audioFrameCount);
} else {
LOGI("Writing Audio Frame %d", audioFrameCount);
}
LOGI("@- CLOSE write_audio_frame -@");
audioFrameCount++;
av_free_packet(&pkt);
}
void close_audio(AVFormatContext *oc, AVStream *st) {
avcodec_close(st->codec);
av_free(samples);
av_free(audio_outbuf);
}
/**************************************************************/
/* video output */
AVFrame *picture, *tmp_picture;
uint8_t *video_outbuf;
int frame_count, video_outbuf_size;
/* add a video output stream */
AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id) {
AVCodecContext *c;
AVStream *st;
AVCodec *codec;
st = avformat_new_stream(oc, NULL);
if (!st) {
fprintf(stderr, "Could not alloc stream\n");
exit(1);
}
c = st->codec;
/* find the video encoder */
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
avcodec_get_context_defaults3(c, codec);
c->codec_id = codec_id;
/* put sample parameters */
c->bit_rate = 400000;
/* resolution must be a multiple of two */
c->width = 352;
c->height = 288;
/* time base: this is the fundamental unit of time (in seconds) in terms
of which frame timestamps are represented. for fixed-fps content,
timebase should be 1/framerate and timestamp increments should be
identically 1. */
c->time_base.den = STREAM_FRAME_RATE;
c->time_base.num = 1;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B frames */
c->max_b_frames = 2;
}
if (c->codec_id == CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
This does not happen with normal video, it just happens here as
the motion of the chroma plane does not match the luma plane. */
c->mb_decision = 2;
}
// some formats want stream headers to be separate
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
return st;
}
AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height) {
AVFrame * picture;
uint8_t *picture_buf;
int size;
picture = avcodec_alloc_frame();
if (!picture)
return NULL;
size = avpicture_get_size(pix_fmt, width, height);
picture_buf = av_malloc(size);
if (!picture_buf) {
av_free(picture);
return NULL;
}
avpicture_fill((AVPicture *) picture, picture_buf, pix_fmt, width, height);
return picture;
}
void open_video(AVFormatContext *oc, AVStream *st) {
AVCodec *codec;
AVCodecContext *c;
c = st->codec;
/* find the video encoder */
codec = avcodec_find_encoder(c->codec_id);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
/* open the codec */
if (avcodec_open(c, codec) < 0) {
fprintf(stderr, "could not open codec\n");
exit(1);
}
video_outbuf = NULL;
if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
/* allocate output buffer */
/* XXX: API change will be done */
/* buffers passed into lav* can be allocated any way you prefer,
as long as they're aligned enough for the architecture, and
they're freed appropriately (such as using av_free for buffers
allocated with av_malloc) */
video_outbuf_size = 200000;
video_outbuf = av_malloc(video_outbuf_size);
}
/* allocate the encoded raw picture */
picture = alloc_picture(c->pix_fmt, c->width, c->height);
if (!picture) {
fprintf(stderr, "Could not allocate picture\n");
exit(1);
}
/* if the output format is not YUV420P, then a temporary YUV420P
picture is needed too. It is then converted to the required
output format */
tmp_picture = NULL;
if (c->pix_fmt != PIX_FMT_YUV420P) {
tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
if (!tmp_picture) {
fprintf(stderr, "Could not allocate temporary picture\n");
exit(1);
}
}
}
/* prepare a dummy image */
void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height) {
int x, y, i;
i = frame_index;
/* Y */
for (y = 0; y < height; y++) {
for (x = 0; x < width; x++) {
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
}
}
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
}
}
}
void write_video_frame(AVFormatContext *oc, AVStream *st) {
int out_size, ret;
AVCodecContext *c;
struct SwsContext *img_convert_ctx;
c = st->codec;
if (frame_count >= STREAM_NB_FRAMES) {
/* no more frame to compress. The codec has a latency of a few
frames if using B frames, so we get the last frames by
passing the same picture again */
} else {
if (c->pix_fmt != PIX_FMT_YUV420P) {
/* as we only generate a YUV420P picture, we must convert it
to the codec pixel format if needed */
if (img_convert_ctx == NULL) {
img_convert_ctx = sws_getContext(c->width, c->height,
PIX_FMT_YUV420P, c->width, c->height, c->pix_fmt,
sws_flags, NULL, NULL, NULL);
if (img_convert_ctx == NULL) {
fprintf(stderr,
"Cannot initialize the conversion context\n");
exit(1);
}
}
fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
sws_scale(img_convert_ctx, tmp_picture->data,
tmp_picture->linesize, 0, c->height, picture->data,
picture->linesize);
} else {
fill_yuv_image(picture, frame_count, c->width, c->height);
}
}
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* raw video case. The API will change slightly in the near
future for that. */
AVPacket pkt;
av_init_packet(&pkt);
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
pkt.data = (uint8_t *) picture;
pkt.size = sizeof(AVPicture);
ret = av_interleaved_write_frame(oc, &pkt);
} else {
/* encode the image */
out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size,
picture);
/* if zero size, it means the image was buffered */
if (out_size > 0) {
AVPacket pkt;
av_init_packet(&pkt);
if (c->coded_frame->pts != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base,
st->time_base);
if (c->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = st->index;
pkt.data = video_outbuf;
pkt.size = out_size;
/* write the compressed frame in the media file */
ret = av_interleaved_write_frame(oc, &pkt);
} else {
ret = 0;
}
}
if (ret != 0) {
fprintf(stderr, "Error while writing video frame\n");
exit(1);
}
frame_count++;
}
void close_video(AVFormatContext *oc, AVStream *st) {
avcodec_close(st->codec);
av_free(picture->data[0]);
av_free(picture);
if (tmp_picture) {
av_free(tmp_picture->data[0]);
av_free(tmp_picture);
}
av_free(video_outbuf);
}

The Android Manifest has been set up and everything is initialised.
Please give me some ideas.
Some log messages for you: http://pastebin.com/uPD5LyH2