
Advanced search
Media (10)
-
Demon Seed
26 September 2011, by
Updated: September 2011
Language: English
Type: Audio
-
Demon seed (wav version)
26 September 2011, by
Updated: April 2013
Language: English
Type: Audio
-
The four of us are dying (wav version)
26 September 2011, by
Updated: April 2013
Language: English
Type: Audio
-
Corona radiata (wav version)
26 September 2011, by
Updated: April 2013
Language: English
Type: Audio
-
Lights in the sky (wav version)
26 September 2011, by
Updated: April 2013
Language: English
Type: Audio
-
Head down (wav version)
26 September 2011, by
Updated: April 2013
Language: English
Type: Audio
Other articles (55)
-
Request to create a channel
12 March 2010, by
Depending on the platform's configuration, a user may have two different ways to request the creation of a channel. The first is at the time of registration; the second, after registration, by filling in a request form.
Both approaches ask for the same information and work in much the same way: the prospective user fills in a series of form fields that first of all give the administrators information about (...) -
Support for all media types
10 April 2011
Unlike many other modern programs and document-sharing platforms, MediaSPIP aims to handle as many different document formats as possible, whether they are: images (png, gif, jpg, bmp and others...); audio (MP3, Ogg, Wav and others...); video (Avi, MP4, Ogv, mpg, mov, wmv and others...); or textual content, code and more (open office, microsoft office (spreadsheet, presentation), web (html, css), LaTeX, Google Earth) (...)
-
Contribute to a better visual interface
13 April 2011
MediaSPIP is based on a system of themes and templates. Templates define the placement of information on the page, and can be adapted to a wide range of uses. Themes define the overall graphic appearance of the site.
Anyone can submit a new graphic theme or template and make it available to the MediaSPIP community.
On other sites (6549)
-
FFmpeg C API h.264 encoding / MPEG-2 TS streaming problems
3 March 2015, by ccoral
The class prototype is as follows:
#ifndef _FULL_MOTION_VIDEO_STREAM_H_
#define _FULL_MOTION_VIDEO_STREAM_H_
#include <memory>
#include <string>
#ifndef INT64_C
# define INT64_C(c) (c ## LL)
# define UINT64_C(c) (c ## ULL)
#endif
extern "C"
{
#include "libavutil/opt.h"
#include "libavcodec/avcodec.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"
#include "libavformat/avformat.h"
#include <libavutil/timestamp.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
}
class FMVStream
{
public:
struct OutputStream
{
OutputStream() :
st(0),
next_pts(0),
samples_count(0),
frame(0),
tmpFrame(0),
sws_ctx(0)
{
}
AVStream *st;
/* pts of the next frame that will be generated */
int64_t next_pts;
int samples_count;
AVFrame *frame;
AVFrame *tmpFrame;
struct SwsContext *sws_ctx;
};
///
/// Constructor
///
FMVStream();
///
/// Destructor
///
~FMVStream();
///
/// Frame encoder helper function
///
/// Encodes a raw RGB frame into the transport stream
///
int EncodeFrame(uint8_t* frame);
///
/// Frame width setter
///
void setFrameWidth(int width);
///
/// Frame width getter
///
int getFrameWidth() const;
///
/// Frame height setter
///
void setFrameHeight(int height);
///
/// Frame height getter
///
int getFrameHeight() const;
///
/// Stream address setter
///
void setStreamAddress(const std::string& address);
///
/// Stream address getter
///
std::string getStreamAddress() const;
private:
///
/// Video Stream creation
///
AVStream* initVideoStream(AVFormatContext* oc);
///
/// Raw frame transcoder
///
/// This will convert the raw RGB frame to a raw YUV frame necessary for h.264 encoding
///
void CopyFrameData(uint8_t* src_frame);
///
/// Video frame allocator
///
AVFrame* AllocPicture(PixelFormat pix_fmt, int width, int height);
///
/// Debug print helper function
///
void print_sdp(AVFormatContext **avc, int n);
///
/// Write the frame to the stream
///
int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt);
///
/// initialize the frame data
///
void initFrame();
// formatting data needed for output streaming and the output container (MPEG 2 TS)
AVOutputFormat* format;
AVFormatContext* format_ctx;
// structure container for our video stream
OutputStream stream;
AVIOContext* io_ctx;
std::string streamFilename;
int frameWidth;
int frameHeight;
};
#endif
That block is the class declaration; the implementation follows.
#include "FullMotionVideoStream.h"
#include <stdexcept>
#include <iostream>
FMVStream::FMVStream()
: format(0),
format_ctx(0),
stream(),
io_ctx(0),
streamFilename("test.mpeg"),
frameWidth(640),
frameHeight(480)
{
// Register all formats and codecs
av_register_all();
avcodec_register_all();
// Init networking
avformat_network_init();
// Find format
this->format = av_guess_format("mpegts", NULL, NULL);
// allocate the AVFormatContext
this->format_ctx = avformat_alloc_context();
if (!this->format_ctx)
{
throw std::runtime_error("avformat_alloc_context failed");
}
this->format_ctx->oformat = this->format;
//sprintf_s(this->format_ctx->filename, sizeof(this->format_ctx->filename), "%s", this->streamFilename.c_str());
this->stream.st = initVideoStream(this->format_ctx);
this->initFrame();
// Allocate AVIOContext
int ret = avio_open(&this->io_ctx, this->streamFilename.c_str(), AVIO_FLAG_WRITE);
if (ret != 0)
{
throw std::runtime_error("avio_open failed");
}
this->format_ctx->pb = this->io_ctx;
// Print some debug info about the format
av_dump_format(this->format_ctx, 0, NULL, 1);
// Begin the output by writing the container header
avformat_write_header(this->format_ctx, NULL);
AVFormatContext* ac[] = { this->format_ctx };
print_sdp(ac, 1);
}
FMVStream::~FMVStream()
{
av_write_trailer(this->format_ctx);
avcodec_close(this->stream.st->codec);
avio_close(io_ctx);
avformat_free_context(this->format_ctx);
av_frame_free(&this->stream.frame);
av_free(this->format);
}
AVFrame* FMVStream::AllocPicture(PixelFormat pix_fmt, int width, int height)
{
// Allocate a frame
AVFrame* frame = av_frame_alloc();
if (frame == nullptr)
{
throw std::runtime_error("avcodec_alloc_frame failed");
}
if (av_image_alloc(frame->data, frame->linesize, width, height, pix_fmt, 1) < 0)
{
throw std::runtime_error("av_image_alloc failed");
}
frame->width = width;
frame->height = height;
frame->format = pix_fmt;
return frame;
}
void FMVStream::print_sdp(AVFormatContext **avc, int n)
{
char sdp[2048];
av_sdp_create(avc, n, sdp, sizeof(sdp));
printf("SDP:\n%s\n", sdp);
fflush(stdout);
}
AVStream* FMVStream::initVideoStream(AVFormatContext *oc)
{
AVStream* st = avformat_new_stream(oc, NULL);
if (st == nullptr)
{
throw std::runtime_error("Could not alloc stream");
}
AVCodec* codec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (codec == nullptr)
{
throw std::runtime_error("couldn't find mpeg2 encoder");
}
st->codec = avcodec_alloc_context3(codec);
st->codec->codec_id = AV_CODEC_ID_H264;
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->bit_rate = 400000;
st->codec->width = this->frameWidth;
st->codec->height = this->frameHeight;
st->time_base.num = 1;
st->time_base.den = 30;
st->codec->framerate.num = 1;
st->codec->framerate.den = 30;
st->codec->max_b_frames = 2;
st->codec->gop_size = 12;
st->codec->pix_fmt = PIX_FMT_YUV420P;
st->id = oc->nb_streams - 1;
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
{
st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
// option setup for the codec
av_opt_set(st->codec->priv_data, "profile", "baseline", AV_OPT_SEARCH_CHILDREN);
if (avcodec_open2(st->codec, codec, NULL) < 0)
{
throw std::runtime_error("avcodec_open failed");
}
return st;
}
void FMVStream::initFrame()
{
// Allocate a tmp frame for converting our raw RGB data to YUV for encoding
this->stream.tmpFrame = this->AllocPicture(PIX_FMT_RGB24, this->frameWidth, this->frameHeight);
// Allocate a main frame
this->stream.frame = this->AllocPicture(PIX_FMT_YUV420P, this->frameWidth, this->frameHeight);
}
This next block attempts to convert the raw RGB data to the YUV format needed for h.264 encoding.
void FMVStream::CopyFrameData(uint8_t* data)
{
// fill image with our raw RGB data
//avpicture_alloc((AVPicture*)this->stream.tmpFrame, PIX_FMT_RGB24, this->stream.st->codec->width, this->stream.st->codec->height);
int numBytes = avpicture_get_size(PIX_FMT_RGB24, this->stream.st->codec->width, this->stream.st->codec->height);
uint8_t* buffer = (uint8_t*) av_malloc(numBytes * sizeof(uint8_t));
avpicture_fill((AVPicture*)this->stream.tmpFrame, buffer, PIX_FMT_RGB24, this->stream.st->codec->width, this->stream.st->codec->height);
for (int y = 0; y < this->stream.st->codec->height; y++)
{
for (int x = 0; x < this->stream.st->codec->width; x++)
{
int offset = 3 * (x + y * this->stream.st->codec->width);
this->stream.tmpFrame->data[0][offset + 0] = data[x + y * this->stream.st->codec->width]; // R
this->stream.tmpFrame->data[0][offset + 1] = data[x + y * this->stream.st->codec->width + 1]; // G
this->stream.tmpFrame->data[0][offset + 2] = data[x + y * this->stream.st->codec->width + 2]; // B
}
}
// convert the RGB frame to a YUV frame using the sws Context
this->stream.sws_ctx = sws_getContext(this->stream.st->codec->width, this->stream.st->codec->height, PIX_FMT_RGB32, this->stream.st->codec->width, this->stream.st->codec->height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
// use the scale function to transcode this raw frame to the correct type
sws_scale(this->stream.sws_ctx, this->stream.tmpFrame->data, this->stream.tmpFrame->linesize, 0, this->stream.st->codec->height, this->stream.frame->data, this->stream.frame->linesize);
}
This is the block that encodes the raw data to h.264 and then sends it out over the MPEG-2 TS. I believe the problem lies within this block. I can put a break point in my write frame block and see that frames are being written; however, opening the resulting file in VLC results in a blank video. The file is approx. 2 MB.
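One thing I notice re-reading the conversion code above (a guess, not a confirmed fix): the SwsContext is created for PIX_FMT_RGB32 while tmpFrame was allocated as PIX_FMT_RGB24, and the copy loop reads the source as if it were one byte per pixel, so the scaler may be fed inconsistent data. A minimal sketch of a consistent RGB24 path, assuming the incoming data pointer is tightly packed RGB24 (3 bytes per pixel) and that <cstring> is included for memcpy:
void FMVStream::CopyFrameData(uint8_t* data)
{
    const int w = this->stream.st->codec->width;
    const int h = this->stream.st->codec->height;
    // Copy row by row into tmpFrame, respecting its linesize
    // (which may be padded beyond w * 3 bytes).
    for (int y = 0; y < h; y++)
    {
        memcpy(this->stream.tmpFrame->data[0] + y * this->stream.tmpFrame->linesize[0],
               data + y * w * 3,
               w * 3);
    }
    // Create the scaler once, with the source format matching tmpFrame
    // (RGB24, not RGB32), and reuse it for every frame.
    if (this->stream.sws_ctx == nullptr)
    {
        this->stream.sws_ctx = sws_getContext(w, h, PIX_FMT_RGB24,
                                              w, h, PIX_FMT_YUV420P,
                                              SWS_FAST_BILINEAR, NULL, NULL, NULL);
    }
    sws_scale(this->stream.sws_ctx,
              this->stream.tmpFrame->data, this->stream.tmpFrame->linesize,
              0, h,
              this->stream.frame->data, this->stream.frame->linesize);
}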
int FMVStream::EncodeFrame(uint8_t* data)
{
AVCodecContext* c = this->stream.st->codec;
AVRational one;
one.den = one.num = 1;
// check to see if we want to keep writing frames we can probably change this to a toggle switch
if (av_compare_ts(this->stream.next_pts, this->stream.st->codec->time_base, 10, one) >= 0)
{
this->stream.frame = nullptr;
}
else
{
// Convert and load the frame data into the AVFrame struct
CopyFrameData(data);
}
// setup the timestamp stepping
AVPacket pkt = { 0 };
av_init_packet(&pkt);
this->stream.frame->pts = (int64_t)((1.0 / this->stream.st->codec->framerate.den) * 90000.0 * this->stream.next_pts++);
int gotPacket, out_size, ret;
out_size = avcodec_encode_video2(c, &pkt, this->stream.frame, &gotPacket);
if (gotPacket == 1)
{
ret = write_frame(this->format_ctx, &c->time_base, this->stream.st, &pkt);
}
else
{
ret = 0;
}
if (ret < 0)
{
std::cerr << "Error writing video frame" << std::endl;
}
av_free_packet(&pkt);
return ((this->stream.frame != nullptr) || gotPacket) ? 0 : 1;
}
int FMVStream::write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
/* rescale output packet timestamp values from codec to stream timebase */
av_packet_rescale_ts(pkt, *time_base, st->time_base);
pkt->stream_index = st->index;
return av_interleaved_write_frame(fmt_ctx, pkt);
}
void FMVStream::setFrameWidth(const int width)
{
this->frameWidth = width;
}
int FMVStream::getFrameWidth() const
{
return this->frameWidth;
}
void FMVStream::setFrameHeight(const int height)
{
this->frameHeight = height;
}
int FMVStream::getFrameHeight() const
{
return this->frameHeight;
}
void FMVStream::setStreamAddress(const std::string& address)
{
this->streamFilename = address;
}
std::string FMVStream::getStreamAddress() const
{
return this->streamFilename;
}
Here is the main function.
#include "FullMotionVideoStream.h"
#include <iostream>
#include <thread>
#include <chrono>
int main(int argc, char** argv)
{
FMVStream* fmv = new FMVStream;
fmv->setFrameWidth(640);
fmv->setFrameHeight(480);
std::cout << "Streaming Address: " << fmv->getStreamAddress() << std::endl;
// create our alternating frame of black and white to test the streaming functionality
uint8_t white[640 * 480 * sizeof(uint8_t) * 3];
uint8_t black[640 * 480 * sizeof(uint8_t) * 3];
std::memset(white, 255, 640 * 480 * sizeof(uint8_t) * 3);
std::memset(black, 0, 640 * 480 * sizeof(uint8_t)* 3);
for (auto i = 0; i < 100; i++)
{
auto ret = fmv->EncodeFrame(white);
if (ret != 0)
{
std::cerr << "There was a problem encoding the frame: " << i << std::endl;
}
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
for (auto i = 0; i < 100; i++)
{
auto ret = fmv->EncodeFrame(black);
if (ret != 0)
{
std::cerr << "There was a problem encoding the frame: " << i << std::endl;
}
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
delete fmv;
}
Here is the resultant output via the console / my print SDP function.
[libx264 @ 000000ac95f58440] using cpu capabilities: MMX2 SSE2Fast SSSE3 SSE4.2
AVX FMA3 AVX2 LZCNT BMI2
[libx264 @ 000000ac95f58440] profile Constrained Baseline, level 3.0
Output #0, mpegts, to '(null)':
Stream #0:0: Video: h264 (libx264), yuv420p, 640x480, q=-1--1, 400 kb/s, 30
tbn
SDP:
v=0
o=- 0 0 IN IP4 127.0.0.1
s=No Name
t=0 0
a=tool:libavformat 56.23.104
m=video 0 RTP/AVP 96
b=AS:400
a=rtpmap:96 H264/90000
a=fmtp:96 packetization-mode=1
a=control:streamid=0
Streaming Address: test.mpeg
[libx264 @ 000000ac95f58440] frame I:45 Avg QP: 0.51 size: 1315
[libx264 @ 000000ac95f58440] frame P:136 Avg QP: 0.29 size: 182
[libx264 @ 000000ac95f58440] mb I I16..4: 99.7% 0.0% 0.3%
[libx264 @ 000000ac95f58440] mb P I16..4: 0.1% 0.0% 0.1% P16..4: 0.1% 0.0
% 0.0% 0.0% 0.0% skip:99.7%
[libx264 @ 000000ac95f58440] final ratefactor: -68.99
[libx264 @ 000000ac95f58440] coded y,uvDC,uvAC intra: 0.5% 0.5% 0.5% inter: 0.0%
0.1% 0.1%
[libx264 @ 000000ac95f58440] i16 v,h,dc,p: 96% 0% 3% 0%
[libx264 @ 000000ac95f58440] i4 v,h,dc,ddl,ddr,vr,hd,vl,hu: 1% 10% 85% 0% 3%
0% 1% 0% 0%
[libx264 @ 000000ac95f58440] i8c dc,h,v,p: 100% 0% 0% 0%
[libx264 @ 000000ac95f58440] ref P L0: 46.8% 25.2% 28.0%
[libx264 @ 000000ac95f58440] kb/s:0.03I know there are probably many issues with this program, I am very new with FFMPEG and multimedia programming in general. Ive used many pieces of code found through searching google/ stack overflow to get to this point as is. The file has a good size but comes up as length 0.04 tells me that my time stamping must be broken between the frames / pkts, but I am unsure on how to fix this issue.
I tried inspecting the file with ffmpeg.exe, using ffmpeg -i and outputting to a regular TS. It seems my code works better than I originally thought; however, I am simply trying to output a bunch of all-white frames.
ffmpeg -i test.mpeg test.ts
ffmpeg version N-70125-g6c9537b Copyright (c) 2000-2015 the FFmpeg developers
built with gcc 4.9.2 (GCC)
configuration: --disable-static --enable-shared --enable-gpl --enable-version3
--disable-w32threads --enable-avisynth --enable-bzlib --enable-fontconfig --ena
ble-frei0r --enable-gnutls --enable-iconv --enable-libass --enable-libbluray --e
nable-libbs2b --enable-libcaca --enable-libfreetype --enable-libgme --enable-lib
gsm --enable-libilbc --enable-libmodplug --enable-libmp3lame --enable-libopencor
e-amrnb --enable-libopencore-amrwb --enable-libopenjpeg --enable-libopus --enabl
e-librtmp --enable-libschroedinger --enable-libsoxr --enable-libspeex --enable-l
ibtheora --enable-libtwolame --enable-libvidstab --enable-libvo-aacenc --enable-
libvo-amrwbenc --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-l
ibwebp --enable-libx264 --enable-libx265 --enable-libxavs --enable-libxvid --ena
ble-lzma --enable-decklink --enable-zlib
libavutil 54. 19.100 / 54. 19.100
libavcodec 56. 26.100 / 56. 26.100
libavformat 56. 23.104 / 56. 23.104
libavdevice 56. 4.100 / 56. 4.100
libavfilter 5. 11.101 / 5. 11.101
libswscale 3. 1.101 / 3. 1.101
libswresample 1. 1.100 / 1. 1.100
libpostproc 53. 3.100 / 53. 3.100
Input #0, mpegts, from 'test.mpeg':
Duration: 00:00:00.04, start: 0.000000, bitrate: 24026 kb/s
Program 1
Metadata:
service_name : Service01
service_provider: FFmpeg
Stream #0:0[0x100]: Video: h264 (Constrained Baseline) ([27][0][0][0] / 0x00
1B), yuv420p, 640x480, 25 fps, 25 tbr, 90k tbn, 50 tbc
File 'test.ts' already exists. Overwrite ? [y/N] y
Output #0, mpegts, to 'test.ts':
Metadata:
encoder : Lavf56.23.104
Stream #0:0: Video: mpeg2video, yuv420p, 640x480, q=2-31, 200 kb/s, 25 fps,
90k tbn, 25 tbc
Metadata:
encoder : Lavc56.26.100 mpeg2video
Stream mapping:
Stream #0:0 -> #0:0 (h264 (native) -> mpeg2video (native))
Press [q] to stop, [?] for help
frame= 3 fps=0.0 q=2.0 Lsize= 9kB time=00:00:00.08 bitrate= 883.6kbits/
s dup=0 drop=178
video:7kB audio:0kB subtitle:0kB other streams:0kB global headers:0kB muxing overhead: 22.450111% -
FFmpeg encoding in C
25 December 2012, by barisatbas
I have been working on a project about video summarization on the Android platform, and I am stuck at the encoding stage. I think:
first I must convert my frame into an RGB frame, then convert that RGB frame into a YUV frame, then encode the frame. After these operations the output video was very strange. I think I missed something. Here is my latest optimized code; maybe someone has experience with this subject. Its syntax has been adapted to the Android NDK:
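Before the full listing, a minimal sketch of the RGB-to-YUV conversion step on its own (untested; the function name is illustrative). It assumes a tightly packed RGB24 source buffer and a destination frame filled with avpicture_fill() for PIX_FMT_YUV420P, which is what the code below sets up. One thing that looks suspicious in my code below is that picture's data/linesize are set up with YUV-style plane offsets even though it holds RGB data:
/* Sketch only: convert one packed RGB24 frame to planar YUV420P
 * with libswscale. rgb_data is assumed to be w * h * 3 bytes. */
static int rgb24_to_yuv420p(uint8_t *rgb_data, AVFrame *yuv_frame,
                            int w, int h)
{
    uint8_t *src_data[4] = { rgb_data, NULL, NULL, NULL };
    int src_linesize[4] = { 3 * w, 0, 0, 0 };
    struct SwsContext *ctx = sws_getContext(w, h, PIX_FMT_RGB24,
                                            w, h, PIX_FMT_YUV420P,
                                            SWS_BICUBIC, NULL, NULL, NULL);
    if (ctx == NULL)
        return -1;
    sws_scale(ctx, (const uint8_t * const *)src_data, src_linesize,
              0, h, yuv_frame->data, yuv_frame->linesize);
    sws_freeContext(ctx);
    return 0;
}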
jint Java_com_test_Test_encodeVideo(JNIEnv* env, jobject javaThis)
{
char *flname, *err, *info;
AVCodec *codec;
AVCodecContext *c= NULL;
int i,out_size, size, x, y,z, outbuf_size;
int frameCount=99;
FILE *f;
AVFrame *picture, *yuvFrame;
uint8_t *outbuf, *picture_buf;
PPMImage *img;
const char *destfilename = "/sdcard/new.mp4";
int numBytes;
uint8_t *buffer;
av_register_all();
// must be called before using avcodec lib
avcodec_init();
// register all the codecs
avcodec_register_all();
log_message("Video encoding\n");
// find the H263 video encoder
codec = avcodec_find_encoder(CODEC_ID_H263);
if (!codec) {
sprintf(err, "codec not found\n");
log_message(err);
}
c= avcodec_alloc_context();
picture= avcodec_alloc_frame();
yuvFrame= avcodec_alloc_frame();
// get first ppm context. it is because I need width and height values.
img = getPPM("/sdcard/frame1.ppm");
c->bit_rate = 400000;
// resolution must be a multiple of two
c->width = img->x;
c->height = img->y;
free(img);
// frames per second
c->time_base= (AVRational){1,25};
c->gop_size = 10; // emit one intra frame every ten frames
//c->max_b_frames=1;
c->pix_fmt = PIX_FMT_YUV420P;
// open it
if (avcodec_open(c, codec) < 0){
log_message("codec couldn't open");
return -1;
}
//destfilename = (*env)->GetStringUTFChars(env, dst, 0);
f = fopen(destfilename, "wb");
log_message(destfilename);
if (!f) {
sprintf(err, "could not open %s", destfilename);
log_message(err);
}
log_message("after destination file opening");
// alloc image and output buffer
outbuf_size = 100000;
outbuf = malloc(outbuf_size);
size = c->width * c->height;
picture_buf = malloc(size * 3); // size for RGB
picture->data[0] = picture_buf;
picture->data[1] = picture->data[0] + size;
picture->data[2] = picture->data[1] + size / 4;
picture->linesize[0] = c->width;
picture->linesize[1] = c->width / 2;
picture->linesize[2] = c->width / 2;
numBytes=avpicture_get_size(PIX_FMT_YUV420P, c->width,
c->height);
buffer=malloc(numBytes);
// Assign appropriate parts of buffer to image planes in FrameYUV
avpicture_fill((AVPicture *)yuvFrame, buffer, PIX_FMT_YUV420P,
c->width, c->height);
// encode the video
log_message("before for loop");
for (z = 1; z < frameCount; z++) {
sprintf(flname, "/sdcard/frame%d.ppm", z);
// read the ppm file
img = getPPM(flname);
picture->data[0] = img->data;
// convert the rgb frame into yuv frame
rgb2yuv(picture,yuvFrame,c);
log_message("translation completed.");
// encode the image
out_size = avcodec_encode_video(c, outbuf, outbuf_size, yuvFrame);
sprintf(info,"encoding frame %3d (size=%5d)\n", z, out_size);
log_message(info);
fwrite(outbuf, 1, out_size, f);
free(img);
}
// get the delayed frames
for(; out_size; i++) {
//fflush(stdout);
out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
sprintf(info,"write frame %3d (size=%5d)\n", i, out_size);
log_message(info);
fwrite(outbuf, 1, out_size, f);
}
// add sequence end code to have a real mpeg file
outbuf[0] = 0x00;
outbuf[1] = 0x00;
outbuf[2] = 0x01;
outbuf[3] = 0xb7;
fwrite(outbuf, 1, 4, f);
fclose(f);
free(picture_buf);
free(outbuf);
avcodec_close(c);
av_free(c);
av_free(picture);
av_free(yuvFrame);
}
int rgb2yuv(AVFrame *frameRGB, AVFrame *frameYUV, AVCodecContext *c)
{
char *err;
static struct SwsContext *img_convert_ctx;
log_message("conversion starts");
// Convert the image into YUV format from RGB format
if(img_convert_ctx == NULL) {
int w = c->width;
int h = c->height;
img_convert_ctx = sws_getContext(w, h, PIX_FMT_RGB24,w, h, c->pix_fmt, SWS_BICUBIC,NULL, NULL, NULL);
if(img_convert_ctx == NULL) {
sprintf(err, "Cannot initialize the conversion context!\n");
log_message(err);
return -1;
}
}
int ret = sws_scale(img_convert_ctx,frameRGB->data, frameRGB->linesize , 0,c->height,frameYUV->data, frameYUV->linesize );
return;
} -
libavcodec: how to encode with the h264 codec, with an mp4 container, using controllable frame rate and bitrate (through C code)
26 May 2016, by musimbate
I am trying to record the screen of a PC and encode the recorded frames using an h264 encoder, wrapping them into an mp4 container. I want to do this because this Super User link http://superuser.com/questions/300897/what-is-a-codec-e-g-divx-and-how-does-it-differ-from-a-file-format-e-g-mp/300997#300997 suggests it allows a good trade-off between size and quality of the output file. The application I am working on should allow users to record a few hours of video and have the minimum output file size with decent quality.
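For the container part of the question, here is a minimal sketch of what I understand the muxer route to look like (untested; error handling elided; file name illustrative): let libavformat create an mp4 context and write the header and trailer, instead of fwrite-ing raw packets to a file:
/* Sketch only: wrap an H.264 stream in an mp4 container with libavformat. */
AVFormatContext *ofmt_ctx = NULL;
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, "out.mp4");
AVCodec *enc = avcodec_find_encoder(AV_CODEC_ID_H264);
AVStream *vst = avformat_new_stream(ofmt_ctx, enc);
vst->codec->width = 1366;
vst->codec->height = 768;
vst->codec->pix_fmt = AV_PIX_FMT_YUV420P;
vst->codec->bit_rate = 400000;
vst->codec->time_base.num = 1;
vst->codec->time_base.den = 25;
vst->time_base = vst->codec->time_base;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
    vst->codec->flags |= CODEC_FLAG_GLOBAL_HEADER; /* mp4 needs global headers */
avcodec_open2(vst->codec, enc, NULL);
avio_open(&ofmt_ctx->pb, "out.mp4", AVIO_FLAG_WRITE);
avformat_write_header(ofmt_ctx, NULL);
/* ... encode frames as below, rescale each packet's timestamps to
 * vst->time_base, then av_interleaved_write_frame(ofmt_ctx, &pkt) ... */
av_write_trailer(ofmt_ctx);
avio_closep(&ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);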
The code I have cooked up so far allows me to record and save .mpg (container) files with the mpeg1video encoder.
Running:
ffmpeg -i test.mpg
on the output file gives the following output:
[mpegvideo @ 028c7400] Estimating duration from bitrate, this may be inaccurate
Input #0, mpegvideo, from 'test.mpg':
Duration: 00:00:00.29, bitrate: 104857 kb/s
Stream #0:0: Video: mpeg1video, yuv420p(tv), 1366x768 [SAR 1:1 DAR 683:384], 104857 kb/s, 25 fps, 25 tbr, 1200k tbn, 25 tbc
I have these settings for my output:
const char * filename="test.mpg";
int codec_id= AV_CODEC_ID_MPEG1VIDEO;
AVCodec *codec11;
AVCodecContext *outContext= NULL;
int got_output;
FILE *f;
AVPacket pkt;
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
/* put sample parameters */
outContext->bit_rate = 400000;
/* resolution must be a multiple of two */
outContext->width=pCodecCtx->width;
outContext->height=pCodecCtx->height;
/* frames per second */
outContext->time_base.num=1;
outContext->time_base.den=25;
/* emit one intra frame every ten frames
* check frame pict_type before passing frame
* to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
* then gop_size is ignored and the output of encoder
* will always be I frame irrespective to gop_size
*/
outContext->gop_size = 10;
outContext->max_b_frames = 1;
outContext->pix_fmt = AV_PIX_FMT_YUV420P;
When I change int codec_id = AV_CODEC_ID_MPEG1VIDEO to int codec_id = AV_CODEC_ID_H264, I get a file that does not play with VLC.
I have read that writing the
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
array at the end of your file when finished encoding makes your file a legitimate mpeg file. It is written like this:
fwrite(endcode, 1, sizeof(endcode), f);
fclose(f);
in my code. Should I do the same thing when I change my encoder to AV_CODEC_ID_H264?
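From what I can tell (an assumption on my part, not verified): that endcode is the MPEG-1/2 sequence-end code, so it only makes sense for mpeg1video; a raw Annex-B H.264 stream does not use it. If I keep dumping raw encoded packets to a file, I would expect to be able to wrap the result afterwards with something like
ffmpeg -framerate 25 -i test.h264 -c copy test.mp4
(assuming the dump really is Annex-B H.264), though the cleaner fix is presumably the muxer route sketched above.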
I am capturing using GDI input like this:
AVDictionary* options = NULL;
//Set some options
//grabbing frame rate
av_dict_set(&options,"framerate","30",0);
AVInputFormat *ifmt=av_find_input_format("gdigrab");
if(avformat_open_input(&pFormatCtx,"desktop",ifmt,&options)!=0){
printf("Couldn't open input stream.\n");
return -1;
I want to be able to modify my grabbing rate to optimize the output file size, but when I change it to 20, for example, I get a video that plays too fast. How do I get a video that plays at normal speed with frames captured at 20 fps or any lower frame rate value?
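A guess at the speed issue (hedged; this is just how I read my own code): the capture rate (the gdigrab "framerate" option) and the encoder clock (outContext->time_base) are set independently, so frames grabbed at 20 fps are still stamped as 25 fps on playback. A sketch of keeping the two in step:
/* Sketch only: derive both the grab rate and the encoder time base
 * from a single variable so capture and playback speed agree.
 * (snprintf assumes <stdio.h>.) */
int capture_fps = 20;
char fps_str[8];
snprintf(fps_str, sizeof(fps_str), "%d", capture_fps);
av_dict_set(&options, "framerate", fps_str, 0);
/* ... later, when configuring the encoder ... */
outContext->time_base.num = 1;
outContext->time_base.den = capture_fps;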
While recording I get the following output on the standard error output:
[gdigrab @ 00cdb8e0] Capturing whole desktop as 1366x768x32 at (0,0)
Input #0, gdigrab, from '(null)':
Duration: N/A, start: 1420718663.655713, bitrate: 1006131 kb/s
Stream #0:0: Video: bmp, bgra, 1366x768, 1006131 kb/s, 29.97 tbr, 1000k tbn, 29.97 tbc
[swscaler @ 00d24120] Warning: data is not aligned! This can lead to a speedloss
[mpeg1video @ 00cdd160] AVFrame.format is not set
[mpeg1video @ 00cdd160] AVFrame.width or height is not set
[mpeg1video @ 00cdd160] AVFrame.format is not set
[mpeg1video @ 00cdd160] AVFrame.width or height is not set
[mpeg1video @ 00cdd160] AVFrame.format is not set
How do I get rid of this error in my code?
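Those warnings look like they come from outframe, which was set up with avpicture_fill(); that call only wires up the data pointers and line sizes, not the frame's metadata. Setting the fields explicitly before encoding should silence them (a sketch, reusing the names from the code below):
/* Sketch: avpicture_fill() does not set these, so the encoder
 * cannot know the frame's format or dimensions. */
outframe->format = outContext->pix_fmt;
outframe->width  = outContext->width;
outframe->height = outContext->height;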
In summary:
1) How do I encode h264 video wrapped in an mp4 container?
2) How do I capture at lower frame rates and still play the encoded video at normal speed?
3) How do I set the format (and which format, since it depends on the codec?) plus the width and height info on the frames I write?
The code I am using in its entirety is shown below.
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libswscale/swscale.h"
#include "libavdevice/avdevice.h"
#include <libavutil></libavutil>opt.h>
#include <libavutil></libavutil>channel_layout.h>
#include <libavutil></libavutil>common.h>
#include <libavutil></libavutil>imgutils.h>
#include <libavutil></libavutil>mathematics.h>
#include <libavutil></libavutil>samplefmt.h>
//SDL
#include "SDL.h"
#include "SDL_thread.h"
}
//Output YUV420P
#define OUTPUT_YUV420P 0
//'1' Use Dshow
//'0' Use GDIgrab
#define USE_DSHOW 0
int main(int argc, char* argv[])
{
//1.WE HAVE THE FORMAT CONTEXT
//THIS IS FROM THE DESKTOP GRAB STREAM.
AVFormatContext *pFormatCtx;
int i, videoindex;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
av_register_all();
avformat_network_init();
//ASSIGN STH TO THE FORMAT CONTEXT.
pFormatCtx = avformat_alloc_context();
//Register Device
avdevice_register_all();
//Windows
#ifdef _WIN32
#if USE_DSHOW
//Use dshow
//
//Need to Install screen-capture-recorder
//screen-capture-recorder
//Website: http://sourceforge.net/projects/screencapturer/
//
AVInputFormat *ifmt=av_find_input_format("dshow");
//if(avformat_open_input(&pFormatCtx,"video=screen-capture-recorder",ifmt,NULL)!=0){
if(avformat_open_input(&pFormatCtx,"video=UScreenCapture",ifmt,NULL)!=0){
printf("Couldn't open input stream.\n");
return -1;
}
#else
//Use gdigrab
AVDictionary* options = NULL;
//Set some options
//grabbing frame rate
av_dict_set(&options,"framerate","30",0);
//The distance from the left edge of the screen or desktop
//av_dict_set(&options,"offset_x","20",0);
//The distance from the top edge of the screen or desktop
//av_dict_set(&options,"offset_y","40",0);
//Video frame size. The default is to capture the full screen
//av_dict_set(&options,"video_size","640x480",0);
AVInputFormat *ifmt=av_find_input_format("gdigrab");
if(avformat_open_input(&pFormatCtx,"desktop",ifmt,&options)!=0){
printf("Couldn't open input stream.\n");
return -1;
}
#endif
#endif//FOR THE WIN32 THING.
if(avformat_find_stream_info(pFormatCtx,NULL)<0)
{
printf("Couldn't find stream information.\n");
return -1;
}
videoindex=-1;
for (i = 0; i < pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codec->codec_type
==AVMEDIA_TYPE_VIDEO)
{
videoindex=i;
break;
}
if(videoindex==-1)
{
printf("Didn't find a video stream.\n");
return -1;
}
pCodecCtx=pFormatCtx->streams[videoindex]->codec;
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL)
{
printf("Codec not found.\n");
return -1;
}
if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
{
printf("Could not open codec.\n");
return -1;
}
//THIS IS WHERE YOU CONTROL THE FORMAT(THROUGH FRAMES).
AVFrame *pFrame;
pFrame=av_frame_alloc();
int ret, got_picture;
AVPacket *packet=(AVPacket *)av_malloc(sizeof(AVPacket));
//TRY TO INIT THE PACKET HERE
av_init_packet(packet);
//Output Information-----------------------------
printf("File Information---------------------\n");
av_dump_format(pFormatCtx,0,NULL,0);
printf("-------------------------------------------------\n");
//<<--FOR WRITING MPG FILES
//<<--START:PREPARE TO WRITE YOUR MPG FILE.
const char * filename="test.mpg";
int codec_id= AV_CODEC_ID_MPEG1VIDEO;
AVCodec *codec11;
AVCodecContext *outContext= NULL;
int got_output;
FILE *f;
AVPacket pkt;
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
printf("Encode video file %s\n", filename);
/* find the mpeg1 video encoder */
codec11 = avcodec_find_encoder((AVCodecID)codec_id);
if (!codec11) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
outContext = avcodec_alloc_context3(codec11);
if (!outContext) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
/* put sample parameters */
outContext->bit_rate = 400000;
/* resolution must be a multiple of two */
outContext->width=pCodecCtx->width;
outContext->height=pCodecCtx->height;
/* frames per second */
outContext->time_base.num=1;
outContext->time_base.den=25;
/* emit one intra frame every ten frames
* check frame pict_type before passing frame
* to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
* then gop_size is ignored and the output of encoder
* will always be I frame irrespective to gop_size
*/
outContext->gop_size = 10;
outContext->max_b_frames = 1;
outContext->pix_fmt = AV_PIX_FMT_YUV420P;
if (codec_id == AV_CODEC_ID_H264)
av_opt_set(outContext->priv_data, "preset", "slow", 0);
/* open it */
if (avcodec_open2(outContext, codec11, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
f = fopen(filename, "wb");
if (!f) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
AVFrame *outframe = av_frame_alloc();
int nbytes = avpicture_get_size(outContext->pix_fmt,
outContext->width,
outContext->height);
uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes);
//ASSOCIATE THE FRAME TO THE ALLOCATED BUFFER.
avpicture_fill((AVPicture*)outframe, outbuffer,
AV_PIX_FMT_YUV420P,
outContext->width, outContext->height);
SwsContext* swsCtx_ ;
swsCtx_= sws_getContext(pCodecCtx->width,
pCodecCtx->height,
pCodecCtx->pix_fmt,
outContext->width, outContext->height,
outContext->pix_fmt,
SWS_BICUBIC, NULL, NULL, NULL);
//HERE WE START PULLING PACKETS FROM THE SPECIFIED FORMAT CONTEXT.
while(av_read_frame(pFormatCtx, packet)>=0)
{
if(packet->stream_index==videoindex)
{
ret= avcodec_decode_video2(pCodecCtx,
pFrame,
&got_picture,packet );
if(ret < 0)
{
printf("Decode Error.\n");
return -1;
}
if(got_picture)
{
sws_scale(swsCtx_, pFrame->data, pFrame->linesize,
0, pCodecCtx->height, outframe->data,
outframe->linesize);
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
ret = avcodec_encode_video2(outContext, &pkt, outframe, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
}
av_free_packet(packet);
}//THE LOOP TO PULL PACKETS FROM THE FORMAT CONTEXT ENDS HERE.
//
/* get the delayed frames */
for (got_output = 1; got_output; i++) {
//fflush(stdout);
ret = avcodec_encode_video2(outContext, &pkt, NULL, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
/* add sequence end code to have a real mpeg file */
fwrite(endcode, 1, sizeof(endcode), f);
fclose(f);
avcodec_close(outContext);
av_free(outContext);
//av_freep(&frame->data[0]);
//av_frame_free(&frame);
//THIS WAS ADDED LATER
av_free(outbuffer);
avcodec_close(pCodecCtx);
avformat_close_input(&pFormatCtx);
return 0;
}
Thank you for your time.