
Media (9)
-
Stereo master soundtrack
17 October 2011, by
Updated: October 2011
Language: English
Type: Audio
-
Elephants Dream - Cover of the soundtrack
17 October 2011, by
Updated: October 2011
Language: English
Type: Image
-
#7 Ambience
16 October 2011, by
Updated: June 2015
Language: English
Type: Audio
-
#6 Teaser Music
16 October 2011, by
Updated: February 2013
Language: English
Type: Audio
-
#5 End Title
16 October 2011, by
Updated: February 2013
Language: English
Type: Audio
-
#3 The Safest Place
16 October 2011, by
Updated: February 2013
Language: English
Type: Audio
Other articles (97)
-
MediaSPIP 0.1 Beta version
25 April 2011, by
MediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
The zip file provided here only contains the sources of MediaSPIP in its standalone version.
To get a working installation, you must manually install all software dependencies on the server.
If you want to use this archive for an installation in "farm mode", you will also need to carry out other manual (...)
-
Multilang: improving the interface for multilingual blocks
18 February 2011, by
Multilang is an additional plugin that is not enabled by default when MediaSPIP is initialized.
After it is activated, MediaSPIP init automatically sets up a preconfiguration so that the new feature is immediately operational. It is therefore not necessary to go through a configuration step for this.
-
List of compatible distributions
26 April 2011, by
The table below lists the Linux distributions compatible with the automated installation script of MediaSPIP.
Distribution name | Version name | Version number
Debian | Squeeze | 6.x.x
Debian | Wheezy | 7.x.x
Debian | Jessie | 8.x.x
Ubuntu | The Precise Pangolin | 12.04 LTS
Ubuntu | The Trusty Tahr | 14.04
If you want to help us improve this list, you can provide us access to a machine whose distribution is not mentioned above or send the necessary fixes to add (...)
On other sites (9336)
-
Video rotates 90 degrees to the left when converted using ffmpeg
15 July 2017, by Herdesh Verma
I developed the code below:
extern "C"
{
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libswscale/swscale.h>
}
#include
static AVFormatContext *fmt_ctx = NULL;
static int frame_index = 0;
static int j = 0, nbytes=0;
uint8_t *video_outbuf = NULL;
static AVPacket *pAVPacket=NULL;
static int value=0;
static AVFrame *pAVFrame=NULL;
static AVFrame *outFrame=NULL;
static AVStream *video_st=NULL;
static AVFormatContext *outAVFormatContext=NULL;
static AVCodec *outAVCodec=NULL;
static AVOutputFormat *output_format=NULL;
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
static AVCodecContext *outAVCodecContext=NULL;
static int width, height;
static enum AVPixelFormat pix_fmt;
static AVStream *video_stream = NULL, *audio_stream = NULL;
static const char *src_filename = NULL;
static const char *video_dst_filename = NULL;
static const char *audio_dst_filename = NULL;
static FILE *video_dst_file = NULL;
static FILE *audio_dst_file = NULL;
static uint8_t *video_dst_data[4] = {NULL};
static int video_dst_linesize[4];
static int video_dst_bufsize;
static int video_stream_idx = -1, audio_stream_idx = -1;
static AVPacket *pkt=NULL;
static AVPacket *pkt1=NULL;
static AVFrame *frame = NULL;
//static AVPacket pkt;
static int video_frame_count = 0;
static int audio_frame_count = 0;
static int refcount = 0;
AVCodec *codec;
static struct SwsContext *sws_ctx;
AVCodecContext *c= NULL;
int i, out_size, size, x, y, outbuf_size;
AVFrame *picture;
uint8_t *outbuf, *picture_buf;
int video_outbuf_size;
int w, h;
AVPixelFormat pixFmt;
uint8_t *data[4];
int linesize[4];
static int open_codec_context(int *stream_idx,
AVCodecContext **dec_ctx, AVFormatContext
*fmt_ctx, enum AVMediaType type)
{
int ret, stream_index;
AVStream *st;
AVCodec *dec = NULL;
AVDictionary *opts = NULL;
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
if (ret < 0) {
printf("Could not find %s stream in input file '%s'\n",
av_get_media_type_string(type), src_filename);
return ret;
} else {
stream_index = ret;
st = fmt_ctx->streams[stream_index];
/* find decoder for the stream */
dec = avcodec_find_decoder(st->codecpar->codec_id);
if (!dec) {
printf("Failed to find %s codec\n",
av_get_media_type_string(type));
return AVERROR(EINVAL);
}
/* Allocate a codec context for the decoder */
*dec_ctx = avcodec_alloc_context3(dec);
if (!*dec_ctx) {
printf("Failed to allocate the %s codec context\n",
av_get_media_type_string(type));
return AVERROR(ENOMEM);
}
/* Copy codec parameters from input stream to output codec context */
if ((ret = avcodec_parameters_to_context(*dec_ctx, st->codecpar)) < 0) {
printf("Failed to copy %s codec parameters to decoder context\n",
av_get_media_type_string(type));
return ret;
}
/* Init the decoders, with or without reference counting */
av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
if ((ret = avcodec_open2(*dec_ctx, dec, &opts)) < 0) {
printf("Failed to open %s codec\n",
av_get_media_type_string(type));
return ret;
}
*stream_idx = stream_index;
}
return 0;
}
int main (int argc, char **argv)
{
int ret = 0, got_frame;
src_filename = argv[1];
video_dst_filename = argv[2];
audio_dst_filename = argv[3];
av_register_all();
avcodec_register_all();
printf("Registered all\n");
/* open input file, and allocate format context */
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
printf("Could not open source file %s\n", src_filename);
exit(1);
}
/* retrieve stream information */
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
printf("Could not find stream information\n");
exit(1);
}
if (open_codec_context(&video_stream_idx, &video_dec_ctx, fmt_ctx,
AVMEDIA_TYPE_VIDEO) >= 0) {
video_stream = fmt_ctx->streams[video_stream_idx];
avformat_alloc_output_context2(&outAVFormatContext, NULL, NULL,
video_dst_filename);
if (!outAVFormatContext)
{
printf("\n\nError : avformat_alloc_output_context2()");
return -1;
}
}
if (open_codec_context(&audio_stream_idx, &audio_dec_ctx, fmt_ctx,
AVMEDIA_TYPE_AUDIO) >= 0) {
audio_stream = fmt_ctx->streams[audio_stream_idx];
audio_dst_file = fopen(audio_dst_filename, "wb");
if (!audio_dst_file) {
printf("Could not open destination file %s\n", audio_dst_filename);
ret = 1;
goto end;
}
}
/* dump input information to stderr */
av_dump_format(fmt_ctx, 0, src_filename, 0);
if (!audio_stream && !video_stream) {
printf("Could not find audio or video stream in the input, aborting\n");
ret = 1;
goto end;
}
output_format = av_guess_format(NULL, video_dst_filename, NULL);
if( !output_format )
{
printf("\n\nError : av_guess_format()");
return -1;
}
video_st = avformat_new_stream(outAVFormatContext ,NULL);
if( !video_st )
{
printf("\n\nError : avformat_new_stream()");
return -1;
}
outAVCodecContext = avcodec_alloc_context3(outAVCodec);
if( !outAVCodecContext )
{
printf("\n\nError : avcodec_alloc_context3()");
return -1;
}
outAVCodecContext = video_st->codec;
outAVCodecContext->codec_id = AV_CODEC_ID_MPEG4; // or AV_CODEC_ID_H264, AV_CODEC_ID_MPEG1VIDEO
outAVCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
outAVCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
outAVCodecContext->bit_rate = 400000; // 2500000
outAVCodecContext->width = 1920;
//outAVCodecContext->width = 500;
outAVCodecContext->height = 1080;
//outAVCodecContext->height = 500;
outAVCodecContext->gop_size = 3;
outAVCodecContext->max_b_frames = 2;
outAVCodecContext->time_base.num = 1;
outAVCodecContext->time_base.den = 30; // 30 fps
if (outAVCodecContext->codec_id == AV_CODEC_ID_H264)
{
av_opt_set(outAVCodecContext->priv_data, "preset", "slow", 0);
}
outAVCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
if( !outAVCodec )
{
printf("\n\nError : avcodec_find_encoder()");
return -1;
}
/* Some container formats (like MP4) require global headers to be
present
Mark the encoder so that it behaves accordingly. */
if ( outAVFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
{
outAVCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
value = avcodec_open2(outAVCodecContext, outAVCodec, NULL);
if( value < 0)
{
printf("\n\nError : avcodec_open2()");
return -1;
}
/* create empty video file */
if ( !(outAVFormatContext->flags & AVFMT_NOFILE) )
{
if( avio_open2(&outAVFormatContext->pb , video_dst_filename,
AVIO_FLAG_WRITE ,NULL, NULL) < 0 )
{
printf("\n\nError : avio_open2()");
}
}
if(!outAVFormatContext->nb_streams)
{
printf("\n\nError : Output file dose not contain any stream");
return -1;
}
/* imp: mp4 container or some advanced container file required header
information*/
value = avformat_write_header(outAVFormatContext , NULL);
if(value < 0)
{
printf("\n\nError : avformat_write_header()");
return -1;
}
printf("\n\nOutput file information :\n\n");
av_dump_format(outAVFormatContext , 0 ,video_dst_filename ,1);
int flag;
int frameFinished;
value = 0;
pAVPacket = (AVPacket *)av_malloc(sizeof(AVPacket));
av_init_packet(pAVPacket);
pAVFrame = av_frame_alloc();
if( !pAVFrame )
{
printf("\n\nError : av_frame_alloc()");
return -1;
}
outFrame = av_frame_alloc(); // allocate an AVFrame and set its fields to default values
if( !outFrame )
{
printf("\n\nError : av_frame_alloc()");
return -1;
}
nbytes = av_image_get_buffer_size(outAVCodecContext->pix_fmt, outAVCodecContext->width, outAVCodecContext->height, 32);
video_outbuf = (uint8_t*)av_malloc(nbytes);
if( video_outbuf == NULL )
{
printf("\n\nError : av_malloc()");
}
value = av_image_fill_arrays( outFrame->data, outFrame->linesize, video_outbuf, AV_PIX_FMT_YUV420P, outAVCodecContext->width, outAVCodecContext->height, 1 ); // returns the size in bytes required for src
if(value < 0)
{
printf("\n\nError : av_image_fill_arrays()");
}
SwsContext* swsCtx_ ;
// Allocate and return swsContext.
// a pointer to an allocated context, or NULL in case of error
// Deprecated : Use sws_getCachedContext() instead.
swsCtx_ = sws_getContext(video_dec_ctx->width,
video_dec_ctx->height,
video_dec_ctx->pix_fmt,
video_dec_ctx->width,
video_dec_ctx->height,
video_dec_ctx->pix_fmt,
SWS_BICUBIC, NULL, NULL, NULL);
AVPacket outPacket;
int got_picture;
while( av_read_frame( fmt_ctx , pAVPacket ) >= 0 )
{
if(pAVPacket->stream_index == video_stream_idx)
{
value = avcodec_decode_video2(video_dec_ctx , pAVFrame ,
&frameFinished , pAVPacket );
if( value < 0)
{
printf("Error : avcodec_decode_video2()");
}
if(frameFinished)// Frame successfully decoded :)
{
sws_scale(swsCtx_, pAVFrame->data, pAVFrame->linesize, 0, video_dec_ctx->height, outFrame->data, outFrame->linesize);
av_init_packet(&outPacket);
outPacket.data = NULL; // packet data will be allocated by the encoder
outPacket.size = 0;
avcodec_encode_video2(outAVCodecContext, &outPacket, outFrame, &got_picture);
if(got_picture)
{
    if(outPacket.pts != AV_NOPTS_VALUE)
        outPacket.pts = av_rescale_q(outPacket.pts, video_st->codec->time_base, video_st->time_base);
    if(outPacket.dts != AV_NOPTS_VALUE)
        outPacket.dts = av_rescale_q(outPacket.dts, video_st->codec->time_base, video_st->time_base);
    printf("Write frame %3d (size= %2d)\n", j++, outPacket.size/1000);
    if(av_write_frame(outAVFormatContext, &outPacket) != 0)
    {
        printf("\n\nError : av_write_frame()");
    }
    av_packet_unref(&outPacket);
} // got_picture
av_packet_unref(&outPacket);
} // frameFinished
}
}// End of while-loop
value = av_write_trailer(outAVFormatContext);
if( value < 0)
{
printf("\n\nError : av_write_trailer()");
}
//THIS WAS ADDED LATER
av_free(video_outbuf);
end:
avcodec_free_context(&video_dec_ctx);
avcodec_free_context(&audio_dec_ctx);
avformat_close_input(&fmt_ctx);
if (video_dst_file)
fclose(video_dst_file);
if (audio_dst_file)
fclose(audio_dst_file);
//av_frame_free(&frame);
av_free(video_dst_data[0]);
return ret < 0;
}
The problem with the above code is that it rotates the video 90 degrees to the left.
Snapshot of the video given as input to the above program.
Snapshot of the output video; it is rotated 90 degrees to the left.
I compiled the program using the command below:
g++ -D__STDC_CONSTANT_MACROS -Wall -g ScreenRecorder.cpp -I/home/harry/Documents/compressor/ffmpeg-3.3/ -I/root/android-ndk-r14b/platforms/android-21/arch-x86_64/usr/include/ -c -o ScreenRecorder.o -w
And linked it using the command below:
g++ -Wall -g ScreenRecorder.o -I/home/harry/Documents/compressor/ffmpeg-3.3/ -I/root/android-ndk-r14b/platforms/android-21/arch-x86_64/usr/include/ -L/usr/lib64 -L/lib64 -L/usr/lib/gcc/x86_64-redhat-linux/4.4.7/ -L/home/harry/Documents/compressor/ffmpeg-3.3/ffmpeg-build -L/root/android-ndk-r14b/platforms/android-21/arch-x86_64/usr/lib64 -o ScreenRecorder.exe -lavformat -lavcodec -lavutil -lavdevice -lavfilter -lswscale -lx264 -lswresample -lm -lpthread -ldl -lstdc++ -lc -lrt
The program is run using the command below:
./ScreenRecorder.exe vertical.MOV videoH.mp4 audioH.mp3
Note:
- The source video was taken from an iPhone and is in .mov format.
- The output video is stored in an .mp4 file.
Can anyone please tell me why it is rotating the video by 90 degrees?
One thing I noticed in the dump is shown below:
Duration: 00:00:06.04, start: 0.000000, bitrate: 17087 kb/s
Stream #0:0(und): Video: h264 (High) (avc1 / 0x31637661), yuv420p(tv, bt709), 1920x1080, 17014 kb/s, 29.98 fps, 29.97 tbr, 600 tbn, 1200 tbc (default)
Metadata:
rotate : 90
creation_time : 2017-07-09T10:56:42.000000Z
handler_name : Core Media Data Handler
encoder : H.264
Side data:
displaymatrix: rotation of -90.00 degrees
It says "displaymatrix: rotation of -90.00 degrees". Is this responsible for rotating the video by 90 degrees?
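For what it's worth, the displaymatrix side data is rotation metadata: the decoded frames are stored unrotated, and players that honor the display matrix rotate them at playback time, so a transcoding loop that copies raw frames and drops the metadata produces output that looks rotated. A minimal sketch of reading that angle from the input stream, assuming the fmt_ctx and video_stream_idx opened above (the helper name get_stream_rotation is only illustrative):
#include <libavutil/display.h>
static double get_stream_rotation(AVFormatContext *fmt_ctx, int stream_idx)
{
    AVStream *st = fmt_ctx->streams[stream_idx];
    /* look for display-matrix side data attached to the stream */
    uint8_t *sd = av_stream_get_side_data(st, AV_PKT_DATA_DISPLAYMATRIX, NULL);
    if (!sd)
        return 0.0; /* no rotation metadata present */
    /* av_display_rotation_get() converts the 3x3 matrix to degrees (counter-clockwise) */
    return av_display_rotation_get((const int32_t *)sd);
}
With the angle known, the frames could be rotated explicitly (for example with a transpose filter) or the same side data could be copied onto the output stream so players rotate it on display.
-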
trying to make OpenCV 3.2.0 work with virtualenv
24 July 2017, by lollercoaster
I'm on Ubuntu 16.04 with Python 2.7 and virtualenv & virtualenvwrapper.
By following this guide I managed to get the following script working with my system Python 2.7, which has cv2 globally installed.
I used this script to install it:
######################################
# INSTALL OPENCV ON UBUNTU OR DEBIAN #
######################################
# | THIS SCRIPT IS TESTED CORRECTLY ON |
# |----------------------------------------------------|
# | OS | OpenCV | Test | Last test |
# |----------------|--------------|------|-------------|
# | Ubuntu 16.04.2 | OpenCV 3.2.0 | OK | 20 May 2017 |
# | Debian 8.8 | OpenCV 3.2.0 | OK | 20 May 2017 |
# | Debian 9.0 | OpenCV 3.2.0 | OK | 25 Jun 2017 |
# 1. KEEP UBUNTU OR DEBIAN UP TO DATE
sudo apt-get -y update
sudo apt-get -y upgrade
sudo apt-get -y dist-upgrade
sudo apt-get -y autoremove
# 2. INSTALL THE DEPENDENCIES
# Build tools:
sudo apt-get install -y build-essential cmake
# GUI (if you want to use GTK instead of Qt, replace 'qt5-default' with 'libgtkglext1-dev' and remove '-DWITH_QT=ON' option in CMake):
sudo apt-get install -y qt5-default libvtk6-dev
# Media I/O:
sudo apt-get install -y zlib1g-dev libjpeg-dev libwebp-dev libpng-dev libtiff5-dev libjasper-dev libopenexr-dev libgdal-dev
# Video I/O:
sudo apt-get install -y libdc1394-22-dev libavcodec-dev libavformat-dev libswscale-dev libtheora-dev libvorbis-dev libxvidcore-dev libx264-dev yasm libopencore-amrnb-dev libopencore-amrwb-dev libv4l-dev libxine2-dev
# Parallelism and linear algebra libraries:
sudo apt-get install -y libtbb-dev libeigen3-dev
# Python:
sudo apt-get install -y python-dev python-tk python-numpy python3-dev python3-tk python3-numpy
# Documentation:
sudo apt-get install -y doxygen
# UI stuff
sudo apt-get install libgtk-3-dev libatlas-base-dev gfortran
# 3. INSTALL THE LIBRARY (YOU CAN CHANGE '3.2.0' FOR THE LAST STABLE VERSION)
sudo apt-get install -y unzip wget
# opencv contrib
wget https://github.com/opencv/opencv_contrib/archive/3.2.0.zip -O opencv_contrib-3.2.0.zip
unzip opencv_contrib-3.2.0.zip
rm opencv_contrib-3.2.0.zip
# opencv
wget https://github.com/opencv/opencv/archive/3.2.0.zip
unzip 3.2.0.zip
rm 3.2.0.zip
mv opencv-3.2.0 OpenCV-3.2.0
cd OpenCV-3.2.0
mkdir build
cd build
cmake -D WITH_QT=ON \
-D WITH_OPENGL=ON \
-D FORCE_VTK=ON \
-D WITH_TBB=ON \
-D WITH_GDAL=ON \
-D WITH_XINE=ON \
-D BUILD_EXAMPLES=ON \
-D INSTALL_PYTHON_EXAMPLES=ON \
-D ENABLE_PRECOMPILED_HEADERS=OFF \
-D BUILD_NEW_PYTHON_SUPPORT=ON \
..
make -j4
sudo make install
sudo ldconfig
# 4. EXECUTE SOME OPENCV EXAMPLES AND COMPILE A DEMONSTRATION
# To complete this step, please visit 'http://milq.github.io/install-opencv-ubuntu-debian'.The following script below works great with that system-wide installation :
import cv2
img = cv2.imread('some_img.jpg')
Though this one doesn't - even the system Python can't read videos for some reason...
import cv2
video_capture = cv2.VideoCapture(0)
ret, frame = video_capture.read()
print ret # always False
But I want it to work with my virtualenv, so I recompiled OpenCV with:
cmake -D WITH_QT=ON \
-D WITH_OPENGL=ON \
-D FORCE_VTK=ON \
-D WITH_TBB=ON \
-D WITH_GDAL=ON \
-D WITH_XINE=ON \
-D BUILD_EXAMPLES=ON \
-D INSTALL_PYTHON_EXAMPLES=ON \
-D ENABLE_PRECOMPILED_HEADERS=OFF \
-D BUILD_NEW_PYTHON_SUPPORT=ON \
-D OPENCV_EXTRA_MODULES_PATH=/home/me/code/myproject/opencv_contrib-3.2.0/modules \
-D PYTHON_EXECUTABLE=~/.envs/myenv/bin/python \
..
make -j4
sudo make install
sudo ldconfig
Here's the CMake log:
-- Found VTK ver. 6.2.0 (usefile: /usr/lib/cmake/vtk-6.2/UseVTK.cmake)
-- Caffe: NO
-- Protobuf: YES
-- Glog: NO
-- freetype2: YES
-- harfbuzz: YES
-- Module opencv_sfm disabled because the following dependencies are not found: Glog/Gflags
-- freetype2: YES
-- harfbuzz: YES
-- Checking for modules 'tesseract;lept'
-- No package 'tesseract' found
-- No package 'lept' found
-- Tesseract: NO
-- Check contents of vgg_generated_48.i ...
-- Check contents of vgg_generated_64.i ...
-- Check contents of vgg_generated_80.i ...
-- Check contents of vgg_generated_120.i ...
-- Check contents of boostdesc_bgm.i ...
-- Check contents of boostdesc_bgm_bi.i ...
-- Check contents of boostdesc_bgm_hd.i ...
-- Check contents of boostdesc_binboost_064.i ...
-- Check contents of boostdesc_binboost_128.i ...
-- Check contents of boostdesc_binboost_256.i ...
-- Check contents of boostdesc_lbgm.i ...
--
-- General configuration for OpenCV 3.2.0 =====================================
-- Version control: 817bd7b-dirty
--
-- Extra modules:
-- Location (extra): /home/me/code/myproject/opencv_contrib-3.2.0/modules
-- Version control (extra): 817bd7b-dirty
--
-- Platform:
-- Timestamp: 2017-07-20T18:25:26Z
-- Host: Linux 4.8.0-58-generic x86_64
-- CMake: 3.5.1
-- CMake generator: Unix Makefiles
-- CMake build tool: /usr/bin/make
-- Configuration: Release
--
-- C/C++:
-- Built as dynamic libs?: YES
-- C++ Compiler: /usr/bin/c++ (ver 5.4.0)
-- C++ flags (Release): -fsigned-char -W -Wall -Werror=return-type -Werror=non-virtual-dtor -Werror=address -Werror=sequence-point -Wformat -Werror=format-security -Wmissing-declarations -Wundef -Winit-self -Wpointer-arith -Wshadow -Wsign-promo -Wno-narrowing -Wno-delete-non-virtual-dtor -Wno-comment -fdiagnostics-show-option -Wno-long-long -pthread -fomit-frame-pointer -msse -msse2 -mno-avx -msse3 -mno-ssse3 -mno-sse4.1 -mno-sse4.2 -ffunction-sections -fvisibility=hidden -fvisibility-inlines-hidden -O3 -DNDEBUG -DNDEBUG
-- C++ flags (Debug): -fsigned-char -W -Wall -Werror=return-type -Werror=non-virtual-dtor -Werror=address -Werror=sequence-point -Wformat -Werror=format-security -Wmissing-declarations -Wundef -Winit-self -Wpointer-arith -Wshadow -Wsign-promo -Wno-narrowing -Wno-delete-non-virtual-dtor -Wno-comment -fdiagnostics-show-option -Wno-long-long -pthread -fomit-frame-pointer -msse -msse2 -mno-avx -msse3 -mno-ssse3 -mno-sse4.1 -mno-sse4.2 -ffunction-sections -fvisibility=hidden -fvisibility-inlines-hidden -g -O0 -DDEBUG -D_DEBUG
-- C Compiler: /usr/bin/cc
-- C flags (Release): -fsigned-char -W -Wall -Werror=return-type -Werror=non-virtual-dtor -Werror=address -Werror=sequence-point -Wformat -Werror=format-security -Wmissing-declarations -Wmissing-prototypes -Wstrict-prototypes -Wundef -Winit-self -Wpointer-arith -Wshadow -Wno-narrowing -Wno-comment -fdiagnostics-show-option -Wno-long-long -pthread -fomit-frame-pointer -msse -msse2 -mno-avx -msse3 -mno-ssse3 -mno-sse4.1 -mno-sse4.2 -ffunction-sections -fvisibility=hidden -O3 -DNDEBUG -DNDEBUG
-- C flags (Debug): -fsigned-char -W -Wall -Werror=return-type -Werror=non-virtual-dtor -Werror=address -Werror=sequence-point -Wformat -Werror=format-security -Wmissing-declarations -Wmissing-prototypes -Wstrict-prototypes -Wundef -Winit-self -Wpointer-arith -Wshadow -Wno-narrowing -Wno-comment -fdiagnostics-show-option -Wno-long-long -pthread -fomit-frame-pointer -msse -msse2 -mno-avx -msse3 -mno-ssse3 -mno-sse4.1 -mno-sse4.2 -ffunction-sections -fvisibility=hidden -g -O0 -DDEBUG -D_DEBUG
-- Linker flags (Release):
-- Linker flags (Debug):
-- ccache: NO
-- Precompiled headers: NO
-- Extra dependencies: Qt5::Test Qt5::Concurrent Qt5::OpenGL /usr/lib/x86_64-linux-gnu/libwebp.so /usr/lib/x86_64-linux-gnu/libjasper.so /usr/lib/x86_64-linux-gnu/libImath.so /usr/lib/x86_64-linux-gnu/libIlmImf.so /usr/lib/x86_64-linux-gnu/libIex.so /usr/lib/x86_64-linux-gnu/libHalf.so /usr/lib/x86_64-linux-gnu/libIlmThread.so /usr/lib/libgdal.so dc1394 xine avcodec-ffmpeg avformat-ffmpeg avutil-ffmpeg swscale-ffmpeg Qt5::Core Qt5::Gui Qt5::Widgets /usr/lib/x86_64-linux-gnu/hdf5/serial/lib/libhdf5.so /usr/lib/x86_64-linux-gnu/libpthread.so /usr/lib/x86_64-linux-gnu/libsz.so /usr/lib/x86_64-linux-gnu/libdl.so /usr/lib/x86_64-linux-gnu/libm.so vtkRenderingOpenGL vtkImagingHybrid vtkIOImage vtkCommonDataModel vtkCommonMath vtkCommonCore vtksys vtkCommonMisc vtkCommonSystem vtkCommonTransforms vtkCommonExecutionModel vtkDICOMParser vtkIOCore /usr/lib/x86_64-linux-gnu/libz.so vtkmetaio /usr/lib/x86_64-linux-gnu/libjpeg.so /usr/lib/x86_64-linux-gnu/libpng.so /usr/lib/x86_64-linux-gnu/libtiff.so vtkImagingCore vtkRenderingCore vtkCommonColor vtkFiltersExtraction vtkFiltersCore vtkFiltersGeneral vtkCommonComputationalGeometry vtkFiltersStatistics vtkImagingFourier vtkalglib vtkFiltersGeometry vtkFiltersSources vtkInteractionStyle vtkRenderingLOD vtkFiltersModeling vtkIOPLY vtkIOGeometry /usr/lib/x86_64-linux-gnu/libjsoncpp.so vtkFiltersTexture vtkRenderingFreeType /usr/lib/x86_64-linux-gnu/libfreetype.so vtkftgl vtkIOExport vtkRenderingAnnotation vtkImagingColor vtkRenderingContext2D vtkRenderingGL2PS vtkRenderingContextOpenGL /usr/lib/libgl2ps.so vtkRenderingLabel dl m pthread rt /usr/lib/x86_64-linux-gnu/libGLU.so /usr/lib/x86_64-linux-gnu/libGL.so tbb
-- 3rdparty dependencies: libprotobuf
--
-- OpenCV modules:
-- To be built: core flann hdf imgproc ml photo reg surface_matching video viz dnn freetype fuzzy imgcodecs shape videoio highgui objdetect plot superres ts xobjdetect xphoto bgsegm bioinspired dpm face features2d line_descriptor saliency text calib3d ccalib cvv datasets rgbd stereo tracking videostab xfeatures2d ximgproc aruco optflow phase_unwrapping stitching structured_light java python2 python3
-- Disabled: world contrib_world
-- Disabled by dependency: -
-- Unavailable: cudaarithm cudabgsegm cudacodec cudafeatures2d cudafilters cudaimgproc cudalegacy cudaobjdetect cudaoptflow cudastereo cudawarping cudev cnn_3dobj matlab sfm
--
-- GUI:
-- QT 5.x: YES (ver 5.5.1)
-- QT OpenGL support: YES (Qt5::OpenGL 5.5.1)
-- OpenGL support: YES (/usr/lib/x86_64-linux-gnu/libGLU.so /usr/lib/x86_64-linux-gnu/libGL.so)
-- VTK support: YES (ver 6.2.0)
--
-- Media I/O:
-- ZLib: /usr/lib/x86_64-linux-gnu/libz.so (ver 1.2.8)
-- JPEG: /usr/lib/x86_64-linux-gnu/libjpeg.so (ver )
-- WEBP: /usr/lib/x86_64-linux-gnu/libwebp.so (ver encoder: 0x0202)
-- PNG: /usr/lib/x86_64-linux-gnu/libpng.so (ver 1.2.54)
-- TIFF: /usr/lib/x86_64-linux-gnu/libtiff.so (ver 42 - 4.0.6)
-- JPEG 2000: /usr/lib/x86_64-linux-gnu/libjasper.so (ver 1.900.1)
-- OpenEXR: /usr/lib/x86_64-linux-gnu/libImath.so /usr/lib/x86_64-linux-gnu/libIlmImf.so /usr/lib/x86_64-linux-gnu/libIex.so /usr/lib/x86_64-linux-gnu/libHalf.so /usr/lib/x86_64-linux-gnu/libIlmThread.so (ver 2.2.0)
-- GDAL: /usr/lib/libgdal.so
-- GDCM: NO
--
-- Video I/O:
-- DC1394 1.x: NO
-- DC1394 2.x: YES (ver 2.2.4)
-- FFMPEG: YES
-- avcodec: YES (ver 56.60.100)
-- avformat: YES (ver 56.40.101)
-- avutil: YES (ver 54.31.100)
-- swscale: YES (ver 3.1.101)
-- avresample: NO
-- GStreamer: NO
-- OpenNI: NO
-- OpenNI PrimeSensor Modules: NO
-- OpenNI2: NO
-- PvAPI: NO
-- GigEVisionSDK: NO
-- Aravis SDK: NO
-- UniCap: NO
-- UniCap ucil: NO
-- V4L/V4L2: NO/YES
-- XIMEA: NO
-- Xine: YES (ver 1.2.6)
-- gPhoto2: NO
--
-- Parallel framework: TBB (ver 4.4 interface 9002)
--
-- Other third-party libraries:
-- Use IPP: 9.0.1 [9.0.1]
-- at: /home/me/code/myproject/OpenCV-3.2.0/build/3rdparty/ippicv/ippicv_lnx
-- Use IPP Async: NO
-- Use VA: NO
-- Use Intel VA-API/OpenCL: NO
-- Use Lapack: NO
-- Use Eigen: YES (ver 3.2.92)
-- Use Cuda: NO
-- Use OpenCL: YES
-- Use OpenVX: NO
-- Use custom HAL: NO
--
-- OpenCL: <Dynamic loading of OpenCL library>
-- Include path: /home/me/code/myproject/OpenCV-3.2.0/3rdparty/include/opencl/1.2
-- Use AMDFFT: NO
-- Use AMDBLAS: NO
--
-- Python 2:
-- Interpreter: /home/me/.envs/myenv/bin/python (ver 2.7.12)
-- Libraries: /usr/lib/x86_64-linux-gnu/libpython2.7.so (ver 2.7.12)
-- numpy: /home/me/.envs/myenv/local/lib/python2.7/site-packages/numpy/core/include (ver 1.13.1)
-- packages path: lib/python2.7/site-packages
--
-- Python 3:
-- Interpreter: /usr/bin/python3 (ver 3.5.2)
-- Libraries: /usr/lib/x86_64-linux-gnu/libpython3.5m.so (ver 3.5.2)
-- numpy: /usr/lib/python3/dist-packages/numpy/core/include (ver 1.11.0)
-- packages path: lib/python3.5/dist-packages
--
-- Python (for build): /home/me/.envs/myenv/bin/python
--
-- Java:
-- ant: /usr/bin/ant (ver 1.9.6)
-- JNI: /usr/lib/jvm/default-java/include /usr/lib/jvm/default-java/include/linux /usr/lib/jvm/default-java/include
-- Java wrappers: YES
-- Java tests: YES
--
-- Matlab: Matlab not found or implicitly disabled
--
-- Documentation:
-- Doxygen: /usr/bin/doxygen (ver 1.8.11)
--
-- Tests and samples:
-- Tests: YES
-- Performance tests: YES
-- C/C++ Examples: YES
--
-- Install path: /usr/local
--
-- cvconfig.h is in: /home/me/code/myproject/OpenCV-3.2.0/build
-- -----------------------------------------------------------------
--
Unfortunately, while this works and I can import cv2 in the shell, it cannot read video using the above script, probably due to incorrect compilation or linking of ffmpeg? The confusing part is that the system-wide installation of OpenCV works fine, even without ffmpeg installed!
What am I doing wrong? How can I get OpenCV working with a virtualenv?
====
EDIT : Running the C++ video writing example has this result :
$ cd /home/me/code/myproject/OpenCV-3.2.0/build/bin
$ ./cpp-tutorial-video-write ../../samples/data/vtest.avi R Y
------------------------------------------------------------------------------
This program shows how to write video files.
You can extract the R or G or B color channel of the input video.
Usage:
./video-write [ R | G | B] [Y | N]
------------------------------------------------------------------------------
OpenCV: FFMPEG: tag 0xffffffff/'����' is not found (format 'avi / AVI (Audio Video Interleaved)')'
(cpp-tutorial-video-write:19523): GStreamer-CRITICAL **: gst_element_make_from_uri: assertion 'gst_uri_is_valid (uri)' failed
OpenCV Error: Unsupported format or combination of formats (Gstreamer Opencv backend does not support this codec.) in CvVideoWriter_GStreamer::open, file /home/me/code/myproject/OpenCV-3.2.0/modules/videoio/src/cap_gstreamer.cpp, line 1388
VIDEOIO(cvCreateVideoWriter_GStreamer(filename, fourcc, fps, frameSize, is_color)): raised OpenCV exception:
/home/me/code/myproject/OpenCV-3.2.0/modules/videoio/src/cap_gstreamer.cpp:1388: error: (-210) Gstreamer Opencv backend does not support this codec. in function CvVideoWriter_GStreamer::open
Could not open the output video for write: ../../samples/data/vtest.avi
And the opencv_test_videoio unit test reports the following: https://pastebin.com/q4mf224Q
However, running the C++ video starter example DOES work, with the following command and output; I can see the webcam working and streaming video in the highgui interface:
$ ./cpp-example-videocapture_starter 0
VIDEOIO ERROR: V4L: device 0: Unable to query number of channels
(ERROR)icvOpenAVI_XINE(): Unable to initialize video driver.
GStreamer: Error opening bin: no element "0"
press space to save a picture. q or esc to quit
init done
opengl support available
-
ffmpeg/libx264 C API : frames dropped from end of short MP4
19 July 2017, by Blake McConnell
In my C++ application, I am taking a series of JPEG images, manipulating their data using FreeImage, and then encoding the bitmaps as H264 using the ffmpeg/libx264 C API. The output is an MP4 which shows the series of 22 images at 12fps. My code is adapted from the "muxing" example that comes with the ffmpeg C source code.
My problem : no matter how I tune the codec parameters, a certain number of frames at the end of the sequence which are passed to the encoder do not appear in the final output. I’ve set the AVCodecContext parameters like this :
//set context params
ctx->codec_id = AV_CODEC_ID_H264;
ctx->bit_rate = 4000 * 1000;
ctx->width = _width;
ctx->height = _height;
ost->st->time_base = AVRational{ 1, 12 };
ctx->time_base = ost->st->time_base;
ctx->gop_size = 1;
ctx->pix_fmt = AV_PIX_FMT_YUV420P;
I have found that the higher the gop_size, the more frames are dropped from the end of the video. I can also see from the output that, with this gop size (where I'm essentially directing that all output frames be I frames), only 9 frames are written.
I'm not sure why this is occurring. I experimented with encoding duplicate frames and making a much longer video; this resulted in no frames being dropped. I know with the ffmpeg command line tool there is a concatenation command that accomplishes what I am trying to do, but I'm not sure how to accomplish the same goal using the C API.
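For context on that buffering: avcodec_encode_video2() delays packets, so after the last input frame the encoder may still be holding several encoded frames that are only returned once it is flushed by passing a NULL frame. A minimal sketch of such a drain loop, using the same API calls as the MP4Writer code below (the helper name FlushEncoder is only illustrative):
static void FlushEncoder( AVCodecContext * ctx, AVFormatContext * formatCtx, AVStream * stream )
{
    int gotPacket = 1;
    while ( gotPacket ) {
        AVPacket pkt = { 0 };
        av_init_packet( &pkt );
        // passing NULL instead of a frame asks the encoder to hand back its buffered packets
        if ( avcodec_encode_video2( ctx, &pkt, NULL, &gotPacket ) < 0 )
            break;
        if ( gotPacket ) {
            av_packet_rescale_ts( &pkt, ctx->time_base, stream->time_base );
            pkt.stream_index = stream->index;
            av_interleaved_write_frame( formatCtx, &pkt );
            av_packet_unref( &pkt );
        }
    }
}
A drain like this would typically run before av_write_trailer(), for example at the start of Finalize(), so that any delayed packets still reach the file.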
Here’s the output I’m getting from the console :
[libx264 @ 026d81c0] using cpu capabilities : MMX2 SSE2Fast SSSE3 SSE4.2 AVX FMA3 BMI2 AVX2
[libx264 @ 026d81c0] profile High, level 3.1
[libx264 @ 026d81c0] 264 - core 152 r2851 ba24899 - H.264/MPEG-4 AVC codec - Copyleft 2003-2017 - http://www.videolan.org/x264.html - options: cabac=1 ref=1 deblock=1:0:0 analyse=0x3:0x113 me=hex subme=7 psy=1 psy_rd=1.00:0.00 mixed_ref=0 me_range=16 chroma_me=1 trellis=1 8x8dct=1 cqm=0 deadzone=21,11 fast_pskip=1 chroma_qp_offset=-2 threads=12 lookahead_threads=2 sliced_threads=0 nr=0 decimate=1 interlaced=0 bluray_compat=0 constrained_intra=0 bframes=0 weightp=0 keyint=1 keyint_min=1 scenecut=40 intra_refresh=0 rc=abr mbtree=0 bitrate=4000 ratetol=1.0 qcomp=0.60 qpmin=0 qpmax=69 qpstep=4 ip_ratio=1.40 aq=1:1.00
Output #0, mp4, to '....\images\c411a991-46f6-400c-8bb0-77af3738559a.mp4':
    Stream #0:0: Video: h264, yuv420p, 700x700, q=2-31, 4000 kb/s, 12 tbn
[libx264 @ 026d81c0] frame I:9 Avg QP:17.83 size:111058
[libx264 @ 026d81c0] mb I I16..4: 1.9% 47.7% 50.5%
[libx264 @ 026d81c0] final ratefactor: 19.14
[libx264 @ 026d81c0] 8x8 transform intra:47.7%
[libx264 @ 026d81c0] coded y,uvDC,uvAC intra: 98.4% 96.9% 89.5%
[libx264 @ 026d81c0] i16 v,h,dc,p: 64% 6% 2% 28%
[libx264 @ 026d81c0] i8 v,h,dc,ddl,ddr,vr,hd,vl,hu: 32% 15% 9% 5% 5% 6% 8% 10% 10%
[libx264 @ 026d81c0] i4 v,h,dc,ddl,ddr,vr,hd,vl,hu: 28% 18% 7% 6% 8% 8% 8% 9% 8%
[libx264 @ 026d81c0] i8c dc,h,v,p: 43% 22% 25% 10%
[libx264 @ 026d81c0] kb/s:10661.53
Code included below:
MP4Writer.h
#ifndef MPEG_WRITER
#define MPEG_WRITER
#include <iostream>
#include <string>
#include <vector>
#include
extern "C" {
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
#include <libswscale/swscale.h>
}
typedef struct OutputStream
{
AVStream *st;
AVCodecContext *enc;
//pts of the next frame that will be generated
int64_t next_pts;
int samples_count;
AVFrame *frame;
AVFrame *tmp_frame;
float t, tincr, tincr2;
struct SwsContext *sws_ctx;
struct SwrContext *swr_ctx;
};
class MP4Writer {
public:
MP4Writer();
void Init();
int16_t SetOutput( const std::string & path );
int16_t AddFrame( uint8_t * imgData );
int16_t Write( std::vector<ImgData> & imgData );
int16_t Finalize();
void SetHeight( const int height ) { _height = _width = height; } //assuming 1:1 aspect ratio
private:
int16_t AddStream( OutputStream * ost, AVFormatContext * formatCtx, AVCodec ** codec, enum AVCodecID codecId );
int16_t OpenVideo( AVFormatContext * formatCtx, AVCodec *codec, OutputStream * ost, AVDictionary * optArg );
static AVFrame * AllocPicture( enum AVPixelFormat pixFmt, int width, int height );
static AVFrame * GetVideoFrame( uint8_t * imgData, OutputStream * ost, const int width, const int height );
static int WriteFrame( AVFormatContext * formatCtx, const AVRational * timeBase, AVStream * stream, AVPacket * packet );
int _width;
int _height;
OutputStream _ost;
AVFormatContext * _formatCtx;
AVDictionary * _dict;
};
#endif //MPEG_WRITER
MP4Writer.cpp
#include
#include <algorithm>
MP4Writer::MP4Writer()
{
_width = 0;
_height = 0;
}
void MP4Writer::Init()
{
av_register_all();
}
/**
sets up output stream for the specified path.
note that the output format is deduced automatically from the file extension passed
@param path: output file path
@returns: -1 = output could not be deduced, -2 = invalid codec, -3 = error opening output file,
-4 = error writing header
*/
int16_t MP4Writer::SetOutput( const std::string & path )
{
int error;
AVCodec * codec;
AVOutputFormat * format;
_ost = OutputStream{}; //TODO reset state in a more focused way?
//allocate output media context
avformat_alloc_output_context2( &_formatCtx, NULL, NULL, path.c_str() );
if ( !_formatCtx ) {
std::cout << "could not deduce output format from file extension. aborting" << std::endl;
return -1;
}
//set format
format = _formatCtx->oformat;
if ( format->video_codec != AV_CODEC_ID_NONE ) {
AddStream( &_ost, _formatCtx, &codec, format->video_codec );
}
else {
std::cout << "there is no video codec set. aborting" << std::endl;
return -2;
}
OpenVideo( _formatCtx, codec, &_ost, _dict );
av_dump_format( _formatCtx, 0, path.c_str(), 1 );
//open output file
if ( !( format->flags & AVFMT_NOFILE )) {
error = avio_open( &_formatCtx->pb, path.c_str(), AVIO_FLAG_WRITE );
if ( error < 0 ) {
std::cout << "there was an error opening output file " << path << ". aborting" << std::endl;
return -3;
}
}
//write header
error = avformat_write_header( _formatCtx, &_dict );
if ( error < 0 ) {
std::cout << "an error occurred writing header. aborting" << std::endl;
return -4;
}
return 0;
}
/**
initialize the output stream
@param ost: the output stream
@param formatCtx: the context format
@param codec: the output codec
@param codec: the ffmpeg enumerated id of the codec
@returns: -1 = encoder not found, -2 = stream could not be allocated, -3 = encoding context could not be allocated
*/
int16_t MP4Writer::AddStream( OutputStream * ost, AVFormatContext * formatCtx, AVCodec ** codec, enum AVCodecID codecId )
{
AVCodecContext * ctx; //TODO not sure why this is here, could just set ost->enc directly
int i;
//detect the encoder
*codec = avcodec_find_encoder( codecId );
if ( (*codec) == NULL ) {
std::cout << "could not find encoder. aborting" << std::endl;
return -1;
}
//allocate stream
ost->st = avformat_new_stream( formatCtx, NULL );
if ( ost->st == NULL ) {
std::cout << "could not allocate stream. aborting" << std::endl;
return -2;
}
//allocate encoding context
ost->st->id = formatCtx->nb_streams - 1;
ctx = avcodec_alloc_context3( *codec );
if ( ctx == NULL ) {
std::cout << "could not allocate encoding context. aborting" << std::endl;
return -3;
}
ost->enc = ctx;
//set context params
ctx->codec_id = AV_CODEC_ID_H264;
ctx->bit_rate = 4000 * 1000;
ctx->width = _width;
ctx->height = _height;
ost->st->time_base = AVRational{ 1, 12 };
ctx->time_base = ost->st->time_base;
ctx->gop_size = 1;
ctx->pix_fmt = AV_PIX_FMT_YUV420P;
//if neccesary, set stream headers and formats separately
if ( formatCtx->oformat->flags & AVFMT_GLOBALHEADER ) {
std::cout << "setting stream and headers to be separate" << std::endl;
ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
return 0;
}
/**
open the video for writing
@param formatCtx: the format context
@param codec: output codec
@param ost: output stream
@param optArg: dictionary
@return: -1 = error opening codec, -2 = allocate new frame, -3 = copy stream params
*/
int16_t MP4Writer::OpenVideo( AVFormatContext * formatCtx, AVCodec *codec, OutputStream * ost, AVDictionary * optArg )
{
int error;
AVCodecContext * ctx = ost->enc;
AVDictionary * dict = NULL;
av_dict_copy( &dict, optArg, 0 );
//open codec
error = avcodec_open2( ctx, codec, &dict );
av_dict_free( &dict );
if ( error < 0 ) {
std::cout << "there was an error opening the codec. aborting" << std::endl;
return -1;
}
//allocate new frame
ost->frame = AllocPicture( ctx->pix_fmt, ctx->width, ctx->height );
if ( ost->frame == NULL ) {
std::cout << "there was an error allocating a new frame. aborting" << std::endl;
return -2;
}
//copy steam params
error = avcodec_parameters_from_context( ost->st->codecpar, ctx );
if ( error < 0 ) {
std::cout << "could not copy stream parameters. aborting" << std::endl;
return -3;
}
return 0;
}
/**
allocate a new frame
@param pixFmt: ffmpeg enumerated pixel format
@param width: output width
@param height: output height
@returns: an inititalized frame
*/
AVFrame * MP4Writer::AllocPicture( enum AVPixelFormat pixFmt, int width, int height )
{
AVFrame * picture;
int error;
//allocate the frame
picture = av_frame_alloc();
if ( picture == NULL ) {
std::cout << "there was an error allocating the picture" << std::endl;
return NULL;
}
picture->format = pixFmt;
picture->width = width;
picture->height = height;
//allocate the frame's data buffer
error = av_frame_get_buffer( picture, 32 );
if ( error < 0 ) {
std::cout << "could not allocate frame data" << std::endl;
return NULL;
}
picture->pts = 0;
return picture;
}
/**
convert raw RGB buffer to YUV frame
@return: frame that contains image data
*/
AVFrame * MP4Writer::GetVideoFrame( uint8_t * imgData, OutputStream * ost, const int width, const int height )
{
int error;
AVCodecContext * ctx = ost->enc;
//prepare the frame
error = av_frame_make_writable( ost->frame );
if ( error < 0 ) {
std::cout << "could not make frame writeable" << std::endl;
return NULL;
}
//TODO set this context one time per run, or even better, one time at init
//convert RGB to YUV
struct SwsContext* fooContext = sws_getContext( width, height, AV_PIX_FMT_BGR24,
width, height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL );
int inLinesize[1] = { 3 * width }; // RGB stride
uint8_t * inData[1] = { imgData };
int sliceHeight = sws_scale( fooContext, inData, inLinesize, 0, height, ost->frame->data, ost->frame->linesize );
sws_freeContext( fooContext );
ost->frame->pts = ost->next_pts++;
//TODO does the frame need to be returned here as it is available at the class level?
return ost->frame;
}
/**
write frame to file
@param formatCtx: the output format context
@param timeBase: the framerate
@param stream: output stream
@param packet: data packet
@returns: see return values for av_interleaved_write_frame
*/
int MP4Writer::WriteFrame( AVFormatContext * formatCtx, const AVRational * timeBase, AVStream * stream, AVPacket * packet )
{
av_packet_rescale_ts( packet, *timeBase, stream->time_base );
packet->stream_index = stream->index;
//write compressed file to media file
return av_interleaved_write_frame( formatCtx, packet );
}
int16_t MP4Writer::Write( std::vector<ImgData> & imgData )
{
int16_t errorCount = 0;
int16_t retVal = 0;
bool countingUp = true;
size_t i = 0;
while ( true ) {
//don't show first frame again when counting back down
if ( !countingUp && i == 0 ) {
break;
}
uint8_t * pixels = imgData[i].GetBits( imgData[i].mp4Input );
AddFrame( pixels );
//handle inc/dec without repeating last frame
if ( countingUp ) {
if ( i == imgData.size() -1 ) {
countingUp = false;
i--;
}
else {
i++;
}
}
else {
i--;
}
}
Finalize();
return 0; //TODO return error code
}
/**
add another frame to output video
@param imgData: the raw image data
@returns -1 = error encoding video frame, -2 = error writing frame
*/
int16_t MP4Writer::AddFrame( uint8_t * imgData )
{
int error;
AVCodecContext * ctx;
AVFrame * frame;
int gotPacket = 0;
AVPacket pkt = { 0 };
ctx = _ost.enc;
av_init_packet( &pkt );
frame = GetVideoFrame( imgData, &_ost, _width, _height );
//encode the image
error = avcodec_encode_video2( ctx, &pkt, frame, &gotPacket );
if ( error < 0 ) {
std::cout << "there was an error encoding the video frame" << std::endl;
return -1;
}
//write the frame. NOTE: this doesn't kick in until the encoder has received a certain number of frames
if ( gotPacket ) {
error = WriteFrame( _formatCtx, &ctx->time_base, _ost.st, &pkt );
if ( error < 0 ) {
std::cout << "the video frame could not be written" << std::endl;
return -2;
}
}
return 0;
}
/**
finalize output video and cleanup
*/
int16_t MP4Writer::Finalize()
{
av_write_trailer( _formatCtx );
avcodec_free_context( &_ost.enc );
av_frame_free( &_ost.frame);
av_frame_free( &_ost.tmp_frame );
avio_closep( &_formatCtx->pb );
avformat_free_context( _formatCtx );
sws_freeContext( _ost.sws_ctx );
swr_free( &_ost.swr_ctx);
return 0;
}
usage
#include
#include
#include <vector>
struct ImgData
{
unsigned int width;
unsigned int height;
std::string path;
FIBITMAP * mp4Input;
uint8_t * GetBits( FIBITMAP * bmp ) { return FreeImage_GetBits( bmp ); }
};
int main()
{
std::vector<ImgData> imgDataVec;
//load images and push to imgDataVec
MP4Writer mp4Writer;
mp4Writer.SetHeight( 1200 ); //assumes 1:1 aspect ratio
mp4Writer.Init();
mp4Writer.SetOutput( "test.mp4" );
mp4Writer.Write( imgDataVec );
}