
Media (1)
-
Ogg detection bug
22 March 2013, by
Updated: April 2013
Language: French
Type: Video
Other articles (112)
-
Customizing by adding your logo, banner or background image
5 September 2013, by
Some themes support three customization elements: adding a logo; adding a banner; adding a background image.
-
Customizing categories
21 June 2013, by
Category creation form
For those who know SPIP well, a category can be thought of as a section (rubrique).
For a document of type category, the fields offered by default are: Text
This form can be modified under:
Administration > Configuration of form masks.
For a document of type media, the fields not displayed by default are: Short description
It is also in this configuration area that you can indicate the (...)
-
Publishing on MédiaSpip
13 June 2013
Can I post content from an iPad tablet?
Yes, if your installed MédiaSpip is at version 0.2 or higher. If necessary, contact the administrator of your MédiaSpip to find out.
On other sites (10829)
-
FFmpeg "movflags" > "faststart" causes invalid MP4 file to be written
22 August 2016, by williamtroup
I'm setting up the format layout for the video as follows:
AVOutputFormat* outputFormat = ffmpeg.av_guess_format(null, "output.mp4", null);
AVCodec* videoCodec = ffmpeg.avcodec_find_encoder(outputFormat->video_codec);
AVFormatContext* formatContext = ffmpeg.avformat_alloc_context();
formatContext->oformat = outputFormat;
formatContext->video_codec_id = videoCodec->id;
ffmpeg.avformat_new_stream(formatContext, videoCodec);
This is how I am setting up the codec context:
AVCodecContext* codecContext = ffmpeg.avcodec_alloc_context3(videoCodec);
codecContext->bit_rate = 400000;
codecContext->width = 1280;
codecContext->height = 720;
codecContext->gop_size = 12;
codecContext->max_b_frames = 1;
codecContext->pix_fmt = videoCodec->pix_fmts[0];
codecContext->codec_id = videoCodec->id;
codecContext->codec_type = videoCodec->type;
codecContext->time_base = new AVRational
{
num = 1,
den = 30
};
I'm using the following code to set up the "movflags" > "faststart" option for the header of the video:
AVDictionary* options = null;
int result = ffmpeg.av_dict_set(&options, "movflags", "faststart", 0);
int writeHeaderResult = ffmpeg.avformat_write_header(formatContext, &options);
The file is opened and the header is written as follows:
if ((formatContext->oformat->flags & ffmpeg.AVFMT_NOFILE) == 0)
{
int ioOptionResult = ffmpeg.avio_open(&formatContext->pb, "output.mp4", ffmpeg.AVIO_FLAG_WRITE);
}
int writeHeaderResult = ffmpeg.avformat_write_header(formatContext, &options);
After this, I write each video frame as follows:
outputFrame->pts = frameIndex;
packet.flags |= ffmpeg.AV_PKT_FLAG_KEY;
packet.pts = frameIndex;
packet.dts = frameIndex;
int encodedFrame = 0;
int encodeVideoResult = ffmpeg.avcodec_encode_video2(codecContext, &packet, outputFrame, &encodedFrame);
if (encodedFrame != 0)
{
packet.pts = ffmpeg.av_rescale_q(packet.pts, codecContext->time_base, m_videoStream->time_base);
packet.dts = ffmpeg.av_rescale_q(packet.dts, codecContext->time_base, m_videoStream->time_base);
packet.stream_index = m_videoStream->index;
if (codecContext->coded_frame->key_frame > 0)
{
packet.flags |= ffmpeg.AV_PKT_FLAG_KEY;
}
int writeFrameResult = ffmpeg.av_interleaved_write_frame(formatContext, &packet);
}
After that, I write the trailer:
int writeTrailerResult = ffmpeg.av_write_trailer(formatContext);
The file finishes writing and everything closes and frees up correctly. However, the MP4 file is unplayable (even VLC can't play it). AtomicParsley.exe won't show any information about the file either.
The DLLs used for the AutoGen library are:
avcodec-56.dll
avdevice-56.dll
avfilter-5.dll
avformat-56.dll
avutil-54.dll
postproc-53.dll
swresample-1.dll
swscale-3.dll
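Editorial note on the header-writing sequence shown above, hedged: as excerpted, avformat_write_header() is called twice, and the first call happens before avio_open(), so the muxer has no AVIOContext to write into. In the plain C API (which FFmpeg.AutoGen mirrors one-to-one) the header is written exactly once, after the output has been opened, with the movflags dictionary passed to that single call. A minimal sketch of that ordering, assuming the format/codec setup from the question:

#include <libavformat/avformat.h>   // avio_open, avformat_write_header
#include <libavutil/dict.h>         // av_dict_set, av_dict_free

// Sketch only: open the output first, then write the header once with faststart.
static int openAndWriteHeader(AVFormatContext* formatContext, const char* path)
{
    AVDictionary* options = NULL;
    av_dict_set(&options, "movflags", "faststart", 0);

    if (!(formatContext->oformat->flags & AVFMT_NOFILE)) {
        int ioResult = avio_open(&formatContext->pb, path, AVIO_FLAG_WRITE);
        if (ioResult < 0)
            return ioResult;                 // nothing can be muxed without an IO context
    }

    // Exactly one header write, after the IO context exists, with the options attached.
    int headerResult = avformat_write_header(formatContext, &options);
    av_dict_free(&options);                  // entries the muxer did not consume remain here
    return headerResult;
}

With +faststart the mov muxer still writes the moov atom at the end and then relocates it to the front while av_write_trailer() runs, so the output must stay seekable until the trailer is written; checking every return value as above is also what would surface a missing IO context early.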
-
ffmpeg: missing frames with mp4 encoding
6 July 2016, by Sierra
I'm currently developing a desktop app that generates videos from pictures (QImage, to be more specific). I'm working with Qt 5.6 and the latest build of ffmpeg (build git-0a9e781 (2016-06-10)).
I encode several QImages to create an .mp4 video. I already have an output, but it seems that some frames are missing.
Here is my code. I tried to be as clear as possible, removing comments and error handling.
## INITIALIZATION
#####################################################################
AVOutputFormat * outputFormat = Q_NULLPTR;
AVFormatContext * formatContext = Q_NULLPTR;
// filePath: "C:/Users/.../qt_temp.Jv7868.mp4"
avformat_alloc_output_context2(&formatContext, NULL, NULL, filePath.data());
outputFormat = formatContext->oformat;
if (outputFormat->video_codec != AV_CODEC_ID_NONE) {
// Finding a registered encoder with a matching codec ID...
*codec = avcodec_find_encoder(outputFormat->video_codec);
// Adding a new stream to a media file...
stream = avformat_new_stream(formatContext, *codec);
stream->id = formatContext->nb_streams - 1;
AVCodecContext * codecContext = avcodec_alloc_context3(*codec);
switch ((*codec)->type) {
case AVMEDIA_TYPE_VIDEO:
codecContext->codec_id = outputFormat->video_codec;
codecContext->bit_rate = 400000;
codecContext->width = 1240;
codecContext->height = 874;
// Timebase: this is the fundamental unit of time (in seconds) in terms of which frame
// timestamps are represented. For fixed-fps content, timebase should be 1/framerate
// and timestamp increments should be identical to 1.
stream->time_base = (AVRational){1, 24};
codecContext->time_base = stream->time_base;
// Emit 1 intra frame every 12 frames at most
codecContext->gop_size = 12;
codecContext->pix_fmt = AV_PIX_FMT_YUV420P;
if (codecContext->codec_id == AV_CODEC_ID_H264) {
av_opt_set(codecContext->priv_data, "preset", "slow", 0);
}
break;
}
if (formatContext->oformat->flags & AVFMT_GLOBALHEADER) {
codecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
}
avcodec_open2(codecContext, codec, NULL);
// Allocating and initializing a re-usable frames...
frame = allocPicture(codecContext->width, codecContext->height, codecContext->pix_fmt);
tmpFrame = allocPicture(codecContext->width, codecContext->height, AV_PIX_FMT_BGRA);
avcodec_parameters_from_context(stream->codecpar, codecContext);
av_dump_format(formatContext, 0, filePath.data(), 1);
if (!(outputFormat->flags & AVFMT_NOFILE)) {
avio_open(&formatContext->pb, filePath.data(), AVIO_FLAG_WRITE);
}
// Writing the stream header, if any...
avformat_write_header(formatContext, NULL);
## RECEIVING A NEW FRAME
#####################################################################
// New QImage received: QImage image
const qint32 width = image.width();
const qint32 height = image.height();
// When we pass a frame to the encoder, it may keep a reference to it internally;
// make sure we do not overwrite it here!
av_frame_make_writable(tmpFrame);
for (qint32 y = 0; y < height; y++) {
const uint8_t * scanline = image.scanLine(y);
for (qint32 x = 0; x < width * 4; x++) {
tmpFrame->data[0][y * tmpFrame->linesize[0] + x] = scanline[x];
}
}
// As we only generate a BGRA picture, we must convert it to the
// codec pixel format if needed.
if (!swsCtx) {
swsCtx = sws_getContext(width, height,
AV_PIX_FMT_BGRA,
codecContext->width, codecContext->height,
codecContext->pix_fmt,
swsFlags, NULL, NULL, NULL);
}
sws_scale(swsCtx,
(const uint8_t * const *)tmpFrame->data,
tmpFrame->linesize,
0,
codecContext->height,
frame->data,
frame->linesize);
...
AVFrame * frame = Q_NULLPTR;
int gotPacket = 0;
av_init_packet(&packet);
// Packet data will be allocated by the encoder
this->packet.data = NULL;
this->packet.size = 0;
frame->pts = nextPts++; // nextPts starts to 0
avcodec_encode_video2(codecContext, &packet, frame, &gotPacket);
if (gotPacket) {
if (codecContext->coded_frame->key_frame) {
packet.flags |= AV_PKT_FLAG_KEY;
}
// Rescale output packet timestamp values from codec to stream timebase
av_packet_rescale_ts(&packet, codecContext->time_base, stream->time_base);
packet.stream_index = stream->index;
// Write the compressed frame to the media file.
av_interleaved_write_frame(formatContext, &packet);
av_packet_unref(&packet);
}
## FINISHING ENCODING
#####################################################################
// Retrieving delayed frames if any...
for (int gotOutput = 1; gotOutput;) {
avcodec_encode_video2(codecContext, &packet, NULL, &gotOutput);
if (gotOutput) {
// Rescale output packet timestamp values from codec to stream timebase
av_packet_rescale_ts(&packet, codecContext->time_base, stream->time_base);
packet.stream_index = stream->index;
// Write the compressed frame to the media file.
av_interleaved_write_frame(formatContext, &packet);
av_packet_unref(&packet);
}
}
av_write_trailer(formatContext);
avcodec_free_context(&codecContext);
av_frame_free(&frame);
av_frame_free(&tmpFrame);
sws_freeContext(swsCtx);
if (!(outputFormat->flags & AVFMT_NOFILE)) {
// Closing the output file...
avio_closep(&formatContext->pb);
}
avformat_free_context(formatContext);
A part of the last second is always cut (e.g. when I send 48 frames at 24 fps, media players show 1.9 seconds of the video). I analyzed the video (48 frames, 24 fps) with ffmpeg on the command line, and I found something weird:
When I re-encode the video with ffmpeg (on the command line) to the same format, I get a more logical output:
From what I read in different topics, I think it is closely connected to the h264 codec, but I have no idea how to fix it. I'm not familiar with ffmpeg, so any kind of help would be highly appreciated. Thank you.
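A hedged aside on the 1.9-second reading, not a confirmed diagnosis: with 48 frames at 24 fps the last packet's pts is 47, and a player that derives the duration from the final timestamp alone will report 47/24 ≈ 1.96 s. Giving each packet an explicit one-frame duration in the codec time base (av_packet_rescale_ts() converts duration along with pts/dts) is one way to let the container advertise the full 2 s. The names below are the question's own, and this is only a sketch of the idea:

    frame->pts = nextPts++;                                  // one tick per frame in codecContext->time_base
    avcodec_encode_video2(codecContext, &packet, frame, &gotPacket);
    if (gotPacket) {
        packet.duration = 1;                                 // one frame, in codec time-base units
        av_packet_rescale_ts(&packet, codecContext->time_base, stream->time_base);
        packet.stream_index = stream->index;
        av_interleaved_write_frame(formatContext, &packet);
        av_packet_unref(&packet);
    }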
EDIT 06/07/2016
Digging a little bit more into the ffmpeg examples, I noticed these lines when closing the media file:
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
...
/* add sequence end code to have a real mpeg file */
fwrite(endcode, 1, sizeof(endcode), f);
Could that sequence be linked to my problem? I'm trying to implement this in my code but, for now, it corrupts the media file. Any idea how I can implement that line in my case?
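Editorial note on those lines, hedged: they come from FFmpeg's raw-encoding example, where packets are written straight to a FILE* with fwrite() and the four bytes 0x00 0x00 0x01 0xB7 terminate a raw MPEG elementary stream. A rough reconstruction of that context is sketched below; when packets instead go through av_interleaved_write_frame() into an .mp4, there is no raw file handle, and av_write_trailer() already performs the equivalent finalization, so appending the end code to an MP4 has no defined meaning (which is consistent with the corruption observed).

#include <stdio.h>
#include <stdint.h>

// Hedged reconstruction of the example the endcode belongs to: a raw
// elementary stream written with stdio, not an avformat-muxed container.
static void finishRawMpegFile(FILE* f)
{
    // ... fwrite(pkt.data, 1, pkt.size, f) for every packet, including delayed ones ...
    static const uint8_t endcode[] = { 0, 0, 1, 0xb7 };  // MPEG sequence_end_code
    fwrite(endcode, 1, sizeof(endcode), f);              // only meaningful for raw .mpg output
    fclose(f);
}
// For the MP4 path in this question, av_write_trailer(formatContext) is the
// counterpart: it finalizes the container (moov atom, indexes) with no end code.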
-
ffmpeg avcodec_encode_video2 hangs when using Quick Sync h264_qsv encoder
11 January 2017, by Mike Simpson
When I use the mpeg4 or h264 encoders, I am able to successfully encode images to make a valid AVI file using the API for ffmpeg 3.1.0. However, when I use the Quick Sync encoder (h264_qsv), avcodec_encode_video2 will hang some of the time. I found that when using images that are 1920x1080, it was rare that avcodec_encode_video2 would hang. When using 256x256 images, it was very likely that the function would hang.
I have created the test code below that demonstrates the hang of avcodec_encode_video2. The code will create a 1000 frame, 256x256 AVI with a bit rate of 400000. The frames are simply allocated, so the output video should just be green frames.
The problem was observed using Windows 7 and Windows 10, using the 32-bit or 64-bit test application.
If anyone has any idea how I can avoid the avcodec_encode_video2 hang, I would be very grateful! Thanks in advance for any assistance.
extern "C"
{
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include "avcodec.h"
#include "avformat.h"
#include "swscale.h"
#include "avutil.h"
#include "imgutils.h"
#include "opt.h"
#include
}
#include <iostream>
// Globals
AVCodec* m_pCodec = NULL;
AVStream *m_pStream = NULL;
AVOutputFormat* m_pFormat = NULL;
AVFormatContext* m_pFormatContext = NULL;
AVCodecContext* m_pCodecContext = NULL;
AVFrame* m_pFrame = NULL;
int m_frameIndex;
// Output format
AVPixelFormat m_pixType = AV_PIX_FMT_NV12;
// Use for mpeg4
//AVPixelFormat m_pixType = AV_PIX_FMT_YUV420P;
// Output frame rate
int m_frameRate = 30;
// Output image dimensions
int m_imageWidth = 256;
int m_imageHeight = 256;
// Number of frames to export
int m_frameCount = 1000;
// Output file name
const char* m_fileName = "c:/test/test.avi";
// Output file type
const char* m_fileType = "AVI";
// Codec name used to encode
const char* m_encoderName = "h264_qsv";
// use for mpeg4
//const char* m_encoderName = "mpeg4";
// Target bit rate
int m_targetBitRate = 400000;
void addVideoStream()
{
m_pStream = avformat_new_stream( m_pFormatContext, m_pCodec );
m_pStream->id = m_pFormatContext->nb_streams - 1;
m_pStream->time_base = m_pCodecContext->time_base;
m_pStream->codec->pix_fmt = m_pixType;
m_pStream->codec->flags = m_pCodecContext->flags;
m_pStream->codec->width = m_pCodecContext->width;
m_pStream->codec->height = m_pCodecContext->height;
m_pStream->codec->time_base = m_pCodecContext->time_base;
m_pStream->codec->bit_rate = m_pCodecContext->bit_rate;
}
AVFrame* allocatePicture( enum AVPixelFormat pix_fmt, int width, int height )
{
AVFrame *frame;
frame = av_frame_alloc();
if ( !frame )
{
return NULL;
}
frame->format = pix_fmt;
frame->width = width;
frame->height = height;
int checkImage = av_image_alloc( frame->data, frame->linesize, width, height, pix_fmt, 32 );
if ( checkImage < 0 )
{
return NULL;
}
return frame;
}
bool initialize()
{
AVRational frameRate;
frameRate.den = m_frameRate;
frameRate.num = 1;
av_register_all();
m_pCodec = avcodec_find_encoder_by_name(m_encoderName);
if( !m_pCodec )
{
return false;
}
m_pCodecContext = avcodec_alloc_context3( m_pCodec );
m_pCodecContext->width = m_imageWidth;
m_pCodecContext->height = m_imageHeight;
m_pCodecContext->time_base = frameRate;
m_pCodecContext->gop_size = 0;
m_pCodecContext->pix_fmt = m_pixType;
m_pCodecContext->codec_id = m_pCodec->id;
m_pCodecContext->bit_rate = m_targetBitRate;
av_opt_set( m_pCodecContext->priv_data, "+CBR", "", 0 );
return true;
}
bool startExport()
{
m_frameIndex = 0;
char fakeFileName[512];
int checkAllocContext = avformat_alloc_output_context2( &m_pFormatContext, NULL, m_fileType, fakeFileName );
if ( checkAllocContext < 0 )
{
return false;
}
if ( !m_pFormatContext )
{
return false;
}
m_pFormat = m_pFormatContext->oformat;
if ( m_pFormat->video_codec != AV_CODEC_ID_NONE )
{
addVideoStream();
int checkOpen = avcodec_open2( m_pCodecContext, m_pCodec, NULL );
if ( checkOpen < 0 )
{
return false;
}
m_pFrame = allocatePicture( m_pCodecContext->pix_fmt, m_pCodecContext->width, m_pCodecContext->height );
if( !m_pFrame )
{
return false;
}
m_pFrame->pts = 0;
}
int checkOpen = avio_open( &m_pFormatContext->pb, m_fileName, AVIO_FLAG_WRITE );
if ( checkOpen < 0 )
{
return false;
}
av_dict_set( &(m_pFormatContext->metadata), "title", "QS Test", 0 );
int checkHeader = avformat_write_header( m_pFormatContext, NULL );
if ( checkHeader < 0 )
{
return false;
}
return true;
}
int processFrame( AVPacket& avPacket )
{
avPacket.stream_index = 0;
avPacket.pts = av_rescale_q( m_pFrame->pts, m_pStream->codec->time_base, m_pStream->time_base );
avPacket.dts = av_rescale_q( m_pFrame->pts, m_pStream->codec->time_base, m_pStream->time_base );
m_pFrame->pts++;
int retVal = av_interleaved_write_frame( m_pFormatContext, &avPacket );
return retVal;
}
bool exportFrame()
{
int success = 1;
int result = 0;
AVPacket avPacket;
av_init_packet( &avPacket );
avPacket.data = NULL;
avPacket.size = 0;
fflush(stdout);
std::cout << "Before avcodec_encode_video2 for frame: " << m_frameIndex << std::endl;
success = avcodec_encode_video2( m_pCodecContext, &avPacket, m_pFrame, &result );
std::cout << "After avcodec_encode_video2 for frame: " << m_frameIndex << std::endl;
if( result )
{
success = processFrame( avPacket );
}
av_packet_unref( &avPacket );
m_frameIndex++;
return ( success == 0 );
}
void endExport()
{
int result = 0;
int success = 0;
if (m_pFrame)
{
while ( success == 0 )
{
AVPacket avPacket;
av_init_packet( &avPacket );
avPacket.data = NULL;
avPacket.size = 0;
fflush(stdout);
success = avcodec_encode_video2( m_pCodecContext, &avPacket, NULL, &result );
if( result )
{
success = processFrame( avPacket );
}
av_packet_unref( &avPacket );
if (!result)
{
break;
}
}
}
if (m_pFormatContext)
{
av_write_trailer( m_pFormatContext );
if( m_pFrame )
{
av_frame_free( &m_pFrame );
}
avio_closep( &m_pFormatContext->pb );
avformat_free_context( m_pFormatContext );
m_pFormatContext = NULL;
}
}
void cleanup()
{
if( m_pFrame || m_pCodecContext )
{
if( m_pFrame )
{
av_frame_free( &m_pFrame );
}
if( m_pCodecContext )
{
avcodec_close( m_pCodecContext );
av_free( m_pCodecContext );
}
}
}
int main()
{
bool success = true;
if (initialize())
{
if (startExport())
{
for (int loop = 0; loop < m_frameCount; loop++)
{
if (!exportFrame())
{
std::cout << "Failed to export frame\n";
success = false;
break;
}
}
endExport();
}
else
{
std::cout << "Failed to start export\n";
success = false;
}
cleanup();
}
else
{
std::cout << "Failed to initialize export\n";
success = false;
}
if (success)
{
std::cout << "Successfully exported file\n";
}
return 1;
}
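An editorial sketch rather than a verified fix: FFmpeg 3.1 also ships the avcodec_send_frame()/avcodec_receive_packet() API that replaces avcodec_encode_video2(), and with it an asynchronous hardware encoder such as h264_qsv signals AVERROR(EAGAIN)/AVERROR_EOF on the receive side rather than the caller sitting inside a single blocking encode call. A possible reshaping of exportFrame()/endExport() using the question's globals is sketched below; whether it avoids the hang on a given Quick Sync driver and runtime is an assumption to be tested.

// Sketch using the send/receive API (FFmpeg 3.1+); relies on the globals and
// includes from the question's test program.
static bool writePendingPackets()
{
    AVPacket avPacket;
    av_init_packet( &avPacket );
    avPacket.data = NULL;
    avPacket.size = 0;
    for (;;)
    {
        int ret = avcodec_receive_packet( m_pCodecContext, &avPacket );
        if ( ret == AVERROR(EAGAIN) || ret == AVERROR_EOF )
            return true;                       // encoder needs more input / is fully drained
        if ( ret < 0 )
            return false;                      // genuine error
        avPacket.stream_index = 0;
        av_packet_rescale_ts( &avPacket, m_pCodecContext->time_base, m_pStream->time_base );
        int writeResult = av_interleaved_write_frame( m_pFormatContext, &avPacket );
        av_packet_unref( &avPacket );
        if ( writeResult < 0 )
            return false;
    }
}

bool exportFrameSendReceive( AVFrame* frame )   // pass NULL at end-of-stream to start draining
{
    if ( avcodec_send_frame( m_pCodecContext, frame ) < 0 )
        return false;
    if ( frame )
        frame->pts++;
    return writePendingPackets();
}

After avcodec_send_frame(m_pCodecContext, NULL), a single writePendingPackets() call keeps receiving until AVERROR_EOF, so it drains everything before av_write_trailer(); a stricter loop would also handle AVERROR(EAGAIN) returned by the send call itself by draining first and resending.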