
Media (1)
-
The Great Big Beautiful Tomorrow
28 October 2011
Updated: October 2011
Language: English
Type: Text
Other articles (111)
-
Publishing on MédiaSpip
13 June 2013
Can I post content from an iPad tablet?
Yes, if your installed MédiaSpip is at version 0.2 or later. If needed, contact your MédiaSpip administrator to find out. -
Automatic installation script for MediaSPIP
25 April 2011
To work around the installation difficulties caused mainly by server-side software dependencies, an all-in-one bash installation script was created to make this step easier on a server running a compatible Linux distribution.
You need SSH access to your server and a "root" account in order to use it, which allows the dependencies to be installed. Contact your hosting provider if you do not have these.
The documentation on using the installation script (...) -
Adding user-specific information and other author-related behaviour changes
12 April 2011
The simplest way to add information to authors is to install the Inscription3 plugin. It also makes it possible to change certain user-related behaviours (refer to its documentation for more information).
It is also possible to add fields to authors by installing the champs extras 2 and Interface pour champs extras plugins.
On other sites (11278)
-
FFMPEG libav gdigrab capturing with wrong colors
7 March 2018, by user1496491
I'm capturing the screen with the code below, and it gives me a picture with wrong colors.
The picture on the left is the raw data, which I assumed to be ARGB; the picture on the right is encoded as YUV. I've tried different formats, and the pictures change slightly, but it never looks how it should. In what format does gdigrab give its output? What's the right way to encode it?
#include "MainWindow.h"
#include <QGuiApplication>
#include <QLabel>
#include <QScreen>
#include <QTimer>
#include <QLayout>
#include <QImage>
#include <QtConcurrent/QtConcurrent>
#include <QThreadPool>
#include "ScreenCapture.h"
MainWindow::MainWindow(QWidget *parent) : QMainWindow(parent)
{
resize(800, 600);
label = new QLabel();
label->setAlignment(Qt::AlignHCenter | Qt::AlignVCenter);
auto layout = new QHBoxLayout();
layout->addWidget(label);
auto widget = new QWidget();
widget->setLayout(layout);
setCentralWidget(widget);
init();
initOutFile();
collectFrame();
}
MainWindow::~MainWindow()
{
avformat_close_input(&inputFormatContext);
avformat_free_context(inputFormatContext);
QThreadPool::globalInstance()->waitForDone();
}
void MainWindow::init()
{
av_register_all();
avcodec_register_all();
avdevice_register_all();
auto screen = QGuiApplication::screens()[1];
QRect geometry = screen->geometry();
inputFormatContext = avformat_alloc_context();
AVDictionary* options = NULL;
av_dict_set(&options, "framerate", "30", NULL);
av_dict_set(&options, "offset_x", QString::number(geometry.x()).toLatin1().data(), NULL);
av_dict_set(&options, "offset_y", QString::number(geometry.y()).toLatin1().data(), NULL);
av_dict_set(&options, "preset", "ultrafast", NULL);
av_dict_set(&options, "probesize", "10MB", NULL);
av_dict_set(&options, "pix_fmt", "yuv420p", NULL);
av_dict_set(&options, "video_size", QString(QString::number(geometry.width()) + "x" + QString::number(geometry.height())).toLatin1().data(), NULL);
AVInputFormat* inputFormat = av_find_input_format("gdigrab");
avformat_open_input(&inputFormatContext, "desktop", inputFormat, &options);
// AVDictionary* options = NULL;
// av_dict_set(&options, "framerate", "30", NULL);
// av_dict_set(&options, "preset", "ultrafast", NULL);
// av_dict_set(&options, "vcodec", "h264", NULL);
// av_dict_set(&options, "s", "1280x720", NULL);
// av_dict_set(&options, "crf", "0", NULL);
// av_dict_set(&options, "rtbufsize", "100M", NULL);
// AVInputFormat *format = av_find_input_format("dshow");
// avformat_open_input(&inputFormatContext, "video=screen-capture-recorder", format, &options);
av_dict_free(&options);
avformat_find_stream_info(inputFormatContext, NULL);
videoStreamIndex = av_find_best_stream(inputFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
inputCodec = avcodec_find_decoder(inputFormatContext->streams[videoStreamIndex]->codecpar->codec_id);
if(!inputCodec) qDebug() << "Input stream codec not found!";
inputCodecContext = avcodec_alloc_context3(inputCodec);
inputCodecContext->codec_id = inputCodec->id;
avcodec_parameters_to_context(inputCodecContext, inputFormatContext->streams[videoStreamIndex]->codecpar);
if(avcodec_open2(inputCodecContext, inputCodec, NULL)) qDebug() << "Failed to open the input codec!";
}
void MainWindow::initOutFile()
{
const char* filename = "C:/Temp/output.mp4";
if(avformat_alloc_output_context2(&outFormatContext, NULL, NULL, filename) < 0) qDebug() << "Failed to create the output context!";
outCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
if(!outCodec) qDebug() << "Failed to find the codec!";
videoStream = avformat_new_stream(outFormatContext, outCodec);
videoStream->time_base = {1, 30};
const AVPixelFormat* pixelFormat = outCodec->pix_fmts;
while (*pixelFormat != AV_PIX_FMT_NONE)
{
qDebug() << "OUT_FORMAT" << av_get_pix_fmt_name(*pixelFormat);
++pixelFormat;
}
outCodecContext = videoStream->codec;
outCodecContext->bit_rate = 400000;
outCodecContext->width = inputCodecContext->width;
outCodecContext->height = inputCodecContext->height;
outCodecContext->time_base = videoStream->time_base;
outCodecContext->gop_size = 10;
outCodecContext->max_b_frames = 1;
outCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
if (outFormatContext->oformat->flags & AVFMT_GLOBALHEADER) outCodecContext->flags |= CODEC_FLAG_GLOBAL_HEADER;
if(avcodec_open2(outCodecContext, outCodec, NULL)) qDebug() << "Failed to open the output codec!";
swsContext = sws_getContext(inputCodecContext->width,
inputCodecContext->height,
// inputCodecContext->pix_fmt,
AV_PIX_FMT_ABGR,
outCodecContext->width,
outCodecContext->height,
outCodecContext->pix_fmt,
SWS_BICUBIC, NULL, NULL, NULL);
if(avio_open(&outFormatContext->pb, filename, AVIO_FLAG_WRITE) < 0) qDebug() << "Failed to open the file!";
if(avformat_write_header(outFormatContext, NULL) < 0) qDebug() << "Failed to write the header!";
}
void MainWindow::collectFrame()
{
AVFrame* inFrame = av_frame_alloc();
inFrame->format = inputCodecContext->pix_fmt;
inFrame->width = inputCodecContext->width;
inFrame->height = inputCodecContext->height;
int size = av_image_alloc(inFrame->data, inFrame->linesize, inFrame->width, inFrame->height, inputCodecContext->pix_fmt, 1);
qDebug() << size;
AVFrame* outFrame = av_frame_alloc();
outFrame->format = outCodecContext->pix_fmt;
outFrame->width = outCodecContext->width;
outFrame->height = outCodecContext->height;
qDebug() << av_image_alloc(outFrame->data, outFrame->linesize, outFrame->width, outFrame->height, outCodecContext->pix_fmt, 1);
AVPacket packet;
av_init_packet(&packet);
av_read_frame(inputFormatContext, &packet);
// while(av_read_frame(inputFormatContext, &packet) >= 0)
// {
if(packet.stream_index == videoStream->index)
{
memcpy(inFrame->data[0], packet.data, size);
sws_scale(swsContext, inFrame->data, inFrame->linesize, 0, inputCodecContext->height, outFrame->data, outFrame->linesize);
QImage image(inFrame->data[0], inFrame->width, inFrame->height, QImage::Format_ARGB32);
label->setPixmap(QPixmap::fromImage(image).scaled(label->size(), Qt::KeepAspectRatio));
AVPacket outPacket;
av_init_packet(&outPacket);
int encodeResult = avcodec_receive_packet(outCodecContext, &outPacket);
while(encodeResult == AVERROR(EAGAIN))
{
if(avcodec_send_frame(outCodecContext, outFrame)) qDebug() << "Error sending the frame for encoding!";
encodeResult = avcodec_receive_packet(outCodecContext, &outPacket);
}
if(encodeResult != 0) qDebug() << "Error during encoding!" << encodeResult;
if(outPacket.pts != AV_NOPTS_VALUE) outPacket.pts = av_rescale_q(outPacket.pts, videoStream->codec->time_base, videoStream->time_base);
if(outPacket.dts != AV_NOPTS_VALUE) outPacket.dts = av_rescale_q(outPacket.dts, videoStream->codec->time_base, videoStream->time_base);
av_write_frame(outFormatContext, &outPacket);
av_packet_unref(&outPacket);
}
// }
av_packet_unref(&packet);
av_write_trailer(outFormatContext);
avio_close(outFormatContext->pb);
}
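For what it's worth, gdigrab on a typical 32-bit Windows desktop reports its frames as BGRA rather than ARGB, and rather than guessing, the opened stream can be asked which pixel format it actually delivers. A minimal sketch of that check, reusing the question's variable names (illustrative only, error handling omitted):
AVCodecParameters* par = inputFormatContext->streams[videoStreamIndex]->codecpar;
// codecpar->format holds the AVPixelFormat the grabber actually produces
qDebug() << "gdigrab delivers:" << av_get_pix_fmt_name((AVPixelFormat)par->format);
// Build the scaler from the reported format rather than a hard-coded guess,
// so sws_scale() converts from what the device really outputs
swsContext = sws_getContext(par->width, par->height,
                            (AVPixelFormat)par->format,
                            outCodecContext->width, outCodecContext->height,
                            outCodecContext->pix_fmt,
                            SWS_BICUBIC, NULL, NULL, NULL);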
-
swscaler bad src image pointers
7 March 2018, by user1496491
I'm completely lost. I'm trying to capture 30 screenshots and put them into a video with FFMPEG under Windows 10, and it keeps telling me [swscaler @ 073890a0] bad src image pointers. As a result the video is entirely green. If I change the format to dshow using video=screen-capture-recorder, the video looks to be mostly garbage. Here's my short code for that. I'm completely stuck and don't even know which direction to look in.
MainWindow.h
#ifndef MAINWINDOW_H
#define MAINWINDOW_H
#include <QMainWindow>
#include <QFuture>
#include <QFutureWatcher>
#include <QMutex>
#include <QMutexLocker>
extern "C" {
#include "libavcodec/avcodec.h"
#include "libavcodec/avfft.h"
#include "libavdevice/avdevice.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavutil/opt.h"
#include "libavutil/common.h"
#include "libavutil/channel_layout.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"
#include "libavutil/time.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/file.h"
#include "libswscale/swscale.h"
}
class MainWindow : public QMainWindow
{
Q_OBJECT
public:
MainWindow(QWidget *parent = 0);
~MainWindow();
private:
AVFormatContext *inputFormatContext = nullptr;
AVFormatContext *outFormatContext = nullptr;
AVStream* videoStream = nullptr;
AVDictionary* options = nullptr;
AVCodec* outCodec = nullptr;
AVCodec* inputCodec = nullptr;
AVCodecContext* inputCodecContext = nullptr;
AVCodecContext* outCodecContext = nullptr;
SwsContext* swsContext = nullptr;
private:
void init();
void initOutFile();
void collectFrame();
};
#endif // MAINWINDOW_H
MainWindow.cpp
#include "MainWindow.h"
#include <QGuiApplication>
#include <QLabel>
#include <QScreen>
#include <QTimer>
#include <QLayout>
#include <QImage>
#include <QtConcurrent/QtConcurrent>
#include <QThreadPool>
#include "ScreenCapture.h"
MainWindow::MainWindow(QWidget *parent) : QMainWindow(parent)
{
resize(800, 600);
auto label = new QLabel();
label->setAlignment(Qt::AlignHCenter | Qt::AlignVCenter);
auto layout = new QHBoxLayout();
layout->addWidget(label);
auto widget = new QWidget();
widget->setLayout(layout);
setCentralWidget(widget);
init();
initOutFile();
collectFrame();
}
MainWindow::~MainWindow()
{
avformat_close_input(&inputFormatContext);
avformat_free_context(inputFormatContext);
QThreadPool::globalInstance()->waitForDone();
}
void MainWindow::init()
{
av_register_all();
avcodec_register_all();
avdevice_register_all();
avformat_network_init();
auto screen = QGuiApplication::screens()[0];
QRect geometry = screen->geometry();
inputFormatContext = avformat_alloc_context();
options = NULL;
av_dict_set(&options, "framerate", "30", NULL);
av_dict_set(&options, "offset_x", QString::number(geometry.x()).toLatin1().data(), NULL);
av_dict_set(&options, "offset_y", QString::number(geometry.y()).toLatin1().data(), NULL);
av_dict_set(&options, "video_size", QString(QString::number(geometry.width()) + "x" + QString::number(geometry.height())).toLatin1().data(), NULL);
av_dict_set(&options, "show_region", "1", NULL);
AVInputFormat* inputFormat = av_find_input_format("gdigrab");
avformat_open_input(&inputFormatContext, "desktop", inputFormat, &options);
int videoStreamIndex = av_find_best_stream(inputFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
inputCodecContext = inputFormatContext->streams[videoStreamIndex]->codec;
inputCodecContext->width = geometry.width();
inputCodecContext->height = geometry.height();
inputCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
inputCodec = avcodec_find_decoder(inputCodecContext->codec_id);
avcodec_open2(inputCodecContext, inputCodec, NULL);
}
void MainWindow::initOutFile()
{
const char* filename = "C:/Temp/output.mp4";
avformat_alloc_output_context2(&outFormatContext, NULL, NULL, filename);
outCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
videoStream = avformat_new_stream(outFormatContext, outCodec);
videoStream->time_base = {1, 30};
outCodecContext = videoStream->codec;
outCodecContext->codec_id = AV_CODEC_ID_MPEG4;
outCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
outCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
outCodecContext->bit_rate = 400000;
outCodecContext->width = inputCodecContext->width;
outCodecContext->height = inputCodecContext->height;
outCodecContext->gop_size = 3;
outCodecContext->max_b_frames = 2;
outCodecContext->time_base = videoStream->time_base;
if (outFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
outCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
avcodec_open2(outCodecContext, outCodec, NULL);
if (!(outFormatContext->flags & AVFMT_NOFILE))
avio_open2(&outFormatContext->pb, filename, AVIO_FLAG_WRITE, NULL, NULL);
swsContext = sws_getContext(inputCodecContext->width,
inputCodecContext->height,
inputCodecContext->pix_fmt,
outCodecContext->width,
outCodecContext->height,
outCodecContext->pix_fmt,
SWS_BICUBIC, NULL, NULL, NULL);
avformat_write_header(outFormatContext, &options);
}
void MainWindow::collectFrame()
{
AVFrame* frame = av_frame_alloc();
frame->data[0] = NULL;
frame->width = inputCodecContext->width;
frame->height = inputCodecContext->height;
frame->format = inputCodecContext->pix_fmt;
av_image_alloc(frame->data, frame->linesize, inputCodecContext->width, inputCodecContext->height, (AVPixelFormat)frame->format, 32);
AVFrame* outFrame = av_frame_alloc();
outFrame->data[0] = NULL;
outFrame->width = outCodecContext->width;
outFrame->height = outCodecContext->height;
outFrame->format = outCodecContext->pix_fmt;
av_image_alloc(outFrame->data, outFrame->linesize, outCodecContext->width, outCodecContext->height, (AVPixelFormat)outFrame->format, 32);
int bufferSize = av_image_get_buffer_size(outCodecContext->pix_fmt,
outCodecContext->width,
outCodecContext->height,
24);
uint8_t* outBuffer = (uint8_t*)av_malloc(bufferSize);
avpicture_fill((AVPicture*)outFrame, outBuffer,
AV_PIX_FMT_YUV420P,
outCodecContext->width, outCodecContext->height);
int frameCount = 30;
int count = 0;
AVPacket* packet = (AVPacket*)av_malloc(sizeof(AVPacket));
av_init_packet(packet);
while(av_read_frame(inputFormatContext, packet) >= 0)
{
if(packet->stream_index == videoStream->index)
{
int frameFinished = 0;
avcodec_decode_video2(inputCodecContext, frame, &frameFinished, packet);
if(frameFinished)
{
if(++count > frameCount)
{
qDebug() << "FINISHED!";
break;
}
sws_scale(swsContext, frame->data, frame->linesize, 0, inputCodecContext->height, outFrame->data, outFrame->linesize);
AVPacket outPacket;
av_init_packet(&outPacket);
outPacket.data = NULL;
outPacket.size = 0;
int got_picture = 0;
avcodec_encode_video2(outCodecContext, &outPacket, outFrame, &got_picture);
if(got_picture)
{
if(outPacket.pts != AV_NOPTS_VALUE) outPacket.pts = av_rescale_q(outPacket.pts, videoStream->codec->time_base, videoStream->time_base);
if(outPacket.dts != AV_NOPTS_VALUE) outPacket.dts = av_rescale_q(outPacket.dts, videoStream->codec->time_base, videoStream->time_base);
av_write_frame(outFormatContext , &outPacket);
}
av_packet_unref(&outPacket);
}
}
}
av_write_trailer(outFormatContext);
av_free(outBuffer);
}
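One plausible reading of the error, offered as a sketch rather than a verified fix: init() forces inputCodecContext->pix_fmt to AV_PIX_FMT_YUV420P before opening the decoder, while gdigrab typically hands over packed BGRA, so the swscaler receives plane pointers that do not match the layout it was told to expect, hence "bad src image pointers". Deriving the scaler from the decoded frame's own format avoids the mismatch (same variable names as above, error handling omitted):
if(frameFinished)
{
    // Create the scaler lazily from what the decoder actually produced,
    // not from the format that was forced onto the codec context
    if(!swsContext)
        swsContext = sws_getContext(frame->width, frame->height,
                                    (AVPixelFormat)frame->format,
                                    outCodecContext->width, outCodecContext->height,
                                    outCodecContext->pix_fmt,
                                    SWS_BICUBIC, NULL, NULL, NULL);
    sws_scale(swsContext, frame->data, frame->linesize, 0,
              frame->height, outFrame->data, outFrame->linesize);
}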
-
avcodec_receive_packet() doesn't see the output
1 March 2018, by Eugene Alexeev
I'm trying to create a converter which will make a video out of a set of images. Everything is in place: AVFormatContext, AVCodecContext, AVCodec. I'm creating a YUV AVFrame out of a UIImage and sending it to the encoder with the avcodec_send_frame() method. Everything goes fine until I try to get an AVPacket with the avcodec_receive_packet() method. Every time it returns -35, which means "output is not available in the current state - user must try to send input". As I said, I'm sending input before I try to get anything, and the send is successful. Here's my code:
Init FFmpeg entities:
- (BOOL)setupForConvert:(DummyFVPVideoFile *)videoFile outputPath:(NSString *)path
{
if (!videoFile) {
[self.delegate convertationFailed:@"VideoFile is nil!"];
return NO;
}
currentVideoFile = videoFile;
outputPath = path;
BOOL success = NO;
success = [self initFormatCtxAndCodecs:path];
if (!success) {
return NO;
}
success = [self addCameraStreams:videoFile];
if (!success) {
return NO;
}
success = [self openIOContext:path];
if (!success) {
return NO;
}
return YES;
}
- (BOOL)initFormatCtxAndCodecs:(NSString *)path
{
//AVOutputFormat *fmt = av_guess_format("mp4", NULL, NULL);
int ret = avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, [path UTF8String]);
if (ret < 0) {
NSLog(@"Couldn't create output context");
return NO;
}
//encoder codec init
pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!pCodec) {
NSLog(@"Couldn't find a encoder codec!");
return NO;
}
pCodecCtx = avcodec_alloc_context3(pCodec);
if (!pCodecCtx) {
NSLog(@"Couldn't alloc encoder codec context!");
return NO;
}
pCodecCtx->codec_tag = AV_CODEC_ID_H264;
pCodecCtx->bit_rate = 400000;
pCodecCtx->width = currentVideoFile.size.width;
pCodecCtx->height = currentVideoFile.size.height;
pCodecCtx->time_base = (AVRational){1, (int)currentVideoFile.framerate};
pCodecCtx->framerate = (AVRational){(int)currentVideoFile.framerate, 1};
pCodecCtx->gop_size = 10;
pCodecCtx->max_b_frames = 1;
pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
NSLog(@"Couldn't open the encoder codec!");
return NO;
}
pPacket = av_packet_alloc();
return YES;
}
- (BOOL)addCameraStreams:(DummyFVPVideoFile *)videoFile
{
AVCodecParameters *params = avcodec_parameters_alloc();
if (!params) {
NSLog(@"Couldn't allocate codec parameters!");
return NO;
}
if (avcodec_parameters_from_context(params, pCodecCtx) < 0) {
NSLog(@"Couldn't copy parameters from context!");
return NO;
}
for (int i = 0; i < videoFile.idCameras.count - 1; i++)
{
NSString *path = [videoFile.url URLByAppendingPathComponent:videoFile.idCameras[i]].path;
AVStream *stream = avformat_new_stream(pFormatCtx, pCodec);
if (!stream) {
NSLog(@"Couldn't alloc stream!");
return NO;
}
if (avcodec_parameters_copy(stream->codecpar, params) < 0) {
NSLog(@"Couldn't copy parameters into stream!");
return NO;
}
stream->avg_frame_rate.num = videoFile.framerate;
stream->avg_frame_rate.den = 1;
stream->codecpar->codec_tag = 0; //some silly workaround
stream->index = i;
streams[path] = [[VideoStream alloc] initWithStream:stream];
}
return YES;
}
- (BOOL)openIOContext:(NSString *)path
{
AVIOContext *ioCtx = nil;
if (avio_open(&ioCtx, [path UTF8String], AVIO_FLAG_WRITE) < 0) {
return NO;
}
pFormatCtx->pb = ioCtx;
return YES;
}And here’s convertation process :
- (void)launchConvert:(DummyFVPVideoFile *)videoFile
{
BOOL convertInProgress = YES;
unsigned int frameCount = 1;
unsigned long pts = 0;
BOOL success = NO;
success = [self writeHeader];
if (!success) {
NSLog(@"Couldn't write header!");
return;
}
AVRational defaultTimeBase;
defaultTimeBase.num = 1;
defaultTimeBase.den = videoFile.framerate;
AVRational streamTimeBase = streams.allValues.firstObject.stream->time_base;
while (convertInProgress)
{
pts += av_rescale_q(1, defaultTimeBase, streamTimeBase);
for (NSString *path in streams.allKeys)
{
UIImage *img = [UIImage imageWithContentsOfFile:[NSString stringWithFormat:@"%@/%u.jpg", path, frameCount]];
AVPacket *pkt = [self getAVPacket:img withPts:pts];
if (!pkt->data) { continue; }
pkt->stream_index = streams[path].stream->index;
//check all settings of pkt
if (![self writePacket:pkt]) {
NSLog(@"Couldn't write packet!");
convertInProgress = NO;
break;
}
}
frameCount++;
}
success = [self writeTrailer];
if (!success) {
NSLog(@"Couldn't write trailer!");
return;
}
NSLog(@"Convertation finished!");
//delegate convertationFinished method
}
- (BOOL)writeHeader
{
if (avformat_write_header(pFormatCtx, NULL) < 0) {
return NO;
}
return YES;
}
- (BOOL)writePacket:(AVPacket *)pkt
{
if (av_interleaved_write_frame(pFormatCtx, pkt) != 0) {
return NO;
}
return YES;
}
- (BOOL)writeTrailer
{
if (av_write_trailer(pFormatCtx) != 0) {
return NO;
}
return YES;
}
/**
This method will create AVPacket out of UIImage.
@return AVPacket
*/
- (AVPacket *)getAVPacket:(UIImage *)img withPts:(unsigned long)pts
{
if (!img) {
NSLog(@"imgData is nil!");
return nil;
}
uint8_t *imgData = [self getPixelDataFromImage:img];
AVFrame *frame_yuv = av_frame_alloc();
if (!frame_yuv) {
NSLog(@"frame_yuv is nil!");
return nil;
}
frame_yuv->format = AV_PIX_FMT_YUV420P;
frame_yuv->width = (int)img.size.width;
frame_yuv->height = (int)img.size.height;
int ret = av_image_alloc(frame_yuv->data,
frame_yuv->linesize,
frame_yuv->width,
frame_yuv->height,
frame_yuv->format,
32);
if (ret < 0) {
NSLog(@"Couldn't alloc yuv frame!");
return nil;
}
struct SwsContext *sws_ctx = nil;
sws_ctx = sws_getContext((int)img.size.width, (int)img.size.height, AV_PIX_FMT_RGB24,
(int)img.size.width, (int)img.size.height, AV_PIX_FMT_YUV420P,
0, NULL, NULL, NULL);
const uint8_t *scaleData[1] = { imgData };
int inLineSize[1] = { 4 * img.size.width };
sws_scale(sws_ctx, scaleData, inLineSize, 0, (int)img.size.height, frame_yuv->data, frame_yuv->linesize);
frame_yuv->pict_type = AV_PICTURE_TYPE_I;
frame_yuv->pts = pCodecCtx->frame_number;
ret = avcodec_send_frame(pCodecCtx, frame_yuv); //every time everything is fine
if (ret != 0) {
NSLog(@"Couldn't send yuv frame!");
return nil;
}
av_init_packet(pPacket);
pPacket->dts = pPacket->pts = pts;
do {
ret = avcodec_receive_packet(pCodecCtx, pPacket); //every time -35 error
NSLog(@"ret = %d", ret);
if (ret == AVERROR_EOF) {
NSLog(@"AVERROR_EOF!");
} else if (ret == AVERROR(EAGAIN)) {
NSLog(@"AVERROR(EAGAIN)");
} else if (ret == AVERROR(EINVAL)) {
NSLog(@"AVERROR(EINVAL)");
}
if (ret != 0) {
NSLog(@"Couldn't receive packet!");
//return nil;
}
} while ( ret == 0 );
free(imgData);
av_packet_unref(pPacket);
av_packet_free(&pPacket);
av_frame_unref(frame_yuv);
av_frame_free(&frame_yuv);
//perform other clean up and test dat shit
return pPacket;
}
Any insights would be helpful. Thanks!
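For context, AVERROR(EAGAIN) from avcodec_receive_packet() is not a failure: it means the encoder has buffered the input and needs more frames before it can emit a packet, which is normal for H.264 since several frames may go in before the first packet comes out; at end of stream the encoder must also be flushed by sending a NULL frame. A minimal sketch of the usual send/receive pattern (illustrative names, reduced error handling):
// enc is an opened AVCodecContext*; pass frame == NULL once at the end to flush
static int encodeAndDrain(AVCodecContext *enc, AVFrame *frame, AVFormatContext *fmt)
{
    int ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        return ret;
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;
    while ((ret = avcodec_receive_packet(enc, &pkt)) >= 0) {
        // a packet came out; its timestamps may still need av_packet_rescale_ts()
        av_interleaved_write_frame(fmt, &pkt);
        av_packet_unref(&pkt);
    }
    // EAGAIN here simply means "send more frames"; AVERROR_EOF means fully drained
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}
Called once per image and then once more with NULL after the loop, this pattern surfaces the packets that a single receive call right after the first send never sees.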