
Other articles (111)
-
Taking part in its translation
10 April 2011. You can help us improve the wording used in the software, or translate it into any new language so that it can reach new linguistic communities.
To do so, we use SPIP's translation interface, where all of MediaSPIP's language modules are available. You simply need to sign up to the translators' mailing list to ask for more information.
At the moment MediaSPIP is only available in French and (...) -
Authorizations overridden by plugins
27 April 2010, by Mediaspip core
autoriser_auteur_modifier() so that visitors are able to edit their own information on the authors page -
Supported formats
28 January 2010. The following commands give information about the formats and codecs supported by the local ffmpeg installation:
ffmpeg -codecs
ffmpeg -formats
Video formats accepted as input
This list is not exhaustive; it highlights the main formats in use: h264: H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10; m4v: raw MPEG-4 video format; flv: Flash Video (FLV) / Sorenson Spark / Sorenson H.263; Theora; wmv:
Possible output video formats
To begin with, we (...)
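For reference, the same information can also be queried from code. Below is a minimal sketch (an illustration, not part of the original article) that lists the codecs compiled into the local libavcodec, assuming FFmpeg 4.0 or newer, where av_codec_iterate() is available:

// Minimal sketch: list the encoders/decoders available in the local
// libavcodec build, roughly equivalent to `ffmpeg -codecs`.
extern "C" {
#include <libavcodec/avcodec.h>
}
#include <cstdio>

int main()
{
 void* iter = nullptr;
 const AVCodec* codec = nullptr;
 while ((codec = av_codec_iterate(&iter))) {
 std::printf("%s%s %s (%s)\n",
 av_codec_is_decoder(codec) ? "D" : ".",
 av_codec_is_encoder(codec) ? "E" : ".",
 codec->name,
 codec->long_name ? codec->long_name : "");
 }
 return 0;
}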
On other sites (11360)
-
How can we achieve textAlign="center" within box in case of multiline text using ffmpeg drawtext? [closed]
13 September 2024, by Ata MuhiUlDin. When working with drawtext in ffmpeg, you can enable a box to show behind the text and also assign a background color to it.


But when it comes to multiline text, say there are three lines as shown in the picture of the current result:




It never aligns the text to the center, but I want results like those shown below:




This time I am using the following ffmpeg command to draw the text:


-vf "drawtext=text='${text}': x=((w*${x})/${DIMENSIONS.width}): y=((h*${y})/${DIMENSIONS.height}):fontsize=36:fontcolor=yellow@0.9: box=1: boxcolor=black@0.6:boxborderw=20" -c:a copy


-
AVPacket->Data is empty "0/0" but has size
24 June 2024, by CottonBuds. I am using libAV* to encode frames (unsigned char*) from my streaming application. I encoded my initialized frames, but when I try to get the AVPacket, it returns an AVPacket with a size but without data inside it ("0/0").


Here is my code:


StreamCodec.h



// Headers this class relies on (assumed; the original post does not show them)
#include <memory>
#include <QObject>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
}

class StreamCodec : public QObject
{
 Q_OBJECT
public:
 StreamCodec(int height, int width, int fps);

public slots:
 void encodeFrame(std::shared_ptr<uchar> pData);
 void run();

signals:
 void encodeFinish(AVPacket* packet);

private:
 void initializeSWS();
 void initializeCodec();

 AVPacket* allocatepacket(AVFrame* frame);
 AVFrame* allocateFrame(std::shared_ptr<uchar> pData);
 AVFrame* formatFrame(AVFrame* frame);

 const AVCodec* codec;
 AVCodecContext* context;
 SwsContext *swsContext;
 int bytesPerPixel;
 int width;
 int height;
 int fps;
 int pts = 0;
};



StreamCodec.cpp


#include "StreamCodec.h"
// Additional headers used below (assumed; the original post does not show them)
#include <QDebug>
extern "C" {
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
}

StreamCodec::StreamCodec(int height, int width, int fps)
{
 this->height = height;
 this->width = width;
 this->fps = fps;
}

void StreamCodec::initializeCodec()
{
 codec = avcodec_find_encoder(AV_CODEC_ID_H264);
 if (!codec) {
 qDebug() << "Codec not found";
 exit(1);
 }
 
 context = avcodec_alloc_context3(codec);
 if (!context) {
 qDebug() << "Could not allocate codec context";
 exit(1);
 }

 context->height = height;
 context->width = width;
 context->time_base.num = 1;
 context->time_base.den = fps;
 context->framerate.num = fps;
 context->framerate.den = 1;
 context->pix_fmt = AV_PIX_FMT_YUV420P;

 context->gop_size = 0;

 av_opt_set(context->priv_data, "preset", "ultrafast", 0);
 av_opt_set(context->priv_data, "crf", "35", 0);
 av_opt_set(context->priv_data, "tune", "zerolatency", 0);

 auto desc = av_pix_fmt_desc_get(AV_PIX_FMT_BGRA);
 if (!desc){
 qDebug() << "Can't get descriptor for pixel format";
 exit(1);
 }
 bytesPerPixel = av_get_bits_per_pixel(desc) / 8;
 if(av_get_bits_per_pixel(desc) % 8 != 0){
 qDebug() << "Unhandled bits per pixel, bad in pix fmt";
 exit(1);
 }

 int err = avcodec_open2(context, codec, nullptr);
 if (err < 0) {
 qDebug() << "Could not open codec";
 exit(1);
 }
}
void StreamCodec::initializeSWS()
{
 swsContext = sws_getContext(width, height, AV_PIX_FMT_BGRA, width, height, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);
 if (!swsContext) {
 qDebug() << "Could not allocate SWS Context";
 exit(1);
 }
}

void StreamCodec::encodeFrame(std::shared_ptr<uchar> pData)
{
 int err = 0;
 AVFrame* frame1 = allocateFrame(pData);
 AVFrame* frame = formatFrame(frame1);

 err = avcodec_send_frame(context, frame);
 if (err < 0) {
 qDebug() << "Error sending frame to codec";
 char errStr[AV_ERROR_MAX_STRING_SIZE];
 av_make_error_string(errStr, AV_ERROR_MAX_STRING_SIZE, err);
 qDebug() << errStr;
 av_frame_free(&frame);
 exit(1);
 }

 while (true) {
 AVPacket* packet = allocatepacket(frame);
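 // avcodec_receive_packet() returns AVERROR(EAGAIN) when the encoder needs
 // more input frames before it can emit a packet, and AVERROR_EOF only after
 // the encoder has been flushed, so breaking out of the loop here is expected.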
 err = avcodec_receive_packet(context, packet);
 if (err == AVERROR_EOF || err == AVERROR(EAGAIN) ) {
 av_packet_unref(packet);
 av_packet_free(&packet);
 break;
 }
 if (err < 0) {
 qDebug() << "Error recieving to codec";
 char* errStr = new char;
 av_make_error_string(errStr, 255, err);
 qDebug() << errStr;
 av_frame_free(&frame);
 av_frame_free(&frame1);
 av_packet_free(&packet);
 exit(1);
 }
 emit encodeFinish(packet);
 }

 av_frame_free(&frame);
 av_frame_free(&frame1);
}

void StreamCodec::run()
{
 initializeCodec();
 initializeSWS();
}

AVPacket* StreamCodec::allocatepacket(AVFrame* frame)
{
 AVPacket* packet = av_packet_alloc();
 if (!packet) {
 qDebug() << "Could not allocate memory for packet";
 av_frame_free(&frame);
 exit(1);
 }
 return packet;
}

AVFrame* StreamCodec::allocateFrame(std::shared_ptr<uchar> pData)
{
 AVFrame* frame = av_frame_alloc();
 if (!frame) {
 qDebug() << "Could not allocate memory for frame";
 exit(1);
 }

 frame->format = AV_PIX_FMT_BGRA;
 frame->width = width;
 frame->height = height;
 frame->pts = pts;

 if (av_frame_get_buffer(frame, 0) < 0) {
 qDebug() << "Failed to get frame buffer";
 exit(1);
 }

 if (av_frame_make_writable(frame) < 0) {
 qDebug() << "Failed to make frame writable";
 exit(1);
 }

 frame->data[0] = pData.get();
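 // Note: this assignment replaces the buffer that av_frame_get_buffer() allocated
 // above with the raw BGRA pointer, so frame->linesize[0] has to match the packed
 // stride of pData (width * 4 bytes) for sws_scale() to read it correctly.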

 return frame;
}

AVFrame* StreamCodec::formatFrame(AVFrame* frame)
{
 AVFrame* yuvFrame = av_frame_alloc();
 if (!yuvFrame) {
 qDebug() << "Unable to allocate memory for yuv frame";
 av_frame_free(&frame);
 exit(1);
 }

 yuvFrame->format = context->pix_fmt;
 yuvFrame->width = width;
 yuvFrame->height = height;
 yuvFrame->pts = pts;
 pts += 1;
 
 if (av_frame_get_buffer(yuvFrame, 0) < 0) {
 qDebug() << "Failed to get frame buffer";
 exit(1);
 }

 if (av_frame_make_writable(yuvFrame) < 0) {
 qDebug() << "Failed to make frame writable";
 exit(1);
 }

 int err = sws_scale(swsContext, (const uint8_t* const*)frame->data, frame->linesize, 0, height, (uint8_t* const*)yuvFrame->data, yuvFrame->linesize);
 if (err < 0) {
 qDebug() << "Could not format frame to yuv420p";
 exit(1);
 }
 return yuvFrame;
}




I tried checking the frames and I'm pretty sure the data is there. I just don't know what to do at this point.


edit 1


I tried viewing the data using the Visual Studio Code "view" button, and it showed me this:




Thank you so much to all who commented and pointed me in the right direction.
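For anyone checking the same thing: a minimal way to confirm that a packet really contains data is to print its size and its first bytes in hex instead of viewing packet->data as a string (an H.264 packet typically starts with 0x00 start-code bytes, which a debugger renders as an empty string). A sketch, assuming the AVPacket* emitted by encodeFinish():

#include <algorithm>
#include <QDebug>
#include <QString>
extern "C" {
#include <libavcodec/avcodec.h>
}

// Dump an encoded packet's size and its first bytes in hexadecimal.
void dumpPacket(const AVPacket* packet)
{
 QString bytes;
 const int n = std::min(packet->size, 16);
 for (int i = 0; i < n; ++i)
 bytes += QString("%1 ").arg(packet->data[i], 2, 16, QChar('0'));
 qDebug() << "packet size:" << packet->size << "first bytes:" << bytes;
}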


-
avfilter/vf_scale: add optional "ref" input
24 April 2024, by Niklas Haas. avfilter/vf_scale: add optional "ref" input
This is automatically enabled if the width/height expressions reference
any ref_* variable. This will ultimately serve as a more principled
replacement for the fundamentally broken scale2ref.
See-Also: https://trac.ffmpeg.org/ticket/10795