
Recherche avancée
Médias (1)
-
Video d’abeille en portrait
14 mai 2011, par
Mis à jour : Février 2012
Langue : français
Type : Video
Autres articles (106)
-
Les autorisations surchargées par les plugins
27 avril 2010, par Mediaspip core
autoriser_auteur_modifier() afin que les visiteurs soient capables de modifier leurs informations sur la page d’auteurs -
Problèmes fréquents
10 mars 2010, par
PHP et safe_mode activé
Une des principales sources de problèmes relève de la configuration de PHP et notamment de l’activation du safe_mode
La solution consiterait à soit désactiver le safe_mode soit placer le script dans un répertoire accessible par apache pour le site -
Qu’est ce qu’un éditorial
21 juin 2013, par
Écrivez votre point de vue dans un article. Celui-ci sera rangé dans une rubrique prévue à cet effet.
Un éditorial est un article de type texte uniquement. Il a pour objectif de ranger les points de vue dans une rubrique dédiée. Un seul éditorial est placé à la une en page d’accueil. Pour consulter les précédents, consultez la rubrique dédiée.
Vous pouvez personnaliser le formulaire de création d’un éditorial.
Formulaire de création d’un éditorial Dans le cas d’un document de type éditorial, les (...)
Sur d’autres sites (13414)
-
FFMpeg gpl (ffmpeg-4.2.1-win64-dev_and_shared) version give different decode result (cmd query vs code)
31 mai 2021, par Aleksey Timoshchenko
All the source images I put on my google drive due to SO restrictions (just click on the links provided in the text).


The problem is that for
.h264
I use two implementations (cmd query and code) (depends on the tasks) that give me different results and I don't see any reason for this.

Before all I would like to give an explanation, I have
.bmp
bayer image, then I do debayering and compress it to .h264
(compress .h264) with the script

ffmpeg -y -hide_banner -i orig_bayer.bmp -vf format=gray -f rawvideo pipe: | ffmpeg -hide_banner -y -framerate 30 -f rawvideo -pixel_format bayer_rggb8 -video_size 4096x3000 -i pipe: -c:v hevc_nvenc -qp 0 -pix_fmt yuv444p res.h264




Then the first cmd query implementation of decoding (image result)


ffmpeg -y -i res.h264 -vframes 1 -f image2 gpl_script_res.bmp -hide_banner




The second implementation is code one and takes more lines (image result here)


Init ffmpeg


bool FFmpegDecoder::InitFFmpeg()
{
    // Open the input via the custom I/O context, locate the first video
    // stream, set up the decoder and the RGB destination frame.
    // Returns false (with a log line) on any failure.
    m_pAVPkt = av_packet_alloc();
    m_pAVFrame = av_frame_alloc();
    m_pAVFrameRGB = av_frame_alloc();

    // Demuxer context fed through the custom I/O context — hence the empty URL.
    m_pAVFormatCtx = avformat_alloc_context();
    m_pIoCtx->initAVFormatContext(m_pAVFormatCtx);

    if (avformat_open_input(&m_pAVFormatCtx, "", nullptr, nullptr) != 0)
    {
        printf("FFmpegDecoder::InitFFmpeg: error in avformat_open_input\n");
        return false;
    }

    if (avformat_find_stream_info(m_pAVFormatCtx, nullptr) < 0)
    {
        printf("FFmpegDecoder::InitFFmpeg: error in avformat_find_stream_info\n");
        return false;
    }

    // Pick the first video stream.
    for (int i = 0; i < (int)m_pAVFormatCtx->nb_streams; i++)
    {
        if (m_pAVFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
        {
            m_streamIdx = i;
            m_pAVStream = m_pAVFormatCtx->streams[i];
            break;
        }
    }
    if (m_pAVStream == nullptr)
    {
        printf("FFmpegDecoder::InitFFmpeg: failed to find video stream\n");
        return false;
    }

    m_pAVCodec = avcodec_find_decoder(m_pAVStream->codecpar->codec_id);
    if (!m_pAVCodec)
    {
        printf("FFmpegDecoder::InitFFmpeg: error in avcodec_find_decoder\n");
        return false;
    }

    m_pAVCodecCtx = avcodec_alloc_context3(m_pAVCodec);
    if (!m_pAVCodecCtx)
    {
        printf("FFmpegDecoder::InitFFmpeg: error in avcodec_alloc_context3\n");
        return false;
    }

    if (avcodec_parameters_to_context(m_pAVCodecCtx, m_pAVStream->codecpar) < 0)
    {
        printf("FFmpegDecoder::InitFFmpeg: error in avcodec_parameters_to_context\n");
        return false;
    }

    if (avcodec_open2(m_pAVCodecCtx, m_pAVCodec, nullptr) < 0)
    {
        printf("FFmpegDecoder::InitFFmpeg: error in avcodec_open2\n");
        return false;
    }

    // Destination frame for the converted output, 32-byte aligned rows.
    // NOTE(review): this frame is labelled AV_PIX_FMT_BGR24, while
    // InitConvertColorSpace() converts into it as AV_PIX_FMT_RGB24. The byte
    // layout and buffer size are identical (3 packed bytes/pixel), but the
    // channel-order label is inconsistent — confirm which order downstream
    // consumers actually expect.
    m_pAVFrameRGB->format = AV_PIX_FMT_BGR24;
    m_pAVFrameRGB->width = m_pAVCodecCtx->width;
    m_pAVFrameRGB->height = m_pAVCodecCtx->height;
    if (av_frame_get_buffer(m_pAVFrameRGB, 32) != 0)
    {
        printf("FFmpegDecoder::InitFFmpeg: error in av_frame_get_buffer\n");
        return false;
    }

    m_streamRotationDegrees = GetAVStreamRotation(m_pAVStream);
    // BUG FIX: nb_frames was previously read from streams[0], but stream 0
    // is not necessarily the video stream selected above. Use the selected
    // stream; nb_frames may legitimately be 0 (unknown) for raw .h264.
    m_estimatedFramesCount = m_pAVStream->nb_frames;

    // NOTE(review): the early-return error paths above do not release the
    // contexts allocated so far — presumably the destructor cleans up; verify.
    return InitConvertColorSpace();
}

bool FFmpegDecoder::InitConvertColorSpace()
{
 // Init converter from YUV420p to BGR:
 m_pSwsCtxConvertImg = sws_getContext(m_pAVCodecCtx->width, m_pAVCodecCtx->height, m_pAVCodecCtx->pix_fmt, m_pAVCodecCtx->width, m_pAVCodecCtx->height, AV_PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL);
 if (!m_pSwsCtxConvertImg)
 {
 printf("FFmpegDecoder::InitFFmpeg: error in sws_getContext\n");
 return false;
 }
 return true;
}



Decoding impl


bool FFmpegDecoder::DecodeContinue(int firstFrameIdx, int maxNumFrames)
{
 if (firstFrameIdx == FIRST_FRAME_IDX_BEGINNING)
 {
 firstFrameIdx = 0;
 }

 auto lastReportedFrameIdxTillNow = GetLastReportedFrameIdx();

 if (GetLastDecodedFrameIdx() >= 0 && firstFrameIdx <= GetLastDecodedFrameIdx())
 {
 printf("FFmpegDecoder::DecodeContinue FAILED: firstFrameIdx (%d) already passed decoded (Last decoded idx: %d)\n", firstFrameIdx, GetLastDecodedFrameIdx());
 return false;
 }

 bool bRes;
 int nRet = 0;
 bool bDecodeShouldEnd = false;

 if (m_pAVPkt != nullptr)
 {
 while (nRet >= 0)
 {
 m_bCurrentAVPktSentToCodec = false;
 nRet = av_read_frame(m_pAVFormatCtx, m_pAVPkt);
 if (nRet < 0)
 {
 break;
 }
 if (m_pAVPkt->stream_index == m_streamIdx)
 {
 bRes = DecodeCurrentAVPkt(firstFrameIdx, maxNumFrames, bDecodeShouldEnd);
 if (!bRes || m_bRequestedAbort)
 {
 av_packet_unref(m_pAVPkt);
 return false;
 }

 if (bDecodeShouldEnd)
 {
 av_packet_unref(m_pAVPkt);
 return true;
 }
 }

 av_packet_unref(m_pAVPkt);
 }
 m_bCurrentAVPktSentToCodec = false;
 m_pAVPkt = nullptr;
 }

 // drain:
 bRes = DecodeCurrentAVPkt(firstFrameIdx, maxNumFrames, bDecodeShouldEnd);
 if (!bRes)
 {
 return false;
 }

 if (lastReportedFrameIdxTillNow == GetLastReportedFrameIdx())
 {
 printf("FFmpegDecoder::DecodeContinue(firstFrameIdx==%d, maxNumFrames==%d) FAILED: no new frame was decoded\n", firstFrameIdx, maxNumFrames);
 return false;
 }

 return true;
}


bool FFmpegDecoder::DecodeCurrentAVPkt(int firstFrameIdx, int maxNumFrames, bool & bDecodeShouldEnd)
{
 bDecodeShouldEnd = false;

 int ret = 0;
 if (m_bCurrentAVPktSentToCodec == false)
 {
 ret = avcodec_send_packet(m_pAVCodecCtx, m_pAVPkt);
 m_bCurrentAVPktSentToCodec = true;
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 {
 printf("FFmpegDecoder::DecodeFrameImp: error EAGAIN/AVERROR_EOF in avcodec_send_packet\n");
 return false;
 }
 if (ret < 0)
 {
 if (ret == AVERROR_INVALIDDATA)
 {
 printf("FFmpegDecoder::DecodeFrameImp: error (%d - AVERROR_INVALIDDATA) in avcodec_send_packet\n", ret);
 }
 else
 {
 printf("FFmpegDecoder::DecodeFrameImp: error (%d) in avcodec_send_packet\n", ret);
 }
 return false;
 }
 }

 ret = 0;
 while (ret >= 0)
 {
 ret = avcodec_receive_frame(m_pAVCodecCtx, m_pAVFrame);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 {
 break;
 }

 IncrementLastDecodedFrameIdx();
 if (GetLastDecodedFrameIdx() < firstFrameIdx)
 {
 printf("FFmpegDecoder::DecodeCurrentAVPkt ignoring frame idx %d\n", GetLastDecodedFrameIdx());
 continue; // we don't need this frame
 }

 AVFrame * theFrame = m_pAVFrame; // default

 for (int j = 0; j < m_pAVFrame->nb_side_data; j++)
 {
 AVFrameSideData *sd = m_pAVFrame->side_data[j];
 if (sd->type == AV_FRAME_DATA_DISPLAYMATRIX) {
 auto ddd = av_display_rotation_get((int32_t *)sd->data);
 }
 }

 if (m_pSwsCtxConvertImg != nullptr)
 {
 {
 if (sws_scale(m_pSwsCtxConvertImg, theFrame->data, theFrame->linesize, 0, theFrame->height, m_pAVFrameRGB->data, m_pAVFrameRGB->linesize) == 0)
 {
 printf("FFmpegDecoder::DecodeFrameImp: error in sws_scale\n");
 return false;
 }
 }
 int numChannels = 3;
 FFmpegDecoderCallback::EPixelFormat pixFormat = FFmpegDecoderCallback::EPixelFormat::RGB;
 // Report frame to the client and update last reported frame idx:
 m_pCB->FFmpegDecoderCallback_HandleFrame(m_reqId, GetLastDecodedFrameIdx(), m_pAVFrameRGB->width, m_pAVFrameRGB->height, m_pAVFrameRGB->linesize[0], pixFormat, numChannels, m_pAVFrameRGB->data[0]);
 m_lastReportedFrameIdx = GetLastDecodedFrameIdx(); 
 }

 if (maxNumFrames != MAX_NUM_FRAMES_INFINITE && GetLastDecodedFrameIdx() >= (firstFrameIdx + maxNumFrames - 1))
 {
 bDecodeShouldEnd = true;
 return true;
 }
 }
 return true;
}

/*virtual*/ void FFmpegOneDecoderCtx::FFmpegDecoderCallback_HandleFrame(int reqId, int frameIdx0based, int width, int height, int widthStepBytes, EPixelFormat pixFormat, int numChannels, void * pData) /*override*/
{
    // Copy the reported frame into m_pLastFrame row by row, since the source
    // stride (widthStepBytes) and the cv::Mat stride may differ.
    // We don't have metadata json => return the frame as is:
    m_pLastFrame->create(height, width, CV_8UC3);
    // Defensive clear; fully overwritten below when numChannels == 3.
    *m_pLastFrame = cv::Scalar(0, 0, 0);
    // BUG FIX: restored the cast that was mangled in the pasted source
    // ("reinterpret_cast<unsigned char="char">" is not valid C++).
    unsigned char * pSrc = reinterpret_cast<unsigned char *>(pData);
    unsigned char * pDst = m_pLastFrame->data;
    auto dstStride = m_pLastFrame->step[0];
    for (int y = 0; y < height; ++y)
    {
        memcpy(pDst + y * dstStride, pSrc + y * widthStepBytes, numChannels * width);
    }
}


And eventually usage looks like this


//>>> Get frame #1 of `filename` into `image` via the decoder.
 FFmpegMultiDecoder decoder;
 decoder.HandleRequest_GetFrame(nullptr, filename, 1, image);
 //<<<

 //>>> Color conversion from BGR to RGB
 // NOTE(review): the decoder callback labels its output RGB while the frame
 // buffer was declared BGR24 — verify the data really is BGR before swapping
 // channels here.
 cv::cvtColor(image, image, cv::COLOR_BGR2RGB);
 //<<<
 bool isOk = cv::imwrite(save_location, image);



The problem is that if you try to open two final decompressed images


- 

- one with code https://drive.google.com/file/d/1sfTnqvHKQ2DUy0uP8POZXDw2-u3oIRfZ/view?usp=sharing
- one with cmd query https://drive.google.com/file/d/1cwsOltk3DVtK86eLeyhiYjNeEXj0msES/view?usp=sharing






and try to flip from one image to other you'll see that image I got by cmd query a little bit brighter than that I got by code.


What is a possible problem here ?


If I missed smth feel free to ask.


-
Anomalie #4159 : empêcher l’upgrade en 3.1 si le code n’est plus compatible avec la version PHP
9 février 2021, par b b
De plus, le loader gère ça très bien maintenant, donc ça me semble bon.
-
cannot resolve variable PIX_FMT_RGB24, ffmpeg source code install with the newest version [duplicate]
10 août 2016, par NacyL
This question already has an answer here :
i installed ffmpeg from source code according to https://trac.ffmpeg.org/wiki/CompilationGuide/Ubuntu, and wrote a test file to save a ppm file from a video, but the code cannot resolve
PIX_FMT_RGB24
, i write the code as below :

int main() {
// Initalizing these to NULL prevents segfaults!
AVFormatContext *pFormatCtx = NULL;
int i, videoStream;
AVCodecContext *pCodecCtxOrig = NULL;
AVCodecContext *pCodecCtx = NULL;
AVCodec *pCodec = NULL;
AVFrame *pFrame = NULL;
AVFrame *pFrameRGB = NULL;
AVPacket packet;
int frameFinished;
int numBytes;
uint8_t *buffer = NULL;
struct SwsContext *sws_ctx = NULL;
const char* url = "/home/liulijuan/bin/test.mp4";
// [1] Register all formats and codecs
av_register_all();
// [2] Open video file
if(avformat_open_input(&pFormatCtx, url, NULL, NULL)!=0)
return -1; // Couldn't open file
// [3] Retrieve stream information
if(avformat_find_stream_info(pFormatCtx, NULL)<0)
return -1; // Couldn't find stream information
// Dump information about file onto standard error
av_dump_format(pFormatCtx, 0, url, 0);
// Find the first video stream
videoStream=-1;
for(i=0; i < pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
videoStream=i;
break;
}
if(videoStream==-1)
return -1; // Didn't find a video stream
// Get a pointer to the codec context for the video stream
pCodecCtxOrig=pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtxOrig->codec_id);
if(pCodec==NULL) {
fprintf(stderr, "Unsupported codec!\n");
return -1; // Codec not found
}
// Copy context
pCodecCtx = avcodec_alloc_context3(pCodec);
if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
fprintf(stderr, "Couldn't copy codec context");
return -1; // Error copying codec context
}
// Open codec
if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
return -1; // Could not open codec
// Allocate video frame
pFrame=av_frame_alloc();
// Allocate an AVFrame structure
pFrameRGB=av_frame_alloc();
if(pFrameRGB==NULL)
return -1;
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
pCodecCtx->height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
pCodecCtx->width, pCodecCtx->height);
// initialize SWS context for software scaling
sws_ctx = sws_getContext(pCodecCtx->width,
pCodecCtx->height,
pCodecCtx->pix_fmt,
pCodecCtx->width,
pCodecCtx->height,
PIX_FMT_RGB24,
SWS_BILINEAR,
NULL,
NULL,
NULL
);
// [4] Read frames and save first five frames to disk
i=0;
while(av_read_frame(pFormatCtx, &packet)>=0) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
// Did we get a video frame?
if(frameFinished) {
// Convert the image from its native format to RGB
sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
pFrame->linesize, 0, pCodecCtx->height,
pFrameRGB->data, pFrameRGB->linesize);
// Save the frame to disk
if(++i<=5)
SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height,
i);
}
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
// Free the RGB image
av_free(buffer);
av_frame_free(&pFrameRGB);
// Free the YUV frame
av_frame_free(&pFrame);
// Close the codecs
avcodec_close(pCodecCtx);
avcodec_close(pCodecCtxOrig);
// Close the video file
avformat_close_input(&pFormatCtx);
return 0;
return 0;
}

so i replace
PIX_FMT_RGB24
withAV_PIX_FMT_RGB24
, but i cannot open the saved ppm file ; the save code is as below :

void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) {
FILE *pFile;
char szFilename[32];
int y;
printf("start save frame ...\n");
// Open file
sprintf(szFilename, "/home/liulijuan/frame%d.ppm", iFrame);
pFile=fopen(szFilename, "wb");
if(pFile==NULL)
return;
printf("start write header ...\n");
// Write header
fprintf(pFile, "/P6\n%d %d\n255\n", width, height);
// Write pixel data
for(y=0; y < height; y++)
fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile);
// Close file
fclose(pFile);
printf("close file ...\n");
}

so, what's wrong with this code ?