
Recherche avancée
Autres articles (2)
-
Submit bugs and patches
13 avril 2011 — Unfortunately, software is never perfect.
If you think you have found a bug, report it using our ticket system. To help us fix it, please provide the following information: the browser you are using, including the exact version; as precise a description of the problem as possible; if possible, the steps taken that led to the problem; and a link to the site / page in question.
If you think you have solved the bug, fill in a ticket and attach to it a corrective patch.
You may also (...) -
Monitoring de fermes de MediaSPIP (et de SPIP tant qu’à faire)
31 mai 2013, par — Lorsque l’on gère plusieurs (voire plusieurs dizaines) de MediaSPIP sur la même installation, il peut être très pratique d’obtenir d’un coup d’oeil certaines informations.
Cet article a pour but de documenter les scripts de monitoring Munin développés avec l’aide d’Infini.
Ces scripts sont installés automatiquement par le script d’installation automatique si une installation de munin est détectée.
Description des scripts
Trois scripts Munin ont été développés :
1. mediaspip_medias
Un script de (...)
Sur d’autres sites (1560)
-
How to view/save AVFrame have format AV_PIX_FMT_YUVJ420P to file
30 juillet 2022, par Che Huu — I have an AVFrame and I want to save it to a file. If I only store frame->data[0] to the file, the image will be a grey image; how can I view it in full color? I am using the C language.


Do you have any suggestions on what I should read to understand and do these things by myself ?


-
ffmpeg - make pixels transparent depending on how black they are
8 juillet 2022, par TheForgot3n1 — Fully black pixels should be fully transparent, grey pixels should be semi-transparent, and white pixels should not be transparent at all. Is this possible to achieve in ffmpeg?


-
libav seeking to n seconds in a video and saving frames there onwards
7 juillet 2022, par Sam Kore — I’ve been trying to seek in an mpegts video file. After exploring many examples, I’ve reached the following stage of fetching frames and saving them.
However after using av_seek_frame also I'm getting following results :


- 

- Initial 7-8 frames are saved as grey frames.
- Thereafter frames are fetched from beginning of the video only. i.e. 0 seconds onwards, while I'm trying to seek from say 12th seconds onwards.






Can you please explain how should I be calculating the timestamp and do the things correctly ?



int main(int argc, const char *argv[])
{

 int stream_id;
 int64_t timestamp;
 char ts_buf[60];


 if (argc < 2) {
 printf("You need to specify a media file.\n");
 return -1;
 }
 
 logging("initializing all the containers, codecs and protocols.");

 
 AVFormatContext *pFormatContext = avformat_alloc_context();
 if (!pFormatContext) {
 logging("ERROR could not allocate memory for Format Context");
 return -1;
 }

 logging("opening the input file (%s) and loading format (container) header", argv[1]);
 
 if (avformat_open_input(&pFormatContext, argv[1], NULL, NULL) != 0) {
 logging("ERROR could not open the file");
 return -1;
 }

 logging("format %s, duration %lld us, bit_rate %lld", pFormatContext->iformat->name, pFormatContext->duration, pFormatContext->bit_rate);

 logging("finding stream info from format");
 
 if (avformat_find_stream_info(pFormatContext, NULL) < 0) {
 logging("ERROR could not get the stream info");
 return -1;
 }

 AVCodec *pCodec = NULL;
 AVCodecParameters *pCodecParameters = NULL;
 int video_stream_index = -1;

 // loop though all the streams and print its main information
 for (int i = 0; i < pFormatContext->nb_streams; i++)
 {
 AVCodecParameters *pLocalCodecParameters = NULL;
 pLocalCodecParameters = pFormatContext->streams[i]->codecpar;
 logging("AVStream->time_base before open coded %d/%d", pFormatContext->streams[i]->time_base.num, pFormatContext->streams[i]->time_base.den);
 logging("AVStream->r_frame_rate before open coded %d/%d", pFormatContext->streams[i]->r_frame_rate.num, pFormatContext->streams[i]->r_frame_rate.den);
 logging("AVStream->start_time %" PRId64, pFormatContext->streams[i]->start_time);
 logging("AVStream->duration %" PRId64, pFormatContext->streams[i]->duration);

 logging("finding the proper decoder (CODEC)");

 AVCodec *pLocalCodec = NULL;

 pLocalCodec = avcodec_find_decoder(pLocalCodecParameters->codec_id);

 if (pLocalCodec==NULL) {
 logging("ERROR unsupported codec!");
 // In this example if the codec is not found we just skip it
 continue;
 }

 // when the stream is a video we store its index, codec parameters and codec
 if (pLocalCodecParameters->codec_type == AVMEDIA_TYPE_VIDEO) {
 if (video_stream_index == -1) {
 video_stream_index = i;
 pCodec = pLocalCodec;
 pCodecParameters = pLocalCodecParameters;
 }

 logging("Video Codec: resolution %d x %d", pLocalCodecParameters->width, pLocalCodecParameters->height);
 } else if (pLocalCodecParameters->codec_type == AVMEDIA_TYPE_AUDIO) {
 logging("Audio Codec: %d channels, sample rate %d", pLocalCodecParameters->channels, pLocalCodecParameters->sample_rate);
 }

 // print its name, id and bitrate
 logging("\tCodec %s ID %d bit_rate %lld", pLocalCodec->name, pLocalCodec->id, pLocalCodecParameters->bit_rate);
 }

 if (video_stream_index == -1) {
 logging("File %s does not contain a video stream!", argv[1]);
 return -1;
 }

 AVCodecContext *pCodecContext = avcodec_alloc_context3(pCodec);
 if (!pCodecContext)
 {
 logging("failed to allocated memory for AVCodecContext");
 return -1;
 }

 if (avcodec_parameters_to_context(pCodecContext, pCodecParameters) < 0)
 {
 logging("failed to copy codec params to codec context");
 return -1;
 }

 if (avcodec_open2(pCodecContext, pCodec, NULL) < 0)
 {
 logging("failed to open codec through avcodec_open2");
 return -1;
 }

 AVFrame *pFrame = av_frame_alloc();
 if (!pFrame)
 {
 logging("failed to allocate memory for AVFrame");
 return -1;
 }

 AVPacket *pPacket = av_packet_alloc();
 if (!pPacket)
 {
 logging("failed to allocate memory for AVPacket");
 return -1;
 }

 /*seek for 20 seconds*/
 int64_t incr, pos = 5;

 int64_t seek_target = (pos * AV_TIME_BASE), stream_index = 0, how_many_packets_to_process = 50, response = 0;

 printf("seek_target before : %lu\n", seek_target);
 seek_target = av_rescale_q(seek_target, AV_TIME_BASE_Q, pFormatContext->streams[stream_index]->time_base);

 printf("seek_target after: %lu\n", seek_target);

 do
 {
 response = decode_packet(pFormatContext, pPacket, pCodecContext, pFrame, seek_target);
 if (response < 0)
 break;
 avcodec_flush_buffers(pCodecContext);

 /*av_frame_unref(pFrame);
 av_packet_unref(pPacket);*/
 // stop it, otherwise we'll be saving hundreds of frames
 if (--how_many_packets_to_process <= 0) 
 break;

 }while(1);

logging("releasing all the resources");

 avformat_close_input(&pFormatContext);
 av_packet_free(&pPacket);
 av_frame_free(&pFrame);
 avcodec_free_context(&pCodecContext);
 return 0;
}


/*
 * Seeks stream 0 of pFormatContext to seek_target (expressed in that
 * stream's time base), then reads and decodes packets until end of file,
 * saving every decoded video frame at or after the target as a grayscale
 * .pgm file via save_gray_frame.
 *
 * Returns 0 on success (EOF reached), or a negative AVERROR code on a
 * seek/decoder failure.
 */
int decode_packet(AVFormatContext *pFormatContext, AVPacket *pPacket, AVCodecContext *pCodecContext, AVFrame *pFrame, int64_t seek_target)
{
    /* AVSEEK_FLAG_BACKWARD lands on the nearest keyframe at or before the
     * target, so packets before the target will come out first; they are
     * skipped below. This is also why the first frames after a naive seek
     * can appear grey: decoding must start from a keyframe. */
    if (av_seek_frame(pFormatContext, 0, seek_target, AVSEEK_FLAG_BACKWARD) < 0)
    {
        printf("error while seeking\n");
        return -1;
    }
    /* Drop any decoder state that predates the seek. */
    avcodec_flush_buffers(pCodecContext);

    while (1)
    {
        if (av_read_frame(pFormatContext, pPacket) < 0)
        {
            logging("av_read_frame failure");
            break;
        }

        /* Only feed the video decoder packets from the stream we seeked
         * (stream 0 — the same assumption made by av_seek_frame above).
         * Audio/other packets would make avcodec_send_packet fail. */
        if (pPacket->stream_index != 0)
        {
            av_packet_unref(pPacket);
            continue;
        }

        /* Discard packets that precede the seek target. Both pPacket->pts
         * and seek_target are in the stream's time base, so they compare
         * directly — no unit conversion needed. (The previous comparison
         * multiplied seconds by 1000 against timebase ticks, which never
         * matched and caused frames from the start of the file to be kept.) */
        if (pPacket->pts != AV_NOPTS_VALUE && pPacket->pts < seek_target)
        {
            printf("skipping the frame\npFormatContext->streams[0]->time_base: %d %d\tpckt.pts: %" PRId64 "\tseek: %" PRId64 "\n",
                   pFormatContext->streams[0]->time_base.num,
                   pFormatContext->streams[0]->time_base.den,
                   pPacket->pts, seek_target);
            av_packet_unref(pPacket);
            continue;
        }

        /* Send the packet for decoding. */
        int response = avcodec_send_packet(pCodecContext, pPacket);
        /* The decoder keeps its own reference; release ours to avoid
         * leaking a buffer per packet. */
        av_packet_unref(pPacket);

        if (response < 0) {
            logging("Error while sending a packet to the decoder: %s", av_err2str(response));
            return response;
        }

        /* One packet can yield zero or more frames — drain them all. */
        while (response >= 0)
        {
            response = avcodec_receive_frame(pCodecContext, pFrame);
            if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
                break;
            } else if (response < 0) {
                logging("Error while receiving a frame from the decoder: %s", av_err2str(response));
                return response;
            }

            /* pts is int64_t — PRId64 keeps the varargs types correct. */
            logging(
                "Frame %d (type=%c, size=%d bytes, format=%d) pts %" PRId64 " key_frame %d coded_picture_number %d",
                pCodecContext->frame_number,
                av_get_picture_type_char(pFrame->pict_type),
                pFrame->pkt_size,
                pFrame->format,
                pFrame->pts,
                pFrame->key_frame,
                pFrame->coded_picture_number
            );

            char frame_filename[1024];
            snprintf(frame_filename, sizeof(frame_filename), "%s-%d.pgm", "im/frame", pCodecContext->frame_number);

            if (pFrame->format != AV_PIX_FMT_YUV420P)
            {
                logging("Warning: the generated file may not be a grayscale image, but could e.g. be just the R component if the video format is RGB");
            }
            /* Save the luma (Y) plane as a grayscale .pgm file. */
            save_gray_frame(pFrame->data[0], pFrame->linesize[0], pFrame->width, pFrame->height, frame_filename);

            /* Was av_frame_unref(m_pAVFrame) — an undefined identifier;
             * the frame actually in use here is pFrame. */
            av_frame_unref(pFrame);
        }
    }
    return 0;
}