
Other articles (61)
-
Websites made with MediaSPIP
2 May 2011
This page lists some websites based on MediaSPIP.
-
Apache-specific configuration
4 February 2011
Specific modules
For the Apache configuration, it is advisable to enable certain modules that are not specific to MediaSPIP but that improve performance: mod_deflate and mod_headers, to compress pages automatically via Apache (see this tutorial); mod_expires, to handle hit expiration correctly (see this tutorial).
It is also advisable to add Apache support for the mime-type of WebM files, as described in this tutorial.
Creating a (...) -
Creating farms of unique websites
13 April 2011
MediaSPIP platforms can be installed as a farm, with a single "core" hosted on a dedicated server and used by multiple websites.
This allows (among other things): implementation costs to be shared between several different projects/individuals; rapid deployment of multiple unique sites; creation of groups of like-minded sites, making it possible to browse media in a more controlled and selective environment than the major "open" (...)
On other sites (6059)
-
What Is Incrementality & Why Is It Important in Marketing?
26 March 2024, by Erin -
CRO Audit: Increase Your Conversions in 10 Simple Steps
25 March 2024, by Erin -
Display real time frames from several RTSP streams
13 February 2024, by Mrax
I have this class; it uses the ffmpeg library for RTSP live streaming:


#include <iostream>
#include <string>
#include <vector>
#include <mutex>

extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
}

class ryMediaSource
{
public:
 ryMediaSource() {}
 ryMediaSource(const ryMediaSource& other);
 ~ryMediaSource();

 bool ryOpenMediaSource(const std::string&);

private:
 // mediaSource pMediaSource; // (type not included in this excerpt)
 AVFormatContext* pFormatCtx;
 mutable std::mutex pMutex;
};


And inside my main file, I have this vector of ryMediaSource and four RTSP URLs:


std::vector<ryMediaSource> mediaSources;
std::vector<std::string> streams =
{
 {"rtsp://1..."},
 {"rtsp://2..."},
 {"rtsp://3..."},
 {"rtsp://4..."}
};


Creating an instance for every stream:


for (const auto& stream : streams)
{
 mediaSources.emplace_back(); // Create a new instance for each stream
}



And opening all the streams (I need to have access to all the streams, all the time).


for (size_t s = 0; s < streams.size(); s++)
{
 mediaSources[s].ryOpenMediaSource(streams[s]);
}



After all the streams are loaded, I start displaying the video from all of them via av_read_frame(pFormatCtx, pPacket).
But there is a lag between what is displayed and what the sources (IP cameras) are actually capturing.
For ryOpenMediaSource(streams[0]) it is about 11 seconds, for ryOpenMediaSource(streams[1]) about 7 seconds, for ryOpenMediaSource(streams[2]) about 4 seconds, and ryOpenMediaSource(streams[3]) is real time.
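For reference, here is roughly how I measure that gap: a minimal sketch against the class from the MRE below, where measureStartupLag is just an illustrative helper of mine, not part of the class:


#include <chrono>
#include <iostream>

// Wall-clock time from opening a source until its first decoded frame
// comes back from ryGetMediaSourceFrame().
void measureStartupLag(ryMediaSource& source, const std::string& url)
{
 const auto t0 = std::chrono::steady_clock::now();
 if (!source.ryOpenMediaSource(url))
  return;

 ryMediaSource::mediaSourceParams msp = { 0, 0, 0, 0.0, nullptr };
 while (msp.frame == nullptr)
  msp = source.ryGetMediaSourceFrame(); // poll until the first frame arrives

 const auto t1 = std::chrono::steady_clock::now();
 std::cout << url << ": first frame after "
  << std::chrono::duration_cast<std::chrono::milliseconds>(t1 - t0).count()
  << " ms" << std::endl;

 delete[] msp.frame; // ryGetMediaSourceFrame() hands ownership to the caller
}
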
I realized that the issue is in my ryOpenMediaSource code:


bool ryMediaSource::ryOpenMediaSource(const std::string& url)
{
 int rc = -1;

 pFormatCtx = avformat_alloc_context();
 if (!pFormatCtx)
 throw std::runtime_error("Failed to allocate AVFormatContext.");
 rc = avformat_open_input(&pFormatCtx, url.c_str(), NULL, NULL);
 if (rc < 0)
 {
 return false;
 }

 return true;
}



My question is: why is this happening? Why can't all streams have the same (time stamp?) as the last one inserted in my vector of ryMediaSource?


Should I overwrite some variable of pFormatCtx to "force" all elements of the vector to have the same (time stamp?) as the last one? If so, can you give me some guidance?
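

To make the question concrete, this is the field I am talking about; the helper below is hypothetical (pFormatCtx is private in my class, so I would have to expose it somehow):

// Hypothetical helper: what each demuxer reports as the stream start.
// start_time_realtime is documented as microseconds since the Unix epoch,
// or AV_NOPTS_VALUE when unknown.
static int64_t startTimeRealtime(const AVFormatContext* ctx)
{
 return ctx ? ctx->start_time_realtime : AV_NOPTS_VALUE;
}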


I tried setting some different values on pFormatCtx after loading it with avformat_open_input(&pFormatCtx, url.c_str(), NULL, &pDicts), but no luck at all.
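

Concretely, the kind of variation I tried looks like this (a sketch only; the option names come from FFmpeg's format-layer documentation, it assumes the same includes as the MRE below, and none of it closed the gap for me):

// What I tried when opening: low-latency-oriented options.
static AVFormatContext* openLowLatency(const std::string& url)
{
 AVFormatContext* ctx = avformat_alloc_context();
 if (!ctx)
  return nullptr;

 ctx->flags |= AVFMT_FLAG_NOBUFFER; // same effect as the "nobuffer" fflag
 ctx->probesize = 500000; // probe less data before starting

 AVDictionary* opts = nullptr;
 av_dict_set(&opts, "rtsp_transport", "tcp", 0);
 av_dict_set(&opts, "max_delay", "500000", 0); // demuxer max delay, in microseconds

 int rc = avformat_open_input(&ctx, url.c_str(), NULL, &opts);
 av_dict_free(&opts);
 if (rc < 0)
  return nullptr; // avformat_open_input frees ctx on failure

 return ctx;
}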


I am expecting all streams to start at the same time, even if that means pre-loading them, so that later on I can transform these frames into a cv::Mat for rendering.


MRE:


Header:

#pragma once

#include <iostream>
#include <string>
#include <vector>
#include <chrono>
#include <thread>
#include <mutex>


extern "C"
{
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/pixdesc.h>
#include <libavutil/hwcontext.h>
#include <libavutil/opt.h>
#include <libavutil/avassert.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#include <libavdevice/avdevice.h>
#include <libavformat/avio.h>
#include <libavutil/time.h>
}

class ryMediaSource
{
public:
 ryMediaSource()
 : pFormatCtx(nullptr), pCodecCtx(nullptr), pFrame(nullptr), pSwsCtx(nullptr),
 pPacket(nullptr), pVideoStream(-1), pBuffer(nullptr), pPict(nullptr), pFPS(0.0)
 {
 av_log_set_level(0);
 avformat_network_init();
 }
 ryMediaSource(const ryMediaSource& other);
 ~ryMediaSource();

 struct mediaSourceParams
 {
 int sx;
 int sy;
 int lsize;
 double fps;
 unsigned char* frame;
 };

 bool ryOpenMediaSource(const std::string&);
 mediaSourceParams ryGetMediaSourceFrame();
 void ryCloseMediaSource();

private:
 // mediaSource pMediaSource; // (type not included in this excerpt)
 AVFormatContext* pFormatCtx;
 AVCodecContext* pCodecCtx;
 AVFrame* pFrame;
 SwsContext* pSwsCtx;
 AVPacket* pPacket;
 int pVideoStream;
 uint8_t* pBuffer;
 AVFrame* pPict;
 double pFPS;
 mutable std::mutex pMutex;
};

C++ source code:

#include "ryMediaSource.hpp"

ryMediaSource::ryMediaSource(const ryMediaSource& other)
:pFormatCtx(nullptr),
pCodecCtx(nullptr),
pFrame(nullptr),
pSwsCtx(nullptr),
pPacket(nullptr),
pVideoStream(-1),
pBuffer(nullptr),
pPict(nullptr),
pFPS(0.0)
{
 std::lock_guard lock(other.pMutex);
 av_log_set_level(0);
 avformat_network_init();
}

bool ryMediaSource::ryOpenMediaSource(const std::string& url)
{
 int rc = -1;

 try
 {
 AVDictionary* pDicts = nullptr;

 pFormatCtx = avformat_alloc_context();
 if (!pFormatCtx)
 throw std::runtime_error("Failed to allocate AVFormatContext.");
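 // "rtsp_transport=tcp" asks FFmpeg's RTSP demuxer to use TCP interleaving
 // instead of UDP; the option is consumed by avformat_open_input() below.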
 rc = av_dict_set(&pDicts, "rtsp_transport", "tcp", 0);
 if (rc < 0)
 throw std::runtime_error("av_dict_set failed.");
 rc = avformat_open_input(&pFormatCtx, url.c_str(), NULL, &pDicts);
 if (rc < 0)
 {
 av_dict_free(&pDicts); // Free the dictionary in case of an error
 throw std::runtime_error("Could not open source.");
 }
 av_dict_free(&pDicts); // also free any options the demuxer did not consume
 }
 catch (const std::exception& e)
 {
 std::cerr << "Exception: " << e.what() << std::endl;
 return false;
 }

 try
 {
 rc = avformat_find_stream_info(pFormatCtx, NULL);
 if (rc < 0)
 {
 throw std::runtime_error("Could not find stream information.");
 }
 pVideoStream = -1;
 for (size_t v = 0; v < pFormatCtx->nb_streams; ++v)
 {
 if (pFormatCtx->streams[v]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
 {
 pVideoStream = static_cast<int>(v);
 AVRational rational = pFormatCtx->streams[pVideoStream]->avg_frame_rate;
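 // note: despite the name, pFPS holds the frame interval in seconds
 // (den/num of avg_frame_rate, i.e. 1/fps), not frames per second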
 pFPS = 1.0 / ((double)rational.num / (double)(rational.den));
 break;
 }
 }
 if (pVideoStream < 0)
 {
 throw std::runtime_error("Could not find video stream.");
 }

 const AVCodec* pCodec = avcodec_find_decoder(pFormatCtx->streams[pVideoStream]->codecpar->codec_id);
 if (!pCodec)
 {
 throw std::runtime_error("Unsupported codec!");
 }
 pCodecCtx = avcodec_alloc_context3(pCodec);
 if (!pCodecCtx)
 {
 throw std::runtime_error("Failed to allocate AVCodecContext.");
 }
 rc = avcodec_parameters_to_context(pCodecCtx, pFormatCtx->streams[pVideoStream]->codecpar);
 if (rc != 0)
 {
 throw std::runtime_error("Could not copy codec context.");
 }
 rc = avcodec_open2(pCodecCtx, pCodec, NULL);
 if (rc < 0)
 {
 throw std::runtime_error("Could not open codec.");
 }
 pFrame = av_frame_alloc();
 if (!pFrame)
 {
 throw std::runtime_error("Could not allocate frame.");
 }
 pSwsCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_BGR24, SWS_BILINEAR, NULL, NULL, NULL);
 if (!pSwsCtx)
 {
 throw std::runtime_error("Failed to allocate SwsContext.");
 }
 pPacket = av_packet_alloc();
 if (!pPacket)
 {
 throw std::runtime_error("Could not allocate AVPacket.");
 }
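 // the BGR24 target format is chosen so the converted frame can be
 // wrapped directly in a CV_8UC3 cv::Mat later on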
 pBuffer = (uint8_t*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height, 1));
 if (!pBuffer)
 {
 throw std::runtime_error("Could not allocate buffer.");
 }
 pPict = av_frame_alloc();
 if (!pPict)
 {
 throw std::runtime_error("Could not allocate frame.");
 }
 av_image_fill_arrays(pPict->data, pPict->linesize, pBuffer, AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height, 1);
 }
 catch (const std::exception& e)
 {
 std::cerr << "Exception: " << e.what() << std::endl;
 return false;
 }

 return true;
}

ryMediaSource::mediaSourceParams ryMediaSource::ryGetMediaSourceFrame()
{
 mediaSourceParams msp = { 0, 0, 0, 0.0, nullptr };
 char errbuf[AV_ERROR_MAX_STRING_SIZE];

 std::lock_guard lock(pMutex);
 if (av_read_frame(pFormatCtx, pPacket) >= 0)
 {
 if (pPacket->stream_index == pVideoStream)
 {
 int ret = avcodec_send_packet(pCodecCtx, pPacket);
 if (ret < 0)
 {
 av_strerror(ret, errbuf, sizeof(errbuf));
 std::cerr << "Error sending packet for avcodec_send_packet: " << errbuf << std::endl;

 std::cerr << "avcodec_flush_buffers " << errbuf << std::endl;
 avcodec_flush_buffers(pCodecCtx);
 // Handle specific error cases
 if (ret == AVERROR(EAGAIN))
 {
 std::cerr << "EAGAIN indicates that more input is required" << std::endl;
 }
 else if (ret == AVERROR_EOF)
 {
 std::cerr << "AVERROR_EOF indicates that the encoder has been fully flushed" << std::endl;
 }
 else
 {
 //std::cerr << "avcodec_flush_buffers " << errbuf << std::endl;
 // For other errors, you may choose to flush the codec context and continue decoding.
 //avcodec_flush_buffers(pCodecCtx);
 }
 }
 ret = avcodec_receive_frame(pCodecCtx, pFrame);
 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 {
 av_strerror(ret, errbuf, sizeof(errbuf));
 std::cerr << "Error receiving frame for avcodec_receive_frame: " << errbuf << std::endl;

 // EAGAIN means the decoder needs more input before it can emit a frame;
 // EOF means it has been fully flushed.
 av_packet_unref(pPacket); // avoid leaking the packet on this early return
 return msp;
 }
 else if (ret < 0)
 {
 av_strerror(ret, errbuf, sizeof(errbuf));
 std::cerr << "Error receiving frame for avcodec_receive_frame: " << errbuf << std::endl;
 if (ret == AVERROR(EINVAL))
 {
 std::cerr << "EINVAL indicates the decoder was not opened correctly" << std::endl;
 }
 av_packet_unref(pPacket);
 return msp; // do not run sws_scale on an invalid frame
 }
 // Move memory allocation outside the loop if frame size is constant
 size_t bufferSize = static_cast<size_t>(pPict->linesize[0]) * pCodecCtx->height;
 msp.frame = new unsigned char[bufferSize]; // the caller takes ownership and must delete[] it
 msp.lsize = pPict->linesize[0];
 msp.sx = pCodecCtx->width;
 msp.sy = pCodecCtx->height;
 msp.fps = pFPS;
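 // sws_scale() writes the BGR24 pixels into pPict->data, which
 // av_image_fill_arrays() pointed at pBuffer, so pBuffer holds the result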
 sws_scale(pSwsCtx, (uint8_t const* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pPict->data, pPict->linesize);
 std::memcpy(msp.frame, pBuffer, bufferSize);
 //delete[] msp.frame;
 }

 // Unref packet for non-video streams
 av_packet_unref(pPacket);
 }

 return msp;
}

main.cpp

#include "ryMediaSource.hpp"
#include <opencv2/opencv.hpp>

// cv::waitKeyEx() codes for the arrow keys; these are the Windows values,
// adjust for your platform.
static const int LEFT_KEY = 2424832;
static const int RIGHT_KEY = 2555904;

std::vector<std::string> streams =
{
 {"rtsp://1..."},
 {"rtsp://2..."},
 {"rtsp://3..."},
 {"rtsp://4..."},
};

std::vector<ryMediaSource> mediaSources;

int main()
{
 int key = 0;
 int channel = 0;
 bool quit = false;
 std::vector<cv::Mat> streamFrame(streams.size());
 ryMediaSource::mediaSourceParams msp = { 0, 0, 0, 0.0, nullptr };

 for (const auto& stream : streams)
 {
 mediaSources.emplace_back(); // Create a new instance for each stream
 }
 for (size_t s = 0; s < streams.size(); s++)
 {
 try
 {
 mediaSources[s].ryOpenMediaSource(streams[s]);
 }
 catch (const std::exception& e)
 {
 std::cerr << "Error initializing stream " << s << ": " << e.what() << std::endl;
 }
 }

 cv::namedWindow("ryInferenceServer", cv::WINDOW_FREERATIO);
 cv::resizeWindow("ryInferenceServer", 640, 480);
 cv::moveWindow("ryInferenceServer", 0, 0);
 while (!quit)
 {
 for (size_t st = 0; st < mediaSources.size(); ++st)
 {
 msp = mediaSources[st].ryGetMediaSourceFrame();
 if (msp.frame != nullptr)
 {
 cv::Mat preview;
 cv::Mat frame(msp.sy, msp.sx, CV_8UC3, msp.frame, msp.lsize);
 cv::resize(frame, preview, cv::Size(640, 480));
 if (!frame.empty())
 {
 try
 {
 streamFrame[st] = frame.clone();
 if (channel == st)
 {
 cv::imshow("ryInferenceServer", preview);
 key = cv::waitKeyEx(1);
 if (key == LEFT_KEY)
 {
 channel--;
 if (channel < 0)
 channel = 0;
 }
 if (key == RIGHT_KEY)
 {
 channel++;
 if (channel >= mediaSources.size())
 channel = mediaSources.size() - 1;
 }
 if (key == 27)
 quit = true; // a bare break here would only exit the inner stream loop
 }
 streamFrame[st].release();
 delete[] msp.frame;
 }
 catch (const std::exception& e)
 {
 std::cerr << "Exception in processing frame for stream " << st << ": " << e.what() << std::endl;
 }
 }
 frame.release();
 }
 }
 }
}