
Recherche avancée
Médias (1)
-
Sintel MP4 Surround 5.1 Full
13 mai 2011, par
Mis à jour : Février 2012
Langue : English
Type : Video
Autres articles (36)
-
Personnaliser en ajoutant son logo, sa bannière ou son image de fond
5 septembre 2013, par
Certains thèmes prennent en compte trois éléments de personnalisation : l’ajout d’un logo ; l’ajout d’une bannière ; l’ajout d’une image de fond.
-
Publier sur MédiaSpip
13 juin 2013
Puis-je poster des contenus à partir d’une tablette Ipad ?
Oui, si votre Médiaspip installé est à la version 0.2 ou supérieure. Contacter au besoin l’administrateur de votre MédiaSpip pour le savoir -
Taille des images et des logos définissables
9 février 2011, par
Dans beaucoup d’endroits du site, logos et images sont redimensionnées pour correspondre aux emplacements définis par les thèmes. L’ensemble de ces tailles pouvant changer d’un thème à un autre peuvent être définies directement dans le thème et éviter ainsi à l’utilisateur de devoir les configurer manuellement après avoir changé l’apparence de son site.
Ces tailles d’images sont également disponibles dans la configuration spécifique de MediaSPIP Core. La taille maximale du logo du site en pixels, on permet (...)
Sur d’autres sites (4438)
-
How to use ffmpeg to open 2 cams and push the stream from one of them ?
23 septembre 2024, par kkk123
I'm using ffmpeg to capture 2 cams and push the stream from one of them.


For single camera, I start capturing frames by calling
startCameraCapture()
frommain()
which callsinitRemoteStream()
. Here it opens a pipe toffmpeg
to initialize streaming. Followed by it,decodePacket()
is called which writes YUV422 data to the pipe. It runs fine.

But when I want to use 2 cameras, I create a thread for each camera in
main()
and defineactive_cam
. When I want to push cam1's stream,active_cam = 1
. The same logic of the single cam is used. It doesn't work.

Could you please help to understand the logic ? Following is my C++ code. Thanks.


#include <iostream>
#include <thread>
#include <atomic>
#include <mutex>
#include <condition_variable>
#include <queue>
#include <opencv2/opencv.hpp>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
#include <libavdevice/avdevice.h>
}

// 定义全局变量
std::queue frame_queue1, frame_queue2;
std::mutex queue_mutex1, queue_mutex2;
std::condition_variable cv1, cv2;
std::atomic<bool> keep_running(true);
FILE* pipe_s1 = nullptr;
FILE* pipe_s2 = nullptr;

// 初始化推流
FILE* initRemoteStream(int camera_id) {
 std::string ffmpegCmd = "ffmpeg -y -f rawvideo -vcodec rawvideo -pix_fmt yuv422 -s 1920x1080 -r 30 -i - -vf scale 1280x720 -c:v libx264 -f rtsp rtsp://192.168.66.126/live/stream" + std::to_string(camera_id);
 FILE* pipe_s = popen(ffmpegCmd.c_str(), "w");
 if (!pipe_s) {
 std::cerr << "无法启动 FFmpeg 进程" << std::endl;
 return nullptr;
 }
 return pipe_s;
}

// 解码数据包
bool decodePacket(AVCodecContext *codec_ctx, AVPacket *packet, int video_stream_index, cv::Mat& global_frame, std::queue& frame_queue, std::condition_variable& cv) {
 AVFrame *frame = av_frame_alloc();
 int ret;

 if (packet->stream_index == video_stream_index) {
 ret = avcodec_send_packet(codec_ctx, packet);
 if (ret < 0) return false;

 ret = avcodec_receive_frame(codec_ctx, frame);
 if (ret < 0) return false;

 int width = codec_ctx->width;
 int height = codec_ctx->height;

 // 创建 YUV422 格式的 Mat 对象
 cv::Mat yuv422_img(height, width, CV_8UC2, frame->data[0]);

 // 将 YUV422 转换为 BGR
 //cv::cvtColor(yuv422_img, global_frame, cv::COLOR_YUV2BGR_YUY2);
 
 {
 std::lock_guard lock(queue_mutex1);
 frame_queue.push(global_frame.clone());
 cv.notify_one();
 }
 }

 av_frame_free(&frame);
 return true;
}


// 摄像头捕获线程
void startCameraCapture(const char* device_name, int camera_id) {
 AVFormatContext *format_ctx = nullptr;
 AVCodecContext *codec_ctx = nullptr;
 const AVCodec *codec = nullptr;
 AVPacket packet;
 int video_stream_index = -1;

 avdevice_register_all();
 avformat_network_init();

 AVDictionary *options = nullptr;
 av_dict_set(&options, "video_size", "640x480", 0);
 av_dict_set(&options, "framerate", "30", 0);
 AVInputFormat *input_format = av_find_input_format("v4l2");

 if (avformat_open_input(&format_ctx, device_name, input_format, &options) != 0) {
 std::cerr << "无法打开输入" << std::endl;
 return;
 }

 if (avformat_find_stream_info(format_ctx, nullptr) < 0) return;

 for (unsigned int i = 0; i < format_ctx->nb_streams; i++) {
 if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
 video_stream_index = i;
 break;
 }
 }

 AVCodecParameters *codec_par = format_ctx->streams[video_stream_index]->codecpar;
 codec = avcodec_find_decoder(codec_par->codec_id);
 codec_ctx = avcodec_alloc_context3(codec);
 avcodec_parameters_to_context(codec_ctx, codec_par);
 avcodec_open2(codec_ctx, codec, nullptr);

 // 初始化推流
 FILE* pipe_s = initRemoteStream(camera_id);
 if (camera_id == 1) {
 pipe_s1 = pipe_s;
 } else {
 pipe_s2 = pipe_s;
 }

 while (keep_running) {
 if (av_read_frame(format_ctx, &packet) >= 0) {
 cv::Mat global_frame;
 if (decodePacket(codec_ctx, &packet, video_stream_index, global_frame, (camera_id == 1 ? frame_queue1 : frame_queue2), (camera_id == 1 ? cv1 : cv2))) {
 av_packet_unref(&packet);
 }
 }
 }

 avcodec_free_context(&codec_ctx);
 avformat_close_input(&format_ctx);
 pclose(pipe_s);
}

// 推理线程
void inferenceThread() {
 while (keep_running) {
 cv::Mat frame1, frame2;

 {
 std::unique_lock lock(queue_mutex1);
 cv1.wait(lock, [] { return !frame_queue1.empty(); });
 frame1 = frame_queue1.front();
 frame_queue1.pop();
 }

 {
 std::unique_lock lock(queue_mutex2);
 cv2.wait(lock, [] { return !frame_queue2.empty(); });
 frame2 = frame_queue2.front();
 frame_queue2.pop();
 }

 // 进行推理处理 

 // 根据CAN信号决定推流
 int selected_camera_id = 1; // 假设根据CAN信号确定的摄像头ID
 FILE* selected_pipe_s = (selected_camera_id == 1) ? pipe_s1 : pipe_s2; // 选择相应的推流管道

 if (selected_pipe_s) {
 fwrite(selected_camera_id == 1 ? frame1.data : frame2.data, 1, (selected_camera_id == 1 ? frame1.total() : frame2.total()) * frame1.elemSize(), selected_pipe_s);
 }
 }
}

// Entry point: one capture thread per camera plus the shared inference thread.
int main(int argc, char *argv[]) {
    std::thread camera1(startCameraCapture, "/dev/video0", 1);
    std::thread camera2(startCameraCapture, "/dev/video1", 2);
    std::thread infer_thread(inferenceThread);

    // NOTE(review): keep_running is never set to false anywhere, so these
    // joins block forever — wire it to a signal handler or key press to get a
    // clean shutdown.
    camera1.join();
    camera2.join();
    infer_thread.join();

    // BUG FIX: the capture threads already pclose() their own pipes; closing
    // pipe_s1/pipe_s2 here again was a double-close of the same FILE*.
    return 0;
}

</bool></queue></mutex></atomic></thread></iostream>


-
Open cv.VideoCapture(index) - ffmpeg list camera names - How to match ?
15 novembre 2024, par Chris P

def fetch_camera_input_settings(self):
 try:
 self.database_functions = database_functions

 self.camera_input_device_name = database_functions.read_setting("camera_input_device_name")["value"]
 self.camera_input_device_number = int(self.database_functions.read_setting("camera_input_device_number")["value"])

 self.camera_input_devices = [[0,-1,"Καμία συσκευή κάμερας"]]
 self.available_cameras = [{"device_index":-1,"device_name":"Καμία συσκευή κάμερας"}]

 # FFmpeg command to list video capture devices on Windows
 cmd = ["ffmpeg", "-list_devices", "true", "-f", "dshow", "-i", "dummy"]
 result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
 output = result.stderr # FFmpeg sends device listing to stderr

 # Updated regular expression to capture both video and audio devices
 device_pattern = re.compile(r'\[dshow @ .+?\] "(.*?)" \(video\)')
 cameras = device_pattern.findall(output)
 counter = 0
 for camera in cameras:
 counter += 1
 self.camera_input_devices.append([counter,counter-1,camera])
 self.available_cameras.append({"device_index": counter-1, "device_name": camera})

 self.to_emitter.send({"type":"available_devices","devices":self.camera_input_devices,"device_index":self.camera_input_device_number})
 except:
 error_message = traceback.format_exc()
 self.to_emitter.send({"type":"error","error_message":error_message})




How to match ffmpeg camera device names output with cv2.VideoCapture which wants camera index as input ?


-
avformat/dvdvideodec : open subdemuxer after initializing IFO headers
7 octobre 2024, par Marth64