
Recherche avancée
Médias (33)
-
Stereo master soundtrack
17 octobre 2011, par
Mis à jour : Octobre 2011
Langue : English
Type : Audio
-
#7 Ambience
16 octobre 2011, par
Mis à jour : Juin 2015
Langue : English
Type : Audio
-
#6 Teaser Music
16 octobre 2011, par
Mis à jour : Février 2013
Langue : English
Type : Audio
-
#5 End Title
16 octobre 2011, par
Mis à jour : Février 2013
Langue : English
Type : Audio
-
#3 The Safest Place
16 octobre 2011, par
Mis à jour : Février 2013
Langue : English
Type : Audio
-
#4 Emo Creates
15 octobre 2011, par
Mis à jour : Février 2013
Langue : English
Type : Audio
Autres articles (65)
-
MediaSPIP version 0.1 Beta
16 avril 2011, par
MediaSPIP 0.1 beta est la première version de MediaSPIP décrétée comme "utilisable".
Le fichier zip ici présent contient uniquement les sources de MediaSPIP en version standalone.
Pour avoir une installation fonctionnelle, il est nécessaire d’installer manuellement l’ensemble des dépendances logicielles sur le serveur.
Si vous souhaitez utiliser cette archive pour une installation en mode ferme, il vous faudra également procéder à d’autres modifications (...) -
MediaSPIP 0.1 Beta version
25 avril 2011, par
MediaSPIP 0.1 beta is the first version of MediaSPIP proclaimed as "usable".
The zip file provided here only contains the sources of MediaSPIP in its standalone version.
To get a working installation, you must manually install all software dependencies on the server.
If you want to use this archive for an installation in "farm mode", you will also need to proceed to other manual (...) -
Amélioration de la version de base
13 septembre 2013
Jolie sélection multiple
Le plugin Chosen permet d’améliorer l’ergonomie des champs de sélection multiple. Voir les deux images suivantes pour comparer.
Il suffit pour cela d’activer le plugin Chosen (Configuration générale du site > Gestion des plugins), puis de configurer le plugin (Les squelettes > Chosen) en activant l’utilisation de Chosen dans le site public et en spécifiant les éléments de formulaires à améliorer, par exemple select[multiple] pour les listes à sélection multiple (...)
Sur d’autres sites (7805)
-
Open cv.VideoCapture(index) - ffmpeg list camera names - How to match ?
15 novembre 2024, par Chris Pdef fetch_camera_input_settings(self):
 try:
 self.database_functions = database_functions

 self.camera_input_device_name = database_functions.read_setting("camera_input_device_name")["value"]
 self.camera_input_device_number = int(self.database_functions.read_setting("camera_input_device_number")["value"])

 self.camera_input_devices = [[0,-1,"Καμία συσκευή κάμερας"]]
 self.available_cameras = [{"device_index":-1,"device_name":"Καμία συσκευή κάμερας"}]

 # FFmpeg command to list video capture devices on Windows
 cmd = ["ffmpeg", "-list_devices", "true", "-f", "dshow", "-i", "dummy"]
 result = subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
 output = result.stderr # FFmpeg sends device listing to stderr

 # Updated regular expression to capture both video and audio devices
 device_pattern = re.compile(r'\[dshow @ .+?\] "(.*?)" \(video\)')
 cameras = device_pattern.findall(output)
 counter = 0
 for camera in cameras:
 counter += 1
 self.camera_input_devices.append([counter,counter-1,camera])
 self.available_cameras.append({"device_index": counter-1, "device_name": camera})

 self.to_emitter.send({"type":"available_devices","devices":self.camera_input_devices,"device_index":self.camera_input_device_number})
 except:
 error_message = traceback.format_exc()
 self.to_emitter.send({"type":"error","error_message":error_message})




How to match ffmpeg camera device names output with cv2.VideoCapture which wants camera index as input ?


-
How to use ffmpeg to open 2 cams and push the stream from one of them ?
23 septembre 2024, par kkk123
I'm using ffmpeg to capture 2 cams and push stream from one of them.


For a single camera, I start capturing frames by calling
startCameraCapture() from main(), which calls initRemoteStream().
There it opens a pipe to ffmpeg to initialize streaming. After that,
decodePacket() is called, which writes YUV422 data to the pipe. It runs fine.

But when I want to use 2 cameras, I create a thread for each camera in
main() and define active_cam. When I want to push cam1's stream, I set
active_cam = 1. The same logic as the single-cam case is used. It doesn't work.

Could you please help to understand the logic ? Following is my C++ code. Thanks.


#include <iostream>
#include <thread>
#include <atomic>
#include <mutex>
#include 
#include <queue>
#include <opencv2></opencv2>opencv.hpp>

extern "C" {
#include <libavcodec></libavcodec>avcodec.h>
#include <libavformat></libavformat>avformat.h>
#include <libavutil></libavutil>imgutils.h>
#include <libavutil></libavutil>opt.h>
#include <libswscale></libswscale>swscale.h>
#include <libavdevice></libavdevice>avdevice.h>
}

// 定义全局变量
std::queue frame_queue1, frame_queue2;
std::mutex queue_mutex1, queue_mutex2;
std::condition_variable cv1, cv2;
std::atomic<bool> keep_running(true);
FILE* pipe_s1 = nullptr;
FILE* pipe_s2 = nullptr;

// Launch an ffmpeg child process that reads raw packed 4:2:2 frames on stdin
// and publishes them as an RTSP stream for the given camera.
// Returns the writable pipe, or nullptr if the process could not be started.
FILE* initRemoteStream(int camera_id) {
    // FIX: "yuv422" is not a valid ffmpeg pixel format (packed 4:2:2 is
    // "yuyv422"), and a scale filter must be written "scale=1280:720", not
    // "scale 1280x720" — either typo makes ffmpeg exit immediately, so every
    // later fwrite() into the pipe fails.
    // NOTE(review): the capture side requests 640x480 from v4l2, but this
    // pipe declares "-s 1920x1080" — confirm the intended frame size so the
    // two ends agree, otherwise ffmpeg misinterprets the byte stream.
    std::string ffmpegCmd = "ffmpeg -y -f rawvideo -vcodec rawvideo -pix_fmt yuyv422 -s 1920x1080 -r 30 -i - -vf scale=1280:720 -c:v libx264 -f rtsp rtsp://192.168.66.126/live/stream" + std::to_string(camera_id);
    FILE* pipe_s = popen(ffmpegCmd.c_str(), "w");
    if (!pipe_s) {
        std::cerr << "无法启动 FFmpeg 进程" << std::endl;
        return nullptr;
    }
    return pipe_s;
}

// 解码数据包
bool decodePacket(AVCodecContext *codec_ctx, AVPacket *packet, int video_stream_index, cv::Mat& global_frame, std::queue& frame_queue, std::condition_variable& cv) {
 AVFrame *frame = av_frame_alloc();
 int ret;

 if (packet->stream_index == video_stream_index) {
 ret = avcodec_send_packet(codec_ctx, packet);
 if (ret < 0) return false;

 ret = avcodec_receive_frame(codec_ctx, frame);
 if (ret < 0) return false;

 int width = codec_ctx->width;
 int height = codec_ctx->height;

 // 创建 YUV422 格式的 Mat 对象
 cv::Mat yuv422_img(height, width, CV_8UC2, frame->data[0]);

 // 将 YUV422 转换为 BGR
 //cv::cvtColor(yuv422_img, global_frame, cv::COLOR_YUV2BGR_YUY2);
 
 {
 std::lock_guard lock(queue_mutex1);
 frame_queue.push(global_frame.clone());
 cv.notify_one();
 }
 }

 av_frame_free(&frame);
 return true;
}


// 摄像头捕获线程
void startCameraCapture(const char* device_name, int camera_id) {
 AVFormatContext *format_ctx = nullptr;
 AVCodecContext *codec_ctx = nullptr;
 const AVCodec *codec = nullptr;
 AVPacket packet;
 int video_stream_index = -1;

 avdevice_register_all();
 avformat_network_init();

 AVDictionary *options = nullptr;
 av_dict_set(&options, "video_size", "640x480", 0);
 av_dict_set(&options, "framerate", "30", 0);
 AVInputFormat *input_format = av_find_input_format("v4l2");

 if (avformat_open_input(&format_ctx, device_name, input_format, &options) != 0) {
 std::cerr << "无法打开输入" << std::endl;
 return;
 }

 if (avformat_find_stream_info(format_ctx, nullptr) < 0) return;

 for (unsigned int i = 0; i < format_ctx->nb_streams; i++) {
 if (format_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
 video_stream_index = i;
 break;
 }
 }

 AVCodecParameters *codec_par = format_ctx->streams[video_stream_index]->codecpar;
 codec = avcodec_find_decoder(codec_par->codec_id);
 codec_ctx = avcodec_alloc_context3(codec);
 avcodec_parameters_to_context(codec_ctx, codec_par);
 avcodec_open2(codec_ctx, codec, nullptr);

 // 初始化推流
 FILE* pipe_s = initRemoteStream(camera_id);
 if (camera_id == 1) {
 pipe_s1 = pipe_s;
 } else {
 pipe_s2 = pipe_s;
 }

 while (keep_running) {
 if (av_read_frame(format_ctx, &packet) >= 0) {
 cv::Mat global_frame;
 if (decodePacket(codec_ctx, &packet, video_stream_index, global_frame, (camera_id == 1 ? frame_queue1 : frame_queue2), (camera_id == 1 ? cv1 : cv2))) {
 av_packet_unref(&packet);
 }
 }
 }

 avcodec_free_context(&codec_ctx);
 avformat_close_input(&format_ctx);
 pclose(pipe_s);
}

// 推理线程
void inferenceThread() {
 while (keep_running) {
 cv::Mat frame1, frame2;

 {
 std::unique_lock lock(queue_mutex1);
 cv1.wait(lock, [] { return !frame_queue1.empty(); });
 frame1 = frame_queue1.front();
 frame_queue1.pop();
 }

 {
 std::unique_lock lock(queue_mutex2);
 cv2.wait(lock, [] { return !frame_queue2.empty(); });
 frame2 = frame_queue2.front();
 frame_queue2.pop();
 }

 // 进行推理处理 

 // 根据CAN信号决定推流
 int selected_camera_id = 1; // 假设根据CAN信号确定的摄像头ID
 FILE* selected_pipe_s = (selected_camera_id == 1) ? pipe_s1 : pipe_s2; // 选择相应的推流管道

 if (selected_pipe_s) {
 fwrite(selected_camera_id == 1 ? frame1.data : frame2.data, 1, (selected_camera_id == 1 ? frame1.total() : frame2.total()) * frame1.elemSize(), selected_pipe_s);
 }
 }
}

int main(int argc, char *argv[]) {
 std::thread camera1(startCameraCapture, "/dev/video0", 1);
 std::thread camera2(startCameraCapture, "/dev/video1", 2);
 std::thread infer_thread(inferenceThread);

 camera1.join();
 camera2.join();
 infer_thread.join();

 // 释放资源
 if (pipe_s1) pclose(pipe_s1);
 if (pipe_s2) pclose(pipe_s2);

 return 0;
}

</bool></queue></mutex></atomic></thread></iostream>


-
FFmpeg.AutoGen C# library Can't open Codec G726
8 septembre 2024, par Rushdi Eskandar
I am stuck with FFmpeg.AutoGen G726 Audio codec; it always shows the error "invalid argument".
Note that I tried other codecs such as G722 and G711 and they work fine — only the G726 codec fails. I also confirm that my binaries support the G726 codec, verified with the command ffmpeg -formats. Please tell me what could be the reason, and if you know any other library for decoding G726 that would be better


public unsafe class AudioDecoder : IDisposable
 {
 private AVCodecContext* _codecContext;
 private AVCodec* _codec;
 private AVFrame* _frame;
 private AVPacket* _packet;

 /// <summary>
 /// Creates and opens an ADPCM G.726 decoder (8 kHz mono, 32 kbit/s).
 /// Throws if the codec is missing or cannot be opened.
 /// </summary>
 public AudioDecoder()
 {
 // Find the G726 decoder
 _codec = ffmpeg.avcodec_find_decoder(AVCodecID.AV_CODEC_ID_ADPCM_G726);
 if (_codec == null)
 {
 throw new Exception("G726 codec not found.");
 }

 // Allocate codec context
 _codecContext = ffmpeg.avcodec_alloc_context3(_codec);
 if (_codecContext == null)
 {
 throw new Exception("Failed to allocate codec context.");
 }

 // Print supported sample formats (diagnostic output)
 Console.WriteLine("Supported sample formats for G726 codec:");
 AVSampleFormat* sample_fmts = _codec->sample_fmts;
 if (sample_fmts != null)
 {
 while (*sample_fmts != AVSampleFormat.AV_SAMPLE_FMT_NONE)
 {
 Console.WriteLine($"Sample format: {*sample_fmts}");
 sample_fmts++;
 }
 }

 // Set codec parameters
 _codecContext->sample_rate = 8000; // G726 typically uses 8000Hz
 if (_codec->sample_fmts != null)
 {
 _codecContext->sample_fmt = *_codec->sample_fmts; // Use the first supported sample format
 }
 else
 {
 _codecContext->sample_fmt = AVSampleFormat.AV_SAMPLE_FMT_S16; // Fallback to a common format
 }
 _codecContext->bit_rate = 32000; // 32 kbit/s

 // FIX: ffmpeg's G.726 decoder derives its code size from
 // bits_per_coded_sample and rejects avcodec_open2 with EINVAL
 // ("invalid argument") when it is left at 0 — this is the reported
 // failure. 32000 bit/s at 8000 Hz = 4 bits per sample.
 _codecContext->bits_per_coded_sample = 4;

 // FIX: G.726 is mono-only; the decoder also fails to open when no
 // channel layout is set. NOTE(review): on FFmpeg.AutoGen bindings
 // built against FFmpeg < 5.1 use `_codecContext->channels = 1;`
 // instead of the AVChannelLayout API — confirm the binding version.
 ffmpeg.av_channel_layout_default(&_codecContext->ch_layout, 1);

 // Attempt to open the codec with detailed error logging
 int ret = ffmpeg.avcodec_open2(_codecContext, _codec, null);
 if (ret < 0)
 {
 byte[] errBuf = new byte[1024];
 fixed (byte* errPtr = errBuf)
 {
 ffmpeg.av_strerror(ret, errPtr, (ulong)errBuf.Length);
 }
 // Trim the trailing NULs so the exception message is readable.
 string errMessage = System.Text.Encoding.UTF8.GetString(errBuf).TrimEnd('\0');
 throw new Exception($"Failed to open codec (Error code: {ret}): {errMessage}");
 }

 // Allocate packet and frame
 _packet = ffmpeg.av_packet_alloc();
 _frame = ffmpeg.av_frame_alloc();
 if (_packet == null || _frame == null)
 {
 throw new Exception("Failed to allocate packet or frame.");
 }
 }

 /// <summary>
 /// Releases the unmanaged FFmpeg resources. FIX: the class declares
 /// IDisposable but the original had no Dispose, leaking the codec
 /// context, frame and packet.
 /// </summary>
 public void Dispose()
 {
 if (_frame != null) { var frame = _frame; ffmpeg.av_frame_free(&frame); _frame = null; }
 if (_packet != null) { var packet = _packet; ffmpeg.av_packet_free(&packet); _packet = null; }
 if (_codecContext != null) { var ctx = _codecContext; ffmpeg.avcodec_free_context(&ctx); _codecContext = null; }
 GC.SuppressFinalize(this);
 }
 }