
Other articles (13)
-
Emballe médias: what is it for?
4 February 2011, by
This plugin is designed to manage sites that publish documents of all kinds.
It creates "media" items, that is: a "media" item is an article in the SPIP sense, created automatically when a document is uploaded, whether audio, video, image or text; only one document can be linked to a given "media" article;
-
Possible deployments
31 January 2010, by
Two types of deployment can be considered, depending on two factors: the intended installation method (standalone or as a farm); the expected number of daily encodings and the expected traffic.
Video encoding is a heavy process that consumes a great deal of system resources (CPU and RAM), and all of this has to be taken into account. The system is therefore only practical on one or more dedicated servers.
Single-server version
The single-server version consists of using only one (...)
-
Managing rights to create and edit objects
8 February 2011, by
By default, many features are restricted to administrators, but they can be configured independently to change the minimum status required to use them, in particular: writing content on the site, configurable in the form-template management; adding notes to articles; adding captions and annotations to images;
On other sites (4031)
-
ffmpeg HLS does not append -hls_base_url to the m3u8
22 June 2023, by user3378326
When I use -hls_base_url http://10.10.10.1:5000/hls, it is not appended to the m3u8 playlist.


The command I use to generate the HLS output on the Windows command line, with ffmpeg 6.0-essentials_build:


ffmpeg -y -i Movie.ts -c:v copy -c:a:1 copy -bsf:v h264_mp4toannexb -f segment -hls_base_url http:/10.10.10.1:5000/hls/ -segment_time 10 -segment_list Movie.m3u8 hls\Movie-%03d.ts


I have also tried:


-hls_base_url http://10.10.10.1:5000/hls -hls_base_url 'http://10.10.10.1:5000/hls' -hls_base_url "http://10.10.10.1:5000/hls"
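A possible explanation, offered here as an assumption rather than something confirmed in the post: -hls_base_url is an option of ffmpeg's hls muxer, while the command above uses the segment muxer (-f segment), which does not read it. Two sketches of commands that should produce prefixed playlist entries, either via the segment muxer's own -segment_list_entry_prefix option or by switching to the hls muxer:


ffmpeg -y -i Movie.ts -c:v copy -c:a copy -bsf:v h264_mp4toannexb -f segment -segment_time 10 -segment_list_entry_prefix http://10.10.10.1:5000/hls/ -segment_list Movie.m3u8 hls\Movie-%03d.ts


ffmpeg -y -i Movie.ts -c copy -bsf:v h264_mp4toannexb -f hls -hls_time 10 -hls_list_size 0 -hls_base_url http://10.10.10.1:5000/hls/ -hls_segment_filename hls\Movie-%03d.ts Movie.m3u8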


-
Flask app using OpenCV crashes when I start recording
17 May 2023, by Mulham Darwish
I built this Flask app to live-stream security cameras. The live stream works, and so does the screenshot function, but when I start recording it crashes; a few times the same code worked and saved the video. Here is the code (the HTML file uses JS).


from flask import Flask, render_template, Response, request
import cv2
import os
import time
import threading
import requests

app = Flask(__name__)

# Define the IP cameras
cameras = [
    {'url': 'rtsp://****:*****@******', 'name': 'Camera 1'},
    {'url': 'rtsp://****:*****@******', 'name': 'Camera 2'},
    {'url': 'rtsp://****:*****@******', 'name': 'Camera 3'},
    {'url': 'rtsp://****:*****@******', 'name': 'Camera 4'}
]

# Create a VideoCapture object for each camera
capture_objs = [cv2.VideoCapture(cam['url']) for cam in cameras]
stop_events = {i: threading.Event() for i in range(len(cameras))}

# Define the directory to save the recorded videos
recording_dir = os.path.join(os.getcwd(), 'recordings')

# Ensure the recording directory exists
if not os.path.exists(recording_dir):
    os.makedirs(recording_dir)

# Define the function to capture and save a video
def record_video(camera_index, stop_recording):
    # Define the codec and file extension
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    file_extension = '.mp4'

    # Get the current timestamp for the filename
    timestamp = time.strftime("%Y%m%d-%H%M%S")

    # Define the filename and path
    filename = f'{cameras[camera_index]["name"]}_{timestamp}{file_extension}'
    filepath = os.path.join(recording_dir, filename)

    # Create a VideoWriter object to save the video
    width = int(capture_objs[camera_index].get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(capture_objs[camera_index].get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(capture_objs[camera_index].get(cv2.CAP_PROP_FPS))
    video_writer = cv2.VideoWriter(filepath, fourcc, fps, (width, height))

    # Capture frames and write them to the file
    while True:
        if stop_recording.is_set():
            break  # stop recording if stop_recording is set
        ret, frame = capture_objs[camera_index].read()
        if ret:
            video_writer.write(frame)
        else:
            break

    # Release the VideoWriter object and the VideoCapture object
    video_writer.release()
    capture_objs[camera_index].release()

@app.route('/')
def index():
    # Render the index page with the list of cameras
    return render_template('index.html', cameras=cameras)

def generate(camera_index):
    # Generate frames from the video feed
    while True:
        ret, frame = capture_objs[camera_index].read()
        if not ret:
            break

        # Encode the frame as JPEG
        _, jpeg = cv2.imencode('.jpg', frame)

        # Yield the frame as a Flask response
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')

@app.route('/video_feed')
def video_feed():
    # Get the camera index from the request arguments
    camera_index = int(request.args.get('camera_index'))

    # Generate the video feed
    return Response(generate(camera_index),
                    mimetype='multipart/x-mixed-replace; boundary=frame')

@app.route('/record', methods=['POST'])
def record():
    # Get the camera index from the request form
    camera_index = int(request.form['camera_index'])

    stop_recording = stop_events[camera_index]  # get the stop_recording event for the camera
    thread = threading.Thread(target=record_video, args=(camera_index, stop_recording))
    thread.start()  # start a thread to record video

    # Return a response indicating that the recording has started
    return 'Recording started.'

@app.route('/stop_record', methods=['POST'])
def stop_record():
    # Get the camera index from the request form
    camera_index = int(request.form['camera_index'])

    # Set the stop_recording event for the corresponding camera thread
    stop_events[camera_index].set()

    # Return a response indicating that recording has been stopped
    return 'Recording stopped.'

@app.route('/screenshot', methods=['POST'])
def take_screenshot():
    # Take a screenshot of the video stream and save it as a file
    camera = capture_objs[int(request.form['camera_id'])]
    success, frame = camera.read()
    if success:
        timestamp = time.strftime("%Y%m%d-%H%M%S")
        filename = f'screenshot_{timestamp}.jpg'
        cv2.imwrite(filename, frame)
        return 'Screenshot taken and saved'
    else:
        return 'Failed to take screenshot'

if __name__ == '__main__':
    app.run()



I tried updating ffmpeg to the latest version, and installed opencv-python-headless and opencv-python with pip, but most of the time I end up with this crash:


* Serving Flask app 'run'
* Debug mode: off
WARNING: This is a development server. Do not use it in a production deployment. Use a production WSGI server instead.
 * Running on http://127.0.0.1:5000
Press CTRL+C to quit
127.0.0.1 - - [17/May/2023 13:24:11] "GET / HTTP/1.1" 200 -
127.0.0.1 - - [17/May/2023 13:24:11] "GET /video_feed?camera_index=0 HTTP/1.1" 200 -
127.0.0.1 - - [17/May/2023 13:24:11] "GET /video_feed?camera_index=1 HTTP/1.1" 200 -
127.0.0.1 - - [17/May/2023 13:24:11] "GET /video_feed?camera_index=2 HTTP/1.1" 200 -
127.0.0.1 - - [17/May/2023 13:24:11] "GET /video_feed?camera_index=3 HTTP/1.1" 200 -
127.0.0.1 - - [17/May/2023 13:24:44] "GET / HTTP/1.1" 200 -
127.0.0.1 - - [17/May/2023 13:24:45] "GET /video_feed?camera_index=3 HTTP/1.1" 200 -
127.0.0.1 - - [17/May/2023 13:24:45] "GET /video_feed?camera_index=0 HTTP/1.1" 200 -
127.0.0.1 - - [17/May/2023 13:24:45] "GET /video_feed?camera_index=1 HTTP/1.1" 200 -
127.0.0.1 - - [17/May/2023 13:24:45] "GET /video_feed?camera_index=2 HTTP/1.1" 200 -
[h264 @ 0x5605285fc5c0] error while decoding MB 28 29, bytestream -9
[h264 @ 0x560529110040] error while decoding MB 15 37, bytestream -6
[h264 @ 0x560528624980] error while decoding MB 45 45, bytestream -23
[h264 @ 0x5605286f1900] error while decoding MB 50 34, bytestream -7
[h264 @ 0x5605285fc5c0] error while decoding MB 25 9, bytestream -17
[h264 @ 0x5605292b0080] error while decoding MB 28 41, bytestream -5
[h264 @ 0x560528660040] error while decoding MB 101 45, bytestream -17
[h264 @ 0x5605285fc5c0] error while decoding MB 42 44, bytestream -5
[h264 @ 0x5605286f1900] error while decoding MB 118 42, bytestream -9
[h264 @ 0x560529110040] error while decoding MB 92 43, bytestream -5
[h264 @ 0x560528660040] error while decoding MB 99 34, bytestream -11
[h264 @ 0x56052932b0c0] error while decoding MB 92 36, bytestream -13
[h264 @ 0x560528667ac0] error while decoding MB 44 54, bytestream -5
[h264 @ 0x560529110040] error while decoding MB 93 33, bytestream -7
[h264 @ 0x5605286dd880] error while decoding MB 27 37, bytestream -19
[h264 @ 0x560528660040] error while decoding MB 66 56, bytestream -9
127.0.0.1 - - [17/May/2023 13:36:45] "POST /record HTTP/1.1" 200 -
Assertion fctx->async_lock failed at libavcodec/pthread_frame.c:175
Aborted (core dumped)
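

One detail that may be relevant, stated here as my own reading rather than a confirmed diagnosis: the failed assertion in libavcodec/pthread_frame.c points at the same decoder being driven from two threads at once. In the code above, generate() (serving the live feed) and record_video() both call read() on the same shared VideoCapture, and record_video() also releases that shared object when it stops. A minimal sketch of one way to avoid the sharing, assuming the cameras accept a second RTSP connection (the 25 fps fallback is likewise an assumption):

def record_video(camera_index, stop_recording):
    # Open a dedicated capture for recording so the live-stream generator
    # never shares a decoder with this thread (assumption: the camera
    # allows a second RTSP connection).
    recorder = cv2.VideoCapture(cameras[camera_index]['url'])
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    timestamp = time.strftime("%Y%m%d-%H%M%S")
    filepath = os.path.join(recording_dir,
                            f'{cameras[camera_index]["name"]}_{timestamp}.mp4')
    fps = int(recorder.get(cv2.CAP_PROP_FPS)) or 25  # fall back if the stream reports 0
    width = int(recorder.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(recorder.get(cv2.CAP_PROP_FRAME_HEIGHT))
    video_writer = cv2.VideoWriter(filepath, fourcc, fps, (width, height))

    while not stop_recording.is_set():
        ret, frame = recorder.read()
        if not ret:
            break
        video_writer.write(frame)

    # Release only this thread's own objects; the shared capture stays open.
    video_writer.release()
    recorder.release()
    stop_recording.clear()  # allow a later recording for the same camera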



-
FFMPEG convert NV12 format to NV12 with the same height and width
7 September 2022, by Chun Wang
I want to use FFmpeg 4.2.2 to convert an input NV12 frame to an output NV12 frame with the same height and width. I used sws_scale for the conversion, but the output frame's colors are all green.


P.S. It may look unnecessary to use swscale to obtain a frame with the same width, height and format, but it is necessary in my project for handling other frames.


I have successfully converted the input NV12 to an output NV12 with a different height and width, and the output frame's colors were correct. But I FAILED to convert NV12 to NV12 with the same height and width. It is so weird, and I cannot figure out why :(


I want to know what the reason is and what I should do.
The following is my code. swsCtx4 is used to convert the NV12 input to the NV12 output; the other contexts are used to test conversions between other formats.
Thank you for your help.
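

As a side note, offered as an assumption rather than part of the original post: when source and destination really do share the same width, height and pixel format, the planes can be copied without swscale at all, for example with libavutil's av_image_copy. A minimal sketch:

// Minimal sketch (assumption): plain NV12 plane copy when no scaling or
// pixel-format change is needed.
av_image_copy(frame2_nv12->data, frame2_nv12->linesize,
              (const uint8_t**)frame_nv12->data, frame_nv12->linesize,
              AV_PIX_FMT_NV12, in_width, in_height);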


//the main code is
AVFrame* frame_nv12 = av_frame_alloc();
frame_nv12->width = in_width;
frame_nv12->height = in_height;
frame_nv12->format = AV_PIX_FMT_NV12;
uint8_t* frame_buffer_nv12 = (uint8_t*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_NV12, in_width, in_height, 1));
av_image_fill_arrays(frame_nv12->data, frame_nv12->linesize, frame_buffer_nv12, AV_PIX_FMT_NV12, in_width, in_height, 1);

AVFrame* frame2_nv12 = av_frame_alloc();
frame2_nv12->width = in_width1;
frame2_nv12->height = in_height1;
frame2_nv12->format = AV_PIX_FMT_NV12;

uint8_t* frame2_buffer_nv12 = (uint8_t*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_NV12, in_width1, in_height1, 1));
av_image_fill_arrays(frame2_nv12->data, frame2_nv12->linesize, frame2_buffer_nv12, AV_PIX_FMT_NV12, in_width1, in_height1, 1);

SwsContext* swsCtx4 = nullptr;
swsCtx4 = sws_getContext(in_width, in_height, AV_PIX_FMT_NV12, in_width1, in_height1, AV_PIX_FMT_NV12,
                         SWS_BILINEAR | SWS_PRINT_INFO, NULL, NULL, NULL);
printf("swsCtx4\n");

ret = sws_scale(swsCtx4, frame_nv12->data, frame_nv12->linesize, 0, frame_nv12->height, frame2_nv12->data, frame2_nv12->linesize);
if (ret < 0) {
    printf("sws_4scale failed\n");
}
 



//the complete code
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
#include <seeker/loggerApi.h>
#include "seeker/common.h"
#include <iostream>

// Fix found earlier: set pts to 0 and dts to 0
#define FILE_SRC "testPicFilter.yuv" // source file
#define FILE_DES "test11.yuv"        // destination file

int count = 0;


int main(int argc, char* argv[])
{
    av_register_all();

    int ret = 0;

    //std::this_thread::sleep_for(std::chrono::milliseconds(5000));
    int count1 = 1;
    int piccount;
    int align = 1;


    /* open the input YUV file */
    FILE* fp_in = fopen(FILE_SRC, "rb+");
    if (fp_in == NULL)
    {
        printf("failed to open the input file\n");
        return 0;
    }
    int in_width = 640;
    int in_height = 360;
    int in_width1 = 640;
    int in_height1 = 360;


    /* output file for the processed frames */
    FILE* fp_out = fopen(FILE_DES, "wb+");
    if (fp_out == NULL)
    {
        printf("failed to create the output file\n");
        return 0;
    }
    char buff[50];

    AVFrame* frame_in = av_frame_alloc();
    unsigned char* frame_buffer_in;
    frame_buffer_in = (unsigned char*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1));
    /* set the plane pointers and memory alignment according to the image */
    av_image_fill_arrays(frame_in->data, frame_in->linesize, frame_buffer_in, AV_PIX_FMT_YUV420P, in_width, in_height, 1);

    frame_in->width = in_width;
    frame_in->height = in_height;
    frame_in->format = AV_PIX_FMT_YUV420P;


    // convert the input YUV into frame_nv12
    AVFrame* frame_nv12 = av_frame_alloc();
    frame_nv12->width = in_width;
    frame_nv12->height = in_height;
    frame_nv12->format = AV_PIX_FMT_NV12;
    uint8_t* frame_buffer_nv12 = (uint8_t*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_NV12, in_width, in_height, 1));
    av_image_fill_arrays(frame_nv12->data, frame_nv12->linesize, frame_buffer_nv12, AV_PIX_FMT_NV12, in_width, in_height, 1);


    AVFrame* frame2_nv12 = av_frame_alloc();
    frame2_nv12->width = in_width1;
    frame2_nv12->height = in_height1;
    frame2_nv12->format = AV_PIX_FMT_NV12;

    uint8_t* frame2_buffer_nv12 = (uint8_t*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_NV12, in_width1, in_height1, 1));
    av_image_fill_arrays(frame2_nv12->data, frame2_nv12->linesize, frame2_buffer_nv12, AV_PIX_FMT_NV12, in_width1, in_height1, 1);


    // frame used to convert back to YUV420P for writing out
    AVFrame* frame_yuv = av_frame_alloc();
    frame_yuv->width = in_width;
    frame_yuv->height = in_height;
    frame_yuv->format = AV_PIX_FMT_YUV420P;
    uint8_t* frame_buffer_yuv = (uint8_t*)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, in_width, in_height, 1));
    av_image_fill_arrays(frame_yuv->data, frame_yuv->linesize, frame_buffer_yuv,
                         AV_PIX_FMT_YUV420P, in_width, in_height, 1);


    SwsContext* swsCtx = nullptr;
    swsCtx = sws_getContext(in_width, in_height, AV_PIX_FMT_YUV420P, in_width, in_height, AV_PIX_FMT_NV12,
                            SWS_BILINEAR | SWS_PRINT_INFO, NULL, NULL, NULL);
    printf("swsCtx\n");

    SwsContext* swsCtx4 = nullptr;
    swsCtx4 = sws_getContext(in_width, in_height, AV_PIX_FMT_NV12, in_width1, in_height1, AV_PIX_FMT_NV12,
                             SWS_BILINEAR | SWS_PRINT_INFO, NULL, NULL, NULL);
    printf("swsCtx4\n");

    SwsContext* swsCtx2 = nullptr;
    swsCtx2 = sws_getContext(in_width1, in_height1, AV_PIX_FMT_NV12, in_width, in_height, AV_PIX_FMT_YUV420P,
                             SWS_BILINEAR | SWS_PRINT_INFO, NULL, NULL, NULL);
    printf("swsCtx2\n");


    while (1)
    {
        count++;

        if (fread(frame_buffer_in, 1, in_width * in_height * 3 / 2, fp_in) != in_width * in_height * 3 / 2)
        {
            break;
        }

        frame_in->data[0] = frame_buffer_in;
        frame_in->data[1] = frame_buffer_in + in_width * in_height;
        frame_in->data[2] = frame_buffer_in + in_width * in_height * 5 / 4;


        // convert to NV12
        int ret = sws_scale(swsCtx, frame_in->data, frame_in->linesize, 0, frame_in->height, frame_nv12->data, frame_nv12->linesize);
        if (ret < 0) {
            printf("sws_scale swsCtx failed\n");
        }


        ret = sws_scale(swsCtx4, frame_nv12->data, frame_nv12->linesize, 0, frame_nv12->height, frame2_nv12->data, frame2_nv12->linesize);
        if (ret < 0) {
            printf("sws_scale swsCtx4 failed\n");
        }


        if (ret > 0) {

            int ret2 = sws_scale(swsCtx2, frame2_nv12->data, frame2_nv12->linesize, 0, frame2_nv12->height, frame_yuv->data, frame_yuv->linesize);
            if (ret2 < 0) {
                printf("sws_scale swsCtx2 failed\n");
            }
            I_LOG("frame_yuv:{},{}", frame_yuv->width, frame_yuv->height);


            //I_LOG("frame_yuv:{}", frame_yuv->format);

            if (frame_yuv->format == AV_PIX_FMT_YUV420P)
            {
                for (int i = 0; i < frame_yuv->height; i++)
                {
                    fwrite(frame_yuv->data[0] + frame_yuv->linesize[0] * i, 1, frame_yuv->width, fp_out);
                }
                for (int i = 0; i < frame_yuv->height / 2; i++)
                {
                    fwrite(frame_yuv->data[1] + frame_yuv->linesize[1] * i, 1, frame_yuv->width / 2, fp_out);
                }
                for (int i = 0; i < frame_yuv->height / 2; i++)
                {
                    fwrite(frame_yuv->data[2] + frame_yuv->linesize[2] * i, 1, frame_yuv->width / 2, fp_out);
                }
                printf("yuv to file\n");
            }
        }
    }


    fclose(fp_in);
    fclose(fp_out);
    av_frame_free(&frame_in);
    av_frame_free(&frame_nv12);
    av_frame_free(&frame_yuv);
    sws_freeContext(swsCtx);
    sws_freeContext(swsCtx2);
    sws_freeContext(swsCtx4);

    //std::this_thread::sleep_for(std::chrono::milliseconds(8000));

    return 0;
}


