Browse Source

准备把模型独立线程,调整为独立进程前的版本。

zfbox
张龙 7 months ago
parent
commit
69663f45ee
  1. 173
      core/CapManager.py
  2. 174
      core/ChannelData.py
  3. 2
      core/ChannelManager.py
  4. 2
      core/ModelManager.py
  5. 10
      model/plugins/ModelBase.py
  6. 52
      myutils/MyRtspManager.py
  7. 16
      myutils/mydvpp.py
  8. 0
      myutils/myutil.py
  9. 49
      run.py
  10. 2
      web/API/__init__.py
  11. 6
      web/API/viedo.py
  12. 40
      web/API/warn.py
  13. 6
      web/__init__.py
  14. 288
      web/main/static/resources/scripts/aiortc-client-new.js
  15. 9
      web/main/static/resources/scripts/channel_manager.js
  16. 10
      web/main/static/resources/scripts/flv.min.js
  17. 160
      web/main/static/resources/scripts/warn_manager.js
  18. 27
      web/main/templates/h264_test.html
  19. 1
      web/main/templates/header.html
  20. 11
      web/main/templates/view_main.html
  21. 72
      web/main/templates/warn_manager.html

173
core/CapManager.py

@ -1,21 +1,54 @@
import math
import queue
import cv2
import threading
import time
from myutils.ConfigManager import myCongif
from myutils.MyDeque import MyDeque
import subprocess as sp
from myutils.MyLogger_logger import LogHandler
class CapManager:
    """Registry that shares one VideoCaptureWithFPS instance per video source.

    start_get_video/stop_get_video reference-count the capture objects so
    that several channels reading the same RTSP/camera source share a single
    capture thread instead of opening the source multiple times.
    """
    def __init__(self):
        # Logger dedicated to this manager.
        self.logger = LogHandler().get_logger("CapManager")
        self.mycap_map = {}  # source -> VideoCaptureWithFPS
        # Guards mycap_map and the per-object reference counts.
        self.lock = threading.Lock()

    def __del__(self):
        pass

    def start_get_video(self, source, type=1):
        """Return the shared capture object for *source*, creating it on first use.

        :param source: video source URL/index, used as the registry key.
        :param type: source kind forwarded to VideoCaptureWithFPS
                     (0=USB camera, 1=RTSP, 2=HIK SDK per that class's doc).
        :return: VideoCaptureWithFPS with its reference count incremented.
        """
        vcf = None
        with self.lock:
            if source in self.mycap_map:
                # Already being captured -- just take another reference.
                vcf = self.mycap_map[source]
                vcf.addcount()
            else:
                # First consumer: create the capture (starts at icount == 1).
                vcf = VideoCaptureWithFPS(source, type)
                self.mycap_map[source] = vcf
        return vcf

    def stop_get_video(self, source):
        """Drop one reference to *source*; forget the capture once unused.

        delcount() itself releases the capture thread when the count hits 0;
        this method only removes the registry entry.
        """
        with self.lock:
            if source in self.mycap_map:
                vcf = self.mycap_map[source]
                vcf.delcount()
                if vcf.icount == 0:
                    del self.mycap_map[source]
            else:
                # Asked to stop a source we never handed out -- bookkeeping bug.
                self.logger.error("数据存在问题!")
mCap = CapManager()
class VideoCaptureWithFPS:
'''视频捕获的封装类,是一个通道一个
打开摄像头 0--USB摄像头1-RTSP,2-海康SDK
'''
def __init__(self, source,type=1):
self.source = source
self.source = self.ensure_udp_transport(source)
self.width = None
self.height = None
self.bok = False
self.icount = 1 #引用次数
# GStreamer --- 内存占用太高,且工作环境的部署也不简单
# self.pipeline = (
# "rtspsrc location=rtsp://192.168.3.102/live1 protocols=udp latency=100 ! "
@ -27,29 +60,28 @@ class VideoCaptureWithFPS:
# )
#self.cap = cv2.VideoCapture(self.pipeline, cv2.CAP_GSTREAMER)
#FFmpeg --更加定制化的使用--但要明确宽高。。。
# self.ffmpeg_cmd = [
# 'ffmpeg',
# '-rtsp_transport', 'udp',
# '-i', 'rtsp://192.168.3.102/live1',
# '-f', 'image2pipe',
# '-pix_fmt', 'bgr24',
# '-vcodec', 'rawvideo', '-'
# ]
# self.pipe = sp.Popen(self.ffmpeg_cmd, stdout=sp.PIPE, bufsize=10 ** 8)
# opencv -- 后端默认使用的就是FFmpeg -- 不支持UDP
self.running = True
#self.frame_queue = queue.Queue(maxsize=1)
self.frame_queue = MyDeque(5)
#self.frame = None
#self.read_lock = threading.Lock()
self.frame = None
self.read_lock = threading.Lock()
self.thread = threading.Thread(target=self.update)
self.thread.start()
def addcount(self):
    # One more consumer shares this capture; bump the reference count.
    # Called by CapManager under its lock.
    self.icount += 1
def delcount(self):
    # One consumer left; once nobody uses this capture any more,
    # stop the reader thread and release the underlying resources.
    self.icount -= 1
    if self.icount ==0: # end the capture thread
        self.release()
def openViedo_opencv(self,source):
self.cap = cv2.VideoCapture(source)
self.cap = cv2.VideoCapture(source,cv2.CAP_FFMPEG)
# self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)
if self.cap.isOpened(): # 若没有打开成功,在读取画面的时候,已有判断和处理 -- 这里也要检查下内存的释放情况
self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
@ -60,31 +92,67 @@ class VideoCaptureWithFPS:
else:
raise ValueError("无法打开视频源")
def ensure_udp_transport(self, source):  # used when pulling the stream over UDP
    """Return the source URL, optionally rewritten to request UDP transport.

    The rewriting logic below is currently disabled, so this is a no-op
    that returns *source* unchanged.
    """
    # Check whether source already contains '?transport=udp'
    # if not source.endswith("?transport=udp"):
    #     # If not, append '?transport=udp'
    #     if "?" in source:
    #         # If other query parameters exist, join with '&'
    #         source += "&transport=udp"
    #     else:
    #         # Otherwise append '?transport=udp' directly
    #         source += "?transport=udp"
    return source
def update(self):
sleep_time = myCongif.get_data("cap_sleep_time")
reconnect_attempts = myCongif.get_data("reconnect_attempts")
frame_interval = 1 / (myCongif.get_data("verify_rate")+1)
last_frame_time = time.time()
while self.running:
try:
# ffmpeg_process = subprocess.Popen(
# ['ffmpeg', '-i', self.source, '-f', 'image2pipe', '-pix_fmt', 'bgr24', '-vcodec', 'rawvideo', '-'],
# stdout=subprocess.PIPE, stderr=subprocess.PIPE
# )
#读取帧后,对帧数据进行处理
# frame = np.frombuffer(raw_frame, np.uint8).reshape((480, 640, 3))
# frame = np.copy(frame) # 创建一个可写的副本
self.openViedo_opencv(self.source)
if not self.cap.isOpened():
raise RuntimeError("视频源打开失败")
failure_count = 0
self.bok = True
while self.running:
ret, frame = self.cap.read()
if not ret:
#subprocess-udp 拉流
#raw_frame = ffmpeg_process.stdout.read(640 * 480 * 3)
if self.cap.grab():
current_time = time.time()
if current_time - last_frame_time > frame_interval:
last_frame_time = current_time
ret, frame = self.cap.retrieve()
if ret:
# resized_frame = cv2.resize(frame, (int(self.width / 2), int(self.height / 2)))
#self.frame_queue.myappend(frame)
with self.read_lock:
self.frame = frame
failure_count = 0 # 重置计数
else:
failure_count += 1
time.sleep(0.5) #休眠一段时间后重试
time.sleep(0.1) # 休眠一段时间后重试
if failure_count >= reconnect_attempts:
with self.read_lock:
self.frame = None
raise RuntimeError("无法读取视频帧")
continue
self.frame_queue.myappend(frame)
failure_count = 0 #重置计数
# 跳过指定数量的帧以避免积压
for _ in range(self.fps):
self.cap.grab()
#正常结束,关闭进程,释放资源
#ffmpeg_process.terminate()
self.cap.release()
self.bok = False
except Exception as e:
print(f"发生异常:{e}")
#ffmpeg_process.terminate()
self.cap.release()
self.bok = False
print(f"{self.source}视频流,将于{sleep_time}秒后重连!")
@ -95,37 +163,14 @@ class VideoCaptureWithFPS:
return
time.sleep(2)
total_sleep += 2
#释放cap资源,由release调用实现
#resized_frame = cv2.resize(frame, (int(self.width / 2), int(self.height / 2)))
# with self.read_lock:
# self.frame = frame
# if self.frame_queue.full():
# try:
# #print("队列满---采集线程丢帧")
# self.frame_queue.get(timeout=0.01) #这里不get的好处是,模型线程不会有None
# except queue.Empty: #为空不处理
# pass
# self.frame_queue.put(frame)
# if not self.frame_queue.full():
# self.frame_queue.put(frame)
def read(self):
'''
直接读视频原画面
:param type: 0-大多数情况读取1-绘制区域时读取一帧但当前帧不丢还是回队列
:return:
'''
# with self.read_lock:
# frame = self.frame.copy() if self.frame is not None else None
# if frame is not None:
# return True, frame
# else:
# return False, None
with self.read_lock:
frame = self.frame.copy() if self.frame is not None else None
if frame is not None:
return True, frame
else:
return False, None
# if not self.frame_queue.empty():
# try:
# frame = self.frame_queue.get(timeout=0.05)
@ -136,15 +181,15 @@ class VideoCaptureWithFPS:
# #print("cap-frame None")
# return False, None
ret = False
frame = None
if self.bok: #连接状态再读取
frame = self.frame_queue.mypopleft()
if frame is not None:
ret = True
else:
print("____读取cap帧为空,采集速度过慢___")
return ret, frame
# ret = False
# frame = None
# if self.bok: #连接状态再读取
# frame = self.frame_queue.mypopleft()
# if frame is not None:
# ret = True
# else:
# print("____读取cap帧为空,采集速度过慢___")
# return ret, frame
def release(self):
self.running = False

174
core/ChannelData.py

@ -7,15 +7,17 @@ import threading
import cv2
import ffmpeg
import subprocess
import select
from collections import deque
from myutils.MyLogger_logger import LogHandler
from core.CapManager import VideoCaptureWithFPS
from core.CapManager import mCap
from core.ACLModelManager import ACLModeManger
from model.plugins.ModelBase import ModelBase
from core.WarnManager import WarnData
from core.DataStruct import ModelinData,ModeloutData
from myutils.MyDeque import MyDeque
from myutils.ConfigManager import myCongif
from myutils.mydvpp import bgr_to_yuv420
class ChannelData:
def __init__(self,channel_id,deque_length,icount_max,warnM):
@ -25,7 +27,8 @@ class ChannelData:
self.warnM = warnM #报警线程管理对象--MQ
self.ffprocess = self.start_h264_encoder(myCongif.get_data("mywidth"),myCongif.get_data("myheight"))#基于frame进行编码
#视频采集相关
self.cap = None #该通道视频采集对象
self.cap = None #该通道视频采集对象 还是vcf对象
self.source = None #rtsp 路径
self.frame_rate = myCongif.get_data("frame_rate")
self.frame_interval = 1.0 / int(myCongif.get_data("verify_rate"))
@ -78,7 +81,6 @@ class ChannelData:
# except queue.Empty:
# self.logger.debug(f"{self.channel_id}--web--获取分析画面失败,队列空")
# return None
frame = self.frame_queue.mypopleft()
return frame
else: #如果没有运行,直接从cap获取画面
@ -90,33 +92,11 @@ class ChannelData:
ret,buffer_bgr_webp = self._encode_frame(frame)
return buffer_bgr_webp
# frame_bgr_webp = self.encode_frame_to_flv(frame)
# return frame_bgr_webp
return None
def encode_frame_to_flv(self, frame):
    """Encode a single raw BGR frame into an FLV (libx264) byte buffer.

    NOTE(review): spawns a fresh ffmpeg process per frame, which is very
    expensive -- acceptable only for occasional/diagnostic use.

    :param frame: HxWx3 BGR numpy array.
    :return: FLV container bytes, or None if encoding failed.
    """
    try:
        # ffmpeg-python async pipeline: raw BGR in on stdin, FLV out on stdout.
        process = (
            ffmpeg
            .input('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{frame.shape[1]}x{frame.shape[0]}')
            .output('pipe:', format='flv',vcodec='libx264')
            .run_async(pipe_stdin=True, pipe_stdout=True, pipe_stderr=True)
        )
        # communicate() writes the frame, closes stdin and drains both pipes,
        # so the one-shot process cannot deadlock on full pipe buffers.
        out, err = process.communicate(input=frame.tobytes())
        if process.returncode != 0:
            raise RuntimeError(f"FFmpeg encoding failed: {err.decode('utf-8')}")
        return out
    except Exception as e:
        print(f"Error during frame encoding: {e}")
        return None
def update_last_frame(self,buffer):
if buffer:
self.frame_queue.myappend(buffer)
# with self.lock:
# self.last_frame = None
# self.last_frame = buffer
@ -136,66 +116,126 @@ class ChannelData:
# pass
#------------h264编码相关---------------
def start_h264_encoder(self,width, height): #宽高一样,初步定全进程一个
def start_h264_encoder(self,width, height): #宽高一样,初步定全进程一个 libx264 h264_ascend
process = subprocess.Popen(
['ffmpeg',
'-f', 'rawvideo',
'-pix_fmt', 'bgr24',
'-pix_fmt', 'yuv420p',
'-s', f'{width}x{height}',
'-i', '-', # Take input from stdin
'-an', # No audio
'-vcodec', 'h264_ascend',
'-preset', 'ultrafast',
'-f', 'h264', # Output format H.264
'-vcodec', 'libx264',
'-f', 'flv',
'-'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
return process
def encode_frame_h264(self, frame):
if self.process.poll() is not None:
raise RuntimeError("FFmpeg process has exited unexpectedly.")
# Write frame to stdin of the FFmpeg process
def close_h264_encoder(self):
self.ffprocess.stdin.close()
self.ffprocess.wait()
def encode_frame_h264_bak(self,frame):
try:
self.process.stdin.write(frame.tobytes())
process = (
ffmpeg
.input('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{frame.shape[1]}x{frame.shape[0]}')
.output('pipe:', format='flv',vcodec='h264_ascend')
.run_async(pipe_stdin=True, pipe_stdout=True, pipe_stderr=True)
)
out, err = process.communicate(input=frame.tobytes())
if process.returncode != 0:
raise RuntimeError(f"FFmpeg encoding failed: {err.decode('utf-8')}")
return out
except Exception as e:
raise RuntimeError(f"Failed to write frame to FFmpeg: {e}")
print(f"Error during frame encoding: {e}")
return None
def encode_frame_h264(self, frame,timeout=1):
yuv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV_I420)
if self.ffprocess.poll() is not None:
raise RuntimeError("FFmpeg process has exited unexpectedly.")
# Write frame to stdin of the FFmpeg process
try:
self.ffprocess.stdin.write(yuv_frame.tobytes())
self.ffprocess.stdin.flush()
# Capture the encoded output
buffer_size = 1024 * 10 # Adjust this based on the size of the encoded frame
encoded_frame = bytearray()
buffer_size = 1024 * 1 # Adjust this based on the size of the encoded frame
encoded_frame = bytearray()
start_time = time.time()
while True:
# Use select to handle non-blocking reads from both stdout and stderr
#ready_to_read, _, _ = select.select([self.ffprocess.stdout, self.ffprocess.stderr], [], [], timeout)
ready_to_read, _, _ = select.select([self.ffprocess.stdout.fileno(), self.ffprocess.stderr.fileno()],
[], [], timeout)
# Check if there's data in stdout (encoded frame)
if self.ffprocess.stdout.fileno() in ready_to_read:
chunk = self.ffprocess.stdout.read(buffer_size)
if chunk:
encoded_frame.extend(chunk)
else:
break # No more data to read from stdout
while True:
chunk = self.process.stdout.read(buffer_size)
if not chunk:
break
encoded_frame.extend(chunk)
# Check if there's an error in stderr
if self.ffprocess.stderr.fileno() in ready_to_read:
error = self.ffprocess.stderr.read(buffer_size).decode('utf-8')
raise RuntimeError(f"FFmpeg error: {error}")
if not encoded_frame:
raise RuntimeError("No encoded data received from FFmpeg.")
# Timeout handling to avoid infinite blocking
if time.time() - start_time > timeout:
raise RuntimeError("FFmpeg encoding timed out.")
# Optional: Check for errors in stderr
# stderr_output = self.process.stderr.read()
# if "error" in stderr_output.lower():
# raise RuntimeError(f"FFmpeg error: {stderr_output}")
if not encoded_frame:
raise RuntimeError("No encoded data received from FFmpeg.")
return bytes(encoded_frame)
# Optional: Check for errors in stderr
# stderr_output = self.process.stderr.read()
# if "error" in stderr_output.lower():
# raise RuntimeError(f"FFmpeg error: {stderr_output}")
return bytes(encoded_frame)
except Exception as e:
print(f"Error during frame encoding: {e}")
return None
def _frame_pre_work(self, frame):
    """Pre-process a captured frame before encoding.

    The frame is modified in place (the caller's array is drawn on).
    Currently the only processing is stamping the current wall-clock time
    onto the image.

    :param frame: HxWx3 BGR numpy array, drawn on in place.
    :return: the same frame object, for convenience.
    """
    # ----------add a timestamp-------------
    # Current local time, second resolution.
    current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # Font and placement.
    font = cv2.FONT_HERSHEY_SIMPLEX
    position = (10, 30)  # timestamp in the top-left corner
    font_scale = 1
    color = (255, 255, 255)  # white
    thickness = 2
    # Draw the timestamp onto the frame.
    cv2.putText(frame, current_time, position, font, font_scale, color, thickness, cv2.LINE_AA)
    return frame
def _encode_frame(self,frame,itype=0):
ret = False
buffer_bgr_webp = None
if itype == 0: #jpg
ret, frame_bgr_webp = cv2.imencode('.jpg', frame, self.encode_param)
if ret:
buffer_bgr_webp = frame_bgr_webp.tobytes()
elif itype == 1: #H264
try:
buffer_bgr_webp = self.encode_frame_h264(frame)
ret = True
except Exception as e:
print(e)
else:
print("错误的参数!!")
if frame is not None:
if itype == 0: #jpg
self._frame_pre_work(frame) #对图片添加一些信息,目前是添加时间戳(model是入队列的时间,cap是发送前取帧的时间)
ret, frame_bgr_webp = cv2.imencode('.jpg', frame, self.encode_param)
if ret:
buffer_bgr_webp = frame_bgr_webp.tobytes()
elif itype == 1: #H264
try:
buffer_bgr_webp = self.encode_frame_h264(frame)
ret = True
except Exception as e:
print(e)
else:
print("错误的参数!!")
return ret,buffer_bgr_webp
#帧序列号自增 一个线程中处理,不用加锁
@ -213,9 +253,10 @@ class ChannelData:
'''
ret = False
if self.cap:
self.cap.release()
mCap.stop_get_video(self.source)
self.cap = None
self.cap = VideoCaptureWithFPS(source,type)
self.cap = mCap.start_get_video(source,type)
self.source = source
if self.cap:
ret = True
return ret
@ -229,7 +270,7 @@ class ChannelData:
return False
else:
if self.cap:
self.cap.release()
mCap.stop_get_video(self.source)
self.cap = None
return True #一般不会没有cap
@ -348,6 +389,9 @@ class ChannelData:
warntext = ""
if model and schedule[weekday][hour] == 1: #不在计划则不进行验证,直接返回图片
# 调用模型,进行检测,model是动态加载的,具体的判断标准由模型内执行 ---- *********
# bwarn = False
# warntext = ""
# time.sleep(2)
bwarn, warntext = model.verify(img, model_data,isdraw) #****************重要
# 对识别结果要部要进行处理
if bwarn:

2
core/ChannelManager.py

@ -121,7 +121,7 @@ class ChannelManager:
for i in range(5):
ret, frame = channel_data.cap.read()
if ret:
ret, frame_bgr_webp = cv2.imencode('.jpg', frame,self.encode_param)
ret, frame_bgr_webp = cv2.imencode('.jpg', frame)
if ret:
# 将图像数据编码为Base64
img_base64 = base64.b64encode(frame_bgr_webp).decode('utf-8')

2
core/ModelManager.py

@ -50,8 +50,6 @@ class ModelManager:
self.warnM = WarnManager()
self.warnM.start_warnmanager_th()
#工作线程:cap+model
if channel_id ==0:
strsql = "select id,ulr,type from channel where is_work = 1;" #执行所有通道

10
model/plugins/ModelBase.py

@ -34,7 +34,7 @@ class ModelBase(ABC):
self._output_num = 0 # 输出数据个数
self._output_info = [] # 输出信息列表
self._is_released = True # 资源是否被释放
self.polygon = None #检测区域
self.system = myCongif.get_data("model_platform") #platform.system() #获取系统平台
self.do_map = { # 定义插件的入口函数 --
# POCType.POC: self.do_verify,
@ -59,10 +59,10 @@ class ModelBase(ABC):
self.release()
def draw_polygon(self, img, polygon_points,color=(0, 255, 0)):
self.polygon = Polygon(ast.literal_eval(polygon_points))
points = np.array([self.polygon.exterior.coords], dtype=np.int32)
cv2.polylines(img, points, isClosed=True, color=color, thickness=2)
if polygon_points and polygon_points.strip():
self.polygon = Polygon(ast.literal_eval(polygon_points))
points = np.array([self.polygon.exterior.coords], dtype=np.int32)
cv2.polylines(img, points, isClosed=True, color=color, thickness=2)
def is_point_in_region(self, point):
'''判断点是否在区域内,需要先执行draw_polygon'''

52
myutils/MyRtspManager.py

@ -0,0 +1,52 @@
import socket
class MyRtspManager:
    """Minimal hand-rolled RTSP client prototype.

    Drives the RTSP handshake (OPTIONS -> DESCRIBE -> SETUP) over a raw TCP
    socket, then listens for RTP packets on a UDP socket.  The server address
    and client ports are hard-coded; this is experimental code.
    """

    def __init__(self):
        # TCP control connection used for the RTSP request/response exchange.
        self.rtsp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def __del__(self):
        pass

    def _send_rtsp_request(self, request):
        """Send one pre-formatted RTSP request and return the decoded reply.

        Requests must already follow the RTSP wire format
        (OPTIONS / DESCRIBE / SETUP / PLAY, CRLF-terminated headers).
        """
        sock = self.rtsp_socket
        sock.sendall(request.encode())
        response = sock.recv(4096).decode()
        print("Response:\n", response)
        return response

    def _connectRest(self, IP, Port):
        """Open the TCP control connection (no reconnect logic yet)."""
        self.rtsp_socket.connect((IP, Port))

    def startGetRtsp(self):
        """Run the RTSP handshake, then receive RTP packets forever."""
        # OPTIONS: ask the server which methods it supports.
        self._send_rtsp_request(
            "OPTIONS rtsp://192.168.3.103/live1 RTSP/1.0\r\nCSeq: 1\r\n\r\n")
        # DESCRIBE: fetch the SDP media description.
        sdp_response = self._send_rtsp_request(
            "DESCRIBE rtsp://192.168.3.103/live1 RTSP/1.0\r\nCSeq: 2\r\nAccept: application/sdp\r\n\r\n")
        # TODO: parse *sdp_response* for track ports/codecs instead of hard-coding below.
        # SETUP: negotiate RTP/RTCP transport on client ports 5000-5001.
        self._send_rtsp_request(
            "SETUP rtsp://192.168.3.103/live1/trackID=0 RTSP/1.0\r\nCSeq: 3\r\nTransport: RTP/AVP;unicast;client_port=5000-5001\r\n\r\n")
        # UDP socket bound to the client_port announced in the SETUP request.
        rtp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        rtp_socket.bind(('0.0.0.0', 5000))
        while True:
            rtp_packet, _ = rtp_socket.recvfrom(2048)
            # TODO: parse the RTP header and extract the H264 (or other) payload.
            print(f"Received RTP packet of size {len(rtp_packet)}")

16
myutils/mydvpp.py

@ -0,0 +1,16 @@
import cv2
def bgr_to_yuv420(bgr_img):
    """Convert a BGR image to YUV 4:2:0 planes.

    Returns the full-resolution Y plane plus U and V planes subsampled by a
    factor of 2 in each dimension.  The three planes are returned separately,
    not packed into a single contiguous I420 buffer.

    :param bgr_img: HxWx3 BGR image (OpenCV layout).
    :return: tuple ``(Y, U_half, V_half)`` of single-channel arrays.
    """
    # Full-resolution 4:4:4 conversion first, then split into planes.
    yuv = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2YUV)
    y_plane, u_plane, v_plane = cv2.split(yuv)
    # Halve the chroma planes in both axes (bilinear filtering) for 4:2:0.
    half_size = (u_plane.shape[1] // 2, u_plane.shape[0] // 2)
    u_half = cv2.resize(u_plane, half_size, interpolation=cv2.INTER_LINEAR)
    v_half = cv2.resize(v_plane, half_size, interpolation=cv2.INTER_LINEAR)
    return y_plane, u_half, v_half

0
myutils/myutil.py

49
run.py

@ -4,23 +4,65 @@ import os
import platform
import shutil
import asyncio
import uvicorn
from hypercorn.asyncio import serve
from hypercorn.config import Config
from myutils.MyTraceMalloc import MyTraceMalloc
import threading
import subprocess
print(f"Current working directory (run.py): {os.getcwd()}")
web = create_app()
app = create_app()
async def run_quart_app():
config = Config()
config.bind = ["0.0.0.0:5001"]
await serve(web, config)
await serve(app, config)
def test():
    # Manual smoke-test hook: exercises mMM.test1() instead of the web service.
    mMM.test1()
def run_subprocess():
    """Demo: round-trip one line through a child process via stdin/stdout.

    Spawns a python3 child that echoes each stdin line back on stdout with a
    "Processed: " prefix, sends one line, prints the reply, then drains
    stderr and waits for the child to exit.  Returns None.
    """
    # Child process: read lines from stdin, write the processed result to stdout.
    process = subprocess.Popen(
        ['python3', '-c', '''
import sys
# 从stdin读取输入
for line in sys.stdin:
    # 处理输入并写回stdout
    sys.stdout.write(f"Processed: {line}")
    sys.stdout.flush() # 刷新stdout输出缓冲区
'''],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
    )
    # Feed one line of input to the child's stdin.
    input_data = "Hello, subprocess!\n"
    print(f"Sending to subprocess: {input_data}")
    process.stdin.write(input_data)
    process.stdin.flush()  # make sure the child actually sees the input
    # Read the child's single-line reply.
    output = process.stdout.readline()
    print(f"Received from subprocess: {output}")
    # BUGFIX: close stdin *before* draining stderr.  stderr.read() blocks
    # until the child exits, and the child only exits once its stdin hits
    # EOF -- reading stderr first therefore deadlocked both processes.
    process.stdin.close()
    error = process.stderr.read()
    if error:
        print(f"Error from subprocess: {error}")
    else:
        print("no error!!")
    process.wait()
if __name__ == '__main__':
#test()
#run_subprocess()
system = platform.system()
if system == "Windows":
@ -42,6 +84,7 @@ if __name__ == '__main__':
#mVManager.start_check_rtsp() #线程更新视频在线情况
#启动web服务
asyncio.run(run_quart_app())
#uvicorn.run("run:app", host="0.0.0.0", port=5001, workers=4,reload=True)

2
web/API/__init__.py

@ -1,4 +1,4 @@
from quart import Blueprint
#定义模块
api = Blueprint('api',__name__)
from . import user,system,viedo,channel,model
from . import user,system,viedo,channel,model,warn

6
web/API/viedo.py

@ -187,7 +187,7 @@ async def handle_channel(channel_id,websocket):
icount = 0
send_stime = time.time()
await websocket.send(frame)
await websocket.send(b'frame:'+frame)
send_etime = time.time()
send_all_time = send_all_time + (send_etime - send_stime)
else:
@ -196,7 +196,7 @@ async def handle_channel(channel_id,websocket):
if icount > error_max_count:
print(f"通道-{channel_id},长时间未获取图像,休眠一段时间后再获取。")
#icount = 0
error_message = b"video_error"
error_message = b'error:video_error'
await websocket.send(error_message)
await asyncio.sleep(sleep_time) # 等待视频重连时间
@ -209,7 +209,7 @@ async def handle_channel(channel_id,websocket):
# 每隔一定时间(比如5秒)计算一次帧率
if el_time >= 10:
fps = frame_count / el_time
print(f"当前帧率: {fps} FPS,循环次数:{frame_count},花费总耗时:{all_time}S,get耗时:{get_all_time},send耗时:{send_all_time}")
print(f"{channel_id}当前帧率: {fps} FPS,循环次数:{frame_count},花费总耗时:{all_time}S,get耗时:{get_all_time},send耗时:{send_all_time}")
# 重置计数器和时间
frame_count = 0
all_time = 0

40
web/API/warn.py

@ -0,0 +1,40 @@
from . import api
from web.common.utils import login_required
from quart import jsonify, request
from core.DBManager import mDBM
@api.route('/warn/search_warn',methods=['POST'])
@login_required
async def warn_get():
    """Search alarm records with optional filters, newest first, paginated.

    JSON body fields:
      s_count / e_count     -- OFFSET / LIMIT for paging (must be integers)
      model_name            -- exact algorithm-name filter (optional)
      channel_id            -- exact channel filter, integer (optional)
      start_time / end_time -- inclusive creat_time range (both required to apply)

    Returns a JSON list of warn rows, or {"error": ...} with HTTP 500.
    """
    # 获取查询参数 (read the query parameters from the JSON body)
    json_data = await request.get_json()
    s_count = json_data.get('s_count','')
    e_count = json_data.get('e_count','')
    model_name = json_data.get('model_name','')
    channel_id = json_data.get('channel_id','')
    start_time = json_data.get('start_time','')
    end_time = json_data.get('end_time','')

    def _quote(value):
        # Minimal SQL string-literal escaping.  All of these values come from
        # the client, so they must never be interpolated raw (SQL injection);
        # the previous code also forgot the quotes entirely, producing
        # syntactically invalid SQL for any non-numeric filter.
        return "'" + str(value).replace("'", "''") + "'"

    try:
        # Paging and id values must be integers -- reject anything else.
        limit = int(e_count)
        offset = int(s_count)
        # 动态拼接 SQL 语句 (assemble the statement from the active filters)
        sql = "SELECT * FROM warn WHERE 1=1"
        if model_name:
            sql += f" AND model_name = {_quote(model_name)}"
        if channel_id:
            sql += f" AND channel_id = {int(channel_id)}"
        if start_time and end_time:
            sql += f" AND creat_time BETWEEN {_quote(start_time)} AND {_quote(end_time)}"
        # 增加倒序排列和分页 (newest first, then page)
        sql += f" ORDER BY creat_time DESC LIMIT {limit} OFFSET {offset}"
        print(sql)
        data = mDBM.do_select(sql)
        # 将数据转换为JSON格式返回给前端 (shape rows for the frontend)
        warn_list = [{"ID": warn[0], "model_name": warn[1], "video_path": warn[2], "img_path": warn[3],
                      "creat_time": warn[4], "channel_id": warn[5]} for warn in data]
        return jsonify(warn_list)
    except Exception as e:
        return jsonify({"error": str(e)}), 500

6
web/__init__.py

@ -32,15 +32,13 @@ class MemcachedSessionInterface: #只是能用,不明所以
def create_app():
app = Quart(__name__)
#app = cors(app, allow_credentials=True) #allow_origin:指定允许跨域访问的来源
#相关配置--设置各种配置选项,这些选项会在整个应用程序中被访问和使用。
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = 'zfxxkj_2024_!@#'
if myCongif.get_data("model_platform") == "acl":
app.config['SESSION_TYPE'] = 'memcached' # session类型
elif myCongif.get_data("model_platform") =="cpu":
app.config['SESSION_TYPE'] = 'redis' # session类型
#app.config['SESSION_FILE_DIR'] = './sessions' # session保存路径
#app.config['SESSION_MEMCACHED'] = base.Client(('localhost', 11211))
app.config['SESSION_PERMANENT'] = True # 如果设置为True,则关闭浏览器session就失效。

288
web/main/static/resources/scripts/aiortc-client-new.js

@ -2,7 +2,8 @@ let video_list = {}; //element_id -- socket
let run_list = {}; //element_id -- runtag
let berror_state_list = {}; //element_id -- 错误信息显示
let m_count = 0;
var channel_list = null;
let connection_version = {}; // 保存每个 element_id 的版本号
let channel_list = null;
const fourViewButton = document.getElementById('fourView');
const nineViewButton = document.getElementById('nineView');
@ -127,15 +128,19 @@ document.getElementById('nineView').addEventListener('click', function() {
nineViewButton.classList.add('btn-primary');
});
function generateVideoNodes(count) { //在这里显示视频-初始化
function generateVideoNodes(count) { //在这里显示视频-初始化 ---这里使用重置逻辑
//结束在播放的socket
for(let key in video_list){
const videoFrame = document.getElementById(`video-${key}`);
const event = new Event('closeVideo');
videoFrame.dispatchEvent(event);
delete video_list[key];
//flv使用
// const videoFrame = document.getElementById(`video-${key}`);
// const event = new Event('closeVideo');
// videoFrame.dispatchEvent(event);
//通用关闭
run_list[key] = false;
video_list[key].close();
berror_state_list[key] = false;
delete video_list[key];
}
//切换窗口布局
const videoGrid = document.getElementById('videoGrid');
@ -150,16 +155,15 @@ function generateVideoNodes(count) { //在这里显示视频-初始化
<div class="video-buttons">
<button onclick="toggleFullScreen(${i})">🔲</button>
<button onclick="closeVideo(${i})"></button>
<!-- <button onclick="closeFLVStream(${i})"></button> -->
</div>
</div>
<div class="video-area"><img id="video-${i}" alt="Video Stream" /></div>
<!-- <div class="video-area"><video id="video-${i}" controls></video></div> -->
<div class="video-area"><canvas id="video-${i}"></canvas></div>
</div>`;
}
videoGrid.innerHTML = html;
//开始还原视频,获取视频接口
// if(m_count != 0){
//获取视频接口
const url = `/api/viewlist?count=${count}`;
fetch(url)
.then(response => response.json())
@ -170,7 +174,6 @@ function generateVideoNodes(count) { //在这里显示视频-初始化
nlist = data.nlist;
for(let i=0;i<clist.length;i++){
if(parseInt(elist[i]) < count){
console.log("切换窗口时进行连接",clist[i])
connectToStream(elist[i],clist[i],nlist[i])
//startFLVStream(elist[i],clist[i],nlist[i]);
}
@ -250,137 +253,160 @@ function drop(event) {
//console.log('retrun 只是把fetch结束,这里的代码还是会执行');
}
function connectToStream(element_id,channel_id,channel_name) {
console.log("开始连接视频",element_id,channel_id);
// 设置视频区域的标题
const titleElement = document.querySelector(`[data-frame-id="${element_id}"] .video-title`);
titleElement.textContent = channel_name;
//获取视频
const imgElement = document.getElementById(`video-${element_id}`);
imgElement.alt = `Stream ${channel_name}`;
const streamUrl = `ws://${window.location.host}/api/ws/video_feed/${channel_id}`;
let berror = false;
function connect() {
const socket = new WebSocket(streamUrl);
console.log("socket建立----",element_id);
//全局变量需要维护好
if(element_id in video_list) {
video_list[element_id].close();
}else{
berror_state_list[element_id] = false;
}
video_list[element_id] = socket;
run_list[element_id] = true;
imgElement.style.display = 'block';
// 处理连接打开事件
socket.onopen = () => {
console.log('WebSocket connection established');
};
function connect(channel_id,element_id,imgcanvas,ctx,offscreenCtx,offscreenCanvas,streamUrl) {
//判断是否有重复socket,进行删除
if(element_id in video_list) {
run_list[element_id] = false;
video_list[element_id].close();
delete video_list[element_id];
console.log("有历史数据未删干净!!---",element_id) //要不要等待待定
}
// 每次连接时增加版本号
const current_version = (connection_version[element_id] || 0) + 1;
connection_version[element_id] = current_version;
const socket = new WebSocket(streamUrl);
socket.binaryType = 'arraybuffer'; // 设置为二进制数据接收
socket.customData = { channel_id: channel_id, element_id: element_id,
imgcanvas:imgcanvas,ctx:ctx,offscreenCtx:offscreenCtx,offscreenCanvas:offscreenCanvas,
version_id: current_version,streamUrl:streamUrl}; // 自定义属性 -- JS异步事件只能等到当前同步任务(代码块)完成之后才有可能被触发。
//新的连接
video_list[element_id] = socket;
run_list[element_id] = true;
berror_state_list[element_id] = false;
imgcanvas.style.display = 'block';
// 处理连接打开事件
socket.onopen = function(){
console.log('WebSocket connection established--',socket.customData.channel_id);
};
socket.onmessage = function(event) {
const reader = new FileReader();
reader.readAsArrayBuffer(event.data);
socket.onmessage = function(event) {
let el_id = socket.customData.element_id
let cl_id = socket.customData.channel_id
let imgcanvas = socket.customData.imgcanvas
let ctx = socket.customData.ctx
let offctx = socket.customData.offscreenCtx
let offscreenCanvas = socket.customData.offscreenCanvas
// 转换为字符串来检查前缀
let message = new TextDecoder().decode(event.data.slice(0, 6)); // 取前6个字节
if (message.startsWith('frame:')){
//如有错误信息显示 -- 清除错误信息
if(berror_state_list[el_id]){
removeErrorMessage(imgcanvas);
berror_state_list[el_id] = false;
}
// 接收到 JPG 图像数据,转换为 Blob
let img = new Image();
let blob = new Blob([event.data.slice(6)], { type: 'image/jpeg' });
// 将 Blob 转换为可用的图像 URL
img.src = URL.createObjectURL(blob);
//定义图片加载函数
img.onload = function() {
imgcanvas.width = offscreenCanvas.width = img.width;
imgcanvas.height = offscreenCanvas.height = img.height;
// 在 OffscreenCanvas 上绘制
offctx.clearRect(0, 0, imgcanvas.width, imgcanvas.height);
offctx.drawImage(img, 0, 0, imgcanvas.width, imgcanvas.height);
// 将 OffscreenCanvas 的内容复制到主 canvas
ctx.drawImage(offscreenCanvas, 0, 0);
// 用完就释放
URL.revokeObjectURL(img.src);
// blob = null
// img = null
// message = null
// event.data = null
// event = null
};
}else if(message.startsWith('error:')){
const errorText = new TextDecoder().decode(event.data.slice(6)); // 截掉前缀 'error:'
//目前只处理一个错误信息,暂不区分
displayErrorMessage(imgcanvas, "该视频源未获取到画面,请检查后刷新重试,默认两分钟后重连");
berror_state_list[el_id] = true;
}
};
reader.onload = () => {
const arrayBuffer = reader.result;
const decoder = new TextDecoder("utf-8");
const decodedData = decoder.decode(arrayBuffer);
socket.onclose = function() {
let el_id = socket.customData.element_id;
let cl_id = socket.customData.channel_id;
if(run_list[el_id] && socket.customData.version_id === connection_version[el_id]){
console.log(`尝试重新连接... Channel ID: ${cl_id}`);
setTimeout(() => connect(cl_id, el_id, socket.customData.imgcanvas,
socket.customData.ctx,socket.customData.streamUrl), 1000*10); // 尝试在10秒后重新连接
}
};
if (decodedData === "video_error") { //video_error
displayErrorMessage(imgElement, "该视频源未获取到画面,请检查后刷新重试,默认两分钟后重连");
berror_state_list[element_id] = true;
//socket.close(1000, "Normal Closure"); // 停止连接
} else if(decodedData === "client_error"){ //client_error
run_list[element_id] = false;
displayErrorMessage(imgElement, "该通道节点数据存在问题,请重启或联系技术支持!");
socket.close(1000, "Normal Closure"); // 停止连接
berror_state_list[element_id] = true;
}
else {
if(berror_state_list[element_id]){
removeErrorMessage(imgElement);
berror_state_list[element_id] = false;
//console.log("移除错误信息!");
}
// 释放旧的对象URL
if (imgElement.src) {
URL.revokeObjectURL(imgElement.src);
}
//blob = new Blob([arrayBuffer], { type: 'image/jpeg' });
imgElement.src = URL.createObjectURL(event.data);
}
};
socket.onerror = function() {
console.log(`WebSocket错误,Channel ID: ${socket.customData.channel_id}`);
socket.close(1000, "Normal Closure");
};
}
//图片显示方案二
// if (imgElement.src) {
// URL.revokeObjectURL(imgElement.src);
// }
// imgElement.src = URL.createObjectURL(event.data);
};
function connectToStream(element_id,channel_id,channel_name) {
console.log("开始连接视频",element_id,channel_id);
//更新控件状态--设置视频区域的标题
const titleElement = document.querySelector(`[data-frame-id="${element_id}"] .video-title`);
titleElement.textContent = channel_name;
//视频控件
//const imgElement = document.getElementById(`video-${element_id}`);
//imgElement.alt = `Stream ${channel_name}`;
const imgcanvas = document.getElementById(`video-${element_id}`);
const ctx = imgcanvas.getContext('2d')
// 创建 OffscreenCanvas
const offscreenCanvas = new OffscreenCanvas(imgcanvas.width, imgcanvas.height);
const offscreenCtx = offscreenCanvas.getContext('2d');
socket.onclose = function() {
if(run_list[element_id]){
console.log(`尝试重新连接... Channel ID: ${channel_id}`);
setTimeout(connect, 1000*10); // 尝试在10秒后重新连接
}
};
const streamUrl = `ws://${window.location.host}/api/ws/video_feed/${channel_id}`;
socket.onerror = function() {
console.log(`WebSocket错误,Channel ID: ${channel_id}`);
socket.close(1000, "Normal Closure");
};
};
connect();
//创建websocket连接,并接收和显示图片
connect(channel_id,element_id,imgcanvas,ctx,offscreenCtx,offscreenCanvas,streamUrl); //执行websocket连接 -- 异步的应该会直接返回
}
// Close the video playing in UI slot `id`: notify the backend via
// /api/close_stream, tear down the WebSocket, and reset the frame widgets.
// NOTE(review): this block is a broken diff render -- the `.catch` that starts
// below is followed by an `else{` (syntactically invalid), the POST to
// /api/close_stream appears twice (once nested inside the success branch), and
// `url`/`data` are re-declared in the inner scope shadowing the outer ones.
// Reconcile with the repository version before editing further.
function closeVideo(id) {
const titleElement = document.querySelector(`[data-frame-id="${id}"] .video-title`);
// Default title means no stream was ever attached to this slot
if (titleElement.textContent === `Video Stream ${Number(id)+1}`) {
showModal('当前视频窗口未播放视频。');
return;
};
console.log('closeVideo');
// Endpoint used to tell the backend to drop the stream
const url = '/api/close_stream';
const data = {"element_id":id};
// Send the POST request
fetch(url, {
method: 'POST', // use the POST method
headers: {
'Content-Type': 'application/json' // request body is JSON
},
body: JSON.stringify(data) // serialize the payload
})
.then(response => response.json()) // parse the response as JSON
.then(data => {
console.log('Success:', data);
const istatus = data.status;
if(istatus == 0){
showModal(data.msg); // show the message in a modal
if(id in video_list) {
const imgcanvas = document.getElementById(`video-${id}`);
const titleElement = document.querySelector(`[data-frame-id="${id}"] .video-title`);
// stop the socket
run_list[id] = false;
video_list[id].close();
delete video_list[id];
// reset widget state
imgcanvas.style.display = 'none'; // hide the element when playback stops
titleElement.textContent = `Video Stream ${id+1}`;
removeErrorMessage(imgcanvas);
berror_state_list[id] = false;
// delete the backend record (duplicate of the outer request -- see note above)
const url = '/api/close_stream';
const data = {"element_id":id};
// Send the POST request
fetch(url, {
method: 'POST', // use the POST method
headers: {
'Content-Type': 'application/json' // request body is JSON
},
body: JSON.stringify(data) // serialize the payload
})
.then(response => response.json()) // parse the response as JSON
.then(data => {
console.log('Success:', data);
const istatus = data.status;
if(istatus == 0){
showModal(data.msg); // show the message in a modal
return;
}
})
.catch((error) => {
showModal(`Error: ${error.message}`); // show the error in a modal
return;
}
else{
const videoFrame = document.querySelector(`[data-frame-id="${id}"] .video-area img`);
const titleElement = document.querySelector(`[data-frame-id="${id}"] .video-title`);
run_list[id] = false;
video_list[id].close();
delete video_list[id];
videoFrame.src = ''; // clear the frame
videoFrame.style.display = 'none'; // hide the img element when stopped
titleElement.textContent = `Video Stream ${id+1}`;
removeErrorMessage(videoFrame);
berror_state_list[id] = false;
}
})
.catch((error) => {
showModal(`Error: ${error.message}`); // show the error in a modal
});
}
else{
showModal('当前视频窗口未播放视频。');
return;
});
}
}
function startFLVStream(element_id,channel_id,channel_name) {

9
web/main/static/resources/scripts/channel_manager.js

@ -19,7 +19,7 @@ let m_polygon = "";
let check_area = 0;
let draw_status = false; //是否是绘制状态,处于绘制状态才能开始绘制
let b_img = false; //有没有加载图片成功,如果没有初始化的时候就不绘制线条了。
let points = [];
let points = []; //检测区域的点坐标数组
//布防计划
@ -269,8 +269,9 @@ function configureAlgorithm(row) {
b_img = false;
document.getElementById('but_hzqy').textContent = "绘制区域";
//开始初始化算法管理模块
show_channel_img(cid); //获取并显示一帧图片 -- 获取不到图片就是黑画面
show_channel_model_schedule(cid); //获取并显示结构化数据
show_channel_img(cid); //获取并显示一帧图片 -- 获取不到图片就是黑画面 --并要绘制检测区域
//显示窗口
$('#MX_M').modal('show');
}
@ -304,8 +305,9 @@ img.onload = () => { //清除、画图和画线应该分开
backgroundCanvas.height = canvas.height = img.height;
// 将图片绘制到背景画布上
backgroundCtx.drawImage(img, 0, 0, img.width, img.height);
drawLines();
// 将背景画布的内容复制到前台画布上
ctx.drawImage(backgroundCanvas, 0, 0, canvas.width, canvas.height); //绘制画面
//ctx.drawImage(backgroundCanvas, 0, 0, canvas.width, canvas.height); //绘制画面
};
//开始和重新绘制
@ -462,7 +464,6 @@ function show_channel_model_schedule(cid){
if(m_polygon !== ""){ //指定区域了,一般是会有数据的。
const coords = parseCoordStr(m_polygon);
points = coords;
drawLines();
}
}
//阈值

10
web/main/static/resources/scripts/flv.min.js

File diff suppressed because one or more lines are too long

160
web/main/static/resources/scripts/warn_manager.js

@ -0,0 +1,160 @@
// name -> id lookup tables populated by perWarnHtml().
// BUGFIX: declared as plain objects -- the originals were arrays (`[]`) but
// both are only ever used as string-keyed maps, never as sequences.
let modelMap = {};   // model_name   -> model ID
let channelMap = {}; // channel_name -> channel ID

// Initialise the page (drop-downs + first alarm query) once the DOM is ready.
document.addEventListener('DOMContentLoaded', function () {
    perWarnHtml()
});
// Search-button click handler: require both ends of the time range to be
// filled in before any query logic runs; otherwise warn the user.
document.getElementById('searchMButton').addEventListener('click', () => {
    const rangeStart = document.getElementById('startTime').value;
    const rangeEnd = document.getElementById('endTime').value;
    if (!rangeStart || !rangeEnd) {
        alert('请选择完整的时间区间');
        return;
    }
    console.log(`开始时间: ${rangeStart}, 结束时间: ${rangeEnd}`);
    // TODO: run the actual query for the selected time range here
});
// Initialise the warn-manager page:
//   1. fill the model / channel drop-downs from the backend,
//   2. build the name -> id lookup maps (modelMap / channelMap),
//   3. run an initial alarm query with the current filter values.
// Relies on the file-level maps and the shared helper set_select_data()
// defined elsewhere in the project.
async function perWarnHtml() {
    try {
        // --- model-name drop-down ---
        let response = await fetch('/api/model/list');
        if (!response.ok) {
            throw new Error('Network response was not ok');
        }
        // declared locally: the originals leaked into the global scope
        const model_datas = await response.json();
        const model_select_datas = ["请选择"];
        model_datas.forEach(option => {
            model_select_datas.push(option.name);
            modelMap[option.name] = option.ID; // remember name -> id
        });
        set_select_data("modelSelect", model_select_datas);

        // --- video-channel drop-down ---
        response = await fetch('/api/channel/tree');
        if (!response.ok) {
            throw new Error('Network response was not ok');
        }
        const channel_datas = await response.json();
        const channel_select_datas = ["请选择"];
        channel_datas.forEach(option => {
            channel_select_datas.push(option.channel_name);
            channelMap[option.channel_name] = option.ID; // remember name -> id
        });
        set_select_data("channelSelect", channel_select_datas);

        // --- initial alarm query ---
        let modelName = document.getElementById('modelSelect').value;
        const channelName = document.getElementById('channelSelect').value;
        const startTime = document.getElementById('startTime').value;
        const endTime = document.getElementById('endTime').value;
        const sCount = 0;   // index of the first record to fetch
        const eCount = 100; // number of records per request
        if (modelName == "请选择") {
            modelName = "";
        }
        // BUGFIX: the API field is channel_id, but the <select> value is the
        // channel *name*; translate it through channelMap (empty when nothing
        // is selected). The original sent the display name as the id.
        let channelId = "";
        if (channelName != "请选择" && channelName in channelMap) {
            channelId = channelMap[channelName];
        }
        // request body for the alarm search
        const requestData = {
            model_name: modelName || "", // empty string when unset
            channel_id: channelId || "",
            start_time: startTime || "",
            end_time: endTime || "",
            s_count: sCount,
            e_count: eCount
        };
        try {
            // POST the query to the backend
            const searchResponse = await fetch('/api/warn/search_warn', {
                method: 'POST',
                headers: {
                    'Content-Type': 'application/json'
                },
                body: JSON.stringify(requestData) // serialize the filter
            });
            if (searchResponse.ok) {
                const data = await searchResponse.json();
                console.log('查询结果:', data);
                // TODO: render the result rows into the table, e.g.
                // updateTableWithData(data);
            } else {
                console.error('查询失败:', searchResponse.status);
            }
        } catch (error) {
            console.error('请求出错:', error);
        }
    } catch (error) {
        console.error('Error fetching model data:', error);
    }
    // TODO: paginate the alarm rows (renderTable()/renderPagination()) once
    // the response schema is wired up; the row actions still need delete /
    // image / video / review handlers.
}
// Render the current page of `modelData` into the table body.
// NOTE(review): this looks copied from the model-manager page -- it reads the
// model globals (modelData / currentPage / rowsPerPage), renders model columns
// and targets tbody id "table-body-model", while the warn template uses other
// ids/columns; confirm which schema is intended.
function renderTable() {
    const tbody = document.getElementById('table-body-model');
    tbody.innerHTML = ''; // drop any previously rendered rows
    const firstIdx = (currentPage - 1) * rowsPerPage;
    const pageRows = modelData.slice(firstIdx, firstIdx + rowsPerPage);
    // kept from the original; currently unused padding count
    const surplus_count = rowsPerPage - pageRows.length;
    for (const model of pageRows) {
        const tr = document.createElement('tr');
        tr.innerHTML = `
            <td>${model.ID}</td>
            <td>${model.name}</td>
            <td>${model.version}</td>
            <td>${model.duration_time}</td>
            <td>${model.proportion}</td>
            <td>
                <button class="btn btn-primary btn-sm modify-btn">升级</button>
                <button class="btn btn-secondary btn-sm algorithm-btn">配置</button>
                <button class="btn btn-danger btn-sm delete-btn">删除</button>
            </td>
        `;
        tbody.appendChild(tr);
        // wire the per-row action buttons to their handlers (defined elsewhere)
        tr.querySelector('.modify-btn').addEventListener('click', () => modifyModel(tr));
        tr.querySelector('.algorithm-btn').addEventListener('click', () => configureModel(tr));
        tr.querySelector('.delete-btn').addEventListener('click', () => deleteModel(tr));
    }
}
// Rebuild the pagination control from the size of `modelData`; clicking a
// page item re-renders both the table and the pager.
function renderPagination() {
    const pager = document.getElementById('pagination-model');
    pager.innerHTML = '';
    const pageCount = Math.ceil(modelData.length / rowsPerPage);
    for (let page = 1; page <= pageCount; page++) {
        const item = document.createElement('li');
        item.className = 'page-item' + (page === currentPage ? ' active' : '');
        item.innerHTML = `<a class="page-link" href="#">${page}</a>`;
        item.addEventListener('click', (event) => {
            event.preventDefault();
            currentPage = page;
            renderTable();
            renderPagination();
        });
        pager.appendChild(item);
    }
}

27
web/main/templates/h264_test.html

@ -0,0 +1,27 @@
<!DOCTYPE html>
<!-- Test page: plays a live FLV-wrapped H.264 stream from the backend
     WebSocket using flv.js (Media Source Extensions in the browser). -->
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>H.264 Streaming</title>
<script src="{{ url_for('main.static', filename='scripts/flv.min.js') }}"></script>
<script src="{{ url_for('main.static', filename='scripts/jquery-3.2.1.min.js') }}"></script>
</head>
<body>
<video id="videoElement" controls autoplay width="640" height="480"></video>
<script>
// Only attempt playback when the browser supports MSE/flv.js.
// NOTE(review): the channel id (2) is hard-coded for this test page.
if (flvjs.isSupported()) {
const videoElement = document.getElementById('videoElement');
const flvPlayer = flvjs.createPlayer({
type: 'flv',
isLive: true,
url: `ws://${window.location.host}/api/ws/video_feed/2` // WebSocket endpoint served by the backend
});
flvPlayer.attachMediaElement(videoElement);
flvPlayer.load();
flvPlayer.play();
}
</script>
</body>
</html>

1
web/main/templates/header.html

@ -12,6 +12,7 @@
<li class="nav-item"><a href="/view_main.html" class="nav-link active" aria-current="page">实时预览</a></li>
<li class="nav-item"><a href="/channel_manager.html" class="nav-link">通道管理</a></li>
<li class="nav-item"><a href="/model_manager.html" class="nav-link">算法管理</a></li>
<li class="nav-item"><a href="/warn_manager.html" class="nav-link">报警管理</a></li>
<li class="nav-item"><a href="/system_manager.html" class="nav-link">系统管理</a></li>
<li class="nav-item"><a href="/user_manager.html" class="nav-link">用户管理</a></li>
</ul>

11
web/main/templates/view_main.html

@ -61,14 +61,14 @@
border: 1px solid #ddd; /* 视频区域边框 */
}
.video-area img {
display: none; /* 初始隐藏 */
.video-area canvas {
display: none;
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
object-fit: cover;
object-fit: contain;
}
.video-buttons {
@ -91,10 +91,7 @@
min-width: 100px;
}
.video-frame img {
width: 100%;
height: auto;
}
#videoGrid.four .video-frame {
width: calc(50% - 10px); /* 每行4个视频框架 */
}

72
web/main/templates/warn_manager.html

@ -0,0 +1,72 @@
{% extends 'base.html' %}
{% block title %}ZFBOX{% endblock %}
{% block style %}
/* keep the table area tall enough that the pager does not jump around */
.table-container {
min-height: 400px; /* minimum height; adjust as needed */
}
/* tighten table row height */
.table-sm th,
.table-sm td {
padding: 0.2rem; /* adjust this value to change the row height */
}
{% endblock %}
{% block content %}
<!-- modal area (none defined yet -- see the NOTE on the buttons below) -->
<!-- search / filter area -->
<div class="container d-flex flex-column" >
<div class="row justify-content-center align-items-center mb-3">
<div class="col-md-1 text-end"><label class="col-form-label form-label">算法名称:</label></div>
<div class="col-md-2">
<select id="modelSelect" class="form-select mr-2" aria-label="Default select example"></select></div>
<div class="col-md-1 text-end"><label class="col-form-label form-label">视频通道:</label></div>
<div class="col-md-2">
<select id="channelSelect" class="form-select mr-2" aria-label="Default select example"></select></div>
<div class="col-md-1 text-end"><label class="col-form-label form-label">告警时间:</label></div>
<!-- time-range pickers -->
<div class="col-md-2"><input id="startTime" type="datetime-local" class="form-control"></div>
<div class="col-md-2"><input id="endTime" type="datetime-local" class="form-control"></div>
<div class="col-md-1"><button id="searchMButton" type="button" class="btn btn-primary">查 询</button></div>
</div>
<!-- NOTE(review): both buttons target #channelModal, which is not defined in
     this template -- confirm the intended modal. -->
<div class="mb-3">
<button id="delButton" type="button" class="btn btn-primary" data-bs-toggle="modal" data-bs-target="#channelModal">
清除报警
</button>
<button id="exportButton" type="button" class="btn btn-primary" data-bs-toggle="modal" data-bs-target="#channelModal">
导出报警
</button>
</div>
<div class="table-container">
<table class="table">
<thead class="table-light">
<tr>
<th scope="col">ID</th>
<th scope="col">算法名称</th>
<th scope="col">视频通道</th>
<th scope="col">报警时间</th>
<th scope="col">操作</th>
</tr>
</thead>
<!-- NOTE(review): warn_manager.js renderTable()/renderPagination() target ids
     "table-body-model" / "pagination-model", but this template provides
     "table-body" / "pagination" -- reconcile the ids. -->
<tbody id="table-body" class="table-group-divider">
<!-- rows are filled in dynamically by warn_manager.js -->
</tbody>
</table>
<nav>
<ul id="pagination" class="pagination">
<!-- pagination items are generated dynamically -->
</ul>
</nav>
</div>
</div>
{% endblock %}
{% block script %}
<script src="{{ url_for('main.static', filename='scripts/warn_manager.js') }}"></script>
{% endblock %}
Loading…
Cancel
Save