diff --git a/config.yaml b/config.yaml
index d293428..a239cc6 100644
--- a/config.yaml
+++ b/config.yaml
@@ -30,7 +30,7 @@ RTSP_Check_Time : 600 #10分钟 -- 2024-7-8 取消使用

 #max_channel_num
 max_channel_num : 8  #最大视频通道数量
-encode_param : 50 #无参数默认是95
+encode_param : 80 #无参数默认是95
 mywidth: 640
 myheight: 480

@@ -43,7 +43,7 @@ cap_sleep_time: 120 #单位秒 -- 5分钟
 buffer_len: 100  #分析后画面缓冲区帧数 -- 可以与验证帧率结合确定缓冲区大小
 RESET_INTERVAL : 100000  #帧数重置上限
 frame_rate : 20  #帧率参考值 -- 后续作用主要基于verify_rate进行帧率控制
-verify_rate : 10  #验证帧率--- 也就是视频输出的帧率
+verify_rate : 6  #验证帧率--- 也就是视频输出的帧率
 warn_video_path: /mnt/zfbox/model/warn/
 warn_interval: 120 #报警间隔--单位秒
 video_error_count: 10 #单位秒 ---根据验证帧率,判断10秒内都是空帧的话,视频源链接有问题。
@@ -54,5 +54,5 @@ wired_interface : eth0
 wireless_interface : WLAN

 #独立模型线程相关
-workType : 1  # 1--一通道一线程。2--模型独立线程
+workType : 2  # 1--一通道一线程。2--模型独立线程

diff --git a/core/ACLModelManager.py b/core/ACLModelManager.py
index 1f1a3db..82b6423 100644
--- a/core/ACLModelManager.py
+++ b/core/ACLModelManager.py
@@ -66,3 +66,49 @@ class ACLModeManger:
         return True


+    @staticmethod
+    def pro_init_acl(device_id):
+        '''
+        独立进程初始化acl资源
+        :param device_id:
+        :return:
+        '''
+        # '''acl初始化函数'''
+        ret = acl.init()  # 0-成功,其它失败
+        if ret:
+            raise RuntimeError(ret)
+        ret = acl.rt.set_device(device_id)  # 指定当前进程或线程中用于运算的Device。可以进程或线程中指定。*多设备时可以放线程*
+        # 在某一进程中指定Device,该进程内的多个线程可共用此Device显式创建Context(acl.rt.create_context接口)。
+        if ret:
+            raise RuntimeError(ret)
+        print('ACL init Device Successfully')
+        #显示创建context
+        context, ret = acl.rt.create_context(device_id)  # 显式创建一个Context
+        if ret:
+            raise RuntimeError(ret)
+        print('Init TH-Context Successfully')
+        return context
+
+    @staticmethod
+    def pro_del_acl(device_id,context):
+        '''
+        独立进程反初始化acl资源
+        :param device_id:
+        :param context:
+        :return:
+        '''
+        # 释放context
+        if context:
+            ret = acl.rt.destroy_context(context)  # 释放 Context
+            if ret:
+                raise RuntimeError(ret)
+        #acl去初始化
+        ret = acl.rt.reset_device(device_id)  # 释放Device
+        if ret:
+            raise RuntimeError(ret)
+
+        ret = acl.finalize()  # 去初始化 0-成功,其它失败 --官方文档不建议放析构函数中执行
+        if ret:
+            raise RuntimeError(ret)
+        print('ACL finalize Successfully')
+        return True
\ No newline at end of file
diff --git a/core/ChannelData.py b/core/ChannelData.py
index 9cc7d3a..3d70a25 100644
--- a/core/ChannelData.py
+++ b/core/ChannelData.py
@@ -8,6 +8,7 @@ import cv2
 import ffmpeg
 import subprocess
 import select
+import multiprocessing
 from collections import deque
 from myutils.MyLogger_logger import LogHandler
 from core.CapManager import mCap
@@ -52,7 +53,8 @@ class ChannelData:
         self.post_th = None  #后处理线程句柄
         self.post_status = False  #后处理线程状态
         self.model_node= None  #模型对象 -- inmq,outmq
-        self.out_mq = MyDeque(30)  #放通道里面
+
+        self.out_mq = MyDeque(30)  #分析结果存放MQ
         #设置JPEG压缩基本
         self.encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), myCongif.get_data("encode_param")]  # 50 是压缩质量(0到100)

@@ -279,9 +281,9 @@ class ChannelData:
         if not self.cap:
             self.logger.error("采集线程未正常启动,不进行工作")
             return
-        while self.model_node.model_th_status == 0:  #避免模型没启动成功,模型线程在运行
+        while self.model_node.m_p_status.value == 0:  #避免模型没启动成功,模型线程在运行
             time.sleep(1)
-        if self.model_node.model_th_status == 1:
+        if self.model_node.m_p_status.value == 1:
             # 开始循环处理业务
             last_frame_time = time.time()  # 初始化个读帧时间
             self.per_status = True
@@ -306,7 +308,7 @@ class ChannelData:
                     #图片预处理
                     img,scale_ratio, pad_size = self.model_node.model.prework(frame)
                     indata = ModelinData(self.channel_id,img,frame,scale_ratio, pad_size)
-                    self.model_node.in_mq.myappend(indata)
+                    self.model_node.pro_add_data(indata)  #数据入队列
                 else:# 不在计划则不进行验证,直接返回图片 --存在问题是:result 漏数据
                     ret, frame_bgr_webp = cv2.imencode('.jpg', frame,self.encode_param)
                     if not ret:
@@ -341,10 +343,12 @@ class ChannelData:
                     else:  # 没有产生报警也需要记录,统一计算占比
                         result.append(0)
                     #分析画面保存
-                    ret, frame_bgr_webp = cv2.imencode('.jpg', out_data.image,self.encode_param)
-                    buffer_bgr_webp = None
-                    if ret:
-                        buffer_bgr_webp = frame_bgr_webp.tobytes()
+                    # ret, frame_bgr_webp = cv2.imencode('.jpg', out_data.image,self.encode_param)
+                    # buffer_bgr_webp = None
+                    # if ret:
+                    #     buffer_bgr_webp = frame_bgr_webp.tobytes()
+                    ret, buffer_bgr_webp = self._encode_frame(out_data.image)
+
                     # 分析图片放入缓冲区内存中
                     self.add_deque(out_data.image)  # 缓冲区大小由maxlen控制 超上限后,删除最前的数据
                     # 分析画面一直更新最新帧,提供网页端显示
@@ -522,6 +526,7 @@ class ChannelData:
             print("线程结束!!!!")

     #2024-9-9 新增兼容独立model线程 根据self.model_node判断,None:1通道1线程,not None:独立线程
+    #2024-10-14 model调整为独立子进程执行
     def _start_model_th(self,model_data,schedule,type=1):
         verify_rate = myCongif.get_data("verify_rate")
         warn_interval = myCongif.get_data("warn_interval")
@@ -532,8 +537,10 @@ class ChannelData:
                                          args=(model_data[3],model_data[4],verify_rate,warn_interval,model_data[7],
                                                model_data[1],model_data[2],model_data[8],model_data[9]))
             self.post_th.start()
-            #启动模型线程,若线程已启动,则+1 mq
+
+            # 启动模型线程,若线程已启动,则+1
             self.model_node.start_model_th(self.channel_id,self.out_mq)
+            #启动预处理线程
             self.per_th = threading.Thread(target=self._pre_work_th,args=(schedule,))
             self.per_th.start()

@@ -557,7 +564,9 @@ class ChannelData:
             if self.post_th:
                 self.post_th.join()
                 self.post_th = None
+            #清空MQ
             self.out_mq.myclear()#清空后处理mq中未处理的数据
+
         else:
             if self.work_th:
                 if self.b_model:
diff --git a/core/ChannelManager.py b/core/ChannelManager.py
index 969e078..e25e8fd 100644
--- a/core/ChannelManager.py
+++ b/core/ChannelManager.py
@@ -131,12 +131,14 @@ class ChannelManager:
         return img_base64

     '''模型独立线程修改2024-9-9,要求是双模式兼容'''
+    '''2024-10-13修改独立线程为独立进程---acl初始化需要在子进程中初始化 -- 该方案无法兼容旧版本'''
     def CreateModelNode(self, model_id, model_path, channel_id):
         if model_id in self.model_list:
             modelN = self.model_list[model_id]
         else:
             modelN = ModelNode(self.device_id,model_path)
             self.model_list[model_id] = modelN
+            #modelN = ModelNode(self.device_id, model_path,channel_id)
         return modelN

     def delModelNode(self):  #关于modelnodel :1.考虑modelnode是否可以不删除,清空inmq即可,2.mdel_list是否需要加锁。#?
diff --git a/core/DataStruct.py b/core/DataStruct.py
index 0a333e4..4113879 100644
--- a/core/DataStruct.py
+++ b/core/DataStruct.py
@@ -34,11 +34,12 @@ class ModelinData:
         pass

 class ModeloutData:
-    def __init__(self,image,scale_ratio, pad_size,outputs):
+    def __init__(self,image,scale_ratio, pad_size,outputs,channel_id):
         self.image = image  #原图
         self.outputs = outputs  #模型推理后结果
         self.scale_ratio = scale_ratio
         self.pad_size = pad_size
+        self.channel_id = channel_id

     def __del__(self):
         pass
diff --git a/core/ModelManager.py b/core/ModelManager.py
index 34e7a39..1113b5b 100644
--- a/core/ModelManager.py
+++ b/core/ModelManager.py
@@ -21,16 +21,18 @@ class ModelManager:
         # acl初始化 -- 一个进程一个
         self.model_platform = myCongif.get_data("model_platform")
         if self.model_platform == "acl":
-            self.device_id = myCongif.get_data("device_id")
-            ACLModeManger.init_acl(self.device_id)  #acl -- 全程序初始化
-        #self.model_dic = {} # model_id model
+            if myCongif.get_data("workType") == 1:
+                self.device_id = myCongif.get_data("device_id")
+                ACLModeManger.init_acl(self.device_id)  #acl -- 全程序初始化
+        self.model_dic = {}  # model_id model

     def __del__(self):
         self.logger.debug("释放资源")
         self.stop_work(0)  #停止所有工作
         del self.verify_list  #应该需要深入的删除--待完善
         if self.model_platform == "acl":  #去初始化
-            ACLModeManger.del_acl(self.device_id)  #acl -- 全程序反初始化 需要确保在执行析构前,其它资源已释放
+            if myCongif.get_data("workType") == 1:
+                ACLModeManger.del_acl(self.device_id)  #acl -- 全程序反初始化 需要确保在执行析构前,其它资源已释放

     def send_warn(self):
         '''发送报警信息'''
diff --git a/core/ModelNode.py b/core/ModelNode.py
index 86c8a99..0c55665 100644
--- a/core/ModelNode.py
+++ b/core/ModelNode.py
@@ -1,38 +1,104 @@
 import threading
 import importlib.util
 import time
-from myutils.MyDeque import MyDeque
+import multiprocessing
+from multiprocessing.managers import BaseManager
 from myutils.ConfigManager import myCongif
 from myutils.MyLogger_logger import LogHandler
 from core.ACLModelManager import ACLModeManger
 from core.DataStruct import ModelinData,ModeloutData
 from threading import Lock
+
+#2024-10-14model处理调整为独立子进程
+def model_process(device,model,model_platform,m_p_status,brun,in_mq,out_mq):
+
+    # 初始化模型运行资源
+    context = None
+    if model_platform == "acl":  # ACL线程中初始化内容
+        context = ACLModeManger.pro_init_acl(device)  # 初始化acl资源,并创建context
+    # 初始化模型资源 -- 加载模型文件
+    ret = model.init_acl_resource()  # 加载和初始化离线模型文件--om文件
+    if not ret:
+        print("初始化模型资源出错,退出线程!")
+        m_p_status.value = 2
+        return
+
+    #执行工作
+    m_p_status.value = 1
+    s_time = time.time()
+    icount = 0
+    while brun.value:
+        try:
+            inData = in_mq.get(timeout=0.01)  #空时-block,直到有值 #(self,channel_id,img,image,scale_ratio, pad_size):
+        except:
+            #print("in_mq_空")
+            continue
+        if inData:
+            outputs = model.execute([inData.img,])  #创建input,执行模型,返回结果 --失败返回None
+            outdata = ModeloutData(inData.image,inData.scale_ratio,inData.pad_size,outputs,inData.channel_id)
+            del inData.img
+            #结果输出
+            if out_mq.full():
+                tmp = out_mq.get()
+                #print("model_输出mq满!")
+                del tmp
+            out_mq.put(outdata)  # 需要确保out_mq只有在这里put
+        else:  #正常情况不会执行到该条件
+            time.sleep(0.05)
+        icount += 1
+        if icount == 1000:
+            e_time = time.time()
+            use_time = (e_time - s_time) / 1000
+            print(f"model_process耗时--{use_time}秒")
+            s_time = time.time()
+            icount = 0
+
+    #结束进程,释放资源
+    m_p_status.value = 0
+    while not in_mq.empty():
+        try:
+            in_mq.get_nowait()  # Get without blocking
+        except Exception as e:
+            break  # In case of any unexpected errors
+
+    # 反初始化
+    if model_platform == "acl":
+        try:
+            model.release()  # 释放模型资源资源
+            # 删除模型对象
+            del model
+            # 释放ACL资源
+            ACLModeManger.pro_del_acl(device,context)
+        except Exception as e:
+            print(e)
+
+
 class ModelNode:
-    def __init__(self,device,model_path):
+    def __init__(self,device,model_path,channel_id):
         self.device = device
         self.model_path = model_path
+        self.channel_id = channel_id
         self.model = None  #模型对象
-        self.model_th = None  #模型线程句柄
-        self.brun = True  #模型控制标识
-        self.model_th_status = 0  #模型线程运行状态 0--初始状态,1-线程执行成功,2-线程退出
-        self.in_mq = MyDeque(50)  #
-        self.channel_list = {}  #channel_id out_mq --需要线程安全
-        self.clist_Lock = Lock()  #channel_list的维护锁
         self.ch_count = 0  #关联启动的通道数量
         self.count_Lock = Lock()  #count的维护锁
         self.model_platform = myCongif.get_data("model_platform")
         self.logger = LogHandler().get_logger("ModelNode")
+        #分发线程相关
+        self.model_out_th = None
+        self.channel_dict = {}
+        self.cdict_Lock = Lock()
-
+        #独立进程方案--共享参数
+        self.process = None
+        self.in_mq = multiprocessing.Queue(maxsize=30)
+        self.out_mq = multiprocessing.Queue(maxsize=30)  #调整结构,多线程(预处理)-》in_mq-子进程-out_mq-》线程分发outdata->多线程(后处理)
+        self.brun = multiprocessing.Value('b',True)  #brun.value = False,brun.value = True
+        self.m_p_status = multiprocessing.Value('i',0)

     def __del__(self):
         pass

-    def _reset(self):  #重置数据
-        #self.model_th_status = 0  # 模型线程运行状态 0--初始状态,1-线程执行成功,2-线程退出
-        self.in_mq.myclear()
-
     def _import_model(self,model_path,threshold=0.5,iou_thres=0.5):
         '''
         根据路径,动态导入模块
@@ -61,73 +127,79 @@ class ModelNode:
             print(f"An unexpected error occurred: {e}")
             return None

-    def _model_th(self):
-        # 加载自定义模型文件
-        self.model = self._import_model(self.model_path)  # 动态加载模型处理文件py
-        if not self.model:
-            self.logger.error("自定义模型文件加载失败,退出model线程")
-            self.model_th_status = 2
-            return
-        # 初始化模型运行资源
-        context = None
-        if self.model_platform == "acl":  # ACL线程中初始化内容
-            context = ACLModeManger.th_inti_acl(self.device)  # 创建context
-        # 初始化模型资源 -- 加载模型文件
-        ret = self.model.init_acl_resource()  # 加载和初始化离线模型文件--om文件
-        if not ret:
-            print("初始化模型资源出错,退出线程!")
-            self.model_th_status = 2
-            return
-        #执行工作
-        self.model_th_status = 1
-        while self.brun:
-            inData = self.in_mq.mypopleft()  #空时,返回None #(self,channel_id,img,image,scale_ratio, pad_size):
-            if inData:
-                outputs = self.model.execute([inData.img,])#创建input,执行模型,返回结果 --失败返回None
-                outdata = ModeloutData(inData.image,inData.scale_ratio,inData.pad_size,outputs)
-                del inData.img
-                with self.clist_Lock:
-                    if inData.channel_id in self.channel_list:
-                        self.channel_list[inData.channel_id].myappend(outdata)
-            else:
-                time.sleep(0.05)
+    def pro_add_data(self,data):
+        # try:
+        #     self.in_mq.put(data,timeout=0.1)
+        # except multiprocessing.queues.Full:
+        #     print("mdel_inmq输入满!")
+        #     del data
+        if self.in_mq.full():
+            tmp = self.in_mq.get()
+            #print("mdel_inmq输入满!")
+            del tmp
+        self.in_mq.put(data)  # 需要确保out_mq只有在这里put

-        #结束线程,释放资源
-        self.model_th_status = 0
-        self._reset()
-        # 反初始化
-        if self.model_platform == "acl":
+    def _modle_th(self):
+        '''根据channel_id分发out_data到out_mq'''
+        s_time = time.time()
+        icount = 0
+        while self.brun.value:
             try:
-                self.model.release()  # 释放模型资源资源
-                # 删除模型对象
-                del self.model
-                # 释放context
-                if context:  # ACL线程中反初始化内容 -- 若线程异常退出,这些资源就不能正常释放了
-                    # 再释放context
-                    ACLModeManger.th_del_acl(context)
-            except Exception as e:
-                print(e)
+                outdata = self.out_mq.get(timeout=1)
+            except:
+                continue
+            with self.cdict_Lock:
+                if outdata.channel_id in self.channel_dict:
+                    self.channel_dict[outdata.channel_id].myappend(outdata)  #后面就交给后处理线程了
+            icount += 1
+            if icount ==1000:
+                e_time = time.time()
+                use_time = (e_time-s_time) /1000
+                print(f"{self.channel_id}_modle_th耗时--{use_time}秒")
+                s_time = time.time()
+                icount = 0

+    #2024-10-14调整为独立进程执行 -- 一个线程一个MQ MyDeque
     def start_model_th(self,channel_id,out_mq):
         with self.count_Lock:
-            with self.clist_Lock:
-                if channel_id in self.channel_list:
-                    return  #这个可以删除老的,新增新的
-                self.channel_list[channel_id] = out_mq
-            if self.ch_count == 0:  #第一次启动线程
-                self.brun = True
-                self.model_th = threading.Thread(target=self._model_th)
-                self.model_th.start()
+            with self.cdict_Lock:
+                if channel_id in self.channel_dict:
+                    return  #这个可以删除老的,新增新的--后续验证,若需要则进行修改
+                self.channel_dict[channel_id] = out_mq  #增加一个记录
+
+            if self.ch_count == 0:  #第一次启动--需要启动处理线程和进程
+                #加载自定义模型文件
+                self.model = self._import_model(self.model_path)  # 动态加载模型处理文件py --置信阈值一直没使用
+                if not self.model:
+                    self.logger.error("自定义模型文件加载失败,不启动model子进程")
+                    self.m_p_status.value = 2
+                    return
+
+                self.brun.value = True
+                #创建outMQ的分发线程
+                self.model_out_th = threading.Thread(target=self._modle_th)
+                self.model_out_th.start()
+
+                # 创建子进程
+                self.process = multiprocessing.Process(target=model_process,
+                                                       args=(self.device,self.model,self.model_platform,
+                                                             self.m_p_status,self.brun,self.in_mq,self.out_mq))
+                self.process.start()
             self.ch_count += 1  #有通道调用一次就加一

     def stop_model_th(self,channel_id):
         with self.count_Lock:
-            with self.clist_Lock:
-                if channel_id in self.channel_list:
-                    del self.channel_list[channel_id]
+            with self.cdict_Lock:
+                if channel_id in self.channel_dict:
+                    del self.channel_dict[channel_id]
             self.ch_count -= 1
             if self.ch_count == 0:  #所有通道结束
-                self.brun = False
-                self.model_th.join()
-                self.model_th = None
+                self.brun.value = False
+                self.model_out_th.join()  #等待线程结束
+                self.model_out_th = None
+
+                self.process.join()  #等待子进程结束
+                self.process = None
+
diff --git a/core/WarnManager.py b/core/WarnManager.py
index b0af853..13a7027 100644
--- a/core/WarnManager.py
+++ b/core/WarnManager.py
@@ -42,7 +42,7 @@ class WarnManager:
             except queue.Empty:
                 continue
             if warn_data:
-                self.save_warn(warn_data.model_name,warn_data.img_buffer,warn_data.width,warn_data.height,
+                ret = self.save_warn(warn_data.model_name,warn_data.img_buffer,warn_data.width,warn_data.height,
                                warn_data.channel_id,self.FPS,self.fourcc,myDBM)
                 self.send_warn()
                 del warn_data.img_buffer
@@ -80,7 +80,9 @@ class WarnManager:
         filename = f"{channnel_id}_{current_time_str}"
         save_path = myCongif.get_data("warn_video_path")
         # 保存视频
-        video_writer = cv2.VideoWriter(f"{save_path}{filename}.mp4", fourcc, FPS, (width, height))
+        str_video = f"{save_path}{filename}.mp4"
+        video_writer = cv2.VideoWriter(str_video, fourcc, FPS, (width, height))
+        #print(f"File: {str_video}, FourCC: {fourcc}, FPS: {FPS}, Size: ({width}, {height})")
         if not video_writer.isOpened():
             print(f"Failed to open video writer for model/warn/{filename}.mp4")
             return False
diff --git a/model/plugins/RYRQ_Model_ACL/RYRQ_Model_ACL.py b/model/plugins/RYRQ_Model_ACL/RYRQ_Model_ACL.py
index 22ba6ed..e9ea452 100644
--- a/model/plugins/RYRQ_Model_ACL/RYRQ_Model_ACL.py
+++ b/model/plugins/RYRQ_Model_ACL/RYRQ_Model_ACL.py
@@ -29,10 +29,10 @@ class Model(ModelBase):

     def prework(self,image):
         '''模型输入图片数据前处理 --- 针对每个模型特有的预处理内容 -'''
-        img, scale_ratio, pad_size = letterbox(image, new_shape=[640, 640])  # 对图像进行缩放与填充
+        img, scale_ratio, dw,dh = letterbox(image, new_shape=[self.netw, self.neth])  # 对图像进行缩放与填充
         img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW  #图片在输入时已经做了转换
         img = np.ascontiguousarray(img, dtype=np.float32) / 255.0  # 转换为内存连续存储的数组
-        return img,scale_ratio, pad_size
+        return img,scale_ratio, (dw,dh)

     def postwork(self,image,outputs,scale_ratio,pad_size,check_area,polygon,conf_threshold,iou_thres):
         '''
@@ -47,6 +47,7 @@
         :param iou_thres:
         :return:
         '''
+        filtered_pred_all = None
         bwarn = False
         warn_text = ""
         # 是否有检测区域,有先绘制检测区域 由于在该函数生成了polygon对象,所有需要在检测区域前调用。
@@ -54,40 +55,31 @@
         if check_area == 1:
             self.draw_polygon(image, polygon, (255, 0, 0))

         if outputs:
-            output = outputs[0]  # 只放了一张图片 -- #是否能批量验证?
-            # 后处理 -- boxout 是 tensor-list: [tensor([[],[].[]])] --[x1,y1,x2,y2,置信度,coco_index]
-            # 利用非极大值抑制处理模型输出,conf_thres 为置信度阈值,iou_thres 为iou阈值
-            output_torch = torch.tensor(output)
-            boxout = nms(output_torch, conf_thres=conf_threshold, iou_thres=iou_thres)
-            del output_torch
-            pred_all = boxout[0].numpy()  # 转换为numpy数组 -- [[],[],[]] --[x1,y1,x2,y2,置信度,coco_index]
-            # pred_all[:, :4] 取所有行的前4列,pred_all[:,1]--第一列
-            scale_coords([640, 640], pred_all[:, :4], image.shape, ratio_pad=(scale_ratio, pad_size))  # 将推理结果缩放到原始图片大小
-
-            # 过滤掉不是目标标签的数据 -- 序号0-- person self.labels_dict --这个考虑下是否可以放到nms前面#?
-            filtered_pred_all = pred_all[pred_all[:, 5] == 0]
-            # 绘制检测结果 --- 也需要封装在类里,
-            for pred in filtered_pred_all:
-                x1, y1, x2, y2 = int(pred[0]), int(pred[1]), int(pred[2]), int(pred[3])
+            output = np.squeeze(outputs[0])  # 移除张量为1的维度 --暂时不明白其具体意义
+            dw,dh = pad_size
+            pred_all = non_max_suppression_v10(output, self.conf_threshold, scale_ratio, dw, dh)
+            for xmin, ymin, xmax, ymax, confidence, label in pred_all:
                 # # 绘制目标识别的锚框 --已经在draw_bbox里处理
                 # cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
-                if check_area == 1:  # 指定了检测区域
-                    x_center = (x1 + x2) / 2
-                    y_center = (y1 + y2) / 2
-                    # 绘制中心点?
-                    cv2.circle(image, (int(x_center), int(y_center)), 5, (0, 0, 255), -1)
-                    # 判断是否区域点
-                    if not self.is_point_in_region((x_center, y_center)):
-                        continue  # 没产生报警-继续
-                # 产生报警 -- 有一个符合即可
-                bwarn = True
-                warn_text = "People Intruder detected!"
-            draw_bbox(filtered_pred_all, image, (0, 255, 0), 2, self.labels_dict)  # 画出检测框、类别、概率
+                draw_box(image, [xmin, ymin, xmax, ymax], confidence, label)  # 画出检测框、类别、概率
+                if label == 0:  # person
+                    # 判断是否产生告警
+                    x1, y1, x2, y2 = int(xmin), int(ymin), int(xmax), int(ymax)
+                    if check_area == 1:  # 指定了检测区域
+                        x_center = (x1 + x2) / 2
+                        y_center = (y1 + y2) / 2
+                        # 绘制中心点?
+                        cv2.circle(image, (int(x_center), int(y_center)), 5, (0, 0, 255), -1)
+                        # 判断是否区域点
+                        if not self.is_point_in_region((x_center, y_center)):
+                            continue  # 没产生报警-继续
+                    # 产生报警 -- 有一个符合即可
+                    bwarn = True
+                    warn_text = "People Intruder detected!"
             # 清理内存
             del outputs, output
-            del boxout
             del pred_all, filtered_pred_all
-            #图片绘制是在原图生效 image
+            # cv2.imwrite('img_res.png', img_dw)
         return bwarn, warn_text

     def verify(self,image,data,isdraw=1):
diff --git a/web/API/channel.py b/web/API/channel.py
index b048dbf..e1b7a7b 100644
--- a/web/API/channel.py
+++ b/web/API/channel.py
@@ -52,11 +52,12 @@ async def channel_add():  #新增通道 -- 2024-8-1修改为与修改通道用一
     area = mDBM.do_select(strsql,1)
     if area:
         area_id = area[0]
-        strsql = f"select ID from channel where area_id={area_id} and channel_name='{cName}';"
+        #strsql = f"select ID from channel where area_id={area_id} and channel_name='{cName}';"
+        strsql = f"select ID from channel where channel_name='{cName}';"
         data = mDBM.do_select(strsql, 1)
         if data and data[0] != cid:  #有值--代表重复 或者是它自己(只修改了RTSP地址)
             reStatus = 0
-            reMsg = "同一区域内的通道名称不能相同!"
+            reMsg = "通道名称不能相同!"
         else:
             if cid == -1:
                 max_count = myCongif.get_data("max_channel_num")
diff --git a/web/API/viedo.py b/web/API/viedo.py
index b69d168..e01ae1e 100644
--- a/web/API/viedo.py
+++ b/web/API/viedo.py
@@ -201,24 +201,22 @@ async def handle_channel(channel_id,websocket):
                 await asyncio.sleep(sleep_time)  # 等待视频重连时间

             #----------输出时间-----------
-            frame_count += 1
-            end_time = time.time()
-            # 计算时间差
-            el_time = end_time - start_time
-            all_time = all_time + (end_time - current_time)
-            # 每隔一定时间(比如5秒)计算一次帧率
-            if el_time >= 10:
-                fps = frame_count / el_time
-                print(f"{channel_id}当前帧率: {fps} FPS,循环次数:{frame_count},花费总耗时:{all_time}S,get耗时:{get_all_time},send耗时:{send_all_time}")
-                # 重置计数器和时间
-                frame_count = 0
-                all_time = 0
-                get_all_time = 0
-                send_all_time = 0
-                start_time = time.time()
-            # print(f"get_frame:{round(get_etime-get_stime,5)}Sceond;"
-            #       f"send_frame:{round(send_etime-send_stime,5)}Sceond;"
-            #       f"All_time={round(end_time-current_time,5)}")
+            # frame_count += 1
+            # end_time = time.time()
+            # # 计算时间差
+            # el_time = end_time - start_time
+            # all_time = all_time + (end_time - current_time)
+            # # 每隔一定时间(比如5秒)计算一次帧率
+            # if el_time >= 10:
+            #     fps = frame_count / el_time
+            #     print(f"{channel_id}当前帧率: {fps} FPS,循环次数:{frame_count},花费总耗时:{all_time}S,get耗时:{get_all_time},send耗时:{send_all_time}")
+            #     # 重置计数器和时间
+            #     frame_count = 0
+            #     all_time = 0
+            #     get_all_time = 0
+            #     send_all_time = 0
+            #     start_time = time.time()
+
     except asyncio.CancelledError:
         print(f"WebSocket connection for channel {channel_id} closed by client")
         raise
diff --git a/web/API/warn.py b/web/API/warn.py
index 4abacab..c98bf83 100644
--- a/web/API/warn.py
+++ b/web/API/warn.py
@@ -1,32 +1,33 @@
+import os
 from . import api
 from web.common.utils import login_required
-from quart import jsonify, request
+from quart import jsonify, request,send_file
 from core.DBManager import mDBM


@api.route('/warn/search_warn',methods=['POST'])
@login_required
-async def warn_get():  #新增算法
+async def warn_search():  #查询报警
     #获取查询参数
     json_data = await request.get_json()
     s_count = json_data.get('s_count','')
     e_count = json_data.get('e_count','')
     model_name = json_data.get('model_name','')
-    channel_id = json_data.get('channel_id','')
+    channel_name = json_data.get('channel_name','')
     start_time = json_data.get('start_time','')
     end_time = json_data.get('end_time','')

     # 动态拼接 SQL 语句
-    sql = "SELECT * FROM warn WHERE 1=1"
+    sql = "SELECT t1.*,t2.channel_name FROM warn t1 LEFT JOIN channel t2 ON t1.channel_id = t2.element_id WHERE 1=1"
     if model_name:
-        sql += f" AND model_name = {model_name}"
-    if channel_id:
-        sql += f" AND channel_id = {channel_id}"
+        sql += f" AND t1.model_name = {model_name}"
+    if channel_name:
+        sql += f" AND t2.channel_name = {channel_name}"
     if start_time and end_time:
-        sql += f" AND creat_time BETWEEN {start_time} AND {end_time}"
+        sql += f" AND t1.creat_time BETWEEN {start_time} AND {end_time}"

     # 增加倒序排列和分页
-    sql += f" ORDER BY creat_time DESC LIMIT {e_count} OFFSET {s_count}"
+    sql += f" ORDER BY t1.creat_time DESC LIMIT {e_count} OFFSET {s_count}"

     # 使用SQLAlchemy执行查询
     try:
@@ -34,7 +35,55 @@ async def warn_get():  #新增算法
         data = mDBM.do_select(sql)
         # 将数据转换为JSON格式返回给前端
         warn_list = [{"ID": warn[0], "model_name": warn[1], "video_path": warn[2], "img_path": warn[3],
-                      "creat_time": warn[4], "channel_id": warn[5]} for warn in data]
+                      "creat_time": warn[4], "channel_name": warn[6]} for warn in data]
         return jsonify(warn_list)
     except Exception as e:
         return jsonify({"error": str(e)}), 500
+
+@api.route('/warn/warn_img')
+@login_required
+async def warn_img():
+    # 获取图片路径参数
+    image_path = request.args.get('path')
+    if image_path and os.path.exists(image_path):
+        # 返回图片文件
+        return await send_file(image_path)
+    else:
+        # 图片不存在时,返回 404
+        return 'Image not found', 404
+
+@api.route('/warn/warn_video')
+@login_required
+async def warn_video():
+    # 获取视频路径参数
+    video_path = request.args.get('path')
+    if video_path and os.path.exists(video_path):
+        # 返回视频文件流
+        return await send_file(video_path, as_attachment=True)
+    else:
+        # 视频文件不存在时,返回 404
+        return 'Video not found', 404
+
+@api.route('/warn/warn_del',methods=['POST'])
+@login_required
+async def warn_del():
+    # 获取请求体中的报警ID数组
+    data = await request.get_json()
+    alarm_ids = data.get('alarmIds')
+
+    if not alarm_ids:
+        return jsonify({'success': False, 'message': '没有提供报警ID'}), 400
+
+    try:
+        # 根据报警ID进行删除数据的操作,这里是假设数据库删除操作
+        # 例如:delete_from_database(alarm_ids)
+
+        # 模拟删除成功
+        success = True
+
+        if success:
+            return jsonify({'success': True, 'message': '删除成功'})
+        else:
+            return jsonify({'success': False, 'message': '删除失败'})
+    except Exception as e:
+        return jsonify({'success': False, 'message': f'发生异常: {str(e)}'}), 500
\ No newline at end of file
diff --git a/web/main/static/resources/scripts/aiortc-client-new.js b/web/main/static/resources/scripts/aiortc-client-new.js
index 2b42365..e03ede7 100644
--- a/web/main/static/resources/scripts/aiortc-client-new.js
+++ b/web/main/static/resources/scripts/aiortc-client-new.js
@@ -294,6 +294,7 @@ function connect(channel_id,element_id,imgcanvas,ctx,offscreenCtx,offscreenCanva
                 //如有错误信息显示 -- 清除错误信息
                 if(berror_state_list[el_id]){
                     removeErrorMessage(imgcanvas);
+                    console.log("清除错误信息!")
                     berror_state_list[el_id] = false;
                 }
                 // 接收到 JPG 图像数据,转换为 Blob
diff --git a/web/main/static/resources/scripts/warn_manager.js b/web/main/static/resources/scripts/warn_manager.js
index a1b1a69..84f505a 100644
--- a/web/main/static/resources/scripts/warn_manager.js
+++ b/web/main/static/resources/scripts/warn_manager.js
@@ -1,23 +1,74 @@
 let modelMap = [];  //model_name model_id
 let channelMap = [];  //channel_name channel_id
-
+let warn_data = [];  //查询到的报警数据
+let page_data = [];
+let currentEditingRow = null;
+let currentPage = 1;
+const rowsPerPage = 30;
 //页面加载初始化
 document.addEventListener('DOMContentLoaded', function () {
     perWarnHtml()
+
+    document.getElementById('delPageButton').addEventListener('click', function () {
+        delpageWarn();
+    });
});

 //搜索按钮点击
 document.getElementById('searchMButton').addEventListener('click', function() {
+    shearchWarn();
+});
+
+//查询数据
+async function shearchWarn(){
+    //查询告警数据
+    let modelName = document.getElementById('modelSelect').value;
+    let channelName = document.getElementById('channelSelect').value;
     const startTime = document.getElementById('startTime').value;
     const endTime = document.getElementById('endTime').value;
+    const sCount = 0; // 起始记录数从0开始
+    const eCount = 5000; // 每页显示10条记录

-    if (startTime && endTime) {
-        console.log(`开始时间: ${startTime}, 结束时间: ${endTime}`);
-        // 在这里执行其他逻辑,例如根据时间范围查询数据
-    } else {
-        alert('请选择完整的时间区间');
+    if(modelName == "请选择"){
+        modelName = "";
+    }
+    if(channelName == "请选择"){
+        channelName = "";
     }
-});
+
+    // 构造请求体
+    const requestData = {
+        model_name: modelName || "",  // 如果为空,则传空字符串
+        channel_name: channelName || "",
+        start_time: startTime || "",
+        end_time: endTime || "",
+        s_count: sCount,
+        e_count: eCount
+    };
+    try{
+        // 发送POST请求到后端
+        const response = await fetch('/api/warn/search_warn', {
+            method: 'POST',
+            headers: {
+                'Content-Type': 'application/json'
+            },
+            body: JSON.stringify(requestData)  // 将数据转为JSON字符串
+        });
+
+        // 检查响应是否成功
+        if (response.ok) {
+            warn_data = await response.json();
+            // 在这里处理查询结果,比如更新表格显示数据
+            currentPage = 1; // 重置当前页为第一页
+            renderTable(); //刷新表格
+            renderPagination();
+        } else {
+            console.error('查询失败:', response.status);
+        }
+    } catch (error) {
+        console.error('请求出错:', error);
+    }
+}

 async function perWarnHtml() {
     //获取算法和通道列表,在下拉框显示
@@ -47,104 +98,50 @@ async function perWarnHtml() {
             channelMap[option.channel_name] = option.ID;
         });
         set_select_data("channelSelect",channel_select_datas);
-
-        //查询告警数据
-        let modelName = document.getElementById('modelSelect').value;
-        let channelId = document.getElementById('channelSelect').value;
-        const startTime = document.getElementById('startTime').value;
-        const endTime = document.getElementById('endTime').value;
-        const sCount = 0; // 起始记录数从0开始
-        const eCount = 100; // 每页显示10条记录
-
-        if(modelName == "请选择"){
-            modelName = "";
-        }
-        if(channelId == "请选择"){
-            channelId = "";
-        }
-
-        // 构造请求体
-        const requestData = {
-            model_name: modelName || "",  // 如果为空,则传空字符串
-            channel_id: channelId || "",
-            start_time: startTime || "",
-            end_time: endTime || "",
-            s_count: sCount,
-            e_count: eCount
-        };
-        try{
-            // 发送POST请求到后端
-            const response = await fetch('/api/warn/search_warn', {
-                method: 'POST',
-                headers: {
-                    'Content-Type': 'application/json'
-                },
-                body: JSON.stringify(requestData)  // 将数据转为JSON字符串
-            });
-
-            // 检查响应是否成功
-            if (response.ok) {
-                const data = await response.json();
-                console.log('查询结果:', data);
-                // 在这里处理查询结果,比如更新表格显示数据
-                //updateTableWithData(data);
-            } else {
-                console.error('查询失败:', response.status);
-            }
-        } catch (error) {
-            console.error('请求出错:', error);
-        }
-
+        //查询数据
+        shearchWarn()
     }catch (error) {
         console.error('Error fetching model data:', error);
     }
-
-    //读取报警数据并进行显示--要分页显示
-    // modelData_bak = modelData;
-    // currentPage = 1; // 重置当前页为第一页
-    // renderTable(); //刷新表格
-    // renderPagination();
-    //操作-删除,图片,视频,审核(灰)
 }

 //刷新表单页面数据
 function renderTable() {
-    const tableBody = document.getElementById('table-body-model');
+    const tableBody = document.getElementById('table-body-warn');
     tableBody.innerHTML = '';  //清空
     const start = (currentPage - 1) * rowsPerPage;
     const end = start + rowsPerPage;
-    const pageData = modelData.slice(start, end);
+    pageData = warn_data.slice(start, end);
     const surplus_count = rowsPerPage - pageData.length;
-    pageData.forEach((model) => {
+    pageData.forEach((warn) => {
         const row = document.createElement('tr');
         row.innerHTML = `
-