
Number of channels handled by a single model process: first dynamic version

zfbox
张龙 committed 6 months ago
commit 84f83c4549
8 changed files:
  1. config.yaml (5)
  2. core/ChannelData.py (55)
  3. core/ChannelManager.py (29)
  4. core/ModelNode.py (76)
  5. core/ModelNodeManager.py (76)
  6. myutils/MyDeque.py (2)
  7. web/API/viedo.py (4)
  8. web/main/static/resources/scripts/aiortc-client-new.js (15)

config.yaml (5 changed lines)

@@ -43,10 +43,10 @@ cap_sleep_time: 120 #单位秒 -- 5分钟
 buffer_len: 100 #分析后画面缓冲区帧数 -- 可以与验证帧率结合确定缓冲区大小
 RESET_INTERVAL : 100000 #帧数重置上限
 frame_rate : 20 #帧率参考值 -- 后续作用主要基于verify_rate进行帧率控制
-verify_rate : 6 #验证帧率--- 也就是视频输出的帧率
+verify_rate : 5 #验证帧率--- 也就是视频输出的帧率
 warn_video_path: /mnt/zfbox/model/warn/
 warn_interval: 120 #报警间隔--单位秒
 video_error_count: 10 #单位秒 ---根据验证帧率,判断10秒内都是空帧的话,视频源链接有问题。
 reconnect_attempts: 5 #cap 读取帧失败几次后进行重连

 #system --- 指定网卡
@@ -55,4 +55,5 @@ wireless_interface : WLAN
 #独立模型线程相关
 workType : 2 # 1--一通道一线程。2--模型独立线程
+maxCount : 2 #单个进程最大通道数量
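
The new `maxCount` key caps how many channels one model process may serve. A minimal sketch of how the touched keys are consumed, assuming it is run from the repository root so `myutils` is importable; `myCongif.get_data()` is exactly how `ChannelData` and `ModelNodeManager` read them in the hunks below, and the 20-frames-per-channel factor mirrors `ModelNode.imq_count`:

```python
# Hedged sketch: read the new per-process channel cap the way this commit does.
from myutils.ConfigManager import myCongif

max_count = myCongif.get_data("maxCount")       # channels allowed per model process (2 here)
verify_rate = myCongif.get_data("verify_rate")  # output/verification frame rate (now 5 fps)

# capacity of one model process's input queue under the new sizing rule
imq_capacity = max_count * 20
print(f"maxCount={max_count}, verify_rate={verify_rate}, in_mq size={imq_capacity}")
```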

core/ChannelData.py (55 changed lines)

@@ -44,7 +44,7 @@ class ChannelData:
         self.deque_frame = deque(maxlen=self.max_len)  #视频缓冲区用于保存录像
         self.last_frame = None  # 保存图片数据
         #self.frame_queue = queue.Queue(maxsize=1)
-        self.frame_queue = MyDeque(10)  #分析画面MQ
+        self.frame_queue = MyDeque(15)  #分析画面MQ
         self.counter = 0  #帧序列号--保存报警录像使用
         #model独立线程相关
@@ -52,12 +52,13 @@ class ChannelData:
         self.per_status= False  #预处理线程状态
         self.post_th = None  #后处理线程句柄
         self.post_status = False  #后处理线程状态
+        self.model_nM = None  # 模型管理对象
         self.model_node= None  #模型对象 -- inmq,outmq
-        self.out_mq = MyDeque(30)  #分析结果存放MQ
+        self.out_mq = MyDeque(20)  #分析结果存放MQ
         #设置JPEG压缩基本
-        self.encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), myCongif.get_data("encode_param")]  # 50 是压缩质量(0到100)
+        self.encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), myCongif.get_data("encode_param")]  #压缩质量(0到100)

     #添加一帧图片
     def add_deque(self, value):
@@ -77,12 +78,6 @@ class ChannelData:
             # with self.lock:
             #     frame = self.last_frame
             # return frame
-            # try:
-            #     frame = self.frame_queue.get(timeout=0.3)  #web传输没有做帧率控制了,可以超时时间长一点
-            # except queue.Empty:
-            #     self.logger.debug(f"{self.channel_id}--web--获取分析画面失败,队列空")
-            #     return None
             frame = self.frame_queue.mypopleft()
             return frame
         else:  #如果没有运行,直接从cap获取画面
@@ -93,30 +88,16 @@ class ChannelData:
                 return None
             ret,buffer_bgr_webp = self._encode_frame(frame)
             return buffer_bgr_webp
         return None

     def update_last_frame(self,buffer):
         if buffer:
-            #print(f"{time.time()}--{self.channel_id}--数据入frame_queue队列\r")
             self.frame_queue.myappend(buffer)
             # with self.lock:
             #     self.last_frame = None
             #     self.last_frame = buffer
-            # if self.frame_queue.full():
-            #     try:
-            #         print("channel--丢帧")
-            #         self.frame_queue.get(timeout=0.01)
-            #     except queue.Empty:  #为空不处理
-            #         pass
-            # self.frame_queue.put(buffer)
-            # try:
-            #     self.frame_queue.put(buffer,timeout=0.05)
-            # except queue.Full:
-            #     self.logger.debug(f"{self.channel_id}分析画面队列满,插入失败")
-            #     pass

     #------------h264编码相关---------------
     def start_h264_encoder(self,width, height):  #宽高一样,初步定全进程一个 libx264 h264_ascend
         process = subprocess.Popen(
@@ -298,7 +279,7 @@ class ChannelData:
             # *********取画面*************
             ret, frame = self.cap.read()  # 除了第一帧,其它应该都是有画面的
             if not ret:
-                # self.logger.debug(f"{self.channel_id}--model--获取cap画面失败,队列空")
+                self.logger.debug(f"{self.channel_id}--model--获取cap画面失败,队列空")
                 continue  # 没读到画面继续
             # 验证检测计划,是否在布防时间内
             now = datetime.datetime.now()  # 获取当前日期和时间
@@ -326,11 +307,16 @@ class ChannelData:
         # 初始化业务数据
         result = [0 for _ in range(duration_time * verify_rate)]  # 初始化时间*验证帧率数量的结果list
         warn_last_time = time.time()
+        tmpcount = 0
         while self.bool_run:
             out_data = self.out_mq.mypopleft()  #(image,scale_ratio, pad_size,outputs):
             if not out_data:
-                time.sleep(0.1)
+                tmpcount += 1
+                if tmpcount == 10:
+                    print(f"{time.time()}--{self.channel_id}--model-outmq 连续10次没有数据\r")
+                time.sleep(1)
                 continue
+            tmpcount = 0
             #开始后处理
             bwarn, warn_text = self.model_node.model.postwork(out_data.image,out_data.outputs,out_data.scale_ratio,out_data.pad_size,
                                                               check_area,polygon,conf_threshold,iou_thres)
@@ -531,7 +517,7 @@ class ChannelData:
         verify_rate = myCongif.get_data("verify_rate")
         warn_interval = myCongif.get_data("warn_interval")
         self.bool_run = True
-        if self.model_node:  #要起个预处理线程,和一个后处理线程
+        if self.model_nM:  #要起个预处理线程,和一个后处理线程
             #启动后处理线程
             self.post_th = threading.Thread(target=self._post_work_th,
                                             args=(model_data[3],model_data[4],verify_rate,warn_interval,model_data[7],
@@ -539,7 +525,12 @@ class ChannelData:
             self.post_th.start()
             # 启动模型线程,若线程已启动,则+1
-            self.model_node.start_model_th(self.channel_id,self.out_mq)
+            self.model_nM.start_model_th(self.channel_id,self.out_mq)
+            self.model_node = self.model_nM.getModle_Nodel(self.channel_id)
+            if not self.model_node:
+                self.logger.error("model_node 不应该为空!!")
+                self.bool_run = False
+                return
             #启动预处理线程
             self.per_th = threading.Thread(target=self._pre_work_th,args=(schedule,))
@@ -552,14 +543,14 @@ class ChannelData:
         self.work_th.start()

     def _stop_model_th(self):
-        if self.model_node:  #独立线程,需要停止预处理线程,和后处理线程
+        if self.model_nM:  #独立线程,需要停止预处理线程,和后处理线程
             self.bool_run = False
             #停止预处理线程
             if self.per_th:
                 self.per_th.join()
                 self.per_th = None
             #停止model线程 -1
-            self.model_node.stop_model_th(self.channel_id)
+            self.model_nM.stop_model_th(self.channel_id)
             #停止后处理线程
             if self.post_th:
                 self.post_th.join()
@@ -630,7 +621,7 @@ class ChannelData:
         # self.logger.debug(f"{model_path} 加载成功!!!!")
         # return md

-    def start_work(self,cap_data,model_data,schedule,type,model_Node=None):
+    def start_work(self,cap_data,model_data,schedule,type,model_NodeM=None):
         '''
         开始工作线程包括视频通道采集和模型处理
         :param cap_data: [source,type]
@@ -644,7 +635,7 @@ class ChannelData:
         :return: True,False
         '''
         ret = False
-        self.model_node = model_Node
+        self.model_nM = model_NodeM
         if type==0:
             self._start_cap_th(cap_data[0],cap_data[1])  #先cap,再model
             self._start_model_th(model_data,schedule)
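
The post-processing loop above now counts consecutive empty polls of `out_mq` instead of sleeping 100 ms blindly, so a starved result queue gets flagged. A self-contained toy of that pattern; the deque, channel id and result strings are stand-ins, the sleep is shortened so the demo exits quickly, and the exact placement of the sleep relative to the `if` is my reading of the hunk:

```python
import time
from collections import deque

out_mq = deque(["result-1", "result-2"])  # stand-in for the channel's MyDeque of model outputs
channel_id = 1                            # hypothetical channel id
tmpcount = 0                              # consecutive empty polls
deadline = time.time() + 2.0              # stop the demo after ~2 s

while time.time() < deadline:
    out_data = out_mq.popleft() if out_mq else None
    if not out_data:
        tmpcount += 1
        if tmpcount == 10:
            # in the real loop (1 s sleep) this means ~10 s without any result
            print(f"{channel_id}: model-outmq empty 10 times in a row")
        time.sleep(0.1)                   # the commit uses time.sleep(1)
        continue
    tmpcount = 0                          # reset as soon as a result arrives
    print(f"post-processing {out_data}")  # real code runs model postwork here
```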

core/ChannelManager.py (29 changed lines)

@@ -2,8 +2,10 @@ import cv2
 import threading
 import base64
 from myutils.ConfigManager import myCongif
+from myutils.MyLogger_logger import LogHandler
 #独立模型线程
-from core.ModelNode import ModelNode
+from core.ModelNodeManager import ModelNodeManger
+#from core.ModelNode import ModelNode
 from core.ChannelData import ChannelData  #其实ChannelNode会更加贴切一些
 '''
@@ -11,8 +13,9 @@ from core.ChannelData import ChannelData #其实ChannelNode会更加贴切一些
 '''
 class ChannelManager:
     def __init__(self):
-        self._channels = {}
+        self._channels = {}  #channel_id ChannelData(C_Node)
         self.cm_lock = threading.RLock()  # 用于保证字典操作的线程安全
+        self.logger = LogHandler().get_logger("ChannelManager")

         # 独立Model_th相关参数 --- modelNode 用一个类封装下model线程和其相关参数
         self.model_list = {}  # model_id -- modelNode
@@ -66,13 +69,12 @@ class ChannelManager:
         with self.cm_lock:
             if channel_id in self._channels:
                 c_node = self._channels[channel_id]
-                model_node = None
+                model_nodeM = None
                 if self.workType == 2 and type !=1:  #需要确保当type!=1时,model_data必须有数据 -- 调用时已经有判断
-                    model_node = self.CreateModelNode(model_data[0], model_data[5], channel_id)
-                ret = c_node.start_work(cap_data,model_data,schedule,type,model_node)
+                    model_nodeM = self.CreateModelNode(model_data[0], model_data[5], channel_id)
+                ret = c_node.start_work(cap_data,model_data,schedule,type,model_nodeM)
                 return ret

     #停止工作线程---要把视频采集线程停止掉
     def stop_channel(self,channel_id,type):  #9-10截止目前就重启模型线程时用到该函数(channel_id,2)
         '''
@@ -132,19 +134,20 @@ class ChannelManager:
     '''模型独立线程修改2024-9-9,要求是双模式兼容'''
     '''2024-10-13修改独立线程为独立进程---acl初始化需要在子进程中初始化 -- 该方案无法兼容旧版本'''
+    '''2025-10-24修改动态通道数量,cm --> mn_m -->model_nodel'''
     def CreateModelNode(self, model_id, model_path, channel_id):
         if model_id in self.model_list:
-            modelN = self.model_list[model_id]
+            modelNM = self.model_list[model_id]
         else:
-            modelN = ModelNode(self.device_id,model_path)
-            self.model_list[model_id] = modelN
+            modelNM = ModelNodeManger(self.device_id,model_id,model_path)
+            self.model_list[model_id] = modelNM
             #modelN = ModelNode(self.device_id, model_path,channel_id)
-        return modelN
+        return modelNM

     def delModelNode(self):  #关于modelnodel :1.考虑modelnode是否可以不删除,清空inmq即可,2.mdel_list是否需要加锁。#?
-        return
-        for model_id, modelNode in self.model_list.items():
-            if modelNode.ch_count == 0:
+        #return
+        for model_id, modelNodeM in self.model_list.items():
+            if modelNodeM.ch_count == 0:
                 del self.model_list[model_id]
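
`CreateModelNode` now caches one `ModelNodeManger` per `model_id`, so every channel that uses the same model shares a manager and, through it, a pool of model processes capped at `maxCount` channels each. A toy sketch of that cache; `FakeManager` and the model path are stand-ins, not the project's classes:

```python
class FakeManager:
    """Stand-in for ModelNodeManger: one instance per model_id."""
    def __init__(self, device_id, model_id, model_path):
        self.model_id = model_id
        self.ch_count = 0

model_list = {}  # model_id -> manager, mirrors ChannelManager.model_list

def create_model_node(device_id, model_id, model_path):
    if model_id in model_list:
        return model_list[model_id]          # reuse the existing manager
    manager = FakeManager(device_id, model_id, model_path)
    model_list[model_id] = manager
    return manager

a = create_model_node(0, 7, "model/yolo_demo.py")   # hypothetical model path
b = create_model_node(0, 7, "model/yolo_demo.py")
assert a is b  # two channels with the same model share one manager
```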

core/ModelNode.py (76 changed lines)

@@ -1,7 +1,7 @@
 import threading
-import importlib.util
 import time
 import multiprocessing
+import importlib.util
 from multiprocessing.managers import BaseManager
 from myutils.ConfigManager import myCongif
 from myutils.MyLogger_logger import LogHandler
@@ -12,7 +12,6 @@ from threading import Lock
 #2024-10-14model处理调整为独立子进程
 def model_process(device,model,model_platform,m_p_status,brun,in_mq,out_mq):
     # 初始化模型运行资源
     context = None
     if model_platform == "acl":  # ACL线程中初始化内容
@@ -26,16 +25,19 @@ def model_process(device,model,model_platform,m_p_status,brun,in_mq,out_mq):
     #执行工作
     m_p_status.value = 1
-    s_time = time.time()
+    use_time = 0
     icount = 0
     while brun.value:
         try:
-            inData = in_mq.get(timeout=0.01)  #空时-block,直到有值 #(self,channel_id,img,image,scale_ratio, pad_size):
+            inData = in_mq.get(timeout=0.1)  #空时-block,直到有值 #(self,channel_id,img,image,scale_ratio, pad_size):
         except:
             #print("in_mq_空")
             continue
         if inData:
+            #print(f"{time.time()}--{inData.channel_id}--数据取出进行处理!")
+            s_time = time.time()
             outputs = model.execute([inData.img,])  #创建input,执行模型,返回结果 --失败返回None
+            e_time = time.time()
             outdata = ModeloutData(inData.image,inData.scale_ratio,inData.pad_size,outputs,inData.channel_id)
             del inData.img
             #结果输出
@@ -44,14 +46,14 @@ def model_process(device,model,model_platform,m_p_status,brun,in_mq,out_mq):
                 #print("model_输出mq满!")
                 del tmp
             out_mq.put(outdata)  # 需要确保out_mq只有在这里put
-        else:  #正常情况不会执行到该条件
-            time.sleep(0.05)
+        # else:  #正常情况不会执行到该条件
+        #     time.sleep(0.01)
         icount += 1
-        if icount == 1000:
-            e_time = time.time()
-            use_time = (e_time - s_time) / 1000
-            print(f"model_process耗时--{use_time}")
-            s_time = time.time()
+        use_time += (e_time - s_time)
+        if icount == 500:
+            avg_time = use_time / 500
+            print(f"model_process耗时--{avg_time}")
+            use_time = 0
             icount = 0

     #结束进程,释放资源
@@ -75,11 +77,12 @@ def model_process(device,model,model_platform,m_p_status,brun,in_mq,out_mq):
 class ModelNode:
-    def __init__(self,device,model_path,channel_id):
+    def __init__(self,device,model_path,ch_max_count=1):
         self.device = device
         self.model_path = model_path
-        self.channel_id = channel_id
+        self.channel_id = []  #channel_id_list
         self.model = None  #模型对象
+        self.ch_max_count = ch_max_count
         self.ch_count = 0  #关联启动的通道数量
         self.count_Lock = Lock()  #count的维护锁
         self.model_platform = myCongif.get_data("model_platform")
@@ -88,18 +91,21 @@ class ModelNode:
         self.model_out_th = None
         self.channel_dict = {}
         self.cdict_Lock = Lock()
+        self.in_mq_Lock = Lock()
+        self.last_in_c_id = 0
         #独立进程方案--共享参数
         self.process = None
-        self.in_mq = multiprocessing.Queue(maxsize=30)
-        self.out_mq = multiprocessing.Queue(maxsize=30)  #调整结构,多线程(预处理)-》in_mq-子进程-out_mq-》线程分发outdata->多线程(后处理)
+        self.imq_count = ch_max_count * 20  #一个通道20帧缓冲区间
+        self.in_mq = multiprocessing.Queue(maxsize=self.imq_count)
+        self.out_mq = multiprocessing.Queue(maxsize=self.imq_count)  #调整结构,多线程(预处理)-》in_mq-子进程-out_mq-》线程分发outdata->多线程(后处理)
         self.brun = multiprocessing.Value('b',True)  #brun.value = False,brun.value = True
         self.m_p_status = multiprocessing.Value('i',0)

     def __del__(self):
         pass

-    def _import_model(self,model_path,threshold=0.5,iou_thres=0.5):
+    def _import_model(self, model_path, threshold=0.5, iou_thres=0.5):
         '''
         根据路径动态导入模块
         :param model_path: 模块路径
@@ -115,7 +121,7 @@ class ModelNode:
             # 从模块中获取指定的类
             Model = getattr(module, "Model")
             # 使用 Model 类
-            model_instance = Model(model_path,threshold,iou_thres)
+            model_instance = Model(model_path, threshold, iou_thres)
             return model_instance
         except ModuleNotFoundError as e:
             print(f"Module not found: {e}")
@@ -133,11 +139,15 @@ class ModelNode:
         # except multiprocessing.queues.Full:
         #     print("mdel_inmq输入满!")
         #     del data
-        if self.in_mq.full():
-            tmp = self.in_mq.get()
-            #print("mdel_inmq输入满!")
-            del tmp
-        self.in_mq.put(data)  # 需要确保out_mq只有在这里put
+        with self.in_mq_Lock:
+            if self.ch_count>1 and self.last_in_c_id == data.channel_id:
+                return
+            self.last_in_c_id = data.channel_id
+            if self.in_mq.full():
+                tmp = self.in_mq.get()
+                #print("mdel_inmq输入满!")
+                del tmp
+            self.in_mq.put(data)  # 需要确保out_mq只有在这里put

     def _modle_th(self):
         '''根据channel_id分发out_data到out_mq'''
@@ -145,19 +155,22 @@ class ModelNode:
         icount = 0
         while self.brun.value:
             try:
-                outdata = self.out_mq.get(timeout=1)
+                outdata = self.out_mq.get(timeout=0.1)
             except:
                 continue
             with self.cdict_Lock:
                 if outdata.channel_id in self.channel_dict:
                     self.channel_dict[outdata.channel_id].myappend(outdata)  #后面就交给后处理线程了
-            icount += 1
-            if icount ==1000:
-                e_time = time.time()
-                use_time = (e_time-s_time) /1000
-                print(f"{self.channel_id}_modle_th耗时--{use_time}")
-                s_time = time.time()
-                icount = 0
+                else:
+                    print(f"{outdata.channel_id}不在channel_dict里面")
+            # icount += 1
+            # if icount ==500:
+            #     e_time = time.time()
+            #     use_time = (e_time-s_time) /500
+            #     print(f"{self.channel_id}_modle_th耗时--{use_time}秒")
+            #     s_time = time.time()
+            #     icount = 0

     #2024-10-14调整为独立进程执行 -- 一个线程一个MQ MyDeque
@@ -165,8 +178,10 @@ class ModelNode:
         with self.count_Lock:
             with self.cdict_Lock:
                 if channel_id in self.channel_dict:
+                    print(f"{channel_id}已经在channel_dict内")
                     return  #这个可以删除老的,新增新的--后续验证,若需要则进行修改
                 self.channel_dict[channel_id] = out_mq  #增加一个记录
+                print(f"新增一个channel节点--{channel_id}")
             if self.ch_count == 0:  #第一次启动--需要启动处理线程和进程
                 #加载自定义模型文件
@@ -186,6 +201,7 @@ class ModelNode:
                                                     args=(self.device,self.model,self.model_platform,
                                                           self.m_p_status,self.brun,self.in_mq,self.out_mq))
                 self.process.start()
             self.ch_count += 1  #有通道调用一次就加一

     def stop_model_th(self,channel_id):
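
With several channels sharing one process, `ModelNode` now refuses to enqueue two frames from the same channel back to back (the `last_in_c_id` check above, in the method the manager's commented-out code calls `pro_add_data`), which roughly interleaves channels on a busy input queue. A self-contained toy of that gate; the plain list and `InData` dataclass stand in for the real `multiprocessing.Queue` and preprocessed-frame object:

```python
from dataclasses import dataclass
from threading import Lock

@dataclass
class InData:                  # stand-in for the preprocessed frame object
    channel_id: int
    img: bytes = b""

class InputGate:
    """Mimics the gate: with >1 channel attached, a channel may not enqueue
    two frames in a row, so one busy camera cannot monopolise the shared in_mq."""
    def __init__(self, ch_count, maxsize):
        self.ch_count = ch_count
        self.maxsize = maxsize
        self.last_in_c_id = 0
        self.lock = Lock()
        self.in_mq = []          # plain list instead of multiprocessing.Queue

    def pro_add_data(self, data):
        with self.lock:
            if self.ch_count > 1 and self.last_in_c_id == data.channel_id:
                return           # drop: same channel as the previous frame
            self.last_in_c_id = data.channel_id
            if len(self.in_mq) >= self.maxsize:
                self.in_mq.pop(0)  # queue full: discard the oldest frame
            self.in_mq.append(data)

gate = InputGate(ch_count=2, maxsize=40)
for cid in (1, 1, 2, 2, 1):      # channel 1 sends twice in a row, then channel 2 does
    gate.pro_add_data(InData(cid))
print([d.channel_id for d in gate.in_mq])   # -> [1, 2, 1]
```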

core/ModelNodeManager.py (new file, 76 lines)

@@ -0,0 +1,76 @@
+import multiprocessing
+from threading import Lock
+from core.ModelNode import ModelNode
+from myutils.ConfigManager import myCongif
+
+class ModelNodeManger:
+    def __init__(self,device_id,model_id,model_path,threshold=0.5,iou_thres=0.5):
+        self.ch_count = 0  #当前模型关联的通道数量
+        self.channel_list = {}  #channel_id modelNodle
+        self.clist_Lock = Lock()  # count的维护锁
+        self.modelNode_list = []  # model_Node
+        self.brun = multiprocessing.Value('b',True)  #brun.value = False,brun.value = True
+        self.m_p_status = multiprocessing.Value('i',0)
+        self.model = None  # 模型对象
+        #-------
+        self.max_count = myCongif.get_data('maxCount')
+        self.device_id = device_id
+        self.model_id = model_id
+        self.model_path = model_path
+        self.threshold = threshold
+        self.iou_thres = iou_thres
+
+    # #添加数据
+    # def pro_add_data(self,channel_id,data):
+    #     pass
+    #     # #加锁? -- 对于多线程这里的耗时会不会有点大。。。 model_nodel 反馈出去?????getModle_Nodel
+    #     # with self.clist_Lock:
+    #     #     if channel_id in self.channel_list:
+    #     #         self.modelNode_list[channel_id].pro_add_data(data)
+
+    #暴露modelNodel -- 避免pro_add_data锁竞争
+    def getModle_Nodel(self,channel_id):
+        model_nodel = None
+        if channel_id in self.channel_list:
+            model_nodel = self.channel_list[channel_id]
+        return model_nodel
+
+    def start_model_th(self, channel_id, out_mq):
+        with self.clist_Lock:
+            if channel_id in self.channel_list:  #已经在List 说明已经启动过了,但不应该重复调用
+                print("不应该重复调用start_model_th")
+                return True
+            else:  # 还没入list ,查找可以插入的model_node
+                for model_node in self.modelNode_list:
+                    if model_node.ch_count < self.max_count:  # 没满可以插入
+                        model_node.start_model_th(channel_id, out_mq)  #+1
+                        self.channel_list[channel_id] = model_node  #新增 channel_di 与model_node的对应关系
+                        return True
+                # 执行到这说明没有空的model_nodel -- 需要新建
+                model_node = ModelNode(self.device_id,self.model_path,self.max_count)
+                model_node.start_model_th(channel_id, out_mq)  # 启动线程,并+1
+                #两个数据结构,新增节点
+                self.modelNode_list.append(model_node)  #新增一个model_node节点
+                self.channel_list[channel_id] = model_node  #新增 channel_di 与model_node的对应关系
+                #行动完更新数量
+                self.ch_count +=1
+                return True
+
+    def stop_model_th(self, channel_id):
+        with self.clist_Lock:
+            if channel_id in self.channel_list:
+                model_node = self.channel_list[channel_id]
+                model_node.stop_model_th(channel_id)  #减-,根据情况停止工作进程、
+                # 需要考虑下model_node是否要删除,还是保留后重复利用-
+                if model_node.ch_count ==0:
+                    self.modelNode_list.remove(model_node)  #如何没有关联通道了,侧删除该modelnodel
+                    del model_node  #删除对象
+                del self.channel_list[channel_id]  #删除channel_id 与 model_node的对应关心节点
+                #行动完更新数量
+                self.ch_count -= 1
+                return True
+            else:
+                print("不在list里面,不应该调用stop_model_th")
+                return False
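
The new manager packs channels onto `ModelNode` processes, at most `maxCount` per node, spawning a new node only when all existing ones are full and tearing a node down once its last channel stops. A self-contained toy of that placement policy; `StubNode` replaces the real `ModelNode` (no ACL context or subprocess), so only the counting logic is illustrated:

```python
from threading import Lock

class StubNode:
    """Stands in for ModelNode: only tracks how many channels it serves."""
    def __init__(self):
        self.ch_count = 0
    def start_model_th(self, channel_id, out_mq):
        self.ch_count += 1
    def stop_model_th(self, channel_id):
        self.ch_count -= 1

class Manager:
    """Placement policy as in ModelNodeManger.start_model_th/stop_model_th."""
    def __init__(self, max_count=2):
        self.max_count = max_count
        self.lock = Lock()
        self.nodes = []        # modelNode_list
        self.channels = {}     # channel_id -> node

    def start(self, channel_id, out_mq=None):
        with self.lock:
            if channel_id in self.channels:
                return True                    # already running
            for node in self.nodes:
                if node.ch_count < self.max_count:
                    node.start_model_th(channel_id, out_mq)
                    self.channels[channel_id] = node
                    return True
            node = StubNode()                  # every node is full: spawn a new one
            node.start_model_th(channel_id, out_mq)
            self.nodes.append(node)
            self.channels[channel_id] = node
            return True

    def stop(self, channel_id):
        with self.lock:
            node = self.channels.pop(channel_id, None)
            if node is None:
                return False
            node.stop_model_th(channel_id)
            if node.ch_count == 0:             # last channel gone: drop the node
                self.nodes.remove(node)
            return True

m = Manager(max_count=2)
for ch in (101, 102, 103):    # three channels, cap of two per node
    m.start(ch)
print(len(m.nodes))           # -> 2 processes for 3 channels
m.stop(101); m.stop(102)
print(len(m.nodes))           # -> 1 (the emptied node was removed)
```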

myutils/MyDeque.py (2 changed lines)

@@ -24,6 +24,8 @@ class MyDeque:
         with self.lock:
             if self.dq:
                 object = self.dq.popleft()
+            else:
+                pass
         return object

     def myclear(self):
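
The frame hand-offs in this commit (`frame_queue`, `out_mq`) rely on MyDeque's non-blocking semantics: a bounded, lock-protected deque whose `mypopleft` returns `None` when empty. A minimal approximation for readers without the repo, assuming `myappend` drops the oldest entry when the bound is hit; this is not the project's actual class:

```python
from collections import deque
from threading import Lock

class MiniDeque:
    """Rough stand-in for myutils.MyDeque: bounded, thread-safe, non-blocking."""
    def __init__(self, maxlen):
        self.dq = deque(maxlen=maxlen)  # deque silently drops the oldest item on overflow
        self.lock = Lock()

    def myappend(self, item):
        with self.lock:
            self.dq.append(item)

    def mypopleft(self):
        obj = None                      # empty queue -> None, callers just retry
        with self.lock:
            if self.dq:
                obj = self.dq.popleft()
        return obj

    def myclear(self):
        with self.lock:
            self.dq.clear()

q = MiniDeque(2)
q.myappend("f1"); q.myappend("f2"); q.myappend("f3")  # "f1" is dropped
print(q.mypopleft(), q.mypopleft(), q.mypopleft())    # -> f2 f3 None
```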

web/API/viedo.py (4 changed lines)

@@ -158,7 +158,7 @@ async def handle_channel(channel_id,websocket):
     #视频传输缓冲区
     #frame_buffer = deque(maxlen=10)
     try:
-        cnode = mMM.verify_list.get_channel_data(channel_id)
+        cnode = mMM.verify_list.get_channel_data(channel_id)  #ChannelManager.get_channel_data
         if cnode is None:
             print("---channel_id--错误--")
             return
@@ -195,7 +195,7 @@ async def handle_channel(channel_id,websocket):
             icount += 1
             if icount > error_max_count:
                 print(f"通道-{channel_id},长时间未获取图像,休眠一段时间后再获取。")
-                #icount = 0
+                icount = 0
                 error_message = b'error:video_error'
                 await websocket.send(error_message)
                 await asyncio.sleep(sleep_time)  # 等待视频重连时间

web/main/static/resources/scripts/aiortc-client-new.js (15 changed lines)

@@ -293,8 +293,8 @@ function connect(channel_id,element_id,imgcanvas,ctx,offscreenCtx,offscreenCanva
            if (message.startsWith('frame:')){
                //如有错误信息显示 -- 清除错误信息
                if(berror_state_list[el_id]){
+                   console.log("清除错误信息!");
                    removeErrorMessage(imgcanvas);
-                   console.log("清除错误信息!")
                    berror_state_list[el_id] = false;
                }
                // 接收到 JPG 图像数据,转换为 Blob
@@ -332,10 +332,16 @@ function connect(channel_id,element_id,imgcanvas,ctx,offscreenCtx,offscreenCanva
    socket.onclose = function() {
        let el_id = socket.customData.element_id;
        let cl_id = socket.customData.channel_id;
-       if(run_list[el_id] && socket.customData.version_id === connection_version[el_id]){
-           console.log(`尝试重新连接... Channel ID: ${cl_id}`);
-           setTimeout(() => connect(cl_id, el_id, socket.customData.imgcanvas,
+       //判断是不是要重连
+       if(socket.customData.version_id === connection_version[el_id]){
+           if(run_list[el_id]){
+               console.log(`尝试重新连接... Channel ID: ${cl_id}`);
+               setTimeout(() => connect(cl_id, el_id, socket.customData.imgcanvas,
                   socket.customData.ctx,socket.customData.streamUrl), 1000*10); // 尝试在10秒后重新连接
+           }
+           delete video_list[el_id];
+           delete connection_version[el_id];
+           delete run_list[el_id];
        }
    };
@@ -551,6 +557,7 @@ function removeErrorMessage(imgElement) {
    const errorElement = imgElement.parentNode.querySelector('.error-message');
    if (errorElement) {
        imgElement.parentNode.removeChild(errorElement);
+       imgElement.style.display = 'block';
    }
}
