16 changed files with 1218 additions and 500 deletions
@ -0,0 +1,629 @@ |
|||
import time |
|||
import copy |
|||
import importlib.util |
|||
import datetime |
|||
import time |
|||
import threading |
|||
import cv2 |
|||
import ffmpeg |
|||
import subprocess |
|||
from collections import deque |
|||
from myutils.MyLogger_logger import LogHandler |
|||
from core.CapManager import VideoCaptureWithFPS |
|||
from core.ACLModelManager import ACLModeManger |
|||
from model.plugins.ModelBase import ModelBase |
|||
from core.WarnManager import WarnData |
|||
from core.DataStruct import ModelinData,ModeloutData |
|||
from myutils.MyDeque import MyDeque |
|||
from myutils.ConfigManager import myCongif |
|||
|
|||
class ChannelData:
    # One instance per video channel: owns the capture object, the model worker
    # threads, the frame buffers and alarm reporting for that channel.
    def __init__(self,channel_id,deque_length,icount_max,warnM):
        """
        :param channel_id: ID of this video channel
        :param deque_length: NOTE(review): appears unused -- buffer length is
            read from config key "buffer_len"; confirm before removing
        :param icount_max: upper bound of the frame sequence counter
        :param warnM: alarm-manager object (message queue) alarms are pushed to
        """
        self.logger = LogHandler().get_logger("ChannelDat")
        self.model_platform = myCongif.get_data("model_platform")
        self.channel_id = channel_id  # channel ID of this channel
        self.warnM = warnM  # alarm thread manager object -- MQ
        # Persistent ffmpeg process that H.264-encodes raw frames (see encode_frame_h264)
        self.ffprocess = self.start_h264_encoder(myCongif.get_data("mywidth"),myCongif.get_data("myheight"))
        # --- video capture ---
        self.cap = None  # capture object of this channel
        self.frame_rate = myCongif.get_data("frame_rate")
        self.frame_interval = 1.0 / int(myCongif.get_data("verify_rate"))

        # --- model / inference ---
        self.model = None  # model object -- one channel is tied to one model
        self.work_th = None  # worker-thread handle of this channel
        self.b_model = False  # True while a model thread is running
        self.bool_run = True  # thread run flag
        self.lock = threading.RLock()  # for thread safety
        self.icount_max = icount_max  # upper bound of the frame sequence number
        self.max_len = myCongif.get_data("buffer_len")
        self.deque_frame = deque(maxlen=self.max_len)  # video buffer used to save alarm recordings
        self.last_frame = None  # latest encoded picture
        #self.frame_queue = queue.Queue(maxsize=1)
        self.frame_queue = MyDeque(10)  # MQ of analysed frames for the web page
        self.counter = 0  # frame sequence number -- used when saving alarm recordings

        # --- standalone model-thread mode ---
        self.per_th = None  # pre-processing thread handle
        self.per_status= False  # pre-processing thread status
        self.post_th = None  # post-processing thread handle
        self.post_status = False  # post-processing thread status
        self.model_node= None  # shared model node -- carries in_mq / out_mq
        self.out_mq = MyDeque(30)  # per-channel output MQ

        # JPEG compression parameters
        self.encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), myCongif.get_data("encode_param")]  # quality 0-100
|||
|
|||
#添加一帧图片 |
|||
def add_deque(self, value): |
|||
if len(self.deque_frame) == self.max_len: |
|||
removed_frame = self.deque_frame.popleft() |
|||
del removed_frame |
|||
removed_frame = None |
|||
self.deque_frame.append(value) #deque 满了以后会把前面的数据移除 |
|||
|
|||
#拷贝一份数据 |
|||
def copy_deque(self): |
|||
return copy.deepcopy(self.deque_frame) |
|||
|
|||
#获取最后一帧图片 |
|||
def get_last_frame(self): |
|||
if self.b_model: |
|||
# with self.lock: |
|||
# frame = self.last_frame |
|||
# return frame |
|||
|
|||
# try: |
|||
# frame = self.frame_queue.get(timeout=0.3) #web传输没有做帧率控制了,可以超时时间长一点 |
|||
# except queue.Empty: |
|||
# self.logger.debug(f"{self.channel_id}--web--获取分析画面失败,队列空") |
|||
# return None |
|||
|
|||
frame = self.frame_queue.mypopleft() |
|||
return frame |
|||
else: #如果没有运行,直接从cap获取画面 |
|||
if self.cap: |
|||
ret, frame = self.cap.read() # 除了第一帧,其它应该都是有画面的 |
|||
if not ret: |
|||
self.logger.debug(f"{self.channel_id}--web--获取原画失败,队列空") |
|||
return None |
|||
ret,buffer_bgr_webp = self._encode_frame(frame) |
|||
return buffer_bgr_webp |
|||
|
|||
# frame_bgr_webp = self.encode_frame_to_flv(frame) |
|||
# return frame_bgr_webp |
|||
return None |
|||
|
|||
    def encode_frame_to_flv(self,frame):
        """Encode one raw BGR frame to FLV/H.264 bytes via ffmpeg.

        NOTE(review): this spawns a fresh ffmpeg process for every frame,
        which is very expensive; it is currently only referenced from
        commented-out code paths (see get_last_frame / _verify).

        :param frame: BGR ndarray of shape (height, width, 3)
        :return: encoded bytes, or None on any failure
        """
        try:
            # One-shot ffmpeg pipeline: raw BGR in on stdin, FLV out on stdout.
            process = (
                ffmpeg
                .input('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{frame.shape[1]}x{frame.shape[0]}')
                .output('pipe:', format='flv',vcodec='libx264')
                .run_async(pipe_stdin=True, pipe_stdout=True, pipe_stderr=True)
            )
            out, err = process.communicate(input=frame.tobytes())

            if process.returncode != 0:
                raise RuntimeError(f"FFmpeg encoding failed: {err.decode('utf-8')}")

            return out

        except Exception as e:
            print(f"Error during frame encoding: {e}")
            return None
|||
|
|||
def update_last_frame(self,buffer): |
|||
if buffer: |
|||
self.frame_queue.myappend(buffer) |
|||
|
|||
# with self.lock: |
|||
# self.last_frame = None |
|||
# self.last_frame = buffer |
|||
|
|||
# if self.frame_queue.full(): |
|||
# try: |
|||
# print("channel--丢帧") |
|||
# self.frame_queue.get(timeout=0.01) |
|||
# except queue.Empty: #为空不处理 |
|||
# pass |
|||
# self.frame_queue.put(buffer) |
|||
|
|||
# try: |
|||
# self.frame_queue.put(buffer,timeout=0.05) |
|||
# except queue.Full: |
|||
# self.logger.debug(f"{self.channel_id}分析画面队列满,插入失败") |
|||
# pass |
|||
|
|||
#------------h264编码相关--------------- |
|||
def start_h264_encoder(self,width, height): #宽高一样,初步定全进程一个 |
|||
process = subprocess.Popen( |
|||
['ffmpeg', |
|||
'-f', 'rawvideo', |
|||
'-pix_fmt', 'bgr24', |
|||
'-s', f'{width}x{height}', |
|||
'-i', '-', # Take input from stdin |
|||
'-an', # No audio |
|||
'-vcodec', 'h264_ascend', |
|||
'-preset', 'ultrafast', |
|||
'-f', 'h264', # Output format H.264 |
|||
'-'], |
|||
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE |
|||
) |
|||
return process |
|||
|
|||
def encode_frame_h264(self, frame): |
|||
if self.process.poll() is not None: |
|||
raise RuntimeError("FFmpeg process has exited unexpectedly.") |
|||
# Write frame to stdin of the FFmpeg process |
|||
try: |
|||
self.process.stdin.write(frame.tobytes()) |
|||
except Exception as e: |
|||
raise RuntimeError(f"Failed to write frame to FFmpeg: {e}") |
|||
|
|||
# Capture the encoded output |
|||
buffer_size = 1024 * 10 # Adjust this based on the size of the encoded frame |
|||
encoded_frame = bytearray() |
|||
|
|||
while True: |
|||
chunk = self.process.stdout.read(buffer_size) |
|||
if not chunk: |
|||
break |
|||
encoded_frame.extend(chunk) |
|||
|
|||
if not encoded_frame: |
|||
raise RuntimeError("No encoded data received from FFmpeg.") |
|||
|
|||
# Optional: Check for errors in stderr |
|||
# stderr_output = self.process.stderr.read() |
|||
# if "error" in stderr_output.lower(): |
|||
# raise RuntimeError(f"FFmpeg error: {stderr_output}") |
|||
|
|||
return bytes(encoded_frame) |
|||
|
|||
def _encode_frame(self,frame,itype=0): |
|||
ret = False |
|||
buffer_bgr_webp = None |
|||
if itype == 0: #jpg |
|||
ret, frame_bgr_webp = cv2.imencode('.jpg', frame, self.encode_param) |
|||
if ret: |
|||
buffer_bgr_webp = frame_bgr_webp.tobytes() |
|||
elif itype == 1: #H264 |
|||
try: |
|||
buffer_bgr_webp = self.encode_frame_h264(frame) |
|||
ret = True |
|||
except Exception as e: |
|||
print(e) |
|||
else: |
|||
print("错误的参数!!") |
|||
return ret,buffer_bgr_webp |
|||
|
|||
#帧序列号自增 一个线程中处理,不用加锁 |
|||
def increment_counter(self): |
|||
self.counter += 1 |
|||
if self.counter > self.icount_max: |
|||
self.counter = 0 |
|||
|
|||
    def get_counter(self):
        # Current frame sequence number (maintained by increment_counter).
        return self.counter
|||
|
|||
def _start_cap_th(self,source,type=1): |
|||
'''开始cap采集线程 |
|||
type = 打开摄像头 0--USB摄像头,1-RTSP,2-海康SDK |
|||
''' |
|||
ret = False |
|||
if self.cap: |
|||
self.cap.release() |
|||
self.cap = None |
|||
self.cap = VideoCaptureWithFPS(source,type) |
|||
if self.cap: |
|||
ret = True |
|||
return ret |
|||
|
|||
def _stop_cap_th(self): |
|||
'''停止cap采集线程 |
|||
重要约束:停止cap线程前,必须先停止model线程 |
|||
''' |
|||
if self.b_model: |
|||
self.logger.error("停止采集线程前,请先停止model线程") |
|||
return False |
|||
else: |
|||
if self.cap: |
|||
self.cap.release() |
|||
self.cap = None |
|||
return True #一般不会没有cap |
|||
|
|||
    def _pre_work_th(self,schedule):
        '''Pre-processing thread (standalone-model mode): one per channel.

        Reads frames from the capture at the configured rate, runs the
        model's pre-processing and feeds the shared model thread's input
        queue. Outside the armed schedule, frames are JPEG-encoded and
        published directly.

        :param schedule: 7x24 matrix; schedule[weekday][hour] == 1 means armed
        '''
        if not self.cap:
            self.logger.error("采集线程未正常启动,不进行工作")
            return
        # Wait until the shared model thread has finished starting up.
        while self.model_node.model_th_status == 0:
            time.sleep(1)
        if self.model_node.model_th_status == 1:
            # Main work loop.
            last_frame_time = time.time()  # initialise the read-frame timestamp
            self.per_status = True
            self.b_model = True
            while self.bool_run:  # run flag; written by the main thread, read here -- at worst one round late, so no lock (to be verified)
                # Frame-rate throttling.
                current_time = time.time()
                elapsed_time = current_time - last_frame_time
                if elapsed_time < self.frame_interval:
                    time.sleep(self.frame_interval - elapsed_time)  # sleep off the remainder of the interval
                last_frame_time = time.time()
                # ********* fetch a picture *********
                ret, frame = self.cap.read()  # everything after the first frame should have a picture
                if not ret:
                    # self.logger.debug(f"{self.channel_id}--model--获取cap画面失败,队列空")
                    continue  # nothing read -- try again
                # Check the detection schedule: are we inside the armed window?
                now = datetime.datetime.now()
                weekday = now.weekday()  # Monday is 0, Sunday is 6
                hour = now.hour
                if schedule[weekday][hour] == 1:
                    # Image pre-processing, then hand off to the shared model thread.
                    img,scale_ratio, pad_size = self.model_node.model.prework(frame)
                    indata = ModelinData(self.channel_id,img,frame,scale_ratio, pad_size)
                    self.model_node.in_mq.myappend(indata)
                else:  # Outside the schedule: skip inference, publish the raw picture.
                    # NOTE(review): in this branch the post-thread's `result`
                    # window receives no entries -- confirm that is intended.
                    ret, frame_bgr_webp = cv2.imencode('.jpg', frame,self.encode_param)
                    if not ret:
                        buffer_bgr_webp = None
                    else:
                        buffer_bgr_webp = frame_bgr_webp.tobytes()
                    self.update_last_frame(buffer_bgr_webp)
        else:
            self.logger.error("模型线程为启动成功,不进行工作")
            return
        self.b_model = False
        self.per_status = False
|||
|
|||
def _post_work_th(self,duration_time,proportion,verify_rate,warn_interval,model_name,check_area,polygon,conf_threshold,iou_thres): |
|||
# 初始化业务数据 |
|||
result = [0 for _ in range(duration_time * verify_rate)] # 初始化时间*验证帧率数量的结果list |
|||
warn_last_time = time.time() |
|||
while self.bool_run: |
|||
out_data = self.out_mq.mypopleft() #(image,scale_ratio, pad_size,outputs): |
|||
if not out_data: |
|||
time.sleep(0.1) |
|||
continue |
|||
#开始后处理 |
|||
bwarn, warn_text = self.model_node.model.postwork(out_data.image,out_data.outputs,out_data.scale_ratio,out_data.pad_size, |
|||
check_area,polygon,conf_threshold,iou_thres) |
|||
# 对识别结果要部要进行处理 |
|||
if bwarn: |
|||
# 绘制报警文本 |
|||
cv2.putText(out_data.image, warn_text, (50, 50), |
|||
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) |
|||
result.append(1) # 要验证数组修改,是地址修改吗? |
|||
else: # 没有产生报警也需要记录,统一计算占比 |
|||
result.append(0) |
|||
#分析画面保存 |
|||
ret, frame_bgr_webp = cv2.imencode('.jpg', out_data.image,self.encode_param) |
|||
buffer_bgr_webp = None |
|||
if ret: |
|||
buffer_bgr_webp = frame_bgr_webp.tobytes() |
|||
# 分析图片放入缓冲区内存中 |
|||
self.add_deque(out_data.image) # 缓冲区大小由maxlen控制 超上限后,删除最前的数据 |
|||
# 分析画面一直更新最新帧,提供网页端显示 |
|||
self.update_last_frame(buffer_bgr_webp) |
|||
|
|||
if bwarn: |
|||
# 验证result -是否触发报警要求 --遍历每个模型执行的result |
|||
count_one = float(sum(result)) # 1,0 把1累加的和就是1的数量 |
|||
ratio_of_ones = count_one / len(result) |
|||
# self.logger.debug(result) |
|||
if ratio_of_ones >= proportion: # 触发报警 |
|||
# 基于时间间隔判断 |
|||
current_time = time.time() |
|||
elapsed_time = current_time - warn_last_time |
|||
if elapsed_time < warn_interval: |
|||
continue |
|||
warn_last_time = current_time |
|||
# 处理报警 |
|||
warn_data = WarnData() |
|||
warn_data.model_name = model_name |
|||
warn_data.warn_text = warn_text |
|||
warn_data.img_buffer = self.copy_deque() # 深度复制缓冲区 |
|||
warn_data.width = self.cap.width |
|||
warn_data.height = self.cap.height |
|||
warn_data.channel_id = self.channel_id |
|||
self.warnM.add_warn_data(warn_data) |
|||
|
|||
# 结果记录要清空 |
|||
for i in range(len(result)): |
|||
result[i] = 0 |
|||
|
|||
    def _verify(self,frame,model,model_data,schedule,result,isdraw=1):
        '''Run one detection pass for this channel's model.

        :param frame: BGR frame from the capture
        :param model: dynamically loaded model instance (may be None)
        :param model_data: model row (thresholds, polygon, ...)
        :param schedule: 7x24 arming matrix
        :param result: sliding 0/1 window, mutated in place (fixed length)
        :param isdraw: whether the model should draw its detections
        :return: (encoded JPEG bytes or None, annotated image, warn text)
        '''
        #img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = frame
        # Check the detection schedule: are we inside the armed window?
        now = datetime.datetime.now()
        weekday = now.weekday()  # Monday is 0, Sunday is 6
        hour = now.hour
        result.pop(0)  # keep the window a fixed length -- drop the oldest verdict first

        warntext = ""
        if model and schedule[weekday][hour] == 1:  # outside the schedule: skip inference, just return the picture
            # Run the model; the pass/fail criteria live inside the plugin. ---- *********
            bwarn, warntext = model.verify(img, model_data,isdraw)  # **************** important
            # Handle the verdict.
            if bwarn:
                # Draw the alarm text onto the frame.
                cv2.putText(img, warntext, (50, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                result.append(1)
            else:  # record misses too, so the ratio is over all frames
                warntext = ""
                result.append(0)
        else:
            result.append(0)

        # (AVFrame conversion not needed for now -- 2024-7-5)

        # Return the processed picture, BGR.
        #img_bgr_ndarray = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)

        ret,buffer_bgr_webp = self._encode_frame(img)

        #buffer_bgr_webp = self.encode_frame_to_flv(img)
        return buffer_bgr_webp,img,warntext
|||
|
|||
    def _dowork_thread(self,channel_id,model_data,schedule,verify_rate,warn_interval):
        '''Combined worker thread (one per channel): loads the model plugin,
        reads frames at the configured rate, runs _verify and raises alarms.

        :param channel_id: this channel's ID
        :param model_data: model row; [3]=duration, [4]=proportion,
            [5]=model path, [7]=model name, [8]/[9]=conf/IOU thresholds
        :param schedule: 7x24 arming matrix
        :param verify_rate: detection frames per second
        :param warn_interval: minimum seconds between two alarms
        '''
        if not self.cap:
            self.logger.error("采集线程未正常启动,退出model线程")
            return
        # Dynamically load the model plugin (.py).
        model = self._import_model(str(channel_id), model_data[5], model_data[8], model_data[9])
        if not model:
            self.logger.error("自定义模型文件加载失败,退出model线程")
            return
        # Initialise model runtime resources.
        context = None
        device_id = myCongif.get_data("device_id")
        if self.model_platform == "acl":  # ACL must be initialised inside the thread
            context = ACLModeManger.th_inti_acl(device_id)  # create the context
            # Load and initialise the offline model file (.om).
            ret = model.init_acl_resource()
            if not ret:
                print("初始化模型资源出错,退出线程!")
                return
        # Initialise business data.
        result = [0 for _ in range(model_data[3] * verify_rate)]  # sliding 0/1 window: seconds * rate entries
        proportion = model_data[4]  # hit ratio that triggers an alarm
        warn_last_time = time.time()
        #warn_save_count = 0

        # Main work loop.
        last_frame_time = time.time()  # initialise the read-frame timestamp
        self.b_model = True
        while self.bool_run:  # run flag; written by the main thread, read here -- at worst one round late, so no lock (to be verified)
            # Frame-rate throttling.
            current_time = time.time()
            elapsed_time = current_time - last_frame_time
            if elapsed_time < self.frame_interval:
                time.sleep(self.frame_interval - elapsed_time)  # sleep off the remainder of the interval
            last_frame_time = time.time()
            # ********* fetch a picture *********
            ret,frame = self.cap.read()  # everything after the first frame should have a picture
            if not ret:
                #self.logger.debug(f"{self.channel_id}--model--获取cap画面失败,队列空")
                continue  # nothing read -- try again

            # Run inference on this frame.
            buffer_bgr_webp,img_bgr_ndarray,warn_text = self._verify(frame,model,model_data,schedule,result)

            # Keep the annotated frame for alarm recordings (maxlen-bounded).
            self.add_deque(img_bgr_ndarray)

            # Publish the newest annotated frame for the web page.
            self.update_last_frame(buffer_bgr_webp)

            if warn_text:
                # Does the sliding window satisfy the alarm criterion?
                count_one = float(sum(result))  # entries are 0/1, so the sum is the hit count
                ratio_of_ones = count_one / len(result)
                #self.logger.debug(result)
                if ratio_of_ones >= proportion:  # alarm triggered
                    # Rate-limit alarms by warn_interval.
                    current_time = time.time()
                    elapsed_time = current_time - warn_last_time
                    if elapsed_time < warn_interval:
                        continue
                    warn_last_time = current_time
                    # Emit the alarm.
                    warn_data = WarnData()
                    warn_data.model_name = model_data[7]
                    warn_data.warn_text = warn_text
                    warn_data.img_buffer = self.copy_deque()  # deep copy of the recording buffer
                    warn_data.width = self.cap.width
                    warn_data.height = self.cap.height
                    warn_data.channel_id = channel_id
                    self.warnM.add_warn_data(warn_data)

                    # Reset the window after an alarm.
                    for i in range(len(result)):
                        result[i] = 0

            # end_time = time.time()
            # print(f"Processing time: {end_time - start_time} seconds")
            # Local display -- for testing only:
            # if channel_id == 2:
            #     cv2.imshow(str(channel_id), img)
            #     if cv2.waitKey(1) & 0xFF == ord('q'):
            #         break

        # Thread shutdown.
        print("开始结束工作线程")
        self.b_model = False
        # De-initialise.
        if self.model_platform == "acl":
            try:
                model.release()  # release model resources
                # Drop the model object.
                del model
                # Release the context (may leak if the thread died abnormally).
                if context:
                    ACLModeManger.th_del_acl(context)
            except Exception as e:
                print(e)
        #cv2.destroyAllWindows()
        print("线程结束!!!!")
|||
|
|||
    # 2024-9-9: also supports the standalone model thread. Decided by
    # self.model_node -- None: one thread per channel; not None: shared thread.
    def _start_model_th(self,model_data,schedule,type=1):
        """Start the model-side threads for this channel.

        In shared-model mode this starts a pre-processing thread and a
        post-processing thread and registers this channel's out_mq with the
        ModelNode; otherwise a single combined worker thread is started.
        """
        verify_rate = myCongif.get_data("verify_rate")
        warn_interval = myCongif.get_data("warn_interval")
        self.bool_run = True
        if self.model_node:  # shared model: need a pre-thread and a post-thread
            # Start the post-processing thread.
            self.post_th = threading.Thread(target=self._post_work_th,
                                            args=(model_data[3],model_data[4],verify_rate,warn_interval,model_data[7],
                                                  model_data[1],model_data[2],model_data[8],model_data[9]))
            self.post_th.start()
            # Start (or attach to) the shared model thread; refcount +1.
            self.model_node.start_model_th(self.channel_id,self.out_mq)
            # Start the pre-processing thread.
            self.per_th = threading.Thread(target=self._pre_work_th,args=(schedule,))
            self.per_th.start()

        else:
            self.work_th = threading.Thread(target=self._dowork_thread,
                                            args=(self.channel_id, model_data, schedule, verify_rate,
                                                  warn_interval))  # one thread per video channel
            self.work_th.start()
|||
|
|||
    def _stop_model_th(self):
        """Stop the model-side threads.

        Shared-model mode: stop the pre- and post-processing threads and
        detach from the shared model thread. Otherwise join the single
        worker thread.
        """
        if self.model_node:  # standalone mode: stop pre- and post-threads
            self.bool_run = False
            # Stop the pre-processing thread.
            if self.per_th:
                self.per_th.join()
                self.per_th = None
            # Detach from the shared model thread; refcount -1.
            self.model_node.stop_model_th(self.channel_id)
            # Stop the post-processing thread.
            if self.post_th:
                self.post_th.join()
                self.post_th = None
            self.out_mq.myclear()  # drop unprocessed results from the post MQ
        else:
            if self.work_th:
                if self.b_model:
                    self.bool_run = False
                    self.work_th.join()  # the thread clears b_model on exit
                    self.logger.debug(f"{self.channel_id}停止工作线程")
                self.work_th = None
|||
|
|||
def _import_model(self,model_name,model_path,threshold,iou_thres): |
|||
''' |
|||
根据路径,动态导入模块 |
|||
:param model_name: 模块名称 --用通道ID代替 |
|||
:param model_path: 模块路径 |
|||
:param threshold: 置信阈值 |
|||
:param iou_thres: iou阈值 |
|||
:return: |
|||
''' |
|||
try: |
|||
module_path = model_path.replace("/", ".").rsplit(".", 1)[0] |
|||
print(module_path) |
|||
# 动态导入模块 |
|||
module = importlib.import_module(module_path) |
|||
# 从模块中获取指定的类 |
|||
Model = getattr(module, "Model") |
|||
# 使用 Model 类 |
|||
model_instance = Model(model_path,threshold,iou_thres) |
|||
return model_instance |
|||
except ModuleNotFoundError as e: |
|||
print(f"Module not found: {e}") |
|||
return None |
|||
except AttributeError as e: |
|||
print(f"Class not found in module: {e}") |
|||
return None |
|||
except Exception as e: |
|||
print(f"An unexpected error occurred: {e}") |
|||
return None |
|||
# if os.path.exists(model_path): |
|||
# module_spec = importlib.util.spec_from_file_location(model_name, model_path) |
|||
# if module_spec is None: |
|||
# self.logger.error(f"{model_path} 加载错误") |
|||
# return None |
|||
# module = importlib.util.module_from_spec(module_spec) |
|||
# start_time = time.time() |
|||
# print(f"通道{self.channel_id},开始exec_module自定义模型文件") |
|||
# module_spec.loader.exec_module(module) |
|||
# end_time = time.time() |
|||
# print(f"通道{self.channel_id},完成exec_module模型文件实例化耗时{end_time - start_time}") |
|||
# |
|||
# try: |
|||
# md = getattr(module, "Model")(model_path,threshold,iou_thres) #实例化类 |
|||
# except Exception as e: |
|||
# self.logger.error(f"{model_path} 实例化错误,退出模型线程") |
|||
# return None |
|||
# |
|||
# if not isinstance(md, ModelBase): |
|||
# self.logger.error("{} not zf_model".format(md)) |
|||
# return None |
|||
# else: |
|||
# self.logger.error("{}文件不存在".format(model_path)) |
|||
# return None |
|||
# self.logger.debug(f"{model_path} 加载成功!!!!") |
|||
# return md |
|||
|
|||
def start_work(self,cap_data,model_data,schedule,type,model_Node=None): |
|||
''' |
|||
开始工作线程,包括视频通道采集和模型处理 |
|||
:param cap_data: [source,type] |
|||
:param model_data: 跟通道关联的模型数据 |
|||
strsql = ( |
|||
f"select t1.model_id,t1.check_area,t1.polygon ,t2.duration_time,t2.proportion,t2.model_path,t1.ID," |
|||
f"t2.model_name,t1.conf_thres,t1.iou_thres " |
|||
f"from channel2model t1 left join model t2 on t1.model_id = t2.ID where t1.channel_id ={channel_id};") |
|||
:param schedule |
|||
:param type: 0-启动所有线程,1-只启动cap采集线程,2-只启动model模型处理线程 |
|||
:return: True,False |
|||
''' |
|||
ret = False |
|||
self.model_node = model_Node |
|||
if type==0: |
|||
self._start_cap_th(cap_data[0],cap_data[1]) #先cap,再model |
|||
self._start_model_th(model_data,schedule) |
|||
ret = True |
|||
elif type == 1: |
|||
self._start_cap_th(cap_data[0],cap_data[1]) |
|||
ret = True |
|||
elif type == 2: |
|||
self._start_model_th(model_data,schedule) |
|||
ret = True |
|||
else: |
|||
self.logger.error("暂时还不支持该类型工作!") |
|||
return ret |
|||
|
|||
def stop_work(self,type=0): |
|||
''' |
|||
清空数据,停止工作线程(若有 ,并删除deque 和 last_frame) |
|||
:param type: 0-停止所有线程,1-只停止cap采集线程,2-只停止model模型处理线程 |
|||
:return: True,False |
|||
''' |
|||
ret = False |
|||
if type == 0: |
|||
self._stop_model_th() |
|||
self._stop_cap_th() |
|||
ret = True |
|||
elif type == 1: |
|||
self._stop_cap_th() |
|||
ret = True |
|||
elif type == 2: |
|||
self.logger.debug("单独停止工作线程") |
|||
self._stop_model_th() |
|||
ret = True |
|||
else: |
|||
self.logger.error("暂时还不支持该类型工作!") |
|||
return ret |
@ -0,0 +1,133 @@ |
|||
import threading |
|||
import importlib.util |
|||
import time |
|||
from myutils.MyDeque import MyDeque |
|||
from myutils.ConfigManager import myCongif |
|||
from myutils.MyLogger_logger import LogHandler |
|||
from core.ACLModelManager import ACLModeManger |
|||
from core.DataStruct import ModelinData,ModeloutData |
|||
from threading import Lock |
|||
|
|||
class ModelNode:
    # One shared inference thread per (device, model_path): many channels push
    # ModelinData into in_mq and each registered channel receives its results
    # on the out_mq it supplied in start_model_th.
    def __init__(self,device,model_path):
        self.device = device
        self.model_path = model_path
        self.model = None  # model object
        self.model_th = None  # model thread handle
        self.brun = True  # model-thread run flag
        self.model_th_status = 0  # model thread state: 0 - initial, 1 - started OK, 2 - exited on error
        self.in_mq = MyDeque(50)  # shared input queue
        self.channel_list = {}  # channel_id -> out_mq -- must stay thread-safe
        self.clist_Lock = Lock()  # guards channel_list
        self.ch_count = 0  # number of channels currently attached
        self.count_Lock = Lock()  # guards ch_count
        self.model_platform = myCongif.get_data("model_platform")
        self.logger = LogHandler().get_logger("ModelNode")
|||
|
|||
|
|||
|
|||
    def __del__(self):
        # Nothing to release here; thread/model teardown is done explicitly
        # in stop_model_th / _model_th.
        pass
|||
|
|||
def _reset(self): #重置数据 |
|||
#self.model_th_status = 0 # 模型线程运行状态 0--初始状态,1-线程执行成功,2-线程退出 |
|||
self.in_mq.myclear() |
|||
|
|||
def _import_model(self,model_path,threshold=0.5,iou_thres=0.5): |
|||
''' |
|||
根据路径,动态导入模块 |
|||
:param model_path: 模块路径 |
|||
:param threshold: 置信阈值 |
|||
:param iou_thres: iou阈值 |
|||
:return: |
|||
''' |
|||
try: |
|||
module_path = model_path.replace("/", ".").rsplit(".", 1)[0] |
|||
print(module_path) |
|||
# 动态导入模块 |
|||
module = importlib.import_module(module_path) |
|||
# 从模块中获取指定的类 |
|||
Model = getattr(module, "Model") |
|||
# 使用 Model 类 |
|||
model_instance = Model(model_path,threshold,iou_thres) |
|||
return model_instance |
|||
except ModuleNotFoundError as e: |
|||
print(f"Module not found: {e}") |
|||
return None |
|||
except AttributeError as e: |
|||
print(f"Class not found in module: {e}") |
|||
return None |
|||
except Exception as e: |
|||
print(f"An unexpected error occurred: {e}") |
|||
return None |
|||
|
|||
    def _model_th(self):
        """Shared inference thread body.

        Loads the model plugin, then loops: pull ModelinData from in_mq, run
        inference and route the ModeloutData to the owning channel's out_mq,
        until brun is cleared. model_th_status reflects the lifecycle
        (0 initial, 1 running OK, 2 exited on error).
        """
        # Dynamically load the model plugin (.py).
        self.model = self._import_model(self.model_path)
        if not self.model:
            self.logger.error("自定义模型文件加载失败,退出model线程")
            self.model_th_status = 2
            return
        # Initialise model runtime resources.
        context = None
        if self.model_platform == "acl":  # ACL must be initialised inside the thread
            context = ACLModeManger.th_inti_acl(self.device)  # create the context
            # Load and initialise the offline model file (.om).
            ret = self.model.init_acl_resource()
            if not ret:
                print("初始化模型资源出错,退出线程!")
                self.model_th_status = 2
                return
        # Work loop.
        self.model_th_status = 1
        while self.brun:
            inData = self.in_mq.mypopleft()  # returns None when empty; holds (channel_id, img, image, scale_ratio, pad_size)
            if inData:
                outputs = self.model.execute([inData.img,])  # build input, run the model -- None on failure
                outdata = ModeloutData(inData.image,inData.scale_ratio,inData.pad_size,outputs)
                del inData.img
                with self.clist_Lock:
                    # Route the result to the owning channel, if still registered.
                    if inData.channel_id in self.channel_list:
                        self.channel_list[inData.channel_id].myappend(outdata)
            else:
                time.sleep(0.05)

        # Thread exit: release resources.
        self.model_th_status = 0
        self._reset()
        # De-initialise.
        if self.model_platform == "acl":
            try:
                self.model.release()  # release model resources
                # Drop the model object.
                del self.model
                # Release the context (may leak if the thread died abnormally).
                if context:
                    ACLModeManger.th_del_acl(context)
            except Exception as e:
                print(e)
|||
|
|||
def start_model_th(self,channel_id,out_mq): |
|||
with self.count_Lock: |
|||
with self.clist_Lock: |
|||
if channel_id in self.channel_list: |
|||
return #这个可以删除老的,新增新的 |
|||
self.channel_list[channel_id] = out_mq |
|||
if self.ch_count == 0: #第一次启动线程 |
|||
self.brun = True |
|||
self.model_th = threading.Thread(target=self._model_th) |
|||
self.model_th.start() |
|||
self.ch_count += 1 #有通道调用一次就加一 |
|||
|
|||
def stop_model_th(self,channel_id): |
|||
with self.count_Lock: |
|||
with self.clist_Lock: |
|||
if channel_id in self.channel_list: |
|||
del self.channel_list[channel_id] |
|||
self.ch_count -= 1 |
|||
if self.ch_count == 0: #所有通道结束 |
|||
self.brun = False |
|||
self.model_th.join() |
|||
self.model_th = None |
|||
|
@ -0,0 +1,9 @@ |
|||
# COCO dataset class names (80 categories). The list index is the class id
# predicted by the detection models, so the order must not change.
CLASSES = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
           'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
           'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
           'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
           'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
           'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
           'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
           'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
           'hair drier', 'toothbrush']
@ -0,0 +1,107 @@ |
|||
import cv2 |
|||
import numpy as np |
|||
from model.base_model.ascnedcl.classes import CLASSES |
|||
|
|||
|
|||
def letterbox(img, new_shape=(640, 640), auto=False, scaleFill=False, scaleup=True, center=True, stride=32):
    """Resize and pad an image to new_shape while keeping aspect ratio
    (the standard YOLO letterbox transform).

    :param img: HxWxC image
    :param new_shape: target (height, width), or a single int for a square
    :param auto: pad only up to a multiple of *stride* (minimum rectangle)
    :param scaleFill: stretch to new_shape with no padding
    :param scaleup: allow upscaling (set False for better val mAP)
    :param center: split the padding between both sides instead of bottom/right
    :param stride: stride used when *auto* is set
    :return: (padded image, (w_ratio, h_ratio), dw, dh) -- dw/dh are the
        per-side padding before rounding; the inverse mapping in
        non_max_suppression_v10 must use the same rounding
    """
    # Resize and pad image while meeting stride-multiple constraints
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better val mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    if center:
        dw /= 2  # divide padding into 2 sides
        dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    # The -0.1/+0.1 offsets bias the rounding so left+right (top+bottom)
    # borders sum to the exact total padding.
    top, bottom = int(round(dh - 0.1)) if center else 0, int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)) if center else 0, int(round(dw + 0.1))
    img = cv2.copyMakeBorder(
        img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114)
    )  # add border

    return img, ratio, dw, dh
|||
|
|||
def non_max_suppression_v10(prediction,conf_thres,ratio,dw,dh):
    """Filter YOLOv10 detections by confidence and map the boxes back to the
    original image.

    YOLOv10 output is already NMS-free, so despite the historical name only
    confidence thresholding happens here -- no IoU suppression.

    :param prediction: (N, 6) array of [x1, y1, x2, y2, conf, class]
    :param conf_thres: confidence threshold
    :param ratio: (w_ratio, h_ratio) scale factors produced by letterbox()
    :param dw: horizontal letterbox padding per side (before rounding)
    :param dh: vertical letterbox padding per side (before rounding)
    :return: list of [xmin, ymin, xmax, ymax, confidence, label]
    """
    # The padding offsets are loop-invariant: compute them once. The
    # -0.1/+0.1 rounding mirrors the border computation in letterbox().
    pad_left = int(round(dw - 0.1))
    pad_top = int(round(dh - 0.1))
    pad_right = int(round(dw + 0.1))
    pad_bottom = int(round(dh + 0.1))

    result = []
    for data in prediction:
        # Class confidence of this candidate box.
        confidence = data[4]
        if confidence > conf_thres:
            label = int(data[5])
            # Undo letterbox padding and scaling to get original-image coords.
            xmin = int((data[0] - pad_left) / ratio[0])
            ymin = int((data[1] - pad_top) / ratio[1])
            xmax = int((data[2] - pad_right) / ratio[0])
            ymax = int((data[3] - pad_bottom) / ratio[1])
            result.append([xmin, ymin, xmax, ymax, confidence, label])
    return result
|||
|
|||
|
|||
def draw_bbox_old(bbox, img0, color, wt):
    """Draw detections onto *img0* (legacy helper).

    Bug fixes:
    * the class index in the summary string is now cast to int --
      ``CLASSES[bbox[idx][5]]`` raised TypeError because a numpy float is
      not a valid list index;
    * the score filter was ``float(a < b)`` (truthiness of 1.0/0.0), now
      written as the intended plain comparison.

    :param bbox: (N, 6) array of [x1, y1, x2, y2, score, class_id]
    :param img0: BGR image, drawn on in place and returned
    :param color: rectangle colour
    :param wt: rectangle line width
    :return: the annotated image
    """
    det_result_str = ''
    for idx, class_id in enumerate(bbox[:, 5]):
        if float(bbox[idx][4]) < 0.05:  # skip near-zero scores
            continue
        img0 = cv2.rectangle(img0, (int(bbox[idx][0]), int(bbox[idx][1])), (int(bbox[idx][2]), int(bbox[idx][3])), color, wt)
        img0 = cv2.putText(img0, str(idx) + ' ' + CLASSES[int(class_id)], (int(bbox[idx][0]), int(bbox[idx][1] + 16)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        img0 = cv2.putText(img0, '{:.4f}'.format(bbox[idx][4]), (int(bbox[idx][0]), int(bbox[idx][1] + 32)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        det_result_str += '{} {} {} {} {} {}\n'.format(CLASSES[int(bbox[idx][5])], str(bbox[idx][4]), bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3])
    return img0
|||
|
|||
# Fixed per-class colour palette, seeded so that a given class keeps the same
# colour across calls/frames. (Previously a fresh random palette was generated
# on every draw_box call, making box colours flicker frame to frame.)
_COLOR_PALETTE = np.random.RandomState(0).uniform(0, 255, size=(len(CLASSES), 3))


def draw_box(img,
             box,  # [xmin, ymin, xmax, ymax]
             score,
             class_id):
    '''Draw one labelled bounding box on the image.

    :param img: BGR image, drawn on in place and returned
    :param box: [xmin, ymin, xmax, ymax]
    :param score: detection confidence, shown next to the class name
    :param class_id: index into CLASSES (also selects the colour)
    :return: the annotated image
    '''
    # Retrieve the stable color for the class ID
    color = _COLOR_PALETTE[class_id]

    # Draw the bounding box on the image
    cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)

    # Create the label text with class name and score
    label = f'{CLASSES[class_id]}: {score:.2f}'

    # Calculate the dimensions of the label text
    (label_width, label_height), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)

    # Place the label above the box when there is room, otherwise below
    label_x = box[0]
    label_y = box[1] - 10 if box[1] - 10 > label_height else box[1] + 10

    # Draw a filled rectangle as the background for the label text
    cv2.rectangle(
        img,
        (int(label_x), int(label_y - label_height)),
        (int(label_x + label_width), int(label_y + label_height)),
        color,
        cv2.FILLED,
    )

    # Draw the label text on the image
    cv2.putText(img, label, (int(label_x), int(label_y)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)

    return img
Loading…
Reference in new issue