
Finished developing the basic functionality on the ACL board, but the web page's video display has very high latency. The board side uses pymemcache to store sessions.

master
张龙 10 months ago
parent
commit
2afdb42e92
  1. 2
      .idea/FristProject.iml
  2. 15
      .idea/deployment.xml
  3. 2
      .idea/misc.xml
  4. 2
      config.yaml
  5. 240
      core/ModelManager.py
  6. 272
      core/Test.py
  7. 49
      core/templates/index.html
  8. 179
      model/plugins/ModelBase.py
  9. 20
      model/plugins/RYRQ/RYRQ_Model.py
  10. 82
      model/plugins/RYRQ_ACL/RYRQ_Model_ACL.py
  11. 3
      run.py
  12. 31
      web/API/channel.py
  13. 8
      web/API/user.py
  14. 42
      web/API/viedo.py
  15. 33
      web/__init__.py
  16. 4
      web/main/routes.py
  17. 12
      web/main/templates/index.html
  18. 15
      web/main/templates/实时预览.html
  19. 1
      web/main/templates/通道管理.html
  20. BIN
      zfbox.db

2
.idea/FristProject.iml

@@ -2,7 +2,7 @@
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="jdk" jdkName="Remote Python 3.9.2 (sftp://root@192.168.3.48:22/usr/local/miniconda3/bin/python)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>

15
.idea/deployment.xml

@@ -0,0 +1,15 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="PublishConfigData" autoUpload="Always" serverName="root@192.168.3.48:22" remoteFilesAllowedToDisappearOnAutoupload="false">
<serverData>
<paths name="root@192.168.3.48:22">
<serverdata>
<mappings>
<mapping deploy="/mnt/zfbox" local="$PROJECT_DIR$" />
</mappings>
</serverdata>
</paths>
</serverData>
<option name="myAutoUpload" value="ALWAYS" />
</component>
</project>

2
.idea/misc.xml

@@ -3,7 +3,7 @@
<component name="Black">
<option name="sdkName" value="PyTorch" />
</component>
<component name="ProjectRootManager" version="2" project-jdk-name="PyTorch" project-jdk-type="Python SDK" />
<component name="ProjectRootManager" version="2" project-jdk-name="Remote Python 3.9.2 (sftp://root@192.168.3.48:22/usr/local/miniconda3/bin/python)" project-jdk-type="Python SDK" />
<component name="PyCharmProfessionalAdvertiser">
<option name="shown" value="true" />
</component>

2
config.yaml

@@ -28,7 +28,7 @@ ALLOWED_EXTENSIONS : {'zip'}
RTSP_Check_Time : 600 #10 minutes
#model
model_platform : cpu #acl gpu
model_platform : acl #acl gpu cpu
weight_path: /model/weights
yolov5_path: D:/Project/FristProject/model/base_model/yolov5 #absolute path; must be changed for different deployment environments!
cap_sleep_time: 300 #5 minutes
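For context, keys such as model_platform above are read throughout the code via myCongif.get_data. The ConfigManager internals are not part of this commit, so the following is only a hypothetical sketch of such a YAML-backed reader (the yaml.safe_load loading is an assumption):

    # Hypothetical sketch; the real myutils/ConfigManager.py is not shown in this diff.
    import yaml

    class ConfigManager:
        def __init__(self, path="config.yaml"):
            with open(path, "r", encoding="utf-8") as f:
                self._data = yaml.safe_load(f)  # parses keys such as model_platform, verify_rate

        def get_data(self, key, default=None):
            # flat key lookup, matching calls like myCongif.get_data("model_platform")
            return self._data.get(key, default)

    myCongif = ConfigManager()
    print(myCongif.get_data("model_platform"))  # -> "acl" with the config above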

240
core/ModelManager.py

@@ -7,34 +7,35 @@ import numpy as np
import threading
import importlib.util
import datetime
import queue
from collections import deque
from core.DBManager import mDBM,DBManager
from myutils.MyLogger_logger import LogHandler
from myutils.ConfigManager import myCongif
from model.plugins.ModelBase import ModelBase
import sys
import acl
from PIL import Image
ACL_MEM_MALLOC_HUGE_FIRST = 0
ACL_MEMCPY_HOST_TO_DEVICE = 1
ACL_MEMCPY_DEVICE_TO_HOST = 2
class VideoCaptureWithFPS:
'''Wrapper class around video capture; one instance per channel'''
def __init__(self, source=0):
def __init__(self, source):
self.source = source
self.cap = cv2.VideoCapture(self.source)
self.width = None
self.height = None
if self.cap.isOpened(): #if the open fails, the read path already checks and handles it
self.cap = cv2.VideoCapture(self.source)
if self.cap.isOpened(): #if the open fails, the read path already checks and handles it -- memory release also needs checking here
self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
#self.fps = fps # the thread refreshes at the maximum frame rate---too high a rate hurts CPU performance, but too low a rate causes frames to pile up
self.fps = self.cap.get(cv2.CAP_PROP_FPS) + 20 #only a reference value; the sleep must run a bit faster than the frame rate since read() also takes time.
#print(self.fps)
self.frame = None
self.running = True
self.read_lock = threading.Lock()
self.frame_queue = queue.Queue(maxsize=1)
#self.frame = None
#self.read_lock = threading.Lock()
self.thread = threading.Thread(target=self.update)
self.thread.start()
@@ -42,15 +43,24 @@ class VideoCaptureWithFPS:
while self.running:
ret, frame = self.cap.read()
if not ret:
#time.sleep(1.0 / self.fps)
continue
with self.read_lock:
self.frame = frame
time.sleep(1.0 / self.fps)
resized_frame = cv2.resize(frame, (int(self.width / 2), int(self.height / 2)))
# with self.read_lock:
# self.frame = resized_frame
if not self.frame_queue.full():
self.frame_queue.put(resized_frame)
time.sleep(1.0 / self.fps) #sleep according to the source frame rate
#print("Frame updated at:",time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
def read(self):
with self.read_lock:
frame = self.frame
return frame is not None, frame
# with self.read_lock:
# frame = self.frame.copy() if self.frame is not None else None
if not self.frame_queue.empty():
return True, self.frame_queue.get()
else:
return False, None
def release(self):
self.running = False
@@ -65,28 +75,21 @@ class ModelManager:
self.logger = LogHandler().get_logger("ModelManager")
# 本地YOLOv5仓库路径
self.yolov5_path = myCongif.get_data("yolov5_path")
self.buflen = myCongif.get_data("buffer_len")
#self.buflen = myCongif.get_data("buffer_len")
self.icout_max = myCongif.get_data("RESET_INTERVAL") #shares a variable with the video frame sequence number
self.frame_rate = myCongif.get_data("frame_rate")
self.frame_interval = 1.0 / int(myCongif.get_data("verify_rate"))
#video-recording related settings
self.FPS = myCongif.get_data("verify_rate") # video frame rate -- could a dynamic frame rate be implemented?
self.fourcc = cv2.VideoWriter_fourcc(*'mp4v') # use mp4 encoding
#initialize according to the model runtime platform
if myCongif.get_data("model_platform") == "acl":
self._init_acl()
# self.model_platform = myCongif.get_data("model_platform")
# if self.model_platform == "acl":
# self._init_acl()
def __del__(self):
self.logger.debug("释放资源")
def _init_acl(self):
'''ACL initialization'''
self.device_id = 0
#step1: initialize
ret = acl.init()
def _open_view(self,url,itype): #open the video source: 0--USB camera, 1-RTSP, 2-Hikvision SDK
if itype == 0:
cap = VideoCaptureWithFPS(int(url))
@@ -96,8 +99,14 @@ class ModelManager:
raise Exception("视频参数错误!")
return cap
def _import_model(self,model_name,model_path):
'''Dynamically import a module from a path'''
def _import_model(self,model_name,model_path,threshold):
'''
Dynamically import a module from a path
:param model_name: module name
:param model_path: module path
:param threshold: confidence threshold
:return:
'''
if os.path.exists(model_path):
module_spec = importlib.util.spec_from_file_location(model_name, model_path)
if module_spec is None:
@@ -105,7 +114,7 @@
return None
module = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(module)
md = getattr(module, "Model")(model_path)
md = getattr(module, "Model")(model_path,threshold) #instantiate the class
if not isinstance(md, ModelBase):
self.logger.error("{} not zf_model".format(md))
return None
@@ -160,36 +169,38 @@ class ModelManager:
def verify(self,frame,myModle_list,myModle_data,channel_id,schedule_list,result_list,isdraw=1):
'''Main verification entry: iterate over the models linked to this channel and call each one to run its check; model files are executed in turn'''
img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#img = np.ascontiguousarray(img, dtype=np.float32) / 255.0 # convert to a contiguous array -- TBD whether every model can do this
# img = frame.to_ndarray(format="bgr24")
#img = frame
# run object detection with the model
i_warn_count = 0 #warning label
isverify = False
for i in range(len(myModle_list)): # iterate over the algorithms linked to the channel; without a cap on the model count, multithreaded execution may need to be considered.
model = myModle_list[i]
data = myModle_data[i]
schedule = schedule_list[i]
result = result_list[i]
#check the detection schedule: are we inside the armed time window
now = datetime.datetime.now() # current date and time
weekday = now.weekday() # day of week, Monday is 0, Sunday is 6
hour = now.hour
result.pop(0) # keep the result array at a fixed length -- push the oldest result out first
if schedule[weekday][hour] == 1: #if not scheduled, skip verification and return the picture as-is
# call the model to run detection; model is dynamically loaded, and the decision criteria are executed inside the model ---- *********
isverify = True
detections, bwarn, warntext = model.verify(img, data,isdraw) #****************important
# the recognition results still need further processing
if bwarn: # this recognition pass raised a warning
#decide based on the duration and proportion configured on the model
# draw the warning text
cv2.putText(img, 'Intruder detected!', (50, (i_warn_count + 1) * 50),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
i_warn_count += 1
result.append(1) #need to verify the array update -- is it modified by reference?
else: #no warning still needs recording, so the proportion is computed uniformly
result.append(0)
else:
result.append(0)
# for i in range(len(myModle_list)): # iterate over the algorithms linked to the channel; without a cap on the model count, multithreaded execution may need to be considered.
# model = myModle_list[i]
# data = myModle_data[i]
# schedule = schedule_list[i]
# result = result_list[i]
# #check the detection schedule: are we inside the armed time window
# now = datetime.datetime.now() # current date and time
# weekday = now.weekday() # day of week, Monday is 0, Sunday is 6
# hour = now.hour
# result.pop(0) # keep the result array at a fixed length -- push the oldest result out first
# if schedule[weekday][hour] == 1: #if not scheduled, skip verification and return the picture as-is
# # call the model to run detection; model is dynamically loaded, and the decision criteria are executed inside the model ---- *********
# isverify = True
# detections, bwarn, warntext = model.verify(img, data,isdraw) #****************important
# # the recognition results still need further processing
# if bwarn: # this recognition pass raised a warning
# #decide based on the duration and proportion configured on the model
# # draw the warning text
# cv2.putText(img, 'Intruder detected!', (50, (i_warn_count + 1) * 50),
# cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
# i_warn_count += 1
# result.append(1) #need to verify the array update -- is it modified by reference?
# else: #no warning still needs recording, so the proportion is computed uniformly
# result.append(0)
# else:
# result.append(0)
if not isverify: #nothing was processed and the frame is returned as-is, so throttle the rate; reading too fast is pointless.
time.sleep(1.0/self.frame_rate) #use a default rate, at most 30 fps---with model inference, a single model on CPU only reaches about 12 fps
@@ -210,14 +221,14 @@ class ModelManager:
def dowork_thread(self,channel_id):
'''One thread per channel; the models linked to the channel are checked in one thread, and all locals belong to that channel'''
channel_data = self.verify_list[channel_id] #one channel, one thread [url,type,True,img_buffer,img,icount]
cap = None
view_buffer_deque = deque(maxlen=myCongif.get_data("buffer_len"))
#query the linked models --- prepare all base data before the loop runs
myDBM = DBManager()
myDBM.connect()
strsql = (f"select t1.model_id,t1.check_area,t1.polygon ,t2.duration_time,t2.proportion,t2.model_path,t1.ID,"
f"t2.model_name "
f"t2.model_name,t1.conf_threshold "
f"from channel2model t1 left join model t2 on t1.model_id = t2.ID where t1.channel_id ={channel_id};")
#print(strsql)
myModels = myDBM.do_select(strsql)
#load the models --- should there be a cap on how many algorithm modules one video channel can link? --- too many links make one thread too slow (frame rate drops), or else run them concurrently in more threads #?
@@ -225,55 +236,50 @@ class ModelManager:
myModle_data = [] #detection parameters, one per model
schedule_list = [] #arming schedule - one per model
result_list = [] #detection result history - one per model
proportion_lsit = []#proportion setting - one per model
proportion_list = []#proportion setting - one per model
warn_save_count = []#latest frame index of the saved recording after each model triggers a warning - one per model
threshold_list = [] #the model's confidence threshold for this channel - one per model
view_buffer = []
ibuffer_count = 0
last_img = None
#fetch the model-related data for this video channel - list
for model in myModels:
#instantiate the module class derived from the base class
m = self._import_model("",model[5]) #dynamically load the model
m = self._import_model("",model[5],model[8]) #dynamically load the model handler .py -- still need to verify the model file can be loaded
if m:
myModle_list.append(m)
myModle_list.append(m) #models that fail to load output the raw picture
myModle_data.append(model)
#model[6] -- c2m_id -- the channel's schedule for the model: 0-Monday, 6-Sunday
schedule_list.append(self.getschedule(model[6],myDBM))
result = [0 for _ in range(model[3] * myCongif.get_data("verify_rate"))] #initialize a result list sized duration * verification frame rate
result_list.append(result)
proportion_lsit.append(model[4])
proportion_list.append(model[4]) #the proportion used to decide whether to warn
warn_save_count.append(0) #latest saved-recording frame initialized to 0
# if not myModle_list: #models that fail to load output the raw picture
# self.logger.info(f"视频通道:{channel_id},未配置算法模块,结束线程!")
# return #could also not end here and just return the unprocessed picture instead.
#start the detection loop
#print(mydata[0],mydata[1],mydata[2],mydata[3]) # url type tag img_buffer
#[url,type,True,img_buffer]
iread_count =0
last_frame_time = time.time()
#used for saving recordings -- kept outside the loop to shave some overhead
FPS = myCongif.get_data("verify_rate") # video frame rate
fourcc = cv2.VideoWriter_fourcc(*'mp4v') # use mp4 encoding
#start the frame-pulling detection loop
cap = None
iread_count =0 #number of failed reads
last_frame_time = time.time() #initialize a frame-read timestamp
while channel_data[2]: #the tag is the run flag. The thread only reads and the main thread updates, at most one round behind, so a lock should not be needed. To be verified
# need to control the frame rate
# frame-rate control
current_time = time.time()
elapsed_time = current_time - last_frame_time
if elapsed_time < self.frame_interval:
time.sleep(self.frame_interval - elapsed_time) #sleep if under the interval
last_frame_time = time.time()
#*********grab the picture*************
if not cap: #not yet connected to the video source
if not cap: #the stream has to be opened the first time
try:
cap = self._open_view(channel_data[0],channel_data[1])
cap = self._open_view(channel_data[0],channel_data[1]) #spawns the child thread that reads frames
iread_count = 0
except:
self.logger.error("参数错误,终止线程")
self.logger.error("打开视频参数错误,终止线程!")
return
ret,frame = cap.read()
if not ret:
if iread_count > 60:
if iread_count > 30:
self.logger.warning(f"通道-{channel_id}:view disconnected. Reconnecting...")
cap.release()
cap = None
@@ -283,25 +289,23 @@ class ModelManager:
time.sleep(1.0/20) #20 fps is only a default reference; the verification rate is usually slower than this
continue #no frame read, keep going
iread_count = 0 #reset the frame-read counter
last_frame_time = time.time()
#run image inference -- if there is no model or we are outside working hours, the raw picture is returned; should the rate be throttled? -- verify() already sleeps
new_frame,img = self.verify(frame,myModle_list,myModle_data,channel_id,schedule_list,result_list)
#put the analyzed picture into memory
channel_data[3].append(img)
channel_data[3].append(img) # buffer size is bounded by maxlen; past the limit, the oldest entry is dropped
channel_data[5] += 1 #advance the frame sequence number
#print(self.verify_list[channel_id][5])
if channel_data[5] > self.icout_max:
channel_data = 0
if len(channel_data[3]) > self.buflen: # keep the buffer size---the buffer can be used to keep recordings
channel_data[3].pop(0)
#self.logger.debug("drop one frame!")
channel_data[4] = new_frame #always update the latest frame
channel_data[4] = new_frame #always update the latest frame**********
#print(f"{channel_id}--Frame updated at:",time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
#check result_list - whether the warning condition is met
for i in range(len(result_list)):
result = result_list[i]
proportion = proportion_lsit[i]
proportion = proportion_list[i]
count_one = float(sum(result)) #entries are 1/0, so the sum is the count of 1s
ratio_of_ones = count_one / len(result)
@@ -311,7 +315,7 @@ class ModelManager:
w_s_count = warn_save_count[i]
buffer_count = channel_data[5]
self.save_warn(model_name,w_s_count,buffer_count,channel_data[3],cap.width,cap.height,
channel_id,myDBM,FPS,fourcc)
channel_id,myDBM,self.FPS,self.fourcc)
self.send_warn()
#update the frame sequence number
warn_save_count[i] = buffer_count
@@ -327,7 +331,7 @@ class ModelManager:
# break
#end the thread
cap.release()
cv2.destroyAllWindows()
#cv2.destroyAllWindows()
def save_warn(self,model_name,w_s_count,buffer_count,buffer,width,height,channnel_id,myDBM,FPS,fourcc):
'''
@@ -372,7 +376,6 @@
ret = myDBM.do_sql(strsql)
return ret
def send_warn(self):
'''Send the warning message'''
pass
@@ -392,7 +395,7 @@
strsql = f"select id,ulr,type from channel where is_work = 1 and id = {channel_id};" #start the detection thread for a single channel
datas = mDBM.do_select(strsql)
for data in datas:
img_buffer = []
img_buffer = deque(maxlen=myCongif.get_data("buffer_len")) #create a fixed-length video buffer
img = None
icout = 0 #matches img_buffer; records the frame sequence number entering the buffer
run_data = [data[1],data[2],True,img_buffer,img,icout]
@@ -400,7 +403,6 @@
th_chn = threading.Thread(target=self.dowork_thread, args=(data[0],)) #one thread per video channel; the thread handle is not kept for now
th_chn.start()
def stop_work(self,channel_id=0):
'''Stop worker threads: 0 - stop all, non-zero - stop the thread for that channel ID'''
if channel_id ==0: #stop all threads
@@ -418,38 +420,36 @@
#print(f"Current working directory (ModelManager.py): {os.getcwd()}")
mMM = ModelManager()
def test():
buffer = [np.zeros((480, 640, 3), dtype=np.uint8) for _ in range(60)] # sample frame list
FRAME_WIDTH = 640 # set according to your image size
FRAME_HEIGHT = 480 # set according to your image size
FPS = myCongif.get_data("verify_rate") # your video frame rate
fourcc = cv2.VideoWriter_fourcc(*'mp4v') # use mp4 encoding
now = datetime.datetime.now() # current date and time
current_time_str = now.strftime("%Y-%m-%d_%H-%M-%S")
filename = f"{2}_{current_time_str}.mp4"
#filename = "saved_video.mp4"
print(filename)
# save the video
video_writer = cv2.VideoWriter(filename, fourcc, FPS, (FRAME_WIDTH, FRAME_HEIGHT))
if not video_writer.isOpened():
print(f"Failed to open video writer for filename")
return False
for frame in buffer:
video_writer.write(frame)
video_writer.release()
# save the image
ret = cv2.imwrite("saved_frame.jpg", buffer[-1])
if ret:
print("保存图片成功")
def test1():
print(cv2.getBuildInformation())
source = 'rtsp://192.168.3.44/live1'
gstreamer_pipeline = (
f"rtspsrc location={source} protocols=udp latency=0 ! "
"rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! appsink"
)
cap = cv2.VideoCapture(gstreamer_pipeline, cv2.CAP_GSTREAMER)
if not cap.isOpened():
print("Error: Unable to open the video source.")
return
else:
print("保存图片失败")
return False
print("Successfully opened the video source.")
ret, frame = cap.read()
if ret:
cv2.imshow('Frame', frame)
cv2.waitKey(0)
cap.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
#mMM.start_work()
# model = ModelManager()._import_model("", "../model/plugins/RYRQ/RYRQ_Model_ACL.py")
# model.testRun()
test()
test1()
print("111")
# name = acl.get_soc_name()
# count, ret = acl.rt.get_device_count()
# print(name,count)
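A note on the new frame_queue in VideoCaptureWithFPS: with maxsize=1 and put() guarded by full(), a stale frame can sit in the queue while fresh frames are dropped, which adds display latency. A minimal sketch of the drop-oldest alternative (names follow the class above; this is a suggestion, not what the commit implements):

    import queue, threading
    import cv2

    class LatestFrameGrabber:
        '''Keeps only the newest frame, so readers never see a backlog.'''
        def __init__(self, source):
            self.cap = cv2.VideoCapture(source)
            self.q = queue.Queue(maxsize=1)
            self.running = True
            threading.Thread(target=self._update, daemon=True).start()

        def _update(self):
            while self.running:
                ret, frame = self.cap.read()
                if not ret:
                    continue
                if self.q.full():          # drop the stale frame instead of the new one
                    try:
                        self.q.get_nowait()
                    except queue.Empty:
                        pass
                self.q.put(frame)

        def read(self, timeout=1.0):
            try:
                return True, self.q.get(timeout=timeout)
            except queue.Empty:
                return False, None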

272
core/Test.py

@@ -1,162 +1,120 @@
# import code dependencies
import cv2
# import os
# from model.plugins.ModelBase import ModelBase
# import importlib.util
import threading
import time
import numpy as np
import ipywidgets as widgets
from IPython.display import display
import torch
from skvideo.io import vreader, FFmpegWriter
import IPython.display
from ais_bench.infer.interface import InferSession
from det_utils import letterbox, scale_coords, nms
def preprocess_image(image, cfg, bgr2rgb=True):
"""图片预处理"""
img, scale_ratio, pad_size = letterbox(image, new_shape=cfg['input_shape'])
if bgr2rgb:
img = img[:, :, ::-1]
img = img.transpose(2, 0, 1) # HWC2CHW
img = np.ascontiguousarray(img, dtype=np.float32)
return img, scale_ratio, pad_size
def draw_bbox(bbox, img0, color, wt, names):
"""在图片上画预测框"""
det_result_str = ''
for idx, class_id in enumerate(bbox[:, 5]):
if float(bbox[idx][4] < float(0.05)):
continue
img0 = cv2.rectangle(img0, (int(bbox[idx][0]), int(bbox[idx][1])), (int(bbox[idx][2]), int(bbox[idx][3])),
color, wt)
img0 = cv2.putText(img0, str(idx) + ' ' + names[int(class_id)], (int(bbox[idx][0]), int(bbox[idx][1] + 16)),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
img0 = cv2.putText(img0, '{:.4f}'.format(bbox[idx][4]), (int(bbox[idx][0]), int(bbox[idx][1] + 32)),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
det_result_str += '{} {} {} {} {} {}\n'.format(
names[bbox[idx][5]], str(bbox[idx][4]), bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3])
return img0
def get_labels_from_txt(path):
"""从txt文件获取图片标签"""
labels_dict = dict()
with open(path) as f:
for cat_id, label in enumerate(f.readlines()):
labels_dict[cat_id] = label.strip()
return labels_dict
def draw_prediction(pred, image, labels):
"""在图片上画出预测框并进行可视化展示"""
imgbox = widgets.Image(format='jpg', height=720, width=1280)
img_dw = draw_bbox(pred, image, (0, 255, 0), 2, labels)
imgbox.value = cv2.imencode('.jpg', img_dw)[1].tobytes()
display(imgbox)
def infer_image(img_path, model, class_names, cfg):
"""图片推理"""
# 图片载入
image = cv2.imread(img_path)
# 数据预处理
img, scale_ratio, pad_size = preprocess_image(image, cfg)
# 模型推理
output = model.infer([img])[0]
output = torch.tensor(output)
# 非极大值抑制后处理
boxout = nms(output, conf_thres=cfg["conf_thres"], iou_thres=cfg["iou_thres"])
pred_all = boxout[0].numpy()
# 预测坐标转换
scale_coords(cfg['input_shape'], pred_all[:, :4], image.shape, ratio_pad=(scale_ratio, pad_size))
# 图片预测结果可视化
draw_prediction(pred_all, image, class_names)
def infer_frame_with_vis(image, model, labels_dict, cfg, bgr2rgb=True):
# data preprocessing
img, scale_ratio, pad_size = preprocess_image(image, cfg, bgr2rgb)
# model inference
output = model.infer([img])[0]
output = torch.tensor(output)
# non-maximum suppression postprocessing
boxout = nms(output, conf_thres=cfg["conf_thres"], iou_thres=cfg["iou_thres"])
pred_all = boxout[0].numpy()
# rescale predicted coordinates
scale_coords(cfg['input_shape'], pred_all[:, :4], image.shape, ratio_pad=(scale_ratio, pad_size))
# visualize the prediction results
img_vis = draw_bbox(pred_all, image, (0, 255, 0), 2, labels_dict)
return img_vis
def img2bytes(image):
"""将图片转换为字节码"""
return bytes(cv2.imencode('.jpg', image)[1])
def infer_video(video_path, model, labels_dict, cfg):
"""视频推理"""
image_widget = widgets.Image(format='jpeg', width=800, height=600)
display(image_widget)
# 读入视频
cap = cv2.VideoCapture(video_path)
while True:
ret, img_frame = cap.read()
from quart import Quart, render_template, websocket, jsonify
from quart.helpers import Response
# def test_acl_verify():
# '''Dynamically import a module from a path'''
# model_path = "/mnt/zfbox/model/plugins/RYRQ_ACL/RYRQ_Model_ACL.py"
# model_name = "测试"
# if os.path.exists(model_path):
# module_spec = importlib.util.spec_from_file_location(model_name, model_path)
# if module_spec is None:
# print(f"{model_path} 加载错误")
# return None
# module = importlib.util.module_from_spec(module_spec)
# module_spec.loader.exec_module(module)
# md = getattr(module, "Model")(model_path,0.5) # instantiate the class
# if not isinstance(md, ModelBase) or not md.init_ok:
# print("{} not zf_model".format(md))
# return None
# else:
# print("{}文件不存在".format(model_path))
# return None
# print("开执行检测")
# #return md
# img = cv2.imread("/mnt/zfbox/model/plugins/RYRQ_ACL/world_cup.jpg", cv2.IMREAD_COLOR) # read the image
# data = [0,1,"[(100, 100), (50, 200), (100, 300), (300, 300), (300, 100)]"]
# boxout,ret,strret = md.verify(img,data)
# #print(boxout)
# #release related resources
# md.release()
# load the YOLOv5 model
#model = yolov5.load('yolov5s')
app = Quart(__name__)
class VideoCaptureWithYOLO:
def __init__(self, source):
self.source = source
self.cap = cv2.VideoCapture(self._get_gstreamer_pipeline(), cv2.CAP_GSTREAMER)
self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
self.frame = None
self.running = True
self.read_lock = threading.Lock()
self.thread = threading.Thread(target=self.update)
self.thread.start()
def _get_gstreamer_pipeline(self):
return (
f"rtspsrc location={self.source} protocols=udp latency=0 ! "
"rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! appsink"
)
def update(self):
while self.running:
ret, frame = self.cap.read()
if not ret:
break
# run inference on the video frame
image_pred = infer_frame_with_vis(img_frame, model, labels_dict, cfg, bgr2rgb=True)
image_widget.value = img2bytes(image_pred)
def infer_camera(model, labels_dict, cfg):
"""外设摄像头实时推理"""
def find_camera_index():
max_index_to_check = 10 # Maximum index to check for camera
for index in range(max_index_to_check):
cap = cv2.VideoCapture(index)
if cap.read()[0]:
cap.release()
return index
# If no camera is found
raise ValueError("No camera found.")
# get the camera
camera_index = find_camera_index()
cap = cv2.VideoCapture(camera_index)
# initialize the visualization widget
image_widget = widgets.Image(format='jpeg', width=1280, height=720)
display(image_widget)
continue
# YOLOv5 analysis
annotated_frame = frame
with self.read_lock:
self.frame = annotated_frame
time.sleep(0.01) # throttle the frame rate
def read(self):
with self.read_lock:
frame = self.frame.copy() if self.frame is not None else None
return frame
def release(self):
self.running = False
self.thread.join()
self.cap.release()
# create multiple video streams
streams = {
'stream1': VideoCaptureWithYOLO('rtsp://192.168.3.44/live1'),
'stream2': VideoCaptureWithYOLO('rtsp://192.168.3.44/live1'),
'stream3': VideoCaptureWithYOLO('rtsp://192.168.3.44/live1'),
'stream4': VideoCaptureWithYOLO('rtsp://192.168.3.44/live1'),
'stream5': VideoCaptureWithYOLO('rtsp://192.168.3.44/live1'),
'stream6': VideoCaptureWithYOLO('rtsp://192.168.3.44/live1'),
'stream7': VideoCaptureWithYOLO('rtsp://192.168.3.44/live1'),
'stream8': VideoCaptureWithYOLO('rtsp://192.168.3.44/live1'),
'stream9': VideoCaptureWithYOLO('rtsp://192.168.3.44/live1')
}
@app.route('/')
async def index():
return await render_template('index.html', streams=streams.keys())
async def generate_frames(stream_id):
video_capture = streams[stream_id]
while True:
# run inference and visualization on each camera frame
_, img_frame = cap.read()
image_pred = infer_frame_with_vis(img_frame, model, labels_dict, cfg)
image_widget.value = img2bytes(image_pred)
if __name__ == "__main__":
cfg = {
'conf_thres': 0.4, # model confidence threshold; the lower the threshold, the more predicted boxes
'iou_thres': 0.5, # IOU threshold; overlapping boxes above this threshold are filtered out
'input_shape': [640, 640], # model input size
}
model_path = 'yolo.om'
label_path = './coco_names.txt'
# initialize the inference model
model = InferSession(0, model_path)
labels_dict = get_labels_from_txt(label_path)
infer_mode = 'video'
if infer_mode == 'image':
img_path = 'world_cup.jpg'
infer_image(img_path, model, labels_dict, cfg)
elif infer_mode == 'camera':
infer_camera(model, labels_dict, cfg)
elif infer_mode == 'video':
video_path = 'racing.mp4'
infer_video(video_path, model, labels_dict, cfg)
frame = video_capture.read()
if frame is not None:
_, buffer = cv2.imencode('.jpg', frame)
frame = buffer.tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route('/video_feed/<stream_id>')
async def video_feed(stream_id):
return Response(generate_frames(stream_id),
mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
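The commit message notes very high display latency; one place to experiment is the GStreamer pipeline used above. rtspsrc exposes a latency property (its jitterbuffer, in ms) and a protocols selector, and appsink accepts sync/max-buffers/drop, so a hedged variant of _get_gstreamer_pipeline might look like this (whether TCP actually helps depends on the network):

    def _get_gstreamer_pipeline(self):
        # latency=0 disables jitter buffering; protocols=tcp trades some latency for no UDP packet loss.
        # sync=false stops appsink from re-timing frames; max-buffers=1 drop=true keeps only the newest frame.
        return (
            f"rtspsrc location={self.source} protocols=tcp latency=0 ! "
            "rtph264depay ! h264parse ! avdec_h264 ! videoconvert ! "
            "appsink sync=false max-buffers=1 drop=true"
        )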

49
core/templates/index.html

@ -0,0 +1,49 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>YOLOv5 Video Streams</title>
<style>
body {
display: flex;
justify-content: center;
align-items: center;
height: 100vh;
margin: 0;
background-color: #f0f0f0;
}
.grid-container {
display: grid;
grid-template-columns: repeat(3, 1fr);
grid-template-rows: repeat(3, 1fr);
gap: 10px;
width: 90vw;
height: 90vh;
}
.grid-item {
position: relative;
width: 100%;
padding-top: 75%; /* 4:3 aspect ratio */
}
.grid-item img {
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
object-fit: cover;
border-radius: 8px;
}
</style>
</head>
<body>
<div class="grid-container">
{% for stream_id in streams %}
<div class="grid-item">
<img src="{{ url_for('video_feed', stream_id=stream_id) }}" alt="Stream {{ stream_id }}">
</div>
{% endfor %}
</div>
</body>
</html>

179
model/plugins/ModelBase.py

@@ -1,21 +1,35 @@
from abc import abstractmethod,ABC
from shapely.geometry import Point, Polygon
from myutils.ConfigManager import myCongif
import numpy as np
import cv2
import ast
import platform
import acl
#-----ACL related------
SUCCESS = 0 # success status value
FAILED = 1 # failure status value
ACL_MEM_MALLOC_NORMAL_ONLY = 2 # memory allocation policy: normal pages only
class ModelBase(ABC):
def __init__(self):
def __init__(self,path):
'''
Model class instantiation
:param path: path of the model file itself
:param threshold: the model's confidence threshold
'''
self.name = None #queried by name: the user's configuration parameters for the model, so the model name must be unique 2024-6-18 - the logic still needs refinement and verification
self.version = None
self.model_type = None # model type: 1-image classification, 2-object detection (yolov5), 3-segmentation, 4-keypoints
self.system = platform.system() #get the OS platform
self.system = myCongif.get_data("model_platform") #platform.system() #get the runtime platform
self.do_map = { # define the plugin entry functions --
# POCType.POC: self.do_verify,
# POCType.SNIFFER: self.do_sniffer,
# POCType.BRUTE: self.do_brute
}
self.model_path = path # model path
def __del__(self):
print("资源释放")
@@ -33,6 +47,167 @@ class ModelBase(ABC):
else:
return False
#acl ----- related -----
def _init_acl(self):
'''ACL initialization'''
self.device_id = 0
#step1: initialize
ret = acl.init()
ret = acl.rt.set_device(self.device_id) # select the Device for computation
if ret:
raise RuntimeError(ret)
self.context, ret = acl.rt.create_context(self.device_id) # explicitly create a Context
if ret:
raise RuntimeError(ret)
print('Init ACL Successfully')
def _del_acl(self):
'''ACL deinitialization'''
ret = acl.rt.destroy_context(self.context) # release the Context
if ret:
raise RuntimeError(ret)
ret = acl.rt.reset_device(self.device_id) # release the Device
if ret:
raise RuntimeError(ret)
ret = acl.finalize() # deinitialize
if ret:
raise RuntimeError(ret)
print('Deinit ACL Successfully')
def _init_resource(self):
''' Initialize the model and output resources. Related data types: aclmdlDesc aclDataBuffer aclmdlDataset'''
print("Init model resource")
# load the model file
#self.model_path = "/home/HwHiAiUser/samples/yolo_acl_sample/yolov5s_bs1.om"
self.model_id, ret = acl.mdl.load_from_file(self.model_path) # load the model
if ret != 0:
print(f"{self.model_path}---模型加载失败!")
return False
self.model_desc = acl.mdl.create_desc() # initialize the model description object
ret = acl.mdl.get_desc(self.model_desc, self.model_id) # get the description from the model
print("[Model] Model init resource stage success")
# create the model output dataset structure
self._gen_output_dataset() # create the model output dataset structure
return True
def _gen_output_dataset(self):
''' Build the dataset structure for the output data '''
ret = SUCCESS
self._output_num = acl.mdl.get_num_outputs(self.model_desc) # get the number of model outputs
self.output_dataset = acl.mdl.create_dataset() # create the output dataset structure
for i in range(self._output_num):
temp_buffer_size = acl.mdl.get_output_size_by_index(self.model_desc, i) # get the byte size of each output
temp_buffer, ret = acl.rt.malloc(temp_buffer_size, ACL_MEM_MALLOC_NORMAL_ONLY) # allocate device memory for each output
dataset_buffer = acl.create_data_buffer(temp_buffer,
temp_buffer_size) # create the output data buffer structure and fill in the allocated memory
_, ret = acl.mdl.add_dataset_buffer(self.output_dataset, dataset_buffer) # add the data buffer to the output dataset
if ret == FAILED:
self._release_dataset(self.output_dataset) # release the dataset on failure
print("[Model] create model output dataset success")
def _gen_input_dataset(self, input_list):
''' Build the dataset structure for the input data '''
ret = SUCCESS
self._input_num = acl.mdl.get_num_inputs(self.model_desc) # get the number of model inputs
self.input_dataset = acl.mdl.create_dataset() # create the input dataset structure
for i in range(self._input_num):
item = input_list[i] # get the i-th input
data_ptr = acl.util.bytes_to_ptr(item.tobytes()) # get the input data byte stream
size = item.size * item.itemsize # input size in bytes
dataset_buffer = acl.create_data_buffer(data_ptr, size) # create the input dataset buffer structure and fill in the input data
_, ret = acl.mdl.add_dataset_buffer(self.input_dataset, dataset_buffer) # add the dataset buffer to the dataset
if ret == FAILED:
self._release_dataset(self.input_dataset) # release the dataset on failure
print("[Model] create model input dataset success")
def _unpack_bytes_array(self, byte_array, shape, datatype):
''' Decode raw memory of various types into a numpy array '''
np_type = None
# map the output data type to a numpy dtype and decode flag
if datatype == 0: # ACL_FLOAT
np_type = np.float32
elif datatype == 1: # ACL_FLOAT16
np_type = np.float16
elif datatype == 3: # ACL_INT32
np_type = np.int32
elif datatype == 8: # ACL_UINT32
np_type = np.uint32
else:
print("unsupported datatype ", datatype)
return
# wrap the decoded data in a numpy array with the given shape and dtype
return np.frombuffer(byte_array, dtype=np_type).reshape(shape)
def _output_dataset_to_numpy(self):
''' Decode the model outputs into numpy arrays '''
dataset = []
# iterate over each output
for i in range(self._output_num):
buffer = acl.mdl.get_dataset_buffer(self.output_dataset, i) # get the buffer from the output dataset
data_ptr = acl.get_data_buffer_addr(buffer) # get the output data memory address
size = acl.get_data_buffer_size(buffer) # get the output size in bytes
narray = acl.util.ptr_to_bytes(data_ptr, size) # convert the pointer into a byte stream
# decode the raw memory into a numpy array using the model output's shape and dtype
outret = acl.mdl.get_output_dims(self.model_desc, i)[0]
dims = outret["dims"] # dimensions of each output
print(f"name:{outret['name']}")
print(f"dimCount:{outret['dimCount']}")
'''
dims = {
"name": xxx, #tensor name
"dimCount":xxx #number of dimensions in the shape
"dims": [xx, xx, xx] # dimension info --- this is what we take
}
'''
datatype = acl.mdl.get_output_data_type(self.model_desc, i) # dtype of each output -- just the data type: float16, int8, etc.
output_nparray = self._unpack_bytes_array(narray, tuple(dims), datatype) # decode into a numpy array
dataset.append(output_nparray)
return dataset
def execute(self, input_list):
'''Create the input dataset object; after inference completes, convert the output data to numpy format'''
self._gen_input_dataset(input_list) # create the model input dataset structure
ret = acl.mdl.execute(self.model_id, self.input_dataset, self.output_dataset) # run inference with the offline model's execute
out_numpy = self._output_dataset_to_numpy() # decode the raw inference output into numpy arrays whose shape and dtype match the model output spec
return out_numpy
def release(self):
''' Release model-related resources '''
if self._is_released:
return
print("Model start release...")
self._release_dataset(self.input_dataset) # release the input data structure
self.input_dataset = None # clear the input data
self._release_dataset(self.output_dataset) # release the output data structure
self.output_dataset = None # clear the output data
if self.model_id:
ret = acl.mdl.unload(self.model_id) # unload the model
if self.model_desc:
ret = acl.mdl.destroy_desc(self.model_desc) # release the model description
self._is_released = True
print("Model release source success")
def _release_dataset(self, dataset):
''' Release data of type aclmdlDataset '''
if not dataset:
return
num = acl.mdl.get_dataset_num_buffers(dataset) # number of buffers in the dataset
for i in range(num):
data_buf = acl.mdl.get_dataset_buffer(dataset, i) # get the buffer pointer
if data_buf:
ret = acl.destroy_data_buffer(data_buf) # release the buffer
ret = acl.mdl.destroy_dataset(dataset) # destroy the dataset
# @abstractmethod
# def infer(self, inputs): # reserved interface, subclasses must override
# pass
@abstractmethod
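Taken together, the ACL helpers above imply a fixed lifecycle: init device/context, load the model, run inference, then tear everything down in reverse. A minimal usage sketch, assuming a subclass that wires these up in __init__ (SomeModel and the paths are placeholders, not code from this commit):

    # Hypothetical subclass usage; SomeModel and the .om path are placeholders.
    model = SomeModel("/path/to/model_dir/some_model.py", threshold=0.5)  # __init__ runs _init_acl() + _init_resource()
    try:
        outputs = model.execute([input_array])   # _gen_input_dataset -> acl.mdl.execute -> numpy outputs
    finally:
        model.release()   # free datasets, unload the model, destroy the desc
        model._del_acl()  # destroy context, reset device, acl.finalize()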

20
model/plugins/RYRQ/RYRQ_Model.py

@@ -5,18 +5,22 @@ from myutils.ConfigManager import myCongif
import torch
import cv2
class Model(ModelBase):
def __init__(self,path):
super().__init__()
self.name = "人员入侵(ACL)"
self.version = "V1.0"
self.model_type = 2
#find the pt model path -- the path relationship between the py file and the model file must be fixed by convention -- when uploading a model, unpack to the right paths
dirpath,filename = os.path.split(path)
def __init__(self,path,threshold=0.5):
# find the pt model path -- the path relationship between the py file and the model file must be fixed by convention -- when uploading a model, unpack to the right paths
dirpath, filename = os.path.split(path)
model_file = os.path.join(dirpath,"yolov5s.pt") #for now the model file and the py file must be in the same directory
yolov5_path = myCongif.get_data("yolov5_path")
print(f"************{model_file},{yolov5_path}")
#instantiate the model
super().__init__(dirpath,threshold)
self.name = "人员入侵(ACL)"
self.version = "V1.0"
self.model_type = 2
#instantiate the model -- the failure case is not handled yet
self.init_ok = True
self.model = torch.hub.load(yolov5_path, 'custom', path=model_file, source='local')
#if the model fails to load, init_ok = False
def verify(self,image,data,isdraw=1):
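The TODO comment above notes that a failed torch.hub.load is not yet handled. A hedged sketch of the missing guard, wrapping the existing call (the broad except is deliberate, since hub loading can raise several error types):

    # Sketch: set init_ok = False instead of letting __init__ raise.
    try:
        self.model = torch.hub.load(yolov5_path, 'custom', path=model_file, source='local')
    except Exception as e:   # hub load can fail on a missing repo, missing weights, or device issues
        print(f"model load failed: {e}")
        self.init_ok = False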

82
model/plugins/RYRQ_ACL/RYRQ_Model_ACL.py

@@ -1,37 +1,76 @@
import os.path
from model.plugins.ModelBase import ModelBase
from myutils.ConfigManager import myCongif
import torch
from model.base_model.ascnedcl.det_utils import get_labels_from_txt, letterbox, scale_coords, nms, draw_bbox # model pre/post-processing helpers
import cv2
import numpy as np
import torch # deep-learning framework, used here mainly for data handling
class Model(ModelBase):
def __init__(self,path):
super().__init__()
def __init__(self,path,threshold=0.5):
# find the pt model path -- the path relationship between the py file and the model file must be fixed by convention -- when uploading a model, unpack to the right paths
dirpath, filename = os.path.split(path)
self.model_file = os.path.join(dirpath, "yolov5s_bs1.om") # for now the model file and the py file must be in the same directory
self.coco_file = os.path.join(dirpath, "coco_names.txt")
super().__init__(self.model_file) #the base class handles ACL environment initialization during instantiation
self.model_id = None # model id
self.input_dataset = None # input data structure
self.output_dataset = None # output data structure
self.model_desc = None # model description info
self._input_num = 0 # number of inputs
self._output_num = 0 # number of outputs
self._output_info = [] # output info list
self._is_released = False # whether resources have been released
self.name = "人员入侵"
self.version = "V1.0"
self.model_type = 2
#find the pt model path -- the path relationship between the py file and the model file must be fixed by convention -- when uploading a model, unpack to the right paths
dirpath,filename = os.path.split(path)
model_file = os.path.join(dirpath,"yolov5s.pt") #for now the model file and the py file must be in the same directory
yolov5_path = myCongif.get_data("yolov5_path")
print(f"************{model_file},{yolov5_path}")
#instantiate the model
self.model = torch.hub.load(yolov5_path, 'custom', path=model_file, source='local')
self.neth = 640 # target height after scaling, i.e. the model input height
self.netw = 640 # target width after scaling, i.e. the model input width
self.conf_threshold = threshold # confidence threshold
self._init_acl()
self.init_ok = True
if not self._init_resource():
print("加载模型文件失败!")
self.init_ok = False
def __del__(self):
if self.init_ok:
self.release()
self._del_acl()
def verify(self,image,data,isdraw=1):
results = self.model(image) # run model detection --- interfaces need to be unified
detections = results.pandas().xyxy[0].to_dict(orient="records")
labels_dict = get_labels_from_txt('/mnt/zfbox/model/plugins/RYRQ_ACL/coco_names.txt') # get class info as a dict mapping index to class name
# data preprocessing
img, scale_ratio, pad_size = letterbox(image, new_shape=[640, 640]) # scale and pad the image
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, HWC to CHW #the image has already been converted on input
img = np.ascontiguousarray(img, dtype=np.float32) / 255.0 # convert to a contiguous array
# model inference, get the model output
output = self.execute([img,])[0]
# postprocessing -- boxout is a tensor-list: [tensor([[],[].[]])] --[x1,y1,x2,y2,confidence,coco_index]
boxout = nms(torch.tensor(output), conf_thres=0.4,
iou_thres=0.5) # apply non-maximum suppression to the model output; conf_thres is the confidence threshold, iou_thres the IOU threshold
pred_all = boxout[0].numpy() # convert to a numpy array -- [[],[],[]] --[x1,y1,x2,y2,confidence,coco_index]
# pred_all[:, :4] takes the first 4 columns of all rows; pred_all[:,1] -- the first column
scale_coords([640, 640], pred_all[:, :4], image.shape, ratio_pad=(scale_ratio, pad_size)) # rescale predictions to the original image size
bwarn = False
warn_text = ""
#if there is a detection region, draw it first; since the polygon object is created in this function, it must be called before the region check.
if data[1] == 1:
self.draw_polygon(image,data[2],(0, 255, 0))
self.draw_polygon(image,data[2],(255,0,0))
#filter out entries that are not the target label
filtered_pred_all = pred_all[pred_all[:, 5] == 0]
# draw the detection results --- should also be encapsulated in the class,
for det in detections:
if det['name'] == 'person': #label is person
x1, y1, x2, y2 = int(det['xmin']), int(det['ymin']), int(det['xmax']), int(det['ymax'])
# draw the detection bounding box
cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
for pred in filtered_pred_all:
x1, y1, x2, y2 = int(pred[0]), int(pred[1]), int(pred[2]), int(pred[3])
# # draw the detection bounding box --already handled in draw_bbox
# cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
if data[1] == 1: # a detection region is specified
x_center = (x1 + x2) / 2
y_center = (y1 + y2) / 2
@@ -43,7 +82,10 @@ class Model(ModelBase):
#raise a warning
bwarn = True
warn_text = "Intruder detected!"
return detections,bwarn,warn_text
img_dw = draw_bbox(filtered_pred_all, image, (0, 255, 0), 2, labels_dict) # draw boxes, classes, and confidences
cv2.imwrite('img_res.png', img_dw)
return filtered_pred_all, False, ""
def testRun(self):
print("1111")

3
run.py

@@ -24,8 +24,7 @@ if __name__ == '__main__':
else:
raise NotImplementedError(f"Unsupported operating system: {system}")
print(free/(1024*1024))
mMM.start_work() # start processing for all channels
mVManager.start_check_rtsp() #thread that updates RTSP online status
web.run(debug=True,port=5001)
web.run(debug=True,port=5001,host="0.0.0.0")

31
web/API/channel.py

@@ -223,13 +223,12 @@ async def channel_model_linkmodel(): #get the algorithm list -- shown when linking algorithms
@api.route('/channel/model/getarea',methods=['GET'])
@login_required
async def channel_model_getarea(): #get the remark info for the algorithm region
async def channel_model_getarea(): #get the remark info for the algorithm region --- also fetch the detection threshold
ID = request.args.get('ID')
strsql = f"select check_area,check_x,check_y,check_width,check_height from channel2model where ID={ID};"
strsql = f"select check_area,polygon,conf_threshold from channel2model where ID={ID};"
data = mDBM.do_select(strsql,1)
if data:
reMsg = {'ID':ID,'check_area':data[0],'check_x':data[1],'check_y':data[2],
'check_width':data[3],'check_height':data[4]}
reMsg = {'ID':ID,'check_area':data[0],'polygon':data[1],'conf_threshold':data[2]}
else:
reMsg = {}
return jsonify(reMsg)
@@ -239,12 +238,24 @@ async def channel_model_getarea(): #get the remark info for the algorithm region
async def channel_model_changearea(): #modify the algorithm region info
ID = (await request.form)['ID']
check_area = (await request.form)['check_area']
check_x = (await request.form)['check_x']
check_y = (await request.form)['check_y']
check_width = (await request.form)['check_width']
check_height = (await request.form)['check_height']
strsql = (f"update channel2model set check_area={check_area},check_x={check_x},check_y={check_y}"
f",check_width={check_width},check_height={check_height} where ID={ID};")
check_x = (await request.form)['polygon']
strsql = (f"update channel2model set check_area={check_area},polygon={check_x} where ID={ID};")
ret = mDBM.do_sql(strsql)
if ret == True:
reStatus = 1
reMsg = '修改算法检测区域成功'
else:
reStatus = 0
reMsg = '新修改算法检测区域失败,请联系技术支持!'
return jsonify({'status': reStatus, 'msg': reMsg})
@api.route('/channel/model/changethreshold',methods=['POST'])
@login_required
async def channel_model_changthreshold(): #modify the algorithm confidence threshold
ID = (await request.form)['ID']
conf_threshold = (await request.form)['conf_threshold']
strsql = (f"update channel2model set conf_threshold={conf_threshold} where ID={ID};")
ret = mDBM.do_sql(strsql)
if ret == True:
reStatus = 1
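One observation on these handlers: form values are interpolated straight into the SQL strings. If mDBM.do_sql supports parameter binding (an assumption; the DBManager API is not shown in this diff), the sqlite3-style placeholder form avoids both quoting bugs and injection:

    # Sketch with sqlite3 placeholders; a do_sql(sql, params) signature is assumed here.
    strsql = "update channel2model set conf_threshold=? where ID=?;"
    ret = mDBM.do_sql(strsql, (conf_threshold, ID))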

8
web/API/user.py

@@ -24,7 +24,7 @@ async def user_login(): #user login
captcha = (await request.form)['captcha']
if captcha != session.get('captcha'):
#the captcha must be invalidated once it has been verified
#?
print(session.get('captcha'))
return '验证码错误', 400
#compare username and password
strsql = f"select password from user where username = '{username}'"
@@ -56,13 +56,14 @@ async def user_adduser(): #add a user
people = (await request.form)['people']
tellnum = (await request.form)['tellnum']
strsql = f"select username from user where username = '{username}';"
password = myCongif.get_data('pw')
data = mDBM.do_select(strsql)
if data:
reStatus = 0
reMsg = '用户名重复,请重新输入!'
else:
strsql = (f"INSERT INTO user (username ,password ,status,people,tellnum ) VALUES "
f"('{username}','{myCongif.get_data("pw")}',1,'{people}','{tellnum}');")
f"('{username}','{password}',1,'{people}','{tellnum}');")
ret = mDBM.do_sql(strsql)
if ret == True:
reStatus = 1
@@ -76,7 +77,8 @@ async def user_adduser(): #add a user
@login_required
async def user_change_passwd(): #reset password
username = (await request.form)['username']
strsql = f"update user set password='{myCongif.get_data("pw")}' where username='{username}';"
password = myCongif.get_data('pw')
strsql = f"update user set password='{password}' where username='{username}';"
ret = mDBM.do_sql(strsql)
if ret == True:
reStatus = 1

42
web/API/viedo.py

@@ -3,7 +3,7 @@ import asyncio
import time
from . import api
from quart import jsonify, request
from aiortc import RTCPeerConnection, RTCSessionDescription, VideoStreamTrack
from aiortc import RTCPeerConnection, RTCSessionDescription, VideoStreamTrack,RTCConfiguration
from core.ModelManager import mMM
from core.DBManager import mDBM
from myutils.ConfigManager import myCongif
@@ -50,14 +50,12 @@ pcs = {}
# continue
#threading.Thread(target=camera_thread, daemon=True).start()
'''
---------------------Transport--------------------------
'''
class VideoTransformTrack(VideoStreamTrack):
kind = "video"
def __init__(self,cid,itype=1): #0-usb,1-RTSP,2-Hikvision SDK
def __init__(self,cid,itype=1): #0-usb,1-RTSP,2-Hikvision SDK --itype no longer has any effect
super().__init__()
self.channel_id = cid
@@ -69,13 +67,14 @@ class VideoTransformTrack(VideoStreamTrack):
async def recv(self):
new_frame = None
time.sleep(1.0 / self.frame_rate)
while True:
new_frame = mMM.verify_list[self.channel_id][4]
#new_frame = av.VideoFrame.from_ndarray(img, format="rgb24")
if new_frame is not None:
break
else:
time.sleep(1.0 / self.frame_rate)
# while True:
# new_frame = mMM.verify_list[self.channel_id][4]
# #new_frame = av.VideoFrame.from_ndarray(img, format="rgb24")
# if new_frame is not None:
# break
# else:
# time.sleep(1.0 / self.frame_rate)
# set the timestamp and time base -- compute the frame rate in real time from elapsed time
# elapsed_time = time.time() - self.start_time
# self.frame_count += 1
@@ -87,15 +86,24 @@ class VideoTransformTrack(VideoStreamTrack):
self.frame_count = 0
new_frame.pts = self.frame_count
new_frame.time_base = self.time_base
#print(f"Frame pts: {new_frame.pts}, time_base: {new_frame.time_base}")
print(f"{self.channel_id} -- Frame pts: {new_frame.pts}, time_base: {new_frame.time_base}")
# convert the detection-result image into a frame
# new_frame.pts = int(self.cap.get(cv2.CAP_PROP_POS_FRAMES))
# new_frame.time_base = self.time_base
# end_time = time.time() # end time
# print(f"Processing time: {end_time - start_time} seconds")
#print("webRTC recv at:",time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
return new_frame
async def get_stats(peer_connection):
stats = await peer_connection.getStats()
for report in stats.values():
if report.type == 'outbound-rtp':
print(f"RTT: {report.roundTripTime} seconds")
print(f"Packets Sent: {report.packetsSent}")
print(f"Bytes Sent: {report.bytesSent}")
@api.route('/offer', methods=['POST'])
async def offer():
#接收客户端的连接请求
@@ -125,10 +133,15 @@ async def offer():
#extract the SDP sent by the client and generate the server-side SDP
offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
# # configure a STUN server
# ice_servers = [{"urls": ["stun:stun.voipbuster.com"]}]
#ice_servers = [{"urls": []}]# disable STUN servers
# pc = RTCPeerConnection(configuration={"iceServers": ice_servers})
pc = RTCPeerConnection() #instantiate an RTC peer connection object
pcs[channel_id] = pc #add an object to the collection; if it already exists, it is not added
config = RTCConfiguration(iceServers=[])
pc = RTCPeerConnection(configuration=config)
# t = threading.Thread(target=get_stats,args=(pc,))
# t.start()
#pc = RTCPeerConnection() # instantiate an RTC peer connection object
pcs[channel_id] = pc # add an object to the collection; if it already exists, it is not added
@pc.on("datachannel")
def on_datachannel(channel):
@@ -142,6 +155,7 @@ async def offer():
elif message == 'resume':
# perform the resume action
pass
#listen for RTC connection state changes
@pc.on("iconnectionstatechange") #when the ICE connection state changes
async def iconnectionstatechange():
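Two notes on the WebRTC wiring above, with a hedged sketch. get_stats is a coroutine, so launching it from threading.Thread (the commented-out attempt) would not await it; scheduling it on the event loop does. Also, aiortc's event name is "iceconnectionstatechange", so the handler registered above may not fire under the shorter name. stats_loop below is a hypothetical helper, not code from this commit:

    import asyncio

    # Inside offer(), after creating pc:
    async def stats_loop(pc, interval=5.0):
        while True:
            await get_stats(pc)              # get_stats awaits pc.getStats() internally
            await asyncio.sleep(interval)

    asyncio.ensure_future(stats_loop(pc))    # schedule on the running loop instead of a Thread

    @pc.on("iceconnectionstatechange")       # aiortc's event name includes "ice"
    async def on_ice_state_change():
        print("ICE state:", pc.iceConnectionState)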

33
web/__init__.py

@@ -1,27 +1,48 @@
from quart import Quart, render_template, request, jsonify,send_from_directory,session,redirect, url_for
from quart_session import Session
from pymemcache.client import base
from .main import main
from .API import api
from functools import wraps
from quart_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
#app.config['SECRET_KEY'] = 'mysecret' #secret key -- should live in the config file
#socketio = SocketIO(app)
# Create the custom backend for quart-session
class MemcachedSessionInterface: #it just works; not fully understood
def __init__(self, client):
self.client = client
async def open_session(self, app, request):
sid = request.cookies.get(app.session_cookie_name)
if not sid:
sid = self._generate_sid()
val = await self.client.get(self.key_prefix + sid)
if val is not None:
return self._deserialize(val)
return self._get_default_session()
async def save_session(self, app, session, response):
val = self._serialize(session)
await self.client.set(self.key_prefix + session.sid, val, self.expire)
def create_app():
app = Quart(__name__)
#configuration -- set the various config options that are accessed and used across the whole application.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
# app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SECRET_KEY'] = 'zfxxkj_2024_!@#'
app.config['SESSION_TYPE'] = 'redis' # session type
app.config['SESSION_FILE_DIR'] = './sessions' # session save path
app.config['SESSION_TYPE'] = 'memcached' # session type
#app.config['SESSION_FILE_DIR'] = './sessions' # session save path
#app.config['SESSION_MEMCACHED'] = base.Client(('localhost', 11211))
app.config['SESSION_PERMANENT'] = True # if set to True, the session expires when the browser closes.
app.config['SESSION_USE_SIGNER'] = False # whether to sign the session cookie value sent to the browser
memcached_client = base.Client(('localhost', 11211))
app.session_interface = MemcachedSessionInterface(memcached_client)
Session(app)
#ORM database management
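The MemcachedSessionInterface above references helpers that are not in this diff (_generate_sid, key_prefix, _serialize, _deserialize, _get_default_session, expire), and pymemcache's base.Client is synchronous, so the awaits on get/set do not actually await I/O. A minimal self-contained sketch of what those pieces could look like; every name here is an assumption filling the gaps, not the committed code:

    import pickle, uuid
    from pymemcache.client import base

    class MemcachedSessionStore:
        key_prefix = "session:"
        expire = 3600  # seconds

        def __init__(self, client: base.Client):
            self.client = client  # pymemcache base.Client is synchronous

        def _generate_sid(self):
            return uuid.uuid4().hex

        def _serialize(self, session_dict):
            return pickle.dumps(dict(session_dict))

        def _deserialize(self, val):
            return pickle.loads(val)

        def load(self, sid):
            val = self.client.get(self.key_prefix + sid)
            return self._deserialize(val) if val is not None else {}

        def save(self, sid, session_dict):
            self.client.set(self.key_prefix + sid, self._serialize(session_dict), expire=self.expire)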

4
web/main/routes.py

@@ -21,9 +21,9 @@ def login_required(f):
@main.route('/')
async def index():
# error = request.args.get('error')
#error = request.args.get('error')
return await render_template('实时预览.html')
# return await render_template('登录.html',error=error)
#return await render_template('登录.html',error=error)
#return await render_template('index_webrtc.html')
# @main.route('/', methods=['GET', 'POST'])

12
web/main/templates/index.html

@@ -1,12 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>YOLOv5 Real-Time Detection</title>
</head>
<body>
<h1>YOLOv5 Real-Time Detection</h1>
<img src="{{ url_for('api.video_feed') }}" width="640" height="480">
</body>
</html>

15
web/main/templates/实时预览.html

@@ -212,26 +212,17 @@
<!-- Unnamed (Image) -->
<div id="u37" class="ax_default image">
<img id="u37_img" class="img " src="images/实时预览/u32.png"/>
<div id="u37_text" class="text " style="display:none; visibility: hidden">
<p></p>
</div>
<video id="u37_video" autoplay="true" playsinline="true"></video>
</div>
<!-- Unnamed (Image) -->
<div id="u38" class="ax_default image">
<img id="u38_img" class="img " src="images/实时预览/u32.png"/>
<div id="u38_text" class="text " style="display:none; visibility: hidden">
<p></p>
</div>
<video id="u38_video" autoplay="true" playsinline="true"></video>
</div>
<!-- Unnamed (Image) -->
<div id="u39" class="ax_default image">
<img id="u39_img" class="img " src="images/实时预览/u32.png"/>
<div id="u39_text" class="text " style="display:none; visibility: hidden">
<p></p>
</div>
<video id="u39_video" autoplay="true" playsinline="true"></video>
</div>
<!-- Unnamed (Image) -->

1
web/main/templates/通道管理.html

@@ -273,7 +273,6 @@
<div id="u91" class="ax_default table_cell">
<img id="u91_img" class="img " src="images/通道管理/u84.png"/>
<div id="u91_text" class="text ">
<p><span>rtsp://www.easydarwin.org:554/your_stream_id</span></p>
</div>
</div>

BIN
zfbox.db

Binary file not shown.