
windows --- first complete working version

master · 张龙 · 10 months ago · parent commit 1420083226
  1. config.yaml (8 changes)
  2. core/DBManager.py (21 changes)
  3. core/ModelManager.py (394 changes)
  4. model/ModelManager.py (2 changes)
  5. model/base_model/ascnedcl/det_utils.py (226 changes)
  6. model/plugins/ModelBase.py (16 changes)
  7. model/plugins/RYRQ/RYRQ_Model.py (50 changes)
  8. model/plugins/RYRQ_ACL/RYRQ_Model_ACL.py (17 changes)
  9. model/plugins/RYRQ_ACL/coco_names.txt (80 changes)
  10. model/plugins/RYRQ_ACL/world_cup.jpg (BIN)
  11. model/plugins/RYRQ_ACL/yolov5s.onnx (BIN)
  12. model/plugins/RYRQ_ACL/yolov5s_bs1.om (BIN)
  13. run.py (24 changes)
  14. web/API/channel.py (2 changes)
  15. web/API/viedo.py (338 changes)
  16. web/main/static/resources/scripts/aiortc-client-new.js (74 changes)
  17. web/main/templates/实时预览.html (71 changes)
  18. zfbox.db (BIN)

config.yaml (8 changes)

@@ -28,7 +28,11 @@ ALLOWED_EXTENSIONS : {'zip'}
RTSP_Check_Time : 600 #10 minutes
#model
model_platform : cpu #npu gpu
weight_path: /model/weights
yolov5_path: model/base_model/yolov5
yolov5_path: D:/Project/FristProject/model/base_model/yolov5 #absolute path -- must be adjusted for each deployment environment!
cap_sleep_time: 300 #5 minutes
buffer_len: 30 #number of post-analysis frames kept in the buffer
buffer_len: 300 #number of post-analysis frames kept in the buffer -- can be sized together with the verification frame rate
RESET_INTERVAL : 100000 #frame counter reset ceiling
frame_rate : 20 #reference frame rate -- later rate control is driven mainly by verify_rate
verify_rate : 5 #verification frame rate --- i.e. the frame rate of the video output
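A quick sanity check on these values: the buffer holds buffer_len frames that are produced and consumed at verify_rate fps, so it spans buffer_len / verify_rate seconds of footage, which is the window available for the warning clips saved below.

# Sketch: seconds of footage the post-analysis buffer covers.
buffer_len = 300   # frames kept after analysis
verify_rate = 5    # output / verification fps
print(buffer_len / verify_rate)  # 60.0 seconds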

core/DBManager.py (21 changes)

@@ -2,6 +2,7 @@ import pymysql
import sqlite3
import threading
import os
import json
from myutils.ConfigManager import myCongif
from myutils.MyLogger_logger import LogHandler
@@ -160,6 +161,26 @@ class DBManager():
            strsql = f"insert into channel2model (channel_id,model_id) values ({channel_id},{one});"
            if self.do_sql(strsql) == False:
                return False
            #initialize the arming schedule -- all 1s (armed around the clock)
            strsql = f"select ID from channel2model where channel_id={channel_id} and model_id={one};"
            data = mDBM.do_select(strsql,1)
            ID = data[0]  #channel2model primary key used below (the original omitted this extraction, leaving ID undefined)
            schedule_data_str = ("{'6': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "
                                 "'0': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "
                                 "'1': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "
                                 "'2': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], "
                                 "'3': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],"
                                 "'4': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],"
                                 "'5': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}")
            schedule_data = json.loads(schedule_data_str.replace("'", '"'))
            for day, hours in schedule_data.items():
                for hour, status in enumerate(hours):
                    strsql = (
                        f"insert into schedule (channel2model_id,day,hour,status) values ({ID},'{day}',{hour},{status})"
                        f" on conflict(channel2model_id,day,hour) do update set status=excluded.status;")
                    ret = mDBM.do_sql(strsql)
                    if not ret:
                        return ret
        #delete the ones no longer linked (set difference)
        for one in need_del:
            strsql = f"select ID from channel2model where channel_id={channel_id} and model_id={one};"
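Since the default schedule initialized above is a constant all-ones table, the same dict can also be built directly instead of being parsed from a string literal; an equivalent sketch:

# Equivalent default schedule: keys '0'..'6' are weekdays, each with 24 hourly flags set to 1.
schedule_data = {str(day): [1] * 24 for day in range(7)}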

core/ModelManager.py (394 changes)

@@ -3,47 +3,71 @@ import time
import av
import os
import cv2
import torch
import numpy as np
import threading
import importlib.util
import datetime
from core.DBManager import mDBM,DBManager
from myutils.MyLogger_logger import LogHandler
from myutils.ConfigManager import myCongif
from model.plugins.ModelBase import ModelBase
class VideoCaptureWithFPS:
    def __init__(self, src=0, target_fps=10):
        self.cap = cv2.VideoCapture(src)
        self.target_fps = target_fps
        self.frame_interval = 1.0 / target_fps
        self.last_frame_time = time.time()

    def read(self):
        current_time = time.time()
        elapsed_time = current_time - self.last_frame_time
        if elapsed_time < self.frame_interval:  #sleep when called faster than the frame interval
            time.sleep(self.frame_interval - elapsed_time)
        self.last_frame_time = time.time()
        ret, frame = self.cap.read()
        return ret, frame

class VideoCaptureWithFPS:
    '''Wrapper around video capture; one instance per channel'''
    def __init__(self, source=0):
        self.source = source
        self.cap = cv2.VideoCapture(self.source)
        self.width = None
        self.height = None
        if self.cap.isOpened():  #if opening failed, the frame-reading path already checks and handles it
            self.width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            self.height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        #self.fps = fps  # the thread refreshes frames at its maximum rate --- too high a rate hurts CPU usage, too low a rate lets frames pile up
        self.fps = self.cap.get(cv2.CAP_PROP_FPS) + 20  #reference value only; the sleep must run a bit faster than the source frame rate because read() itself takes time
        #print(self.fps)
        self.frame = None
        self.running = True
        self.read_lock = threading.Lock()
        self.thread = threading.Thread(target=self.update)
        self.thread.start()

    def update(self):
        while self.running:
            ret, frame = self.cap.read()
            if not ret:
                continue
            with self.read_lock:
                self.frame = frame
            time.sleep(1.0 / self.fps)

    def read(self):
        with self.read_lock:
            frame = self.frame
        return frame is not None, frame

    def release(self):
        self.running = False
        self.thread.join()
        self.cap.release()
        self.cap = None
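A minimal usage sketch of the new wrapper (the source URL is a placeholder): read() never blocks, it just hands back the latest frame grabbed by the background thread, so callers pace themselves, as dowork_thread does with frame_interval.

cap = VideoCaptureWithFPS("rtsp://127.0.0.1/live1")  # placeholder source
ret, frame = cap.read()   # (False, None) until the first frame has been grabbed
if ret:
    print(cap.width, cap.height, frame.shape)
cap.release()             # stops the update thread and frees the capture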
class ModelManager:
    def __init__(self):
        self.verify_list = {}
        self.verify_list = {}  #the main per-channel runtime data
        self.bRun = True
        self.logger = LogHandler().get_logger("ModelManager")
        # local YOLOv5 repository path
        self.yolov5_path = myCongif.get_data("yolov5_path")
        self.buflen = myCongif.get_data("buffer_len")
        self.icout_max = myCongif.get_data("RESET_INTERVAL")  #shares this variable with the video frame sequence number
        self.frame_rate = myCongif.get_data("frame_rate")
        self.frame_interval = 1.0 / int(myCongif.get_data("verify_rate"))
        #video-recording related
        self.fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # mp4 encoding

    def __del__(self):
        self.logger.debug("releasing resources")

    def _open_view(self,url,itype):  #open a camera: 0--USB camera, 1-RTSP, 2-Hikvision SDK
        if itype == 0:
@@ -58,96 +82,309 @@ class ModelManager:
        '''dynamically import a module from its path'''
        if os.path.exists(model_path):
            module_spec = importlib.util.spec_from_file_location(model_name, model_path)
            if module_spec is None:
                self.logger.error(f"{model_path} failed to load")
                return None
            module = importlib.util.module_from_spec(module_spec)
            module_spec.loader.exec_module(module)
            md = getattr(module, "Model")(model_path)
            if not isinstance(md, ModelBase):
                self.logger.error("{} not zf_model".format(md))
                return None
        else:
            self.logger.error("{} file does not exist".format(model_path))
            return None
        return module
        return md
    def getschedule(self,c2m_id,myDBM):
        '''
        Query this algorithm's arming schedule by its channel2model ID
        :param c2m_id:
        :return: a 2-D list of arming flags with day as rows and hour as columns
        '''
        strsql = f"select day,hour,status from schedule where channel2model_id ={c2m_id} order by hour asc,day asc;"
        datas = myDBM.do_select(strsql)
        onelist = []
        twolist = []
        threelist = []
        fourlist = []
        fivelist = []
        sixlist = []
        sevenlist = []
        if datas:
            for i in range(24):
                onelist.append(datas[i*7 + 0][2])
                twolist.append(datas[i*7 + 1][2])
                threelist.append(datas[i*7 + 2][2])
                fourlist.append(datas[i*7 + 3][2])
                fivelist.append(datas[i*7 + 4][2])
                sixlist.append(datas[i*7 + 5][2])
                sevenlist.append(datas[i*7 + 6][2])
        else:
            self.logger.error(f"no schedule data? {c2m_id}")  #was self.logger(...), which is not callable
            onelist = [1]*24
            twolist = [1]*24
            threelist = [1]*24
            fourlist = [1]*24
            fivelist = [1]*24
            sixlist = [1]*24
            sevenlist = [1]*24
        schedule_list = []
        schedule_list.append(onelist)
        schedule_list.append(twolist)
        schedule_list.append(threelist)
        schedule_list.append(fourlist)
        schedule_list.append(fivelist)
        schedule_list.append(sixlist)
        schedule_list.append(sevenlist)
        return schedule_list
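The returned structure is indexed as schedule_list[weekday][hour], with weekday following Python's datetime.weekday() convention; a lookup sketch:

import datetime

schedule_list = [[1] * 24 for _ in range(7)]  # e.g. the all-armed default returned when no rows exist
now = datetime.datetime.now()
armed = schedule_list[now.weekday()][now.hour] == 1  # weekday(): 0 = Monday ... 6 = Sunday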
    def verify(self,frame,myModle_list,myModle_data,channel_id,schedule_list,result_list,isdraw=1):
        '''Main verification entry: iterate over the models linked to this channel and call each model's own verify(); the model files run one after another'''
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # img = frame.to_ndarray(format="bgr24")
        # run object detection with the models
        i_warn_count = 0  #warning-label counter
        isverify = False
        for i in range(len(myModle_list)):  # iterate over the channel's linked algorithms; if the model count is not capped, multi-threaded execution may need considering.
            model = myModle_list[i]
            data = myModle_data[i]
            schedule = schedule_list[i]
            result = result_list[i]
            #check the arming plan -- are we inside the armed window?
            now = datetime.datetime.now()  # current date and time
            weekday = now.weekday()  # day of week: Monday is 0, Sunday is 6
            hour = now.hour
            result.pop(0)  # keep the result list a fixed length --- push out the oldest entry first
            if schedule[weekday][hour] == 1:  #outside the plan, skip verification and return the picture as-is
                # call the model to detect; model is loaded dynamically and the pass/fail criteria live inside the model ---- *********
                isverify = True
                detections, bwarn, warntext = model.verify(img, data,isdraw)  #****************important
                # the detection results need post-processing
                if bwarn:  # this pass produced a warning
                    #whether to alarm is decided later from the model's configured duration and proportion
                    # draw the warning text
                    cv2.putText(img, 'Intruder detected!', (50, (i_warn_count + 1) * 50),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                    i_warn_count += 1
                    result.append(1)  #verify: does mutating the list update it in place (by reference)?
                else:  #no warning is also recorded, so the ratio is computed over a uniform window
                    result.append(0)
            else:
                result.append(0)
        if not isverify:  #nothing was processed and the raw frame is returned; throttle the loop, reading faster is pointless.
            time.sleep(1.0/self.frame_rate)  #default rate cap under 30 fps --- with model inference a single model on CPU manages only about 12 fps anyway
        # convert the processed image back into a frame -- assumes all drawing on img above was cumulative.
        #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  #numpy.ndarray
        if isinstance(img, np.ndarray):
            new_frame = av.VideoFrame.from_ndarray(img, format="rgb24")  #AVFrame
            new_frame.pts = None  # ensure the pts attribute exists
            #self.logger.debug("img is an np.ndarray")
        else:
            #self.logger.error("img is not an np.ndarray")
            new_frame = None
        #return the processed picture in BGR (new_frame stays RGB)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        return new_frame,img
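The per-model result list behaves as a fixed-length sliding window: every verified frame pops the oldest flag and appends a new one, so sum(result)/len(result) is the share of recent frames that warned. A standalone sketch of that bookkeeping (window size chosen for illustration):

result = [0] * 25  # e.g. duration_time 5 s * verify_rate 5 fps

def record(result, warned):
    result.pop(0)                      # drop the oldest flag
    result.append(1 if warned else 0)  # append the newest
    return sum(result) / len(result)   # ratio later compared against the model's proportion

print(record(result, True))  # 0.04 after one warning frame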
    def dowork_thread(self,channel_id):
        '''one thread per channel; the linked models are verified in that one thread'''
        '''one thread per channel; the linked models are verified in that one thread, and the locals below belong to a single channel
        '''
        channel_data = self.verify_list[channel_id]  #one channel, one thread: [url,type,True,img_buffer,img,icount]
        cap = None
        #query the linked models --- prepare all base data before entering the loop
        myDBM = DBManager()
        myDBM.connection()
        strsql = (f"select t1.model_id,t1.check_area,t1.polygon ,t2.duration_time,t2.proportion,t2.model_path "
        myDBM.connect()
        strsql = (f"select t1.model_id,t1.check_area,t1.polygon ,t2.duration_time,t2.proportion,t2.model_path,t1.ID,"
                  f"t2.model_name "
                  f"from channel2model t1 left join model t2 on t1.model_id = t2.ID where t1.channel_id ={channel_id};")
        myModels = myDBM.do_select(strsql)
        #load the models --- consider capping how many algorithm modules one video channel may link --- many models in one thread cost time and drop the frame rate; alternatively run them concurrently in threads #?
        myModle_list = []  #list of model objects
        myModle_data = []  #detection parameters
        myModle_list = []  #list of model objects, one per model
        myModle_data = []  #detection parameters, one per model
        schedule_list = []  #arming schedule, one per model
        result_list = []  #detection-result window, one per model
        proportion_lsit = []  #configured warning proportion, one per model
        warn_save_count = []  #latest frame number already saved to video after each model's warning, one per model
        view_buffer = []
        ibuffer_count = 0
        last_img = None
        for model in myModels:
            #instantiate the module class derived from the base class
            m = self._import_model("",model[5])  #dynamic model loading -- to be completed
            m = self._import_model("",model[5])  #dynamic model loading
            if m:
                myModle_list.append(m)
                myModle_data.append(data := model)
                #model[6] -- c2m_id -- the channel's per-model schedule: 0 = Monday ... 6 = Sunday
                schedule_list.append(self.getschedule(model[6],myDBM))
                result = [0 for _ in range(model[3] * myCongif.get_data("verify_rate"))]  #result list sized duration_time x verify_rate
                result_list.append(result)
                proportion_lsit.append(model[4])
                warn_save_count.append(0)  #latest saved-to-video frame starts at 0
        # if not myModle_list:  #no model loaded successfully: output the raw picture
        #     self.logger.info(f"video channel {channel_id}: no algorithm module configured, ending the thread!")
        #     return  #no need to end; could simply return the unprocessed picture.
        #start the detection loop
        #print(mydata[0],mydata[1],mydata[2],mydata[3])  # url type tag img_buffer
        #[url,type,True,img_buffer]
        while self.verify_list[channel_id][2]:  #the tag is the run flag. The thread only reads it and the main thread updates it, at most one round late, so no lock should be needed. To be verified.
        iread_count = 0
        last_frame_time = time.time()
        #used when saving the warning video -- kept outside the loop to shave a little cost
        FPS = myCongif.get_data("verify_rate")  # video frame rate
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # mp4 encoding
        while channel_data[2]:  #the tag is the run flag. The thread only reads it and the main thread updates it, at most one round late, so no lock should be needed. To be verified.
            # frame-rate control
            current_time = time.time()
            elapsed_time = current_time - last_frame_time
            if elapsed_time < self.frame_interval:
                time.sleep(self.frame_interval - elapsed_time)  #sleep when under the frame interval
            last_frame_time = time.time()
            #*********grab the frame*************
            if not cap:  #video source not yet connected
                try:
                    cap = self._open_view(self.verify_list[channel_id][0],self.verify_list[channel_id][1])
                    cap = self._open_view(channel_data[0],channel_data[1])
                    iread_count = 0
                except:
                    self.logger.error("bad parameters, ending the thread")
                    return
            ret,frame = cap.read()
            if not ret:
                self.logger.warning("view disconnected. Reconnecting...")
                if iread_count > 60:
                    self.logger.warning(f"channel {channel_id}: view disconnected. Reconnecting...")
                    cap.release()
                    cap = None
                    time.sleep(myCongif.get_data("cap_sleep_time"))
                else:
                    iread_count += 1
                    time.sleep(1.0/20)  #20 fps is only a default reference; verification rates are usually slower
                continue  #no frame read, keep looping
            #image processing
            img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # img = frame.to_ndarray(format="bgr24")
            #check whether we are inside the arming window
            # run object detection with the models
            i_warn_count = 0
            for i in range(len(myModle_list)):  #with many models, consider separate threads for detection
                model = myModle_list[i]
                data = myModle_data[i]
                # run model detection
                detections,bwarn,warntext = model.verify(img,data)
                #the detection results need post-processing
                if bwarn:  # this pass produced a warning
                    # draw the warning text
                    cv2.putText(img, 'Intruder detected!', (50, (i_warn_count+1)*50),
                                cv2.FONT_HERSHEY_SIMPLEX, 1,(0, 0, 255), 2)
                    i_warn_count += 1
                    # save the warning? --- to be completed
                    self.save_warn()
                    # push the warning? --- to be completed
            iread_count = 0  #reset the video-frame counter
            #run inference on the frame -- with no model or outside working hours the raw picture comes back; should the rate be capped? -- verify() already sleeps
            new_frame,img = self.verify(frame,myModle_list,myModle_data,channel_id,schedule_list,result_list)
            #put the analysed picture into memory
            channel_data[3].append(img)
            channel_data[5] += 1  #advance the frame sequence number
            #print(self.verify_list[channel_id][5])
            if channel_data[5] > self.icout_max:
                channel_data[5] = 0  #reset the frame counter (the original read `channel_data = 0`, which would clobber the whole list)
            if len(channel_data[3]) > self.buflen:  # keep the buffer bounded --- the buffer doubles as recording storage
                channel_data[3].pop(0)
                #self.logger.debug("drop one frame!")
            channel_data[4] = new_frame  #always keep the latest frame
            #check result_list -- has the warning threshold been reached?
            for i in range(len(result_list)):
                result = result_list[i]
                proportion = proportion_lsit[i]
                count_one = float(sum(result))  #entries are 1/0, so the sum is the count of 1s
                ratio_of_ones = count_one / len(result)
                #self.logger.debug(result)
                if ratio_of_ones >= proportion:  #threshold reached, trigger the warning
                    model_name = myModle_data[i][7]
                    w_s_count = warn_save_count[i]
                    buffer_count = channel_data[5]
                    self.save_warn(model_name,w_s_count,buffer_count,channel_data[3],cap.width,cap.height,
                                   channel_id,myDBM,FPS,fourcc)
                    self.send_warn()
            # convert the detection image into a frame -- assumes all drawing on img above was cumulative.
            new_frame = av.VideoFrame.from_ndarray(img, format="rgb24")
            # put the analysed picture into memory ---
            #new_frame = cv2.resize(new_frame, (640, 480))  # lower the video resolution
            self.verify_list[channel_id][3].append(new_frame)
            if len(self.verify_list[channel_id]) > self.buflen:  # keep the buffer under 10 frames
                self.verify_list[channel_id][3].pop(0)
                self.logger.debug("drop one frame!")
                    #advance the saved frame sequence number
                    warn_save_count[i] = buffer_count
                    #clear the result window
                    for i in range(len(result)):
                        result[i] = 0
            # end_time = time.time()  # end time
            # print(f"Processing time: {end_time - start_time} seconds")
            # local display --- testing only
            # cv2.imshow('Frame', img)
            # if cv2.waitKey(1) & 0xFF == ord('q'):
            #     break
        #end of thread
        cap.release()
        cv2.destroyAllWindows()
    def save_warn(self,model_name,w_s_count,buffer_count,buffer,width,height,channnel_id,myDBM,FPS,fourcc):
        '''
        save the warning
        :param model_name: model name, e.g. personnel intrusion
        :param w_s_count: latest frame number already stored for this warning
        :param buffer_count: latest frame number in the current video buffer
        :param buffer: the video buffer
        :param width: width of the video frame
        :param height: height of the video frame
        :param channnel_id: video channel ID
        :return: ret result of the database insert
        '''
        now = datetime.datetime.now()  # current date and time
        current_time_str = now.strftime("%Y-%m-%d_%H-%M-%S")
        filename = f"{channnel_id}_{current_time_str}"
        #save the video
        video_writer = cv2.VideoWriter(f"model/warn/{filename}.mp4", fourcc, FPS, (width, height))
        if not video_writer.isOpened():
            print(f"Failed to open video writer for model/warn/{filename}.mp4")
            return False
        ilen = len(buffer)
        istart = 0
        iend = ilen
        if buffer_count < w_s_count or (buffer_count-w_s_count) > ilen:  #buffer_count has wrapped around
            #save the whole buffer as video
            istart = 0
        else:  #only take the slice that is new since the last save
            istart = ilen - (buffer_count-w_s_count)
        for i in range(istart,iend):
            video_writer.write(buffer[i])
        video_writer.release()
        #save the snapshot
        ret = cv2.imwrite(f"model/warn/{filename}.png",buffer[-1])
        if not ret:
            print("failed to save the snapshot")
            return False
        #save to the database
        strsql = (f"INSERT INTO warn (model_name ,video_path ,img_path ,creat_time,channel_id ) "
                  f"Values ('{model_name}','model/warn/{filename}.mp4','model/warn/{filename}.png',"
                  f"'{current_time_str}','{channnel_id}');")
        ret = myDBM.do_sql(strsql)
        return ret
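A worked example of the slicing above: with a 300-frame buffer, w_s_count = 950 and buffer_count = 1070, only the 120 frames that arrived since the last save are written; if the counter has wrapped, the whole buffer is saved.

# Sketch of the slice-start computation (values illustrative):
ilen, w_s_count, buffer_count = 300, 950, 1070
if buffer_count < w_s_count or (buffer_count - w_s_count) > ilen:
    istart = 0                                   # counter wrapped or gap too large: save everything
else:
    istart = ilen - (buffer_count - w_s_count)   # 300 - 120 = 180: write buffer[180:300]
print(istart)  # 180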
    def send_warn(self):
        '''push the warning'''
        pass

    def save_frame_to_video(self):
        '''save the buffered frames as a recording'''
        pass

    def start_work(self,channel_id=0):
        '''the algorithm models analyse the feed in the background in real time'''
        '''the algorithm models analyse the feed in the background in real time
        1. the arming switch must trigger channel stop and start
        2. adjusting the arming schedule also requires stopping and restarting the work
        '''
        if channel_id ==0:
            strsql = "select id,ulr,type from channel where is_work = 1;"  #must account for arming/disarming switch changes
            strsql = "select id,ulr,type from channel where is_work = 1;"  #run all channels
        else:
            strsql = f"select id,ulr,type from channel where is_work = 1 and id = {channel_id};"  #start the detection thread for a single channel
        datas = mDBM.do_select(strsql)
        for data in datas:
            img_buffer = []
            run_data = [data[1],data[2],True,img_buffer]
            self.verify_list[data[0]] = run_data  #duplicates need checking
            img = None
            icout = 0  #paired with img_buffer; records the sequence number of frames entering the buffer
            run_data = [data[1],data[2],True,img_buffer,img,icout]
            self.verify_list[data[0]] = run_data  #duplicates need checking #?
            th_chn = threading.Thread(target=self.dowork_thread, args=(data[0],))  #one thread per video channel; the thread handle is not kept for now
            th_chn.start()

    def stop_work(self,channel_id=0):
        '''stop worker threads: 0 stops all, a non-zero value stops the thread of that channel ID'''
        if channel_id ==0:  #stop all threads
            for data in self.verify_list.values():  #verify_list is a dict; iterate the values (the original iterated the keys)
                data[2] = False
@@ -161,7 +398,40 @@ class ModelManager:
            time.sleep(1)
            del self.verify_list[channel_id]

#print(f"Current working directory (ModelManager.py): {os.getcwd()}")
mMM = ModelManager()
def test():
    buffer = [np.zeros((480, 640, 3), dtype=np.uint8) for _ in range(60)]  # sample frame list
    FRAME_WIDTH = 640  # set to your image size
    FRAME_HEIGHT = 480  # set to your image size
    FPS = myCongif.get_data("verify_rate")  # your video frame rate
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # mp4 encoding
    now = datetime.datetime.now()  # current date and time
    current_time_str = now.strftime("%Y-%m-%d_%H-%M-%S")
    filename = f"{2}_{current_time_str}.mp4"
    #filename = "saved_video.mp4"
    print(filename)
    # save the video
    video_writer = cv2.VideoWriter(filename, fourcc, FPS, (FRAME_WIDTH, FRAME_HEIGHT))
    if not video_writer.isOpened():
        print(f"Failed to open video writer for {filename}")  #the original f-string was missing the braces around filename
        return False
    for frame in buffer:
        video_writer.write(frame)
    video_writer.release()
    # save the snapshot
    ret = cv2.imwrite("saved_frame.jpg", buffer[-1])
    if ret:
        print("snapshot saved")
    else:
        print("failed to save the snapshot")
        return False

if __name__ == "__main__":
    ModelManager().start_work()
    #mMM.start_work()
    # model = ModelManager()._import_model("", "../model/plugins/RYRQ/RYRQ_Model_ACL.py")
    # model.testRun()
    test()

model/ModelManager.py (2 changes)

@@ -198,7 +198,7 @@ def run_plugin(plgpath, target,copy_flag=True):
        return None

def test():
    run_plugin("plugins/RYRQ_Model.py","")
    run_plugin("plugins/RYRQ_Model_ACL.py","")

if __name__ == "__main__":
    test()

model/base_model/ascnedcl/det_utils.py (226 changes)

@@ -0,0 +1,226 @@
"""
Copyright 2022 Huawei Technologies Co., Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import cv2
import numpy as np
import torch
import torchvision


def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=False, scaleFill=False, scaleup=True):
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
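A quick usage sketch of letterbox (input assumed to be a BGR image from OpenCV): it returns the padded image plus the scale ratio and per-side padding, which scale_coords below needs to map boxes back onto the original picture.

img0 = cv2.imread("world_cup.jpg")                   # original frame
img, ratio, (dw, dh) = letterbox(img0, new_shape=(640, 640))
print(img.shape, ratio, dw, dh)                      # e.g. (640, 640, 3) plus the gray padding applied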
def non_max_suppression(
        prediction,
        conf_thres=0.25,
        iou_thres=0.45,
        classes=None,
        agnostic=False,
        multi_label=False,
        labels=(),
        max_det=300,
        nm=0,  # number of masks
):
    """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections

    Returns:
         list of detections, on (n,6) tensor per image [xyxy, conf, cls]
    """
    if isinstance(prediction, (list, tuple)):  # YOLOv5 model in validation model, output = (inference_out, loss_out)
        prediction = prediction[0]  # select only inference output

    device = prediction.device
    mps = 'mps' in device.type  # Apple MPS
    if mps:  # MPS not fully supported yet, convert tensors to CPU before NMS
        prediction = prediction.cpu()
    bs = prediction.shape[0]  # batch size
    nc = prediction.shape[2] - nm - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Checks
    assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'
    assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'

    # Settings
    # min_wh = 2  # (pixels) minimum box width and height
    max_wh = 7680  # (pixels) maximum box width and height
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 0.5 + 0.05 * bs  # seconds to quit after
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)

    t = time.time()
    mi = 5 + nc  # mask start index
    output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            lb = labels[xi]
            v = torch.zeros((len(lb), nc + nm + 5), device=x.device)
            v[:, :4] = lb[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(lb)), lb[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box/Mask
        box = xywh2xyxy(x[:, :4])  # center_x, center_y, width, height) to (x1, y1, x2, y2)
        mask = x[:, mi:]  # zero columns if no masks

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1)
        else:  # best class only
            conf, j = x[:, 5:mi].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence
        else:
            x = x[x[:, 4].argsort(descending=True)]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        output[xi] = x[i]
        if mps:
            output[xi] = output[xi].to(device)
        if (time.time() - t) > time_limit:
            print(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded')
            break  # time limit exceeded

    return output
def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y


def get_labels_from_txt(path):
    labels_dict = dict()
    with open(path) as f:
        for cat_id, label in enumerate(f.readlines()):
            labels_dict[cat_id] = label.strip()
    return labels_dict


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords


def clip_coords(boxes, shape):
    # Clip bounding xyxy bounding boxes to image shape (height, width)
    if isinstance(boxes, torch.Tensor):  # faster individually
        boxes[:, 0].clamp_(0, shape[1])  # x1
        boxes[:, 1].clamp_(0, shape[0])  # y1
        boxes[:, 2].clamp_(0, shape[1])  # x2
        boxes[:, 3].clamp_(0, shape[0])  # y2
    else:  # np.array (faster grouped)
        boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1])  # x1, x2
        boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0])  # y1, y2


def nms(box_out, conf_thres=0.4, iou_thres=0.5):
    try:
        boxout = non_max_suppression(box_out, conf_thres=conf_thres, iou_thres=iou_thres, multi_label=True)
    except:
        boxout = non_max_suppression(box_out, conf_thres=conf_thres, iou_thres=iou_thres)
    return boxout
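How these helpers chain together for the .om/.onnx path, as a hedged sketch: infer_session stands in for whatever inference call the ACL plugin uses and is not defined in this file. Letterbox the input, run the model, NMS the raw output, then map boxes back onto the original image.

img0 = cv2.imread("world_cup.jpg")
img, ratio, pad = letterbox(img0, new_shape=(640, 640))         # pad/resize for the network
blob = img.transpose(2, 0, 1)[None].astype(np.float32) / 255.0  # HWC -> NCHW, normalised
pred = torch.tensor(infer_session(blob))                        # hypothetical inference call
boxout = nms(pred, conf_thres=0.4, iou_thres=0.5)[0]            # (n, 6): xyxy, conf, cls
scale_coords(img.shape[:2], boxout[:, :4], img0.shape[:2], ratio_pad=(ratio, pad))
labels = get_labels_from_txt('coco_names.txt')
img0 = draw_bbox(boxout, img0, (0, 255, 0), 2, labels)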
def draw_bbox(bbox, img0, color, wt, names):
    det_result_str = ''
    for idx, class_id in enumerate(bbox[:, 5]):
        if float(bbox[idx][4]) < 0.05:  # was `float(bbox[idx][4] < float(0.05))`, which cast the comparison result instead of the score
            continue
        img0 = cv2.rectangle(img0, (int(bbox[idx][0]), int(bbox[idx][1])), (int(bbox[idx][2]), int(bbox[idx][3])), color, wt)
        img0 = cv2.putText(img0, str(idx) + ' ' + names[int(class_id)], (int(bbox[idx][0]), int(bbox[idx][1] + 16)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        img0 = cv2.putText(img0, '{:.4f}'.format(bbox[idx][4]), (int(bbox[idx][0]), int(bbox[idx][1] + 32)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
        det_result_str += '{} {} {} {} {} {}\n'.format(names[int(bbox[idx][5])], str(bbox[idx][4]), bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3])
    return img0

model/plugins/ModelBase.py (16 changes)

@@ -2,12 +2,15 @@ from abc import abstractmethod,ABC
from shapely.geometry import Point, Polygon
import numpy as np
import cv2
import ast
import platform

class ModelBase(ABC):
    def __init__(self):
        self.name = None  #the model's user configuration is looked up by name, so the model name must be unique  2024-6-18 -- the logic still needs refining and verification
        self.version = None
        self.model_type = None  # model type: 1-image classification, 2-object detection (yolov5), 3-segmentation, 4-keypoints
        self.system = platform.system()  #current OS platform
        self.do_map = {  # the plugin's entry functions --
            # POCType.POC: self.do_verify,
            # POCType.SNIFFER: self.do_sniffer,
@@ -18,7 +21,7 @@ class ModelBase(ABC):
        print("releasing resources")

    def draw_polygon(self, img, polygon_points,color=(0, 255, 0)):
        self.polygon = Polygon(polygon_points)
        self.polygon = Polygon(ast.literal_eval(polygon_points))
        points = np.array([self.polygon.exterior.coords], dtype=np.int32)
        cv2.polylines(img, points, isClosed=True, color=color, thickness=2)
@@ -30,19 +33,14 @@ class ModelBase(ABC):
        else:
            return False

    def save_warn(self):
        '''save the warning'''
        pass

    def send_warn(self):
        '''push the warning'''
        pass

    @abstractmethod
    def verify(self,image,data):
    def verify(self,image,data,isdraw=1):
        '''
        :param image: the image to verify
        :param data: select t1.model_id,t1.check_area,t1.polygon ,t2.duration_time,t2.proportion,t2.model_path
        :return: detections,bwarn,warntext
        :param isdraw: whether to draw the region outline: 0-no, 1-yes
        :return: detections,bwarn,warntext  bwarn: 0-no qualifying target detected, 1-qualifying target detected
        '''
        pass
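The ast.literal_eval call above implies the polygon column stores the vertices as a Python-literal string; a round-trip sketch under that assumption:

import ast
polygon_str = "[(100, 100), (500, 100), (500, 400), (100, 400)]"  # assumed storage format of the polygon column
points = ast.literal_eval(polygon_str)  # back to a list of (x, y) tuples for shapely's Polygon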

model/plugins/RYRQ/RYRQ_Model.py (50 changes)

@@ -0,0 +1,50 @@
import os.path
from model.plugins.ModelBase import ModelBase
from myutils.ConfigManager import myCongif
import torch
import cv2

class Model(ModelBase):
    def __init__(self,path):
        super().__init__()
        self.name = "人员入侵(ACL)"  #display name: personnel intrusion (ACL); kept in Chinese because the model is looked up by this string
        self.version = "V1.0"
        self.model_type = 2
        #locate the .pt model file -- the path relationship between the .py file and the model file must be fixed by convention -- uploaded models must unpack to the right layout
        dirpath,filename = os.path.split(path)
        model_file = os.path.join(dirpath,"yolov5s.pt")  #for now the model file must sit in the same directory as the .py file
        yolov5_path = myCongif.get_data("yolov5_path")
        print(f"************{model_file},{yolov5_path}")
        #instantiate the model
        self.model = torch.hub.load(yolov5_path, 'custom', path=model_file, source='local')

    def verify(self,image,data,isdraw=1):
        results = self.model(image)  # run model detection --- the interface still needs unifying
        detections = results.pandas().xyxy[0].to_dict(orient="records")
        bwarn = False
        warn_text = ""
        #if a detection region is configured, draw it first; draw_polygon creates the polygon object, so it must run before the in-region test.
        if data[1] == 1:
            self.draw_polygon(image,data[2],(0, 255, 0))
        # draw the detection results --- should also be wrapped in the class
        for det in detections:
            if det['name'] == 'person':  #the label is person
                x1, y1, x2, y2 = int(det['xmin']), int(det['ymin']), int(det['xmax']), int(det['ymax'])
                # draw the detection bounding box
                cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
                if data[1] == 1:  # a detection region is specified
                    x_center = (x1 + x2) / 2
                    y_center = (y1 + y2) / 2
                    #draw the centre point?
                    cv2.circle(image, (int(x_center), int(y_center)), 5, (0, 0, 255), -1)
                    #is the centre inside the region?
                    if not self.is_point_in_region((x_center, y_center)):
                        continue  #no warning -- keep going
                #raise the warning
                bwarn = True
                warn_text = "Intruder detected!"
        return detections,bwarn,warn_text

    def testRun(self):
        print("1111")

model/plugins/RYRQ_Model.py → model/plugins/RYRQ_ACL/RYRQ_Model_ACL.py (17 changes)

@@ -1,20 +1,24 @@
import os.path
from model.plugins.ModelBase import ModelBase
from myutils.ConfigManager import myCongif
from shapely.geometry import Point, Polygon
import torch
import cv2
import av
import numpy as np

class Model(ModelBase):
    def __init__(self,path):
        super().__init__()
        self.name = "人员入侵"  #display name: personnel intrusion
        self.version = "V1.0"
        self.model_type = 2
        #locate the .pt model file -- the path relationship between the .py file and the model file must be fixed by convention -- uploaded models must unpack to the right layout
        dirpath,filename = os.path.split(path)
        model_file = os.path.join(dirpath,"yolov5s.pt")  #for now the model file must sit in the same directory as the .py file
        yolov5_path = myCongif.get_data("yolov5_path")
        print(f"************{model_file},{yolov5_path}")
        #instantiate the model
        self.model = torch.hub.load(myCongif.get_data("yolov5_path"), 'custom', path=path, source='local')
        self.model = torch.hub.load(yolov5_path, 'custom', path=model_file, source='local')

    def verify(self,image,data):
    def verify(self,image,data,isdraw=1):
        results = self.model(image)  # run model detection --- the interface still needs unifying
        detections = results.pandas().xyxy[0].to_dict(orient="records")
        bwarn = False
@@ -40,3 +44,6 @@ class Model(ModelBase):
                bwarn = True
                warn_text = "Intruder detected!"
        return detections,bwarn,warn_text

    def testRun(self):
        print("1111")

model/plugins/RYRQ_ACL/coco_names.txt (80 changes)

@@ -0,0 +1,80 @@
person
bicycle
car
motorcycle
airplane
bus
train
truck
boat
traffic_light
fire_hydrant
stop_sign
parking_meter
bench
bird
cat
dog
horse
sheep
cow
elephant
bear
zebra
giraffe
backpack
umbrella
handbag
tie
suitcase
frisbee
skis
snowboard
sports_ball
kite
baseball_bat
baseball_glove
skateboard
surfboard
tennis_racket
bottle
wine_glass
cup
fork
knife
spoon
bowl
banana
apple
sandwich
orange
broccoli
carrot
hot_dog
pizza
donut
cake
chair
couch
potted_plant
bed
dining_table
toilet
tv
laptop
mouse
remote
keyboard
cell_phone
microwave
oven
toaster
sink
refrigerator
book
clock
vase
scissors
teddy_bear
hair_drier
toothbrush

model/plugins/RYRQ_ACL/world_cup.jpg (BIN)

Binary file not shown (new image, 64 KiB).

model/plugins/RYRQ_ACL/yolov5s.onnx (BIN)

Binary file not shown.

model/plugins/RYRQ_ACL/yolov5s_bs1.om (BIN)

Binary file not shown.

run.py (24 changes)

@@ -1,17 +1,31 @@
import platform
from myutils.ConfigManager import myCongif
from myutils.MyLogger_logger import LogHandler
from core.ViewManager import mVManager
from web import create_app
from core.ModelManager import mMM
import os
import platform
import shutil

#print(f"Current working directory (run.py): {os.getcwd()}")
print(f"Current working directory (run.py): {os.getcwd()}")

web = create_app()

if __name__ == '__main__':
    # logger = LogHandler().get_logger("main")
    # logger.debug("debug")
    # logger.info("info")
    mVManager.start_check_rtsp()
    system = platform.system()
    if system == "Windows":
        total, used, free = shutil.disk_usage("/")
    elif system == "Linux":
        """get the remaining storage space on Linux"""
        statvfs = os.statvfs(os.getcwd())
        free = statvfs.f_bavail * statvfs.f_frsize
    else:
        raise NotImplementedError(f"Unsupported operating system: {system}")
    print(free/(1024*1024))  #free is in bytes, so this prints mebibytes
    mMM.start_work()  # start processing on all channels
    mVManager.start_check_rtsp()  #thread that tracks whether the video sources are online
    web.run(debug=True,port=5001)

web/API/channel.py (2 changes)

@@ -200,7 +200,7 @@ async def channel_model_list():  #fetch the algorithm list

@api.route('/channel/model/linkmodel',methods=['POST'])
@login_required
async def channel_model_linkmodel():  #fetch the algorithm list -- shown when linking algorithms
async def channel_model_linkmodel():  #fetch the algorithm list -- shown when linking algorithms  #?linking an algorithm must initialize its arming schedule, and unlinking must delete it
    channel_id = (await request.form)['channel_id']
    model_list = json.loads((await request.form)['model_list'])
    #? channel_id and model_list still need to be validated against the database

web/API/viedo.py (338 changes)

@@ -1,115 +1,134 @@
import cv2
import asyncio
import av
import torch
import json
import time
import os
import sys
import numpy as np
from . import api
from quart import jsonify, request,render_template, Response,websocket
from quart import jsonify, request
from aiortc import RTCPeerConnection, RTCSessionDescription, VideoStreamTrack
from aiortc.contrib.media import MediaBlackhole, MediaPlayer, MediaRecorder
from core.ModelManager import mMM
from core.DBManager import mDBM
from myutils.ConfigManager import myCongif
from fractions import Fraction
from shapely.geometry import Point, Polygon
import threading
import logging

# logging configuration
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

rtsp_url = 'rtsp://127.0.0.1/live1'
#-------------------pull streams over WEBRTC
pcs = set()  #an empty set: unordered, no duplicates
#pcs = set()  #an empty set: unordered, no duplicates
pcs = {}

'''
--------------base data -- to be wrapped in functions later---------------
'''
#print(f"Current working directory (video.py): {os.getcwd()}")
# path of the custom model file
model_path = 'model/mode_test/yolov5s.pt'  # assumes the model file yolov5s.pt sits next to the executable
# local YOLOv5 repository path
repo_path = 'model/base_model/yolov5'

# open the camera
# def get_camera():
#     cap = cv2.VideoCapture(0)
#     if not cap.isOpened():
#         raise IOError("Cannot open webcam")
#     return cap
#
# cap = get_camera()
# last_image = None
# read_lock = threading.Lock()
# def camera_thread():
#     global cap, last_image
#     while True:
#         try:
#             ret, frame = cap.read()
#             if not ret:
#                 logger.warning("Camera disconnected. Reconnecting...")
#                 cap.release()
#                 cap = get_camera()
#                 continue
#             frame = cv2.resize(frame, (640, 480))  # lower the video resolution
#             with read_lock:
#                 last_image = frame
#             time.sleep(1.0/20)
#         except Exception as e:
#             logger.error(f"Error in camera thread: {e}")
#             continue

# load the custom model
model = torch.hub.load(repo_path, 'custom', path=model_path, source='local')
# define the monitored region (polygon vertices)
region_points = [(100, 100), (500, 100), (500, 400), (100, 400)]
polygon = Polygon(region_points)

#threading.Thread(target=camera_thread, daemon=True).start()
# open the camera
def get_camera():
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        raise IOError("Cannot open webcam")
    return cap

# cap = get_camera()

'''
---------------------transport--------------------------
'''
class VideoTransformTrack(VideoStreamTrack):
    kind = "video"
    def __init__(self,strUrl,itype=1):  #0-usb,1-RTSP,2-Hikvision SDK
    def __init__(self,cid,itype=1):  #0-usb,1-RTSP,2-Hikvision SDK
        super().__init__()
        self.cap = cv2.VideoCapture(strUrl)
        if not self.cap.isOpened():
            raise Exception("Unable to open video source")
        # Set desired resolution and frame rate
        # self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)  # Set width to 1280
        # self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)  # Set height to 720
        # self.cap.set(cv2.CAP_PROP_FPS, 30)  # Set frame rate to 30 FPS
        self.frame_rate = int(self.cap.get(cv2.CAP_PROP_FPS)) or 15  # Default to 15 if unable to get FPS
        self.channel_id = cid
        self.frame_rate = myCongif.get_data("frame_rate")
        self.time_base = Fraction(1, self.frame_rate)
        self.frame_count = 0
        self.start_time = time.time()

    async def recv(self):
        start_time = time.time()  # start time
        ret, frame = self.cap.read()
        if not ret:
            raise Exception("Unable to read frame")
        # # Convert frame to RGB
        # frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        #
        # # Convert to VideoFrame
        # video_frame = av.VideoFrame.from_ndarray(frame_rgb, format="rgb24")
        # # Set timestamp
        # video_frame.pts = int(self.cap.get(cv2.CAP_PROP_POS_FRAMES))
        # video_frame.time_base = self.time_base
        new_frame = None
        await asyncio.sleep(1.0 / self.frame_rate)  #was time.sleep, which would block the event loop
        while True:
            new_frame = mMM.verify_list[self.channel_id][4]
            #new_frame = av.VideoFrame.from_ndarray(img, format="rgb24")
            if new_frame is not None:
                break
            else:
                await asyncio.sleep(1.0 / self.frame_rate)  #was time.sleep, which would block the event loop
        # set timestamp and time base -- compute the real frame rate from elapsed time
        # elapsed_time = time.time() - self.start_time
        # self.frame_count += 1
        # actual_frame_rate = self.frame_count / elapsed_time
        # self.time_base = Fraction(1, int(actual_frame_rate))
        #set timestamp and frame rate
        self.frame_count += 1
        if self.frame_count > myCongif.get_data("RESET_INTERVAL"):
            self.frame_count = 0
        new_frame.pts = self.frame_count
        new_frame.time_base = self.time_base
        #print(f"Frame pts: {new_frame.pts}, time_base: {new_frame.time_base}")
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        #img = frame.to_ndarray(format="bgr24")
        # run object detection with Yolov5
        results = model(img)
        detections = results.pandas().xyxy[0].to_dict(orient="records")
        # draw the detection results
        for det in detections:
            if det['name'] == 'person':  # detect people only
                x1, y1, x2, y2 = int(det['xmin']), int(det['ymin']), int(det['xmax']), int(det['ymax'])
                cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(img, f"{det['name']} {det['confidence']:.2f}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9,
                            (0, 255, 0), 2)
        # convert the detection image into a frame
        new_frame = av.VideoFrame.from_ndarray(img, format="rgb24")
        new_frame.pts = int(self.cap.get(cv2.CAP_PROP_POS_FRAMES))
        new_frame.time_base = self.time_base
        end_time = time.time()  # end time
        print(f"Processing time: {end_time - start_time} seconds")
        # new_frame.pts = int(self.cap.get(cv2.CAP_PROP_POS_FRAMES))
        # new_frame.time_base = self.time_base
        # end_time = time.time()  # end time
        # print(f"Processing time: {end_time - start_time} seconds")
        return new_frame
@api.route('/offer', methods=['POST'])
async def offer():
    #receive the client's connection request
    params = await request.json
    url_type = params["url_type"]
    url = params["url"]
    channel_id = params["cid"]  #must be passed in
    itype = params["mytype"]
    element_id = params["element_id"]
    reStatus = 0
    reMsg = ""
    ret = False
    if itype == 1:
        #record the mapping between the channel and the page widget ID
        strsql = f"select * from channel where element_id = {element_id}"
        data = mDBM.do_select(strsql,1)
        if data:
            reStatus = 0
            reMsg = 'this widget is already bound to a video channel; please check the data!'
            return jsonify({'status': reStatus, 'msg': reMsg})
        strsql = f"update channel set element_id = {element_id} where ID = {channel_id};"
        ret = mDBM.do_sql(strsql)
        if ret == True:
            reStatus = 1
            reMsg = 'playback started, please wait!'
        else:
            reStatus = 0
            reMsg = 'failed to start playback, please contact technical support!'
    #take the client's SDP and produce the server-side SDP
    offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
    # # configure STUN servers
    # ice_servers = [{"urls": ["stun:stun.voipbuster.com"]}]
    # pc = RTCPeerConnection(configuration={"iceServers": ice_servers})
    pc = RTCPeerConnection()  #instantiate an RTC peer connection
    pcs.add(pc)  #add to the set; duplicates are ignored
    pcs[channel_id] = pc  #keyed by channel so individual streams can be closed later

    @pc.on("datachannel")
    def on_datachannel(channel):
@@ -128,10 +147,10 @@ async def offer():
    async def iconnectionstatechange():
        if pc.iceConnectionState == "failed":
            await pc.close()
            pcs.discard(pc)  #remove the object
            pcs.pop(channel_id, None)  #remove the object

    # add the video track
    video_track = VideoTransformTrack(url,url_type)
    video_track = VideoTransformTrack(channel_id)
    pc.addTrack(video_track)

    # @pc.on('track') --- stream.getTracks().forEach(track => pc.addTrack(track, stream)); presumably this is what triggers adding the camera track
@@ -150,163 +169,36 @@ async def offer():
    print("returning sdp")
    return jsonify({
        "sdp": pc.localDescription.sdp,
        "type": pc.localDescription.type
        "type": pc.localDescription.type,
        "status":reStatus,  #only meaningful when itype = 1
        "msg":reMsg
    })
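For reference, the JSON body the /offer endpoint expects from the browser, sketched as a Python dict with illustrative values:

# Example /offer request body (values illustrative):
payload = {
    "sdp": "v=0 ...",          # the browser's SDP offer
    "type": "offer",
    "cid": 3,                  # channel ID
    "mytype": 1,               # 1 = also bind the channel to the page widget
    "element_id": "u32_video", # page widget that will show the stream
}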
@api.route('/shutdown', methods=['POST'])
async def shutdown():
async def shutdown():  #this closes everything
    coros = [pc.close() for pc in pcs.values()]  #pcs is now a dict; close the connections, not the keys
    await asyncio.gather(*coros)
    pcs.clear()
    return 'Server shutting down...'
'''
----------------------implementation two: show the analysed picture on the page -- basic version
'''
def is_point_in_polygon(point, polygon):
    return polygon.contains(Point(point))

frame_buffer = []
def camera_thread():
    global cap, frame_buffer
    while True:
        try:
            ret, frame = cap.read()
            if not ret:
                logger.warning("Camera disconnected. Reconnecting...")
                cap.release()
                cap = get_camera()
                continue
            frame = cv2.resize(frame, (640, 480))  # lower the video resolution
            frame_buffer.append(frame)
            if len(frame_buffer) > 10:  # keep the buffer under 10 frames
                frame_buffer.pop(0)
                logger.debug("drop one frame!")
        except Exception as e:
            logger.error(f"Error in camera thread: {e}")
            continue

#threading.Thread(target=camera_thread, daemon=True).start()
async def generate_frames():
    global frame_buffer
    while True:
        frame = None
        buffer = None
        try:
            if frame_buffer:
                frame = frame_buffer.pop(0)
                # run inference
                results = model(frame)
                detections = results.pandas().xyxy[0]
                # draw the monitored region
                cv2.polylines(frame, [np.array(region_points, np.int32)], isClosed=True, color=(0, 255, 0), thickness=2)
                for _, row in detections.iterrows():
                    if row['name'] == 'person':
                        # centre of the person's detection box
                        x_center = (row['xmin'] + row['xmax']) / 2
                        y_center = (row['ymin'] + row['ymax']) / 2
                        if is_point_in_polygon((x_center, y_center), polygon):
                            # raise the alarm
                            cv2.putText(frame, 'Alert: Intrusion Detected', (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (0, 0, 255), 2)
                            #logger.info('Alert: Intrusion Detected')
                        # draw the detection box
                        cv2.rectangle(frame, (int(row['xmin']), int(row['ymin'])), (int(row['xmax']), int(row['ymax'])),
                                      (255, 0, 0), 2)
                        cv2.circle(frame, (int(x_center), int(y_center)), 5, (0, 0, 255), -1)
                ret, buffer = cv2.imencode('.jpg', frame)
                if not ret:
                    continue
                frame_bytes = buffer.tobytes()
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')
            await asyncio.sleep(0.2)
        except Exception as e:
            logger.error(f"Error in generate_frames: {e}")
        finally:
            if frame is not None:
                del frame
            if buffer is not None:
                del buffer

@api.route('/video_feed')
async def video_feed():
    #return jsonify(1)
    return Response(generate_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
'''
----------------------implementation three: show the analysed picture on the page -- WEBRTC
'''
class VideoTransformTrack_new(VideoStreamTrack):
    def __init__(self, track):
        super().__init__()
        self.track = track

    async def recv(self):
        frame = await self.track.recv()
        img = frame.to_ndarray(format="bgr24")
        # run object detection with Yolov5
        results = model(img)
        detections = results.pandas().xyxy[0].to_dict(orient="records")
        # draw the detection results
        for det in detections:
            if det['name'] == 'person':  # detect people only
                x1, y1, x2, y2 = int(det['xmin']), int(det['ymin']), int(det['xmax']), int(det['ymax'])
                cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(img, f"{det['name']} {det['confidence']:.2f}", (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9,
                            (0, 255, 0), 2)
        # convert the detection image into a frame
        new_frame = frame.from_ndarray(img, format="bgr24")
        return new_frame

pcs = set()

@api.websocket('/ws')
async def ws():
    params = await websocket.receive_json()
    offer = RTCSessionDescription(sdp=params['sdp'], type=params['type'])
    pc = RTCPeerConnection()
    pcs.add(pc)

    @pc.on('iceconnectionstatechange')
    async def on_iceconnectionstatechange():
        if pc.iceConnectionState == 'failed':
            await pc.close()
            pcs.discard(pc)

    @pc.on('track')
    def on_track(track):
        if track.kind == 'video':
            local_video = VideoTransformTrack(track)
            pc.addTrack(local_video)

    await pc.setRemoteDescription(offer)
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)

    await websocket.send(json.dumps({
        'sdp': pc.localDescription.sdp,
        'type': pc.localDescription.type
    }))
    try:
        while True:
            await asyncio.sleep(1)
    finally:
        await pc.close()
        pcs.discard(pc)
@api.route('/close_stream', methods=['POST'])
async def close_stream():
    channel_id = (await request.form)['channel_id']
    reStatus = 0
    reMsg = ""
    if channel_id in pcs:
        await pcs[channel_id].close()
        pcs.pop(channel_id, None)
        print(f'Stream {channel_id} closed.')
        #clear this channel's widget binding in the database
        strsql = f"update channel set element_id = NULL where ID={channel_id};"
        ret = mDBM.do_sql(strsql)
        if ret == True:
            reStatus = 1
            reMsg = 'picture closed successfully!'
        else:
            reStatus = 0
            reMsg = 'failed to delete the channel-widget binding, please contact technical support!'
    else:
        reMsg = "the channel ID is not in the system, please check!"
    return jsonify({'status': reStatus, 'msg': reMsg})

web/main/static/resources/scripts/aiortc-client-new.js (74 changes)

@@ -1,5 +1,34 @@
var pc = null;
function negotiate() {
var pc_list = {};
var channel_list = null;

document.addEventListener('DOMContentLoaded', async function() {
    console.log('DOM fully loaded and parsed');
    // request the extra data
    try {
        let response = await fetch('/api/channel/list');
        if (!response.ok) {
            throw new Error('Network response was not ok');
        }
        channel_list = await response.json();
        // walk each element's details
        channel_list.forEach(channel => {
            if(channel.element_id){ //"" is falsy, non-empty is truthy
                // console.log(`Area Name: ${channel.area_name}`);
                // console.log(`ID: ${channel.ID}`);
                // console.log(`Channel Name: ${channel.channel_name}`);
                // console.log(`URL: ${channel.url}`);
                // console.log(`Type: ${channel.type}`);
                // console.log(`Status: ${channel.status}`);
                // console.log(`Element ID: ${channel.element_id}`);
                start(channel.element_id,channel.ID,0)
            }
        });
    } catch (error) {
        console.error('Failed to fetch data:', error);
    }
});

function negotiate(pc,channel_id,itype,element_id) {
    pc.addTransceiver('video', { direction: 'recvonly' });
    return pc.createOffer().then((offer) => {
        return pc.setLocalDescription(offer);
@@ -24,6 +53,9 @@ function negotiate() {
        body: JSON.stringify({
            sdp: offer.sdp,
            type: offer.type,
            cid: channel_id,
            mytype: itype,
            element_id:element_id
        }),
        headers: {
            'Content-Type': 'application/json'
@@ -39,31 +71,43 @@ function negotiate() {
    });
}

function start() {
    console.log('starting!');
    var config = {
        sdpSemantics: 'unified-plan'
    };
    //config.iceServers = [{ urls: ['stun:stun.voiparound.com'] }];
function start(element_id,channel_id,itype) {// itype = 0: no DB change; 1: add a DB record
    console.log(`Element ID: ${element_id}`);
    console.log(`Channel ID: ${channel_id}`);
    pc = new RTCPeerConnection();
    pc_list[channel_id] = pc; //keep the pc
    pc.addEventListener('track', (evt) => {
        if (evt.track.kind == 'video') {
            //document.getElementById('video').srcObject = evt.streams[0];
            document.getElementById('u33_video').srcObject = evt.streams[0];
            document.getElementById(element_id).srcObject = evt.streams[0];
        }
    });
    negotiate();
    negotiate(pc,channel_id,itype,element_id);
}

function showView(){
    console.log('status:',document.getElementById('u33_video').paused)
    if (document.getElementById('u33_video').paused) {
        document.getElementById('u33_video').play().catch(error => {
    channel_list.forEach(channel => {
        if(channel.element_id){ //"" is falsy, non-empty is truthy
            console.log('status:',document.getElementById(channel.element_id).paused)
            if (document.getElementById(channel.element_id).paused) {
                document.getElementById(channel.element_id).play().catch(error => {
                    console.error('Error playing video:', error);
                });
            }
        }
    });
}

function getChannellist(){
async function closestream(channel_id){
    let response = await fetch('/close_stream', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ channel_id: channel_id })
    });
    let data = await response.json();
    console.log(data);
    if(pc_list[channel_id]){
        pc_list[channel_id].close();
        delete pc_list[channel_id];
    }
}

web/main/templates/实时预览.html (71 changes)

@@ -171,55 +171,23 @@
          <p></p>
        </div>
      </div>

      <script>
        document.addEventListener('DOMContentLoaded', async function() {
            console.log('DOM fully loaded and parsed');
            // request the extra data
            try {
                let response = await fetch('/api/channel/list');
                if (!response.ok) {
                    throw new Error('Network response was not ok');
                }
                let channelList = await response.json();
                // walk each element's details
                channelList.forEach(channel => {
                    if(channel.element_id){ //"" is falsy, non-empty is truthy
                        //show the analysed picture ---------? fall back to the live picture when no algorithm is configured --- with no live picture either, show a black screen and a hint
                        console.log(`Area Name: ${channel.area_name}`);
                        console.log(`ID: ${channel.ID}`);
                        console.log(`Channel Name: ${channel.channel_name}`);
                        console.log(`URL: ${channel.url}`);
                        console.log(`Type: ${channel.type}`);
                        console.log(`Status: ${channel.status}`);
                        console.log(`Element ID: ${channel.element_id}`);
                    }
                });
            } catch (error) {
                console.error('Failed to fetch data:', error);
            }
        });
<!--        window.onload = function() {-->
<!--            // code to run once the page has loaded-->
<!--            start();-->
<!--            var intervalId = setInterval(showView, 1000);-->
<!--            setTimeout(function() {-->
<!--                clearInterval(intervalId); // stop the timer loop-->
<!--                console.log("Interval stopped after 10 seconds");-->
<!--            }, 10000);-->
<!--        }-->
      </script>
      <script src="/resources/scripts/aiortc-client-new.js">
        console.log('when a src attribute is given, the browser loads and runs that file and ignores anything inside the tag, so this line never executes');
      </script>
      <script>
        window.onload = function() {
            // code to run once the page has loaded
            var intervalId = setInterval(showView, 1000);
            setTimeout(function() {
                clearInterval(intervalId); // stop the timer loop
                console.log("Interval stopped after 10 seconds");
            }, 10000);
        }
      </script>

      <!-- Unnamed (Image) -->
      <div id="u32" class="ax_default image">
        <img id="u32_img" alt="Video Stream" src=""/>
        <div id="u32_text" class="text " style="display:none; visibility: hidden">
          <p></p>
        </div>
        <video id="u32_video" autoplay="true" playsinline="true"></video>
      </div>

      <!-- Unnamed (Image) -->
@@ -229,26 +197,17 @@
      <!-- Unnamed (Image) -->
      <div id="u34" class="ax_default image">
        <img id="u34_img" class="img " src="images/实时预览/u32.png"/>
        <div id="u34_text" class="text " style="display:none; visibility: hidden">
          <p></p>
        </div>
        <video id="u34_video" autoplay="true" playsinline="true"></video>
      </div>

      <!-- Unnamed (Image) -->
      <div id="u35" class="ax_default image">
        <img id="u35_img" class="img " src="images/实时预览/u32.png"/>
        <div id="u35_text" class="text " style="display:none; visibility: hidden">
          <p></p>
        </div>
        <video id="u35_video" autoplay="true" playsinline="true"></video>
      </div>

      <!-- Unnamed (Image) -->
      <div id="u36" class="ax_default image">
        <img id="u36_img" class="img " src="images/实时预览/u32.png"/>
        <div id="u36_text" class="text " style="display:none; visibility: hidden">
          <p></p>
        </div>
        <video id="u36_video" autoplay="true" playsinline="true"></video>
      </div>

      <!-- Unnamed (Image) -->

zfbox.db (BIN)

Binary file not shown.