import cv2
import asyncio
import av
import torch
import json
import time
import os
import sys
import numpy as np
from . import api
from quart import jsonify, request, render_template, Response, websocket
from aiortc import RTCPeerConnection, RTCSessionDescription, VideoStreamTrack
from aiortc.contrib.media import MediaBlackhole, MediaPlayer, MediaRecorder
from fractions import Fraction
from shapely.geometry import Point, Polygon
import threading
import logging
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

rtsp_url = 'rtsp://127.0.0.1/live1'

# ------------------- WebRTC-based stream pulling -------------------
pcs = set()  # active peer connections (a set: unordered, no duplicates)
'''
-------------- Basic setup (to be wrapped into functions later) --------------
'''
# print(f"Current working directory (video.py): {os.getcwd()}")
# Path to the custom model weights
model_path = 'model/mode_test/yolov5s.pt'  # assumes yolov5s.pt sits in this directory relative to the working directory
# Path to the local YOLOv5 repository
repo_path = 'model/base_model/yolov5'
# Load the custom model
model = torch.hub.load(repo_path, 'custom', path=model_path, source='local')
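# Note (added): with source='local', torch.hub.load() treats repo_path as a
# local checkout and looks for its hubconf.py; the YOLOv5 repository ships one
# that exposes the 'custom' entry point used here.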
# Define the monitored region (polygon vertices)
region_points = [(100, 100), (500, 100), (500, 400), (100, 400)]
polygon = Polygon(region_points)
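# Illustrative sanity check of the region geometry (added; safe to remove).
# Intrusion below is decided by Polygon.contains(Point(...)).
assert polygon.contains(Point(300, 250))      # centre of the rectangle: inside
assert not polygon.contains(Point(50, 50))    # outside the region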
# Open the camera
def get_camera():
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        raise IOError("Cannot open webcam")
    return cap

# cap = get_camera()
'''
--------------------- Transport ---------------------
'''
class VideoTransformTrack(VideoStreamTrack):
    kind = "video"

    def __init__(self, strUrl, itype=1):  # itype: 0 = USB, 1 = RTSP, 2 = Hikvision SDK
        super().__init__()
        self.cap = cv2.VideoCapture(strUrl)
        if not self.cap.isOpened():
            raise Exception("Unable to open video source")
        # Set desired resolution and frame rate
        # self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)   # Set width to 1280
        # self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)   # Set height to 720
        # self.cap.set(cv2.CAP_PROP_FPS, 30)             # Set frame rate to 30 FPS
        self.frame_rate = int(self.cap.get(cv2.CAP_PROP_FPS)) or 15  # fall back to 15 if FPS is unavailable
        self.time_base = Fraction(1, self.frame_rate)

    async def recv(self):
        start_time = time.time()
        ret, frame = self.cap.read()
        if not ret:
            raise Exception("Unable to read frame")
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # Run YOLOv5 object detection
        results = model(img)
        detections = results.pandas().xyxy[0].to_dict(orient="records")
        # Draw the detection results
        for det in detections:
            if det['name'] == 'person':  # only draw person detections
                x1, y1, x2, y2 = int(det['xmin']), int(det['ymin']), int(det['xmax']), int(det['ymax'])
                cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(img, f"{det['name']} {det['confidence']:.2f}", (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
        # Wrap the annotated image in a VideoFrame
        new_frame = av.VideoFrame.from_ndarray(img, format="rgb24")
        new_frame.pts = int(self.cap.get(cv2.CAP_PROP_POS_FRAMES))
        new_frame.time_base = self.time_base
        end_time = time.time()
        print(f"Processing time: {end_time - start_time} seconds")
        return new_frame
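# Note (added): cv2.VideoCapture.read() is a blocking call, so invoking it
# directly inside recv() stalls the event loop. A non-blocking sketch, if
# desired, would offload the read to a worker thread:
#     ret, frame = await asyncio.get_event_loop().run_in_executor(None, self.cap.read)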
@api.route('/offer', methods=['POST'])
async def offer():
    # Receive the client's connection request
    params = await request.json
    url_type = params["url_type"]
    url = params["url"]
    # Parse the SDP sent by the client so the server can generate its own
    offer = RTCSessionDescription(sdp=params["sdp"], type=params["type"])
    # # Configure a STUN server
    # ice_servers = [{"urls": ["stun:stun.voipbuster.com"]}]
    # pc = RTCPeerConnection(configuration={"iceServers": ice_servers})
    pc = RTCPeerConnection()  # create a peer connection
    pcs.add(pc)  # track it; adding an existing member is a no-op

    @pc.on("datachannel")
    def on_datachannel(channel):
        @channel.on("message")
        def on_message(message):
            print(f'Message received: {message}')
            # Handle the received data
            if message == 'pause':
                # Perform the pause action
                pass
            elif message == 'resume':
                # Perform the resume action
                pass

    # Monitor the RTC connection state
    @pc.on("iceconnectionstatechange")  # fires whenever the ICE connection state changes
    async def on_iceconnectionstatechange():
        if pc.iceConnectionState == "failed":
            await pc.close()
            pcs.discard(pc)  # remove the connection

    # Add the video track
    video_track = VideoTransformTrack(url, url_type)
    pc.addTrack(video_track)
    # @pc.on('track') --- stream.getTracks().forEach(track => pc.addTrack(track, stream)); presumably this is what triggers adding the camera track
    # def on_track(track):
    #     if track.kind == 'video':
    #         local_video = VideoTransformTrack(track)
    #         pc.addTrack(local_video)

    # Apply the client SDP
    await pc.setRemoteDescription(offer)
    # Generate the local SDP
    answer = await pc.createAnswer()
    # Apply the local SDP
    await pc.setLocalDescription(answer)
    print("Returning SDP")
    return jsonify({
        "sdp": pc.localDescription.sdp,
        "type": pc.localDescription.type
    })
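# --- Hypothetical test client (added sketch, not part of the app). ---
# Shows one way to exercise /offer from Python; the base URL/port and the use
# of aiohttp are assumptions. Run with: asyncio.run(run_offer_client())
async def run_offer_client():
    import aiohttp  # assumed available; any HTTP client would do
    pc = RTCPeerConnection()
    pc.addTransceiver("video", direction="recvonly")  # we only receive video
    await pc.setLocalDescription(await pc.createOffer())
    async with aiohttp.ClientSession() as session:
        async with session.post("http://127.0.0.1:5000/offer", json={
            "sdp": pc.localDescription.sdp,
            "type": pc.localDescription.type,
            "url": rtsp_url,
            "url_type": 1,
        }) as resp:
            answer = await resp.json()
    await pc.setRemoteDescription(
        RTCSessionDescription(sdp=answer["sdp"], type=answer["type"]))
    return pc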
@api.route('/shutdown', methods=['POST'])
async def shutdown():
    coros = [pc.close() for pc in pcs]
    await asyncio.gather(*coros)
    pcs.clear()
    return 'Server shutting down...'
'''
---------------- Approach 2: render the analysis feed in the browser (basic MJPEG version) ----------------
'''
def is_point_in_polygon(point, polygon):
    return polygon.contains(Point(point))
frame_buffer = []

def camera_thread():
    # Requires the module-level `cap = get_camera()` above to be uncommented.
    global cap, frame_buffer
    while True:
        try:
            ret, frame = cap.read()
            if not ret:
                logger.warning("Camera disconnected. Reconnecting...")
                cap.release()
                cap = get_camera()
                continue
            frame = cv2.resize(frame, (640, 480))  # reduce the video resolution
            frame_buffer.append(frame)
            if len(frame_buffer) > 10:  # keep the buffer at no more than 10 frames
                frame_buffer.pop(0)
                logger.debug("drop one frame!")
        except Exception as e:
            logger.error(f"Error in camera thread: {e}")
            continue

# threading.Thread(target=camera_thread, daemon=True).start()
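# Note (added): the list-based frame_buffer relies on CPython's GIL for thread
# safety and trims itself manually. A bounded deque is a simpler alternative
# (a sketch, not wired in):
#     from collections import deque
#     frame_buffer = deque(maxlen=10)  # old frames are discarded automatically
# with frame_buffer.pop(0) then becoming frame_buffer.popleft().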
async def generate_frames():
    global frame_buffer
    while True:
        frame = None
        buffer = None
        try:
            if frame_buffer:
                frame = frame_buffer.pop(0)
                # Run inference
                results = model(frame)
                detections = results.pandas().xyxy[0]
                # Draw the monitored region
                cv2.polylines(frame, [np.array(region_points, np.int32)], isClosed=True, color=(0, 255, 0), thickness=2)
                for _, row in detections.iterrows():
                    if row['name'] == 'person':
                        # Centre point of the person's bounding box
                        x_center = (row['xmin'] + row['xmax']) / 2
                        y_center = (row['ymin'] + row['ymax']) / 2
                        if is_point_in_polygon((x_center, y_center), polygon):
                            # Raise the alert
                            cv2.putText(frame, 'Alert: Intrusion Detected', (50, 50),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
                            # logger.info('Alert: Intrusion Detected')
                        # Draw the detection box
                        cv2.rectangle(frame, (int(row['xmin']), int(row['ymin'])),
                                      (int(row['xmax']), int(row['ymax'])), (255, 0, 0), 2)
                        cv2.circle(frame, (int(x_center), int(y_center)), 5, (0, 0, 255), -1)
                ret, buffer = cv2.imencode('.jpg', frame)
                if not ret:
                    continue
                frame_bytes = buffer.tobytes()
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')
            await asyncio.sleep(0.2)
        except Exception as e:
            logger.error(f"Error in generate_frames: {e}")
        finally:
            if frame is not None:
                del frame
            if buffer is not None:
                del buffer
@api.route('/video_feed')
async def video_feed():
    # return jsonify(1)
    return Response(generate_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
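# Note (added): browsers render this multipart MJPEG stream directly, e.g. via
# an <img> tag whose src attribute points at /video_feed.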
'''
---------------- Approach 3: render the analysis feed in the browser via WebRTC ----------------
'''
class VideoTransformTrack_new(VideoStreamTrack):
    def __init__(self, track):
        super().__init__()
        self.track = track

    async def recv(self):
        frame = await self.track.recv()
        img = frame.to_ndarray(format="bgr24")
        # Run YOLOv5 object detection
        results = model(img)
        detections = results.pandas().xyxy[0].to_dict(orient="records")
        # Draw the detection results
        for det in detections:
            if det['name'] == 'person':  # only draw person detections
                x1, y1, x2, y2 = int(det['xmin']), int(det['ymin']), int(det['xmax']), int(det['ymax'])
                cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(img, f"{det['name']} {det['confidence']:.2f}", (x1, y1 - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
        # Wrap the annotated image in a VideoFrame, preserving the source timing
        new_frame = av.VideoFrame.from_ndarray(img, format="bgr24")
        new_frame.pts = frame.pts
        new_frame.time_base = frame.time_base
        return new_frame
@api.websocket('/ws')
async def ws():
    params = await websocket.receive_json()
    offer = RTCSessionDescription(sdp=params['sdp'], type=params['type'])
    pc = RTCPeerConnection()
    pcs.add(pc)

    @pc.on('iceconnectionstatechange')
    async def on_iceconnectionstatechange():
        if pc.iceConnectionState == 'failed':
            await pc.close()
            pcs.discard(pc)

    @pc.on('track')
    def on_track(track):
        if track.kind == 'video':
            local_video = VideoTransformTrack_new(track)  # wrap the incoming track with the analysis track
            pc.addTrack(local_video)

    await pc.setRemoteDescription(offer)
    answer = await pc.createAnswer()
    await pc.setLocalDescription(answer)
    await websocket.send(json.dumps({
        'sdp': pc.localDescription.sdp,
        'type': pc.localDescription.type
    }))
    try:
        # Keep the websocket open while the peer connection is in use
        while True:
            await asyncio.sleep(1)
    finally:
        await pc.close()
        pcs.discard(pc)