You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 
 
 

682 lines
30 KiB

import time
import copy
import importlib.util
import datetime
import time
import threading
import cv2
import ffmpeg
import subprocess
import select
import multiprocessing
from collections import deque
from myutils.MyLogger_logger import LogHandler
from core.CapManager import mCap
from core.ACLModelManager import ACLModeManger
from model.plugins.ModelBase import ModelBase
from core.WarnManager import WarnData
from core.DataStruct import ModelinData,ModeloutData
from myutils.MyDeque import MyDeque
from myutils.ConfigManager import myCongif
from myutils.mydvpp import bgr_to_yuv420
class ChannelData:
    """Per-channel runtime state: video capture, model-inference threads,
    frame buffers and alarm publishing for one video channel."""
    def __init__(self,channel_id,deque_length,icount_max,warnM):
        """
        :param channel_id: ID of this video channel
        :param deque_length: requested recording-buffer length
            (NOTE(review): unused — the buffer size actually comes from the
            config key "buffer_len"; confirm before removing)
        :param icount_max: upper bound of the frame sequence counter before it wraps to 0
        :param warnM: alarm-manager object (MQ) that consumes WarnData
        """
        self.logger = LogHandler().get_logger("ChannelDat")
        self.model_platform = myCongif.get_data("model_platform")
        self.channel_id = channel_id  # ID of this channel
        self.warnM = warnM  # alarm-manager thread object -- MQ
        # persistent ffmpeg subprocess used to H.264-encode raw frames
        self.ffprocess = self.start_h264_encoder(myCongif.get_data("mywidth"),myCongif.get_data("myheight"))
        # --- video capture ---
        self.cap = None  # capture object for this channel (cap or vcf object)
        self.source = None  # RTSP url / source path
        self.frame_rate = myCongif.get_data("frame_rate")
        self.frame_interval = 1.0 / int(myCongif.get_data("verify_rate"))  # seconds between verified frames
        # --- model / worker threads ---
        self.model = None  # model object -- one channel is bound to exactly one model
        self.work_th = None  # worker-thread handle for this channel
        self.b_model = False  # True while a model thread is running
        self.bool_run = True  # run flag polled by the worker threads
        self.lock = threading.RLock()  # guards shared state across threads
        self.icount_max = icount_max  # frame sequence number upper bound
        self.max_len = myCongif.get_data("buffer_len")
        self.deque_frame = deque(maxlen=self.max_len)  # video ring buffer used to save alarm recordings
        self.last_frame = None  # last encoded picture
        #self.frame_queue = queue.Queue(maxsize=1)
        self.frame_queue = MyDeque(10)  # MQ of analysed frames for web display
        self.counter = 0  # frame sequence number -- used when saving alarm recordings
        # --- standalone model-thread mode ---
        self.per_th = None  # pre-processing thread handle
        self.per_status= False  # pre-processing thread status
        self.post_th = None  # post-processing thread handle
        self.post_status = False  # post-processing thread status
        self.model_node= None  # model node object -- holds the in/out MQs
        self.out_mq = MyDeque(30)  # MQ of inference results awaiting post-processing
        # JPEG compression settings; config value is the quality (0-100)
        self.encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), myCongif.get_data("encode_param")]
#append one frame to the recording ring buffer
def add_deque(self, value):
    """Append *value* to the bounded recording buffer.

    ``self.deque_frame`` is created with ``maxlen``, so the deque itself
    evicts the oldest frame once full; the previous explicit
    popleft/del dance was redundant (CPython frees the evicted frame as
    soon as its refcount drops).
    """
    self.deque_frame.append(value)
#take a snapshot of the recording buffer
def copy_deque(self):
    """Return a deep copy of the recording buffer (used when saving alarm clips)."""
    snapshot = copy.deepcopy(self.deque_frame)
    return snapshot
#fetch the newest frame for web display
def get_last_frame(self):
    """Return the newest displayable frame as encoded bytes.

    When a model thread is running, the analysed frame is popped from the
    frame MQ; otherwise a raw frame is read from the capture object and
    encoded. Returns None when no frame is available.
    """
    if self.b_model:
        return self.frame_queue.mypopleft()
    # model not running: serve the raw picture straight from the capture object
    if not self.cap:
        return None
    ok, raw = self.cap.read()  # after the first frame there should always be a picture
    if not ok:
        self.logger.debug(f"{self.channel_id}--web--获取原画失败,队列空")
        return None
    _, encoded = self._encode_frame(raw)
    return encoded
def update_last_frame(self,buffer):
    """Publish a newly encoded frame onto the display MQ.

    Empty or None buffers are silently ignored.
    """
    if not buffer:
        return
    self.frame_queue.myappend(buffer)
#------------H.264 encoding helpers---------------
def start_h264_encoder(self,width, height):
    """Spawn a long-lived ffmpeg process that reads raw YUV420P frames on
    stdin and writes an H.264 FLV stream to stdout.

    Width/height are fixed for the life of the process, so one encoder per
    process is enough (the codec could also be h264_ascend on Ascend HW).
    """
    cmd = [
        'ffmpeg',
        '-f', 'rawvideo',           # raw frames arrive on stdin
        '-pix_fmt', 'yuv420p',
        '-s', f'{width}x{height}',
        '-i', '-',                  # take input from stdin
        '-an',                      # no audio
        '-vcodec', 'libx264',
        '-f', 'flv',
        '-',                        # stream to stdout
    ]
    return subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
def close_h264_encoder(self):
    """Signal EOF to ffmpeg by closing its stdin, then wait for it to exit."""
    encoder = self.ffprocess
    encoder.stdin.close()
    encoder.wait()
def encode_frame_h264_bak(self,frame):
    """Deprecated one-shot encoder: spawns a fresh ffmpeg (via the
    ffmpeg-python wrapper) per frame, feeds the BGR frame on stdin and
    returns the FLV/H.264 bytes.

    Superseded by the persistent-process ``encode_frame_h264``; kept as a
    backup. Returns None on any failure (errors are printed, not raised).
    """
    try:
        process = (
            ffmpeg
            .input('pipe:', format='rawvideo', pix_fmt='bgr24', s=f'{frame.shape[1]}x{frame.shape[0]}')
            .output('pipe:', format='flv',vcodec='h264_ascend')  # Ascend hardware encoder
            .run_async(pipe_stdin=True, pipe_stdout=True, pipe_stderr=True)
        )
        out, err = process.communicate(input=frame.tobytes())
        if process.returncode != 0:
            raise RuntimeError(f"FFmpeg encoding failed: {err.decode('utf-8')}")
        return out
    except Exception as e:
        print(f"Error during frame encoding: {e}")
        return None
def encode_frame_h264(self, frame,timeout=1):
    """Encode one BGR frame to H.264 through the persistent ffmpeg process.

    The frame is converted to YUV420P (I420), written to ffmpeg's stdin,
    and the encoded output is drained from stdout with select() until
    stdout reports EOF or *timeout* seconds elapse.

    :param frame: BGR image (ndarray) matching the size given to start_h264_encoder
    :param timeout: per-select wait and overall drain timeout, in seconds
    :return: encoded bytes, or None on failure inside the try block
        (NOTE: the "process exited" RuntimeError below is raised BEFORE the
        try block and therefore propagates to the caller)
    """
    yuv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV_I420)
    if self.ffprocess.poll() is not None:
        raise RuntimeError("FFmpeg process has exited unexpectedly.")
    # Write frame to stdin of the FFmpeg process
    try:
        self.ffprocess.stdin.write(yuv_frame.tobytes())
        self.ffprocess.stdin.flush()
        # Capture the encoded output
        buffer_size = 1024 * 1  # Adjust this based on the size of the encoded frame
        encoded_frame = bytearray()
        start_time = time.time()
        while True:
            # Use select to handle non-blocking reads from both stdout and stderr
            #ready_to_read, _, _ = select.select([self.ffprocess.stdout, self.ffprocess.stderr], [], [], timeout)
            ready_to_read, _, _ = select.select([self.ffprocess.stdout.fileno(), self.ffprocess.stderr.fileno()],
                                                [], [], timeout)
            # Check if there's data in stdout (encoded frame)
            if self.ffprocess.stdout.fileno() in ready_to_read:
                chunk = self.ffprocess.stdout.read(buffer_size)
                if chunk:
                    encoded_frame.extend(chunk)
                else:
                    break  # No more data to read from stdout
            # Check if there's an error in stderr
            if self.ffprocess.stderr.fileno() in ready_to_read:
                error = self.ffprocess.stderr.read(buffer_size).decode('utf-8')
                raise RuntimeError(f"FFmpeg error: {error}")
            # Timeout handling to avoid infinite blocking
            if time.time() - start_time > timeout:
                raise RuntimeError("FFmpeg encoding timed out.")
        if not encoded_frame:
            raise RuntimeError("No encoded data received from FFmpeg.")
        # Optional: Check for errors in stderr
        # stderr_output = self.process.stderr.read()
        # if "error" in stderr_output.lower():
        #     raise RuntimeError(f"FFmpeg error: {stderr_output}")
        return bytes(encoded_frame)
    except Exception as e:
        print(f"Error during frame encoding: {e}")
        return None
def _frame_pre_work(self, frame):
    """Stamp presentation info onto a captured frame, modifying it in place.

    Currently this draws the current wall-clock time in the top-left
    corner. Returns the same frame object.
    """
    # ---------- timestamp overlay -------------
    now_text = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    cv2.putText(
        frame,
        now_text,
        (10, 30),                 # top-left corner
        cv2.FONT_HERSHEY_SIMPLEX,
        1,                        # font scale
        (255, 255, 255),          # white
        2,                        # thickness
        cv2.LINE_AA,
    )
    return frame
def _encode_frame(self,frame,itype=0):
ret = False
buffer_bgr_webp = None
if frame is not None:
if itype == 0: #jpg
self._frame_pre_work(frame) #对图片添加一些信息,目前是添加时间戳(model是入队列的时间,cap是发送前取帧的时间)
ret, frame_bgr_webp = cv2.imencode('.jpg', frame, self.encode_param)
if ret:
buffer_bgr_webp = frame_bgr_webp.tobytes()
elif itype == 1: #H264
try:
buffer_bgr_webp = self.encode_frame_h264(frame)
ret = True
except Exception as e:
print(e)
else:
print("错误的参数!!")
return ret,buffer_bgr_webp
#frame counter is only touched from one thread, so no lock is needed
def increment_counter(self):
    """Advance the frame sequence number, wrapping to 0 once it exceeds icount_max."""
    bumped = self.counter + 1
    self.counter = 0 if bumped > self.icount_max else bumped
def get_counter(self):
    """Current frame sequence number."""
    return self.counter
def _start_cap_th(self,source,type=1):
    """(Re)start the capture thread for this channel.

    type: 0 = USB camera, 1 = RTSP, 2 = HikVision SDK.
    Any existing capture on the previous source is stopped first.
    Returns True when a capture object was obtained.
    """
    if self.cap:
        # tear down the previous capture before switching sources
        mCap.stop_get_video(self.source)
        self.cap = None
    self.cap = mCap.start_get_video(source,type)
    self.source = source
    return bool(self.cap)
def _stop_cap_th(self):
'''停止cap采集线程
重要约束:停止cap线程前,必须先停止model线程
'''
if self.b_model:
self.logger.error("停止采集线程前,请先停止model线程")
return False
else:
if self.cap:
mCap.stop_get_video(self.source)
self.cap = None
return True #一般不会没有cap
def _pre_work_th(self,schedule):
    """Pre-processing thread (standalone-model mode); one per channel.

    Reads frames from the capture object at the configured verify rate,
    runs the model's prework (resize/pad) and enqueues ModelinData for the
    model process. Outside the armed schedule the raw frame is JPEG-encoded
    and pushed straight to the display queue.

    :param schedule: 7x24 matrix; schedule[weekday][hour] == 1 means armed
    """
    if not self.cap:
        self.logger.error("采集线程未正常启动,不进行工作")
        return
    # wait for the model process to report a status (0 = still starting)
    while self.model_node.m_p_status.value == 0:
        time.sleep(1)
    if self.model_node.m_p_status.value == 1:
        # main processing loop
        last_frame_time = time.time()  # initialise the frame-pacing clock
        self.per_status = True
        self.b_model = True
        while self.bool_run:  # run flag: written by the main thread, only read here (at most one round late)
            # frame-rate control
            current_time = time.time()
            elapsed_time = current_time - last_frame_time
            if elapsed_time < self.frame_interval:
                time.sleep(self.frame_interval - elapsed_time)  # sleep off the remainder of the interval
            last_frame_time = time.time()
            # ********* grab a frame *************
            ret, frame = self.cap.read()  # after the first frame there should always be a picture
            if not ret:
                # self.logger.debug(f"{self.channel_id}--model--获取cap画面失败,队列空")
                continue  # nothing read -- try again
            # detection schedule check: are we inside an armed window?
            now = datetime.datetime.now()
            weekday = now.weekday()  # Monday == 0 ... Sunday == 6
            hour = now.hour
            if schedule[weekday][hour] == 1:
                # image pre-processing for the model
                img,scale_ratio, pad_size = self.model_node.model.prework(frame)
                indata = ModelinData(self.channel_id,img,frame,scale_ratio, pad_size)
                self.model_node.pro_add_data(indata)  # enqueue for inference
            else:  # not armed: skip inference and publish the raw picture
                   # (known gap: the result window misses entries for these frames)
                ret, frame_bgr_webp = cv2.imencode('.jpg', frame,self.encode_param)
                if not ret:
                    buffer_bgr_webp = None
                else:
                    buffer_bgr_webp = frame_bgr_webp.tobytes()
                self.update_last_frame(buffer_bgr_webp)
    else:
        self.logger.error("模型线程为启动成功,不进行工作")
        return
    self.b_model = False
    self.per_status = False
def _post_work_th(self,duration_time,proportion,verify_rate,warn_interval,model_name,check_area,polygon,conf_threshold,iou_thres):
# 初始化业务数据
result = [0 for _ in range(duration_time * verify_rate)] # 初始化时间*验证帧率数量的结果list
warn_last_time = time.time()
while self.bool_run:
out_data = self.out_mq.mypopleft() #(image,scale_ratio, pad_size,outputs):
if not out_data:
time.sleep(0.1)
continue
#开始后处理
bwarn, warn_text = self.model_node.model.postwork(out_data.image,out_data.outputs,out_data.scale_ratio,out_data.pad_size,
check_area,polygon,conf_threshold,iou_thres)
# 对识别结果要部要进行处理
if bwarn:
# 绘制报警文本
cv2.putText(out_data.image, warn_text, (50, 50),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
result.append(1) # 要验证数组修改,是地址修改吗?
else: # 没有产生报警也需要记录,统一计算占比
result.append(0)
#分析画面保存
# ret, frame_bgr_webp = cv2.imencode('.jpg', out_data.image,self.encode_param)
# buffer_bgr_webp = None
# if ret:
# buffer_bgr_webp = frame_bgr_webp.tobytes()
ret, buffer_bgr_webp = self._encode_frame(out_data.image)
# 分析图片放入缓冲区内存中
self.add_deque(out_data.image) # 缓冲区大小由maxlen控制 超上限后,删除最前的数据
# 分析画面一直更新最新帧,提供网页端显示
self.update_last_frame(buffer_bgr_webp)
if bwarn:
# 验证result -是否触发报警要求 --遍历每个模型执行的result
count_one = float(sum(result)) # 1,0 把1累加的和就是1的数量
ratio_of_ones = count_one / len(result)
# self.logger.debug(result)
if ratio_of_ones >= proportion: # 触发报警
# 基于时间间隔判断
current_time = time.time()
elapsed_time = current_time - warn_last_time
if elapsed_time < warn_interval:
continue
warn_last_time = current_time
# 处理报警
warn_data = WarnData()
warn_data.model_name = model_name
warn_data.warn_text = warn_text
warn_data.img_buffer = self.copy_deque() # 深度复制缓冲区
warn_data.width = self.cap.width
warn_data.height = self.cap.height
warn_data.channel_id = self.channel_id
self.warnM.add_warn_data(warn_data)
# 结果记录要清空
for i in range(len(result)):
result[i] = 0
def _verify(self,frame,model,model_data,schedule,result,isdraw=1):
'''验证执行主函数,实现遍历通道关联的模型,调用对应模型执行验证'''
#img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
img = frame
#验证检测计划,是否在布防时间内
now = datetime.datetime.now() # 获取当前日期和时间
weekday = now.weekday() # 获取星期几,星期一是0,星期天是6
hour = now.hour
result.pop(0) # 保障结果数组定长 --先把最早的结果推出数组
warntext = ""
if model and schedule[weekday][hour] == 1: #不在计划则不进行验证,直接返回图片
# 调用模型,进行检测,model是动态加载的,具体的判断标准由模型内执行 ---- *********
# bwarn = False
# warntext = ""
# time.sleep(2)
bwarn, warntext = model.verify(img, model_data,isdraw) #****************重要
# 对识别结果要部要进行处理
if bwarn:
# 绘制报警文本
cv2.putText(img, warntext, (50, 50),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
result.append(1) #要验证数组修改,是地址修改吗?
else: #没有产生报警也需要记录,统一计算占比
warntext = ""
result.append(0)
else:
result.append(0)
# 将检测结果图像转换为帧--暂时用不到AVFrame--2024-7-5
# new_frame_rgb_avframe = av.VideoFrame.from_ndarray(img, format="rgb24") # AVFrame
# new_frame_rgb_avframe.pts = None # 添加此行确保有pts属性
# if isinstance(img, np.ndarray): -- 留个纪念
#处理完的图片后返回-bgr模式
#img_bgr_ndarray = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
ret,buffer_bgr_webp = self._encode_frame(img)
#buffer_bgr_webp = self.encode_frame_to_flv(img)
return buffer_bgr_webp,img,warntext
def _dowork_thread(self,channel_id,model_data,schedule,verify_rate,warn_interval):
    """Combined worker thread (legacy mode): one channel + its model in one thread.

    Dynamically loads the model module, initialises platform resources
    (ACL context on Ascend), then loops: pace to the verify rate, grab a
    frame, run _verify, buffer/publish the frame, and raise an alarm when
    the hit-ratio window trips. Exits when self.bool_run goes False.

    :param model_data: DB row -- see the start_work docstring for the column layout
    """
    if not self.cap:
        self.logger.error("采集线程未正常启动,退出model线程")
        return
    # dynamically load the user-defined model handler (.py file)
    model = self._import_model(str(channel_id), model_data[5], model_data[8], model_data[9])
    if not model:
        self.logger.error("自定义模型文件加载失败,退出model线程")
        return
    # initialise model runtime resources
    context = None
    device_id = myCongif.get_data("device_id")
    if self.model_platform == "acl":  # per-thread ACL initialisation
        context = ACLModeManger.th_inti_acl(device_id)  # create the ACL context
        # load and initialise the offline model (.om) file
        ret = model.init_acl_resource()
        if not ret:
            print("初始化模型资源出错,退出线程!")
            return
    # business data: sliding window of duration(s) * verify-rate entries
    result = [0 for _ in range(model_data[3] * verify_rate)]
    proportion = model_data[4]  # hit ratio that triggers an alarm
    warn_last_time = time.time()
    #warn_save_count = 0  # latest saved-recording frame counter (unused)
    # main processing loop
    last_frame_time = time.time()  # frame-pacing clock
    self.b_model = True
    while self.bool_run:  # run flag: written by the main thread, only read here (at most one round late)
        # frame-rate control
        current_time = time.time()
        elapsed_time = current_time - last_frame_time
        if elapsed_time < self.frame_interval:
            time.sleep(self.frame_interval - elapsed_time)  # sleep off the remainder of the interval
        last_frame_time = time.time()
        #********* grab a frame *************
        ret,frame = self.cap.read()  # after the first frame there should always be a picture
        if not ret:
            #self.logger.debug(f"{self.channel_id}--model--获取cap画面失败,队列空")
            continue  # nothing read -- try again
        # run inference on the frame
        buffer_bgr_webp,img_bgr_ndarray,warn_text = self._verify(frame,model,model_data,schedule,result)
        # keep the analysed frame in the in-memory ring buffer (bounded by maxlen)
        self.add_deque(img_bgr_ndarray)
        # always publish the newest analysed frame for the web page
        self.update_last_frame(buffer_bgr_webp)
        if warn_text:
            # does the result window trip the alarm threshold?
            count_one = float(sum(result))  # entries are 0/1, so the sum is the hit count
            ratio_of_ones = count_one / len(result)
            #self.logger.debug(result)
            if ratio_of_ones >= proportion:  # alarm triggered
                # rate-limit by warn_interval
                current_time = time.time()
                elapsed_time = current_time - warn_last_time
                if elapsed_time < warn_interval:
                    continue
                warn_last_time = current_time
                # publish the alarm
                warn_data = WarnData()
                warn_data.model_name = model_data[7]
                warn_data.warn_text = warn_text
                warn_data.img_buffer = self.copy_deque()  # deep copy of the recording buffer
                warn_data.width = self.cap.width
                warn_data.height = self.cap.height
                warn_data.channel_id = channel_id
                self.warnM.add_warn_data(warn_data)
                # reset the window after an alarm fires
                for i in range(len(result)):
                    result[i] = 0
        # end_time = time.time()
        # print(f"Processing time: {end_time - start_time} seconds")
        # local display -- testing only
        # if channel_id == 2:
        #     cv2.imshow(str(channel_id), img)
        #     if cv2.waitKey(1) & 0xFF == ord('q'):
        #         break
    # thread shutdown
    print("开始结束工作线程")
    self.b_model = False
    # de-initialise platform resources
    if self.model_platform == "acl":
        try:
            model.release()  # release the model's resources
            # drop the model object
            del model
            # release the ACL context
            if context:  # if the thread died abnormally these would leak
                ACLModeManger.th_del_acl(context)
        except Exception as e:
            print(e)
    #cv2.destroyAllWindows()
    print("线程结束!!!!")
#2024-9-9 also supports a standalone model node (self.model_node: None = one thread per channel, set = standalone)
#2024-10-14 the model now runs in its own child process
def _start_model_th(self,model_data,schedule,type=1):
    """Start the model-side threads for this channel.

    Standalone mode (self.model_node set): start the post-processing
    thread, register with the shared model process, then start the
    pre-processing thread. Legacy mode: one combined worker thread.
    """
    verify_rate = myCongif.get_data("verify_rate")
    warn_interval = myCongif.get_data("warn_interval")
    self.bool_run = True
    if self.model_node:
        # standalone mode: post-processing first so results get consumed
        post_args = (model_data[3],model_data[4],verify_rate,warn_interval,model_data[7],
                     model_data[1],model_data[2],model_data[8],model_data[9])
        self.post_th = threading.Thread(target=self._post_work_th, args=post_args)
        self.post_th.start()
        # register with the model process (refcount +1 if already running)
        self.model_node.start_model_th(self.channel_id,self.out_mq)
        # finally the pre-processing thread starts feeding frames
        self.per_th = threading.Thread(target=self._pre_work_th,args=(schedule,))
        self.per_th.start()
    else:
        # legacy mode: one combined worker thread per video channel
        self.work_th = threading.Thread(target=self._dowork_thread,
                                        args=(self.channel_id, model_data, schedule, verify_rate,
                                              warn_interval))
        self.work_th.start()
def _stop_model_th(self):
if self.model_node: #独立线程,需要停止预处理线程,和后处理线程
self.bool_run = False
#停止预处理线程
if self.per_th:
self.per_th.join()
self.per_th = None
#停止model线程 -1
self.model_node.stop_model_th(self.channel_id)
#停止后处理线程
if self.post_th:
self.post_th.join()
self.post_th = None
#清空MQ
self.out_mq.myclear()#清空后处理mq中未处理的数据
else:
if self.work_th:
if self.b_model:
self.bool_run = False
self.work_th.join() #线程结束时,会把b_model置False
self.logger.debug(f"{self.channel_id}停止工作线程")
self.work_th = None
def _import_model(self,model_name,model_path,threshold,iou_thres):
'''
根据路径,动态导入模块
:param model_name: 模块名称 --用通道ID代替
:param model_path: 模块路径
:param threshold: 置信阈值
:param iou_thres: iou阈值
:return:
'''
try:
module_path = model_path.replace("/", ".").rsplit(".", 1)[0]
print(module_path)
# 动态导入模块
module = importlib.import_module(module_path)
# 从模块中获取指定的类
Model = getattr(module, "Model")
# 使用 Model 类
model_instance = Model(model_path,threshold,iou_thres)
return model_instance
except ModuleNotFoundError as e:
print(f"Module not found: {e}")
return None
except AttributeError as e:
print(f"Class not found in module: {e}")
return None
except Exception as e:
print(f"An unexpected error occurred: {e}")
return None
# if os.path.exists(model_path):
# module_spec = importlib.util.spec_from_file_location(model_name, model_path)
# if module_spec is None:
# self.logger.error(f"{model_path} 加载错误")
# return None
# module = importlib.util.module_from_spec(module_spec)
# start_time = time.time()
# print(f"通道{self.channel_id},开始exec_module自定义模型文件")
# module_spec.loader.exec_module(module)
# end_time = time.time()
# print(f"通道{self.channel_id},完成exec_module模型文件实例化耗时{end_time - start_time}")
#
# try:
# md = getattr(module, "Model")(model_path,threshold,iou_thres) #实例化类
# except Exception as e:
# self.logger.error(f"{model_path} 实例化错误,退出模型线程")
# return None
#
# if not isinstance(md, ModelBase):
# self.logger.error("{} not zf_model".format(md))
# return None
# else:
# self.logger.error("{}文件不存在".format(model_path))
# return None
# self.logger.debug(f"{model_path} 加载成功!!!!")
# return md
def start_work(self,cap_data,model_data,schedule,type,model_Node=None):
    """Start this channel's worker threads.

    :param cap_data: [source, type]
    :param model_data: model row bound to this channel:
        select t1.model_id, t1.check_area, t1.polygon, t2.duration_time,
               t2.proportion, t2.model_path, t1.ID, t2.model_name,
               t1.conf_thres, t1.iou_thres
        from channel2model t1 left join model t2 on t1.model_id = t2.ID ...
    :param schedule: 7x24 arming matrix
    :param type: 0 = start everything, 1 = capture thread only, 2 = model threads only
    :param model_Node: standalone model node, or None for legacy mode
    :return: True on success, False for an unsupported type
    """
    self.model_node = model_Node
    if type == 0:
        # capture first, then the model threads
        self._start_cap_th(cap_data[0],cap_data[1])
        self._start_model_th(model_data,schedule)
        return True
    if type == 1:
        self._start_cap_th(cap_data[0],cap_data[1])
        return True
    if type == 2:
        self._start_model_th(model_data,schedule)
        return True
    self.logger.error("暂时还不支持该类型工作!")
    return False
def stop_work(self,type=0):
    """Stop this channel's worker threads (and release buffered frames).

    :param type: 0 = stop everything (model first, then capture),
                 1 = capture thread only, 2 = model threads only
    :return: True on success, False for an unsupported type
    """
    if type == 0:
        self._stop_model_th()
        self._stop_cap_th()
        return True
    if type == 1:
        self._stop_cap_th()
        return True
    if type == 2:
        self.logger.debug("单独停止工作线程")
        self._stop_model_th()
        return True
    self.logger.error("暂时还不支持该类型工作!")
    return False