# NOTE: removed repository web-page scrape artifacts (topic limits / line-count banner)
'''
Wrapper around LLM API calls, isolating the rest of the application
from the concrete LLM provider (Tencent Cloud / DeepSeek).

Dependency: pip install openai
'''
|
|
import json
import re
import threading

import openai
from openai import OpenAI

from myutils.MyTime import get_local_timestr
|
|
|
|
class LLMManager:
    """Wrapper around an OpenAI-compatible chat API (Tencent Cloud / DeepSeek).

    One instance must be dedicated to a single task: the remote API is
    stateless, so the full conversation context lives in ``self.messages``
    and is resubmitted on every call.
    """

    def __init__(self, illm_type=0):
        """Configure credentials and create the API client.

        :param illm_type: provider selector -- 0 = Tencent Cloud, 1 = DeepSeek.
        """
        self.api_key = None
        self.api_url = None
        self.task_id = 0                      # one task <-> one id
        self.llm_sn = 0                       # LLM call sequence number, per task
        self.llm_sn_lock = threading.Lock()

        # temperature notes:
        # DS -- code generation / math: 0.0 -- data extraction / analysis: 1.0
        #       general chat: 1.3 -- translation: 1.3 -- creative writing: 1.5
        # Tencent Cloud -- (unspecified)
        self.temperature = 1.0

        # SECURITY(review): API keys are hard-coded; they should come from the
        # environment or a secrets store, not source control.
        # Fix: always define self.model so provider 0 cannot raise
        # AttributeError when the model name is read later.
        self.model = None
        if illm_type == 0:    # Tencent Cloud
            self.api_key = "fGBYaQLHykBOQsFwVrQdIFTsYr8YDtDVDQWFU41mFsmvfNPc"
            self.api_url = ""
        elif illm_type == 1:  # DeepSeek
            self.api_key = "sk-10360148b465424288218f02c87b0e1b"
            self.api_url = "https://api.deepseek.com/v1"
            # model="deepseek-reasoner" -- R1, model="deepseek-chat" -- V3
            self.model = "deepseek-reasoner"

        # Create the session object -- must be unique per task.
        self.client = OpenAI(api_key=self.api_key, base_url=self.api_url)
        self.messages = []

        # Fix: these three were accidentally plain local variables in the
        # original __init__ and were discarded; they are task state and
        # belong on the instance.
        self.current_stage = "信息收集"            # current pentest stage
        self.known_info = {"url": "www.test.com"}  # facts gathered so far
        self.results = []                          # all task results, used for the final report
|
|
|
|
#******测试使用,设置slef.message的值
|
|
def test_old_message(self,strMessage):
|
|
try:
|
|
self.messages = json.loads(strMessage)
|
|
except json.JSONDecodeError as e:
|
|
print(f"JSON解析错误: {str(e)}")
|
|
except Exception as e:
|
|
print(f"错误: {str(e)}")
|
|
|
|
# 构建初始提示 初始化messages
|
|
def build_initial_prompt(self,target,know_info=""):
|
|
self.messages = [{"role": "system",
|
|
"content": "你是一位资深的渗透测试专家,需要动态控制整个渗透测试过程,包括信息收集、漏洞扫描、漏洞利用等阶段,最终生成渗透测试报告。由你规划执行的指令,我会根据你的指令执行并提交结果,你再对结果进行分析,规划并生成下一步指令,直到完成渗透测试,生成测试报告。"
|
|
"生成的指令需满足如下约束:"
|
|
"1.只返回具体的shell指令或Python代码,不要包含注释和说明;"
|
|
"2.shell指令以```bash(.*?)```包裹,python代码以```python(.*?)```包裹;"
|
|
"3.针对提供的Python代码,需要包含错误处理,并将执行结果保存到文件中(为每个任务指定唯一文件名),执行结束返回success或failure和该文件名;"
|
|
"4.如果认为渗透测试已完成,请生成生成报告的python代码,并返回success和complete"}] # 一个messages
|
|
return f"现在开始对目标{target}进行渗透测试,已知信息{know_info},请提供下一步执行的指令。"
|
|
|
|
# 构建反馈提示
|
|
def build_feedback_prompt(self,bres,instruction, result):
|
|
if bres:
|
|
return f"执行指令“{instruction}”的结果是“{result}”。请根据这些结果生成下一步具体的指令。"
|
|
else:
|
|
return ""
|
|
|
|
def init_data(self,task_id=0):
|
|
#初始化LLM数据
|
|
self.llm_sn = 0
|
|
self.task_id = task_id
|
|
self.messages = []
|
|
|
|
# 调用LLM生成指令
|
|
def get_llm_instruction(self,prompt,th_DBM):
|
|
'''
|
|
1.由于大模型API不记录用户请求的上下文,一个任务的LLM不能并发!
|
|
:param prompt:用户本次输入的内容
|
|
:return: instr_list
|
|
'''
|
|
#添加本次输入入队列
|
|
message = {"role":"user","content":prompt}
|
|
self.messages.append(message)
|
|
|
|
#提交LLM
|
|
post_time = get_local_timestr()
|
|
response = self.client.chat.completions.create(
|
|
model=self.model,
|
|
messages = self.messages
|
|
)
|
|
|
|
reasoning_content = ""
|
|
content = ""
|
|
#LLM返回处理
|
|
if self.model == "deepseek-reasoner":
|
|
#返回错误码:DS-https://api-docs.deepseek.com/zh-cn/quick_start/error_codes
|
|
reasoning_content = response.choices[0].message.reasoning_content #推理过程
|
|
print(reasoning_content)
|
|
content = response.choices[0].message.content #推理内容
|
|
print(content)
|
|
#记录历史信息
|
|
self.messages.append({'role': 'assistant', 'content': content})
|
|
elif self.model == "deepseek-chat":
|
|
content = response.choices[0].message
|
|
#记录历史信息
|
|
self.messages.append(content)
|
|
#LLM记录存数据库
|
|
with self.llm_sn_lock:
|
|
self.llm_sn += 1
|
|
#llm查询记录入库
|
|
bres = th_DBM.insert_llm(self.task_id,prompt,reasoning_content,content,post_time,self.llm_sn)
|
|
if not bres:
|
|
print("llm入库失败!")
|
|
|
|
#********测试时使用---输出和记录LLM返回指令的message
|
|
print(f"Messages:{self.messages}")
|
|
with open("test","w",encoding="utf-8") as f: #输出到文件
|
|
json.dump(self.messages,f,ensure_ascii=False)
|
|
|
|
#需要对指令进行提取
|
|
instr_list = self.fetch_instruction(content)
|
|
return instr_list
|
|
|
|
def fetch_instruction(self,response_text):
|
|
'''
|
|
提取命令列表,包括:
|
|
1. Python 代码块(仅保留有效 Python 代码)
|
|
2. Shell 命令(分割空行,每个块视为一条指令)
|
|
|
|
:param text: 输入文本
|
|
:return: 解析后的命令列表
|
|
'''
|
|
#针对llm的回复,提取执行的指令
|
|
# 正则匹配 Python 代码块
|
|
python_blocks = re.findall(r"```python(.*?)```", response_text, flags=re.DOTALL)
|
|
# 处理 Python 代码块,去除空行并格式化
|
|
python_blocks = [block.strip() for block in python_blocks]
|
|
|
|
# 按连续的空行拆分
|
|
# 移除 Python 代码块,但保留内容用于返回
|
|
text_no_python = re.sub(r"```python.*?```", "PYTHON_BLOCK", response_text, flags=re.DOTALL)
|
|
# 这里用 \n\s*\n 匹配一个或多个空白行
|
|
parts = re.split(r'\n\s*\n', text_no_python)
|
|
|
|
commands = []
|
|
python_index = 0 # 记录 Python 代码块插入位置
|
|
|
|
for part in parts:
|
|
part = part.strip()
|
|
if not part:
|
|
continue
|
|
if "PYTHON_BLOCK" in part:
|
|
# 还原 Python 代码块
|
|
commands.append(f"python {python_blocks[python_index]}")
|
|
python_index += 1
|
|
else:
|
|
# 添加普通 Shell 命令
|
|
commands.append(part)
|
|
|
|
return commands
|
|
|
|
def test_llm(self):
|
|
with open("test", "r", encoding="utf-8") as f:
|
|
messages = json.load(f)
|
|
text = messages[-1]["content"]
|
|
list = self.fetch_instruction(text)
|
|
for itme in list:
|
|
print("***********")
|
|
print(itme)
|
|
|
|
|
|
if __name__ == "__main__":
    # Ad-hoc test entry point: build a DeepSeek-backed manager and
    # re-parse the dumped "test" file.
    manager = LLMManager(1)
    manager.test_llm()
|
|
|
|
|
|
|