feat: meeting room booking assistant, integration debugging round 2
.env
@@ -1,3 +1,3 @@
-MODEL_API_KEY=sk-KnfrPFFnNDOCFkPkWsvRE7uJGNR0QMDCZ1Ie83ARhtOKMMWa
-MODEL_BASE_URL=https://www.chataiapi.com/v1
-MODEL_NAME=deepseek-r1
+MODEL_API_KEY=sk-9nng4v6qsdiw42r6d3re4nym15hiti29
+MODEL_BASE_URL=https://gateout.yced.com.cn/v1
+MODEL_NAME=deepseek
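Note: these values are consumed through python-decouple's config() in the module below. A minimal sketch of how the .env settings above are read (MODEL_NAME and MODEL_BASE_URL mirror the module's own variables; the API_KEY name and the standalone layout here are illustrative, not code from the commit):

from decouple import config  # looks the key up in .env, falling back to the default

MODEL_NAME = config('MODEL_NAME', default="")      # e.g. "deepseek"
BASE_URL = config('MODEL_BASE_URL', default="")    # e.g. "https://gateout.yced.com.cn/v1"
API_KEY = config('MODEL_API_KEY')                  # no default: raises UndefinedValueError if missing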
@@ -3,6 +3,7 @@ from datetime import datetime
 import requests, json
 from .openai_client import call_openai_api
 from decouple import config
+import threading, re
 
 MODEL_NAME = config('MODEL_NAME', default="")
 BASE_URL = config('MODEL_BASE_URL', default="")
@@ -26,6 +27,11 @@ def book_room(data: dict) -> str:
     return resp.text
 
 
+def check_and_process_think(content: str) -> str:
+    filtered_text = re.sub(r"<think\b[^>]*>.*?</think>", '', content, flags=re.DOTALL)
+    return filtered_text
+
+
 def build_prompt():
     """构建增强提示词"""
     # 获取可用会议室信息
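The new check_and_process_think helper strips the <think>…</think> reasoning block that deepseek-r1-style models prepend to their output, which would otherwise break the JSON extraction later in process_chat. A quick standalone check of the same regex (the sample input is illustrative only):

import re

def check_and_process_think(content: str) -> str:
    # drop any <think ...>...</think> block, including newlines inside it
    return re.sub(r"<think\b[^>]*>.*?</think>", '', content, flags=re.DOTALL)

sample = '<think>用户想订会议室……</think>{"room": "A101", "time": "10:00-11:00"}'
print(check_and_process_think(sample))  # -> {"room": "A101", "time": "10:00-11:00"}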
@@ -49,10 +55,10 @@ def build_prompt():
 请按以下步骤处理:
 1. 解析用户预订需求(时间、人数、设备要求等)
 2. 根据可用会议室列表推荐合适选项,推荐选项时不需要提取用户预订信息,待用户确认是再提取。按照正常自然语言对话返回
-3. 如果用户确定要预订某间会议室,而不是在询问合适会议室时,请帮根据上下文我提取用户预订信息,预订时间等信息并返回,结果请只返回json格式得预订信息且不需要包含多余的描述内容,输出结果示例如下:
+3. 如果用户确定要预订某间会议室,而不是在询问合适会议室时,请帮根据上下文我提取用户预订信息,预订时间等信息并返回,结果请只返回json格式得预订信息且不需要包含多余的描述内容以及<think>标签等,输出结果示例如下:
 {for_mart_str}
 4. 当用户再次发起预订会议室时,不要直接提取用户的预订信息而是请重新查看现有会议室的最新情况,基于用户需求给用户推荐合理的会议室,推荐选项时不需要提取用户预订信息,不要返回json数据,待用户确认是再提取。按照正常自然语言对话返回
-5. 如果用户需要解析预订会议室返回的结果,请解析相应的结果信息,并给予自然语言反馈,不需要返回json数据
+5. 如果用户需要解析调用预订会议室返回的结果,请解析用户提供的相应结果信息,并给予自然语言反馈,不需要返回json数据
 6. 用户其他需求,请按照自然语言对话返回
 
 """
@@ -78,14 +84,14 @@ class DialogManager:
             "content": content
         })
         # 上下文压缩(超长对话处理)
-        if len(self.dialogs[session_id]) > 10:
+        if len(self.dialogs[session_id]) > 50:
             self.compress_context(session_id)
 
     def compress_context(self, session_id):
         """对话历史压缩算法"""
         history = self.dialogs[session_id]
         # 保留最近3条完整记录
-        recent = history[-3:]
+        recent = history[-5:]
         # 摘要生成中间对话内容
         summary = self.generate_summary(history[3:-3])
         # 重组会话历史
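For context, compress_context keeps the earliest and most recent turns and folds the middle of the history into a single summary message; the reassembly itself falls outside this hunk, so the sketch below is an assumption about its shape rather than the project's actual code:

def compress_context_sketch(history, summary_text, keep_recent=5):
    # hypothetical reassembly: keep the earliest turns verbatim, fold the middle
    # into one summary message, keep the most recent turns (the diff raises 3 -> 5)
    head = history[:3]
    recent = history[-keep_recent:]
    summary = {"role": "system", "content": f"历史对话摘要:{summary_text}"}
    return head + [summary] + recent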
@@ -97,17 +103,33 @@ class DialogManager:
     def generate_summary(self, messages):
         """生成对话摘要(调用本地模型)"""
         text = "\n".join([f"{m['role']}: {m['content']}" for m in messages])
+        headers = {}
+        messages = [
+            {"role": "system",
+             "content": "你现在是一个对话总结助手,基于用户与模型智能体的对话,生成对话总结摘要,要求简明且保留关键信息"},
+            {"role": "user", "content": f"请生成以下对话的简明摘要(保留关键信息):\n\n{text}"}
+        ]
+
         payload = {
             "model": MODEL_NAME,
-            "prompt": f"请生成以下对话的简明摘要(保留关键信息):\n\n{text}"
+            "messages": messages,
         }
-        response = requests.post(f"{BASE_URL}", json=payload, timeout=10)
+        headers["Authorization"] = f"Bearer {config('MODEL_API_KEY')}"
+        response = requests.post(f"{BASE_URL}/chat/completions", data=json.dumps(payload, ensure_ascii=False),
+                                 timeout=1000, headers=headers, verify=False)
+        response.raise_for_status()
+        print(response.text)
         return response.json().get("response", "摘要生成失败")
 
 
 dialog_manager = DialogManager()
 
 
+def add_message_async(manager: DialogManager, session_id: str, role: str, content: str):
+    thread = threading.Thread(target=manager.add_message, args=(session_id, role, content))
+    thread.start()
+
+
 def process_chat(user_id: str, user_input: str):
     history = []
     query_history = dialog_manager.get_history(user_id)
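generate_summary now posts an OpenAI-style chat-completions payload with a Bearer token instead of a bare prompt payload; note that chat-completions responses return the generated text under choices[0].message.content rather than a top-level "response" field. A self-contained sketch of the same request shape, assuming the URL path and payload from the hunk above (the explicit Content-Type header, the shorter timeout, and the response parsing are assumptions, not code from the commit):

import json, requests
from decouple import config

def summarize(text: str) -> str:
    headers = {
        "Authorization": f"Bearer {config('MODEL_API_KEY')}",
        "Content-Type": "application/json",  # explicit, since the body is sent as raw bytes
    }
    payload = {
        "model": config('MODEL_NAME', default=""),
        "messages": [
            {"role": "system", "content": "你现在是一个对话总结助手,基于用户与模型智能体的对话,生成对话总结摘要,要求简明且保留关键信息"},
            {"role": "user", "content": f"请生成以下对话的简明摘要(保留关键信息):\n\n{text}"},
        ],
    }
    resp = requests.post(f"{config('MODEL_BASE_URL', default='')}/chat/completions",
                         data=json.dumps(payload, ensure_ascii=False).encode("utf-8"),
                         headers=headers, timeout=60, verify=False)
    resp.raise_for_status()
    # chat-completions format: the text lives in choices[0].message.content
    return resp.json()["choices"][0]["message"]["content"]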
@@ -115,16 +137,17 @@ def process_chat(user_id: str, user_input: str):
     prompt = ''
     if history is None or len(history) == 0:
         prompt = build_prompt()
-        dialog_manager.add_message(user_id, 'system', prompt)
-    dialog_manager.add_message(user_id, 'user', user_input)
+        add_message_async(dialog_manager, user_id, 'system', prompt)
+    add_message_async(dialog_manager, user_id, 'user', user_input)
     resp = call_openai_api(model=MODEL_NAME, system_prompt=prompt, user_query=user_input,
                            api_key=config('MODEL_API_KEY'),
                            history=history)
     content = resp["choices"][0]["message"]["content"]
     reasoning_content = resp["choices"][0]["message"]["reasoning_content"]
     print(content)
-    if 'json' in content or is_json(content):
-        new_content = content.replace("json", '')
+    new_content = check_and_process_think(content=content)
+    if 'json' in new_content or is_json(new_content):
+        new_content = new_content.replace("json", '')
         new_content = new_content.replace("`", '')
         data = json.loads(new_content)
         # 触发预订函数------
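is_json is defined elsewhere in this module and is not part of the hunk; the stand-in below is hypothetical. It illustrates the cleanup path above, where a fenced "json" answer from the model is stripped of the word json and the backticks before json.loads runs:

import json

def is_json(text: str) -> bool:
    # hypothetical stand-in for the project's real is_json helper
    try:
        json.loads(text)
        return True
    except (TypeError, ValueError):
        return False

raw = '```json\n{"room": "A101", "time": "2025-06-01 10:00"}\n```'
cleaned = raw.replace("json", '').replace("`", '').strip()
print(is_json(cleaned), json.loads(cleaned))  # True {'room': 'A101', 'time': '2025-06-01 10:00'}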
@@ -144,9 +167,11 @@ def process_chat(user_id: str, user_input: str):
                                system_prompt='',
                                )
         content = resp["choices"][0]["message"]["content"]
-        dialog_manager.add_message(user_id, 'assistant', content)
+        add_message_async(dialog_manager, user_id, 'assistant', content)
         return {'response': resp}
     else:
-        dialog_manager.add_message(user_id, 'assistant', content)
-        dialog_manager.add_message(user_id, 'assistant', reasoning_content)
+        add_message_async(dialog_manager, user_id, 'assistant', content)
+        if reasoning_content:
+            add_message_async(dialog_manager, user_id, 'assistant', reasoning_content)
         return {'response': resp}
@@ -25,7 +25,7 @@ SECRET_KEY = 'django-insecure-i(fm5c2v*=vgfwmgdl^qi7iezv(xfwovbqu=+^=vm72e$gnx&l
 # SECURITY WARNING: don't run with debug turned on in production!
 DEBUG = True
 
-ALLOWED_HOSTS = ["192.168.237.130", '127.0.0.1','10.212.27.44']
+ALLOWED_HOSTS = ["192.168.237.130", '127.0.0.1','10.212.27.4']
 
 # Application definition
 