From 2f5fcbcfe96cbf63cdc8f8b1ae886bdb65d85505 Mon Sep 17 00:00:00 2001
From: 雷雨
Date: Thu, 5 Jun 2025 11:27:57 +0800
Subject: [PATCH] feat: meeting booking assistant, joint-debugging round 2
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .env                            |  6 ++--
 yj_room_agent/LLM/ai_service.py | 51 ++++++++++++++++++++++++---------
 yj_room_agent/settings.py       |  2 +-
 3 files changed, 42 insertions(+), 17 deletions(-)

diff --git a/.env b/.env
index ed63011..add1c3a 100644
--- a/.env
+++ b/.env
@@ -1,3 +1,3 @@
-MODEL_API_KEY=sk-KnfrPFFnNDOCFkPkWsvRE7uJGNR0QMDCZ1Ie83ARhtOKMMWa
-MODEL_BASE_URL=https://www.chataiapi.com/v1
-MODEL_NAME=deepseek-r1
\ No newline at end of file
+MODEL_API_KEY=sk-9nng4v6qsdiw42r6d3re4nym15hiti29
+MODEL_BASE_URL=https://gateout.yced.com.cn/v1
+MODEL_NAME=deepseek
\ No newline at end of file
diff --git a/yj_room_agent/LLM/ai_service.py b/yj_room_agent/LLM/ai_service.py
index 79529e0..c0ed507 100644
--- a/yj_room_agent/LLM/ai_service.py
+++ b/yj_room_agent/LLM/ai_service.py
@@ -3,6 +3,7 @@ from datetime import datetime
 import requests, json
 from .openai_client import call_openai_api
 from decouple import config
+import threading, re
 
 MODEL_NAME = config('MODEL_NAME', default="")
 BASE_URL = config('MODEL_BASE_URL', default="")
@@ -26,6 +27,11 @@ def book_room(data: dict) -> str:
     return resp.text
 
 
+def check_and_process_think(content: str) -> str:
+    # Strip the model's <think>...</think> reasoning blocks before the reply is parsed.
+    filtered_text = re.sub(r"<think[^>]*>.*?</think>", '', content, flags=re.DOTALL)
+    return filtered_text
+
+
 def build_prompt():
     """Build the augmented system prompt."""
     # Fetch the currently available meeting rooms
@@ -49,10 +55,10 @@ def build_prompt():
     Handle requests in the following steps:
     1. Parse the user's booking requirements (time, headcount, equipment, etc.)
     2. Recommend suitable options from the list of available rooms. Do not extract booking details while recommending; extract them only once the user confirms. Reply in normal natural-language dialogue.
-    3. If the user has decided to book a specific room (rather than asking which room is suitable), extract the booking details (time, etc.) from the context and return them. Return only the booking information as JSON, with no extra descriptive text. Example output:
+    3. If the user has decided to book a specific room (rather than asking which room is suitable), extract the booking details (time, etc.) from the context and return them. Return only the booking information as JSON, with no extra descriptive text or <think> tags. Example output:
     {for_mart_str}
     4. When the user starts another booking request, do not extract their booking details right away; re-check the latest availability of the rooms and recommend suitable options based on their needs. Do not extract booking details or return JSON while recommending; extract them only once the user confirms. Reply in normal natural-language dialogue.
-    5. If the user wants the result returned by a room booking explained, parse that result and reply in natural language; do not return JSON.
+    5. If the user wants the result returned by the room-booking call explained, parse the result the user provides and reply in natural language; do not return JSON.
     6. For any other request, reply in normal natural-language dialogue.
     """

@@ -78,14 +84,14 @@ class DialogManager:
             "content": content
         })
         # Context compression (handles over-long conversations)
-        if len(self.dialogs[session_id]) > 10:
+        if len(self.dialogs[session_id]) > 50:
             self.compress_context(session_id)
 
     def compress_context(self, session_id):
         """Compress the conversation history."""
         history = self.dialogs[session_id]
         # Keep the most recent messages in full
-        recent = history[-3:]
+        recent = history[-5:]
         # Summarize the middle part of the conversation
         summary = self.generate_summary(history[3:-3])
         # Rebuild the session history
@@ -97,17 +103,33 @@ class DialogManager:
     def generate_summary(self, messages):
         """Generate a conversation summary (via the summarization model)."""
         text = "\n".join([f"{m['role']}: {m['content']}" for m in messages])
+        headers = {}
+        messages = [
+            {"role": "system",
+             "content": "You are a conversation-summarization assistant. Based on the dialogue between the user and the agent, produce a concise summary that keeps the key information."},
+            {"role": "user", "content": f"Please produce a concise summary of the following conversation (keep the key information):\n\n{text}"}
+        ]
         payload = {
             "model": MODEL_NAME,
-            "prompt": f"Please produce a concise summary of the following conversation (keep the key information):\n\n{text}"
+            "messages": messages,
         }
-        response = requests.post(f"{BASE_URL}", json=payload, timeout=10)
+
+        headers["Authorization"] = f"Bearer {config('MODEL_API_KEY')}"
+        headers["Content-Type"] = "application/json"
+        response = requests.post(f"{BASE_URL}/chat/completions", data=json.dumps(payload, ensure_ascii=False),
+                                 timeout=1000, headers=headers, verify=False)
+        response.raise_for_status()
+        print(response.text)
-        return response.json().get("response", "Summary generation failed")
+        # The gateway replies in OpenAI chat/completions format, so the summary is in choices[0].message.content.
+        try:
+            return response.json()["choices"][0]["message"]["content"]
+        except (KeyError, IndexError):
+            return "Summary generation failed"
 
 
 dialog_manager = DialogManager()
 
 
+def add_message_async(manager: DialogManager, session_id: str, role: str, content: str):
+    # Fire-and-forget append to the dialog history so the chat request is not blocked.
+    thread = threading.Thread(target=manager.add_message, args=(session_id, role, content))
+    thread.start()
+
+
 def process_chat(user_id: str, user_input: str):
     history = []
     query_history = dialog_manager.get_history(user_id)
@@ -115,16 +137,17 @@ def process_chat(user_id: str, user_input: str):
     prompt = ''
     if history is None or len(history) == 0:
         prompt = build_prompt()
-        dialog_manager.add_message(user_id, 'system', prompt)
-    dialog_manager.add_message(user_id, 'user', user_input)
+        add_message_async(dialog_manager, user_id, 'system', prompt)
+    add_message_async(dialog_manager, user_id, 'user', user_input)
     resp = call_openai_api(model=MODEL_NAME, system_prompt=prompt, user_query=user_input,
                            api_key=config('MODEL_API_KEY'), history=history)
     content = resp["choices"][0]["message"]["content"]
     reasoning_content = resp["choices"][0]["message"]["reasoning_content"]
     print(content)
-    if 'json' in content or is_json(content):
-        new_content = content.replace("json", '')
+    new_content = check_and_process_think(content=content)
+    if 'json' in new_content or is_json(new_content):
+        new_content = new_content.replace("json", '')
         new_content = new_content.replace("`", '')
         data = json.loads(new_content)
         # Trigger the booking function ------
@@ -144,9 +167,11 @@ def process_chat(user_id: str, user_input: str):
             system_prompt='',
         )
         content = resp["choices"][0]["message"]["content"]
-        dialog_manager.add_message(user_id, 'assistant', content)
+        add_message_async(dialog_manager, user_id, 'assistant', content)
         return {'response': resp}
     else:
-        dialog_manager.add_message(user_id, 'assistant', content)
-        dialog_manager.add_message(user_id, 'assistant', reasoning_content)
+        add_message_async(dialog_manager, user_id, 'assistant', content)
+        if reasoning_content:
+            add_message_async(dialog_manager, user_id, 'assistant', reasoning_content)
         return {'response': resp}
diff --git a/yj_room_agent/settings.py b/yj_room_agent/settings.py
index 4378779..a845d53 100644
--- a/yj_room_agent/settings.py
+++ b/yj_room_agent/settings.py
@@ -25,7 +25,7 @@ SECRET_KEY = 'django-insecure-i(fm5c2v*=vgfwmgdl^qi7iezv(xfwovbqu=+^=vm72e$gnx&l
 # SECURITY WARNING: don't run with debug turned on in production!
 DEBUG = True
 
-ALLOWED_HOSTS = ["192.168.237.130", '127.0.0.1','10.212.27.44']
+ALLOWED_HOSTS = ["192.168.237.130", '127.0.0.1','10.212.27.4']
 
 
 # Application definition
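
Note on the new add_message_async helper: it writes to DialogManager.dialogs from worker threads, so two concurrent appends for the same session can race with each other and with the length check that triggers compress_context. Below is a minimal sketch of one way to guard the shared history with a lock. The DialogManager here is a simplified stand-in, not the project's full class: compress_context is stubbed out, and the 50-message threshold simply mirrors the value used in this patch.

import threading


class DialogManager:
    """Simplified stand-in for the project's DialogManager, showing only the locking pattern."""

    def __init__(self):
        self.dialogs = {}              # session_id -> list of {"role": ..., "content": ...}
        self._lock = threading.Lock()  # guards self.dialogs across worker threads

    def add_message(self, session_id, role, content):
        # Serialize the append and the length check so concurrent threads cannot
        # interleave between them; run the (slow) compression outside the lock.
        with self._lock:
            history = self.dialogs.setdefault(session_id, [])
            history.append({"role": role, "content": content})
            needs_compression = len(history) > 50
        if needs_compression:
            self.compress_context(session_id)

    def compress_context(self, session_id):
        # Placeholder: the real implementation summarizes the older messages.
        pass


def add_message_async(manager, session_id, role, content):
    # Same fire-and-forget helper as in the patch; the lock inside add_message
    # makes it safe to call from several threads at once.
    thread = threading.Thread(target=manager.add_message,
                              args=(session_id, role, content), daemon=True)
    thread.start()
    return thread

Because the threads are never joined, a request can return before its history write lands; joining the returned thread, or funnelling writes through a single queue consumer, would make the ordering of the stored history deterministic if that matters for prompt construction.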