feat: rework the service startup method and flow in the Dockerfile
@@ -3,8 +3,8 @@ WORKDIR /app
 COPY . /app
 ENV TZ=Asia/Shanghai \
     LANG=C.UTF-8
-RUN apt-get update && pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ \
+RUN rm -rf logs .git .idea .venv && apt-get update && pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ \
     && pip install yonyouopenapisdk-1.1.1-py3-none-any.whl -i https://mirrors.aliyun.com/pypi/simple/
-RUN mkdir -p /app/logs && touch /app/logs/yj_room_agent.log
+RUN mkdir -p /app/logs && touch /app/logs/yj_room_agent.log && rm -rf *.whl
 EXPOSE 9000
-CMD ["gunicorn","-w 5","-b 0.0.0.0:9000","-t 1200", "yj_room_agent.wsgi"]
+CMD ["hypercorn","-b","0.0.0.0:9000","yj_room_agent.asgi:application"]
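The CMD change above switches the container from Gunicorn's WSGI workers to Hypercorn serving the project's ASGI application. The diff does not show yj_room_agent/asgi.py itself; for reference, a minimal sketch of a standard Django ASGI entrypoint for this project (assumed layout, not taken from the commit):

```python
# yj_room_agent/asgi.py -- minimal sketch of a standard Django ASGI entrypoint (assumed, not shown in the diff)
import os

from django.core.asgi import get_asgi_application

# Point Django at the project's settings module before building the application object
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "yj_room_agent.settings")

# `application` is the callable Hypercorn imports via "yj_room_agent.asgi:application"
application = get_asgi_application()
```

Note that the old gunicorn command passed worker and timeout flags (-w 5, -t 1200) while the new Hypercorn command relies on defaults; if the previous concurrency is needed, an explicit --workers option would have to be added to the CMD.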
@@ -1,6 +1,6 @@
 Django==5.2.1
 requests==2.32.3
 python-decouple==3.8
-gunicorn==23.0.0
 APScheduler==3.11.0
 pydantic-ai==0.3.6
+Hypercorn==0.17.3
@@ -6,6 +6,9 @@ import requests, json
 from apscheduler.schedulers.background import BackgroundScheduler
 from decouple import config
 import threading, re
+
+from pydantic_ai.settings import ModelSettings
+
 from ..tools import getinfo, params_filter
 import logging
@@ -107,7 +110,7 @@ def query_room_info(data: dict, params: dict) -> str:
     meeting_room = getinfo.query_meetingroom(params)
     content = json.loads(json.dumps(meeting_room))
     result = params_filter.filter_params(content['data'], "query_meeting_room")
-    logger.info("query_room_info result => {0}".format(result))
+    logger.debug("query_room_info result => {0}".format(result))
     new_list = []
     # If the key exists in the dict, even a value of '' is returned, so data.get('capacity', 0) may be an empty string
     capacity = data.get('capacity', 0)
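Downgrading this dump from logger.info to logger.debug (the same change appears again in query_meetingbooking_info below) means the full result payload is only written when the logger runs at DEBUG level. A minimal sketch of that behaviour; the logger name and basicConfig call are illustrative, not taken from the project:

```python
import logging

# Illustrative setup: a named logger plus an INFO-level root handler
logger = logging.getLogger("yj_room_agent")
logging.basicConfig(level=logging.INFO)

logger.debug("query_room_info result => %s", {"rooms": []})  # suppressed at INFO
logger.info("query_room_info served a request")               # still emitted

# Raising the logger's level re-enables the detailed dumps without touching the code
logging.getLogger("yj_room_agent").setLevel(logging.DEBUG)
logger.debug("query_room_info result => %s", {"rooms": []})  # now emitted
```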
@@ -735,7 +738,7 @@ def process_cancel_room_meeting(data, params) -> tuple:
     ```
     {result}
     ```
-    Please parse the meeting-room cancellation result for the user; do not reason from earlier conversation context for this operation, and give the user natural-language feedback based on the result
+    Please parse the meeting-room cancellation result for the user (if the cancellation succeeded, tell the user to contact you if they need anything else); do not reason from earlier conversation context for this operation, and give the user natural-language feedback based on the result
     '''
     else:
         dic_data = {
@@ -789,6 +792,8 @@ func_tion_call_map = {


 def extract_json_blocks(text):
+    # Strip everything before the think marker before extracting
+    text=check_and_process_think(text)
     json_blocks = re.findall(r'\{[^{}]*\}', text)
     valid_blocks = []
     for block in json_blocks:
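For context, extract_json_blocks pulls flat (non-nested) {...} spans out of the model output with the brace regex shown above; the new first line strips the model's think preamble via check_and_process_think before that happens. A generic sketch of the extract-and-validate pattern the visible lines suggest; check_and_process_think is project code and is omitted here, and whether the real function returns raw strings or parsed dicts is not visible in the diff:

```python
import json
import re

def extract_json_blocks_sketch(text: str) -> list[dict]:
    """Return every non-nested {...} span in `text` that parses as JSON."""
    # The project first calls check_and_process_think(text) to drop the think preamble;
    # that helper is not shown in the diff, so it is skipped in this sketch.
    json_blocks = re.findall(r'\{[^{}]*\}', text)
    valid_blocks = []
    for block in json_blocks:
        try:
            valid_blocks.append(json.loads(block))
        except json.JSONDecodeError:
            continue  # ignore spans that only look like JSON
    return valid_blocks

# Example: prose mixed with one tool-call payload and one false positive
print(extract_json_blocks_sketch('ok {"func": "query_meeting_room", "capacity": 10} and {not json}'))
```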
@@ -813,7 +818,12 @@ from pydantic_ai.providers.openai import OpenAIProvider
 provider = OpenAIProvider(api_key=config('MODEL_API_KEY'), base_url=BASE_URL)
 model = OpenAIModel(MODEL_NAME, provider=provider)
 agent = Agent(model, system_prompt=build_prompt({'tenantId':config('TEMP_TENANT_ID')}))
+# seed: for the same question, keeps the output as consistent as possible; the larger the value, the stronger the consistency
+model_setting=ModelSettings(
+    temperature=config('MODEL_TEMPERATURE',cast=float,default=0.2),
+    seed=config('MODEL_SEED',cast=int,default=80),
+)


 def process_chat(covers_id:str,user_id: str, user_input: str, params: dict):
@@ -821,7 +831,7 @@ def process_chat(covers_id:str,user_id: str, user_input: str, params: dict):
     history = []
     query_history = dialog_manager.get_history(covers_id)
     history.extend(query_history)
-    req = agent.run_sync(user_prompt=user_input, message_history=history if len(history) > 0 else None)
+    req = agent.run_sync(user_prompt=user_input,model_settings=model_setting, message_history=history if len(history) > 0 else None)
     dialog_manager.set_msg(covers_id, req.all_messages())
     content = req.output
     logger.info(f"process chat content is : {content}")
@@ -858,7 +868,7 @@ def process_chat(covers_id:str,user_id: str, user_input: str, params: dict):
     Please parse the result for the user and give natural-language feedback based on the result
     '''
     logger.info("test point completed the function call ")
-    resp = agent.run_sync(user_prompt=book_promot)
+    resp = agent.run_sync(user_prompt=book_promot,model_settings=model_setting)
     content=resp.output
     logger.info("final content => {0}".format(content))
     new_content = check_and_process_think(content)
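Taken together, the hunks above create a single ModelSettings instance and pass it to both agent.run_sync calls, so temperature and seed now apply to every model invocation. A self-contained sketch of the pattern; the API key, base URL, and model name below are placeholders, whereas the project reads the real values through decouple's config() as the diff shows:

```python
from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel
from pydantic_ai.providers.openai import OpenAIProvider
from pydantic_ai.settings import ModelSettings

# Placeholder credentials and model name -- the project uses config('MODEL_API_KEY'), BASE_URL, MODEL_NAME
provider = OpenAIProvider(api_key="sk-placeholder", base_url="https://example.invalid/v1")
model = OpenAIModel("placeholder-model", provider=provider)
agent = Agent(model, system_prompt="You are a meeting-room booking assistant.")

# Low temperature plus a fixed seed nudges the model toward repeatable answers
model_setting = ModelSettings(temperature=0.2, seed=80)

req = agent.run_sync(
    user_prompt="Book a room for 10 people tomorrow afternoon",
    model_settings=model_setting,
    message_history=None,  # or stored history, as dialog_manager provides in the diff
)
print(req.output)
```

Seed handling is provider-dependent and best-effort on OpenAI-compatible endpoints, so identical outputs across runs are not guaranteed even with a fixed seed.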
@@ -239,7 +239,7 @@ def query_meetingbooking_info(params: dict):
     logger.debug("query_meetingbooking_info params => {0}".format(params))
     respones = requests.get(url=request_url, params=params,
                             headers=header,verify=False)
-    logger.info("meeting room book respones => {0}".format(respones.text))
+    logger.debug("meeting room book respones => {0}".format(respones.text))
     if respones.status_code == 200:
         return respones.json()
     else: