smartpi 1.1.5-py3-none-any.whl → 1.1.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- smartpi/__init__.py +1 -1
- smartpi/onnx_text_workflow.pyc +0 -0
- smartpi/posenet_utils.pyc +0 -0
- smartpi/rknn_text_workflow.pyc +0 -0
- {smartpi-1.1.5.dist-info → smartpi-1.1.6.dist-info}/METADATA +2 -3
- {smartpi-1.1.5.dist-info → smartpi-1.1.6.dist-info}/RECORD +8 -68
- smartpi/__init__.pyc +0 -0
- smartpi/_gui.py +0 -66
- smartpi/ai_asr.py +0 -1037
- smartpi/ai_llm.py +0 -934
- smartpi/ai_tts.py +0 -938
- smartpi/ai_vad.py +0 -83
- smartpi/audio.py +0 -125
- smartpi/base_driver.py +0 -618
- smartpi/camera.py +0 -84
- smartpi/color_sensor.py +0 -18
- smartpi/cw2015.py +0 -179
- smartpi/flash.py +0 -130
- smartpi/humidity.py +0 -20
- smartpi/led.py +0 -19
- smartpi/light_sensor.py +0 -72
- smartpi/local_model.py +0 -432
- smartpi/mcp_client.py +0 -100
- smartpi/mcp_fastmcp.py +0 -322
- smartpi/mcp_intent_recognizer.py +0 -408
- smartpi/models/__init__.pyc +0 -0
- smartpi/models/snakers4_silero-vad/__init__.pyc +0 -0
- smartpi/models/snakers4_silero-vad/hubconf.pyc +0 -0
- smartpi/motor.py +0 -177
- smartpi/move.py +0 -218
- smartpi/onnx_hand_workflow.py +0 -201
- smartpi/onnx_image_workflow.py +0 -176
- smartpi/onnx_pose_workflow.py +0 -482
- smartpi/onnx_text_workflow.py +0 -173
- smartpi/onnx_voice_workflow.py +0 -437
- smartpi/posemodel/__init__.pyc +0 -0
- smartpi/posenet_utils.py +0 -222
- smartpi/rknn_hand_workflow.py +0 -245
- smartpi/rknn_image_workflow.py +0 -405
- smartpi/rknn_pose_workflow.py +0 -592
- smartpi/rknn_text_workflow.py +0 -240
- smartpi/rknn_voice_workflow.py +0 -394
- smartpi/servo.py +0 -178
- smartpi/temperature.py +0 -18
- smartpi/tencentcloud-speech-sdk-python/__init__.pyc +0 -0
- smartpi/tencentcloud-speech-sdk-python/asr/__init__.pyc +0 -0
- smartpi/tencentcloud-speech-sdk-python/asr/flash_recognizer.pyc +0 -0
- smartpi/tencentcloud-speech-sdk-python/asr/speech_recognizer.pyc +0 -0
- smartpi/tencentcloud-speech-sdk-python/common/__init__.pyc +0 -0
- smartpi/tencentcloud-speech-sdk-python/common/credential.pyc +0 -0
- smartpi/tencentcloud-speech-sdk-python/common/log.pyc +0 -0
- smartpi/tencentcloud-speech-sdk-python/common/utils.pyc +0 -0
- smartpi/tencentcloud-speech-sdk-python/soe/__init__.pyc +0 -0
- smartpi/tencentcloud-speech-sdk-python/soe/speaking_assessment.pyc +0 -0
- smartpi/tencentcloud-speech-sdk-python/tts/__init__.pyc +0 -0
- smartpi/tencentcloud-speech-sdk-python/tts/flowing_speech_synthesizer.pyc +0 -0
- smartpi/tencentcloud-speech-sdk-python/tts/speech_synthesizer.pyc +0 -0
- smartpi/tencentcloud-speech-sdk-python/tts/speech_synthesizer_ws.pyc +0 -0
- smartpi/tencentcloud-speech-sdk-python/vc/__init__.pyc +0 -0
- smartpi/tencentcloud-speech-sdk-python/vc/speech_convertor_ws.pyc +0 -0
- smartpi/text_gte_model/__init__.pyc +0 -0
- smartpi/text_gte_model/config/__init__.pyc +0 -0
- smartpi/text_gte_model/gte/__init__.pyc +0 -0
- smartpi/touch_sensor.py +0 -16
- smartpi/trace.py +0 -120
- smartpi/ultrasonic.py +0 -20
- {smartpi-1.1.5.dist-info → smartpi-1.1.6.dist-info}/WHEEL +0 -0
- {smartpi-1.1.5.dist-info → smartpi-1.1.6.dist-info}/top_level.txt +0 -0
smartpi/ai_llm.py
DELETED
|
@@ -1,934 +0,0 @@
|
|
|
1
|
-
# llm_manager.py
# -*- coding: utf-8 -*-
import openai
import json
import os
import sys
import threading
import time

# MCP intent recognizer (package-local module).
from .mcp_intent_recognizer import MCPIntentRecognizer

# Absolute path of this script and the directory that contains it.
current_script_path = os.path.abspath(__file__)
current_dir = os.path.dirname(current_script_path)

# Make the bundled Tencent Cloud speech SDK importable.
sdk_path = os.path.join(current_dir, "tencentcloud-speech-sdk-python")
sys.path.append(sdk_path)


# Module-level singletons, populated by init().
_conversation_manager = None
_llm_client = None
_intent_recognizer = None
_mcp_intent_recognizer = None

# Global intent mapping -- the one place where all intents are managed.
gLOBAL_INTENTS = {}  # empty by default

# Shared state for the non-blocking (async) response path.
_response_lock = threading.Lock()
_response_chunks = []        # reply fragments collected so far
_full_response = ""          # complete reply text
_response_completed = False  # True once the reply has finished
_response_error = None       # error message, if the reply failed
_stream_callback = None      # optional callback invoked per streamed fragment
class ConversationManager:
    """Manages the chat history exchanged with the LLM."""

    def __init__(self, system_prompt=None, max_history_length=10):
        """
        Create a history manager seeded with a system prompt.

        :param system_prompt: custom system prompt (str, optional)
        :param max_history_length: max retained messages (int, default 10)
        """
        # Fold every registered intent into the default system prompt.
        intents_description = self._generate_intents_description()

        # Default system prompt; callers may replace it via system_prompt.
        default_system_prompt = f"你是一个友好、helpful的智能机器人,可以执行一些简单指令,名字叫小鸣同学。请根据用户的问题和需求提供简洁、有用的回答,保持回答简洁自然,符合口语习惯。\n\n比如你可以:{intents_description}等等,聊天涉及上述动作的时候请简单回答最好做到10字以内的回答,比如用户让你敬个礼,你回答好的,向领导敬礼,回答的时候不要有表情符号、特殊符号以及html和语气词等。"

        self.system_prompt = system_prompt or default_system_prompt
        self.history = [{"role": "system", "content": self.system_prompt}]
        self.max_history_length = max_history_length  # caller-supplied cap

    def _generate_intents_description(self):
        """
        Build the human-readable intent summary used in the system prompt.

        :return: intent description text (str)
        """
        if not gLOBAL_INTENTS:
            return "进行简单对话交流"

        parts = []
        for _code, info in gLOBAL_INTENTS.items():
            joined_keywords = "、".join(info["keywords"])
            parts.append(f"{info['name']}(可以用{joined_keywords}等关键词触发)")
        return "、".join(parts)

    def add_message(self, role, content):
        """
        Append one message to the history, trimming it when it grows too long.

        :param role: message role, e.g. "user" or "assistant" (str)
        :param content: message text (str)
        """
        self.history.append({"role": role, "content": content})
        self._truncate_history()

    def _truncate_history(self):
        """
        Keep the system prompt plus at most max_history_length recent messages.

        :return: None
        """
        limit = self.max_history_length
        if len(self.history) > limit + 1:  # +1 keeps the system prompt slot
            self.history = [self.history[0]] + self.history[-limit:]

    def get_messages(self):
        """
        Return a shallow copy of the history so callers cannot mutate it.

        :return: list[dict] with "role" and "content" keys
        """
        return list(self.history)

    def clear_history(self):
        """
        Drop all messages except the system prompt.

        :return: None
        """
        self.history = self.history[:1]
|
-
class IntentRecognizer:
    """Intent recognition backed by an OpenAI-compatible chat model."""

    def __init__(self, api_key, api_base, model="deepseek-v3", provider="deepseek"):
        """
        :param api_key: API key (str)
        :param api_base: API base URL (str)
        :param model: model name (str, default "deepseek-v3")
        :param provider: "deepseek" or "openai" (str, default "deepseek")
        """
        # Works against any OpenAI-compatible endpoint (DeepSeek or OpenAI).
        self.client = openai.OpenAI(api_key=api_key, base_url=api_base)
        self.model = model
        self.provider = provider.lower()

        # System prompt describing every registered intent.
        self.intent_system_prompt = self._generate_intent_system_prompt()

    def _generate_intent_system_prompt(self):
        """
        Build the intent-recognition system prompt from gLOBAL_INTENTS.

        :return: system prompt text (str)
        """
        lines = []
        for intent_code, info in gLOBAL_INTENTS.items():
            for keyword in info["keywords"]:
                lines.append(f" - {keyword}:{intent_code}")
        intent_list_text = "\n".join(lines)

        # NOTE(review): leading indentation inside this prompt literal was
        # lost in the recovered source -- confirm against the original.
        prompt = f"""你是一个意图识别助手,能够准确识别用户的指令意图。请你根据以下规则处理用户的输入:
1. 仔细分析用户输入,识别其中包含的所有意图
2. 每个意图必须对应一个预设的意图代码:
{intent_list_text}
3. 提取每个意图中的参数,并将其放入arg数组中
4. 请只返回意图JSON字符串,不要添加任何其他解释性文字
5. 输出格式必须严格遵循标准JSON:
- 单个意图:{{"intent":1,"arg":[]}}
- 多个意图:[{{"intent":1,"arg":[]}},{{"intent":1,"arg":[]}}]
6. 如果没有识别出任何意图,请返回空数组 []

示例:
用户输入: 请以50速度前进
输出: {{"intent":1,"arg":["50"]}}

用户输入: 音量调节到50%
输出: {{"intent":2,"arg":["50"]}}

用户输入: 请先以50速度前进然后把音量调节到50%
输出: [{{"intent":1,"arg":["50"]}},{{"intent":2,"arg":["50"]}}]
"""
        return prompt

    def recognize_intent(self, user_input):
        """
        Detect the intents present in the user's text.

        :param user_input: user text (str)
        :return: list[dict] of {"intent": ..., "arg": [...]}; empty on failure
        """
        try:
            completion = self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": self.intent_system_prompt},
                    {"role": "user", "content": user_input},
                ],
                temperature=0.0,  # low temperature keeps the JSON output stable
            )

            raw = completion.choices[0].message.content.strip()
            print(f"=====================================意图识别结果:{raw}=================================")

            return self._parse_intent_result(raw)

        except Exception as e:
            print(f"意图识别失败: {str(e)}")
            return []

    def _parse_intent_result(self, intent_text):
        """
        Parse the model's intent reply, trying progressively looser strategies.

        :param intent_text: raw model output (str)
        :return: list[dict] of {"intent": ..., "arg": [...]}; empty on failure
        """
        try:
            if not intent_text or intent_text == "[]":
                return []

            print(f"原始意图文本: {intent_text}")

            # Normalize doubled braces the model sometimes emits,
            # e.g. {{intent:"1",arg:[]}}.
            intent_text = intent_text.replace("{{", "{").replace("}}", "}")

            # Strategy 1: a single JSON object.
            try:
                if intent_text.startswith("{"):
                    single = json.loads(intent_text)
                    print(f"策略1成功,解析到单个意图")
                    return [single]
            except Exception as e1:
                print(f"策略1失败: {str(e1)}")

            # Strategy 2: a JSON array (wrap bare object lists first).
            try:
                candidate = intent_text if intent_text.startswith("[") else "[" + intent_text + "]"
                parsed = json.loads(candidate)
                if isinstance(parsed, list):
                    print(f"策略2成功,解析到{len(parsed)}个意图")
                    return parsed
                elif isinstance(parsed, dict):
                    print(f"策略2成功,解析到1个意图")
                    return [parsed]
            except Exception as e2:
                print(f"策略2失败: {str(e2)}")

            # Strategy 3: several objects separated by commas.
            try:
                clean_text = "".join(intent_text.split())
                if ",{" in clean_text:
                    parsed_items = []
                    for i, part in enumerate(clean_text.split(",")):
                        try:
                            # Re-balance the braces lost by the split.
                            if not part.startswith("{"):
                                part = "{" + part
                            if not part.endswith("}"):
                                part = part + "}"
                            item = json.loads(part)
                            parsed_items.append(item)
                            print(f"策略3成功,分割并解析到第{i+1}个意图: {item}")
                        except Exception as e_inner:
                            print(f"解析第{i+1}个意图失败: {str(e_inner)}")
                    if parsed_items:
                        return parsed_items
            except Exception as e3:
                print(f"策略3失败: {str(e3)}")

            # Strategy 4: strip whitespace and repair the outer delimiters.
            try:
                clean_text = "".join(intent_text.split())
                if not clean_text.startswith("{") and not clean_text.startswith("["):
                    if clean_text.startswith("intent") or ":" in clean_text:
                        clean_text = "{" + clean_text + "}"
                    repaired = json.loads(clean_text)
                    print(f"策略4成功,修复为对象并解析")
                    return [repaired]
                else:
                    repaired = json.loads(clean_text)
                    if isinstance(repaired, list):
                        print(f"策略4成功,解析到数组")
                        return repaired
                    print(f"策略4成功,解析到对象")
                    return [repaired]
            except Exception as e4:
                print(f"策略4失败: {str(e4)}")

            # Every strategy failed.
            print("所有解析尝试都失败了,返回空列表")
            return []
        except Exception as e:
            print(f"解析意图结果时发生严重错误: {str(e)}")
            return []
|
-
class LLMClient:
    """Streaming chat client for OpenAI-compatible endpoints (DeepSeek/OpenAI)."""

    def __init__(self, api_key=None, api_base=None,
                 model="deepseek-v3", llm_timeout=10.0,
                 llm_temperature=0.7, llm_max_tokens=200, max_history_length=10,
                 provider="deepseek"):
        """
        :param api_key: API key (str, optional)
        :param api_base: API base URL (str, optional)
        :param model: model name (str, default "deepseek-v3")
        :param llm_timeout: request timeout in seconds (float, default 10.0)
        :param llm_temperature: sampling temperature (float, default 0.7)
        :param llm_max_tokens: max generated tokens (int, default 200)
        :param max_history_length: max history retained per request (int, default 10)
        :param provider: "deepseek" or "openai" (str, default "deepseek")
        """
        # Works against any OpenAI-compatible endpoint.
        self.client = openai.OpenAI(
            api_key=api_key,
            base_url=api_base,
            timeout=llm_timeout,
        )
        self.model = model
        self.provider = provider.lower()
        self.is_generating = False    # True while a stream is in flight
        self.current_response = None  # live response object, kept for cancellation
        self.max_history_length = max_history_length

        # Parameters forwarded verbatim to chat.completions.create().
        self.generation_params = {
            "temperature": llm_temperature,
            "max_tokens": llm_max_tokens,
            "top_p": 0.9,    # diversity control
            "stream": True,  # streaming output
        }

    def generate_stream(self, messages, is_running):
        """
        Stream a completion, yielding text fragments as they arrive.

        Generation stops early when *is_running* turns falsy or when
        stop_generation() clears is_generating.

        :param messages: chat history (list[dict] with "role"/"content")
        :param is_running: caller's running flag (bool)
        :return: yields str fragments; generator return value is the full text
        """
        self.is_generating = True
        accumulated = ""
        self.current_response = None

        try:
            # Trim the history first to reduce token usage.
            trimmed = self._optimize_messages(messages)

            stream = self.client.chat.completions.create(
                model=self.model,
                messages=trimmed,
                **self.generation_params,
            )
            self.current_response = stream

            for chunk in stream:
                # Interrupted (program exiting or manual stop): close the stream.
                if not is_running or not self.is_generating:
                    if hasattr(stream, "close"):
                        try:
                            stream.close()
                        except:
                            pass
                    break

                piece = chunk.choices[0].delta.content
                if piece:
                    accumulated += piece
                    # Yield immediately -- no post-processing.
                    yield piece

            return accumulated

        except Exception as e:
            # Simplified user-facing error on any failure.
            error_msg = f"抱歉,我现在无法回答你的问题。"
            yield error_msg
            return error_msg

        finally:
            # Always reset generation state, success or failure.
            self.is_generating = False
            self.current_response = None

    def _optimize_messages(self, messages):
        """
        Trim the history to the system prompt plus the most recent messages.

        :param messages: original history (list[dict])
        :return: trimmed history (list[dict])
        """
        if len(messages) > self.max_history_length + 1:
            system_messages = [m for m in messages if m['role'] == 'system']
            recent = messages[-self.max_history_length:]
            return system_messages + recent
        return messages

    def stop_generation(self):
        """
        Abort an in-flight generation as quickly as possible.

        :return: None
        """
        # Flip the flag first so the streaming loop exits promptly.
        self.is_generating = False

        # Then force-close the HTTP stream if one is open.
        if self.current_response and hasattr(self.current_response, 'close'):
            try:
                self.current_response.close()
                print("已强制关闭大模型响应流")
            except Exception as e:
                print(f"关闭响应流时出错: {str(e)}")

        self.current_response = None
|
-
# 全局初始化函数
|
|
445
|
-
def init(api_key=None, api_base=None,
         model="deepseek-v3", llm_timeout=10.0,
         llm_temperature=0.7, llm_max_tokens=200, max_history_length=10,
         system_prompt=None, global_intents=None, provider="openai", isOpenAi=True,
         stream_callback=None):
    """
    Initialize all LLM components (module-level singletons).

    :param api_key: API key (str, optional)
    :param api_base: API base URL (str, optional)
    :param model: model name (str, default "deepseek-v3")
    :param llm_timeout: request timeout in seconds (float, default 10.0)
    :param llm_temperature: sampling temperature (float, default 0.7)
    :param llm_max_tokens: max generated tokens (int, default 200)
    :param max_history_length: max chat history length (int, default 10)
    :param system_prompt: custom system prompt (str, optional)
    :param global_intents: intent mapping (dict, optional), shaped like
        {"intent_code": {"name": ..., "keywords": [...]}}
    :param provider: "deepseek" or "openai" (str, default "openai")
    :param isOpenAi: force the OpenAI-compatible provider (bool, default True)
    :param stream_callback: callable(chunk: str) invoked per streamed fragment
    :return: None
    """
    global _conversation_manager, _llm_client, _intent_recognizer
    global _mcp_intent_recognizer, _stream_callback, gLOBAL_INTENTS

    _stream_callback = stream_callback

    # isOpenAi overrides the provider argument.
    if isOpenAi:
        provider = "openai"

    chosen_model = model or "deepseek-v3"

    # Install the caller's intent mapping, if any.
    if global_intents is not None:
        gLOBAL_INTENTS = global_intents

    # Chat history manager.
    _conversation_manager = ConversationManager(
        system_prompt=system_prompt,
        max_history_length=max_history_length,
    )

    # Streaming LLM client (DeepSeek/OpenAI compatible).
    _llm_client = LLMClient(
        api_key=api_key,
        api_base=api_base,
        model=chosen_model,
        llm_timeout=llm_timeout,
        llm_temperature=llm_temperature,
        llm_max_tokens=llm_max_tokens,
        max_history_length=max_history_length,
        provider=provider,
    )

    # LLM-backed intent recognizer.
    _intent_recognizer = IntentRecognizer(
        api_key=api_key,
        api_base=api_base,
        model=chosen_model,
        provider=provider,
    )

    # MCP intent recognizer.
    _mcp_intent_recognizer = MCPIntentRecognizer(global_intents=gLOBAL_INTENTS)

    print("大模型组件初始化成功")
518
|
-
def send_message(message, is_running=True):
    """
    Send one user message and block until the full reply is available.

    :param message: user message text (str)
    :param is_running: running flag passed to the stream loop (bool, default True)
    :return: complete reply text (str)
    :raises Exception: if init() has not been called yet
    """
    global _conversation_manager, _llm_client

    if not _conversation_manager or not _llm_client:
        raise Exception("大模型组件尚未初始化,请先调用init函数")

    # Record the user turn, then stream and join the reply.
    _conversation_manager.add_message("user", message)
    history = _conversation_manager.get_messages()

    reply = "".join(
        piece for piece in _llm_client.generate_stream(history, is_running) if piece
    )

    # Record the assistant turn.
    _conversation_manager.add_message("assistant", reply)
    return reply
548
|
-
def get_response_stream(message, is_running=True):
    """
    Stream the LLM reply for one user message.

    Yields (chunk, is_last) pairs; after all chunks, the final chunk is
    re-emitted once with is_last=True.

    :param message: user message text (str)
    :param is_running: running flag (bool, default True)
    :raises Exception: if init() has not been called yet
    """
    global _conversation_manager, _llm_client

    if not _conversation_manager or not _llm_client:
        raise Exception("大模型组件尚未初始化,请先调用init函数")

    # Record the user turn.
    _conversation_manager.add_message("user", message)
    history = _conversation_manager.get_messages()

    received = []
    for piece in _llm_client.generate_stream(history, is_running):
        if piece:
            received.append(piece)
            yield piece, False

    # Signal completion by repeating the last fragment with is_last=True.
    if received:
        yield received[-1], True

    # Record the assistant turn.
    _conversation_manager.add_message("assistant", "".join(received))
583
|
-
def recognize_intent(message):
    """
    Recognize the intents in a user message via the LLM recognizer.

    :param message: user message text (str)
    :return: list[dict] of {"intent": ..., "arg": [...]}
    :raises Exception: if init() has not been called yet
    """
    if not _intent_recognizer:
        raise Exception("大模型组件尚未初始化,请先调用init函数")
    return _intent_recognizer.recognize_intent(message)
599
|
-
def recognize_intent_mcp(message):
    """
    Recognize the intents in a user message via the MCP recognizer.

    :param message: user message text (str)
    :return: list[dict] of {"intent": ..., "arg": [...]}
    :raises Exception: if init() has not been called yet
    """
    if not _mcp_intent_recognizer:
        raise Exception("大模型组件尚未初始化,请先调用init函数")
    return _mcp_intent_recognizer.recognize_intent(message)
|
615
|
-
def clear_history():
    """
    Clear the chat history, keeping only the system prompt.

    :return: None
    :raises Exception: if init() has not been called yet
    """
    if not _conversation_manager:
        raise Exception("大模型组件尚未初始化,请先调用init函数")
    _conversation_manager.clear_history()
628
|
-
def stop_generation():
    """
    Abort the in-flight LLM generation, if any.

    :return: None
    :raises Exception: if init() has not been called yet
    """
    if not _llm_client:
        raise Exception("大模型组件尚未初始化,请先调用init函数")
    _llm_client.stop_generation()
641
|
-
def _async_response_handler(message, is_running):
    """
    Background worker for send_message_async: streams the reply and publishes
    progress through the module-level response state.

    :param message: user message text (str)
    :param is_running: running flag (bool)
    :return: None
    """
    global _conversation_manager, _llm_client
    global _response_chunks, _full_response, _response_completed, _response_error

    try:
        # Reset the shared response state before starting.
        with _response_lock:
            _response_chunks = []
            _full_response = ""
            _response_completed = False
            _response_error = None

        # Record the user turn and fetch the full history.
        _conversation_manager.add_message("user", message)
        history = _conversation_manager.get_messages()

        reply = ""
        for piece in _llm_client.generate_stream(history, is_running):
            if not piece:
                continue
            reply += piece
            with _response_lock:
                _response_chunks.append(piece)
                _full_response = reply
            # Forward the fragment to the streaming callback, if one is set.
            # NOTE(review): the recovered source's indentation was ambiguous;
            # the callback is invoked outside the lock here -- confirm.
            if _stream_callback and callable(_stream_callback):
                try:
                    _stream_callback(piece, is_last=False)
                except Exception as e:
                    print(f"流式回调函数执行出错: {str(e)}")

        # After all chunks, send an empty fragment flagged is_last=True so the
        # TTS side can finish synthesis without replaying the last chunk.
        if _stream_callback and callable(_stream_callback):
            try:
                _stream_callback("", is_last=True)
            except Exception as e:
                print(f"流式回调函数执行出错: {str(e)}")

        # Record the assistant turn.
        _conversation_manager.add_message("assistant", reply)

        # Mark the reply as complete.
        with _response_lock:
            _full_response = reply
            _response_completed = True
            _response_error = None

    except Exception as e:
        # Publish a simplified error reply.
        error_msg = f"抱歉,我现在无法回答你的问题。"
        with _response_lock:
            _response_chunks.append(error_msg)
            _full_response = error_msg
            _response_completed = True
            _response_error = str(e)
|
-
def send_message_async(message, is_running=True):
    """
    Send one user message without blocking; results are retrieved later via
    get_response_status(), get_full_response(), or wait_for_response().

    :param message: user message text (str)
    :param is_running: running flag (bool, default True)
    :return: None
    :raises Exception: if init() has not been called yet
    """
    global _conversation_manager, _llm_client

    if not _conversation_manager or not _llm_client:
        raise Exception("大模型组件尚未初始化,请先调用init函数")

    # Run the streaming work on a daemon thread.
    worker = threading.Thread(
        target=_async_response_handler,
        args=(message, is_running),
        daemon=True,
    )
    worker.start()
|
-
def get_response_status():
    """
    Snapshot the current async-reply state.

    :return: dict with keys:
        - completed: whether the reply finished (bool)
        - chunks: reply fragments so far (list[str])
        - full_response: full reply text (str)
        - error: error message or None (str | None)
    """
    with _response_lock:
        return {
            "completed": _response_completed,
            "chunks": list(_response_chunks),
            "full_response": _full_response,
            "error": _response_error,
        }
746
|
-
def get_full_response():
    """
    Return the complete async reply, or "" if it has not finished yet.

    :return: full reply text (str)
    """
    with _response_lock:
        return _full_response if _response_completed else ""
|
757
|
-
def get_realtime_chunks():
    """
    Return a copy of the reply fragments collected so far.

    :return: list[str]
    """
    with _response_lock:
        return list(_response_chunks)
766
|
-
def clear_response_cache():
    """
    Reset the cached async-reply state.

    :return: None
    """
    global _response_chunks, _full_response, _response_completed, _response_error
    with _response_lock:
        _response_chunks = []
        _full_response = ""
        _response_completed = False
        _response_error = None
|
779
|
-
def wait_for_response(timeout=None):
    """
    Block until the async reply completes, with an optional timeout.

    :param timeout: seconds to wait, or None for no limit (float | None)
    :return: full reply text (str)
    :raises TimeoutError: if the reply does not complete in time
    """
    deadline = None if timeout is None else time.time() + timeout

    while True:
        with _response_lock:
            if _response_completed:
                return _full_response

        if deadline is not None and time.time() > deadline:
            raise TimeoutError("等待响应超时")

        # Short poll interval keeps CPU usage low.
        time.sleep(0.1)
|
-
def get_global_intents():
    """
    Return a copy of the global intent mapping (so callers cannot mutate it).

    :return: dict keyed by intent code
    """
    return dict(gLOBAL_INTENTS)
808
|
-
def add_custom_intents(intent_code, name="", keywords=None, en_code="en_code"):
    """
    Register one custom intent in the global intent mapping and refresh every
    component that caches intent-derived prompts.

    Args:
        intent_code: unique, non-empty intent code
        name (str): human-readable intent name
        keywords (list, optional): trigger keywords
        en_code (str, optional): English code

    Returns:
        bool: True if the intent was added, False otherwise
    """
    global gLOBAL_INTENTS, _mcp_intent_recognizer

    if not intent_code:
        print("意图代码不能为空")
        return False

    if keywords is None:
        keywords = []

    # Register the intent.
    gLOBAL_INTENTS[intent_code] = {
        "name": name,
        "keywords": keywords,
        "en_code": en_code,
    }
    print(f"已成功添加自定义意图: {intent_code} - {name}")

    # Refresh the LLM intent recognizer's prompt with the new mapping.
    if _intent_recognizer:
        _intent_recognizer.intent_system_prompt = _intent_recognizer._generate_intent_system_prompt()

    # Refresh the MCP recognizer's mapping and its English-code mapping.
    if _mcp_intent_recognizer:
        _mcp_intent_recognizer.global_intents = gLOBAL_INTENTS
        _mcp_intent_recognizer.update_en_code_mapping()
        print(f"MCP意图识别器已更新,添加新意图: {intent_code} - {name}")

    # Rebuild the chat system prompt so it mentions the new intent.
    # Fix: guard the prompt rebuild under the _conversation_manager check --
    # previously the assignments could run with _conversation_manager unset,
    # raising AttributeError/NameError.
    if _conversation_manager:
        intents_description = _conversation_manager._generate_intents_description()
        default_system_prompt = f"你是一个友好、helpful的智能机器人,可以执行一些简单指令,名字叫小鸣同学。请根据用户的问题和需求提供简洁、有用的回答,保持回答简洁自然,符合口语习惯。\n\n比如你可以:{intents_description}等等,聊天涉及上述动作的时候请简单回答最好做到10字以内的回答,比如用户让你敬个礼,你回答好的,向领导敬礼,回答的时候不要有表情符号、特殊符号以及html和语气词等。"
        _conversation_manager.system_prompt = default_system_prompt
        _conversation_manager.history[0]["content"] = default_system_prompt

    return True
|
-
def get_global_intents_count():
    """Return how many intents are currently registered globally.

    Returns:
        int: Number of entries in the global intent mapping.
    """
    # Read-only access; no `global` declaration needed for reads.
    return len(gLOBAL_INTENTS)
def clear_global_intents():
    """Remove every intent from the global intent mapping.

    Also refreshes any already-initialized consumers (intent recognizer,
    conversation manager, MCP intent recognizer) so they stop offering
    the removed intents.

    Returns:
        bool: Always True.
    """
    global gLOBAL_INTENTS, _mcp_intent_recognizer

    # Empty the mapping in place so existing references stay valid.
    gLOBAL_INTENTS.clear()
    print("已成功清空所有全局意图")

    # Rebuild the intent recognizer's system prompt, if initialized.
    if _intent_recognizer:
        _intent_recognizer.intent_system_prompt = _intent_recognizer._generate_intent_system_prompt()

    # Rebuild the conversation manager's system prompt, if initialized.
    if _conversation_manager:
        intents_description = _conversation_manager._generate_intents_description()
        default_system_prompt = f"你是一个友好、helpful的智能机器人,可以执行一些简单指令,名字叫小鸣同学。请根据用户的问题和需求提供简洁、有用的回答,保持回答简洁自然,符合口语习惯。\n\n比如你可以:{intents_description}等等,聊天涉及上述动作的时候请简单回答最好做到10字以内的回答,比如用户让你敬个礼,你回答好的,向领导敬礼,回答的时候不要有表情符号、特殊符号以及html和语气词等。"
        _conversation_manager.system_prompt = default_system_prompt
        _conversation_manager.history[0]["content"] = default_system_prompt

    # Sync the MCP intent recognizer's mapping and en_code index, if initialized.
    if _mcp_intent_recognizer:
        _mcp_intent_recognizer.global_intents = gLOBAL_INTENTS
        _mcp_intent_recognizer.update_en_code_mapping()
        print("MCP意图识别器已清空所有意图")

    return True
def set_global_intents(intents_dict):
    """Replace the global intent mapping wholesale.

    Rebinds the module-level mapping to *intents_dict* and refreshes any
    already-initialized consumers (intent recognizer, conversation
    manager, MCP intent recognizer).

    :param intents_dict: New intent mapping (dict); keys are intent
        codes, values are intent info dicts.
    :return: None
    :raises ValueError: If intents_dict is not a dict.
    """
    global gLOBAL_INTENTS, _intent_recognizer, _conversation_manager, _mcp_intent_recognizer

    if not isinstance(intents_dict, dict):
        raise ValueError("意图映射必须是字典类型")

    # Rebind (not update in place) — callers pass ownership of the dict.
    gLOBAL_INTENTS = intents_dict

    # Rebuild the intent recognizer's system prompt, if initialized.
    if _intent_recognizer:
        _intent_recognizer.intent_system_prompt = _intent_recognizer._generate_intent_system_prompt()

    # Rebuild the conversation manager's system prompt, if initialized.
    if _conversation_manager:
        intents_description = _conversation_manager._generate_intents_description()
        default_system_prompt = f"你是一个友好、helpful的智能机器人,可以执行一些简单指令,名字叫小鸣同学。请根据用户的问题和需求提供简洁、有用的回答,保持回答简洁自然,符合口语习惯。\n\n比如你可以:{intents_description}等等,聊天涉及上述动作的时候请简单回答最好做到10字以内的回答,比如用户让你敬个礼,你回答好的,向领导敬礼,回答的时候不要有表情符号、特殊符号以及html和语气词等。"
        _conversation_manager.system_prompt = default_system_prompt
        _conversation_manager.history[0]["content"] = default_system_prompt

    # Point the MCP intent recognizer at the new mapping, if initialized.
    if _mcp_intent_recognizer:
        _mcp_intent_recognizer.global_intents = gLOBAL_INTENTS
        _mcp_intent_recognizer.update_en_code_mapping()
        print(f"MCP意图识别器全局意图已更新,当前意图数量: {len(_mcp_intent_recognizer.global_intents)}")

    print("全局意图映射已更新")