matrix-for-agents 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentmatrix/__init__.py +20 -0
- agentmatrix/agents/__init__.py +1 -0
- agentmatrix/agents/base.py +572 -0
- agentmatrix/agents/claude_coder.py +10 -0
- agentmatrix/agents/data_crawler.py +14 -0
- agentmatrix/agents/post_office.py +212 -0
- agentmatrix/agents/report_writer.py +14 -0
- agentmatrix/agents/secretary.py +10 -0
- agentmatrix/agents/stateful.py +10 -0
- agentmatrix/agents/user_proxy.py +82 -0
- agentmatrix/agents/worker.py +30 -0
- agentmatrix/backends/__init__.py +1 -0
- agentmatrix/backends/llm_client.py +414 -0
- agentmatrix/backends/mock_llm.py +35 -0
- agentmatrix/cli_runner.py +94 -0
- agentmatrix/core/__init__.py +0 -0
- agentmatrix/core/action.py +50 -0
- agentmatrix/core/browser/bing.py +208 -0
- agentmatrix/core/browser/browser_adapter.py +298 -0
- agentmatrix/core/browser/browser_common.py +85 -0
- agentmatrix/core/browser/drission_page_adapter.py +1296 -0
- agentmatrix/core/browser/google.py +230 -0
- agentmatrix/core/cerebellum.py +121 -0
- agentmatrix/core/events.py +22 -0
- agentmatrix/core/loader.py +185 -0
- agentmatrix/core/loader_v1.py +146 -0
- agentmatrix/core/log_util.py +158 -0
- agentmatrix/core/message.py +32 -0
- agentmatrix/core/prompt_engine.py +30 -0
- agentmatrix/core/runtime.py +211 -0
- agentmatrix/core/session.py +20 -0
- agentmatrix/db/__init__.py +1 -0
- agentmatrix/db/database.py +79 -0
- agentmatrix/db/vector_db.py +213 -0
- agentmatrix/docs/Design.md +109 -0
- agentmatrix/docs/Framework Capbilities.md +105 -0
- agentmatrix/docs/Planner Design.md +148 -0
- agentmatrix/docs/crawler_flow.md +110 -0
- agentmatrix/docs/report_writer.md +83 -0
- agentmatrix/docs/review.md +99 -0
- agentmatrix/docs/skill_design.md +23 -0
- agentmatrix/profiles/claude_coder.yml +40 -0
- agentmatrix/profiles/mark.yml +26 -0
- agentmatrix/profiles/planner.yml +21 -0
- agentmatrix/profiles/prompts/base.txt +88 -0
- agentmatrix/profiles/prompts/base_v1.txt +101 -0
- agentmatrix/profiles/prompts/base_v2.txt +94 -0
- agentmatrix/profiles/tom_the_data_crawler.yml +38 -0
- agentmatrix/profiles/user_proxy.yml +17 -0
- agentmatrix/skills/__init__.py +1 -0
- agentmatrix/skills/crawler_helpers.py +315 -0
- agentmatrix/skills/data_crawler.py +777 -0
- agentmatrix/skills/filesystem.py +204 -0
- agentmatrix/skills/notebook.py +158 -0
- agentmatrix/skills/project_management.py +114 -0
- agentmatrix/skills/report_writer.py +194 -0
- agentmatrix/skills/report_writer_utils.py +379 -0
- agentmatrix/skills/search_tool.py +383 -0
- agentmatrix/skills/terminal_ctrl.py +122 -0
- agentmatrix/skills/utils.py +33 -0
- agentmatrix/skills/web_searcher.py +1107 -0
- matrix_for_agents-0.1.2.dist-info/METADATA +44 -0
- matrix_for_agents-0.1.2.dist-info/RECORD +66 -0
- matrix_for_agents-0.1.2.dist-info/WHEEL +5 -0
- matrix_for_agents-0.1.2.dist-info/licenses/LICENSE +190 -0
- matrix_for_agents-0.1.2.dist-info/top_level.txt +1 -0
agentmatrix/backends/llm_client.py
@@ -0,0 +1,414 @@
+import requests
+import json
+import traceback
+from typing import Dict, Union, List
+import aiohttp
+from ..core.log_util import AutoLoggerMixin
+import logging
+
+class LLMClient(AutoLoggerMixin):
+
+    _custom_log_level = logging.DEBUG
+
+    def __init__(self, url: str, api_key: str, model_name: str):
+        """
+        Initialize the LLM client.
+
+        Args:
+            url (str): URL of the LLM API
+            api_key (str): API key
+            model_name (str): model identifier sent with each request
+        """
+        self.url = url
+        self.api_key = api_key
+        self.model_name = model_name
+        self.headers = {
+            "Authorization": f"Bearer {api_key}",
+            "Content-Type": "application/json"
+        }
+        self.gemini_headers = {
+            "Content-Type": "application/json",
+            "x-goog-api-key": self.api_key
+        }
+
+    async def think(self, messages: Union[str, List[Dict[str, str]]], **kwargs) -> Dict[str, str]:
+        if isinstance(messages, str):
+            # If messages is a bare string, wrap it in the OpenAI chat-messages format
+            messages = [{"role": "user", "content": messages}]
+        if "googleapis.com" in self.url or "gemini" in self.model_name.lower():
+            return await self._async_stream_think_gemini(messages, **kwargs)
+        return await self.async_stream_think(messages, **kwargs)
+
+    def _to_gemini_messages(self, messages: list[dict[str, str]]) -> dict:
+        """
+        Convert OpenAI-format messages to the Gemini format.
+        """
+        gemini_contents = []
+        system_instruction = None
+
+        for msg in messages:
+            role = msg.get("role")
+            content = msg.get("content", "")
+
+            if role == "system":
+                # Gemini takes the system instruction as a top-level field
+                system_instruction = {"parts": [{"text": content}]}
+            elif role == "user":
+                gemini_contents.append({"role": "user", "parts": [{"text": content}]})
+            elif role == "assistant":
+                gemini_contents.append({"role": "model", "parts": [{"text": content}]})
+
+        return {
+            "contents": gemini_contents,
+            "systemInstruction": system_instruction
+        }
+
+    def _construct_gemini_config(self, **kwargs) -> dict:
+        """
+        Build a generationConfig matching the official spec,
+        nesting the thinkingConfig fields as required.
+        """
+        config = {}
+
+        # Extract the thinking-related parameters and wrap them
+        thinking_config = {}
+        if "thinking_level" in kwargs:
+            thinking_config["thinkingLevel"] = kwargs.pop("thinking_level")
+        if "include_thoughts" in kwargs:
+            thinking_config["includeThoughts"] = kwargs.pop("include_thoughts")
+
+        # Map other common parameters (OpenAI naming -> Gemini naming)
+        if "max_tokens" in kwargs:
+            config["maxOutputTokens"] = kwargs.pop("max_tokens")
+        if "temperature" in kwargs:
+            config["temperature"] = kwargs.pop("temperature")
+        if "top_p" in kwargs:
+            config["topP"] = kwargs.pop("top_p")
+
+        # Fold any remaining kwargs into the config
+        config.update(kwargs)
+
+        # Nest the thinking options per the official format, if present
+        if thinking_config:
+            config["thinkingConfig"] = thinking_config
+
+        return config
+
+    async def _async_stream_think_gemini(self, messages: list[dict[str, str]], **kwargs) -> Dict[str, str]:
+        """
+        Gemini-specific async streaming call.
+        """
+        try:
+            # 1. Convert the message format
+            payload_parts = self._to_gemini_messages(messages)
+
+            # 2. Handle tools: pop them *before* building generationConfig so they
+            #    land at the top level only, not duplicated inside the config.
+            #    This assumes 'tools' is already in Gemini format; add conversion
+            #    logic here if needed.
+            tools = kwargs.pop("tools", None)
+
+            # 3. Build the request body (matching the official structure)
+            generation_config = self._construct_gemini_config(**kwargs)
+
+            data = {
+                "contents": payload_parts["contents"],
+                "generationConfig": generation_config
+            }
+
+            if payload_parts["systemInstruction"]:
+                data["systemInstruction"] = payload_parts["systemInstruction"]
+
+            if tools:
+                data["tools"] = tools
+
+            final_content = ""
+            final_reasoning = ""
+
+            timeout = aiohttp.ClientTimeout(total=120)
+
+            async with aiohttp.ClientSession(headers=self.gemini_headers, timeout=timeout, trust_env=True) as session:
+                async with session.post(self.url, json=data) as resp:
+                    if resp.status != 200:
+                        error_text = await resp.text()
+                        raise Exception(f"Gemini Error {resp.status}: {error_text}")
+
+                    # Parse the Gemini stream (a JSON array streamed incrementally)
+                    buffer = ""
+                    brace_count = 0
+                    in_string = False
+                    escape = False
+
+                    async for chunk in resp.content.iter_chunked(1024):
+                        if not chunk:
+                            continue
+                        text = chunk.decode("utf-8", errors="ignore")
+
+                        for char in text:
+                            # Minimal JSON object extractor: skip the array brackets
+                            # and the commas between top-level objects
+                            if brace_count == 0 and char in "[],":
+                                continue
+
+                            buffer += char
+
+                            # Track string/escape state so braces inside string
+                            # literals are not counted
+                            if char == '"' and not escape:
+                                in_string = not in_string
+                            if char == '\\' and not escape:
+                                escape = True
+                            else:
+                                escape = False
+
+                            if not in_string:
+                                if char == '{':
+                                    brace_count += 1
+                                elif char == '}':
+                                    brace_count -= 1
+
+                            if brace_count == 0 and buffer.strip():
+                                try:
+                                    obj = json.loads(buffer)
+                                    # Parse the candidates
+                                    candidates = obj.get("candidates", [])
+                                    if candidates:
+                                        content_obj = candidates[0].get("content", {})
+                                        parts = content_obj.get("parts", [])
+
+                                        # Iterate over parts (Gemini may return several parts in one chunk)
+                                        for part in parts:
+                                            part_text = part.get("text", "")
+
+                                            # Try to identify reasoning/thought parts. The Gemini
+                                            # API has not standardized a "thought" field yet, but
+                                            # if a part ever carries "thought": true it is
+                                            # captured here.
+                                            is_thought = part.get("thought", False)
+
+                                            if is_thought:
+                                                final_reasoning += part_text
+                                            else:
+                                                final_content += part_text
+
+                                except json.JSONDecodeError:
+                                    pass
+                                finally:
+                                    buffer = ""
+
+            return {
+                "reasoning": final_reasoning,
+                "reply": final_content
+            }
+
+        except Exception as e:
+            self.logger.exception("Gemini call failed")
+            raise Exception(f"Gemini call failed: {str(e)}")
+
+    def no_stream_think(self, messages: list[dict[str, str]], **kwargs) -> Dict[str, str]:
+        """
+        Call the LLM API without streaming.
+
+        Args:
+            messages (list[dict[str, str]]): chat messages, each with role and content fields
+            **kwargs: other optional parameters
+
+        Returns:
+            Dict[str, str]: a dict with reasoning_content and content
+
+        Raises:
+            Exception: if the API call fails
+        """
+        try:
+            # Build the request payload
+            data = {
+                "messages": messages,
+                "model": self.model_name,
+                "stream": False,
+                **kwargs
+            }
+
+            # Send the request
+            response = requests.post(
+                self.url,
+                headers=self.headers,
+                json=data,
+                timeout=30
+            )
+
+            # Check the response status
+            response.raise_for_status()
+
+            # Parse the response
+            result = response.json()
+
+            # Extract the fields we need from the OpenAI chat-completion shape
+            # (choices[0].message, mirroring the delta parsing in the streaming
+            # methods below)
+            message = result["choices"][0].get("message", {})
+            return {
+                "reasoning_content": message.get("reasoning_content", ""),
+                "content": message.get("content", "")
+            }
+
+        except requests.exceptions.RequestException as e:
+            raise Exception(f"API request failed: {str(e)}")
+        except json.JSONDecodeError:
+            raise Exception("Failed to parse the API response")
+        except KeyError as e:
+            raise Exception(f"Malformed API response, missing required field: {str(e)}")
+        except Exception as e:
+            self.logger.exception("Unexpected error")
+            self.logger.debug(messages)
+            raise Exception(f"Unexpected error: {str(e)}")
+
+    def stream_think(self, messages: list[dict[str, str]], **kwargs) -> Dict[str, str]:
+        """
+        Streaming call to the LLM API, printing the response as it arrives.
+
+        Args:
+            messages (list[dict[str, str]]): chat messages, each with role and content fields
+            **kwargs: other optional parameters
+
+        Returns:
+            Dict[str, str]: a dict with reasoning and reply
+
+        Raises:
+            Exception: if the API call fails
+        """
+        try:
+            # Build the request payload (kwargs are forwarded, matching the
+            # other call paths)
+            data = {
+                "messages": messages,
+                "model": self.model_name,
+                "stream": True,
+                **kwargs
+            }
+
+            # Accumulators for the response
+            final_reasoning_content = ""
+            final_content = ""
+
+            # Send the streaming request
+            response = requests.post(
+                self.url,
+                headers=self.headers,
+                json=data,
+                stream=True,
+                timeout=120
+            )
+            response.raise_for_status()
+
+            think_started = False
+            content_started = False
+            buffer = ""
+            for chunk in response.iter_content(chunk_size=1024, decode_unicode=True):
+                if not chunk:
+                    continue
+                buffer += chunk
+                lines = buffer.split('\n')
+                buffer = lines[-1]  # Keep the incomplete line in the buffer
+
+                for line in lines[:-1]:
+                    if not line.strip():
+                        continue
+                    # Parse the SSE data lines
+                    if line.startswith('data: '):
+                        data_str = line[6:]  # Remove the "data: " prefix
+                        if data_str.strip() == '[DONE]' or not data_str.strip():
+                            continue
+                        try:
+                            payload = json.loads(data_str)
+                        except json.JSONDecodeError:
+                            continue
+                        if 'choices' in payload and payload['choices']:
+                            delta = payload['choices'][0].get('delta', {})
+
+                            # Handle think-mode streaming
+                            reasoning_content = delta.get('reasoning_content', '')
+                            content = delta.get('content', '')
+
+                            if reasoning_content:
+                                if not think_started:
+                                    print("Reasoning: ")
+                                    think_started = True
+                                print(reasoning_content, end='', flush=True)
+                                final_reasoning_content += reasoning_content
+                            if content:
+                                if not content_started:
+                                    print("Content: ")
+                                    content_started = True
+                                print(content, end='', flush=True)
+                                final_content += content
+
+            return {
+                "reasoning": final_reasoning_content,
+                "reply": final_content
+            }
+
+        except requests.exceptions.RequestException as e:
+            traceback.print_exc()
+            raise Exception(f"API request failed: {str(e)}")
+
+        except Exception as e:
+            traceback.print_exc()
+            raise Exception(f"Unexpected error: {str(e)}")
+
+    async def async_stream_think(self, messages: list[dict[str, str]], **kwargs) -> Dict[str, str]:
+        """
+        Async streaming call to the LLM API (aiohttp); accumulates the stream
+        instead of printing it.
+        """
+        try:
+            data = {
+                "messages": messages,
+                "model": self.model_name,
+                "stream": True,
+                **kwargs
+            }
+
+            final_reasoning_content = ""
+            final_content = ""
+
+            buffer = ""
+
+            timeout = aiohttp.ClientTimeout(total=120)
+            async with aiohttp.ClientSession(headers=self.headers, timeout=timeout, trust_env=True) as session:
+                async with session.post(self.url, json=data) as resp:
+                    if resp.status != 200:
+                        error_text = await resp.text()
+                        raise Exception(f"API request failed: {resp.status}, message='{error_text}', url='{self.url}'")
+                    async for chunk in resp.content.iter_chunked(1024):
+                        if not chunk:
+                            continue
+                        text = chunk.decode("utf-8", errors="ignore")
+                        buffer += text
+                        lines = buffer.split("\n")
+                        buffer = lines[-1]  # Keep the incomplete line in the buffer
+                        for line in lines[:-1]:
+                            line = line.strip()
+                            if not line:
+                                continue
+                            if line.startswith("data: "):
+                                data_str = line[6:].strip()
+                                if data_str == "[DONE]":
+                                    continue
+                                try:
+                                    payload = json.loads(data_str)
+                                except json.JSONDecodeError:
+                                    continue
+
+                                if "choices" in payload and payload["choices"]:
+                                    delta = payload["choices"][0].get("delta", {})
+                                    reasoning_content = delta.get("reasoning_content", "")
+                                    content = delta.get("content", "")
+
+                                    if reasoning_content:
+                                        final_reasoning_content += reasoning_content
+
+                                    if content:
+                                        final_content += content
+
+            return {
+                "reasoning": final_reasoning_content,
+                "reply": final_content
+            }
+
+        except aiohttp.ClientError as e:
+            traceback.print_exc()
+            raise Exception(f"API request failed: {str(e)}")
+        except Exception as e:
+            traceback.print_exc()
+            raise Exception(f"Unexpected error: {str(e)}")
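For orientation, a minimal usage sketch of the client above. The endpoint URL, key, and model name are placeholders, not values shipped with the package:

    import asyncio
    from agentmatrix.backends.llm_client import LLMClient

    async def demo():
        # Placeholder OpenAI-compatible endpoint and credentials
        client = LLMClient(
            url="https://api.example.com/v1/chat/completions",
            api_key="sk-...",
            model_name="my-model",
        )
        # think() accepts a bare string or a full OpenAI-style message list;
        # a googleapis.com URL or a "gemini" model name routes to the
        # Gemini-specific path instead
        result = await client.think("Say hello in one sentence.", temperature=0.2)
        print(result["reasoning"])  # reasoning stream, if the backend emits one
        print(result["reply"])      # final answer text

    asyncio.run(demo())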
agentmatrix/backends/mock_llm.py
@@ -0,0 +1,35 @@
+import asyncio
+import json
+
+class MockLLM:
+    """Mock LLM that picks its output from keywords in the input, for testing the message flow"""
+
+    async def chat(self, role: str, history: list) -> str:
+        await asyncio.sleep(1.0)  # Simulate thinking time
+
+        last_msg = history[-1]["content"].lower()
+
+        # === Simulated Planner logic ===
+        if role == "Planner":
+            if "start_task" in last_msg:
+                # New task from the user -> delegate to the Coder
+                return "[Action: EMAIL] To: Coder | Subject: Task | Body: Please read data.csv and analyze the data."
+            elif "done" in last_msg:
+                # Reply received from the Coder -> report back to the user
+                return "[Action: REPLY] Task finished; the analysis results are as follows..."
+
+        # === Simulated Coder logic ===
+        elif role == "Coder":
+            if "read" in last_msg and "secretary" not in last_msg:
+                # Task received; it needs a file read -> ask the Secretary
+                return "[Action: EMAIL] To: Secretary | Subject: Help | Body: Please read the file data.csv"
+            elif "file_content" in last_msg:
+                # Reply received from the Secretary -> finish the task
+                return "[Action: REPLY] I have read the file and finished the analysis; the result is Done."
+
+        # === Simulated Secretary (SLM) logic ===
+        elif role == "Secretary":
+            if "read the file" in last_msg:
+                return json.dumps({"tool": "read_file", "args": {"path": "data.csv"}})
+
+        return "[Action: REPLY] I am thinking..."
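A quick smoke test of the mock. The role name and history shape follow the class above; the task text is illustrative:

    import asyncio
    from agentmatrix.backends.mock_llm import MockLLM

    async def demo():
        llm = MockLLM()
        # "start_task" in the last message drives the Planner branch
        history = [{"role": "user", "content": "START_TASK: analyze sales data"}]
        print(await llm.chat("Planner", history))
        # -> [Action: EMAIL] To: Coder | Subject: Task | Body: Please read data.csv and analyze the data.

    asyncio.run(demo())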
agentmatrix/cli_runner.py
@@ -0,0 +1,94 @@
+# cli_runner.py
+import asyncio
+from agentmatrix import AgentMatrix
+from agentmatrix import Email
+import logging
+import uuid
+logger = logging.getLogger('CLI_Runner')
+# 1. Define what happens when mail arrives -> print it out
+
+mail_id_sender_map = {}  # Maps mail IDs to their senders
+
+async def print_to_console(email: Email):
+    f = f"""
+    📨 [New Mail] From {email.sender}:
+    Subject: {email.subject}
+    Body: {email.body}
+    MsgID: {email.id}
+    >> Enter a reply (format: To_Agent: Content) or 'exit':
+    """
+    mail_id_sender_map[email.id] = email.sender  # Record the mapping
+    loop = asyncio.get_running_loop()
+
+    # Hand the log call to the default executor so a blocking handler
+    # cannot stall the event loop
+    await loop.run_in_executor(None, logger.debug, f)
+
+
+def global_event_handler(event):
+    print(f"🔔 Event fired: {event}")
+
+async def main():
+    # 2. Initialize the Matrix
+    matrix = AgentMatrix(agent_profile_path="./profiles", matrix_path='../../Samples/TestWorkspace', async_event_call_back=global_event_handler)
+
+    # 3. Attach the callback: hand our print function to the UserProxy
+    matrix.agents["User"].on_mail_received = print_to_console
+
+    await asyncio.to_thread(print, ">>> System started. Enter commands below.")
+    await asyncio.to_thread(print, ">>> e.g.: Planner: analyze the data for me")
+
+    # 4. Main loop: listen for keyboard input. Blocking input() is tricky
+    #    inside an asyncio program, so we use aioconsole, as production code
+    #    usually would, to keep this demo simple.
+    from aioconsole import ainput  # pip install aioconsole
+    user_session_id = str(uuid.uuid4())
+    while True:
+        try:
+            user_input = await ainput(">> ")  # Await input asynchronously
+
+            if user_input.lower() == "exit":
+                await matrix.save_matrix()
+                break
+
+            if user_input.lower() == "new session":
+                user_session_id = str(uuid.uuid4())
+                await asyncio.to_thread(print, f"✅ New session started, ID: {user_session_id}")
+                continue
+
+            if user_input.lower().startswith("reply:"):
+                parts = user_input.split(":")
+                if len(parts) < 3:
+                    await asyncio.to_thread(print, "❌ Invalid reply format, use 'reply: MsgID: Content'")
+                    continue
+                reply_to_id = parts[1].strip()
+                content = ":".join(parts[2:]).strip()
+                if reply_to_id not in mail_id_sender_map:
+                    await asyncio.to_thread(print, f"❌ No message found with ID: {reply_to_id}")
+                    continue
+                target = mail_id_sender_map[reply_to_id]
+                await matrix.agents["User"].speak(
+                    user_session_id=user_session_id,
+                    to=target,
+                    subject=f"Re: your message {reply_to_id}",
+                    content=content,
+                    reply_to_id=reply_to_id
+                )
+                continue
+
+            if ":" in user_input:
+                target, content = user_input.split(":", 1)
+                # 5. Speak through the UserProxy
+                await matrix.agents["User"].speak(user_session_id, target.strip(), content.strip())
+            else:
+                await asyncio.to_thread(print, "❌ Invalid format, use 'Target: Content'")
+
+        except KeyboardInterrupt:
+            break
+
+if __name__ == "__main__":
+    asyncio.run(main())
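For illustration, a session with the loop above might look like the following. The agent names come from the profile files listed earlier; the message ID and the agents' output are hypothetical:

    >>> System started. Enter commands below.
    >> Planner: analyze the data for me
    📨 [New Mail] From Planner:
        Subject: Task finished
        MsgID: 3f9c
    >> reply: 3f9c: thanks, please also chart it
    >> exit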
agentmatrix/core/__init__.py: file without changes
agentmatrix/core/action.py
@@ -0,0 +1,50 @@
+from enum import Enum
+from dataclasses import dataclass
+from typing import Any, Dict
+import json
+
+
+@dataclass
+class ActionMetadata:
+    name: str
+    description: str
+    # Holds the generated JSON Schema that is shown to the SLM
+    json_schema: Dict[str, Any]
+
+def register_action(
+    description: str,
+    param_infos: Dict[str, str] = None
+):
+    """
+    Args:
+        description: natural-language description of what the function does
+        param_infos: mapping of parameter name -> its meaning (e.g. {"to": "name of the target agent"})
+    """
+    if param_infos is None:
+        param_infos = {}
+
+    def decorator(func):
+        func._is_action = True
+        func._action_desc = description
+        func._action_param_infos = param_infos
+        return func
+    return decorator
+
+@dataclass
+class ActionDef:
+    name: str
+    description: str
+    parameters: Dict[str, str]
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]):
+        return cls(
+            name=data["name"],
+            description=data["description"],
+            parameters=data.get("parameters", {})
+        )
+
+    def to_prompt(self):
+        return f"- {self.name}: {self.description} | Params: {json.dumps(self.parameters, ensure_ascii=False)}"
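A small sketch of how the decorator and ActionDef fit together. The send_email function and the manual wiring are illustrative only, since the loader that normally collects tagged actions lives elsewhere in the package:

    # Hypothetical action: the decorator only tags the function with metadata
    @register_action(
        description="Send an email to another agent",
        param_infos={"to": "name of the target agent", "body": "message text"},
    )
    def send_email(to: str, body: str):
        ...

    # The tagged metadata can be lifted into an ActionDef and rendered
    # into a prompt line for the model
    action = ActionDef(
        name=send_email.__name__,
        description=send_email._action_desc,
        parameters=send_email._action_param_infos,
    )
    print(action.to_prompt())
    # -> - send_email: Send an email to another agent | Params: {"to": "name of the target agent", "body": "message text"}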