matrix-for-agents 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66) hide show
  1. agentmatrix/__init__.py +20 -0
  2. agentmatrix/agents/__init__.py +1 -0
  3. agentmatrix/agents/base.py +572 -0
  4. agentmatrix/agents/claude_coder.py +10 -0
  5. agentmatrix/agents/data_crawler.py +14 -0
  6. agentmatrix/agents/post_office.py +212 -0
  7. agentmatrix/agents/report_writer.py +14 -0
  8. agentmatrix/agents/secretary.py +10 -0
  9. agentmatrix/agents/stateful.py +10 -0
  10. agentmatrix/agents/user_proxy.py +82 -0
  11. agentmatrix/agents/worker.py +30 -0
  12. agentmatrix/backends/__init__.py +1 -0
  13. agentmatrix/backends/llm_client.py +414 -0
  14. agentmatrix/backends/mock_llm.py +35 -0
  15. agentmatrix/cli_runner.py +94 -0
  16. agentmatrix/core/__init__.py +0 -0
  17. agentmatrix/core/action.py +50 -0
  18. agentmatrix/core/browser/bing.py +208 -0
  19. agentmatrix/core/browser/browser_adapter.py +298 -0
  20. agentmatrix/core/browser/browser_common.py +85 -0
  21. agentmatrix/core/browser/drission_page_adapter.py +1296 -0
  22. agentmatrix/core/browser/google.py +230 -0
  23. agentmatrix/core/cerebellum.py +121 -0
  24. agentmatrix/core/events.py +22 -0
  25. agentmatrix/core/loader.py +185 -0
  26. agentmatrix/core/loader_v1.py +146 -0
  27. agentmatrix/core/log_util.py +158 -0
  28. agentmatrix/core/message.py +32 -0
  29. agentmatrix/core/prompt_engine.py +30 -0
  30. agentmatrix/core/runtime.py +211 -0
  31. agentmatrix/core/session.py +20 -0
  32. agentmatrix/db/__init__.py +1 -0
  33. agentmatrix/db/database.py +79 -0
  34. agentmatrix/db/vector_db.py +213 -0
  35. agentmatrix/docs/Design.md +109 -0
  36. agentmatrix/docs/Framework Capbilities.md +105 -0
  37. agentmatrix/docs/Planner Design.md +148 -0
  38. agentmatrix/docs/crawler_flow.md +110 -0
  39. agentmatrix/docs/report_writer.md +83 -0
  40. agentmatrix/docs/review.md +99 -0
  41. agentmatrix/docs/skill_design.md +23 -0
  42. agentmatrix/profiles/claude_coder.yml +40 -0
  43. agentmatrix/profiles/mark.yml +26 -0
  44. agentmatrix/profiles/planner.yml +21 -0
  45. agentmatrix/profiles/prompts/base.txt +88 -0
  46. agentmatrix/profiles/prompts/base_v1.txt +101 -0
  47. agentmatrix/profiles/prompts/base_v2.txt +94 -0
  48. agentmatrix/profiles/tom_the_data_crawler.yml +38 -0
  49. agentmatrix/profiles/user_proxy.yml +17 -0
  50. agentmatrix/skills/__init__.py +1 -0
  51. agentmatrix/skills/crawler_helpers.py +315 -0
  52. agentmatrix/skills/data_crawler.py +777 -0
  53. agentmatrix/skills/filesystem.py +204 -0
  54. agentmatrix/skills/notebook.py +158 -0
  55. agentmatrix/skills/project_management.py +114 -0
  56. agentmatrix/skills/report_writer.py +194 -0
  57. agentmatrix/skills/report_writer_utils.py +379 -0
  58. agentmatrix/skills/search_tool.py +383 -0
  59. agentmatrix/skills/terminal_ctrl.py +122 -0
  60. agentmatrix/skills/utils.py +33 -0
  61. agentmatrix/skills/web_searcher.py +1107 -0
  62. matrix_for_agents-0.1.2.dist-info/METADATA +44 -0
  63. matrix_for_agents-0.1.2.dist-info/RECORD +66 -0
  64. matrix_for_agents-0.1.2.dist-info/WHEEL +5 -0
  65. matrix_for_agents-0.1.2.dist-info/licenses/LICENSE +190 -0
  66. matrix_for_agents-0.1.2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,230 @@
1
+ import traceback
2
+
3
async def extract_search_results(adapter, tab, settle_seconds=3.0):
    """
    Extract search results from a Google search results page.

    Each result is anchored on an <h3> title element; the URL comes from the
    nearest ancestor <a>, and the snippet from the next sibling of the third
    <div> ancestor above that <a> (Google's current result markup — fragile,
    revisit if Google changes its DOM).

    Args:
        adapter: DrissionPageAdapter instance (currently unused here; kept for
            API symmetry with the other browser helpers).
        tab: Current browser tab handle.
        settle_seconds: How long to wait for results to render (default 3.0;
            generalized from the previously hard-coded 3-second sleep).

    Returns:
        List of dictionaries containing title, url, and snippet for each
        search result.
    """
    import asyncio

    print("\n4. Extracting search results...")
    results = []

    try:
        # Wait for search results to load.  Use asyncio.sleep so we do not
        # block the event loop (the original time.sleep stalled all tasks).
        print("    Waiting for search results to load...")
        await asyncio.sleep(settle_seconds)

        # Find all h3 elements (each h3 is a search result title)
        search_result_elements = tab.eles('@tag()=h3')

        print(f"    Found {len(search_result_elements)} h3 elements")

        for idx, h3_element in enumerate(search_result_elements):
            try:
                print(f"    Processing result {idx+1}...")

                # Extract title from h3
                title = h3_element.text.strip()
                if not title:
                    print(f"    No title found in h3 {idx+1}")
                    continue

                print(f"    Found title: {title[:50]}...")

                # The result link is normally the h3's direct parent <a>.
                a_element = h3_element.parent()

                # Verify it's an <a> element
                if not a_element or a_element.tag != 'a':
                    # Fallback: search for an <a> up to 3 levels up the chain.
                    current = h3_element
                    a_element = None
                    for _ in range(3):
                        current = current.parent() if current else None
                        if current and current.tag == 'a':
                            a_element = current
                            break

                if not a_element:
                    print(f"    No parent <a> found for h3 {idx+1}")
                    continue

                # Extract URL from <a> element
                url = a_element.attr('href')
                if not url:
                    print(f"    No href found in <a> of result {idx+1}")
                    continue

                print(f"    Found URL: {url}")

                # Navigate up from <a> through 3 levels of <div> elements;
                # the sibling of the 3rd div contains the snippet text.
                current = a_element
                div_count = 0
                target_div = None

                while current and div_count < 3:
                    current = current.parent()
                    if current and current.tag == 'div':
                        div_count += 1
                        if div_count == 3:
                            target_div = current
                            break

                if not target_div:
                    print(f"    Could not find 3 levels of <div> for result {idx+1}")
                    snippet = "No description available"
                else:
                    # Snippet lives in the sibling div of the 3rd-level div.
                    snippet_div = target_div.next()

                    if snippet_div:
                        snippet = snippet_div.text.strip() if snippet_div.text else "No description available"
                        print(f"    Found snippet: {snippet[:50]}...")
                    else:
                        snippet = "No description available"

                # title and url were both validated above, so record the result
                # directly (the old redundant `if title and url` check is gone).
                results.append({
                    'title': title,
                    'url': url,
                    'snippet': snippet
                })
                print(f"    ✓ Successfully extracted result {idx+1}")

            except Exception as e:
                # One malformed result must not abort the whole extraction.
                print(f"    Error extracting result {idx+1}: {e}")
                traceback.print_exc()
                continue

        print(f"✓ Successfully extracted {len(results)} search results")

    except Exception as e:
        traceback.print_exc()
        print(f"❌ Error extracting search results: {e}")

    return results
119
+
120
+
121
async def search_google(adapter, tab, query, max_pages=5, page=None):
    """
    Perform a Google search and extract results from multiple pages.

    Args:
        adapter: DrissionPageAdapter instance
        tab: Current browser tab handle
        query: Search query string
        max_pages: Maximum number of pages to extract (default: 5)
        page: Specific page to extract (default: None). If specified, only
            returns results from that page.

    Returns:
        List of dictionaries containing title, url, and snippet for each
        search result.
    """
    import asyncio

    print(f"\n=== Google Search: {query} (max pages: {max_pages}) ===")

    # Navigate to Google
    print("1. Navigating to Google...")
    interaction_report = await adapter.navigate(tab, "https://www.google.com")
    print(f"✓ Navigation completed. URL changed: {interaction_report.is_url_changed}")

    # Let the landing page settle without blocking the event loop
    # (the original time.sleep stalled every other coroutine).
    await asyncio.sleep(2)

    # Type search query in textarea and submit
    print("2. Typing search query...")
    await adapter.type_text(tab, "@@tag()=textarea", f"{query}", True)
    search_btn = await adapter.find_element(tab, 'xpath:(//input[@type="submit" and @role="button"])[2]')
    await adapter.click_and_observe(tab, search_btn)
    print("✓ Search query submitted")

    # Stabilize the search results page
    print("\n3. Stabilizing search results page...")
    stabilization_success = await adapter.stabilize(tab)
    print(f"✓ Stabilization completed: {stabilization_success}")

    # If page is specified, only extract that specific page
    if page is not None:
        print(f"\n=== Extracting page {page} only ===")

        # Step forward from page 2 up to the requested page.  The previous
        # implementation walked the page numbers DOWNWARD (Page N, N-1, ... 2)
        # and therefore always ended up extracting page 2.
        for target_page in range(2, page + 1):
            try:
                next_page_selector = f'css:a[aria-label="Page {target_page}"]'
                print(f"Looking for Page {target_page}...")
                next_page_link = tab.ele(next_page_selector, timeout=2)

                if next_page_link:
                    print(f"✓ Found Page {target_page}, clicking...")
                    next_page_link.click()
                    await asyncio.sleep(2)
                    await adapter.stabilize(tab)
                else:
                    print(f"✗ Page {page} not found")
                    return []
            except Exception as e:
                print(f"✗ Error navigating to page {page}: {e}")
                return []

        # Extract results from the specified page
        print(f"\n=== Processing page {page} ===")
        page_results = await extract_search_results(adapter, tab)
        print(f"\n=== Total results collected: {len(page_results)} ===")
        return page_results

    # Extract search results from multiple pages (original logic)
    all_results = []
    current_page = 1

    while current_page <= max_pages:
        print(f"\n=== Processing page {current_page} ===")

        # Extract results from current page
        page_results = await extract_search_results(adapter, tab)
        all_results.extend(page_results)

        # Check if we should continue to next page
        if current_page < max_pages:
            # Google exposes pagination links as a[aria-label="Page X"]
            next_page_num = current_page + 1
            next_page_selector = f'css:a[aria-label="Page {next_page_num}"]'

            try:
                print(f"\nLooking for next page (Page {next_page_num})...")
                next_page_link = tab.ele(next_page_selector, timeout=2)

                if next_page_link:
                    print(f"✓ Found next page link, clicking...")
                    next_page_link.click()
                    await asyncio.sleep(2)  # Wait for page to load

                    # Stabilize after page change
                    await adapter.stabilize(tab)
                    current_page += 1
                else:
                    print(f"✓ No more pages available")
                    break

            except Exception as e:
                print(f"✓ No more pages available or error finding next page: {e}")
                break
        else:
            print(f"\n✓ Reached maximum page limit ({max_pages})")
            break

    print(f"\n=== Total results collected: {len(all_results)} ===")
    return all_results
@@ -0,0 +1,121 @@
1
+ # core/cerebellum.py
2
+ import json
3
+ import textwrap
4
+ from ..core.log_util import AutoLoggerMixin
5
+ import logging
6
class Cerebellum(AutoLoggerMixin):
    """LLM-backed "interface manager": converts a Brain's free-form intent
    into a concrete function-call JSON, asking the Brain follow-up questions
    when parameters are missing or ambiguous."""
    _log_from_attr = "log_name"  # logger name comes from the self.log_name attribute
    _custom_log_level = logging.DEBUG
    def __init__(self, backend_client, agent_name):
        # backend_client: the (fast/small) LLM client used for negotiation turns
        self.backend = backend_client
        self.agent_name = agent_name
        self.log_name = f"{agent_name}(Cerebellum)"

    async def think(self, messages):
        """Delegate one chat-completion call to the backend client."""
        return await self.backend.think(messages)

    async def negotiate(self, initial_intent: str, tools_manifest: str, contacts, brain_callback) -> dict:
        """
        Negotiation loop: if the intent is unclear, the Cerebellum asks the Brain back.

        Args:
            initial_intent: the Brain's original idea.
            tools_manifest: the list of available tools (string injected into the prompt).
            contacts: available mail recipients (string injected into the prompt).
            brain_callback: an async function that lets the Cerebellum question
                the Brain: (question_str) -> answer_str.

        Returns:
            The ``action_json`` dict when the backend reports READY, or a
            ``{"action": "_parse_failed", ...}`` dict if negotiation does not
            converge within ``max_turns``.
        """
        system_prompt = textwrap.dedent(f"""
        You are the Interface Manager (Cerebellum). Your job is to convert User Intent into a Function Call JSON.

        [Available Tools]:
        {tools_manifest}

        [Available Email Receipients]:
        {contacts}

        [Instructions]:
        1. Identify the tool the user wants to use.
        2. Check if ALL required parameters for that tool are present in the Intent. Rview Intent carefully, find all required parameters and form a proper JSON as output.
        3. DECISION:
           - If READY: Output JSON `{{ "status": "READY", "action_json": {{ "action": "name", "params": {{...}} }} }}`
           - If MISSING PARAM: Output JSON `{{ "status": "ASK", "question": "What is the value for [param_name]?" }}`
           - If AMBIGUOUS: Output JSON `{{ "status": "ASK", "question": "whatever need to be clarified" }}`
           - If Not fesible: Output JSON `{{ "status": "NOT_FEASIBLE", "reason": "No matching tool found." }}`

        Output ONLY valid JSON.
        """)

        initial_intent = textwrap.dedent(f"""[User Intent]:
        {initial_intent}
        """)
        # Keep a local negotiation history so the Brain does not forget
        # what it was just trying to do.
        negotiation_history =[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": initial_intent}
        ]

        self.logger.debug(f"开始谈判:{initial_intent}")

        #print(f"About to analyze the brain's intent: {initial_intent}")

        max_turns = 5  # keep the two models from chatting with each other forever

        for i in range(max_turns):
            # 1. Cerebellum reasoning: what information is still missing?
            # A dedicated prompt asks it to check parameter completeness.

            # Run inference on the cerebellum (a fast/small model is used here).
            response = await self.backend.think(messages=negotiation_history)
            reasoning_str= response['reasoning']
            reply_str= response['reply']

            #loop = asyncio.get_running_loop()
            # (abandoned idea: push the sync logger.debug calls into a thread pool)
            self.logger.debug(f"小脑思考:{reasoning_str}")
            self.logger.debug(f"小脑回复:{reply_str}")

            # Strip markdown code fences before JSON parsing.
            raw_content = response['reply'].replace("```json", "").replace("```", "").strip()

            try:
                decision = json.loads(raw_content)
                status = decision.get("status")

                if status == "READY":
                    return decision["action_json"]

                elif status == "NOT_FEASIBLE":
                    # Originally meant to give up and return an error action.
                    # NOTE(review): the code does NOT return here — it asks the
                    # brain for clarification and keeps negotiating; confirm
                    # which behavior is actually intended.
                    question = "Intent is not feasible."
                    # Record this round of the conversation.
                    self.logger.debug(question)
                    answer = await brain_callback(question)
                    negotiation_history.append({"role": "assistant", "content": "Need clarification."})
                    negotiation_history.append({"role": "user", "content": answer})

                elif status == "ASK":
                    # === Key point: ask the Brain back ===
                    question = decision.get("question")
                    self.logger.debug(question)

                    # Invoke the callback so the Brain can answer; the Brain
                    # replies based on its original context plus this question.
                    answer = await brain_callback(question)

                    self.logger.debug(question)
                    self.logger.debug('[Brain Reply]:' + answer)
                    negotiation_history.append({"role": "assistant", "content": question})
                    negotiation_history.append({"role": "user", "content": answer})

                    # Continue with the next negotiation round.
                    continue

            except json.JSONDecodeError:
                # Malformed JSON from the model: drop the last exchange (but
                # never the system/user seed) so the next turn retries cleanly.
                if len(negotiation_history)>2:
                    negotiation_history.pop()

        # Turn limit reached without a READY decision.
        return {"action": "_parse_failed", "params": {"error": "Negotiation timeout"}}
@@ -0,0 +1,22 @@
1
+ from dataclasses import dataclass, field
2
+ from datetime import datetime
3
+ from typing import Any, Dict
4
+
5
@dataclass
class AgentEvent:
    """A single event emitted by an agent, with a serialization helper.

    Attributes:
        event_type: kind of event, e.g. THINKING, MAIL_SENT, TOOL_USE...
        source: name of the emitting agent.
        source_status: the emitter's status at emission time.
        content: human-readable event text.
        payload: arbitrary structured data attached to the event.
        timestamp: creation time (local clock), set automatically.
    """
    event_type: str  # THINKING, MAIL_SENT, TOOL_USE...
    source: str
    source_status: str
    content: str
    payload: Dict[str, Any] = field(default_factory=dict)
    timestamp: datetime = field(default_factory=datetime.now)

    def to_dict(self):
        """Return a JSON-friendly dict view of this event."""
        return dict(
            type=self.event_type,
            source=self.source,
            source_status=self.source_status,
            content=self.content,
            payload=self.payload,
            time=self.timestamp.isoformat(),
        )
@@ -0,0 +1,185 @@
1
+ import yaml
2
+ import importlib
3
+ import os
4
+ import logging
5
+ from typing import List, Any, Dict
6
+ from dotenv import load_dotenv
7
+ import json
8
+ from ..backends.llm_client import LLMClient
9
+ from ..core.cerebellum import Cerebellum
10
+ from ..core.log_util import AutoLoggerMixin
11
+
12
+
13
class AgentLoader(AutoLoggerMixin):
    """Loads agent profiles (YAML + prompt templates + LLM config) from a
    profile directory and instantiates fully wired agent objects."""

    def __init__(self, profile_path):
        """
        Args:
            profile_path: directory containing ``.env``, ``llm_config.json``,
                a ``prompts/`` folder and the agents' ``.yml`` profile files.

        Raises:
            FileNotFoundError: if the ``.env`` file is missing.
            PermissionError: if the ``.env`` file is not readable.
            ValueError: if ``llm_config.json`` lacks the mandatory
                ``default_slm`` entry.
        """
        self.profile_path = profile_path
        env_file = os.path.join(profile_path, ".env")
        if not os.path.exists(env_file):
            # Report the actual missing file, not just the directory.
            raise FileNotFoundError(f"环境变量文件不存在: {env_file}")

        if not os.access(env_file, os.R_OK):
            raise PermissionError(f"没有读取文件的权限: {env_file}")

        load_dotenv(env_file)
        llm_config_file = os.path.join(self.profile_path, "llm_config.json")

        with open(llm_config_file, 'r', encoding='utf-8') as f:
            self.llm_config = json.load(f)

        if "default_slm" not in self.llm_config:
            raise ValueError("llm_config.json 中必须包含 'default_slm' 配置,用于驱动默认小脑。")

        # An API_KEY entry may name an environment variable; if that variable
        # is set, replace the entry with its value.
        for config in self.llm_config.values():
            if "API_KEY" in config:
                api_key = config["API_KEY"]
                if os.getenv(api_key) is not None:
                    config["API_KEY"] = os.getenv(api_key)

        # Preload every prompt template: prompts/<name>.txt -> self.prompts[<name>].
        prompts_path = os.path.join(self.profile_path, "prompts")
        self.prompts = {}
        for prompt_txt in os.listdir(prompts_path):
            if prompt_txt.endswith(".txt"):
                self.logger.info(f">>> 加载Prompt模板 {prompt_txt}...")
                with open(os.path.join(prompts_path, prompt_txt), "r", encoding='utf-8') as f:
                    self.prompts[prompt_txt[:-4]] = f.read()

        self.logger.info(self.prompts)

    def _parse_value(self, value):
        """
        Parse a value from the configuration file, supporting basic types.

        YAML parses types automatically, but values may still arrive as the
        strings 'null'/'true'/'false'; normalize those for consistency.
        """
        if isinstance(value, str):
            value_lower = value.lower()
            if value_lower == 'null':
                return None
            elif value_lower == 'true':
                return True
            elif value_lower == 'false':
                return False
        return value

    def load_from_file(self, file_path: str) -> Any:
        """Load and instantiate one Agent from a YAML file (supports dynamic
        mixins, class attributes and instance-attribute initialization)."""
        self.logger.info(f">>> 加载Agent配置文件 {file_path}...")
        with open(file_path, 'r', encoding='utf-8') as f:
            profile = yaml.safe_load(f)

        # 1. Base class information.
        module_name = profile["module"]
        class_name = profile["class_name"]

        # 2. Resolve the mixin list, if configured.
        mixin_classes = []
        if "mixins" in profile and profile["mixins"]:
            for mixin_path in profile["mixins"]:
                mixin_module_name, mixin_class_name = mixin_path.rsplit('.', 1)
                try:
                    mixin_module = importlib.import_module(mixin_module_name)
                    mixin_class = getattr(mixin_module, mixin_class_name)
                    mixin_classes.append(mixin_class)
                    self.logger.info(f">>> ✅ 加载Mixin: {mixin_path}")
                except (ImportError, AttributeError) as e:
                    self.logger.warning(f">>> ⚠️ 加载Mixin失败 {mixin_path}: {e}")

        # 3. Instance-attribute initialization config.
        attribute_inits = profile.pop("attribute_initializations", {})

        # 4. Class-attribute config.
        class_attrs = profile.pop("class_attributes", {})

        # Remove loader-only keys from the profile.
        del profile["module"]
        del profile["class_name"]
        if "mixins" in profile:
            del profile["mixins"]

        # 5. Dynamically import the base Agent class.
        try:
            module = importlib.import_module(module_name)
            base_agent_class = getattr(module, class_name)
        except (ImportError, AttributeError) as e:
            raise ImportError(f"无法加载 Agent 类: {module_name}.{class_name}. 错误: {e}")

        # 6. Dynamically create a subclass when mixins are requested.
        if mixin_classes:
            dynamic_class_name = f"Dynamic{class_name}"
            agent_class = type(
                dynamic_class_name,
                (base_agent_class, *mixin_classes),  # inheritance tuple
                class_attrs  # injected class attributes
            )
            self.logger.info(f">>> 🎨 动态创建Agent类: {dynamic_class_name}")
            self.logger.info(f">>> 继承链: {' -> '.join([c.__name__ for c in (base_agent_class, *mixin_classes)])}")
            if class_attrs:
                self.logger.info(f">>> 类属性: {class_attrs}")
        else:
            # No mixins: use the original class but still apply class attributes.
            if class_attrs:
                for attr_name, attr_value in class_attrs.items():
                    setattr(base_agent_class, attr_name, attr_value)
                self.logger.info(f">>> 设置类属性: {class_attrs}")
            agent_class = base_agent_class

        # 7. Inject the prompt template.  BUGFIX: the key used to be checked
        # as the misspelled "prompte_template", which silently overwrote any
        # user-configured "prompt_template" with "base".
        profile.setdefault("prompt_template", "base")
        prompt_template_name = profile.get("prompt_template")
        self.logger.info(f">>> 加载Prompt模板 {prompt_template_name}...")
        if prompt_template_name in self.prompts:
            prompt = self.prompts[prompt_template_name]
            profile["full_prompt"] = prompt
        else:
            raise ValueError(f"加载Agent {file_path} 失败,Prompt 模板 {prompt_template_name} 未找到。")

        # 8. Instantiate the Agent.
        agent_instance = agent_class(profile.copy())

        # 9. Inject instance attributes (required by some mixins).
        if attribute_inits:
            for attr_name, attr_value in attribute_inits.items():
                parsed_value = self._parse_value(attr_value)
                setattr(agent_instance, attr_name, parsed_value)
                self.logger.info(f">>> 🔧 初始化属性: {attr_name} = {parsed_value}")

        # 10. Wire up the LLM backend ("brain").
        backend_model = agent_instance.backend_model
        agent_instance.brain = self._create_llm_client(backend_model)
        print(f"Agent {agent_instance.name} brain set to {backend_model}")

        # Wire up the Cerebellum, using a custom SLM if configured.
        cerebellum_config = profile.get("cerebellum")
        slm_client = None

        if cerebellum_config:
            slm_model = cerebellum_config.get("backend_model", "default_slm")
            slm_client = self._create_llm_client(slm_model)
            print(f"[{agent_instance.name}] Using custom SLM: {slm_model}")
        else:
            slm_client = self._create_llm_client("default_slm")
            print(f"[{agent_instance.name}] Using system default SLM.")

        agent_instance.cerebellum = Cerebellum(slm_client, agent_instance.name)

        return agent_instance

    def _create_llm_client(self, model_name):
        """Build an LLMClient from the named entry in self.llm_config."""
        llm_config = self.llm_config[model_name]
        url = llm_config['url']
        api_key = llm_config['API_KEY']
        # Use a distinct local name; the original shadowed the parameter.
        backend_model_name = llm_config['model_name']
        return LLMClient(url, api_key, backend_model_name)

    def load_all(self) -> Dict[str, Any]:
        """Load every *.yml profile in the profile directory.

        Returns:
            Dict mapping agent name -> agent instance.
        """
        agents = {}
        for filename in os.listdir(self.profile_path):
            if filename.endswith(".yml"):
                full_path = os.path.join(self.profile_path, filename)
                # BUGFIX: the f-string had no placeholder and printed a
                # literal "(unknown)" instead of the path being loaded.
                print(f"Loading agent from {full_path}...")
                agent = self.load_from_file(full_path)
                agents[agent.name] = agent
        return agents
+ return agents