beswarm 0.2.82__py3-none-any.whl → 0.2.84__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- beswarm/agents/planact.py +27 -72
- beswarm/aient/aient/architext/architext/core.py +314 -56
- beswarm/aient/aient/architext/test/test.py +511 -28
- beswarm/aient/aient/models/chatgpt.py +9 -8
- beswarm/prompt.py +44 -17
- {beswarm-0.2.82.dist-info → beswarm-0.2.84.dist-info}/METADATA +1 -1
- {beswarm-0.2.82.dist-info → beswarm-0.2.84.dist-info}/RECORD +9 -9
- {beswarm-0.2.82.dist-info → beswarm-0.2.84.dist-info}/WHEEL +0 -0
- {beswarm-0.2.82.dist-info → beswarm-0.2.84.dist-info}/top_level.txt +0 -0
beswarm/agents/planact.py
CHANGED
@@ -5,17 +5,16 @@ import json
 import difflib
 import asyncio
 import tomllib
-import platform
 from pathlib import Path
-from datetime import datetime
 from typing import List, Dict, Union

 from ..broker import MessageBroker
 from ..aient.aient.models import chatgpt
 from ..aient.aient.plugins import get_function_call_list, registry
-from ..prompt import worker_system_prompt, instruction_system_prompt
+from ..prompt import worker_system_prompt, instruction_system_prompt, Goal
 from ..utils import extract_xml_content, get_current_screen_image_message, replace_xml_content, register_mcp_tools, setup_logger
 from ..aient.aient.models.chatgpt import ModelNotFoundError, TaskComplete, RetryFailedError, InputTokenCountExceededError, BadRequestError
+from ..aient.aient.architext.architext import Messages, SystemMessage, UserMessage, AssistantMessage, ToolCalls, ToolResults, Texts, RoleMessage, Images, Files

 try:
     from importlib import metadata
@@ -31,24 +30,22 @@ except metadata.PackageNotFoundError:

 class BaseAgent:
     """Base class for agents, handling common initialization and disposal."""
-    def __init__(self, goal: str, tools_json: List, agent_config: Dict, work_dir: str, cache_messages: Union[bool, List[Dict]], broker: MessageBroker, listen_topic: str, publish_topic: str, status_topic: str
+    def __init__(self, goal: str, tools_json: List, agent_config: Dict, work_dir: str, cache_messages: Union[bool, List[Dict]], broker: MessageBroker, listen_topic: str, publish_topic: str, status_topic: str):
         self.goal = goal
         self.tools_json = tools_json
         self.work_dir = work_dir
+        self.pkl_file = Path(work_dir) / ".beswarm" / "history.pkl"
         self.cache_file = Path(work_dir) / ".beswarm" / "work_agent_conversation_history.json"
         self.config = agent_config
         self.logger = agent_config.get("logger", None)
         self.cache_messages = cache_messages
         if cache_messages and isinstance(cache_messages, bool) and cache_messages == True:
-            self.cache_messages =
+            self.cache_messages = Messages.load(self.pkl_file)
         self.broker = broker
         self.listen_topic = listen_topic
         self.error_topic = listen_topic + ".error"
         self.publish_topic = publish_topic
         self.status_topic = status_topic
-        self.graph_update_topic = graph_update_topic
-        if self.graph_update_topic:
-            self.graph_update_subscription = self.broker.subscribe(self.handle_graph_update, self.graph_update_topic)

         self._subscription = self.broker.subscribe(self.handle_message, [self.listen_topic, self.error_topic])

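The new `pkl_file` attribute and the `Messages.load(self.pkl_file)` call change how a restarted agent recovers its state: with `cache_messages=True`, the history is now reloaded from `.beswarm/history.pkl` rather than from a plain message list. The architext `Messages` API is internal to this package, so the sketch below only illustrates the pickle round-trip idea with the standard library; the file name comes from the diff, the rest is assumed for illustration.

```python
import pickle
from pathlib import Path

# Hypothetical stand-in for the persisted conversation history. The real
# code pickles an architext `Messages` object; a plain list of role/content
# dicts is used here only to show the save/load round-trip.
history = [
    {"role": "system", "content": "You are a worker agent."},
    {"role": "user", "content": "<goal>refactor planact.py</goal>"},
]

pkl_file = Path(".beswarm") / "history.pkl"  # same relative path as in the diff
pkl_file.parent.mkdir(parents=True, exist_ok=True)

pkl_file.write_bytes(pickle.dumps(history))      # persisted after each turn
restored = pickle.loads(pkl_file.read_bytes())   # reloaded on restart
assert restored == history
```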
@@ -58,11 +55,6 @@ class BaseAgent:
         """Process incoming messages. Must be implemented by subclasses."""
         raise NotImplementedError

-    def handle_graph_update(self, message: Dict):
-        """Handle graph update messages."""
-        if message.get("message") == "graph_updated":
-            self.graph_tree = message.get("graph")
-
     def dispose(self):
         """Cancels the subscription and cleans up resources."""
         if self._subscription:
@@ -71,16 +63,16 @@ class BaseAgent:

 class InstructionAgent(BaseAgent):
     """Generates instructions and publishes them to a message broker."""
-    def __init__(self, goal: str, tools_json: List, agent_config: Dict, work_dir: str, cache_messages: Union[bool, List[Dict]], broker: MessageBroker, listen_topic: str, publish_topic: str, status_topic: str
-        super().__init__(goal, tools_json, agent_config, work_dir, cache_messages, broker, listen_topic, publish_topic, status_topic
+    def __init__(self, goal: str, tools_json: List, agent_config: Dict, work_dir: str, cache_messages: Union[bool, List[Dict]], broker: MessageBroker, listen_topic: str, publish_topic: str, status_topic: str):
+        super().__init__(goal, tools_json, agent_config, work_dir, cache_messages, broker, listen_topic, publish_topic, status_topic)

         self.last_instruction = None
         self.agent = chatgpt(**self.config)

         self.goal_diff = None

-        if self.cache_messages and
-            old_goal =
+        if self.cache_messages and self.cache_messages.provider("goal"):
+            old_goal = self.cache_messages.provider("goal").content
             if old_goal.strip() != goal.strip():
                 diff_generator = difflib.ndiff(old_goal.splitlines(), goal.splitlines())
                 changed_lines = []
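The constructor now reads the previously stored goal back through `self.cache_messages.provider("goal").content` and keeps only the lines that changed. The provider lookup is architext-specific, but the diffing itself is plain `difflib`; a runnable sketch of that part follows, with made-up goal strings and an assumed filter condition (the exact condition sits outside this hunk).

```python
import difflib

# Hypothetical old and new goals; in the agent these come from the cached
# history ("goal" provider) and the constructor argument respectively.
old_goal = "Refactor planact.py\nKeep the broker topics unchanged"
new_goal = "Refactor planact.py\nPersist history to history.pkl"

goal_diff = None
if old_goal.strip() != new_goal.strip():
    diff_generator = difflib.ndiff(old_goal.splitlines(), new_goal.splitlines())
    # Assumed filter: drop unchanged lines (prefixed "  "), keep the rest.
    changed_lines = [line for line in diff_generator if not line.startswith("  ")]
    goal_diff = "\n".join(changed_lines).strip()

print(goal_diff)
# e.g.
# - Keep the broker topics unchanged
# + Persist history to history.pkl
```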
@@ -89,38 +81,14 @@ class InstructionAgent(BaseAgent):
                         changed_lines.append(line)
             self.goal_diff = '\n'.join(changed_lines).strip()

-    def get_conversation_history(self, raw_conversation_history: List[Dict]):
-
-        for index, message in enumerate(raw_conversation_history):
-            if message.get("content") and isinstance(message["content"], str):
-                if "<knowledge_graph_tree>" in message["content"] and self.graph_tree:
-                    message["content"] = replace_xml_content(message["content"], "knowledge_graph_tree", self.graph_tree)
-                    raw_conversation_history[index] = message
-                if "\n\nYour message **must** end with [done] to signify the end of your output." in message["content"]:
-                    message["content"] = message["content"].replace("\n\nYour message **must** end with [done] to signify the end of your output.", "")
-
+    async def get_conversation_history(self, raw_conversation_history: List[Dict]):
         conversation_history = copy.deepcopy(raw_conversation_history)
-
-
-
-
-
-
-        match = re.search(regex, original_content, re.DOTALL)
-        if match:
-            extracted_content = f"<latest_file_content>{match.group(1)}</latest_file_content>\n\n"
-        else:
-            extracted_content = ""
-        if isinstance(conversation_history[0]["content"], str):
-            conversation_history[0]["content"] = extracted_content + conversation_history[0]["content"]
-        elif isinstance(conversation_history[0]["content"], list) and extracted_content:
-            conversation_history[0]["content"].append({"type": "text", "text": extracted_content})
-
-        for message in conversation_history:
-            if message.get("content") and isinstance(message["content"], str):
-                if "<knowledge_graph_tree>" in message["content"] and self.graph_tree:
-                    message["content"] = replace_xml_content(message["content"], "knowledge_graph_tree", self.graph_tree)
-
+        conversation_history.save(self.pkl_file)
+        self.cache_file.write_text(json.dumps(await conversation_history.render_latest(), ensure_ascii=False, indent=4), encoding="utf-8")
+        latest_file_content = conversation_history.pop("files")
+        conversation_history.pop(0)
+        if conversation_history and latest_file_content:
+            conversation_history[0] = latest_file_content + conversation_history[0]

         return conversation_history

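`get_conversation_history` becomes async and delegates most of the old string surgery to architext: it saves the history to `history.pkl`, writes a rendered JSON snapshot to the cache file, then pops the `"files"` block and the system message before returning. `render_latest()` and `pop()` are architext internals, so the sketch below covers only the stdlib half of the flow, writing an already-rendered history to the JSON cache file with the same serialization options as the diff.

```python
import json
from pathlib import Path

# Hypothetical pre-rendered history; in the agent this is the result of
# `await conversation_history.render_latest()`.
rendered_history = [
    {"role": "system", "content": "You are a worker agent."},
    {"role": "user", "content": "<goal>refactor planact.py</goal>"},
    {"role": "assistant", "content": "Understood. [done]"},
]

cache_file = Path(".beswarm") / "work_agent_conversation_history.json"
cache_file.parent.mkdir(parents=True, exist_ok=True)

# Same options as the diff: readable UTF-8 output, 4-space indentation.
cache_file.write_text(
    json.dumps(rendered_history, ensure_ascii=False, indent=4),
    encoding="utf-8",
)
```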
@@ -141,7 +109,7 @@ class InstructionAgent(BaseAgent):
                 f"这是你上次给assistant的错误格式的指令:\n{self.last_instruction}"
             )

-        self.agent.conversation["default"][1:] = self.get_conversation_history(message["conversation"])
+        self.agent.conversation["default"][1:] = await self.get_conversation_history(message["conversation"])

         if "find_and_click_element" in json.dumps(self.tools_json):
             instruction_prompt = await get_current_screen_image_message(instruction_prompt)
@@ -183,7 +151,7 @@ class InstructionAgent(BaseAgent):
                 "现在开始执行第一步:\n"
                 f"{instruction}"
             )
-            self.broker.publish({"instruction": instruction
+            self.broker.publish({"instruction": instruction, "conversation": message["conversation"]}, self.publish_topic)
             self.last_instruction = None
         else:
             self.logger.error("\n❌ 指令智能体生成的指令不符合要求,正在重新生成。")
@@ -193,8 +161,8 @@ class InstructionAgent(BaseAgent):

 class WorkerAgent(BaseAgent):
     """Executes instructions and publishes results to a message broker."""
-    def __init__(self, goal: str, tools_json: List, agent_config: Dict, work_dir: str, cache_messages: Union[bool, List[Dict]], broker: MessageBroker, listen_topic: str, publish_topic: str, status_topic: str
-        super().__init__(goal, tools_json, agent_config, work_dir, cache_messages, broker, listen_topic, publish_topic, status_topic
+    def __init__(self, goal: str, tools_json: List, agent_config: Dict, work_dir: str, cache_messages: Union[bool, List[Dict]], broker: MessageBroker, listen_topic: str, publish_topic: str, status_topic: str):
+        super().__init__(goal, tools_json, agent_config, work_dir, cache_messages, broker, listen_topic, publish_topic, status_topic)

         if self.cache_messages and isinstance(self.cache_messages, list) and len(self.cache_messages) > 1:
             first_user_message = replace_xml_content(self.cache_messages[1]["content"], "goal", goal)
@@ -211,18 +179,12 @@ class WorkerAgent(BaseAgent):
             }, self.publish_topic)
             return

-        for index, raw_message in enumerate(self.agent.conversation["default"]):
-            if raw_message.get("content") and isinstance(raw_message["content"], str) \
-                and "<knowledge_graph_tree>" in raw_message["content"] and self.graph_tree:
-                raw_message["content"] = replace_xml_content(raw_message["content"], "knowledge_graph_tree", self.graph_tree)
-                self.agent.conversation["default"][index] = raw_message
-
         instruction = message["instruction"]
         if "find_and_click_element" in json.dumps(self.tools_json):
             instruction = await get_current_screen_image_message(instruction)

         try:
-            response = await self.agent.ask_async(instruction)
+            response = await self.agent.ask_async(UserMessage(instruction, Texts("\n\nYour message **must** end with [done] to signify the end of your output.", name="done")))
         except TaskComplete as e:
             self.broker.publish({"status": "finished", "result": e.completion_message}, self.status_topic)
             return
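The worker no longer bakes the "[done]" requirement into stored messages (the old `get_conversation_history` had to strip it back out); it is now attached per request as a named `Texts(..., name="done")` part of the `UserMessage`. The architext classes are package-internal, so the snippet below is a plain-Python illustration of the same idea, with hypothetical helper names.

```python
DONE_SUFFIX = "\n\nYour message **must** end with [done] to signify the end of your output."

def with_done_marker(instruction: str) -> str:
    """Hypothetical helper: attach the [done] requirement to one request only."""
    return instruction + DONE_SUFFIX

def is_complete(response: str) -> bool:
    """Check whether the model closed its answer with the [done] sentinel."""
    return response.rstrip().endswith("[done]")

prompt = with_done_marker("Run the test suite and report failures.")
reply = "All 42 tests pass. [done]"  # stand-in model output

assert DONE_SUFFIX in prompt
assert is_complete(reply)
```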
@@ -263,7 +225,6 @@ class BrokerWorker:
         self.INSTRUCTION_TOPIC = self.channel + ".instructions"
         self.WORKER_RESPONSE_TOPIC = self.channel + ".worker_responses"
         self.TASK_STATUS_TOPIC =self.channel + ".task_status"
-        self.GRAPH_UPDATE_TOPIC = self.channel + ".knowledge_graph"

         self.setup()

@@ -272,7 +233,6 @@ class BrokerWorker:
         cache_dir.mkdir(parents=True, exist_ok=True)
         self.task_manager.set_root_path(self.work_dir)
         self.kgm.set_root_path(self.work_dir)
-        self.kgm.set_publish_topic(self.GRAPH_UPDATE_TOPIC)
         self.cache_file = cache_dir / "work_agent_conversation_history.json"
         if not self.cache_file.exists():
             self.cache_file.write_text("[]", encoding="utf-8")
@@ -313,25 +273,22 @@ class BrokerWorker:
         self.logger.info(message.get("result"))

     def _setup_agents(self):
+        instruction_system_prompt.provider("tools").update(self.tools_json)
+        instruction_system_prompt.provider("workspace_path").update(str(self.work_dir))
         instruction_agent_config = {
             "api_key": os.getenv("API_KEY"), "api_url": os.getenv("BASE_URL"),
             "engine": os.getenv("MODEL"),
-            "system_prompt": instruction_system_prompt
-                os_version=platform.platform(), tools_list=self.tools_json,
-                workspace_path=self.work_dir, current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-            ),
+            "system_prompt": instruction_system_prompt,
             "print_log": os.getenv("DEBUG", "false").lower() in ("true", "1", "t", "yes"),
             "temperature": 0.7, "use_plugins": False, "logger": self.logger
         }

+        worker_system_prompt.provider("tools").update(self.tools_json)
+        worker_system_prompt.provider("workspace_path").update(str(self.work_dir))
         worker_agent_config = {
             "api_key": os.getenv("API_KEY"), "api_url": os.getenv("BASE_URL"),
             "engine": os.getenv("FAST_MODEL") or os.getenv("MODEL"),
-            "system_prompt": worker_system_prompt
-                os_version=platform.platform(), workspace_path=self.work_dir,
-                shell=os.getenv('SHELL', 'Unknown'), current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
-                tools_list=self.tools_json
-            ),
+            "system_prompt": worker_system_prompt,
             "print_log": True, "temperature": 0.5, "function_call_max_loop": 100, "logger": self.logger,
             "check_done": True
         }
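`_setup_agents` no longer formats the system prompts with keyword arguments (`os_version=...`, `tools_list=...`); the prompts are architext objects whose named placeholders are filled in place via `provider(name).update(value)`. architext's implementation is not visible here, so the toy class below only sketches that placeholder-update pattern; it is not the library's API.

```python
from dataclasses import dataclass, field
from typing import Any, Dict

@dataclass
class PromptTemplate:
    """Toy stand-in for a system prompt with named, updatable providers."""
    template: str
    values: Dict[str, Any] = field(default_factory=dict)

    def provider(self, name: str) -> "_Provider":
        return _Provider(self, name)

    def render(self) -> str:
        return self.template.format(**{k: str(v) for k, v in self.values.items()})

@dataclass
class _Provider:
    owner: PromptTemplate
    name: str

    def update(self, value: Any) -> None:
        # Mutates the owning prompt in place, like the calls in the diff.
        self.owner.values[self.name] = value

worker_prompt = PromptTemplate("Workspace: {workspace_path}\nTools: {tools}")
worker_prompt.provider("workspace_path").update("/tmp/work")
worker_prompt.provider("tools").update(["read_file", "write_file"])
print(worker_prompt.render())
```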
@@ -340,14 +297,12 @@ class BrokerWorker:
             goal=self.goal, tools_json=self.tools_json, agent_config=instruction_agent_config, work_dir=self.work_dir, cache_messages=self.cache_messages,
             broker=self.broker, listen_topic=self.WORKER_RESPONSE_TOPIC,
             publish_topic=self.INSTRUCTION_TOPIC, status_topic=self.TASK_STATUS_TOPIC,
-            graph_update_topic=self.GRAPH_UPDATE_TOPIC
         )

         worker_agent = WorkerAgent(
             goal=self.goal, tools_json=self.tools_json, agent_config=worker_agent_config, work_dir=self.work_dir, cache_messages=self.cache_messages,
             broker=self.broker, listen_topic=self.INSTRUCTION_TOPIC,
             publish_topic=self.WORKER_RESPONSE_TOPIC, status_topic=self.TASK_STATUS_TOPIC,
-            graph_update_topic=self.GRAPH_UPDATE_TOPIC
         )
         return instruction_agent, worker_agent
