beswarm 0.2.34__py3-none-any.whl → 0.2.36__py3-none-any.whl
This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Potentially problematic release: this version of beswarm might be problematic.
- beswarm/aient/setup.py +1 -1
- beswarm/aient/src/aient/core/request.py +26 -9
- beswarm/aient/src/aient/core/response.py +58 -106
- beswarm/aient/src/aient/models/chatgpt.py +4 -3
- beswarm/broker.py +235 -0
- beswarm/tools/click.py +1 -0
- beswarm/tools/search_web.py +1 -3
- beswarm/tools/taskmanager.py +12 -4
- beswarm/tools/worker.py +330 -418
- {beswarm-0.2.34.dist-info → beswarm-0.2.36.dist-info}/METADATA +1 -1
- {beswarm-0.2.34.dist-info → beswarm-0.2.36.dist-info}/RECORD +13 -12
- {beswarm-0.2.34.dist-info → beswarm-0.2.36.dist-info}/WHEEL +0 -0
- {beswarm-0.2.34.dist-info → beswarm-0.2.36.dist-info}/top_level.txt +0 -0
beswarm/tools/worker.py
CHANGED
@@ -4,9 +4,186 @@ import sys
 import copy
 import json
 import difflib
+import asyncio
 import platform
 from pathlib import Path
 from datetime import datetime
+from typing import List, Dict, Union
+
+from ..broker import MessageBroker
+from ..aient.src.aient.models import chatgpt
+from ..aient.src.aient.plugins import register_tool, get_function_call_list, registry
+from ..prompt import worker_system_prompt, instruction_system_prompt
+from ..utils import extract_xml_content, get_current_screen_image_message, replace_xml_content, register_mcp_tools
+from ..bemcp.bemcp import MCPManager
+
+class BaseAgent:
+    """Base class for agents, handling common initialization and disposal."""
+    def __init__(self, goal: str, tools_json: List, agent_config: Dict, work_dir: str, cache_messages: Union[bool, List[Dict]], broker: MessageBroker, listen_topic: str, publish_topic: str, status_topic: str):
+        self.goal = goal
+        self.tools_json = tools_json
+        self.work_dir = work_dir
+        self.cache_file = Path(work_dir) / ".beswarm" / "work_agent_conversation_history.json"
+        self.config = agent_config
+        self.cache_messages = cache_messages
+        if cache_messages and isinstance(cache_messages, bool) and cache_messages == True:
+            self.cache_messages = json.loads(self.cache_file.read_text(encoding="utf-8"))
+        self.broker = broker
+        self.listen_topic = listen_topic
+        self.error_topic = listen_topic + ".error"
+        self.publish_topic = publish_topic
+        self.status_topic = status_topic
+        self._subscription = self.broker.subscribe(self.handle_message, [self.listen_topic, self.error_topic])
+
+    async def handle_message(self, message: Dict):
+        """Process incoming messages. Must be implemented by subclasses."""
+        raise NotImplementedError
+
+    def dispose(self):
+        """Cancels the subscription and cleans up resources."""
+        if self._subscription:
+            self._subscription.dispose()
+
+
+class InstructionAgent(BaseAgent):
+    """Generates instructions and publishes them to a message broker."""
+    def __init__(self, goal: str, tools_json: List, agent_config: Dict, work_dir: str, cache_messages: Union[bool, List[Dict]], broker: MessageBroker, listen_topic: str, publish_topic: str, status_topic: str):
+        super().__init__(goal, tools_json, agent_config, work_dir, cache_messages, broker, listen_topic, publish_topic, status_topic)
+
+        self.last_instruction = None
+        self.agent = chatgpt(**self.config)
+
+        self.goal_diff = None
+
+        if self.cache_messages and isinstance(self.cache_messages, list) and len(self.cache_messages) > 1:
+            old_goal = extract_xml_content(self.cache_messages[1]["content"], "goal")
+            if old_goal.strip() != goal.strip():
+                diff_generator = difflib.ndiff(old_goal.splitlines(), goal.splitlines())
+                changed_lines = []
+                for line in diff_generator:
+                    if (line.startswith('+ ') or line.startswith('- ')) and line[2:].strip():
+                        changed_lines.append(line)
+                self.goal_diff = '\n'.join(changed_lines).strip()
+
+    def get_conversation_history(self, conversation_history: List[Dict]):
+        conversation_history = copy.deepcopy(conversation_history)
+
+        self.cache_file.write_text(json.dumps(conversation_history, ensure_ascii=False, indent=4), encoding="utf-8")
+
+        work_agent_system_prompt = conversation_history.pop(0)
+        if conversation_history:
+            original_content = work_agent_system_prompt["content"]
+            regex = r"<latest_file_content>(.*?)</latest_file_content>"
+            match = re.search(regex, original_content, re.DOTALL)
+            if match:
+                extracted_content = f"<latest_file_content>{match.group(1)}</latest_file_content>\n\n"
+            else:
+                extracted_content = ""
+            if isinstance(conversation_history[0]["content"], str):
+                conversation_history[0]["content"] = extracted_content + conversation_history[0]["content"]
+            elif isinstance(conversation_history[0]["content"], list) and extracted_content:
+                conversation_history[0]["content"].append({"type": "text", "text": extracted_content})
+
+        return conversation_history
+
+    async def handle_message(self, message: Dict):
+        """Receives a worker response, generates the next instruction, and publishes it."""
+
+        if len(message["conversation"]) > 1 and message["conversation"][-2]["role"] == "user" \
+        and "<task_complete_message>" in message["conversation"][-2]["content"]:
+            task_complete_message = extract_xml_content(message["conversation"][-2]["content"], "task_complete_message")
+            self.broker.publish({"status": "finished", "result": task_complete_message}, self.status_topic)
+            return
+
+        instruction_prompt = "".join([
+            "</work_agent_conversation_end>\n\n",
+            f"任务目标: {self.goal}\n\n",
+            f"任务目标新变化:\n{self.goal_diff}\n\n" if self.goal_diff else "",
+            "在 tag <work_agent_conversation_start>...</work_agent_conversation_end> 之前的对话历史都是工作智能体的对话历史。\n\n",
+            "根据以上对话历史和目标,请生成下一步指令。如果任务已完成,指示工作智能体调用task_complete工具。\n\n",
+        ])
+        if self.last_instruction and 'fetch_gpt_response_stream HTTP Error' not in self.last_instruction:
+            instruction_prompt = (
+                f"{instruction_prompt}\n\n"
+                "你生成的指令格式错误,必须把给assistant的指令放在<instructions>...</instructions>标签内。请重新生成格式正确的指令。"
+                f"这是你上次给assistant的错误格式的指令:\n{self.last_instruction}"
+            )
+
+        self.agent.conversation["default"][1:] = self.get_conversation_history(message["conversation"])
+
+        if "find_and_click_element" in json.dumps(self.tools_json):
+            instruction_prompt = await get_current_screen_image_message(instruction_prompt)
+
+        raw_response = await self.agent.ask_async(instruction_prompt)
+
+        if "fetch_gpt_response_stream HTTP Error', 'status_code': 404" in raw_response:
+            raise Exception(f"Model: {self.config['engine']} not found!")
+        if "'status_code': 413" in raw_response or \
+        "'status_code': 400" in raw_response:
+            self.broker.publish({"status": "error", "result": raw_response}, self.status_topic)
+            return
+
+        self.broker.publish({"status": "new_message", "result": "\n🤖 指令智能体:\n" + raw_response}, self.status_topic)
+
+        self.last_instruction = raw_response
+        instruction = extract_xml_content(raw_response, "instructions")
+        if instruction:
+            if len(message["conversation"]) == 1:
+                instruction = (
+                    "任务描述:\n"
+                    f"<goal>{self.goal}</goal>\n\n"
+                    "你作为指令的**执行者**,而非任务的**规划师**,你必须严格遵循以下单步工作流程:\n"
+                    "**执行指令**\n"
+                    " - **严格遵从:** 只执行我当前下达的明确指令。在我明确给出下一步指令前,绝不擅自行动或推测、执行任何未明确要求的后续步骤。\n"
+                    " - **严禁越权:** 禁止执行任何我未指定的步骤。`<goal>` 标签中的内容仅为背景信息,不得据此进行任务规划或推测。\n"
+                    "**汇报结果**\n"
+                    " - **聚焦单步:** 指令完成后,仅汇报该步骤的执行结果与产出。\n"
+                    "**暂停等待**\n"
+                    " - **原地待命:** 汇报后,任务暂停。在收到我新的指令前,严禁发起任何新的工具调用或操作。\n"
+                    " - **请求指令:** 回复的最后必须明确请求我提供下一步指令。\n"
+                    "**注意:** 禁止完成超出下面我未规定的步骤,`<goal>` 标签中的内容仅为背景信息。"
+                    "现在开始执行第一步:\n"
+                    f"{instruction}"
+                )
+            self.broker.publish({"instruction": instruction, "conversation": message["conversation"]}, self.publish_topic)
+        else:
+            print("\n❌ 指令智能体生成的指令不符合要求,正在重新生成。")
+            self.broker.publish(message, self.error_topic)
+
+
+class WorkerAgent(BaseAgent):
+    """Executes instructions and publishes results to a message broker."""
+    def __init__(self, goal: str, tools_json: List, agent_config: Dict, work_dir: str, cache_messages: Union[bool, List[Dict]], broker: MessageBroker, listen_topic: str, publish_topic: str, status_topic: str):
+        super().__init__(goal, tools_json, agent_config, work_dir, cache_messages, broker, listen_topic, publish_topic, status_topic)
+
+        if self.cache_messages and isinstance(self.cache_messages, list) and len(self.cache_messages) > 1:
+            first_user_message = replace_xml_content(self.cache_messages[1]["content"], "goal", goal)
+            self.config["cache_messages"] = self.cache_messages[0:1] + [{"role": "user", "content": first_user_message}] + self.cache_messages[2:]
+
+        self.agent = chatgpt(**self.config)
+
+    async def handle_message(self, message: Dict):
+        """Receives an instruction, executes it, and publishes the response."""
+
+        if message.get("instruction") == "Initial kickoff":
+            self.broker.publish({
+                "conversation": self.agent.conversation["default"]
+            }, self.publish_topic)
+            return
+
+        instruction = message["instruction"]
+        if "find_and_click_element" in json.dumps(self.tools_json):
+            instruction = await get_current_screen_image_message(instruction)
+        response = await self.agent.ask_async(instruction)
+
+        if response.strip() == '':
+            print("\n❌ 工作智能体回复为空,请重新生成指令。")
+            self.broker.publish(message, self.error_topic)
+        else:
+            self.broker.publish({"status": "new_message", "result": "\n✅ 工作智能体:\n" + response}, self.status_topic)
+            self.broker.publish({
+                "conversation": self.agent.conversation["default"]
+            }, self.publish_topic)
 
 class Tee:
     def __init__(self, *files):
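The hunk above replaces the old in-function control loop with three agent classes that talk through a publish/subscribe broker: each agent subscribes to one topic, handles a message, and publishes its output to the other agent's topic, with a separate status topic carrying progress and completion. The sketch below illustrates that round-trip with a hypothetical SimpleBroker stand-in; it is not beswarm's MessageBroker (beswarm/broker.py), and the topic names and step counter exist only for the demo.

# Minimal, self-contained sketch of the instruction/worker ping-pong shown above.
# SimpleBroker is a hypothetical stand-in, not beswarm's MessageBroker.
import asyncio
from collections import defaultdict

class SimpleBroker:
    def __init__(self):
        self._subs = defaultdict(list)

    def subscribe(self, callback, topics):
        # register an async callback for each topic
        for topic in topics:
            self._subs[topic].append(callback)

    def publish(self, message, topic):
        # fan the message out to every subscriber of the topic
        for callback in self._subs[topic]:
            asyncio.create_task(callback(message))

async def main():
    broker = SimpleBroker()
    done = asyncio.Event()

    async def instruction_agent(message):
        # plays the role of InstructionAgent.handle_message
        step = message.get("step", 0)
        if step >= 2:
            done.set()  # analogous to publishing a "finished" status
            return
        broker.publish({"instruction": f"do step {step + 1}", "step": step + 1},
                       "demo.instructions")

    async def worker_agent(message):
        # plays the role of WorkerAgent.handle_message
        print("worker executing:", message["instruction"])
        broker.publish({"conversation": ["(demo history)"], "step": message["step"]},
                       "demo.worker_responses")

    broker.subscribe(instruction_agent, ["demo.worker_responses"])
    broker.subscribe(worker_agent, ["demo.instructions"])
    broker.publish({"instruction": "Initial kickoff", "step": 0}, "demo.instructions")
    await done.wait()

asyncio.run(main())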
@@ -21,435 +198,170 @@ class Tee:
         for f in self.files:
             f.flush()
 
-
-
-
-
-
+broker = MessageBroker()
+mcp_manager = MCPManager()
+
+class BrokerWorker:
+    """The 'glue' class that orchestrates agents via a MessageBroker."""
+    def __init__(self, goal: str, tools: List[Union[str, Dict]], work_dir: str, cache_messages: Union[bool, List[Dict]] = None, broker: MessageBroker = None, mcp_manager: MCPManager = None):
+        self.goal = goal
+        self.tools = tools
+        self.work_dir = Path(work_dir)
+        self.cache_messages = cache_messages
+
+        self.broker = broker
+        self.mcp_manager = mcp_manager
+        self.task_completion_event = asyncio.Event()
+        self.final_result = None
+        self._status_subscription = None
+        self.setup()
+
+        self.channel = self.broker.request_channel()
+        self.INSTRUCTION_TOPIC = self.channel + ".instructions"
+        self.WORKER_RESPONSE_TOPIC = self.channel + ".worker_responses"
+        self.TASK_STATUS_TOPIC =self.channel + ".task_status"
+
+    def setup(self):
+        cache_dir = self.work_dir / ".beswarm"
+        cache_dir.mkdir(parents=True, exist_ok=True)
+        task_manager.set_root_path(self.work_dir)
+        self.cache_file = cache_dir / "work_agent_conversation_history.json"
+        if not self.cache_file.exists():
+            self.cache_file.write_text("[]", encoding="utf-8")
+
+        DEBUG = os.getenv("DEBUG", "false").lower() in ("true", "1", "t", "yes")
+        if DEBUG:
+            log_file = open(cache_dir / "history.log", "a", encoding="utf-8")
+            log_file.write(f"========== {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ==========\n")
+            original_stdout = sys.stdout
+            original_stderr = sys.stderr
+            sys.stdout = Tee(original_stdout, log_file)
+            sys.stderr = Tee(original_stderr, log_file)
+
+    async def _configure_tools(self):
+        mcp_list = [item for item in self.tools if isinstance(item, dict)]
+        if mcp_list:
+            for mcp_item in mcp_list:
+                mcp_name, mcp_config = list(mcp_item.items())[0]
+                await self.mcp_manager.add_server(mcp_name, mcp_config)
+                client = self.mcp_manager.clients.get(mcp_name)
+                await register_mcp_tools(client, registry)
+            all_mcp_tools = await self.mcp_manager.get_all_tools()
+            self.tools.extend([tool.name for tool in sum(all_mcp_tools.values(), [])])
+        self.tools = [item for item in self.tools if not isinstance(item, dict)]
+        if "task_complete" not in self.tools: self.tools.append("task_complete")
+        self.tools_json = [value for _, value in get_function_call_list(self.tools).items()]
+
+    def _task_status_subscriber(self, message: Dict):
+        """Subscriber for task status changes."""
+        if message.get("status") == "finished":
+            self.final_result = message.get("result")
+            self.task_completion_event.set()
+
+        if message.get("status") == "error":
+            raise Exception(message.get("result"))
+
+        if message.get("status") == "new_message":
+            print(message.get("result"))
+
+    def _setup_agents(self):
+        instruction_agent_config = {
+            "api_key": os.getenv("API_KEY"), "api_url": os.getenv("BASE_URL"),
+            "engine": os.getenv("MODEL"),
+            "system_prompt": instruction_system_prompt.format(
+                os_version=platform.platform(), tools_list=self.tools_json,
+                workspace_path=self.work_dir, current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+            ),
+            "print_log": os.getenv("DEBUG", "false").lower() in ("true", "1", "t", "yes"),
+            "temperature": 0.7, "use_plugins": False
+        }
+
+        worker_agent_config = {
+            "api_key": os.getenv("API_KEY"), "api_url": os.getenv("BASE_URL"),
+            "engine": os.getenv("FAST_MODEL") or os.getenv("MODEL"),
+            "system_prompt": worker_system_prompt.format(
+                os_version=platform.platform(), workspace_path=self.work_dir,
+                shell=os.getenv('SHELL', 'Unknown'), current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+                tools_list=self.tools_json
+            ),
+            "print_log": True, "temperature": 0.5, "function_call_max_loop": 100
+        }
+
+        instruction_agent = InstructionAgent(
+            goal=self.goal, tools_json=self.tools_json, agent_config=instruction_agent_config, work_dir=self.work_dir, cache_messages=self.cache_messages,
+            broker=self.broker, listen_topic=self.WORKER_RESPONSE_TOPIC,
+            publish_topic=self.INSTRUCTION_TOPIC, status_topic=self.TASK_STATUS_TOPIC
+        )
+
+        worker_agent = WorkerAgent(
+            goal=self.goal, tools_json=self.tools_json, agent_config=worker_agent_config, work_dir=self.work_dir, cache_messages=self.cache_messages,
+            broker=self.broker, listen_topic=self.INSTRUCTION_TOPIC,
+            publish_topic=self.WORKER_RESPONSE_TOPIC, status_topic=self.TASK_STATUS_TOPIC
+        )
+        return instruction_agent, worker_agent
+
+    async def run(self):
+        """Sets up subscriptions and starts the workflow."""
+        os.chdir(self.work_dir.absolute())
+        await self._configure_tools()
+
+        instruction_agent, worker_agent = self._setup_agents()
+
+        self.broker.publish({"instruction": "Initial kickoff"}, self.INSTRUCTION_TOPIC)
+
+        self._status_subscription = self.broker.subscribe(self._task_status_subscriber, self.TASK_STATUS_TOPIC)
+        await self.task_completion_event.wait()
+
+        instruction_agent.dispose()
+        worker_agent.dispose()
+        self._status_subscription.dispose()
+        await self.mcp_manager.cleanup()
+        return self.final_result
+
+    async def stream_run(self):
+        """Runs the workflow and yields status messages."""
+        os.chdir(self.work_dir.absolute())
+        await self._configure_tools()
+
+        instruction_agent, worker_agent = self._setup_agents()
+
+        self.broker.publish({"instruction": "Initial kickoff"}, self.INSTRUCTION_TOPIC)
+
+        try:
+            async for message in self.broker.iter_topic(self.TASK_STATUS_TOPIC):
+                if message.get("status") == "new_message":
+                    yield message.get("result")
+                elif message.get("status") == "finished":
+                    yield message.get("result")
+                    break
+                elif message.get("status") == "error":
+                    raise Exception(message.get("result"))
+        finally:
+            instruction_agent.dispose()
+            worker_agent.dispose()
+            await self.mcp_manager.cleanup()
 
-manager = MCPManager()
 
 @register_tool()
-async def worker(goal, tools, work_dir, cache_messages=None):
-    cache_dir = Path(work_dir) / ".beswarm"
-    cache_dir.mkdir(parents=True, exist_ok=True)
-    task_manager.set_root_path(work_dir)
-    cache_file = cache_dir / "work_agent_conversation_history.json"
-    if not cache_file.exists():
-        cache_file.write_text("[]", encoding="utf-8")
-
-    DEBUG = os.getenv("DEBUG", "false").lower() in ("true", "1", "t", "yes")
-    if DEBUG:
-        log_file = open(cache_dir / "history.log", "a", encoding="utf-8")
-        log_file.write(f"========== {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ==========\n")
-        original_stdout = sys.stdout
-        original_stderr = sys.stderr
-        sys.stdout = Tee(original_stdout, log_file)
-        sys.stderr = Tee(original_stderr, log_file)
-
+async def worker(goal: str, tools: List[Union[str, Dict]], work_dir: str, cache_messages: Union[bool, List[Dict]] = None):
     start_time = datetime.now()
-
-
-    goal_diff = None
-
-    mcp_list = [item for item in tools if isinstance(item, dict)]
-    if mcp_list:
-        for mcp_item in mcp_list:
-            mcp_name, mcp_config = list(mcp_item.items())[0]
-            await manager.add_server(mcp_name, mcp_config)
-            client = manager.clients.get(mcp_name)
-            await register_mcp_tools(client, registry)
-        all_tools = await manager.get_all_tools()
-        mcp_tools_name = [tool.name for tool in sum(all_tools.values(), [])]
-        tools += mcp_tools_name
-
-    tools = [item for item in tools if not isinstance(item, dict)]
-    if "task_complete" not in tools:
-        tools.append("task_complete")
-
-    tools_json = [value for _, value in get_function_call_list(tools).items()]
-    work_agent_system_prompt = worker_system_prompt.format(
-        os_version=platform.platform(),
-        workspace_path=work_dir,
-        shell=os.getenv('SHELL', 'Unknown'),
-        current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
-        tools_list=tools_json
-    )
-
-    work_agent_config = {
-        "api_key": os.getenv("API_KEY"),
-        "api_url": os.getenv("BASE_URL"),
-        "engine": os.getenv("FAST_MODEL") or os.getenv("MODEL"),
-        "system_prompt": work_agent_system_prompt,
-        "print_log": True,
-        # "max_tokens": 8000,
-        "temperature": 0.5,
-        "function_call_max_loop": 100,
-    }
-    if cache_messages:
-        if isinstance(cache_messages, bool) and cache_messages == True:
-            cache_messages = json.loads(cache_file.read_text(encoding="utf-8"))
-        if cache_messages and isinstance(cache_messages, list) and len(cache_messages) > 1:
-            old_goal = extract_xml_content(cache_messages[1]["content"], "goal")
-            if old_goal.strip() != goal.strip():
-                diff_generator = difflib.ndiff(old_goal.splitlines(), goal.splitlines())
-                changed_lines = []
-                for line in diff_generator:
-                    if (line.startswith('+ ') or line.startswith('- ')) and line[2:].strip():
-                        changed_lines.append(line)
-                goal_diff = '\n'.join(changed_lines).strip()
-            first_user_message = replace_xml_content(cache_messages[1]["content"], "goal", goal)
-            work_agent_config["cache_messages"] = cache_messages[0:1] + [{"role": "user", "content": first_user_message}] + cache_messages[2:]
-
-    instruction_agent_config = {
-        "api_key": os.getenv("API_KEY"),
-        "api_url": os.getenv("BASE_URL"),
-        "engine": os.getenv("MODEL"),
-        "system_prompt": instruction_system_prompt.format(os_version=platform.platform(), tools_list=tools_json, workspace_path=work_dir, current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
-        "print_log": DEBUG,
-        # "max_tokens": 4000,
-        "temperature": 0.7,
-        "use_plugins": False,
-    }
-
-    # 工作agent初始化
-    work_agent = chatgpt(**work_agent_config)
-    async def instruction_agent_task():
-        last_instruction = None
-        while True:
-            instruction_prompt = "".join([
-                "</work_agent_conversation_end>\n\n",
-                f"任务目标: {goal}\n\n",
-                f"任务目标新变化:\n{goal_diff}\n\n" if goal_diff else "",
-                "在 tag <work_agent_conversation_start>...</work_agent_conversation_end> 之前的对话历史都是工作智能体的对话历史。\n\n",
-                "根据以上对话历史和目标,请生成下一步指令。如果任务已完成,指示工作智能体调用task_complete工具。\n\n",
-            ])
-            if last_instruction and 'fetch_gpt_response_stream HTTP Error' not in last_instruction:
-                instruction_prompt = (
-                    f"{instruction_prompt}\n\n"
-                    "你生成的指令格式错误,必须把给assistant的指令放在<instructions>...</instructions>标签内。请重新生成格式正确的指令。"
-                    f"这是你上次给assistant的错误格式的指令:\n{last_instruction}"
-                )
-            # 让指令agent分析对话历史并生成新指令
-            instruction_agent = chatgpt(**instruction_agent_config)
-            conversation_history = copy.deepcopy(work_agent.conversation["default"])
-            if len(conversation_history) > 1 and conversation_history[-2]["role"] == "user" \
-            and "<task_complete_message>" in conversation_history[-2]["content"]:
-                task_complete_message = extract_xml_content(conversation_history[-2]["content"], "task_complete_message")
-                # del work_agent.conversation["default"][-4:]
-                return "<task_complete_message>" + task_complete_message + "</task_complete_message>"
-
-            cache_file.write_text(json.dumps(conversation_history, ensure_ascii=False, indent=4), encoding="utf-8")
-
-            work_agent_system_prompt = conversation_history.pop(0)
-            if conversation_history:
-                # 获取原始内容
-                original_content = work_agent_system_prompt["content"]
-
-                # 定义正则表达式
-                regex = r"<latest_file_content>(.*?)</latest_file_content>"
-
-                # 进行匹配
-                match = re.search(regex, original_content, re.DOTALL)
-
-                # 提取内容或设置为空字符串
-                if match:
-                    extracted_content = f"<latest_file_content>{match.group(1)}</latest_file_content>\n\n"
-                else:
-                    extracted_content = ""
-                if isinstance(conversation_history[0]["content"], str):
-                    conversation_history[0]["content"] = extracted_content + conversation_history[0]["content"]
-                elif isinstance(conversation_history[0]["content"], list) and extracted_content:
-                    conversation_history[0]["content"].append({"type": "text", "text": extracted_content})
-
-            instruction_agent.conversation["default"][1:] = conversation_history
-            if "find_and_click_element" in str(tools_json):
-                instruction_prompt = await get_current_screen_image_message(instruction_prompt)
-            next_instruction = await instruction_agent.ask_async(instruction_prompt)
-            print("\n🤖 指令智能体生成的下一步指令:", next_instruction)
-            if "fetch_gpt_response_stream HTTP Error', 'status_code': 404" in next_instruction:
-                raise Exception(f"Model: {instruction_agent_config['engine']} not found!")
-            if "'status_code': 413" in next_instruction or \
-            "'status_code': 400" in next_instruction:
-                end_time = datetime.now()
-                total_time = end_time - start_time
-                print(f"\n任务开始时间: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
-                print(f"任务结束时间: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
-                print(f"总用时: {total_time}")
-                raise Exception(f"The request body is too long, please try again.")
-
-            last_instruction = next_instruction
-            next_instruction = extract_xml_content(next_instruction, "instructions")
-            if not next_instruction:
-                print("\n❌ 指令智能体生成的指令不符合要求,请重新生成。")
-                continue
-            else:
-                if conversation_history == []:
-                    next_instruction = (
-                        "任务描述:\n"
-                        f"<goal>{goal}</goal>\n\n"
-                        "你作为指令的**执行者**,而非任务的**规划师**,你必须严格遵循以下单步工作流程:\n"
-                        "**执行指令**\n"
-                        " - **严格遵从:** 只执行我当前下达的明确指令。在我明确给出下一步指令前,绝不擅自行动或推测、执行任何未明确要求的后续步骤。\n"
-                        " - **严禁越权:** 禁止执行任何我未指定的步骤。`<goal>` 标签中的内容仅为背景信息,不得据此进行任务规划或推测。\n"
-                        "**汇报结果**\n"
-                        " - **聚焦单步:** 指令完成后,仅汇报该步骤的执行结果与产出。\n"
-                        "**暂停等待**\n"
-                        " - **原地待命:** 汇报后,任务暂停。在收到我新的指令前,严禁发起任何新的工具调用或操作。\n"
-                        " - **请求指令:** 回复的最后必须明确请求我提供下一步指令。\n"
-                        "**注意:** 禁止完成超出下面我未规定的步骤,`<goal>` 标签中的内容仅为背景信息。"
-                        "现在开始执行第一步:\n"
-                        f"{next_instruction}"
-                    )
-                break
-        return next_instruction
-
-    need_instruction = True
-    result = None
-    while True:
-        next_instruction = ''
-        if need_instruction:
-            next_instruction = await instruction_agent_task()
-
-        # 检查任务是否完成
-        if "<task_complete_message>" in next_instruction:
-            if finish_flag == 0:
-                finish_flag = 1
-                continue
-            elif finish_flag == 1:
-                result = extract_xml_content(next_instruction, "task_complete_message")
-                break
-        else:
-            finish_flag = 0
-            if "find_and_click_element" in str(tools_json):
-                next_instruction = await get_current_screen_image_message(next_instruction)
-            result = await work_agent.ask_async(next_instruction)
-            if result.strip() == '' or result.strip() == '</content>\n</write_to_file>':
-                print("\n❌ 工作智能体回复为空,请重新生成指令。")
-                need_instruction = False
-                continue
-            print("✅ 工作智能体回复:", result)
-            need_instruction = True
-
+    worker_instance = BrokerWorker(goal, tools, work_dir, cache_messages, broker, mcp_manager)
+    result = await worker_instance.run()
     end_time = datetime.now()
-    total_time = end_time - start_time
-    print("\n✅ 任务已完成:", result)
     print(f"\n任务开始时间: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
     print(f"任务结束时间: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
-    print(f"总用时: {total_time}")
-    await manager.cleanup()
+    print(f"总用时: {end_time - start_time}")
     return result
 
-
-
-    cache_dir.mkdir(parents=True, exist_ok=True)
-    task_manager.set_root_path(work_dir)
-    cache_file = cache_dir / "work_agent_conversation_history.json"
-    if not cache_file.exists():
-        cache_file.write_text("[]", encoding="utf-8")
-
-    DEBUG = os.getenv("DEBUG", "false").lower() in ("true", "1", "t", "yes")
-    if DEBUG:
-        log_file = open(cache_dir / "history.log", "a", encoding="utf-8")
-        log_file.write(f"========== {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ==========\n")
-        original_stdout = sys.stdout
-        original_stderr = sys.stderr
-        sys.stdout = Tee(original_stdout, log_file)
-        sys.stderr = Tee(original_stderr, log_file)
-
+@register_tool()
+async def worker_gen(goal: str, tools: List[Union[str, Dict]], work_dir: str, cache_messages: Union[bool, List[Dict]] = None):
     start_time = datetime.now()
-
-
-
-
-    mcp_list = [item for item in tools if isinstance(item, dict)]
-    if mcp_list:
-        for mcp_item in mcp_list:
-            mcp_name, mcp_config = list(mcp_item.items())[0]
-            await manager.add_server(mcp_name, mcp_config)
-            client = manager.clients.get(mcp_name)
-            await register_mcp_tools(client, registry)
-        all_tools = await manager.get_all_tools()
-        mcp_tools_name = [tool.name for tool in sum(all_tools.values(), [])]
-        tools += mcp_tools_name
-
-    tools = [item for item in tools if not isinstance(item, dict)]
-    if "task_complete" not in tools:
-        tools.append("task_complete")
-
-    tools_json = [value for _, value in get_function_call_list(tools).items()]
-    work_agent_system_prompt = worker_system_prompt.format(
-        os_version=platform.platform(),
-        workspace_path=work_dir,
-        shell=os.getenv('SHELL', 'Unknown'),
-        current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
-        tools_list=tools_json
-    )
-
-    work_agent_config = {
-        "api_key": os.getenv("API_KEY"),
-        "api_url": os.getenv("BASE_URL"),
-        "engine": os.getenv("FAST_MODEL") or os.getenv("MODEL"),
-        "system_prompt": work_agent_system_prompt,
-        "print_log": True,
-        # "max_tokens": 8000,
-        "temperature": 0.5,
-        "function_call_max_loop": 100,
-    }
-    if cache_messages:
-        if isinstance(cache_messages, bool) and cache_messages == True:
-            cache_messages = json.loads(cache_file.read_text(encoding="utf-8"))
-        if cache_messages and isinstance(cache_messages, list) and len(cache_messages) > 1:
-            old_goal = extract_xml_content(cache_messages[1]["content"], "goal")
-            if old_goal.strip() != goal.strip():
-                diff_generator = difflib.ndiff(old_goal.splitlines(), goal.splitlines())
-                changed_lines = []
-                for line in diff_generator:
-                    if (line.startswith('+ ') or line.startswith('- ')) and line[2:].strip():
-                        changed_lines.append(line)
-                goal_diff = '\n'.join(changed_lines).strip()
-            first_user_message = replace_xml_content(cache_messages[1]["content"], "goal", goal)
-            work_agent_config["cache_messages"] = cache_messages[0:1] + [{"role": "user", "content": first_user_message}] + cache_messages[2:]
-
-    instruction_agent_config = {
-        "api_key": os.getenv("API_KEY"),
-        "api_url": os.getenv("BASE_URL"),
-        "engine": os.getenv("MODEL"),
-        "system_prompt": instruction_system_prompt.format(os_version=platform.platform(), tools_list=tools_json, workspace_path=work_dir, current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
-        "print_log": DEBUG,
-        # "max_tokens": 4000,
-        "temperature": 0.7,
-        "use_plugins": False,
-    }
-
-    # 工作agent初始化
-    work_agent = chatgpt(**work_agent_config)
-    async def instruction_agent_task():
-        last_instruction = None
-        while True:
-            instruction_prompt = "".join([
-                "</work_agent_conversation_end>\n\n",
-                f"任务目标: {goal}\n\n",
-                f"任务目标新变化:\n{goal_diff}\n\n" if goal_diff else "",
-                "在 tag <work_agent_conversation_start>...</work_agent_conversation_end> 之前的对话历史都是工作智能体的对话历史。\n\n",
-                "根据以上对话历史和目标,请生成下一步指令。如果任务已完成,指示工作智能体调用task_complete工具。\n\n",
-            ])
-            if last_instruction and 'fetch_gpt_response_stream HTTP Error' not in last_instruction:
-                instruction_prompt = (
-                    f"{instruction_prompt}\n\n"
-                    "你生成的指令格式错误,必须把给assistant的指令放在<instructions>...</instructions>标签内。请重新生成格式正确的指令。"
-                    f"这是你上次给assistant的错误格式的指令:\n{last_instruction}"
-                )
-            # 让指令agent分析对话历史并生成新指令
-            instruction_agent = chatgpt(**instruction_agent_config)
-            conversation_history = copy.deepcopy(work_agent.conversation["default"])
-            if len(conversation_history) > 1 and conversation_history[-2]["role"] == "user" \
-            and "<task_complete_message>" in conversation_history[-2]["content"]:
-                task_complete_message = extract_xml_content(conversation_history[-2]["content"], "task_complete_message")
-                # del work_agent.conversation["default"][-4:]
-                return "<task_complete_message>" + task_complete_message + "</task_complete_message>"
-
-            cache_file.write_text(json.dumps(conversation_history, ensure_ascii=False, indent=4), encoding="utf-8")
-
-            work_agent_system_prompt = conversation_history.pop(0)
-            if conversation_history:
-                # 获取原始内容
-                original_content = work_agent_system_prompt["content"]
-
-                # 定义正则表达式
-                regex = r"<latest_file_content>(.*?)</latest_file_content>"
-
-                # 进行匹配
-                match = re.search(regex, original_content, re.DOTALL)
-
-                # 提取内容或设置为空字符串
-                if match:
-                    extracted_content = f"<latest_file_content>{match.group(1)}</latest_file_content>\n\n"
-                else:
-                    extracted_content = ""
-                if isinstance(conversation_history[0]["content"], str):
-                    conversation_history[0]["content"] = extracted_content + conversation_history[0]["content"]
-                elif isinstance(conversation_history[0]["content"], list) and extracted_content:
-                    conversation_history[0]["content"].append({"type": "text", "text": extracted_content})
-
-            instruction_agent.conversation["default"][1:] = conversation_history
-            if "find_and_click_element" in str(tools_json):
-                instruction_prompt = await get_current_screen_image_message(instruction_prompt)
-            next_instruction = await instruction_agent.ask_async(instruction_prompt)
-            print("\n🤖 指令智能体生成的下一步指令:", next_instruction)
-            if "fetch_gpt_response_stream HTTP Error', 'status_code': 404" in next_instruction:
-                raise Exception(f"Model: {instruction_agent_config['engine']} not found!")
-            if "'status_code': 413" in next_instruction or \
-            "'status_code': 400" in next_instruction:
-                end_time = datetime.now()
-                total_time = end_time - start_time
-                print(f"\n任务开始时间: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
-                print(f"任务结束时间: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
-                print(f"总用时: {total_time}")
-                raise Exception(f"The request body is too long, please try again.")
-
-            last_instruction = next_instruction
-            next_instruction = extract_xml_content(next_instruction, "instructions")
-            if not next_instruction:
-                print("\n❌ 指令智能体生成的指令不符合要求,请重新生成。")
-                continue
-            else:
-                if conversation_history == []:
-                    next_instruction = (
-                        "任务描述:\n"
-                        f"<goal>{goal}</goal>\n\n"
-                        "你作为指令的**执行者**,而非任务的**规划师**,你必须严格遵循以下单步工作流程:\n"
-                        "**执行指令**\n"
-                        " - **严格遵从:** 只执行我当前下达的明确指令。在我明确给出下一步指令前,绝不擅自行动或推测、执行任何未明确要求的后续步骤。\n"
-                        " - **严禁越权:** 禁止执行任何我未指定的步骤。`<goal>` 标签中的内容仅为背景信息,不得据此进行任务规划或推测。\n"
-                        "**汇报结果**\n"
-                        " - **聚焦单步:** 指令完成后,仅汇报该步骤的执行结果与产出。\n"
-                        "**暂停等待**\n"
-                        " - **原地待命:** 汇报后,任务暂停。在收到我新的指令前,严禁发起任何新的工具调用或操作。\n"
-                        " - **请求指令:** 回复的最后必须明确请求我提供下一步指令。\n"
-                        "**注意:** 禁止完成超出下面我未规定的步骤,`<goal>` 标签中的内容仅为背景信息。"
-                        "现在开始执行第一步:\n"
-                        f"{next_instruction}"
-                    )
-                break
-        return next_instruction
-
-    need_instruction = True
-    result = None
-    while True:
-        next_instruction = ''
-        if need_instruction:
-            next_instruction = await instruction_agent_task()
-
-        yield {"user": next_instruction}
-
-        # 检查任务是否完成
-        if "<task_complete_message>" in next_instruction:
-            if finish_flag == 0:
-                finish_flag = 1
-                continue
-            elif finish_flag == 1:
-                result = extract_xml_content(next_instruction, "task_complete_message")
-                break
-        else:
-            finish_flag = 0
-            if "find_and_click_element" in str(tools_json):
-                next_instruction = await get_current_screen_image_message(next_instruction)
-            result = await work_agent.ask_async(next_instruction)
-            if result.strip() == '' or result.strip() == '</content>\n</write_to_file>':
-                print("\n❌ 工作智能体回复为空,请重新生成指令。")
-                need_instruction = False
-                continue
-            yield {"assistant": result}
-            print("✅ 工作智能体回复:", result)
-            need_instruction = True
-
+    worker_instance = BrokerWorker(goal, tools, work_dir, cache_messages, broker, mcp_manager)
+    async for result in worker_instance.stream_run():
+        yield result
     end_time = datetime.now()
-    total_time = end_time - start_time
-    print("\n✅ 任务已完成:", result)
     print(f"\n任务开始时间: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
     print(f"任务结束时间: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
-    print(f"总用时: {total_time}")
-    await manager.cleanup()
+    print(f"总用时: {end_time - start_time}")
 
-from .taskmanager import task_manager
+from .taskmanager import task_manager
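For context on how the refactored entry point is meant to be called, here is a minimal invocation sketch. It assumes the module path beswarm.tools.worker and that @register_tool() leaves the decorated coroutine callable; the model name, tool names, paths, and environment values are placeholders, while API_KEY, BASE_URL, MODEL (and optionally FAST_MODEL) are the variables the new agent configs actually read.

# Hypothetical usage sketch; the values below are placeholders, not part of the package.
import asyncio
import os

from beswarm.tools.worker import worker  # assumes @register_tool() returns the coroutine unchanged

os.environ.setdefault("API_KEY", "your-api-key")          # read by both agent configs
os.environ.setdefault("BASE_URL", "https://api.example.com/v1")
os.environ.setdefault("MODEL", "example-model")           # FAST_MODEL optionally overrides the worker's engine

async def main():
    result = await worker(
        goal="Summarise README.md into docs/overview.md",  # illustrative goal
        tools=["read_file", "write_to_file"],              # illustrative tool names
        work_dir="/tmp/beswarm-demo",
        cache_messages=True,  # resume from .beswarm/work_agent_conversation_history.json if it exists
    )
    print(result)

asyncio.run(main())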