beswarm 0.2.35__py3-none-any.whl → 0.2.36__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

beswarm/aient/setup.py CHANGED
@@ -4,7 +4,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="aient",
-    version="1.1.53",
+    version="1.1.54",
     description="Aient: The Awakening of Agent.",
     long_description=Path.open(Path("README.md"), encoding="utf-8").read(),
     long_description_content_type="text/markdown",
@@ -1354,7 +1354,8 @@ async def get_openrouter_payload(request, engine, provider, api_key=None):
 
     messages = []
     for msg in request.messages:
-        name = None
+        tool_calls = None
+        tool_call_id = None
         if isinstance(msg.content, list):
             content = []
             for item in msg.content:
@@ -1366,9 +1367,25 @@ async def get_openrouter_payload(request, engine, provider, api_key=None):
                 content.append(image_message)
         else:
             content = msg.content
-            name = msg.name
-        if name:
-            messages.append({"role": msg.role, "name": name, "content": content})
+            tool_calls = msg.tool_calls
+            tool_call_id = msg.tool_call_id
+
+        if tool_calls:
+            tool_calls_list = []
+            for tool_call in tool_calls:
+                tool_calls_list.append({
+                    "id": tool_call.id,
+                    "type": tool_call.type,
+                    "function": {
+                        "name": tool_call.function.name,
+                        "arguments": tool_call.function.arguments
+                    }
+                })
+            if provider.get("tools"):
+                messages.append({"role": msg.role, "tool_calls": tool_calls_list})
+        elif tool_call_id:
+            if provider.get("tools"):
+                messages.append({"role": msg.role, "tool_call_id": tool_call_id, "content": content})
         else:
             # print("content", content)
             if isinstance(content, list):
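For orientation, a rough sketch of the message shapes the new branch emits when provider.get("tools") is truthy (field names follow the OpenAI-compatible tool-calling schema already used by the surrounding code; the concrete values below are made up):

# Illustrative only: an assistant turn that requested a tool call, and the matching tool result.
assistant_turn = {
    "role": "assistant",
    "tool_calls": [{
        "id": "call_0",  # hypothetical id
        "type": "function",
        "function": {"name": "get_weather", "arguments": "{\"city\": \"Berlin\"}"},
    }],
}
tool_turn = {"role": "tool", "tool_call_id": "call_0", "content": "{\"temp_c\": 21}"}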
@@ -42,11 +42,12 @@ def gemini_json_poccess(response_str):
     is_thinking = safe_get(json_data, "parts", 0, "thought", default=False)
 
     function_call_name = safe_get(json_data, "functionCall", "name", default=None)
-    function_full_response = json.dumps(safe_get(json_data, "functionCall", "args", default=""))
+    function_full_response = safe_get(json_data, "functionCall", "args", default="")
+    function_full_response = json.dumps(function_full_response) if function_full_response else None
 
     blockReason = safe_get(json_data, 0, "promptFeedback", "blockReason", default=None)
 
-    return is_thinking, content, image_base64, function_call_name, function_full_response, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount
+    return is_thinking, content, image_base64, function_call_name, function_full_response, finishReason, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount
 
 async def fetch_gemini_response_stream(client, url, headers, payload, model):
     timestamp = int(datetime.timestamp(datetime.now()))
@@ -62,7 +63,6 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
         parts_json = ""
         async for chunk in response.aiter_text():
             buffer += chunk
-            cache_buffer += chunk
 
             while "\n" in buffer:
                 line, buffer = buffer.split("\n", 1)
@@ -77,7 +77,7 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
                     continue
 
                 # https://ai.google.dev/api/generate-content?hl=zh-cn#FinishReason
-                is_thinking, content, image_base64, function_call_name, function_full_response, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount = gemini_json_poccess(parts_json)
+                is_thinking, content, image_base64, function_call_name, function_full_response, finishReason, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount = gemini_json_poccess(parts_json)
 
                 if is_thinking:
                     sse_string = await generate_sse_response(timestamp, model, reasoning_content=content)
@@ -99,9 +99,10 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
                 if parts_json == "[]" or blockReason == "PROHIBITED_CONTENT":
                     sse_string = await generate_sse_response(timestamp, model, stop="PROHIBITED_CONTENT")
                     yield sse_string
-                else:
+                elif finishReason:
                     sse_string = await generate_sse_response(timestamp, model, stop="stop")
                     yield sse_string
+                    break
 
                 parts_json = ""
 
@@ -187,7 +187,8 @@ class chatgpt(BaseLLM):
        # print(json.dumps(replaced_text, indent=4, ensure_ascii=False))
        while message_index < conversation_len:
            if self.conversation[convo_id][message_index]["role"] == self.conversation[convo_id][message_index + 1]["role"]:
-                if self.conversation[convo_id][message_index].get("content") and self.conversation[convo_id][message_index + 1].get("content"):
+                if self.conversation[convo_id][message_index].get("content") and self.conversation[convo_id][message_index + 1].get("content") \
+                and self.conversation[convo_id][message_index].get("content") != self.conversation[convo_id][message_index + 1].get("content"):
                    if type(self.conversation[convo_id][message_index + 1]["content"]) == str \
                    and type(self.conversation[convo_id][message_index]["content"]) == list:
                        self.conversation[convo_id][message_index + 1]["content"] = [{"type": "text", "text": self.conversation[convo_id][message_index + 1]["content"]}]
@@ -754,8 +755,8 @@ class chatgpt(BaseLLM):
 
        # 打印日志
        if self.print_log:
-            print("api_url", kwargs.get('api_url', self.api_url.chat_url) == url)
-            print("api_url", kwargs.get('api_url', self.api_url.chat_url))
+            # print("api_url", kwargs.get('api_url', self.api_url.chat_url) == url)
+            # print("api_url", kwargs.get('api_url', self.api_url.chat_url))
            print("api_url", url)
            # print("headers", headers)
            print("api_key", kwargs.get('api_key', self.api_key))
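A tiny standalone restatement of the tightened merge condition above: adjacent same-role messages are now only merged when both carry content and the contents actually differ (this sketch uses plain dicts, not the chatgpt class):

def should_merge(prev: dict, nxt: dict) -> bool:
    # Mirrors the new guard: same role, both non-empty, and not an exact duplicate.
    return (
        prev["role"] == nxt["role"]
        and bool(prev.get("content")) and bool(nxt.get("content"))
        and prev.get("content") != nxt.get("content")
    )

assert should_merge({"role": "user", "content": "a"}, {"role": "user", "content": "b"})
assert not should_merge({"role": "user", "content": "a"}, {"role": "user", "content": "a"})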
beswarm/broker.py ADDED
@@ -0,0 +1,235 @@
+"""
+使用 Reaktiv 模拟消息队列 (发布/订阅)
+
+本模块提供了一个 MessageBroker 类,它利用 Reaktiv 的核心原语(Signal, Computed, Effect)
+来构建一个功能类似消息队列的、内存中的发布/订阅系统。
+"""
+import asyncio
+from typing import Callable, Any, List, Union, Tuple
+
+from reaktiv import Signal, Effect, Computed, untracked, to_async_iter
+
+class Subscription:
+    """封装一个或多个 Effect,提供统一的暂停、恢复和取消订阅的接口。"""
+
+    def __init__(
+        self,
+        broker: "MessageBroker",
+        callback: Callable[[Any], None],
+        effects_with_topics: List[Tuple[Effect, str]],
+    ):
+        self._broker = broker
+        self._callback = callback
+        self._effects_with_topics = effects_with_topics
+        self._effects = [e for e, t in effects_with_topics]
+        self.is_paused = Signal(False)
+
+    def pause(self):
+        """暂停订阅,将不再处理新消息。"""
+        self.is_paused.set(True)
+        if self._broker.debug:
+            print(f"Subscription paused.")
+
+    def resume(self):
+        """恢复订阅,将继续处理新消息。"""
+        self.is_paused.set(False)
+        if self._broker.debug:
+            print(f"Subscription resumed.")
+
+    def dispose(self):
+        """永久取消订阅并清理资源。"""
+        for effect, topic in self._effects_with_topics:
+            effect.dispose()
+            # 从代理的注册表中移除
+            if (
+                topic in self._broker._effects_registry
+                and self._callback in self._broker._effects_registry[topic]
+            ):
+                del self._broker._effects_registry[topic][self._callback]
+                if not self._broker._effects_registry[topic]:
+                    del self._broker._effects_registry[topic]
+        if self._broker.debug:
+            print(f"Subscription disposed.")
+
+
+class MessageBroker:
+    """一个简单的消息代理,使用 Reaktiv Signal 和 Computed 模拟消息队列和派生主题。"""
+
+    def __init__(self, debug: bool = False):
+        # 现在 _topics 可以存储 Signal (原始主题) 或 Computed (派生主题)。
+        self._topics: dict[str, Union[Signal[List[Any]], Computed[List[Any]]]] = {}
+        # 新增: 注册表来跟踪 (主题, 回调) -> Effect 的映射
+        self._effects_registry: dict[str, dict[Callable, Effect]] = {}
+        self.debug = debug
+        self._channel_counters: dict[str, int] = {}
+        # print("消息代理已启动。")
+
+    def request_channel(self, prefix: str = "channel") -> str:
+        """
+        申请一个新的、唯一的频道名称。
+
+        此方法为每个前缀维护一个独立的计数器。
+        返回一个基于前缀和该前缀当前计数值的唯一字符串,例如 'channel0', 'worker_0', 'channel1'。
+        它不直接创建主题或任何关联的 Signal;这将在首次发布或订阅到返回的主题名称时发生。
+
+        Args:
+            prefix: 频道名称的前缀。默认为 'channel'。
+
+        Returns:
+            一个基于前缀的唯一主题/频道名称字符串。
+        """
+        if prefix not in self._channel_counters:
+            self._channel_counters[prefix] = 0
+
+        channel_name = f"{prefix}{self._channel_counters[prefix]}"
+        self._channel_counters[prefix] += 1
+        return channel_name
+
+    def publish(self, message: Any, topic: Union[str, List[str]] = "default"):
+        """
+        向一个或多个主题发布一条新消息。
+        """
+        topics_to_publish = [topic] if isinstance(topic, str) else topic
+
+        for t in topics_to_publish:
+            # 只能向原始主题发布
+            topic_signal = self._topics.get(t)
+            if not isinstance(topic_signal, Signal):
+                print(f"警告:主题 '{t}' 不存在或不是一个可发布的原始主题。正在创建...")
+                topic_signal = Signal([])
+                self._topics[t] = topic_signal
+
+            # 通过 update 方法追加新消息来触发更新。
+            # 必须创建一个新列表才能让 Reaktiv 检测到变化。
+            topic_signal.update(lambda messages: messages + [message])
+            if self.debug:
+                print(f"新消息发布到 '{t}': \"{message}\"")
+
+    def subscribe(self, callback: Callable[[Any], None], topic: Union[str, List[str]] = "default") -> Subscription:
+        """
+        订阅一个或多个主题。每当有新消息发布时,回调函数将被调用。
+        此方法是幂等的:重复订阅同一个回调到同一个主题不会产生副作用。
+
+        Args:
+            callback: 处理消息的回调函数。
+            topic: 要订阅的主题,可以是单个字符串或字符串列表。
+
+        Returns:
+            一个 Subscription 实例,用于管理订阅的生命周期(暂停、恢复、取消)。
+        """
+        topics_to_subscribe = [topic] if isinstance(topic, str) else topic
+        created_effects_with_topics = []
+
+        # 创建一个 Subscription 实例来管理所有相关的 effects
+        # 它需要提前创建,以便 effect_factory 可以访问它的 is_paused 信号
+        subscription = Subscription(self, callback, created_effects_with_topics)
+
+        for t in topics_to_subscribe:
+            # 检查此回调是否已订阅该主题
+            if t in self._effects_registry and callback in self._effects_registry.get(t, {}):
+                print(f"警告:订阅者 '{callback.__name__}' 已经订阅了 '{t}' 主题。跳过。")
+                continue
+
+            if t not in self._topics:
+                # 如果订阅一个不存在的主题,也为它创建一个 Signal
+                self._topics[t] = Signal([])
+
+            # 使用一个工厂函数来为每个主题创建独立的闭包,
+            # 确保每个订阅都有自己的 'last_processed_index'。
+            def effect_factory(current_topic: str):
+                last_processed_index = 0
+
+                def process_new_messages():
+                    nonlocal last_processed_index
+                    all_messages = self._topics[current_topic]()
+
+                    # 如果暂停了,只更新索引以跳过消息,不进行处理
+                    if untracked(subscription.is_paused):
+                        last_processed_index = len(all_messages)
+                        return
+
+                    new_messages = all_messages[last_processed_index:]
+
+                    if new_messages:
+                        if self.debug:
+                            print(f" -> 订阅者 '{callback.__name__}' 在 '{current_topic}' 主题上收到 {len(new_messages)} 条新消息。")
+                        for msg in new_messages:
+                            try:
+                                if asyncio.iscoroutinefunction(callback):
+                                    asyncio.create_task(callback(msg))
+                                else:
+                                    callback(msg)
+                            except Exception as e:
+                                print(f" !! 在订阅者 '{callback.__name__}' 中发生错误: {e}")
+
+                    last_processed_index = len(all_messages)
+                return process_new_messages
+
+            if self.debug:
+                print(f"订阅者 '{callback.__name__}' 已订阅 '{t}' 主题。")
+            effect = Effect(effect_factory(t))
+
+            # 注册新的 effect
+            if t not in self._effects_registry:
+                self._effects_registry[t] = {}
+            self._effects_registry[t][callback] = effect
+
+            created_effects_with_topics.append((effect, t))
+
+        return subscription
+
+    def create_derived_topic(self, new_topic_name: str, source_topic: str, transform_fn: Callable[[List[Any]], List[Any]]):
+        """
+        创建一个派生主题。
+
+        这个新主题的内容是一个 Computed 信号,它会根据源主题的内容和转换函数自动更新。
+
+        Args:
+            new_topic_name: 派生主题的名称。
+            source_topic: 源主题的名称。
+            transform_fn: 一个函数,接收源主题的消息列表并返回新的消息列表。
+        """
+        if new_topic_name in self._topics:
+            print(f"警告:主题 '{new_topic_name}' 已存在。")
+            return
+
+        source_signal = self._topics.get(source_topic)
+        if not isinstance(source_signal, (Signal, Computed)):
+            print(f"错误:源主题 '{source_topic}' 不存在。")
+            return
+
+        # 创建一个 Computed 信号作为派生主题
+        derived_signal = Computed(
+            lambda: transform_fn(source_signal())
+        )
+
+        self._topics[new_topic_name] = derived_signal
+        if self.debug:
+            print(f"已从 '{source_topic}' 创建派生主题 '{new_topic_name}'。")
+
+    async def iter_topic(self, topic: str):
+        """
+        返回一个异步迭代器,用于通过 async for 循环消费主题消息。
+
+        Args:
+            topic: 要订阅的主题名称。
+
+        Yields:
+            主题中的新消息。
+        """
+        if topic not in self._topics:
+            # 如果主题不存在,创建一个,以防万一
+            self._topics[topic] = Signal([])
+
+        topic_signal = self._topics[topic]
+        last_yielded_index = 0
+
+        # to_async_iter 会在每次 topic_signal 更新时产生一个新的消息列表
+        async for all_messages in to_async_iter(topic_signal):
+            new_messages = all_messages[last_yielded_index:]
+            for msg in new_messages:
+                # 过滤掉内部的 'init' 消息
+                if msg != "init":
+                    yield msg
+
+            last_yielded_index = len(all_messages)
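As a quick orientation, a minimal usage sketch of the new module (it assumes the reaktiv dependency is installed; the topic prefix and callback below are made up for illustration):

import asyncio
from beswarm.broker import MessageBroker

async def main():
    broker = MessageBroker(debug=True)
    channel = broker.request_channel("demo")      # returns a unique name such as "demo0"

    def on_message(msg):
        print("received:", msg)

    sub = broker.subscribe(on_message, channel)   # callback-style consumption
    broker.publish({"hello": "world"}, channel)   # triggers the subscriber's Effect
    await asyncio.sleep(0)                        # give any scheduled async callback tasks a chance to run
    sub.dispose()                                 # permanently cancel the subscription

asyncio.run(main())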
beswarm/tools/click.py CHANGED
@@ -223,6 +223,7 @@ Returns:
 
     # 工作agent初始化
     click_agent = chatgpt(**click_agent_config)
+    # https://developers.googleblog.com/en/conversational-image-segmentation-gemini-2-5/
     prompt = f"Give the segmentation masks for the {target_element}. Output a JSON list of segmentation masks where each entry contains the 2D bounding box in \"box_2d\" (format: ymin, xmin, ymax, xmax) and the mask in \"mask\". Only output the one that meets the criteria the most."
 
     print("正在截取当前屏幕...")
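For reference, the prompt above asks Gemini for output shaped roughly like the following (values are invented; box_2d is ymin, xmin, ymax, xmax as stated in the prompt):

# Hypothetical model output the segmentation prompt is designed to elicit.
expected_shape = [
    {"box_2d": [120, 340, 220, 520], "mask": "<base64-encoded mask image>"}
]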
@@ -2,9 +2,7 @@ import re
 import os
 import json
 import httpx
-from urllib.parse import quote_plus
 import threading
-import time
 
 from ..aient.src.aient.plugins import register_tool, get_url_content # Assuming a similar plugin structure
 
@@ -364,7 +362,7 @@ if __name__ == '__main__':
    # search_query = "美国"
    # search_query = "machine learning models for higher heating value prediction using proximate vs ultimate analysis"
    # search_query = "patent driver cognitive load monitoring micro-expression thermal imaging fusion"
-    search_query = "Self-supervised learning for seismology"
+    search_query = "deep learning models for siRNA activity"
    print(f"Performing web search for: '{search_query}'")
    results = await search_web(search_query) # results is a list of URLs
 
beswarm/tools/worker.py CHANGED
@@ -4,9 +4,186 @@ import sys
 import copy
 import json
 import difflib
+import asyncio
 import platform
 from pathlib import Path
 from datetime import datetime
+from typing import List, Dict, Union
+
+from ..broker import MessageBroker
+from ..aient.src.aient.models import chatgpt
+from ..aient.src.aient.plugins import register_tool, get_function_call_list, registry
+from ..prompt import worker_system_prompt, instruction_system_prompt
+from ..utils import extract_xml_content, get_current_screen_image_message, replace_xml_content, register_mcp_tools
+from ..bemcp.bemcp import MCPManager
+
+class BaseAgent:
+    """Base class for agents, handling common initialization and disposal."""
+    def __init__(self, goal: str, tools_json: List, agent_config: Dict, work_dir: str, cache_messages: Union[bool, List[Dict]], broker: MessageBroker, listen_topic: str, publish_topic: str, status_topic: str):
+        self.goal = goal
+        self.tools_json = tools_json
+        self.work_dir = work_dir
+        self.cache_file = Path(work_dir) / ".beswarm" / "work_agent_conversation_history.json"
+        self.config = agent_config
+        self.cache_messages = cache_messages
+        if cache_messages and isinstance(cache_messages, bool) and cache_messages == True:
+            self.cache_messages = json.loads(self.cache_file.read_text(encoding="utf-8"))
+        self.broker = broker
+        self.listen_topic = listen_topic
+        self.error_topic = listen_topic + ".error"
+        self.publish_topic = publish_topic
+        self.status_topic = status_topic
+        self._subscription = self.broker.subscribe(self.handle_message, [self.listen_topic, self.error_topic])
+
+    async def handle_message(self, message: Dict):
+        """Process incoming messages. Must be implemented by subclasses."""
+        raise NotImplementedError
+
+    def dispose(self):
+        """Cancels the subscription and cleans up resources."""
+        if self._subscription:
+            self._subscription.dispose()
+
+
+class InstructionAgent(BaseAgent):
+    """Generates instructions and publishes them to a message broker."""
+    def __init__(self, goal: str, tools_json: List, agent_config: Dict, work_dir: str, cache_messages: Union[bool, List[Dict]], broker: MessageBroker, listen_topic: str, publish_topic: str, status_topic: str):
+        super().__init__(goal, tools_json, agent_config, work_dir, cache_messages, broker, listen_topic, publish_topic, status_topic)
+
+        self.last_instruction = None
+        self.agent = chatgpt(**self.config)
+
+        self.goal_diff = None
+
+        if self.cache_messages and isinstance(self.cache_messages, list) and len(self.cache_messages) > 1:
+            old_goal = extract_xml_content(self.cache_messages[1]["content"], "goal")
+            if old_goal.strip() != goal.strip():
+                diff_generator = difflib.ndiff(old_goal.splitlines(), goal.splitlines())
+                changed_lines = []
+                for line in diff_generator:
+                    if (line.startswith('+ ') or line.startswith('- ')) and line[2:].strip():
+                        changed_lines.append(line)
+                self.goal_diff = '\n'.join(changed_lines).strip()
+
+    def get_conversation_history(self, conversation_history: List[Dict]):
+        conversation_history = copy.deepcopy(conversation_history)
+
+        self.cache_file.write_text(json.dumps(conversation_history, ensure_ascii=False, indent=4), encoding="utf-8")
+
+        work_agent_system_prompt = conversation_history.pop(0)
+        if conversation_history:
+            original_content = work_agent_system_prompt["content"]
+            regex = r"<latest_file_content>(.*?)</latest_file_content>"
+            match = re.search(regex, original_content, re.DOTALL)
+            if match:
+                extracted_content = f"<latest_file_content>{match.group(1)}</latest_file_content>\n\n"
+            else:
+                extracted_content = ""
+            if isinstance(conversation_history[0]["content"], str):
+                conversation_history[0]["content"] = extracted_content + conversation_history[0]["content"]
+            elif isinstance(conversation_history[0]["content"], list) and extracted_content:
+                conversation_history[0]["content"].append({"type": "text", "text": extracted_content})
+
+        return conversation_history
+
+    async def handle_message(self, message: Dict):
+        """Receives a worker response, generates the next instruction, and publishes it."""
+
+        if len(message["conversation"]) > 1 and message["conversation"][-2]["role"] == "user" \
+                and "<task_complete_message>" in message["conversation"][-2]["content"]:
+            task_complete_message = extract_xml_content(message["conversation"][-2]["content"], "task_complete_message")
+            self.broker.publish({"status": "finished", "result": task_complete_message}, self.status_topic)
+            return
+
+        instruction_prompt = "".join([
+            "</work_agent_conversation_end>\n\n",
+            f"任务目标: {self.goal}\n\n",
+            f"任务目标新变化:\n{self.goal_diff}\n\n" if self.goal_diff else "",
+            "在 tag <work_agent_conversation_start>...</work_agent_conversation_end> 之前的对话历史都是工作智能体的对话历史。\n\n",
+            "根据以上对话历史和目标,请生成下一步指令。如果任务已完成,指示工作智能体调用task_complete工具。\n\n",
+        ])
+        if self.last_instruction and 'fetch_gpt_response_stream HTTP Error' not in self.last_instruction:
+            instruction_prompt = (
+                f"{instruction_prompt}\n\n"
+                "你生成的指令格式错误,必须把给assistant的指令放在<instructions>...</instructions>标签内。请重新生成格式正确的指令。"
+                f"这是你上次给assistant的错误格式的指令:\n{self.last_instruction}"
+            )
+
+        self.agent.conversation["default"][1:] = self.get_conversation_history(message["conversation"])
+
+        if "find_and_click_element" in json.dumps(self.tools_json):
+            instruction_prompt = await get_current_screen_image_message(instruction_prompt)
+
+        raw_response = await self.agent.ask_async(instruction_prompt)
+
+        if "fetch_gpt_response_stream HTTP Error', 'status_code': 404" in raw_response:
+            raise Exception(f"Model: {self.config['engine']} not found!")
+        if "'status_code': 413" in raw_response or \
+                "'status_code': 400" in raw_response:
+            self.broker.publish({"status": "error", "result": raw_response}, self.status_topic)
+            return
+
+        self.broker.publish({"status": "new_message", "result": "\n🤖 指令智能体:\n" + raw_response}, self.status_topic)
+
+        self.last_instruction = raw_response
+        instruction = extract_xml_content(raw_response, "instructions")
+        if instruction:
+            if len(message["conversation"]) == 1:
+                instruction = (
+                    "任务描述:\n"
+                    f"<goal>{self.goal}</goal>\n\n"
+                    "你作为指令的**执行者**,而非任务的**规划师**,你必须严格遵循以下单步工作流程:\n"
+                    "**执行指令**\n"
+                    " - **严格遵从:** 只执行我当前下达的明确指令。在我明确给出下一步指令前,绝不擅自行动或推测、执行任何未明确要求的后续步骤。\n"
+                    " - **严禁越权:** 禁止执行任何我未指定的步骤。`<goal>` 标签中的内容仅为背景信息,不得据此进行任务规划或推测。\n"
+                    "**汇报结果**\n"
+                    " - **聚焦单步:** 指令完成后,仅汇报该步骤的执行结果与产出。\n"
+                    "**暂停等待**\n"
+                    " - **原地待命:** 汇报后,任务暂停。在收到我新的指令前,严禁发起任何新的工具调用或操作。\n"
+                    " - **请求指令:** 回复的最后必须明确请求我提供下一步指令。\n"
+                    "**注意:** 禁止完成超出下面我未规定的步骤,`<goal>` 标签中的内容仅为背景信息。"
+                    "现在开始执行第一步:\n"
+                    f"{instruction}"
+                )
+            self.broker.publish({"instruction": instruction, "conversation": message["conversation"]}, self.publish_topic)
+        else:
+            print("\n❌ 指令智能体生成的指令不符合要求,正在重新生成。")
+            self.broker.publish(message, self.error_topic)
+
+
+class WorkerAgent(BaseAgent):
+    """Executes instructions and publishes results to a message broker."""
+    def __init__(self, goal: str, tools_json: List, agent_config: Dict, work_dir: str, cache_messages: Union[bool, List[Dict]], broker: MessageBroker, listen_topic: str, publish_topic: str, status_topic: str):
+        super().__init__(goal, tools_json, agent_config, work_dir, cache_messages, broker, listen_topic, publish_topic, status_topic)
+
+        if self.cache_messages and isinstance(self.cache_messages, list) and len(self.cache_messages) > 1:
+            first_user_message = replace_xml_content(self.cache_messages[1]["content"], "goal", goal)
+            self.config["cache_messages"] = self.cache_messages[0:1] + [{"role": "user", "content": first_user_message}] + self.cache_messages[2:]
+
+        self.agent = chatgpt(**self.config)
+
+    async def handle_message(self, message: Dict):
+        """Receives an instruction, executes it, and publishes the response."""
+
+        if message.get("instruction") == "Initial kickoff":
+            self.broker.publish({
+                "conversation": self.agent.conversation["default"]
+            }, self.publish_topic)
+            return
+
+        instruction = message["instruction"]
+        if "find_and_click_element" in json.dumps(self.tools_json):
+            instruction = await get_current_screen_image_message(instruction)
+        response = await self.agent.ask_async(instruction)
+
+        if response.strip() == '':
+            print("\n❌ 工作智能体回复为空,请重新生成指令。")
+            self.broker.publish(message, self.error_topic)
+        else:
+            self.broker.publish({"status": "new_message", "result": "\n✅ 工作智能体:\n" + response}, self.status_topic)
+            self.broker.publish({
+                "conversation": self.agent.conversation["default"]
+            }, self.publish_topic)
 
 class Tee:
     def __init__(self, *files):
@@ -21,435 +198,170 @@ class Tee:
21
198
  for f in self.files:
22
199
  f.flush()
23
200
 
24
- from ..aient.src.aient.models import chatgpt
25
- from ..aient.src.aient.plugins import register_tool, get_function_call_list, registry
26
- from ..prompt import worker_system_prompt, instruction_system_prompt
27
- from ..utils import extract_xml_content, get_current_screen_image_message, replace_xml_content, register_mcp_tools
28
- from ..bemcp.bemcp import MCPClient, convert_tool_format, MCPManager
201
+ broker = MessageBroker()
202
+ mcp_manager = MCPManager()
203
+
204
+ class BrokerWorker:
205
+ """The 'glue' class that orchestrates agents via a MessageBroker."""
206
+ def __init__(self, goal: str, tools: List[Union[str, Dict]], work_dir: str, cache_messages: Union[bool, List[Dict]] = None, broker: MessageBroker = None, mcp_manager: MCPManager = None):
207
+ self.goal = goal
208
+ self.tools = tools
209
+ self.work_dir = Path(work_dir)
210
+ self.cache_messages = cache_messages
211
+
212
+ self.broker = broker
213
+ self.mcp_manager = mcp_manager
214
+ self.task_completion_event = asyncio.Event()
215
+ self.final_result = None
216
+ self._status_subscription = None
217
+ self.setup()
218
+
219
+ self.channel = self.broker.request_channel()
220
+ self.INSTRUCTION_TOPIC = self.channel + ".instructions"
221
+ self.WORKER_RESPONSE_TOPIC = self.channel + ".worker_responses"
222
+ self.TASK_STATUS_TOPIC =self.channel + ".task_status"
223
+
224
+ def setup(self):
225
+ cache_dir = self.work_dir / ".beswarm"
226
+ cache_dir.mkdir(parents=True, exist_ok=True)
227
+ task_manager.set_root_path(self.work_dir)
228
+ self.cache_file = cache_dir / "work_agent_conversation_history.json"
229
+ if not self.cache_file.exists():
230
+ self.cache_file.write_text("[]", encoding="utf-8")
231
+
232
+ DEBUG = os.getenv("DEBUG", "false").lower() in ("true", "1", "t", "yes")
233
+ if DEBUG:
234
+ log_file = open(cache_dir / "history.log", "a", encoding="utf-8")
235
+ log_file.write(f"========== {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ==========\n")
236
+ original_stdout = sys.stdout
237
+ original_stderr = sys.stderr
238
+ sys.stdout = Tee(original_stdout, log_file)
239
+ sys.stderr = Tee(original_stderr, log_file)
240
+
241
+ async def _configure_tools(self):
242
+ mcp_list = [item for item in self.tools if isinstance(item, dict)]
243
+ if mcp_list:
244
+ for mcp_item in mcp_list:
245
+ mcp_name, mcp_config = list(mcp_item.items())[0]
246
+ await self.mcp_manager.add_server(mcp_name, mcp_config)
247
+ client = self.mcp_manager.clients.get(mcp_name)
248
+ await register_mcp_tools(client, registry)
249
+ all_mcp_tools = await self.mcp_manager.get_all_tools()
250
+ self.tools.extend([tool.name for tool in sum(all_mcp_tools.values(), [])])
251
+ self.tools = [item for item in self.tools if not isinstance(item, dict)]
252
+ if "task_complete" not in self.tools: self.tools.append("task_complete")
253
+ self.tools_json = [value for _, value in get_function_call_list(self.tools).items()]
254
+
255
+ def _task_status_subscriber(self, message: Dict):
256
+ """Subscriber for task status changes."""
257
+ if message.get("status") == "finished":
258
+ self.final_result = message.get("result")
259
+ self.task_completion_event.set()
260
+
261
+ if message.get("status") == "error":
262
+ raise Exception(message.get("result"))
263
+
264
+ if message.get("status") == "new_message":
265
+ print(message.get("result"))
266
+
267
+ def _setup_agents(self):
268
+ instruction_agent_config = {
269
+ "api_key": os.getenv("API_KEY"), "api_url": os.getenv("BASE_URL"),
270
+ "engine": os.getenv("MODEL"),
271
+ "system_prompt": instruction_system_prompt.format(
272
+ os_version=platform.platform(), tools_list=self.tools_json,
273
+ workspace_path=self.work_dir, current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S")
274
+ ),
275
+ "print_log": os.getenv("DEBUG", "false").lower() in ("true", "1", "t", "yes"),
276
+ "temperature": 0.7, "use_plugins": False
277
+ }
278
+
279
+ worker_agent_config = {
280
+ "api_key": os.getenv("API_KEY"), "api_url": os.getenv("BASE_URL"),
281
+ "engine": os.getenv("FAST_MODEL") or os.getenv("MODEL"),
282
+ "system_prompt": worker_system_prompt.format(
283
+ os_version=platform.platform(), workspace_path=self.work_dir,
284
+ shell=os.getenv('SHELL', 'Unknown'), current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
285
+ tools_list=self.tools_json
286
+ ),
287
+ "print_log": True, "temperature": 0.5, "function_call_max_loop": 100
288
+ }
289
+
290
+ instruction_agent = InstructionAgent(
291
+ goal=self.goal, tools_json=self.tools_json, agent_config=instruction_agent_config, work_dir=self.work_dir, cache_messages=self.cache_messages,
292
+ broker=self.broker, listen_topic=self.WORKER_RESPONSE_TOPIC,
293
+ publish_topic=self.INSTRUCTION_TOPIC, status_topic=self.TASK_STATUS_TOPIC
294
+ )
295
+
296
+ worker_agent = WorkerAgent(
297
+ goal=self.goal, tools_json=self.tools_json, agent_config=worker_agent_config, work_dir=self.work_dir, cache_messages=self.cache_messages,
298
+ broker=self.broker, listen_topic=self.INSTRUCTION_TOPIC,
299
+ publish_topic=self.WORKER_RESPONSE_TOPIC, status_topic=self.TASK_STATUS_TOPIC
300
+ )
301
+ return instruction_agent, worker_agent
302
+
303
+ async def run(self):
304
+ """Sets up subscriptions and starts the workflow."""
305
+ os.chdir(self.work_dir.absolute())
306
+ await self._configure_tools()
307
+
308
+ instruction_agent, worker_agent = self._setup_agents()
309
+
310
+ self.broker.publish({"instruction": "Initial kickoff"}, self.INSTRUCTION_TOPIC)
311
+
312
+ self._status_subscription = self.broker.subscribe(self._task_status_subscriber, self.TASK_STATUS_TOPIC)
313
+ await self.task_completion_event.wait()
314
+
315
+ instruction_agent.dispose()
316
+ worker_agent.dispose()
317
+ self._status_subscription.dispose()
318
+ await self.mcp_manager.cleanup()
319
+ return self.final_result
320
+
321
+ async def stream_run(self):
322
+ """Runs the workflow and yields status messages."""
323
+ os.chdir(self.work_dir.absolute())
324
+ await self._configure_tools()
325
+
326
+ instruction_agent, worker_agent = self._setup_agents()
327
+
328
+ self.broker.publish({"instruction": "Initial kickoff"}, self.INSTRUCTION_TOPIC)
329
+
330
+ try:
331
+ async for message in self.broker.iter_topic(self.TASK_STATUS_TOPIC):
332
+ if message.get("status") == "new_message":
333
+ yield message.get("result")
334
+ elif message.get("status") == "finished":
335
+ yield message.get("result")
336
+ break
337
+ elif message.get("status") == "error":
338
+ raise Exception(message.get("result"))
339
+ finally:
340
+ instruction_agent.dispose()
341
+ worker_agent.dispose()
342
+ await self.mcp_manager.cleanup()
29
343
 
30
- manager = MCPManager()
31
344
 
32
345
  @register_tool()
33
- async def worker(goal, tools, work_dir, cache_messages=None):
34
- cache_dir = Path(work_dir) / ".beswarm"
35
- cache_dir.mkdir(parents=True, exist_ok=True)
36
- task_manager.set_root_path(work_dir)
37
- cache_file = cache_dir / "work_agent_conversation_history.json"
38
- if not cache_file.exists():
39
- cache_file.write_text("[]", encoding="utf-8")
40
-
41
- DEBUG = os.getenv("DEBUG", "false").lower() in ("true", "1", "t", "yes")
42
- if DEBUG:
43
- log_file = open(cache_dir / "history.log", "a", encoding="utf-8")
44
- log_file.write(f"========== {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ==========\n")
45
- original_stdout = sys.stdout
46
- original_stderr = sys.stderr
47
- sys.stdout = Tee(original_stdout, log_file)
48
- sys.stderr = Tee(original_stderr, log_file)
49
-
346
+ async def worker(goal: str, tools: List[Union[str, Dict]], work_dir: str, cache_messages: Union[bool, List[Dict]] = None):
50
347
  start_time = datetime.now()
51
- os.chdir(Path(work_dir).absolute())
52
- finish_flag = 0
53
- goal_diff = None
54
-
55
- mcp_list = [item for item in tools if isinstance(item, dict)]
56
- if mcp_list:
57
- for mcp_item in mcp_list:
58
- mcp_name, mcp_config = list(mcp_item.items())[0]
59
- await manager.add_server(mcp_name, mcp_config)
60
- client = manager.clients.get(mcp_name)
61
- await register_mcp_tools(client, registry)
62
- all_tools = await manager.get_all_tools()
63
- mcp_tools_name = [tool.name for tool in sum(all_tools.values(), [])]
64
- tools += mcp_tools_name
65
-
66
- tools = [item for item in tools if not isinstance(item, dict)]
67
- if "task_complete" not in tools:
68
- tools.append("task_complete")
69
-
70
- tools_json = [value for _, value in get_function_call_list(tools).items()]
71
- work_agent_system_prompt = worker_system_prompt.format(
72
- os_version=platform.platform(),
73
- workspace_path=work_dir,
74
- shell=os.getenv('SHELL', 'Unknown'),
75
- current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
76
- tools_list=tools_json
77
- )
78
-
79
- work_agent_config = {
80
- "api_key": os.getenv("API_KEY"),
81
- "api_url": os.getenv("BASE_URL"),
82
- "engine": os.getenv("FAST_MODEL") or os.getenv("MODEL"),
83
- "system_prompt": work_agent_system_prompt,
84
- "print_log": True,
85
- # "max_tokens": 8000,
86
- "temperature": 0.5,
87
- "function_call_max_loop": 100,
88
- }
89
- if cache_messages:
90
- if isinstance(cache_messages, bool) and cache_messages == True:
91
- cache_messages = json.loads(cache_file.read_text(encoding="utf-8"))
92
- if cache_messages and isinstance(cache_messages, list) and len(cache_messages) > 1:
93
- old_goal = extract_xml_content(cache_messages[1]["content"], "goal")
94
- if old_goal.strip() != goal.strip():
95
- diff_generator = difflib.ndiff(old_goal.splitlines(), goal.splitlines())
96
- changed_lines = []
97
- for line in diff_generator:
98
- if (line.startswith('+ ') or line.startswith('- ')) and line[2:].strip():
99
- changed_lines.append(line)
100
- goal_diff = '\n'.join(changed_lines).strip()
101
- first_user_message = replace_xml_content(cache_messages[1]["content"], "goal", goal)
102
- work_agent_config["cache_messages"] = cache_messages[0:1] + [{"role": "user", "content": first_user_message}] + cache_messages[2:]
103
-
104
- instruction_agent_config = {
105
- "api_key": os.getenv("API_KEY"),
106
- "api_url": os.getenv("BASE_URL"),
107
- "engine": os.getenv("MODEL"),
108
- "system_prompt": instruction_system_prompt.format(os_version=platform.platform(), tools_list=tools_json, workspace_path=work_dir, current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
109
- "print_log": DEBUG,
110
- # "max_tokens": 4000,
111
- "temperature": 0.7,
112
- "use_plugins": False,
113
- }
114
-
115
- # 工作agent初始化
116
- work_agent = chatgpt(**work_agent_config)
117
- async def instruction_agent_task():
118
- last_instruction = None
119
- while True:
120
- instruction_prompt = "".join([
121
- "</work_agent_conversation_end>\n\n",
122
- f"任务目标: {goal}\n\n",
123
- f"任务目标新变化:\n{goal_diff}\n\n" if goal_diff else "",
124
- "在 tag <work_agent_conversation_start>...</work_agent_conversation_end> 之前的对话历史都是工作智能体的对话历史。\n\n",
125
- "根据以上对话历史和目标,请生成下一步指令。如果任务已完成,指示工作智能体调用task_complete工具。\n\n",
126
- ])
127
- if last_instruction and 'fetch_gpt_response_stream HTTP Error' not in last_instruction:
128
- instruction_prompt = (
129
- f"{instruction_prompt}\n\n"
130
- "你生成的指令格式错误,必须把给assistant的指令放在<instructions>...</instructions>标签内。请重新生成格式正确的指令。"
131
- f"这是你上次给assistant的错误格式的指令:\n{last_instruction}"
132
- )
133
- # 让指令agent分析对话历史并生成新指令
134
- instruction_agent = chatgpt(**instruction_agent_config)
135
- conversation_history = copy.deepcopy(work_agent.conversation["default"])
136
- if len(conversation_history) > 1 and conversation_history[-2]["role"] == "user" \
137
- and "<task_complete_message>" in conversation_history[-2]["content"]:
138
- task_complete_message = extract_xml_content(conversation_history[-2]["content"], "task_complete_message")
139
- # del work_agent.conversation["default"][-4:]
140
- return "<task_complete_message>" + task_complete_message + "</task_complete_message>"
141
-
142
- cache_file.write_text(json.dumps(conversation_history, ensure_ascii=False, indent=4), encoding="utf-8")
143
-
144
- work_agent_system_prompt = conversation_history.pop(0)
145
- if conversation_history:
146
- # 获取原始内容
147
- original_content = work_agent_system_prompt["content"]
148
-
149
- # 定义正则表达式
150
- regex = r"<latest_file_content>(.*?)</latest_file_content>"
151
-
152
- # 进行匹配
153
- match = re.search(regex, original_content, re.DOTALL)
154
-
155
- # 提取内容或设置为空字符串
156
- if match:
157
- extracted_content = f"<latest_file_content>{match.group(1)}</latest_file_content>\n\n"
158
- else:
159
- extracted_content = ""
160
- if isinstance(conversation_history[0]["content"], str):
161
- conversation_history[0]["content"] = extracted_content + conversation_history[0]["content"]
162
- elif isinstance(conversation_history[0]["content"], list) and extracted_content:
163
- conversation_history[0]["content"].append({"type": "text", "text": extracted_content})
164
-
165
- instruction_agent.conversation["default"][1:] = conversation_history
166
- if "find_and_click_element" in str(tools_json):
167
- instruction_prompt = await get_current_screen_image_message(instruction_prompt)
168
- next_instruction = await instruction_agent.ask_async(instruction_prompt)
169
- print("\n🤖 指令智能体生成的下一步指令:", next_instruction)
170
- if "fetch_gpt_response_stream HTTP Error', 'status_code': 404" in next_instruction:
171
- raise Exception(f"Model: {instruction_agent_config['engine']} not found!")
172
- if "'status_code': 413" in next_instruction or \
173
- "'status_code': 400" in next_instruction:
174
- end_time = datetime.now()
175
- total_time = end_time - start_time
176
- print(f"\n任务开始时间: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
177
- print(f"任务结束时间: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
178
- print(f"总用时: {total_time}")
179
- raise Exception(f"The request body is too long, please try again.")
180
-
181
- last_instruction = next_instruction
182
- next_instruction = extract_xml_content(next_instruction, "instructions")
183
- if not next_instruction:
184
- print("\n❌ 指令智能体生成的指令不符合要求,请重新生成。")
185
- continue
186
- else:
187
- if conversation_history == []:
188
- next_instruction = (
189
- "任务描述:\n"
190
- f"<goal>{goal}</goal>\n\n"
191
- "你作为指令的**执行者**,而非任务的**规划师**,你必须严格遵循以下单步工作流程:\n"
192
- "**执行指令**\n"
193
- " - **严格遵从:** 只执行我当前下达的明确指令。在我明确给出下一步指令前,绝不擅自行动或推测、执行任何未明确要求的后续步骤。\n"
194
- " - **严禁越权:** 禁止执行任何我未指定的步骤。`<goal>` 标签中的内容仅为背景信息,不得据此进行任务规划或推测。\n"
195
- "**汇报结果**\n"
196
- " - **聚焦单步:** 指令完成后,仅汇报该步骤的执行结果与产出。\n"
197
- "**暂停等待**\n"
198
- " - **原地待命:** 汇报后,任务暂停。在收到我新的指令前,严禁发起任何新的工具调用或操作。\n"
199
- " - **请求指令:** 回复的最后必须明确请求我提供下一步指令。\n"
200
- "**注意:** 禁止完成超出下面我未规定的步骤,`<goal>` 标签中的内容仅为背景信息。"
201
- "现在开始执行第一步:\n"
202
- f"{next_instruction}"
203
- )
204
- break
205
- return next_instruction
206
-
207
- need_instruction = True
208
- result = None
209
- while True:
210
- next_instruction = ''
211
- if need_instruction:
212
- next_instruction = await instruction_agent_task()
213
-
214
- # 检查任务是否完成
215
- if "<task_complete_message>" in next_instruction:
216
- if finish_flag == 0:
217
- finish_flag = 1
218
- continue
219
- elif finish_flag == 1:
220
- result = extract_xml_content(next_instruction, "task_complete_message")
221
- break
222
- else:
223
- finish_flag = 0
224
- if "find_and_click_element" in str(tools_json):
225
- next_instruction = await get_current_screen_image_message(next_instruction)
226
- result = await work_agent.ask_async(next_instruction)
227
- if result.strip() == '' or result.strip() == '</content>\n</write_to_file>':
228
- print("\n❌ 工作智能体回复为空,请重新生成指令。")
229
- need_instruction = False
230
- continue
231
- print("✅ 工作智能体回复:", result)
232
- need_instruction = True
233
-
348
+ worker_instance = BrokerWorker(goal, tools, work_dir, cache_messages, broker, mcp_manager)
349
+ result = await worker_instance.run()
234
350
  end_time = datetime.now()
235
- total_time = end_time - start_time
236
- print("\n✅ 任务已完成:", result)
237
351
  print(f"\n任务开始时间: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
238
352
  print(f"任务结束时间: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
239
- print(f"总用时: {total_time}")
240
- await manager.cleanup()
353
+ print(f"总用时: {end_time - start_time}")
241
354
  return result
242
355
 
243
- async def worker_gen(goal, tools, work_dir, cache_messages=None):
244
- cache_dir = Path(work_dir) / ".beswarm"
245
- cache_dir.mkdir(parents=True, exist_ok=True)
246
- task_manager.set_root_path(work_dir)
247
- cache_file = cache_dir / "work_agent_conversation_history.json"
248
- if not cache_file.exists():
249
- cache_file.write_text("[]", encoding="utf-8")
250
-
251
- DEBUG = os.getenv("DEBUG", "false").lower() in ("true", "1", "t", "yes")
252
- if DEBUG:
253
- log_file = open(cache_dir / "history.log", "a", encoding="utf-8")
254
- log_file.write(f"========== {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ==========\n")
255
- original_stdout = sys.stdout
256
- original_stderr = sys.stderr
257
- sys.stdout = Tee(original_stdout, log_file)
258
- sys.stderr = Tee(original_stderr, log_file)
259
-
356
+ @register_tool()
357
+ async def worker_gen(goal: str, tools: List[Union[str, Dict]], work_dir: str, cache_messages: Union[bool, List[Dict]] = None):
260
358
  start_time = datetime.now()
261
- os.chdir(Path(work_dir).absolute())
262
- finish_flag = 0
263
- goal_diff = None
264
-
265
- mcp_list = [item for item in tools if isinstance(item, dict)]
266
- if mcp_list:
267
- for mcp_item in mcp_list:
268
- mcp_name, mcp_config = list(mcp_item.items())[0]
269
- await manager.add_server(mcp_name, mcp_config)
270
- client = manager.clients.get(mcp_name)
271
- await register_mcp_tools(client, registry)
272
- all_tools = await manager.get_all_tools()
273
- mcp_tools_name = [tool.name for tool in sum(all_tools.values(), [])]
274
- tools += mcp_tools_name
275
-
276
- tools = [item for item in tools if not isinstance(item, dict)]
277
- if "task_complete" not in tools:
278
- tools.append("task_complete")
279
-
280
- tools_json = [value for _, value in get_function_call_list(tools).items()]
281
- work_agent_system_prompt = worker_system_prompt.format(
282
- os_version=platform.platform(),
283
- workspace_path=work_dir,
284
- shell=os.getenv('SHELL', 'Unknown'),
285
- current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
286
- tools_list=tools_json
287
- )
288
-
289
- work_agent_config = {
290
- "api_key": os.getenv("API_KEY"),
291
- "api_url": os.getenv("BASE_URL"),
292
- "engine": os.getenv("FAST_MODEL") or os.getenv("MODEL"),
293
- "system_prompt": work_agent_system_prompt,
294
- "print_log": True,
295
- # "max_tokens": 8000,
296
- "temperature": 0.5,
297
- "function_call_max_loop": 100,
298
- }
299
- if cache_messages:
300
- if isinstance(cache_messages, bool) and cache_messages == True:
301
- cache_messages = json.loads(cache_file.read_text(encoding="utf-8"))
302
- if cache_messages and isinstance(cache_messages, list) and len(cache_messages) > 1:
303
- old_goal = extract_xml_content(cache_messages[1]["content"], "goal")
304
- if old_goal.strip() != goal.strip():
305
- diff_generator = difflib.ndiff(old_goal.splitlines(), goal.splitlines())
306
- changed_lines = []
307
- for line in diff_generator:
308
- if (line.startswith('+ ') or line.startswith('- ')) and line[2:].strip():
309
- changed_lines.append(line)
310
- goal_diff = '\n'.join(changed_lines).strip()
311
- first_user_message = replace_xml_content(cache_messages[1]["content"], "goal", goal)
312
- work_agent_config["cache_messages"] = cache_messages[0:1] + [{"role": "user", "content": first_user_message}] + cache_messages[2:]
313
-
314
- instruction_agent_config = {
315
- "api_key": os.getenv("API_KEY"),
316
- "api_url": os.getenv("BASE_URL"),
317
- "engine": os.getenv("MODEL"),
318
- "system_prompt": instruction_system_prompt.format(os_version=platform.platform(), tools_list=tools_json, workspace_path=work_dir, current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
319
- "print_log": DEBUG,
320
- # "max_tokens": 4000,
321
- "temperature": 0.7,
322
- "use_plugins": False,
323
- }
324
-
325
- # 工作agent初始化
326
- work_agent = chatgpt(**work_agent_config)
327
- async def instruction_agent_task():
328
- last_instruction = None
329
- while True:
330
- instruction_prompt = "".join([
331
- "</work_agent_conversation_end>\n\n",
332
- f"任务目标: {goal}\n\n",
333
- f"任务目标新变化:\n{goal_diff}\n\n" if goal_diff else "",
334
- "在 tag <work_agent_conversation_start>...</work_agent_conversation_end> 之前的对话历史都是工作智能体的对话历史。\n\n",
335
- "根据以上对话历史和目标,请生成下一步指令。如果任务已完成,指示工作智能体调用task_complete工具。\n\n",
336
- ])
337
- if last_instruction and 'fetch_gpt_response_stream HTTP Error' not in last_instruction:
338
- instruction_prompt = (
339
- f"{instruction_prompt}\n\n"
340
- "你生成的指令格式错误,必须把给assistant的指令放在<instructions>...</instructions>标签内。请重新生成格式正确的指令。"
341
- f"这是你上次给assistant的错误格式的指令:\n{last_instruction}"
342
- )
343
- # 让指令agent分析对话历史并生成新指令
344
- instruction_agent = chatgpt(**instruction_agent_config)
345
- conversation_history = copy.deepcopy(work_agent.conversation["default"])
346
- if len(conversation_history) > 1 and conversation_history[-2]["role"] == "user" \
347
- and "<task_complete_message>" in conversation_history[-2]["content"]:
348
- task_complete_message = extract_xml_content(conversation_history[-2]["content"], "task_complete_message")
349
- # del work_agent.conversation["default"][-4:]
350
- return "<task_complete_message>" + task_complete_message + "</task_complete_message>"
351
-
352
- cache_file.write_text(json.dumps(conversation_history, ensure_ascii=False, indent=4), encoding="utf-8")
353
-
354
- work_agent_system_prompt = conversation_history.pop(0)
355
- if conversation_history:
356
- # 获取原始内容
357
- original_content = work_agent_system_prompt["content"]
358
-
359
- # 定义正则表达式
360
- regex = r"<latest_file_content>(.*?)</latest_file_content>"
361
-
362
- # 进行匹配
363
- match = re.search(regex, original_content, re.DOTALL)
364
-
365
- # 提取内容或设置为空字符串
366
- if match:
367
- extracted_content = f"<latest_file_content>{match.group(1)}</latest_file_content>\n\n"
368
- else:
369
- extracted_content = ""
370
- if isinstance(conversation_history[0]["content"], str):
371
- conversation_history[0]["content"] = extracted_content + conversation_history[0]["content"]
372
- elif isinstance(conversation_history[0]["content"], list) and extracted_content:
373
- conversation_history[0]["content"].append({"type": "text", "text": extracted_content})
374
-
375
- instruction_agent.conversation["default"][1:] = conversation_history
376
- if "find_and_click_element" in str(tools_json):
377
- instruction_prompt = await get_current_screen_image_message(instruction_prompt)
378
- next_instruction = await instruction_agent.ask_async(instruction_prompt)
379
- print("\n🤖 指令智能体生成的下一步指令:", next_instruction)
380
- if "fetch_gpt_response_stream HTTP Error', 'status_code': 404" in next_instruction:
381
- raise Exception(f"Model: {instruction_agent_config['engine']} not found!")
382
- if "'status_code': 413" in next_instruction or \
383
- "'status_code': 400" in next_instruction:
384
- end_time = datetime.now()
385
- total_time = end_time - start_time
386
- print(f"\n任务开始时间: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
387
- print(f"任务结束时间: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
388
- print(f"总用时: {total_time}")
389
- raise Exception(f"The request body is too long, please try again.")
390
-
391
- last_instruction = next_instruction
392
- next_instruction = extract_xml_content(next_instruction, "instructions")
393
- if not next_instruction:
394
- print("\n❌ 指令智能体生成的指令不符合要求,请重新生成。")
395
- continue
396
- else:
397
- if conversation_history == []:
398
- next_instruction = (
399
- "任务描述:\n"
400
- f"<goal>{goal}</goal>\n\n"
401
- "你作为指令的**执行者**,而非任务的**规划师**,你必须严格遵循以下单步工作流程:\n"
402
- "**执行指令**\n"
403
- " - **严格遵从:** 只执行我当前下达的明确指令。在我明确给出下一步指令前,绝不擅自行动或推测、执行任何未明确要求的后续步骤。\n"
404
- " - **严禁越权:** 禁止执行任何我未指定的步骤。`<goal>` 标签中的内容仅为背景信息,不得据此进行任务规划或推测。\n"
405
- "**汇报结果**\n"
406
- " - **聚焦单步:** 指令完成后,仅汇报该步骤的执行结果与产出。\n"
407
- "**暂停等待**\n"
408
- " - **原地待命:** 汇报后,任务暂停。在收到我新的指令前,严禁发起任何新的工具调用或操作。\n"
409
- " - **请求指令:** 回复的最后必须明确请求我提供下一步指令。\n"
410
- "**注意:** 禁止完成超出下面我未规定的步骤,`<goal>` 标签中的内容仅为背景信息。"
411
- "现在开始执行第一步:\n"
412
- f"{next_instruction}"
413
- )
414
- break
415
- return next_instruction
416
-
417
- need_instruction = True
418
- result = None
419
- while True:
420
- next_instruction = ''
421
- if need_instruction:
422
- next_instruction = await instruction_agent_task()
423
-
424
- yield {"user": next_instruction}
425
-
426
- # 检查任务是否完成
427
- if "<task_complete_message>" in next_instruction:
428
- if finish_flag == 0:
429
- finish_flag = 1
430
- continue
431
- elif finish_flag == 1:
432
- result = extract_xml_content(next_instruction, "task_complete_message")
433
- break
434
- else:
435
- finish_flag = 0
436
- if "find_and_click_element" in str(tools_json):
437
- next_instruction = await get_current_screen_image_message(next_instruction)
438
- result = await work_agent.ask_async(next_instruction)
439
- if result.strip() == '' or result.strip() == '</content>\n</write_to_file>':
440
- print("\n❌ 工作智能体回复为空,请重新生成指令。")
441
- need_instruction = False
442
- continue
443
- yield {"assistant": result}
444
- print("✅ 工作智能体回复:", result)
445
- need_instruction = True
446
-
359
+ worker_instance = BrokerWorker(goal, tools, work_dir, cache_messages, broker, mcp_manager)
360
+ async for result in worker_instance.stream_run():
361
+ yield result
447
362
  end_time = datetime.now()
448
- total_time = end_time - start_time
449
- print("\n✅ 任务已完成:", result)
450
363
  print(f"\n任务开始时间: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
451
364
  print(f"任务结束时间: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
452
- print(f"总用时: {total_time}")
453
- await manager.cleanup()
365
+ print(f"总用时: {end_time - start_time}")
454
366
 
455
- from .taskmanager import task_manager
367
+ from .taskmanager import task_manager
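Taken together, the refactor replaces the old in-function loops with a BrokerWorker that wires an InstructionAgent and a WorkerAgent over per-channel "<channel>.instructions", "<channel>.worker_responses" and "<channel>.task_status" topics. A rough usage sketch of the registered tool (the goal, tool names and paths are placeholders, and API_KEY, BASE_URL and MODEL are assumed to be set in the environment):

import asyncio
from beswarm.tools.worker import worker

async def main():
    # Drives the InstructionAgent/WorkerAgent pair until a task_complete message
    # is published on the task-status topic, then returns that message.
    result = await worker(
        goal="Summarise README.md",            # placeholder goal
        tools=["edit_file", "search_web"],     # placeholder tool names
        work_dir="/tmp/demo",                  # placeholder workspace
        cache_messages=False,
    )
    print(result)

asyncio.run(main())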
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: beswarm
-Version: 0.2.35
+Version: 0.2.36
 Summary: MAS
 Requires-Python: >=3.11
 Description-Content-Type: text/markdown
@@ -1,14 +1,15 @@
 beswarm/__init__.py,sha256=HZjUOJtZR5QhMuDbq-wukQQn1VrBusNWai_ysGo-VVI,20
+beswarm/broker.py,sha256=RtnQZVbhf25acUHahNBiaS5FGxcrj0rhBhkon9gFY_M,9873
 beswarm/prompt.py,sha256=5JMfOuXWHscsaeDzwBn223mj9N85eAQdOHXQZk7zeWE,32238
 beswarm/utils.py,sha256=xxbNifOPlfcVkKmF_qFzuEnZgF3MQg3mnOfz1EF0Qss,6697
 beswarm/aient/main.py,sha256=SiYAIgQlLJqYusnTVEJOx1WNkSJKMImhgn5aWjfroxg,3814
-beswarm/aient/setup.py,sha256=LqjY1x8CQrcvFrHKFSyZpm5h6iBuosHIpZqXdCPnPes,487
+beswarm/aient/setup.py,sha256=lSEY6pYNdIdUB8F3gkj0XsEos1FMkd_c6PfIt-hscII,487
 beswarm/aient/src/aient/__init__.py,sha256=SRfF7oDVlOOAi6nGKiJIUK6B_arqYLO9iSMp-2IZZps,21
 beswarm/aient/src/aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
 beswarm/aient/src/aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
 beswarm/aient/src/aient/core/models.py,sha256=d4MISNezTSe0ls0-fjuToI2SoT-sk5fWqAJuKVinIlo,7502
-beswarm/aient/src/aient/core/request.py,sha256=1tedDQf8GRv5Y7rYNE_596vQb4o7e1icaKAA7lIl4YY,76114
-beswarm/aient/src/aient/core/response.py,sha256=Ba0BwsIN2ozZC_UInkGS07qKlpo3dIei6rw0INQ66BE,33086
+beswarm/aient/src/aient/core/request.py,sha256=GrB8hQY1K8AF1O9f5g-hoY8fwZR4SUNNhvCDpuhHVl0,76822
+beswarm/aient/src/aient/core/response.py,sha256=LwaDyCuuT0RPxBwE08k8_Dmh0df_q7q4BUix7NcCJV8,33207
 beswarm/aient/src/aient/core/utils.py,sha256=8TR442o3VV7Kl9l6f6LlmOUQ1UDZ-aXMzQqm-qIrqE4,28166
 beswarm/aient/src/aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
 beswarm/aient/src/aient/core/test/test_geminimask.py,sha256=HFX8jDbNg_FjjgPNxfYaR-0-roUrOO-ND-FVsuxSoiw,13254
@@ -17,7 +18,7 @@ beswarm/aient/src/aient/core/test/test_payload.py,sha256=8jBiJY1uidm1jzL-EiK0s6U
 beswarm/aient/src/aient/models/__init__.py,sha256=ouNDNvoBBpIFrLsk09Q_sq23HR0GbLAKfGLIFmfEuXE,219
 beswarm/aient/src/aient/models/audio.py,sha256=kRd-8-WXzv4vwvsTGwnstK-WR8--vr9CdfCZzu8y9LA,1934
 beswarm/aient/src/aient/models/base.py,sha256=z-Z0pJfTN2x0cuwfvu0BdMRY9O-RmLwHEnBIJN1x4Fg,6719
-beswarm/aient/src/aient/models/chatgpt.py,sha256=Yum3_-LgHmFUII0AljfNNfSc8gBhJzyPYAWZJyr4yFo,46969
+beswarm/aient/src/aient/models/chatgpt.py,sha256=UP7cn6Vo0bXzj6FpqTHIOjg1UygYQDXMSlUbXbH1uU4,47118
 beswarm/aient/src/aient/models/claude.py,sha256=JezghW7y0brl4Y5qiSHvnYR5prQCFywX4RViHt39pGI,26037
 beswarm/aient/src/aient/models/duckduckgo.py,sha256=1l7vYCs9SG5SWPCbcl7q6pCcB5AUF_r-a4l9frz3Ogo,8115
 beswarm/aient/src/aient/models/gemini.py,sha256=chGLc-8G_DAOxr10HPoOhvVFW1RvMgHd6mt--VyAW98,14730
@@ -126,7 +127,7 @@ beswarm/queries/tree-sitter-languages/rust-tags.scm,sha256=9ljM1nzhfPs_ZTRw7cr2P
 beswarm/queries/tree-sitter-languages/scala-tags.scm,sha256=UxQjz80JIrrJ7Pm56uUnQyThfmQNvwk7aQzPNypB-Ao,1761
 beswarm/queries/tree-sitter-languages/typescript-tags.scm,sha256=OMdCeedPiA24ky82DpgTMKXK_l2ySTuF2zrQ2fJAi9E,1253
 beswarm/tools/__init__.py,sha256=Q24EPaPvYzuwxmshvvCQR0-bXRllzxmMFiya8ZL5YEI,1472
-beswarm/tools/click.py,sha256=I62GF-bzcoNzhfu3DeWdiA88Sd_6gMFihrQSwFj4nks,20795
+beswarm/tools/click.py,sha256=7g6x1X7ffTInGWp7112KS-MAQ5-8wa1Ze2sIipUIbjc,20884
 beswarm/tools/completion.py,sha256=BHMMZeDCNEnaoOuwOoJjkuU_idwDB43mD1bT63p_waU,590
 beswarm/tools/edit_file.py,sha256=iwWl7a8sTVq4vj0e1ny3H6UGcHfYnxALRGcLuk5hZS8,9155
 beswarm/tools/planner.py,sha256=lguBCS6kpwNPoXQvqH-WySabVubT82iyWOkJnjt6dXw,1265
@@ -134,10 +135,10 @@ beswarm/tools/repomap.py,sha256=YsTPq5MXfn_Ds5begcvHDnY_Xp2d4jH-xmWqNMHnNHY,4523
 beswarm/tools/request_input.py,sha256=gXNAJPOJektMqxJVyzNTFOeMQ7xUkO-wWMYH-r2Rdwk,942
 beswarm/tools/screenshot.py,sha256=u6t8FCgW5YHJ_Oc4coo8e0F3wTusWE_-H8dFh1rBq9Q,1011
 beswarm/tools/search_arxiv.py,sha256=caVIUOzMhFu-r_gVgJZrH2EO9xI5iV_qLAg0b3Ie9Xg,8095
-beswarm/tools/search_web.py,sha256=ybbdbJq80plooXLMiyjAMOSCEyZJ0hquGUpabBhfFx0,16195
+beswarm/tools/search_web.py,sha256=LhgXOSHL9fwxg5T2AAOV4TTJkcJVLogsfRJW2faPvDE,16147
 beswarm/tools/taskmanager.py,sha256=n7G6cH96Tcz57MfiOffISMMAfUtr49_uikkeoCDCeRg,12940
-beswarm/tools/worker.py,sha256=s6tN4JhA07qzTlP7xWiB0MjnBIJ6XSrtlJTA_RqG1_A,23539
-beswarm-0.2.35.dist-info/METADATA,sha256=cfwB-Cq_qEDmpCNZyzrFNjDtnikxw8IqmrKf0MZd_Yk,3878
-beswarm-0.2.35.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-beswarm-0.2.35.dist-info/top_level.txt,sha256=pJw4O87wvt5882smuSO6DfByJz7FJ8SxxT8h9fHCmpo,8
-beswarm-0.2.35.dist-info/RECORD,,
+beswarm/tools/worker.py,sha256=4zQfVzUmXXnTTvyBPcnyVNM7TV6jzqw6P377ZmORIq0,18567
+beswarm-0.2.36.dist-info/METADATA,sha256=NoyWwKf45i-ic0OI2YnCCdGi-q-07Uh3QfUkMLszfYQ,3878
+beswarm-0.2.36.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+beswarm-0.2.36.dist-info/top_level.txt,sha256=pJw4O87wvt5882smuSO6DfByJz7FJ8SxxT8h9fHCmpo,8
+beswarm-0.2.36.dist-info/RECORD,,