beswarm 0.2.81__py3-none-any.whl → 0.2.83__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- beswarm/agents/planact.py +24 -71
- beswarm/aient/aient/architext/architext/__init__.py +1 -0
- beswarm/aient/aient/architext/architext/core.py +694 -0
- beswarm/aient/aient/architext/test/openai_client.py +146 -0
- beswarm/aient/aient/architext/test/test.py +1410 -0
- beswarm/aient/aient/architext/test/test_save_load.py +93 -0
- beswarm/aient/aient/models/chatgpt.py +39 -111
- beswarm/prompt.py +44 -17
- {beswarm-0.2.81.dist-info → beswarm-0.2.83.dist-info}/METADATA +1 -1
- {beswarm-0.2.81.dist-info → beswarm-0.2.83.dist-info}/RECORD +12 -7
- {beswarm-0.2.81.dist-info → beswarm-0.2.83.dist-info}/WHEEL +0 -0
- {beswarm-0.2.81.dist-info → beswarm-0.2.83.dist-info}/top_level.txt +0 -0
beswarm/aient/aient/architext/test/test_save_load.py
ADDED
@@ -0,0 +1,93 @@
+import asyncio
+import os
+import sys
+
+# Add the project root to the Python path
+sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
+
+from architext.core import (
+    Messages,
+    SystemMessage,
+    UserMessage,
+    AssistantMessage,
+    ToolCalls,
+    ToolResults,
+    Texts,
+    Tools,
+    Files,
+)
+
+async def main():
+    """
+    Tests the save and load functionality of the Messages class using pickle.
+    """
+    print("--- Test Save/Load (pickle) ---")
+
+    # 1. Create an initial Messages object
+    messages = Messages(
+        SystemMessage(Texts("system_prompt", "You are a helpful assistant.")),
+        UserMessage(Texts("user_input", "What is the weather in Shanghai?")),
+        AssistantMessage(Texts("thought", "I should call a tool for this.")),
+        ToolCalls(tool_calls=[{
+            'id': 'call_1234',
+            'type': 'function',
+            'function': {'name': 'get_weather', 'arguments': '{"location": "Shanghai"}'}
+        }]),
+        ToolResults(tool_call_id="call_1234", content='{"temperature": "25°C"}')
+    )
+
+    # Add a message with Files provider
+    files_provider = Files()
+    files_provider.update("test.txt", "This is a test file.")
+    messages.append(UserMessage(files_provider))
+
+    # Render the original messages
+    original_render = await messages.render_latest()
+    print("Original Messages Render:")
+    print(original_render)
+
+    # 2. Save the messages to a file
+    file_path = "test_messages.pkl"
+    messages.save(file_path)
+    print(f"\nMessages saved to {file_path}")
+
+    assert os.path.exists(file_path), "Save file was not created."
+
+    # 3. Load the messages from the file
+    loaded_messages = Messages.load(file_path)
+    print("\nMessages loaded from file.")
+
+    assert loaded_messages is not None, "Loaded messages should not be None."
+
+    # Render the loaded messages
+    loaded_render = await loaded_messages.render_latest()
+    print("\nLoaded Messages Render:")
+    print(loaded_render)
+
+    # 4. Compare the original and loaded content
+    assert original_render == loaded_render, "Rendered content of original and loaded messages do not match."
+    print("\n✅ Assertion passed: Original and loaded message renders are identical.")
+
+    # 5. Check if the loaded object retains its class structure and methods
+    print(f"\nType of loaded object: {type(loaded_messages)}")
+    assert isinstance(loaded_messages, Messages), "Loaded object is not a Messages instance."
+
+    # Test pop functionality on the loaded object
+    popped_item = loaded_messages.pop(0)
+    assert isinstance(popped_item, SystemMessage), "Popped item is not a SystemMessage."
+    print(f"Popped first message: {popped_item}")
+
+    popped_render = await loaded_messages.render_latest()
+    print("\nRender after popping first message from loaded object:")
+    print(popped_render)
+    assert len(popped_render) == len(original_render) - 1, "Popping a message did not reduce the message count."
+    print("✅ Assertion passed: Pop functionality works on the loaded object.")
+
+    # 6. Clean up the test file
+    os.remove(file_path)
+    print(f"\nCleaned up {file_path}.")
+
+    print("\n--- Test Completed Successfully ---")
+
+if __name__ == "__main__":
+    asyncio.run(main())
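The test above pins down the persistence contract: a Messages object pickled to disk must render identically after loading. A minimal sketch of that round trip, assuming the architext package is importable and that `Messages.save`, `Messages.load`, and `render_latest` behave exactly as exercised above (the file name `snapshot.pkl` is illustrative):

```python
import asyncio
from architext.core import Messages, SystemMessage, UserMessage

async def round_trip():
    # Build a small history, snapshot it, and restore it.
    messages = Messages(
        SystemMessage("You are a helpful assistant."),
        UserMessage("Hello!"),
    )
    messages.save("snapshot.pkl")             # pickle-based save, per the test above
    restored = Messages.load("snapshot.pkl")  # classmethod load, per the test above
    # The persistence contract: both objects render to the same message list.
    assert await messages.render_latest() == await restored.render_latest()

asyncio.run(round_trip())
```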
beswarm/aient/aient/models/chatgpt.py
CHANGED
@@ -14,6 +14,7 @@ from ..plugins import PLUGINS, get_tools_result_async, function_call_list, updat
 from ..utils.scripts import safe_get, async_generator_to_sync, parse_function_xml, parse_continuous_json, convert_functions_to_xml, remove_xml_tags_and_content
 from ..core.request import prepare_request_payload
 from ..core.response import fetch_response_stream, fetch_response
+from ..architext.architext import Messages, SystemMessage, UserMessage, AssistantMessage, ToolCalls, ToolResults, Texts, RoleMessage, Images, Files
 
 class APITimeoutError(Exception):
     """Custom exception for API timeout errors."""
@@ -88,7 +89,6 @@ class chatgpt(BaseLLM):
         print_log: bool = False,
         tools: Optional[Union[list, str, Callable]] = [],
         function_call_max_loop: int = 3,
-        cut_history_by_function_name: str = "",
         cache_messages: list = None,
         logger: logging.Logger = None,
         check_done: bool = False,
@@ -97,20 +97,13 @@
         Initialize Chatbot with API key (from https://platform.openai.com/account/api-keys)
         """
         super().__init__(api_key, engine, api_url, system_prompt, proxy, timeout, max_tokens, temperature, top_p, presence_penalty, frequency_penalty, reply_count, truncate_limit, use_plugins=use_plugins, print_log=print_log)
-        self.conversation: dict[str, list] = {
-            "default": [
-                {
-                    "role": "system",
-                    "content": self.system_prompt,
-                },
-            ],
+        self.conversation: dict[str, Messages] = {
+            "default": Messages(SystemMessage(self.system_prompt)),
         }
         if cache_messages:
             self.conversation["default"] = cache_messages
         self.function_calls_counter = {}
         self.function_call_max_loop = function_call_max_loop
-        self.cut_history_by_function_name = cut_history_by_function_name
-        self.latest_file_content = {}
         self.check_done = check_done
 
         if logger:
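The effect of this hunk is that chatgpt no longer stores raw OpenAI-style dicts: each conversation is an architext Messages container seeded with a SystemMessage, and rendering back to dicts is deferred to request time. A hedged before/after sketch, with the old dict shape written out for contrast (the variable names here are illustrative):

```python
from architext.core import Messages, SystemMessage

system_prompt = "You are a helpful assistant."  # illustrative value

# 0.2.81: a plain list of role/content dicts, mutated by hand.
conversation_old = {
    "default": [
        {"role": "system", "content": system_prompt},
    ],
}

# 0.2.83: a Messages container; conversion to OpenAI-style dicts
# happens later, via render_latest(), when the payload is built.
conversation_new = {
    "default": Messages(SystemMessage(system_prompt)),
}
```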
@@ -164,95 +157,48 @@
         if convo_id not in self.conversation:
             self.reset(convo_id=convo_id)
         if function_name == "" and message:
-            self.conversation[convo_id].append({"role": role, "content": message})
+            self.conversation[convo_id].append(RoleMessage(role, message))
         elif function_name != "" and message:
-            # Delete all history after cut_history_by_function_name
-            if function_name == self.cut_history_by_function_name:
-                matching_message = next(filter(lambda x: safe_get(x, "tool_calls", 0, "function", "name", default="") == 'get_next_pdf', self.conversation[convo_id]), None)
-                if matching_message is not None:
-                    self.conversation[convo_id] = self.conversation[convo_id][:self.conversation[convo_id].index(matching_message)]
-
             if not (all(value == False for value in self.plugins.values()) or self.use_plugins == False):
-                self.conversation[convo_id].append({
-                    "role": "assistant",
-                    "tool_calls": [
-                        {
-                            "id": function_call_id,
-                            "type": "function",
-                            "function": {
-                                "name": function_name,
-                                "arguments": function_arguments,
-                            },
-                        }
-                    ],
-                })
-                self.conversation[convo_id].append({"role": role, "tool_call_id": function_call_id, "content": message})
+                tool_calls = [
+                    {
+                        "id": function_call_id,
+                        "type": "function",
+                        "function": {
+                            "name": function_name,
+                            "arguments": function_arguments,
+                        },
+                    }
+                ]
+                self.conversation[convo_id].append(ToolCalls(tool_calls))
+                self.conversation[convo_id].append(ToolResults(tool_call_id=function_call_id, content=message))
             else:
                 last_user_message = self.conversation[convo_id][-1]["content"]
                 if last_user_message != message:
-                    image_message_list = []
+                    image_message_list = UserMessage()
                     if isinstance(function_arguments, str):
                         functions_list = json.loads(function_arguments)
                     else:
                         functions_list = function_arguments
                     for tool_info in functions_list:
                         if tool_info.get("base64_image"):
-                            image_message_list.append({ ... })
-                    self.conversation[convo_id].append({"role": "assistant", "content": convert_functions_to_xml(function_arguments)})
+                            image_message_list.extend([
+                                safe_get(tool_info, "parameter", "image_path", default="") + " image:",
+                                Images(tool_info["base64_image"]),
+                            ])
+                    self.conversation[convo_id].append(AssistantMessage(convert_functions_to_xml(function_arguments)))
                     if image_message_list:
-                        self.conversation[convo_id].append({ ... })
+                        self.conversation[convo_id].append(UserMessage(message + image_message_list))
                     else:
-                        self.conversation[convo_id].append({ ... })
+                        self.conversation[convo_id].append(UserMessage(message))
                 else:
-                    self.conversation[convo_id].append({ ... })
+                    self.conversation[convo_id].append(AssistantMessage("我已经执行过这个工具了,接下来我需要做什么?"))
         else:
             self.logger.error(f"error: add_to_conversation message is None or empty, role: {role}, function_name: {function_name}, message: {message}")
 
-        conversation_len = len(self.conversation[convo_id]) - 1
-        message_index = 0
         # if self.print_log:
         #     replaced_text = json.loads(re.sub(r';base64,([A-Za-z0-9+/=]+)', ';base64,***', json.dumps(self.conversation[convo_id])))
         #     self.logger.info(json.dumps(replaced_text, indent=4, ensure_ascii=False))
-        while message_index < conversation_len:
-            if self.conversation[convo_id][message_index]["role"] == self.conversation[convo_id][message_index + 1]["role"]:
-                if self.conversation[convo_id][message_index].get("content") and self.conversation[convo_id][message_index + 1].get("content") \
-                and self.conversation[convo_id][message_index].get("content") != self.conversation[convo_id][message_index + 1].get("content"):
-                    if type(self.conversation[convo_id][message_index + 1]["content"]) == str \
-                    and type(self.conversation[convo_id][message_index]["content"]) == list:
-                        self.conversation[convo_id][message_index + 1]["content"] = [{"type": "text", "text": self.conversation[convo_id][message_index + 1]["content"]}]
-                    if type(self.conversation[convo_id][message_index]["content"]) == str \
-                    and type(self.conversation[convo_id][message_index + 1]["content"]) == list:
-                        self.conversation[convo_id][message_index]["content"] = [{"type": "text", "text": self.conversation[convo_id][message_index]["content"]}]
-                    if type(self.conversation[convo_id][message_index]["content"]) == dict \
-                    and type(self.conversation[convo_id][message_index + 1]["content"]) == str:
-                        self.conversation[convo_id][message_index]["content"] = [self.conversation[convo_id][message_index]["content"]]
-                        self.conversation[convo_id][message_index + 1]["content"] = [{"type": "text", "text": self.conversation[convo_id][message_index + 1]["content"]}]
-                    if type(self.conversation[convo_id][message_index]["content"]) == dict \
-                    and type(self.conversation[convo_id][message_index + 1]["content"]) == list:
-                        self.conversation[convo_id][message_index]["content"] = [self.conversation[convo_id][message_index]["content"]]
-                    if type(self.conversation[convo_id][message_index]["content"]) == dict \
-                    and type(self.conversation[convo_id][message_index + 1]["content"]) == dict:
-                        self.conversation[convo_id][message_index]["content"] = [self.conversation[convo_id][message_index]["content"]]
-                        self.conversation[convo_id][message_index + 1]["content"] = [self.conversation[convo_id][message_index + 1]["content"]]
-                    if type(self.conversation[convo_id][message_index]["content"]) == list \
-                    and type(self.conversation[convo_id][message_index + 1]["content"]) == dict:
-                        self.conversation[convo_id][message_index + 1]["content"] = [self.conversation[convo_id][message_index + 1]["content"]]
-                    if type(self.conversation[convo_id][message_index]["content"]) == str \
-                    and type(self.conversation[convo_id][message_index + 1]["content"]) == str \
-                    and self.conversation[convo_id][message_index].get("content").endswith(self.conversation[convo_id][message_index + 1].get("content")):
-                        self.conversation[convo_id][message_index + 1]["content"] = ""
-                    self.conversation[convo_id][message_index]["content"] += self.conversation[convo_id][message_index + 1]["content"]
-                self.conversation[convo_id].pop(message_index + 1)
-                conversation_len = conversation_len - 1
-            else:
-                message_index = message_index + 1
 
         history_len = len(self.conversation[convo_id])
 
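add_to_conversation now delegates message-shape bookkeeping to architext: a tool invocation becomes a ToolCalls entry followed by a ToolResults entry, which replaces both the old hand-built dicts and the entire role-merging while loop that was deleted above. A sketch of the new append path, assuming the constructor signatures shown in this hunk (the id and tool values are illustrative):

```python
from architext.core import Messages, SystemMessage, ToolCalls, ToolResults

conversation = Messages(SystemMessage("You are a helpful assistant."))

# One tool invocation becomes a ToolCalls entry plus a ToolResults entry,
# mirroring the new branch in add_to_conversation.
conversation.append(ToolCalls([{
    "id": "call_0001",  # illustrative id
    "type": "function",
    "function": {"name": "read_file", "arguments": '{"file_path": "a.txt"}'},
}]))
conversation.append(ToolResults(tool_call_id="call_0001", content="file contents here"))
```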
@@ -290,27 +236,6 @@
         else:
             break
 
-    def get_latest_file_content(self) -> str:
-        """
-        Get the latest file content
-        """
-        result = ""
-        if self.latest_file_content:
-            for file_path, content in self.latest_file_content.items():
-                result += (
-                    "<file>"
-                    f"<file_path>{file_path}</file_path>"
-                    f"<file_content>{content}</file_content>"
-                    "</file>\n\n"
-                )
-        if result:
-            result = (
-                "<latest_file_content>"
-                f"{result}"
-                "</latest_file_content>"
-            )
-        return result
-
     async def get_post_body(
         self,
         prompt: str,
@@ -321,8 +246,6 @@
         stream: bool = True,
         **kwargs,
     ):
-        self.conversation[convo_id][0] = {"role": "system","content": self.system_prompt + "\n\n" + self.get_latest_file_content()}
-
         # Build provider info
         provider = {
             "provider": "openai",
@@ -333,13 +256,19 @@
             "image": True
         }
 
+        done_message = self.conversation[convo_id].provider("done")
+        if self.check_done and done_message:
+            done_message.visible = False
+            if self.conversation[convo_id][-1][-1].name == "done":
+                self.conversation[convo_id][-1][-1].visible = True
+
         # Build request data
         request_data = {
             "model": model or self.engine,
-            "messages": ...,
+            "messages": await self.conversation[convo_id].render_latest() if pass_history else Messages(
+                SystemMessage(self.system_prompt, self.conversation[convo_id].provider("files")),
+                UserMessage(prompt)
+            ),
             "stream": stream,
             "temperature": kwargs.get("temperature", self.temperature)
         }
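Two ideas carry this hunk: named providers can be toggled visible or invisible without deleting history, and the request payload is produced by rendering the container rather than by mutating message dicts in place. A sketch of the visibility toggle, assuming `provider()`, `.visible`, and `.name` work as the lines above show (this is a paraphrase of the hunk, not a separate API):

```python
# Hide every "done" marker except the one on the newest message, then render.
# Assumes `conversation` is an architext Messages instance whose elements
# expose provider lookup by name and a `visible` flag, as in the hunk above.
async def build_messages(conversation, check_done: bool):
    done_provider = conversation.provider("done")
    if check_done and done_provider:
        done_provider.visible = False            # hide stale markers globally
        if conversation[-1][-1].name == "done":
            conversation[-1][-1].visible = True  # re-show only the latest one
    return await conversation.render_latest()
```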
@@ -655,7 +584,7 @@
                     else:
                         yield chunk
                 if tool_name == "read_file" and "<tool_error>" not in tool_response:
-                    self.latest_file_content[tool_info['parameter']["file_path"]] = tool_response
+                    self.conversation[convo_id].provider("files").update(tool_info['parameter']["file_path"], tool_response)
                     all_responses.append(f"[{tool_name}({tool_args}) Result]:\n\nRead file successfully! The file content has been updated in the tag <latest_file_content>.")
                 elif tool_name == "write_to_file" and "<tool_error>" not in tool_response:
                     all_responses.append(f"[{tool_name} Result]:\n\n{tool_response}")
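read_file results now flow through the shared Files provider instead of a latest_file_content dict, so re-reading a file overwrites one slot keyed by path rather than appending a second copy to history. A sketch of that deduplication property, assuming `Files().update()` keys by path as the test file earlier in this diff does (the paths and contents are illustrative):

```python
from architext.core import Files, Messages, SystemMessage

files = Files()
conversation = Messages(SystemMessage("system prompt", files))

# Two reads of the same path: the second update replaces the first,
# so the rendered history carries only the latest content.
files.update("src/app.py", "print('v1')")
files.update("src/app.py", "print('v2')")  # overwrites, does not append
```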
@@ -998,9 +927,8 @@
         Reset the conversation
         """
         self.system_prompt = system_prompt or self.system_prompt
-        self.conversation[convo_id] = [
-            { ... },
-        ]
+        self.conversation[convo_id] = Messages(
+            SystemMessage(Texts("system_prompt", self.system_prompt), self.conversation[convo_id].provider("files")),
+        )
         self.tokens_usage[convo_id] = 0
         self.current_tokens[convo_id] = 0
beswarm/prompt.py
CHANGED
@@ -1,4 +1,34 @@
-worker_system_prompt = """
+import os
+import platform
+from datetime import datetime
+from typing import Optional, Union, Callable
+
+from .aient.aient.architext.architext import (
+    Messages, SystemMessage, UserMessage, AssistantMessage, ToolCalls, ToolResults, Texts, RoleMessage, Images, Files, Tools
+)
+from .core import kgm
+
+class Goal(Texts):
+    def __init__(self, text: Optional[Union[str, Callable[[], str]]] = None, name: str = "goal"):
+        super().__init__(text=text, name=name)
+
+    async def render(self) -> Optional[str]:
+        content = await super().render()
+        if content is None:
+            return None
+        return f"<goal>{content}</goal>"
+
+class KnowledgeGraph(Texts):
+    def __init__(self, text: Optional[Union[str, Callable[[], str]]] = None, name: str = "knowledge_graph"):
+        super().__init__(text=text, name=name)
+
+    async def render(self) -> Optional[str]:
+        content = await super().render()
+        if content is None:
+            return None
+        return f"<knowledge_graph>{content}</knowledge_graph>"
+
+worker_system_prompt = SystemMessage(f"""
 <communication>
 1. Format your responses in markdown. Use backticks to format file, directory, function, and class names.
 2. Your message **must** end with [done] to signify the end of your output.
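Goal and KnowledgeGraph show the extension pattern this file now relies on: subclass Texts, override the async `render()`, wrap the parent's output in a tag, and return None to drop the block entirely when there is no content. A sketch of the same pattern for a hypothetical TaskStatus provider (the class name and tag are illustrative, not part of the package; the import path follows the test file's `architext.core` layout):

```python
from typing import Callable, Optional, Union

from architext.core import Texts

class TaskStatus(Texts):
    """Hypothetical provider following the Goal/KnowledgeGraph pattern."""
    def __init__(self, text: Optional[Union[str, Callable[[], str]]] = None, name: str = "task_status"):
        super().__init__(text=text, name=name)

    async def render(self) -> Optional[str]:
        content = await super().render()
        if content is None:
            return None  # omit the block entirely when there is nothing to show
        return f"<task_status>{content}</task_status>"
```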
@@ -34,10 +64,10 @@ When making code changes, NEVER output code to the USER, unless requested. Inste
 </calling_external_apis>
 
 <user_info>
-The user's OS version is {...}
+The user's OS version is {Texts(lambda: platform.platform())}. The absolute path of the user's workspace is {Texts(name="workspace_path")} which is also the project root directory. The user's shell is {Texts(lambda: os.getenv('SHELL', 'Unknown'))}.
 请在指令中使用绝对路径。所有操作必须基于工作目录。禁止在工作目录之外进行任何操作。你当前运行目录不一定就是工作目录。禁止默认你当前就在工作目录。
 
-当前时间:{...}
+当前时间:{Texts(lambda: datetime.now().strftime("%Y-%m-%d %H:%M:%S"))}
 </user_info>
 
 <instructions for tool use>
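Note that the prompt is now an f-string, so each `{Texts(lambda: ...)}` placeholder embeds a provider object at import time whose callable is presumably resolved when the message renders; the timestamp placeholder therefore yields the request-time clock rather than the import-time one. A sketch of that distinction, assuming Texts accepts a zero-argument callable as shown in this hunk and that SystemMessage resolves embedded Texts placeholders at render time (how that resolution works internally is not shown in this diff):

```python
from datetime import datetime

from architext.core import SystemMessage, Texts

# Evaluated once, at import: the timestamp is frozen into the string.
frozen = SystemMessage(f"当前时间:{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")

# Deferred: the lambda travels with the message and is presumably called
# on each render, so every request sees a fresh timestamp.
lazy = SystemMessage(f"""当前时间:{Texts(lambda: datetime.now().strftime("%Y-%m-%d %H:%M:%S"))}""")
```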
@@ -92,12 +122,10 @@ Always adhere to this format for all tool uses to ensure proper parsing and exec
 
 You can use tools as follows:
 
-<tools>
-{tools_list}
-</tools>
-"""
+{Tools()}
+""")
 
-instruction_system_prompt = """
+instruction_system_prompt = SystemMessage(f"""
 你是一个指令生成器,负责指导另一个智能体完成任务。
 你需要分析工作智能体的对话历史,并生成下一步指令。
 根据任务目标和当前进度,提供清晰明确的指令。
@@ -106,11 +134,11 @@ instruction_system_prompt = """
 
 你需要称呼工作智能体为"你",指令禁止使用疑问句,必须使用祈使句。
 所有回复必须使用中文。
-运行工作智能体的系统信息:{...}
-你的工作目录为:{workspace_path},请在指令中使用绝对路径。所有操作必须基于工作目录。
+运行工作智能体的系统信息:{Texts(lambda: platform.platform())}
+你的工作目录为:{Texts(name="workspace_path")},请在指令中使用绝对路径。所有操作必须基于工作目录。
 除了任务目标里面明确提到的目录,禁止在工作目录之外进行任何操作。你当前运行目录不一定就是工作目录。禁止默认你当前就在工作目录。
 
-当前时间:{...}
+当前时间:{Texts(lambda: datetime.now().strftime("%Y-%m-%d %H:%M:%S"))}
 
 你的输出必须符合以下步骤,以生成最终指令:
@@ -212,13 +240,12 @@ git clone https://github.com/username/project-name.git
 </excute_command>
 
 工作智能体仅可以使用如下工具:
-<tools>
-{tools_list}
-</tools>
+{Tools()}
 
-{...}
-"""
+{Files()}
 
+{KnowledgeGraph(name="knowledge_graph", text=lambda: kgm.render_tree())}
+<work_agent_conversation_start>""")
 
 definition = """
 1. 输入分析
@@ -408,4 +435,4 @@ The user's OS version is win32 10.0.22631. The absolute path of the user's works
 <tools>
 [{"type": "function", "function": {"name": "codebase_search", "description": "Find snippets of code from the codebase most relevant to the search query.\nThis is a semantic search tool, so the query should ask for something semantically matching what is needed.\nIf it makes sense to only search in particular directories, please specify them in the target_directories field.\nUnless there is a clear reason to use your own search query, please just reuse the user's exact query with their wording.\nTheir exact wording/phrasing can often be helpful for the semantic search query. Keeping the same exact question format can also be helpful.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The search query to find relevant code. You should reuse the user's exact query/most recent message with their wording unless there is a clear reason not to."}, "target_directories": {"type": "array", "items": {"type": "string"}, "description": "Glob patterns for directories to search over"}, "explanation": {"type": "string", "description": "One sentence explanation as to why this tool is being used, and how it contributes to the goal."}}, "required": ["query"]}}}, {"type": "function", "function": {"name": "read_file", "description": "Read the contents of a file. the output of this tool call will be the 1-indexed file contents from start_line_one_indexed to end_line_one_indexed_inclusive, together with a summary of the lines outside start_line_one_indexed and end_line_one_indexed_inclusive.\nNote that this call can view at most 250 lines at a time.\n\nWhen using this tool to gather information, it's your responsibility to ensure you have the COMPLETE context. Specifically, each time you call this command you should:\n1) Assess if the contents you viewed are sufficient to proceed with your task.\n2) Take note of where there are lines not shown.\n3) If the file contents you have viewed are insufficient, and you suspect they may be in lines not shown, proactively call the tool again to view those lines.\n4) When in doubt, call this tool again to gather more information. Remember that partial file views may miss critical dependencies, imports, or functionality.\n\nIn some cases, if reading a range of lines is not enough, you may choose to read the entire file.\nReading entire files is often wasteful and slow, especially for large files (i.e. more than a few hundred lines). So you should use this option sparingly.\nReading the entire file is not allowed in most cases. You are only allowed to read the entire file if it has been edited or manually attached to the conversation by the user.", "parameters": {"type": "object", "properties": {"relative_workspace_path": {"type": "string", "description": "The path of the file to read, relative to the workspace root."}, "should_read_entire_file": {"type": "boolean", "description": "Whether to read the entire file. Defaults to false."}, "start_line_one_indexed": {"type": "integer", "description": "The one-indexed line number to start reading from (inclusive)."}, "end_line_one_indexed_inclusive": {"type": "integer", "description": "The one-indexed line number to end reading at (inclusive)."}, "explanation": {"type": "string", "description": "One sentence explanation as to why this tool is being used, and how it contributes to the goal."}}, "required": ["relative_workspace_path", "should_read_entire_file", "start_line_one_indexed", "end_line_one_indexed_inclusive"]}}}, {"type": "function", "function": {"name": "run_terminal_cmd", "description": "Propose a command to run on behalf of the user.\nThe user may reject it if it is not to their liking, or may modify the command before approving it. If they do change it, take those changes into account.\nThe actual command will not execute until the user approves it. The user may not approve it immediately. Do not assume the command has started running.\nIf the step is waiting for user approval, it has not started running.\nAdhere to the following guidelines:\n1. Based on the contents of the conversation, you will be told if you are in the same shell as a previous step or a different shell.\n2. If in a new shell, you should `cd` to the appropriate directory and do necessary setup in addition to running the command.\n3. If in the same shell, the state will persist (eg. if you cd in one step, that cwd is persisted next time you invoke this tool).\n4. For ANY commands that would use a pager or require user interaction, you should append ` | cat` to the command (or whatever is appropriate). Otherwise, the command will break. You MUST do this for: git, less, head, tail, more, etc.\n5. For commands that are long running/expected to run indefinitely until interruption, please run them in the background. To run jobs in the background, set `is_background` to true rather than changing the details of the command.\n6. Dont include any newlines in the command.", "parameters": {"type": "object", "properties": {"command": {"type": "string", "description": "The terminal command to execute"}, "is_background": {"type": "boolean", "description": "Whether the command should be run in the background"}, "explanation": {"type": "string", "description": "One sentence explanation as to why this command needs to be run and how it contributes to the goal."}, "require_user_approval": {"type": "boolean", "description": "Whether the user must approve the command before it is executed. Only set this to false if the command is safe and if it matches the user's requirements for commands that should be executed automatically."}}, "required": ["command", "is_background", "require_user_approval"]}}}, {"type": "function", "function": {"name": "list_dir", "description": "List the contents of a directory.", "parameters": {"type": "object", "properties": {"relative_workspace_path": {"type": "string", "description": "Path to list contents of, relative to the workspace root."}, "explanation": {"type": "string", "description": "One sentence explanation as to why this tool is being used, and how it contributes to the goal."}}, "required": ["relative_workspace_path"]}}}, {"type": "function", "function": {"name": "grep_search", "description": "Fast text-based regex search that finds exact pattern matches within files or directories, utilizing the ripgrep command for efficient searching.\nTo avoid overwhelming output, the results are capped at 50 matches.\nUse the include or exclude patterns to filter the search scope by file type or specific paths.\nThis is best for finding exact text matches or regex patterns. This is preferred over semantic search when we know the exact symbol/function name/etc. to search in some set of directories/file types.", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "The regex pattern to search for"}, "case_sensitive": {"type": "boolean", "description": "Whether the search should be case sensitive"}, "include_pattern": {"type": "string", "description": "Glob pattern for files to include (e.g. '*.ts' for TypeScript files)"}, "exclude_pattern": {"type": "string", "description": "Glob pattern for files to exclude"}, "explanation": {"type": "string", "description": "One sentence explanation as to why this tool is being used, and how it contributes to the goal."}}, "required": ["query"]}}}, {"type": "function", "function": {"name": "edit_file", "description": "Use this tool to propose an edit to an existing file.\n\nThis will be read by a less intelligent model, which will quickly apply the edit. You should make it clear what the edit is, while also minimizing the unchanged code you write.\nWhen writing the edit, you should specify each edit in sequence, with the special comment `// ... existing code ...` to represent unchanged code in between edited lines.\n\nFor example:\n\n```\n// ... existing code ...\nFIRST_EDIT\n// ... existing code ...\nSECOND_EDIT\n// ... existing code ...\nTHIRD_EDIT\n// ... existing code ...\n```\n\nYou should still bias towards repeating as few lines of the original file as possible to convey the change.\nBut, each edit should contain sufficient context of unchanged lines around the code you're editing to resolve ambiguity.\nDO NOT omit spans of pre-existing code (or comments) without using the `// ... existing code ...` comment to indicate its absence. If you omit the existing code comment, the model may inadvertently delete these lines.\nMake sure it is clear what the edit should be, and where it should be applied.\n\nYou should specify the following arguments before the others: [target_file]", "parameters": {"type": "object", "properties": {"target_file": {"type": "string", "description": "The target file to modify. Always specify the target file as the first argument and use the relative path in the workspace of the file to edit"}, "instructions": {"type": "string", "description": "A single sentence instruction describing what you am going to do for the sketched edit. This is used to assist the less intelligent model in applying the edit. Please use the first person to describe what you am going to do. Dont repeat what you have said previously in normal messages. And use it to disambiguate uncertainty in the edit."}, "code_edit": {"type": "string", "description": "Specify ONLY the precise lines of code that you wish to edit. **NEVER specify or write out unchanged code**. Instead, represent all unchanged code using the comment of the language you're editing in - example: `// ... existing code ...`"}}, "required": ["target_file", "instructions", "code_edit"]}}}, {"type": "function", "function": {"name": "delete_file", "description": "Deletes a file at the specified path. The operation will fail gracefully if:\n - The file doesn't exist\n - The operation is rejected for security reasons\n - The file cannot be deleted", "parameters": {"type": "object", "properties": {"target_file": {"type": "string", "description": "The path of the file to delete, relative to the workspace root."}, "explanation": {"type": "string", "description": "One sentence explanation as to why this tool is being used, and how it contributes to the goal."}}, "required": ["target_file"]}}}]
 </tools>
-"""
+"""
{beswarm-0.2.81.dist-info → beswarm-0.2.83.dist-info}/RECORD
CHANGED
@@ -2,12 +2,17 @@ beswarm/__init__.py,sha256=HZjUOJtZR5QhMuDbq-wukQQn1VrBusNWai_ysGo-VVI,20
 beswarm/broker.py,sha256=64Y-djrKYaZfBQ8obwHOmr921QgZeu9BtScZWaYLfDo,9887
 beswarm/core.py,sha256=jKStpTTOu6Ojond_i-okTZLrvSAJ4yUoTZwtDfFTiRs,553
 beswarm/knowledge_graph.py,sha256=oiOMknAJzGrOHc2AyQgvrCcZAkGLhFnsnvSBdfFBWMw,14831
-beswarm/prompt.py,sha256=...
+beswarm/prompt.py,sha256=MCv9H20-2l7acG4lNuPRFJ8MO2-WTKwXL5GqyLT_Ns8,34131
 beswarm/taskmanager.py,sha256=vMmcoZ4FlNvjEliRkv3AniPji50NcY4Q1_2HETzR0DU,12226
 beswarm/utils.py,sha256=0J-b38P5QGT-A_38co7FjzaUNJykaskI7mbbcQ4w_68,8215
 beswarm/agents/chatgroup.py,sha256=PzrmRcDKAbB7cxL16nMod_CzPosDV6bfTmXxQVuv-AQ,12012
-beswarm/agents/planact.py,sha256=...
+beswarm/agents/planact.py,sha256=8ISGuCFh17ROh5SAF_byJkknp88pr1O_URHzyX9ShRI,17958
 beswarm/aient/aient/__init__.py,sha256=SRfF7oDVlOOAi6nGKiJIUK6B_arqYLO9iSMp-2IZZps,21
+beswarm/aient/aient/architext/architext/__init__.py,sha256=79Ih1151rfcqZdr7F8HSZSTs_iT2SKd1xCkehMsXeXs,19
+beswarm/aient/aient/architext/architext/core.py,sha256=lMXqjpSr7J80xuuWv5IHy04kcHbTwHWtRoIcsr6MR3o,28433
+beswarm/aient/aient/architext/test/openai_client.py,sha256=Dqtbmubv6vwF8uBqcayG0kbsiO65of7sgU2-DRBi-UM,4590
+beswarm/aient/aient/architext/test/test.py,sha256=XOSbDD-hlBnZiu5-500T-sy0m61zsq2SqQgFIoK6TJ0,61137
+beswarm/aient/aient/architext/test/test_save_load.py,sha256=o8DqH6gDYZkFkQy-a7blqLtJTRj5e4a-Lil48pJ0V3g,3260
 beswarm/aient/aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
 beswarm/aient/aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
 beswarm/aient/aient/core/models.py,sha256=KMlCRLjtq1wQHZTJGqnbWhPS2cHq6eLdnk7peKDrzR8,7490
@@ -21,7 +26,7 @@ beswarm/aient/aient/core/test/test_payload.py,sha256=8jBiJY1uidm1jzL-EiK0s6UGmW9
 beswarm/aient/aient/models/__init__.py,sha256=ZTiZgbfBPTjIPSKURE7t6hlFBVLRS9lluGbmqc1WjxQ,43
 beswarm/aient/aient/models/audio.py,sha256=kRd-8-WXzv4vwvsTGwnstK-WR8--vr9CdfCZzu8y9LA,1934
 beswarm/aient/aient/models/base.py,sha256=-nnihYnx-vHZMqeVO9ljjt3k4FcD3n-iMk4tT-10nRQ,7232
-beswarm/aient/aient/models/chatgpt.py,sha256=...
+beswarm/aient/aient/models/chatgpt.py,sha256=Krs9Fk07XmYiiRFrCuhh_tiQWcUUdJT2dh0Ei6zRhmo,42214
 beswarm/aient/aient/plugins/__init__.py,sha256=p3KO6Aa3Lupos4i2SjzLQw1hzQTigOAfEHngsldrsyk,986
 beswarm/aient/aient/plugins/arXiv.py,sha256=yHjb6PS3GUWazpOYRMKMzghKJlxnZ5TX8z9F6UtUVow,1461
 beswarm/aient/aient/plugins/config.py,sha256=TGgZ5SnNKZ8MmdznrZ-TEq7s2ulhAAwTSKH89bci3dA,7079
@@ -116,7 +121,7 @@ beswarm/tools/search_web.py,sha256=0fTeczXiOX_LJQGaLEGbuJtIPzofeuquGWEt3yDMtVw,1
 beswarm/tools/subtasks.py,sha256=4wJWNAqRFgvwmDOP12SddYo_OteWBU5cLhLBh6xILk8,10492
 beswarm/tools/worker.py,sha256=mQ1qdrQ8MgL99byAbTvxfEByFFGN9mty3UHqHjARMQ8,2331
 beswarm/tools/write_csv.py,sha256=u0Hq18Ksfheb52MVtyLNCnSDHibITpsYBPs2ub7USYA,1466
-beswarm-0.2.81.dist-info/METADATA,sha256=...
-beswarm-0.2.81.dist-info/WHEEL,sha256=...
-beswarm-0.2.81.dist-info/top_level.txt,sha256=...
-beswarm-0.2.81.dist-info/RECORD,,
+beswarm-0.2.83.dist-info/METADATA,sha256=BjUJDbLYpEG0BfI0ZIsT3UK_Dev2WuvtQpWRn3CJTBw,3878
+beswarm-0.2.83.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+beswarm-0.2.83.dist-info/top_level.txt,sha256=pJw4O87wvt5882smuSO6DfByJz7FJ8SxxT8h9fHCmpo,8
+beswarm-0.2.83.dist-info/RECORD,,
{beswarm-0.2.81.dist-info → beswarm-0.2.83.dist-info}/WHEEL
File without changes
{beswarm-0.2.81.dist-info → beswarm-0.2.83.dist-info}/top_level.txt
File without changes