beswarm 0.1.56__py3-none-any.whl → 0.1.58__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- beswarm/aient/setup.py +1 -1
- beswarm/aient/src/aient/core/request.py +81 -27
- beswarm/aient/src/aient/core/response.py +13 -3
- beswarm/aient/src/aient/core/utils.py +1 -0
- beswarm/tools/edit_file.py +21 -1
- beswarm/tools/repomap.py +104 -5
- {beswarm-0.1.56.dist-info → beswarm-0.1.58.dist-info}/METADATA +1 -1
- {beswarm-0.1.56.dist-info → beswarm-0.1.58.dist-info}/RECORD +10 -10
- {beswarm-0.1.56.dist-info → beswarm-0.1.58.dist-info}/WHEEL +0 -0
- {beswarm-0.1.56.dist-info → beswarm-0.1.58.dist-info}/top_level.txt +0 -0
beswarm/aient/setup.py
CHANGED
@@ -4,7 +4,7 @@ from setuptools import setup, find_packages

setup(
    name="aient",
-    version="1.1.
+    version="1.1.13",
    description="Aient: The Awakening of Agent.",
    long_description=Path.open(Path("README.md"), encoding="utf-8").read(),
    long_description_content_type="text/markdown",
beswarm/aient/src/aient/core/request.py
CHANGED
@@ -48,6 +48,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):

    messages = []
    systemInstruction = None
+    system_prompt = ""
    function_arguments = None
    for msg in request.messages:
        if msg.role == "assistant":
@@ -102,7 +103,8 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
            messages.append({"role": msg.role, "parts": content})
        elif msg.role == "system":
            content[0]["text"] = re.sub(r"_+", "_", content[0]["text"])
-
+            system_prompt = system_prompt + "\n\n" + content[0]["text"]
+            systemInstruction = {"parts": [{"text": system_prompt}]}

    if any(off_model in original_model for off_model in gemini_max_token_65k_models):
        safety_settings = "OFF"
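For context, a minimal standalone sketch (not the package's own code; the example messages are made up) of how this accumulation behaves when a request carries several system messages: each system text is appended with a blank-line separator, and the combined string becomes the single systemInstruction.

# Minimal sketch of the system-message accumulation shown above (hypothetical inputs).
system_prompt = ""
systemInstruction = None
for text in ["You are a helpful assistant.", "Always answer in English."]:
    system_prompt = system_prompt + "\n\n" + text
    systemInstruction = {"parts": [{"text": system_prompt}]}

print(systemInstruction)
# {'parts': [{'text': '\n\nYou are a helpful assistant.\n\nAlways answer in English.'}]}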
@@ -212,23 +214,35 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
    else:
        payload["generationConfig"]["maxOutputTokens"] = 8192

-
-
-
-
-
-
-
-
-            val =
-
-
-
-
-
-
-
-
+    if "gemini-2.5" in original_model:
+        payload["generationConfig"]["thinkingConfig"] = {
+            "includeThoughts": True,
+        }
+        # Detect a thinking-budget setting in the requested model name
+        m = re.match(r".*-think-(-?\d+)", request.model)
+        if m:
+            try:
+                val = int(m.group(1))
+                if val < 0:
+                    val = 0
+                elif val > 24576:
+                    val = 24576
+                payload["generationConfig"]["thinkingConfig"]["thinkingBudget"] = val
+            except ValueError:
+                # If the value cannot be converted to an integer, ignore the thinking-budget setting
+                pass
+
+    # # Detect the search tag
+    # if request.model.endswith("-search"):
+    #     payload["tools"] = [{"googleSearch": {}}]
+
+    if safe_get(provider, "preferences", "post_body_parameter_overrides", default=None):
+        for key, value in safe_get(provider, "preferences", "post_body_parameter_overrides", default={}).items():
+            if key == request.model:
+                for k, v in value.items():
+                    payload[k] = v
+            elif all(_model not in request.model for _model in ["gemini", "gpt", "claude"]):
+                payload[key] = value

    return url, headers, payload

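As a quick illustration of the model-name convention this hunk introduces (a standalone sketch; the helper name and the sample model names are made up), the -think-N suffix is extracted with the same regex and clamped to the 0–24576 range before being written into thinkingConfig:

import re

def thinking_budget_from_model_name(model_name, low=0, high=24576):
    """Return the clamped thinking budget encoded in a '-think-N' suffix, or None if absent."""
    m = re.match(r".*-think-(-?\d+)", model_name)
    if not m:
        return None
    return min(max(int(m.group(1)), low), high)

print(thinking_budget_from_model_name("gemini-2.5-flash-think-8192"))   # 8192
print(thinking_budget_from_model_name("gemini-2.5-pro-think--1"))       # 0 (negative budgets clamp to 0)
print(thinking_budget_from_model_name("gemini-2.5-flash-think-99999"))  # 24576 (capped)
print(thinking_budget_from_model_name("gemini-2.5-flash"))              # None (no suffix)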
@@ -303,16 +317,16 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
    gemini_stream = "generateContent"
    model_dict = get_model_dict(provider)
    original_model = model_dict[request.model]
-    search_tool = None
+    # search_tool = None

    # https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/2-0-flash?hl=zh-cn
    pro_models = ["gemini-2.5", "gemini-2.0"]
    if any(pro_model in original_model for pro_model in pro_models):
        location = gemini2
-        search_tool = {"googleSearch": {}}
+        # search_tool = {"googleSearch": {}}
    else:
        location = gemini1
-        search_tool = {"googleSearchRetrieval": {}}
+        # search_tool = {"googleSearchRetrieval": {}}

    if "google-vertex-ai" in provider.get("base_url", ""):
        url = provider.get("base_url").rstrip('/') + "/v1/projects/{PROJECT_ID}/locations/{LOCATION}/publishers/google/models/{MODEL_ID}:{stream}".format(
@@ -334,6 +348,7 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):

    messages = []
    systemInstruction = None
+    system_prompt = ""
    function_arguments = None
    for msg in request.messages:
        if msg.role == "assistant":
@@ -387,7 +402,8 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
        elif msg.role != "system":
            messages.append({"role": msg.role, "parts": content})
        elif msg.role == "system":
-
+            system_prompt = system_prompt + "\n\n" + content[0]["text"]
+            systemInstruction = {"parts": [{"text": system_prompt}]}

    if any(off_model in original_model for off_model in gemini_max_token_65k_models):
        safety_settings = "OFF"
@@ -469,8 +485,34 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
    else:
        payload["generationConfig"]["max_output_tokens"] = 8192

-    if
-        payload["
+    if "gemini-2.5" in original_model:
+        payload["generationConfig"]["thinkingConfig"] = {
+            "includeThoughts": True,
+        }
+        # Detect a thinking-budget setting in the requested model name
+        m = re.match(r".*-think-(-?\d+)", request.model)
+        if m:
+            try:
+                val = int(m.group(1))
+                if val < 0:
+                    val = 0
+                elif val > 24576:
+                    val = 24576
+                payload["generationConfig"]["thinkingConfig"]["thinkingBudget"] = val
+            except ValueError:
+                # If the value cannot be converted to an integer, ignore the thinking-budget setting
+                pass
+
+    # if request.model.endswith("-search"):
+    #     payload["tools"] = [search_tool]
+
+    if safe_get(provider, "preferences", "post_body_parameter_overrides", default=None):
+        for key, value in safe_get(provider, "preferences", "post_body_parameter_overrides", default={}).items():
+            if key == request.model:
+                for k, v in value.items():
+                    payload[k] = v
+            elif all(_model not in request.model for _model in ["gemini", "gpt", "claude"]):
+                payload[key] = value

    return url, headers, payload

@@ -1010,7 +1052,11 @@ async def get_gpt_payload(request, engine, provider, api_key=None):

    if safe_get(provider, "preferences", "post_body_parameter_overrides", default=None):
        for key, value in safe_get(provider, "preferences", "post_body_parameter_overrides", default={}).items():
-
+            if key == request.model:
+                for k, v in value.items():
+                    payload[k] = v
+            elif all(_model not in request.model for _model in ["gemini", "gpt", "claude"]):
+                payload[key] = value

    return url, headers, payload

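A short sketch of the override precedence these hunks add to the payload builders (the provider preferences below are hypothetical): a key that exactly matches the requested model name replaces individual payload fields, while any other key is copied into the payload verbatim, but only for model names that contain none of gemini, gpt, or claude.

def apply_overrides(payload, request_model, overrides):
    # Mirrors the precedence shown above: exact model-name match first,
    # otherwise treat the key as a raw payload field for "other" models.
    for key, value in overrides.items():
        if key == request_model:
            for k, v in value.items():
                payload[k] = v
        elif all(m not in request_model for m in ["gemini", "gpt", "claude"]):
            payload[key] = value
    return payload

print(apply_overrides({}, "gemini-2.5-pro", {"gemini-2.5-pro": {"temperature": 0.2}}))
# {'temperature': 0.2}
print(apply_overrides({}, "deepseek-r1", {"max_tokens": 1024}))
# {'max_tokens': 1024}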
@@ -1104,7 +1150,11 @@ async def get_azure_payload(request, engine, provider, api_key=None):

    if safe_get(provider, "preferences", "post_body_parameter_overrides", default=None):
        for key, value in safe_get(provider, "preferences", "post_body_parameter_overrides", default={}).items():
-
+            if key == request.model:
+                for k, v in value.items():
+                    payload[k] = v
+            elif all(_model not in request.model for _model in ["gemini", "gpt", "claude"]):
+                payload[key] = value

    return url, headers, payload

@@ -1433,9 +1483,13 @@ async def get_claude_payload(request, engine, provider, api_key=None):
        message_index = message_index + 1

    if "claude-3-7-sonnet" in original_model:
-        max_tokens =
+        max_tokens = 128000
    elif "claude-3-5-sonnet" in original_model:
        max_tokens = 8192
+    elif "claude-sonnet-4" in original_model:
+        max_tokens = 64000
+    elif "claude-opus-4" in original_model:
+        max_tokens = 32000
    else:
        max_tokens = 4096

beswarm/aient/src/aient/core/response.py
CHANGED
@@ -535,15 +535,25 @@ async def fetch_response(client, url, headers, payload, engine, model):
        # print("parsed_data", json.dumps(parsed_data, indent=4, ensure_ascii=False))
        content = ""
        reasoning_content = ""
-
-
-
+        parts_list = safe_get(parsed_data, 0, "candidates", 0, "content", "parts", default=[])
+        for item in parts_list:
+            chunk = safe_get(item, "text")
+            is_think = safe_get(item, "thought", default=False)
            # logger.info(f"chunk: {repr(chunk)}")
            if chunk:
                if is_think:
                    reasoning_content += chunk
                else:
                    content += chunk
+        # for item in parsed_data:
+        #     chunk = safe_get(item, "candidates", 0, "content", "parts", 0, "text")
+        #     is_think = safe_get(item, "candidates", 0, "content", "parts", 0, "thought", default=False)
+        #     # logger.info(f"chunk: {repr(chunk)}")
+        #     if chunk:
+        #         if is_think:
+        #             reasoning_content += chunk
+        #         else:
+        #             content += chunk

        usage_metadata = safe_get(parsed_data, -1, "usageMetadata")
        prompt_tokens = safe_get(usage_metadata, "promptTokenCount", default=0)
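To make the new parts handling concrete, here is a standalone sketch with a fabricated, heavily simplified response body (not an actual Gemini payload): parts flagged with thought accumulate into reasoning_content, everything else into content.

# Fabricated, simplified Gemini-style response body, for illustration only.
parsed_data = [{
    "candidates": [{
        "content": {
            "parts": [
                {"text": "Let me compare the two options first.", "thought": True},
                {"text": "Option B is the better choice."},
            ]
        }
    }]
}]

content = ""
reasoning_content = ""
parts_list = parsed_data[0]["candidates"][0]["content"]["parts"]
for item in parts_list:
    chunk = item.get("text")
    is_think = item.get("thought", False)
    if chunk:
        if is_think:
            reasoning_content += chunk
        else:
            content += chunk

print(reasoning_content)  # Let me compare the two options first.
print(content)            # Option B is the better choice.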
beswarm/aient/src/aient/core/utils.py
CHANGED
@@ -96,6 +96,7 @@ def get_engine(provider, endpoint=None, original_model=""):
        and "o3" not in original_model \
        and "o4" not in original_model \
        and "gemini" not in original_model \
+        and "gemma" not in original_model \
        and "learnlm" not in original_model \
        and "grok" not in original_model \
        and parsed_url.netloc != 'api.cloudflare.com' \
beswarm/tools/edit_file.py
CHANGED
@@ -157,4 +157,24 @@ def edit_file(file_path, diff_content, match_precision=0.8):
    except UnicodeDecodeError:
        return f"错误: 文件 '{file_path}' 不是文本文件或编码不是UTF-8,无法进行编码解析"
    except Exception as e:
-
+        print(f"content: {content}")
+        print(f"file_path: {file_path}")
+        print(f"diff_content: {diff_content}")
+        import traceback
+        traceback.print_exc()
+        return f"编辑文件时发生错误: {e}"
+
+if __name__ == "__main__":
+    edit_str = """
+<<<<<<< SEARCH
+parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'cifar100', 'imagenet'], help="Dataset to use.")
+=======
+parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10', 'cifar100', 'imagenet', 'tinyimagenet'], help="Dataset to use.")
+>>>>>>> REPLACE
+"""
+
+    file_path = "train.py"
+    # 编辑文件时发生错误: '>' not supported between instances of 'str' and 'float'
+    print(edit_file(file_path, edit_str))
+
+    # python -m beswarm.tools.edit_file
beswarm/tools/repomap.py
CHANGED
@@ -1,5 +1,6 @@
import os
import math
+import json
import time
import random
import shutil
@@ -634,7 +635,23 @@ class RepoMap:
        return data

    def get_tags_raw(self, fname, rel_fname):
-
+        # If this is an .ipynb file, convert it to Python code before processing
+        if fname.endswith('.ipynb'):
+            # Read the ipynb file content
+            ipynb_content = self.io.read_text(str(self.root / Path(fname)))
+            if not ipynb_content:
+                return
+
+            # Convert it to Python code
+            py_content = self.convert_ipynb_to_py_content(ipynb_content)
+            if not py_content:
+                return
+
+            # Treat the converted content as Python
+            lang = "python"
+        else:
+            lang = filename_to_lang(str(self.root / Path(fname)))
+
        # print(f"lang1: {lang}")
        if not lang:
            return
@@ -653,7 +670,11 @@ class RepoMap:
            return
        query_scm = query_scm.read_text()

-
+        # Choose the code content based on the file type
+        if fname.endswith('.ipynb'):
+            code = py_content
+        else:
+            code = self.io.read_text(str(self.root / Path(fname)))
        # print(f"code: {code}")
        if not code:
            return
@@ -1090,13 +1111,23 @@ class RepoMap:
            or self.tree_context_cache[rel_fname]["mtime"] != mtime
        ):
            # print(f"abs_fname: {abs_fname}")
-
+            # Handle .ipynb files
+            if str(abs_fname).endswith('.ipynb'):
+                # Read the ipynb file and convert it
+                ipynb_content = self.io.read_text(abs_fname) or ""
+                code = self.convert_ipynb_to_py_content(ipynb_content) or ""
+                # Use a virtual .py filename so that TreeContext can recognize the language
+                context_filename = rel_fname.replace('.ipynb', '.py')
+            else:
+                code = self.io.read_text(abs_fname) or ""
+                context_filename = rel_fname
+
            # print(f"code: {code}")
            if not code.endswith("\n"):
                code += "\n"

            context = TreeContext(
-
+                context_filename,
                code,
                color=False,
                line_number=False,
@@ -1161,6 +1192,73 @@ class RepoMap:

        return output

+    def convert_ipynb_to_py_content(self, ipynb_content):
+        """
+        Convert the content of an .ipynb file into a Python code string.
+        Markdown cells become comments.
+        Code cells stay as Python code.
+        """
+        try:
+            notebook_data = json.loads(ipynb_content)
+        except json.JSONDecodeError:
+            return None
+
+        py_lines = []
+
+        for cell in notebook_data.get('cells', []):
+            cell_type = cell.get('cell_type')
+            source = cell.get('source', [])
+
+            if not isinstance(source, list):
+                source = [source]
+
+            source_lines = "".join(source).splitlines()
+
+            if cell_type == 'markdown':
+                for line in source_lines:
+                    py_lines.append(f"# {line}")
+                py_lines.append("")
+            elif cell_type == 'code':
+                for line in source_lines:
+                    if line.startswith("!") or line.startswith("%"):
+                        py_lines.append(f"# {line}")
+                    else:
+                        py_lines.append(line)
+
+                outputs = cell.get('outputs', [])
+                has_output_comment = False
+                for output in outputs:
+                    output_type = output.get('output_type')
+                    if output_type == 'stream':
+                        if not has_output_comment:
+                            py_lines.append("# --- Output ---")
+                            has_output_comment = True
+                        text_output = output.get('text', [])
+                        if isinstance(text_output, list):
+                            for line in "".join(text_output).splitlines():
+                                py_lines.append(f"# {line}")
+                        else:
+                            for line in text_output.splitlines():
+                                py_lines.append(f"# {line}")
+                    elif output_type == 'execute_result':
+                        data = output.get('data', {})
+                        if 'text/plain' in data:
+                            if not has_output_comment:
+                                py_lines.append("# --- Output ---")
+                                has_output_comment = True
+                            text_output = data['text/plain']
+                            if isinstance(text_output, list):
+                                for line in "".join(text_output).splitlines():
+                                    py_lines.append(f"# {line}")
+                            else:
+                                for line in text_output.splitlines():
+                                    py_lines.append(f"# {line}")
+                if has_output_comment:
+                    py_lines.append("# --- End Output ---")
+                py_lines.append("")
+
+        return '\n'.join(py_lines)
+

def find_src_files(directory):
    if not os.path.isdir(directory):
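A small usage sketch of the new converter (the notebook content below is made up; the unbound call works because the method never touches self):

import json
from beswarm.tools.repomap import RepoMap

# A made-up, minimal notebook: one markdown cell and one code cell with a stream output.
notebook = {
    "cells": [
        {"cell_type": "markdown", "source": ["# Load the data\n"]},
        {
            "cell_type": "code",
            "source": ["!pip install pandas\n", "print(1 + 1)\n"],
            "outputs": [{"output_type": "stream", "text": ["2\n"]}],
        },
    ]
}

# The method does not use `self`, so it can be called unbound for a quick check.
print(RepoMap.convert_ipynb_to_py_content(None, json.dumps(notebook)))

The markdown line and the !pip shell command come out as # comments, print(1 + 1) stays as code, and the stream output is wrapped between the # --- Output --- and # --- End Output --- markers.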
@@ -1287,6 +1385,7 @@ if __name__ == "__main__":
    # print(get_code_repo_map("."))
    # print(get_code_repo_map("/Users/yanyuming/Downloads/GitHub/uni-api"))
    # print(get_code_repo_map("/Users/yanyuming/Downloads/GitHub/text-to-motion"))
-    print(get_code_repo_map("/Users/yanyuming/Downloads/GitHub/beswarm/work/secretary/secretary"))
+    # print(get_code_repo_map("/Users/yanyuming/Downloads/GitHub/beswarm/work/secretary/secretary"))
+    print(get_code_repo_map("/Users/yanyuming/Downloads/GitHub/beswarm/work/fer/fer"))

    # python -m beswarm.tools.repomap
{beswarm-0.1.56.dist-info → beswarm-0.1.58.dist-info}/RECORD
CHANGED
@@ -1,14 +1,14 @@
beswarm/__init__.py,sha256=HZjUOJtZR5QhMuDbq-wukQQn1VrBusNWai_ysGo-VVI,20
beswarm/utils.py,sha256=AdDCcqAIIKQEMl7PfryVgeT9G5sHe7QNsZnrvmTGA8E,283
beswarm/aient/main.py,sha256=SiYAIgQlLJqYusnTVEJOx1WNkSJKMImhgn5aWjfroxg,3814
-beswarm/aient/setup.py,sha256=
+beswarm/aient/setup.py,sha256=5aE5yVcM8sk5NULSs8Y9sJi1pRQGKZwdscZVOzPGvB0,487
beswarm/aient/src/aient/__init__.py,sha256=SRfF7oDVlOOAi6nGKiJIUK6B_arqYLO9iSMp-2IZZps,21
beswarm/aient/src/aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
beswarm/aient/src/aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
beswarm/aient/src/aient/core/models.py,sha256=kF-HLi1I2k_G5r153ZHuiGH8_NmpTlFMfK0_myB28YQ,7366
-beswarm/aient/src/aient/core/request.py,sha256=
-beswarm/aient/src/aient/core/response.py,sha256=
-beswarm/aient/src/aient/core/utils.py,sha256
+beswarm/aient/src/aient/core/request.py,sha256=AmTnQ_Ri_ACRxDsWmPhhD6e79hNfwLxbsyBnpbAnmNA,64490
+beswarm/aient/src/aient/core/response.py,sha256=Z0Bjl_QvpUguyky1LIcsVks4BKKqT0eYEpDmKa_cwpQ,31978
+beswarm/aient/src/aient/core/utils.py,sha256=-naFCv8V-qhnqvDUd8BNbW1HR9CVAPxISrXoAz464Qg,26580
beswarm/aient/src/aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
beswarm/aient/src/aient/core/test/test_geminimask.py,sha256=HFX8jDbNg_FjjgPNxfYaR-0-roUrOO-ND-FVsuxSoiw,13254
beswarm/aient/src/aient/core/test/test_image.py,sha256=_T4peNGdXKBHHxyQNx12u-NTyFE8TlYI6NvvagsG2LE,319
@@ -122,14 +122,14 @@ beswarm/queries/tree-sitter-languages/typescript-tags.scm,sha256=OMdCeedPiA24ky8
beswarm/tools/UIworker.py,sha256=1sEC76VGFwo48lSx6KOvhJwhgBj7UWAHAAH9BG_lp-M,6439
beswarm/tools/__init__.py,sha256=jOfYY4EYkwmz-FTJGrI1CyaIYkGWsmGzZBGsoupeX9M,1088
beswarm/tools/click.py,sha256=TygaekCXTmU3fIu6Uom7ZcyzEgYMlCC_GX-5SmWHuLI,20762
-beswarm/tools/edit_file.py,sha256=
+beswarm/tools/edit_file.py,sha256=xlAD0HB_xM0yZYc0eJwLE-9mAkywXa2UQPNHzG1OaW4,7664
beswarm/tools/planner.py,sha256=lguBCS6kpwNPoXQvqH-WySabVubT82iyWOkJnjt6dXw,1265
-beswarm/tools/repomap.py,sha256=
+beswarm/tools/repomap.py,sha256=N09K0UgwjCN7Zjg_5TYlVsulp3n2fztYlS8twalChU8,45003
beswarm/tools/search_arxiv.py,sha256=9slwBemXjEqrd7-YgVmyMijPXlkhZCybEDRVhWVQ9B0,7937
beswarm/tools/search_web.py,sha256=B24amOnGHnmdV_6S8bw8O2PdhZRRIDtJjg-wXcfP7dQ,11859
beswarm/tools/think.py,sha256=WLw-7jNIsnS6n8MMSYUin_f-BGLENFmnKM2LISEp0co,1760
beswarm/tools/worker.py,sha256=b-FvSEP27-zMYNcqaQeVBoWxaSf2cX_7_1p1GAF6h04,6191
-beswarm-0.1.
-beswarm-0.1.
-beswarm-0.1.
-beswarm-0.1.
+beswarm-0.1.58.dist-info/METADATA,sha256=gquIkiIp96CLCAU56qpaYkmzPkGm3mGtsyDG8y9JzPQ,3553
+beswarm-0.1.58.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+beswarm-0.1.58.dist-info/top_level.txt,sha256=pJw4O87wvt5882smuSO6DfByJz7FJ8SxxT8h9fHCmpo,8
+beswarm-0.1.58.dist-info/RECORD,,
{beswarm-0.1.56.dist-info → beswarm-0.1.58.dist-info}/WHEEL
File without changes
{beswarm-0.1.56.dist-info → beswarm-0.1.58.dist-info}/top_level.txt
File without changes