beswarm 0.2.34__py3-none-any.whl → 0.2.35__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of beswarm might be problematic.
- beswarm/aient/setup.py +1 -1
- beswarm/aient/src/aient/core/request.py +5 -5
- beswarm/aient/src/aient/core/response.py +56 -105
- beswarm/tools/taskmanager.py +12 -4
- {beswarm-0.2.34.dist-info → beswarm-0.2.35.dist-info}/METADATA +1 -1
- {beswarm-0.2.34.dist-info → beswarm-0.2.35.dist-info}/RECORD +8 -8
- {beswarm-0.2.34.dist-info → beswarm-0.2.35.dist-info}/WHEEL +0 -0
- {beswarm-0.2.34.dist-info → beswarm-0.2.35.dist-info}/top_level.txt +0 -0
beswarm/aient/setup.py
CHANGED
```diff
@@ -4,7 +4,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="aient",
-    version="1.1.
+    version="1.1.53",
     description="Aient: The Awakening of Agent.",
     long_description=Path.open(Path("README.md"), encoding="utf-8").read(),
     long_description_content_type="text/markdown",
```
beswarm/aient/src/aient/core/request.py
CHANGED

```diff
@@ -74,9 +74,8 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
                 content.append(image_message)
         elif msg.content:
             content = [{"text": msg.content}]
-            tool_calls = msg.tool_calls
         elif msg.content is None:
-
+            tool_calls = msg.tool_calls
 
         if tool_calls:
             tool_call = tool_calls[0]
@@ -110,7 +109,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
                     }]
                 }
             )
-        elif msg.role != "system":
+        elif msg.role != "system" and content:
            messages.append({"role": msg.role, "parts": content})
         elif msg.role == "system":
             content[0]["text"] = re.sub(r"_+", "_", content[0]["text"])
@@ -409,8 +408,9 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
             elif item.type == "image_url" and provider.get("image", True):
                 image_message = await get_image_message(item.image_url.url, engine)
                 content.append(image_message)
-
+        elif msg.content:
             content = [{"text": msg.content}]
+        elif msg.content is None:
             tool_calls = msg.tool_calls
 
         if tool_calls:
@@ -445,7 +445,7 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
                     }]
                 }
             )
-        elif msg.role != "system":
+        elif msg.role != "system" and content:
            messages.append({"role": msg.role, "parts": content})
         elif msg.role == "system":
             system_prompt = system_prompt + "\n\n" + content[0]["text"]
```
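Both payload builders get the same two-part fix: `tool_calls` is now read inside the `msg.content is None` branch, and the `elif msg.role != "system"` append is guarded with `and content`, so a message with an empty `parts` list is never appended, presumably because the Gemini API rejects content objects with empty `parts`. A minimal sketch of the guard's effect; `Msg` and `to_gemini_messages` are hypothetical stand-ins for illustration, not names from aient:

```python
# Hypothetical miniature of the `and content` guard added in this release.
from dataclasses import dataclass
from typing import Optional

@dataclass
class Msg:
    role: str
    content: Optional[str]

def to_gemini_messages(msgs):
    messages = []
    for msg in msgs:
        content = [{"text": msg.content}] if msg.content else []
        # Before: `elif msg.role != "system":` appended even when content was
        # empty, producing {"parts": []}. The added check skips such messages.
        if msg.role != "system" and content:
            messages.append({"role": msg.role, "parts": content})
    return messages

print(to_gemini_messages([Msg("user", "hi"), Msg("user", None)]))
# [{'role': 'user', 'parts': [{'text': 'hi'}]}]
```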
beswarm/aient/src/aient/core/response.py
CHANGED

```diff
@@ -20,6 +20,34 @@ async def check_response(response, error_log):
         return {"error": f"{error_log} HTTP Error", "status_code": response.status_code, "details": error_json}
     return None
 
+def gemini_json_poccess(response_str):
+    promptTokenCount = 0
+    candidatesTokenCount = 0
+    totalTokenCount = 0
+    image_base64 = None
+
+    response_json = json.loads(response_str)
+    json_data = safe_get(response_json, "candidates", 0, "content", default=None)
+    finishReason = safe_get(response_json, "candidates", 0 , "finishReason", default=None)
+    if finishReason:
+        promptTokenCount = safe_get(response_json, "usageMetadata", "promptTokenCount", default=0)
+        candidatesTokenCount = safe_get(response_json, "usageMetadata", "candidatesTokenCount", default=0)
+        totalTokenCount = safe_get(response_json, "usageMetadata", "totalTokenCount", default=0)
+
+    content = safe_get(json_data, "parts", 0, "text", default="")
+    b64_json = safe_get(json_data, "parts", 0, "inlineData", "data", default="")
+    if b64_json:
+        image_base64 = b64_json
+
+    is_thinking = safe_get(json_data, "parts", 0, "thought", default=False)
+
+    function_call_name = safe_get(json_data, "functionCall", "name", default=None)
+    function_full_response = json.dumps(safe_get(json_data, "functionCall", "args", default=""))
+
+    blockReason = safe_get(json_data, 0, "promptFeedback", "blockReason", default=None)
+
+    return is_thinking, content, image_base64, function_call_name, function_full_response, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount
+
 async def fetch_gemini_response_stream(client, url, headers, payload, model):
     timestamp = int(datetime.timestamp(datetime.now()))
     async with client.stream('POST', url, headers=headers, json=payload) as response:
@@ -28,131 +56,54 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
             yield error_message
             return
         buffer = ""
-        cache_buffer = ""
-        revicing_function_call = False
-        function_full_response = "{"
-        need_function_call = False
-        is_finish = False
         promptTokenCount = 0
         candidatesTokenCount = 0
         totalTokenCount = 0
         parts_json = ""
-        image_base64 = ""
-        # line_index = 0
-        # last_text_line = 0
-        # if "thinking" in model:
-        #     is_thinking = True
-        # else:
-        #     is_thinking = False
         async for chunk in response.aiter_text():
             buffer += chunk
             cache_buffer += chunk
 
             while "\n" in buffer:
                 line, buffer = buffer.split("\n", 1)
-                # line_index += 1
                 if line.startswith("data: "):
-
-
-                    json_data = safe_get(response_json, "candidates", 0, "content", default=None)
-                    finishReason = safe_get(response_json, "candidates", 0 , "finishReason", default=None)
-                    if finishReason:
-                        promptTokenCount = safe_get(response_json, "usageMetadata", "promptTokenCount", default=0)
-                        candidatesTokenCount = safe_get(response_json, "usageMetadata", "candidatesTokenCount", default=0)
-                        totalTokenCount = safe_get(response_json, "usageMetadata", "totalTokenCount", default=0)
-
-                    content = safe_get(json_data, "parts", 0, "text", default="")
-                    b64_json = safe_get(json_data, "parts", 0, "inlineData", "data", default="")
-                    if b64_json:
-                        image_base64 = b64_json
-
-                    is_thinking = safe_get(json_data, "parts", 0, "thought", default=False)
-                    if is_thinking:
-                        sse_string = await generate_sse_response(timestamp, model, reasoning_content=content)
-                        yield sse_string
-                    elif not image_base64 and content:
-                        sse_string = await generate_sse_response(timestamp, model, content=content)
-                        yield sse_string
-
-                    continue
-
-                # https://ai.google.dev/api/generate-content?hl=zh-cn#FinishReason
-                if line and '\"finishReason\": \"' in line:
-                    if "stop" not in line.lower():
-                        logger.error(f"finishReason: {line}")
-                    is_finish = True
-                if is_finish and '\"promptTokenCount\": ' in line:
-                    json_data = parse_json_safely( "{" + line + "}")
-                    promptTokenCount = json_data.get('promptTokenCount', 0)
-                if is_finish and '\"candidatesTokenCount\": ' in line:
-                    json_data = parse_json_safely( "{" + line + "}")
-                    candidatesTokenCount = json_data.get('candidatesTokenCount', 0)
-                if is_finish and '\"totalTokenCount\": ' in line:
-                    json_data = parse_json_safely( "{" + line + "}")
-                    totalTokenCount = json_data.get('totalTokenCount', 0)
-
-                if (line and '"parts": [' in line or parts_json != "") and is_finish == False:
+                    parts_json = line.lstrip("data: ").strip()
+                else:
                     parts_json += line
-
-                # tmp_parts_json = "{" + parts_json.split("} ] },")[0].strip().rstrip("}], ").replace("\n", "\\n").lstrip("{") + "}]}"
-                tmp_parts_json = "{" + parts_json.split("} ] },")[0].strip().rstrip("}], ").replace("\n", "\\n").lstrip("{")
-                if "inlineData" in tmp_parts_json:
-                    tmp_parts_json = tmp_parts_json + "}}]}"
-                else:
-                    tmp_parts_json = tmp_parts_json + "}]}"
+                parts_json = parts_json.lstrip("[,")
                 try:
-
-
-                    content = safe_get(json_data, "parts", 0, "text", default="")
-                    b64_json = safe_get(json_data, "parts", 0, "inlineData", "data", default="")
-                    if b64_json:
-                        image_base64 = b64_json
-
-                    is_thinking = safe_get(json_data, "parts", 0, "thought", default=False)
-                    if is_thinking:
-                        sse_string = await generate_sse_response(timestamp, model, reasoning_content=content)
-                        yield sse_string
-                    elif not image_base64 and content:
-                        sse_string = await generate_sse_response(timestamp, model, content=content)
-                        yield sse_string
+                    json.loads(parts_json)
                 except json.JSONDecodeError:
-                    logger.error(f"无法解析JSON: {parts_json}")
-                    parts_json = ""
-
-                if line and ('\"functionCall\": {' in line or revicing_function_call):
-                    revicing_function_call = True
-                    need_function_call = True
-                    if ']' in line:
-                        revicing_function_call = False
                     continue
 
-
+                # https://ai.google.dev/api/generate-content?hl=zh-cn#FinishReason
+                is_thinking, content, image_base64, function_call_name, function_full_response, blockReason, promptTokenCount, candidatesTokenCount, totalTokenCount = gemini_json_poccess(parts_json)
 
-
-
-
+                if is_thinking:
+                    sse_string = await generate_sse_response(timestamp, model, reasoning_content=content)
+                    yield sse_string
+                elif not image_base64 and content:
+                    sse_string = await generate_sse_response(timestamp, model, content=content)
+                    yield sse_string
 
-
-
-                function_call_name = function_call["functionCall"]["name"]
-                sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=function_call_name)
-                yield sse_string
-                function_full_response = json.dumps(function_call["functionCall"]["args"])
-                sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=None, function_call_content=function_full_response)
-                yield sse_string
+                if image_base64:
+                    yield await generate_no_stream_response(timestamp, model, content=content, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=totalTokenCount, prompt_tokens=promptTokenCount, completion_tokens=candidatesTokenCount, image_base64=image_base64)
 
-
-
-
-
-
+                if function_call_name:
+                    sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=function_call_name)
+                    yield sse_string
+                if function_full_response:
+                    sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=None, function_call_content=function_full_response)
+                    yield sse_string
 
-
-
-
-
-
-
+                if parts_json == "[]" or blockReason == "PROHIBITED_CONTENT":
+                    sse_string = await generate_sse_response(timestamp, model, stop="PROHIBITED_CONTENT")
+                    yield sse_string
+                else:
+                    sse_string = await generate_sse_response(timestamp, model, stop="stop")
+                    yield sse_string
+
+                parts_json = ""
 
         sse_string = await generate_sse_response(timestamp, model, None, None, None, None, None, totalTokenCount, promptTokenCount, candidatesTokenCount)
         yield sse_string
```
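The streaming parser is substantially simplified: instead of keyword-sniffing individual SSE lines for `finishReason`, token counts, and `functionCall` fragments, the new code accumulates each `data: ` line (plus continuation lines) in `parts_json` until it parses as complete JSON, then hands it to the new `gemini_json_poccess` helper, which extracts thoughts, text, inline images, function calls, block reasons, and usage metadata in one pass. A standalone sketch of that accumulate-then-parse shape (simplified: it prints where the real coroutine yields SSE chunks via `generate_sse_response`, and it slices off the `data: ` prefix rather than using `lstrip`):

```python
# Sketch: buffer SSE payload lines until json.loads succeeds, then process
# the complete object -- the loop shape introduced in this release.
import json

def process_stream(lines):
    parts_json = ""
    for line in lines:
        if line.startswith("data: "):
            parts_json = line[len("data: "):].strip()
        else:
            parts_json += line
        parts_json = parts_json.lstrip("[,")   # strip stream-array framing
        try:
            obj = json.loads(parts_json)       # incomplete JSON: wait for more
        except json.JSONDecodeError:
            continue
        part = obj["candidates"][0]["content"]["parts"][0]
        print("reasoning" if part.get("thought") else "content", part.get("text", ""))
        parts_json = ""

process_stream([
    'data: {"candidates": [{"content": {"parts":',
    ' [{"text": "hello"}]}}]}',
])
# -> content hello
```

One carry-over worth noting: `cache_buffer += chunk` survives the rewrite while the `cache_buffer = ""` initialisation is deleted, which would raise `NameError` unless the variable is defined in context not shown in this diff.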
beswarm/tools/taskmanager.py
CHANGED
```diff
@@ -34,6 +34,7 @@ class TaskManager:
         self.task_cache_file.touch(exist_ok=True)
         self.read_tasks_cache()
         self.set_task_cache("root_path", str(self.root_path))
+        self.resume_all_running_task()
 
     def set_task_cache(self, *keys_and_value):
         """
@@ -86,6 +87,12 @@ class TaskManager:
             self.set_task_cache(task_id, "status", TaskStatus.RUNNING.value)
         return task_ids
 
+    def resume_all_running_task(self):
+        running_task_id_list = [task_id for task_id, task in self.tasks_cache.items() if task_id != "root_path" and task.get("status") == "RUNNING"]
+        for task_id in running_task_id_list:
+            tasks_params = self.tasks_cache[task_id]["args"]
+            task_id = task_manager.resume_task(task_id, worker_fun, tasks_params)
+
     def resume_task(self, task_id, task_coro, args):
         """
         恢复一个任务。
@@ -95,7 +102,7 @@ class TaskManager:
             return TaskStatus.NOT_FOUND
 
         coro = task_coro(**args)
-        task_id = self.create_task(coro)
+        task_id = self.create_task(coro, task_id)
         self.set_task_cache(task_id, "args", args)
         self.set_task_cache(task_id, "status", TaskStatus.RUNNING.value)
         print(f"任务已恢复: ID={task_id}, Name={task_id}")
@@ -103,7 +110,7 @@ class TaskManager:
         print(f"self.tasks_cache: {json.dumps(self.tasks_cache, ensure_ascii=False, indent=4)}")
         return task_id
 
-    def create_task(self, coro):
+    def create_task(self, coro, task_id=None):
         """
         创建并注册一个新任务。
 
@@ -114,7 +121,8 @@ class TaskManager:
         Returns:
             str: 任务的唯一ID。
         """
-        task_id
+        if task_id == None:
+            task_id = str(uuid.uuid4())
         task_name = f"Task-{task_id[:8]}"
 
         # 使用 asyncio.create_task() 创建任务
@@ -233,7 +241,7 @@ def create_task(goal, tools, work_dir):
     Args:
         goal (str): 需要完成的具体任务目标描述。子任务将围绕此目标进行工作。必须清晰、具体。必须包含背景信息,完成指标等。写清楚什么时候算任务完成,同时交代清楚任务的背景信息,这个背景信息可以是需要读取的文件等一切有助于完成任务的信息。
         tools (list[str]): 一个包含可用工具函数对象的列表。子任务在执行任务时可能会调用这些工具来与环境交互(例如读写文件、执行命令等)。
-        work_dir (str):
+        work_dir (str): 工作目录的绝对路径。子任务将在此目录上下文中执行操作。子任务的工作目录位置在主任务的工作目录的子目录。子任务工作目录**禁止**设置为主任务目录本身。
 
     Returns:
         str: 当任务成功完成时,返回字符串 "任务已完成"。
```
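Net effect of the taskmanager changes: the manager now re-creates every task whose cached status is RUNNING as soon as it is constructed, and `create_task` takes an optional `task_id` so a resumed task keeps its original ID and its cache entry stays valid. Two observations from the diff: `resume_all_running_task` refers to the module-level `task_manager` and `worker_fun` names rather than `self`, and the new `task_id == None` comparison would more idiomatically be `task_id is None`. A hypothetical miniature of the keep-the-ID pattern, not the module's actual TaskManager:

```python
# Hypothetical miniature of resume-with-same-ID; trimmed to the new logic.
import asyncio
import uuid

class MiniTaskManager:
    def __init__(self, tasks_cache):
        self.tasks_cache = tasks_cache  # task_id -> {"status": ..., "args": ...}
        self.tasks = {}

    def create_task(self, coro, task_id=None):
        if task_id is None:             # fresh task: mint a new ID
            task_id = str(uuid.uuid4())
        # resumed task: reuse the cached ID so its cache entry stays valid
        self.tasks[task_id] = asyncio.create_task(coro, name=f"Task-{task_id[:8]}")
        return task_id

    def resume_all_running(self, task_coro):
        for task_id, task in self.tasks_cache.items():
            if task_id != "root_path" and task.get("status") == "RUNNING":
                self.create_task(task_coro(**task["args"]), task_id)

async def demo():
    async def worker(goal):
        return goal

    mgr = MiniTaskManager({"root_path": {}, "abc123": {"status": "RUNNING", "args": {"goal": "resume me"}}})
    mgr.resume_all_running(worker)
    await asyncio.gather(*mgr.tasks.values())
    print(list(mgr.tasks))  # ['abc123'] -- the cached ID was preserved

asyncio.run(demo())
```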
{beswarm-0.2.34.dist-info → beswarm-0.2.35.dist-info}/RECORD
CHANGED

```diff
@@ -2,13 +2,13 @@ beswarm/__init__.py,sha256=HZjUOJtZR5QhMuDbq-wukQQn1VrBusNWai_ysGo-VVI,20
 beswarm/prompt.py,sha256=5JMfOuXWHscsaeDzwBn223mj9N85eAQdOHXQZk7zeWE,32238
 beswarm/utils.py,sha256=xxbNifOPlfcVkKmF_qFzuEnZgF3MQg3mnOfz1EF0Qss,6697
 beswarm/aient/main.py,sha256=SiYAIgQlLJqYusnTVEJOx1WNkSJKMImhgn5aWjfroxg,3814
-beswarm/aient/setup.py,sha256=
+beswarm/aient/setup.py,sha256=LqjY1x8CQrcvFrHKFSyZpm5h6iBuosHIpZqXdCPnPes,487
 beswarm/aient/src/aient/__init__.py,sha256=SRfF7oDVlOOAi6nGKiJIUK6B_arqYLO9iSMp-2IZZps,21
 beswarm/aient/src/aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
 beswarm/aient/src/aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
 beswarm/aient/src/aient/core/models.py,sha256=d4MISNezTSe0ls0-fjuToI2SoT-sk5fWqAJuKVinIlo,7502
-beswarm/aient/src/aient/core/request.py,sha256=
-beswarm/aient/src/aient/core/response.py,sha256=
+beswarm/aient/src/aient/core/request.py,sha256=1tedDQf8GRv5Y7rYNE_596vQb4o7e1icaKAA7lIl4YY,76114
+beswarm/aient/src/aient/core/response.py,sha256=Ba0BwsIN2ozZC_UInkGS07qKlpo3dIei6rw0INQ66BE,33086
 beswarm/aient/src/aient/core/utils.py,sha256=8TR442o3VV7Kl9l6f6LlmOUQ1UDZ-aXMzQqm-qIrqE4,28166
 beswarm/aient/src/aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
 beswarm/aient/src/aient/core/test/test_geminimask.py,sha256=HFX8jDbNg_FjjgPNxfYaR-0-roUrOO-ND-FVsuxSoiw,13254
@@ -135,9 +135,9 @@ beswarm/tools/request_input.py,sha256=gXNAJPOJektMqxJVyzNTFOeMQ7xUkO-wWMYH-r2Rdw
 beswarm/tools/screenshot.py,sha256=u6t8FCgW5YHJ_Oc4coo8e0F3wTusWE_-H8dFh1rBq9Q,1011
 beswarm/tools/search_arxiv.py,sha256=caVIUOzMhFu-r_gVgJZrH2EO9xI5iV_qLAg0b3Ie9Xg,8095
 beswarm/tools/search_web.py,sha256=ybbdbJq80plooXLMiyjAMOSCEyZJ0hquGUpabBhfFx0,16195
-beswarm/tools/taskmanager.py,sha256=
+beswarm/tools/taskmanager.py,sha256=n7G6cH96Tcz57MfiOffISMMAfUtr49_uikkeoCDCeRg,12940
 beswarm/tools/worker.py,sha256=s6tN4JhA07qzTlP7xWiB0MjnBIJ6XSrtlJTA_RqG1_A,23539
-beswarm-0.2.
-beswarm-0.2.
-beswarm-0.2.
-beswarm-0.2.
+beswarm-0.2.35.dist-info/METADATA,sha256=cfwB-Cq_qEDmpCNZyzrFNjDtnikxw8IqmrKf0MZd_Yk,3878
+beswarm-0.2.35.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+beswarm-0.2.35.dist-info/top_level.txt,sha256=pJw4O87wvt5882smuSO6DfByJz7FJ8SxxT8h9fHCmpo,8
+beswarm-0.2.35.dist-info/RECORD,,
```
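For reference, each RECORD row has the form `path,sha256=<digest>,<size>`, where the digest is the unpadded URL-safe base64 encoding of the file's SHA-256 hash, per the wheel spec. A small sketch of how such an entry is computed:

```python
# Compute a wheel RECORD entry (path,sha256=<digest>,<size>) for one file.
import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=")
    return f"{path},sha256={digest.decode()},{len(data)}"

# record_entry("beswarm/aient/setup.py") run against the 0.2.35 wheel contents
# would reproduce the new line above:
# beswarm/aient/setup.py,sha256=LqjY1x8CQrcvFrHKFSyZpm5h6iBuosHIpZqXdCPnPes,487
```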
{beswarm-0.2.34.dist-info → beswarm-0.2.35.dist-info}/WHEEL
File without changes

{beswarm-0.2.34.dist-info → beswarm-0.2.35.dist-info}/top_level.txt
File without changes