beswarm 0.1.62__py3-none-any.whl → 0.1.64__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
beswarm/aient/setup.py CHANGED
@@ -4,7 +4,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="aient",
-    version="1.1.16",
+    version="1.1.17",
     description="Aient: The Awakening of Agent.",
     long_description=Path.open(Path("README.md"), encoding="utf-8").read(),
     long_description_content_type="text/markdown",

beswarm/aient/src/aient/core/request.py CHANGED
@@ -4,7 +4,7 @@ import httpx
 import base64
 import urllib.parse
 
-from .models import RequestModel
+from .models import RequestModel, Message
 from .utils import (
     c3s,
     c3o,
@@ -50,7 +50,12 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
     systemInstruction = None
     system_prompt = ""
     function_arguments = None
-    for msg in request.messages:
+
+    try:
+        request_messages = [Message(role="user", content=request.prompt)]
+    except:
+        request_messages = request.messages
+    for msg in request_messages:
         if msg.role == "assistant":
             msg.role = "model"
         tool_calls = None
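
Note on the hunk above: only image-generation style requests carry a `prompt` field, so building `Message(role="user", content=request.prompt)` fails for ordinary chat requests, and the bare `except` falls back to `request.messages`. A minimal sketch of the same fallback, using a stand-in dataclass rather than the package's real pydantic model:

from dataclasses import dataclass

@dataclass
class Message:
    role: str
    content: str

def normalize_messages(request):
    # Image-generation requests carry a bare `prompt`; chat requests carry `messages`.
    prompt = getattr(request, "prompt", None)
    if prompt is not None:
        return [Message(role="user", content=prompt)]
    return request.messages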
@@ -104,9 +109,10 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
         elif msg.role == "system":
             content[0]["text"] = re.sub(r"_+", "_", content[0]["text"])
             system_prompt = system_prompt + "\n\n" + content[0]["text"]
-    systemInstruction = {"parts": [{"text": system_prompt}]}
+    if system_prompt.strip():
+        systemInstruction = {"parts": [{"text": system_prompt}]}
 
-    if any(off_model in original_model for off_model in gemini_max_token_65k_models):
+    if any(off_model in original_model for off_model in gemini_max_token_65k_models) or original_model == "gemini-2.0-flash-preview-image-generation":
         safety_settings = "OFF"
     else:
         safety_settings = "BLOCK_NONE"
@@ -160,6 +166,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
         'top_logprobs',
         'response_format',
         'stream_options',
+        'prompt',
     ]
     generation_config = {}
 
@@ -214,6 +221,12 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
     else:
         payload["generationConfig"]["maxOutputTokens"] = 8192
 
+    if original_model == "gemini-2.0-flash-preview-image-generation":
+        payload["generationConfig"]["response_modalities"] = [
+            "Text",
+            "Image",
+        ]
+
     if "gemini-2.5" in original_model:
         payload["generationConfig"]["thinkingConfig"] = {
             "includeThoughts": True,
@@ -241,7 +254,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
         if key == request.model:
             for k, v in value.items():
                 payload[k] = v
-        elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude"]):
+        elif all(_model not in request.model.lower() for _model in ["gemini", "gpt", "claude", "deepseek"]) and "-" not in key:
            payload[key] = value
 
     return url, headers, payload
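
Note: the response_modalities hunk above is what unlocks image output: for gemini-2.0-flash-preview-image-generation the generation config opts in to both text and image modalities. A minimal sketch of the fragment it builds; the field casing follows the diff ("response_modalities", "Text"/"Image"), while Google's REST docs spell it responseModalities with TEXT/IMAGE, so this relies on the API accepting both spellings:

# Sketch (function name is illustrative) of the generationConfig fragment
# produced for the image-generation preview model.
def build_generation_config(original_model: str, max_output_tokens: int = 8192) -> dict:
    config = {"maxOutputTokens": max_output_tokens}
    if original_model == "gemini-2.0-flash-preview-image-generation":
        # Opt in to mixed text+image output, mirroring the hunk above.
        config["response_modalities"] = ["Text", "Image"]
    return config

print(build_generation_config("gemini-2.0-flash-preview-image-generation"))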

beswarm/aient/src/aient/core/response.py CHANGED
@@ -36,6 +36,7 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
     candidatesTokenCount = 0
     totalTokenCount = 0
     parts_json = ""
+    image_base64 = ""
     # line_index = 0
     # last_text_line = 0
     # if "thinking" in model:
@@ -67,17 +68,25 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
             if (line and '"parts": [' in line or parts_json != "") and is_finish == False:
                 parts_json += line
                 if parts_json != "" and line and '],' == line.strip():
-                    tmp_parts_json = "{" + parts_json.split("} ] },")[0].strip().rstrip("}], ").replace("\n", "\\n").lstrip("{") + "}]}"
+                    # tmp_parts_json = "{" + parts_json.split("} ] },")[0].strip().rstrip("}], ").replace("\n", "\\n").lstrip("{") + "}]}"
+                    tmp_parts_json = "{" + parts_json.split("} ] },")[0].strip().rstrip("}], ").replace("\n", "\\n").lstrip("{")
+                    if "inlineData" in tmp_parts_json:
+                        tmp_parts_json = tmp_parts_json + "}}]}"
+                    else:
+                        tmp_parts_json = tmp_parts_json + "}]}"
                     try:
                         json_data = json.loads(tmp_parts_json)
 
                         content = safe_get(json_data, "parts", 0, "text", default="")
+                        b64_json = safe_get(json_data, "parts", 0, "inlineData", "data", default="")
+                        if b64_json:
+                            image_base64 = b64_json
 
                         is_thinking = safe_get(json_data, "parts", 0, "thought", default=False)
                         if is_thinking:
                             sse_string = await generate_sse_response(timestamp, model, reasoning_content=content)
                             yield sse_string
-                        else:
+                        elif not image_base64:
                             sse_string = await generate_sse_response(timestamp, model, content=content)
                             yield sse_string
                     except json.JSONDecodeError:
@@ -93,6 +102,10 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
 
                 function_full_response += line
 
+        if image_base64:
+            yield await generate_no_stream_response(timestamp, model, content=content, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=totalTokenCount, prompt_tokens=promptTokenCount, completion_tokens=candidatesTokenCount, image_base64=image_base64)
+            return
+
         if need_function_call:
             function_call = json.loads(function_full_response)
             function_call_name = function_call["functionCall"]["name"]
@@ -102,6 +115,9 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model):
             sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=None, function_call_content=function_full_response)
             yield sse_string
 
+        sse_string = await generate_sse_response(timestamp, model, stop="stop")
+        yield sse_string
+
         sse_string = await generate_sse_response(timestamp, model, None, None, None, None, None, totalTokenCount, promptTokenCount, candidatesTokenCount)
         yield sse_string
 
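
Note: the added chunk above brings the stream in line with the OpenAI chat-completions convention of sending a final chunk whose delta is empty and whose finish_reason is "stop", followed by a usage chunk. A hedged sketch of roughly what the closing SSE lines look like (ids, model name, and token counts are illustrative):

import json, time

timestamp = int(time.time())
stop_chunk = {
    "id": "chatcmpl-xxx",
    "object": "chat.completion.chunk",
    "created": timestamp,
    "model": "gemini-2.0-flash",
    "choices": [{"index": 0, "delta": {}, "logprobs": None, "finish_reason": "stop"}],
}
usage_chunk = {
    "id": "chatcmpl-xxx",
    "object": "chat.completion.chunk",
    "created": timestamp,
    "model": "gemini-2.0-flash",
    "choices": [],
    "usage": {"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30},
}
for chunk in (stop_chunk, usage_chunk):
    # Each chunk is framed as a server-sent event line.
    print(f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n")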
@@ -535,9 +551,13 @@ async def fetch_response(client, url, headers, payload, engine, model):
         # print("parsed_data", json.dumps(parsed_data, indent=4, ensure_ascii=False))
         content = ""
         reasoning_content = ""
+        image_base64 = ""
         parts_list = safe_get(parsed_data, 0, "candidates", 0, "content", "parts", default=[])
         for item in parts_list:
             chunk = safe_get(item, "text")
+            b64_json = safe_get(item, "inlineData", "data", default="")
+            if b64_json:
+                image_base64 = b64_json
             is_think = safe_get(item, "thought", default=False)
             # logger.info(f"chunk: {repr(chunk)}")
             if chunk:
@@ -571,7 +591,7 @@ async def fetch_response(client, url, headers, payload, engine, model):
         function_call_content = safe_get(parsed_data, -1, "candidates", 0, "content", "parts", 0, "functionCall", "args", default=None)
 
         timestamp = int(datetime.timestamp(datetime.now()))
-        yield await generate_no_stream_response(timestamp, model, content=content, tools_id=None, function_call_name=function_call_name, function_call_content=function_call_content, role=role, total_tokens=total_tokens, prompt_tokens=prompt_tokens, completion_tokens=candidates_tokens, reasoning_content=reasoning_content)
+        yield await generate_no_stream_response(timestamp, model, content=content, tools_id=None, function_call_name=function_call_name, function_call_content=function_call_content, role=role, total_tokens=total_tokens, prompt_tokens=prompt_tokens, completion_tokens=candidates_tokens, reasoning_content=reasoning_content, image_base64=image_base64)
 
     elif engine == "claude":
         response_json = response.json()
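
Note: on the non-stream path, image bytes arrive as an inlineData part alongside any text parts. A small self-contained sketch of the extraction the loop above performs, with a stub standing in for the package's safe_get helper:

# safe_get here is a stub for illustration; the package ships its own helper.
def safe_get(d, *keys, default=None):
    for k in keys:
        try:
            d = d[k]
        except (KeyError, IndexError, TypeError):
            return default
    return d

parts_list = [
    {"text": "Here is your image:"},
    {"inlineData": {"mimeType": "image/png", "data": "iVBORw0KGgo..."}},
]

content, image_base64 = "", ""
for item in parts_list:
    chunk = safe_get(item, "text")
    if chunk:
        content += chunk
    b64_json = safe_get(item, "inlineData", "data", default="")
    if b64_json:
        image_base64 = b64_json

print(content, image_base64[:12])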

beswarm/aient/src/aient/core/utils.py CHANGED
@@ -112,7 +112,7 @@ def get_engine(provider, endpoint=None, original_model=""):
     if provider.get("engine"):
         engine = provider["engine"]
 
-    if endpoint == "/v1/images/generations" or "stable-diffusion" in original_model:
+    if engine != "gemini" and (endpoint == "/v1/images/generations" or "stable-diffusion" in original_model):
         engine = "dalle"
         stream = False
 
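
Note: the guard above keeps Gemini providers on the gemini engine even for /v1/images/generations, since image output is now served natively through response_modalities rather than the dalle path. A minimal sketch of the routing decision (the function name is illustrative, not the package's):

def pick_engine(engine, endpoint, original_model):
    # Only reroute to the "dalle" engine when the provider is not already Gemini.
    if engine != "gemini" and (
        endpoint == "/v1/images/generations" or "stable-diffusion" in original_model
    ):
        return "dalle"
    return engine

assert pick_engine("gemini", "/v1/images/generations", "gemini-2.0-flash-preview-image-generation") == "gemini"
assert pick_engine("openai", "/v1/images/generations", "dall-e-3") == "dalle"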
@@ -449,7 +449,7 @@ end_of_line = "\n\n"
 
 import random
 import string
-async def generate_sse_response(timestamp, model, content=None, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=0, prompt_tokens=0, completion_tokens=0, reasoning_content=None):
+async def generate_sse_response(timestamp, model, content=None, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=0, prompt_tokens=0, completion_tokens=0, reasoning_content=None, stop=None):
     random.seed(timestamp)
     random_str = ''.join(random.choices(string.ascii_letters + string.digits, k=29))
 
@@ -467,7 +467,7 @@ async def generate_sse_response(timestamp, model, content=None, tools_id=None, f
                 "index": 0,
                 "delta": delta_content,
                 "logprobs": None,
-                "finish_reason": None if content else "stop"
+                "finish_reason": None if content or reasoning_content else "stop"
             }
         ],
         "usage": None,
@@ -484,14 +484,19 @@ async def generate_sse_response(timestamp, model, content=None, tools_id=None, f
         total_tokens = prompt_tokens + completion_tokens
         sample_data["usage"] = {"prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, "total_tokens": total_tokens}
         sample_data["choices"] = []
+    if stop:
+        sample_data["choices"][0]["delta"] = {}
+        sample_data["choices"][0]["finish_reason"] = stop
+
     json_data = json.dumps(sample_data, ensure_ascii=False)
+    # print("json_data", json.dumps(sample_data, indent=4, ensure_ascii=False))
 
     # Build the SSE response
     sse_response = f"data: {json_data}" + end_of_line
 
     return sse_response
 
-async def generate_no_stream_response(timestamp, model, content=None, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=0, prompt_tokens=0, completion_tokens=0, reasoning_content=None):
+async def generate_no_stream_response(timestamp, model, content=None, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=0, prompt_tokens=0, completion_tokens=0, reasoning_content=None, image_base64=None):
     random.seed(timestamp)
     random_str = ''.join(random.choices(string.ascii_letters + string.digits, k=29))
     message = {
@@ -554,11 +559,25 @@ async def generate_no_stream_response(timestamp, model, content=None, tools_id=N
         "system_fingerprint": "fp_4691090a87"
     }
 
+    if image_base64:
+        sample_data = {
+            "created": timestamp,
+            "data": [{
+                "b64_json": image_base64
+            }],
+            # "usage": {
+            #     "total_tokens": 100,
+            #     "input_tokens": 50,
+            #     "output_tokens": 50,
+            # }
+        }
+
     if total_tokens:
         total_tokens = prompt_tokens + completion_tokens
         sample_data["usage"] = {"prompt_tokens": prompt_tokens, "completion_tokens": completion_tokens, "total_tokens": total_tokens}
 
     json_data = json.dumps(sample_data, ensure_ascii=False)
+    # print("json_data", json.dumps(sample_data, indent=4, ensure_ascii=False))
 
     return json_data
 
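
Note: when image_base64 is set, the non-stream response body is replaced with an OpenAI Images style object, {"created": ..., "data": [{"b64_json": ...}]}. A short sketch of building and consuming that body (the helper name is illustrative):

import base64, json

def build_image_response(timestamp, image_base64):
    # Mirrors the sample_data shape in the hunk above.
    return json.dumps({"created": timestamp, "data": [{"b64_json": image_base64}]})

body = json.loads(build_image_response(1700000000, "aGVsbG8="))
png_bytes = base64.b64decode(body["data"][0]["b64_json"])  # client-side decode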
beswarm/tools/__init__.py CHANGED
@@ -1,6 +1,6 @@
 from .think import think
 from .edit_file import edit_file
-from .worker import worker
+from .worker import worker, worker_gen
 
 from .search_arxiv import search_arxiv
 from .repomap import get_code_repo_map
@@ -26,6 +26,7 @@ __all__ = [
     "think",
     "edit_file",
     "worker",
+    "worker_gen",
     "search_arxiv",
     "get_code_repo_map",
     # aient.plugins
beswarm/tools/worker.py CHANGED
@@ -99,8 +99,10 @@ async def worker(goal, tools, work_dir, cache_messages=None):
             extracted_content = f"<latest_file_content>{match.group(1)}</latest_file_content>\n\n"
         else:
             extracted_content = ""
-
-        conversation_history[0]["content"] = extracted_content + conversation_history[0]["content"]
+        if isinstance(conversation_history[0]["content"], str):
+            conversation_history[0]["content"] = extracted_content + conversation_history[0]["content"]
+        elif isinstance(conversation_history[0]["content"], list) and extracted_content:
+            conversation_history[0]["content"].append({"type": "text", "text": extracted_content})
 
     instruction_agent.conversation["default"][1:] = conversation_history
     if "find_and_click_element" in str(tools_json):
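
Note: the isinstance checks above exist because OpenAI-style message content may be either a plain string or a list of typed parts. A standalone sketch of the same injection logic (the helper name is illustrative):

def inject(history, extracted_content):
    first = history[0]
    if isinstance(first["content"], str):
        # Plain-string content: prepend the extracted block.
        first["content"] = extracted_content + first["content"]
    elif isinstance(first["content"], list) and extracted_content:
        # Multimodal content: append it as an extra text part.
        first["content"].append({"type": "text", "text": extracted_content})
    return history

print(inject([{"role": "user", "content": "hi"}], "<latest_file_content>x</latest_file_content>\n\n"))
print(inject([{"role": "user", "content": [{"type": "text", "text": "hi"}]}], "<latest_file_content>x</latest_file_content>\n\n"))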
@@ -146,4 +148,129 @@ async def worker(goal, tools, work_dir, cache_messages=None):
         print("✅ 工作智能体回复:", result)
         need_instruction = True
 
-    return "任务已完成"
+    return "任务已完成"
+
+async def worker_gen(goal, tools, work_dir, cache_messages=None):
+    tools_json = [value for _, value in get_function_call_list(tools).items()]
+    work_agent_system_prompt = system_prompt.format(
+        os_version=platform.platform(),
+        workspace_path=work_dir,
+        shell=os.getenv('SHELL', 'Unknown'),
+        tools_list=tools_json
+    )
+
+    work_agent_config = {
+        "api_key": os.getenv("API_KEY"),
+        "api_url": os.getenv("BASE_URL"),
+        "engine": os.getenv("MODEL"),
+        "system_prompt": work_agent_system_prompt,
+        "print_log": True,
+        # "max_tokens": 8000,
+        "temperature": 0.5,
+        "function_call_max_loop": 100,
+    }
+    if cache_messages:
+        work_agent_config["cache_messages"] = cache_messages
+
+    instruction_agent_config = {
+        "api_key": os.getenv("API_KEY"),
+        "api_url": os.getenv("BASE_URL"),
+        "engine": os.getenv("MODEL"),
+        "system_prompt": instruction_system_prompt.format(os_version=platform.platform(), tools_list=tools_json, workspace_path=work_dir, current_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
+        "print_log": False,
+        # "max_tokens": 4000,
+        "temperature": 0.7,
+        "use_plugins": False,
+    }
+
+    # Initialize the work agent
+    work_agent = chatgpt(**work_agent_config)
+    async def instruction_agent_task():
+        while True:
+
+            instruction_prompt = f"""
+</work_agent_conversation_end>
+任务目标: {goal}
+
+在 tag <work_agent_conversation_start>...</work_agent_conversation_end> 之前的对话历史都是工作智能体的对话历史。
+
+根据以上对话历史和目标,请生成下一步指令。如果任务已完成,请回复"任务已完成"。
+"""
+            # Have the instruction agent analyze the conversation history and generate a new instruction
+            instruction_agent = chatgpt(**instruction_agent_config)
+            conversation_history = copy.deepcopy(work_agent.conversation["default"])
+
+            cache_dir = os.path.join(work_dir, ".beswarm")
+            os.makedirs(cache_dir, exist_ok=True)
+            cache_file = os.path.join(cache_dir, "work_agent_conversation_history.json")
+            with open(cache_file, "w", encoding="utf-8") as f:
+                f.write(json.dumps(conversation_history, ensure_ascii=False, indent=4))
+
+            work_agent_system_prompt = conversation_history.pop(0)
+            if conversation_history:
+                # Get the original content
+                original_content = work_agent_system_prompt["content"]
+
+                # Define the regular expression
+                regex = r"<latest_file_content>(.*?)</latest_file_content>"
+
+                # Run the match
+                match = re.search(regex, original_content, re.DOTALL)
+
+                # Extract the content, or default to an empty string
+                if match:
+                    extracted_content = f"<latest_file_content>{match.group(1)}</latest_file_content>\n\n"
+                else:
+                    extracted_content = ""
+
+                if isinstance(conversation_history[0]["content"], str):
+                    conversation_history[0]["content"] = extracted_content + conversation_history[0]["content"]
+                elif isinstance(conversation_history[0]["content"], list) and extracted_content:
+                    conversation_history[0]["content"].append({"type": "text", "text": extracted_content})
+
+            instruction_agent.conversation["default"][1:] = conversation_history
+            if "find_and_click_element" in str(tools_json):
+                instruction_prompt = await get_current_screen_image_message(instruction_prompt)
+            next_instruction = await instruction_agent.ask_async(instruction_prompt)
+            print("\n🤖 指令智能体生成的下一步指令:", next_instruction)
+            if "fetch_gpt_response_stream HTTP Error', 'status_code': 404" in next_instruction:
+                raise Exception(f"Model: {instruction_agent_config['engine']} not found!")
+            if "'status_code': 413" in next_instruction:
+                raise Exception(f"The request body is too long, please try again.")
+            next_instruction = extract_xml_content(next_instruction, "instructions")
+            if not next_instruction:
+                print("\n❌ 指令智能体生成的指令不符合要求,请重新生成。")
+                continue
+            else:
+                if conversation_history == []:
+                    next_instruction = (
+                        "任务描述:\n"
+                        f"{goal}\n\n"
+                        "现在开始执行第一步:\n"
+                        f"{next_instruction}"
+                    )
+                break
+        return next_instruction
+
+    need_instruction = True
+    while True:
+        next_instruction = ''
+        if need_instruction:
+            next_instruction = await instruction_agent_task()
+
+        yield {"user": next_instruction}
+
+        # Check whether the task is complete
+        if "任务已完成" in next_instruction:
+            print("\n✅ 任务已完成!")
+            break
+        if "find_and_click_element" in str(tools_json):
+            next_instruction = await get_current_screen_image_message(next_instruction)
+        result = await work_agent.ask_async(next_instruction)
+        if result.strip() == '' or result.strip() == '</content>\n</write_to_file>':
+            print("\n❌ 工作智能体回复为空,请重新生成指令。")
+            need_instruction = False
+            continue
+        yield {"assistant": result}
+        print("✅ 工作智能体回复:", result)
+        need_instruction = True
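
Note: unlike worker, worker_gen is an async generator: it yields each instruction as {"user": ...} and each work-agent result as {"assistant": ...} until the instruction agent replies "任务已完成". A hypothetical consumption sketch (the goal, tool names, and paths are assumptions; API_KEY/BASE_URL/MODEL must be set in the environment):

import asyncio
from beswarm.tools import worker_gen  # exported by the __init__.py change above

async def main():
    async for step in worker_gen(
        goal="Summarize README.md",   # hypothetical goal
        tools=["read_file"],          # hypothetical tool list
        work_dir="/tmp/demo",         # hypothetical workspace
    ):
        role, text = next(iter(step.items()))
        print(role, ":", text[:80])

# asyncio.run(main())  # uncomment to run with credentials configured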

beswarm-0.1.62.dist-info/METADATA → beswarm-0.1.64.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: beswarm
-Version: 0.1.62
+Version: 0.1.64
 Summary: MAS
 Requires-Python: >=3.11
 Description-Content-Type: text/markdown

beswarm-0.1.62.dist-info/RECORD → beswarm-0.1.64.dist-info/RECORD CHANGED
@@ -1,14 +1,14 @@
 beswarm/__init__.py,sha256=HZjUOJtZR5QhMuDbq-wukQQn1VrBusNWai_ysGo-VVI,20
 beswarm/utils.py,sha256=Z2Kuus2BLp9EHUC2ZNL9iUsb6NWnPj-MTA7SYzGyg24,1755
 beswarm/aient/main.py,sha256=SiYAIgQlLJqYusnTVEJOx1WNkSJKMImhgn5aWjfroxg,3814
-beswarm/aient/setup.py,sha256=Sd2NPcQ4WHx-F_jSx8hMYbLMTtRxIo6a-wxcEkruwW0,487
+beswarm/aient/setup.py,sha256=JlsLhJIFkt5a7CI_6K2Uu4M6-UXf84s3w3Qi1fr7UJ4,487
 beswarm/aient/src/aient/__init__.py,sha256=SRfF7oDVlOOAi6nGKiJIUK6B_arqYLO9iSMp-2IZZps,21
 beswarm/aient/src/aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
 beswarm/aient/src/aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
 beswarm/aient/src/aient/core/models.py,sha256=kF-HLi1I2k_G5r153ZHuiGH8_NmpTlFMfK0_myB28YQ,7366
-beswarm/aient/src/aient/core/request.py,sha256=2u_vPJjoIqRsKOPOkPilGzM4iZXUpsx5xtouQy87Z4E,65693
-beswarm/aient/src/aient/core/response.py,sha256=Z0Bjl_QvpUguyky1LIcsVks4BKKqT0eYEpDmKa_cwpQ,31978
-beswarm/aient/src/aient/core/utils.py,sha256=-naFCv8V-qhnqvDUd8BNbW1HR9CVAPxISrXoAz464Qg,26580
+beswarm/aient/src/aient/core/request.py,sha256=nvF_V71svezQ0-UbnC9RB_pXo_wV6QC7WE_SANwQzxE,66195
+beswarm/aient/src/aient/core/response.py,sha256=YphzhA9jtQKzWb3L4XGTp9xJZ2FOzHr1aAMTsi896FQ,33201
+beswarm/aient/src/aient/core/utils.py,sha256=VQ9uutGRR_JOvECOrjeoRBO2aA6w-pGwoXnnS2UvfPU,27263
 beswarm/aient/src/aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
 beswarm/aient/src/aient/core/test/test_geminimask.py,sha256=HFX8jDbNg_FjjgPNxfYaR-0-roUrOO-ND-FVsuxSoiw,13254
 beswarm/aient/src/aient/core/test/test_image.py,sha256=_T4peNGdXKBHHxyQNx12u-NTyFE8TlYI6NvvagsG2LE,319
@@ -119,7 +119,7 @@ beswarm/queries/tree-sitter-languages/ruby-tags.scm,sha256=vIidsCeE2A0vdFN18yXKq
 beswarm/queries/tree-sitter-languages/rust-tags.scm,sha256=9ljM1nzhfPs_ZTRw7cr2P9ToOyhGcKkCoN4_HPXSWi4,1451
 beswarm/queries/tree-sitter-languages/scala-tags.scm,sha256=UxQjz80JIrrJ7Pm56uUnQyThfmQNvwk7aQzPNypB-Ao,1761
 beswarm/queries/tree-sitter-languages/typescript-tags.scm,sha256=OMdCeedPiA24ky82DpgTMKXK_l2ySTuF2zrQ2fJAi9E,1253
-beswarm/tools/__init__.py,sha256=oDsCE7Coy3TXM0pTRS_4mWTEyPnsKVK7Vco1idSVxJk,1041
+beswarm/tools/__init__.py,sha256=1th0OE6oC_bC1_30XSegaIQ12clFXUqVre4vojfluZ4,1071
 beswarm/tools/click.py,sha256=TygaekCXTmU3fIu6Uom7ZcyzEgYMlCC_GX-5SmWHuLI,20762
 beswarm/tools/edit_file.py,sha256=xlAD0HB_xM0yZYc0eJwLE-9mAkywXa2UQPNHzG1OaW4,7664
 beswarm/tools/planner.py,sha256=lguBCS6kpwNPoXQvqH-WySabVubT82iyWOkJnjt6dXw,1265
@@ -127,8 +127,8 @@ beswarm/tools/repomap.py,sha256=N09K0UgwjCN7Zjg_5TYlVsulp3n2fztYlS8twalChU8,4500
 beswarm/tools/search_arxiv.py,sha256=9slwBemXjEqrd7-YgVmyMijPXlkhZCybEDRVhWVQ9B0,7937
 beswarm/tools/search_web.py,sha256=B24amOnGHnmdV_6S8bw8O2PdhZRRIDtJjg-wXcfP7dQ,11859
 beswarm/tools/think.py,sha256=WLw-7jNIsnS6n8MMSYUin_f-BGLENFmnKM2LISEp0co,1760
-beswarm/tools/worker.py,sha256=6tg2973BZkr85W73RWcTQ7cGub2sIRxHgluriorVBNc,6741
-beswarm-0.1.62.dist-info/METADATA,sha256=RzS5f38zi_lsfKumv6AqZARc0atpVl0OV_5g4hQ1AkE,3553
-beswarm-0.1.62.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
-beswarm-0.1.62.dist-info/top_level.txt,sha256=pJw4O87wvt5882smuSO6DfByJz7FJ8SxxT8h9fHCmpo,8
-beswarm-0.1.62.dist-info/RECORD,,
+beswarm/tools/worker.py,sha256=bZwMGUS4wt4yOleb2kKu0rCLHIJgPcokbCmh9bGMzXA,12679
+beswarm-0.1.64.dist-info/METADATA,sha256=KnWiUh0orLTMVypQqxxo-enfguZLdIWhVvJWCRj0tfE,3553
+beswarm-0.1.64.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+beswarm-0.1.64.dist-info/top_level.txt,sha256=pJw4O87wvt5882smuSO6DfByJz7FJ8SxxT8h9fHCmpo,8
+beswarm-0.1.64.dist-info/RECORD,,

beswarm-0.1.62.dist-info/WHEEL → beswarm-0.1.64.dist-info/WHEEL CHANGED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (80.8.0)
+Generator: setuptools (80.9.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 