beswarm 0.3.2__py3-none-any.whl → 0.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -128,6 +128,11 @@ class Texts(ContextProvider):
         else:
             _name = name
         super().__init__(_name, visible=visible)
+        if not self._is_dynamic:
+            self._cached_content = self.content
+            # The content is cached, but it's still "stale" from the perspective
+            # of the async refresh cycle. Let the first refresh formalize it.
+            self._is_stale = True
 
     async def refresh(self):
         if self._is_dynamic:
@@ -191,6 +196,10 @@ class Tools(ContextProvider):
     def __init__(self, tools_json: Optional[List[Dict]] = None, name: str = "tools", visible: bool = True):
         super().__init__(name, visible=visible)
         self._tools_json = tools_json or []
+        # Pre-render and cache the content, but leave it stale for the first refresh
+        if self._tools_json:
+            self._cached_content = f"<tools>{str(self._tools_json)}</tools>"
+            self._is_stale = True
     def update(self, tools_json: List[Dict]):
         self._tools_json = tools_json
         self.mark_stale()
@@ -289,6 +298,9 @@ class Images(ContextProvider):
     def __init__(self, url: str, name: Optional[str] = None, visible: bool = True):
         super().__init__(name or url, visible=visible)
         self.url = url
+        if self.url.startswith("data:"):
+            self._cached_content = self.url
+            self._is_stale = True
     def update(self, url: str):
         self.url = url
         self.mark_stale()
@@ -312,19 +324,21 @@ class Images(ContextProvider):
 
 # 3. Message classes (MessageContent merged in)
 class Message(ABC):
-    def __init__(self, role: str, *initial_items: Union[ContextProvider, str, list]):
+    def __init__(self, role: str, *initial_items: Union[ContextProvider, str, list, 'Message']):
         self.role = role
         processed_items = []
         for item in initial_items:
             if item is None:
                 continue
-            if isinstance(item, str):
-                # Check if the string contains placeholders from f-string rendering
+
+            # This is the new recursive flattening logic
+            if isinstance(item, Message):
+                processed_items.extend(item.provider())
+            elif isinstance(item, str):
                 import re
                 placeholder_pattern = re.compile(r'(__provider_placeholder_[a-f0-9]{32}__)')
                 parts = placeholder_pattern.split(item)
-
-                if len(parts) > 1: # Placeholders were found
+                if len(parts) > 1:
                     for part in parts:
                         if not part: continue
                         if placeholder_pattern.match(part):
@@ -333,18 +347,14 @@ class Message(ABC):
                             processed_items.append(provider)
                         else:
                             processed_items.append(Texts(text=part))
-                else: # No placeholders, just a regular string
+                else:
                     processed_items.append(Texts(text=item))
-
-            elif isinstance(item, Message):
-                processed_items.extend(item.provider())
             elif isinstance(item, ContextProvider):
                 processed_items.append(item)
             elif isinstance(item, list):
                 for sub_item in item:
                     if not isinstance(sub_item, dict) or 'type' not in sub_item:
                         raise ValueError("List items must be dicts with a 'type' key.")
-
                     item_type = sub_item['type']
                     if item_type == 'text':
                         processed_items.append(Texts(text=sub_item.get('text', '')))
@@ -507,10 +517,27 @@ class Message(ABC):
         """Provides a dict-like .get() method for attribute access."""
         return getattr(self, key, default)
 
-    async def render_latest(self) -> Optional[Dict[str, Any]]:
-        """Refreshes all providers in the message and returns the rendered dictionary."""
+    async def refresh(self):
+        """Refresh all providers in this message."""
         tasks = [provider.refresh() for provider in self._items]
         await asyncio.gather(*tasks)
+
+    async def render(self) -> Optional[Dict[str, Any]]:
+        """
+        Render the message as a dict. The first call refreshes implicitly so that dynamic content is loaded.
+        Subsequent calls return the cached version unless refresh() is called manually.
+        """
+        # Check whether this is the first render
+        is_first_render = not all(hasattr(p, '_cached_content') and p._cached_content is not None for p in self._items if p._is_stale)
+
+        if is_first_render:
+            await self.refresh()
+
+        return self.to_dict()
+
+    async def render_latest(self) -> Optional[Dict[str, Any]]:
+        """Always refresh and return the latest rendered result."""
+        await self.refresh()
         return self.to_dict()
 
     def to_dict(self) -> Optional[Dict[str, Any]]:
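
For reference, a minimal sketch of how the split between refresh() and the two render methods shown above would be used. The import path is illustrative (the module lives at beswarm/aient/aient/architext/architext/core.py); UserMessage and Texts are taken from the package's own API as exercised by the tests later in this diff.

    import asyncio
    from datetime import datetime

    from architext.core import UserMessage, Texts  # hypothetical import path

    async def demo():
        # One static provider plus one dynamic (callable) provider
        message = UserMessage("Time: ", Texts(lambda: str(datetime.now().timestamp())))

        first = await message.render()          # first call refreshes implicitly
        cached = await message.render()         # later calls return the cached rendering
        await message.refresh()                 # explicit refresh re-evaluates dynamic providers
        latest = await message.render_latest()  # always refreshes before rendering
        return first, cached, latest

    asyncio.run(demo())
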
@@ -583,12 +610,20 @@ class ToolCalls(Message):
 
 class ToolResults(Message):
     """Represents a tool message with the result of a single tool call."""
-    def __init__(self, tool_call_id: str, content: str):
-        # We pass a Texts provider to the parent so it can be rendered,
-        # but the primary way to access content for ToolResults is via its dict representation.
-        super().__init__("tool", Texts(text=content))
+    def __init__(self, tool_call_id: str, content: Union[str, Message]):
+        # The base Message class now handles the absorption of a Message object.
+        # We just need to pass the content to the parent __init__.
+        # For ToolResults, we primarily care about the textual content.
+        if isinstance(content, Message):
+            # Extract only text-like providers to pass to the parent
+            text_providers = [p for p in content.provider() if not isinstance(p, Images)]
+            super().__init__("tool", *text_providers)
+        else:
+            super().__init__("tool", content)
+
         self.tool_call_id = tool_call_id
-        self._content = content
+        # After initialization, render the content to a simple string for _content.
+        self._content = self._render_content()
 
     def to_dict(self) -> Dict[str, Any]:
         return {
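
A quick sketch of the absorption behavior this hunk enables, mirroring the test cases added later in this diff (same hypothetical import path as above; the tool_call_id is arbitrary).

    import asyncio
    from architext.core import ToolResults, UserMessage, Images  # hypothetical import path

    async def demo_tool_results():
        # A ToolResults built from a nested UserMessage keeps only the text-like providers
        result = ToolResults(tool_call_id="call_1",
                             content=UserMessage("text part", Images(url="some_url")))
        rendered = await result.render_latest()
        # Per the tests: {'role': 'tool', 'tool_call_id': 'call_1', 'content': 'text part'}
        return rendered

    asyncio.run(demo_tool_results())
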
@@ -768,7 +803,8 @@ class Messages:
 
     def __len__(self) -> int: return len(self._messages)
     def __iter__(self): return iter(self._messages)
-
+    def __repr__(self):
+        return f"Messages({repr(self._messages)})"
     def __contains__(self, item: Any) -> bool:
         """Checks if a Message or ContextProvider is in the collection."""
         if isinstance(item, Message):
@@ -1578,6 +1578,137 @@ Files: {Files(visible=True, name="files")}
         rendered_single = await message_single.render_latest()
         self.assertEqual(rendered_single['content'], "Only one.")
 
+    async def test_zad_simple_render_without_refresh(self):
+        """Test whether Messages(UserMessage('hi')).render() can render directly and synchronously"""
+        # This test checks if a simple message can be rendered synchronously
+        # without an explicit `await refresh()` or `await render_latest()`.
+        # Calling the synchronous render method directly on a new instance
+        rendered = Messages(UserMessage("hi", Images(url="data:image/png;base64,FAKE"))).render()
+
+        # The current implementation will likely fail here, returning []
+        self.assertEqual(len(rendered), 1)
+        self.assertEqual(rendered[0]['role'], 'user')
+
+        # Now we expect a list for multimodal content
+        content = rendered[0]['content']
+        self.assertIsInstance(content, list)
+        self.assertEqual(len(content), 2)
+        self.assertEqual(content[0]['type'], 'text')
+        self.assertEqual(content[0]['text'], 'hi')
+        self.assertEqual(content[1]['type'], 'image_url')
+        self.assertEqual(content[1]['image_url']['url'], "data:image/png;base64,FAKE")
+
+    async def test_zae_messages_representation(self):
+        """Test whether the __repr__ method of a Messages object provides readable output"""
+        messages = Messages(
+            UserMessage("Hello"),
+            AssistantMessage("Hi there!")
+        )
+
+        actual_repr = repr(messages)
+
+        # A readable string form should look like Messages([...]) and include the repr of its inner messages
+        self.assertTrue(actual_repr.startswith("Messages(["), f"Expected output to start with 'Messages([', but got '{actual_repr}'")
+        self.assertTrue(actual_repr.endswith("])"), f"Expected output to end with '])', but got '{actual_repr}'")
+        self.assertIn("Message(role='user', items=", actual_repr)
+        self.assertIn("Message(role='assistant', items=", actual_repr)
+
+    async def test_zaf_message_absorption(self):
+        """Test whether a Message object can absorb nested Message objects as its content"""
+        # 1. ToolResults absorbs a UserMessage
+        tool_results_1 = ToolResults(tool_call_id="call_1", content=UserMessage("hi"))
+        rendered_1 = await tool_results_1.render_latest()
+        self.assertEqual(rendered_1['content'], "hi")
+        self.assertEqual(rendered_1['tool_call_id'], "call_1")
+
+        # 2. UserMessage absorbs an AssistantMessage
+        user_message_1 = UserMessage("prefix", AssistantMessage("absorbed content"))
+        rendered_user_1 = await user_message_1.render_latest()
+        self.assertEqual(rendered_user_1['content'], "prefixabsorbed content")
+        self.assertEqual(len(user_message_1.provider()), 2) # Should be flattened
+
+        # 3. Complex nesting
+        final_message = ToolResults(tool_call_id="call_final", content=UserMessage("A", AssistantMessage("B", UserMessage("C"))))
+        rendered_final = await final_message.render_latest()
+        self.assertEqual(rendered_final['content'], "ABC")
+
+        # 4. Combined case: ToolResults(UserMessage(Texts("a"), Texts("b"))) -> content="ab"
+        tool_results_2 = ToolResults(tool_call_id="call_2", content=UserMessage(Texts("a"), Texts("b")))
+        rendered_2 = await tool_results_2.render_latest()
+        self.assertEqual(rendered_2['content'], "ab")
+
+        # 5. Case with multimodal content (ToolResults should extract only the text)
+        tool_results_3 = ToolResults(tool_call_id="call_3", content=UserMessage("text part", Images(url="some_url")))
+        rendered_3 = await tool_results_3.render_latest()
+        self.assertEqual(rendered_3['content'], "text part") # Images should be ignored
+
+        # 6. Passing a plain string should keep working unchanged
+        tool_results_4 = ToolResults(tool_call_id="call_4", content="just a string")
+        rendered_4 = await tool_results_4.render_latest()
+        self.assertEqual(rendered_4['content'], "just a string")
+
+        # 7. Passing an empty Message
+        tool_results_5 = ToolResults(tool_call_id="call_5", content=UserMessage())
+        rendered_5 = await tool_results_5.render_latest()
+        self.assertEqual(rendered_5['content'], "")
+
+    async def test_zzb_final_message_render_logic(self):
+        """
+        Final-version test:
+        - The first render() call is guaranteed to return the complete result.
+        - Subsequent render() calls return the cached result and do not refresh automatically.
+        - refresh() refreshes the content explicitly.
+        - render_latest() always fetches the latest content.
+        """
+        from datetime import datetime
+        import time
+
+        # 1. Create a Message with a dynamic provider
+        timestamp_provider = Texts(lambda: str(datetime.now().timestamp()))
+        message = UserMessage("Time: ", timestamp_provider)
+
+        # 2. First call to render() - should refresh and return the full content
+        rendered_1 = await message.render()
+        content1 = rendered_1['content']
+        timestamp1_str = content1.replace("Time: ", "")
+        self.assertTrue(timestamp1_str, "The first render() call should return the full dynamic content")
+
+        # 3. Second call to render() - should not refresh automatically; returns the cached content
+        time.sleep(1)
+        rendered_2 = await message.render()
+        content2 = rendered_2['content']
+        self.assertEqual(content1, content2, "The second render() call should return cached content without refreshing")
+
+        # 4. Call refresh() - explicit refresh
+        time.sleep(1)
+        await message.refresh()
+
+        # 5. Calling render() after refresh() - should return the freshly refreshed content
+        rendered_3 = await message.render()
+        content3 = rendered_3['content']
+        timestamp3_str = content3.replace("Time: ", "")
+        self.assertNotEqual(content2, content3, "render() after refresh() should return the new content")
+
+        # 6. Call render_latest() - always fetches the latest content
+        time.sleep(1)
+        rendered_latest = await message.render_latest()
+        content_latest = rendered_latest['content']
+        timestamp_latest_str = content_latest.replace("Time: ", "")
+        self.assertNotEqual(content3, content_latest, "render_latest() should always fetch the latest content")
+
+        # 7. Test ToolResults
+        tool_results_msg = ToolResults(tool_call_id="call_123", content="Result from tool")
+        # ToolResults's content is static, so render() should always return the same full content
+        rendered_tool_1 = await tool_results_msg.render()
+        self.assertEqual(rendered_tool_1, {
+            "role": "tool",
+            "tool_call_id": "call_123",
+            "content": "Result from tool"
+        })
+        # Subsequent calls should also work
+        rendered_tool_2 = await tool_results_msg.render()
+        self.assertEqual(rendered_tool_1, rendered_tool_2)
+
 
 # ==============================================================================
 # 6. Demo
@@ -196,7 +196,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
 
     for field, value in request.model_dump(exclude_unset=True).items():
         if field not in miss_fields and value is not None:
-            if field == "tools" and "gemini-2.0-flash-thinking" in original_model:
+            if field == "tools" and ("gemini-2.0-flash-thinking" in original_model or "gemini-2.5-flash-image" in original_model):
                 continue
             if field == "tools":
                 # Process the function definition of each tool
@@ -221,6 +221,8 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
                     }
                 })
             elif field == "temperature":
+                if "gemini-2.5-flash-image" in original_model:
+                    value = 1
                 generation_config["temperature"] = value
             elif field == "max_tokens":
                 if value > 65536:
@@ -244,7 +246,7 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
         "Image",
     ]
 
-    if "gemini-2.5" in original_model:
+    if "gemini-2.5" in original_model and "gemini-2.5-flash-image" not in original_model:
         # Detect the thinking-budget setting from the requested model name
         m = re.match(r".*-think-(-?\d+)", request.model)
         if m:
@@ -45,8 +45,8 @@ async def gemini_json_poccess(response_json):
     if is_thinking:
         content = safe_get(json_data, "parts", 1, "text", default="")
 
-    function_call_name = safe_get(json_data, "functionCall", "name", default=None)
-    function_full_response = safe_get(json_data, "functionCall", "args", default="")
+    function_call_name = safe_get(json_data, "parts", 0, "functionCall", "name", default=None)
+    function_full_response = safe_get(json_data, "parts", 0, "functionCall", "args", default="")
     function_full_response = await asyncio.to_thread(json.dumps, function_full_response) if function_full_response else None
 
     blockReason = safe_get(json_data, 0, "promptFeedback", "blockReason", default=None)
@@ -78,7 +78,7 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model, tim
                 try:
                     response_json = await asyncio.to_thread(json.loads, parts_json)
                 except json.JSONDecodeError:
-                    logger.error(f"JSON decode error: {parts_json}")
+                    # logger.error(f"JSON decode error: {parts_json}")
                     continue
             else:
                 parts_json += line
@@ -99,7 +99,12 @@ async def fetch_gemini_response_stream(client, url, headers, payload, model, tim
             yield sse_string
 
         if image_base64:
-            yield await generate_no_stream_response(timestamp, model, content=content, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=totalTokenCount, prompt_tokens=promptTokenCount, completion_tokens=candidatesTokenCount, image_base64=image_base64)
+            if "gemini-2.5-flash-image" not in model:
+                yield await generate_no_stream_response(timestamp, model, content=content, tools_id=None, function_call_name=None, function_call_content=None, role=None, total_tokens=totalTokenCount, prompt_tokens=promptTokenCount, completion_tokens=candidatesTokenCount, image_base64=image_base64)
+            else:
+                image_url = await upload_image_to_0x0st("data:image/png;base64," + image_base64)
+                sse_string = await generate_sse_response(timestamp, model, content=f"\n\n![image]({image_url})")
+                yield sse_string
 
         if function_call_name:
             sse_string = await generate_sse_response(timestamp, model, content=None, tools_id="chatcmpl-9inWv0yEtgn873CxMBzHeCeiHctTV", function_call_name=function_call_name)
@@ -125,8 +125,12 @@ class chatgpt(BaseLLM):
         else:
             # If no logger is provided, create a default one that only prints to the console
             self.logger = logging.getLogger("chatgpt_default")
+            self.logger.propagate = False
             if not self.logger.handlers:  # Prevent adding duplicate handlers
-                self.logger.addHandler(logging.StreamHandler())
+                handler = logging.StreamHandler()
+                formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+                handler.setFormatter(formatter)
+                self.logger.addHandler(handler)
             self.logger.setLevel(logging.INFO if print_log else logging.WARNING)
 
         # Register and process the passed-in tools
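
The default-logger change above uses only standard-library calls; the same pattern in isolation looks like this (logger name kept from the hunk, level hard-coded here for illustration).

    import logging

    logger = logging.getLogger("chatgpt_default")
    logger.propagate = False  # keep records away from the root logger to avoid duplicate lines
    if not logger.handlers:   # guard against adding the handler twice on re-init
        handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
        logger.addHandler(handler)
    logger.setLevel(logging.INFO)

    logger.info("formatted, non-propagating log line")
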
@@ -282,7 +286,7 @@ class chatgpt(BaseLLM):
             "messages": await self.conversation[convo_id].render_latest() if pass_history else Messages(
                 SystemMessage(self.system_prompt, self.conversation[convo_id].provider("files")),
                 UserMessage(prompt)
-            ),
+            ).render(),
             "stream": stream,
             "temperature": kwargs.get("temperature", self.temperature)
         }
@@ -431,7 +435,7 @@ class chatgpt(BaseLLM):
         for chunk in process_sync():
             yield chunk
 
-        if not full_response.strip():
+        if not full_response.strip() and not need_function_call:
             raise EmptyResponseError("Response is empty")
 
         if self.print_log:
@@ -688,7 +692,7 @@ class chatgpt(BaseLLM):
         # Prepare the conversation
         self.system_prompt = system_prompt or self.system_prompt
         if convo_id not in self.conversation or pass_history <= 2:
-            self.reset(convo_id=convo_id, system_prompt=system_prompt)
+            self.reset(convo_id=convo_id, system_prompt=self.system_prompt)
         self.add_to_conversation(prompt, role, convo_id=convo_id, function_name=function_name, total_tokens=total_tokens, function_arguments=function_arguments, pass_history=pass_history, function_call_id=function_call_id)
 
         # Get the request body
@@ -945,7 +949,7 @@ class chatgpt(BaseLLM):
         """
         self.system_prompt = system_prompt or self.system_prompt
         self.conversation[convo_id] = Messages(
-            SystemMessage(Texts("system_prompt", self.system_prompt), self.conversation[convo_id].provider("files")),
+            SystemMessage(self.system_prompt, self.conversation[convo_id].provider("files")),
         )
         self.tokens_usage[convo_id] = 0
         self.current_tokens[convo_id] = 0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: beswarm
-Version: 0.3.2
+Version: 0.3.4
 Summary: MAS
 Requires-Python: >=3.11
 Description-Content-Type: text/markdown
@@ -10,15 +10,15 @@ beswarm/agents/chatgroup.py,sha256=PzrmRcDKAbB7cxL16nMod_CzPosDV6bfTmXxQVuv-AQ,1
 beswarm/agents/planact.py,sha256=wYIyrAsBY6Z_Hc8rx76vbfUTsagqYFIBOfPi43ze708,18361
 beswarm/aient/aient/__init__.py,sha256=SRfF7oDVlOOAi6nGKiJIUK6B_arqYLO9iSMp-2IZZps,21
 beswarm/aient/aient/architext/architext/__init__.py,sha256=79Ih1151rfcqZdr7F8HSZSTs_iT2SKd1xCkehMsXeXs,19
-beswarm/aient/aient/architext/architext/core.py,sha256=KHzc6ly4vNYie2kRfPokDGhfTz3CvntuiSd-XyE3cAM,32502
+beswarm/aient/aient/architext/architext/core.py,sha256=k5Sza5mrIuv_34T0qUmnFGqAZMFwWhudO96qVlnyA8c,34135
 beswarm/aient/aient/architext/test/openai_client.py,sha256=Dqtbmubv6vwF8uBqcayG0kbsiO65of7sgU2-DRBi-UM,4590
-beswarm/aient/aient/architext/test/test.py,sha256=9OQ12qAs81Ce6KCKXQx4Re0P9VjNuTF4rZRcTRstJVw,72868
+beswarm/aient/aient/architext/test/test.py,sha256=pYDGN0Dvy5teeMcuglP4cQaESfjYdKf2Zx5m67cRPjA,79539
 beswarm/aient/aient/architext/test/test_save_load.py,sha256=o8DqH6gDYZkFkQy-a7blqLtJTRj5e4a-Lil48pJ0V3g,3260
 beswarm/aient/aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
 beswarm/aient/aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
 beswarm/aient/aient/core/models.py,sha256=KMlCRLjtq1wQHZTJGqnbWhPS2cHq6eLdnk7peKDrzR8,7490
-beswarm/aient/aient/core/request.py,sha256=-KEBd4jWLVC9QYUhb1ZfgkLf4nKE7HKL0A58iULkY7o,76757
-beswarm/aient/aient/core/response.py,sha256=Z9geTfh2LkGHKAqjelgeleQtfOAYIyM82t9AVB4xsgE,36407
+beswarm/aient/aient/core/request.py,sha256=w3HcsS4BOcrprPjSUWPz-sfcEnX26HxN7AZCThX2gE0,76949
+beswarm/aient/aient/core/response.py,sha256=oKAb97XX4tbgLBdzSGTedJamGTQztp7hjL5YK3ZbJFQ,36792
 beswarm/aient/aient/core/utils.py,sha256=Z8vTH9w3uS8uubBa65c_aJ11A3OKGYEzm4q0brNZDSk,31594
 beswarm/aient/aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
 beswarm/aient/aient/core/test/test_geminimask.py,sha256=HFX8jDbNg_FjjgPNxfYaR-0-roUrOO-ND-FVsuxSoiw,13254
@@ -27,7 +27,7 @@ beswarm/aient/aient/core/test/test_payload.py,sha256=8jBiJY1uidm1jzL-EiK0s6UGmW9
 beswarm/aient/aient/models/__init__.py,sha256=ZTiZgbfBPTjIPSKURE7t6hlFBVLRS9lluGbmqc1WjxQ,43
 beswarm/aient/aient/models/audio.py,sha256=FNW4lxG1IhxOU7L8mvcbaeC1nXk_lpUZQlg9ijQ0h_Q,1937
 beswarm/aient/aient/models/base.py,sha256=HWIGfa2A7OTccvHK0wG1-UlHB-yaWRC7hbi4oR1Mu1Y,7228
-beswarm/aient/aient/models/chatgpt.py,sha256=n99RspEqdMrd8u3LLGWYgAdhDEZyibiaRASqrRVeIZw,43358
+beswarm/aient/aient/models/chatgpt.py,sha256=DOm0_tieWj8V_xIQCZy_33PGCebZ8nvKIXXYpzYWw6Y,43601
 beswarm/aient/aient/plugins/__init__.py,sha256=p3KO6Aa3Lupos4i2SjzLQw1hzQTigOAfEHngsldrsyk,986
 beswarm/aient/aient/plugins/arXiv.py,sha256=yHjb6PS3GUWazpOYRMKMzghKJlxnZ5TX8z9F6UtUVow,1461
 beswarm/aient/aient/plugins/config.py,sha256=TGgZ5SnNKZ8MmdznrZ-TEq7s2ulhAAwTSKH89bci3dA,7079
@@ -121,8 +121,8 @@ beswarm/tools/search_web.py,sha256=0fTeczXiOX_LJQGaLEGbuJtIPzofeuquGWEt3yDMtVw,1
 beswarm/tools/subtasks.py,sha256=NHDnmUhUPgDQKBACnpgErpFJRcsH0w_Q9VsyQjNvNHA,12658
 beswarm/tools/worker.py,sha256=mQ1qdrQ8MgL99byAbTvxfEByFFGN9mty3UHqHjARMQ8,2331
 beswarm/tools/write_csv.py,sha256=u0Hq18Ksfheb52MVtyLNCnSDHibITpsYBPs2ub7USYA,1466
-beswarm-0.3.2.dist-info/METADATA,sha256=cZs62VmbA4HP3fml4T7u-R6jg3QKndIxvuICMZ_mlC8,3877
-beswarm-0.3.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-beswarm-0.3.2.dist-info/entry_points.txt,sha256=URK7Y4PDzBgxIecQnxsWTu4O-eaFa1CoAcNTWh5R7LM,45
-beswarm-0.3.2.dist-info/top_level.txt,sha256=pJw4O87wvt5882smuSO6DfByJz7FJ8SxxT8h9fHCmpo,8
-beswarm-0.3.2.dist-info/RECORD,,
+beswarm-0.3.4.dist-info/METADATA,sha256=ZypZoU4XNVT0XC2xy89Pc0DK0UY9d5Sf-xV7aJdxGwM,3877
+beswarm-0.3.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+beswarm-0.3.4.dist-info/entry_points.txt,sha256=URK7Y4PDzBgxIecQnxsWTu4O-eaFa1CoAcNTWh5R7LM,45
+beswarm-0.3.4.dist-info/top_level.txt,sha256=pJw4O87wvt5882smuSO6DfByJz7FJ8SxxT8h9fHCmpo,8
+beswarm-0.3.4.dist-info/RECORD,,