beswarm 0.2.0__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
beswarm/aient/setup.py CHANGED
@@ -4,7 +4,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="aient",
-    version="1.1.39",
+    version="1.1.40",
     description="Aient: The Awakening of Agent.",
     long_description=Path.open(Path("README.md"), encoding="utf-8").read(),
     long_description_content_type="text/markdown",
beswarm/aient/src/aient/core/request.py CHANGED
@@ -240,6 +240,10 @@ async def get_gemini_payload(request, engine, provider, api_key=None):
 val = int(m.group(1))
 if val < 0:
     val = 0
+elif val > 32768 and "gemini-2.5-pro" in original_model:
+    val = 32768
+elif val < 128 and "gemini-2.5-pro" in original_model:
+    val = 128
 elif val > 24576:
     val = 24576
 payload["generationConfig"]["thinkingConfig"]["thinkingBudget"] = val
@@ -532,6 +536,10 @@ async def get_vertex_gemini_payload(request, engine, provider, api_key=None):
 val = int(m.group(1))
 if val < 0:
     val = 0
+elif val > 32768 and "gemini-2.5-pro" in original_model:
+    val = 32768
+elif val < 128 and "gemini-2.5-pro" in original_model:
+    val = 128
 elif val > 24576:
     val = 24576
 payload["generationConfig"]["thinkingConfig"]["thinkingBudget"] = val
beswarm/aient/src/aient/core/response.py CHANGED
@@ -568,6 +568,7 @@ async def fetch_response(client, url, headers, payload, engine, model):
 
 elif engine == "gemini" or engine == "vertex-gemini" or engine == "aws":
     response_json = response.json()
+    # print("response_json", json.dumps(response_json, indent=4, ensure_ascii=False))
 
     if isinstance(response_json, str):
         import ast
@@ -579,7 +580,7 @@ async def fetch_response(client, url, headers, payload, engine, model):
 else:
     logger.error(f"error fetch_response: Unknown response_json type: {type(response_json)}")
     parsed_data = response_json
-# print("parsed_data", json.dumps(parsed_data, indent=4, ensure_ascii=False))
+
 content = ""
 reasoning_content = ""
 image_base64 = ""
@@ -596,15 +597,6 @@ async def fetch_response(client, url, headers, payload, engine, model):
     reasoning_content += chunk
 else:
     content += chunk
-# for item in parsed_data:
-#     chunk = safe_get(item, "candidates", 0, "content", "parts", 0, "text")
-#     is_think = safe_get(item, "candidates", 0, "content", "parts", 0, "thought", default=False)
-#     # logger.info(f"chunk: {repr(chunk)}")
-#     if chunk:
-#         if is_think:
-#             reasoning_content += chunk
-#         else:
-#             content += chunk
 
 usage_metadata = safe_get(parsed_data, -1, "usageMetadata")
 prompt_tokens = safe_get(usage_metadata, "promptTokenCount", default=0)
@@ -618,8 +610,13 @@ async def fetch_response(client, url, headers, payload, engine, model):
 logger.error(f"Unknown role: {role}, parsed_data: {parsed_data}")
 role = "assistant"
 
-function_call_name = safe_get(parsed_data, -1, "candidates", 0, "content", "parts", 0, "functionCall", "name", default=None)
-function_call_content = safe_get(parsed_data, -1, "candidates", 0, "content", "parts", 0, "functionCall", "args", default=None)
+has_think = safe_get(parsed_data, 0, "candidates", 0, "content", "parts", 0, "thought", default=False)
+if has_think:
+    function_message_parts_index = -1
+else:
+    function_message_parts_index = 0
+function_call_name = safe_get(parsed_data, -1, "candidates", 0, "content", "parts", function_message_parts_index, "functionCall", "name", default=None)
+function_call_content = safe_get(parsed_data, -1, "candidates", 0, "content", "parts", function_message_parts_index, "functionCall", "args", default=None)
 
 timestamp = int(datetime.timestamp(datetime.now()))
 yield await generate_no_stream_response(timestamp, model, content=content, tools_id=None, function_call_name=function_call_name, function_call_content=function_call_content, role=role, total_tokens=total_tokens, prompt_tokens=prompt_tokens, completion_tokens=candidates_tokens, reasoning_content=reasoning_content, image_base64=image_base64)
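This hunk changes which `parts` entry is inspected for a Gemini function call: when the first part of the first response chunk is a `thought` part, the function call is read from the last part instead of the first. A minimal sketch of that selection, assuming `parsed_data` is the already-parsed list of response chunks (the helper `pick_function_call` and the inlined `get` stand-in for aient's `safe_get` are hypothetical):

```python
from typing import Any, Optional, Tuple

def pick_function_call(parsed_data: list) -> Tuple[Optional[str], Optional[dict]]:
    """Return (function_call_name, function_call_args) from Gemini response chunks."""

    def get(data: Any, *keys: Any, default: Any = None) -> Any:
        # Simplified stand-in for safe_get: walk dict keys / list indexes,
        # returning `default` on any missing step.
        for key in keys:
            try:
                data = data[key]
            except (KeyError, IndexError, TypeError):
                return default
        return data

    # If the first part of the first chunk is a "thought" part, the function
    # call (when present) sits in the last part of the final chunk.
    has_think = get(parsed_data, 0, "candidates", 0, "content", "parts", 0, "thought", default=False)
    parts_index = -1 if has_think else 0
    name = get(parsed_data, -1, "candidates", 0, "content", "parts", parts_index, "functionCall", "name")
    args = get(parsed_data, -1, "candidates", 0, "content", "parts", parts_index, "functionCall", "args")
    return name, args
```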
beswarm/bemcp/bemcp/decorator.py ADDED
@@ -0,0 +1,83 @@
+import asyncio
+import functools
+from typing import Any, Callable, TypeVar
+
+try:
+    from mcp.shared.exceptions import McpError
+except ImportError:
+    # Define a dummy exception if mcp is not available,
+    # although it should be in the target environment.
+    class McpError(Exception):
+        pass
+
+try:
+    from anyio import BrokenResourceError
+except ImportError:
+    class BrokenResourceError(Exception):
+        pass
+
+F = TypeVar('F', bound=Callable[..., Any])
+
+def async_retry(max_retries: int = 2, delay: float = 1.0):
+    """
+    A decorator to automatically retry an async function if it raises an exception.
+
+    Args:
+        max_retries: The maximum number of retries.
+        delay: The delay between retries in seconds.
+    """
+    def decorator(func: F) -> F:
+        @functools.wraps(func)
+        async def wrapper(*args: Any, **kwargs: Any) -> Any:
+            last_exception = None
+            # The number of attempts is max_retries + 1 (the initial attempt)
+            for attempt in range(max_retries + 1):
+                try:
+                    return await func(*args, **kwargs)
+                except Exception as e:
+                    last_exception = e
+                    if attempt < max_retries:
+                        print(f"Attempt {attempt + 1} failed with error: {e}. Retrying in {delay}s...")
+                        await asyncio.sleep(delay)
+                    else:
+                        print(f"All {max_retries + 1} attempts failed.")
+            if last_exception is not None:
+                raise last_exception
+        return wrapper # type: ignore
+    return decorator
+
+
+def reconnect_on_connection_error(func: F) -> F:
+    """
+    A decorator for MCPClient methods that automatically tries to reconnect
+    and retry the call if a connection-related error is caught.
+    It handles McpError and anyio.BrokenResourceError.
+    """
+    @functools.wraps(func)
+    async def wrapper(self, *args: Any, **kwargs: Any) -> Any:
+        try:
+            return await func(self, *args, **kwargs)
+        except (McpError, BrokenResourceError) as e:
+            is_connection_error = False
+            if isinstance(e, McpError):
+                error_str = str(e).lower()
+                if "connection closed" in error_str or "peer closed connection" in error_str:
+                    is_connection_error = True
+            elif isinstance(e, BrokenResourceError):
+                is_connection_error = True
+
+            if is_connection_error:
+                print(f"Connection error detected ({type(e).__name__}): {e}. Attempting to reconnect...")
+                try:
+                    await self.disconnect()
+                    await self.connect()
+                    print("Reconnected successfully. Retrying the operation...")
+                    return await func(self, *args, **kwargs)
+                except Exception as reconnect_e:
+                    print(f"Failed to reconnect and retry: {reconnect_e}")
+                    # If reconnect fails, raise the original connection error
+                    raise e
+            else:
+                # Not a connection-related McpError, re-raise it.
+                raise
+    return wrapper # type: ignore
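The new `decorator.py` module above is applied to `MCPClient` in the `main.py` hunks below. As a standalone illustration, a small, hypothetical script exercising `async_retry` (the `flaky_call` function and its counter are invented; the import path mirrors the one used in `main.py` and may differ depending on how `bemcp` is installed):

```python
import asyncio

from bemcp.decorator import async_retry  # assumed import path, as in main.py below

attempts = {"count": 0}

@async_retry(max_retries=2, delay=0.1)
async def flaky_call() -> str:
    # Fail on the first attempt, succeed on the second; async_retry absorbs the
    # first failure and retries after the configured delay.
    attempts["count"] += 1
    if attempts["count"] < 2:
        raise RuntimeError("transient failure")
    return "ok"

if __name__ == "__main__":
    print(asyncio.run(flaky_call()))  # prints "ok" after one retry
```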
beswarm/bemcp/bemcp/main.py CHANGED
@@ -1,6 +1,7 @@
 import asyncio
 from typing import Any, Dict, List, Optional
 from contextlib import AsyncExitStack
+from bemcp.decorator import async_retry, reconnect_on_connection_error
 
 from mcp import ClientSession, StdioServerParameters
 from mcp.client.stdio import stdio_client
@@ -36,6 +37,7 @@ class MCPClient:
     """Disconnects from the MCP server and cleans up resources."""
     return await self.__aexit__(None, None, None)
 
+@async_retry(max_retries=2)
 async def __aenter__(self):
     """Connects to the MCP server and initializes resources."""
     if self.session:
@@ -68,6 +70,7 @@ class MCPClient:
 self._exit_stack = None
 print("Disconnected from server.")
 
+@reconnect_on_connection_error
 async def list_tools(self) -> List[types.Tool]:
     """Lists available tools from the server."""
     if not self.session:
@@ -75,12 +78,14 @@ class MCPClient:
 response = await self.session.list_tools()
 return response.tools
 
+@reconnect_on_connection_error
 async def call_tool(self, name: str, args: Dict[str, Any]) -> types.CallToolResult:
     """Calls a tool on the server."""
     if not self.session:
         raise ConnectionError("Not connected to any server.")
     return await self.session.call_tool(name, args)
 
+@reconnect_on_connection_error
 async def list_resources(self) -> List[types.Resource]:
     """Lists available resources from the server."""
     if not self.session:
@@ -88,6 +93,7 @@ class MCPClient:
 response = await self.session.list_resources()
 return response.resources
 
+@reconnect_on_connection_error
 async def read_resource(self, uri: str) -> types.ReadResourceResult:
     """Reads a resource from the server."""
     if not self.session:
beswarm/bemcp/bemcp/utils.py CHANGED
@@ -24,7 +24,7 @@ def convert_tool_format(tool: types.Tool) -> Dict[str, Any]:
     "function": {
         "name": tool.name,
         "description": tool.description,
-        "input_schema": tool.inputSchema
+        "parameters": tool.inputSchema
     }
 }
 return converted_tool
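The rename above brings the converted tool into line with the OpenAI-style tools schema, which expects the JSON Schema under `parameters` rather than `input_schema`. An illustrative sketch of the dict shape `convert_tool_format` now produces (the tool name, description, and schema are invented; the top-level `type` key is assumed from the OpenAI format and is not shown in the hunk):

```python
# Shape of the converted tool after this change; field values are illustrative.
converted_tool = {
    "type": "function",            # assumed wrapper key, not visible in the hunk
    "function": {
        "name": "read_file",
        "description": "Read a file from disk.",
        "parameters": {            # previously emitted as "input_schema"
            "type": "object",
            "properties": {"path": {"type": "string"}},
            "required": ["path"],
        },
    },
}
```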
beswarm/bemcp/test/server.py CHANGED
@@ -39,5 +39,5 @@ def debug_error(error: str) -> list[base.Message]:
 ]
 
 if __name__ == "__main__":
-    mcp.run(transport='stdio')
-    # mcp.run(transport='sse')
+    # mcp.run(transport='stdio')
+    mcp.run(transport='sse')
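The test server now defaults to the SSE transport instead of stdio. A minimal sketch of a FastMCP server run the same way (the server name and `echo` tool are invented; the real test server lives at beswarm/bemcp/test/server.py):

```python
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("demo-server")

@mcp.tool()
def echo(text: str) -> str:
    """Echo the input back to the caller."""
    return text

if __name__ == "__main__":
    # mcp.run(transport='stdio')  # previous behaviour: speak MCP over stdin/stdout
    mcp.run(transport='sse')      # serve over HTTP/SSE on FastMCP's configured host/port
```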
beswarm-0.2.0.dist-info/METADATA → beswarm-0.2.1.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: beswarm
-Version: 0.2.0
+Version: 0.2.1
 Summary: MAS
 Requires-Python: >=3.11
 Description-Content-Type: text/markdown
beswarm-0.2.0.dist-info/RECORD → beswarm-0.2.1.dist-info/RECORD RENAMED
@@ -2,13 +2,13 @@ beswarm/__init__.py,sha256=HZjUOJtZR5QhMuDbq-wukQQn1VrBusNWai_ysGo-VVI,20
 beswarm/prompt.py,sha256=3WoI_1lwqvK9O3WF4a3gaZGgQICsa1ygn07iOfuSzyI,31984
 beswarm/utils.py,sha256=lm0drN1ebXM9haoKaW2DLzJJRCOpLmiJ864mH4jAdB4,6697
 beswarm/aient/main.py,sha256=SiYAIgQlLJqYusnTVEJOx1WNkSJKMImhgn5aWjfroxg,3814
-beswarm/aient/setup.py,sha256=89YU9QH7tFFNyyf6SKdz-b3JRwyxPB3LzNT-CQW8V30,487
+beswarm/aient/setup.py,sha256=rROoj5y5L8xuCRH08xVIiLgkk9BilfVZVZ3AOOIShIY,487
 beswarm/aient/src/aient/__init__.py,sha256=SRfF7oDVlOOAi6nGKiJIUK6B_arqYLO9iSMp-2IZZps,21
 beswarm/aient/src/aient/core/__init__.py,sha256=NxjebTlku35S4Dzr16rdSqSTWUvvwEeACe8KvHJnjPg,34
 beswarm/aient/src/aient/core/log_config.py,sha256=kz2_yJv1p-o3lUQOwA3qh-LSc3wMHv13iCQclw44W9c,274
 beswarm/aient/src/aient/core/models.py,sha256=d4MISNezTSe0ls0-fjuToI2SoT-sk5fWqAJuKVinIlo,7502
-beswarm/aient/src/aient/core/request.py,sha256=6Nwduj7kFuubFaZ0ZLkT_zd03XpT-bFhgrKVOZiGBOQ,71918
-beswarm/aient/src/aient/core/response.py,sha256=RYy70Ld_txixHHd61Dqtlo0tKHMU_OIXqxGWd6EfATI,35315
+beswarm/aient/src/aient/core/request.py,sha256=gXHdx61emExUTZbUyLng_AYmlfyKl3uOcCmlAY5eN98,72330
+beswarm/aient/src/aient/core/response.py,sha256=mAVsCnNhWY09DXNe0lyPUJq-1ljtGjC67Az-Uh7ozIw,35166
 beswarm/aient/src/aient/core/utils.py,sha256=fhI5wBxr01lVEp8nMfjG9dQ859AE-VdrWyb9suLzzqM,27400
 beswarm/aient/src/aient/core/test/test_base_api.py,sha256=pWnycRJbuPSXKKU9AQjWrMAX1wiLC_014Qc9hh5C2Pw,524
 beswarm/aient/src/aient/core/test/test_geminimask.py,sha256=HFX8jDbNg_FjjgPNxfYaR-0-roUrOO-ND-FVsuxSoiw,13254
@@ -72,10 +72,11 @@ beswarm/aient/test/test_whisper.py,sha256=GxKYzhh3lA8t62V_DDj42VQTXkdjocSFcl5u96
 beswarm/aient/test/test_wildcard.py,sha256=EQwwNrTMHV4CEcZKdwUVGrwtNUsF0CCKbM-_W9IOYsk,693
 beswarm/aient/test/test_yjh.py,sha256=MsHuBLNOqi3fyX-uboBKmTevkZW_KVv12p-pkF5ir3Y,787
 beswarm/bemcp/bemcp/__init__.py,sha256=Ss6bDXiZJgVIZS6KWytcGwXmIFu7SsagIXa5NpeWJ7c,140
-beswarm/bemcp/bemcp/main.py,sha256=dadyyAv0yzEX5EYnBM14SmBN7X3keJTrheDVie2rVJ4,8370
-beswarm/bemcp/bemcp/utils.py,sha256=YDRa4JvBjdNTJBzbrkwRXmRXy2gw8s9lb61WsnYZN10,953
+beswarm/bemcp/bemcp/decorator.py,sha256=23bNgwLjuUkpod5VcRv-UqlJTf91_wfztf8ls7-Gg08,3218
+beswarm/bemcp/bemcp/main.py,sha256=PBtvfKZoLnhjzsA0FZAWPs21WBEfcp-VFu3Y719z2yo,8613
+beswarm/bemcp/bemcp/utils.py,sha256=GnasmxcPjG668WCu_mpI2r3w1TtSmWyF2pORZl4ZIjg,951
 beswarm/bemcp/test/client.py,sha256=j7PDg5Esyri-e2vz2ubZ4foDSAq5Iv8Yfl_xyzTDsFY,1593
-beswarm/bemcp/test/server.py,sha256=58F0_xQTHc6mt_MTXLSz_JE1COzDdEYueco1OVuFay0,1159
+beswarm/bemcp/test/server.py,sha256=XqSrk88FC-dk7AcEn-OyS_fR0W0V9OnL2ONpkNEKJKE,1159
 beswarm/queries/tree-sitter-language-pack/README.md,sha256=ivZSEuWqYfUVLZl2AZZGRlm0bQsaG-VTBKBwACyM07k,291
 beswarm/queries/tree-sitter-language-pack/arduino-tags.scm,sha256=HbgdothT9Jjk56COXTtUkVAdZ14rZNnqzLbWVLeRs5U,177
 beswarm/queries/tree-sitter-language-pack/c-tags.scm,sha256=EIz45o5hBh8yEuck5ZR_4IpcGyWSeNrzxFmtkKZGt2k,461
@@ -134,7 +135,7 @@ beswarm/tools/search_arxiv.py,sha256=GpuIOYX8T0iRC-X-hmuR9AUJVn15WWZq864DaoC7BUc
 beswarm/tools/search_web.py,sha256=w0T0aCqOVlb6Of5hn_TtpnrGXo6bMtw2aKZdkrYjycI,12069
 beswarm/tools/think.py,sha256=WLw-7jNIsnS6n8MMSYUin_f-BGLENFmnKM2LISEp0co,1760
 beswarm/tools/worker.py,sha256=ubZ0nqmHlY9a02SL4_HwKEzYec0bVZI-0gJWIfWTJg4,21058
-beswarm-0.2.0.dist-info/METADATA,sha256=4H9vYj7QtbHENskDZqIwSt9erkE0s4e6DBTVv5k40IA,3583
-beswarm-0.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-beswarm-0.2.0.dist-info/top_level.txt,sha256=pJw4O87wvt5882smuSO6DfByJz7FJ8SxxT8h9fHCmpo,8
-beswarm-0.2.0.dist-info/RECORD,,
+beswarm-0.2.1.dist-info/METADATA,sha256=yy02FNbEFec-hqHzq9Aj47kn2lJU91lUD_YxE-9dCqc,3583
+beswarm-0.2.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+beswarm-0.2.1.dist-info/top_level.txt,sha256=pJw4O87wvt5882smuSO6DfByJz7FJ8SxxT8h9fHCmpo,8
+beswarm-0.2.1.dist-info/RECORD,,