swarms-7.7.9-py3-none-any.whl → swarms-7.8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,104 @@
+ from typing import Union
+ from swarms.structs.agent import Agent
+ from swarms.schemas.agent_class_schema import AgentConfiguration
+ from functools import lru_cache
+ import json
+ from pydantic import ValidationError
+
+
+ def validate_and_convert_config(
+     agent_configuration: Union[AgentConfiguration, dict, str],
+ ) -> AgentConfiguration:
+     """
+     Validate and convert various input types to AgentConfiguration.
+
+     Args:
+         agent_configuration: Can be:
+             - AgentConfiguration instance (BaseModel)
+             - Dictionary with configuration parameters
+             - JSON string representation of configuration
+
+     Returns:
+         AgentConfiguration: Validated configuration object
+
+     Raises:
+         ValueError: If the input cannot be converted to a valid
+             AgentConfiguration (Pydantic validation failures are
+             re-raised as ValueError)
+     """
+     if agent_configuration is None:
+         raise ValueError("Agent configuration is required")
+
+     # If already an AgentConfiguration instance, return as-is
+     if isinstance(agent_configuration, AgentConfiguration):
+         return agent_configuration
+
+     # If string, try to parse as JSON
+     if isinstance(agent_configuration, str):
+         try:
+             config_dict = json.loads(agent_configuration)
+         except json.JSONDecodeError as e:
+             raise ValueError(
+                 f"Invalid JSON string for agent configuration: {e}"
+             )
+
+         if not isinstance(config_dict, dict):
+             raise ValueError(
+                 "JSON string must represent a dictionary/object"
+             )
+
+         agent_configuration = config_dict
+
+     # If dictionary, convert to AgentConfiguration
+     if isinstance(agent_configuration, dict):
+         try:
+             return AgentConfiguration(**agent_configuration)
+         except ValidationError as e:
+             raise ValueError(
+                 f"Invalid agent configuration parameters: {e}"
+             )
+
+     # If none of the above, raise error
+     raise ValueError(
+         f"agent_configuration must be an AgentConfiguration instance, dict, or JSON string. "
+         f"Got {type(agent_configuration)}"
+     )
+
+
+ @lru_cache(maxsize=128)
+ def create_agent_tool(
+     agent_configuration: Union[AgentConfiguration, dict, str],
+ ) -> str:
+     """
+     Create an agent from an agent configuration and run its configured task.
+     Uses lru_cache to improve performance for repeated configurations.
+     Note that lru_cache requires hashable arguments, so only the JSON-string
+     form can be cached; passing a plain dict raises TypeError.
+
+     Args:
+         agent_configuration: Agent configuration as:
+             - AgentConfiguration instance (BaseModel)
+             - Dictionary with configuration parameters
+             - JSON string representation of configuration
+
+     Returns:
+         str: The agent's output for the configured task
+
+     Raises:
+         ValueError: If agent_configuration is invalid or cannot be converted
+     """
+     # Validate and convert configuration
+     config = validate_and_convert_config(agent_configuration)
+
+     agent = Agent(
+         agent_name=config.agent_name,
+         agent_description=config.agent_description,
+         system_prompt=config.system_prompt,
+         max_loops=config.max_loops,
+         dynamic_temperature_enabled=config.dynamic_temperature_enabled,
+         model_name=config.model_name,
+         safety_prompt_on=config.safety_prompt_on,
+         temperature=config.temperature,
+         output_type="str-all-except-first",
+     )
+
+     return agent.run(task=config.task)
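
A minimal usage sketch for the file above. The JSON-string form is the cache-friendly way to call create_agent_tool (lru_cache needs hashable arguments); the import path and field values here are assumptions for illustration, with field names taken from the Agent(...) call and config accesses above:

    import json
    from swarms.agents import create_agent_tool  # import path assumed

    config_json = json.dumps({
        "agent_name": "research-agent",
        "agent_description": "Summarizes technical papers",
        "system_prompt": "You are a careful research assistant.",
        "model_name": "gpt-4o-mini",
        "max_loops": 1,
        "dynamic_temperature_enabled": False,
        "safety_prompt_on": True,
        "temperature": 0.2,
        "task": "Summarize the key idea of the attached abstract.",
    })

    output = create_agent_tool(config_json)  # str: the agent's run output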
@@ -0,0 +1,504 @@
+ import os
+ import asyncio
+ import contextlib
+ import json
+ import random
+ from functools import wraps
+ from typing import Any, Dict, List, Literal, Optional, Union
+ from concurrent.futures import ThreadPoolExecutor, as_completed
+
+ from litellm.types.utils import ChatCompletionMessageToolCall
+ from loguru import logger
+ from mcp import ClientSession
+ from mcp.client.sse import sse_client
+ from mcp.types import (
+     CallToolRequestParams as MCPCallToolRequestParams,
+ )
+ from mcp.types import CallToolResult as MCPCallToolResult
+ from mcp.types import Tool as MCPTool
+ from openai.types.chat import ChatCompletionToolParam
+ from openai.types.shared_params.function_definition import (
+     FunctionDefinition,
+ )
+
+ from swarms.schemas.mcp_schemas import (
+     MCPConnection,
+ )
+ from swarms.utils.index import exists
+
+
+ class MCPError(Exception):
+     """Base exception for MCP-related errors."""
+
+     pass
+
+
+ class MCPConnectionError(MCPError):
+     """Raised when there are issues connecting to the MCP server."""
+
+     pass
+
+
+ class MCPToolError(MCPError):
+     """Raised when there are issues with MCP tool operations."""
+
+     pass
+
+
+ class MCPValidationError(MCPError):
+     """Raised when there are validation issues with MCP operations."""
+
+     pass
+
+
+ class MCPExecutionError(MCPError):
+     """Raised when there are issues executing MCP operations."""
+
+     pass
+
+
+ ########################################################
+ # List MCP Tool functions
+ ########################################################
+ def transform_mcp_tool_to_openai_tool(
+     mcp_tool: MCPTool,
+ ) -> ChatCompletionToolParam:
+     """Convert an MCP tool to an OpenAI tool."""
+     return ChatCompletionToolParam(
+         type="function",
+         function=FunctionDefinition(
+             name=mcp_tool.name,
+             description=mcp_tool.description or "",
+             parameters=mcp_tool.inputSchema,
+             strict=False,
+         ),
+     )
+
+
+ async def load_mcp_tools(
+     session: ClientSession, format: Literal["mcp", "openai"] = "mcp"
+ ) -> Union[List[MCPTool], List[ChatCompletionToolParam]]:
+     """
+     Load all available MCP tools.
+
+     Args:
+         session: The MCP session to use.
+         format: The format to convert the tools to. By default, the tools
+             are returned in MCP format. If format is set to "openai", the
+             tools are converted to OpenAI API-compatible tools.
+     """
+     tools = await session.list_tools()
+     if format == "openai":
+         return [
+             transform_mcp_tool_to_openai_tool(mcp_tool=tool)
+             for tool in tools.tools
+         ]
+     return tools.tools
+
+
+ ########################################################
+ # Call MCP Tool functions
+ ########################################################
+
+
+ async def call_mcp_tool(
+     session: ClientSession,
+     call_tool_request_params: MCPCallToolRequestParams,
+ ) -> MCPCallToolResult:
+     """Call an MCP tool."""
+     tool_result = await session.call_tool(
+         name=call_tool_request_params.name,
+         arguments=call_tool_request_params.arguments,
+     )
+     return tool_result
+
+
+ def _get_function_arguments(function: FunctionDefinition) -> dict:
+     """Helper to safely get and parse function arguments."""
+     arguments = function.get("arguments", {})
+     if isinstance(arguments, str):
+         try:
+             arguments = json.loads(arguments)
+         except json.JSONDecodeError:
+             arguments = {}
+     return arguments if isinstance(arguments, dict) else {}
+
+
+ def transform_openai_tool_call_request_to_mcp_tool_call_request(
+     openai_tool: Union[ChatCompletionMessageToolCall, Dict],
+ ) -> MCPCallToolRequestParams:
+     """Convert an OpenAI ChatCompletionMessageToolCall to an MCP CallToolRequestParams."""
+     function = openai_tool["function"]
+     return MCPCallToolRequestParams(
+         name=function["name"],
+         arguments=_get_function_arguments(function),
+     )
+
+
+ async def call_openai_tool(
+     session: ClientSession,
+     openai_tool: dict,
+ ) -> MCPCallToolResult:
+     """
+     Call an OpenAI tool using the MCP client.
+
+     Args:
+         session: The MCP session to use.
+         openai_tool: The OpenAI tool call to execute. You can get this from
+             `choices[0].message.tool_calls[0]` of an OpenAI API response.
+
+     Returns:
+         The result of the MCP tool call.
+     """
+     mcp_tool_call_request_params = (
+         transform_openai_tool_call_request_to_mcp_tool_call_request(
+             openai_tool=openai_tool,
+         )
+     )
+     return await call_mcp_tool(
+         session=session,
+         call_tool_request_params=mcp_tool_call_request_params,
+     )
+
+
+ def retry_with_backoff(retries=3, backoff_in_seconds=1):
+     """Decorator for retrying async functions with exponential backoff."""
+
+     def decorator(func):
+         @wraps(func)
+         async def wrapper(*args, **kwargs):
+             x = 0
+             while True:
+                 try:
+                     return await func(*args, **kwargs)
+                 except Exception as e:
+                     if x == retries:
+                         logger.error(
+                             f"Failed after {retries} retries: {str(e)}"
+                         )
+                         raise
+                     sleep_time = (
+                         backoff_in_seconds * 2**x
+                         + random.uniform(0, 1)
+                     )
+                     logger.warning(
+                         f"Attempt {x + 1} failed, retrying in {sleep_time:.2f}s"
+                     )
+                     await asyncio.sleep(sleep_time)
+                     x += 1
+
+         return wrapper
+
+     return decorator
+
+
+ @contextlib.contextmanager
+ def get_or_create_event_loop():
+     """Context manager to handle event loop creation and cleanup."""
+     try:
+         loop = asyncio.get_event_loop()
+     except RuntimeError:
+         loop = asyncio.new_event_loop()
+         asyncio.set_event_loop(loop)
+
+     try:
+         yield loop
+     finally:
+         # Only close the loop if we created it and it's not the main event loop
+         if loop != asyncio.get_event_loop() and not loop.is_running():
+             if not loop.is_closed():
+                 loop.close()
+
+
+ def connect_to_mcp_server(connection: Optional[MCPConnection] = None):
+     """Connect to an MCP server.
+
+     Args:
+         connection (MCPConnection): The connection configuration object.
+
+     Returns:
+         tuple: A tuple containing (headers, timeout, transport, url).
+
+     Raises:
+         MCPValidationError: If the connection object is invalid.
+     """
+     if not isinstance(connection, MCPConnection):
+         raise MCPValidationError("Invalid connection type")
+
+     # Build headers, injecting the bearer token if one is configured
+     headers = dict(connection.headers or {})
+     if connection.authorization_token:
+         headers["Authorization"] = (
+             f"Bearer {connection.authorization_token}"
+         )
+
+     return (
+         headers,
+         connection.timeout or 5,
+         connection.transport or "sse",
+         connection.url,
+     )
+
+
+ @retry_with_backoff(retries=3)
+ async def aget_mcp_tools(
+     server_path: Optional[str] = None,
+     format: str = "openai",
+     connection: Optional[MCPConnection] = None,
+     *args,
+     **kwargs,
+ ) -> List[Dict[str, Any]]:
+     """
+     Fetch available MCP tools from the server with retry logic.
+
+     Args:
+         server_path (str): URL of the MCP server.
+         format (str): Format to return tools in ("mcp" or "openai").
+         connection (MCPConnection): Optional connection configuration;
+             its URL takes precedence over server_path when provided.
+
+     Returns:
+         List[Dict[str, Any]]: List of available MCP tools in the requested format.
+
+     Raises:
+         MCPValidationError: If the connection object is invalid.
+         MCPConnectionError: If connection to the server fails.
+     """
+     if exists(connection):
+         headers, timeout, _transport, url = connect_to_mcp_server(
+             connection
+         )
+     else:
+         headers, timeout, _transport, url = (
+             None,
+             5,
+             None,
+             server_path,
+         )
+
+     logger.info(f"Fetching MCP tools from server: {url}")
+
+     try:
+         async with sse_client(
+             url=url,
+             headers=headers,
+             timeout=timeout,
+             *args,
+             **kwargs,
+         ) as (
+             read,
+             write,
+         ):
+             async with ClientSession(read, write) as session:
+                 await session.initialize()
+                 tools = await load_mcp_tools(
+                     session=session, format=format
+                 )
+                 logger.info(
+                     f"Successfully fetched {len(tools)} tools"
+                 )
+                 return tools
+     except Exception as e:
+         logger.error(f"Error fetching MCP tools: {str(e)}")
+         raise MCPConnectionError(
+             f"Failed to connect to MCP server: {str(e)}"
+         )
+
+
+ def get_mcp_tools_sync(
+     server_path: Optional[str] = None,
+     format: str = "openai",
+     connection: Optional[MCPConnection] = None,
+     *args,
+     **kwargs,
+ ) -> List[Dict[str, Any]]:
+     """
+     Synchronous version of aget_mcp_tools that handles event loop management.
+
+     Args:
+         server_path (str): URL of the MCP server.
+
+     Returns:
+         List[Dict[str, Any]]: List of available MCP tools in OpenAI format.
+
+     Raises:
+         MCPValidationError: If server_path is invalid.
+         MCPConnectionError: If connection to the server fails.
+         MCPExecutionError: If event loop management fails.
+     """
+     with get_or_create_event_loop() as loop:
+         try:
+             return loop.run_until_complete(
+                 aget_mcp_tools(
+                     server_path=server_path,
+                     format=format,
+                     connection=connection,
+                     *args,
+                     **kwargs,
+                 )
+             )
+         except Exception as e:
+             logger.error(f"Error in get_mcp_tools_sync: {str(e)}")
+             raise MCPExecutionError(
+                 f"Failed to execute MCP tools sync: {str(e)}"
+             )
+
+
+ def _fetch_tools_for_server(
+     url: str,
+     connection: Optional[MCPConnection] = None,
+     format: str = "openai",
+ ) -> List[Dict[str, Any]]:
+     """Helper function to fetch tools for a single server."""
+     return get_mcp_tools_sync(
+         server_path=url,
+         connection=connection,
+         format=format,
+     )
+
+
+ def get_tools_for_multiple_mcp_servers(
+     urls: List[str],
+     connections: List[MCPConnection] = None,
+     format: str = "openai",
+     output_type: Literal["json", "dict", "str"] = "str",
+     max_workers: Optional[int] = None,
+ ) -> List[Dict[str, Any]]:
+     """Get tools for multiple MCP servers concurrently using a ThreadPoolExecutor.
+
+     Args:
+         urls: List of server URLs to fetch tools from.
+         connections: Optional list of MCPConnection objects corresponding to each URL.
+         format: Format to return tools in (default: "openai").
+         output_type: Type of output format (default: "str").
+         max_workers: Maximum number of worker threads
+             (default: None, which uses min(32, os.cpu_count() + 4)).
+
+     Returns:
+         List[Dict[str, Any]]: Combined list of tools from all servers.
+     """
+     tools = []
+     max_workers = (
+         min(32, os.cpu_count() + 4)
+         if max_workers is None
+         else max_workers
+     )
+     with ThreadPoolExecutor(max_workers=max_workers) as executor:
+         if exists(connections):
+             # Create future tasks for each URL-connection pair
+             future_to_url = {
+                 executor.submit(
+                     _fetch_tools_for_server, url, connection, format
+                 ): url
+                 for url, connection in zip(urls, connections)
+             }
+         else:
+             # Create future tasks for each URL without connections
+             future_to_url = {
+                 executor.submit(
+                     _fetch_tools_for_server, url, None, format
+                 ): url
+                 for url in urls
+             }
+
+         # Process completed futures as they come in
+         for future in as_completed(future_to_url):
+             url = future_to_url[future]
+             try:
+                 server_tools = future.result()
+                 tools.extend(server_tools)
+             except Exception as e:
+                 logger.error(
+                     f"Error fetching tools from {url}: {str(e)}"
+                 )
+                 raise MCPExecutionError(
+                     f"Failed to fetch tools from {url}: {str(e)}"
+                 )
+
+     return tools
+
+
+ async def _execute_tool_call_simple(
+     response: Any = None,
+     server_path: str = None,
+     connection: Optional[MCPConnection] = None,
+     output_type: Literal["json", "dict", "str"] = "str",
+     *args,
+     **kwargs,
+ ):
+     """Execute a tool call using the MCP client."""
+     if exists(connection):
+         headers, timeout, _transport, url = connect_to_mcp_server(
+             connection
+         )
+     else:
+         headers, timeout, _transport, url = (
+             None,
+             5,
+             "sse",
+             server_path,
+         )
+
+     try:
+         async with sse_client(
+             url=url, headers=headers, timeout=timeout, *args, **kwargs
+         ) as (
+             read,
+             write,
+         ):
+             async with ClientSession(read, write) as session:
+                 try:
+                     await session.initialize()
+
+                     call_result = await call_openai_tool(
+                         session=session,
+                         openai_tool=response,
+                     )
+
+                     if output_type == "json":
+                         out = call_result.model_dump_json(indent=4)
+                     elif output_type == "dict":
+                         out = call_result.model_dump()
+                     else:  # "str": flatten the result into "key: value" lines
+                         data = call_result.model_dump()
+                         formatted_lines = []
+                         for key, value in data.items():
+                             if isinstance(value, list):
+                                 for item in value:
+                                     if isinstance(item, dict):
+                                         for k, v in item.items():
+                                             formatted_lines.append(
+                                                 f"{k}: {v}"
+                                             )
+                             else:
+                                 formatted_lines.append(
+                                     f"{key}: {value}"
+                                 )
+                         out = "\n".join(formatted_lines)
+
+                     return out
+
+                 except Exception as e:
+                     logger.error(f"Error in tool execution: {str(e)}")
+                     raise MCPExecutionError(
+                         f"Tool execution failed: {str(e)}"
+                     )
+
+     except Exception as e:
+         logger.error(f"Error in SSE client connection: {str(e)}")
+         raise MCPConnectionError(
+             f"Failed to connect to MCP server: {str(e)}"
+         )
+
+
+ async def execute_tool_call_simple(
+     response: Any = None,
+     server_path: str = None,
+     connection: Optional[MCPConnection] = None,
+     output_type: Literal["json", "dict", "str", "formatted"] = "str",
+     *args,
+     **kwargs,
+ ) -> Union[str, Dict[str, Any]]:
+     """Public wrapper around _execute_tool_call_simple."""
+     return await _execute_tool_call_simple(
+         response=response,
+         server_path=server_path,
+         connection=connection,
+         output_type=output_type,
+         *args,
+         **kwargs,
+     )
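
A minimal end-to-end sketch of the intended flow for this file. The server URL and tool-call payload are illustrative; in practice the tool call comes from an OpenAI chat completion's `choices[0].message.tool_calls[0]`, and here it is shown as a plain dict with the `{"function": {"name", "arguments"}}` shape the transform helpers above expect:

    import asyncio

    SERVER = "http://localhost:8000/sse"  # hypothetical MCP server URL

    # Discover tools in OpenAI format (e.g. to pass as `tools=` to a chat completion)
    tools = get_mcp_tools_sync(server_path=SERVER, format="openai")

    tool_call = {
        "function": {
            "name": "add",  # assumed tool name on the server
            "arguments": '{"a": 1, "b": 2}',
        }
    }

    result = asyncio.run(
        execute_tool_call_simple(
            response=tool_call,
            server_path=SERVER,
            output_type="str",
        )
    )
    print(result)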
@@ -1,3 +1,5 @@
+ import os
+ import concurrent.futures
  import functools
  import inspect
  import json
@@ -240,10 +242,10 @@ class Parameters(BaseModel):
  class Function(BaseModel):
      """A function as defined by the OpenAI API"""

+     name: Annotated[str, Field(description="Name of the function")]
      description: Annotated[
          str, Field(description="Description of the function")
      ]
-     name: Annotated[str, Field(description="Name of the function")]
      parameters: Annotated[
          Parameters, Field(description="Parameters of the function")
      ]
@@ -386,7 +388,7 @@ def get_openai_function_schema_from_func(
      function: Callable[..., Any],
      *,
      name: Optional[str] = None,
-     description: str = None,
+     description: Optional[str] = None,
  ) -> Dict[str, Any]:
      """Get a JSON schema for a function as defined by the OpenAI API

@@ -429,6 +431,21 @@ get_openai_function_schema_from_func(
          typed_signature, required
      )

+     name = name if name else function.__name__
+     description = description if description else function.__doc__
+
+     if name is None:
+         raise ValueError(
+             "Function name is required but was not provided. Please provide a name for the function "
+             "either through the name parameter or ensure the function has a valid __name__ attribute."
+         )
+
+     if description is None:
+         raise ValueError(
+             "Function description is required but was not provided. Please provide a description "
+             "either through the description parameter or add a docstring to the function."
+         )
+
      if return_annotation is None:
          logger.warning(
              f"The return type of the function '{function.__name__}' is not annotated. Although annotating it is "
@@ -451,16 +468,14 @@
          + f"The annotations are missing for the following parameters: {', '.join(missing_s)}"
      )

-     fname = name if name else function.__name__
-
      parameters = get_parameters(
          required, param_annotations, default_values=default_values
      )

      function = ToolFunction(
          function=Function(
+             name=name,
              description=description,
-             name=fname,
              parameters=parameters,
          )
      )
@@ -468,6 +483,29 @@
      return model_dump(function)


+ def convert_multiple_functions_to_openai_function_schema(
+     functions: List[Callable[..., Any]],
+ ) -> List[Dict[str, Any]]:
+     """Convert a list of functions to a list of OpenAI function schemas"""
+     # Generate schemas concurrently, using roughly 80% of available CPU cores
+     max_workers = max(1, int((os.cpu_count() or 1) * 0.8))
+
+     with concurrent.futures.ThreadPoolExecutor(
+         max_workers=max_workers
+     ) as executor:
+         futures = [
+             executor.submit(
+                 get_openai_function_schema_from_func, function
+             )
+             for function in functions
+         ]
+         return [future.result() for future in futures]
+
+
  #
  def get_load_param_if_needed_function(
      t: Any,
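
A short usage sketch for the new batch converter (the worker functions are illustrative). Because results are collected from the futures list rather than as_completed, the schemas come back in the same order as the input functions:

    def fetch_weather(city: str) -> str:
        """Return the current weather for a city."""
        return f"Weather for {city}"

    def fetch_news(topic: str) -> str:
        """Return headlines for a topic."""
        return f"News about {topic}"

    schemas = convert_multiple_functions_to_openai_function_schema(
        [fetch_weather, fetch_news]
    )
    # -> one OpenAI function schema dict per input function, in input order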