swarms 7.7.8__py3-none-any.whl → 7.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. swarms/__init__.py +0 -1
  2. swarms/agents/cort_agent.py +206 -0
  3. swarms/agents/react_agent.py +173 -0
  4. swarms/agents/self_agent_builder.py +40 -0
  5. swarms/communication/base_communication.py +290 -0
  6. swarms/communication/duckdb_wrap.py +369 -72
  7. swarms/communication/pulsar_struct.py +691 -0
  8. swarms/communication/redis_wrap.py +1362 -0
  9. swarms/communication/sqlite_wrap.py +547 -44
  10. swarms/prompts/agent_self_builder_prompt.py +103 -0
  11. swarms/prompts/safety_prompt.py +50 -0
  12. swarms/schemas/__init__.py +6 -1
  13. swarms/schemas/agent_class_schema.py +91 -0
  14. swarms/schemas/agent_mcp_errors.py +18 -0
  15. swarms/schemas/agent_tool_schema.py +13 -0
  16. swarms/schemas/llm_agent_schema.py +92 -0
  17. swarms/schemas/mcp_schemas.py +43 -0
  18. swarms/structs/__init__.py +4 -0
  19. swarms/structs/agent.py +315 -267
  20. swarms/structs/aop.py +3 -1
  21. swarms/structs/batch_agent_execution.py +64 -0
  22. swarms/structs/conversation.py +261 -57
  23. swarms/structs/council_judge.py +542 -0
  24. swarms/structs/deep_research_swarm.py +19 -22
  25. swarms/structs/long_agent.py +424 -0
  26. swarms/structs/ma_utils.py +11 -8
  27. swarms/structs/malt.py +30 -28
  28. swarms/structs/multi_model_gpu_manager.py +1 -1
  29. swarms/structs/output_types.py +1 -1
  30. swarms/structs/swarm_router.py +70 -15
  31. swarms/tools/__init__.py +12 -0
  32. swarms/tools/base_tool.py +2840 -264
  33. swarms/tools/create_agent_tool.py +104 -0
  34. swarms/tools/mcp_client_call.py +504 -0
  35. swarms/tools/py_func_to_openai_func_str.py +45 -7
  36. swarms/tools/pydantic_to_json.py +10 -27
  37. swarms/utils/audio_processing.py +343 -0
  38. swarms/utils/history_output_formatter.py +5 -5
  39. swarms/utils/index.py +226 -0
  40. swarms/utils/litellm_wrapper.py +65 -67
  41. swarms/utils/try_except_wrapper.py +2 -2
  42. swarms/utils/xml_utils.py +42 -0
  43. {swarms-7.7.8.dist-info → swarms-7.8.0.dist-info}/METADATA +5 -4
  44. {swarms-7.7.8.dist-info → swarms-7.8.0.dist-info}/RECORD +47 -30
  45. {swarms-7.7.8.dist-info → swarms-7.8.0.dist-info}/WHEEL +1 -1
  46. swarms/client/__init__.py +0 -15
  47. swarms/client/main.py +0 -407
  48. swarms/tools/mcp_client.py +0 -246
  49. swarms/tools/mcp_integration.py +0 -340
  50. {swarms-7.7.8.dist-info → swarms-7.8.0.dist-info}/LICENSE +0 -0
  51. {swarms-7.7.8.dist-info → swarms-7.8.0.dist-info}/entry_points.txt +0 -0
swarms/tools/py_func_to_openai_func_str.py
@@ -1,3 +1,5 @@
+import os
+import concurrent.futures
 import functools
 import inspect
 import json
@@ -165,7 +167,7 @@ def get_typed_annotation(


 def get_typed_signature(
-    call: Callable[..., Any]
+    call: Callable[..., Any],
 ) -> inspect.Signature:
     """Get the signature of a function with type annotations.

@@ -240,10 +242,10 @@ class Parameters(BaseModel):
 class Function(BaseModel):
     """A function as defined by the OpenAI API"""

+    name: Annotated[str, Field(description="Name of the function")]
     description: Annotated[
         str, Field(description="Description of the function")
     ]
-    name: Annotated[str, Field(description="Name of the function")]
     parameters: Annotated[
         Parameters, Field(description="Parameters of the function")
     ]
@@ -386,7 +388,7 @@ def get_openai_function_schema_from_func(
     function: Callable[..., Any],
     *,
     name: Optional[str] = None,
-    description: str = None,
+    description: Optional[str] = None,
 ) -> Dict[str, Any]:
     """Get a JSON schema for a function as defined by the OpenAI API

@@ -429,6 +431,21 @@ def get_openai_function_schema_from_func(
         typed_signature, required
     )

+    name = name if name else function.__name__
+    description = description if description else function.__doc__
+
+    if name is None:
+        raise ValueError(
+            "Function name is required but was not provided. Please provide a name for the function "
+            "either through the name parameter or ensure the function has a valid __name__ attribute."
+        )
+
+    if description is None:
+        raise ValueError(
+            "Function description is required but was not provided. Please provide a description "
+            "either through the description parameter or add a docstring to the function."
+        )
+
     if return_annotation is None:
         logger.warning(
             f"The return type of the function '{function.__name__}' is not annotated. Although annotating it is "
@@ -451,16 +468,14 @@ def get_openai_function_schema_from_func(
             + f"The annotations are missing for the following parameters: {', '.join(missing_s)}"
         )

-    fname = name if name else function.__name__
-
     parameters = get_parameters(
         required, param_annotations, default_values=default_values
     )

     function = ToolFunction(
         function=Function(
+            name=name,
             description=description,
-            name=fname,
             parameters=parameters,
         )
     )
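Taken together, the two hunks above change how get_openai_function_schema_from_func resolves metadata: name and description now default to the function's __name__ and docstring, and a missing description raises a ValueError instead of producing a schema with no description. A minimal usage sketch of the new behavior (get_weather and undocumented below are invented helpers, not part of the package):

from swarms.tools.py_func_to_openai_func_str import (
    get_openai_function_schema_from_func,
)


def get_weather(city: str, units: str = "celsius") -> str:
    """Return a short weather summary for a city."""
    # hypothetical tool function, used only to illustrate the defaults
    return f"Weather in {city} ({units})"


# name and description are optional; they fall back to __name__ and __doc__
schema = get_openai_function_schema_from_func(get_weather)
print(schema)  # dict with the function name, description, and JSON-schema parameters


def undocumented(x: int) -> int:
    # hypothetical function with no docstring
    return x + 1


# No docstring and no explicit description now raises instead of passing None through
try:
    get_openai_function_schema_from_func(undocumented)
except ValueError as err:
    print(f"rejected: {err}")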
@@ -468,6 +483,29 @@ def get_openai_function_schema_from_func(
     return model_dump(function)


+def convert_multiple_functions_to_openai_function_schema(
+    functions: List[Callable[..., Any]],
+) -> List[Dict[str, Any]]:
+    """Convert a list of functions to a list of OpenAI function schemas"""
+    # return [
+    #     get_openai_function_schema_from_func(function) for function in functions
+    # ]
+    # Use 40% of cpu cores
+    max_workers = int(os.cpu_count() * 0.8)
+    print(f"max_workers: {max_workers}")
+
+    with concurrent.futures.ThreadPoolExecutor(
+        max_workers=max_workers
+    ) as executor:
+        futures = [
+            executor.submit(
+                get_openai_function_schema_from_func, function
+            )
+            for function in functions
+        ]
+        return [future.result() for future in futures]
+
+
 #
 def get_load_param_if_needed_function(
     t: Any,
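The new convert_multiple_functions_to_openai_function_schema helper fans the per-function schema generation out across a thread pool sized at int(os.cpu_count() * 0.8) workers and returns results in input order. A hedged usage sketch (both tool functions are invented for illustration):

from swarms.tools.py_func_to_openai_func_str import (
    convert_multiple_functions_to_openai_function_schema,
)


def add_numbers(a: int, b: int) -> int:
    """Add two integers and return their sum."""
    # hypothetical tool function
    return a + b


def reverse_text(text: str) -> str:
    """Return the input string reversed."""
    # hypothetical tool function
    return text[::-1]


# Each function is converted on its own worker thread; results keep input order
schemas = convert_multiple_functions_to_openai_function_schema(
    [add_numbers, reverse_text]
)
print(len(schemas))  # -> 2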
@@ -497,7 +535,7 @@ def get_load_param_if_needed_function(


 def load_basemodels_if_needed(
-    func: Callable[..., Any]
+    func: Callable[..., Any],
 ) -> Callable[..., Any]:
     """A decorator to load the parameters of a function if they are Pydantic models

swarms/tools/pydantic_to_json.py
@@ -39,7 +39,6 @@ def check_pydantic_name(pydantic_type: type[BaseModel]) -> str:

 def base_model_to_openai_function(
     pydantic_type: type[BaseModel],
-    output_str: bool = False,
 ) -> dict[str, Any]:
     """
     Convert a Pydantic model to a dictionary representation of functions.
@@ -86,34 +85,18 @@ def base_model_to_openai_function(
     _remove_a_key(parameters, "title")
     _remove_a_key(parameters, "additionalProperties")

-    if output_str:
-        out = {
-            "function_call": {
-                "name": name,
-            },
-            "functions": [
-                {
-                    "name": name,
-                    "description": schema["description"],
-                    "parameters": parameters,
-                },
-            ],
-        }
-        return str(out)
-
-    else:
-        return {
-            "function_call": {
+    return {
+        "function_call": {
+            "name": name,
+        },
+        "functions": [
+            {
                 "name": name,
+                "description": schema["description"],
+                "parameters": parameters,
             },
-            "functions": [
-                {
-                    "name": name,
-                    "description": schema["description"],
-                    "parameters": parameters,
-                },
-            ],
-        }
+        ],
+    }


 def multi_base_model_to_openai_function(
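With the output_str flag removed, base_model_to_openai_function always returns a dict; callers that relied on output_str=True to get a stringified payload must now stringify the result themselves. A small sketch under that assumption (the SearchQuery model is hypothetical):

from pydantic import BaseModel, Field

from swarms.tools.pydantic_to_json import base_model_to_openai_function


class SearchQuery(BaseModel):
    """Search the web for a free-text query."""

    # hypothetical model defined only for this example
    query: str = Field(..., description="Free-text search query")
    max_results: int = Field(5, description="Maximum number of results to return")


# 7.8.0: only the dict form is returned; output_str is no longer accepted
spec = base_model_to_openai_function(SearchQuery)
print(spec["functions"][0]["name"])
print(str(spec))  # stringify manually if the old output_str=True form is needed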
swarms/utils/audio_processing.py
@@ -0,0 +1,343 @@
+import base64
+from typing import Union, Dict, Any, Tuple
+import requests
+from pathlib import Path
+import wave
+import numpy as np
+
+
+def encode_audio_to_base64(audio_path: Union[str, Path]) -> str:
+    """
+    Encode a WAV file to base64 string.
+
+    Args:
+        audio_path (Union[str, Path]): Path to the WAV file
+
+    Returns:
+        str: Base64 encoded string of the audio file
+
+    Raises:
+        FileNotFoundError: If the audio file doesn't exist
+        ValueError: If the file is not a valid WAV file
+    """
+    try:
+        audio_path = Path(audio_path)
+        if not audio_path.exists():
+            raise FileNotFoundError(
+                f"Audio file not found: {audio_path}"
+            )
+
+        if not audio_path.suffix.lower() == ".wav":
+            raise ValueError("File must be a WAV file")
+
+        with open(audio_path, "rb") as audio_file:
+            audio_data = audio_file.read()
+            return base64.b64encode(audio_data).decode("utf-8")
+    except Exception as e:
+        raise Exception(f"Error encoding audio file: {str(e)}")
+
+
+def decode_base64_to_audio(
+    base64_string: str, output_path: Union[str, Path]
+) -> None:
+    """
+    Decode a base64 string to a WAV file.
+
+    Args:
+        base64_string (str): Base64 encoded audio data
+        output_path (Union[str, Path]): Path where the WAV file should be saved
+
+    Raises:
+        ValueError: If the base64 string is invalid
+        IOError: If there's an error writing the file
+    """
+    try:
+        output_path = Path(output_path)
+        output_path.parent.mkdir(parents=True, exist_ok=True)
+
+        audio_data = base64.b64decode(base64_string)
+        with open(output_path, "wb") as audio_file:
+            audio_file.write(audio_data)
+    except Exception as e:
+        raise Exception(f"Error decoding audio data: {str(e)}")
+
+
+def download_audio_from_url(
+    url: str, output_path: Union[str, Path]
+) -> None:
+    """
+    Download an audio file from a URL and save it locally.
+
+    Args:
+        url (str): URL of the audio file
+        output_path (Union[str, Path]): Path where the audio file should be saved
+
+    Raises:
+        requests.RequestException: If there's an error downloading the file
+        IOError: If there's an error saving the file
+    """
+    try:
+        output_path = Path(output_path)
+        output_path.parent.mkdir(parents=True, exist_ok=True)
+
+        response = requests.get(url)
+        response.raise_for_status()
+
+        with open(output_path, "wb") as audio_file:
+            audio_file.write(response.content)
+    except Exception as e:
+        raise Exception(f"Error downloading audio file: {str(e)}")
+
+
+def process_audio_with_model(
+    audio_path: Union[str, Path],
+    model: str,
+    prompt: str,
+    voice: str = "alloy",
+    format: str = "wav",
+) -> Dict[str, Any]:
+    """
+    Process an audio file with a model that supports audio input/output.
+
+    Args:
+        audio_path (Union[str, Path]): Path to the input WAV file
+        model (str): Model name to use for processing
+        prompt (str): Text prompt to accompany the audio
+        voice (str, optional): Voice to use for audio output. Defaults to "alloy"
+        format (str, optional): Audio format. Defaults to "wav"
+
+    Returns:
+        Dict[str, Any]: Model response containing both text and audio if applicable
+
+    Raises:
+        ImportError: If litellm is not installed
+        ValueError: If the model doesn't support audio processing
+    """
+    try:
+        from litellm import (
+            completion,
+            supports_audio_input,
+            supports_audio_output,
+        )
+
+        if not supports_audio_input(model):
+            raise ValueError(
+                f"Model {model} does not support audio input"
+            )
+
+        # Encode the audio file
+        encoded_audio = encode_audio_to_base64(audio_path)
+
+        # Prepare the messages
+        messages = [
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": prompt},
+                    {
+                        "type": "input_audio",
+                        "input_audio": {
+                            "data": encoded_audio,
+                            "format": format,
+                        },
+                    },
+                ],
+            }
+        ]
+
+        # Make the API call
+        response = completion(
+            model=model,
+            modalities=["text", "audio"],
+            audio={"voice": voice, "format": format},
+            messages=messages,
+        )
+
+        return response
+    except ImportError:
+        raise ImportError(
+            "Please install litellm: pip install litellm"
+        )
+    except Exception as e:
+        raise Exception(
+            f"Error processing audio with model: {str(e)}"
+        )
+
+
+def read_wav_file(
+    file_path: Union[str, Path],
+) -> Tuple[np.ndarray, int]:
+    """
+    Read a WAV file and return its audio data and sample rate.
+
+    Args:
+        file_path (Union[str, Path]): Path to the WAV file
+
+    Returns:
+        Tuple[np.ndarray, int]: Audio data as numpy array and sample rate
+
+    Raises:
+        FileNotFoundError: If the file doesn't exist
+        ValueError: If the file is not a valid WAV file
+    """
+    try:
+        file_path = Path(file_path)
+        if not file_path.exists():
+            raise FileNotFoundError(
+                f"Audio file not found: {file_path}"
+            )
+
+        with wave.open(str(file_path), "rb") as wav_file:
+            # Get audio parameters
+            n_channels = wav_file.getnchannels()
+            sample_width = wav_file.getsampwidth()
+            frame_rate = wav_file.getframerate()
+            n_frames = wav_file.getnframes()
+
+            # Read audio data
+            frames = wav_file.readframes(n_frames)
+
+            # Convert to numpy array
+            dtype = np.int16 if sample_width == 2 else np.int8
+            audio_data = np.frombuffer(frames, dtype=dtype)
+
+            # Reshape if stereo
+            if n_channels == 2:
+                audio_data = audio_data.reshape(-1, 2)
+
+            return audio_data, frame_rate
+
+    except Exception as e:
+        raise Exception(f"Error reading WAV file: {str(e)}")
+
+
+def write_wav_file(
+    audio_data: np.ndarray,
+    file_path: Union[str, Path],
+    sample_rate: int,
+    sample_width: int = 2,
+) -> None:
+    """
+    Write audio data to a WAV file.
+
+    Args:
+        audio_data (np.ndarray): Audio data as numpy array
+        file_path (Union[str, Path]): Path where to save the WAV file
+        sample_rate (int): Sample rate of the audio
+        sample_width (int, optional): Sample width in bytes. Defaults to 2 (16-bit)
+
+    Raises:
+        ValueError: If the audio data is invalid
+        IOError: If there's an error writing the file
+    """
+    try:
+        file_path = Path(file_path)
+        file_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Ensure audio data is in the correct format
+        if audio_data.dtype != np.int16 and sample_width == 2:
+            audio_data = (audio_data * 32767).astype(np.int16)
+        elif audio_data.dtype != np.int8 and sample_width == 1:
+            audio_data = (audio_data * 127).astype(np.int8)
+
+        # Determine number of channels
+        n_channels = (
+            2
+            if len(audio_data.shape) > 1 and audio_data.shape[1] == 2
+            else 1
+        )
+
+        with wave.open(str(file_path), "wb") as wav_file:
+            wav_file.setnchannels(n_channels)
+            wav_file.setsampwidth(sample_width)
+            wav_file.setframerate(sample_rate)
+            wav_file.writeframes(audio_data.tobytes())
+
+    except Exception as e:
+        raise Exception(f"Error writing WAV file: {str(e)}")
+
+
+def normalize_audio(audio_data: np.ndarray) -> np.ndarray:
+    """
+    Normalize audio data to have maximum amplitude of 1.0.
+
+    Args:
+        audio_data (np.ndarray): Input audio data
+
+    Returns:
+        np.ndarray: Normalized audio data
+    """
+    return audio_data / np.max(np.abs(audio_data))
+
+
+def convert_to_mono(audio_data: np.ndarray) -> np.ndarray:
+    """
+    Convert stereo audio to mono by averaging channels.
+
+    Args:
+        audio_data (np.ndarray): Input audio data (stereo)
+
+    Returns:
+        np.ndarray: Mono audio data
+    """
+    if len(audio_data.shape) == 1:
+        return audio_data
+    return np.mean(audio_data, axis=1)
+
+
+def encode_wav_to_base64(
+    audio_data: np.ndarray, sample_rate: int
+) -> str:
+    """
+    Convert audio data to base64 encoded WAV string.
+
+    Args:
+        audio_data (np.ndarray): Audio data
+        sample_rate (int): Sample rate of the audio
+
+    Returns:
+        str: Base64 encoded WAV data
+    """
+    # Create a temporary WAV file in memory
+    with wave.open("temp.wav", "wb") as wav_file:
+        wav_file.setnchannels(1 if len(audio_data.shape) == 1 else 2)
+        wav_file.setsampwidth(2)  # 16-bit
+        wav_file.setframerate(sample_rate)
+        wav_file.writeframes(audio_data.tobytes())
+
+    # Read the file and encode to base64
+    with open("temp.wav", "rb") as f:
+        wav_bytes = f.read()
+
+    # Clean up temporary file
+    Path("temp.wav").unlink()
+
+    return base64.b64encode(wav_bytes).decode("utf-8")
+
+
+def decode_base64_to_wav(
+    base64_string: str,
+) -> Tuple[np.ndarray, int]:
+    """
+    Convert base64 encoded WAV string to audio data and sample rate.
+
+    Args:
+        base64_string (str): Base64 encoded WAV data
+
+    Returns:
+        Tuple[np.ndarray, int]: Audio data and sample rate
+    """
+    # Decode base64 string
+    wav_bytes = base64.b64decode(base64_string)
+
+    # Write to temporary file
+    with open("temp.wav", "wb") as f:
+        f.write(wav_bytes)
+
+    # Read the WAV file
+    audio_data, sample_rate = read_wav_file("temp.wav")
+
+    # Clean up temporary file
+    Path("temp.wav").unlink()
+
+    return audio_data, sample_rate
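A short, hedged round-trip sketch of the new swarms/utils/audio_processing.py helpers: synthesize a one-second tone, write it, read it back, normalize it, and base64-encode it. The file name tone.wav and the synthetic signal are illustrative only:

import numpy as np

from swarms.utils.audio_processing import (
    encode_audio_to_base64,
    normalize_audio,
    read_wav_file,
    write_wav_file,
)

# One second of a 440 Hz tone at 16 kHz, scaled into the int16 range (example data)
sample_rate = 16000
t = np.linspace(0, 1, sample_rate, endpoint=False)
tone = (0.5 * np.sin(2 * np.pi * 440 * t) * 32767).astype(np.int16)

# Write, then read back the mono WAV file
write_wav_file(tone, "tone.wav", sample_rate=sample_rate)
audio, rate = read_wav_file("tone.wav")
print(audio.shape, rate)

# normalize_audio rescales to a peak amplitude of 1.0 (float result)
peak_normalized = normalize_audio(audio.astype(np.float64))
print(float(peak_normalized.max()))

# Base64 payload of the same file, e.g. for an input_audio message part
encoded = encode_audio_to_base64("tone.wav")
print(len(encoded))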
swarms/utils/history_output_formatter.py
@@ -1,7 +1,7 @@
 import yaml
 from swarms.structs.conversation import Conversation
-
 from typing import Literal, Union, List, Dict, Any
+from swarms.utils.xml_utils import to_xml_string

 HistoryOutputType = Literal[
     "list",
@@ -14,13 +14,12 @@ HistoryOutputType = Literal[
     "json",
     "all",
     "yaml",
+    "xml",
     # "dict-final",
     "dict-all-except-first",
     "str-all-except-first",
 ]

-output_type: HistoryOutputType
-

 def history_output_formatter(
     conversation: Conversation, type: HistoryOutputType = "list"
@@ -39,11 +38,12 @@ def history_output_formatter(
         return conversation.get_str()
     elif type == "yaml":
         return yaml.safe_dump(conversation.to_dict(), sort_keys=False)
-    # elif type == "dict-final":
-    #     return conversation.to_dict()
    elif type == "dict-all-except-first":
         return conversation.return_all_except_first()
     elif type == "str-all-except-first":
         return conversation.return_all_except_first_string()
+    elif type == "xml":
+        data = conversation.to_dict()
+        return to_xml_string(data, root_tag="conversation")
     else:
         raise ValueError(f"Invalid type: {type}")
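To close the loop on the new "xml" output type: a hedged sketch of formatting a conversation as XML, assuming Conversation can be constructed with defaults and exposes add(role, content), and that to_xml_string serializes the dict produced by Conversation.to_dict():

from swarms.structs.conversation import Conversation
from swarms.utils.history_output_formatter import history_output_formatter

# Assumed Conversation API: default constructor plus add(role, content)
conversation = Conversation()
conversation.add("user", "Summarize the 7.8.0 changes.")
conversation.add(
    "assistant", "New XML output type, audio utilities, and MCP tooling."
)

# "xml" routes through to_xml_string(conversation.to_dict(), root_tag="conversation")
print(history_output_formatter(conversation, type="xml"))

# Anything outside HistoryOutputType still raises ValueError
try:
    history_output_formatter(conversation, type="markdown")
except ValueError as err:
    print(err)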