swarms-7.8.9-py3-none-any.whl → swarms-7.9.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
swarms/telemetry/main.py CHANGED
@@ -1,16 +1,13 @@
- import os
  import datetime
  import hashlib
  import platform
  import socket
- import subprocess
  import uuid
- from typing import Dict
+ from typing import Any, Dict

- import pkg_resources
  import psutil
  import requests
- import toml
+ from functools import lru_cache


  # Helper functions
@@ -34,261 +31,100 @@ def get_machine_id():
      return hashed_id


- def get_system_info():
-     """
-     Gathers basic system information.
-
-     Returns:
-         dict: A dictionary containing system-related information.
-     """
-     info = {
+ @lru_cache(maxsize=1)
+ def get_comprehensive_system_info() -> Dict[str, Any]:
+     # Basic platform and hardware information
+     system_data = {
          "platform": platform.system(),
          "platform_release": platform.release(),
          "platform_version": platform.version(),
+         "platform_full": platform.platform(),
          "architecture": platform.machine(),
+         "architecture_details": platform.architecture()[0],
+         "processor": platform.processor(),
          "hostname": socket.gethostname(),
-         "ip_address": socket.gethostbyname(socket.gethostname()),
-         "mac_address": ":".join(
+     }
+
+     # MAC address
+     try:
+         system_data["mac_address"] = ":".join(
              [
                  f"{(uuid.getnode() >> elements) & 0xFF:02x}"
                  for elements in range(0, 2 * 6, 8)
              ][::-1]
-         ),
-         "processor": platform.processor(),
-         "python_version": platform.python_version(),
-         "Misc": system_info(),
-     }
-     return info
-
-
- def generate_unique_identifier():
-     """Generate unique identifier
-
-     Returns:
-         str: unique id
-
-     """
-     system_info = get_system_info()
-     unique_id = uuid.uuid5(uuid.NAMESPACE_DNS, str(system_info))
-     return str(unique_id)
-
-
- def get_local_ip():
-     """Get local ip
-
-     Returns:
-         str: local ip
-
-     """
-     return socket.gethostbyname(socket.gethostname())
-
-
- def get_user_device_data():
-     data = {
-         "ID": generate_user_id(),
-         "Machine ID": get_machine_id(),
-         "System Info": get_system_info(),
-         "UniqueID": generate_unique_identifier(),
-     }
-     return data
-
-
- def get_python_version():
-     return platform.python_version()
-
-
- def get_pip_version() -> str:
-     """Get pip version
-
-     Returns:
-         str: The version of pip installed
-     """
-     try:
-         pip_version = (
-             subprocess.check_output(["pip", "--version"])
-             .decode()
-             .split()[1]
          )
      except Exception as e:
-         pip_version = str(e)
-     return pip_version
-
-
- def get_swarms_verison() -> tuple[str, str]:
-     """Get swarms version from both command line and package
-
-     Returns:
-         tuple[str, str]: A tuple containing (command line version, package version)
-     """
-     try:
-         swarms_verison_cmd = (
-             subprocess.check_output(["swarms", "--version"])
-             .decode()
-             .split()[1]
-         )
-     except Exception as e:
-         swarms_verison_cmd = str(e)
-     swarms_verison_pkg = pkg_resources.get_distribution(
-         "swarms"
-     ).version
-     swarms_verison = swarms_verison_cmd, swarms_verison_pkg
-     return swarms_verison
-
-
- def get_os_version() -> str:
-     """Get operating system version
-
-     Returns:
-         str: The operating system version and platform details
-     """
-     return platform.platform()
+         system_data["mac_address"] = f"Error: {str(e)}"

+     # CPU information
+     system_data["cpu_count_logical"] = psutil.cpu_count(logical=True)
+     system_data["cpu_count_physical"] = psutil.cpu_count(
+         logical=False
+     )

- def get_cpu_info() -> str:
-     """Get CPU information
-
-     Returns:
-         str: The processor information
-     """
-     return platform.processor()
-
-
- def get_ram_info() -> str:
-     """Get RAM information
-
-     Returns:
-         str: A formatted string containing total, used and free RAM in GB
-     """
+     # Memory information
      vm = psutil.virtual_memory()
+     total_ram_gb = vm.total / (1024**3)
      used_ram_gb = vm.used / (1024**3)
      free_ram_gb = vm.free / (1024**3)
-     total_ram_gb = vm.total / (1024**3)
-     return (
-         f"{total_ram_gb:.2f} GB, used: {used_ram_gb:.2f}, free:"
-         f" {free_ram_gb:.2f}"
+     available_ram_gb = vm.available / (1024**3)
+
+     system_data.update(
+         {
+             "memory_total_gb": f"{total_ram_gb:.2f}",
+             "memory_used_gb": f"{used_ram_gb:.2f}",
+             "memory_free_gb": f"{free_ram_gb:.2f}",
+             "memory_available_gb": f"{available_ram_gb:.2f}",
+             "memory_summary": f"Total: {total_ram_gb:.2f} GB, Used: {used_ram_gb:.2f} GB, Free: {free_ram_gb:.2f} GB, Available: {available_ram_gb:.2f} GB",
+         }
      )

+     # Python version
+     system_data["python_version"] = platform.python_version()

- def get_package_mismatches(file_path: str = "pyproject.toml") -> str:
-     """Get package version mismatches between pyproject.toml and installed packages
-
-     Args:
-         file_path (str, optional): Path to pyproject.toml file. Defaults to "pyproject.toml".
-
-     Returns:
-         str: A formatted string containing package version mismatches
-     """
-     with open(file_path) as file:
-         pyproject = toml.load(file)
-     dependencies = pyproject["tool"]["poetry"]["dependencies"]
-     dev_dependencies = pyproject["tool"]["poetry"]["group"]["dev"][
-         "dependencies"
-     ]
-     dependencies.update(dev_dependencies)
-
-     installed_packages = {
-         pkg.key: pkg.version for pkg in pkg_resources.working_set
-     }
-
-     mismatches = []
-     for package, version_info in dependencies.items():
-         if isinstance(version_info, dict):
-             version_info = version_info["version"]
-         installed_version = installed_packages.get(package)
-         if installed_version and version_info.startswith("^"):
-             expected_version = version_info[1:]
-             if not installed_version.startswith(expected_version):
-                 mismatches.append(
-                     f"\t {package}: Mismatch,"
-                     f" pyproject.toml={expected_version},"
-                     f" pip={installed_version}"
-                 )
-         else:
-             mismatches.append(f"\t {package}: Not found in pip list")
-
-     return "\n" + "\n".join(mismatches)
-
-
- def system_info() -> dict[str, str]:
-     """Get system information including Python, pip, OS, CPU and RAM details
-
-     Returns:
-         dict[str, str]: A dictionary containing system information
-     """
-     return {
-         "Python Version": get_python_version(),
-         "Pip Version": get_pip_version(),
-         # "Swarms Version": swarms_verison,
-         "OS Version and Architecture": get_os_version(),
-         "CPU Info": get_cpu_info(),
-         "RAM Info": get_ram_info(),
-     }
-
-
- def capture_system_data() -> Dict[str, str]:
-     """
-     Captures extensive system data including platform information, user ID, IP address, CPU count,
-     memory information, and other system details.
-
-     Returns:
-         Dict[str, str]: A dictionary containing system data.
-     """
+     # Generate unique identifier based on system info
      try:
-         system_data = {
-             "platform": platform.system(),
-             "platform_version": platform.version(),
-             "platform_release": platform.release(),
-             "hostname": socket.gethostname(),
-             "ip_address": socket.gethostbyname(socket.gethostname()),
-             "cpu_count": psutil.cpu_count(logical=True),
-             "memory_total": f"{psutil.virtual_memory().total / (1024 ** 3):.2f} GB",
-             "memory_available": f"{psutil.virtual_memory().available / (1024 ** 3):.2f} GB",
-             "user_id": str(uuid.uuid4()),  # Unique user identifier
-             "machine_type": platform.machine(),
-             "processor": platform.processor(),
-             "architecture": platform.architecture()[0],
-         }
-
-         return system_data
+         unique_id = uuid.uuid5(uuid.NAMESPACE_DNS, str(system_data))
+         system_data["unique_identifier"] = str(unique_id)
      except Exception as e:
-         # logger.error("Failed to capture system data: {}", e)
-         print(f"Failed to capture system data: {e}")
+         system_data["unique_identifier"] = f"Error: {str(e)}"
+
+     return system_data


  def _log_agent_data(data_dict: dict):
      """Simple function to log agent data using requests library"""
-     if not data_dict:
-         return

      url = "https://swarms.world/api/get-agents/log-agents"
-     payload = {
+
+     log = {
          "data": data_dict,
-         "system_data": get_user_device_data(),
+         "system_data": get_comprehensive_system_info(),
          "timestamp": datetime.datetime.now(
              datetime.timezone.utc
          ).isoformat(),
      }

-     key = (
-         os.getenv("SWARMS_API_KEY")
-         or "Bearer sk-33979fd9a4e8e6b670090e4900a33dbe7452a15ccc705745f4eca2a70c88ea24"
-     )
+     payload = {
+         "data": log,
+     }
+
+     key = "Bearer sk-33979fd9a4e8e6b670090e4900a33dbe7452a15ccc705745f4eca2a70c88ea24"

      headers = {
          "Content-Type": "application/json",
          "Authorization": key,
      }

+     response = requests.post(
+         url, json=payload, headers=headers, timeout=10
+     )
+
      try:
-         response = requests.post(
-             url, json=payload, headers=headers, timeout=10
-         )
          if response.status_code == 200:
              return
      except Exception:
-         return
-
-     return
+         pass


  def log_agent_data(data_dict: dict):
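Note on the refactor above: the 7.8.x helpers (get_system_info, get_user_device_data, get_pip_version, get_package_mismatches, and friends) are collapsed into a single get_comprehensive_system_info() wrapped in @lru_cache(maxsize=1), so the system snapshot is gathered once per process and the same cached dict is reused by every telemetry call. A minimal sketch of that caching behavior (illustrative only):

    from swarms.telemetry.main import get_comprehensive_system_info

    first = get_comprehensive_system_info()   # gathers platform, CPU, memory, MAC address, etc.
    second = get_comprehensive_system_info()  # served from the lru_cache, nothing is re-collected
    assert first is second                    # the exact same cached dict object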
swarms/tools/base_tool.py CHANGED
@@ -2223,8 +2223,13 @@ class BaseTool(BaseModel):
              >>> tool_calls = [ChatCompletionMessageToolCall(...), ...]
              >>> results = tool.execute_function_calls_from_api_response(tool_calls)
          """
+         # Handle None API response gracefully by returning empty results
          if api_response is None:
-             raise ToolValidationError("API response cannot be None")
+             self._log_if_verbose(
+                 "warning",
+                 "API response is None, returning empty results. This may indicate the LLM did not return a valid response.",
+             )
+             return [] if not return_as_string else []

          # Handle direct list of tool call objects (e.g., from OpenAI ChatCompletionMessageToolCall or Anthropic BaseModels)
          if isinstance(api_response, list):
@@ -2258,14 +2263,14 @@ class BaseTool(BaseModel):
              except json.JSONDecodeError as e:
                  self._log_if_verbose(
                      "error",
-                     f"Failed to parse JSON from API response: {e}. Response: '{api_response[:100]}...'"
+                     f"Failed to parse JSON from API response: {e}. Response: '{api_response[:100]}...'",
                  )
                  return []

          if not isinstance(api_response, dict):
              self._log_if_verbose(
                  "warning",
-                 f"API response is not a dictionary (type: {type(api_response)}), returning empty list"
+                 f"API response is not a dictionary (type: {type(api_response)}), returning empty list",
              )
              return []

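Net effect of the base_tool.py change: execute_function_calls_from_api_response no longer raises ToolValidationError when the API response is None; it logs a warning (when verbose) and returns an empty list, while malformed JSON or non-dict responses keep returning [] as before. A minimal sketch of what a caller sees (tool is assumed to be an already-configured BaseTool instance; construction details are not part of this diff):

    # 7.8.9: raised ToolValidationError("API response cannot be None")
    # 7.9.1: logs a warning and returns an empty list
    results = tool.execute_function_calls_from_api_response(None)
    assert results == []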
swarms/utils/formatter.py CHANGED
@@ -1,6 +1,6 @@
  import threading
  import time
- from typing import Any, Callable, Dict, List
+ from typing import Any, Callable, Dict, List, Optional

  from rich.console import Console
  from rich.live import Live
@@ -10,6 +10,23 @@ from rich.table import Table
  from rich.text import Text


+ def choose_random_color():
+     import random
+
+     colors = [
+         "red",
+         "green",
+         "blue",
+         "yellow",
+         "magenta",
+         "cyan",
+         "white",
+     ]
+     random_color = random.choice(colors)
+
+     return random_color
+
+
  class Formatter:
      """
      A class for formatting and printing rich text to the console.
@@ -32,18 +49,8 @@ class Formatter:
              title (str, optional): The title of the panel. Defaults to "".
              style (str, optional): The style of the panel. Defaults to "bold blue".
          """
-         import random
-
-         colors = [
-             "red",
-             "green",
-             "blue",
-             "yellow",
-             "magenta",
-             "cyan",
-             "white",
-         ]
-         random_color = random.choice(colors)
+         random_color = choose_random_color()
+
          panel = Panel(
              content, title=title, style=f"bold {random_color}"
          )
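The inline color picking in print_panel is now the module-level helper choose_random_color(), shared with the new streaming panel added in the next hunk. A small illustrative sketch:

    from swarms.utils.formatter import formatter, choose_random_color

    print(choose_random_color())            # one of: red, green, blue, yellow, magenta, cyan, white
    formatter.print_panel("Hello, swarms")  # border color is re-randomized on each call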
@@ -145,5 +152,115 @@ class Formatter:
              )
              time.sleep(delay)

+     def print_streaming_panel(
+         self,
+         streaming_response,
+         title: str = "🤖 Agent Streaming Response",
+         style: str = None,
+         collect_chunks: bool = False,
+         on_chunk_callback: Optional[Callable] = None,
+     ) -> str:
+         """
+         Display real-time streaming response using Rich Live and Panel.
+         Similar to the approach used in litellm_stream.py.
+
+         Args:
+             streaming_response: The streaming response generator from LiteLLM.
+             title (str): Title of the panel.
+             style (str): Style for the panel border (if None, will use random color).
+             collect_chunks (bool): Whether to collect individual chunks for conversation saving.
+             on_chunk_callback (Optional[Callable]): Callback function to call for each chunk.
+
+         Returns:
+             str: The complete accumulated response text.
+         """
+         # Get random color similar to non-streaming approach
+         random_color = choose_random_color()
+         panel_style = (
+             f"bold {random_color}" if style is None else style
+         )
+         text_style = (
+             "white"  # Make text white instead of random color
+         )
+
+         def create_streaming_panel(text_obj, is_complete=False):
+             """Create panel with proper text wrapping using Rich's built-in capabilities"""
+             panel_title = f"[white]{title}[/white]"
+             if is_complete:
+                 panel_title += " [bold green]✅[/bold green]"
+
+             # Add blinking cursor if still streaming
+             display_text = Text.from_markup("")
+             display_text.append_text(text_obj)
+             if not is_complete:
+                 display_text.append("▊", style="bold green blink")
+
+             panel = Panel(
+                 display_text,
+                 title=panel_title,
+                 border_style=panel_style,
+                 padding=(1, 2),
+                 width=self.console.size.width,  # Rich handles wrapping automatically
+             )
+             return panel
+
+         # Create a Text object for streaming content
+         streaming_text = Text()
+         complete_response = ""
+         chunks_collected = []
+
+         # TRUE streaming with Rich's automatic text wrapping
+         with Live(
+             create_streaming_panel(streaming_text),
+             console=self.console,
+             refresh_per_second=20,
+         ) as live:
+             try:
+                 for part in streaming_response:
+                     if (
+                         hasattr(part, "choices")
+                         and part.choices
+                         and part.choices[0].delta.content
+                     ):
+                         # Add ONLY the new chunk to the Text object with random color style
+                         chunk = part.choices[0].delta.content
+                         streaming_text.append(chunk, style=text_style)
+                         complete_response += chunk
+
+                         # Collect chunks if requested
+                         if collect_chunks:
+                             chunks_collected.append(chunk)
+
+                         # Call chunk callback if provided
+                         if on_chunk_callback:
+                             on_chunk_callback(chunk)
+
+                         # Update display with new text - Rich handles all wrapping automatically
+                         live.update(
+                             create_streaming_panel(
+                                 streaming_text, is_complete=False
+                             )
+                         )
+
+                 # Final update to show completion
+                 live.update(
+                     create_streaming_panel(
+                         streaming_text, is_complete=True
+                     )
+                 )
+
+             except Exception as e:
+                 # Handle any streaming errors gracefully
+                 streaming_text.append(
+                     f"\n[Error: {str(e)}]", style="bold red"
+                 )
+                 live.update(
+                     create_streaming_panel(
+                         streaming_text, is_complete=True
+                     )
+                 )
+
+         return complete_response
+

  formatter = Formatter()
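print_streaming_panel consumes a LiteLLM-style generator (objects exposing .choices[0].delta.content), renders the text live inside a Rich panel, and returns the accumulated string. A hedged usage sketch, assuming streaming_response came from a model call made with stream=True:

    from swarms.utils.formatter import formatter

    chunks = []
    full_text = formatter.print_streaming_panel(
        streaming_response,               # generator from an LLM call with stream=True
        title="Demo stream",
        on_chunk_callback=chunks.append,  # invoked once per streamed delta
    )
    # full_text is the complete response; chunks holds the individual deltas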
@@ -151,6 +151,8 @@ class LiteLLM:
              retries  # Add retries for better reliability
          )

+         litellm.drop_params = True
+
      def output_for_tools(self, response: any):
          if self.mcp_call is True:
              out = response.choices[0].message.tool_calls[0].function
@@ -449,8 +451,12 @@ class LiteLLM:
          # Make the completion call
          response = completion(**completion_params)

+         # Handle streaming response
+         if self.stream:
+             return response  # Return the streaming generator directly
+
          # Handle tool-based response
-         if self.tools_list_dictionary is not None:
+         elif self.tools_list_dictionary is not None:
              return self.output_for_tools(response)
          elif self.return_all is True:
              return response.model_dump()
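Two behavior changes in the LiteLLM wrapper hunks above (the file path for this class is not shown in the extracted diff): litellm.drop_params = True makes LiteLLM silently drop parameters a given provider does not support, and when streaming is enabled the completion call now returns the raw streaming generator instead of a parsed result, which pairs with Formatter.print_streaming_panel. A hedged sketch, assuming the wrapper still accepts model_name and stream keyword arguments as in earlier swarms releases:

    # Assumed constructor arguments; not part of this diff hunk.
    llm = LiteLLM(model_name="gpt-4o-mini", stream=True)
    streaming_response = llm.run("Summarize the 7.9.1 telemetry changes.")  # a generator, not a string
    text = formatter.print_streaming_panel(streaming_response)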
@@ -0,0 +1,66 @@
+ import time
+ from typing import Any, Callable, Type, Union, Tuple
+ from loguru import logger
+
+
+ def retry_function(
+     func: Callable,
+     *args: Any,
+     max_retries: int = 3,
+     delay: float = 1.0,
+     backoff_factor: float = 2.0,
+     exceptions: Union[
+         Type[Exception], Tuple[Type[Exception], ...]
+     ] = Exception,
+     **kwargs: Any,
+ ) -> Any:
+     """
+     A function that retries another function if it raises specified exceptions.
+
+     Args:
+         func (Callable): The function to retry
+         *args: Positional arguments to pass to the function
+         max_retries (int): Maximum number of retries before giving up. Defaults to 3.
+         delay (float): Initial delay between retries in seconds. Defaults to 1.0.
+         backoff_factor (float): Multiplier applied to delay between retries. Defaults to 2.0.
+         exceptions (Exception or tuple): Exception(s) that trigger a retry. Defaults to Exception.
+         **kwargs: Keyword arguments to pass to the function
+
+     Returns:
+         Any: The return value of the function if successful
+
+     Example:
+         def fetch_data(url: str) -> dict:
+             return requests.get(url).json()
+
+         # Retry the fetch_data function
+         result = retry_function(
+             fetch_data,
+             "https://api.example.com",
+             max_retries=3,
+             exceptions=(ConnectionError, TimeoutError)
+         )
+     """
+     retries = 0
+     current_delay = delay
+
+     while True:
+         try:
+             return func(*args, **kwargs)
+         except exceptions as e:
+             retries += 1
+             if retries > max_retries:
+                 logger.error(
+                     f"Function {func.__name__} failed after {max_retries} retries. "
+                     f"Final error: {str(e)}"
+                 )
+                 raise
+
+             logger.warning(
+                 f"Retry {retries}/{max_retries} for function {func.__name__} "
+                 f"after error: {str(e)}. "
+                 f"Waiting {current_delay} seconds..."
+             )
+
+             time.sleep(current_delay)
+             current_delay *= backoff_factor
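With the defaults (max_retries=3, delay=1.0, backoff_factor=2.0), a persistently failing call is attempted four times in total, sleeping 1.0 s, 2.0 s, and 4.0 s between attempts before the final exception is re-raised. A small usage sketch (the flaky function is illustrative):

    import random

    def flaky() -> str:
        # Illustrative: fails roughly half of the time with a retryable error.
        if random.random() < 0.5:
            raise ConnectionError("transient failure")
        return "ok"

    result = retry_function(
        flaky,
        max_retries=3,
        delay=1.0,
        backoff_factor=2.0,
        exceptions=(ConnectionError,),
    )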