swarms 7.6.5__py3-none-any.whl → 7.6.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
swarms/structs/agent.py CHANGED
@@ -46,6 +46,11 @@ from swarms.structs.safe_loading import (
 )
 from swarms.telemetry.main import log_agent_data
 from swarms.tools.base_tool import BaseTool
+from swarms.tools.mcp_integration import (
+    MCPServerSseParams,
+    batch_mcp_flow,
+    mcp_flow_get_tool_schema,
+)
 from swarms.tools.tool_parse_exec import parse_and_execute_json
 from swarms.utils.any_to_str import any_to_str
 from swarms.utils.data_to_text import data_to_text
@@ -55,15 +60,10 @@ from swarms.utils.history_output_formatter import (
     history_output_formatter,
 )
 from swarms.utils.litellm_tokenizer import count_tokens
+from swarms.utils.litellm_wrapper import LiteLLM
 from swarms.utils.pdf_to_text import pdf_to_text
 from swarms.utils.str_to_dict import str_to_dict
 
-from swarms.tools.mcp_integration import (
-    batch_mcp_flow,
-    mcp_flow_get_tool_schema,
-    MCPServerSseParams,
-)
-
 
 # Utils
 # Custom stopping condition
@@ -104,6 +104,51 @@ agent_output_type = Literal[
 ToolUsageType = Union[BaseModel, Dict[str, Any]]
 
 
+# Agent Exceptions
+
+
+class AgentError(Exception):
+    """Base class for all agent-related exceptions."""
+
+    pass
+
+
+class AgentInitializationError(AgentError):
+    """Exception raised when the agent fails to initialize properly. Please check the configuration and parameters."""
+
+    pass
+
+
+class AgentRunError(AgentError):
+    """Exception raised when the agent encounters an error during execution. Ensure that the task and environment are set up correctly."""
+
+    pass
+
+
+class AgentLLMError(AgentError):
+    """Exception raised when there is an issue with the language model (LLM). Verify the model's availability and compatibility."""
+
+    pass
+
+
+class AgentToolError(AgentError):
+    """Exception raised when the agent fails to utilize a tool. Check the tool's configuration and availability."""
+
+    pass
+
+
+class AgentMemoryError(AgentError):
+    """Exception raised when the agent encounters a memory-related issue. Ensure that memory resources are properly allocated and accessible."""
+
+    pass
+
+
+class AgentLLMInitializationError(AgentError):
+    """Exception raised when the LLM fails to initialize properly. Please check the configuration and parameters."""
+
+    pass
+
+
 # [FEAT][AGENT]
 class Agent:
     """
@@ -479,6 +524,12 @@ class Agent:
         self.no_print = no_print
         self.tools_list_dictionary = tools_list_dictionary
         self.mcp_servers = mcp_servers
+        self._cached_llm = (
+            None  # Add this line to cache the LLM instance
+        )
+        self._default_model = (
+            "gpt-4o-mini"  # Move default model name here
+        )
 
         if (
             self.agent_name is not None
@@ -599,50 +650,49 @@
         self.tools_list_dictionary = self.mcp_tool_handling()
 
     def llm_handling(self):
-        from swarms.utils.litellm_wrapper import LiteLLM
+        # Use cached instance if available
+        if self._cached_llm is not None:
+            return self._cached_llm
 
         if self.model_name is None:
-            # raise ValueError("Model name cannot be None")
             logger.warning(
-                "Model name is not provided, using gpt-4o-mini. You can configure any model from litellm if desired."
+                f"Model name is not provided, using {self._default_model}. You can configure any model from litellm if desired."
            )
-            self.model_name = "gpt-4o-mini"
+            self.model_name = self._default_model
 
        try:
+            # Simplify initialization logic
+            common_args = {
+                "model_name": self.model_name,
+                "temperature": self.temperature,
+                "max_tokens": self.max_tokens,
+                "system_prompt": self.system_prompt,
+            }
+
             if self.llm_args is not None:
-                llm = LiteLLM(
-                    model_name=self.model_name, **self.llm_args
+                self._cached_llm = LiteLLM(
+                    **{**common_args, **self.llm_args}
                 )
             elif self.tools_list_dictionary is not None:
-
-                length_of_tools_list_dictionary = len(
-                    self.tools_list_dictionary
-                )
-
-                if length_of_tools_list_dictionary > 0:
-
-                    parallel_tool_calls = True
-
-                llm = LiteLLM(
-                    model_name=self.model_name,
-                    temperature=self.temperature,
-                    max_tokens=self.max_tokens,
-                    system_prompt=self.system_prompt,
+                self._cached_llm = LiteLLM(
+                    **common_args,
                     tools_list_dictionary=self.tools_list_dictionary,
                     tool_choice="auto",
-                    parallel_tool_calls=parallel_tool_calls,
+                    parallel_tool_calls=len(
+                        self.tools_list_dictionary
+                    )
+                    > 1,
                 )
             else:
-                llm = LiteLLM(
-                    model_name=self.model_name,
-                    temperature=self.temperature,
-                    max_tokens=self.max_tokens,
-                    system_prompt=self.system_prompt,
-                    stream=self.streaming_on,
+                self._cached_llm = LiteLLM(
+                    **common_args, stream=self.streaming_on
                 )
-            return llm
-        except Exception as e:
-            logger.error(f"Error in llm_handling: {e}")
+
+            return self._cached_llm
+        except AgentLLMInitializationError as e:
+            logger.error(
+                f"Error in llm_handling: {e} Your current configuration is not supported. Please check the configuration and parameters."
+            )
             return None
 
     def mcp_execution_flow(self, response: any):
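Note: the rewritten `llm_handling` above is essentially memoization: build the `LiteLLM` client once, store it in `self._cached_llm`, and return the stored instance on every later call. A standalone sketch of the same pattern, with hypothetical names (`object()` stands in for the real `LiteLLM(**common_args)` construction):

# Standalone sketch of the caching pattern used by llm_handling above.
class LLMHolder:
    def __init__(self, model_name=None):
        self._cached_llm = None            # cache slot, filled on first call
        self._default_model = "gpt-4o-mini"
        self.model_name = model_name

    def llm_handling(self):
        if self._cached_llm is not None:   # fast path: reuse the built client
            return self._cached_llm
        if self.model_name is None:        # fall back to the default model
            self.model_name = self._default_model
        self._cached_llm = object()        # build once, cache on the instance
        return self._cached_llm


holder = LLMHolder()
assert holder.llm_handling() is holder.llm_handling()  # same instance twice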
@@ -2336,6 +2386,8 @@
 
         Args:
             task (str): The task to be performed by the `llm` object.
+            img (str, optional): Path or URL to an image file.
+            audio (str, optional): Path or URL to an audio file.
             *args: Variable length argument list.
             **kwargs: Arbitrary keyword arguments.
 
@@ -2347,22 +2399,22 @@
             TypeError: If task is not a string or llm object is None.
             ValueError: If task is empty.
         """
-        if not isinstance(task, str):
-            raise TypeError("Task must be a string")
+        # if not isinstance(task, str):
+        #     task = any_to_str(task)
 
-        if task is None:
-            raise ValueError("Task cannot be None")
+        # if img is not None:
+        #     kwargs['img'] = img
 
-        # if self.llm is None:
-        #     raise TypeError("LLM object cannot be None")
+        # if audio is not None:
+        #     kwargs['audio'] = audio
 
         try:
-            out = self.llm.run(task, *args, **kwargs)
+            out = self.llm.run(task=task, *args, **kwargs)
 
             return out
-        except AttributeError as e:
+        except AgentLLMError as e:
             logger.error(
-                f"Error calling LLM: {e} You need a class with a run(task: str) method"
+                f"Error calling LLM: {e}. Task: {task}, Args: {args}, Kwargs: {kwargs}"
             )
             raise e
 
swarms/utils/litellm_wrapper.py CHANGED
@@ -5,7 +5,7 @@ import asyncio
 from typing import List
 
 from loguru import logger
-
+import litellm
 
 try:
     from litellm import completion, acompletion
@@ -77,6 +77,8 @@ class LiteLLM:
         tool_choice: str = "auto",
         parallel_tool_calls: bool = False,
         audio: str = None,
+        retries: int = 3,
+        verbose: bool = False,
         *args,
         **kwargs,
     ):
@@ -100,7 +102,18 @@ class LiteLLM:
         self.tools_list_dictionary = tools_list_dictionary
         self.tool_choice = tool_choice
         self.parallel_tool_calls = parallel_tool_calls
-        self.modalities = ["text"]
+        self.modalities = []
+        self._cached_messages = {}  # Cache for prepared messages
+        self.messages = []  # Initialize messages list
+
+        # Configure litellm settings
+        litellm.set_verbose = (
+            verbose  # Disable verbose mode for better performance
+        )
+        litellm.ssl_verify = ssl_verify
+        litellm.num_retries = (
+            retries  # Add retries for better reliability
+        )
 
     def _prepare_messages(self, task: str) -> list:
         """
@@ -112,15 +125,20 @@
         Returns:
             list: A list of messages prepared for the task.
         """
-        messages = []
+        # Check cache first
+        cache_key = f"{self.system_prompt}:{task}"
+        if cache_key in self._cached_messages:
+            return self._cached_messages[cache_key].copy()
 
-        if self.system_prompt:  # Check if system_prompt is not None
+        messages = []
+        if self.system_prompt:
             messages.append(
                 {"role": "system", "content": self.system_prompt}
             )
-
         messages.append({"role": "user", "content": task})
 
+        # Cache the prepared messages
+        self._cached_messages[cache_key] = messages.copy()
         return messages
 
     def audio_processing(self, task: str, audio: str):
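Note: `_prepare_messages` now memoizes on the key `f"{system_prompt}:{task}"` and returns a `.copy()` so callers cannot mutate the cached list in place. A minimal standalone sketch of the same idea:

# Standalone sketch of the message cache added to _prepare_messages above.
from typing import Optional

_cache = {}  # prepared message lists, keyed by prompt and task

def prepare_messages(system_prompt: Optional[str], task: str) -> list:
    cache_key = f"{system_prompt}:{task}"
    if cache_key in _cache:
        return _cache[cache_key].copy()  # copy so callers can't mutate the cache
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": task})
    _cache[cache_key] = messages.copy()
    return messages

first = prepare_messages("You are helpful.", "Say hi")
second = prepare_messages("You are helpful.", "Say hi")  # served from cache
assert first == second and first is not second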
@@ -182,15 +200,16 @@
         """
         Handle the modalities for the given task.
         """
+        self.messages = []  # Reset messages
+        self.modalities.append("text")
+
         if audio is not None:
             self.audio_processing(task=task, audio=audio)
+            self.modalities.append("audio")
 
         if img is not None:
             self.vision_processing(task=task, image=img)
-
-        if audio is not None and img is not None:
-            self.audio_processing(task=task, audio=audio)
-            self.vision_processing(task=task, image=img)
+            self.modalities.append("vision")
 
     def run(
         self,
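Note: `handle_modalities` now builds the modality list incrementally: `"text"` always, then `"audio"` and `"vision"` only when those inputs are present, replacing the old duplicated audio-plus-vision branch. Because `self.modalities` is appended to rather than reset (only `self.messages` is reset), repeated calls on one instance accumulate entries. A standalone sketch of the bookkeeping, using a fresh list per call instead of an instance attribute:

# Standalone sketch of the modality bookkeeping (file path is hypothetical).
def collect_modalities(audio=None, img=None):
    modalities = ["text"]            # text is always present
    if audio is not None:
        modalities.append("audio")   # only when audio input exists
    if img is not None:
        modalities.append("vision")  # only when image input exists
    return modalities

print(collect_modalities(img="photo.png"))  # ['text', 'vision']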
@@ -205,58 +224,78 @@
 
         Args:
             task (str): The task to run the model for.
-            *args: Additional positional arguments to pass to the model.
-            **kwargs: Additional keyword arguments to pass to the model.
+            audio (str, optional): Audio input if any. Defaults to None.
+            img (str, optional): Image input if any. Defaults to None.
+            *args: Additional positional arguments.
+            **kwargs: Additional keyword arguments.
 
         Returns:
             str: The content of the response from the model.
+
+        Raises:
+            Exception: If there is an error in processing the request.
         """
         try:
-
             messages = self._prepare_messages(task)
 
-            self.handle_modalities(task=task, audio=audio, img=img)
+            if audio is not None or img is not None:
+                self.handle_modalities(
+                    task=task, audio=audio, img=img
+                )
+                messages = (
+                    self.messages
+                )  # Use modality-processed messages
+
+            # Prepare common completion parameters
+            completion_params = {
+                "model": self.model_name,
+                "messages": messages,
+                "stream": self.stream,
+                "temperature": self.temperature,
+                "max_tokens": self.max_tokens,
+                **kwargs,
+            }
 
+            # Handle tool-based completion
             if self.tools_list_dictionary is not None:
-                response = completion(
-                    model=self.model_name,
-                    messages=messages,
-                    stream=self.stream,
-                    temperature=self.temperature,
-                    max_tokens=self.max_tokens,
-                    tools=self.tools_list_dictionary,
-                    modalities=self.modalities,
-                    tool_choice=self.tool_choice,
-                    parallel_tool_calls=self.parallel_tool_calls,
-                    *args,
-                    **kwargs,
+                completion_params.update(
+                    {
+                        "tools": self.tools_list_dictionary,
+                        "tool_choice": self.tool_choice,
+                        "parallel_tool_calls": self.parallel_tool_calls,
+                    }
                 )
-
+                response = completion(**completion_params)
                 return (
                     response.choices[0]
                     .message.tool_calls[0]
                     .function.arguments
                 )
 
-            else:
-                response = completion(
-                    model=self.model_name,
-                    messages=messages,
-                    stream=self.stream,
-                    temperature=self.temperature,
-                    max_tokens=self.max_tokens,
-                    modalities=self.modalities,
-                    *args,
-                    **kwargs,
+            # Handle modality-based completion
+            if (
+                self.modalities and len(self.modalities) > 1
+            ):  # More than just text
+                completion_params.update(
+                    {"modalities": self.modalities}
                 )
+                response = completion(**completion_params)
+                return response.choices[0].message.content
 
-            content = response.choices[
-                0
-            ].message.content  # Accessing the content
+            # Standard completion
+            response = completion(**completion_params)
+            return response.choices[0].message.content
 
-            return content
         except Exception as error:
-            logger.error(f"Error in LiteLLM: {error}")
+            logger.error(f"Error in LiteLLM run: {str(error)}")
+            if "rate_limit" in str(error).lower():
+                logger.warning(
+                    "Rate limit hit, retrying with exponential backoff..."
+                )
+                import time
+
+                time.sleep(2)  # Add a small delay before retry
+                return self.run(task, audio, img, *args, **kwargs)
             raise error
 
     def __call__(self, task: str, *args, **kwargs):
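Note: the refactor above collapses three near-identical `completion(...)` call sites into one `completion_params` dict: shared arguments are set once, each branch layers its extras via `dict.update`, and every path ends in `completion(**completion_params)`. A standalone sketch of that parameter-building pattern (function and argument names are illustrative):

# Sketch of the completion-parameter pattern introduced above.
def build_completion_params(model_name, messages, tools=None, modalities=None, **kwargs):
    params = {
        "model": model_name,
        "messages": messages,
        **kwargs,                    # caller overrides flow straight through
    }
    if tools is not None:            # tool-calling branch adds its extras
        params.update({"tools": tools, "tool_choice": "auto"})
    if modalities is not None and len(modalities) > 1:  # more than just text
        params.update({"modalities": modalities})
    return params

params = build_completion_params(
    "gpt-4o-mini", [{"role": "user", "content": "hi"}], temperature=0.1
)
print(sorted(params))  # ['messages', 'model', 'temperature']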
@@ -275,12 +314,12 @@
 
     async def arun(self, task: str, *args, **kwargs):
         """
-        Run the LLM model for the given task.
+        Run the LLM model asynchronously for the given task.
 
         Args:
             task (str): The task to run the model for.
-            *args: Additional positional arguments to pass to the model.
-            **kwargs: Additional keyword arguments to pass to the model.
+            *args: Additional positional arguments.
+            **kwargs: Additional keyword arguments.
 
         Returns:
             str: The content of the response from the model.
@@ -288,72 +327,113 @@
         try:
             messages = self._prepare_messages(task)
 
+            # Prepare common completion parameters
+            completion_params = {
+                "model": self.model_name,
+                "messages": messages,
+                "stream": self.stream,
+                "temperature": self.temperature,
+                "max_tokens": self.max_tokens,
+                **kwargs,
+            }
+
+            # Handle tool-based completion
             if self.tools_list_dictionary is not None:
-                response = await acompletion(
-                    model=self.model_name,
-                    messages=messages,
-                    stream=self.stream,
-                    temperature=self.temperature,
-                    max_tokens=self.max_tokens,
-                    tools=self.tools_list_dictionary,
-                    tool_choice=self.tool_choice,
-                    parallel_tool_calls=self.parallel_tool_calls,
-                    *args,
-                    **kwargs,
+                completion_params.update(
+                    {
+                        "tools": self.tools_list_dictionary,
+                        "tool_choice": self.tool_choice,
+                        "parallel_tool_calls": self.parallel_tool_calls,
+                    }
                 )
-
-                content = (
+                response = await acompletion(**completion_params)
+                return (
                     response.choices[0]
                     .message.tool_calls[0]
                     .function.arguments
                 )
 
-            # return response
-
-            else:
-                response = await acompletion(
-                    model=self.model_name,
-                    messages=messages,
-                    stream=self.stream,
-                    temperature=self.temperature,
-                    max_tokens=self.max_tokens,
-                    *args,
-                    **kwargs,
-                )
+            # Standard completion
+            response = await acompletion(**completion_params)
+            return response.choices[0].message.content
 
-            content = response.choices[
-                0
-            ].message.content  # Accessing the content
-
-            return content
         except Exception as error:
-            logger.error(f"Error in LiteLLM: {error}")
+            logger.error(f"Error in LiteLLM arun: {str(error)}")
+            if "rate_limit" in str(error).lower():
+                logger.warning(
+                    "Rate limit hit, retrying with exponential backoff..."
+                )
+                await asyncio.sleep(2)  # Use async sleep
+                return await self.arun(task, *args, **kwargs)
             raise error
 
+    async def _process_batch(
+        self, tasks: List[str], batch_size: int = 10
+    ):
+        """
+        Process a batch of tasks asynchronously.
+
+        Args:
+            tasks (List[str]): List of tasks to process.
+            batch_size (int): Size of each batch.
+
+        Returns:
+            List[str]: List of responses.
+        """
+        results = []
+        for i in range(0, len(tasks), batch_size):
+            batch = tasks[i : i + batch_size]
+            batch_results = await asyncio.gather(
+                *[self.arun(task) for task in batch],
+                return_exceptions=True,
+            )
+
+            # Handle any exceptions in the batch
+            for result in batch_results:
+                if isinstance(result, Exception):
+                    logger.error(
+                        f"Error in batch processing: {str(result)}"
+                    )
+                    results.append(str(result))
+                else:
+                    results.append(result)
+
+            # Add a small delay between batches to avoid rate limits
+            if i + batch_size < len(tasks):
+                await asyncio.sleep(0.5)
+
+        return results
+
     def batched_run(self, tasks: List[str], batch_size: int = 10):
         """
-        Run the LLM model for the given tasks in batches.
+        Run multiple tasks in batches synchronously.
+
+        Args:
+            tasks (List[str]): List of tasks to process.
+            batch_size (int): Size of each batch.
+
+        Returns:
+            List[str]: List of responses.
         """
         logger.info(
-            f"Running tasks in batches of size {batch_size}. Total tasks: {len(tasks)}"
+            f"Running {len(tasks)} tasks in batches of {batch_size}"
         )
-        results = []
-        for task in tasks:
-            logger.info(f"Running task: {task}")
-            results.append(self.run(task))
-        logger.info("Completed all tasks.")
-        return results
+        return asyncio.run(self._process_batch(tasks, batch_size))
 
-    def batched_arun(self, tasks: List[str], batch_size: int = 10):
+    async def batched_arun(
+        self, tasks: List[str], batch_size: int = 10
+    ):
         """
-        Run the LLM model for the given tasks in batches.
+        Run multiple tasks in batches asynchronously.
+
+        Args:
+            tasks (List[str]): List of tasks to process.
+            batch_size (int): Size of each batch.
+
+        Returns:
+            List[str]: List of responses.
         """
         logger.info(
-            f"Running asynchronous tasks in batches of size {batch_size}. Total tasks: {len(tasks)}"
+            f"Running {len(tasks)} tasks asynchronously in batches of {batch_size}"
         )
-        results = []
-        for task in tasks:
-            logger.info(f"Running asynchronous task: {task}")
-            results.append(asyncio.run(self.arun(task)))
-        logger.info("Completed all asynchronous tasks.")
-        return results
+        return await self._process_batch(tasks, batch_size)
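Note: both batch entry points now funnel into `_process_batch`, which runs each batch concurrently with `asyncio.gather`, records per-task exceptions as strings instead of aborting the whole run, and sleeps 0.5 s between batches. A usage sketch (model name and tasks are illustrative; a configured API key is assumed):

# Usage sketch for the rewritten 7.6.6 batch APIs.
import asyncio

from swarms.utils.litellm_wrapper import LiteLLM

llm = LiteLLM(model_name="gpt-4o-mini")
tasks = [f"Summarize document {i}" for i in range(25)]

# Synchronous entry point: drives _process_batch via asyncio.run,
# so don't call it from inside an already-running event loop.
results = llm.batched_run(tasks, batch_size=10)  # 3 batches: 10 + 10 + 5

# batched_arun is now a coroutine and must be awaited from async code:
async def main():
    return await llm.batched_arun(tasks, batch_size=10)

results_async = asyncio.run(main())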
swarms-7.6.6.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: swarms
-Version: 7.6.5
+Version: 7.6.6
 Summary: Swarms - TGSC
 Home-page: https://github.com/kyegomez/swarms
 License: MIT
@@ -23,6 +23,7 @@ Requires-Dist: docstring_parser (==0.16)
 Requires-Dist: httpx
 Requires-Dist: litellm
 Requires-Dist: loguru
+Requires-Dist: mcp
 Requires-Dist: networkx
 Requires-Dist: numpy
 Requires-Dist: psutil
swarms-7.6.6.dist-info/RECORD CHANGED
@@ -82,7 +82,7 @@ swarms/schemas/agent_input_schema.py,sha256=qhPyThMx2on91yG9mzNdP_08GpMh1IRDHDwF
 swarms/schemas/agent_step_schemas.py,sha256=a14gb58vR0xOwB_fwSJQbN6yb9HddEaT30E6hUrzEQA,2573
 swarms/schemas/base_schemas.py,sha256=UvBLVWg2qRen4tK5GJz50v42SiX95EQ5qK7hfyAHTEU,3267
 swarms/structs/__init__.py,sha256=ER0HI-9RQI22i10x6XQj6TaKoWJgk1a5XIP1KxiBsCU,4310
-swarms/structs/agent.py,sha256=IChi9EZiFUwCmj7dPa9H70TBkYd6VnTt-5Jx5TLPbWI,95234
+swarms/structs/agent.py,sha256=L8ZnvSNrmvK6z0n6RwHZ7MfZtRbwtQ5t6pQPcL2OLfA,96742
 swarms/structs/agent_builder.py,sha256=tYNpfO4_8cgfMHfgA5DAOWffHnt70p6CLt59esqfVCY,12133
 swarms/structs/agent_registry.py,sha256=il507cO1NF-d4ChyANVLuWrN8bXsEAi8_7bLJ_sTU6A,12112
 swarms/structs/agent_roles.py,sha256=8XEw6RjOOZelaZaWt4gXaYQm5WMLEhSO7W6Z8sQjmFg,582
@@ -172,7 +172,7 @@ swarms/utils/formatter.py,sha256=YykmcuWXkxvQ7a2Vq6OzWuqUDiIwro6VrtSt4ITbXcU,419
 swarms/utils/function_caller_model.py,sha256=ZfgCMzOizNnuZipYLclTziECNHszH9p8RQcUq7VNr4Q,4156
 swarms/utils/history_output_formatter.py,sha256=WHcd0xhSNRDKakXtkCjv0nW1NF-GM9SYcey3RrN5gl8,778
 swarms/utils/litellm_tokenizer.py,sha256=0AAj4NffBe2eHii_3_5SpQAhSiBbunJR8MzaBTIm7hg,484
-swarms/utils/litellm_wrapper.py,sha256=cXZ6nUrHnGhpVgolgbpNsyKq1_TzupJs8vmw-_XtCRM,11255
+swarms/utils/litellm_wrapper.py,sha256=wmWFD_TJI1_puVJTm9w3ZZYN6R0vQ6IRxAoEULHcysg,14194
 swarms/utils/loguru_logger.py,sha256=hIoSK3NHLpe7eAmjHRURrEYzNXYC2gbR7_Vv63Yaydk,685
 swarms/utils/markdown_message.py,sha256=RThHNnMf6ZLTlYK4vKn3yuewChaxWAYAWb0Xm_pTyIU,652
 swarms/utils/parse_code.py,sha256=XFOLymbdP3HzMZuqsj7pwUyisvUmTm0ev9iThR_ambI,1987
@@ -183,8 +183,8 @@ swarms/utils/try_except_wrapper.py,sha256=appEGu9Afy3TmdkNNXUgQ9yU9lj2j0uNkIoW0J
 swarms/utils/visualizer.py,sha256=0ylohEk62MAS6iPRaDOV03m9qo2k5J56tWlKJk_46p4,16927
 swarms/utils/vllm_wrapper.py,sha256=OIGnU9Vf81vE_hul1FK-xEhChFK8fxqZX6-fhQeW22c,4987
 swarms/utils/wrapper_clusterop.py,sha256=PMSCVM7ZT1vgj1D_MYAe835RR3SMLYxA-si2JS02yNQ,4220
-swarms-7.6.5.dist-info/LICENSE,sha256=jwRtEmTWjLrEsvFB6QFdYs2cEeZPRMdj-UMOFkPF8_0,11363
-swarms-7.6.5.dist-info/METADATA,sha256=2w553wEExRZlKYKxX_LCvhpbyJbhefIxw43-I1Jvwmw,104909
-swarms-7.6.5.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-swarms-7.6.5.dist-info/entry_points.txt,sha256=2K0rTtfO1X1WaO-waJlXIKw5Voa_EpAL_yU0HXE2Jgc,47
-swarms-7.6.5.dist-info/RECORD,,
+swarms-7.6.6.dist-info/LICENSE,sha256=jwRtEmTWjLrEsvFB6QFdYs2cEeZPRMdj-UMOFkPF8_0,11363
+swarms-7.6.6.dist-info/METADATA,sha256=TEtg7nZgZwYoprC2ACZmn1n_CC_c7LOWXBIs1aPLLeM,104928
+swarms-7.6.6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+swarms-7.6.6.dist-info/entry_points.txt,sha256=2K0rTtfO1X1WaO-waJlXIKw5Voa_EpAL_yU0HXE2Jgc,47
+swarms-7.6.6.dist-info/RECORD,,
File without changes