swarms-7.6.4-py3-none-any.whl → swarms-7.6.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swarms/structs/__init__.py +1 -3
- swarms/structs/agent.py +167 -38
- swarms/tools/mcp_integration.py +321 -483
- swarms/utils/litellm_wrapper.py +172 -92
- swarms/utils/vllm_wrapper.py +146 -0
- {swarms-7.6.4.dist-info → swarms-7.6.6.dist-info}/METADATA +2 -1
- {swarms-7.6.4.dist-info → swarms-7.6.6.dist-info}/RECORD +10 -10
- swarms/structs/auto_swarm.py +0 -229
- {swarms-7.6.4.dist-info → swarms-7.6.6.dist-info}/LICENSE +0 -0
- {swarms-7.6.4.dist-info → swarms-7.6.6.dist-info}/WHEEL +0 -0
- {swarms-7.6.4.dist-info → swarms-7.6.6.dist-info}/entry_points.txt +0 -0
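Summary of the changes shown below: `swarms/utils/litellm_wrapper.py` gains `retries` and `verbose` constructor parameters (applied as module-level litellm settings), a cache for prepared message lists, reworked audio/image modality handling, a rate-limit retry path in `run`/`arun`, and concurrent batch processing through a new `_process_batch` coroutine. `swarms/utils/vllm_wrapper.py` is a new module that wraps vLLM behind a LiteLLM-style interface. `swarms/structs/auto_swarm.py` is removed, `swarms/tools/mcp_integration.py` is substantially rewritten, and the package metadata adds a dependency on `mcp`.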
swarms/utils/litellm_wrapper.py
CHANGED
```diff
@@ -5,7 +5,7 @@ import asyncio
 from typing import List
 
 from loguru import logger
-
+import litellm
 
 try:
     from litellm import completion, acompletion
@@ -77,6 +77,8 @@ class LiteLLM:
         tool_choice: str = "auto",
         parallel_tool_calls: bool = False,
         audio: str = None,
+        retries: int = 3,
+        verbose: bool = False,
         *args,
         **kwargs,
     ):
@@ -100,7 +102,18 @@
         self.tools_list_dictionary = tools_list_dictionary
         self.tool_choice = tool_choice
         self.parallel_tool_calls = parallel_tool_calls
-        self.modalities = [
+        self.modalities = []
+        self._cached_messages = {}  # Cache for prepared messages
+        self.messages = []  # Initialize messages list
+
+        # Configure litellm settings
+        litellm.set_verbose = (
+            verbose  # Disable verbose mode for better performance
+        )
+        litellm.ssl_verify = ssl_verify
+        litellm.num_retries = (
+            retries  # Add retries for better reliability
+        )
 
     def _prepare_messages(self, task: str) -> list:
         """
```
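A minimal usage sketch of the new constructor knobs; the model name and task are placeholders, and the keyword names are taken from the hunk above. Note that `retries` and `verbose` are written into *global* litellm state (`litellm.num_retries`, `litellm.set_verbose`), so if several `LiteLLM` instances disagree, the most recently constructed one wins:

```python
from swarms.utils.litellm_wrapper import LiteLLM

llm = LiteLLM(
    model_name="gpt-4o-mini",  # placeholder model
    system_prompt="You are a concise assistant.",
    temperature=0.5,
    max_tokens=4000,
    retries=3,      # new in 7.6.6: forwarded to litellm.num_retries
    verbose=False,  # new in 7.6.6: forwarded to litellm.set_verbose
)
print(llm.run("Summarize the 7.6.6 litellm_wrapper changes in one sentence."))
```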
```diff
@@ -112,15 +125,20 @@
         Returns:
             list: A list of messages prepared for the task.
         """
-
+        # Check cache first
+        cache_key = f"{self.system_prompt}:{task}"
+        if cache_key in self._cached_messages:
+            return self._cached_messages[cache_key].copy()
 
-
+        messages = []
+        if self.system_prompt:
             messages.append(
                 {"role": "system", "content": self.system_prompt}
             )
-
         messages.append({"role": "user", "content": task})
 
+        # Cache the prepared messages
+        self._cached_messages[cache_key] = messages.copy()
         return messages
 
     def audio_processing(self, task: str, audio: str):
```
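The cache is keyed on the `system_prompt:task` string and both stores and returns `.copy()`-ed lists, so callers cannot mutate cached entries (the copies are shallow, so the message dicts themselves are still shared). A standalone sketch of the same pattern, with hypothetical names that are not part of the swarms API:

```python
# Illustrative re-implementation of the caching pattern above.
_cached_messages: dict = {}

def prepare_messages(system_prompt, task: str) -> list:
    cache_key = f"{system_prompt}:{task}"
    if cache_key in _cached_messages:
        # Hand back a fresh list so caller-side appends don't
        # corrupt the cached entry (shallow copy: dicts are shared).
        return _cached_messages[cache_key].copy()
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": task})
    _cached_messages[cache_key] = messages.copy()
    return messages
```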
```diff
@@ -182,15 +200,16 @@
         """
         Handle the modalities for the given task.
         """
+        self.messages = []  # Reset messages
+        self.modalities.append("text")
+
         if audio is not None:
             self.audio_processing(task=task, audio=audio)
+            self.modalities.append("audio")
 
         if img is not None:
             self.vision_processing(task=task, image=img)
-
-        if audio is not None and img is not None:
-            self.audio_processing(task=task, audio=audio)
-            self.vision_processing(task=task, image=img)
+            self.modalities.append("vision")
 
     def run(
         self,
```
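This removes the old double-processing branch, in which supplying both `audio` and `img` ran `audio_processing` and `vision_processing` a second time, and it now records the active modalities in `self.modalities`. A hypothetical multimodal call, assuming the `img`/`audio` keywords documented in the next hunk:

```python
from swarms.utils.litellm_wrapper import LiteLLM

llm = LiteLLM(model_name="gpt-4o")  # placeholder multimodal model

# The image path is a placeholder; this routes through vision_processing
# and appends "vision" to llm.modalities before the completion call.
caption = llm.run("Describe the attached image.", img="examples/photo.jpg")
```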
```diff
@@ -205,58 +224,78 @@
 
         Args:
             task (str): The task to run the model for.
-
-
+            audio (str, optional): Audio input if any. Defaults to None.
+            img (str, optional): Image input if any. Defaults to None.
+            *args: Additional positional arguments.
+            **kwargs: Additional keyword arguments.
 
         Returns:
             str: The content of the response from the model.
+
+        Raises:
+            Exception: If there is an error in processing the request.
         """
         try:
-
             messages = self._prepare_messages(task)
 
-
+            if audio is not None or img is not None:
+                self.handle_modalities(
+                    task=task, audio=audio, img=img
+                )
+                messages = (
+                    self.messages
+                )  # Use modality-processed messages
+
+            # Prepare common completion parameters
+            completion_params = {
+                "model": self.model_name,
+                "messages": messages,
+                "stream": self.stream,
+                "temperature": self.temperature,
+                "max_tokens": self.max_tokens,
+                **kwargs,
+            }
 
+            # Handle tool-based completion
             if self.tools_list_dictionary is not None:
-                response = completion(
-                    model=self.model_name,
-                    messages=messages,
-                    stream=self.stream,
-                    temperature=self.temperature,
-                    max_tokens=self.max_tokens,
-                    tools=self.tools_list_dictionary,
-                    modalities=self.modalities,
-                    tool_choice=self.tool_choice,
-                    parallel_tool_calls=self.parallel_tool_calls,
-                    *args,
-                    **kwargs,
+                completion_params.update(
+                    {
+                        "tools": self.tools_list_dictionary,
+                        "tool_choice": self.tool_choice,
+                        "parallel_tool_calls": self.parallel_tool_calls,
+                    }
                 )
-
+                response = completion(**completion_params)
                 return (
                     response.choices[0]
                     .message.tool_calls[0]
                     .function.arguments
                 )
 
-
-            response = completion(
-                model=self.model_name,
-                messages=messages,
-                stream=self.stream,
-                temperature=self.temperature,
-                max_tokens=self.max_tokens,
-                modalities=self.modalities,
-                *args,
-                **kwargs,
+            # Handle modality-based completion
+            if (
+                self.modalities and len(self.modalities) > 1
+            ):  # More than just text
+                completion_params.update(
+                    {"modalities": self.modalities}
                 )
+                response = completion(**completion_params)
+                return response.choices[0].message.content
 
-            content = response.choices[
-                0
-            ].message.content
+            # Standard completion
+            response = completion(**completion_params)
+            return response.choices[0].message.content
 
-            return content
         except Exception as error:
-            logger.error(f"Error in LiteLLM: {error}")
+            logger.error(f"Error in LiteLLM run: {str(error)}")
+            if "rate_limit" in str(error).lower():
+                logger.warning(
+                    "Rate limit hit, retrying with exponential backoff..."
+                )
+                import time
+
+                time.sleep(2)  # Add a small delay before retry
+                return self.run(task, audio, img, *args, **kwargs)
             raise error
 
     def __call__(self, task: str, *args, **kwargs):
```
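When tools are supplied, `run` returns the raw `function.arguments` string of the first tool call rather than the assistant message, so a caller parses it as JSON itself. A hedged sketch, using a placeholder tool in the OpenAI function-calling format that litellm accepts:

```python
import json
from swarms.utils.litellm_wrapper import LiteLLM

weather_tool = {
    "type": "function",
    "function": {
        "name": "get_weather",  # placeholder tool
        "description": "Get the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}

llm = LiteLLM(
    model_name="gpt-4o-mini",  # placeholder model
    tools_list_dictionary=[weather_tool],
)

# run() returns response.choices[0].message.tool_calls[0].function.arguments,
# i.e. a JSON-encoded string such as '{"city": "Berlin"}'.
arguments = json.loads(llm.run("What's the weather in Berlin?"))
print(arguments["city"])
```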
```diff
@@ -275,12 +314,12 @@
 
     async def arun(self, task: str, *args, **kwargs):
         """
-        Run the LLM model for the given task.
+        Run the LLM model asynchronously for the given task.
 
         Args:
             task (str): The task to run the model for.
-            *args: Additional positional arguments
-            **kwargs: Additional keyword arguments
+            *args: Additional positional arguments.
+            **kwargs: Additional keyword arguments.
 
         Returns:
             str: The content of the response from the model.
@@ -288,72 +327,113 @@
         try:
             messages = self._prepare_messages(task)
 
+            # Prepare common completion parameters
+            completion_params = {
+                "model": self.model_name,
+                "messages": messages,
+                "stream": self.stream,
+                "temperature": self.temperature,
+                "max_tokens": self.max_tokens,
+                **kwargs,
+            }
+
+            # Handle tool-based completion
             if self.tools_list_dictionary is not None:
-                response = await acompletion(
-                    model=self.model_name,
-                    messages=messages,
-                    stream=self.stream,
-                    temperature=self.temperature,
-                    max_tokens=self.max_tokens,
-                    tools=self.tools_list_dictionary,
-                    tool_choice=self.tool_choice,
-                    parallel_tool_calls=self.parallel_tool_calls,
-                    *args,
-                    **kwargs,
+                completion_params.update(
+                    {
+                        "tools": self.tools_list_dictionary,
+                        "tool_choice": self.tool_choice,
+                        "parallel_tool_calls": self.parallel_tool_calls,
+                    }
                 )
-
-                return (
+                response = await acompletion(**completion_params)
+                return (
                     response.choices[0]
                     .message.tool_calls[0]
                     .function.arguments
                 )
 
-
-
-
-            response = await acompletion(
-                model=self.model_name,
-                messages=messages,
-                stream=self.stream,
-                temperature=self.temperature,
-                max_tokens=self.max_tokens,
-                *args,
-                **kwargs,
-            )
+            # Standard completion
+            response = await acompletion(**completion_params)
+            return response.choices[0].message.content
 
-            content = response.choices[
-                0
-            ].message.content  # Accessing the content
-
-            return content
         except Exception as error:
-            logger.error(f"Error in LiteLLM: {error}")
+            logger.error(f"Error in LiteLLM arun: {str(error)}")
+            if "rate_limit" in str(error).lower():
+                logger.warning(
+                    "Rate limit hit, retrying with exponential backoff..."
+                )
+                await asyncio.sleep(2)  # Use async sleep
+                return await self.arun(task, *args, **kwargs)
             raise error
 
+    async def _process_batch(
+        self, tasks: List[str], batch_size: int = 10
+    ):
+        """
+        Process a batch of tasks asynchronously.
+
+        Args:
+            tasks (List[str]): List of tasks to process.
+            batch_size (int): Size of each batch.
+
+        Returns:
+            List[str]: List of responses.
+        """
+        results = []
+        for i in range(0, len(tasks), batch_size):
+            batch = tasks[i : i + batch_size]
+            batch_results = await asyncio.gather(
+                *[self.arun(task) for task in batch],
+                return_exceptions=True,
+            )
+
+            # Handle any exceptions in the batch
+            for result in batch_results:
+                if isinstance(result, Exception):
+                    logger.error(
+                        f"Error in batch processing: {str(result)}"
+                    )
+                    results.append(str(result))
+                else:
+                    results.append(result)
+
+            # Add a small delay between batches to avoid rate limits
+            if i + batch_size < len(tasks):
+                await asyncio.sleep(0.5)
+
+        return results
+
     def batched_run(self, tasks: List[str], batch_size: int = 10):
         """
-        Run
+        Run multiple tasks in batches synchronously.
+
+        Args:
+            tasks (List[str]): List of tasks to process.
+            batch_size (int): Size of each batch.
+
+        Returns:
+            List[str]: List of responses.
         """
         logger.info(
-            f"Running tasks in batches of
+            f"Running {len(tasks)} tasks in batches of {batch_size}"
         )
-        results = []
-        for task in tasks:
-            logger.info(f"Running task: {task}")
-            results.append(self.run(task))
-        logger.info("Completed all tasks.")
-        return results
+        return asyncio.run(self._process_batch(tasks, batch_size))
 
-    def batched_arun(
+    async def batched_arun(
+        self, tasks: List[str], batch_size: int = 10
+    ):
         """
-        Run
+        Run multiple tasks in batches asynchronously.
+
+        Args:
+            tasks (List[str]): List of tasks to process.
+            batch_size (int): Size of each batch.
+
+        Returns:
+            List[str]: List of responses.
         """
         logger.info(
-            f"Running
+            f"Running {len(tasks)} tasks asynchronously in batches of {batch_size}"
        )
-        results = []
-        for task in tasks:
-            logger.info(f"Running asynchronous task: {task}")
-            results.append(asyncio.run(self.arun(task)))
-        logger.info("Completed all asynchronous tasks.")
-        return results
+        return await self._process_batch(tasks, batch_size)
```
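A usage sketch of the new batching API. `batched_run` drives the private `_process_batch` coroutine with `asyncio.run`, so it must be called from synchronous code (it would raise inside an already-running event loop), while `batched_arun` is now a coroutine and has to be awaited; failed tasks come back as stringified exceptions instead of raising. Model name and tasks are placeholders:

```python
import asyncio
from swarms.utils.litellm_wrapper import LiteLLM

llm = LiteLLM(model_name="gpt-4o-mini")  # placeholder model
tasks = [f"Summarize document {i}" for i in range(25)]

# Synchronous entry point: runs 10 tasks concurrently per batch,
# sleeping 0.5 s between batches to ease rate limits.
results = llm.batched_run(tasks, batch_size=10)

# Async entry point: batched_arun is now `async def`.
async def main() -> list:
    return await llm.batched_arun(tasks, batch_size=10)

results_async = asyncio.run(main())
```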
swarms/utils/vllm_wrapper.py
ADDED (new file, 146 lines)
```python
from typing import List, Optional, Dict, Any
from loguru import logger

try:
    from vllm import LLM, SamplingParams
except ImportError:
    import subprocess
    import sys

    print("Installing vllm")
    subprocess.check_call(
        [sys.executable, "-m", "pip", "install", "-U", "vllm"]
    )
    print("vllm installed")
    from vllm import LLM, SamplingParams


class VLLMWrapper:
    """
    A wrapper class for vLLM that provides a similar interface to LiteLLM.
    This class handles model initialization and inference using vLLM.
    """

    def __init__(
        self,
        model_name: str = "meta-llama/Llama-2-7b-chat-hf",
        system_prompt: Optional[str] = None,
        stream: bool = False,
        temperature: float = 0.5,
        max_tokens: int = 4000,
        max_completion_tokens: int = 4000,
        tools_list_dictionary: Optional[List[Dict[str, Any]]] = None,
        tool_choice: str = "auto",
        parallel_tool_calls: bool = False,
        *args,
        **kwargs,
    ):
        """
        Initialize the vLLM wrapper with the given parameters.

        Args:
            model_name (str): The name of the model to use. Defaults to "meta-llama/Llama-2-7b-chat-hf".
            system_prompt (str, optional): The system prompt to use. Defaults to None.
            stream (bool): Whether to stream the output. Defaults to False.
            temperature (float): The temperature for sampling. Defaults to 0.5.
            max_tokens (int): The maximum number of tokens to generate. Defaults to 4000.
            max_completion_tokens (int): The maximum number of completion tokens. Defaults to 4000.
            tools_list_dictionary (List[Dict[str, Any]], optional): List of available tools. Defaults to None.
            tool_choice (str): How to choose tools. Defaults to "auto".
            parallel_tool_calls (bool): Whether to allow parallel tool calls. Defaults to False.
        """
        self.model_name = model_name
        self.system_prompt = system_prompt
        self.stream = stream
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.max_completion_tokens = max_completion_tokens
        self.tools_list_dictionary = tools_list_dictionary
        self.tool_choice = tool_choice
        self.parallel_tool_calls = parallel_tool_calls

        # Initialize vLLM
        self.llm = LLM(model=model_name, **kwargs)
        self.sampling_params = SamplingParams(
            temperature=temperature,
            max_tokens=max_tokens,
        )

    def _prepare_prompt(self, task: str) -> str:
        """
        Prepare the prompt for the given task.

        Args:
            task (str): The task to prepare the prompt for.

        Returns:
            str: The prepared prompt.
        """
        if self.system_prompt:
            return f"{self.system_prompt}\n\nUser: {task}\nAssistant:"
        return f"User: {task}\nAssistant:"

    def run(self, task: str, *args, **kwargs) -> str:
        """
        Run the model for the given task.

        Args:
            task (str): The task to run the model for.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.

        Returns:
            str: The model's response.
        """
        try:
            prompt = self._prepare_prompt(task)

            outputs = self.llm.generate(prompt, self.sampling_params)
            response = outputs[0].outputs[0].text.strip()

            return response

        except Exception as error:
            logger.error(f"Error in VLLMWrapper: {error}")
            raise error

    def __call__(self, task: str, *args, **kwargs) -> str:
        """
        Call the model for the given task.

        Args:
            task (str): The task to run the model for.
            *args: Additional positional arguments.
            **kwargs: Additional keyword arguments.

        Returns:
            str: The model's response.
        """
        return self.run(task, *args, **kwargs)

    def batched_run(
        self, tasks: List[str], batch_size: int = 10
    ) -> List[str]:
        """
        Run the model for multiple tasks in batches.

        Args:
            tasks (List[str]): List of tasks to run.
            batch_size (int): Size of each batch. Defaults to 10.

        Returns:
            List[str]: List of model responses.
        """
        logger.info(
            f"Running tasks in batches of size {batch_size}. Total tasks: {len(tasks)}"
        )
        results = []

        for i in range(0, len(tasks), batch_size):
            batch = tasks[i : i + batch_size]
            for task in batch:
                logger.info(f"Running task: {task}")
                results.append(self.run(task))

        logger.info("Completed all tasks.")
        return results
```
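A hedged usage sketch of the new wrapper. Two behaviors visible in the listing are worth knowing before importing: the module attempts a `pip install -U vllm` at import time if vLLM is missing, and `batched_run` processes tasks sequentially (batching only chunks the loop, unlike the concurrent LiteLLM version). The model name below is the wrapper's default:

```python
from swarms.utils.vllm_wrapper import VLLMWrapper

llm = VLLMWrapper(
    model_name="meta-llama/Llama-2-7b-chat-hf",  # default from the listing
    system_prompt="You are a concise assistant.",
    temperature=0.5,
    max_tokens=512,  # forwarded to vllm.SamplingParams
)

# __call__ delegates to run(), which formats the prompt as
# "<system>\n\nUser: <task>\nAssistant:" and decodes locally.
print(llm("Explain tensor parallelism in two sentences."))
```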
{swarms-7.6.4.dist-info → swarms-7.6.6.dist-info}/METADATA
CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: swarms
-Version: 7.6.4
+Version: 7.6.6
 Summary: Swarms - TGSC
 Home-page: https://github.com/kyegomez/swarms
 License: MIT
@@ -23,6 +23,7 @@ Requires-Dist: docstring_parser (==0.16)
 Requires-Dist: httpx
 Requires-Dist: litellm
 Requires-Dist: loguru
+Requires-Dist: mcp
 Requires-Dist: networkx
 Requires-Dist: numpy
 Requires-Dist: psutil
```
{swarms-7.6.4.dist-info → swarms-7.6.6.dist-info}/RECORD
CHANGED
```diff
@@ -81,15 +81,14 @@ swarms/schemas/__init__.py,sha256=EqqtVcpoptF1kfy19Wykp22ut4AA0z-yMQ5H9WB7ptA,18
 swarms/schemas/agent_input_schema.py,sha256=qhPyThMx2on91yG9mzNdP_08GpMh1IRDHDwFna29jPs,6345
 swarms/schemas/agent_step_schemas.py,sha256=a14gb58vR0xOwB_fwSJQbN6yb9HddEaT30E6hUrzEQA,2573
 swarms/schemas/base_schemas.py,sha256=UvBLVWg2qRen4tK5GJz50v42SiX95EQ5qK7hfyAHTEU,3267
-swarms/structs/__init__.py,sha256=
-swarms/structs/agent.py,sha256=
+swarms/structs/__init__.py,sha256=ER0HI-9RQI22i10x6XQj6TaKoWJgk1a5XIP1KxiBsCU,4310
+swarms/structs/agent.py,sha256=L8ZnvSNrmvK6z0n6RwHZ7MfZtRbwtQ5t6pQPcL2OLfA,96742
 swarms/structs/agent_builder.py,sha256=tYNpfO4_8cgfMHfgA5DAOWffHnt70p6CLt59esqfVCY,12133
 swarms/structs/agent_registry.py,sha256=il507cO1NF-d4ChyANVLuWrN8bXsEAi8_7bLJ_sTU6A,12112
 swarms/structs/agent_roles.py,sha256=8XEw6RjOOZelaZaWt4gXaYQm5WMLEhSO7W6Z8sQjmFg,582
 swarms/structs/agent_router.py,sha256=YZw5AaK2yTvxkOA7ouED_4MoYgn0XZggvo1wrglp-4E,13017
 swarms/structs/agents_available.py,sha256=SedxDim-0IWgGsNwJZxRIUMfKyAFFXdvXSYeBNu0zGw,2804
 swarms/structs/async_workflow.py,sha256=7YWsLPyGY-1-mMxoIXWQ0FnYH6F227nxsS9PFAJoF9Q,26214
-swarms/structs/auto_swarm.py,sha256=AHWswlEWDL_i3V8IP362tx6pi_B2arlZhALykrkI5OA,8215
 swarms/structs/auto_swarm_builder.py,sha256=vPM5Kq59D_FvuWJB8hxgHuEvTXsxDxovlBnHGVQsM4o,10938
 swarms/structs/base_structure.py,sha256=GDu4QJQQmhU7IyuFJHIh9UVThACCva-L7uoMbVD9l4s,15901
 swarms/structs/base_swarm.py,sha256=LSGJDPJdyUCcK6698mNtjxoC1OU3s_J2NxC2k_ccGUs,23779
@@ -153,7 +152,7 @@ swarms/tools/function_util.py,sha256=DAnAPO0Ik__TAqL7IJzFmkukHnhpsW_QtALl3yj837g
 swarms/tools/json_former.py,sha256=4ugLQ_EZpghhuhFsVKsy-ehin9K64pqVE2gLU7BTO_M,14376
 swarms/tools/json_utils.py,sha256=WKMZjcJ0Vt6lgIjiTBenslcfjgRSLX4UWs4uDkKFMQI,1316
 swarms/tools/logits_processor.py,sha256=NifZZ5w9yemWGJAJ5nHFrphtZVX1XlyesgvYZTxK1GM,2965
-swarms/tools/mcp_integration.py,sha256=
+swarms/tools/mcp_integration.py,sha256=rUXxC9NvXQ3V4B7Lt1AoI4ZYiCl2-T4FW3_689HTRZk,12839
 swarms/tools/openai_func_calling_schema_pydantic.py,sha256=6BAH9kuaVTvJIbjgSSJ5XvHhWvWszPxgarkfUuE5Ads,978
 swarms/tools/openai_tool_creator_decorator.py,sha256=SYZjHnARjWvnH9cBdj7Kc_Yy1muvNxMT3RQz8KkA2SE,2578
 swarms/tools/py_func_to_openai_func_str.py,sha256=W112Gu0CmAiHrNWnRMcnoGiVZEy2FxAU4xMvnG9XP4g,15710
@@ -173,7 +172,7 @@ swarms/utils/formatter.py,sha256=YykmcuWXkxvQ7a2Vq6OzWuqUDiIwro6VrtSt4ITbXcU,419
 swarms/utils/function_caller_model.py,sha256=ZfgCMzOizNnuZipYLclTziECNHszH9p8RQcUq7VNr4Q,4156
 swarms/utils/history_output_formatter.py,sha256=WHcd0xhSNRDKakXtkCjv0nW1NF-GM9SYcey3RrN5gl8,778
 swarms/utils/litellm_tokenizer.py,sha256=0AAj4NffBe2eHii_3_5SpQAhSiBbunJR8MzaBTIm7hg,484
-swarms/utils/litellm_wrapper.py,sha256=
+swarms/utils/litellm_wrapper.py,sha256=wmWFD_TJI1_puVJTm9w3ZZYN6R0vQ6IRxAoEULHcysg,14194
 swarms/utils/loguru_logger.py,sha256=hIoSK3NHLpe7eAmjHRURrEYzNXYC2gbR7_Vv63Yaydk,685
 swarms/utils/markdown_message.py,sha256=RThHNnMf6ZLTlYK4vKn3yuewChaxWAYAWb0Xm_pTyIU,652
 swarms/utils/parse_code.py,sha256=XFOLymbdP3HzMZuqsj7pwUyisvUmTm0ev9iThR_ambI,1987
@@ -182,9 +181,10 @@ swarms/utils/str_to_dict.py,sha256=T3Jsdjz87WIlkSo7jAW6BB80sv0Ns49WT1qXlOrdEoE,8
 swarms/utils/swarm_reliability_checks.py,sha256=MsgUULt3HYg72D0HifZNmtCyJYpLA2UDA2wQixI-NbA,2562
 swarms/utils/try_except_wrapper.py,sha256=appEGu9Afy3TmdkNNXUgQ9yU9lj2j0uNkIoW0JhVzzY,3917
 swarms/utils/visualizer.py,sha256=0ylohEk62MAS6iPRaDOV03m9qo2k5J56tWlKJk_46p4,16927
+swarms/utils/vllm_wrapper.py,sha256=OIGnU9Vf81vE_hul1FK-xEhChFK8fxqZX6-fhQeW22c,4987
 swarms/utils/wrapper_clusterop.py,sha256=PMSCVM7ZT1vgj1D_MYAe835RR3SMLYxA-si2JS02yNQ,4220
-swarms-7.6.4.dist-info/LICENSE,sha256=
-swarms-7.6.4.dist-info/METADATA,sha256=
-swarms-7.6.4.dist-info/WHEEL,sha256=
-swarms-7.6.4.dist-info/entry_points.txt,sha256=
-swarms-7.6.4.dist-info/RECORD,,
+swarms-7.6.6.dist-info/LICENSE,sha256=jwRtEmTWjLrEsvFB6QFdYs2cEeZPRMdj-UMOFkPF8_0,11363
+swarms-7.6.6.dist-info/METADATA,sha256=TEtg7nZgZwYoprC2ACZmn1n_CC_c7LOWXBIs1aPLLeM,104928
+swarms-7.6.6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+swarms-7.6.6.dist-info/entry_points.txt,sha256=2K0rTtfO1X1WaO-waJlXIKw5Voa_EpAL_yU0HXE2Jgc,47
+swarms-7.6.6.dist-info/RECORD,,
```