swarms 7.8.4-py3-none-any.whl → 7.8.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swarms/agents/ape_agent.py +5 -22
- swarms/agents/consistency_agent.py +1 -1
- swarms/agents/i_agent.py +1 -1
- swarms/agents/reasoning_agents.py +99 -3
- swarms/agents/reasoning_duo.py +1 -1
- swarms/cli/main.py +1 -1
- swarms/communication/__init__.py +1 -0
- swarms/communication/duckdb_wrap.py +32 -2
- swarms/communication/pulsar_struct.py +45 -19
- swarms/communication/redis_wrap.py +56 -11
- swarms/communication/supabase_wrap.py +1659 -0
- swarms/prompts/prompt.py +0 -3
- swarms/schemas/agent_completion_response.py +71 -0
- swarms/schemas/agent_rag_schema.py +7 -0
- swarms/schemas/conversation_schema.py +9 -0
- swarms/schemas/llm_agent_schema.py +99 -81
- swarms/schemas/swarms_api_schemas.py +164 -0
- swarms/structs/__init__.py +14 -11
- swarms/structs/agent.py +219 -199
- swarms/structs/agent_rag_handler.py +685 -0
- swarms/structs/base_swarm.py +2 -1
- swarms/structs/conversation.py +608 -87
- swarms/structs/csv_to_agent.py +153 -100
- swarms/structs/deep_research_swarm.py +197 -193
- swarms/structs/dynamic_conversational_swarm.py +18 -7
- swarms/structs/hiearchical_swarm.py +1 -1
- swarms/structs/hybrid_hiearchical_peer_swarm.py +2 -18
- swarms/structs/image_batch_processor.py +261 -0
- swarms/structs/interactive_groupchat.py +356 -0
- swarms/structs/ma_blocks.py +75 -0
- swarms/structs/majority_voting.py +1 -1
- swarms/structs/mixture_of_agents.py +1 -1
- swarms/structs/multi_agent_router.py +3 -2
- swarms/structs/rearrange.py +3 -3
- swarms/structs/sequential_workflow.py +3 -3
- swarms/structs/swarm_matcher.py +500 -411
- swarms/structs/swarm_router.py +15 -97
- swarms/structs/swarming_architectures.py +1 -1
- swarms/tools/mcp_client_call.py +3 -0
- swarms/utils/__init__.py +10 -2
- swarms/utils/check_all_model_max_tokens.py +43 -0
- swarms/utils/generate_keys.py +0 -27
- swarms/utils/history_output_formatter.py +5 -20
- swarms/utils/litellm_wrapper.py +208 -60
- swarms/utils/output_types.py +24 -0
- swarms/utils/vllm_wrapper.py +5 -6
- swarms/utils/xml_utils.py +37 -2
- {swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/METADATA +31 -55
- {swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/RECORD +53 -48
- swarms/structs/multi_agent_collab.py +0 -242
- swarms/structs/output_types.py +0 -6
- swarms/utils/markdown_message.py +0 -21
- swarms/utils/visualizer.py +0 -510
- swarms/utils/wrapper_clusterop.py +0 -127
- /swarms/{tools → schemas}/tool_schema_base_model.py +0 -0
- {swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/LICENSE +0 -0
- {swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/WHEEL +0 -0
- {swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/entry_points.txt +0 -0
swarms/utils/litellm_wrapper.py
CHANGED
@@ -1,6 +1,8 @@
+import traceback
 from typing import Optional
 import base64
 import requests
+from pathlib import Path
 
 import asyncio
 from typing import List
@@ -9,11 +11,7 @@ from loguru import logger
 import litellm
 from pydantic import BaseModel
 
-from litellm import completion, acompletion
-
-litellm.set_verbose = True
-litellm.ssl_verify = False
-# litellm._turn_on_debug()
+from litellm import completion, acompletion, supports_vision
 
 
 class LiteLLMException(Exception):
@@ -53,6 +51,35 @@ def get_audio_base64(audio_source: str) -> str:
     return encoded_string
 
 
+def get_image_base64(image_source: str) -> str:
+    """
+    Convert image from a given source to a base64 encoded string.
+    Handles URLs, local file paths, and data URIs.
+    """
+    # If already a data URI, return as is
+    if image_source.startswith("data:image"):
+        return image_source
+
+    # Handle URL
+    if image_source.startswith(("http://", "https://")):
+        response = requests.get(image_source)
+        response.raise_for_status()
+        image_data = response.content
+    # Handle local file
+    else:
+        with open(image_source, "rb") as file:
+            image_data = file.read()
+
+    # Get file extension for mime type
+    extension = Path(image_source).suffix.lower()
+    mime_type = (
+        f"image/{extension[1:]}" if extension else "image/jpeg"
+    )
+
+    encoded_string = base64.b64encode(image_data).decode("utf-8")
+    return f"data:{mime_type};base64,{encoded_string}"
+
+
 class LiteLLM:
     """
     This class represents a LiteLLM.
@@ -72,12 +99,15 @@ class LiteLLM:
         tool_choice: str = "auto",
         parallel_tool_calls: bool = False,
         audio: str = None,
-        retries: int =
+        retries: int = 0,
         verbose: bool = False,
         caching: bool = False,
         mcp_call: bool = False,
         top_p: float = 1.0,
         functions: List[dict] = None,
+        return_all: bool = False,
+        base_url: str = None,
+        api_key: str = None,
         *args,
         **kwargs,
     ):
@@ -105,8 +135,11 @@ class LiteLLM:
         self.mcp_call = mcp_call
         self.top_p = top_p
         self.functions = functions
+        self.audio = audio
+        self.return_all = return_all
+        self.base_url = base_url
+        self.api_key = api_key
         self.modalities = []
-        self._cached_messages = {}  # Cache for prepared messages
         self.messages = []  # Initialize messages list
 
         # Configure litellm settings
@@ -135,7 +168,11 @@ class LiteLLM:
             out = out.model_dump()
         return out
 
-    def _prepare_messages(
+    def _prepare_messages(
+        self,
+        task: str,
+        img: str = None,
+    ):
         """
         Prepare the messages for the given task.
 
@@ -145,91 +182,201 @@ class LiteLLM:
         Returns:
             list: A list of messages prepared for the task.
         """
-
-        cache_key = f"{self.system_prompt}:{task}"
-        if cache_key in self._cached_messages:
-            return self._cached_messages[cache_key].copy()
+        self.check_if_model_supports_vision(img=img)
 
+        # Initialize messages
         messages = []
-
+
+        # Add system prompt if present
+        if self.system_prompt is not None:
             messages.append(
                 {"role": "system", "content": self.system_prompt}
             )
-        messages.append({"role": "user", "content": task})
 
-        #
-
+        # Handle vision case
+        if img is not None:
+            messages = self.vision_processing(
+                task=task, image=img, messages=messages
+            )
+        else:
+            messages.append({"role": "user", "content": task})
+
         return messages
 
-    def
+    def anthropic_vision_processing(
+        self, task: str, image: str, messages: list
+    ) -> list:
         """
-        Process
-
-        Args:
-            task (str): The task to be processed.
-            audio (str): The path or identifier for the audio file.
+        Process vision input specifically for Anthropic models.
+        Handles Anthropic's specific image format requirements.
         """
-
-
-
+        # Get base64 encoded image
+        image_url = get_image_base64(image)
+
+        # Extract mime type from the data URI or use default
+        mime_type = "image/jpeg"  # default
+        if "data:" in image_url and ";base64," in image_url:
+            mime_type = image_url.split(";base64,")[0].split("data:")[
+                1
+            ]
+
+        # Ensure mime type is one of the supported formats
+        supported_formats = [
+            "image/jpeg",
+            "image/png",
+            "image/gif",
+            "image/webp",
+        ]
+        if mime_type not in supported_formats:
+            mime_type = (
+                "image/jpeg"  # fallback to jpeg if unsupported
+            )
 
-        #
-
+        # Construct Anthropic vision message
+        messages.append(
             {
                 "role": "user",
                 "content": [
                     {"type": "text", "text": task},
                     {
-                        "type": "
-                        "
-                        "
-                        "format":
+                        "type": "image_url",
+                        "image_url": {
+                            "url": image_url,
+                            "format": mime_type,
                         },
                     },
                 ],
             }
         )
 
-
+        return messages
+
+    def openai_vision_processing(
+        self, task: str, image: str, messages: list
+    ) -> list:
+        """
+        Process vision input specifically for OpenAI models.
+        Handles OpenAI's specific image format requirements.
+        """
+        # Get base64 encoded image with proper format
+        image_url = get_image_base64(image)
+
+        # Prepare vision message
+        vision_message = {
+            "type": "image_url",
+            "image_url": {"url": image_url},
+        }
+
+        # Add format for specific models
+        extension = Path(image).suffix.lower()
+        mime_type = (
+            f"image/{extension[1:]}" if extension else "image/jpeg"
+        )
+        vision_message["image_url"]["format"] = mime_type
+
+        # Append vision message
+        messages.append(
+            {
+                "role": "user",
+                "content": [
+                    {"type": "text", "text": task},
+                    vision_message,
+                ],
+            }
+        )
+
+        return messages
+
+    def vision_processing(
+        self, task: str, image: str, messages: Optional[list] = None
+    ):
         """
         Process the image for the given task.
+        Handles different image formats and model requirements.
+        """
+        # # # Handle Anthropic models separately
+        # # if "anthropic" in self.model_name.lower() or "claude" in self.model_name.lower():
+        # #     messages = self.anthropic_vision_processing(task, image, messages)
+        # #     return messages
+
+        # # Get base64 encoded image with proper format
+        # image_url = get_image_base64(image)
+
+        # # Prepare vision message
+        # vision_message = {
+        #     "type": "image_url",
+        #     "image_url": {"url": image_url},
+        # }
+
+        # # Add format for specific models
+        # extension = Path(image).suffix.lower()
+        # mime_type = f"image/{extension[1:]}" if extension else "image/jpeg"
+        # vision_message["image_url"]["format"] = mime_type
+
+        # # Append vision message
+        # messages.append(
+        #     {
+        #         "role": "user",
+        #         "content": [
+        #             {"type": "text", "text": task},
+        #             vision_message,
+        #         ],
+        #     }
+        # )
+
+        # return messages
+        if (
+            "anthropic" in self.model_name.lower()
+            or "claude" in self.model_name.lower()
+        ):
+            messages = self.anthropic_vision_processing(
+                task, image, messages
+            )
+            return messages
+        else:
+            messages = self.openai_vision_processing(
+                task, image, messages
+            )
+            return messages
+
+    def audio_processing(self, task: str, audio: str):
         """
-
+        Process the audio for the given task.
 
-
+        Args:
+            task (str): The task to be processed.
+            audio (str): The path or identifier for the audio file.
+        """
+        encoded_string = get_audio_base64(audio)
+
+        # Append audio message
         self.messages.append(
             {
                 "role": "user",
                 "content": [
                     {"type": "text", "text": task},
                     {
-                        "type": "
-                        "
-                        "
-
-                        # "format": "image",
+                        "type": "input_audio",
+                        "input_audio": {
+                            "data": encoded_string,
+                            "format": "wav",
                         },
                     },
                 ],
             }
         )
 
-    def
-        self, task: str, audio: str = None, img: str = None
-    ):
+    def check_if_model_supports_vision(self, img: str = None):
         """
-
+        Check if the model supports vision.
         """
-        self.messages = []  # Reset messages
-        self.modalities.append("text")
-
-        if audio is not None:
-            self.audio_processing(task=task, audio=audio)
-            self.modalities.append("audio")
-
         if img is not None:
-
-
+            out = supports_vision(model=self.model_name)
+
+            if out is False:
+                raise ValueError(
+                    f"Model {self.model_name} does not support vision"
+                )
 
     def run(
         self,
@@ -256,13 +403,7 @@ class LiteLLM:
             Exception: If there is an error in processing the request.
         """
         try:
-            messages = self._prepare_messages(task)
-
-            if audio is not None or img is not None:
-                self.handle_modalities(
-                    task=task, audio=audio, img=img
-                )
-                messages = self.messages
+            messages = self._prepare_messages(task=task, img=img)
 
             # Base completion parameters
             completion_params = {
@@ -298,6 +439,9 @@ class LiteLLM:
                     {"functions": self.functions}
                 )
 
+            if self.base_url is not None:
+                completion_params["base_url"] = self.base_url
+
             # Add modalities if needed
             if self.modalities and len(self.modalities) >= 2:
                 completion_params["modalities"] = self.modalities
@@ -308,12 +452,16 @@ class LiteLLM:
             # Handle tool-based response
            if self.tools_list_dictionary is not None:
                 return self.output_for_tools(response)
+            elif self.return_all is True:
+                return response.model_dump()
             else:
                 # Return standard response content
                 return response.choices[0].message.content
 
         except LiteLLMException as error:
-            logger.error(
+            logger.error(
+                f"Error in LiteLLM run: {str(error)} Traceback: {traceback.format_exc()}"
+            )
             if "rate_limit" in str(error).lower():
                 logger.warning(
                     "Rate limit hit, retrying with exponential backoff..."
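Taken together, the hunks above add image-input support (`get_image_base64`, the `*_vision_processing` methods, `check_if_model_supports_vision`) and new constructor options (`return_all`, `base_url`, `api_key`, plus a `retries` default of 0). A minimal usage sketch based only on the names visible in this diff — the exact `run()` signature and defaults should be verified against the installed 7.8.8 package:

```python
# Sketch only: parameter names are taken from the hunks above; verify them
# against the installed package before relying on this.
from swarms.utils.litellm_wrapper import LiteLLM

llm = LiteLLM(
    model_name="gpt-4o-mini",
    base_url=None,       # new in 7.8.8: forwarded to completion_params when set
    api_key=None,        # new in 7.8.8: stored on the instance
    return_all=False,    # new in 7.8.8: True returns response.model_dump()
    retries=0,           # default shown in this diff
)

# img is routed through _prepare_messages -> vision_processing; a ValueError is
# raised if litellm's supports_vision() reports the model cannot accept images.
print(llm.run(task="Describe this image", img="photo.png"))
```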
swarms/utils/output_types.py
ADDED
@@ -0,0 +1,24 @@
+from typing import Literal
+
+HistoryOutputType = Literal[
+    "list",
+    "dict",
+    "dictionary",
+    "string",
+    "str",
+    "final",
+    "last",
+    "json",
+    "all",
+    "yaml",
+    "xml",
+    # "dict-final",
+    "dict-all-except-first",
+    "str-all-except-first",
+    "basemodel",
+    "dict-final",
+]
+
+OutputType = HistoryOutputType
+
+output_type: HistoryOutputType  # OutputType now includes 'xml'
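`swarms/utils/output_types.py` is new in 7.8.8 and only defines the `HistoryOutputType` literal (aliased as `OutputType`). A hypothetical sketch of how the alias can be used as an annotation; the `format_history` helper below is illustrative and not part of the package:

```python
# Illustrative only: format_history is not part of swarms; the import path is
# inferred from the file location swarms/utils/output_types.py.
from swarms.utils.output_types import HistoryOutputType, OutputType


def format_history(history: list, output_type: HistoryOutputType = "str"):
    """Toy dispatcher on the literal values accepted by the new type."""
    if output_type in ("dict", "dictionary", "list", "all"):
        return history
    if output_type in ("final", "last"):
        return history[-1] if history else None
    return "\n".join(str(message) for message in history)


assert OutputType is HistoryOutputType  # the module aliases one name to the other
print(format_history([{"role": "user", "content": "hi"}], output_type="final"))
```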
swarms/utils/vllm_wrapper.py
CHANGED
@@ -133,12 +133,11 @@ class VLLMWrapper:
         Returns:
             List[str]: List of model responses.
         """
-        #
-
-
-
-
-        ) as executor:
+        # Calculate the worker count based on 95% of available CPU cores
+        num_workers = max(1, int((os.cpu_count() or 1) * 0.95))
+        with concurrent.futures.ThreadPoolExecutor(
+            max_workers=num_workers
+        ) as executor:
             futures = [
                 executor.submit(self.run, task) for task in tasks
             ]
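The replacement sizes the thread pool at roughly 95% of the available CPU cores (never fewer than one worker). A standalone sketch of the same pattern for reference — this is not the `VLLMWrapper` itself:

```python
# Standalone illustration of the worker-count pattern shown above.
import concurrent.futures
import os


def run_batched(tasks, fn):
    # Cap the pool at ~95% of the reported CPU cores, but never below one worker.
    num_workers = max(1, int((os.cpu_count() or 1) * 0.95))
    with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
        futures = [executor.submit(fn, task) for task in tasks]
        return [future.result() for future in futures]


print(run_batched([1, 2, 3], lambda x: x * x))  # [1, 4, 9]
```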
swarms/utils/xml_utils.py
CHANGED
@@ -3,7 +3,22 @@ from typing import Any
 
 
 def dict_to_xml(tag: str, d: dict) -> ET.Element:
-    """
+    """
+    Convert a dictionary to an XML Element.
+
+    Args:
+        tag (str): The tag name for the root element
+        d (dict): The dictionary to convert to XML
+
+    Returns:
+        ET.Element: An XML Element representing the dictionary structure
+
+    Example:
+        >>> data = {"person": {"name": "John", "age": 30}}
+        >>> elem = dict_to_xml("root", data)
+        >>> ET.tostring(elem, encoding="unicode")
+        '<root><person><name>John</name><age>30</age></person></root>'
+    """
     elem = ET.Element(tag)
     for key, val in d.items():
         child = ET.Element(str(key))
@@ -24,7 +39,27 @@ def dict_to_xml(tag: str, d: dict) -> ET.Element:
 
 
 def to_xml_string(data: Any, root_tag: str = "root") -> str:
-    """
+    """
+    Convert a dict or list to an XML string.
+
+    Args:
+        data (Any): The data to convert to XML. Can be a dictionary, list, or other value
+        root_tag (str, optional): The tag name for the root element. Defaults to "root"
+
+    Returns:
+        str: An XML string representation of the input data
+
+    Example:
+        >>> data = {"person": {"name": "John", "age": 30}}
+        >>> xml_str = to_xml_string(data)
+        >>> print(xml_str)
+        <root><person><name>John</name><age>30</age></person></root>
+
+        >>> data = [1, 2, 3]
+        >>> xml_str = to_xml_string(data)
+        >>> print(xml_str)
+        <root><item>1</item><item>2</item><item>3</item></root>
+    """
     if isinstance(data, dict):
         elem = dict_to_xml(root_tag, data)
     elif isinstance(data, list):
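Both functions only gained docstrings in this release; behavior is unchanged. A quick sketch that exercises the documented examples — the import path is inferred from the file location, and the expected strings come from the new docstrings:

```python
# Exercises the examples documented above; import path inferred from
# swarms/utils/xml_utils.py.
import xml.etree.ElementTree as ET

from swarms.utils.xml_utils import dict_to_xml, to_xml_string

elem = dict_to_xml("root", {"person": {"name": "John", "age": 30}})
print(ET.tostring(elem, encoding="unicode"))
# Expected per the docstring: <root><person><name>John</name><age>30</age></person></root>

print(to_xml_string([1, 2, 3]))
# Expected per the docstring: <root><item>1</item><item>2</item><item>3</item></root>
```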
{swarms-7.8.4.dist-info → swarms-7.8.8.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: swarms
-Version: 7.8.4
+Version: 7.8.8
 Summary: Swarms - TGSC
 License: MIT
 Keywords: artificial intelligence,deep learning,optimizers,Prompt Engineering,swarms,agents,llms,transformers,multi-agent,swarms of agents,Enterprise-Grade Agents,Production-Grade Agents,Agents,Multi-Grade-Agents,Swarms,Transformers,LLMs,Prompt Engineering,Agents,Generative Agents,Generative AI,Agent Marketplace,Agent Store,quant,finance,algorithmic trading,portfolio optimization,risk management,financial modeling,machine learning for finance,natural language processing for finance
@@ -1424,15 +1424,13 @@ print(out)
 -------
 
 ## SpreadSheetSwarm
+
 SpreadSheetSwarm manages thousands of agents concurrently for efficient task processing. It supports one-to-many task distribution, scalability, and autosaving results. Initialized with a name, description, agents, and settings, the run method executes tasks and returns a dictionary of agent outputs.
 
 [Learn more:](https://docs.swarms.world/en/latest/swarms/structs/spreadsheet_swarm/)
 
 ```python
-import os
 from swarms import Agent, SpreadSheetSwarm
-from swarm_models import OpenAIChat
-
 # Define custom system prompts for each social media platform
 TWITTER_AGENT_SYS_PROMPT = """
 You are a Twitter marketing expert specializing in real estate. Your task is to create engaging, concise tweets to promote properties, analyze trends to maximize engagement, and use appropriate hashtags and timing to reach potential buyers.
@@ -1459,7 +1457,7 @@ agents = [
     Agent(
         agent_name="Twitter-RealEstate-Agent",
         system_prompt=TWITTER_AGENT_SYS_PROMPT,
-        model_name="gpt-4o",
+        model_name="gpt-4o-mini",
         max_loops=1,
         dynamic_temperature_enabled=True,
         saved_state_path="twitter_realestate_agent.json",
@@ -1469,7 +1467,7 @@ agents = [
     Agent(
         agent_name="Instagram-RealEstate-Agent",
         system_prompt=INSTAGRAM_AGENT_SYS_PROMPT,
-        model_name="gpt-4o",
+        model_name="gpt-4o-mini",
         max_loops=1,
         dynamic_temperature_enabled=True,
         saved_state_path="instagram_realestate_agent.json",
@@ -1479,7 +1477,7 @@ agents = [
     Agent(
         agent_name="Facebook-RealEstate-Agent",
         system_prompt=FACEBOOK_AGENT_SYS_PROMPT,
-        model_name="gpt-4o",
+        model_name="gpt-4o-mini",
         max_loops=1,
         dynamic_temperature_enabled=True,
         saved_state_path="facebook_realestate_agent.json",
@@ -1489,7 +1487,7 @@ agents = [
     Agent(
         agent_name="LinkedIn-RealEstate-Agent",
         system_prompt=LINKEDIN_AGENT_SYS_PROMPT,
-        model_name="gpt-4o",
+        model_name="gpt-4o-mini",
        max_loops=1,
         dynamic_temperature_enabled=True,
         saved_state_path="linkedin_realestate_agent.json",
@@ -1499,7 +1497,7 @@ agents = [
     Agent(
         agent_name="Email-RealEstate-Agent",
         system_prompt=EMAIL_AGENT_SYS_PROMPT,
-        model_name="gpt-4o",
+        model_name="gpt-4o-mini",
         max_loops=1,
         dynamic_temperature_enabled=True,
         saved_state_path="email_realestate_agent.json",
@@ -1963,32 +1961,18 @@ A production-grade multi-agent system enabling sophisticated group conversations
 
 
 ```python
-
-import os
-from dotenv import load_dotenv
-from swarm_models import OpenAIChat
 from swarms import Agent, GroupChat, expertise_based
 
 
 if __name__ == "__main__":
 
-    load_dotenv()
-
-    # Get the OpenAI API key from the environment variable
-    api_key = os.getenv("OPENAI_API_KEY")
-
-    # Create an instance of the OpenAIChat class
-    model = OpenAIChat(
-        openai_api_key=api_key,
-        model_name="gpt-4o-mini",
-        temperature=0.1,
-    )
 
     # Example agents
     agent1 = Agent(
         agent_name="Financial-Analysis-Agent",
         system_prompt="You are a financial analyst specializing in investment strategies.",
-
+        model_name="gpt-4o-mini",
+        temperature=0.1,
         max_loops=1,
         autosave=False,
         dashboard=False,
@@ -2004,7 +1988,8 @@ if __name__ == "__main__":
|
|
2004
1988
|
agent2 = Agent(
|
2005
1989
|
agent_name="Tax-Adviser-Agent",
|
2006
1990
|
system_prompt="You are a tax adviser who provides clear and concise guidance on tax-related queries.",
|
2007
|
-
|
1991
|
+
model_name="gpt-4o-mini",
|
1992
|
+
temperature=0.1,
|
2008
1993
|
max_loops=1,
|
2009
1994
|
autosave=False,
|
2010
1995
|
dashboard=False,
|
@@ -2029,7 +2014,8 @@ if __name__ == "__main__":
|
|
2029
2014
|
history = chat.run(
|
2030
2015
|
"How to optimize tax strategy for investments?"
|
2031
2016
|
)
|
2032
|
-
print(history
|
2017
|
+
print(history)
|
2018
|
+
|
2033
2019
|
|
2034
2020
|
```
|
2035
2021
|
|
@@ -2087,35 +2073,18 @@ if __name__ == "__main__":
|
|
2087
2073
|
----------
|
2088
2074
|
|
2089
2075
|
## Onboarding Session
|
2076
|
+
|
2090
2077
|
Get onboarded now with the creator and lead maintainer of Swarms, Kye Gomez, who will show you how to get started with the installation, usage examples, and starting to build your custom use case! [CLICK HERE](https://cal.com/swarms/swarms-onboarding-session)
|
2091
2078
|
|
2092
2079
|
|
2093
2080
|
---
|
2094
2081
|
|
2095
2082
|
## Documentation
|
2083
|
+
|
2096
2084
|
Documentation is located here at: [docs.swarms.world](https://docs.swarms.world)
|
2097
2085
|
|
2098
2086
|
-----
|
2099
2087
|
|
2100
|
-
## Folder Structure
|
2101
|
-
The swarms package has been meticulously crafted for extreme usability and understanding,the swarms package is split up into various modules such as `swarms.agents` that holds pre-built agents, `swarms.structs` that holds a vast array of structures like `Agent` and multi agent structures. The package is split into various modules, with the most important being `structs`, `tools`, and `agents`.
|
2102
|
-
|
2103
|
-
```sh
|
2104
|
-
├── __init__.py
|
2105
|
-
├── agents/
|
2106
|
-
├── artifacts/
|
2107
|
-
├── client/
|
2108
|
-
├── cli/
|
2109
|
-
├── prompts/
|
2110
|
-
├── schemas/
|
2111
|
-
├── structs/
|
2112
|
-
├── telemetry/
|
2113
|
-
├── tools/
|
2114
|
-
└── utils/
|
2115
|
-
```
|
2116
|
-
|
2117
|
-
----
|
2118
|
-
|
2119
2088
|
## 🫶 Contributions:
|
2120
2089
|
|
2121
2090
|
The easiest way to contribute is to pick any issue with the `good first issue` tag 💪. Read the Contributing guidelines [here](/CONTRIBUTING.md). Bug Report? [File here](https://github.com/swarms/gateway/issues) | Feature Request? [File here](https://github.com/swarms/gateway/issues)
|
@@ -2125,17 +2094,24 @@ Swarms is an open-source project, and contributions are VERY welcome. If you wan
 ----
 
 
-
+### Connect With Us
+
+| Platform | Link | Description |
+|----------|------|-------------|
+| 📚 Documentation | [docs.swarms.world](https://docs.swarms.world) | Official documentation and guides |
+| 📝 Blog | [Medium](https://medium.com/@kyeg) | Latest updates and technical articles |
+| 💬 Discord | [Join Discord](https://discord.gg/jM3Z6M9uMq) | Live chat and community support |
+| 🐦 Twitter | [@kyegomez](https://twitter.com/kyegomez) | Latest news and announcements |
+| 👥 LinkedIn | [The Swarm Corporation](https://www.linkedin.com/company/the-swarm-corporation) | Professional network and updates |
+| 📺 YouTube | [Swarms Channel](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ) | Tutorials and demos |
+| 🎫 Events | [Sign up here](https://lu.ma/5p2jnc2v) | Join our community events |
+
+
+
+## Citation
 
-
+If you use **swarms** in your research, please cite the project by referencing the metadata in [CITATION.cff](./CITATION.cff).
 
-- View our official [Documents](https://docs.swarms.world)
-- View our official [Blog](https://medium.com/@kyeg)
-- Chat live with us on [Discord](https://discord.gg/jM3Z6M9uMq)
-- Follow us on [Twitter](https://twitter.com/kyegomez)
-- Connect with us on [LinkedIn](https://www.linkedin.com/company/the-swarm-corporation)
-- Visit us on [YouTube](https://www.youtube.com/channel/UC9yXyitkbU_WSy7bd_41SqQ)
-- Sign up for our events [Sign up here](https://lu.ma/5p2jnc2v)
 
 
 # License