swarms 7.7.8__py3-none-any.whl → 7.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. swarms/__init__.py +0 -1
  2. swarms/agents/cort_agent.py +206 -0
  3. swarms/agents/react_agent.py +173 -0
  4. swarms/agents/self_agent_builder.py +40 -0
  5. swarms/communication/base_communication.py +290 -0
  6. swarms/communication/duckdb_wrap.py +369 -72
  7. swarms/communication/pulsar_struct.py +691 -0
  8. swarms/communication/redis_wrap.py +1362 -0
  9. swarms/communication/sqlite_wrap.py +547 -44
  10. swarms/prompts/agent_self_builder_prompt.py +103 -0
  11. swarms/prompts/safety_prompt.py +50 -0
  12. swarms/schemas/__init__.py +6 -1
  13. swarms/schemas/agent_class_schema.py +91 -0
  14. swarms/schemas/agent_mcp_errors.py +18 -0
  15. swarms/schemas/agent_tool_schema.py +13 -0
  16. swarms/schemas/llm_agent_schema.py +92 -0
  17. swarms/schemas/mcp_schemas.py +43 -0
  18. swarms/structs/__init__.py +4 -0
  19. swarms/structs/agent.py +315 -267
  20. swarms/structs/aop.py +3 -1
  21. swarms/structs/batch_agent_execution.py +64 -0
  22. swarms/structs/conversation.py +261 -57
  23. swarms/structs/council_judge.py +542 -0
  24. swarms/structs/deep_research_swarm.py +19 -22
  25. swarms/structs/long_agent.py +424 -0
  26. swarms/structs/ma_utils.py +11 -8
  27. swarms/structs/malt.py +30 -28
  28. swarms/structs/multi_model_gpu_manager.py +1 -1
  29. swarms/structs/output_types.py +1 -1
  30. swarms/structs/swarm_router.py +70 -15
  31. swarms/tools/__init__.py +12 -0
  32. swarms/tools/base_tool.py +2840 -264
  33. swarms/tools/create_agent_tool.py +104 -0
  34. swarms/tools/mcp_client_call.py +504 -0
  35. swarms/tools/py_func_to_openai_func_str.py +45 -7
  36. swarms/tools/pydantic_to_json.py +10 -27
  37. swarms/utils/audio_processing.py +343 -0
  38. swarms/utils/history_output_formatter.py +5 -5
  39. swarms/utils/index.py +226 -0
  40. swarms/utils/litellm_wrapper.py +65 -67
  41. swarms/utils/try_except_wrapper.py +2 -2
  42. swarms/utils/xml_utils.py +42 -0
  43. {swarms-7.7.8.dist-info → swarms-7.8.0.dist-info}/METADATA +5 -4
  44. {swarms-7.7.8.dist-info → swarms-7.8.0.dist-info}/RECORD +47 -30
  45. {swarms-7.7.8.dist-info → swarms-7.8.0.dist-info}/WHEEL +1 -1
  46. swarms/client/__init__.py +0 -15
  47. swarms/client/main.py +0 -407
  48. swarms/tools/mcp_client.py +0 -246
  49. swarms/tools/mcp_integration.py +0 -340
  50. {swarms-7.7.8.dist-info → swarms-7.8.0.dist-info}/LICENSE +0 -0
  51. {swarms-7.7.8.dist-info → swarms-7.8.0.dist-info}/entry_points.txt +0 -0
swarms/structs/aop.py CHANGED
@@ -4,7 +4,9 @@ from concurrent.futures import ThreadPoolExecutor, as_completed
  from functools import wraps
  from typing import Any, Callable, Literal, Optional
 
- from fastmcp import FastMCP, Client
+ from mcp.server.fastmcp import FastMCP
+ from mcp.client import Client
+
  from loguru import logger
  from swarms.utils.any_to_str import any_to_str
 
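Note: this hunk swaps the third-party `fastmcp` dependency for the FastMCP server bundled with the official `mcp` Python SDK. A minimal sketch of the new import path is shown below, assuming the standard `mcp` SDK quickstart API; the server name and tool are illustrative and not part of swarms.

```python
# Minimal sketch of the import path adopted here (mcp.server.fastmcp).
# "demo-server" and the echo tool are invented for illustration.
from mcp.server.fastmcp import FastMCP

mcp = FastMCP("demo-server")


@mcp.tool()
def echo(text: str) -> str:
    """Return the input text unchanged."""
    return text


if __name__ == "__main__":
    mcp.run()  # serves over stdio by default
```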
swarms/structs/batch_agent_execution.py ADDED
@@ -0,0 +1,64 @@
+ from swarms.structs.agent import Agent
+ from typing import List
+ from swarms.utils.formatter import formatter
+
+
+ def batch_agent_execution(
+     agents: List[Agent],
+     tasks: List[str],
+ ):
+     """
+     Execute a batch of agents on a list of tasks concurrently.
+
+     Args:
+         agents (List[Agent]): List of agents to execute
+         tasks (list[str]): List of tasks to execute
+
+     Returns:
+         List[str]: List of results from each agent execution
+
+     Raises:
+         ValueError: If number of agents doesn't match number of tasks
+     """
+     if len(agents) != len(tasks):
+         raise ValueError(
+             "Number of agents must match number of tasks"
+         )
+
+     import concurrent.futures
+     import multiprocessing
+
+     results = []
+
+     # Calculate max workers as 90% of available CPU cores
+     max_workers = max(1, int(multiprocessing.cpu_count() * 0.9))
+
+     formatter.print_panel(
+         f"Executing {len(agents)} agents on {len(tasks)} tasks using {max_workers} workers"
+     )
+
+     with concurrent.futures.ThreadPoolExecutor(
+         max_workers=max_workers
+     ) as executor:
+         # Submit all tasks to the executor
+         future_to_task = {
+             executor.submit(agent.run, task): (agent, task)
+             for agent, task in zip(agents, tasks)
+         }
+
+         # Collect results as they complete
+         for future in concurrent.futures.as_completed(future_to_task):
+             agent, task = future_to_task[future]
+             try:
+                 result = future.result()
+                 results.append(result)
+             except Exception as e:
+                 print(
+                     f"Task failed for agent {agent.agent_name}: {str(e)}"
+                 )
+                 results.append(None)
+
+     # Wait for all futures to complete before returning
+     concurrent.futures.wait(future_to_task.keys())
+
+     return results
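The new `batch_agent_execution` helper pairs each agent with one task, fans the `agent.run(task)` calls out over a thread pool sized to roughly 90% of the CPU cores, and collects results in completion order (with `None` for failed tasks). A hedged usage sketch follows; the agent names, model, and tasks are invented, and the `Agent` constructor arguments are assumptions rather than anything shown in this diff.

```python
# Illustrative only: agent names, model_name, and tasks are not from the package.
from swarms import Agent
from swarms.structs.batch_agent_execution import batch_agent_execution

agents = [
    Agent(agent_name="summarizer", model_name="gpt-4o-mini", max_loops=1),
    Agent(agent_name="critic", model_name="gpt-4o-mini", max_loops=1),
]
tasks = [
    "Summarize the 7.8.0 changes to swarms.structs.conversation.",
    "List risks of running many agents concurrently.",
]

# One task per agent; a length mismatch raises ValueError.
# Results come back in completion order, not input order.
results = batch_agent_execution(agents=agents, tasks=tasks)
print(results)
```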
swarms/structs/conversation.py CHANGED
@@ -1,20 +1,40 @@
+ import concurrent.futures
  import datetime
+ import hashlib
  import json
- from typing import Any, List, Optional, Union, Dict
+ import os
  import threading
- import hashlib
+ import uuid
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Dict,
+     List,
+     Optional,
+     Union,
+     Literal,
+ )
 
  import yaml
+
  from swarms.structs.base_structure import BaseStructure
- from typing import TYPE_CHECKING
  from swarms.utils.any_to_str import any_to_str
  from swarms.utils.formatter import formatter
  from swarms.utils.litellm_tokenizer import count_tokens
 
  if TYPE_CHECKING:
-     from swarms.structs.agent import (
-         Agent,
-     )  # Only imported during type checking
+     from swarms.structs.agent import Agent
+
+ from loguru import logger
+
+
+ def generate_conversation_id():
+     """Generate a unique conversation ID."""
+     return str(uuid.uuid4())
+
+
+ # Define available providers
+ providers = Literal["mem0", "in-memory"]
 
 
  class Conversation(BaseStructure):
@@ -41,10 +61,13 @@ class Conversation(BaseStructure):
          cache_enabled (bool): Flag to enable prompt caching.
          cache_stats (dict): Statistics about cache usage.
          cache_lock (threading.Lock): Lock for thread-safe cache operations.
+         conversations_dir (str): Directory to store cached conversations.
      """
 
      def __init__(
          self,
+         id: str = generate_conversation_id(),
+         name: str = None,
          system_prompt: Optional[str] = None,
          time_enabled: bool = False,
          autosave: bool = False,
@@ -59,29 +82,16 @@ class Conversation(BaseStructure):
          save_as_json_bool: bool = False,
          token_count: bool = True,
          cache_enabled: bool = True,
+         conversations_dir: Optional[str] = None,
+         provider: providers = "in-memory",
          *args,
          **kwargs,
      ):
-         """
-         Initializes the Conversation object with the provided parameters.
-
-         Args:
-             system_prompt (Optional[str]): The system prompt for the conversation.
-             time_enabled (bool): Flag to enable time tracking for messages.
-             autosave (bool): Flag to enable automatic saving of conversation history.
-             save_filepath (str): File path for saving the conversation history.
-             tokenizer (Any): Tokenizer for counting tokens in messages.
-             context_length (int): Maximum number of tokens allowed in the conversation history.
-             rules (str): Rules for the conversation.
-             custom_rules_prompt (str): Custom prompt for rules.
-             user (str): The user identifier for messages.
-             auto_save (bool): Flag to enable auto-saving of conversation history.
-             save_as_yaml (bool): Flag to save conversation history as YAML.
-             save_as_json_bool (bool): Flag to save conversation history as JSON.
-             token_count (bool): Flag to enable token counting for messages.
-             cache_enabled (bool): Flag to enable prompt caching.
-         """
          super().__init__()
+
+         # Initialize all attributes first
+         self.id = id
+         self.name = name or id
          self.system_prompt = system_prompt
          self.time_enabled = time_enabled
          self.autosave = autosave
@@ -97,6 +107,7 @@ class Conversation(BaseStructure):
          self.save_as_json_bool = save_as_json_bool
          self.token_count = token_count
          self.cache_enabled = cache_enabled
+         self.provider = provider
          self.cache_stats = {
              "hits": 0,
              "misses": 0,
@@ -104,20 +115,70 @@ class Conversation(BaseStructure):
              "total_tokens": 0,
          }
          self.cache_lock = threading.Lock()
+         self.conversations_dir = conversations_dir
+
+         self.setup()
+
+     def setup(self):
+         # Set up conversations directory
+         self.conversations_dir = (
+             self.conversations_dir
+             or os.path.join(
+                 os.path.expanduser("~"), ".swarms", "conversations"
+             )
+         )
+         os.makedirs(self.conversations_dir, exist_ok=True)
+
+         # Try to load existing conversation if it exists
+         conversation_file = os.path.join(
+             self.conversations_dir, f"{self.name}.json"
+         )
+         if os.path.exists(conversation_file):
+             with open(conversation_file, "r") as f:
+                 saved_data = json.load(f)
+                 # Update attributes from saved data
+                 for key, value in saved_data.get(
+                     "metadata", {}
+                 ).items():
+                     if hasattr(self, key):
+                         setattr(self, key, value)
+                 self.conversation_history = saved_data.get(
+                     "history", []
+                 )
+         else:
+             # If system prompt is not None, add it to the conversation history
+             if self.system_prompt is not None:
+                 self.add("System", self.system_prompt)
+
+             if self.rules is not None:
+                 self.add(self.user or "User", self.rules)
 
-         # If system prompt is not None, add it to the conversation history
-         if self.system_prompt is not None:
-             self.add("System", self.system_prompt)
+             if self.custom_rules_prompt is not None:
+                 self.add(
+                     self.user or "User", self.custom_rules_prompt
+                 )
 
-         if self.rules is not None:
-             self.add("User", rules)
+         # If tokenizer then truncate
+         if self.tokenizer is not None:
+             self.truncate_memory_with_tokenizer()
 
-         if custom_rules_prompt is not None:
-             self.add(user or "User", custom_rules_prompt)
+     def mem0_provider(self):
+         try:
+             from mem0 import AsyncMemory
+         except ImportError:
+             logger.warning(
+                 "mem0ai is not installed. Please install it to use the Conversation class."
+             )
+             return None
 
-         # If tokenizer then truncate
-         if tokenizer is not None:
-             self.truncate_memory_with_tokenizer()
+         try:
+             memory = AsyncMemory()
+             return memory
+         except Exception as e:
+             logger.error(
+                 f"Failed to initialize AsyncMemory: {str(e)}"
+             )
+             return None
 
      def _generate_cache_key(
          self, content: Union[str, dict, list]
@@ -174,7 +235,46 @@ class Conversation(BaseStructure):
              self.cache_stats["cached_tokens"] += token_count
              self.cache_stats["total_tokens"] += token_count
 
-     def add(
+     def _save_to_cache(self):
+         """Save the current conversation state to the cache directory."""
+         if not self.conversations_dir:
+             return
+
+         conversation_file = os.path.join(
+             self.conversations_dir, f"{self.name}.json"
+         )
+
+         # Prepare metadata
+         metadata = {
+             "id": self.id,
+             "name": self.name,
+             "system_prompt": self.system_prompt,
+             "time_enabled": self.time_enabled,
+             "autosave": self.autosave,
+             "save_filepath": self.save_filepath,
+             "context_length": self.context_length,
+             "rules": self.rules,
+             "custom_rules_prompt": self.custom_rules_prompt,
+             "user": self.user,
+             "auto_save": self.auto_save,
+             "save_as_yaml": self.save_as_yaml,
+             "save_as_json_bool": self.save_as_json_bool,
+             "token_count": self.token_count,
+             "cache_enabled": self.cache_enabled,
+         }
+
+         # Prepare data to save
+         save_data = {
+             "metadata": metadata,
+             "history": self.conversation_history,
+             "cache_stats": self.cache_stats,
+         }
+
+         # Save to file
+         with open(conversation_file, "w") as f:
+             json.dump(save_data, f, indent=4)
+
+     def add_in_memory(
          self,
          role: str,
          content: Union[str, dict, list],
@@ -210,7 +310,7 @@ class Conversation(BaseStructure):
          else:
              message["cached"] = False
 
-         # Add the message to history immediately without waiting for token count
+         # Add message to appropriate backend
          self.conversation_history.append(message)
 
          if self.token_count is True and not message.get(
@@ -218,11 +318,45 @@ class Conversation(BaseStructure):
          ):
              self._count_tokens(content, message)
 
+         # Save to cache after adding message
+         self._save_to_cache()
+
+     def add_mem0(
+         self,
+         role: str,
+         content: Union[str, dict, list],
+         metadata: Optional[dict] = None,
+     ):
+         """Add a message to the conversation history using the Mem0 provider."""
+         if self.provider == "mem0":
+             memory = self.mem0_provider()
+             memory.add(
+                 messages=content,
+                 agent_id=role,
+                 run_id=self.id,
+                 metadata=metadata,
+             )
+
+     def add(
+         self,
+         role: str,
+         content: Union[str, dict, list],
+         metadata: Optional[dict] = None,
+     ):
+         """Add a message to the conversation history."""
+         if self.provider == "in-memory":
+             self.add_in_memory(role, content)
+         elif self.provider == "mem0":
+             self.add_mem0(
+                 role=role, content=content, metadata=metadata
+             )
+         else:
+             raise ValueError(f"Invalid provider: {self.provider}")
+
      def add_multiple_messages(
          self, roles: List[str], contents: List[Union[str, dict, list]]
      ):
-         for role, content in zip(roles, contents):
-             self.add(role, content)
+         return self.add_multiple(roles, contents)
 
      def _count_tokens(self, content: str, message: dict):
          # If token counting is enabled, do it in a separate thread
@@ -249,6 +383,29 @@ class Conversation(BaseStructure):
              )
              token_thread.start()
 
+     def add_multiple(
+         self,
+         roles: List[str],
+         contents: List[Union[str, dict, list, any]],
+     ):
+         """Add multiple messages to the conversation history."""
+         if len(roles) != len(contents):
+             raise ValueError(
+                 "Number of roles and contents must match."
+             )
+
+         # Now create a formula to get 25% of available cpus
+         max_workers = int(os.cpu_count() * 0.25)
+
+         with concurrent.futures.ThreadPoolExecutor(
+             max_workers=max_workers
+         ) as executor:
+             futures = [
+                 executor.submit(self.add, role, content)
+                 for role, content in zip(roles, contents)
+             ]
+             concurrent.futures.wait(futures)
+
      def delete(self, index: str):
          """Delete a message from the conversation history.
 
@@ -256,6 +413,7 @@ class Conversation(BaseStructure):
              index (str): Index of the message to delete.
          """
          self.conversation_history.pop(index)
+         self._save_to_cache()
 
      def update(self, index: str, role, content):
          """Update a message in the conversation history.
@@ -269,6 +427,7 @@ class Conversation(BaseStructure):
              "role": role,
              "content": content,
          }
+         self._save_to_cache()
 
      def query(self, index: str):
          """Query a message in the conversation history.
@@ -350,12 +509,13 @@ class Conversation(BaseStructure):
          Returns:
              str: The conversation history formatted as a string.
          """
-         return "\n".join(
-             [
-                 f"{message['role']}: {message['content']}\n\n"
-                 for message in self.conversation_history
-             ]
-         )
+         formatted_messages = []
+         for message in self.conversation_history:
+             formatted_messages.append(
+                 f"{message['role']}: {message['content']}"
+             )
+
+         return "\n\n".join(formatted_messages)
 
      def get_str(self) -> str:
          """Get the conversation history as a string.
@@ -363,17 +523,7 @@ class Conversation(BaseStructure):
          Returns:
              str: The conversation history.
          """
-         messages = []
-         for message in self.conversation_history:
-             content = message["content"]
-             if isinstance(content, (dict, list)):
-                 content = json.dumps(content)
-             messages.append(f"{message['role']}: {content}")
-             if "token_count" in message:
-                 messages[-1] += f" (tokens: {message['token_count']})"
-             if message.get("cached", False):
-                 messages[-1] += " [cached]"
-         return "\n".join(messages)
+         return self.return_history_as_string()
 
      def save_as_json(self, filename: str = None):
          """Save the conversation history as a JSON file.
@@ -450,6 +600,7 @@ class Conversation(BaseStructure):
      def clear(self):
          """Clear the conversation history."""
          self.conversation_history = []
+         self._save_to_cache()
 
      def to_json(self):
          """Convert the conversation history to a JSON string.
@@ -508,7 +659,13 @@ class Conversation(BaseStructure):
          Returns:
              str: The last message formatted as 'role: content'.
          """
-         return f"{self.conversation_history[-1]['role']}: {self.conversation_history[-1]['content']}"
+         if self.provider == "mem0":
+             memory = self.mem0_provider()
+             return memory.get_all(run_id=self.id)
+         elif self.provider == "in-memory":
+             return f"{self.conversation_history[-1]['role']}: {self.conversation_history[-1]['content']}"
+         else:
+             raise ValueError(f"Invalid provider: {self.provider}")
 
      def return_messages_as_list(self):
          """Return the conversation messages as a list of formatted strings.
@@ -629,6 +786,53 @@ class Conversation(BaseStructure):
              ),
          }
 
+     @classmethod
+     def load_conversation(
+         cls, name: str, conversations_dir: Optional[str] = None
+     ) -> "Conversation":
+         """Load a conversation from the cache by name.
+
+         Args:
+             name (str): Name of the conversation to load
+             conversations_dir (Optional[str]): Directory containing cached conversations
+
+         Returns:
+             Conversation: The loaded conversation object
+         """
+         return cls(name=name, conversations_dir=conversations_dir)
+
+     @classmethod
+     def list_cached_conversations(
+         cls, conversations_dir: Optional[str] = None
+     ) -> List[str]:
+         """List all cached conversations.
+
+         Args:
+             conversations_dir (Optional[str]): Directory containing cached conversations
+
+         Returns:
+             List[str]: List of conversation names (without .json extension)
+         """
+         if conversations_dir is None:
+             conversations_dir = os.path.join(
+                 os.path.expanduser("~"), ".swarms", "conversations"
+             )
+
+         if not os.path.exists(conversations_dir):
+             return []
+
+         conversations = []
+         for file in os.listdir(conversations_dir):
+             if file.endswith(".json"):
+                 conversations.append(
+                     file[:-5]
+                 )  # Remove .json extension
+         return conversations
+
+     def clear_memory(self):
+         """Clear the memory of the conversation."""
+         self.conversation_history = []
+
 
  # # Example usage
  # # conversation = Conversation()
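Taken together, the conversation.py changes introduce named conversations cached as JSON under ~/.swarms/conversations/, a provider switch ("in-memory" or "mem0"), and classmethods for reloading and listing cached conversations. A hedged sketch of the new surface, using only the signatures visible in this diff; the conversation name and messages are invented.

```python
# Illustrative only: the name and messages are made up; calls mirror the diff above.
from swarms.structs.conversation import Conversation

# A name keys the on-disk cache at ~/.swarms/conversations/<name>.json;
# provider defaults to "in-memory".
convo = Conversation(name="release-notes-chat", provider="in-memory")
convo.add("User", "What changed in swarms 7.8.0?")
convo.add("assistant", "Conversation caching, a mem0 provider, and batch helpers.")

print(convo.get_str())  # now delegates to return_history_as_string()

# Later (or in another process): reload by name, or list what is cached.
restored = Conversation.load_conversation(name="release-notes-chat")
print(Conversation.list_cached_conversations())
```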