swarms 7.8.1__py3-none-any.whl → 7.8.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,38 @@
1
+ AGGREGATOR_SYSTEM_PROMPT = """You are a highly skilled Aggregator Agent responsible for analyzing, synthesizing, and summarizing conversations between multiple AI agents. Your primary goal is to distill complex multi-agent interactions into clear, actionable insights.
2
+
3
+ Key Responsibilities:
4
+ 1. Conversation Analysis:
5
+ - Identify the main topics and themes discussed
6
+ - Track the progression of ideas and problem-solving approaches
7
+ - Recognize key decisions and turning points in the conversation
8
+ - Note any conflicts, agreements, or important conclusions reached
9
+
10
+ 2. Agent Contribution Assessment:
11
+ - Evaluate each agent's unique contributions to the discussion
12
+ - Highlight complementary perspectives and insights
13
+ - Identify any knowledge gaps or areas requiring further exploration
14
+ - Recognize patterns in agent interactions and collaborative dynamics
15
+
16
+ 3. Summary Generation Guidelines:
17
+ - Begin with a high-level overview of the conversation's purpose and outcome
18
+ - Structure the summary in a logical, hierarchical manner
19
+ - Prioritize critical information while maintaining context
20
+ - Include specific examples or quotes when they significantly impact understanding
21
+ - Maintain objectivity while synthesizing different viewpoints
22
+ - Highlight actionable insights and next steps if applicable
23
+
24
+ 4. Quality Standards:
25
+ - Ensure accuracy in representing each agent's contributions
26
+ - Maintain clarity and conciseness without oversimplifying
27
+ - Use consistent terminology throughout the summary
28
+ - Preserve important technical details and domain-specific language
29
+ - Flag any uncertainties or areas needing clarification
30
+
31
+ 5. Output Format:
32
+ - Present information in a structured, easy-to-read format
33
+ - Use bullet points or sections for better readability when appropriate
34
+ - Include a brief conclusion or recommendation section if relevant
35
+ - Maintain professional and neutral tone throughout
36
+
37
+ Remember: Your role is crucial in making complex multi-agent discussions accessible and actionable. Focus on extracting value from the conversation while maintaining the integrity of each agent's contributions.
38
+ """
@@ -80,6 +80,8 @@ from swarms.structs.swarming_architectures import (
80
80
  from swarms.structs.auto_swarm_builder import AutoSwarmBuilder
81
81
  from swarms.structs.council_judge import CouncilAsAJudge
82
82
  from swarms.structs.batch_agent_execution import batch_agent_execution
83
+ from swarms.structs.ma_blocks import aggregate
84
+
83
85
 
84
86
  __all__ = [
85
87
  "Agent",
@@ -150,4 +152,5 @@ __all__ = [
150
152
  "AutoSwarmBuilder",
151
153
  "CouncilAsAJudge",
152
154
  "batch_agent_execution",
155
+ "aggregate",
153
156
  ]
@@ -1,13 +1,12 @@
1
1
  import concurrent.futures
2
2
  import datetime
3
- import hashlib
4
3
  import json
5
4
  import os
6
5
  import threading
7
6
  import uuid
8
7
  from typing import (
9
8
  TYPE_CHECKING,
10
- Any,
9
+ Callable,
11
10
  Dict,
12
11
  List,
13
12
  Optional,
@@ -33,6 +32,25 @@ def generate_conversation_id():
33
32
  return str(uuid.uuid4())
34
33
 
35
34
 
35
+ def get_conversation_dir():
36
+ """Get the directory for storing conversation logs."""
37
+ # Get the current working directory
38
+ conversation_dir = os.path.join(os.getcwd(), "conversations")
39
+ try:
40
+ os.makedirs(conversation_dir, mode=0o755, exist_ok=True)
41
+ except Exception as e:
42
+ logger.error(
43
+ f"Failed to create conversations directory: {str(e)}"
44
+ )
45
+ # Fallback to the same directory as the script
46
+ conversation_dir = os.path.join(
47
+ os.path.dirname(os.path.abspath(__file__)),
48
+ "conversations",
49
+ )
50
+ os.makedirs(conversation_dir, mode=0o755, exist_ok=True)
51
+ return conversation_dir
52
+
53
+
36
54
  # Define available providers
37
55
  providers = Literal["mem0", "in-memory"]
38
56
 
@@ -58,10 +76,6 @@ class Conversation(BaseStructure):
58
76
  save_as_json_bool (bool): Flag to save conversation history as JSON.
59
77
  token_count (bool): Flag to enable token counting for messages.
60
78
  conversation_history (list): List to store the history of messages.
61
- cache_enabled (bool): Flag to enable prompt caching.
62
- cache_stats (dict): Statistics about cache usage.
63
- cache_lock (threading.Lock): Lock for thread-safe cache operations.
64
- conversations_dir (str): Directory to store cached conversations.
65
79
  """
66
80
 
67
81
  def __init__(
@@ -70,20 +84,20 @@ class Conversation(BaseStructure):
70
84
  name: str = None,
71
85
  system_prompt: Optional[str] = None,
72
86
  time_enabled: bool = False,
73
- autosave: bool = False,
87
+ autosave: bool = False, # Changed default to False
88
+ save_enabled: bool = False, # New parameter to control if saving is enabled
74
89
  save_filepath: str = None,
75
- tokenizer: Any = None,
90
+ load_filepath: str = None, # New parameter to specify which file to load from
91
+ tokenizer: Callable = None,
76
92
  context_length: int = 8192,
77
93
  rules: str = None,
78
94
  custom_rules_prompt: str = None,
79
95
  user: str = "User:",
80
- auto_save: bool = True,
81
- save_as_yaml: bool = True,
96
+ save_as_yaml: bool = False,
82
97
  save_as_json_bool: bool = False,
83
98
  token_count: bool = True,
84
- cache_enabled: bool = True,
85
- conversations_dir: Optional[str] = None,
86
99
  provider: providers = "in-memory",
100
+ conversations_dir: Optional[str] = None,
87
101
  *args,
88
102
  **kwargs,
89
103
  ):
@@ -95,73 +109,87 @@ class Conversation(BaseStructure):
95
109
  self.system_prompt = system_prompt
96
110
  self.time_enabled = time_enabled
97
111
  self.autosave = autosave
98
- self.save_filepath = save_filepath
112
+ self.save_enabled = save_enabled
113
+ self.conversations_dir = conversations_dir
114
+
115
+ # Handle save filepath
116
+ if save_enabled and save_filepath:
117
+ self.save_filepath = save_filepath
118
+ elif save_enabled and conversations_dir:
119
+ self.save_filepath = os.path.join(
120
+ conversations_dir, f"{self.id}.json"
121
+ )
122
+ else:
123
+ self.save_filepath = None
124
+
125
+ self.load_filepath = load_filepath
99
126
  self.conversation_history = []
100
127
  self.tokenizer = tokenizer
101
128
  self.context_length = context_length
102
129
  self.rules = rules
103
130
  self.custom_rules_prompt = custom_rules_prompt
104
131
  self.user = user
105
- self.auto_save = auto_save
106
132
  self.save_as_yaml = save_as_yaml
107
133
  self.save_as_json_bool = save_as_json_bool
108
134
  self.token_count = token_count
109
- self.cache_enabled = cache_enabled
110
135
  self.provider = provider
111
- self.cache_stats = {
112
- "hits": 0,
113
- "misses": 0,
114
- "cached_tokens": 0,
115
- "total_tokens": 0,
116
- }
117
- self.cache_lock = threading.Lock()
118
- self.conversations_dir = conversations_dir
119
136
 
137
+ # Create conversation directory if saving is enabled
138
+ if self.save_enabled and self.conversations_dir:
139
+ os.makedirs(self.conversations_dir, exist_ok=True)
140
+
141
+ # Try to load existing conversation or initialize new one
120
142
  self.setup()
121
143
 
122
144
  def setup(self):
123
- # Set up conversations directory
124
- self.conversations_dir = (
125
- self.conversations_dir
126
- or os.path.join(
127
- os.path.expanduser("~"), ".swarms", "conversations"
128
- )
129
- )
130
- os.makedirs(self.conversations_dir, exist_ok=True)
131
-
132
- # Try to load existing conversation if it exists
133
- conversation_file = os.path.join(
134
- self.conversations_dir, f"{self.name}.json"
135
- )
136
- if os.path.exists(conversation_file):
137
- with open(conversation_file, "r") as f:
138
- saved_data = json.load(f)
139
- # Update attributes from saved data
140
- for key, value in saved_data.get(
141
- "metadata", {}
142
- ).items():
143
- if hasattr(self, key):
144
- setattr(self, key, value)
145
- self.conversation_history = saved_data.get(
146
- "history", []
145
+ """Set up the conversation by either loading existing data or initializing new."""
146
+ if self.load_filepath and os.path.exists(self.load_filepath):
147
+ try:
148
+ self.load_from_json(self.load_filepath)
149
+ logger.info(
150
+ f"Loaded existing conversation from {self.load_filepath}"
151
+ )
152
+ except Exception as e:
153
+ logger.error(f"Failed to load conversation: {str(e)}")
154
+ self._initialize_new_conversation()
155
+ elif self.save_filepath and os.path.exists(
156
+ self.save_filepath
157
+ ):
158
+ try:
159
+ self.load_from_json(self.save_filepath)
160
+ logger.info(
161
+ f"Loaded existing conversation from {self.save_filepath}"
147
162
  )
163
+ except Exception as e:
164
+ logger.error(f"Failed to load conversation: {str(e)}")
165
+ self._initialize_new_conversation()
148
166
  else:
149
- # If system prompt is not None, add it to the conversation history
150
- if self.system_prompt is not None:
151
- self.add("System", self.system_prompt)
152
-
153
- if self.rules is not None:
154
- self.add(self.user or "User", self.rules)
155
-
156
- if self.custom_rules_prompt is not None:
157
- self.add(
158
- self.user or "User", self.custom_rules_prompt
167
+ self._initialize_new_conversation()
168
+
169
+ def _initialize_new_conversation(self):
170
+ """Initialize a new conversation with system prompt and rules."""
171
+ if self.system_prompt is not None:
172
+ self.add("System", self.system_prompt)
173
+
174
+ if self.rules is not None:
175
+ self.add(self.user or "User", self.rules)
176
+
177
+ if self.custom_rules_prompt is not None:
178
+ self.add(self.user or "User", self.custom_rules_prompt)
179
+
180
+ if self.tokenizer is not None:
181
+ self.truncate_memory_with_tokenizer()
182
+
183
+ def _autosave(self):
184
+ """Automatically save the conversation if autosave is enabled."""
185
+ if self.autosave and self.save_filepath:
186
+ try:
187
+ self.save_as_json(self.save_filepath)
188
+ except Exception as e:
189
+ logger.error(
190
+ f"Failed to autosave conversation: {str(e)}"
159
191
  )
160
192
 
161
- # If tokenizer then truncate
162
- if self.tokenizer is not None:
163
- self.truncate_memory_with_tokenizer()
164
-
165
193
  def mem0_provider(self):
166
194
  try:
167
195
  from mem0 import AsyncMemory
@@ -180,100 +208,6 @@ class Conversation(BaseStructure):
180
208
  )
181
209
  return None
182
210
 
183
- def _generate_cache_key(
184
- self, content: Union[str, dict, list]
185
- ) -> str:
186
- """Generate a cache key for the given content.
187
-
188
- Args:
189
- content (Union[str, dict, list]): The content to generate a cache key for.
190
-
191
- Returns:
192
- str: The cache key.
193
- """
194
- if isinstance(content, (dict, list)):
195
- content = json.dumps(content, sort_keys=True)
196
- return hashlib.md5(content.encode()).hexdigest()
197
-
198
- def _get_cached_tokens(
199
- self, content: Union[str, dict, list]
200
- ) -> Optional[int]:
201
- """Get the number of cached tokens for the given content.
202
-
203
- Args:
204
- content (Union[str, dict, list]): The content to check.
205
-
206
- Returns:
207
- Optional[int]: The number of cached tokens, or None if not cached.
208
- """
209
- if not self.cache_enabled:
210
- return None
211
-
212
- with self.cache_lock:
213
- cache_key = self._generate_cache_key(content)
214
- if cache_key in self.cache_stats:
215
- self.cache_stats["hits"] += 1
216
- return self.cache_stats[cache_key]
217
- self.cache_stats["misses"] += 1
218
- return None
219
-
220
- def _update_cache_stats(
221
- self, content: Union[str, dict, list], token_count: int
222
- ):
223
- """Update cache statistics for the given content.
224
-
225
- Args:
226
- content (Union[str, dict, list]): The content to update stats for.
227
- token_count (int): The number of tokens in the content.
228
- """
229
- if not self.cache_enabled:
230
- return
231
-
232
- with self.cache_lock:
233
- cache_key = self._generate_cache_key(content)
234
- self.cache_stats[cache_key] = token_count
235
- self.cache_stats["cached_tokens"] += token_count
236
- self.cache_stats["total_tokens"] += token_count
237
-
238
- def _save_to_cache(self):
239
- """Save the current conversation state to the cache directory."""
240
- if not self.conversations_dir:
241
- return
242
-
243
- conversation_file = os.path.join(
244
- self.conversations_dir, f"{self.name}.json"
245
- )
246
-
247
- # Prepare metadata
248
- metadata = {
249
- "id": self.id,
250
- "name": self.name,
251
- "system_prompt": self.system_prompt,
252
- "time_enabled": self.time_enabled,
253
- "autosave": self.autosave,
254
- "save_filepath": self.save_filepath,
255
- "context_length": self.context_length,
256
- "rules": self.rules,
257
- "custom_rules_prompt": self.custom_rules_prompt,
258
- "user": self.user,
259
- "auto_save": self.auto_save,
260
- "save_as_yaml": self.save_as_yaml,
261
- "save_as_json_bool": self.save_as_json_bool,
262
- "token_count": self.token_count,
263
- "cache_enabled": self.cache_enabled,
264
- }
265
-
266
- # Prepare data to save
267
- save_data = {
268
- "metadata": metadata,
269
- "history": self.conversation_history,
270
- "cache_stats": self.cache_stats,
271
- }
272
-
273
- # Save to file
274
- with open(conversation_file, "w") as f:
275
- json.dump(save_data, f, indent=4)
276
-
277
211
  def add_in_memory(
278
212
  self,
279
213
  role: str,
@@ -287,9 +221,11 @@ class Conversation(BaseStructure):
287
221
  role (str): The role of the speaker (e.g., 'User', 'System').
288
222
  content (Union[str, dict, list]): The content of the message to be added.
289
223
  """
290
- # Base message with role
224
+ # Base message with role and timestamp
291
225
  message = {
292
226
  "role": role,
227
+ "timestamp": datetime.datetime.now().isoformat(),
228
+ "message_id": str(uuid.uuid4()),
293
229
  }
294
230
 
295
231
  # Handle different content types
@@ -302,24 +238,20 @@ class Conversation(BaseStructure):
302
238
  else:
303
239
  message["content"] = content
304
240
 
305
- # Check cache for token count
306
- cached_tokens = self._get_cached_tokens(content)
307
- if cached_tokens is not None:
308
- message["token_count"] = cached_tokens
309
- message["cached"] = True
310
- else:
311
- message["cached"] = False
312
-
313
- # Add message to appropriate backend
241
+ # Add message to conversation history
314
242
  self.conversation_history.append(message)
315
243
 
316
- if self.token_count is True and not message.get(
317
- "cached", False
318
- ):
244
+ if self.token_count is True:
319
245
  self._count_tokens(content, message)
320
246
 
321
- # Save to cache after adding message
322
- self._save_to_cache()
247
+ # Autosave after adding message, but only if saving is enabled
248
+ if self.autosave and self.save_enabled and self.save_filepath:
249
+ try:
250
+ self.save_as_json(self.save_filepath)
251
+ except Exception as e:
252
+ logger.error(
253
+ f"Failed to autosave conversation: {str(e)}"
254
+ )
323
255
 
324
256
  def add_mem0(
325
257
  self,
@@ -367,8 +299,6 @@ class Conversation(BaseStructure):
367
299
  tokens = count_tokens(any_to_str(content))
368
300
  # Update the message that's already in the conversation history
369
301
  message["token_count"] = int(tokens)
370
- # Update cache stats
371
- self._update_cache_stats(content, int(tokens))
372
302
 
373
303
  # If autosave is enabled, save after token count is updated
374
304
  if self.autosave:
@@ -413,7 +343,6 @@ class Conversation(BaseStructure):
413
343
  index (str): Index of the message to delete.
414
344
  """
415
345
  self.conversation_history.pop(index)
416
- self._save_to_cache()
417
346
 
418
347
  def update(self, index: str, role, content):
419
348
  """Update a message in the conversation history.
@@ -427,7 +356,6 @@ class Conversation(BaseStructure):
427
356
  "role": role,
428
357
  "content": content,
429
358
  }
430
- self._save_to_cache()
431
359
 
432
360
  def query(self, index: str):
433
361
  """Query a message in the conversation history.
@@ -462,9 +390,26 @@ class Conversation(BaseStructure):
462
390
  detailed (bool, optional): Flag to display detailed information. Defaults to False.
463
391
  """
464
392
  for message in self.conversation_history:
465
- formatter.print_panel(
466
- f"{message['role']}: {message['content']}\n\n"
467
- )
393
+ content = message["content"]
394
+ role = message["role"]
395
+
396
+ # Format the message content
397
+ if isinstance(content, (dict, list)):
398
+ content = json.dumps(content, indent=2)
399
+
400
+ # Create the display string
401
+ display_str = f"{role}: {content}"
402
+
403
+ # Add details if requested
404
+ if detailed:
405
+ display_str += f"\nTimestamp: {message.get('timestamp', 'Unknown')}"
406
+ display_str += f"\nMessage ID: {message.get('message_id', 'Unknown')}"
407
+ if "token_count" in message:
408
+ display_str += (
409
+ f"\nTokens: {message['token_count']}"
410
+ )
411
+
412
+ formatter.print_panel(display_str)
468
413
 
469
414
  def export_conversation(self, filename: str, *args, **kwargs):
470
415
  """Export the conversation history to a file.
@@ -531,9 +476,47 @@ class Conversation(BaseStructure):
531
476
  Args:
532
477
  filename (str): Filename to save the conversation history.
533
478
  """
534
- if filename is not None:
535
- with open(filename, "w") as f:
536
- json.dump(self.conversation_history, f)
479
+ # Don't save if saving is disabled
480
+ if not self.save_enabled:
481
+ return
482
+
483
+ save_path = filename or self.save_filepath
484
+ if save_path is not None:
485
+ try:
486
+ # Prepare metadata
487
+ metadata = {
488
+ "id": self.id,
489
+ "name": self.name,
490
+ "created_at": datetime.datetime.now().isoformat(),
491
+ "system_prompt": self.system_prompt,
492
+ "rules": self.rules,
493
+ "custom_rules_prompt": self.custom_rules_prompt,
494
+ }
495
+
496
+ # Prepare save data
497
+ save_data = {
498
+ "metadata": metadata,
499
+ "history": self.conversation_history,
500
+ }
501
+
502
+ # Create directory if it doesn't exist
503
+ os.makedirs(
504
+ os.path.dirname(save_path),
505
+ mode=0o755,
506
+ exist_ok=True,
507
+ )
508
+
509
+ # Write directly to file
510
+ with open(save_path, "w") as f:
511
+ json.dump(save_data, f, indent=2)
512
+
513
+ # Only log explicit saves, not autosaves
514
+ if not self.autosave:
515
+ logger.info(
516
+ f"Successfully saved conversation to {save_path}"
517
+ )
518
+ except Exception as e:
519
+ logger.error(f"Failed to save conversation: {str(e)}")
537
520
 
538
521
  def load_from_json(self, filename: str):
539
522
  """Load the conversation history from a JSON file.
@@ -541,9 +524,32 @@ class Conversation(BaseStructure):
541
524
  Args:
542
525
  filename (str): Filename to load from.
543
526
  """
544
- if filename is not None:
545
- with open(filename) as f:
546
- self.conversation_history = json.load(f)
527
+ if filename is not None and os.path.exists(filename):
528
+ try:
529
+ with open(filename) as f:
530
+ data = json.load(f)
531
+
532
+ # Load metadata
533
+ metadata = data.get("metadata", {})
534
+ self.id = metadata.get("id", self.id)
535
+ self.name = metadata.get("name", self.name)
536
+ self.system_prompt = metadata.get(
537
+ "system_prompt", self.system_prompt
538
+ )
539
+ self.rules = metadata.get("rules", self.rules)
540
+ self.custom_rules_prompt = metadata.get(
541
+ "custom_rules_prompt", self.custom_rules_prompt
542
+ )
543
+
544
+ # Load conversation history
545
+ self.conversation_history = data.get("history", [])
546
+
547
+ logger.info(
548
+ f"Successfully loaded conversation from {filename}"
549
+ )
550
+ except Exception as e:
551
+ logger.error(f"Failed to load conversation: {str(e)}")
552
+ raise
547
553
 
548
554
  def search_keyword_in_conversation(self, keyword: str):
549
555
  """Search for a keyword in the conversation history.
@@ -600,7 +606,6 @@ class Conversation(BaseStructure):
600
606
  def clear(self):
601
607
  """Clear the conversation history."""
602
608
  self.conversation_history = []
603
- self._save_to_cache()
604
609
 
605
610
  def to_json(self):
606
611
  """Convert the conversation history to a JSON string.
@@ -759,79 +764,121 @@ class Conversation(BaseStructure):
759
764
  """
760
765
  self.conversation_history.extend(messages)
761
766
 
762
- def get_cache_stats(self) -> Dict[str, int]:
763
- """Get statistics about cache usage.
764
-
765
- Returns:
766
- Dict[str, int]: Statistics about cache usage.
767
- """
768
- with self.cache_lock:
769
- return {
770
- "hits": self.cache_stats["hits"],
771
- "misses": self.cache_stats["misses"],
772
- "cached_tokens": self.cache_stats["cached_tokens"],
773
- "total_tokens": self.cache_stats["total_tokens"],
774
- "hit_rate": (
775
- self.cache_stats["hits"]
776
- / (
777
- self.cache_stats["hits"]
778
- + self.cache_stats["misses"]
779
- )
780
- if (
781
- self.cache_stats["hits"]
782
- + self.cache_stats["misses"]
783
- )
784
- > 0
785
- else 0
786
- ),
787
- }
767
+ def clear_memory(self):
768
+ """Clear the memory of the conversation."""
769
+ self.conversation_history = []
788
770
 
789
771
  @classmethod
790
772
  def load_conversation(
791
- cls, name: str, conversations_dir: Optional[str] = None
773
+ cls,
774
+ name: str,
775
+ conversations_dir: Optional[str] = None,
776
+ load_filepath: Optional[str] = None,
792
777
  ) -> "Conversation":
793
- """Load a conversation from the cache by name.
778
+ """Load a conversation from saved file by name or specific file.
794
779
 
795
780
  Args:
796
781
  name (str): Name of the conversation to load
797
- conversations_dir (Optional[str]): Directory containing cached conversations
782
+ conversations_dir (Optional[str]): Directory containing conversations
783
+ load_filepath (Optional[str]): Specific file to load from
798
784
 
799
785
  Returns:
800
786
  Conversation: The loaded conversation object
801
787
  """
802
- return cls(name=name, conversations_dir=conversations_dir)
788
+ if load_filepath:
789
+ return cls(
790
+ name=name,
791
+ load_filepath=load_filepath,
792
+ save_enabled=False, # Don't enable saving when loading specific file
793
+ )
794
+
795
+ conv_dir = conversations_dir or get_conversation_dir()
796
+ # Try loading by name first
797
+ filepath = os.path.join(conv_dir, f"{name}.json")
798
+
799
+ # If not found by name, try loading by ID
800
+ if not os.path.exists(filepath):
801
+ filepath = os.path.join(conv_dir, f"{name}")
802
+ if not os.path.exists(filepath):
803
+ logger.warning(
804
+ f"No conversation found with name or ID: {name}"
805
+ )
806
+ return cls(
807
+ name=name,
808
+ conversations_dir=conv_dir,
809
+ save_enabled=True,
810
+ )
811
+
812
+ return cls(
813
+ name=name,
814
+ conversations_dir=conv_dir,
815
+ load_filepath=filepath,
816
+ save_enabled=True,
817
+ )
803
818
 
804
819
  @classmethod
805
- def list_cached_conversations(
820
+ def list_conversations(
806
821
  cls, conversations_dir: Optional[str] = None
807
- ) -> List[str]:
808
- """List all cached conversations.
822
+ ) -> List[Dict[str, str]]:
823
+ """List all saved conversations.
809
824
 
810
825
  Args:
811
- conversations_dir (Optional[str]): Directory containing cached conversations
826
+ conversations_dir (Optional[str]): Directory containing conversations
812
827
 
813
828
  Returns:
814
- List[str]: List of conversation names (without .json extension)
829
+ List[Dict[str, str]]: List of conversation metadata
815
830
  """
816
- if conversations_dir is None:
817
- conversations_dir = os.path.join(
818
- os.path.expanduser("~"), ".swarms", "conversations"
819
- )
820
-
821
- if not os.path.exists(conversations_dir):
831
+ conv_dir = conversations_dir or get_conversation_dir()
832
+ if not os.path.exists(conv_dir):
822
833
  return []
823
834
 
824
835
  conversations = []
825
- for file in os.listdir(conversations_dir):
826
- if file.endswith(".json"):
827
- conversations.append(
828
- file[:-5]
829
- ) # Remove .json extension
830
- return conversations
836
+ seen_ids = (
837
+ set()
838
+ ) # Track seen conversation IDs to avoid duplicates
839
+
840
+ for filename in os.listdir(conv_dir):
841
+ if filename.endswith(".json"):
842
+ try:
843
+ filepath = os.path.join(conv_dir, filename)
844
+ with open(filepath) as f:
845
+ data = json.load(f)
846
+ metadata = data.get("metadata", {})
847
+ conv_id = metadata.get("id")
848
+ name = metadata.get("name")
849
+ created_at = metadata.get("created_at")
850
+
851
+ # Skip if we've already seen this ID or if required fields are missing
852
+ if (
853
+ not all([conv_id, name, created_at])
854
+ or conv_id in seen_ids
855
+ ):
856
+ continue
857
+
858
+ seen_ids.add(conv_id)
859
+ conversations.append(
860
+ {
861
+ "id": conv_id,
862
+ "name": name,
863
+ "created_at": created_at,
864
+ "filepath": filepath,
865
+ }
866
+ )
867
+ except json.JSONDecodeError:
868
+ logger.warning(
869
+ f"Skipping corrupted conversation file: {filename}"
870
+ )
871
+ continue
872
+ except Exception as e:
873
+ logger.error(
874
+ f"Failed to read conversation {filename}: {str(e)}"
875
+ )
876
+ continue
831
877
 
832
- def clear_memory(self):
833
- """Clear the memory of the conversation."""
834
- self.conversation_history = []
878
+ # Sort by creation date, newest first
879
+ return sorted(
880
+ conversations, key=lambda x: x["created_at"], reverse=True
881
+ )
835
882
 
836
883
 
837
884
  # # Example usage
@@ -0,0 +1,84 @@
1
+ from swarms.structs.agent import Agent
2
+ from typing import List, Callable
3
+ from swarms.structs.conversation import Conversation
4
+ from swarms.structs.multi_agent_exec import run_agents_concurrently
5
+ from swarms.utils.history_output_formatter import (
6
+ history_output_formatter,
7
+ HistoryOutputType,
8
+ )
9
+
10
+ from swarms.prompts.agent_conversation_aggregator import (
11
+ AGGREGATOR_SYSTEM_PROMPT,
12
+ )
13
+
14
+
15
+ def aggregator_agent_task_prompt(
16
+ task: str, workers: List[Agent], conversation: Conversation
17
+ ):
18
+ return f"""
19
+ Please analyze and summarize the following multi-agent conversation, following your guidelines for comprehensive synthesis:
20
+
21
+ Conversation Context:
22
+ Original Task: {task}
23
+ Number of Participating Agents: {len(workers)}
24
+
25
+ Conversation Content:
26
+ {conversation.get_str()}
27
+
28
+ Please provide a 3,000 word comprehensive summary report of the conversation.
29
+ """
30
+
31
+
32
+ def aggregate(
33
+ workers: List[Callable],
34
+ task: str = None,
35
+ type: HistoryOutputType = "all",
36
+ aggregator_model_name: str = "anthropic/claude-3-sonnet-20240229",
37
+ ):
38
+ """
39
+ Aggregate a list of tasks into a single task.
40
+ """
41
+
42
+ if task is None:
43
+ raise ValueError("Task is required in the aggregator block")
44
+
45
+ if workers is None:
46
+ raise ValueError(
47
+ "Workers is required in the aggregator block"
48
+ )
49
+
50
+ if not isinstance(workers, list):
51
+ raise ValueError("Workers must be a list of Callable")
52
+
53
+ if not all(isinstance(worker, Callable) for worker in workers):
54
+ raise ValueError("Workers must be a list of Callable")
55
+
56
+ conversation = Conversation()
57
+
58
+ aggregator_agent = Agent(
59
+ agent_name="Aggregator",
60
+ agent_description="Expert agent specializing in analyzing and synthesizing multi-agent conversations",
61
+ system_prompt=AGGREGATOR_SYSTEM_PROMPT,
62
+ max_loops=1,
63
+ model_name=aggregator_model_name,
64
+ output_type="final",
65
+ max_tokens=4000,
66
+ )
67
+
68
+ results = run_agents_concurrently(agents=workers, task=task)
69
+
70
+ # Zip the results with the agents
71
+ for result, agent in zip(results, workers):
72
+ conversation.add(content=result, role=agent.agent_name)
73
+
74
+ final_result = aggregator_agent.run(
75
+ task=aggregator_agent_task_prompt(task, workers, conversation)
76
+ )
77
+
78
+ conversation.add(
79
+ content=final_result, role=aggregator_agent.agent_name
80
+ )
81
+
82
+ return history_output_formatter(
83
+ conversation=conversation, type=type
84
+ )
@@ -1,3 +1,4 @@
1
+ import concurrent.futures
1
2
  import asyncio
2
3
  import os
3
4
  import threading
@@ -5,7 +6,7 @@ from concurrent.futures import (
5
6
  ThreadPoolExecutor,
6
7
  )
7
8
  from dataclasses import dataclass
8
- from typing import Any, Callable, List, Union
9
+ from typing import Any, Callable, List, Optional, Union
9
10
 
10
11
  import psutil
11
12
 
@@ -68,44 +69,42 @@ async def run_agents_concurrently_async(
68
69
  def run_agents_concurrently(
69
70
  agents: List[AgentType],
70
71
  task: str,
71
- batch_size: int = None,
72
- max_workers: int = None,
72
+ max_workers: Optional[int] = None,
73
73
  ) -> List[Any]:
74
74
  """
75
- Optimized concurrent agent runner using both uvloop and ThreadPoolExecutor.
75
+ Optimized concurrent agent runner using ThreadPoolExecutor.
76
76
 
77
77
  Args:
78
78
  agents: List of Agent instances to run concurrently
79
79
  task: Task string to execute
80
- batch_size: Number of agents to run in parallel in each batch (defaults to CPU count)
81
- max_workers: Maximum number of threads in the executor (defaults to CPU count * 2)
80
+ max_workers: Maximum number of threads in the executor (defaults to 95% of CPU cores)
82
81
 
83
82
  Returns:
84
83
  List of outputs from each agent
85
84
  """
86
- # Optimize defaults based on system resources
87
- cpu_cores = os.cpu_count()
88
- batch_size = batch_size or cpu_cores
89
- max_workers = max_workers or cpu_cores * 2
85
+ if max_workers is None:
86
+ # 95% of the available CPU cores
87
+ num_cores = os.cpu_count()
88
+ max_workers = int(num_cores * 0.95) if num_cores else 1
90
89
 
91
90
  results = []
92
91
 
93
- # Get or create event loop
94
- try:
95
- loop = asyncio.get_event_loop()
96
- except RuntimeError:
97
- loop = asyncio.new_event_loop()
98
- asyncio.set_event_loop(loop)
99
-
100
- # Create a shared thread pool executor with optimal worker count
101
- with ThreadPoolExecutor(max_workers=max_workers) as executor:
102
- # Process agents in batches
103
- for i in range(0, len(agents), batch_size):
104
- batch = agents[i : i + batch_size]
105
- batch_results = loop.run_until_complete(
106
- run_agents_concurrently_async(batch, task, executor)
107
- )
108
- results.extend(batch_results)
92
+ with concurrent.futures.ThreadPoolExecutor(
93
+ max_workers=max_workers
94
+ ) as executor:
95
+ # Submit all tasks and get futures
96
+ futures = [
97
+ executor.submit(agent.run, task) for agent in agents
98
+ ]
99
+
100
+ # Wait for all futures to complete and get results
101
+ for future in concurrent.futures.as_completed(futures):
102
+ try:
103
+ result = future.result()
104
+ results.append(result)
105
+ except Exception as e:
106
+ # Append the error if an agent fails
107
+ results.append(e)
109
108
 
110
109
  return results
111
110
 
@@ -1,7 +1,6 @@
1
1
  import json
2
2
  from typing import List, Optional, Tuple
3
3
 
4
- import numpy as np
5
4
  from pydantic import BaseModel, Field
6
5
  from tenacity import retry, stop_after_attempt, wait_exponential
7
6
 
@@ -80,7 +79,7 @@ class SwarmMatcher:
80
79
  stop=stop_after_attempt(3),
81
80
  wait=wait_exponential(multiplier=1, min=4, max=10),
82
81
  )
83
- def get_embedding(self, text: str) -> np.ndarray:
82
+ def get_embedding(self, text: str):
84
83
  """
85
84
  Generates an embedding for a given text using the configured model.
86
85
 
@@ -90,6 +89,7 @@ class SwarmMatcher:
90
89
  Returns:
91
90
  np.ndarray: The embedding vector for the text.
92
91
  """
92
+
93
93
  logger.debug(f"Getting embedding for text: {text[:50]}...")
94
94
  try:
95
95
  inputs = self.tokenizer(
@@ -141,6 +141,8 @@ class SwarmMatcher:
141
141
  Returns:
142
142
  Tuple[str, float]: A tuple containing the name of the best matching swarm type and the score.
143
143
  """
144
+ import numpy as np
145
+
144
146
  logger.debug(f"Finding best match for task: {task[:50]}...")
145
147
  try:
146
148
  task_embedding = self.get_embedding(task)
@@ -614,7 +614,13 @@ class SwarmRouter:
614
614
  Exception: If an error occurs during task execution.
615
615
  """
616
616
  try:
617
- return self._run(task=task, img=img, model_response=model_response, *args, **kwargs)
617
+ return self._run(
618
+ task=task,
619
+ img=img,
620
+ model_response=model_response,
621
+ *args,
622
+ **kwargs,
623
+ )
618
624
  except Exception as e:
619
625
  logger.error(f"Error executing task on swarm: {str(e)}")
620
626
  raise
swarms/tools/base_tool.py CHANGED
@@ -2,7 +2,6 @@ import json
2
2
  from typing import Any, Callable, Dict, List, Optional, Union
3
3
  from concurrent.futures import ThreadPoolExecutor, as_completed
4
4
 
5
- # from litellm.utils import function_to_dict
6
5
  from pydantic import BaseModel, Field
7
6
 
8
7
  from swarms.tools.func_to_str import function_to_str, functions_to_str
@@ -1,3 +1,5 @@
1
+ import os
2
+ import concurrent.futures
1
3
  from typing import List, Optional, Dict, Any
2
4
  from loguru import logger
3
5
 
@@ -131,16 +133,16 @@ class VLLMWrapper:
131
133
  Returns:
132
134
  List[str]: List of model responses.
133
135
  """
134
- logger.info(
135
- f"Running tasks in batches of size {batch_size}. Total tasks: {len(tasks)}"
136
- )
137
- results = []
138
-
139
- for i in range(0, len(tasks), batch_size):
140
- batch = tasks[i : i + batch_size]
141
- for task in batch:
142
- logger.info(f"Running task: {task}")
143
- results.append(self.run(task))
144
-
145
- logger.info("Completed all tasks.")
146
- return results
136
+ # Fetch 95% of the available CPU cores
137
+ num_cores = os.cpu_count()
138
+ num_workers = int(num_cores * 0.95)
139
+ with concurrent.futures.ThreadPoolExecutor(
140
+ max_workers=num_workers
141
+ ) as executor:
142
+ futures = [
143
+ executor.submit(self.run, task) for task in tasks
144
+ ]
145
+ return [
146
+ future.result()
147
+ for future in concurrent.futures.as_completed(futures)
148
+ ]
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: swarms
3
- Version: 7.8.1
3
+ Version: 7.8.4
4
4
  Summary: Swarms - TGSC
5
5
  License: MIT
6
6
  Keywords: artificial intelligence,deep learning,optimizers,Prompt Engineering,swarms,agents,llms,transformers,multi-agent,swarms of agents,Enterprise-Grade Agents,Production-Grade Agents,Agents,Multi-Grade-Agents,Swarms,Transformers,LLMs,Prompt Engineering,Agents,Generative Agents,Generative AI,Agent Marketplace,Agent Store,quant,finance,algorithmic trading,portfolio optimization,risk management,financial modeling,machine learning for finance,natural language processing for finance
@@ -27,7 +27,6 @@ Requires-Dist: loguru
27
27
  Requires-Dist: mcp
28
28
  Requires-Dist: networkx
29
29
  Requires-Dist: numpy
30
- Requires-Dist: numpydoc
31
30
  Requires-Dist: psutil
32
31
  Requires-Dist: pydantic
33
32
  Requires-Dist: pypdf (==5.1.0)
@@ -32,6 +32,7 @@ swarms/prompts/__init__.py,sha256=ZFcghAs4b0Rsjcc_DIFssiztZub5tJ66rxmJD2_tXpQ,74
32
32
  swarms/prompts/accountant_swarm_prompts.py,sha256=swceN1B0fZCOedd3-y3gjNOHDmR-3H5YK17ytp7tTDM,11320
33
33
  swarms/prompts/ag_prompt.py,sha256=mMeqC84SMNrCjQcBeejroSXLuF_wWrVRebHU1MrEQko,4577
34
34
  swarms/prompts/aga.py,sha256=4rRG7r9LF9JBrGOLN4Hy_6cSe0z5YOeKFbGnOmRfFJ0,9156
35
+ swarms/prompts/agent_conversation_aggregator.py,sha256=B7fi8INZUV2OLLYAfn9P8ghzDRfb6CqDdffzgvX3crE,2161
35
36
  swarms/prompts/agent_judge_prompt.py,sha256=-2q5mWLRyO7pj7j512ZNYRm-uEZttPgDGaxZo3oM_QQ,2235
36
37
  swarms/prompts/agent_prompt.py,sha256=6SXkMWcFIIAY02N9P6kMdzo4wtPwzg1Bl9R2qsIdicM,2705
37
38
  swarms/prompts/agent_prompts.py,sha256=63ZqAO1jfH3TKtl9ibLLagSG5kAPlbq4xtCGgGEuuoA,6886
@@ -97,7 +98,7 @@ swarms/schemas/agent_tool_schema.py,sha256=XdaKie6R0WhNXlnEl9f5T3hHmMedFbVwG17fd
97
98
  swarms/schemas/base_schemas.py,sha256=UvBLVWg2qRen4tK5GJz50v42SiX95EQ5qK7hfyAHTEU,3267
98
99
  swarms/schemas/llm_agent_schema.py,sha256=om3tPSjwbkIj2Hr38mKMLJvWs0E2BaE1jw2kzzd0Www,3020
99
100
  swarms/schemas/mcp_schemas.py,sha256=XZJ4HyiY_cv8Gvj-53ddjzXuqT9hBU2f0cHbhIKs_jY,1330
100
- swarms/structs/__init__.py,sha256=VEHVLHULQDqpCKGhBQ-oxiQAwP_9dI9K1mRF8WSYRvg,4231
101
+ swarms/structs/__init__.py,sha256=plgl4yHp-4guyP6dk32KGFFU_Kc9G3bOY6wkFc8WBWw,4296
101
102
  swarms/structs/agent.py,sha256=MsPTIbIeOCa43eU6hIIp1pd8VSm07SjWB0he19msoyQ,100435
102
103
  swarms/structs/agent_builder.py,sha256=tYNpfO4_8cgfMHfgA5DAOWffHnt70p6CLt59esqfVCY,12133
103
104
  swarms/structs/agent_registry.py,sha256=il507cO1NF-d4ChyANVLuWrN8bXsEAi8_7bLJ_sTU6A,12112
@@ -111,7 +112,7 @@ swarms/structs/base_workflow.py,sha256=DTfFwX3AdFYxACDYwUDqhsbcDZnITlg5TeEYyxmJB
111
112
  swarms/structs/batch_agent_execution.py,sha256=d85DzeCq4uTbbPqLhAXFqFx_cxXUS5yRnJ1-gJkwU5w,1871
112
113
  swarms/structs/concat.py,sha256=utezSxNyh1mIwXgdf8-dJ803NDPyEy79WE8zJHuooGk,732
113
114
  swarms/structs/concurrent_workflow.py,sha256=OqXI-X-9a0hG2a7aLzobwd7CVF2ez0rgLj3ZHqri5bg,12952
114
- swarms/structs/conversation.py,sha256=88FgnM5tHOjC_F0TGRT04gkiPnE3tiept4ipnCO4iw8,27501
115
+ swarms/structs/conversation.py,sha256=fnzBYzMt-Zk7bRwOeV41m7_IQz1YGFNqACzX4nmAXsY,30051
115
116
  swarms/structs/council_judge.py,sha256=siYDKiHMvFmShUTXxdo4R6vXiQhKt7bEBI205oC3kU4,19639
116
117
  swarms/structs/csv_to_agent.py,sha256=ug9JqQFPguXeU9JQpSUXuVtOpHYdJhlpKJUJBovo694,9443
117
118
  swarms/structs/de_hallucination_swarm.py,sha256=9cC0rSSXGwYu6SRDwpeMbCcQ40C1WI1RE9SNapKRLOQ,10309
@@ -122,6 +123,7 @@ swarms/structs/groupchat.py,sha256=jjH0BqU9Nrd_3jl9QzrcvbSce527SFpUaepawaRiw2o,1
122
123
  swarms/structs/hiearchical_swarm.py,sha256=2x3InS4HJg4T9Y195l_ANTGu6DYcllqCdJcR3v5Xuro,33402
123
124
  swarms/structs/hybrid_hiearchical_peer_swarm.py,sha256=D1iBtNNee_mxPoOWS5WGTqcci5FQKtt38mW-J42GvfM,9494
124
125
  swarms/structs/long_agent.py,sha256=KFjE2uUI8ONTkeJO43Sh3y5-Ec0kga28BECGklic-S4,15049
126
+ swarms/structs/ma_blocks.py,sha256=dT7kzTp3MMyypPrGKR_OfurjNfcqjful0mEoIrT0z0A,2478
125
127
  swarms/structs/ma_utils.py,sha256=s8uTCplQtiFvxyqyTTTUCnJDMwNLJEuxbdGZyOURjLg,3244
126
128
  swarms/structs/majority_voting.py,sha256=F_t_MOC3YCRyMw5N6qKdFThpaXZxwixRw592Ku5Uhto,10122
127
129
  swarms/structs/malt.py,sha256=uLofKBWHkP3uNhyCkkgEyE4Z7qnOHTtcg-OTiR19x_Y,19572
@@ -130,7 +132,7 @@ swarms/structs/meme_agent_persona_generator.py,sha256=b3kKlumhsV4KV88-GS3CUnGO1U
130
132
  swarms/structs/mixture_of_agents.py,sha256=Ix2YTdrzISPQJLrQ5vrZtYOpZwIYDx0vUaNmvBwDDVM,7762
131
133
  swarms/structs/model_router.py,sha256=V5pZHYlxSmCvAA2Gsns7LaCz8dXtRi7pCvb-oLGHYIY,12739
132
134
  swarms/structs/multi_agent_collab.py,sha256=odh2NQRR23LidsphCxUfAke369lDdgL__w3Xovu9jkA,7731
133
- swarms/structs/multi_agent_exec.py,sha256=Gxwr9mHADX3n29pdxor-dQDnKPSNdnicpCxBLmPwnLg,14344
135
+ swarms/structs/multi_agent_exec.py,sha256=3hIGgwJ_mQwgD16N096jN48-DEIZWFPoetR2nCxIe6I,14203
134
136
  swarms/structs/multi_agent_router.py,sha256=21PfswEuxMHqlWDBCyritj9UuHTShYVQRaK0o23eia8,10999
135
137
  swarms/structs/multi_model_gpu_manager.py,sha256=gHC6MmVia4etMD6RlpEdbqZtV7ng4f-6jVMH0Zrt8y4,47356
136
138
  swarms/structs/omni_agent_types.py,sha256=RdKLfZ-lXDJrEa0aJT_Rfx9TypJQo8SISqKz4fnLkAk,230
@@ -144,9 +146,9 @@ swarms/structs/stopping_conditions.py,sha256=JHHwqgPoUvjX897ofW2gpPZH_cqEWmv5lDT
144
146
  swarms/structs/swarm_arange.py,sha256=5ewEfL52Y4gh-a0lRjFcleHWlsCBuc5XR1nVEEGh07w,15481
145
147
  swarms/structs/swarm_eval.py,sha256=148E2R2zaCmt_LZYx15nmdFjybXHiQ2CZbl6pk77jNs,11635
146
148
  swarms/structs/swarm_id_generator.py,sha256=Wly7AtGM9e6VgzhYmfg8_gSOdxAdsOvWHJFK81cpQNQ,68
147
- swarms/structs/swarm_matcher.py,sha256=E2KwHHEJxmW-UfTeMPWZ6VCmYdQ_I9_fwrfJbxD02GY,23322
149
+ swarms/structs/swarm_matcher.py,sha256=cV3xkoab9FiKTVL4C9Po6akNlWCMM4DFFj6xhINAqEM,23318
148
150
  swarms/structs/swarm_registry.py,sha256=P0XRrqp1qBNyt0BycqPQljUzKv9jClaQMhtaBMinhYg,5578
149
- swarms/structs/swarm_router.py,sha256=Q6g3tLUJM9OZn4tbjlChxb6xz_DIl0ix4NUschFtEf0,30578
151
+ swarms/structs/swarm_router.py,sha256=5zMhcWiNTTdQKH1D0oi8zsR0vH3AMESeNtsT57ArPgY,30673
150
152
  swarms/structs/swarming_architectures.py,sha256=q2XrY2lOqFhVckA8oin65Dz1VPUe-lfbEJHlP1Z8aTE,28278
151
153
  swarms/structs/tree_swarm.py,sha256=AnIxrt0KhWxAQN8uGjfCcOq-XCmsuTJiH8Ex4mXy8V8,12500
152
154
  swarms/structs/utils.py,sha256=Mo6wHQYOB8baWZUKnAJN5Dsgubpo81umNwJIEDitb2A,1873
@@ -155,7 +157,7 @@ swarms/telemetry/__init__.py,sha256=yibtkHEbQRPUv6ir1FhDHlAO_3nwKJPQH4LjzBC2AuQ,
155
157
  swarms/telemetry/bootup.py,sha256=0leCNCy5rhzL19EsOsqHWSDI85KVcWO6_5hLDS0h4sY,1155
156
158
  swarms/telemetry/main.py,sha256=8FxivorvclSvhgfU03cHFktaRgRNV1UXCMi0VV8-U60,11248
157
159
  swarms/tools/__init__.py,sha256=tyGQpcfrosMx06fdV9h_8_9WB-1vfT-aAjZufiTXyPQ,1838
158
- swarms/tools/base_tool.py,sha256=LMGfH2o9nrCdsuVxI2mfxtkIMcXTs2oO_jAF1ebU-YY,107065
160
+ swarms/tools/base_tool.py,sha256=kyCteIJxk38jE0upttSQA1iJ1k6GEjebYwhl8WmUxGw,107020
159
161
  swarms/tools/cohere_func_call_schema.py,sha256=XJ6_yBMXCrV9KjN7v9Bk1iFj69TRlGIWYKsUTA1oGiQ,600
160
162
  swarms/tools/create_agent_tool.py,sha256=YsiBgrR9gkn2Jenu_mIFXOMJCWb_Hdw4gBYPQN5HEQk,3467
161
163
  swarms/tools/func_calling_utils.py,sha256=PhHHZRHN-vRHA_h21ELRjXIhSRIrsT4UhU5-1Bhy-iU,3542
@@ -195,11 +197,11 @@ swarms/utils/pdf_to_text.py,sha256=nkySOS_sJ4Jf4RP5SoDpMB5WfjJ_GGc5z8gJfn2cxOM,1
195
197
  swarms/utils/str_to_dict.py,sha256=T3Jsdjz87WIlkSo7jAW6BB80sv0Ns49WT1qXlOrdEoE,874
196
198
  swarms/utils/try_except_wrapper.py,sha256=uvDZDZJcH986EF0Ej6zZBLcqHJ58NHizPsAH5olrE7Q,3919
197
199
  swarms/utils/visualizer.py,sha256=0ylohEk62MAS6iPRaDOV03m9qo2k5J56tWlKJk_46p4,16927
198
- swarms/utils/vllm_wrapper.py,sha256=OIGnU9Vf81vE_hul1FK-xEhChFK8fxqZX6-fhQeW22c,4987
200
+ swarms/utils/vllm_wrapper.py,sha256=AUoJuZhBc9iD70Uo400toAlIcY1P0LkA67eVau5rDhw,5067
199
201
  swarms/utils/wrapper_clusterop.py,sha256=PMSCVM7ZT1vgj1D_MYAe835RR3SMLYxA-si2JS02yNQ,4220
200
202
  swarms/utils/xml_utils.py,sha256=j8byUa56VT7V4e18pL8UBftLdyWKsUHbid1KDxnAWBo,1416
201
- swarms-7.8.1.dist-info/LICENSE,sha256=jwRtEmTWjLrEsvFB6QFdYs2cEeZPRMdj-UMOFkPF8_0,11363
202
- swarms-7.8.1.dist-info/METADATA,sha256=PqIGF4MP1adudx2SRbDfxkqmOCuP8QC3l-2pq7AVUDg,94969
203
- swarms-7.8.1.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
204
- swarms-7.8.1.dist-info/entry_points.txt,sha256=2K0rTtfO1X1WaO-waJlXIKw5Voa_EpAL_yU0HXE2Jgc,47
205
- swarms-7.8.1.dist-info/RECORD,,
203
+ swarms-7.8.4.dist-info/LICENSE,sha256=jwRtEmTWjLrEsvFB6QFdYs2cEeZPRMdj-UMOFkPF8_0,11363
204
+ swarms-7.8.4.dist-info/METADATA,sha256=dV2n6Kb8uRJa8-hF4AK4cpo_OM_S4cyoSyoWFhRanYM,94945
205
+ swarms-7.8.4.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
206
+ swarms-7.8.4.dist-info/entry_points.txt,sha256=2K0rTtfO1X1WaO-waJlXIKw5Voa_EpAL_yU0HXE2Jgc,47
207
+ swarms-7.8.4.dist-info/RECORD,,
File without changes