swarms-7.7.8-py3-none-any.whl → swarms-7.8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. swarms/__init__.py +0 -1
  2. swarms/agents/cort_agent.py +206 -0
  3. swarms/agents/react_agent.py +173 -0
  4. swarms/agents/self_agent_builder.py +40 -0
  5. swarms/communication/base_communication.py +290 -0
  6. swarms/communication/duckdb_wrap.py +369 -72
  7. swarms/communication/pulsar_struct.py +691 -0
  8. swarms/communication/redis_wrap.py +1362 -0
  9. swarms/communication/sqlite_wrap.py +547 -44
  10. swarms/prompts/agent_self_builder_prompt.py +103 -0
  11. swarms/prompts/safety_prompt.py +50 -0
  12. swarms/schemas/__init__.py +6 -1
  13. swarms/schemas/agent_class_schema.py +91 -0
  14. swarms/schemas/agent_mcp_errors.py +18 -0
  15. swarms/schemas/agent_tool_schema.py +13 -0
  16. swarms/schemas/llm_agent_schema.py +92 -0
  17. swarms/schemas/mcp_schemas.py +43 -0
  18. swarms/structs/__init__.py +4 -0
  19. swarms/structs/agent.py +315 -267
  20. swarms/structs/aop.py +3 -1
  21. swarms/structs/batch_agent_execution.py +64 -0
  22. swarms/structs/conversation.py +261 -57
  23. swarms/structs/council_judge.py +542 -0
  24. swarms/structs/deep_research_swarm.py +19 -22
  25. swarms/structs/long_agent.py +424 -0
  26. swarms/structs/ma_utils.py +11 -8
  27. swarms/structs/malt.py +30 -28
  28. swarms/structs/multi_model_gpu_manager.py +1 -1
  29. swarms/structs/output_types.py +1 -1
  30. swarms/structs/swarm_router.py +70 -15
  31. swarms/tools/__init__.py +12 -0
  32. swarms/tools/base_tool.py +2840 -264
  33. swarms/tools/create_agent_tool.py +104 -0
  34. swarms/tools/mcp_client_call.py +504 -0
  35. swarms/tools/py_func_to_openai_func_str.py +45 -7
  36. swarms/tools/pydantic_to_json.py +10 -27
  37. swarms/utils/audio_processing.py +343 -0
  38. swarms/utils/history_output_formatter.py +5 -5
  39. swarms/utils/index.py +226 -0
  40. swarms/utils/litellm_wrapper.py +65 -67
  41. swarms/utils/try_except_wrapper.py +2 -2
  42. swarms/utils/xml_utils.py +42 -0
  43. {swarms-7.7.8.dist-info → swarms-7.8.0.dist-info}/METADATA +5 -4
  44. {swarms-7.7.8.dist-info → swarms-7.8.0.dist-info}/RECORD +47 -30
  45. {swarms-7.7.8.dist-info → swarms-7.8.0.dist-info}/WHEEL +1 -1
  46. swarms/client/__init__.py +0 -15
  47. swarms/client/main.py +0 -407
  48. swarms/tools/mcp_client.py +0 -246
  49. swarms/tools/mcp_integration.py +0 -340
  50. {swarms-7.7.8.dist-info → swarms-7.8.0.dist-info}/LICENSE +0 -0
  51. {swarms-7.7.8.dist-info → swarms-7.8.0.dist-info}/entry_points.txt +0 -0
swarms/structs/long_agent.py ADDED
@@ -0,0 +1,424 @@
+ import concurrent.futures
+ import os
+ from typing import Union, List
+ import PyPDF2
+ import markdown
+ from pathlib import Path
+ from swarms.utils.litellm_tokenizer import count_tokens
+ from swarms.structs.agent import Agent
+ from swarms.structs.conversation import Conversation
+ from swarms.utils.history_output_formatter import (
+     history_output_formatter,
+ )
+ from swarms.utils.formatter import formatter
+
+
+ class LongAgent:
+     """
+     A class to handle and process long-form content from various sources including PDFs,
+     markdown files, and large text documents.
+     """
+
+     def __init__(
+         self,
+         name: str = "LongAgent",
+         description: str = "A long-form content processing agent",
+         token_count_per_agent: int = 16000,
+         output_type: str = "final",
+         model_name: str = "gpt-4o-mini",
+         aggregator_model_name: str = "gpt-4o-mini",
+     ):
+         """Initialize the LongAgent."""
+         self.name = name
+         self.description = description
+         self.model_name = model_name
+         self.aggregator_model_name = aggregator_model_name
+         self.content = ""
+         self.metadata = {}
+         self.token_count_per_agent = token_count_per_agent
+         self.output_type = output_type
+         self.agents = []
+         self.conversation = Conversation()
+
+     def load_pdf(self, file_path: Union[str, Path]) -> str:
+         """
+         Load and extract text from a PDF file.
+
+         Args:
+             file_path (Union[str, Path]): Path to the PDF file
+
+         Returns:
+             str: Extracted text from the PDF
+         """
+         if not os.path.exists(file_path):
+             raise FileNotFoundError(
+                 f"PDF file not found at {file_path}"
+             )
+
+         text = ""
+         with open(file_path, "rb") as file:
+             pdf_reader = PyPDF2.PdfReader(file)
+             for page in pdf_reader.pages:
+                 text += page.extract_text()
+
+         self.content = text
+         self.metadata["source"] = "pdf"
+         self.metadata["file_path"] = str(file_path)
+         return text
+
+     def load_markdown(self, file_path: Union[str, Path]) -> str:
+         """
+         Load and process a markdown file.
+
+         Args:
+             file_path (Union[str, Path]): Path to the markdown file
+
+         Returns:
+             str: Processed markdown content
+         """
+         if not os.path.exists(file_path):
+             raise FileNotFoundError(
+                 f"Markdown file not found at {file_path}"
+             )
+
+         with open(file_path, "r", encoding="utf-8") as file:
+             content = file.read()
+
+         # Convert markdown to HTML for processing
+         markdown.markdown(content)
+
+         self.content = content
+         self.metadata["source"] = "markdown"
+         self.metadata["file_path"] = str(file_path)
+         return content
+
+     def load_text(self, text: str) -> str:
+         """
+         Load and process a large text string.
+
+         Args:
+             text (str): The text content to process
+
+         Returns:
+             str: The processed text
+         """
+         self.content = text
+         self.metadata["source"] = "text"
+         return text
+
+     def get_content(self) -> str:
+         """
+         Get the current content being processed.
+
+         Returns:
+             str: The current content
+         """
+         return self.content
+
+     def get_metadata(self) -> dict:
+         """
+         Get the metadata associated with the current content.
+
+         Returns:
+             dict: The metadata dictionary
+         """
+         return self.metadata
+
+     def count_token_document(
+         self, file_path: Union[str, Path]
+     ) -> int:
+         """
+         Count the number of tokens in a document.
+
+         Args:
+             document (str): The document to count tokens for
+         """
+         if file_path.endswith(".pdf"):
+             count = count_tokens(self.load_pdf(file_path))
+             formatter.print_panel(
+                 f"Token count for {file_path}: {count}",
+                 title="Token Count",
+             )
+             print(f"Token count for {file_path}: {count}")
+         elif file_path.endswith(".md"):
+             count = count_tokens(self.load_markdown(file_path))
+             formatter.print_panel(
+                 f"Token count for {file_path}: {count}",
+                 title="Token Count",
+             )
+             print(f"Token count for {file_path}: {count}")
+         elif file_path.endswith(".txt"):
+             count = count_tokens(self.load_text(file_path))
+             formatter.print_panel(
+                 f"Token count for {file_path}: {count}",
+                 title="Token Count",
+             )
+             print(f"Token count for {file_path}: {count}")
+         else:
+             raise ValueError(f"Unsupported file type: {file_path}")
+         return count
+
+     def count_multiple_documents(
+         self, file_paths: List[Union[str, Path]]
+     ) -> int:
+         """
+         Count the number of tokens in multiple documents.
+
+         Args:
+             file_paths (List[Union[str, Path]]): The list of file paths to count tokens for
+
+         Returns:
+             int: Total token count across all documents
+         """
+         total_tokens = 0
+         # Calculate max_workers as 20% of CPU count
+         max_workers = max(1, int(os.cpu_count() * 0.2))
+
+         with concurrent.futures.ThreadPoolExecutor(
+             max_workers=max_workers
+         ) as executor:
+             futures = [
+                 executor.submit(self.count_token_document, file_path)
+                 for file_path in file_paths
+             ]
+             for future in concurrent.futures.as_completed(futures):
+                 try:
+                     total_tokens += future.result()
+                 except Exception as e:
+                     formatter.print_panel(
+                         f"Error processing document: {str(e)}",
+                         title="Error",
+                     )
+                     continue
+         return total_tokens
+
+     def create_agents_for_documents(
+         self, file_paths: List[Union[str, Path]]
+     ) -> List[Agent]:
+         """
+         Create agents for each document chunk and process them.
+
+         Args:
+             file_paths (List[Union[str, Path]]): The list of file paths to create agents for
+
+         Returns:
+             List[Agent]: List of created agents
+         """
+         for file_path in file_paths:
+             # Load the document content
+             if str(file_path).endswith(".pdf"):
+                 content = self.load_pdf(file_path)
+             elif str(file_path).endswith(".md"):
+                 content = self.load_markdown(file_path)
+             else:
+                 content = self.load_text(str(file_path))
+
+             # Split content into chunks based on token count
+             chunks = self._split_into_chunks(content)
+
+             # Create an agent for each chunk
+             for i, chunk in enumerate(chunks):
+                 agent = Agent(
+                     agent_name=f"Document Analysis Agent - {Path(file_path).name} - Chunk {i+1}",
+                     system_prompt="""
+                     You are an expert document analysis and summarization agent specialized in processing and understanding complex documents. Your primary responsibilities include:
+
+                     1. Document Analysis:
+                     - Thoroughly analyze the provided document chunk
+                     - Identify key themes, main arguments, and important details
+                     - Extract critical information and relationships between concepts
+
+                     2. Summarization Capabilities:
+                     - Create concise yet comprehensive summaries
+                     - Generate both high-level overviews and detailed breakdowns
+                     - Highlight key points, findings, and conclusions
+                     - Maintain context and relationships between different sections
+
+                     3. Information Extraction:
+                     - Identify and extract important facts, figures, and data points
+                     - Recognize and preserve technical terminology and domain-specific concepts
+                     - Maintain accuracy in representing the original content
+
+                     4. Response Format:
+                     - Provide clear, structured responses
+                     - Use bullet points for key findings
+                     - Include relevant quotes or references when necessary
+                     - Maintain professional and academic tone
+
+                     5. Context Awareness:
+                     - Consider the document's purpose and target audience
+                     - Adapt your analysis based on the document type (academic, technical, general)
+                     - Preserve the original meaning and intent
+
+                     Your goal is to help users understand and extract value from this document chunk while maintaining accuracy and completeness in your analysis.
+                     """,
+                     model_name=self.model_name,
+                     max_loops=1,
+                     max_tokens=self.token_count_per_agent,
+                 )
+
+                 # Run the agent on the chunk
+                 output = agent.run(
+                     f"Please analyze and summarize the following document chunk:\n\n{chunk}"
+                 )
+
+                 # Add the output to the conversation
+                 self.conversation.add(
+                     role=agent.agent_name,
+                     content=output,
+                 )
+
+                 self.agents.append(agent)
+
+         return self.agents
+
+     def _split_into_chunks(self, content: str) -> List[str]:
+         """
+         Split content into chunks based on token count.
+
+         Args:
+             content (str): The content to split
+
+         Returns:
+             List[str]: List of content chunks
+         """
+         chunks = []
+         current_chunk = ""
+         current_tokens = 0
+
+         # Split content into sentences (simple approach)
+         sentences = content.split(". ")
+
+         for sentence in sentences:
+             sentence_tokens = count_tokens(sentence)
+
+             if (
+                 current_tokens + sentence_tokens
+                 > self.token_count_per_agent
+             ):
+                 if current_chunk:
+                     chunks.append(current_chunk)
+                 current_chunk = sentence
+                 current_tokens = sentence_tokens
+             else:
+                 current_chunk += (
+                     ". " + sentence if current_chunk else sentence
+                 )
+                 current_tokens += sentence_tokens
+
+         if current_chunk:
+             chunks.append(current_chunk)
+
+         return chunks
+
+     def count_total_agents(self) -> int:
+         """
+         Count the total number of agents.
+         """
+         count = len(self.agents)
+         formatter.print_panel(f"Total agents created: {count}")
+         return count
+
+     def _create_aggregator_agent(self) -> Agent:
+         """
+         Create an aggregator agent for synthesizing document summaries.
+
+         Returns:
+             Agent: The configured aggregator agent
+         """
+         return Agent(
+             agent_name="Document Aggregator Agent",
+             system_prompt="""
+             You are an expert document synthesis agent specialized in creating comprehensive reports from multiple document summaries. Your responsibilities include:
+
+             1. Synthesis and Integration:
+             - Combine multiple document summaries into a coherent narrative
+             - Identify and resolve any contradictions or inconsistencies
+             - Maintain logical flow and structure in the final report
+             - Preserve important details while eliminating redundancy
+
+             2. Report Structure:
+             - Create a clear, hierarchical structure for the report
+             - Include an executive summary at the beginning
+             - Organize content into logical sections with clear headings
+             - Ensure smooth transitions between different topics
+
+             3. Analysis and Insights:
+             - Identify overarching themes and patterns across summaries
+             - Draw meaningful conclusions from the combined information
+             - Highlight key findings and their implications
+             - Provide context and connections between different pieces of information
+
+             4. Quality Assurance:
+             - Ensure factual accuracy and consistency
+             - Maintain professional and academic tone
+             - Verify that all important information is included
+             - Check for clarity and readability
+
+             Your goal is to create a comprehensive, well-structured report that effectively synthesizes all the provided document summaries into a single coherent document.
+             """,
+             model_name=self.aggregator_model_name,
+             max_loops=1,
+             max_tokens=self.token_count_per_agent,
+         )
+
+     def run(self, file_paths: List[Union[str, Path]]) -> str:
+         """
+         Run the document processing pipeline and generate a comprehensive report.
+
+         Args:
+             file_paths (List[Union[str, Path]]): The list of file paths to process
+
+         Returns:
+             str: The final comprehensive report
+         """
+         # Count total tokens
+         total_tokens = self.count_multiple_documents(file_paths)
+         formatter.print_panel(
+             f"Total tokens: {total_tokens}", title="Total Tokens"
+         )
+
+         total_amount_of_agents = (
+             total_tokens / self.token_count_per_agent
+         )
+         formatter.print_panel(
+             f"Total amount of agents: {total_amount_of_agents}",
+             title="Total Amount of Agents",
+         )
+
+         # First, process all documents and create chunk agents
+         self.create_agents_for_documents(file_paths)
+
+         # Format the number of agents
+         # formatter.print_panel(f"Number of agents: {len(self.agents)}", title="Number of Agents")
+
+         # Create aggregator agent and collect summaries
+         aggregator_agent = self._create_aggregator_agent()
+         combined_summaries = self.conversation.get_str()
+
+         # Generate the final comprehensive report
+         final_report = aggregator_agent.run(
+             f"""
+             Please create a comprehensive report by synthesizing the following document summaries:
+
+             {combined_summaries}
+
+             Please structure your response as follows:
+             1. Executive Summary
+             2. Main Findings and Analysis
+             3. Key Themes and Patterns
+             4. Detailed Breakdown by Topic
+             5. Conclusions and Implications
+
+             Ensure the report is well-organized, comprehensive, and maintains a professional tone throughout.
+             """
+         )
+
+         # Add the final report to the conversation
+         self.conversation.add(
+             role="Document Aggregator Agent", content=final_report
+         )
+
+         return history_output_formatter(
+             conversation=self.conversation, type=self.output_type
+         )
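For orientation: the new LongAgent chunks oversized documents to the per-agent token budget, summarizes each chunk with a dedicated Agent, and aggregates the summaries into one report. A minimal usage sketch based only on the constructor and run() signature shown above; the file paths are hypothetical placeholders:

from swarms.structs.long_agent import LongAgent

# Values mirror the defaults in __init__ above; "report.pdf" and
# "notes.md" are placeholder paths, not files shipped with the package.
long_agent = LongAgent(
    token_count_per_agent=16000,
    output_type="final",
    model_name="gpt-4o-mini",
    aggregator_model_name="gpt-4o-mini",
)

# run() counts tokens, fans out one analysis agent per chunk, then
# aggregates everything into a single report via the aggregator agent.
final_report = long_agent.run(["report.pdf", "notes.md"])
print(final_report)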
swarms/structs/ma_utils.py CHANGED
@@ -1,10 +1,9 @@
- from swarms.structs.agent import Agent
- from typing import List, Any, Optional, Union
+ from typing import List, Any, Optional, Union, Callable
  import random


  def list_all_agents(
-     agents: List[Union[Agent, Any]],
+     agents: List[Union[Callable, Any]],
      conversation: Optional[Any] = None,
      name: str = "",
      add_to_conversation: bool = False,
@@ -74,17 +73,21 @@ models = [


  def set_random_models_for_agents(
-     agents: Union[List[Agent], Agent], model_names: List[str] = models
- ) -> Union[List[Agent], Agent]:
-     """Sets random models for agents in the swarm.
+     agents: Optional[Union[List[Callable], Callable]] = None,
+     model_names: List[str] = models,
+ ) -> Union[List[Callable], Callable, str]:
+     """Sets random models for agents in the swarm or returns a random model name.

      Args:
-         agents (Union[List[Agent], Agent]): Either a single agent or a list of agents
+         agents (Optional[Union[List[Agent], Agent]]): Either a single agent, list of agents, or None
          model_names (List[str], optional): List of model names to choose from. Defaults to models.

      Returns:
-         Union[List[Agent], Agent]: The agent(s) with randomly assigned models
+         Union[List[Agent], Agent, str]: The agent(s) with randomly assigned models or a random model name
      """
+     if agents is None:
+         return random.choice(model_names)
+
      if isinstance(agents, list):
          return [
              setattr(agent, "model_name", random.choice(model_names))
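The ma_utils.py change makes agents optional: called with no arguments, set_random_models_for_agents now simply returns a random model name; given agents, it assigns each a random model_name as before. A short sketch of both branches; the Agent construction details are illustrative only:

from swarms.structs.agent import Agent
from swarms.structs.ma_utils import set_random_models_for_agents

# New branch: no agents supplied, so a random model name string is returned
random_model = set_random_models_for_agents()

# Existing branch: each agent gets a randomly assigned model_name
# (agent names here are placeholders)
agents = [
    Agent(agent_name="Researcher", max_loops=1),
    Agent(agent_name="Writer", max_loops=1),
]
agents = set_random_models_for_agents(agents=agents)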
swarms/structs/malt.py CHANGED
@@ -58,12 +58,6 @@ You are a world-renowned mathematician with an extensive background in multiple
  Your response should be as comprehensive as possible, leaving no room for ambiguity, and it should reflect your mastery in constructing original mathematical arguments.
  """

- proof_creator_agent = Agent(
-     agent_name="Proof-Creator-Agent",
-     model_name="gpt-4o-mini",
-     max_loops=1,
-     system_prompt=proof_creator_prompt,
- )

  # Agent 2: Proof Verifier Agent
  proof_verifier_prompt = """
@@ -92,12 +86,6 @@ You are an esteemed mathematician and veteran academic known for your precise an
  Your review must be exhaustive, ensuring that even the most subtle aspects of the proof are scrutinized in depth.
  """

- proof_verifier_agent = Agent(
-     agent_name="Proof-Verifier-Agent",
-     model_name="gpt-4o-mini",
-     max_loops=1,
-     system_prompt=proof_verifier_prompt,
- )

  # Agent 3: Proof Refiner Agent
  proof_refiner_prompt = """
@@ -126,13 +114,6 @@ You are an expert in mathematical exposition and refinement with decades of expe
  Your refined proof should be a masterpiece of mathematical writing, addressing all the feedback with detailed revisions and explanations.
  """

- proof_refiner_agent = Agent(
-     agent_name="Proof-Refiner-Agent",
-     model_name="gpt-4o-mini",
-     max_loops=1,
-     system_prompt=proof_refiner_prompt,
- )
-

  majority_voting_prompt = """
  Engage in a comprehensive and exhaustive majority voting analysis of the following conversation, ensuring a deep and thoughtful examination of the responses provided by each agent. This analysis should not only summarize the responses but also critically engage with the content, context, and implications of each agent's input.
@@ -160,13 +141,6 @@ Please adhere to the following detailed guidelines:
  Throughout your analysis, focus on uncovering clear patterns while being attentive to the subtleties and complexities inherent in the responses. Pay particular attention to the nuances of mathematical contexts where algorithmic thinking may be required, ensuring that your examination is both rigorous and accessible to a diverse audience.
  """

- majority_voting_agent = Agent(
-     agent_name="Majority-Voting-Agent",
-     model_name="gpt-4o-mini",
-     max_loops=1,
-     system_prompt=majority_voting_prompt,
- )
-

  class MALT:
      """
@@ -210,6 +184,34 @@ class MALT:
          self.conversation = Conversation()
          logger.debug("Conversation initialized.")

+         proof_refiner_agent = Agent(
+             agent_name="Proof-Refiner-Agent",
+             model_name="gpt-4o-mini",
+             max_loops=1,
+             system_prompt=proof_refiner_prompt,
+         )
+
+         proof_verifier_agent = Agent(
+             agent_name="Proof-Verifier-Agent",
+             model_name="gpt-4o-mini",
+             max_loops=1,
+             system_prompt=proof_verifier_prompt,
+         )
+
+         Agent(
+             agent_name="Majority-Voting-Agent",
+             model_name="gpt-4o-mini",
+             max_loops=1,
+             system_prompt=majority_voting_prompt,
+         )
+
+         proof_creator_agent = Agent(
+             agent_name="Proof-Creator-Agent",
+             model_name="gpt-4o-mini",
+             max_loops=1,
+             system_prompt=proof_creator_prompt,
+         )
+
          if preset_agents:
              self.main_agent = proof_creator_agent
              self.refiner_agent = proof_refiner_agent
@@ -304,12 +306,12 @@
          ######################### MAJORITY VOTING #########################

          # Majority Voting on the verified outputs
-         majority_voting_verified = majority_voting_agent.run(
+         majority_voting_verified = self.majority_voting_agent.run(
              task=any_to_str(verified_outputs),
          )

          self.conversation.add(
-             role=majority_voting_agent.agent_name,
+             role=self.majority_voting_agent.agent_name,
              content=majority_voting_verified,
          )

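Net effect of the malt.py hunks: the four prompt agents are no longer built at module import time but inside MALT.__init__, and the majority-voting step now resolves its agent through self. A minimal construction sketch, assuming the preset_agents flag and the existing MALT.run(task=...) entrypoint are otherwise unchanged:

from swarms.structs.malt import MALT

# Agents are now constructed per-instance rather than on import,
# so importing the module no longer instantiates any Agent objects.
malt = MALT(preset_agents=True)
output = malt.run(task="Prove that the sum of two even integers is even.")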
swarms/structs/multi_model_gpu_manager.py CHANGED
@@ -147,7 +147,7 @@ class ModelMemoryCalculator:

      @staticmethod
      def get_huggingface_model_size(
-         model_or_path: Union[str, Any]
+         model_or_path: Union[str, Any],
      ) -> float:
          """
          Calculate the memory size of a Hugging Face model in GB.
swarms/structs/output_types.py CHANGED
@@ -3,4 +3,4 @@ from swarms.utils.history_output_formatter import (
  )

  # Use the OutputType for type annotations
- output_type: OutputType
+ output_type: OutputType  # OutputType now includes 'xml'
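Per the comment above, OutputType now accepts "xml", which pairs with the new swarms/utils/xml_utils.py in the files-changed table. A small sketch, assuming history_output_formatter keeps the conversation=/type= keywords used in long_agent.py above:

from swarms.structs.conversation import Conversation
from swarms.utils.history_output_formatter import history_output_formatter

conversation = Conversation()
conversation.add(role="user", content="Summarize the quarterly report.")

# "xml" is newly valid for OutputType in 7.8.0
xml_history = history_output_formatter(conversation=conversation, type="xml")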