swarms 7.5.2__py3-none-any.whl → 7.5.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,109 @@
1
from typing import List, Optional

from loguru import logger

from swarms.prompts.reasoning_prompt import REASONING_PROMPT
from swarms.structs.agent import Agent
from swarms.structs.output_types import OutputType
from swarms.structs.conversation import Conversation
from swarms.utils.history_output_formatter import (
    history_output_formatter,
)
12
+
13
+
14
class ReasoningDuo:
    """
    Couples a reasoning agent with a main agent: the reasoning agent thinks
    through the task first, then the main agent produces the final answer
    conditioned on those thoughts.

    Attributes:
        agent_name (str): Name given to the main agent.
        agent_description (str): Description given to the main agent.
        model_name (str): The name of the model used for the reasoning agent.
        description (str): A description of the reasoning agent.
        output_type (OutputType): Format used when returning the conversation history.
        reasoning_agent (Agent): Agent instance that produces the reasoning trace.
        main_agent (Agent): Agent instance that produces the final answer.
        conversation (Conversation): Shared record of all exchanged messages.
    """

    def __init__(
        self,
        agent_name: str = "reasoning-agent-01",
        agent_description: str = "A highly intelligent and thoughtful AI designed to provide accurate and well-reasoned answers to the user's questions.",
        model_name: str = "gpt-4o-mini",
        description: str = "A highly intelligent and thoughtful AI designed to provide accurate and well-reasoned answers to the user's questions.",
        model_names: Optional[List[str]] = None,
        system_prompt: str = "You are a helpful assistant that can answer questions and help with tasks.",
        output_type: OutputType = "dict",
    ):
        # Fix for the mutable-default-argument pitfall: the previous
        # signature shared a single list object across every instantiation.
        if model_names is None:
            model_names = ["gpt-4o-mini", "gpt-4o"]

        self.agent_name = agent_name
        self.agent_description = agent_description
        self.model_name = model_name
        self.description = description
        self.output_type = output_type

        # NOTE(review): agent_name="Your" looks like a truncated placeholder;
        # preserved because it also serves as the conversation role label
        # for the reasoning agent's messages — confirm intended name upstream.
        self.reasoning_agent = Agent(
            agent_name="Your",
            description="A highly intelligent and thoughtful AI designed to provide accurate and well-reasoned answers to the user's questions.",
            system_prompt=REASONING_PROMPT,
            max_loops=1,
            model_name=model_names[0],
            dynamic_temperature_enabled=True,
        )

        self.main_agent = Agent(
            agent_name=self.agent_name,
            description=self.agent_description,
            system_prompt=system_prompt,
            max_loops=1,
            model_name=model_names[1],
            dynamic_temperature_enabled=True,
        )

        self.conversation = Conversation()

    def run(self, task: str):
        """
        Run the reasoning agent on the task, then feed its thoughts to the
        main agent to produce the final answer.

        Args:
            task (str): The task to be processed by the agents.

        Returns:
            The conversation history formatted according to ``self.output_type``.
        """
        logger.info(f"Running task: {task}")

        self.conversation.add(role="user", content=task)

        output_reasoner = self.reasoning_agent.run(task)

        self.conversation.add(
            role=self.reasoning_agent.agent_name,
            content=output_reasoner,
        )

        # The main agent sees both the original task and the reasoning trace.
        prompt = f"Task: {task} \n\n Your thoughts: {output_reasoner}"

        output_main = self.main_agent.run(prompt)

        self.conversation.add(
            role=self.main_agent.agent_name, content=output_main
        )

        return history_output_formatter(
            self.conversation, self.output_type
        )

    def batched_run(self, tasks: List[str]):
        """
        Execute :meth:`run` sequentially for each task.

        Args:
            tasks (List[str]): Tasks to process.

        Returns:
            list: One formatted output per task, in input order.
        """
        outputs = []
        for task in tasks:
            logger.info(f"Processing task: {task}")
            outputs.append(self.run(task))
        return outputs
@@ -0,0 +1,9 @@
1
# System prompt for the reasoning agent: instructs the model to emit an
# explicit chain of thought wrapped in <think> ... </think> tags (multiple
# tags encouraged) before arriving at its conclusion. This is a runtime
# string consumed by Agent(system_prompt=...); do not edit casually.
REASONING_PROMPT = """
This is a structured conversation between the User and the Assistant, where the User poses a question, and the Assistant is tasked with providing a comprehensive solution.

Before delivering the final answer, the Assistant must engage in a thorough reasoning process. This involves critically analyzing the question, considering various perspectives, and evaluating potential solutions. The Assistant should articulate this reasoning process clearly, allowing the User to understand the thought process behind the answer.

The reasoning process and the final answer should be distinctly enclosed within <think> </think> tags. For example, the format should be: <think> reasoning process here </think> for the reasoning, followed by <think> final answer here </think> for the answer.

It is essential to output multiple <think> </think> tags to reflect the depth of thought and exploration involved in addressing the task. The Assistant should strive to think deeply and thoroughly about the question, ensuring that all relevant aspects are considered before arriving at a conclusion.
"""
@@ -83,6 +83,8 @@ from swarms.structs.swarms_api import (
83
83
  SwarmValidationError,
84
84
  )
85
85
 
86
+ from swarms.structs.de_hallucination_swarm import DeHallucinationSwarm
87
+
86
88
  __all__ = [
87
89
  "Agent",
88
90
  "AsyncWorkflow",
@@ -156,4 +158,5 @@ __all__ = [
156
158
  "AgentInput",
157
159
  "AgentsBuilder",
158
160
  "MALT",
161
+ "DeHallucinationSwarm",
159
162
  ]
swarms/structs/agent.py CHANGED
@@ -579,23 +579,31 @@ class Agent:
579
579
  # Telemetry Processor to log agent data
580
580
  log_agent_data(self.to_dict())
581
581
 
582
- if self.llm is None and self.model_name is not None:
582
+ if self.llm is None:
583
583
  self.llm = self.llm_handling()
584
584
 
585
585
  def llm_handling(self):
586
586
  from swarms.utils.litellm_wrapper import LiteLLM
587
587
 
588
- if self.llm_args is not None:
589
- llm = LiteLLM(model_name=self.model_name, **self.llm_args)
590
-
591
- else:
592
- llm = LiteLLM(
593
- model_name=self.model_name,
594
- temperature=self.temperature,
595
- max_tokens=self.max_tokens,
596
- )
588
+ if self.model_name is None:
589
+ raise ValueError("Model name cannot be None")
597
590
 
598
- return llm
591
+ try:
592
+ if self.llm_args is not None:
593
+ llm = LiteLLM(
594
+ model_name=self.model_name, **self.llm_args
595
+ )
596
+ else:
597
+ llm = LiteLLM(
598
+ model_name=self.model_name,
599
+ temperature=self.temperature,
600
+ max_tokens=self.max_tokens,
601
+ system_prompt=self.system_prompt,
602
+ )
603
+ return llm
604
+ except Exception as e:
605
+ logger.error(f"Error in llm_handling: {e}")
606
+ return None
599
607
 
600
608
  def prepare_tools_list_dictionary(self):
601
609
  import json
@@ -1064,13 +1072,13 @@ class Agent:
1064
1072
  self.short_memory.get_str()
1065
1073
  )
1066
1074
 
1067
- # Handle artifacts
1068
- if self.artifacts_on is True:
1069
- self.handle_artifacts(
1070
- concat_strings(all_responses),
1071
- self.artifacts_output_path,
1072
- self.artifacts_file_extension,
1073
- )
1075
+ # # Handle artifacts
1076
+ # if self.artifacts_on is True:
1077
+ # self.handle_artifacts(
1078
+ # concat_strings(all_responses),
1079
+ # self.artifacts_output_path,
1080
+ # self.artifacts_file_extension,
1081
+ # )
1074
1082
 
1075
1083
  log_agent_data(self.to_dict())
1076
1084
  if self.autosave is True:
@@ -2413,11 +2421,11 @@ class Agent:
2413
2421
  if not isinstance(task, str):
2414
2422
  raise TypeError("Task must be a string")
2415
2423
 
2416
- if not task.strip():
2417
- raise ValueError("Task cannot be empty")
2424
+ if task is None:
2425
+ raise ValueError("Task cannot be None")
2418
2426
 
2419
- if self.llm is None:
2420
- raise TypeError("LLM object cannot be None")
2427
+ # if self.llm is None:
2428
+ # raise TypeError("LLM object cannot be None")
2421
2429
 
2422
2430
  try:
2423
2431
  out = self.llm.run(task, *args, **kwargs)
@@ -5,8 +5,9 @@ from typing import Any, Optional, Union
5
5
  import yaml
6
6
  from swarms.structs.base_structure import BaseStructure
7
7
  from typing import TYPE_CHECKING
8
+ from swarms.utils.any_to_str import any_to_str
8
9
  from swarms.utils.formatter import formatter
9
-
10
+ from swarms.utils.litellm_tokenizer import count_tokens
10
11
 
11
12
  if TYPE_CHECKING:
12
13
  from swarms.structs.agent import (
@@ -79,6 +80,7 @@ class Conversation(BaseStructure):
79
80
  auto_save: bool = True,
80
81
  save_as_yaml: bool = True,
81
82
  save_as_json_bool: bool = False,
83
+ token_count: bool = True,
82
84
  *args,
83
85
  **kwargs,
84
86
  ):
@@ -96,6 +98,7 @@ class Conversation(BaseStructure):
96
98
  self.auto_save = auto_save
97
99
  self.save_as_yaml = save_as_yaml
98
100
  self.save_as_json_bool = save_as_json_bool
101
+ self.token_count = token_count
99
102
 
100
103
  # If system prompt is not None, add it to the conversation history
101
104
  if self.system_prompt is not None:
@@ -128,17 +131,21 @@ class Conversation(BaseStructure):
128
131
  now = datetime.datetime.now()
129
132
  timestamp = now.strftime("%Y-%m-%d %H:%M:%S")
130
133
 
134
+ tokens = count_tokens(any_to_str(content))
135
+
131
136
  if isinstance(content, dict) or isinstance(content, list):
132
137
 
133
138
  message = {
134
139
  "role": role,
135
140
  "content": content,
141
+ "token_count": int(tokens),
136
142
  }
137
143
 
138
144
  else:
139
145
  message = {
140
146
  "role": role,
141
- "content": f"Time: {timestamp} \n{content}",
147
+ "content": f"Time: {timestamp} \n {content}",
148
+ "token_count": int(tokens),
142
149
  }
143
150
 
144
151
  self.conversation_history.append(message)
@@ -448,16 +455,17 @@ class Conversation(BaseStructure):
448
455
 
449
456
  # # Example usage
450
457
  # # conversation = Conversation()
451
- # conversation = Conversation()
458
+ # conversation = Conversation(token_count=True)
452
459
  # conversation.add("user", "Hello, how are you?")
453
- # conversation.add(
454
- # "assistant", {"name": "tool_1", "output": "Hello, how are you?"}
455
- # )
456
- # print(conversation.return_json())
460
+ # conversation.add("assistant", "I am doing well, thanks.")
461
+ # # conversation.add(
462
+ # # "assistant", {"name": "tool_1", "output": "Hello, how are you?"}
463
+ # # )
464
+ # # print(conversation.return_json())
457
465
 
458
- # # print(conversation.get_last_message_as_string())
459
- # # print(conversation.return_messages_as_list())
460
- # # conversation.add("assistant", "I am doing well, thanks.")
461
- # # # print(conversation.to_json())
466
+ # # # print(conversation.get_last_message_as_string())
467
+ # print(conversation.return_json())
468
+ # # # conversation.add("assistant", "I am doing well, thanks.")
469
+ # # # # print(conversation.to_json())
462
470
  # # print(type(conversation.to_dict()))
463
471
  # # print(conversation.to_yaml())
@@ -0,0 +1,273 @@
1
+ from typing import List, Dict, Any, Optional
2
+ import time
3
+ from loguru import logger
4
+ from swarms.structs.agent import Agent
5
+
6
# Prompt templates for different agent roles
# Each constant below is a runtime system prompt passed to one of the four
# DeHallucinationSwarm agents (generator, critic, refiner, validator).

# System prompt for the generator agent: answer queries while explicitly
# acknowledging uncertainty rather than fabricating details.
GENERATOR_PROMPT = """You are a knowledgeable assistant tasked with providing accurate information on a wide range of topics.

Your responsibilities:
1. Provide accurate information based on your training data
2. Use clear, concise language
3. Acknowledge limitations in your knowledge
4. Abstain from making up information when uncertain

When responding to queries:
- Stick to verified facts
- Cite your sources when possible
- Clearly distinguish between firmly established facts and more tentative claims
- Use phrases like "I'm not certain about..." or "Based on my knowledge up to my training cutoff..." when appropriate
- Avoid overly confident language for uncertain topics

Remember, it's better to acknowledge ignorance than to provide incorrect information.
"""

# System prompt for the critic agent: flag inaccuracies, hallucinated
# details, and unsupported claims in a generated response.
CRITIC_PROMPT = """You are a critical reviewer tasked with identifying potential inaccuracies, hallucinations, or unsupported claims in AI-generated text.

Your responsibilities:
1. Carefully analyze the provided text for factual errors
2. Identify claims that lack sufficient evidence
3. Spot logical inconsistencies
4. Flag overly confident language on uncertain topics
5. Detect potentially hallucinated details (names, dates, statistics, etc.)

For each issue detected, you should:
- Quote the specific problematic text
- Explain why it's potentially inaccurate
- Rate the severity of the issue (low/medium/high)
- Suggest a specific correction or improvement

Focus particularly on:
- Unfounded claims presented as facts
- Highly specific details that seem suspicious
- Logical contradictions
- Anachronisms or temporal inconsistencies
- Claims that contradict common knowledge

Be thorough and specific in your critique. Provide actionable feedback for improvement.
"""

# System prompt for the refiner agent: rewrite a response to address the
# critic's feedback while preserving tone and helpfulness.
REFINER_PROMPT = """You are a refinement specialist tasked with improving text based on critical feedback.

Your responsibilities:
1. Carefully review the original text and the critical feedback
2. Make precise modifications to address all identified issues
3. Ensure factual accuracy in the refined version
4. Maintain the intended tone and style of the original
5. Add appropriate epistemic status markers (e.g., "likely", "possibly", "according to...")

Guidelines for refinement:
- Remove or qualify unsupported claims
- Replace specific details with more general statements when evidence is lacking
- Add appropriate hedging language where certainty is not warranted
- Maintain the helpful intent of the original response
- Ensure logical consistency throughout the refined text
- Add qualifiers or clarify knowledge limitations where appropriate

The refined text should be helpful and informative while being scrupulously accurate.
"""

# System prompt for the validator agent: final quality gate that checks the
# refined response against the earlier feedback before it reaches the user.
VALIDATOR_PROMPT = """You are a validation expert tasked with ensuring the highest standards of accuracy in refined AI outputs.

Your responsibilities:
1. Verify that all critical issues from previous feedback have been properly addressed
2. Check for any remaining factual inaccuracies or unsupported claims
3. Ensure appropriate epistemic status markers are used
4. Confirm the response maintains a helpful tone while being accurate
5. Provide a final assessment of the response quality

Assessment structure:
- Issue resolution: Have all previously identified issues been addressed? (Yes/No/Partially)
- Remaining concerns: Are there any remaining factual or logical issues? (List if any)
- Epistemics: Does the response appropriately indicate confidence levels? (Yes/No/Needs improvement)
- Helpfulness: Does the response remain helpful despite necessary qualifications? (Yes/No/Partially)
- Overall assessment: Final verdict on whether the response is ready for user consumption (Approved/Needs further refinement)

If approved, explain what makes this response trustworthy. If further refinement is needed, provide specific guidance.
"""
88
+
89
+
90
class DeHallucinationSwarm:
    """
    A system of multiple agents that work together to reduce hallucinations in generated content.
    The system works through multiple rounds of generation, criticism, refinement, and validation.
    """

    def __init__(
        self,
        name: str = "DeHallucinationSwarm",
        description: str = "A system of multiple agents that work together to reduce hallucinations in generated content.",
        model_names: Optional[List[str]] = None,
        iterations: int = 2,
        system_prompt: str = GENERATOR_PROMPT,
        store_intermediate_results: bool = True,
    ):
        """
        Initialize the DeHallucinationSwarm with configurable agents.

        Args:
            name: Display name of the swarm.
            description: Short description of the swarm's purpose.
            model_names: Model names for generator, critic, refiner, and
                validator, in that order. Defaults to four "gpt-4o-mini"
                entries when omitted.
            iterations: Number of criticism-refinement cycles to perform.
            system_prompt: Stored on the instance for reference. NOTE(review):
                the generator agent is always built with GENERATOR_PROMPT,
                matching the original behavior; confirm whether this
                parameter was meant to override it.
            store_intermediate_results: Whether to store all intermediate outputs.
        """
        # Fix for the mutable-default-argument pitfall: the previous
        # signature shared one list object across all instantiations.
        if model_names is None:
            model_names = [
                "gpt-4o-mini",
                "gpt-4o-mini",
                "gpt-4o-mini",
                "gpt-4o-mini",
            ]

        self.name = name
        self.description = description
        self.iterations = iterations
        self.store_intermediate_results = store_intermediate_results
        self.system_prompt = system_prompt
        self.history = []

        # Initialize all agents
        self.generator = Agent(
            agent_name="Generator",
            description="An agent that generates initial responses to queries",
            system_prompt=GENERATOR_PROMPT,
            model_name=model_names[0],
        )

        self.critic = Agent(
            agent_name="Critic",
            description="An agent that critiques responses for potential inaccuracies",
            system_prompt=CRITIC_PROMPT,
            model_name=model_names[1],
        )

        self.refiner = Agent(
            agent_name="Refiner",
            description="An agent that refines responses based on critique",
            system_prompt=REFINER_PROMPT,
            model_name=model_names[2],
        )

        self.validator = Agent(
            agent_name="Validator",
            description="An agent that performs final validation of refined content",
            system_prompt=VALIDATOR_PROMPT,
            model_name=model_names[3],
        )

    def _log_step(
        self,
        step_name: str,
        content: str,
        metadata: Optional[Dict[str, Any]] = None,
    ):
        """Record a step in the swarm's processing history.

        No-op when ``store_intermediate_results`` is False.
        """
        if self.store_intermediate_results:
            timestamp = time.time()
            step_record = {
                "timestamp": timestamp,
                "step": step_name,
                "content": content,
            }
            if metadata:
                step_record["metadata"] = metadata

            self.history.append(step_record)
            logger.debug(f"Logged step: {step_name}")

    def run(self, query: str) -> Dict[str, Any]:
        """
        Process a query through the swarm's multi-agent refinement cycle.

        Args:
            query: The user's query to process

        Returns:
            Dict containing the final response and processing metadata
        """
        logger.info(f"Processing query: {query}")
        self.history = []  # Reset history for new query

        # Generate initial response
        initial_response = self.generator.run(query)
        self._log_step(
            "initial_generation", initial_response, {"query": query}
        )

        current_response = initial_response

        # Perform multiple iteration cycles
        for i in range(self.iterations):
            logger.info(f"Starting iteration {i+1}/{self.iterations}")

            # Step 1: Critique the current response
            critique = self.critic.run(
                f"Review the following response to the query: '{query}'\n\n{current_response}"
            )
            self._log_step(f"critique_{i+1}", critique)

            # Step 2: Refine based on critique
            refined_response = self.refiner.run(
                f"Refine the following response based on the critique provided.\n\n"
                f"Original query: {query}\n\n"
                f"Original response: {current_response}\n\n"
                f"Critique: {critique}"
            )
            self._log_step(f"refinement_{i+1}", refined_response)

            # Update current response for next iteration
            current_response = refined_response

        # Final validation
        validation = self.validator.run(
            f"Validate the following refined response for accuracy and helpfulness.\n\n"
            f"Original query: {query}\n\n"
            f"Final response: {current_response}"
        )
        self._log_step("final_validation", validation)

        # Prepare results
        result = {
            "query": query,
            "final_response": current_response,
            "validation_result": validation,
            "iteration_count": self.iterations,
        }

        if self.store_intermediate_results:
            result["processing_history"] = self.history

        return result

    def batch_run(self, queries: List[str]) -> List[Dict[str, Any]]:
        """
        Process multiple queries through the swarm.

        Args:
            queries: List of user queries to process

        Returns:
            List of result dictionaries, one per query
        """
        results = []
        for query in queries:
            logger.info(f"Processing batch query: {query}")
            results.append(self.run(query))
        return results
253
+
254
+
255
+ # # Example usage
256
+ # if __name__ == "__main__":
257
+ # # Configure logger
258
+ # logger.add("dehallucinationswarm.log", rotation="10 MB")
259
+
260
+ # # Create swarm instance
261
+ # swarm = DeHallucinationSwarm(iterations=2)
262
+
263
+ # # Example queries that might tempt hallucination
264
+ # test_queries = [
265
+ # "Tell me about the history of quantum computing",
266
+ # "What are the specific details of the Treaty of Utrecht?",
267
+ # "Who won the Nobel Prize in Physics in 2020?",
268
+ # "What are the main causes of the economic recession of 2008?",
269
+ # ]
270
+
271
+ # # Process batch of queries
272
+ # results = swarm.batch_run(test_queries)
273
+ # print(results)
swarms/structs/malt.py CHANGED
@@ -13,7 +13,7 @@ Potential Improvements:
13
13
  - Autonomously create the agents based on the task.
14
14
  - Feed verifier responses back into the creator to improve the proof.
15
15
  - Feed refiner responses back into the creator to improve the proof.
16
- -
16
+ - Feed majority voting responses back into the creator to improve the proof.
17
17
 
18
18
 
19
19
  This is a simplified implementation of the MALT orchestrator. The original implementation trains the models with dpo and sft.
@@ -21,8 +21,6 @@ Whereas this implementation uses the models as is.
21
21
 
22
22
  """
23
23
 
24
- from ast import Mult
25
- import concurrent.futures
26
24
  from typing import List
27
25
 
28
26
  from loguru import logger
@@ -169,6 +167,7 @@ majority_voting_agent = Agent(
169
167
  system_prompt=majority_voting_prompt,
170
168
  )
171
169
 
170
+
172
171
  class MALT:
173
172
  """
174
173
  MALT (Mult-Agent Learning Task) orchestrates the interaction between multiple agents
@@ -296,29 +295,26 @@ class MALT:
296
295
  task=main_agent_output,
297
296
  max_workers=3,
298
297
  )
299
-
298
+
300
299
  self.conversation.add(
301
300
  role=self.verifier_agent.agent_name,
302
301
  content=verified_outputs,
303
302
  )
304
-
305
-
303
+
306
304
  ######################### MAJORITY VOTING #########################
307
305
 
308
-
309
306
  # Majority Voting on the verified outputs
310
307
  majority_voting_verified = majority_voting_agent.run(
311
308
  task=any_to_str(verified_outputs),
312
309
  )
313
310
 
314
311
  self.conversation.add(
315
- role=self.majority_voting_agent.agent_name,
312
+ role=majority_voting_agent.agent_name,
316
313
  content=majority_voting_verified,
317
314
  )
318
-
315
+
319
316
  #########################################################
320
-
321
-
317
+
322
318
  # Refining the majority voting output
323
319
  logger.info("Running task with refiner agents")
324
320
  for output in verified_outputs:
@@ -351,6 +347,7 @@ class MALT:
351
347
  str or list or dict: The output from the conversation based on the specified return format.
352
348
  """
353
349
  task = task
350
+
354
351
  for i in range(self.max_loops):
355
352
  logger.info(f"Starting iteration {i+1}/{self.max_loops}")
356
353
  output = self.step(task, img, *args, **kwargs)
@@ -389,18 +386,3 @@ class MALT:
389
386
 
390
387
  def __repr__(self):
391
388
  return self.conversation.get_str()
392
-
393
- def run_concurrently(self, tasks: List[str], *args, **kwargs):
394
- """Executes a list of tasks using the main agent and processes the output through verifier and refiner agents.
395
-
396
- Args:
397
- tasks (list[str]): The list of tasks to be executed by the main agent.
398
- """
399
- logger.info("Running batch of tasks concurrently.")
400
- logger.info(f"Number of tasks: {len(tasks)}")
401
- with concurrent.futures.ThreadPoolExecutor() as executor:
402
- futures = [
403
- executor.submit(self.run, task, *args, **kwargs)
404
- for task in tasks
405
- ]
406
- return concurrent.futures.as_completed(futures)
@@ -306,7 +306,7 @@ class SwarmRouter:
306
306
  *args,
307
307
  **kwargs,
308
308
  )
309
-
309
+
310
310
  elif self.swarm_type == "MALT":
311
311
  return MALT(
312
312
  name=self.name,
@@ -453,9 +453,11 @@ class SwarmRouter:
453
453
  self.swarm = self._create_swarm(task, *args, **kwargs)
454
454
 
455
455
  try:
456
- logger.info(f"Running task on {self.swarm_type} swarm with task: {task}")
456
+ logger.info(
457
+ f"Running task on {self.swarm_type} swarm with task: {task}"
458
+ )
457
459
  result = self.swarm.run(task=task, *args, **kwargs)
458
-
460
+
459
461
  logger.info("Swarm completed successfully")
460
462
  return result
461
463
  except Exception as e:
@@ -0,0 +1,14 @@
1
+ from swarms.structs.conversation import Conversation
2
+
3
+
4
def history_output_formatter(
    conversation: Conversation, type: str = "list"
):
    """
    Render a conversation's history in the requested format.

    Args:
        conversation: The conversation whose history is to be formatted.
        type: One of "list", "dict", "string", or "str".

    Returns:
        The history as a list of messages, a dict, or a single string.

    Raises:
        ValueError: If ``type`` is not a supported format name.
    """
    # NOTE(review): the parameter name `type` shadows the builtin; it is
    # kept unchanged so keyword callers remain compatible.
    if type == "list":
        return conversation.return_messages_as_list()
    if type in ("string", "str"):
        return conversation.get_str()
    if type == "dict":
        return conversation.to_dict()
    raise ValueError(f"Invalid type: {type}")