swarms 7.6.0__py3-none-any.whl → 7.6.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -8,6 +8,8 @@ from swarms.structs.base_swarm import BaseSwarm
 from swarms.structs.base_workflow import BaseWorkflow
 from swarms.structs.concurrent_workflow import ConcurrentWorkflow
 from swarms.structs.conversation import Conversation
+from swarms.structs.de_hallucination_swarm import DeHallucinationSwarm
+from swarms.structs.deep_research_swarm import DeepResearchSwarm
 from swarms.structs.graph_workflow import (
     Edge,
     GraphWorkflow,
@@ -18,6 +20,9 @@ from swarms.structs.groupchat import (
     GroupChat,
     expertise_based,
 )
+from swarms.structs.hybrid_hiearchical_peer_swarm import (
+    HybridHierarchicalClusterSwarm,
+)
 from swarms.structs.majority_voting import (
     MajorityVoting,
     majority_voting,
@@ -83,8 +88,6 @@ from swarms.structs.swarms_api import (
     SwarmValidationError,
 )
 
-from swarms.structs.de_hallucination_swarm import DeHallucinationSwarm
-
 __all__ = [
     "Agent",
     "AsyncWorkflow",
@@ -159,4 +162,6 @@ __all__ = [
    "AgentsBuilder",
    "MALT",
    "DeHallucinationSwarm",
+    "DeepResearchSwarm",
+    "HybridHierarchicalClusterSwarm",
 ]
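7.6.2 adds two new swarm classes to the public API, DeepResearchSwarm and HybridHierarchicalClusterSwarm, and moves the existing DeHallucinationSwarm import up into the main import block. A quick import sketch, assuming swarms >= 7.6.2; the module paths are taken verbatim from the hunk above, including the misspelled `hybrid_hiearchical_peer_swarm` module name, which is how it ships:

```python
# Sketch: importing the classes added in 7.6.2 via their module paths.
from swarms.structs.deep_research_swarm import DeepResearchSwarm
from swarms.structs.hybrid_hiearchical_peer_swarm import (
    HybridHierarchicalClusterSwarm,  # note "hiearchical" in the module path
)
```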
swarms/structs/agent.py CHANGED
@@ -6,6 +6,7 @@ import random
 import threading
 import time
 import uuid
+
 from concurrent.futures import ThreadPoolExecutor
 from datetime import datetime
 from typing import (
@@ -49,12 +50,14 @@ from swarms.structs.safe_loading import (
 from swarms.telemetry.main import log_agent_data
 from swarms.tools.base_tool import BaseTool
 from swarms.tools.tool_parse_exec import parse_and_execute_json
+from swarms.utils.any_to_str import any_to_str
 from swarms.utils.data_to_text import data_to_text
 from swarms.utils.file_processing import create_file_in_folder
 from swarms.utils.formatter import formatter
 from swarms.utils.litellm_tokenizer import count_tokens
 from swarms.utils.pdf_to_text import pdf_to_text
 from swarms.structs.agent_roles import agent_roles
+from swarms.utils.str_to_dict import str_to_dict
 
 
 # Utils
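The two new utility imports drive the behavioral changes in the hunks below: any_to_str coerces an arbitrary task object into a string before the LLM call, and str_to_dict parses the model's JSON-style string output back into a dict. A hedged sketch of their apparent contracts, inferred from how they are used below rather than from the utils' own source:

```python
# Hedged sketch of the helpers' apparent contracts, inferred from usage below.
from swarms.utils.any_to_str import any_to_str
from swarms.utils.str_to_dict import str_to_dict

task = any_to_str({"objective": "summarize Q3 earnings"})  # any object -> str
args = str_to_dict('{"depth": 2, "detailed_queries": ["a", "b"]}')  # str -> dict
```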
@@ -803,7 +806,7 @@ class Agent:
     # Main function
     def _run(
         self,
-        task: Optional[str] = None,
+        task: Optional[Union[str, Any]] = None,
         img: Optional[str] = None,
         speech: Optional[str] = None,
         video: Optional[str] = None,
@@ -1113,6 +1116,7 @@ class Agent:
         # )
 
         log_agent_data(self.to_dict())
+
         if self.autosave is True:
             self.save()
 
@@ -2494,7 +2498,7 @@ class Agent:
 
     def run(
         self,
-        task: Optional[str] = None,
+        task: Optional[Union[str, Any]] = None,
         img: Optional[str] = None,
         device: Optional[str] = "cpu",  # gpu
         device_id: Optional[int] = 0,
@@ -2531,6 +2535,9 @@ class Agent:
             Exception: If any other error occurs during execution.
         """
 
+        if not isinstance(task, str):
+            task = any_to_str(task)
+
         if scheduled_run_date:
             while datetime.now() < scheduled_run_date:
                 time.sleep(
@@ -2539,13 +2546,18 @@ class Agent:
 
         try:
             # If cluster ops disabled, run directly
-            return self._run(
+            output = self._run(
                 task=task,
                 img=img,
                 *args,
                 **kwargs,
             )
 
+            if self.tools_list_dictionary is not None:
+                return str_to_dict(output)
+            else:
+                return output
+
 
         except ValueError as e:
             self._handle_run_error(e)
@@ -2635,7 +2647,7 @@ class Agent:
         )
 
         return agent.run(
-            task=f"From {self.agent_name}: {output}",
+            task=f"From {self.agent_name}: Message: {output}",
             img=img,
             *args,
             **kwargs,
@@ -2651,10 +2663,27 @@ class Agent:
         """
         Talk to multiple agents.
         """
-        outputs = []
-        for agent in agents:
-            output = self.talk_to(agent, task, *args, **kwargs)
-            outputs.append(output)
+        # Use the existing executor from self.executor or create a new one if needed
+        with ThreadPoolExecutor() as executor:
+            # Create futures for each agent conversation
+            futures = [
+                executor.submit(
+                    self.talk_to, agent, task, *args, **kwargs
+                )
+                for agent in agents
+            ]
+
+            # Wait for all futures to complete and collect results
+            outputs = []
+            for future in futures:
+                try:
+                    result = future.result()
+                    outputs.append(result)
+                except Exception as e:
+                    logger.error(f"Error in agent communication: {e}")
+                    outputs.append(
+                        None
+                    )  # or handle error case as needed
 
         return outputs
 
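talk_to_multiple_agents() now fans the conversations out across a thread pool instead of looping sequentially; results are collected in the order the agents were passed, and a failed conversation contributes None in its slot rather than raising. A usage sketch, where the agent instances are hypothetical:

```python
# Sketch: concurrent fan-out with None placeholders for failed agents.
# `coordinator`, `analyst`, and `critic` are hypothetical Agent instances.
outputs = coordinator.talk_to_multiple_agents(
    [analyst, critic], task="Reconcile your findings"
)
successful = [o for o in outputs if o is not None]
```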
@@ -177,9 +177,7 @@ class AgentsBuilder:
             The output from the swarm's execution
         """
         logger.info(f"Running swarm on task: {task}")
-        agents = self._create_agents(
-            task, image_url, *args, **kwargs
-        )
+        agents = self._create_agents(task, image_url, *args, **kwargs)
 
         return agents
 
@@ -201,26 +199,26 @@ class AgentsBuilder:
             temperature=0.1,
             base_model=Agents,
             model_name=self.model_name,
-            max_tokens = 8192,
+            max_tokens=8192,
         )
 
         agents_dictionary = model.run(task)
         print(agents_dictionary)
-
+
         print(type(agents_dictionary))
         logger.info("Agents successfully created")
         logger.info(f"Agents: {len(agents_dictionary.agents)}")
 
         if self.return_dictionary:
             logger.info("Returning dictionary")
-
+
             # Convert swarm config to dictionary
             agents_dictionary = agents_dictionary.model_dump()
             return agents_dictionary
         else:
             logger.info("Returning agents")
             return self.create_agents(agents_dictionary)
-
+
     def create_agents(self, agents_dictionary: Any):
         # Create agents from config
         agents = []
@@ -292,4 +290,4 @@ class AgentsBuilder:
 # builder = AgentsBuilder(model_name="gpt-4o")
 # agents = builder.run("Create a swarm that can write a book about the history of the world")
 # print(agents)
-# print(type(agents))
+# print(type(agents))
@@ -12,7 +12,7 @@ from swarms.utils.file_processing import create_file_in_folder
 from swarms.utils.loguru_logger import initialize_logger
 from swarms.structs.conversation import Conversation
 from swarms.structs.swarm_id_generator import generate_swarm_id
-from swarms.structs.output_type import OutputType
+from swarms.structs.output_types import OutputType
 
 logger = initialize_logger(log_folder="concurrent_workflow")
 
@@ -0,0 +1,482 @@
+import asyncio
+import concurrent.futures
+import os
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from typing import Any, Dict, List, Tuple
+
+import aiohttp
+from dotenv import load_dotenv
+from rich.console import Console
+
+from swarms.agents.reasoning_duo import ReasoningDuo
+from swarms.structs.agent import Agent
+from swarms.structs.conversation import Conversation
+from swarms.utils.any_to_str import any_to_str
+from swarms.utils.formatter import formatter
+from swarms.utils.history_output_formatter import (
+    history_output_formatter,
+)
+from swarms.utils.str_to_dict import str_to_dict
+
+console = Console()
+load_dotenv()
+
+# Number of worker threads for concurrent operations
+MAX_WORKERS = (
+    os.cpu_count() * 2
+)  # Optimal number of workers based on CPU cores
+
+###############################################################################
+# 1. System Prompts for Each Scientist Agent
+###############################################################################
+
+
+def format_exa_results(json_data: Dict[str, Any]) -> str:
+    """Formats Exa.ai search results into structured text"""
+    if "error" in json_data:
+        return f"### Error\n{json_data['error']}\n"
+
+    # Pre-allocate formatted_text list with initial capacity
+    formatted_text = []
+
+    # Extract search metadata
+    search_params = json_data.get("effectiveFilters", {})
+    query = search_params.get("query", "General web search")
+    formatted_text.append(
+        f"### Exa Search Results for: '{query}'\n\n---\n"
+    )
+
+    # Process results
+    results = json_data.get("results", [])
+
+    if not results:
+        formatted_text.append("No results found.\n")
+        return "".join(formatted_text)
+
+    def process_result(
+        result: Dict[str, Any], index: int
+    ) -> List[str]:
+        """Process a single result in a thread-safe manner"""
+        title = result.get("title", "No title")
+        url = result.get("url", result.get("id", "No URL"))
+        published_date = result.get("publishedDate", "")
+
+        # Handle highlights efficiently
+        highlights = result.get("highlights", [])
+        highlight_text = (
+            "\n".join(
+                (
+                    h.get("text", str(h))
+                    if isinstance(h, dict)
+                    else str(h)
+                )
+                for h in highlights[:3]
+            )
+            if highlights
+            else "No summary available"
+        )
+
+        return [
+            f"{index}. **{title}**\n",
+            f"   - URL: {url}\n",
+            f"   - Published: {published_date.split('T')[0] if published_date else 'Date unknown'}\n",
+            f"   - Key Points:\n      {highlight_text}\n\n",
+        ]
+
+    # Process results concurrently
+    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
+        future_to_result = {
+            executor.submit(process_result, result, i + 1): i
+            for i, result in enumerate(results)
+        }
+
+        # Collect results in order
+        processed_results = [None] * len(results)
+        for future in as_completed(future_to_result):
+            idx = future_to_result[future]
+            try:
+                processed_results[idx] = future.result()
+            except Exception as e:
+                console.print(
+                    f"[bold red]Error processing result {idx + 1}: {str(e)}[/bold red]"
+                )
+                processed_results[idx] = [
+                    f"Error processing result {idx + 1}: {str(e)}\n"
+                ]
+
+    # Extend formatted text with processed results in correct order
+    for result_text in processed_results:
+        formatted_text.extend(result_text)
+
+    return "".join(formatted_text)
+
+
+async def _async_exa_search(
+    query: str, **kwargs: Any
+) -> Dict[str, Any]:
+    """Asynchronous helper function for Exa.ai API requests"""
+    api_url = "https://api.exa.ai/search"
+    headers = {
+        "x-api-key": os.getenv("EXA_API_KEY"),
+        "Content-Type": "application/json",
+    }
+
+    payload = {
+        "query": query,
+        "useAutoprompt": True,
+        "numResults": kwargs.get("num_results", 10),
+        "contents": {
+            "text": True,
+            "highlights": {"numSentences": 2},
+        },
+        **kwargs,
+    }
+
+    try:
+        async with aiohttp.ClientSession() as session:
+            async with session.post(
+                api_url, json=payload, headers=headers
+            ) as response:
+                if response.status != 200:
+                    return {
+                        "error": f"HTTP {response.status}: {await response.text()}"
+                    }
+                return await response.json()
+    except Exception as e:
+        return {"error": str(e)}
+
+
+def exa_search(query: str, **kwargs: Any) -> str:
+    """Performs web search using Exa.ai API with concurrent processing"""
+    try:
+        # Run async search in the event loop
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+        try:
+            response_json = loop.run_until_complete(
+                _async_exa_search(query, **kwargs)
+            )
+        finally:
+            loop.close()
+
+        # Format results concurrently
+        formatted_text = format_exa_results(response_json)
+
+        return formatted_text
+
+    except Exception as e:
+        error_msg = f"Unexpected error: {str(e)}"
+        console.print(f"[bold red]{error_msg}[/bold red]")
+        return error_msg
+
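exa_search() is synchronous: it creates, runs, and closes its own event loop per call, which makes it safe to invoke from the swarm's worker threads but unsafe to call from inside an already-running asyncio loop. A usage sketch, assuming EXA_API_KEY is set in the environment (load_dotenv() above reads it from a .env file):

```python
# Usage sketch: returns formatted search results as a string, or an
# error message string on failure (it never raises).
results_text = exa_search("solid-state battery commercialization timeline")
print(results_text)
```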
+
+# Define the research tools schema
+tools = [
+    {
+        "type": "function",
+        "function": {
+            "name": "search_topic",
+            "description": "Conduct an in-depth search on a specified topic or subtopic, generating a comprehensive array of highly detailed search queries tailored to the input parameters.",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "depth": {
+                        "type": "integer",
+                        "description": "Indicates the level of thoroughness for the search. Values range from 1 to 3, where 1 represents a superficial search and 3 signifies an exploration of the topic.",
+                    },
+                    "detailed_queries": {
+                        "type": "array",
+                        "description": "An array of highly specific search queries that are generated based on the input query and the specified depth. Each query should be designed to elicit detailed and relevant information from various sources.",
+                        "items": {
+                            "type": "string",
+                            "description": "Each item in this array should represent a unique search query that targets a specific aspect of the main topic, ensuring a comprehensive exploration of the subject matter.",
+                        },
+                    },
+                },
+                "required": ["depth", "detailed_queries"],
+            },
+        },
+    },
+]
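This schema asks the model to emit a depth level plus an array of detailed_queries; get_queries() below parses that JSON out of the agent's output with str_to_dict. For reference, the arguments shape the schema is designed to elicit looks like this (values made up for illustration):

```python
# Illustrative shape of the tool-call arguments the schema elicits.
example_tool_args = {
    "depth": 2,
    "detailed_queries": [
        "USMCA tariff provisions on automotive imports",
        "recent US-Mexico tariff disputes",
    ],
}
```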
+
+RESEARCH_AGENT_PROMPT = """
+You are an advanced research agent specialized in conducting deep, comprehensive research across multiple domains.
+Your task is to:
+
+1. Break down complex topics into searchable subtopics
+2. Generate diverse search queries to explore each subtopic thoroughly
+3. Identify connections and patterns across different areas of research
+4. Synthesize findings into coherent insights
+5. Identify gaps in current knowledge and suggest areas for further investigation
+
+For each research task:
+- Consider multiple perspectives and approaches
+- Look for both supporting and contradicting evidence
+- Evaluate the credibility and relevance of sources
+- Track emerging trends and recent developments
+- Consider cross-disciplinary implications
+
+Output Format:
+- Provide structured research plans
+- Include specific search queries for each subtopic
+- Prioritize queries based on relevance and potential impact
+- Suggest follow-up areas for deeper investigation
+"""
+
+SUMMARIZATION_AGENT_PROMPT = """
+You are an expert information synthesis and summarization agent designed for producing clear, accurate, and insightful summaries of complex information. Your core capabilities include:
+
+
+Core Capabilities:
+- Identify and extract key concepts, themes, and insights from any given content
+- Recognize patterns, relationships, and hierarchies within information
+- Filter out noise while preserving crucial context and nuance
+- Handle multiple sources and perspectives simultaneously
+
+Summarization Strategy
+1. Multi-level Structure
+   - Provide an extensive summary
+   - Follow with key findings
+   - Include detailed insights with supporting evidence
+   - End with implications or next steps when relevant
+
+2. Quality Standards
+   - Maintain factual accuracy and precision
+   - Preserve important technical details and terminology
+   - Avoid oversimplification of complex concepts
+   - Include quantitative data when available
+   - Cite or reference specific sources when summarizing claims
+
+3. Clarity & Accessibility
+   - Use clear, concise language
+   - Define technical terms when necessary
+   - Structure information logically
+   - Use formatting to enhance readability
+   - Maintain appropriate level of technical depth for the audience
+
+4. Synthesis & Analysis
+   - Identify conflicting information or viewpoints
+   - Highlight consensus across sources
+   - Note gaps or limitations in the information
+   - Draw connections between related concepts
+   - Provide context for better understanding
+
+OUTPUT REQUIREMENTS:
+- Begin with a clear statement of the topic or question being addressed
+- Use consistent formatting and structure
+- Clearly separate different levels of detail
+- Include confidence levels for conclusions when appropriate
+- Note any areas requiring additional research or clarification
+
+Remember: Your goal is to make complex information accessible while maintaining accuracy and depth. Prioritize clarity without sacrificing important nuance or detail."""
+
+
+# Initialize the research agent
+research_agent = Agent(
+    agent_name="Deep-Research-Agent",
+    agent_description="Specialized agent for conducting comprehensive research across multiple domains",
+    system_prompt=RESEARCH_AGENT_PROMPT,
+    max_loops=1,  # Allow multiple iterations for thorough research
+    tools_list_dictionary=tools,
+    model_name="gpt-4o-mini",
+)
+
+
+reasoning_duo = ReasoningDuo(
+    system_prompt=SUMMARIZATION_AGENT_PROMPT, output_type="string"
+)
+
+
+class DeepResearchSwarm:
+    def __init__(
+        self,
+        name: str = "DeepResearchSwarm",
+        description: str = "A swarm that conducts comprehensive research across multiple domains",
+        research_agent: Agent = research_agent,
+        max_loops: int = 1,
+        nice_print: bool = True,
+        output_type: str = "json",
+        max_workers: int = os.cpu_count()
+        * 2,  # Let the system decide optimal thread count
+        token_count: bool = False,
+        research_model_name: str = "gpt-4o-mini",
+    ):
+        self.name = name
+        self.description = description
+        self.research_agent = research_agent
+        self.max_loops = max_loops
+        self.nice_print = nice_print
+        self.output_type = output_type
+        self.max_workers = max_workers
+        self.research_model_name = research_model_name
+
+        self.reliability_check()
+        self.conversation = Conversation(token_count=token_count)
+
+        # Create a persistent ThreadPoolExecutor for the lifetime of the swarm
+        # This eliminates thread creation overhead on each query
+        self.executor = concurrent.futures.ThreadPoolExecutor(
+            max_workers=self.max_workers
+        )
+
+    def __del__(self):
+        """Clean up the executor on object destruction"""
+        self.executor.shutdown(wait=False)
+
+    def reliability_check(self):
+        """Check the reliability of the query"""
+        if self.max_loops < 1:
+            raise ValueError("max_loops must be greater than 0")
+
+        formatter.print_panel(
+            "DeepResearchSwarm is booting up...", "blue"
+        )
+        formatter.print_panel("Reliability check passed", "green")
+
+    def get_queries(self, query: str) -> List[str]:
+        """
+        Generate a list of detailed search queries based on the input query.
+
+        Args:
+            query (str): The main research query to explore
+
+        Returns:
+            List[str]: A list of detailed search queries
+        """
+        self.conversation.add(role="User", content=query)
+
+        # Get the agent's response
+        agent_output = self.research_agent.run(query)
+
+        self.conversation.add(
+            role=self.research_agent.agent_name, content=agent_output
+        )
+
+        # Convert the string output to dictionary
+        output_dict = str_to_dict(agent_output)
+
+        # Print the conversation history
+        if self.nice_print:
+            to_do_list = any_to_str(output_dict)
+            formatter.print_panel(to_do_list, "blue")
+
+        # Extract the detailed queries from the output
+        if (
+            isinstance(output_dict, dict)
+            and "detailed_queries" in output_dict
+        ):
+            queries = output_dict["detailed_queries"]
+            formatter.print_panel(
+                f"Generated {len(queries)} queries", "blue"
+            )
+            return queries
+
+        return []
+
+    def _process_query(self, query: str) -> Tuple[str, str]:
+        """
+        Process a single query with search and reasoning.
+        This function is designed to be run in a separate thread.
+
+        Args:
+            query (str): The query to process
+
+        Returns:
+            Tuple[str, str]: A tuple containing (search_results, reasoning_output)
+        """
+        # Run the search
+        results = exa_search(query)
+
+        # Run the reasoning on the search results
+        reasoning_output = reasoning_duo.run(results)
+
+        return (results, reasoning_output)
+
+    def step(self, query: str):
+        """
+        Execute a single research step with maximum parallelism.
+
+        Args:
+            query (str): The research query to process
+
+        Returns:
+            Formatted conversation history
+        """
+        # Get all the queries to process
+        queries = self.get_queries(query)
+
+        # Submit all queries for concurrent processing
+        # Using a list instead of generator for clearer debugging
+        futures = []
+        for q in queries:
+            future = self.executor.submit(self._process_query, q)
+            futures.append((q, future))
+
+        # Process results as they complete (no waiting for slower queries)
+        for q, future in futures:
+            try:
+                # Get results (blocks until this specific future is done)
+                results, reasoning_output = future.result()
+
+                # Add search results to conversation
+                self.conversation.add(
+                    role="User",
+                    content=f"Search results for {q}: \n {results}",
+                )
+
+                # Add reasoning output to conversation
+                self.conversation.add(
+                    role=reasoning_duo.agent_name,
+                    content=reasoning_output,
+                )
+            except Exception as e:
+                # Handle any errors in the thread
+                self.conversation.add(
+                    role="System",
+                    content=f"Error processing query '{q}': {str(e)}",
+                )
+
+        # Once all query processing is complete, generate the final summary
+        # This step runs after all queries to ensure it summarizes all results
+        final_summary = reasoning_duo.run(
+            f"Generate an extensive report of the following content: {self.conversation.get_str()}"
+        )
+
+        self.conversation.add(
+            role=reasoning_duo.agent_name,
+            content=final_summary,
+        )
+
+        return history_output_formatter(
+            self.conversation, type=self.output_type
+        )
+
+    def run(self, task: str):
+        return self.step(task)
+
+    def batched_run(self, tasks: List[str]):
+        """
+        Execute a list of research tasks in parallel.
+
+        Args:
+            tasks (List[str]): A list of research tasks to execute
+
+        Returns:
+            List[str]: A list of formatted conversation histories
+        """
+        futures = []
+        for task in tasks:
+            future = self.executor.submit(self.step, task)
+            futures.append((task, future))
+
+
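Note that batched_run() as committed submits every task to the shared executor but never collects the futures or returns anything, despite its docstring promising a list of results. A hypothetical caller-side workaround, not part of the package, would be to gather step() results directly:

```python
# Hypothetical workaround sketch: collect step() results yourself, since
# batched_run() in this version returns None.
swarm = DeepResearchSwarm()
tasks = ["topic A", "topic B"]
futures = [swarm.executor.submit(swarm.step, t) for t in tasks]
results = [f.result() for f in futures]
```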
+# # Example usage
+# if __name__ == "__main__":
+#     swarm = DeepResearchSwarm(
+#         output_type="json",
+#     )
+#     print(
+#         swarm.step(
+#             "What is the active tariff situation with Mexico? Only create 2 queries"
+#         )
+#     )