swarms 7.8.3__py3-none-any.whl → 7.8.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. swarms/agents/ape_agent.py +5 -22
  2. swarms/agents/consistency_agent.py +1 -1
  3. swarms/agents/i_agent.py +1 -1
  4. swarms/agents/reasoning_agents.py +99 -3
  5. swarms/agents/reasoning_duo.py +1 -1
  6. swarms/cli/main.py +1 -1
  7. swarms/communication/__init__.py +1 -0
  8. swarms/communication/duckdb_wrap.py +32 -2
  9. swarms/communication/pulsar_struct.py +45 -19
  10. swarms/communication/redis_wrap.py +56 -11
  11. swarms/communication/supabase_wrap.py +1659 -0
  12. swarms/prompts/agent_conversation_aggregator.py +38 -0
  13. swarms/prompts/prompt.py +0 -3
  14. swarms/schemas/agent_completion_response.py +71 -0
  15. swarms/schemas/agent_rag_schema.py +7 -0
  16. swarms/schemas/conversation_schema.py +9 -0
  17. swarms/schemas/llm_agent_schema.py +99 -81
  18. swarms/schemas/swarms_api_schemas.py +164 -0
  19. swarms/structs/__init__.py +15 -9
  20. swarms/structs/agent.py +219 -199
  21. swarms/structs/agent_rag_handler.py +685 -0
  22. swarms/structs/base_swarm.py +2 -1
  23. swarms/structs/conversation.py +832 -264
  24. swarms/structs/csv_to_agent.py +153 -100
  25. swarms/structs/deep_research_swarm.py +197 -193
  26. swarms/structs/dynamic_conversational_swarm.py +18 -7
  27. swarms/structs/hiearchical_swarm.py +1 -1
  28. swarms/structs/hybrid_hiearchical_peer_swarm.py +2 -18
  29. swarms/structs/image_batch_processor.py +261 -0
  30. swarms/structs/interactive_groupchat.py +356 -0
  31. swarms/structs/ma_blocks.py +159 -0
  32. swarms/structs/majority_voting.py +1 -1
  33. swarms/structs/mixture_of_agents.py +1 -1
  34. swarms/structs/multi_agent_exec.py +25 -26
  35. swarms/structs/multi_agent_router.py +3 -2
  36. swarms/structs/rearrange.py +3 -3
  37. swarms/structs/sequential_workflow.py +3 -3
  38. swarms/structs/swarm_matcher.py +499 -408
  39. swarms/structs/swarm_router.py +15 -97
  40. swarms/structs/swarming_architectures.py +1 -1
  41. swarms/tools/mcp_client_call.py +3 -0
  42. swarms/utils/__init__.py +10 -2
  43. swarms/utils/check_all_model_max_tokens.py +43 -0
  44. swarms/utils/generate_keys.py +0 -27
  45. swarms/utils/history_output_formatter.py +5 -20
  46. swarms/utils/litellm_wrapper.py +208 -60
  47. swarms/utils/output_types.py +24 -0
  48. swarms/utils/vllm_wrapper.py +14 -13
  49. swarms/utils/xml_utils.py +37 -2
  50. {swarms-7.8.3.dist-info → swarms-7.8.7.dist-info}/METADATA +31 -55
  51. {swarms-7.8.3.dist-info → swarms-7.8.7.dist-info}/RECORD +55 -48
  52. swarms/structs/multi_agent_collab.py +0 -242
  53. swarms/structs/output_types.py +0 -6
  54. swarms/utils/markdown_message.py +0 -21
  55. swarms/utils/visualizer.py +0 -510
  56. swarms/utils/wrapper_clusterop.py +0 -127
  57. /swarms/{tools → schemas}/tool_schema_base_model.py +0 -0
  58. {swarms-7.8.3.dist-info → swarms-7.8.7.dist-info}/LICENSE +0 -0
  59. {swarms-7.8.3.dist-info → swarms-7.8.7.dist-info}/WHEEL +0 -0
  60. {swarms-7.8.3.dist-info → swarms-7.8.7.dist-info}/entry_points.txt +0 -0
@@ -1,17 +1,14 @@
-import asyncio
 import concurrent.futures
+import json
 import os
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from typing import Any, Dict, List, Tuple
+from typing import Any, List

-import aiohttp
 from dotenv import load_dotenv
 from rich.console import Console
+import requests

-from swarms.agents.reasoning_duo import ReasoningDuo
 from swarms.structs.agent import Agent
 from swarms.structs.conversation import Conversation
-from swarms.utils.any_to_str import any_to_str
 from swarms.utils.formatter import formatter
 from swarms.utils.history_output_formatter import (
     history_output_formatter,
@@ -31,37 +28,73 @@ MAX_WORKERS = (
 ###############################################################################


-def format_exa_results(json_data: Dict[str, Any]) -> str:
-    """Formats Exa.ai search results into structured text"""
+def exa_search(query: str, **kwargs: Any) -> str:
+    """Performs web search using Exa.ai API and returns formatted results."""
+    api_url = "https://api.exa.ai/search"
+    api_key = os.getenv("EXA_API_KEY")
+
+    if not api_key:
+        return "### Error\nEXA_API_KEY environment variable not set\n"
+
+    headers = {
+        "x-api-key": api_key,
+        "Content-Type": "application/json",
+    }
+
+    safe_kwargs = {
+        str(k): v
+        for k, v in kwargs.items()
+        if k is not None and v is not None and str(k) != "None"
+    }
+
+    payload = {
+        "query": query,
+        "useAutoprompt": True,
+        "numResults": safe_kwargs.get("num_results", 10),
+        "contents": {
+            "text": True,
+            "highlights": {"numSentences": 10},
+        },
+    }
+
+    for key, value in safe_kwargs.items():
+        if key not in payload and key not in [
+            "query",
+            "useAutoprompt",
+            "numResults",
+            "contents",
+        ]:
+            payload[key] = value
+
+    try:
+        response = requests.post(
+            api_url, json=payload, headers=headers
+        )
+        if response.status_code != 200:
+            return f"### Error\nHTTP {response.status_code}: {response.text}\n"
+        json_data = response.json()
+    except Exception as e:
+        return f"### Error\n{str(e)}\n"
+
     if "error" in json_data:
         return f"### Error\n{json_data['error']}\n"

-    # Pre-allocate formatted_text list with initial capacity
     formatted_text = []
-
-    # Extract search metadata
     search_params = json_data.get("effectiveFilters", {})
     query = search_params.get("query", "General web search")
     formatted_text.append(
         f"### Exa Search Results for: '{query}'\n\n---\n"
     )

-    # Process results
     results = json_data.get("results", [])
-
     if not results:
         formatted_text.append("No results found.\n")
         return "".join(formatted_text)

-    def process_result(
-        result: Dict[str, Any], index: int
-    ) -> List[str]:
-        """Process a single result in a thread-safe manner"""
+    for i, result in enumerate(results, 1):
         title = result.get("title", "No title")
         url = result.get("url", result.get("id", "No URL"))
         published_date = result.get("publishedDate", "")
-
-        # Handle highlights efficiently
         highlights = result.get("highlights", [])
         highlight_text = (
             "\n".join(
@@ -76,100 +109,18 @@ def format_exa_results(json_data: Dict[str, Any]) -> str:
             else "No summary available"
         )

-        return [
-            f"{index}. **{title}**\n",
-            f" - URL: {url}\n",
-            f" - Published: {published_date.split('T')[0] if published_date else 'Date unknown'}\n",
-            f" - Key Points:\n {highlight_text}\n\n",
-        ]
-
-    # Process results concurrently
-    with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
-        future_to_result = {
-            executor.submit(process_result, result, i + 1): i
-            for i, result in enumerate(results)
-        }
-
-        # Collect results in order
-        processed_results = [None] * len(results)
-        for future in as_completed(future_to_result):
-            idx = future_to_result[future]
-            try:
-                processed_results[idx] = future.result()
-            except Exception as e:
-                console.print(
-                    f"[bold red]Error processing result {idx + 1}: {str(e)}[/bold red]"
-                )
-                processed_results[idx] = [
-                    f"Error processing result {idx + 1}: {str(e)}\n"
-                ]
-
-    # Extend formatted text with processed results in correct order
-    for result_text in processed_results:
-        formatted_text.extend(result_text)
+        formatted_text.extend(
+            [
+                f"{i}. **{title}**\n",
+                f" - URL: {url}\n",
+                f" - Published: {published_date.split('T')[0] if published_date else 'Date unknown'}\n",
+                f" - Key Points:\n {highlight_text}\n\n",
+            ]
+        )

     return "".join(formatted_text)


-async def _async_exa_search(
-    query: str, **kwargs: Any
-) -> Dict[str, Any]:
-    """Asynchronous helper function for Exa.ai API requests"""
-    api_url = "https://api.exa.ai/search"
-    headers = {
-        "x-api-key": os.getenv("EXA_API_KEY"),
-        "Content-Type": "application/json",
-    }
-
-    payload = {
-        "query": query,
-        "useAutoprompt": True,
-        "numResults": kwargs.get("num_results", 10),
-        "contents": {
-            "text": True,
-            "highlights": {"numSentences": 2},
-        },
-        **kwargs,
-    }
-
-    try:
-        async with aiohttp.ClientSession() as session:
-            async with session.post(
-                api_url, json=payload, headers=headers
-            ) as response:
-                if response.status != 200:
-                    return {
-                        "error": f"HTTP {response.status}: {await response.text()}"
-                    }
-                return await response.json()
-    except Exception as e:
-        return {"error": str(e)}
-
-
-def exa_search(query: str, **kwargs: Any) -> str:
-    """Performs web search using Exa.ai API with concurrent processing"""
-    try:
-        # Run async search in the event loop
-        loop = asyncio.new_event_loop()
-        asyncio.set_event_loop(loop)
-        try:
-            response_json = loop.run_until_complete(
-                _async_exa_search(query, **kwargs)
-            )
-        finally:
-            loop.close()
-
-        # Format results concurrently
-        formatted_text = format_exa_results(response_json)
-
-        return formatted_text
-
-    except Exception as e:
-        error_msg = f"Unexpected error: {str(e)}"
-        console.print(f"[bold red]{error_msg}[/bold red]")
-        return error_msg
-
-
 # Define the research tools schema
 tools = [
     {
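
For reference, the rewritten `exa_search` is a plain synchronous call built on `requests`, so it can be exercised directly once `EXA_API_KEY` is set. A minimal sketch, assuming the function stays importable from the `swarms.structs.deep_research_swarm` module shown in the file list; the query text and `num_results` value are illustrative:

```python
# Minimal sketch: calling the new synchronous exa_search directly.
# Assumes EXA_API_KEY is exported; query and num_results are illustrative.
import os

from swarms.structs.deep_research_swarm import exa_search

if os.getenv("EXA_API_KEY"):
    # num_results is forwarded into the payload as "numResults" (default 10)
    report = exa_search("state of open-source agent frameworks", num_results=3)
    print(report)  # markdown-style "### Exa Search Results for: ..." text
else:
    print("Set EXA_API_KEY to run this example")
```
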
@@ -283,6 +234,7 @@ class DeepResearchSwarm:
         * 2,  # Let the system decide optimal thread count
         token_count: bool = False,
         research_model_name: str = "gpt-4o-mini",
+        claude_summarization_model_name: str = "claude-3-5-sonnet-20240620",
     ):
         self.name = name
         self.description = description
@@ -291,6 +243,9 @@ class DeepResearchSwarm:
         self.output_type = output_type
         self.max_workers = max_workers
         self.research_model_name = research_model_name
+        self.claude_summarization_model_name = (
+            claude_summarization_model_name
+        )

         self.reliability_check()
         self.conversation = Conversation(token_count=token_count)
@@ -308,12 +263,17 @@ class DeepResearchSwarm:
             system_prompt=RESEARCH_AGENT_PROMPT,
             max_loops=1,  # Allow multiple iterations for thorough research
             tools_list_dictionary=tools,
-            model_name="gpt-4o-mini",
+            model_name=self.research_model_name,
+            output_type="final",
         )

-        self.reasoning_duo = ReasoningDuo(
+        self.summarization_agent = Agent(
+            agent_name="Summarization-Agent",
+            agent_description="Specialized agent for summarizing research results",
             system_prompt=SUMMARIZATION_AGENT_PROMPT,
-            output_type="string",
+            max_loops=1,
+            model_name=self.claude_summarization_model_name,
+            output_type="final",
         )

     def __del__(self):
@@ -345,49 +305,49 @@ class DeepResearchSwarm:
         # Get the agent's response
         agent_output = self.research_agent.run(query)

-        self.conversation.add(
-            role=self.research_agent.agent_name, content=agent_output
-        )
+        # Transform the string into a list of dictionaries
+        agent_output = json.loads(agent_output)
+        print(agent_output)
+        print(type(agent_output))

-        # Convert the string output to dictionary
-        output_dict = str_to_dict(agent_output)
+        formatter.print_panel(
+            f"Agent output type: {type(agent_output)} \n {agent_output}",
+            "blue",
+        )

-        # Print the conversation history
-        if self.nice_print:
-            to_do_list = any_to_str(output_dict)
-            formatter.print_panel(to_do_list, "blue")
+        # Convert the output to a dictionary if it's a list
+        if isinstance(agent_output, list):
+            agent_output = json.dumps(agent_output)

-        # Extract the detailed queries from the output
-        if (
-            isinstance(output_dict, dict)
-            and "detailed_queries" in output_dict
-        ):
-            queries = output_dict["detailed_queries"]
-            formatter.print_panel(
-                f"Generated {len(queries)} queries", "blue"
+        if isinstance(agent_output, str):
+            # Convert the string output to dictionary
+            output_dict = (
+                str_to_dict(agent_output)
+                if isinstance(agent_output, str)
+                else agent_output
             )
-            return queries
-
-        return []
-
-    def _process_query(self, query: str) -> Tuple[str, str]:
-        """
-        Process a single query with search and reasoning.
-        This function is designed to be run in a separate thread.
-
-        Args:
-            query (str): The query to process

-        Returns:
-            Tuple[str, str]: A tuple containing (search_results, reasoning_output)
-        """
-        # Run the search
-        results = exa_search(query)
+            # Extract the detailed queries from the output
+            # Search for the key "detailed_queries" in the output list[dictionary]
+            if isinstance(output_dict, list):
+                for item in output_dict:
+                    if "detailed_queries" in item:
+                        queries = item["detailed_queries"]
+                        break
+            else:
+                queries = output_dict.get("detailed_queries", [])
+
+            print(queries)
+
+            # Log the number of queries generated
+            formatter.print_panel(
+                f"Generated {len(queries)} queries", "blue"
+            )

-        # Run the reasoning on the search results
-        reasoning_output = self.reasoning_duo.run(results)
+        print(queries)
+        print(type(queries))

-        return (results, reasoning_output)
+        return queries

     def step(self, query: str):
         """
@@ -399,54 +359,91 @@ class DeepResearchSwarm:
         Returns:
             Formatted conversation history
         """
-        # Get all the queries to process
-        queries = self.get_queries(query)
-
-        # Submit all queries for concurrent processing
-        # Using a list instead of generator for clearer debugging
-        futures = []
-        for q in queries:
-            future = self.executor.submit(self._process_query, q)
-            futures.append((q, future))
-
-        # Process results as they complete (no waiting for slower queries)
-        for q, future in futures:
+        try:
+            # Get all the queries to process
+            queries = self.get_queries(query)
+
+            print(queries)
+
+            # Submit all queries for concurrent processing
+            futures = []
+            for q in queries:
+                future = self.executor.submit(exa_search, q)
+                futures.append((q, future))
+
+            # Process results as they complete
+            for q, future in futures:
+                try:
+                    # Get search results only
+                    results = future.result()
+
+                    # Add search results to conversation
+                    self.conversation.add(
+                        role="User",
+                        content=f"Search results for {q}: \n {results}",
+                    )
+
+                except Exception as e:
+                    # Handle any errors in the thread
+                    error_msg = (
+                        f"Error processing query '{q}': {str(e)}"
+                    )
+                    console.print(f"[bold red]{error_msg}[/bold red]")
+                    self.conversation.add(
+                        role="System",
+                        content=error_msg,
+                    )
+
+            # Generate final comprehensive analysis after all searches are complete
             try:
-                # Get results (blocks until this specific future is done)
-                results, reasoning_output = future.result()
-
-                # Add search results to conversation
-                self.conversation.add(
-                    role="User",
-                    content=f"Search results for {q}: \n {results}",
+                final_summary = self.summarization_agent.run(
+                    f"Please generate a comprehensive 4,000-word report analyzing the following content: {self.conversation.get_str()}"
                 )

-                # Add reasoning output to conversation
                 self.conversation.add(
-                    role=self.reasoning_duo.agent_name,
-                    content=reasoning_output,
+                    role=self.summarization_agent.agent_name,
+                    content=final_summary,
                 )
             except Exception as e:
-                # Handle any errors in the thread
+                error_msg = (
+                    f"Error generating final summary: {str(e)}"
+                )
+                console.print(f"[bold red]{error_msg}[/bold red]")
                 self.conversation.add(
                     role="System",
-                    content=f"Error processing query '{q}': {str(e)}",
+                    content=error_msg,
                 )

-        # Once all query processing is complete, generate the final summary
-        # This step runs after all queries to ensure it summarizes all results
-        final_summary = self.reasoning_duo.run(
-            f"Generate an extensive report of the following content: {self.conversation.get_str()}"
-        )
-
-        self.conversation.add(
-            role=self.reasoning_duo.agent_name,
-            content=final_summary,
-        )
+            # Return formatted output
+            result = history_output_formatter(
+                self.conversation, type=self.output_type
+            )

-        return history_output_formatter(
-            self.conversation, type=self.output_type
-        )
+            # If output type is JSON, ensure it's properly formatted
+            if self.output_type.lower() == "json":
+                try:
+                    import json
+
+                    if isinstance(result, str):
+                        # Try to parse and reformat for pretty printing
+                        parsed = json.loads(result)
+                        return json.dumps(
+                            parsed, indent=2, ensure_ascii=False
+                        )
+                except (json.JSONDecodeError, TypeError):
+                    # If parsing fails, return as-is
+                    pass
+
+            return result
+
+        except Exception as e:
+            error_msg = f"Critical error in step execution: {str(e)}"
+            console.print(f"[bold red]{error_msg}[/bold red]")
+            return (
+                {"error": error_msg}
+                if self.output_type.lower() == "json"
+                else error_msg
+            )

     def run(self, task: str):
         return self.step(task)
@@ -467,13 +464,20 @@ class DeepResearchSwarm:
             futures.append((task, future))


-# # Example usage
+# Example usage
 # if __name__ == "__main__":
-#     swarm = DeepResearchSwarm(
-#         output_type="json",
-#     )
-#     print(
-#         swarm.step(
-#             "What is the active tarrif situation with mexico? Only create 2 queries"
+#     try:
+#         swarm = DeepResearchSwarm(
+#             output_type="json",
+#         )
+#         result = swarm.step(
+#             "What is the active tariff situation with mexico? Only create 2 queries"
 #         )
-#     )
+
+#         # Parse and display results in rich format with markdown export
+#         swarm.parse_and_display_results(result, export_markdown=True)
+
+#     except Exception as e:
+#         print(f"Error running deep research swarm: {str(e)}")
+#         import traceback
+#         traceback.print_exc()
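
A hedged, uncommented version of the example above, showing the two model-name knobs this release introduces. The constructor arguments and method names come from the diff; the model choices and task string are illustrative, and the call needs `EXA_API_KEY` plus credentials for the chosen models:

```python
# Hedged sketch of driving the reworked swarm; requires EXA_API_KEY and
# API credentials for both models. Model names and task text are illustrative.
from swarms.structs.deep_research_swarm import DeepResearchSwarm

swarm = DeepResearchSwarm(
    output_type="json",
    research_model_name="gpt-4o-mini",  # generates the detailed queries
    claude_summarization_model_name="claude-3-5-sonnet-20240620",  # writes the final report
)

result = swarm.run(
    "What is the active tariff situation with Mexico? Only create 2 queries"
)
print(result)  # pretty-printed JSON when output_type == "json"
```
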
@@ -3,6 +3,7 @@ import random
 from swarms.structs.agent import Agent
 from typing import List
 from swarms.structs.conversation import Conversation
+from swarms.structs.ma_blocks import find_agent_by_name
 from swarms.utils.history_output_formatter import (
     history_output_formatter,
 )
@@ -84,14 +85,24 @@ class DynamicConversationalSwarm:
         except json.JSONDecodeError:
             raise ValueError("Invalid JSON string")

-    def find_agent_by_name(self, agent_name: str) -> Agent:
-        for agent in self.agents:
-            if agent.name == agent_name:
-                return agent
-        raise ValueError(f"Agent with name {agent_name} not found")
-
     def run_agent(self, agent_name: str, task: str) -> str:
-        agent = self.find_agent_by_name(agent_name)
+        """
+        Run a specific agent with a given task.
+
+        Args:
+            agent_name (str): The name of the agent to run
+            task (str): The task to execute
+
+        Returns:
+            str: The agent's response to the task
+
+        Raises:
+            ValueError: If agent is not found
+            RuntimeError: If there's an error running the agent
+        """
+        agent = find_agent_by_name(
+            agents=self.agents, agent_name=agent_name
+        )
         return agent.run(task)

     def fetch_random_agent_name(self) -> str:
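
The method-local lookup is replaced by the shared helper from the new `swarms/structs/ma_blocks.py`. A hedged sketch of the call pattern, using the keyword signature shown above; the agent settings are illustrative, and the helper is assumed to match on the agent's name:

```python
# Hedged sketch: the shared lookup helper takes the agent list explicitly.
# Agent settings below are illustrative, not taken from the package.
from swarms.structs.agent import Agent
from swarms.structs.ma_blocks import find_agent_by_name

agents = [
    Agent(agent_name="Researcher", model_name="gpt-4o-mini", max_loops=1),
    Agent(agent_name="Writer", model_name="gpt-4o-mini", max_loops=1),
]

writer = find_agent_by_name(agents=agents, agent_name="Writer")
print(writer.agent_name)  # expected: "Writer", assuming a name-based match
```
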
@@ -7,7 +7,7 @@ from pydantic import BaseModel, Field
 from swarms.structs.agent import Agent
 from swarms.structs.base_swarm import BaseSwarm
 from swarms.structs.conversation import Conversation
-from swarms.structs.output_types import OutputType
+from swarms.utils.output_types import OutputType
 from swarms.utils.any_to_str import any_to_str
 from swarms.utils.formatter import formatter

@@ -1,5 +1,5 @@
 import os
-from typing import List, Literal
+from typing import List
 from swarms.structs.agent import Agent
 from swarms.structs.conversation import Conversation
 from swarms.structs.multi_agent_exec import get_swarms_info
@@ -9,23 +9,7 @@ from swarms.utils.history_output_formatter import (
 )
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from typing import Union, Callable
-
-
-HistoryOutputType = Literal[
-    "list",
-    "dict",
-    "dictionary",
-    "string",
-    "str",
-    "final",
-    "last",
-    "json",
-    "all",
-    "yaml",
-    # "dict-final",
-    "dict-all-except-first",
-    "str-all-except-first",
-]
+from swarms.utils.history_output_formatter import HistoryOutputType

 tools = [
     {
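
The local `Literal` alias is dropped in favor of the definition that already lives in `swarms.utils.history_output_formatter`, so callers now take the type from one place. A minimal sketch of the new import; the annotated function below is illustrative, and the listed values come from the removed alias:

```python
# Minimal sketch: HistoryOutputType is now imported from its canonical home
# instead of being redefined locally. The function below is illustrative.
from swarms.utils.history_output_formatter import HistoryOutputType


def summarize(history: list, output_type: HistoryOutputType = "final") -> str:
    # "final", "json", "yaml", "dict-all-except-first", ... were among the allowed values
    return f"formatting history as {output_type}"
```
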