swarms 7.8.3-py3-none-any.whl → 7.8.7-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
Files changed (60)
  1. swarms/agents/ape_agent.py +5 -22
  2. swarms/agents/consistency_agent.py +1 -1
  3. swarms/agents/i_agent.py +1 -1
  4. swarms/agents/reasoning_agents.py +99 -3
  5. swarms/agents/reasoning_duo.py +1 -1
  6. swarms/cli/main.py +1 -1
  7. swarms/communication/__init__.py +1 -0
  8. swarms/communication/duckdb_wrap.py +32 -2
  9. swarms/communication/pulsar_struct.py +45 -19
  10. swarms/communication/redis_wrap.py +56 -11
  11. swarms/communication/supabase_wrap.py +1659 -0
  12. swarms/prompts/agent_conversation_aggregator.py +38 -0
  13. swarms/prompts/prompt.py +0 -3
  14. swarms/schemas/agent_completion_response.py +71 -0
  15. swarms/schemas/agent_rag_schema.py +7 -0
  16. swarms/schemas/conversation_schema.py +9 -0
  17. swarms/schemas/llm_agent_schema.py +99 -81
  18. swarms/schemas/swarms_api_schemas.py +164 -0
  19. swarms/structs/__init__.py +15 -9
  20. swarms/structs/agent.py +219 -199
  21. swarms/structs/agent_rag_handler.py +685 -0
  22. swarms/structs/base_swarm.py +2 -1
  23. swarms/structs/conversation.py +832 -264
  24. swarms/structs/csv_to_agent.py +153 -100
  25. swarms/structs/deep_research_swarm.py +197 -193
  26. swarms/structs/dynamic_conversational_swarm.py +18 -7
  27. swarms/structs/hiearchical_swarm.py +1 -1
  28. swarms/structs/hybrid_hiearchical_peer_swarm.py +2 -18
  29. swarms/structs/image_batch_processor.py +261 -0
  30. swarms/structs/interactive_groupchat.py +356 -0
  31. swarms/structs/ma_blocks.py +159 -0
  32. swarms/structs/majority_voting.py +1 -1
  33. swarms/structs/mixture_of_agents.py +1 -1
  34. swarms/structs/multi_agent_exec.py +25 -26
  35. swarms/structs/multi_agent_router.py +3 -2
  36. swarms/structs/rearrange.py +3 -3
  37. swarms/structs/sequential_workflow.py +3 -3
  38. swarms/structs/swarm_matcher.py +499 -408
  39. swarms/structs/swarm_router.py +15 -97
  40. swarms/structs/swarming_architectures.py +1 -1
  41. swarms/tools/mcp_client_call.py +3 -0
  42. swarms/utils/__init__.py +10 -2
  43. swarms/utils/check_all_model_max_tokens.py +43 -0
  44. swarms/utils/generate_keys.py +0 -27
  45. swarms/utils/history_output_formatter.py +5 -20
  46. swarms/utils/litellm_wrapper.py +208 -60
  47. swarms/utils/output_types.py +24 -0
  48. swarms/utils/vllm_wrapper.py +14 -13
  49. swarms/utils/xml_utils.py +37 -2
  50. {swarms-7.8.3.dist-info → swarms-7.8.7.dist-info}/METADATA +31 -55
  51. {swarms-7.8.3.dist-info → swarms-7.8.7.dist-info}/RECORD +55 -48
  52. swarms/structs/multi_agent_collab.py +0 -242
  53. swarms/structs/output_types.py +0 -6
  54. swarms/utils/markdown_message.py +0 -21
  55. swarms/utils/visualizer.py +0 -510
  56. swarms/utils/wrapper_clusterop.py +0 -127
  57. /swarms/{tools → schemas}/tool_schema_base_model.py +0 -0
  58. {swarms-7.8.3.dist-info → swarms-7.8.7.dist-info}/LICENSE +0 -0
  59. {swarms-7.8.3.dist-info → swarms-7.8.7.dist-info}/WHEEL +0 -0
  60. {swarms-7.8.3.dist-info → swarms-7.8.7.dist-info}/entry_points.txt +0 -0
swarms/prompts/agent_conversation_aggregator.py ADDED
@@ -0,0 +1,38 @@
+ AGGREGATOR_SYSTEM_PROMPT = """You are a highly skilled Aggregator Agent responsible for analyzing, synthesizing, and summarizing conversations between multiple AI agents. Your primary goal is to distill complex multi-agent interactions into clear, actionable insights.
+
+ Key Responsibilities:
+ 1. Conversation Analysis:
+ - Identify the main topics and themes discussed
+ - Track the progression of ideas and problem-solving approaches
+ - Recognize key decisions and turning points in the conversation
+ - Note any conflicts, agreements, or important conclusions reached
+
+ 2. Agent Contribution Assessment:
+ - Evaluate each agent's unique contributions to the discussion
+ - Highlight complementary perspectives and insights
+ - Identify any knowledge gaps or areas requiring further exploration
+ - Recognize patterns in agent interactions and collaborative dynamics
+
+ 3. Summary Generation Guidelines:
+ - Begin with a high-level overview of the conversation's purpose and outcome
+ - Structure the summary in a logical, hierarchical manner
+ - Prioritize critical information while maintaining context
+ - Include specific examples or quotes when they significantly impact understanding
+ - Maintain objectivity while synthesizing different viewpoints
+ - Highlight actionable insights and next steps if applicable
+
+ 4. Quality Standards:
+ - Ensure accuracy in representing each agent's contributions
+ - Maintain clarity and conciseness without oversimplifying
+ - Use consistent terminology throughout the summary
+ - Preserve important technical details and domain-specific language
+ - Flag any uncertainties or areas needing clarification
+
+ 5. Output Format:
+ - Present information in a structured, easy-to-read format
+ - Use bullet points or sections for better readability when appropriate
+ - Include a brief conclusion or recommendation section if relevant
+ - Maintain professional and neutral tone throughout
+
+ Remember: Your role is crucial in making complex multi-agent discussions accessible and actionable. Focus on extracting value from the conversation while maintaining the integrity of each agent's contributions.
+ """
swarms/prompts/prompt.py CHANGED
@@ -99,9 +99,6 @@ class Prompt(BaseModel):
  if self.autosave:
  self._autosave()

- if self.auto_generate_prompt and self.llm:
- self.auto_generate_prompt()
-
  def edit_prompt(self, new_content: str) -> None:
  """
  Edits the prompt content and updates the version control.
swarms/schemas/agent_completion_response.py ADDED
@@ -0,0 +1,71 @@
+ from datetime import datetime
+ from typing import Any, List, Optional
+
+ from pydantic import BaseModel, Field
+
+
+ class Usage(BaseModel):
+     prompt_tokens: Optional[int] = Field(
+         default=None,
+         description="Number of tokens used in the prompt",
+     )
+     completion_tokens: Optional[int] = Field(
+         default=None,
+         description="Number of tokens used in the completion",
+     )
+     total_tokens: Optional[int] = Field(
+         default=None, description="Total number of tokens used"
+     )
+
+
+ class ModelConfig(BaseModel):
+     model_name: Optional[str] = Field(
+         default=None,
+         description="Name of the model used for generation",
+     )
+     temperature: Optional[float] = Field(
+         default=None,
+         description="Temperature setting used for generation",
+     )
+     top_p: Optional[float] = Field(
+         default=None, description="Top-p setting used for generation"
+     )
+     max_tokens: Optional[int] = Field(
+         default=None,
+         description="Maximum number of tokens to generate",
+     )
+     frequency_penalty: Optional[float] = Field(
+         default=None,
+         description="Frequency penalty used for generation",
+     )
+     presence_penalty: Optional[float] = Field(
+         default=None,
+         description="Presence penalty used for generation",
+     )
+
+
+ class AgentCompletionResponse(BaseModel):
+     id: Optional[str] = Field(
+         default=None, description="Unique identifier for the response"
+     )
+     agent_name: Optional[str] = Field(
+         default=None,
+         description="Name of the agent that generated the response",
+     )
+     agent_description: Optional[str] = Field(
+         default=None, description="Description of the agent"
+     )
+     outputs: Optional[List[Any]] = Field(
+         default=None,
+         description="List of outputs generated by the agent",
+     )
+     usage: Optional[Usage] = Field(
+         default=None, description="Token usage statistics"
+     )
+     model_config: Optional[ModelConfig] = Field(
+         default=None, description="Model configuration"
+     )
+     timestamp: Optional[str] = Field(
+         default_factory=lambda: datetime.now().isoformat(),
+         description="Timestamp of when the response was generated",
+     )
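Note: a hedged sketch of populating the new response schema. The field names come straight from the hunk above; model_dump assumes Pydantic v2 (under v1 it would be .dict()), and the concrete values are illustrative.

from swarms.schemas.agent_completion_response import (
    AgentCompletionResponse,
    Usage,
)

# Every field is Optional, so a partial record validates; timestamp is
# filled automatically by its default_factory.
response = AgentCompletionResponse(
    id="resp-001",
    agent_name="aggregator",
    outputs=["Summary of the multi-agent discussion ..."],
    usage=Usage(prompt_tokens=120, completion_tokens=64, total_tokens=184),
)
print(response.model_dump())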
swarms/schemas/agent_rag_schema.py ADDED
@@ -0,0 +1,7 @@
+ from pydantic import BaseModel
+
+
+ class AgentRAGConfig(BaseModel):
+     """
+     Configuration for the AgentRAG class.
+     """
swarms/schemas/conversation_schema.py ADDED
@@ -0,0 +1,9 @@
+ from typing import Optional
+ from pydantic import BaseModel, Field
+
+
+ class ConversationSchema(BaseModel):
+     time_enabled: Optional[bool] = Field(default=False)
+     message_id_on: Optional[bool] = Field(default=True)
+     autosave: Optional[bool] = Field(default=False)
+     count_tokens: Optional[bool] = Field(default=False)
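Note: a quick sketch of overriding the defaults above. Where Conversation consumes this schema is not shown in this hunk, so only the model itself is exercised; model_dump assumes Pydantic v2.

from swarms.schemas.conversation_schema import ConversationSchema

# Pass only the flags being changed; the rest keep the defaults declared above.
config = ConversationSchema(time_enabled=True, count_tokens=True)
print(config.model_dump())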
swarms/schemas/llm_agent_schema.py CHANGED
@@ -1,91 +1,109 @@
  from pydantic import BaseModel, Field
- from typing import List, Optional, Union, Any, Literal
- from litellm.types import (
- ChatCompletionPredictionContentParam,
- )
+ from typing import Optional

+ # from litellm.types import (
+ # ChatCompletionPredictionContentParam,
+ # )

- class LLMCompletionRequest(BaseModel):
- """Schema for LLM completion request parameters."""

- model: Optional[str] = Field(
- default=None,
- description="The name of the language model to use for text completion",
- )
- temperature: Optional[float] = Field(
- default=0.5,
- description="Controls randomness of the output (0.0 to 1.0)",
- )
- top_p: Optional[float] = Field(
- default=None,
- description="Controls diversity via nucleus sampling",
- )
- n: Optional[int] = Field(
- default=None, description="Number of completions to generate"
- )
- stream: Optional[bool] = Field(
- default=None, description="Whether to stream the response"
- )
- stream_options: Optional[dict] = Field(
- default=None, description="Options for streaming response"
- )
- stop: Optional[Any] = Field(
- default=None,
- description="Up to 4 sequences where the API will stop generating",
- )
- max_completion_tokens: Optional[int] = Field(
- default=None,
- description="Maximum tokens for completion including reasoning",
- )
- max_tokens: Optional[int] = Field(
- default=None,
- description="Maximum tokens in generated completion",
- )
- prediction: Optional[ChatCompletionPredictionContentParam] = (
- Field(
- default=None,
- description="Configuration for predicted output",
- )
- )
- presence_penalty: Optional[float] = Field(
- default=None,
- description="Penalizes new tokens based on existence in text",
- )
- frequency_penalty: Optional[float] = Field(
- default=None,
- description="Penalizes new tokens based on frequency in text",
- )
- logit_bias: Optional[dict] = Field(
- default=None,
- description="Modifies probability of specific tokens",
- )
- reasoning_effort: Optional[Literal["low", "medium", "high"]] = (
- Field(
- default=None,
- description="Level of reasoning effort for the model",
- )
- )
- seed: Optional[int] = Field(
- default=None, description="Random seed for reproducibility"
- )
- tools: Optional[List] = Field(
- default=None,
- description="List of tools available to the model",
- )
- tool_choice: Optional[Union[str, dict]] = Field(
- default=None, description="Choice of tool to use"
- )
- logprobs: Optional[bool] = Field(
- default=None,
- description="Whether to return log probabilities",
- )
- top_logprobs: Optional[int] = Field(
+ # class LLMCompletionRequest(BaseModel):
+ # """Schema for LLM completion request parameters."""
+
+ # model: Optional[str] = Field(
+ # default=None,
+ # description="The name of the language model to use for text completion",
+ # )
+ # temperature: Optional[float] = Field(
+ # default=0.5,
+ # description="Controls randomness of the output (0.0 to 1.0)",
+ # )
+ # top_p: Optional[float] = Field(
+ # default=None,
+ # description="Controls diversity via nucleus sampling",
+ # )
+ # n: Optional[int] = Field(
+ # default=None, description="Number of completions to generate"
+ # )
+ # stream: Optional[bool] = Field(
+ # default=None, description="Whether to stream the response"
+ # )
+ # stream_options: Optional[dict] = Field(
+ # default=None, description="Options for streaming response"
+ # )
+ # stop: Optional[Any] = Field(
+ # default=None,
+ # description="Up to 4 sequences where the API will stop generating",
+ # )
+ # max_completion_tokens: Optional[int] = Field(
+ # default=None,
+ # description="Maximum tokens for completion including reasoning",
+ # )
+ # max_tokens: Optional[int] = Field(
+ # default=None,
+ # description="Maximum tokens in generated completion",
+ # )
+ # prediction: Optional[ChatCompletionPredictionContentParam] = (
+ # Field(
+ # default=None,
+ # description="Configuration for predicted output",
+ # )
+ # )
+ # presence_penalty: Optional[float] = Field(
+ # default=None,
+ # description="Penalizes new tokens based on existence in text",
+ # )
+ # frequency_penalty: Optional[float] = Field(
+ # default=None,
+ # description="Penalizes new tokens based on frequency in text",
+ # )
+ # logit_bias: Optional[dict] = Field(
+ # default=None,
+ # description="Modifies probability of specific tokens",
+ # )
+ # reasoning_effort: Optional[Literal["low", "medium", "high"]] = (
+ # Field(
+ # default=None,
+ # description="Level of reasoning effort for the model",
+ # )
+ # )
+ # seed: Optional[int] = Field(
+ # default=None, description="Random seed for reproducibility"
+ # )
+ # tools: Optional[List] = Field(
+ # default=None,
+ # description="List of tools available to the model",
+ # )
+ # tool_choice: Optional[Union[str, dict]] = Field(
+ # default=None, description="Choice of tool to use"
+ # )
+ # logprobs: Optional[bool] = Field(
+ # default=None,
+ # description="Whether to return log probabilities",
+ # )
+ # top_logprobs: Optional[int] = Field(
+ # default=None,
+ # description="Number of most likely tokens to return",
+ # )
+ # parallel_tool_calls: Optional[bool] = Field(
+ # default=None,
+ # description="Whether to allow parallel tool calls",
+ # )
+
+ # class Config:
+ # allow_arbitrary_types = True
+
+
+ class ModelConfigOrigin(BaseModel):
+ """Schema for model configuration origin."""
+
+ model_url: Optional[str] = Field(
  default=None,
- description="Number of most likely tokens to return",
+ description="The URL of the model to use for text completion",
  )
- parallel_tool_calls: Optional[bool] = Field(
+
+ api_key: Optional[str] = Field(
  default=None,
- description="Whether to allow parallel tool calls",
+ description="The API key to use for the model",
  )

  class Config:
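Note: a short sketch of the replacement schema, ModelConfigOrigin, which points an agent at an explicit model endpoint. The localhost URL and the MODEL_API_KEY environment-variable name are illustrative assumptions; model_dump assumes Pydantic v2.

import os

from swarms.schemas.llm_agent_schema import ModelConfigOrigin

# Describe a self-hosted or proxied model endpoint; reading the key from the
# environment keeps credentials out of source control.
origin = ModelConfigOrigin(
    model_url="http://localhost:8000/v1",
    api_key=os.getenv("MODEL_API_KEY"),
)
print(origin.model_dump())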
swarms/schemas/swarms_api_schemas.py ADDED
@@ -0,0 +1,164 @@
+ from pydantic import BaseModel, Field
+ from typing import Optional, List, Dict, Any, Union, Literal
+
+ SwarmType = Literal[
+     "AgentRearrange",
+     "MixtureOfAgents",
+     "SpreadSheetSwarm",
+     "SequentialWorkflow",
+     "ConcurrentWorkflow",
+     "GroupChat",
+     "MultiAgentRouter",
+     "AutoSwarmBuilder",
+     "HiearchicalSwarm",
+     "auto",
+     "MajorityVoting",
+     "MALT",
+     "DeepResearchSwarm",
+     "CouncilAsAJudge",
+     "InteractiveGroupChat",
+ ]
+
+
+ class AgentSpec(BaseModel):
+     agent_name: Optional[str] = Field(
+         # default=None,
+         description="The unique name assigned to the agent, which identifies its role and functionality within the swarm.",
+     )
+     description: Optional[str] = Field(
+         default=None,
+         description="A detailed explanation of the agent's purpose, capabilities, and any specific tasks it is designed to perform.",
+     )
+     system_prompt: Optional[str] = Field(
+         default=None,
+         description="The initial instruction or context provided to the agent, guiding its behavior and responses during execution.",
+     )
+     model_name: Optional[str] = Field(
+         default="gpt-4o-mini",
+         description="The name of the AI model that the agent will utilize for processing tasks and generating outputs. For example: gpt-4o, gpt-4o-mini, openai/o3-mini",
+     )
+     auto_generate_prompt: Optional[bool] = Field(
+         default=False,
+         description="A flag indicating whether the agent should automatically create prompts based on the task requirements.",
+     )
+     max_tokens: Optional[int] = Field(
+         default=8192,
+         description="The maximum number of tokens that the agent is allowed to generate in its responses, limiting output length.",
+     )
+     temperature: Optional[float] = Field(
+         default=0.5,
+         description="A parameter that controls the randomness of the agent's output; lower values result in more deterministic responses.",
+     )
+     role: Optional[str] = Field(
+         default="worker",
+         description="The designated role of the agent within the swarm, which influences its behavior and interaction with other agents.",
+     )
+     max_loops: Optional[int] = Field(
+         default=1,
+         description="The maximum number of times the agent is allowed to repeat its task, enabling iterative processing if necessary.",
+     )
+     tools_list_dictionary: Optional[List[Dict[Any, Any]]] = Field(
+         default=None,
+         description="A dictionary of tools that the agent can use to complete its task.",
+     )
+     mcp_url: Optional[str] = Field(
+         default=None,
+         description="The URL of the MCP server that the agent can use to complete its task.",
+     )
+
+     class Config:
+         arbitrary_types_allowed = True
+
+
+ class AgentCompletion(BaseModel):
+     agent_config: Optional[AgentSpec] = Field(
+         None,
+         description="The configuration of the agent to be completed.",
+     )
+     task: Optional[str] = Field(
+         None, description="The task to be completed by the agent."
+     )
+     history: Optional[Union[Dict[Any, Any], List[Dict[str, str]]]] = (
+         Field(
+             default=None,
+             description="The history of the agent's previous tasks and responses. Can be either a dictionary or a list of message objects.",
+         )
+     )
+
+     model_config = {
+         "arbitrary_types_allowed": True,
+         "populate_by_name": True,
+     }
+
+
+ class Agents(BaseModel):
+     """Configuration for a collection of agents that work together as a swarm to accomplish tasks."""
+
+     agents: List[AgentSpec] = Field(
+         description="A list containing the specifications of each agent that will participate in the swarm, detailing their roles and functionalities."
+     )
+
+
+ class SwarmSpec(BaseModel):
+     name: Optional[str] = Field(
+         None,
+         description="The name of the swarm, which serves as an identifier for the group of agents and their collective task.",
+         max_length=100,
+     )
+     description: Optional[str] = Field(
+         None,
+         description="A comprehensive description of the swarm's objectives, capabilities, and intended outcomes.",
+     )
+     agents: Optional[List[AgentSpec]] = Field(
+         None,
+         description="A list of agents or specifications that define the agents participating in the swarm.",
+     )
+     max_loops: Optional[int] = Field(
+         default=1,
+         description="The maximum number of execution loops allowed for the swarm, enabling repeated processing if needed.",
+     )
+     swarm_type: Optional[SwarmType] = Field(
+         None,
+         description="The classification of the swarm, indicating its operational style and methodology.",
+     )
+     rearrange_flow: Optional[str] = Field(
+         None,
+         description="Instructions on how to rearrange the flow of tasks among agents, if applicable.",
+     )
+     task: Optional[str] = Field(
+         None,
+         description="The specific task or objective that the swarm is designed to accomplish.",
+     )
+     img: Optional[str] = Field(
+         None,
+         description="An optional image URL that may be associated with the swarm's task or representation.",
+     )
+     return_history: Optional[bool] = Field(
+         True,
+         description="A flag indicating whether the swarm should return its execution history along with the final output.",
+     )
+     rules: Optional[str] = Field(
+         None,
+         description="Guidelines or constraints that govern the behavior and interactions of the agents within the swarm.",
+     )
+     tasks: Optional[List[str]] = Field(
+         None,
+         description="A list of tasks that the swarm should complete.",
+     )
+     messages: Optional[
+         Union[List[Dict[Any, Any]], Dict[Any, Any]]
+     ] = Field(
+         None,
+         description="A list of messages that the swarm should complete.",
+     )
+     stream: Optional[bool] = Field(
+         False,
+         description="A flag indicating whether the swarm should stream its output.",
+     )
+     service_tier: Optional[str] = Field(
+         "standard",
+         description="The service tier to use for processing. Options: 'standard' (default) or 'flex' for lower cost but slower processing.",
+     )
+
+     class Config:
+         arbitrary_types_allowed = True
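Note: to show how these request schemas compose, a hedged sketch that builds a SwarmSpec from two AgentSpec entries and serializes it for an API request body. The agent names and task are illustrative, and model_dump_json assumes Pydantic v2.

from swarms.schemas.swarms_api_schemas import AgentSpec, SwarmSpec

spec = SwarmSpec(
    name="research-swarm",
    swarm_type="SequentialWorkflow",  # must be one of the SwarmType literals
    task="Survey recent work on multi-agent planning.",
    agents=[
        AgentSpec(agent_name="researcher", model_name="gpt-4o-mini"),
        AgentSpec(agent_name="summarizer", temperature=0.3),
    ],
)
# Serialize to JSON, dropping unset/None fields, e.g. for an HTTP payload.
payload = spec.model_dump_json(exclude_none=True)
print(payload)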
swarms/structs/__init__.py CHANGED
@@ -1,10 +1,13 @@
  from swarms.structs.agent import Agent
  from swarms.structs.agent_builder import AgentsBuilder
+ from swarms.structs.auto_swarm_builder import AutoSwarmBuilder
  from swarms.structs.base_structure import BaseStructure
  from swarms.structs.base_swarm import BaseSwarm
  from swarms.structs.base_workflow import BaseWorkflow
+ from swarms.structs.batch_agent_execution import batch_agent_execution
  from swarms.structs.concurrent_workflow import ConcurrentWorkflow
  from swarms.structs.conversation import Conversation
+ from swarms.structs.council_judge import CouncilAsAJudge
  from swarms.structs.de_hallucination_swarm import DeHallucinationSwarm
  from swarms.structs.deep_research_swarm import DeepResearchSwarm
  from swarms.structs.graph_workflow import (
@@ -20,6 +23,11 @@ from swarms.structs.groupchat import (
  from swarms.structs.hybrid_hiearchical_peer_swarm import (
      HybridHierarchicalClusterSwarm,
  )
+ from swarms.structs.ma_blocks import (
+     aggregate,
+     find_agent_by_name,
+     run_agent,
+ )
  from swarms.structs.majority_voting import (
      MajorityVoting,
      majority_voting,
@@ -32,8 +40,9 @@ from swarms.structs.meme_agent_persona_generator import (
  )
  from swarms.structs.mixture_of_agents import MixtureOfAgents
  from swarms.structs.model_router import ModelRouter
- from swarms.structs.multi_agent_collab import MultiAgentCollaboration
  from swarms.structs.multi_agent_exec import (
+     get_agents_info,
+     get_swarms_info,
      run_agent_with_timeout,
      run_agents_concurrently,
      run_agents_concurrently_async,
@@ -43,8 +52,6 @@ from swarms.structs.multi_agent_exec import (
      run_agents_with_resource_monitoring,
      run_agents_with_tasks_concurrently,
      run_single_agent,
-     get_agents_info,
-     get_swarms_info,
  )
  from swarms.structs.multi_agent_router import MultiAgentRouter
  from swarms.structs.rearrange import AgentRearrange, rearrange
@@ -55,7 +62,6 @@ from swarms.structs.swarm_arange import SwarmRearrange
  from swarms.structs.swarm_router import (
      SwarmRouter,
      SwarmType,
-     swarm_router,
  )
  from swarms.structs.swarming_architectures import (
      broadcast,
@@ -77,9 +83,7 @@ from swarms.structs.swarming_architectures import (
      staircase_swarm,
      star_swarm,
  )
- from swarms.structs.auto_swarm_builder import AutoSwarmBuilder
- from swarms.structs.council_judge import CouncilAsAJudge
- from swarms.structs.batch_agent_execution import batch_agent_execution
+ from swarms.structs.interactive_groupchat import InteractiveGroupChat

  __all__ = [
      "Agent",
@@ -93,7 +97,6 @@ __all__ = [
      "majority_voting",
      "most_frequent",
      "parse_code_completion",
-     "MultiAgentCollaboration",
      "AgentRearrange",
      "rearrange",
      "RoundRobinSwarm",
@@ -133,7 +136,6 @@ __all__ = [
      "run_agents_with_different_tasks",
      "run_agent_with_timeout",
      "run_agents_with_resource_monitoring",
-     "swarm_router",
      "run_agents_with_tasks_concurrently",
      "GroupChat",
      "expertise_based",
@@ -150,4 +152,8 @@ __all__ = [
      "AutoSwarmBuilder",
      "CouncilAsAJudge",
      "batch_agent_execution",
+     "aggregate",
+     "find_agent_by_name",
+     "run_agent",
+     "InteractiveGroupChat",
  ]
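Note: the __all__ changes above shift the package's public surface. The sketch below only imports names that the new export list re-exports; the call signatures of the new helpers are not shown in this diff, so it stops at the imports.

# New exports in 7.8.7 (per the __all__ additions above).
from swarms.structs import (
    InteractiveGroupChat,
    aggregate,
    find_agent_by_name,
    run_agent,
)

# Dropped from the export list: "MultiAgentCollaboration" and "swarm_router";
# code importing those names from swarms.structs will need updating.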