fast-agent-mcp 0.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of fast-agent-mcp might be problematic. Click here for more details.

Files changed (100) hide show
  1. fast_agent_mcp-0.0.7.dist-info/METADATA +322 -0
  2. fast_agent_mcp-0.0.7.dist-info/RECORD +100 -0
  3. fast_agent_mcp-0.0.7.dist-info/WHEEL +4 -0
  4. fast_agent_mcp-0.0.7.dist-info/entry_points.txt +5 -0
  5. fast_agent_mcp-0.0.7.dist-info/licenses/LICENSE +201 -0
  6. mcp_agent/__init__.py +0 -0
  7. mcp_agent/agents/__init__.py +0 -0
  8. mcp_agent/agents/agent.py +277 -0
  9. mcp_agent/app.py +303 -0
  10. mcp_agent/cli/__init__.py +0 -0
  11. mcp_agent/cli/__main__.py +4 -0
  12. mcp_agent/cli/commands/bootstrap.py +221 -0
  13. mcp_agent/cli/commands/config.py +11 -0
  14. mcp_agent/cli/commands/setup.py +229 -0
  15. mcp_agent/cli/main.py +68 -0
  16. mcp_agent/cli/terminal.py +24 -0
  17. mcp_agent/config.py +334 -0
  18. mcp_agent/console.py +28 -0
  19. mcp_agent/context.py +251 -0
  20. mcp_agent/context_dependent.py +48 -0
  21. mcp_agent/core/fastagent.py +1013 -0
  22. mcp_agent/eval/__init__.py +0 -0
  23. mcp_agent/event_progress.py +88 -0
  24. mcp_agent/executor/__init__.py +0 -0
  25. mcp_agent/executor/decorator_registry.py +120 -0
  26. mcp_agent/executor/executor.py +293 -0
  27. mcp_agent/executor/task_registry.py +34 -0
  28. mcp_agent/executor/temporal.py +405 -0
  29. mcp_agent/executor/workflow.py +197 -0
  30. mcp_agent/executor/workflow_signal.py +325 -0
  31. mcp_agent/human_input/__init__.py +0 -0
  32. mcp_agent/human_input/handler.py +49 -0
  33. mcp_agent/human_input/types.py +58 -0
  34. mcp_agent/logging/__init__.py +0 -0
  35. mcp_agent/logging/events.py +123 -0
  36. mcp_agent/logging/json_serializer.py +163 -0
  37. mcp_agent/logging/listeners.py +216 -0
  38. mcp_agent/logging/logger.py +365 -0
  39. mcp_agent/logging/rich_progress.py +120 -0
  40. mcp_agent/logging/tracing.py +140 -0
  41. mcp_agent/logging/transport.py +461 -0
  42. mcp_agent/mcp/__init__.py +0 -0
  43. mcp_agent/mcp/gen_client.py +85 -0
  44. mcp_agent/mcp/mcp_activity.py +18 -0
  45. mcp_agent/mcp/mcp_agent_client_session.py +242 -0
  46. mcp_agent/mcp/mcp_agent_server.py +56 -0
  47. mcp_agent/mcp/mcp_aggregator.py +394 -0
  48. mcp_agent/mcp/mcp_connection_manager.py +330 -0
  49. mcp_agent/mcp/stdio.py +104 -0
  50. mcp_agent/mcp_server_registry.py +275 -0
  51. mcp_agent/progress_display.py +10 -0
  52. mcp_agent/resources/examples/decorator/main.py +26 -0
  53. mcp_agent/resources/examples/decorator/optimizer.py +78 -0
  54. mcp_agent/resources/examples/decorator/orchestrator.py +68 -0
  55. mcp_agent/resources/examples/decorator/parallel.py +81 -0
  56. mcp_agent/resources/examples/decorator/router.py +56 -0
  57. mcp_agent/resources/examples/decorator/tiny.py +22 -0
  58. mcp_agent/resources/examples/mcp_researcher/main-evalopt.py +53 -0
  59. mcp_agent/resources/examples/mcp_researcher/main.py +38 -0
  60. mcp_agent/telemetry/__init__.py +0 -0
  61. mcp_agent/telemetry/usage_tracking.py +18 -0
  62. mcp_agent/workflows/__init__.py +0 -0
  63. mcp_agent/workflows/embedding/__init__.py +0 -0
  64. mcp_agent/workflows/embedding/embedding_base.py +61 -0
  65. mcp_agent/workflows/embedding/embedding_cohere.py +49 -0
  66. mcp_agent/workflows/embedding/embedding_openai.py +46 -0
  67. mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
  68. mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +359 -0
  69. mcp_agent/workflows/intent_classifier/__init__.py +0 -0
  70. mcp_agent/workflows/intent_classifier/intent_classifier_base.py +120 -0
  71. mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +134 -0
  72. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +45 -0
  73. mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +45 -0
  74. mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +161 -0
  75. mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +60 -0
  76. mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +60 -0
  77. mcp_agent/workflows/llm/__init__.py +0 -0
  78. mcp_agent/workflows/llm/augmented_llm.py +645 -0
  79. mcp_agent/workflows/llm/augmented_llm_anthropic.py +539 -0
  80. mcp_agent/workflows/llm/augmented_llm_openai.py +615 -0
  81. mcp_agent/workflows/llm/llm_selector.py +345 -0
  82. mcp_agent/workflows/llm/model_factory.py +175 -0
  83. mcp_agent/workflows/orchestrator/__init__.py +0 -0
  84. mcp_agent/workflows/orchestrator/orchestrator.py +407 -0
  85. mcp_agent/workflows/orchestrator/orchestrator_models.py +154 -0
  86. mcp_agent/workflows/orchestrator/orchestrator_prompts.py +113 -0
  87. mcp_agent/workflows/parallel/__init__.py +0 -0
  88. mcp_agent/workflows/parallel/fan_in.py +350 -0
  89. mcp_agent/workflows/parallel/fan_out.py +187 -0
  90. mcp_agent/workflows/parallel/parallel_llm.py +141 -0
  91. mcp_agent/workflows/router/__init__.py +0 -0
  92. mcp_agent/workflows/router/router_base.py +276 -0
  93. mcp_agent/workflows/router/router_embedding.py +240 -0
  94. mcp_agent/workflows/router/router_embedding_cohere.py +59 -0
  95. mcp_agent/workflows/router/router_embedding_openai.py +59 -0
  96. mcp_agent/workflows/router/router_llm.py +301 -0
  97. mcp_agent/workflows/swarm/__init__.py +0 -0
  98. mcp_agent/workflows/swarm/swarm.py +320 -0
  99. mcp_agent/workflows/swarm/swarm_anthropic.py +42 -0
  100. mcp_agent/workflows/swarm/swarm_openai.py +41 -0
@@ -0,0 +1,301 @@
1
+ from typing import Callable, List, Literal, Optional, TYPE_CHECKING
2
+
3
+ from pydantic import BaseModel
4
+
5
+ from mcp_agent.agents.agent import Agent
6
+ from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM, RequestParams
7
+ from mcp_agent.workflows.router.router_base import ResultT, Router, RouterResult
8
+ from mcp_agent.logging.logger import get_logger
9
+
10
+ if TYPE_CHECKING:
11
+ from mcp_agent.context import Context
12
+
13
+ logger = get_logger(__name__)
14
+
15
+
16
# Prompt template for a single routing request. Placeholders are filled by
# LLMRouter._route_with_llm via str.format: {context} is the numbered category
# list, {request} is the user request, {top_k} caps how many categories the
# LLM may return. The doubled braces ({{ ... }}) survive str.format and render
# as literal JSON braces in the example response.
DEFAULT_ROUTING_INSTRUCTION = """
You are a highly accurate request router that directs incoming requests to the most appropriate category.
A category is a specialized destination, such as a Function, an MCP Server (a collection of tools/functions), or an Agent (a collection of servers).
Below are the available routing categories, each with their capabilities and descriptions:

{context}

Your task is to analyze the following request and determine the most appropriate categories from the options above. Consider:
- The specific capabilities and tools each destination offers
- How well the request matches the category's description
- Whether the request might benefit from multiple categories (up to {top_k})

Request: {request}

Respond in JSON format:
{{
    "categories": [
        {{
            "category": <category name>,
            "confidence": <high, medium or low>,
            "reasoning": <brief explanation>
        }}
    ]
}}

Only include categories that are truly relevant. You may return fewer than {top_k} if appropriate.
If none of the categories are relevant, return an empty list.
"""

# Static system prompt installed on the router's LLM in LLMRouter.initialize();
# unlike DEFAULT_ROUTING_INSTRUCTION it takes no placeholders.
ROUTING_SYSTEM_INSTRUCTION = """
You are a highly accurate request router that directs incoming requests to the most appropriate category.
A category is a specialized destination, such as a Function, an MCP Server (a collection of tools/functions), or an Agent (a collection of servers).
You will be provided with a request and a list of categories to choose from.
You can choose one or more categories, or choose none if no category is appropriate.
"""
51
+
52
+
53
# NOTE: field declarations below are pydantic model fields; their order and
# defaults are part of the model's behavior (serialization order, validation),
# so the declarations are kept exactly as-is.
class LLMRouterResult(RouterResult[ResultT]):
    """A class that represents the result of an LLMRouter.route request"""

    confidence: Literal["high", "medium", "low"]
    """The confidence level of the routing decision."""

    reasoning: str | None = None
    """
    A brief explanation of the routing decision.
    This is optional and may only be provided if the router is an LLM
    """
64
+
65
+
66
# One entry of the JSON payload the routing LLM is asked to produce
# (see DEFAULT_ROUTING_INSTRUCTION's example response shape).
class StructuredResponseCategory(BaseModel):
    """A class that represents a single category returned by an LLM router"""

    category: str
    """The name of the category (i.e. MCP server, Agent or function) to route the input to."""

    confidence: Literal["high", "medium", "low"]
    """The confidence level of the routing decision."""

    reasoning: str | None = None
    """A brief explanation of the routing decision."""
77
+
78
+
79
# Top-level response model passed as response_model to
# AugmentedLLM.generate_structured in LLMRouter._route_with_llm.
class StructuredResponse(BaseModel):
    """A class that represents the structured response of an LLM router"""

    categories: List[StructuredResponseCategory]
    """A list of categories to route the input to."""
84
+
85
+
86
class LLMRouter(Router):
    """
    A router that uses an LLM to route an input to a specific category.

    Candidate categories (MCP servers, agents, or plain callables) are
    formatted into a prompt; the LLM replies with a structured ranking
    (StructuredResponse) which is mapped back onto the registered categories.
    """

    def __init__(
        self,
        llm_factory: Callable[..., AugmentedLLM],
        name: str = "LLM Router",
        server_names: List[str] | None = None,
        agents: List[Agent] | None = None,
        functions: List[Callable] | None = None,
        routing_instruction: str | None = None,
        context: Optional["Context"] = None,
        default_request_params: Optional[RequestParams] = None,
        **kwargs,
    ):
        """
        Args:
            llm_factory: Callable that builds the AugmentedLLM used for routing.
            name: Display name for this router.
            server_names: MCP server categories to route between.
            agents: Agent categories to route between.
            functions: Function categories to route between.
            routing_instruction: Optional prompt template overriding
                DEFAULT_ROUTING_INSTRUCTION; must accept {context}, {request}
                and {top_k} placeholders.
            context: Optional application context forwarded to the base Router.
            default_request_params: Extra LLM request params merged over the
                router defaults in initialize().
        """
        super().__init__(
            server_names=server_names,
            agents=agents,
            functions=functions,
            routing_instruction=routing_instruction,
            context=context,
            **kwargs,
        )

        self.name = name
        self.llm_factory = llm_factory
        self.default_request_params = default_request_params or RequestParams()
        self.llm = None  # Will be created in initialize()

    @classmethod
    async def create(
        cls,
        llm_factory: Callable[..., AugmentedLLM],
        name: str = "LLM Router",
        server_names: List[str] | None = None,
        agents: List[Agent] | None = None,
        functions: List[Callable] | None = None,
        routing_instruction: str | None = None,
        context: Optional["Context"] = None,
        default_request_params: Optional[RequestParams] = None,
    ) -> "LLMRouter":
        """
        Factory method to create and initialize a router.
        Use this instead of constructor since we need async initialization.
        """
        instance = cls(
            llm_factory=llm_factory,
            name=name,
            server_names=server_names,
            agents=agents,
            functions=functions,
            # BUG FIX: this previously passed DEFAULT_ROUTING_INSTRUCTION
            # unconditionally, silently discarding any caller-supplied
            # routing_instruction. Pass the argument through; _route_with_llm
            # already falls back to the default when it is None.
            routing_instruction=routing_instruction,
            context=context,
            default_request_params=default_request_params,
        )
        await instance.initialize()
        return instance

    async def initialize(self):
        """Initialize the router and create the LLM instance."""
        if not self.initialized:
            await super().initialize()
            router_params = RequestParams(
                systemPrompt=ROUTING_SYSTEM_INSTRUCTION,
                use_history=False,  # Router should be stateless :)
            )

            # Merge with any provided default params (caller values win)
            if self.default_request_params:
                params_dict = router_params.model_dump()
                params_dict.update(
                    self.default_request_params.model_dump(exclude_unset=True)
                )
                router_params = RequestParams(**params_dict)
            # Statelessness is mandatory even if caller params enabled history
            router_params.use_history = False
            self.llm = self.llm_factory(
                agent=None,  # Router doesn't need an agent context
                name="LLM Router",
                default_request_params=router_params,
            )
            self.initialized = True

    async def route(
        self, request: str, top_k: int = 1
    ) -> List[LLMRouterResult[str | Agent | Callable]]:
        """Route across every category type, returning up to top_k results."""
        if not self.initialized:
            await self.initialize()

        return await self._route_with_llm(request, top_k)

    async def route_to_server(
        self, request: str, top_k: int = 1
    ) -> List[LLMRouterResult[str]]:
        """Route considering only MCP server categories."""
        if not self.initialized:
            await self.initialize()

        return await self._route_with_llm(
            request,
            top_k,
            include_servers=True,
            include_agents=False,
            include_functions=False,
        )

    async def route_to_agent(
        self, request: str, top_k: int = 1
    ) -> List[LLMRouterResult[Agent]]:
        """Route considering only agent categories."""
        if not self.initialized:
            await self.initialize()

        return await self._route_with_llm(
            request,
            top_k,
            include_servers=False,
            include_agents=True,
            include_functions=False,
        )

    async def route_to_function(
        self, request: str, top_k: int = 1
    ) -> List[LLMRouterResult[Callable]]:
        """Route considering only function categories."""
        if not self.initialized:
            await self.initialize()

        return await self._route_with_llm(
            request,
            top_k,
            include_servers=False,
            include_agents=False,
            include_functions=True,
        )

    async def _route_with_llm(
        self,
        request: str,
        top_k: int = 1,
        include_servers: bool = True,
        include_agents: bool = True,
        include_functions: bool = True,
    ) -> List[LLMRouterResult]:
        """Core routing: prompt the LLM and map its answer onto known categories.

        Returns at most top_k LLMRouterResult entries; unknown category names
        returned by the LLM are logged and skipped.
        """
        if not self.initialized:
            await self.initialize()

        routing_instruction = self.routing_instruction or DEFAULT_ROUTING_INSTRUCTION

        # Generate the categories context
        context = self._generate_context(
            include_servers=include_servers,
            include_agents=include_agents,
            include_functions=include_functions,
        )

        # Format the prompt with all the necessary information
        prompt = routing_instruction.format(
            context=context, request=request, top_k=top_k
        )

        # Get routes from LLM
        response = await self.llm.generate_structured(
            message=prompt,
            response_model=StructuredResponse,
        )

        # Construct the result
        if not response or not response.categories:
            return []

        result: List[LLMRouterResult] = []
        for r in response.categories:
            router_category = self.categories.get(r.category)
            if not router_category:
                # The LLM named a category we never offered — skip it, but
                # leave a trace instead of failing silently.
                logger.warning(f"LLM returned unknown routing category: {r.category}")
                continue

            result.append(
                LLMRouterResult(
                    result=router_category.category,
                    confidence=r.confidence,
                    reasoning=r.reasoning,
                )
            )

        return result[:top_k]

    def _generate_context(
        self,
        include_servers: bool = True,
        include_agents: bool = True,
        include_functions: bool = True,
    ) -> str:
        """Generate a formatted context list of categories.

        Numbering is continuous across groups (servers, then agents, then
        functions) so the prompt shows one flat, deterministic list.
        """
        groups = []
        if include_servers:
            groups.append(self.server_categories.values())
        if include_agents:
            groups.append(self.agent_categories.values())
        if include_functions:
            groups.append(self.function_categories.values())

        context_list = []
        idx = 1
        for group in groups:
            for category in group:
                context_list.append(self.format_category(category, idx))
                idx += 1

        return "\n\n".join(context_list)
File without changes
@@ -0,0 +1,320 @@
1
+ from typing import Callable, Dict, Generic, List, Optional, TYPE_CHECKING
2
+ from collections import defaultdict
3
+
4
+ from pydantic import AnyUrl, BaseModel, ConfigDict
5
+ from mcp.types import (
6
+ CallToolRequest,
7
+ EmbeddedResource,
8
+ CallToolResult,
9
+ TextContent,
10
+ TextResourceContents,
11
+ Tool,
12
+ )
13
+
14
+ from mcp_agent.agents.agent import Agent
15
+ from mcp_agent.human_input.types import HumanInputCallback
16
+ from mcp_agent.workflows.llm.augmented_llm import (
17
+ AugmentedLLM,
18
+ MessageParamT,
19
+ MessageT,
20
+ )
21
+ from mcp_agent.logging.logger import get_logger
22
+
23
+ if TYPE_CHECKING:
24
+ from mcp_agent.context import Context
25
+
26
+ logger = get_logger(__name__)
27
+
28
+
29
# Embedded-resource subclass carrying a live Agent object; produced by
# create_agent_resource and consumed by Swarm.post_tool_call to switch agents.
class AgentResource(EmbeddedResource):
    """
    A resource that returns an agent. Meant for use with tool calls that want to return an Agent for further processing.
    """

    agent: Optional["Agent"] = None

    # arbitrary_types_allowed is required because Agent is not a pydantic model
    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
37
+
38
+
39
# Embedded-resource subclass carrying an AgentFunctionResult; produced by
# create_agent_function_result_resource when a local function tool returns one.
class AgentFunctionResultResource(EmbeddedResource):
    """
    A resource that returns an AgentFunctionResult.
    Meant for use with tool calls that return an AgentFunctionResult for further processing.
    """

    result: "AgentFunctionResult"

    # arbitrary_types_allowed is required because AgentFunctionResult holds an Agent
    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
48
+
49
+
50
def create_agent_resource(agent: "Agent") -> AgentResource:
    """Wrap *agent* in an AgentResource so a tool call can hand control to it."""
    handoff_contents = TextResourceContents(
        text=f"You are now Agent '{agent.name}'. Please review the messages and continue execution",
        uri=AnyUrl("http://fake.url"),  # Required property but not needed
    )
    return AgentResource(
        type="resource",
        agent=agent,
        resource=handoff_contents,
    )
59
+
60
+
61
def create_agent_function_result_resource(
    result: "AgentFunctionResult",
) -> AgentFunctionResultResource:
    """Wrap an AgentFunctionResult in an embeddable resource.

    The resource text falls back from the result value, to the returned
    agent's name, to a generic placeholder.
    """
    # BUG FIX: the previous fallback read result.agent.name unconditionally
    # (result.value or result.agent.name or ...), raising AttributeError when
    # value was empty and agent was None. Guard the agent access.
    text = (
        result.value
        or (result.agent.name if result.agent else None)
        or "AgentFunctionResult"
    )
    return AgentFunctionResultResource(
        type="resource",
        result=result,
        resource=TextResourceContents(
            text=text,
            uri=AnyUrl("http://fake.url"),  # Required property but not needed
        ),
    )
72
+
73
+
74
class SwarmAgent(Agent):
    """
    A SwarmAgent is an Agent that can spawn other agents and interactively resolve a task.
    Based on OpenAI Swarm: https://github.com/openai/swarm.

    SwarmAgents have access to tools available on the servers they are connected to, but additionally
    have a list of (possibly local) functions that can be called as tools.
    """

    def __init__(
        self,
        name: str,
        instruction: str | Callable[[Dict], str] = "You are a helpful agent.",
        server_names: list[str] = None,
        functions: List["AgentFunctionCallable"] = None,
        parallel_tool_calls: bool = True,
        human_input_callback: HumanInputCallback = None,
        context: Optional["Context"] = None,
        **kwargs,
    ):
        super().__init__(
            name=name,
            instruction=instruction,
            server_names=server_names,
            functions=functions,
            # TODO: saqadri - figure out if Swarm can maintain connection persistence
            # It's difficult because we don't know when the agent will be done with its task
            connection_persistence=False,
            human_input_callback=human_input_callback,
            context=context,
            **kwargs,
        )
        self.parallel_tool_calls = parallel_tool_calls

    async def call_tool(
        self, name: str, arguments: dict | None = None
    ) -> CallToolResult:
        """Run a local function tool when *name* matches one; otherwise defer to server tools."""
        if not self.initialized:
            await self.initialize()

        local_tool = self._function_tool_map.get(name)
        if local_tool is None:
            # Not a local function — let the base Agent dispatch to MCP servers.
            return await super().call_tool(name, arguments)

        outcome = await local_tool.run(arguments)
        logger.debug(f"Function tool {name} result:", data=outcome)

        # Agent handoff (SwarmAgent is an Agent subclass, so one check suffices)
        if isinstance(outcome, Agent):
            return CallToolResult(content=[create_agent_resource(outcome)])
        if isinstance(outcome, AgentFunctionResult):
            return CallToolResult(
                content=[create_agent_function_result_resource(outcome)]
            )
        if isinstance(outcome, str):
            # TODO: saqadri - this is likely meant for returning context variables
            return CallToolResult(content=[TextContent(type="text", text=outcome)])
        if isinstance(outcome, dict):
            return CallToolResult(
                content=[TextContent(type="text", text=str(outcome))]
            )
        logger.warning(f"Unknown result type: {outcome}, returning as text.")
        return CallToolResult(content=[TextContent(type="text", text=str(outcome))])
140
+
141
+
142
class AgentFunctionResult(BaseModel):
    """
    Encapsulates the possible return values for a Swarm agent function.

    Attributes:
        value (str): The result value as a string.
        agent (Agent): The agent instance, if applicable.
        context_variables (dict): A dictionary of context variables.
    """

    value: str = ""
    agent: Agent | None = None
    # NOTE: a mutable {} default is safe on a pydantic field (pydantic copies
    # field defaults per instance, unlike plain Python defaults).
    context_variables: dict = {}

    # arbitrary_types_allowed is required because Agent is not a pydantic model
    model_config = ConfigDict(extra="allow", arbitrary_types_allowed=True)
157
+
158
+
159
AgentFunctionReturnType = str | Agent | dict | AgentFunctionResult
"""A type alias for the return type of a Swarm agent function."""

# Zero-argument callable producing an AgentFunctionReturnType; invoked via
# tool.run(arguments) in SwarmAgent.call_tool.
AgentFunctionCallable = Callable[[], AgentFunctionReturnType]
163
+
164
+
165
async def create_transfer_to_agent_tool(
    agent: "Agent", agent_function: Callable[[], None]
) -> Tool:
    """Build the 'transfer_to_agent' tool that hands control over to *agent*."""
    handoff_resource = create_agent_resource(agent)
    return Tool(
        name="transfer_to_agent",
        description="Transfer control to the agent",
        agent_function=agent_function,
        agent_resource=handoff_resource,
    )
174
+
175
+
176
async def create_agent_function_tool(agent_function: "AgentFunctionCallable") -> Tool:
    """Build a generic 'agent_function' tool wrapping *agent_function* (no agent resource)."""
    return Tool(
        name="agent_function",
        description="Agent function",
        agent_function=agent_function,
        agent_resource=None,
    )
183
+
184
+
185
class Swarm(AugmentedLLM[MessageParamT, MessageT], Generic[MessageParamT, MessageT]):
    """
    Handles orchestrating agents that can use tools via MCP servers.

    MCP version of the OpenAI Swarm class (https://github.com/openai/swarm.)
    """

    # TODO: saqadri - streaming isn't supported yet because the underlying AugmentedLLM classes don't support it
    def __init__(self, agent: SwarmAgent, context_variables: Dict[str, str] = None):
        """
        Initialize the LLM planner with an agent, which will be used as the
        starting point for the workflow.

        Args:
            agent: The initial SwarmAgent driving the workflow.
            context_variables: Optional shared state made available to agent
                instructions and function tools.
        """
        super().__init__(agent=agent)
        # defaultdict(str): missing keys read as "" instead of raising
        self.context_variables = defaultdict(str, context_variables or {})
        # An instruction may be a template function of the context variables
        self.instruction = (
            agent.instruction(self.context_variables)
            if isinstance(agent.instruction, Callable)
            else agent.instruction
        )
        logger.debug(
            f"Swarm initialized with agent {agent.name}",
            data={
                "context_variables": self.context_variables,
                "instruction": self.instruction,
            },
        )

    async def get_tool(self, tool_name: str) -> Tool | None:
        """Get the schema for a tool by name, or None if the active agent doesn't expose it."""
        result = await self.aggregator.list_tools()
        for tool in result.tools:
            if tool.name == tool_name:
                return tool

        return None

    async def pre_tool_call(
        self, tool_call_id: str | None, request: CallToolRequest
    ) -> CallToolRequest | bool:
        """Inject the swarm's context variables into a tool call that accepts them.

        Returns False to abort the call when no agent is active.
        """
        if not self.aggregator:
            # If there are no agents, we can't do anything, so we should bail
            return False

        tool = await self.get_tool(request.params.name)
        if not tool:
            logger.warning(
                f"Warning: Tool '{request.params.name}' not found in agent '{self.aggregator.name}' tools. Proceeding with original request params."
            )
            return request

        # If the tool has a "context_variables" parameter, we set it to our context variables state
        # NOTE(review): this checks the top level of inputSchema rather than
        # inputSchema["properties"] — confirm that is the intended schema shape.
        if "context_variables" in tool.inputSchema:
            logger.debug(
                f"Setting context variables on tool_call '{request.params.name}'",
                data=self.context_variables,
            )
            request.params.arguments["context_variables"] = self.context_variables

        return request

    async def post_tool_call(
        self, tool_call_id: str | None, request: CallToolRequest, result: CallToolResult
    ) -> CallToolResult:
        """Interpret agent-handoff resources embedded in a tool result.

        AgentResource / AgentFunctionResultResource entries switch the active
        agent and/or merge returned context variables, and are replaced with
        plain text content so the conversation stays well-formed.
        """
        contents = []
        for content in result.content:
            if isinstance(content, AgentResource):
                # Set the new agent as the current agent
                await self.set_agent(content.agent)
                contents.append(TextContent(type="text", text=content.resource.text))
            elif isinstance(content, AgentFunctionResultResource):
                # BUG FIX: this branch previously tested isinstance(content,
                # AgentFunctionResult), but tool results embed the *resource*
                # wrapper (see SwarmAgent.call_tool), so the branch never
                # matched and returned context variables were silently dropped.
                logger.info(
                    "Updating context variables with new context variables from agent function result",
                    data=content.result.context_variables,
                )
                self.context_variables.update(content.result.context_variables)
                if content.result.agent:
                    # BUG FIX: set_agent is a coroutine and was previously
                    # called without await (the switch never happened here).
                    await self.set_agent(content.result.agent)

                contents.append(TextContent(type="text", text=content.resource.text))
            else:
                contents.append(content)

        result.content = contents
        return result

    async def set_agent(
        self,
        agent: SwarmAgent,
    ):
        """Shut down the current agent and make *agent* the active one (may be None/DoneAgent)."""
        logger.info(
            f"Switching from agent '{self.aggregator.name}' -> agent '{agent.name if agent else 'NULL'}'"
        )
        if self.aggregator:
            # Close the current agent
            await self.aggregator.shutdown()

        # Initialize the new agent (if it's not None)
        self.aggregator = agent

        if not self.aggregator or isinstance(self.aggregator, DoneAgent):
            self.instruction = None
            return

        await self.aggregator.initialize()
        self.instruction = (
            agent.instruction(self.context_variables)
            if callable(agent.instruction)
            else agent.instruction
        )

    def should_continue(self) -> bool:
        """
        Returns True if the workflow should continue, False otherwise.

        The workflow stops once no agent is active or the DoneAgent sentinel
        has been installed.
        """
        if not self.aggregator or isinstance(self.aggregator, DoneAgent):
            return False

        return True
305
+
306
+
307
class DoneAgent(SwarmAgent):
    """
    A special agent that represents the end of a Swarm workflow.
    """

    def __init__(self):
        super().__init__(name="__done__", instruction="Swarm Workflow is complete.")

    async def call_tool(
        self, _name: str, _arguments: dict | None = None
    ) -> CallToolResult:
        """Never runs a tool; always reports that the workflow has finished."""
        done_notice = TextContent(type="text", text="Workflow is complete.")
        return CallToolResult(content=[done_notice])
@@ -0,0 +1,42 @@
1
+ from mcp_agent.workflows.swarm.swarm import Swarm
2
+ from mcp_agent.workflows.llm.augmented_llm import RequestParams
3
+ from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
4
+ from mcp_agent.logging.logger import get_logger
5
+
6
+ logger = get_logger(__name__)
7
+
8
+
9
+ class AnthropicSwarm(Swarm, AnthropicAugmentedLLM):
10
+ """
11
+ MCP version of the OpenAI Swarm class (https://github.com/openai/swarm.),
12
+ using Anthropic's API as the LLM.
13
+ """
14
+
15
+ async def generate(self, message, request_params: RequestParams | None = None):
16
+ params = self.get_request_params(
17
+ request_params,
18
+ default=RequestParams(
19
+ model="claude-3-5-sonnet-20241022",
20
+ maxTokens=8192,
21
+ parallel_tool_calls=False,
22
+ ),
23
+ )
24
+ iterations = 0
25
+ response = None
26
+ agent_name = str(self.aggregator.name) if self.aggregator else None
27
+
28
+ while iterations < params.max_iterations and self.should_continue():
29
+ response = await super().generate(
30
+ message=message
31
+ if iterations == 0
32
+ else "Please resolve my original request. If it has already been resolved then end turn",
33
+ request_params=params.model_copy(
34
+ update={"max_iterations": 1}
35
+ ), # TODO: saqadri - validate
36
+ )
37
+ logger.debug(f"Agent: {agent_name}, response:", data=response)
38
+ agent_name = self.aggregator.name if self.aggregator else None
39
+ iterations += 1
40
+
41
+ # Return final response back
42
+ return response