fast-agent-mcp 0.0.7__py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between package versions as published.
Potentially problematic release.
This version of fast-agent-mcp might be problematic.
- fast_agent_mcp-0.0.7.dist-info/METADATA +322 -0
- fast_agent_mcp-0.0.7.dist-info/RECORD +100 -0
- fast_agent_mcp-0.0.7.dist-info/WHEEL +4 -0
- fast_agent_mcp-0.0.7.dist-info/entry_points.txt +5 -0
- fast_agent_mcp-0.0.7.dist-info/licenses/LICENSE +201 -0
- mcp_agent/__init__.py +0 -0
- mcp_agent/agents/__init__.py +0 -0
- mcp_agent/agents/agent.py +277 -0
- mcp_agent/app.py +303 -0
- mcp_agent/cli/__init__.py +0 -0
- mcp_agent/cli/__main__.py +4 -0
- mcp_agent/cli/commands/bootstrap.py +221 -0
- mcp_agent/cli/commands/config.py +11 -0
- mcp_agent/cli/commands/setup.py +229 -0
- mcp_agent/cli/main.py +68 -0
- mcp_agent/cli/terminal.py +24 -0
- mcp_agent/config.py +334 -0
- mcp_agent/console.py +28 -0
- mcp_agent/context.py +251 -0
- mcp_agent/context_dependent.py +48 -0
- mcp_agent/core/fastagent.py +1013 -0
- mcp_agent/eval/__init__.py +0 -0
- mcp_agent/event_progress.py +88 -0
- mcp_agent/executor/__init__.py +0 -0
- mcp_agent/executor/decorator_registry.py +120 -0
- mcp_agent/executor/executor.py +293 -0
- mcp_agent/executor/task_registry.py +34 -0
- mcp_agent/executor/temporal.py +405 -0
- mcp_agent/executor/workflow.py +197 -0
- mcp_agent/executor/workflow_signal.py +325 -0
- mcp_agent/human_input/__init__.py +0 -0
- mcp_agent/human_input/handler.py +49 -0
- mcp_agent/human_input/types.py +58 -0
- mcp_agent/logging/__init__.py +0 -0
- mcp_agent/logging/events.py +123 -0
- mcp_agent/logging/json_serializer.py +163 -0
- mcp_agent/logging/listeners.py +216 -0
- mcp_agent/logging/logger.py +365 -0
- mcp_agent/logging/rich_progress.py +120 -0
- mcp_agent/logging/tracing.py +140 -0
- mcp_agent/logging/transport.py +461 -0
- mcp_agent/mcp/__init__.py +0 -0
- mcp_agent/mcp/gen_client.py +85 -0
- mcp_agent/mcp/mcp_activity.py +18 -0
- mcp_agent/mcp/mcp_agent_client_session.py +242 -0
- mcp_agent/mcp/mcp_agent_server.py +56 -0
- mcp_agent/mcp/mcp_aggregator.py +394 -0
- mcp_agent/mcp/mcp_connection_manager.py +330 -0
- mcp_agent/mcp/stdio.py +104 -0
- mcp_agent/mcp_server_registry.py +275 -0
- mcp_agent/progress_display.py +10 -0
- mcp_agent/resources/examples/decorator/main.py +26 -0
- mcp_agent/resources/examples/decorator/optimizer.py +78 -0
- mcp_agent/resources/examples/decorator/orchestrator.py +68 -0
- mcp_agent/resources/examples/decorator/parallel.py +81 -0
- mcp_agent/resources/examples/decorator/router.py +56 -0
- mcp_agent/resources/examples/decorator/tiny.py +22 -0
- mcp_agent/resources/examples/mcp_researcher/main-evalopt.py +53 -0
- mcp_agent/resources/examples/mcp_researcher/main.py +38 -0
- mcp_agent/telemetry/__init__.py +0 -0
- mcp_agent/telemetry/usage_tracking.py +18 -0
- mcp_agent/workflows/__init__.py +0 -0
- mcp_agent/workflows/embedding/__init__.py +0 -0
- mcp_agent/workflows/embedding/embedding_base.py +61 -0
- mcp_agent/workflows/embedding/embedding_cohere.py +49 -0
- mcp_agent/workflows/embedding/embedding_openai.py +46 -0
- mcp_agent/workflows/evaluator_optimizer/__init__.py +0 -0
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +359 -0
- mcp_agent/workflows/intent_classifier/__init__.py +0 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_base.py +120 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py +134 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_cohere.py +45 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_embedding_openai.py +45 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_llm.py +161 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py +60 -0
- mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py +60 -0
- mcp_agent/workflows/llm/__init__.py +0 -0
- mcp_agent/workflows/llm/augmented_llm.py +645 -0
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +539 -0
- mcp_agent/workflows/llm/augmented_llm_openai.py +615 -0
- mcp_agent/workflows/llm/llm_selector.py +345 -0
- mcp_agent/workflows/llm/model_factory.py +175 -0
- mcp_agent/workflows/orchestrator/__init__.py +0 -0
- mcp_agent/workflows/orchestrator/orchestrator.py +407 -0
- mcp_agent/workflows/orchestrator/orchestrator_models.py +154 -0
- mcp_agent/workflows/orchestrator/orchestrator_prompts.py +113 -0
- mcp_agent/workflows/parallel/__init__.py +0 -0
- mcp_agent/workflows/parallel/fan_in.py +350 -0
- mcp_agent/workflows/parallel/fan_out.py +187 -0
- mcp_agent/workflows/parallel/parallel_llm.py +141 -0
- mcp_agent/workflows/router/__init__.py +0 -0
- mcp_agent/workflows/router/router_base.py +276 -0
- mcp_agent/workflows/router/router_embedding.py +240 -0
- mcp_agent/workflows/router/router_embedding_cohere.py +59 -0
- mcp_agent/workflows/router/router_embedding_openai.py +59 -0
- mcp_agent/workflows/router/router_llm.py +301 -0
- mcp_agent/workflows/swarm/__init__.py +0 -0
- mcp_agent/workflows/swarm/swarm.py +320 -0
- mcp_agent/workflows/swarm/swarm_anthropic.py +42 -0
- mcp_agent/workflows/swarm/swarm_openai.py +41 -0
mcp_agent/agents/agent.py
ADDED

@@ -0,0 +1,277 @@
import asyncio
import uuid
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional, TypeVar, Union, TYPE_CHECKING

from mcp.server.fastmcp.tools import Tool as FastTool
from mcp.types import (
    CallToolResult,
    ListToolsResult,
    TextContent,
    Tool,
)

from mcp_agent.mcp.mcp_aggregator import MCPAggregator
from mcp_agent.workflows.llm.augmented_llm import RequestParams
from mcp_agent.human_input.types import (
    HumanInputCallback,
    HumanInputRequest,
    HumanInputResponse,
    HUMAN_INPUT_SIGNAL_NAME,
)
from mcp_agent.workflows.llm.augmented_llm import AugmentedLLM
from mcp_agent.logging.logger import get_logger

if TYPE_CHECKING:
    from mcp_agent.context import Context

logger = get_logger(__name__)

# Define a TypeVar for AugmentedLLM and its subclasses
LLM = TypeVar("LLM", bound=AugmentedLLM)

HUMAN_INPUT_TOOL_NAME = "__human_input__"


@dataclass
class AgentConfig:
    """Configuration for an Agent instance"""

    name: str
    instruction: Union[str, Callable[[Dict], str]]
    servers: List[str]
    model: Optional[str] = None
    use_history: bool = True
    default_request_params: Optional[RequestParams] = None

    def __post_init__(self):
        """Ensure default_request_params exists with proper history setting"""

        if self.default_request_params is None:
            self.default_request_params = RequestParams(use_history=self.use_history)
        else:
            # Override the request params history setting if explicitly configured
            self.default_request_params.use_history = self.use_history
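
The __post_init__ hook above means the config-level use_history flag always overrides whatever the caller placed in default_request_params. A minimal sketch of that behavior, using only the AgentConfig and RequestParams APIs shown in this diff (the field values are illustrative):

# Sketch: how AgentConfig reconciles use_history with default_request_params.
from mcp_agent.agents.agent import AgentConfig
from mcp_agent.workflows.llm.augmented_llm import RequestParams

cfg = AgentConfig(
    name="researcher",                     # illustrative name
    instruction="Answer research questions.",
    servers=["fetch"],                     # illustrative server entry
    use_history=False,
    default_request_params=RequestParams(use_history=True),
)
# __post_init__ rewrites the request params, so the config-level flag wins.
assert cfg.default_request_params.use_history is False

The rest of the file defines the Agent class itself: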
class Agent(MCPAggregator):
    """
    An Agent is an entity that has access to a set of MCP servers and can interact with them.
    Each agent should have a purpose defined by its instruction.
    """

    def __init__(
        self,
        config: Union[
            AgentConfig, str
        ],  # Can be AgentConfig or backward compatible str name
        instruction: Optional[Union[str, Callable[[Dict], str]]] = None,
        server_names: Optional[List[str]] = None,
        functions: Optional[List[Callable]] = None,
        connection_persistence: bool = True,
        human_input_callback: Optional[HumanInputCallback] = None,
        context: Optional["Context"] = None,
        **kwargs,
    ):
        # Handle backward compatibility where first arg was name
        if isinstance(config, str):
            self.config = AgentConfig(
                name=config,
                instruction=instruction or "You are a helpful agent.",
                servers=server_names or [],
            )
        else:
            self.config = config

        super().__init__(
            context=context,
            server_names=self.config.servers,
            connection_persistence=connection_persistence,
            name=self.config.name,
            **kwargs,
        )

        self.name = self.config.name
        self.instruction = self.config.instruction
        self.functions = functions or []
        self.executor = self.context.executor
        self.logger = get_logger(f"{__name__}.{self.name}")

        # Store the default request params from config
        self._default_request_params = self.config.default_request_params

        # Map function names to tools
        self._function_tool_map: Dict[str, FastTool] = {}

        self.human_input_callback: HumanInputCallback | None = human_input_callback
        if not human_input_callback:
            if self.context.human_input_handler:
                self.human_input_callback = self.context.human_input_handler

    async def initialize(self):
        """
        Initialize the agent and connect to the MCP servers.
        NOTE: This method is called automatically when the agent is used as an async context manager.
        """
        await (
            self.__aenter__()
        )  # This initializes the connection manager and loads the servers

        for function in self.functions:
            tool: FastTool = FastTool.from_function(function)
            self._function_tool_map[tool.name] = tool

    async def attach_llm(self, llm_factory: Callable[..., LLM]) -> LLM:
        """
        Create an LLM instance for the agent.

        Args:
            llm_factory: A callable that constructs an AugmentedLLM or its subclass.
                The factory should accept keyword arguments matching the
                AugmentedLLM constructor parameters.

        Returns:
            An instance of AugmentedLLM or one of its subclasses.
        """
        return llm_factory(
            agent=self, default_request_params=self._default_request_params
        )

    async def shutdown(self):
        """
        Shutdown the agent and close all MCP server connections.
        NOTE: This method is called automatically when the agent is used as an async context manager.
        """
        await super().close()

    async def request_human_input(
        self,
        request: HumanInputRequest,
    ) -> str:
        """
        Request input from a human user. Pauses the workflow until input is received.

        Args:
            request: The human input request

        Returns:
            The input provided by the human

        Raises:
            TimeoutError: If the timeout is exceeded
        """
        if not self.human_input_callback:
            raise ValueError("Human input callback not set")

        # Generate a unique ID for this request to avoid signal collisions
        request_id = f"{HUMAN_INPUT_SIGNAL_NAME}_{self.name}_{uuid.uuid4()}"
        request.request_id = request_id

        self.logger.debug("Requesting human input:", data=request)

        async def call_callback_and_signal():
            try:
                user_input = await self.human_input_callback(request)
                self.logger.debug("Received human input:", data=user_input)
                await self.executor.signal(signal_name=request_id, payload=user_input)
            except Exception as e:
                await self.executor.signal(
                    request_id, payload=f"Error getting human input: {str(e)}"
                )

        asyncio.create_task(call_callback_and_signal())

        self.logger.debug("Waiting for human input signal")

        # Wait for signal (workflow is paused here)
        result = await self.executor.wait_for_signal(
            signal_name=request_id,
            request_id=request_id,
            workflow_id=request.workflow_id,
            signal_description=request.description or request.prompt,
            timeout_seconds=request.timeout_seconds,
            signal_type=HumanInputResponse,  # TODO: saqadri - should this be HumanInputResponse?
        )

        self.logger.debug("Received human input signal", data=result)
        return result

    async def list_tools(self) -> ListToolsResult:
        if not self.initialized:
            await self.initialize()

        result = await super().list_tools()

        # Add function tools
        for tool in self._function_tool_map.values():
            result.tools.append(
                Tool(
                    name=tool.name,
                    description=tool.description,
                    inputSchema=tool.parameters,
                )
            )

        # Add a human_input_callback as a tool
        if not self.human_input_callback:
            self.logger.debug("Human input callback not set")
            return result

        # Add a human_input_callback as a tool
        human_input_tool: FastTool = FastTool.from_function(self.request_human_input)
        result.tools.append(
            Tool(
                name=HUMAN_INPUT_TOOL_NAME,
                description=human_input_tool.description,
                inputSchema=human_input_tool.parameters,
            )
        )

        return result

    # todo would prefer to use tool_name to disambiguate agent name
    async def call_tool(
        self, name: str, arguments: dict | None = None
    ) -> CallToolResult:
        if name == HUMAN_INPUT_TOOL_NAME:
            # Call the human input tool
            return await self._call_human_input_tool(arguments)
        elif name in self._function_tool_map:
            # Call local function and return the result as a text response
            tool = self._function_tool_map[name]
            result = await tool.run(arguments)
            return CallToolResult(content=[TextContent(type="text", text=str(result))])
        else:
            return await super().call_tool(name, arguments)

    async def _call_human_input_tool(
        self, arguments: dict | None = None
    ) -> CallToolResult:
        # Handle human input request
        try:
            request = HumanInputRequest(**arguments.get("request"))
            result: HumanInputResponse = await self.request_human_input(request=request)
            return CallToolResult(
                content=[
                    TextContent(type="text", text=f"Human response: {result.response}")
                ]
            )
        except TimeoutError as e:
            return CallToolResult(
                isError=True,
                content=[
                    TextContent(
                        type="text",
                        text=f"Error: Human input request timed out: {str(e)}",
                    )
                ],
            )
        except Exception as e:
            return CallToolResult(
                isError=True,
                content=[
                    TextContent(
                        type="text", text=f"Error requesting human input: {str(e)}"
                    )
                ],
            )
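
The Agent class above accepts either a bare name string or an AgentConfig, exposes plain Python functions as local tools, and hands itself to an LLM factory in attach_llm. A rough end-to-end sketch under a few stated assumptions: the class name AnthropicAugmentedLLM is assumed from the augmented_llm_anthropic module listed in this release, its constructor is assumed to accept the agent and default_request_params keywords that attach_llm passes, the server name is illustrative, and the Agent is assumed to pick up the MCPApp context (defined in app.py below) when none is passed explicitly.

# Sketch only: names outside this diff are assumptions, not confirmed API.
import asyncio

from mcp_agent.app import MCPApp
from mcp_agent.agents.agent import Agent, AgentConfig
from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM  # assumed class name


def word_count(text: str) -> str:
    """A plain function exposed to the LLM as a local tool via functions=[...]."""
    return f"{len(text.split())} words"


async def main():
    app = MCPApp(name="example")  # provides the shared context the Agent reads its executor from
    async with app.run():
        agent = Agent(
            config=AgentConfig(
                name="writer",
                instruction="Write a short report, then check its length.",
                servers=["fetch"],  # illustrative; would refer to an entry in mcp_agent.config.yaml
            ),
            functions=[word_count],
        )
        await agent.initialize()
        try:
            # attach_llm calls the factory with agent= and default_request_params=,
            # so the factory must accept those keyword arguments.
            llm = await agent.attach_llm(AnthropicAugmentedLLM)
            # llm.generate*() calls (not shown) would go through this instance.
            tools = await agent.list_tools()  # MCP tools + word_count + __human_input__ if a callback is set
            print([tool.name for tool in tools.tools])
        finally:
            await agent.shutdown()


asyncio.run(main())

app.py, which provides that shared context and the workflow decorators, follows: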
mcp_agent/app.py
ADDED

@@ -0,0 +1,303 @@
from typing import Any, Dict, Optional, Type, TypeVar, Callable
from datetime import timedelta
import asyncio
from contextlib import asynccontextmanager

from mcp import ServerSession
from mcp_agent.context import Context, initialize_context, cleanup_context
from mcp_agent.config import Settings
from mcp_agent.event_progress import ProgressAction
from mcp_agent.logging.logger import get_logger
from mcp_agent.executor.workflow_signal import SignalWaitCallback
from mcp_agent.human_input.types import HumanInputCallback
from mcp_agent.human_input.handler import console_input_callback
from mcp_agent.workflows.llm.llm_selector import ModelSelector

R = TypeVar("R")


class MCPApp:
    """
    Main application class that manages global state and can host workflows.

    Example usage:
        app = MCPApp()

        @app.workflow
        class MyWorkflow(Workflow[str]):
            @app.task
            async def my_task(self):
                pass

            async def run(self):
                await self.my_task()

        async with app.run() as running_app:
            workflow = MyWorkflow()
            result = await workflow.execute()
    """

    def __init__(
        self,
        name: str = "mcp_application",
        settings: Optional[Settings] | str = None,
        human_input_callback: Optional[HumanInputCallback] = console_input_callback,
        signal_notification: Optional[SignalWaitCallback] = None,
        upstream_session: Optional["ServerSession"] = None,
        model_selector: ModelSelector = None,
    ):
        """
        Initialize the application with a name and optional settings.
        Args:
            name: Name of the application
            settings: Application configuration - If unspecified, the settings are loaded from mcp_agent.config.yaml.
                If this is a string, it is treated as the path to the config file to load.
            human_input_callback: Callback for handling human input
            signal_notification: Callback for getting notified on workflow signals/events.
            upstream_session: Optional upstream session if the MCPApp is running as a server to an MCP client.
            model_selector: Optional built-in ModelSelector to help with model selection.
        """
        self.name = name

        # We use these to initialize the context in initialize()
        self._config_or_path = settings
        self._human_input_callback = human_input_callback
        self._signal_notification = signal_notification
        self._upstream_session = upstream_session
        self._model_selector = model_selector

        self._workflows: Dict[str, Type] = {}  # id to workflow class
        self._logger = None
        self._context: Optional[Context] = None
        self._initialized = False

    @property
    def context(self) -> Context:
        if self._context is None:
            raise RuntimeError(
                "MCPApp not initialized, please call initialize() first, or use async with app.run()."
            )
        return self._context

    @property
    def config(self):
        return self._context.config

    @property
    def server_registry(self):
        return self._context.server_registry

    @property
    def executor(self):
        return self._context.executor

    @property
    def engine(self):
        return self.executor.execution_engine

    @property
    def upstream_session(self):
        return self._context.upstream_session

    @upstream_session.setter
    def upstream_session(self, value):
        self._context.upstream_session = value

    @property
    def workflows(self):
        return self._workflows

    @property
    def tasks(self):
        return self.context.task_registry.list_activities()

    @property
    def logger(self):
        if self._logger is None:
            self._logger = get_logger(f"mcp_agent.{self.name}")
        return self._logger

    async def initialize(self):
        """Initialize the application."""
        if self._initialized:
            return

        self._context = await initialize_context(self._config_or_path)

        # Set the properties that were passed in the constructor
        self._context.human_input_handler = self._human_input_callback
        self._context.signal_notification = self._signal_notification
        self._context.upstream_session = self._upstream_session
        self._context.model_selector = self._model_selector

        self._initialized = True
        self.logger.info(
            "MCPAgent initialized",
            data={
                "progress_action": "Running",
                "target": self.name,
                "agent_name": "mcp_application_loop",
            },
        )

    async def cleanup(self):
        """Cleanup application resources."""
        if not self._initialized:
            return

        # Update progress display before logging is shut down
        self.logger.info(
            "MCPAgent cleanup",
            data={
                "progress_action": ProgressAction.FINISHED,
                "target": self.name,
                "agent_name": "mcp_application_loop",
            },
        )
        await cleanup_context()
        self._context = None
        self._initialized = False

    @asynccontextmanager
    async def run(self):
        """
        Run the application. Use as context manager.

        Example:
            async with app.run() as running_app:
                # App is initialized here
                pass
        """
        await self.initialize()
        try:
            yield self
        finally:
            await self.cleanup()

    def workflow(
        self, cls: Type, *args, workflow_id: str | None = None, **kwargs
    ) -> Type:
        """
        Decorator for a workflow class. By default it's a no-op,
        but different executors can use this to customize behavior
        for workflow registration.

        Example:
            If Temporal is available & we use a TemporalExecutor,
            this decorator will wrap with temporal_workflow.defn.
        """
        decorator_registry = self.context.decorator_registry
        execution_engine = self.engine
        workflow_defn_decorator = decorator_registry.get_workflow_defn_decorator(
            execution_engine
        )

        if workflow_defn_decorator:
            return workflow_defn_decorator(cls, *args, **kwargs)

        cls._app = self
        self._workflows[workflow_id or cls.__name__] = cls

        # Default no-op
        return cls

    def workflow_run(self, fn: Callable[..., R]) -> Callable[..., R]:
        """
        Decorator for a workflow's main 'run' method.
        Different executors can use this to customize behavior for workflow execution.

        Example:
            If Temporal is in use, this gets converted to @workflow.run.
        """

        decorator_registry = self.context.decorator_registry
        execution_engine = self.engine
        workflow_run_decorator = decorator_registry.get_workflow_run_decorator(
            execution_engine
        )

        if workflow_run_decorator:
            return workflow_run_decorator(fn)

        # Default no-op
        def wrapper(*args, **kwargs):
            # no-op wrapper
            return fn(*args, **kwargs)

        return wrapper

    def workflow_task(
        self,
        name: str | None = None,
        schedule_to_close_timeout: timedelta | None = None,
        retry_policy: Dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Callable[[Callable[..., R]], Callable[..., R]]:
        """
        Decorator to mark a function as a workflow task,
        automatically registering it in the global activity registry.

        Args:
            name: Optional custom name for the activity
            schedule_to_close_timeout: Maximum time the task can take to complete
            retry_policy: Retry policy configuration
            **kwargs: Additional metadata passed to the activity registration

        Returns:
            Decorated function that preserves async and typing information

        Raises:
            TypeError: If the decorated function is not async
            ValueError: If the retry policy or timeout is invalid
        """

        def decorator(func: Callable[..., R]) -> Callable[..., R]:
            if not asyncio.iscoroutinefunction(func):
                raise TypeError(f"Function {func.__name__} must be async.")

            actual_name = name or f"{func.__module__}.{func.__qualname__}"
            timeout = schedule_to_close_timeout or timedelta(minutes=10)
            metadata = {
                "activity_name": actual_name,
                "schedule_to_close_timeout": timeout,
                "retry_policy": retry_policy or {},
                **kwargs,
            }
            activity_registry = self.context.task_registry
            activity_registry.register(actual_name, func, metadata)

            setattr(func, "is_workflow_task", True)
            setattr(func, "execution_metadata", metadata)

            # TODO: saqadri - determine if we need this
            # Preserve metadata through partial application
            # @functools.wraps(func)
            # async def wrapper(*args: Any, **kwargs: Any) -> R:
            #     result = await func(*args, **kwargs)
            #     return cast(R, result)  # Ensure type checking works

            # # Add metadata that survives partial application
            # wrapper.is_workflow_task = True  # type: ignore
            # wrapper.execution_metadata = metadata  # type: ignore

            # # Make metadata accessible through partial
            # def __getattr__(name: str) -> Any:
            #     if name == "is_workflow_task":
            #         return True
            #     if name == "execution_metadata":
            #         return metadata
            #     raise AttributeError(f"'{func.__name__}' has no attribute '{name}'")

            # wrapper.__getattr__ = __getattr__  # type: ignore

            # return wrapper

            return func

        return decorator

    def is_workflow_task(self, func: Callable[..., Any]) -> bool:
        """
        Check if a function is marked as a workflow task.
        This gets set for functions that are decorated with @workflow_task."""
        return bool(getattr(func, "is_workflow_task", False))
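
The decorator docstrings above describe the registration flow: with the default asyncio engine there is no engine-specific decorator to apply, so workflow and workflow_run fall through to the no-op path, while workflow_task records the activity name, timeout, and retry policy as metadata in the context's task registry. A short sketch of that path; note that workflow_task reads app.context at decoration time, so the task is registered after the app is initialized here, and the retry-policy keys and function body are illustrative rather than taken from the package:

# Sketch of workflow_task registration with the default (asyncio) engine.
import asyncio
from datetime import timedelta

from mcp_agent.app import MCPApp


async def main():
    app = MCPApp(name="batch_app")
    async with app.run() as running_app:
        # workflow_task reads running_app.context.task_registry, so decorate after initialization.
        @running_app.workflow_task(
            schedule_to_close_timeout=timedelta(minutes=2),  # default would be 10 minutes
            retry_policy={"maximum_attempts": 3},            # stored as metadata; keys are illustrative
        )
        async def summarize(text: str) -> str:
            return text[:100]

        print(running_app.is_workflow_task(summarize))            # True
        print(summarize.execution_metadata["activity_name"])      # module.qualname by default
        print(await summarize("some long document ..."))          # the function itself is returned unchanged


asyncio.run(main())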