fast-agent-mcp 0.0.9__py3-none-any.whl → 0.0.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of fast-agent-mcp might be problematic.
- {fast_agent_mcp-0.0.9.dist-info → fast_agent_mcp-0.0.12.dist-info}/METADATA +17 -11
- {fast_agent_mcp-0.0.9.dist-info → fast_agent_mcp-0.0.12.dist-info}/RECORD +36 -28
- mcp_agent/app.py +4 -4
- mcp_agent/cli/commands/bootstrap.py +2 -5
- mcp_agent/cli/commands/setup.py +1 -1
- mcp_agent/cli/main.py +4 -4
- mcp_agent/core/enhanced_prompt.py +315 -0
- mcp_agent/core/fastagent.py +520 -388
- mcp_agent/event_progress.py +5 -2
- mcp_agent/human_input/handler.py +6 -2
- mcp_agent/logging/rich_progress.py +10 -5
- mcp_agent/mcp/mcp_aggregator.py +2 -1
- mcp_agent/mcp/mcp_connection_manager.py +67 -37
- mcp_agent/resources/examples/internal/agent.py +17 -0
- mcp_agent/resources/examples/internal/job.py +83 -0
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/researcher/fastagent.config.yaml +53 -0
- mcp_agent/resources/examples/researcher/researcher-eval.py +53 -0
- mcp_agent/resources/examples/researcher/researcher.py +38 -0
- mcp_agent/resources/examples/workflows/agent.py +17 -0
- mcp_agent/resources/examples/workflows/agent_build.py +61 -0
- mcp_agent/resources/examples/workflows/chaining.py +0 -1
- mcp_agent/resources/examples/workflows/evaluator.py +6 -3
- mcp_agent/resources/examples/workflows/fastagent.py +22 -0
- mcp_agent/resources/examples/workflows/orchestrator.py +1 -1
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +91 -92
- mcp_agent/workflows/llm/augmented_llm.py +14 -3
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +8 -5
- mcp_agent/workflows/llm/augmented_llm_openai.py +20 -9
- mcp_agent/workflows/llm/model_factory.py +25 -11
- mcp_agent/workflows/orchestrator/orchestrator.py +68 -7
- mcp_agent/workflows/orchestrator/orchestrator_prompts.py +11 -6
- mcp_agent/workflows/router/router_llm.py +13 -2
- mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -9
- {fast_agent_mcp-0.0.9.dist-info → fast_agent_mcp-0.0.12.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.0.9.dist-info → fast_agent_mcp-0.0.12.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.0.9.dist-info → fast_agent_mcp-0.0.12.dist-info}/licenses/LICENSE +0 -0
mcp_agent/event_progress.py
CHANGED

@@ -11,14 +11,16 @@ class ProgressAction(str, Enum):
     """Progress actions available in the system."""

     STARTING = "Starting"
+    LOADED = "Loaded"
     INITIALIZED = "Initialized"
     CHATTING = "Chatting"
+    ROUTING = "Routing"
+    PLANNING = "Planning"
     READY = "Ready"
     CALLING_TOOL = "Calling Tool"
     FINISHED = "Finished"
     SHUTDOWN = "Shutdown"
     AGGREGATOR_INITIALIZED = "Running"
-    ROUTING = "Routing"
     FATAL_ERROR = "Error"


@@ -81,7 +83,8 @@ def convert_log_event(event: Event) -> Optional[ProgressEvent]:
     if chat_turn is not None:
         details = f"{model} turn {chat_turn}"
     else:
-
+        if not target:
+            target = event_data.get("target", "unknown")

     return ProgressEvent(
         ProgressAction(progress_action),
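
For reference, a minimal standalone sketch of the target fallback this hunk adds to convert_log_event (the "unknown" default and payload key mirror the diff; the surrounding function is simplified and illustrative):

from typing import Optional

def resolve_target(target: Optional[str], event_data: dict) -> str:
    # Mirror the new fallback: prefer the explicit target, then the event payload,
    # then "unknown", so downstream display code never receives None.
    if not target:
        target = event_data.get("target", "unknown")
    return target

print(resolve_target(None, {"target": "fetch"}))  # -> fetch
print(resolve_target(None, {}))                   # -> unknown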
mcp_agent/human_input/handler.py
CHANGED

@@ -36,7 +36,9 @@ async def console_input_callback(request: HumanInputRequest) -> HumanInputResponse:
         try:
             loop = asyncio.get_event_loop()
             response = await asyncio.wait_for(
-                loop.run_in_executor(
+                loop.run_in_executor(
+                    None, lambda: Prompt.ask("Provide your response ")
+                ),
                 request.timeout_seconds,
             )
         except asyncio.TimeoutError:
@@ -44,6 +46,8 @@ async def console_input_callback(request: HumanInputRequest) -> HumanInputResponse:
             raise TimeoutError("No response received within timeout period")
     else:
         loop = asyncio.get_event_loop()
-        response = await loop.run_in_executor(
+        response = await loop.run_in_executor(
+            None, lambda: Prompt.ask("Provide your response ")
+        )

     return HumanInputResponse(request_id=request.request_id, response=response.strip())
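
The reformatted calls above follow the standard pattern of off-loading a blocking prompt to the default executor and optionally bounding it with asyncio.wait_for. A minimal self-contained sketch of that pattern, using input() as a stand-in for rich's Prompt.ask (the timeout value is illustrative):

import asyncio
from typing import Optional

def blocking_prompt() -> str:
    # Stand-in for Prompt.ask("Provide your response "); any blocking input call works here.
    return input("Provide your response ")

async def ask(timeout_seconds: Optional[float]) -> str:
    loop = asyncio.get_event_loop()
    if timeout_seconds is not None:
        # Run the blocking call in the default executor and bound it with a timeout.
        return await asyncio.wait_for(
            loop.run_in_executor(None, blocking_prompt),
            timeout_seconds,
        )
    # No timeout requested: still keep the blocking call off the event loop.
    return await loop.run_in_executor(None, blocking_prompt)

if __name__ == "__main__":
    print(asyncio.run(ask(30)))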
mcp_agent/logging/rich_progress.py
CHANGED

@@ -68,10 +68,12 @@ class RichProgressDisplay:
         """Map actions to appropriate styles."""
         return {
             ProgressAction.STARTING: "bold yellow",
+            ProgressAction.LOADED: "dim green",
             ProgressAction.INITIALIZED: "dim green",
             ProgressAction.CHATTING: "bold blue",
-            ProgressAction.READY: "dim green",
             ProgressAction.ROUTING: "bold blue",
+            ProgressAction.PLANNING: "bold blue",
+            ProgressAction.READY: "dim green",
             ProgressAction.CALLING_TOOL: "bold magenta",
             ProgressAction.FINISHED: "black on green",
             ProgressAction.SHUTDOWN: "black on red",
@@ -82,29 +84,32 @@ class RichProgressDisplay:
     def update(self, event: ProgressEvent) -> None:
         """Update the progress display with a new event."""
         task_name = event.agent_name or "default"
+
         # Create new task if needed
         if task_name not in self._taskmap:
             task_id = self._progress.add_task(
                 "",
                 total=None,
-                target=f"{event.target}",
-                details=f"{event.agent_name}",
+                target=f"{event.target or task_name}",  # Use task_name as fallback for target
+                details=f"{event.agent_name or ''}",
             )
             self._taskmap[task_name] = task_id
         else:
             task_id = self._taskmap[task_name]

+        # Ensure no None values in the update
         self._progress.update(
             task_id,
             description=f"[{self._get_action_style(event.action)}]{event.action.value:<15}",
-            target=event.target,
-            details=event.details
+            target=event.target or task_name,  # Use task_name as fallback for target
+            details=event.details or "",
             task_name=task_name,
         )

         if (
             event.action == ProgressAction.INITIALIZED
             or event.action == ProgressAction.READY
+            or event.action == ProgressAction.LOADED
         ):
             self._progress.update(task_id, completed=100, total=100)
         elif event.action == ProgressAction.FINISHED:
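
The or-fallbacks above matter because Rich renders custom task fields through column templates, and a None field value breaks formatting. A minimal sketch, assuming the rich package, of a progress display with custom target/details fields and the same fallbacks (the column layout is illustrative, not the package's actual display):

from rich.progress import Progress, SpinnerColumn, TextColumn

progress = Progress(
    SpinnerColumn(),
    TextColumn("{task.description}"),
    TextColumn("{task.fields[target]}"),
    TextColumn("{task.fields[details]}"),
)

with progress:
    event_target, event_details, task_name = None, None, "default"
    # Fall back to task_name / "" so the custom field columns never receive None.
    task_id = progress.add_task(
        "", total=None, target=event_target or task_name, details=event_details or ""
    )
    progress.update(
        task_id,
        description="[bold blue]Chatting",
        target=event_target or task_name,
        details="gpt-4o turn 1",
    )
    progress.update(task_id, completed=100, total=100)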
mcp_agent/mcp/mcp_aggregator.py
CHANGED

@@ -81,13 +81,14 @@ class MCPAggregator(ContextDependent):
     def __init__(
         self,
         server_names: List[str],
-        connection_persistence: bool =
+        connection_persistence: bool = True,  # Default to True for better stability
         context: Optional["Context"] = None,
         name: str = None,
         **kwargs,
     ):
         """
         :param server_names: A list of server names to connect to.
+        :param connection_persistence: Whether to maintain persistent connections to servers (default: True).
         Note: The server names must be resolvable by the gen_client function, and specified in the server registry.
         """
         super().__init__(
mcp_agent/mcp/mcp_connection_manager.py
CHANGED

@@ -75,6 +75,19 @@ class ServerConnection:

         # Signal we want to shut down
         self._shutdown_event = Event()
+
+        # Track error state
+        self._error_occurred = False
+        self._error_message = None
+
+    def is_healthy(self) -> bool:
+        """Check if the server connection is healthy and ready to use."""
+        return self.session is not None and not self._error_occurred
+
+    def reset_error_state(self) -> None:
+        """Reset the error state, allowing reconnection attempts."""
+        self._error_occurred = False
+        self._error_message = None

     def request_shutdown(self) -> None:
         """
@@ -164,10 +177,12 @@ async def _server_lifecycle_task(server_conn: ServerConnection) -> None:
                 "server_name": server_name,
             },
         )
+        server_conn._error_occurred = True
+        server_conn._error_message = str(exc)
         # If there's an error, we should also set the event so that
         # 'get_server' won't hang
         server_conn._initialized_event.set()
-        raise
+        # No raise - allow graceful exit


 class MCPConnectionManager(ContextDependent):
@@ -183,38 +198,34 @@ class MCPConnectionManager(ContextDependent):
         self.server_registry = server_registry
         self.running_servers: Dict[str, ServerConnection] = {}
         self._lock = Lock()
+        # Manage our own task group - independent of task context
+        self._task_group = None
+        self._task_group_active = False

     async def __aenter__(self):
-
-
-        #
-
-
-
-        await self.context._connection_task_group.__aenter__()
-
-        self._tg = self.context._connection_task_group
+        # Create a task group that isn't tied to a specific task
+        self._task_group = create_task_group()
+        # Enter the task group context
+        await self._task_group.__aenter__()
+        self._task_group_active = True
+        self._tg = self._task_group
         return self

     async def __aexit__(self, exc_type, exc_val, exc_tb):
         """Ensure clean shutdown of all connections before exiting."""
-        current_task = asyncio.current_task()
-
         try:
             # First request all servers to shutdown
             await self.disconnect_all()
-
-            #
-
-
-
-
-
-
-
-            delattr(self.context, "_connection_task_group")
-            delattr(self.context, "_connection_task_group_context")
+
+            # Add a small delay to allow for clean shutdown
+            await asyncio.sleep(0.5)
+
+            # Then close the task group if it's active
+            if self._task_group_active:
+                await self._task_group.__aexit__(exc_type, exc_val, exc_tb)
+                self._task_group_active = False
+                self._task_group = None
+                self._tg = None
         except Exception as e:
             logger.error(f"Error during connection manager shutdown: {e}")

@@ -231,10 +242,13 @@ class MCPConnectionManager(ContextDependent):
         Connect to a server and return a RunningServer instance that will persist
         until explicitly disconnected.
         """
-        if
-
-
-        )
+        # Create task group if it doesn't exist yet - make this method more resilient
+        if not self._task_group_active:
+            self._task_group = create_task_group()
+            await self._task_group.__aenter__()
+            self._task_group_active = True
+            self._tg = self._task_group
+            logger.info(f"Auto-created task group for server: {server_name}")

         config = self.server_registry.registry.get(server_name)
         if not config:
@@ -286,11 +300,17 @@ class MCPConnectionManager(ContextDependent):
         """
         Get a running server instance, launching it if needed.
         """
-        # Get the server connection if it's already running
+        # Get the server connection if it's already running and healthy
         async with self._lock:
             server_conn = self.running_servers.get(server_name)
-            if server_conn:
+            if server_conn and server_conn.is_healthy():
                 return server_conn
+
+            # If server exists but isn't healthy, remove it so we can create a new one
+            if server_conn:
+                logger.info(f"{server_name}: Server exists but is unhealthy, recreating...")
+                self.running_servers.pop(server_name)
+                server_conn.request_shutdown()

         # Launch the connection
         server_conn = await self.launch_server(
@@ -302,11 +322,13 @@ class MCPConnectionManager(ContextDependent):
         # Wait until it's fully initialized, or an error occurs
         await server_conn.wait_for_initialized()

-        #
-        if not server_conn
+        # Check if the server is healthy after initialization
+        if not server_conn.is_healthy():
+            error_msg = server_conn._error_message or "Unknown error"
             raise ServerInitializationError(
-                f"{server_name}: Failed to initialize server
+                f"{server_name}: Failed to initialize server: {error_msg}"
             )
+
         return server_conn

     async def disconnect_server(self, server_name: str) -> None:
@@ -329,11 +351,19 @@ class MCPConnectionManager(ContextDependent):

     async def disconnect_all(self) -> None:
         """Disconnect all servers that are running under this connection manager."""
+        # Get a copy of servers to shutdown
+        servers_to_shutdown = []
+
         async with self._lock:
             if not self.running_servers:
                 return
-
-
-
-
+
+            # Make a copy of the servers to shut down
+            servers_to_shutdown = list(self.running_servers.items())
+            # Clear the dict immediately to prevent any new access
             self.running_servers.clear()
+
+        # Release the lock before waiting for servers to shut down
+        for name, conn in servers_to_shutdown:
+            logger.info(f"{name}: Requesting shutdown...")
+            conn.request_shutdown()
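
Two patterns in this file are worth calling out: a health flag checked before reusing a cached connection, and disconnect_all snapshotting the registry under the lock but signalling shutdown only after releasing it. A minimal self-contained sketch of both (class and method names mirror the diff, but the bodies are simplified stand-ins, not the package's implementation):

import asyncio
from typing import Dict

class ServerConnection:
    """Illustrative stand-in; the real class wraps an MCP session and lifecycle task."""

    def __init__(self) -> None:
        self._shutdown_event = asyncio.Event()
        self._error_occurred = False
        self.session = object()  # pretend an initialized session

    def is_healthy(self) -> bool:
        return self.session is not None and not self._error_occurred

    def request_shutdown(self) -> None:
        self._shutdown_event.set()

class ConnectionManager:
    def __init__(self) -> None:
        self._lock = asyncio.Lock()
        self.running_servers: Dict[str, ServerConnection] = {}

    async def get_server(self, name: str) -> ServerConnection:
        async with self._lock:
            conn = self.running_servers.get(name)
            if conn and conn.is_healthy():
                return conn
            if conn:  # cached but unhealthy: drop it and rebuild
                self.running_servers.pop(name)
                conn.request_shutdown()
            conn = ServerConnection()
            self.running_servers[name] = conn
            return conn

    async def disconnect_all(self) -> None:
        # Snapshot and clear under the lock, then signal shutdown after releasing it,
        # so a slow shutdown never blocks other tasks waiting on the lock.
        async with self._lock:
            if not self.running_servers:
                return
            to_shutdown = list(self.running_servers.items())
            self.running_servers.clear()
        for _name, conn in to_shutdown:
            conn.request_shutdown()

async def demo() -> None:
    mgr = ConnectionManager()
    await mgr.get_server("fetch")
    await mgr.disconnect_all()
    assert not mgr.running_servers

if __name__ == "__main__":
    asyncio.run(demo())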
mcp_agent/resources/examples/internal/agent.py
ADDED

@@ -0,0 +1,17 @@
+import asyncio
+from mcp_agent.core.fastagent import FastAgent
+
+# Create the application
+fast = FastAgent("FastAgent Example")
+
+
+# Define the agent
+@fast.agent(servers=["fetch"])
+async def main():
+    # use the --model command line switch or agent arguments to change model
+    async with fast.run() as agent:
+        await agent()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
mcp_agent/resources/examples/internal/job.py
ADDED

@@ -0,0 +1,83 @@
+"""
+PMO Job Description Generator Agent
+Purpose: Generate comprehensive PMO job descriptions using a multi-stage approach
+for clarity, consistency and quality control
+"""
+
+import asyncio
+from mcp_agent.core.fastagent import FastAgent
+
+# Create the application
+fast = FastAgent("PMO Job Description Generator")
+
+
+@fast.agent(
+    name="content_generator",
+    instruction="""You are a PMO job description expert. Generate job descriptions for PMO roles
+    following these guidelines:
+    - Focus on modern lean/agile and product-based approaches
+    - Emphasize practical experience and demonstrated results over formal requirements
+    - Ensure clear role differentiation with minimal overlap
+    - Format output in Markdown
+    - Context: Telecommunications industry in open organization valuing practical experience
+
+    Structure each job description with:
+    1. Role Title
+    2. Position Summary
+    3. Key Responsibilities
+    4. Required Experience
+    5. Desired Capabilities
+    """,
+    model="anthropic.claude-3-5-haiku-latest",
+)
+@fast.agent(
+    name="consistency_checker",
+    instruction="""Review PMO job descriptions for:
+    1. Alignment with lean/agile principles
+    2. Clear role differentiation
+    3. Progressive responsibility levels
+    4. Consistent formatting and structure
+    5. Telecommunications industry relevance
+    6. Emphasis on practical experience over formal requirements
+
+    Provide specific feedback for improvements.""",
+    model="gpt-4o",
+)
+@fast.agent(
+    name="file_handler",
+    instruction="""Save the finalized job descriptions as individual Markdown files.
+    Use consistent naming like 'pmo_director.md', 'pmo_manager.md' etc.""",
+    servers=["filesystem"],
+    use_history=False,
+)
+@fast.evaluator_optimizer(
+    name="job_description_writer",
+    generator="content_generator",
+    evaluator="consistency_checker",
+    min_rating="EXCELLENT",
+    max_refinements=2,
+)
+async def main():
+    async with fast.run() as agent:
+        roles = [
+            "PMO Director",
+            "Portfolio Manager",
+            "Senior Program Manager",
+            "Project Manager",
+            "PMO Analyst",
+            "Project Coordinator",
+        ]
+
+        # Pre-initialize the file_handler to establish a persistent connection
+        await agent.file_handler("Test connection to filesystem")
+
+        for role in roles:
+            # Generate and refine job description
+            description = await agent.job_description_writer(
+                f"Create job description for {role} role"
+            )
+            await agent.file_handler(f"Save this job description: {description}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
mcp_agent/resources/examples/mcp_researcher/researcher-eval.py
CHANGED

@@ -35,7 +35,7 @@ Summarize your evaluation as a structured response with:
     - Specific feedback and areas for improvement.""",
 )
 @agents.evaluator_optimizer(
-
+    generator="Researcher",
     evaluator="Evaluator",
     max_refinements=5,
     min_rating="EXCELLENT",
mcp_agent/resources/examples/researcher/fastagent.config.yaml
ADDED

@@ -0,0 +1,53 @@
+#
+# Please edit this configuration file to match your environment (on Windows).
+# Examples in comments below - check/change the paths.
+#
+#
+
+execution_engine: asyncio
+logger:
+  type: file
+  level: error
+  truncate_tools: true
+
+mcp:
+  servers:
+    brave:
+      # On windows replace the command and args line to use `node` and the absolute path to the server.
+      # Use `npm i -g @modelcontextprotocol/server-brave-search` to install the server globally.
+      # Use `npm -g root` to find the global node_modules path.`
+      # command: "node"
+      # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-brave-search/dist/index.js"]
+      command: "npx"
+      args: ["-y", "@modelcontextprotocol/server-brave-search"]
+      env:
+        # You can also place your BRAVE_API_KEY in the fastagent.secrets.yaml file.
+        BRAVE_API_KEY: <your_brave_api_key>
+    filesystem:
+      # On windows update the command and arguments to use `node` and the absolute path to the server.
+      # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally.
+      # Use `npm -g root` to find the global node_modules path.`
+      # command: "node"
+      # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","./agent_folder"]
+      command: "npx"
+      args: ["-y", "@modelcontextprotocol/server-filesystem", "./agent_folder/"]
+    interpreter:
+      command: "docker"
+      args: [
+        "run",
+        "-i",
+        "--rm",
+        "--pull=always",
+        "-v",
+        "./agent_folder:/mnt/data/",
+        # Docker needs the absolute path on Windows (e.g. "x:/fastagent/agent_folder:/mnt/data/")
+        # "./agent_folder:/mnt/data/",
+        "ghcr.io/evalstate/mcp-py-repl:latest",
+      ]
+      roots:
+        - uri: "file://./agent_folder/"
+          name: "agent_folder"
+          server_uri_alias: "file:///mnt/data/"
+    fetch:
+      command: "uvx"
+      args: ["mcp-server-fetch"]
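
A minimal sketch, assuming PyYAML is installed, of loading a config file like the one above and listing its MCP servers; handy for sanity-checking the paths after editing the file on Windows (this check is illustrative and not part of the package):

import yaml

with open("fastagent.config.yaml", "r", encoding="utf-8") as fh:
    config = yaml.safe_load(fh)

# Each server entry under mcp.servers supplies a command and its arguments.
for name, spec in config.get("mcp", {}).get("servers", {}).items():
    args = " ".join(spec.get("args", []))
    print(f"{name}: {spec['command']} {args}")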
mcp_agent/resources/examples/researcher/researcher-eval.py
ADDED

@@ -0,0 +1,53 @@
+import asyncio
+
+from mcp_agent.core.fastagent import FastAgent
+
+agents = FastAgent(name="Researcher")
+
+
+@agents.agent(
+    name="Researcher",
+    instruction="""
+    You are a research assistant, with access to internet search (via Brave),
+    website fetch, a python interpreter (you can install packages with uv) and a filesystem.
+    Use the current working directory to save and create files with both the Interpreter and Filesystem tools.
+    The interpreter has numpy, pandas, matplotlib and seaborn already installed.
+
+    You must always provide a summary of the specific sources you have used in your research.
+    """,
+    servers=["brave", "interpreter", "filesystem", "fetch"],
+)
+@agents.agent(
+    name="Evaluator",
+    model="sonnet",
+    instruction="""
+    Evaluate the response from the researcher based on the criteria:
+    - Sources cited. Has the researcher provided a summary of the specific sources used in the research?
+    - Validity. Has the researcher cross-checked and validated data and assumptions.
+    - Alignment. Has the researher acted and addressed feedback from any previous assessments?
+
+    For each criterion:
+    - Provide a rating (EXCELLENT, GOOD, FAIR, or POOR).
+    - Offer specific feedback or suggestions for improvement.
+
+    Summarize your evaluation as a structured response with:
+    - Overall quality rating.
+    - Specific feedback and areas for improvement.""",
+)
+@agents.evaluator_optimizer(
+    generator="Researcher",
+    evaluator="Evaluator",
+    max_refinements=5,
+    min_rating="EXCELLENT",
+    name="Researcher_Evaluator",
+)
+async def main():
+    async with agents.run() as agent:
+        await agent.prompt("Researcher_Evaluator")
+
+        print("Ask follow up quesions to the Researcher?")
+        await agent.prompt("Researcher", default="STOP")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
mcp_agent/resources/examples/researcher/researcher.py
ADDED

@@ -0,0 +1,38 @@
+import asyncio
+
+from mcp_agent.core.fastagent import FastAgent
+# from rich import print
+
+agents = FastAgent(name="Researcher")
+
+
+@agents.agent(
+    "Researcher",
+    instruction="""
+    You are a research assistant, with access to internet search (via Brave),
+    website fetch, a python interpreter (you can install packages with uv) and a filesystem.
+    Use the current working directory to save and create files with both the Interpreter and Filesystem tools.
+    The interpreter has numpy, pandas, matplotlib and seaborn already installed
+    """,
+    servers=["brave", "interpreter", "filesystem", "fetch"],
+)
+async def main():
+    research_prompt = """
+    Produce an investment report for the company Eutelsat. The final report should be saved in the filesystem in markdown format, and
+    contain at least the following:
+    1 - A brief description of the company
+    2 - Current financial position (find data, create and incorporate charts)
+    3 - A PESTLE analysis
+    4 - An investment thesis for the next 3 years. Include both 'buy side' and 'sell side' arguments, and a final
+    summary and recommendation.
+    Todays date is 15 February 2025. Include the main data sources consulted in presenting the report."""  # noqa: F841
+
+    async with agents.run() as agent:
+        await agent.prompt()
+
+        # await agent.prompt(default="STOP")
+        # await agent.prompt(default=research_prompt)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
mcp_agent/resources/examples/workflows/agent.py
ADDED

@@ -0,0 +1,17 @@
+import asyncio
+from mcp_agent.core.fastagent import FastAgent
+
+# Create the application
+fast = FastAgent("FastAgent Example")
+
+
+# Define the agent
+@fast.agent(servers=["fetch"])
+async def main():
+    # use the --model command line switch or agent arguments to change model
+    async with fast.run() as agent:
+        await agent()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
mcp_agent/resources/examples/workflows/agent_build.py
ADDED

@@ -0,0 +1,61 @@
+"""
+This demonstrates creating multiple agents and an orchestrator to coordinate them.
+"""
+
+import asyncio
+from mcp_agent.core.fastagent import FastAgent
+
+# Create the application
+fast = FastAgent("Agent Builder")
+
+
+@fast.agent(
+    "agent_expert",
+    instruction="""
+    You design agent workflows, using the practices from 'Building Effective Agents'. You provide concise
+    specific guidance on design and composition. Prefer simple solutions, and don't nest workflows more
+    than one level deep. Your ultimate goal will be to produce a single '.py' agent in the style
+    shown to you that fulfils the Human's needs.
+    Keep the application simple, define agents with appropriate MCP Servers, Tools and the Human Input Tool.
+    The style of the program should be like the examples you have been showm, very little additional code (use
+    very simple Python where necessary). """,
+    servers=["filesystem", "fetch"],
+)
+# Define worker agents
+@fast.agent(
+    "requirements_capture",
+    instruction="""
+    You help the Human define their requirements for building Agent based systems. Keep questions short and
+    simple, collaborate with the agent_expert or other agents in the workflow to refine human interaction.
+    Keep requests to the Human simple and minimal. """,
+    human_input=True,
+)
+# Define the orchestrator to coordinate the other agents
+@fast.orchestrator(
+    name="orchestrator_worker",
+    agents=["agent_expert", "requirements_capture"],
+    model="sonnet",
+)
+async def main():
+    async with fast.run() as agent:
+        await agent.agent_expert("""
+        - Read this paper: https://www.anthropic.com/research/building-effective-agents" to understand
+        the principles of Building Effective Agents.
+        - Read and examing the sample agent and workflow definitions in the current directory:
+          - chaining.py - simple agent chaining example.
+          - parallel.py - parallel agents example.
+          - evaluator.py - evaluator optimizer example.
+          - orchestrator.py - complex orchestration example.
+          - router.py - workflow routing example.
+        - Load the 'fastagent.config.yaml' file to see the available and configured MCP Servers.
+        When producing the agent/workflow definition, keep to a simple single .py file in the style
+        of the examples.
+        """)
+
+        await agent.orchestrator_worker(
+            "Write an Agent program that fulfils the Human's needs."
+        )
+
+
+if __name__ == "__main__":
+    asyncio.run(main())