fast-agent-mcp 0.0.8__py3-none-any.whl → 0.0.11__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
Potentially problematic release: this version of fast-agent-mcp might be problematic.
- {fast_agent_mcp-0.0.8.dist-info → fast_agent_mcp-0.0.11.dist-info}/METADATA +15 -9
- {fast_agent_mcp-0.0.8.dist-info → fast_agent_mcp-0.0.11.dist-info}/RECORD +28 -26
- mcp_agent/app.py +4 -4
- mcp_agent/cli/commands/bootstrap.py +4 -0
- mcp_agent/cli/commands/setup.py +1 -1
- mcp_agent/core/fastagent.py +498 -369
- mcp_agent/event_progress.py +5 -2
- mcp_agent/human_input/handler.py +6 -2
- mcp_agent/logging/rich_progress.py +10 -5
- mcp_agent/mcp/mcp_aggregator.py +2 -1
- mcp_agent/mcp/mcp_connection_manager.py +67 -37
- mcp_agent/resources/examples/data-analysis/analysis.py +1 -1
- mcp_agent/resources/examples/data-analysis/fastagent.config.yaml +2 -0
- mcp_agent/resources/examples/internal/job.py +83 -0
- mcp_agent/resources/examples/workflows/agent_build.py +61 -0
- mcp_agent/resources/examples/workflows/chaining.py +0 -1
- mcp_agent/resources/examples/workflows/human_input.py +0 -1
- mcp_agent/resources/examples/workflows/orchestrator.py +1 -7
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +63 -65
- mcp_agent/workflows/llm/augmented_llm.py +9 -1
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +28 -23
- mcp_agent/workflows/llm/model_factory.py +25 -11
- mcp_agent/workflows/orchestrator/orchestrator.py +106 -100
- mcp_agent/workflows/orchestrator/orchestrator_prompts.py +11 -6
- mcp_agent/workflows/router/router_llm.py +13 -2
- {fast_agent_mcp-0.0.8.dist-info → fast_agent_mcp-0.0.11.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.0.8.dist-info → fast_agent_mcp-0.0.11.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.0.8.dist-info → fast_agent_mcp-0.0.11.dist-info}/licenses/LICENSE +0 -0
mcp_agent/event_progress.py CHANGED
@@ -11,14 +11,16 @@ class ProgressAction(str, Enum):
     """Progress actions available in the system."""

     STARTING = "Starting"
+    LOADED = "Loaded"
     INITIALIZED = "Initialized"
     CHATTING = "Chatting"
+    ROUTING = "Routing"
+    PLANNING = "Planning"
     READY = "Ready"
     CALLING_TOOL = "Calling Tool"
     FINISHED = "Finished"
     SHUTDOWN = "Shutdown"
     AGGREGATOR_INITIALIZED = "Running"
-    ROUTING = "Routing"
     FATAL_ERROR = "Error"


@@ -81,7 +83,8 @@ def convert_log_event(event: Event) -> Optional[ProgressEvent]:
         if chat_turn is not None:
             details = f"{model} turn {chat_turn}"
         else:
-
+            if not target:
+                target = event_data.get("target", "unknown")

         return ProgressEvent(
             ProgressAction(progress_action),
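The net effect of the first hunk: LOADED and PLANNING are new actions, and ROUTING moves up so the enum reads in lifecycle order. Because ProgressAction subclasses str, values round-trip through plain strings, which is what convert_log_event relies on when it calls ProgressAction(progress_action). A minimal standalone sketch of that round-trip (the ProgressEvent here is a simplified stand-in, not the package's actual type):

    from dataclasses import dataclass
    from enum import Enum
    from typing import Optional

    class ProgressAction(str, Enum):
        """Subset of the actions above, in the new lifecycle order."""
        STARTING = "Starting"
        LOADED = "Loaded"
        INITIALIZED = "Initialized"
        CHATTING = "Chatting"
        ROUTING = "Routing"
        PLANNING = "Planning"
        READY = "Ready"

    @dataclass
    class ProgressEvent:
        """Simplified stand-in for the package's event type."""
        action: ProgressAction
        target: str
        details: Optional[str] = None

    # str-subclassing lets log payloads carry plain strings back into the enum:
    event = ProgressEvent(ProgressAction("Planning"), target="orchestrator")
    assert event.action is ProgressAction.PLANNING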
mcp_agent/human_input/handler.py CHANGED
@@ -36,7 +36,9 @@ async def console_input_callback(request: HumanInputRequest) -> HumanInputRespon
     try:
         loop = asyncio.get_event_loop()
         response = await asyncio.wait_for(
-            loop.run_in_executor(
+            loop.run_in_executor(
+                None, lambda: Prompt.ask("Provide your response ")
+            ),
             request.timeout_seconds,
         )
     except asyncio.TimeoutError:
@@ -44,6 +46,8 @@ async def console_input_callback(request: HumanInputRequest) -> HumanInputRespon
         raise TimeoutError("No response received within timeout period")
 else:
     loop = asyncio.get_event_loop()
-    response = await loop.run_in_executor(
+    response = await loop.run_in_executor(
+        None, lambda: Prompt.ask("Provide your response ")
+    )

 return HumanInputResponse(request_id=request.request_id, response=response.strip())
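Both hunks are formatting splits of the same call, but the pattern they reformat is worth spelling out: Prompt.ask blocks, so it is pushed onto the default executor, and the timeout branch bounds it with asyncio.wait_for. A self-contained sketch of that pattern, using the builtin input in place of Rich's Prompt.ask:

    import asyncio
    from typing import Optional

    async def ask_with_timeout(prompt: str, timeout: Optional[float]) -> str:
        """Run a blocking prompt off the event loop, optionally bounded by a timeout."""
        loop = asyncio.get_event_loop()
        # None selects the default ThreadPoolExecutor; the lambda defers the blocking call.
        future = loop.run_in_executor(None, lambda: input(prompt))
        if timeout is not None:
            # Raises asyncio.TimeoutError if no answer arrives in time; note the
            # executor thread itself stays blocked on input() until it returns.
            return await asyncio.wait_for(future, timeout)
        return await future

    if __name__ == "__main__":
        print(asyncio.run(ask_with_timeout("Provide your response ", 30.0)))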
mcp_agent/logging/rich_progress.py CHANGED
@@ -68,10 +68,12 @@ class RichProgressDisplay:
         """Map actions to appropriate styles."""
         return {
             ProgressAction.STARTING: "bold yellow",
+            ProgressAction.LOADED: "dim green",
             ProgressAction.INITIALIZED: "dim green",
             ProgressAction.CHATTING: "bold blue",
-            ProgressAction.READY: "dim green",
             ProgressAction.ROUTING: "bold blue",
+            ProgressAction.PLANNING: "bold blue",
+            ProgressAction.READY: "dim green",
             ProgressAction.CALLING_TOOL: "bold magenta",
             ProgressAction.FINISHED: "black on green",
             ProgressAction.SHUTDOWN: "black on red",
@@ -82,29 +84,32 @@ class RichProgressDisplay:
     def update(self, event: ProgressEvent) -> None:
         """Update the progress display with a new event."""
         task_name = event.agent_name or "default"
+
         # Create new task if needed
         if task_name not in self._taskmap:
             task_id = self._progress.add_task(
                 "",
                 total=None,
-                target=f"{event.target}",
-                details=f"{event.agent_name}",
+                target=f"{event.target or task_name}",  # Use task_name as fallback for target
+                details=f"{event.agent_name or ''}",
             )
             self._taskmap[task_name] = task_id
         else:
             task_id = self._taskmap[task_name]

+        # Ensure no None values in the update
         self._progress.update(
             task_id,
             description=f"[{self._get_action_style(event.action)}]{event.action.value:<15}",
-            target=event.target,
-            details=event.details
+            target=event.target or task_name,  # Use task_name as fallback for target
+            details=event.details or "",
             task_name=task_name,
         )

         if (
             event.action == ProgressAction.INITIALIZED
             or event.action == ProgressAction.READY
+            or event.action == ProgressAction.LOADED
         ):
             self._progress.update(task_id, completed=100, total=100)
         elif event.action == ProgressAction.FINISHED:
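The or-fallbacks matter because Rich interpolates custom task fields straight into column templates, so a None passed through would render as the literal text "None". A small sketch of the same defensive pattern (assumes the rich package; the column layout is illustrative, not the package's exact one):

    from rich.progress import Progress, TextColumn

    progress = Progress(
        TextColumn("{task.description}"),
        TextColumn("{task.fields[target]}"),   # custom field: None would render as "None"
        TextColumn("{task.fields[details]}"),
    )

    with progress:
        task_id = progress.add_task("Chatting", total=None, target="", details="")
        event_target = None  # e.g. an event arrived without a target
        task_name = "default"
        # Mirror the diff: fall back rather than pass None through to the renderer.
        progress.update(task_id, target=event_target or task_name, details="")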
mcp_agent/mcp/mcp_aggregator.py CHANGED
@@ -81,13 +81,14 @@ class MCPAggregator(ContextDependent):
     def __init__(
         self,
         server_names: List[str],
-        connection_persistence: bool =
+        connection_persistence: bool = True,  # Default to True for better stability
         context: Optional["Context"] = None,
         name: str = None,
         **kwargs,
     ):
         """
         :param server_names: A list of server names to connect to.
+        :param connection_persistence: Whether to maintain persistent connections to servers (default: True).
         Note: The server names must be resolvable by the gen_client function, and specified in the server registry.
         """
         super().__init__(
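For callers, the visible effect is that aggregators now keep their server connections open across tool calls unless told otherwise. A minimal construction sketch (the server names are illustrative and must exist in your server registry):

    from mcp_agent.mcp.mcp_aggregator import MCPAggregator

    aggregator = MCPAggregator(
        server_names=["fetch", "filesystem"],  # illustrative; must be in the registry
        connection_persistence=True,           # new default; pass False to reconnect per call
    )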
mcp_agent/mcp/mcp_connection_manager.py CHANGED
@@ -75,6 +75,19 @@ class ServerConnection:

         # Signal we want to shut down
         self._shutdown_event = Event()
+
+        # Track error state
+        self._error_occurred = False
+        self._error_message = None
+
+    def is_healthy(self) -> bool:
+        """Check if the server connection is healthy and ready to use."""
+        return self.session is not None and not self._error_occurred
+
+    def reset_error_state(self) -> None:
+        """Reset the error state, allowing reconnection attempts."""
+        self._error_occurred = False
+        self._error_message = None

     def request_shutdown(self) -> None:
         """
@@ -164,10 +177,12 @@ async def _server_lifecycle_task(server_conn: ServerConnection) -> None:
                 "server_name": server_name,
             },
         )
+        server_conn._error_occurred = True
+        server_conn._error_message = str(exc)
         # If there's an error, we should also set the event so that
         # 'get_server' won't hang
         server_conn._initialized_event.set()
-        raise
+        # No raise - allow graceful exit


 class MCPConnectionManager(ContextDependent):
@@ -183,38 +198,34 @@ class MCPConnectionManager(ContextDependent):
         self.server_registry = server_registry
         self.running_servers: Dict[str, ServerConnection] = {}
         self._lock = Lock()
+        # Manage our own task group - independent of task context
+        self._task_group = None
+        self._task_group_active = False

     async def __aenter__(self):
-
-
-        #
-
-
-
-        await self.context._connection_task_group.__aenter__()
-
-        self._tg = self.context._connection_task_group
+        # Create a task group that isn't tied to a specific task
+        self._task_group = create_task_group()
+        # Enter the task group context
+        await self._task_group.__aenter__()
+        self._task_group_active = True
+        self._tg = self._task_group
         return self

     async def __aexit__(self, exc_type, exc_val, exc_tb):
         """Ensure clean shutdown of all connections before exiting."""
-        current_task = asyncio.current_task()
-
         try:
             # First request all servers to shutdown
             await self.disconnect_all()
-
-            #
-
-
-
-
-
-
-
-            delattr(self.context, "_connection_task_group")
-            delattr(self.context, "_connection_task_group_context")
+
+            # Add a small delay to allow for clean shutdown
+            await asyncio.sleep(0.5)
+
+            # Then close the task group if it's active
+            if self._task_group_active:
+                await self._task_group.__aexit__(exc_type, exc_val, exc_tb)
+                self._task_group_active = False
+                self._task_group = None
+                self._tg = None
         except Exception as e:
             logger.error(f"Error during connection manager shutdown: {e}")

@@ -231,10 +242,13 @@ class MCPConnectionManager(ContextDependent):
         Connect to a server and return a RunningServer instance that will persist
         until explicitly disconnected.
         """
-        if
-
-
-        )
+        # Create task group if it doesn't exist yet - make this method more resilient
+        if not self._task_group_active:
+            self._task_group = create_task_group()
+            await self._task_group.__aenter__()
+            self._task_group_active = True
+            self._tg = self._task_group
+            logger.info(f"Auto-created task group for server: {server_name}")

         config = self.server_registry.registry.get(server_name)
         if not config:
@@ -286,11 +300,17 @@ class MCPConnectionManager(ContextDependent):
         """
         Get a running server instance, launching it if needed.
         """
-        # Get the server connection if it's already running
+        # Get the server connection if it's already running and healthy
         async with self._lock:
             server_conn = self.running_servers.get(server_name)
-            if server_conn:
+            if server_conn and server_conn.is_healthy():
                 return server_conn
+
+            # If server exists but isn't healthy, remove it so we can create a new one
+            if server_conn:
+                logger.info(f"{server_name}: Server exists but is unhealthy, recreating...")
+                self.running_servers.pop(server_name)
+                server_conn.request_shutdown()

         # Launch the connection
         server_conn = await self.launch_server(
@@ -302,11 +322,13 @@ class MCPConnectionManager(ContextDependent):
         # Wait until it's fully initialized, or an error occurs
         await server_conn.wait_for_initialized()

-        #
-        if not server_conn
+        # Check if the server is healthy after initialization
+        if not server_conn.is_healthy():
+            error_msg = server_conn._error_message or "Unknown error"
             raise ServerInitializationError(
-                f"{server_name}: Failed to initialize server
+                f"{server_name}: Failed to initialize server: {error_msg}"
             )
+
         return server_conn

     async def disconnect_server(self, server_name: str) -> None:
@@ -329,11 +351,19 @@ class MCPConnectionManager(ContextDependent):

     async def disconnect_all(self) -> None:
         """Disconnect all servers that are running under this connection manager."""
+        # Get a copy of servers to shutdown
+        servers_to_shutdown = []
+
         async with self._lock:
             if not self.running_servers:
                 return
-
-
-
-
+
+            # Make a copy of the servers to shut down
+            servers_to_shutdown = list(self.running_servers.items())
+            # Clear the dict immediately to prevent any new access
             self.running_servers.clear()
+
+        # Release the lock before waiting for servers to shut down
+        for name, conn in servers_to_shutdown:
+            logger.info(f"{name}: Requesting shutdown...")
+            conn.request_shutdown()
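Taken together, these hunks replace fail-fast behaviour with a supervisor pattern: the lifecycle task records the error instead of re-raising, always sets the init event so waiters wake up, and get_server inspects is_healthy() to decide between reuse, recreate, or raise. A compact standalone sketch of that pattern (names simplified from the diff; the real code uses AnyIO primitives rather than asyncio's):

    import asyncio
    from typing import Optional

    class Connection:
        def __init__(self) -> None:
            self.session: Optional[object] = None
            self._error_occurred = False
            self._error_message: Optional[str] = None
            self._initialized = asyncio.Event()

        def is_healthy(self) -> bool:
            return self.session is not None and not self._error_occurred

    async def lifecycle(conn: Connection, fail: bool) -> None:
        try:
            if fail:
                raise RuntimeError("transport refused")
            conn.session = object()  # stand-in for real session setup
        except Exception as exc:
            conn._error_occurred = True     # record instead of re-raising,
            conn._error_message = str(exc)  # so get_server can report it
        finally:
            conn._initialized.set()         # never leave waiters hanging

    async def get_server(conn: Connection) -> Connection:
        await conn._initialized.wait()
        if not conn.is_healthy():
            raise RuntimeError(f"Failed to initialize server: {conn._error_message}")
        return conn

    async def main() -> None:
        good, bad = Connection(), Connection()
        await asyncio.gather(lifecycle(good, fail=False), lifecycle(bad, fail=True))
        assert (await get_server(good)).is_healthy()
        try:
            await get_server(bad)
        except RuntimeError as e:
            print(e)  # Failed to initialize server: transport refused

    asyncio.run(main())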
mcp_agent/resources/examples/data-analysis/analysis.py CHANGED
@@ -7,7 +7,7 @@ fast = FastAgent("Data Analysis (Roots)")


 @fast.agent(
-    name="
+    name="data_analysis",
     instruction="""
     You have access to a Python 3.12 interpreter and you can use this to analyse and process data.
     Common analysis packages such as Pandas, Seaborn and Matplotlib are already installed.
mcp_agent/resources/examples/internal/job.py ADDED
@@ -0,0 +1,83 @@
+"""
+PMO Job Description Generator Agent
+Purpose: Generate comprehensive PMO job descriptions using a multi-stage approach
+for clarity, consistency and quality control
+"""
+
+import asyncio
+from mcp_agent.core.fastagent import FastAgent
+
+# Create the application
+fast = FastAgent("PMO Job Description Generator")
+
+
+@fast.agent(
+    name="content_generator",
+    instruction="""You are a PMO job description expert. Generate job descriptions for PMO roles
+    following these guidelines:
+    - Focus on modern lean/agile and product-based approaches
+    - Emphasize practical experience and demonstrated results over formal requirements
+    - Ensure clear role differentiation with minimal overlap
+    - Format output in Markdown
+    - Context: Telecommunications industry in open organization valuing practical experience
+
+    Structure each job description with:
+    1. Role Title
+    2. Position Summary
+    3. Key Responsibilities
+    4. Required Experience
+    5. Desired Capabilities
+    """,
+    model="anthropic.claude-3-5-haiku-latest",
+)
+@fast.agent(
+    name="consistency_checker",
+    instruction="""Review PMO job descriptions for:
+    1. Alignment with lean/agile principles
+    2. Clear role differentiation
+    3. Progressive responsibility levels
+    4. Consistent formatting and structure
+    5. Telecommunications industry relevance
+    6. Emphasis on practical experience over formal requirements
+
+    Provide specific feedback for improvements.""",
+    model="gpt-4o",
+)
+@fast.agent(
+    name="file_handler",
+    instruction="""Save the finalized job descriptions as individual Markdown files.
+    Use consistent naming like 'pmo_director.md', 'pmo_manager.md' etc.""",
+    servers=["filesystem"],
+    use_history=False,
+)
+@fast.evaluator_optimizer(
+    name="job_description_writer",
+    optimizer="content_generator",
+    evaluator="consistency_checker",
+    min_rating="EXCELLENT",
+    max_refinements=2,
+)
+async def main():
+    async with fast.run() as agent:
+        roles = [
+            "PMO Director",
+            "Portfolio Manager",
+            "Senior Program Manager",
+            "Project Manager",
+            "PMO Analyst",
+            "Project Coordinator",
+        ]
+
+        # Pre-initialize the file_handler to establish a persistent connection
+        await agent.file_handler("Test connection to filesystem")
+
+        for role in roles:
+            # Generate and refine job description
+            description = await agent.job_description_writer(
+                f"Create job description for {role} role"
+            )
+            await agent.file_handler(f"Save this job description: {description}")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
mcp_agent/resources/examples/workflows/agent_build.py ADDED
@@ -0,0 +1,61 @@
+"""
+This demonstrates creating multiple agents and an orchestrator to coordinate them.
+"""
+
+import asyncio
+from mcp_agent.core.fastagent import FastAgent
+
+# Create the application
+fast = FastAgent("Agent Builder")
+
+
+@fast.agent(
+    "agent_expert",
+    instruction="""
+    You design agent workflows, using the practices from 'Building Effective Agents'. You provide concise
+    specific guidance on design and composition. Prefer simple solutions, and don't nest workflows more
+    than one level deep. Your ultimate goal will be to produce a single '.py' agent in the style
+    shown to you that fulfils the Human's needs.
+    Keep the application simple, define agents with appropriate MCP Servers, Tools and the Human Input Tool.
+    The style of the program should be like the examples you have been showm, very little additional code (use
+    very simple Python where necessary). """,
+    servers=["filesystem", "fetch"],
+)
+# Define worker agents
+@fast.agent(
+    "requirements_capture",
+    instruction="""
+    You help the Human define their requirements for building Agent based systems. Keep questions short and
+    simple, collaborate with the agent_expert or other agents in the workflow to refine human interaction.
+    Keep requests to the Human simple and minimal. """,
+    human_input=True,
+)
+# Define the orchestrator to coordinate the other agents
+@fast.orchestrator(
+    name="orchestrator_worker",
+    agents=["agent_expert", "requirements_capture"],
+    model="sonnet",
+)
+async def main():
+    async with fast.run() as agent:
+        await agent.agent_expert("""
+        - Read this paper: https://www.anthropic.com/research/building-effective-agents" to understand
+        the principles of Building Effective Agents.
+        - Read and examing the sample agent and workflow definitions in the current directory:
+          - chaining.py - simple agent chaining example.
+          - parallel.py - parallel agents example.
+          - evaluator.py - evaluator optimizer example.
+          - orchestrator.py - complex orchestration example.
+          - router.py - workflow routing example.
+        - Load the 'fastagent.config.yaml' file to see the available and configured MCP Servers.
+        When producing the agent/workflow definition, keep to a simple single .py file in the style
+        of the examples.
+        """)
+
+        await agent.orchestrator_worker(
+            "Write an Agent program that fulfils the Human's needs."
+        )
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
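Both new example files lean on fast-agent's decorator-stacking style: each decorator registers a named agent or workflow on the app, and fast.run() yields a wrapper whose attributes (agent.file_handler, agent.orchestrator_worker, ...) dispatch by registered name. A toy sketch of that registry pattern in plain Python (illustrative only, not the library's implementation):

    import asyncio
    from typing import Awaitable, Callable, Dict

    class MiniApp:
        """Toy stand-in for FastAgent's decorator registry."""

        def __init__(self) -> None:
            self._agents: Dict[str, Callable[[str], Awaitable[str]]] = {}

        def agent(self, name: str, **_config):
            def register(fn):
                self._agents[name] = fn  # decorators register by name...
                return fn
            return register

        def __getattr__(self, name: str):
            try:
                return self._agents[name]  # ...and attribute access dispatches by name
            except KeyError:
                raise AttributeError(name)

    app = MiniApp()

    @app.agent(name="echo")
    async def echo(message: str) -> str:
        return f"echo: {message}"

    print(asyncio.run(app.echo("hello")))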
mcp_agent/resources/examples/workflows/orchestrator.py CHANGED
@@ -24,7 +24,7 @@ fast = FastAgent("Orchestrator-Workers")
     the closest match to a user's request, make the appropriate tool calls,
     and return the URI and CONTENTS of the closest match.""",
     servers=["fetch", "filesystem"],
-    model="gpt-4o
+    model="gpt-4o",
 )
 @fast.agent(
     name="writer",
@@ -44,12 +44,6 @@ fast = FastAgent("Orchestrator-Workers")
 # Define the orchestrator to coordinate the other agents
 @fast.orchestrator(
     name="orchestrate",
-    instruction="""Load the student's short story from short_story.md,
-    and generate a report with feedback across proofreading,
-    factuality/logical consistency and style adherence. Use the style rules from
-    https://apastyle.apa.org/learn/quick-guide-on-formatting and
-    https://apastyle.apa.org/learn/quick-guide-on-references.
-    Write the graded report to graded_report.md in the same directory as short_story.md""",
     agents=["finder", "writer", "proofreader"],
     model="sonnet",
 )
mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py CHANGED
@@ -169,98 +169,96 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         best_response = None
         best_rating = QualityRating.POOR
         self.refinement_history = []
-
-        #
+
+        # Use a single AsyncExitStack for the entire method to maintain connections
         async with contextlib.AsyncExitStack() as stack:
+            # Enter all agent contexts once at the beginning
             if isinstance(self.optimizer, Agent):
                 await stack.enter_async_context(self.optimizer)
+            if isinstance(self.evaluator, Agent):
+                await stack.enter_async_context(self.evaluator)
+
+            # Initial generation
             response = await self.optimizer_llm.generate(
                 message=message,
                 request_params=request_params,
             )

-
+            best_response = response

-
-
+            while refinement_count < self.max_refinements:
+                logger.debug("Optimizer result:", data=response)

-
-
-
-
-
-
-
-
-
-                evaluation_result = None
-                async with contextlib.AsyncExitStack() as stack:
-                    if isinstance(self.evaluator, Agent):
-                        await stack.enter_async_context(self.evaluator)
+                # Evaluate current response
+                eval_prompt = self._build_eval_prompt(
+                    original_request=str(message),
+                    current_response="\n".join(str(r) for r in response)
+                    if isinstance(response, list)
+                    else str(response),
+                    iteration=refinement_count,
+                )

+                # No need for nested AsyncExitStack here - using the outer one
                 evaluation_result = await self.evaluator_llm.generate_structured(
                     message=eval_prompt,
                     response_model=EvaluationResult,
                     request_params=request_params,
                 )

-
-
-
-
-
-
-
-                )
-
-                logger.debug("Evaluator result:", data=evaluation_result)
-
-                # Track best response (using enum ordering)
-                if evaluation_result.rating.value > best_rating.value:
-                    best_rating = evaluation_result.rating
-                    best_response = response
-                    logger.debug(
-                        "New best response:",
-                        data={"rating": best_rating, "response": best_response},
+                # Track iteration
+                self.refinement_history.append(
+                    {
+                        "attempt": refinement_count + 1,
+                        "response": response,
+                        "evaluation_result": evaluation_result,
+                    }
                 )

-
-
-
-
-
-
-
-
-                        "rating":
-
-
-
+                logger.debug("Evaluator result:", data=evaluation_result)
+
+                # Track best response (using enum ordering)
+                if evaluation_result.rating.value > best_rating.value:
+                    best_rating = evaluation_result.rating
+                    best_response = response
+                    logger.debug(
+                        "New best response:",
+                        data={"rating": best_rating, "response": best_response},
+                    )
+
+                # Check if we've reached acceptable quality
+                if (
+                    evaluation_result.rating.value >= self.min_rating.value
+                    or not evaluation_result.needs_improvement
+                ):
+                    logger.debug(
+                        f"Acceptable quality {evaluation_result.rating.value} reached",
+                        data={
+                            "rating": evaluation_result.rating.value,
+                            "needs_improvement": evaluation_result.needs_improvement,
+                            "min_rating": self.min_rating.value,
+                        },
+                    )
+                    break
+
+                # Generate refined response
+                refinement_prompt = self._build_refinement_prompt(
+                    original_request=str(message),
+                    current_response="\n".join(str(r) for r in response)
+                    if isinstance(response, list)
+                    else str(response),
+                    feedback=evaluation_result,
+                    iteration=refinement_count,
                 )
-                break
-
-            # Generate refined response
-            refinement_prompt = self._build_refinement_prompt(
-                original_request=str(message),
-                current_response="\n".join(str(r) for r in response)
-                if isinstance(response, list)
-                else str(response),
-                feedback=evaluation_result,
-                iteration=refinement_count,
-            )
-
-            async with contextlib.AsyncExitStack() as stack:
-                if isinstance(self.optimizer, Agent):
-                    await stack.enter_async_context(self.optimizer)

+                # No nested AsyncExitStack here either
                 response = await self.optimizer_llm.generate(
                     message=refinement_prompt,
                     request_params=request_params,
                 )

-
+                refinement_count += 1

-
+        return best_response

     async def generate_str(
         self,
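The restructured method is now a single generate / evaluate / maybe-refine loop under one exit stack, rather than re-entering agent contexts on every pass. Stripped of the LLM calls, the control flow is roughly as follows (a sketch; the QualityRating ordering and field names follow the diff, with IntEnum standing in for the .value comparisons):

    from enum import IntEnum

    class QualityRating(IntEnum):  # IntEnum so ratings compare by value, as the diff assumes
        POOR = 0
        FAIR = 1
        GOOD = 2
        EXCELLENT = 3

    def refine(generate, evaluate, min_rating: QualityRating, max_refinements: int):
        """Generate once, then evaluate-and-refine until quality is acceptable."""
        response = generate(None)          # initial generation
        best_response, best_rating = response, QualityRating.POOR
        refinement_count = 0
        while refinement_count < max_refinements:
            rating, needs_improvement = evaluate(response)
            if rating > best_rating:       # track the best attempt seen so far
                best_rating, best_response = rating, response
            if rating >= min_rating or not needs_improvement:
                break                      # acceptable quality reached
            response = generate(response)  # refine using the previous attempt
            refinement_count += 1
        return best_response

    # Toy usage: each refinement bumps quality by one step.
    ratings = iter([QualityRating.FAIR, QualityRating.GOOD, QualityRating.EXCELLENT])
    print(refine(
        generate=lambda prev: (prev or 0) + 1,
        evaluate=lambda r: (next(ratings), True),
        min_rating=QualityRating.EXCELLENT,
        max_refinements=3,
    ))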