fast-agent-mcp 0.1.7__py3-none-any.whl → 0.1.9__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, exactly as they appear in their public registry. It is provided for informational purposes only.
- {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/METADATA +37 -9
- {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/RECORD +53 -31
- {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/entry_points.txt +1 -0
- mcp_agent/agents/agent.py +5 -11
- mcp_agent/core/agent_app.py +125 -44
- mcp_agent/core/decorators.py +3 -2
- mcp_agent/core/enhanced_prompt.py +106 -20
- mcp_agent/core/factory.py +28 -66
- mcp_agent/core/fastagent.py +13 -3
- mcp_agent/core/mcp_content.py +222 -0
- mcp_agent/core/prompt.py +132 -0
- mcp_agent/core/proxies.py +41 -36
- mcp_agent/human_input/handler.py +4 -1
- mcp_agent/logging/transport.py +30 -3
- mcp_agent/mcp/mcp_aggregator.py +27 -22
- mcp_agent/mcp/mime_utils.py +69 -0
- mcp_agent/mcp/prompt_message_multipart.py +64 -0
- mcp_agent/mcp/prompt_serialization.py +447 -0
- mcp_agent/mcp/prompts/__init__.py +0 -0
- mcp_agent/mcp/prompts/__main__.py +10 -0
- mcp_agent/mcp/prompts/prompt_server.py +508 -0
- mcp_agent/mcp/prompts/prompt_template.py +469 -0
- mcp_agent/mcp/resource_utils.py +203 -0
- mcp_agent/resources/examples/internal/agent.py +1 -1
- mcp_agent/resources/examples/internal/fastagent.config.yaml +2 -2
- mcp_agent/resources/examples/internal/sizer.py +0 -5
- mcp_agent/resources/examples/prompting/__init__.py +3 -0
- mcp_agent/resources/examples/prompting/agent.py +23 -0
- mcp_agent/resources/examples/prompting/fastagent.config.yaml +44 -0
- mcp_agent/resources/examples/prompting/image_server.py +56 -0
- mcp_agent/resources/examples/researcher/researcher-eval.py +1 -1
- mcp_agent/resources/examples/workflows/orchestrator.py +5 -4
- mcp_agent/resources/examples/workflows/router.py +0 -2
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py +57 -87
- mcp_agent/workflows/llm/anthropic_utils.py +101 -0
- mcp_agent/workflows/llm/augmented_llm.py +155 -141
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +135 -281
- mcp_agent/workflows/llm/augmented_llm_openai.py +175 -337
- mcp_agent/workflows/llm/augmented_llm_passthrough.py +104 -0
- mcp_agent/workflows/llm/augmented_llm_playback.py +109 -0
- mcp_agent/workflows/llm/model_factory.py +25 -6
- mcp_agent/workflows/llm/openai_utils.py +65 -0
- mcp_agent/workflows/llm/providers/__init__.py +8 -0
- mcp_agent/workflows/llm/providers/multipart_converter_anthropic.py +348 -0
- mcp_agent/workflows/llm/providers/multipart_converter_openai.py +426 -0
- mcp_agent/workflows/llm/providers/openai_multipart.py +197 -0
- mcp_agent/workflows/llm/providers/sampling_converter_anthropic.py +258 -0
- mcp_agent/workflows/llm/providers/sampling_converter_openai.py +229 -0
- mcp_agent/workflows/llm/sampling_format_converter.py +39 -0
- mcp_agent/workflows/orchestrator/orchestrator.py +62 -153
- mcp_agent/workflows/router/router_llm.py +18 -24
- mcp_agent/core/server_validation.py +0 -44
- mcp_agent/core/simulator_registry.py +0 -22
- mcp_agent/workflows/llm/enhanced_passthrough.py +0 -70
- {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.7.dist-info → fast_agent_mcp-0.1.9.dist-info}/licenses/LICENSE +0 -0
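
Most of the new surface area in 0.1.9 is the multipart prompt/content stack (`mcp_content.py`, `prompt.py`, `prompt_serialization.py`, `prompt_server.py`, `prompt_template.py`) and the per-provider converter modules, while the orchestrator, router, and provider LLM wrappers shed several hundred lines. A listing like the one above can be reproduced from the two wheels using only the standard library; the sketch below is illustrative (the local wheel filenames are assumptions):

```python
import difflib
import zipfile

def wheel_diff(old_whl: str, new_whl: str) -> None:
    """Print added/removed members and unified diffs for changed files."""
    with zipfile.ZipFile(old_whl) as old, zipfile.ZipFile(new_whl) as new:
        old_names, new_names = set(old.namelist()), set(new.namelist())
        for name in sorted(old_names | new_names):
            if name not in old_names:
                print(f"added:   {name}")
            elif name not in new_names:
                print(f"removed: {name}")
            else:
                a = old.read(name).decode("utf-8", "replace").splitlines()
                b = new.read(name).decode("utf-8", "replace").splitlines()
                for line in difflib.unified_diff(a, b, name, name, lineterm=""):
                    print(line)

# wheel_diff("fast_agent_mcp-0.1.7-py3-none-any.whl",
#            "fast_agent_mcp-0.1.9-py3-none-any.whl")
```

The per-file diffs follow.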
mcp_agent/workflows/orchestrator/orchestrator.py

```diff
@@ -6,8 +6,8 @@ from typing import (
     List,
     Literal,
     Optional,
-    Type,
     TYPE_CHECKING,
+    Type,
 )
 
 from mcp_agent.agents.agent import Agent
```
```diff
@@ -132,21 +132,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         # Store agents by name - COMPLETE REWRITE OF AGENT STORAGE
         self.agents = {}
         for agent in available_agents:
-            # Fix: Remove all special handling of agent names and store them exactly as they are
            agent_name = agent.name
-
-            # Verify if the name is actually "None" (string) or None (NoneType)
-            if agent_name == "None":
-                # Try to get a better name from config if available
-                if hasattr(agent, "config") and agent.config and agent.config.name:
-                    agent_name = agent.config.name
-            elif agent_name is None:
-                # Try to get a better name from config if available
-                if hasattr(agent, "config") and agent.config and agent.config.name:
-                    agent_name = agent.config.name
-                else:
-                    agent_name = f"unnamed_agent_{len(self.agents)}"
-
            self.logger.info(f"Adding agent '{agent_name}' to orchestrator")
            self.agents[agent_name] = agent
 
```
```diff
@@ -169,7 +155,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
     ) -> str:
         """Request an LLM generation and return the string representation of the result"""
         params = self.get_request_params(request_params)
-
+
         result = await self.generate(
             message=message,
             request_params=params,
```
```diff
@@ -183,30 +169,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         response_model: Type[ModelT],
         request_params: RequestParams | None = None,
     ) -> ModelT:
-
-        import json
-        from pydantic import ValidationError
-
-        params = self.get_request_params(request_params)
-        result_str = await self.generate_str(message=message, request_params=params)
-
-        try:
-            # Directly parse JSON and create model instance
-            parsed_data = json.loads(result_str)
-            return response_model(**parsed_data)
-        except (json.JSONDecodeError, ValidationError) as e:
-            # Log the error and fall back to the original method if direct parsing fails
-            self.logger.error(
-                f"Direct JSON parsing failed: {str(e)}. Falling back to standard method."
-            )
-            self.logger.debug(f"Failed JSON content: {result_str}")
-
-            # Use AugmentedLLM's structured output handling as fallback
-            return await super().generate_structured(
-                message=result_str,
-                response_model=response_model,
-                request_params=params,
-            )
+        return None
 
     async def execute(
         self, objective: str, request_params: RequestParams | None = None
```
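
The gutted `generate_structured` above previously parsed the planner's raw output straight into the Pydantic `response_model`, falling back to `super().generate_structured` on failure; 0.1.9 deletes both paths and returns `None`. For reference, a minimal standalone sketch of the removed parse-or-give-up pattern (the `Plan` model and function name here are illustrative, not the package's API):

```python
import json
from typing import Optional, Type, TypeVar

from pydantic import BaseModel, ValidationError

ModelT = TypeVar("ModelT", bound=BaseModel)

class Plan(BaseModel):  # illustrative stand-in for orchestrator_models.Plan
    steps: list = []
    is_complete: bool = False

def parse_structured(result_str: str, response_model: Type[ModelT]) -> Optional[ModelT]:
    """Parse raw LLM output into the model; None replaces the old fallback path."""
    try:
        return response_model(**json.loads(result_str))
    except (json.JSONDecodeError, ValidationError):
        return None

print(parse_structured('{"is_complete": true}', Plan))
```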
```diff
@@ -299,9 +262,11 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
                 plan_result.add_step_result(step_result)
                 total_steps_executed += 1
 
-                # Check
-                if
-                plan_result
+                # Check if we need to break from the main loop due to hitting max_steps
+                if (
+                    hasattr(plan_result, "max_steps_reached")
+                    and plan_result.max_steps_reached
+                ):
                     break
 
             logger.debug(
```
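
The added guard is a defensive attribute check; an equivalent, more compact form (a suggestion, not what the diff ships) would be:

```python
def hit_max_steps(plan_result) -> bool:
    # getattr with a default collapses the hasattr/and pair into one call
    return getattr(plan_result, "max_steps_reached", False)
```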
```diff
@@ -317,21 +282,38 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
 
             iterations += 1
 
-        # If we
-
-
-        )
+        # If we reach here, either:
+        # 1. We hit iteration limit without completing
+        # 2. We hit max_steps limit without completing
+        # 3. We detected diminishing returns (plan with 0-1 steps after multiple iterations)
 
-        #
-
+        # Check if we hit iteration limits without completing
+        if iterations >= params.max_iterations and not plan_result.is_complete:
+            self.logger.warning(
+                f"Failed to complete in {params.max_iterations} iterations."
+            )
+            # Mark that we hit the iteration limit
+            plan_result.max_iterations_reached = True
 
-
-
-
-
-
+            # Use the incomplete template when we've hit iteration limits
+            synthesis_prompt = SYNTHESIZE_INCOMPLETE_PLAN_TEMPLATE.format(
+                plan_result=format_plan_result(plan_result),
+                max_iterations=params.max_iterations,
+            )
+        else:
+            # Either plan is complete or we had diminishing returns (which we mark as complete)
+            if not plan_result.is_complete:
+                self.logger.info(
+                    "Plan terminated due to diminishing returns, marking as complete"
+                )
+                plan_result.is_complete = True
+
+            # Use standard template for complete plans
+            synthesis_prompt = SYNTHESIZE_PLAN_PROMPT_TEMPLATE.format(
+                plan_result=format_plan_result(plan_result)
+            )
 
-        # Generate
+        # Generate the final synthesis with the appropriate template
         plan_result.result = await self.planner.generate_str(
             message=synthesis_prompt,
             request_params=params.model_copy(update={"max_iterations": 1}),
```
```diff
@@ -359,8 +341,6 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
             # Make sure we're using a valid agent name
             agent = self.agents.get(task.agent)
             if not agent:
-                # Log a more prominent error - this is a serious problem that shouldn't happen
-                # with the improved prompt
                 self.logger.error(
                     f"AGENT VALIDATION ERROR: No agent found matching '{task.agent}'. Available agents: {list(self.agents.keys())}"
                 )
```
```diff
@@ -431,18 +411,10 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         request_params: RequestParams | None = None,
     ) -> Plan:
         """Generate full plan considering previous results"""
-        import json
-        from pydantic import ValidationError
-        from mcp_agent.workflows.orchestrator.orchestrator_models import (
-            Plan,
-            Step,
-            AgentTask,
-        )
 
         params = self.get_request_params(request_params)
         params = params.model_copy(update={"use_history": False})
 
-        # Format agents without numeric prefixes for cleaner XML
         agent_formats = []
         for agent_name in self.agents.keys():
             formatted = self._format_agent_info(agent_name)
```
```diff
@@ -452,7 +424,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
 
         # Create clear plan status indicator for the template
         plan_status = "Plan Status: Not Started"
-        if
+        if plan_result.is_complete:
             plan_status = (
                 "Plan Status: Complete"
                 if plan_result.is_complete
```
```diff
@@ -478,49 +450,30 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         )
 
         # Get raw JSON response from LLM
-
+        return await self.planner.generate_structured(
             message=prompt,
             request_params=params,
+            response_model=Plan,
         )
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                steps.append(step)
-
-            # Create final Plan
-            plan = Plan(steps=steps, is_complete=data.get("is_complete", False))
-
-            return plan
-
-        except (json.JSONDecodeError, ValidationError, KeyError) as e:
-            # Log detailed error and fall back to the original method as last resort
-            self.logger.error(f"Error parsing plan JSON: {str(e)}")
-            self.logger.debug(f"Failed JSON content: {result_str}")
-
-            # Use the normal structured parsing as fallback
-            plan = await self.planner.generate_structured(
-                message=result_str,
-                response_model=Plan,
-                request_params=params,
-            )
-
-            return plan
+        # return data
+
+        # steps = []
+        # for step_data in data.steps:
+        #     tasks = []
+        #     for task_data in step_data.tasks:
+        #         task = AgentTask(
+        #             description=task_data.description,
+        #             agent=task_data.agent,
+        #         )
+        #         tasks.append(task)
+
+        # # Create Step with the exact task objects we created
+        # step = Step(description=step_data.description, tasks=tasks)
+        # steps.append(step)
+
+        # # Create final Plan
+        # plan = Plan(steps=steps, is_complete=data.is_complete)
+        # return plan
 
     async def _get_next_step(
         self,
```
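
After this change the orchestrator hands the prompt and a Pydantic `response_model` to `self.planner.generate_structured` and trusts it to return a typed object. A sketch of that contract with a stubbed planner (the `Protocol` and stub are illustrative stand-ins, not the package's actual types):

```python
from typing import Protocol, Type, TypeVar

from pydantic import BaseModel

ModelT = TypeVar("ModelT", bound=BaseModel)

class Plan(BaseModel):  # illustrative stand-in
    steps: list = []
    is_complete: bool = False

class SupportsStructured(Protocol):
    async def generate_structured(
        self, message: str, request_params: object | None, response_model: Type[ModelT]
    ) -> ModelT: ...

class StubPlanner:
    async def generate_structured(self, message, request_params, response_model):
        # A real planner would call the LLM; the stub validates canned JSON instead.
        return response_model.model_validate_json('{"steps": [], "is_complete": false}')
```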
```diff
@@ -529,12 +482,6 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         request_params: RequestParams | None = None,
     ) -> NextStep:
         """Generate just the next needed step"""
-        import json
-        from pydantic import ValidationError
-        from mcp_agent.workflows.orchestrator.orchestrator_models import (
-            NextStep,
-            AgentTask,
-        )
 
         params = self.get_request_params(request_params)
         params = params.model_copy(update={"use_history": False})
```
```diff
@@ -547,7 +494,7 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
 
         # Create clear plan status indicator for the template
         plan_status = "Plan Status: Not Started"
-        if
+        if plan_result:
             plan_status = (
                 "Plan Status: Complete"
                 if plan_result.is_complete
```
```diff
@@ -569,48 +516,10 @@ class Orchestrator(AugmentedLLM[MessageParamT, MessageT]):
         )
 
         # Get raw JSON response from LLM
-
-            message=prompt,
-            request_params=params,
+        return await self.planner.generate_structured(
+            message=prompt, request_params=params, response_model=NextStep
         )
 
-        try:
-            # Parse JSON directly
-            data = json.loads(result_str)
-
-            # Create task objects manually to preserve exact agent names
-            tasks = []
-            for task_data in data.get("tasks", []):
-                # Preserve the exact agent name as specified in the JSON
-                task = AgentTask(
-                    description=task_data.get("description", ""),
-                    agent=task_data.get("agent", ""),
-                )
-                tasks.append(task)
-
-            # Create step with manually constructed tasks
-            next_step = NextStep(
-                description=data.get("description", ""),
-                tasks=tasks,
-                is_complete=data.get("is_complete", False),
-            )
-
-            return next_step
-
-        except (json.JSONDecodeError, ValidationError, KeyError) as e:
-            # Log detailed error and fall back to the original method
-            self.logger.error(f"Error parsing next step JSON: {str(e)}")
-            self.logger.debug(f"Failed JSON content: {result_str}")
-
-            # Use the normal structured parsing as fallback
-            next_step = await self.planner.generate_structured(
-                message=result_str,
-                response_model=NextStep,
-                request_params=params,
-            )
-
-            return next_step
-
     def _format_server_info(self, server_name: str) -> str:
         """Format server information for display to planners using XML tags"""
         from mcp_agent.workflows.llm.prompt_utils import format_server_info
```
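
As with the full-plan path, `_get_next_step` drops its hand-rolled `json.loads` parsing and `generate_structured` fallback in favor of a single typed call; here the old code is deleted outright rather than kept as comments.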
mcp_agent/workflows/router/router_llm.py

```diff
@@ -13,7 +13,8 @@ if TYPE_CHECKING:
 
 logger = get_logger(__name__)
 
-
+# TODO -- reinstate function/server routing
+# TODO -- Generate the Example Schema from the Pydantic Model
 DEFAULT_ROUTING_INSTRUCTION = """
 You are a highly accurate request router that directs incoming requests to the most appropriate category.
 A category is a specialized destination, such as a Function, an MCP Server (a collection of tools/functions), or an Agent (a collection of servers).
```
```diff
@@ -34,7 +35,7 @@ Your task is to analyze the request and determine the most appropriate categories
 - Whether the request might benefit from multiple categories (up to {top_k})
 
 <fastagent:instruction>
-Respond in JSON format:
+Respond in JSON format. NEVER include Code Fences:
 {{
   "categories": [
     {{
```
```diff
@@ -65,37 +66,31 @@ Follow these guidelines:
 """
 
 
-class
-    """
+class ConfidenceRating(BaseModel):
+    """Base class for models with confidence ratings and reasoning"""
 
-    confidence: Literal["high", "medium", "low"]
     """The confidence level of the routing decision."""
+    confidence: Literal["high", "medium", "low"]
+    """A brief explanation of the routing decision."""
+    reasoning: str | None = None  # Make nullable to support both use cases
 
-    reasoning: str | None = None
-    """
-    A brief explanation of the routing decision.
-    This is optional and may only be provided if the router is an LLM
-    """
-
-
-class StructuredResponseCategory(BaseModel):
-    """A class that represents a single category returned by an LLM router"""
 
-
+# Used for LLM output parsing
+class StructuredResponseCategory(ConfidenceRating):
     """The name of the category (i.e. MCP server, Agent or function) to route the input to."""
 
-
-    """The confidence level of the routing decision."""
-
-    reasoning: str | None = None
-    """A brief explanation of the routing decision."""
+    category: str  # Category name for lookup
 
 
 class StructuredResponse(BaseModel):
-    """A class that represents the structured response of an LLM router"""
-
     categories: List[StructuredResponseCategory]
-
+
+
+# Used for final router output
+class LLMRouterResult(RouterResult[ResultT], ConfidenceRating):
+    # Inherits 'result' from RouterResult
+    # Inherits 'confidence' and 'reasoning' from ConfidenceRating
+    pass
 
 
 class LLMRouter(Router):
```
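
This refactor hoists the duplicated `confidence`/`reasoning` fields into a shared `ConfidenceRating` base and mixes it into both the parse-time category model and the final result type. A self-contained sketch of the same pattern (`RouterResult` is stubbed here; the real class lives in the router base module):

```python
from typing import Generic, Literal, Optional, TypeVar

from pydantic import BaseModel

ResultT = TypeVar("ResultT")

class ConfidenceRating(BaseModel):
    confidence: Literal["high", "medium", "low"]
    reasoning: Optional[str] = None

class RouterResult(BaseModel, Generic[ResultT]):
    # Stand-in for mcp_agent's RouterResult, which carries the routed target.
    result: ResultT

class LLMRouterResult(RouterResult[ResultT], ConfidenceRating, Generic[ResultT]):
    pass

r = LLMRouterResult[str](result="agent-a", confidence="high", reasoning="best match")
print(r.confidence)  # high
```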
```diff
@@ -282,7 +277,6 @@ class LLMRouter(Router):
         for r in response.categories:
             router_category = self.categories.get(r.category)
             if not router_category:
-                # Skip invalid categories
                 # TODO: log or raise an error
                 continue
 
```
mcp_agent/core/server_validation.py (deleted)

```diff
@@ -1,44 +0,0 @@
-"""FastAgent validation methods."""
-
-from mcp_agent.core.exceptions import ServerConfigError
-
-
-def _validate_server_references(self) -> None:
-    """
-    Validate that all server references in agent configurations exist in config.
-    Raises ServerConfigError if any referenced servers are not defined.
-    """
-    # First check if any agents need servers
-    agents_needing_servers = {
-        name: agent_data["config"].servers
-        for name, agent_data in self.agents.items()
-        if agent_data["config"].servers
-    }
-
-    if not agents_needing_servers:
-        return  # No validation needed
-
-    # If we need servers, verify MCP config exists
-    if not hasattr(self.context.config, "mcp"):
-        raise ServerConfigError(
-            "MCP configuration missing",
-            "Agents require server access but no MCP configuration found.\n"
-            "Add an 'mcp' section to your configuration file.",
-        )
-
-    if not self.context.config.mcp.servers:
-        raise ServerConfigError(
-            "No MCP servers configured",
-            "Agents require server access but no servers are defined.\n"
-            "Add server definitions under mcp.servers in your configuration file.",
-        )
-
-    # Now check each agent's servers exist
-    available_servers = set(self.context.config.mcp.servers.keys())
-    for name, servers in agents_needing_servers.items():
-        missing = [s for s in servers if s not in available_servers]
-        if missing:
-            raise ServerConfigError(
-                f"Missing server configuration for agent '{name}'",
-                f"The following servers are referenced but not defined in config: {', '.join(missing)}",
-            )
```
mcp_agent/core/simulator_registry.py (deleted)

```diff
@@ -1,22 +0,0 @@
-from typing import Optional, Any
-
-
-class SimulatorRegistry:
-    """Registry to access simulator instances for testing assertions"""
-
-    _instances = {}
-
-    @classmethod
-    def register(cls, name: str, simulator: "Any"):
-        """Register a simulator instance"""
-        cls._instances[name] = simulator
-
-    @classmethod
-    def get(cls, name: str) -> Optional["Any"]:
-        """Get a simulator by name"""
-        return cls._instances.get(name)
-
-    @classmethod
-    def clear(cls):
-        """Clear registry (useful between tests)"""
-        cls._instances.clear()
```
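
Test-side usage of the removed registry looked roughly like this (illustrative, and assumes the `SimulatorRegistry` class shown above is in scope):

```python
class FakeSimulator:
    """Stand-in for an EnhancedPassthroughLLM instance."""

sim = FakeSimulator()
SimulatorRegistry.register("Simulator", sim)      # normally done in the LLM __init__
assert SimulatorRegistry.get("Simulator") is sim  # test assertion
SimulatorRegistry.clear()                         # reset between tests
```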
mcp_agent/workflows/llm/enhanced_passthrough.py (deleted)

```diff
@@ -1,70 +0,0 @@
-# src/mcp_agent/workflows/llm/enhanced_passthrough.py
-
-
-import datetime
-from typing import List, Optional, Union
-from mcp_agent.core.simulator_registry import SimulatorRegistry
-from mcp_agent.workflows.llm.augmented_llm import (
-    AugmentedLLM,
-    MessageParamT,
-    RequestParams,
-)
-
-
-class EnhancedPassthroughLLM(AugmentedLLM):
-    """Enhanced passthrough LLM for testing parameter handling and workflows"""
-
-    def __init__(self, name: str = "Simulator", context=None, **kwargs):
-        super().__init__(name=name, context=context, **kwargs)
-        self.simulation_mode = kwargs.get("simulation_mode", "passthrough")
-        self.request_log = []
-        self.last_request_params = None
-
-        # Register this instance with the registry
-        SimulatorRegistry.register(self.name, self)
-
-    async def generate_str(
-        self,
-        message: Union[str, MessageParamT, List[MessageParamT]],
-        request_params: Optional[RequestParams] = None,
-    ) -> str:
-        """Capture parameters and log the request"""
-        # Store for assertion testing
-        self.last_request_params = request_params
-
-        # Log the request
-        self.request_log.append(
-            {
-                "timestamp": datetime.now().isoformat(),
-                "message": str(message),
-                "request_params": request_params.model_dump()
-                if request_params
-                else None,
-            }
-        )
-
-        # Display for debugging
-        self.show_user_message(str(message), model="simulator", chat_turn=0)
-
-        # Simulate response
-        result = f"[SIMULATOR] Response to: {message}"
-        await self.show_assistant_message(result, title="SIMULATOR")
-
-        return result
-
-    # Other generate methods with similar parameter capture
-
-    def get_parameter_usage_report(self):
-        """Generate report of parameter usage"""
-        param_usage = {}
-
-        for req in self.request_log:
-            params = req.get("request_params", {})
-            if params:
-                for key, value in params.items():
-                    if key not in param_usage:
-                        param_usage[key] = {"count": 0, "values": set()}
-                    param_usage[key]["count"] += 1
-                    param_usage[key]["values"].add(str(value))
-
-        return {"total_requests": len(self.request_log), "parameter_usage": param_usage}
```
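
One detail in the removed module: it does `import datetime` but calls `datetime.now()`, which would raise `AttributeError` on the module object at request time. The timestamp line needed the class import instead, e.g.:

```python
from datetime import datetime

# Corrected form of the removed module's log-entry timestamp
entry = {"timestamp": datetime.now().isoformat(), "message": "example"}
print(entry["timestamp"])
```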