fast-agent-mcp 0.1.2__py3-none-any.whl → 0.1.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

fast_agent_mcp-0.1.2.dist-info/METADATA → fast_agent_mcp-0.1.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: fast-agent-mcp
-Version: 0.1.2
+Version: 0.1.4
 Summary: Define, Prompt and Test MCP enabled Agents and Workflows
 Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
 License: Apache License
@@ -366,7 +366,7 @@ Saved as `social.py` we can now run this workflow from the command line with:
 uv run social.py --agent social_media --message "<url>"
 ```
 
-Add the `--quiet` switch to only return the final response, which is useful for simple automations.
+Add the `--quiet` switch to disable progress and message display and return only the final response - useful for simple automations.
 
 ## Workflows
 
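
For example, the command from the hunk above becomes non-interactive with the documented flag appended (the base command is quoted verbatim from the README): `uv run social.py --agent social_media --message "<url>" --quiet`
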
@@ -426,7 +426,7 @@ The Parallel Workflow sends the same message to multiple Agents simultaneously (
 )
 ```
 
-Look at the `parallel.py` workflow example for more examples. If you don't specify a `fan-in` agent, the `parallel` returns the combined Agent results verbatim.
+If you don't specify a `fan-in` agent, the `parallel` returns the combined Agent results verbatim.
 
 `parallel` is also useful to ensemble ideas from different LLMs.
 
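
A minimal sketch of the fan-in behaviour described above. The agent names are invented, and the `fan_out`/`fan_in` parameter names are assumptions about the decorator's signature (only the "fan-in agent" concept appears in the README text):

```python
import asyncio
from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("ensemble-example")

@fast.agent(name="optimist", instruction="Argue the upside.")
@fast.agent(name="skeptic", instruction="Argue the risks.")
@fast.parallel(
    name="ensemble",
    fan_out=["optimist", "skeptic"],  # the same message goes to both agents
    # no fan_in specified: combined agent results are returned verbatim
)
async def main():
    async with fast.run() as agent:
        print(await agent["ensemble"].send("Should we ship on Friday?"))

if __name__ == "__main__":
    asyncio.run(main())
```
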
@@ -526,6 +526,7 @@ agent["greeter"].send("Good Evening!") # Dictionary access is supported
   name="chain", # name of the chain
   sequence=["agent1", "agent2", ...], # list of agents in execution order
   instruction="instruction", # instruction to describe the chain for other workflows
+  cumulative=False # whether to accumulate messages through the chain
   continue_with_final=True, # open chat with agent at end of chain after prompting
 )
 ```
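
The new `cumulative` flag is exercised by the `researcher-imp.py` example added in this release (reproduced further below). A minimal sketch with invented agent names:

```python
from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("chain-example")

@fast.agent(name="summarizer", instruction="Summarize the input.")
@fast.agent(name="critic", instruction="Critique the summary.")
@fast.chain(
    name="review",
    sequence=["summarizer", "critic"],
    cumulative=True,  # critic receives <summarizer>...</summarizer>, not just raw text
)
async def main():
    async with fast.run() as agent:
        print(await agent["review"].send("Long article text ..."))
```
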

fast_agent_mcp-0.1.2.dist-info/RECORD → fast_agent_mcp-0.1.4.dist-info/RECORD CHANGED
@@ -19,13 +19,14 @@ mcp_agent/cli/commands/setup.py,sha256=_SCpd6_PrixqbSaE72JQ7erIRkZnJGmh_3TvvwSzE
 mcp_agent/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/core/agent_app.py,sha256=6U3HLYAJOfyVuUpZVELWT5lOo64-b_sfWp0yn88s7Wo,6085
 mcp_agent/core/agent_types.py,sha256=yKiMbv9QO2dduq4zXmoMZlOZpXJZhM4oNwIq1-134FE,318
-mcp_agent/core/agent_utils.py,sha256=yUJ-qvw5TblqqOsB1vj0Qvcz9mass9awPA6UNNvuw0A,1738
-mcp_agent/core/enhanced_prompt.py,sha256=XraDKdIMW960KXCiMfCEPKDakbf1wHYgvHwD-9CBDi0,13011
+mcp_agent/core/agent_utils.py,sha256=QMvwmxZyCqYhBzSyL9xARsxTuwdmlyjQvrPpsH36HnQ,1888
+mcp_agent/core/enhanced_prompt.py,sha256=Zh1I_PZtQmH_v70dJ-gK5lnLOc42h6_Ai8FGi3aLBvU,13400
 mcp_agent/core/error_handling.py,sha256=D3HMW5odrbJvaKqcpCGj6eDXrbFcuqYaCZz7fyYiTu4,623
 mcp_agent/core/exceptions.py,sha256=a2-JGRwFFRoQEPuAq0JC5PhAJ5TO3xVJfdS4-VN29cw,2225
-mcp_agent/core/fastagent.py,sha256=drf11eHH1xCiyS91v_ADWfaV8T9asm_2Vw0NXxjinpc,58730
-mcp_agent/core/proxies.py,sha256=hXDUpsgGO4xBTIjdUeXj6vULPb8sf55vAFVQh6Ybn60,4411
+mcp_agent/core/fastagent.py,sha256=G0GNOD1JMD37rAaZdYRNsa7gXYjvYiV6Cg39laBKRW8,59714
+mcp_agent/core/proxies.py,sha256=35k-j-umlQ4sTBK1Z8qNsBS3ciG5ZqAUVazL-Tlmk6s,6238
 mcp_agent/core/server_validation.py,sha256=_59cn16nNT4HGPwg19HgxMtHK4MsdWYDUw_CuL-5xek,1696
+mcp_agent/core/simulator_registry.py,sha256=rcd1cyFGx8MAnN5O0UgwElmVKU_uoIBh9s24pxP33Jc,573
 mcp_agent/core/types.py,sha256=Zhi9iW7uiOfdpSt9NC0FCtGRFtJPg4mpZPK2aYi7a7M,817
 mcp_agent/eval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/executor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -56,7 +57,7 @@ mcp_agent/mcp/mcp_connection_manager.py,sha256=WLli0w3TVcsszyD9M7zP7vLKPetnQLTf_
 mcp_agent/mcp/stdio.py,sha256=tW075R5rQ-UlflXWFKIFDgCbWbuhKqxhiYolWvyEkFs,3985
 mcp_agent/resources/examples/data-analysis/analysis-campaign.py,sha256=EG-HhaDHltZ4hHAqhgfX_pHM2wem48aYhSIKJxyWHKc,7269
 mcp_agent/resources/examples/data-analysis/analysis.py,sha256=5zLoioZQNKUfXt1EXLrGX3TU06-0N06-L9Gtp9BIr6k,2611
-mcp_agent/resources/examples/data-analysis/fastagent.config.yaml,sha256=eTKGbjnTHhDTeNRPQvG_fr9OQpEZ5Y9v7X2NyCj0V70,530
+mcp_agent/resources/examples/data-analysis/fastagent.config.yaml,sha256=ini94PHyJCfgpjcjHKMMbGuHs6LIj46F1NwY0ll5HVk,1609
 mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv,sha256=pcMeOL1_r8m8MziE6xgbBrQbjl5Ijo98yycZn7O-dlk,227977
 mcp_agent/resources/examples/internal/agent.py,sha256=f-jTgYabV3nWCQm0ZP9NtSEWjx3nQbRngzArRufcELg,384
 mcp_agent/resources/examples/internal/job.py,sha256=WEKIAANMEAuKr13__rYf3PqJeTAsNB_kqYqbqVYQlUM,4093
@@ -64,6 +65,7 @@ mcp_agent/resources/examples/internal/social.py,sha256=Cot2lg3PLhLm13gPdVFvFEN28
 mcp_agent/resources/examples/mcp_researcher/researcher-eval.py,sha256=kNPjIU-JwE0oIBQKwhv6lZsUF_SPtYVkiEEbY1ZVZxk,1807
 mcp_agent/resources/examples/researcher/fastagent.config.yaml,sha256=2_VXZneckR6zk6RWzzL-smV_oWmgg4uSkLWqZv8jF0I,1995
 mcp_agent/resources/examples/researcher/researcher-eval.py,sha256=kNPjIU-JwE0oIBQKwhv6lZsUF_SPtYVkiEEbY1ZVZxk,1807
+mcp_agent/resources/examples/researcher/researcher-imp.py,sha256=Xfw2YAyjXd47pQz-uljgG5ii5x77fVuCP2XCivRDI48,7885
 mcp_agent/resources/examples/researcher/researcher.py,sha256=jPRafm7jbpHKkX_dQiYGG3Sw-e1Dm86q-JZT-WZDhM0,1425
 mcp_agent/resources/examples/workflows/agent_build.py,sha256=ioG4X8IbR8wwja8Zdncsk8YAu0VD2Xt1Vhr7saNJCZQ,2855
 mcp_agent/resources/examples/workflows/chaining.py,sha256=1G_0XBcFkSJCOXb6N_iXWlSc_oGAlhENR0k_CN1vJKI,1208
@@ -81,7 +83,7 @@ mcp_agent/workflows/embedding/embedding_base.py,sha256=-c20ggQ8s7XhMxRX-WEhOgHE7
 mcp_agent/workflows/embedding/embedding_cohere.py,sha256=OKTJvKD_uEafd4c2uhR5tBjprea1nyvlJOO-3FDqOnk,1540
 mcp_agent/workflows/embedding/embedding_openai.py,sha256=dntjJ5P-FSMGYuyPZC8MuCU_ehwjXw9wDfzZZuSQN1E,1480
 mcp_agent/workflows/evaluator_optimizer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py,sha256=N4HjckQf_boFRxoWJmuvwq1IEnGYW-k8pKtqjpsnLSE,19223
+mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py,sha256=hocvUtTJ5YHiT7utFuzbzY1aqFWCheuB_5dQ_ttOAZ4,20009
 mcp_agent/workflows/intent_classifier/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/workflows/intent_classifier/intent_classifier_base.py,sha256=zTbOmq6EY_abOlme4zl28HM4RWNNS6bbHl3tF7SshJ0,4004
 mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py,sha256=_bWZGukc_q9LdA_Q18UoAMSzhN8tt4K_bRHNUhy7Crw,3997
@@ -92,10 +94,11 @@ mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py,sha256=
 mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py,sha256=zj76WlTYnSCYjBQ_IDi5vFBQGmNwYaoUq1rT730sY98,1940
 mcp_agent/workflows/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/workflows/llm/augmented_llm.py,sha256=Hyx-jwgbMjE_WQ--YjIUvdj6HAgX36IvXBesGy6uic0,25884
-mcp_agent/workflows/llm/augmented_llm_anthropic.py,sha256=XZmumX-og07VR4O2TnEYQ9ZPwGgzLWt3uq6MII-tjnI,23076
+mcp_agent/workflows/llm/augmented_llm_anthropic.py,sha256=q-WWkD59b_aYMn5Dh1Vaz1pknBgFnacvfbdhRd-u9Kk,23183
 mcp_agent/workflows/llm/augmented_llm_openai.py,sha256=a95Q4AFiVw36bXMgYNLFrC2zyDmHERWwkjxJFHlL6JU,25061
+mcp_agent/workflows/llm/enhanced_passthrough.py,sha256=rHNbb6pYllIuVMOhuzUbt63_6WlUnjm57Y7r59N1pnk,2388
 mcp_agent/workflows/llm/llm_selector.py,sha256=G7pIybuBDwtmyxUDov_QrNYH2FoI0qFRu2JfoxWUF5Y,11045
-mcp_agent/workflows/llm/model_factory.py,sha256=7zTJrO2ReHa_6dfh_gY6xO8dTySqGFCKlOG9-AMJ-i8,6920
+mcp_agent/workflows/llm/model_factory.py,sha256=ZyO3FpiIiM2EiFgE8Y5PhdzLglKENfgknmF9unmKWJ8,7075
 mcp_agent/workflows/llm/prompt_utils.py,sha256=EY3eddqnmc_YDUQJFysPnpTH6hr4r2HneeEmX76P8TQ,4948
 mcp_agent/workflows/orchestrator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mcp_agent/workflows/orchestrator/orchestrator.py,sha256=Cu8cfDoTpT_FhGJp-T4NnCVvjkyDO1sbEJ7oKamK47k,26021
@@ -115,8 +118,8 @@ mcp_agent/workflows/swarm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
 mcp_agent/workflows/swarm/swarm.py,sha256=-lAIeSWDqbGHGRPTvjiP9nIKWvxxy9DAojl9yQzO1Pw,11050
 mcp_agent/workflows/swarm/swarm_anthropic.py,sha256=pW8zFx5baUWGd5Vw3nIDF2oVOOGNorij4qvGJKdYPcs,1624
 mcp_agent/workflows/swarm/swarm_openai.py,sha256=wfteywvAGkT5bLmIxX_StHJq8144whYmCRnJASAjOes,1596
-fast_agent_mcp-0.1.2.dist-info/METADATA,sha256=qcYR5D0SlhnnqX7er7yFF_0nEOmt4J74hbWiftzw6iI,27861
-fast_agent_mcp-0.1.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-fast_agent_mcp-0.1.2.dist-info/entry_points.txt,sha256=2IXtSmDK9XjWN__RWuRIJTgWyW17wJnJ_h-pb0pZAxo,174
-fast_agent_mcp-0.1.2.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
-fast_agent_mcp-0.1.2.dist-info/RECORD,,
+fast_agent_mcp-0.1.4.dist-info/METADATA,sha256=I8Te4FG-3ClIawLJBEYIkUkaJacG4BiKfF-cuMuUBrs,27924
+fast_agent_mcp-0.1.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+fast_agent_mcp-0.1.4.dist-info/entry_points.txt,sha256=2IXtSmDK9XjWN__RWuRIJTgWyW17wJnJ_h-pb0pZAxo,174
+fast_agent_mcp-0.1.4.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+fast_agent_mcp-0.1.4.dist-info/RECORD,,

mcp_agent/core/agent_utils.py CHANGED
@@ -8,10 +8,11 @@ from mcp_agent.event_progress import ProgressAction
 
 # Handle circular imports
 if TYPE_CHECKING:
-    from mcp_agent.core.proxies import BaseAgentProxy, LLMAgentProxy
+    from mcp_agent.core.proxies import BaseAgentProxy
     from mcp_agent.core.types import AgentOrWorkflow, ProxyDict
 else:
-    from mcp_agent.core.proxies import BaseAgentProxy, LLMAgentProxy
+    from mcp_agent.core.proxies import BaseAgentProxy
+
     # Define minimal types for runtime
     AgentOrWorkflow = object  # Simple placeholder
     ProxyDict = dict  # Simple placeholder
@@ -27,8 +28,13 @@ def unwrap_proxy(proxy: BaseAgentProxy) -> AgentOrWorkflow:
     Returns:
         The underlying Agent or workflow instance
     """
+    from mcp_agent.core.proxies import LLMAgentProxy, ChainProxy
+
     if isinstance(proxy, LLMAgentProxy):
         return proxy._agent
+    elif isinstance(proxy, ChainProxy):
+        # Return the ChainProxy itself as the workflow
+        return proxy
     return proxy._workflow
 
 
@@ -51,7 +57,7 @@ def get_agent_instances(
 def log_agent_load(app, agent_name: str) -> None:
     """
     Log agent loading event to application logger.
-
+
     Args:
         app: The application instance
         agent_name: Name of the agent being loaded
@@ -62,4 +68,4 @@ def log_agent_load(app, agent_name: str) -> None:
             "progress_action": ProgressAction.LOADED,
             "agent_name": agent_name,
         },
-    )
+    )
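
The behavioural change in `unwrap_proxy`, in isolation (a sketch; the `ChainProxy` constructor arguments are inferred from the fields set in `proxies.py` and should be treated as assumptions):

```python
from mcp_agent.core.agent_utils import unwrap_proxy
from mcp_agent.core.proxies import ChainProxy

# Assumed constructor shape: (app, name, sequence, agent_proxies)
chain = ChainProxy(None, "demo_chain", ["agent1", "agent2"], {})

# Chains have no _workflow attribute; unwrap_proxy now returns the proxy itself,
# which lets evaluator-optimizers treat a whole chain as their generator.
assert unwrap_proxy(chain) is chain
```
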

mcp_agent/core/enhanced_prompt.py CHANGED
@@ -11,6 +11,7 @@ from prompt_toolkit.key_binding import KeyBindings
 from prompt_toolkit.completion import Completer, Completion
 from prompt_toolkit.lexers import PygmentsLexer
 from prompt_toolkit.filters import Condition
+from prompt_toolkit.styles import Style
 from pygments.lexers.python import PythonLexer
 from rich import print as rich_print
 
@@ -31,8 +32,8 @@ available_agents = set()
 # Keep track of multi-line mode state
 in_multiline_mode = False
 
-# Track which agents have already shown welcome messages
-agent_messages_shown = set()
+# Track whether help text has been shown globally
+help_message_shown = False
 
 
 class AgentCompleter(Completer):
@@ -87,7 +88,7 @@ class AgentCompleter(Completer):
                 start_position=-len(agent_name),
                 display=agent,
                 display_meta=agent_type,
-                style="bg:ansiblack fg:ansiblue",
+                # style="bg:ansiblack fg:ansiblue",
             )
 
 
@@ -168,7 +169,7 @@ async def get_enhanced_input(
     Returns:
         User input string
     """
-    global in_multiline_mode, available_agents
+    global in_multiline_mode, available_agents, help_message_shown
 
     # Update global state
     in_multiline_mode = multiline
@@ -210,10 +211,21 @@ async def get_enhanced_input(
        shortcuts = [(k, v) for k, v in shortcuts if v]
 
        shortcut_text = " | ".join(f"{key}:{action}" for key, action in shortcuts)
+
        return HTML(
-            f" <{toolbar_color}> {agent_name} </{toolbar_color}> | <b>Mode:</b> <{mode_style}> {mode_text} </{mode_style}> {newline} | {shortcut_text} | <dim>v{app_version}</dim>"
+            f" <style fg='{toolbar_color}' bg='ansiblack'> {agent_name} </style> Mode: <style fg='{mode_style}' bg='ansiblack'> {mode_text} </style> {newline} | {shortcut_text} | v{app_version}"
        )
 
+    # A more terminal-agnostic style that should work across themes
+    custom_style = Style.from_dict(
+        {
+            "completion-menu.completion": "bg:#ansiblack #ansigreen",
+            "completion-menu.completion.current": "bg:#ansiblack bold #ansigreen",
+            "completion-menu.meta.completion": "bg:#ansiblack #ansiblue",
+            "completion-menu.meta.completion.current": "bg:#ansibrightblack #ansiblue",
+            "bottom-toolbar": "#ansiblack bg:#ansigray",
+        }
+    )
    # Create session with history and completions
    session = PromptSession(
        history=agent_histories[agent_name],
@@ -227,7 +239,8 @@
        multiline=Condition(lambda: in_multiline_mode),
        complete_in_thread=True,
        mouse_support=False,
-        bottom_toolbar=get_toolbar,  # Pass the function here
+        bottom_toolbar=get_toolbar,
+        style=custom_style,
    )
 
    # Create key bindings with a reference to the app
@@ -237,7 +250,7 @@
    session.app.key_bindings = bindings
 
    # Create formatted prompt text
-    prompt_text = f"<ansicyan>{agent_name}</ansicyan> > "
+    prompt_text = f"<ansibrightblue>{agent_name}</ansibrightblue> > "
 
    # Add default value display if requested
    if show_default and default and default != "STOP":
@@ -246,25 +259,24 @@
    # Only show hints at startup if requested
    if show_stop_hint:
        if default == "STOP":
-            rich_print("[yellow]Press <ENTER> to finish.[/yellow]")
-        else:
-            rich_print("Enter a prompt, or [red]STOP[/red] to finish")
+            rich_print("Enter a prompt, [red]STOP[/red] to finish")
        if default:
            rich_print(
                f"Press <ENTER> to use the default prompt:\n[cyan]{default}[/cyan]"
            )
 
-    # Mention available features but only on first usage for this agent
-    if agent_name not in agent_messages_shown:
+    # Mention available features but only on first usage globally
+    if not help_message_shown:
        if is_human_input:
            rich_print(
-                "[dim]Tip: Type /help for commands. Ctrl+T toggles multiline mode. Ctrl+Enter to submit in multiline mode.[/dim]"
+                "[dim]Type /help for commands. Ctrl+T toggles multiline mode.[/dim]"
            )
        else:
            rich_print(
-                "[dim]Tip: Type /help for commands, @Agent to switch agent. Ctrl+T toggles multiline mode. [/dim]"
+                "[dim]Type /help for commands, @agent to switch agent. Ctrl+T toggles multiline mode. [/dim]"
            )
-        agent_messages_shown.add(agent_name)
+        rich_print()
+        help_message_shown = True
 
    # Process special commands
    def pre_process_input(text):
@@ -325,7 +337,6 @@ async def handle_special_commands(command, agent_app=None):
        rich_print(
            " Enter - Submit (normal mode) / New line (multiline mode)"
        )
-        rich_print(" \\ + Enter - Insert new line in normal mode")
        rich_print(" Ctrl+Enter - Always submit (in any mode)")
        rich_print(" Ctrl+T - Toggle multiline mode")
        rich_print(" Ctrl+L - Clear input")

mcp_agent/core/fastagent.py CHANGED
@@ -696,6 +696,7 @@ class FastAgent(ContextDependent):
         use_history: bool = True,
         request_params: Optional[Dict] = None,
         continue_with_final: bool = True,
+        cumulative: bool = False,
     ) -> Callable:
         """
         Decorator to create and register a chain of agents.
@@ -709,6 +710,8 @@
             use_history: Whether to maintain conversation history
             request_params: Additional request parameters
             continue_with_final: When using prompt(), whether to continue with the final agent after processing chain (default: True)
+            cumulative: When True, each agent receives all previous agent responses concatenated (default: False)
+                        When False, each agent only gets the output of the previous agent (default behavior)
         """
         # Support both parameter names
         agent_sequence = sequence or agents
@@ -717,8 +720,11 @@
 
         # Auto-generate instruction if not provided
         if instruction is None:
-            # We'll generate it later when we have access to the agent configs and can see servers
-            instruction = f"Chain of agents: {', '.join(agent_sequence)}"
+            # Generate an appropriate instruction based on mode
+            if cumulative:
+                instruction = f"Cumulative chain of agents: {', '.join(agent_sequence)}"
+            else:
+                instruction = f"Chain of agents: {', '.join(agent_sequence)}"
 
         decorator = self._create_decorator(
             AgentType.CHAIN,
@@ -734,6 +740,7 @@
             use_history=use_history,
             request_params=request_params,
             continue_with_final=continue_with_final,
+            cumulative=cumulative,
         )
         return decorator
 
@@ -912,9 +919,13 @@
                 f"evaluator={agent_data['evaluator']}"
             )
 
-        optimizer_model = (
-            generator.config.model if isinstance(generator, Agent) else None
-        )
+        # Get model from generator if it's an Agent, or from config otherwise
+        optimizer_model = None
+        if isinstance(generator, Agent):
+            optimizer_model = generator.config.model
+        elif hasattr(generator, '_sequence') and hasattr(generator, '_agent_proxies'):
+            # For ChainProxy, use the config model directly
+            optimizer_model = config.model
 
         instance = EvaluatorOptimizerLLM(
             name=config.name,  # Pass name from config
@@ -993,6 +1004,10 @@
                     instance._continue_with_final = agent_data.get(
                         "continue_with_final", True
                     )
+                    # Set cumulative behavior from configuration
+                    instance._cumulative = agent_data.get(
+                        "cumulative", False
+                    )
 
                 # We removed the AgentType.PASSTHROUGH case
                 # Passthrough agents are now created as BASIC agents with a special LLM
@@ -1307,12 +1322,6 @@
         # First create basic agents
         active_agents = await self._create_basic_agents(agent_app)
 
-        # Create workflow types that don't depend on other workflows first
-        evaluator_optimizers = await self._create_evaluator_optimizers(
-            agent_app, active_agents
-        )
-        active_agents.update(evaluator_optimizers)
-
         # Create parallel agents next as they might be dependencies
         parallel_agents = await self._create_parallel_agents(
             agent_app, active_agents
@@ -1323,11 +1332,17 @@
         routers = await self._create_routers(agent_app, active_agents)
         active_agents.update(routers)
 
-        # Create chains next
+        # Create chains next - MOVED UP because evaluator-optimizers might depend on chains
         chains = await self._create_agents_in_dependency_order(
             agent_app, active_agents, AgentType.CHAIN
         )
         active_agents.update(chains)
+
+        # Now create evaluator-optimizers AFTER chains are available
+        evaluator_optimizers = await self._create_evaluator_optimizers(
+            agent_app, active_agents
+        )
+        active_agents.update(evaluator_optimizers)
 
         # Create orchestrators last as they might depend on any other agent type
         orchestrators = await self._create_orchestrators(

mcp_agent/core/proxies.py CHANGED
@@ -13,12 +13,10 @@ if TYPE_CHECKING:
     from mcp_agent.core.types import WorkflowType, ProxyDict
 else:
     # Define minimal versions for runtime
-    from mcp_agent.workflows.orchestrator.orchestrator import Orchestrator
-    from mcp_agent.workflows.parallel.parallel_llm import ParallelLLM
-    from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import EvaluatorOptimizerLLM
-    from mcp_agent.workflows.router.router_llm import LLMRouter
-    from typing import Union
-    WorkflowType = Union[Orchestrator, ParallelLLM, EvaluatorOptimizerLLM, LLMRouter]
+    from typing import Any
+
+    # Use Any for runtime to avoid circular imports
+    WorkflowType = Any
     ProxyDict = Dict[str, "BaseAgentProxy"]
 
 
@@ -51,7 +49,8 @@ class BaseAgentProxy:
 class AgentProxy(BaseAgentProxy):
     """Legacy proxy for individual agent operations"""
 
-    async def generate_str(self, message: str) -> str:
+    async def generate_str(self, message: str, **kwargs) -> str:
+        """Forward only the message to app.send, ignoring kwargs for legacy compatibility"""
         return await self._app.send(self._name, message)
 
 
@@ -62,8 +61,9 @@ class LLMAgentProxy(BaseAgentProxy):
         super().__init__(app, name)
         self._agent = agent
 
-    async def generate_str(self, message: str) -> str:
-        return await self._agent._llm.generate_str(message)
+    async def generate_str(self, message: str, **kwargs) -> str:
+        """Forward message and all kwargs to the agent's LLM"""
+        return await self._agent._llm.generate_str(message, **kwargs)
 
 
 class WorkflowProxy(BaseAgentProxy):
@@ -73,8 +73,9 @@ class WorkflowProxy(BaseAgentProxy):
         super().__init__(app, name)
         self._workflow = workflow
 
-    async def generate_str(self, message: str) -> str:
-        return await self._workflow.generate_str(message)
+    async def generate_str(self, message: str, **kwargs) -> str:
+        """Forward message and all kwargs to the underlying workflow"""
+        return await self._workflow.generate_str(message, **kwargs)
 
 
 class RouterProxy(BaseAgentProxy):
@@ -84,7 +85,11 @@ class RouterProxy(BaseAgentProxy):
         super().__init__(app, name)
         self._workflow = workflow
 
-    async def generate_str(self, message: str) -> str:
+    async def generate_str(self, message: str, **kwargs) -> str:
+        """
+        Route the message and forward kwargs to the resulting agent if applicable.
+        Note: For now, route() itself doesn't accept kwargs.
+        """
         results = await self._workflow.route(message)
         if not results:
             return "No appropriate route found for the request."
@@ -92,10 +97,9 @@
         # Get the top result
         top_result = results[0]
         if isinstance(top_result.result, Agent):
-            # Agent route - delegate to the agent
+            # Agent route - delegate to the agent, passing along kwargs
             agent = top_result.result
-
-            return await agent._llm.generate_str(message)
+            return await agent._llm.generate_str(message, **kwargs)
         elif isinstance(top_result.result, str):
             # Server route - use the router directly
             return "Tool call requested by router - not yet supported"
@@ -113,15 +117,50 @@ class ChainProxy(BaseAgentProxy):
         self._sequence = sequence
         self._agent_proxies = agent_proxies
         self._continue_with_final = True  # Default behavior
-
-    async def generate_str(self, message: str) -> str:
-        """Chain message through a sequence of agents"""
-        current_message = message
-
-        for agent_name in self._sequence:
-            proxy = self._agent_proxies[agent_name]
-            current_message = await proxy.generate_str(current_message)
-
-        return current_message
-
-
+        self._cumulative = False  # Default to sequential chaining
+
+    async def generate_str(self, message: str, **kwargs) -> str:
+        """Chain message through a sequence of agents.
+
+        For the first agent in the chain, pass all kwargs to maintain transparency.
+
+        Two modes of operation:
+        1. Sequential (default): Each agent receives only the output of the previous agent
+        2. Cumulative: Each agent receives all previous agent responses concatenated
+        """
+        if not self._sequence:
+            return message
+
+        # Process the first agent (same for both modes)
+        first_agent = self._sequence[0]
+        first_proxy = self._agent_proxies[first_agent]
+        first_response = await first_proxy.generate_str(message, **kwargs)
+
+        if len(self._sequence) == 1:
+            return first_response
+
+        if self._cumulative:
+            # Cumulative mode: each agent gets all previous responses
+            cumulative_response = f"<{first_agent}>\n{first_response}\n</{first_agent}>"
+
+            # Process subsequent agents with cumulative results
+            for agent_name in self._sequence[1:]:
+                proxy = self._agent_proxies[agent_name]
+                # Pass all previous responses to next agent
+                agent_response = await proxy.generate_str(cumulative_response)
+                # Add this agent's response to the cumulative result
+                cumulative_response += (
+                    f"\n\n<{agent_name}>\n{agent_response}\n</{agent_name}>"
+                )
+
+            return cumulative_response
+        else:
+            # Sequential chaining (original behavior)
+            current_message = first_response
+
+            # For subsequent agents, just pass the message from previous agent
+            for agent_name in self._sequence[1:]:
+                proxy = self._agent_proxies[agent_name]
+                current_message = await proxy.generate_str(current_message)
+
+            return current_message
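
To see the cumulative format end to end, a runnable sketch with stub proxies (the stub class and the `ChainProxy` constructor arguments are hypothetical; only `generate_str` and the `_cumulative` flag come from the diff above):

```python
import asyncio
from mcp_agent.core.proxies import ChainProxy

class StubProxy:
    """Hypothetical stand-in for an agent proxy; only generate_str is required."""
    def __init__(self, name: str):
        self.name = name

    async def generate_str(self, message: str, **kwargs) -> str:
        return f"{self.name} processed {len(message)} chars"

async def demo():
    proxies = {"planner": StubProxy("planner"), "writer": StubProxy("writer")}
    chain = ChainProxy(None, "demo", ["planner", "writer"], proxies)
    chain._cumulative = True
    print(await chain.generate_str("hello"))
    # Expected shape of the output:
    # <planner>
    # planner processed 5 chars
    # </planner>
    #
    # <writer>
    # writer processed ... chars
    # </writer>

asyncio.run(demo())
```
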

mcp_agent/core/simulator_registry.py ADDED
@@ -0,0 +1,22 @@
+from typing import Optional, Any
+
+
+class SimulatorRegistry:
+    """Registry to access simulator instances for testing assertions"""
+
+    _instances = {}
+
+    @classmethod
+    def register(cls, name: str, simulator: "Any"):
+        """Register a simulator instance"""
+        cls._instances[name] = simulator
+
+    @classmethod
+    def get(cls, name: str) -> Optional["Any"]:
+        """Get a simulator by name"""
+        return cls._instances.get(name)
+
+    @classmethod
+    def clear(cls):
+        """Clear registry (useful between tests)"""
+        cls._instances.clear()
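
Typical use in a test (a sketch; the registered object can be anything, here a trivial stand-in):

```python
from mcp_agent.core.simulator_registry import SimulatorRegistry

class FakeSimulator:
    """Hypothetical simulator stand-in; the registry stores arbitrary objects."""
    last_request_params = None

SimulatorRegistry.register("sim", FakeSimulator())
assert SimulatorRegistry.get("sim").last_request_params is None

SimulatorRegistry.clear()  # reset between tests
assert SimulatorRegistry.get("sim") is None
```
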

mcp_agent/resources/examples/data-analysis/fastagent.config.yaml CHANGED
@@ -20,3 +20,22 @@ mcp:
       - uri: "file://./mount-point/"
         name: "test_data"
         server_uri_alias: "file:///mnt/data/"
+    filesystem:
+      # On windows update the command and arguments to use `node` and the absolute path to the server.
+      # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally.
+      # Use `npm -g root` to find the global node_modules path.`
+      # command: "node"
+      # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","."]
+      command: "npx"
+      args: ["-y", "@modelcontextprotocol/server-filesystem", "./mount-point/"]
+    fetch:
+      command: "uvx"
+      args: ["mcp-server-fetch"]
+    brave:
+      # On windows replace the command and args line to use `node` and the absolute path to the server.
+      # Use `npm i -g @modelcontextprotocol/server-brave-search` to install the server globally.
+      # Use `npm -g root` to find the global node_modules path.`
+      # command: "node"
+      # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-brave-search/dist/index.js"]
+      command: "npx"
+      args: ["-y", "@modelcontextprotocol/server-brave-search"]
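
Agents opt in to these servers by name; the `servers=` list must match the keys in the config file. A sketch with an invented agent:

```python
from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("config-example")

@fast.agent(
    name="web_reader",  # illustrative name
    instruction="Fetch pages and summarize them.",
    servers=["fetch", "brave"],  # keys from fastagent.config.yaml above
)
async def main():
    ...
```
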

mcp_agent/resources/examples/researcher/researcher-imp.py ADDED
@@ -0,0 +1,190 @@
+import asyncio
+from mcp_agent.core.fastagent import FastAgent
+
+agents = FastAgent(name="Enhanced Researcher")
+
+
+@agents.agent(
+    name="ResearchPlanner",
+    model="sonnet",  # Using a more capable model for planning
+    instruction="""
+    You are a strategic research planner. Your job is to:
+    1. Break down complex research questions into specific sub-questions
+    2. Identify the key information sources needed to answer each sub-question
+    3. Outline a structured research plan
+
+    When given a research topic:
+    - Analyze what is being asked and identify the core components
+    - Define 3-5 specific sub-questions that need to be answered
+    - For each sub-question, suggest specific search queries and information sources
+    - Prioritize the most important areas to investigate first
+    - Include suggestions for data visualization or analysis if appropriate
+
+    Your output should be a clear, structured research plan that the Researcher can follow.
+    """,
+    servers=["brave"],
+)
+@agents.agent(
+    name="Researcher",
+    model="sonnet",  # Using a more capable model for deep research
+    instruction="""
+    You are an expert research assistant with access to multiple resources:
+    - Brave Search for initial exploration and discovering sources
+    - Website fetching to read and extract information directly from webpages
+    - Python interpreter for data analysis and visualization
+    - Filesystem tools to save and organize your findings
+
+    RESEARCH METHODOLOGY:
+    1. First understand the research plan provided
+    2. For each sub-question, use search tools to find multiple relevant sources
+    3. Go beyond surface-level information by:
+       - Consulting primary sources when possible
+       - Cross-referencing information across multiple sources
+       - Using the fetch tool to access complete articles rather than just search snippets
+       - Analyzing data with Python when numerical evidence is needed
+       - Creating visualizations when they help clarify complex information
+
+    CRITICAL INFORMATION ASSESSMENT:
+    - Evaluate the credibility of each source (consider recency, authority, potential bias)
+    - Look for consensus across multiple sources
+    - Highlight any contradictions or areas of debate in the research
+    - Clearly state limitations in the available information
+
+    DOCUMENTATION:
+    - Save important information, data, and visualizations to files
+    - Always create a comprehensive bibliography with links to all sources
+    - Include specific citation details (author, date, publication) when available
+    - Note which specific information came from which source
+
+    FINAL RESPONSE:
+    - Structure your findings logically with clear headings
+    - Synthesize the information rather than just listing facts
+    - Directly address each sub-question from the research plan
+    - Use data and visualizations to support key points
+    - End with a concise executive summary of your findings
+    - Include a "Methodology" section explaining how you conducted your research
+    """,
+    servers=["brave", "interpreter", "filesystem", "fetch"],
+    use_history=True,
+)
+@agents.agent(
+    name="FactChecker",
+    instruction="""
+    You are a meticulous fact-checker and critical evaluator of research. Your responsibilities are to:
+
+    1. Verify factual claims by cross-checking with authoritative sources
+    2. Identify any unsupported assertions or logical fallacies
+    3. Detect potential biases or limitations in the research methodology
+    4. Ensure proper representation of diverse perspectives on controversial topics
+    5. Evaluate the quality, reliability, and currency of cited sources
+
+    When reviewing research:
+    - Flag any claims that lack sufficient evidence or citation
+    - Identify information that seems outdated or contradicts current consensus
+    - Check for oversimplifications of complex topics
+    - Ensure numerical data and statistics are accurately represented
+    - Verify that quotations are accurate and in proper context
+    - Look for any gaps in the research or important perspectives that were omitted
+
+    Your feedback should be specific, actionable, and structured to help improve accuracy and comprehensiveness.
+    """,
+    servers=["brave", "fetch"],
+)
+@agents.agent(
+    name="Evaluator",
+    model="sonnet",
+    instruction="""
+    You are a senior research quality evaluator with expertise in academic and professional research standards.
+
+    COMPREHENSIVE EVALUATION CRITERIA:
+    1. Research Methodology
+       - Has the researcher followed a structured approach?
+       - Were appropriate research methods applied?
+       - Is there evidence of strategic information gathering?
+
+    2. Source Quality & Diversity
+       - Are sources authoritative, current, and relevant?
+       - Is there appropriate diversity of sources?
+       - Were primary sources consulted when appropriate?
+
+    3. Information Depth
+       - Does the research go beyond surface-level information?
+       - Is there evidence of in-depth analysis?
+       - Has the researcher explored multiple aspects of the topic?
+
+    4. Critical Analysis
+       - Has information been critically evaluated rather than simply reported?
+       - Are limitations and uncertainties acknowledged?
+       - Are multiple perspectives considered on controversial topics?
+
+    5. Data & Evidence
+       - Is quantitative data properly analyzed and presented?
+       - Are visualizations clear, accurate, and informative?
+       - Is qualitative information presented with appropriate context?
+
+    6. Documentation & Attribution
+       - Are all sources properly cited with complete reference information?
+       - Is it clear which information came from which source?
+       - Is the bibliography comprehensive and well-formatted?
+
+    7. Structure & Communication
+       - Is the research presented in a logical, well-organized manner?
+       - Are findings communicated clearly and precisely?
+       - Is the level of technical language appropriate for the intended audience?
+
+    8. Alignment with Previous Feedback
+       - Has the researcher addressed specific feedback from previous evaluations?
+       - Have requested improvements been successfully implemented?
+
+    For each criterion, provide:
+    - A detailed RATING (EXCELLENT, GOOD, FAIR, or POOR)
+    - Specific examples from the research that justify your rating
+    - Clear, actionable suggestions for improvement
+
+    Your evaluation should conclude with:
+    - An OVERALL RATING that reflects the research quality
+    - A concise summary of the research's major strengths
+    - A prioritized list of the most important areas for improvement
+
+    The researcher should be able to understand exactly why they received their rating and what specific steps they can take to improve.
+    """,
+)
+@agents.chain(
+    name="ResearchProcess",
+    sequence=["ResearchPlanner", "Researcher", "FactChecker"],
+    instruction="A comprehensive research workflow that plans, executes, and verifies research",
+    cumulative=True,
+)
+@agents.evaluator_optimizer(
+    generator="ResearchProcess",
+    evaluator="Evaluator",
+    max_refinements=3,
+    min_rating="EXCELLENT",
+    name="EnhancedResearcher",
+)
+async def main():
+    async with agents.run() as agent:
+        # Start with a warm-up to set expectations and explain the research approach
+        await agent.Researcher.send(
+            """I'm an enhanced research assistant trained to conduct thorough, evidence-based research.
+            I'll approach your question by:
+            1. Creating a structured research plan
+            2. Gathering information from multiple authoritative sources
+            3. Analyzing data and creating visualizations when helpful
+            4. Fact-checking and verifying all information
+            5. Providing a comprehensive, well-documented answer
+
+            What would you like me to research for you today?"""
+        )
+
+        # Start the main research workflow
+        await agent.prompt("EnhancedResearcher")
+
+        print(
+            "\nWould you like to ask follow-up questions to the Researcher? (Type 'STOP' to end)"
+        )
+        await agent.prompt("Researcher", default="STOP")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())

mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py CHANGED
@@ -127,6 +127,7 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
         self.max_refinements = max_refinements
 
         # Determine generator's history setting before super().__init__
+
         if isinstance(generator, Agent):
             self.generator_use_history = generator.config.use_history
         elif isinstance(generator, AugmentedLLM):
@@ -140,6 +141,11 @@
             "use_history",
             getattr(generator.default_request_params, "use_history", False),
         )
+        # Handle ChainProxy with type checking
+        elif hasattr(generator, "_sequence") and hasattr(generator, "_agent_proxies"):
+            # This is how we detect a ChainProxy without directly importing it
+            # For ChainProxy, we'll default use_history to False
+            self.generator_use_history = False
         else:
             raise ValueError(f"Unsupported optimizer type: {type(generator)}")
 
@@ -152,6 +158,7 @@
         self._llm = PassthroughLLM(name=f"{self.name}_passthrough", context=context)
 
         # Set up the generator
+
         if isinstance(generator, Agent):
             if not llm_factory:
                 raise ValueError("llm_factory is required when using an Agent")
@@ -171,6 +178,13 @@
                 else None
             )  # Fallback to generator's
         )
+        elif hasattr(generator, "_sequence") and hasattr(generator, "_agent_proxies"):
+            # For ChainProxy, use it directly for generation
+            self.generator_llm = generator
+            self.aggregator = None
+            self.instruction = (
+                instruction or f"Chain of agents: {', '.join(generator._sequence)}"
+            )
 
         elif isinstance(generator, AugmentedLLM):
             self.generator_llm = generator
@@ -252,7 +266,7 @@
             if isinstance(self.evaluator, Agent):
                 await stack.enter_async_context(self.evaluator)
 
-            # Initial generation
+            # Initial generation - pass parameters to any type of generator
             response = await self.generator_llm.generate_str(
                 message=message,
                 request_params=params,  # Pass params which may override use_history
@@ -321,6 +335,7 @@
                 use_history=self.generator_use_history,  # Use the generator's history setting
             )
 
+            # Pass parameters to any type of generator
            response = await self.generator_llm.generate_str(
                message=refinement_prompt,
                request_params=params,  # Pass params which may override use_history
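
The structural check used above, isolated for clarity (a sketch; the attribute names are exactly those from the diff):

```python
def looks_like_chain_proxy(generator) -> bool:
    # ChainProxy is detected structurally rather than with isinstance,
    # avoiding a circular import between the workflow and proxy modules.
    return hasattr(generator, "_sequence") and hasattr(generator, "_agent_proxies")
```
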

mcp_agent/workflows/llm/augmented_llm_anthropic.py CHANGED
@@ -332,15 +332,21 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
 
         final_text: List[str] = []
 
+        # Process all responses and collect all text content
         for response in responses:
+            # Extract text content from each message
+            message_text = ""
             for content in response.content:
                 if content.type == "text":
-                    final_text.append(content.text)
-            # elif content.type == "tool_use":
-            #     final_text.append(
-            #         f"[Calling tool {content.name} with args {content.input}]"
-            #     )
-            # TODO -- check whether this should be reinstated - OpenAI doesn't return this....
+                    # Extract text from text blocks
+                    message_text += content.text
+
+            # Only append non-empty text
+            if message_text:
+                final_text.append(message_text)
+
+        # TODO -- make tool detail inclusion behaviour configurable
+        # Join all collected text
         return "\n".join(final_text)
 
     async def generate_structured(
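
The effect of the new text extraction, in isolation (a sketch using stand-in objects; real content blocks come from the Anthropic SDK):

```python
from types import SimpleNamespace as NS

responses = [
    NS(content=[NS(type="text", text="Hello "), NS(type="text", text="world")]),
    NS(content=[NS(type="tool_use", text=None)]),  # no text blocks: skipped entirely
]

final_text = []
for response in responses:
    # Concatenate all text blocks within one message, as the new code does
    message_text = "".join(c.text for c in response.content if c.type == "text")
    if message_text:
        final_text.append(message_text)

print("\n".join(final_text))  # "Hello world" - one entry per message, none empty
```
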

mcp_agent/workflows/llm/enhanced_passthrough.py ADDED
@@ -0,0 +1,70 @@
+# src/mcp_agent/workflows/llm/enhanced_passthrough.py
+
+
+from datetime import datetime
+from typing import List, Optional, Union
+from mcp_agent.core.simulator_registry import SimulatorRegistry
+from mcp_agent.workflows.llm.augmented_llm import (
+    AugmentedLLM,
+    MessageParamT,
+    RequestParams,
+)
+
+
+class EnhancedPassthroughLLM(AugmentedLLM):
+    """Enhanced passthrough LLM for testing parameter handling and workflows"""
+
+    def __init__(self, name: str = "Simulator", context=None, **kwargs):
+        super().__init__(name=name, context=context, **kwargs)
+        self.simulation_mode = kwargs.get("simulation_mode", "passthrough")
+        self.request_log = []
+        self.last_request_params = None
+
+        # Register this instance with the registry
+        SimulatorRegistry.register(self.name, self)
+
+    async def generate_str(
+        self,
+        message: Union[str, MessageParamT, List[MessageParamT]],
+        request_params: Optional[RequestParams] = None,
+    ) -> str:
+        """Capture parameters and log the request"""
+        # Store for assertion testing
+        self.last_request_params = request_params
+
+        # Log the request
+        self.request_log.append(
+            {
+                "timestamp": datetime.now().isoformat(),
+                "message": str(message),
+                "request_params": request_params.model_dump()
+                if request_params
+                else None,
+            }
+        )
+
+        # Display for debugging
+        self.show_user_message(str(message), model="simulator", chat_turn=0)
+
+        # Simulate response
+        result = f"[SIMULATOR] Response to: {message}"
+        await self.show_assistant_message(result, title="SIMULATOR")
+
+        return result
+
+    # Other generate methods with similar parameter capture
+
+    def get_parameter_usage_report(self):
+        """Generate report of parameter usage"""
+        param_usage = {}
+
+        for req in self.request_log:
+            params = req.get("request_params", {})
+            if params:
+                for key, value in params.items():
+                    if key not in param_usage:
+                        param_usage[key] = {"count": 0, "values": set()}
+                    param_usage[key]["count"] += 1
+                    param_usage[key]["values"].add(str(value))
+
+        return {"total_requests": len(self.request_log), "parameter_usage": param_usage}
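
A sketch of driving the simulator directly in a test (whether `AugmentedLLM` can be constructed without a context, and the behaviour of the display helpers, are assumptions here):

```python
import asyncio
from mcp_agent.core.simulator_registry import SimulatorRegistry
from mcp_agent.workflows.llm.enhanced_passthrough import EnhancedPassthroughLLM

async def check_capture():
    sim = EnhancedPassthroughLLM(name="Simulator")  # registers itself on init
    await sim.generate_str("ping")

    # The registry hands back the same instance for assertions
    assert SimulatorRegistry.get("Simulator") is sim
    assert sim.request_log[-1]["message"] == "ping"
    print(sim.get_parameter_usage_report())

asyncio.run(check_capture())
```
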

mcp_agent/workflows/llm/model_factory.py CHANGED
@@ -7,6 +7,7 @@ from mcp_agent.core.exceptions import ModelConfigError
 from mcp_agent.workflows.llm.augmented_llm_anthropic import AnthropicAugmentedLLM
 from mcp_agent.workflows.llm.augmented_llm_openai import OpenAIAugmentedLLM
 from mcp_agent.workflows.llm.augmented_llm import RequestParams
+from mcp_agent.workflows.llm.enhanced_passthrough import EnhancedPassthroughLLM
 
 # Type alias for LLM classes
 LLMClass = Union[Type[AnthropicAugmentedLLM], Type[OpenAIAugmentedLLM]]
@@ -17,6 +18,7 @@ class Provider(Enum):
 
     ANTHROPIC = auto()
     OPENAI = auto()
+    SIMULATOR = auto()
 
 
 class ReasoningEffort(Enum):
@@ -91,6 +93,7 @@ class ModelFactory:
     PROVIDER_CLASSES: Dict[Provider, LLMClass] = {
         Provider.ANTHROPIC: AnthropicAugmentedLLM,
         Provider.OPENAI: OpenAIAugmentedLLM,
+        Provider.SIMULATOR: EnhancedPassthroughLLM,
     }
 
     @classmethod
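
With the new mapping in place, resolving the simulator's LLM class is a plain dictionary lookup (how a user-facing model string selects `Provider.SIMULATOR` is not shown in this diff):

```python
from mcp_agent.workflows.llm.model_factory import ModelFactory, Provider
from mcp_agent.workflows.llm.enhanced_passthrough import EnhancedPassthroughLLM

# Provider.SIMULATOR now resolves to the parameter-capturing passthrough LLM.
assert ModelFactory.PROVIDER_CLASSES[Provider.SIMULATOR] is EnhancedPassthroughLLM
```
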