fast-agent-mcp 0.0.11__py3-none-any.whl → 0.0.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: fast-agent-mcp
- Version: 0.0.11
+ Version: 0.0.12
  Summary: Define, Prompt and Test MCP enabled Agents and Workflows
  Author-email: Shaun Smith <fastagent@llmindset.co.uk>, Sarmad Qadri <sarmad@lastmileai.dev>
  License: Apache License
@@ -217,6 +217,7 @@ Requires-Dist: numpy>=2.2.1
  Requires-Dist: openai>=1.63.2
  Requires-Dist: opentelemetry-distro>=0.50b0
  Requires-Dist: opentelemetry-exporter-otlp-proto-http>=1.29.0
+ Requires-Dist: prompt-toolkit>=3.0.50
  Requires-Dist: pydantic-settings>=2.7.0
  Requires-Dist: pydantic>=2.10.4
  Requires-Dist: pyyaml>=6.0.2
@@ -271,6 +272,7 @@ Other bootstrap examples include a Researcher (with Evaluator-Optimizer workflow
  ### llmindset.co.uk fork:

  - "FastAgent" style prototyping, with per-agent models
+ - API keys through Environment Variables
  - Warm-up / Post-Workflow Agent Interactions
  - Quick Setup
  - Interactive Prompt Mode
@@ -11,14 +11,15 @@ mcp_agent/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,
  mcp_agent/agents/agent.py,sha256=utMR_QWKD1_MqWE_fYY-xqUMKtGlekW0laJfduU6Ckw,9831
  mcp_agent/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/cli/__main__.py,sha256=bhxe66GYqy0q78OQhi7dkuubY1Tn0bQL6hU5Nn47E34,73
- mcp_agent/cli/main.py,sha256=wyOvUg0BihD1NpoiFcIaOruevgaHxs1-Xy_bnwb1Ik4,2449
+ mcp_agent/cli/main.py,sha256=h_TqBlpIMGhsJr6pp_oxUl00x3F9d1R-JQVwJ9uAreA,2449
  mcp_agent/cli/terminal.py,sha256=5fqrKlJvIpKEuvpvZ653OueQSYFFktBEbosjr2ucMUc,1026
- mcp_agent/cli/commands/bootstrap.py,sha256=lVWMAt3Uzd6HHvEJ3X28bbKdX6o1_F-Vs6682fdol20,10798
+ mcp_agent/cli/commands/bootstrap.py,sha256=Q55I2gL-K3Ja8c6MmbLZMVQQ_MaOTxnEC5se09XTI2s,10742
  mcp_agent/cli/commands/config.py,sha256=32YTS5jmsYAs9QzAhjkG70_daAHqOemf4XbZBBSMz6g,204
  mcp_agent/cli/commands/setup.py,sha256=dI_01B5nye707Rcd15gvZZCYlZGSiKajlnuLf6hJf2A,6197
  mcp_agent/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ mcp_agent/core/enhanced_prompt.py,sha256=U4hbAbm5WFUwTiwmBlNR7nadbMD9oHYgKuNNQVrGdvc,11047
  mcp_agent/core/exceptions.py,sha256=xDdhYh83ni3t0NiXQTEL0_Yyx0qQxBPQL1gSwRToeaw,1469
- mcp_agent/core/fastagent.py,sha256=obVbZ8AwqxpeHg-LQvlwukswDxvk3sMPzkvPYk1QJSk,47127
+ mcp_agent/core/fastagent.py,sha256=Wn5uHR6DIzMXu4S93Ll8oFD7VJUcSBH4wMm4c0aofpw,48858
  mcp_agent/core/server_validation.py,sha256=_59cn16nNT4HGPwg19HgxMtHK4MsdWYDUw_CuL-5xek,1696
  mcp_agent/eval/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/executor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -50,12 +51,18 @@ mcp_agent/mcp/stdio.py,sha256=tW075R5rQ-UlflXWFKIFDgCbWbuhKqxhiYolWvyEkFs,3985
  mcp_agent/resources/examples/data-analysis/analysis.py,sha256=Sp-umPPfwVjG3yNrHdQA6blGtG6jc5of1e_0oS4njYc,1379
  mcp_agent/resources/examples/data-analysis/fastagent.config.yaml,sha256=eTKGbjnTHhDTeNRPQvG_fr9OQpEZ5Y9v7X2NyCj0V70,530
  mcp_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv,sha256=pcMeOL1_r8m8MziE6xgbBrQbjl5Ijo98yycZn7O-dlk,227977
- mcp_agent/resources/examples/internal/job.py,sha256=uqIqc1i5PfWynobwNaWWcQ6FP_BRifINd4ncmPvzwm0,4093
- mcp_agent/resources/examples/mcp_researcher/researcher-eval.py,sha256=NI1vujVuLeTrcF8dM_ipZZ3Tg-1AL35CaltmuzxWrU4,1807
+ mcp_agent/resources/examples/internal/agent.py,sha256=f-jTgYabV3nWCQm0ZP9NtSEWjx3nQbRngzArRufcELg,384
+ mcp_agent/resources/examples/internal/job.py,sha256=WEKIAANMEAuKr13__rYf3PqJeTAsNB_kqYqbqVYQlUM,4093
+ mcp_agent/resources/examples/mcp_researcher/researcher-eval.py,sha256=kNPjIU-JwE0oIBQKwhv6lZsUF_SPtYVkiEEbY1ZVZxk,1807
  mcp_agent/resources/examples/mcp_researcher/researcher.py,sha256=jPRafm7jbpHKkX_dQiYGG3Sw-e1Dm86q-JZT-WZDhM0,1425
+ mcp_agent/resources/examples/researcher/fastagent.config.yaml,sha256=2_VXZneckR6zk6RWzzL-smV_oWmgg4uSkLWqZv8jF0I,1995
+ mcp_agent/resources/examples/researcher/researcher-eval.py,sha256=kNPjIU-JwE0oIBQKwhv6lZsUF_SPtYVkiEEbY1ZVZxk,1807
+ mcp_agent/resources/examples/researcher/researcher.py,sha256=jPRafm7jbpHKkX_dQiYGG3Sw-e1Dm86q-JZT-WZDhM0,1425
+ mcp_agent/resources/examples/workflows/agent.py,sha256=f-jTgYabV3nWCQm0ZP9NtSEWjx3nQbRngzArRufcELg,384
  mcp_agent/resources/examples/workflows/agent_build.py,sha256=vdjS02rZR88RU53WYzXxPscfFNEFFe_niHYE_i49I8Q,2396
  mcp_agent/resources/examples/workflows/chaining.py,sha256=QD_r_PKIoDedWqOTzg7IBnTY8OVoDSMot5WnArJubnc,751
- mcp_agent/resources/examples/workflows/evaluator.py,sha256=ByILFY7PsA8UXtmNa4YtLIGSsnVfZVjKlHGH9G0ie2I,3069
+ mcp_agent/resources/examples/workflows/evaluator.py,sha256=kC8uBcCMoeDROip4B_X6jLr-1QXXvcUB0fZ6elun7k4,3147
+ mcp_agent/resources/examples/workflows/fastagent.py,sha256=lkO3waYLt_zQtAVqGjirmIsG73jpHA5ad1WSm4BXv2I,532
  mcp_agent/resources/examples/workflows/human_input.py,sha256=c8cBdLEPbaMXddFwsfN3Z7RFs5PZXsdrjANfvq1VTPM,605
  mcp_agent/resources/examples/workflows/orchestrator.py,sha256=kHUDDALqjA8TRjkbsDP2MwspEj1a5DdSUOPAiI17izQ,2545
  mcp_agent/resources/examples/workflows/parallel.py,sha256=cNYcIcsdo0-KK-S7KEPCc11aWELeVlQJdJ2LIC9xgDs,3090
@@ -68,7 +75,7 @@ mcp_agent/workflows/embedding/embedding_base.py,sha256=-c20ggQ8s7XhMxRX-WEhOgHE7
  mcp_agent/workflows/embedding/embedding_cohere.py,sha256=OKTJvKD_uEafd4c2uhR5tBjprea1nyvlJOO-3FDqOnk,1540
  mcp_agent/workflows/embedding/embedding_openai.py,sha256=dntjJ5P-FSMGYuyPZC8MuCU_ehwjXw9wDfzZZuSQN1E,1480
  mcp_agent/workflows/evaluator_optimizer/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py,sha256=ddAfHu8jFGwdVGbHwm7CZQnmtMeDnIl1gIB_lfkXx_E,13628
+ mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py,sha256=r0ATtuc8JouBZz6iPc_dQ6g5oUZc7JXfbI9adKd2Wg8,13572
  mcp_agent/workflows/intent_classifier/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  mcp_agent/workflows/intent_classifier/intent_classifier_base.py,sha256=zTbOmq6EY_abOlme4zl28HM4RWNNS6bbHl3tF7SshJ0,4004
  mcp_agent/workflows/intent_classifier/intent_classifier_embedding.py,sha256=_bWZGukc_q9LdA_Q18UoAMSzhN8tt4K_bRHNUhy7Crw,3997
@@ -78,9 +85,9 @@ mcp_agent/workflows/intent_classifier/intent_classifier_llm.py,sha256=WSLUv2Casb
  mcp_agent/workflows/intent_classifier/intent_classifier_llm_anthropic.py,sha256=Hp4454IniWFxV4ml50Ml8ip9rS1La5FBn5pd7vm1FHA,1964
  mcp_agent/workflows/intent_classifier/intent_classifier_llm_openai.py,sha256=zj76WlTYnSCYjBQ_IDi5vFBQGmNwYaoUq1rT730sY98,1940
  mcp_agent/workflows/llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- mcp_agent/workflows/llm/augmented_llm.py,sha256=BQ7xhYVzloE7_VeuJvvjABYs7sNxJjfS14QdvHUUvjY,23209
- mcp_agent/workflows/llm/augmented_llm_anthropic.py,sha256=_Td5UTNfnaFtqN3XNV5bX1w7ituRjzoWMOyaobz9vO4,21127
- mcp_agent/workflows/llm/augmented_llm_openai.py,sha256=RqsbX0Fc5By1AvQ2N85hxzz0d84mVwuPggslxwqSJVM,24190
+ mcp_agent/workflows/llm/augmented_llm.py,sha256=AjYxTn2XdBDHnibmjlCKwaVfQQlQRES9sRBMIU6NaPQ,23258
+ mcp_agent/workflows/llm/augmented_llm_anthropic.py,sha256=yrOv1V6rOfm2TTDR58fnf8YU8hBnTIpOZhB2sUgZw6o,21246
+ mcp_agent/workflows/llm/augmented_llm_openai.py,sha256=5PwTh0QJSQ29EtK0UuiltgX6snRSBoau75C35S4xQcQ,24477
  mcp_agent/workflows/llm/llm_selector.py,sha256=G7pIybuBDwtmyxUDov_QrNYH2FoI0qFRu2JfoxWUF5Y,11045
  mcp_agent/workflows/llm/model_factory.py,sha256=5JrMXZ5jbE8isiteF2A912gGuCyomGpjtC_BCVSAM9s,6806
  mcp_agent/workflows/orchestrator/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -101,8 +108,8 @@ mcp_agent/workflows/swarm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJW
  mcp_agent/workflows/swarm/swarm.py,sha256=-lAIeSWDqbGHGRPTvjiP9nIKWvxxy9DAojl9yQzO1Pw,11050
  mcp_agent/workflows/swarm/swarm_anthropic.py,sha256=pW8zFx5baUWGd5Vw3nIDF2oVOOGNorij4qvGJKdYPcs,1624
  mcp_agent/workflows/swarm/swarm_openai.py,sha256=wfteywvAGkT5bLmIxX_StHJq8144whYmCRnJASAjOes,1596
- fast_agent_mcp-0.0.11.dist-info/METADATA,sha256=p3Q0Gcx-dzK-vJA6F2s2iaw6NNm2RoWrB5VakfLIp00,16504
- fast_agent_mcp-0.0.11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- fast_agent_mcp-0.0.11.dist-info/entry_points.txt,sha256=2IXtSmDK9XjWN__RWuRIJTgWyW17wJnJ_h-pb0pZAxo,174
- fast_agent_mcp-0.0.11.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
- fast_agent_mcp-0.0.11.dist-info/RECORD,,
+ fast_agent_mcp-0.0.12.dist-info/METADATA,sha256=mgH0wJrtVRReqKTn2a-ANc_reC_quUXY-texT2zGdVI,16583
+ fast_agent_mcp-0.0.12.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ fast_agent_mcp-0.0.12.dist-info/entry_points.txt,sha256=2IXtSmDK9XjWN__RWuRIJTgWyW17wJnJ_h-pb0pZAxo,174
+ fast_agent_mcp-0.0.12.dist-info/licenses/LICENSE,sha256=cN3FxDURL9XuzE5mhK9L2paZo82LTfjwCYVT7e3j0e4,10939
+ fast_agent_mcp-0.0.12.dist-info/RECORD,,
mcp_agent/cli/commands/bootstrap.py CHANGED
@@ -33,11 +33,7 @@ EXAMPLE_TYPES = {
          "description": "Research agent example with additional evaluation/optimization\n"
          "example. Uses Brave Search and Docker MCP Servers.\n"
          "Creates examples in a 'researcher' subdirectory.",
-         "files": [
-             "researcher.py",
-             "researcher-eval.py",
-             "mcp_agent.secrets.yaml.example",
-         ],
+         "files": ["researcher.py", "researcher-eval.py", "fastagent.config.yaml"],
          "create_subdir": True,
      },
      "data-analysis": {
mcp_agent/cli/main.py CHANGED
@@ -40,11 +40,11 @@ def show_welcome():

      console.print("\n[bold]Getting Started:[/bold]")
      console.print("1. Set up a new project:")
-     console.print(" mcp-agent setup")
+     console.print(" fastagent setup")
      console.print("\n2. Try an example:")
-     console.print(" mcp-agent bootstrap create decorator")
+     console.print(" fastagent bootstrap create decorator")
      console.print("\nUse --help with any command for more information")
-     console.print("Example: mcp-agent bootstrap --help")
+     console.print("Example: fastagent bootstrap --help")


  @app.callback(invoke_without_command=True)
@@ -56,7 +56,7 @@ def main(
          True, "--color/--no-color", help="Enable/disable color output"
      ),
  ):
-     """MCP Agent CLI - Build effective agents using Model Context Protocol (MCP).
+     """FastAgent CLI - Build effective agents using Model Context Protocol (MCP).

      Use --help with any command for detailed usage information.
      """
mcp_agent/core/enhanced_prompt.py ADDED
@@ -0,0 +1,315 @@
+ """
+ Enhanced prompt functionality with advanced prompt_toolkit features.
+ """
+
+ from typing import List
+ from prompt_toolkit import PromptSession
+ from prompt_toolkit.formatted_text import HTML
+ from prompt_toolkit.history import InMemoryHistory
+ from prompt_toolkit.key_binding import KeyBindings
+ from prompt_toolkit.completion import Completer, Completion
+ from prompt_toolkit.lexers import PygmentsLexer
+ from prompt_toolkit.filters import Condition
+ from pygments.lexers.python import PythonLexer
+ from rich import print as rich_print
+
+ # Map of agent names to their history
+ agent_histories = {}
+
+ # Store available agents for auto-completion
+ available_agents = set()
+
+ # Keep track of multi-line mode state
+ in_multiline_mode = False
+
+ # Track which agents have already shown welcome messages
+ agent_messages_shown = set()
+
+
+ class AgentCompleter(Completer):
+     """Provide completion for agent names and common commands."""
+
+     def __init__(self, agents: List[str], commands: List[str] = None, agent_types: dict = None):
+         self.agents = agents
+         self.commands = commands or ["help", "clear", "STOP"]
+         self.agent_types = agent_types or {}
+
+     def get_completions(self, document, complete_event):
+         """Synchronous completions method - this is what prompt_toolkit expects by default"""
+         text = document.text_before_cursor.lower()
+
+         # Complete commands
+         if text.startswith("/"):
+             cmd = text[1:]
+             for command in self.commands:
+                 if command.lower().startswith(cmd):
+                     yield Completion(
+                         command,
+                         start_position=-len(cmd),
+                         display=command,
+                         display_meta="Command",
+                     )
+
+         # Complete agent names for agent-related commands
+         elif text.startswith("@"):
+             agent_name = text[1:]
+             for agent in self.agents:
+                 if agent.lower().startswith(agent_name.lower()):
+                     # Get agent type or default to "Agent"
+                     agent_type = self.agent_types.get(agent, "Agent")
+                     yield Completion(
+                         agent,
+                         start_position=-len(agent_name),
+                         display=agent,
+                         display_meta=agent_type,
+                     )
+
+
+ def create_keybindings(on_toggle_multiline=None, app=None):
+     """Create custom key bindings."""
+     kb = KeyBindings()
+
+     @kb.add("c-m", filter=Condition(lambda: not in_multiline_mode))
+     def _(event):
+         """Enter: accept input when not in multiline mode."""
+         event.current_buffer.validate_and_handle()
+
+     @kb.add("c-m", filter=Condition(lambda: in_multiline_mode))
+     def _(event):
+         """Enter: insert newline when in multiline mode."""
+         event.current_buffer.insert_text("\n")
+
+     @kb.add("escape", "enter")
+     def _(event):
+         """Alt+Enter: always submit even in multiline mode."""
+         event.current_buffer.validate_and_handle()
+
+     @kb.add("c-t")
+     def _(event):
+         """Ctrl+T: Toggle multiline mode."""
+         global in_multiline_mode
+         in_multiline_mode = not in_multiline_mode
+
+         # Force redraw the app to update toolbar
+         if event.app:
+             event.app.invalidate()
+         elif app:
+             app.invalidate()
+
+         # Call the toggle callback if provided
+         if on_toggle_multiline:
+             on_toggle_multiline(in_multiline_mode)
+
+         # Instead of printing, we'll just update the toolbar
+         # The toolbar will show the current mode
+
+     @kb.add("c-l")
+     def _(event):
+         """Ctrl+L: Clear input."""
+         event.current_buffer.text = ""
+
+     return kb
+
+
+ async def get_enhanced_input(
+     agent_name: str,
+     default: str = "",
+     show_default: bool = False,
+     show_stop_hint: bool = False,
+     multiline: bool = False,
+     available_agent_names: List[str] = None,
+     syntax: str = None,
+     agent_types: dict = None,
+ ) -> str:
+     """
+     Enhanced input with advanced prompt_toolkit features.
+
+     Args:
+         agent_name: Name of the agent (used for prompt and history)
+         default: Default value if user presses enter
+         show_default: Whether to show the default value in the prompt
+         show_stop_hint: Whether to show the STOP hint
+         multiline: Start in multiline mode
+         available_agent_names: List of agent names for auto-completion
+         syntax: Syntax highlighting (e.g., 'python', 'sql')
+         agent_types: Dictionary mapping agent names to their types for display
+
+     Returns:
+         User input string
+     """
+     global in_multiline_mode, available_agents
+
+     # Update global state
+     in_multiline_mode = multiline
+     if available_agent_names:
+         available_agents = set(available_agent_names)
+
+     # Get or create history object for this agent
+     if agent_name not in agent_histories:
+         agent_histories[agent_name] = InMemoryHistory()
+
+     # Define callback for multiline toggle
+     def on_multiline_toggle(enabled):
+         nonlocal session
+         if hasattr(session, "app") and session.app:
+             session.app.invalidate()
+
+     # Define toolbar function that will update dynamically
+     def get_toolbar():
+         if in_multiline_mode:
+             mode_style = "ansired"  # More noticeable for multiline mode
+             mode_text = "MULTILINE"
+             toggle_text = "Normal Editing"
+         else:
+             mode_style = "ansigreen"
+             mode_text = "NORMAL"
+             toggle_text = "Multiline Editing"
+
+         shortcuts = [
+             ("Ctrl+T", toggle_text),
+             ("Alt+Enter", "Submit" if in_multiline_mode else ""),
+             ("Ctrl+L", "Clear"),
+             ("↑/↓", "History"),
+         ]
+         # Only show relevant shortcuts based on mode
+         shortcuts = [(k, v) for k, v in shortcuts if v]
+
+         shortcut_text = " | ".join(f"{key}:{action}" for key, action in shortcuts)
+         return HTML(
+             f" <b>Agent:</b> <ansiblue> {agent_name} </ansiblue> | <b>Mode:</b> <{mode_style}> {mode_text} </{mode_style}> | {shortcut_text}"
+         )
+
+     # Create session with history and completions
+     session = PromptSession(
+         history=agent_histories[agent_name],
+         completer=AgentCompleter(
+             agents=list(available_agents) if available_agents else [],
+             agent_types=agent_types or {},
+         ),
+         complete_while_typing=True,
+         lexer=PygmentsLexer(PythonLexer) if syntax == "python" else None,
+         multiline=Condition(lambda: in_multiline_mode),
+         complete_in_thread=True,
+         mouse_support=True,
+         bottom_toolbar=get_toolbar,  # Pass the function here
+     )
+
+     # Create key bindings with a reference to the app
+     bindings = create_keybindings(
+         on_toggle_multiline=on_multiline_toggle, app=session.app
+     )
+     session.app.key_bindings = bindings
+
+     # Create formatted prompt text
+     prompt_text = f"<ansicyan>{agent_name}</ansicyan> > "
+
+     # Add default value display if requested
+     if show_default and default and default != "STOP":
+         prompt_text = f"{prompt_text} [<ansigreen>{default}</ansigreen>] "
+
+     # Only show hints at startup if requested
+     if show_stop_hint:
+         if default == "STOP":
+             rich_print("[yellow]Press <ENTER> to finish.[/yellow]")
+         else:
+             rich_print("Enter a prompt, or [red]STOP[/red] to finish")
+             if default:
+                 rich_print(
+                     f"Press <ENTER> to use the default prompt:\n[cyan]{default}[/cyan]"
+                 )
+
+     # Mention available features but only on first usage for this agent
+     if agent_name not in agent_messages_shown:
+         rich_print(
+             "[dim]Tip: Type /help for commands, press F1 for keyboard shortcuts. Ctrl+T toggles multiline mode. @Agent to switch agent[/dim]"
+         )
+         agent_messages_shown.add(agent_name)
+
+     # Process special commands
+     def pre_process_input(text):
+         # Command processing
+         if text and text.startswith("/"):
+             cmd = text[1:].strip().lower()
+             if cmd == "help":
+                 return "HELP"
+             elif cmd == "clear":
+                 return "CLEAR"
+             elif cmd == "agents":
+                 return "LIST_AGENTS"
+
+         # Agent switching
+         if text and text.startswith("@"):
+             return f"SWITCH:{text[1:].strip()}"
+
+         return text
+
+     # Get the input - using async version
+     try:
+         result = await session.prompt_async(HTML(prompt_text), default=default)
+         return pre_process_input(result)
+     except KeyboardInterrupt:
+         # Handle Ctrl+C gracefully
+         return "STOP"
+     except EOFError:
+         # Handle Ctrl+D gracefully
+         return "STOP"
+     except Exception as e:
+         # Log and gracefully handle other exceptions
+         print(f"\nInput error: {type(e).__name__}: {e}")
+         return "STOP"
+
+
+ async def handle_special_commands(command, agent_app=None):
+     """Handle special input commands."""
+     # Quick guard for empty or None commands
+     if not command:
+         return False
+
+     # Check for special commands
+     if command == "HELP":
+         rich_print("\n[bold]Available Commands:[/bold]")
+         rich_print(" /help - Show this help")
+         rich_print(" /clear - Clear screen")
+         rich_print(" /agents - List available agents")
+         rich_print(" @agent_name - Switch to agent")
+         rich_print(" STOP - End session")
+         rich_print("\n[bold]Keyboard Shortcuts:[/bold]")
+         rich_print(
+             " Enter - Submit (normal mode) / New line (multiline mode)"
+         )
+         rich_print(" Alt+Enter - Always submit (even in multiline mode)")
+         rich_print(" Ctrl+T - Toggle multiline mode")
+         rich_print(" Ctrl+L - Clear input")
+         rich_print(" Up/Down - Navigate history")
+         rich_print(" F1 - Show help")
+         return True
+
+     elif command == "CLEAR":
+         # Clear screen (ANSI escape sequence)
+         print("\033c", end="")
+         return True
+
+     elif command == "LIST_AGENTS":
+         if available_agents:
+             rich_print("\n[bold]Available Agents:[/bold]")
+             for agent in sorted(available_agents):
+                 rich_print(f" @{agent}")
+         else:
+             rich_print("[yellow]No agents available[/yellow]")
+         return True
+
+     elif isinstance(command, str) and command.startswith("SWITCH:"):
+         agent_name = command.split(":", 1)[1]
+         if agent_name in available_agents:
+             if agent_app:
+                 rich_print(f"[green]Switching to agent: {agent_name}[/green]")
+                 return {"switch_agent": agent_name}
+             else:
+                 rich_print(
+                     "[yellow]Agent switching not available in this context[/yellow]"
+                 )
+         else:
+             rich_print(f"[red]Unknown agent: {agent_name}[/red]")
+         return True
+
+     return False
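Taken together, get_enhanced_input() returns either raw text or a sentinel string (HELP, CLEAR, LIST_AGENTS, SWITCH:<name>, STOP), and handle_special_commands() consumes the sentinels. A hypothetical driver loop showing the round trip — demo code, not part of the package:

```python
import asyncio

from mcp_agent.core.enhanced_prompt import get_enhanced_input, handle_special_commands


async def demo():
    agents = ["researcher", "evaluator"]
    current = "researcher"
    while True:
        user_input = await get_enhanced_input(
            agent_name=current,
            available_agent_names=agents,
            agent_types={"researcher": "Agent", "evaluator": "Evaluator"},
        )
        if user_input == "STOP":
            break
        # A truthy agent_app enables the @agent switch branch.
        handled = await handle_special_commands(user_input, agent_app=True)
        if isinstance(handled, dict) and "switch_agent" in handled:
            current = handled["switch_agent"]
        elif not handled:
            print(f"would send to {current}: {user_input}")


if __name__ == "__main__":
    asyncio.run(demo())
```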
mcp_agent/core/fastagent.py CHANGED
@@ -28,7 +28,6 @@ from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
  )
  from mcp_agent.workflows.router.router_llm import LLMRouter
  from mcp_agent.config import Settings
- from rich.prompt import Prompt
  from rich import print
  from mcp_agent.progress_display import progress_display
  from mcp_agent.workflows.llm.model_factory import ModelFactory
@@ -161,34 +160,73 @@ class AgentApp:

      async def prompt(self, agent_name: Optional[str] = None, default: str = "") -> str:
          """
-         Interactive prompt for sending messages.
+         Interactive prompt for sending messages with advanced features.

          Args:
              agent_name: Optional target agent name (uses default if not specified)
-             default_prompt: Default message to use when user presses enter
+             default: Default message to use when user presses enter
          """
+         from .enhanced_prompt import get_enhanced_input, handle_special_commands

          agent = agent_name or self._default

          if agent not in self._agents:
              raise ValueError(f"No agent named '{agent}'")
+
+         # Pass all available agent names for auto-completion
+         available_agents = list(self._agents.keys())
+
+         # Create agent_types dictionary mapping agent names to their types
+         agent_types = {}
+         for name, proxy in self._agents.items():
+             # Determine agent type based on the proxy type
+             if isinstance(proxy, LLMAgentProxy):
+                 # Convert AgentType.BASIC.value ("agent") to "Agent"
+                 agent_types[name] = "Agent"
+             elif isinstance(proxy, RouterProxy):
+                 agent_types[name] = "Router"
+             elif isinstance(proxy, WorkflowProxy):
+                 # For workflow proxies, check the workflow type
+                 workflow = proxy._workflow
+                 if isinstance(workflow, Orchestrator):
+                     agent_types[name] = "Orchestrator"
+                 elif isinstance(workflow, ParallelLLM):
+                     agent_types[name] = "Parallel"
+                 elif isinstance(workflow, EvaluatorOptimizerLLM):
+                     agent_types[name] = "Evaluator"
+                 else:
+                     agent_types[name] = "Workflow"
+
          result = ""
          while True:
              with progress_display.paused():
-                 if default == "STOP":
-                     print("Press <ENTER> to finish.")
-                 elif default != "":
-                     print("Enter a prompt, or [red]STOP[/red] to finish.")
-                     print(
-                         f"Press <ENTER> to use the default prompt:\n[cyan]{default}[/cyan]"
-                     )
-                 else:
-                     print("Enter a prompt, or [red]STOP[/red] to finish")
-
-                 prompt_text = f"[blue]{agent}[/blue] >"
-                 user_input = Prompt.ask(
-                     prompt=prompt_text, default=default, show_default=False
+                 # Use the enhanced input method with advanced features
+                 user_input = await get_enhanced_input(
+                     agent_name=agent,
+                     default=default,
+                     show_default=(default != ""),
+                     show_stop_hint=True,
+                     multiline=False,  # Default to single-line mode
+                     available_agent_names=available_agents,
+                     syntax=None,  # Can enable syntax highlighting for code input
+                     agent_types=agent_types,  # Pass agent types for display
                  )
+
+             # Handle special commands
+             command_result = await handle_special_commands(user_input, self)
+
+             # Check if we should switch agents
+             if (
+                 isinstance(command_result, dict)
+                 and "switch_agent" in command_result
+             ):
+                 agent = command_result["switch_agent"]
+                 continue
+
+             # Skip further processing if command was handled
+             if command_result:
+                 continue
+
              if user_input.upper() == "STOP":
                  return
              if user_input == "":
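The rewritten loop delegates all command handling to handle_special_commands() and inspects its return value. A compressed, illustrative sketch of that contract (not package code):

```python
# Illustrative summary of the contract between prompt() and
# handle_special_commands(): a dict requests an agent switch, any other
# truthy result means "handled, prompt again", falsy falls through to the LLM.
def route(command_result, current_agent: str) -> tuple[str, str]:
    if isinstance(command_result, dict) and "switch_agent" in command_result:
        return command_result["switch_agent"], "reprompt"
    if command_result:
        return current_agent, "reprompt"  # e.g. /help or /agents output was shown
    return current_agent, "send"          # forward the text to the agent
```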
@@ -400,12 +438,12 @@ class FastAgent(ContextDependent):
          elif agent_type == AgentType.EVALUATOR_OPTIMIZER.value:
              # Check both evaluator and optimizer exist
              evaluator = agent_data["evaluator"]
-             optimizer = agent_data["optimizer"]
+             generator = agent_data["generator"]
              missing = []
              if evaluator not in available_components:
                  missing.append(f"evaluator: {evaluator}")
-             if optimizer not in available_components:
-                 missing.append(f"optimizer: {optimizer}")
+             if generator not in available_components:
+                 missing.append(f"generator: {generator}")
              if missing:
                  raise AgentConfigError(
                      f"Evaluator-Optimizer '{name}' references non-existent components: {', '.join(missing)}"
@@ -672,7 +710,7 @@ class FastAgent(ContextDependent):
      def evaluator_optimizer(
          self,
          name: str,
-         optimizer: str,
+         generator: str,
          evaluator: str,
          min_rating: str = "GOOD",
          max_refinements: int = 3,
@@ -684,7 +722,7 @@ class FastAgent(ContextDependent):

          Args:
              name: Name of the workflow
-             optimizer: Name of the optimizer agent
+             generator: Name of the generator agent
              evaluator: Name of the evaluator agent
              min_rating: Minimum acceptable quality rating (EXCELLENT, GOOD, FAIR, POOR)
              max_refinements: Maximum number of refinement iterations
@@ -699,7 +737,7 @@ class FastAgent(ContextDependent):
              wrapper_needed=True,
          )(
              name=name,
-             optimizer=optimizer,
+             generator=generator,
              evaluator=evaluator,
              min_rating=min_rating,
              max_refinements=max_refinements,
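After this rename, workflow definitions pass generator= where 0.0.11 took optimizer=; the bundled examples further down this diff are updated accordingly. A minimal sketch (agent names here are placeholders):

```python
from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("Example")


# 0.0.11: @fast.evaluator_optimizer(name="writer", optimizer="content_generator", ...)
# 0.0.12:
@fast.evaluator_optimizer(
    name="writer",
    generator="content_generator",   # renamed from optimizer=
    evaluator="consistency_checker",
    min_rating="GOOD",
    max_refinements=3,
)
async def main():
    ...
```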
@@ -853,27 +891,27 @@ class FastAgent(ContextDependent):

          elif agent_type == AgentType.EVALUATOR_OPTIMIZER:
              # Get the referenced agents - unwrap from proxies
-             optimizer = self._unwrap_proxy(
-                 active_agents[agent_data["optimizer"]]
+             generator = self._unwrap_proxy(
+                 active_agents[agent_data["generator"]]
              )
              evaluator = self._unwrap_proxy(
                  active_agents[agent_data["evaluator"]]
              )

-             if not optimizer or not evaluator:
+             if not generator or not evaluator:
                  raise ValueError(
                      f"Missing agents for workflow {name}: "
-                     f"optimizer={agent_data['optimizer']}, "
+                     f"generator={agent_data['generator']}, "
                      f"evaluator={agent_data['evaluator']}"
                  )

              # TODO: Remove legacy - factory usage is only needed for str evaluators
              # Later this should only be passed when evaluator is a string
              optimizer_model = (
-                 optimizer.config.model if isinstance(optimizer, Agent) else None
+                 generator.config.model if isinstance(generator, Agent) else None
              )
              instance = EvaluatorOptimizerLLM(
-                 optimizer=optimizer,
+                 generator=generator,
                  evaluator=evaluator,
                  min_rating=QualityRating[agent_data["min_rating"]],
                  max_refinements=agent_data["max_refinements"],
mcp_agent/resources/examples/internal/agent.py ADDED
@@ -0,0 +1,17 @@
+ import asyncio
+ from mcp_agent.core.fastagent import FastAgent
+
+ # Create the application
+ fast = FastAgent("FastAgent Example")
+
+
+ # Define the agent
+ @fast.agent(servers=["fetch"])
+ async def main():
+     # use the --model command line switch or agent arguments to change model
+     async with fast.run() as agent:
+         await agent()
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
mcp_agent/resources/examples/internal/job.py CHANGED
@@ -52,7 +52,7 @@ fast = FastAgent("PMO Job Description Generator")
  )
  @fast.evaluator_optimizer(
      name="job_description_writer",
-     optimizer="content_generator",
+     generator="content_generator",
      evaluator="consistency_checker",
      min_rating="EXCELLENT",
      max_refinements=2,
mcp_agent/resources/examples/mcp_researcher/researcher-eval.py CHANGED
@@ -35,7 +35,7 @@ Summarize your evaluation as a structured response with:
  - Specific feedback and areas for improvement.""",
  )
  @agents.evaluator_optimizer(
-     optimizer="Researcher",
+     generator="Researcher",
      evaluator="Evaluator",
      max_refinements=5,
      min_rating="EXCELLENT",
mcp_agent/resources/examples/researcher/fastagent.config.yaml ADDED
@@ -0,0 +1,53 @@
+ #
+ # Please edit this configuration file to match your environment (on Windows).
+ # Examples in comments below - check/change the paths.
+ #
+ #
+
+ execution_engine: asyncio
+ logger:
+   type: file
+   level: error
+   truncate_tools: true
+
+ mcp:
+   servers:
+     brave:
+       # On windows replace the command and args line to use `node` and the absolute path to the server.
+       # Use `npm i -g @modelcontextprotocol/server-brave-search` to install the server globally.
+       # Use `npm -g root` to find the global node_modules path.
+       # command: "node"
+       # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-brave-search/dist/index.js"]
+       command: "npx"
+       args: ["-y", "@modelcontextprotocol/server-brave-search"]
+       env:
+         # You can also place your BRAVE_API_KEY in the fastagent.secrets.yaml file.
+         BRAVE_API_KEY: <your_brave_api_key>
+     filesystem:
+       # On windows update the command and arguments to use `node` and the absolute path to the server.
+       # Use `npm i -g @modelcontextprotocol/server-filesystem` to install the server globally.
+       # Use `npm -g root` to find the global node_modules path.
+       # command: "node"
+       # args: ["c:/Program Files/nodejs/node_modules/@modelcontextprotocol/server-filesystem/dist/index.js","./agent_folder"]
+       command: "npx"
+       args: ["-y", "@modelcontextprotocol/server-filesystem", "./agent_folder/"]
+     interpreter:
+       command: "docker"
+       args: [
+         "run",
+         "-i",
+         "--rm",
+         "--pull=always",
+         "-v",
+         "./agent_folder:/mnt/data/",
+         # Docker needs the absolute path on Windows (e.g. "x:/fastagent/agent_folder:/mnt/data/")
+         # "./agent_folder:/mnt/data/",
+         "ghcr.io/evalstate/mcp-py-repl:latest",
+       ]
+       roots:
+         - uri: "file://./agent_folder/"
+           name: "agent_folder"
+           server_uri_alias: "file:///mnt/data/"
+     fetch:
+       command: "uvx"
+       args: ["mcp-server-fetch"]
mcp_agent/resources/examples/researcher/researcher-eval.py ADDED
@@ -0,0 +1,53 @@
+ import asyncio
+
+ from mcp_agent.core.fastagent import FastAgent
+
+ agents = FastAgent(name="Researcher")
+
+
+ @agents.agent(
+     name="Researcher",
+     instruction="""
+ You are a research assistant, with access to internet search (via Brave),
+ website fetch, a python interpreter (you can install packages with uv) and a filesystem.
+ Use the current working directory to save and create files with both the Interpreter and Filesystem tools.
+ The interpreter has numpy, pandas, matplotlib and seaborn already installed.
+
+ You must always provide a summary of the specific sources you have used in your research.
+     """,
+     servers=["brave", "interpreter", "filesystem", "fetch"],
+ )
+ @agents.agent(
+     name="Evaluator",
+     model="sonnet",
+     instruction="""
+ Evaluate the response from the researcher based on the criteria:
+ - Sources cited. Has the researcher provided a summary of the specific sources used in the research?
+ - Validity. Has the researcher cross-checked and validated data and assumptions?
+ - Alignment. Has the researcher acted on and addressed feedback from any previous assessments?
+
+ For each criterion:
+ - Provide a rating (EXCELLENT, GOOD, FAIR, or POOR).
+ - Offer specific feedback or suggestions for improvement.
+
+ Summarize your evaluation as a structured response with:
+ - Overall quality rating.
+ - Specific feedback and areas for improvement.""",
+ )
+ @agents.evaluator_optimizer(
+     generator="Researcher",
+     evaluator="Evaluator",
+     max_refinements=5,
+     min_rating="EXCELLENT",
+     name="Researcher_Evaluator",
+ )
+ async def main():
+     async with agents.run() as agent:
+         await agent.prompt("Researcher_Evaluator")
+
+         print("Ask follow-up questions to the Researcher?")
+         await agent.prompt("Researcher", default="STOP")
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
mcp_agent/resources/examples/researcher/researcher.py ADDED
@@ -0,0 +1,38 @@
+ import asyncio
+
+ from mcp_agent.core.fastagent import FastAgent
+ # from rich import print
+
+ agents = FastAgent(name="Researcher")
+
+
+ @agents.agent(
+     "Researcher",
+     instruction="""
+ You are a research assistant, with access to internet search (via Brave),
+ website fetch, a python interpreter (you can install packages with uv) and a filesystem.
+ Use the current working directory to save and create files with both the Interpreter and Filesystem tools.
+ The interpreter has numpy, pandas, matplotlib and seaborn already installed
+     """,
+     servers=["brave", "interpreter", "filesystem", "fetch"],
+ )
+ async def main():
+     research_prompt = """
+ Produce an investment report for the company Eutelsat. The final report should be saved in the filesystem in markdown format, and
+ contain at least the following:
+ 1 - A brief description of the company
+ 2 - Current financial position (find data, create and incorporate charts)
+ 3 - A PESTLE analysis
+ 4 - An investment thesis for the next 3 years. Include both 'buy side' and 'sell side' arguments, and a final
+ summary and recommendation.
+ Today's date is 15 February 2025. Include the main data sources consulted in presenting the report."""  # noqa: F841
+
+     async with agents.run() as agent:
+         await agent.prompt()
+
+         # await agent.prompt(default="STOP")
+         # await agent.prompt(default=research_prompt)
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
mcp_agent/resources/examples/workflows/agent.py ADDED
@@ -0,0 +1,17 @@
+ import asyncio
+ from mcp_agent.core.fastagent import FastAgent
+
+ # Create the application
+ fast = FastAgent("FastAgent Example")
+
+
+ # Define the agent
+ @fast.agent(servers=["fetch"])
+ async def main():
+     # use the --model command line switch or agent arguments to change model
+     async with fast.run() as agent:
+         await agent()
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
mcp_agent/resources/examples/workflows/evaluator.py CHANGED
@@ -11,7 +11,7 @@ fast = FastAgent("Evaluator-Optimizer")

  # Define optimizer agent
  @fast.agent(
-     name="optimizer",
+     name="generator",
      instruction="""You are a career coach specializing in cover letter writing.
      You are tasked with generating a compelling cover letter given the job posting,
      candidate details, and company information. Tailor the response to the company and job requirements.
@@ -38,12 +38,13 @@ fast = FastAgent("Evaluator-Optimizer")
      Summarize your evaluation as a structured response with:
      - Overall quality rating.
      - Specific feedback and areas for improvement.""",
-     model="sonnet",
+     # instructor doesn't seem to work for sonnet37
+     # model="sonnet35",
  )
  # Define the evaluator-optimizer workflow
  @fast.evaluator_optimizer(
      name="cover_letter_writer",
-     optimizer="optimizer",  # Reference to optimizer agent
+     generator="generator",  # Reference to optimizer agent
      evaluator="evaluator",  # Reference to evaluator agent
      min_rating="EXCELLENT",  # Strive for excellence
      max_refinements=3,  # Maximum iterations
@@ -70,6 +71,8 @@ async def main():
              f"Company information: {company_information}",
          )

+         await agent()
+


  if __name__ == "__main__":
      asyncio.run(main())
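The added `await agent()` hands control to the new interactive prompt after the scripted refinement completes. The updated examples all follow the same pattern — sketched here with the scripted call elided, since the hunk above truncates it:

```python
# Pattern used by the updated examples: run any scripted turns, then hand
# control to the interactive prompt (the newly added `await agent()`).
async def main():
    async with fast.run() as agent:   # `fast` is the FastAgent defined above
        # ... scripted workflow call elided (see the hunk above) ...
        await agent()                 # interactive session with the new prompt


if __name__ == "__main__":
    import asyncio
    asyncio.run(main())
```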
mcp_agent/resources/examples/workflows/fastagent.py ADDED
@@ -0,0 +1,22 @@
+ import asyncio
+ from mcp_agent.core.fastagent import FastAgent
+
+ # Create the application
+ agent_app = FastAgent("FastAgent Example")
+ # Uncomment the below to disable human input callback tool
+ # agent_app.app._human_input_callback = None
+
+
+ # Define the agent
+ @agent_app.agent(
+     instruction="You are a helpful AI Agent",
+     servers=[],
+ )
+ async def main():
+     # use the --model= command line switch to specify model
+     async with agent_app.run() as agent:
+         await agent()
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
mcp_agent/workflows/evaluator_optimizer/evaluator_optimizer.py CHANGED
@@ -66,18 +66,19 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):

      def __init__(
          self,
-         optimizer: Agent | AugmentedLLM,
+         generator: Agent | AugmentedLLM,
          evaluator: str | Agent | AugmentedLLM,
          min_rating: QualityRating = QualityRating.GOOD,
          max_refinements: int = 3,
-         llm_factory: Callable[[Agent], AugmentedLLM] | None = None,  # TODO: Remove legacy - factory should only be needed for str evaluator
+         llm_factory: Callable[[Agent], AugmentedLLM]
+         | None = None,  # TODO: Remove legacy - factory should only be needed for str evaluator
          context: Optional["Context"] = None,
      ):
          """
          Initialize the evaluator-optimizer workflow.

          Args:
-             optimizer: The agent/LLM/workflow that generates responses. Can be:
+             generator: The agent/LLM/workflow that generates responses. Can be:
                  - An Agent that will be converted to an AugmentedLLM
                  - An AugmentedLLM instance
                  - An Orchestrator/Router/ParallelLLM workflow
@@ -90,38 +91,38 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
          super().__init__(context=context)

          # Set up the optimizer
-         self.name = optimizer.name
+         self.name = generator.name
          self.llm_factory = llm_factory
-         self.optimizer = optimizer
+         self.generator = generator
          self.evaluator = evaluator

          # TODO: Remove legacy - optimizer should always be an AugmentedLLM, no conversion needed
-         if isinstance(optimizer, Agent):
+         if isinstance(generator, Agent):
              if not llm_factory:
                  raise ValueError("llm_factory is required when using an Agent")

              # Only create new LLM if agent doesn't have one
-             if hasattr(optimizer, "_llm") and optimizer._llm:
-                 self.optimizer_llm = optimizer._llm
+             if hasattr(generator, "_llm") and generator._llm:
+                 self.generator_llm = generator._llm
              else:
-                 self.optimizer_llm = llm_factory(agent=optimizer)
-
-             self.aggregator = optimizer
+                 self.generator_llm = llm_factory(agent=generator)
+
+             self.aggregator = generator
              self.instruction = (
-                 optimizer.instruction
-                 if isinstance(optimizer.instruction, str)
+                 generator.instruction
+                 if isinstance(generator.instruction, str)
                  else None
              )

-         elif isinstance(optimizer, AugmentedLLM):
-             self.optimizer_llm = optimizer
-             self.aggregator = optimizer.aggregator
-             self.instruction = optimizer.instruction
+         elif isinstance(generator, AugmentedLLM):
+             self.generator_llm = generator
+             self.aggregator = generator.aggregator
+             self.instruction = generator.instruction

          else:
-             raise ValueError(f"Unsupported optimizer type: {type(optimizer)}")
+             raise ValueError(f"Unsupported optimizer type: {type(generator)}")

-         self.history = self.optimizer_llm.history
+         self.history = self.generator_llm.history

          # Set up the evaluator
          if isinstance(evaluator, AugmentedLLM):
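Direct construction mirrors the decorator rename. A hedged sketch of building the workflow by hand after this change — `generator_agent`, `evaluator_agent`, and `make_llm` are placeholders, and the QualityRating import path is assumed from the hunks above:

```python
from mcp_agent.workflows.evaluator_optimizer.evaluator_optimizer import (
    EvaluatorOptimizerLLM,
    QualityRating,
)

# Placeholders: generator_agent / evaluator_agent are Agent or AugmentedLLM
# instances from your application; make_llm is an Agent -> AugmentedLLM factory.
workflow = EvaluatorOptimizerLLM(
    generator=generator_agent,      # was optimizer= in 0.0.11
    evaluator=evaluator_agent,
    min_rating=QualityRating.GOOD,
    max_refinements=3,
    llm_factory=make_llm,           # still required when generator is an Agent
)
```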
@@ -169,17 +170,17 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
          best_response = None
          best_rating = QualityRating.POOR
          self.refinement_history = []
-
+
          # Use a single AsyncExitStack for the entire method to maintain connections
          async with contextlib.AsyncExitStack() as stack:
              # Enter all agent contexts once at the beginning
-             if isinstance(self.optimizer, Agent):
-                 await stack.enter_async_context(self.optimizer)
+             if isinstance(self.generator, Agent):
+                 await stack.enter_async_context(self.generator)
              if isinstance(self.evaluator, Agent):
                  await stack.enter_async_context(self.evaluator)
-
+
              # Initial generation
-             response = await self.optimizer_llm.generate(
+             response = await self.generator_llm.generate(
                  message=message,
                  request_params=request_params,
              )
@@ -251,7 +252,7 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
              )

              # No nested AsyncExitStack here either
-             response = await self.optimizer_llm.generate(
+             response = await self.generator_llm.generate(
                  message=refinement_prompt,
                  request_params=request_params,
              )
@@ -274,13 +275,13 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
          # Handle case where response is a single message
          if not isinstance(response, list):
              return str(response)
-
+
          # Convert all messages to strings, handling different message types
          result_strings = []
          for r in response:
-             if hasattr(r, 'text'):
+             if hasattr(r, "text"):
                  result_strings.append(r.text)
-             elif hasattr(r, 'content'):
+             elif hasattr(r, "content"):
                  # Handle ToolUseBlock and similar
                  if isinstance(r.content, list):
                      # Typically content is a list of blocks
@@ -290,7 +291,7 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
              else:
                  # Fallback to string representation
                  result_strings.append(str(r))
-
+
          return "\n".join(result_strings)

      async def generate_structured(
@@ -304,7 +305,7 @@ class EvaluatorOptimizerLLM(AugmentedLLM[MessageParamT, MessageT]):
              message=message, request_params=request_params
          )

-         return await self.optimizer.generate_structured(
+         return await self.generator.generate_structured(
              message=response_str,
              response_model=response_model,
              request_params=request_params,
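generate_structured() first runs the normal generation loop for a plain-text answer, then asks the generator to coerce it into a response model. A usage sketch, with the Pydantic model as an illustrative stand-in:

```python
from pydantic import BaseModel


class Verdict(BaseModel):  # illustrative response model, not from the package
    rating: str
    feedback: str


# Inside an async function; assumes `workflow` is the EvaluatorOptimizerLLM
# instance sketched earlier.
verdict = await workflow.generate_structured(
    message="Evaluate the draft above.",
    response_model=Verdict,
)
```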
mcp_agent/workflows/llm/augmented_llm.py CHANGED
@@ -495,7 +495,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message
          console.console.print("\n")

      async def show_assistant_message(
-         self, message_text: str | Text, highlight_namespaced_tool: str = ""
+         self,
+         message_text: str | Text,
+         highlight_namespaced_tool: str = "",
+         title: str = "ASSISTANT",
      ):
          """Display an assistant message in a formatted panel."""
@@ -525,7 +528,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol[MessageParamT, Message

          panel = Panel(
              message_text,
-             title=f"[ASSISTANT]{f' ({self.name})' if self.name else ''}",
+             title=f"[{title}]{f' ({self.name})' if self.name else ''}",
              title_align="left",
              style="green",
              border_style="bold white",
mcp_agent/workflows/llm/augmented_llm_anthropic.py CHANGED
@@ -331,7 +331,9 @@ class AnthropicAugmentedLLM(AugmentedLLM[MessageParam, Message]):
              messages=[{"role": "user", "content": response}],
              max_tokens=params.maxTokens,
          )
-
+         await self.show_assistant_message(
+             str(structured_response), title="ASSISTANT/STRUCTURED"
+         )
          return structured_response

      @classmethod
mcp_agent/workflows/llm/augmented_llm_openai.py CHANGED
@@ -97,12 +97,7 @@ class OpenAIAugmentedLLM(
              use_history=True,
          )

-     async def generate(self, message, request_params: RequestParams | None = None):
-         """
-         Process a query using an LLM and available tools.
-         The default implementation uses OpenAI's ChatCompletion as the LLM.
-         Override this method to use a different LLM.
-         """
+     def _api_key(self) -> str:
          config = self.context.config
          api_key = None
@@ -121,9 +116,22 @@ class OpenAIAugmentedLLM(
                  "Add it to your configuration file under openai.api_key\n"
                  "Or set the OPENAI_API_KEY environment variable",
              )
+         return api_key
+
+     def _base_url(self) -> str:
+         return (
+             self.context.config.openai.base_url if self.context.config.openai else None
+         )
+
+     async def generate(self, message, request_params: RequestParams | None = None):
+         """
+         Process a query using an LLM and available tools.
+         The default implementation uses OpenAI's ChatCompletion as the LLM.
+         Override this method to use a different LLM.
+         """

          try:
-             openai_client = OpenAI(api_key=api_key, base_url=config.openai.base_url)
+             openai_client = OpenAI(api_key=self._api_key(), base_url=self._base_url())
              messages: List[ChatCompletionMessageParam] = []
              params = self.get_request_params(request_params)
          except AuthenticationError as e:
@@ -356,8 +364,8 @@ class OpenAIAugmentedLLM(
          # Next we pass the text through instructor to extract structured data
          client = instructor.from_openai(
              OpenAI(
-                 api_key=self.context.config.openai.api_key,
-                 base_url=self.context.config.openai.base_url,
+                 api_key=self._api_key(),
+                 base_url=self._base_url(),
              ),
              mode=instructor.Mode.TOOLS_STRICT,
          )
@@ -373,6 +381,9 @@ class OpenAIAugmentedLLM(
                  {"role": "user", "content": response},
              ],
          )
+         await self.show_assistant_message(
+             str(structured_response), title="ASSISTANT/STRUCTURED"
+         )

          return structured_response
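Net effect of the OpenAI refactor: key and base-URL resolution move out of generate() into _api_key()/_base_url(), so the chat client and the instructor client are now constructed identically. Notably, in 0.0.11 the instructor path read config.openai.api_key directly and therefore skipped the OPENAI_API_KEY environment-variable fallback; routing both through _api_key() removes that inconsistency. The resulting shape, paraphrased rather than quoted from the package:

```python
from openai import OpenAI

# Paraphrased shape of the refactor -- not the full class.
class _OpenAIClientShape:
    def _api_key(self) -> str:
        ...  # config openai.api_key, else OPENAI_API_KEY, else raise (per the hunk above)

    def _base_url(self) -> str | None:
        return self.context.config.openai.base_url if self.context.config.openai else None

    def _chat_client(self) -> OpenAI:
        # Both generate() and the instructor wrapper now build clients this way.
        return OpenAI(api_key=self._api_key(), base_url=self._base_url())
```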