fast-agent-mcp 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to their registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
- {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.6.dist-info}/METADATA +7 -1
- {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.6.dist-info}/RECORD +28 -17
- mcp_agent/agents/agent.py +46 -0
- mcp_agent/core/agent_app.py +373 -9
- mcp_agent/core/decorators.py +455 -0
- mcp_agent/core/enhanced_prompt.py +71 -5
- mcp_agent/core/factory.py +501 -0
- mcp_agent/core/fastagent.py +143 -1059
- mcp_agent/core/proxies.py +71 -14
- mcp_agent/core/validation.py +221 -0
- mcp_agent/human_input/handler.py +5 -2
- mcp_agent/mcp/mcp_aggregator.py +537 -47
- mcp_agent/mcp/mcp_connection_manager.py +13 -2
- mcp_agent/mcp_server/__init__.py +4 -0
- mcp_agent/mcp_server/agent_server.py +121 -0
- mcp_agent/resources/examples/internal/fastagent.config.yaml +52 -0
- mcp_agent/resources/examples/internal/prompt_category.py +21 -0
- mcp_agent/resources/examples/internal/prompt_sizing.py +53 -0
- mcp_agent/resources/examples/internal/sizer.py +24 -0
- mcp_agent/resources/examples/researcher/fastagent.config.yaml +14 -1
- mcp_agent/resources/examples/workflows/sse.py +23 -0
- mcp_agent/ui/console_display.py +278 -0
- mcp_agent/workflows/llm/augmented_llm.py +245 -179
- mcp_agent/workflows/llm/augmented_llm_anthropic.py +49 -3
- mcp_agent/workflows/llm/augmented_llm_openai.py +52 -4
- {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.6.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.6.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.1.4.dist-info → fast_agent_mcp-0.1.6.dist-info}/licenses/LICENSE +0 -0
mcp_agent/core/decorators.py (new file)

@@ -0,0 +1,455 @@
+"""
+Decorators for FastAgent applications.
+Contains decorator definitions extracted from fastagent.py.
+"""
+
+from typing import Callable, Dict, List, Optional, TypeVar, Literal
+from mcp_agent.agents.agent import AgentConfig
+from mcp_agent.workflows.llm.augmented_llm import RequestParams
+from mcp_agent.core.agent_types import AgentType
+
+T = TypeVar("T")  # For the wrapper classes
+
+
+def _create_decorator(
+    self,
+    agent_type: AgentType,
+    default_name: str = None,
+    default_instruction: str = None,
+    default_servers: List[str] = None,
+    default_use_history: bool = True,
+    wrapper_needed: bool = False,
+    **extra_defaults,
+) -> Callable:
+    """
+    Factory method for creating agent decorators with common behavior.
+
+    Args:
+        agent_type: Type of agent/workflow to create
+        default_name: Default name to use if not provided
+        default_instruction: Default instruction to use if not provided
+        default_servers: Default servers list to use if not provided
+        default_use_history: Default history setting
+        wrapper_needed: Whether to wrap the decorated function
+        **extra_defaults: Additional agent/workflow-specific parameters
+    """
+
+    def decorator_wrapper(**kwargs):
+        # Apply defaults for common parameters
+        name = kwargs.get("name", default_name or f"{agent_type.name.title()}")
+        instruction = kwargs.get("instruction", default_instruction or "")
+        servers = kwargs.get("servers", default_servers or [])
+        model = kwargs.get("model", None)
+        use_history = kwargs.get("use_history", default_use_history)
+        request_params = kwargs.get("request_params", None)
+        human_input = kwargs.get("human_input", False)
+
+        # Create base request params
+        def decorator(func: Callable) -> Callable:
+            # Create base request params
+            if (
+                request_params is not None
+                or model is not None
+                or use_history != default_use_history
+            ):
+                max_tokens = 4096 if agent_type == AgentType.BASIC else None
+                params_dict = {"use_history": use_history, "model": model}
+                if max_tokens:
+                    params_dict["maxTokens"] = max_tokens
+                if request_params:
+                    params_dict.update(request_params)
+                base_params = RequestParams(**params_dict)
+            else:
+                base_params = RequestParams(use_history=use_history)
+
+            # Create agent configuration
+            config = AgentConfig(
+                name=name,
+                instruction=instruction,
+                servers=servers,
+                model=model,
+                use_history=use_history,
+                default_request_params=base_params,
+                human_input=human_input,
+            )
+
+            # Build agent/workflow specific data
+            agent_data = {
+                "config": config,
+                "type": agent_type.value,
+                "func": func,
+            }
+
+            # Add extra parameters specific to this agent type
+            for key, value in kwargs.items():
+                if key not in [
+                    "name",
+                    "instruction",
+                    "servers",
+                    "model",
+                    "use_history",
+                    "request_params",
+                    "human_input",
+                ]:
+                    agent_data[key] = value
+
+            # Store the configuration under the agent name
+            self.agents[name] = agent_data
+
+            # Either wrap or return the original function
+            if wrapper_needed:
+
+                async def wrapper(*args, **kwargs):
+                    return await func(*args, **kwargs)
+
+                return wrapper
+            return func
+
+        return decorator
+
+    return decorator_wrapper
+
+
+def agent(
+    self,
+    name: str = "Agent",
+    instruction_or_kwarg: str = None,
+    *,
+    instruction: str = "You are a helpful agent.",
+    servers: List[str] = [],
+    model: str | None = None,
+    use_history: bool = True,
+    request_params: Optional[Dict] = None,
+    human_input: bool = False,
+) -> Callable:
+    """
+    Decorator to create and register an agent with configuration.
+
+    Args:
+        name: Name of the agent
+        instruction_or_kwarg: Optional positional parameter for instruction
+        instruction: Base instruction for the agent (keyword arg)
+        servers: List of server names the agent should connect to
+        model: Model specification string (highest precedence)
+        use_history: Whether to maintain conversation history
+        request_params: Additional request parameters for the LLM
+        human_input: Whether to enable human input capabilities
+
+    The instruction can be provided either as a second positional argument
+    or as a keyword argument. Positional argument takes precedence when both are provided.
+
+    Usage:
+        @fast.agent("agent_name", "Your instruction here")  # Using positional arg
+        @fast.agent("agent_name", instruction="Your instruction here")  # Using keyword arg
+    """
+    # Use positional argument if provided, otherwise use keyword argument
+    final_instruction = (
+        instruction_or_kwarg if instruction_or_kwarg is not None else instruction
+    )
+
+    decorator = self._create_decorator(
+        AgentType.BASIC,
+        default_name="Agent",
+        default_instruction="You are a helpful agent.",
+        default_use_history=True,
+    )(
+        name=name,
+        instruction=final_instruction,
+        servers=servers,
+        model=model,
+        use_history=use_history,
+        request_params=request_params,
+        human_input=human_input,
+    )
+    return decorator
+
+
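For orientation, typical use of the `agent` decorator (per its docstring above) looks like the sketch below. The `FastAgent` app object, the `fetch` server name, and the `fast.run()` session are assumptions drawn from the package's bundled examples, not part of this diff:

```python
import asyncio

from mcp_agent.core.fastagent import FastAgent

fast = FastAgent("example")  # app name is illustrative

@fast.agent("url_fetcher", "Fetch and summarise web pages.", servers=["fetch"])
async def main():
    # fast.run() builds the registered agents; the session object proxies them
    async with fast.run() as agent:
        await agent("Summarise https://example.com")

if __name__ == "__main__":
    asyncio.run(main())
```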
+def orchestrator(
+    self,
+    name: str = "Orchestrator",
+    *,
+    instruction: str | None = None,
+    agents: List[str],
+    model: str | None = None,
+    use_history: bool = False,
+    request_params: Optional[Dict] = None,
+    human_input: bool = False,
+    plan_type: Literal["full", "iterative"] = "full",
+    max_iterations: int = 30,  # Add the max_iterations parameter with default value
+) -> Callable:
+    """
+    Decorator to create and register an orchestrator.
+
+    Args:
+        name: Name of the orchestrator
+        instruction: Base instruction for the orchestrator
+        agents: List of agent names this orchestrator can use
+        model: Model specification string (highest precedence)
+        use_history: Whether to maintain conversation history (forced false)
+        request_params: Additional request parameters for the LLM
+        human_input: Whether to enable human input capabilities
+        plan_type: Planning approach - "full" generates entire plan first, "iterative" plans one step at a time
+        max_iterations: Maximum number of planning iterations (default: 30)
+    """
+    default_instruction = """
+    You are an expert planner. Given an objective task and a list of MCP servers (which are collections of tools)
+    or Agents (which are collections of servers), your job is to break down the objective into a series of steps,
+    which can be performed by LLMs with access to the servers or agents.
+    """
+
+    # Handle request_params update with max_iterations
+    if request_params is None:
+        request_params = {"max_iterations": max_iterations}
+    elif isinstance(request_params, dict):
+        if "max_iterations" not in request_params:
+            request_params["max_iterations"] = max_iterations
+
+    decorator = self._create_decorator(
+        AgentType.ORCHESTRATOR,
+        default_name="Orchestrator",
+        default_instruction=default_instruction,
+        default_servers=[],
+        default_use_history=False,
+    )(
+        name=name,
+        instruction=instruction,
+        child_agents=agents,
+        model=model,
+        use_history=use_history,
+        request_params=request_params,
+        human_input=human_input,
+        plan_type=plan_type,
+    )
+    return decorator
+
+
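A minimal registration sketch, assuming the two child agents were declared with `@fast.agent` as above and that the session object exposes agents by name (names are illustrative):

```python
@fast.agent("researcher", "Research the topic.")
@fast.agent("writer", "Write up the findings.")
@fast.orchestrator(
    name="lead",
    agents=["researcher", "writer"],  # passed through as child_agents
    plan_type="iterative",            # plan one step at a time
    max_iterations=10,                # folded into request_params as shown above
)
async def main():
    async with fast.run() as agent:
        await agent.lead("Produce a short report.")
```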
+def parallel(
+    self,
+    name: str,
+    fan_out: List[str],
+    fan_in: Optional[str] = None,
+    instruction: str = "",
+    model: str | None = None,
+    use_history: bool = True,
+    request_params: Optional[Dict] = None,
+    include_request: bool = True,
+) -> Callable:
+    """
+    Decorator to create and register a parallel executing agent.
+
+    Args:
+        name: Name of the parallel executing agent
+        fan_out: List of parallel execution agents
+        fan_in: Optional name of collecting agent. If not provided, a passthrough agent
+                will be created automatically with the name "{name}_fan_in"
+        instruction: Optional instruction for the parallel agent
+        model: Model specification string
+        use_history: Whether to maintain conversation history
+        request_params: Additional request parameters for the LLM
+        include_request: Whether to include the original request in the fan-in message
+    """
+    # If fan_in is not provided, create a passthrough agent with a derived name
+    if fan_in is None:
+        passthrough_name = f"{name}_fan_in"
+
+        # Register the passthrough agent directly in self.agents
+        self.agents[passthrough_name] = {
+            "config": AgentConfig(
+                name=passthrough_name,
+                instruction=f"Passthrough fan-in for {name}",
+                servers=[],
+                use_history=use_history,
+            ),
+            "type": AgentType.BASIC.value,  # Using BASIC type since we're just attaching a PassthroughLLM
+            "func": lambda x: x,  # Simple passthrough function (never actually called)
+        }
+
+        # Use this passthrough as the fan-in
+        fan_in = passthrough_name
+
+    decorator = self._create_decorator(
+        AgentType.PARALLEL,
+        default_instruction="",
+        default_servers=[],
+        default_use_history=True,
+    )(
+        name=name,
+        fan_in=fan_in,
+        fan_out=fan_out,
+        instruction=instruction,
+        model=model,
+        use_history=use_history,
+        request_params=request_params,
+        include_request=include_request,
+    )
+    return decorator
+
+
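A sketch of the fan-out wiring; omitting `fan_in` here would auto-register a passthrough named `translate_fan_in`, per the branch above (agent names are illustrative):

```python
@fast.parallel(
    name="translate",
    fan_out=["french", "german"],  # previously registered agents, run in parallel
    include_request=True,          # prepend the original request to the fan-in message
)
async def main():
    ...
```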
+def evaluator_optimizer(
+    self,
+    name: str,
+    generator: str,
+    evaluator: str,
+    min_rating: str = "GOOD",
+    max_refinements: int = 3,
+    use_history: bool = True,
+    request_params: Optional[Dict] = None,
+    instruction: Optional[str] = None,
+) -> Callable:
+    """
+    Decorator to create and register an evaluator-optimizer workflow.
+
+    Args:
+        name: Name of the workflow
+        generator: Name of the generator agent
+        evaluator: Name of the evaluator agent
+        min_rating: Minimum acceptable quality rating (EXCELLENT, GOOD, FAIR, POOR)
+        max_refinements: Maximum number of refinement iterations
+        use_history: Whether to maintain conversation history
+        request_params: Additional request parameters for the LLM
+        instruction: Optional instruction for the workflow (if not provided, uses generator's instruction)
+    """
+    decorator = self._create_decorator(
+        AgentType.EVALUATOR_OPTIMIZER,
+        default_instruction="",  # We'll get instruction from generator or override
+        default_servers=[],
+        default_use_history=True,
+        wrapper_needed=True,
+    )(
+        name=name,
+        generator=generator,
+        evaluator=evaluator,
+        min_rating=min_rating,
+        max_refinements=max_refinements,
+        use_history=use_history,
+        request_params=request_params,
+        instruction=instruction,  # Pass through any custom instruction
+    )
+    return decorator
+
+
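A sketch, assuming `writer` and `reviewer` are registered agents; `min_rating` uses the scale named in the docstring:

```python
@fast.evaluator_optimizer(
    name="quality_loop",
    generator="writer",      # produces each candidate answer
    evaluator="reviewer",    # rates it EXCELLENT / GOOD / FAIR / POOR
    min_rating="EXCELLENT",  # refine until this rating or max_refinements is hit
    max_refinements=3,
)
async def main():
    ...
```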
+def router(
+    self,
+    name: str,
+    agents: List[str],
+    # servers: List[str] = [],
+    model: Optional[str] = None,
+    use_history: bool = True,
+    request_params: Optional[Dict] = None,
+    human_input: bool = False,
+) -> Callable:
+    """
+    Decorator to create and register a router.
+
+    Args:
+        name: Name of the router
+        agents: List of agent names this router can delegate to
+        servers: List of server names the router can use directly (currently not supported)
+        model: Model specification string
+        use_history: Whether to maintain conversation history
+        request_params: Additional request parameters for the LLM
+        human_input: Whether to enable human input capabilities
+    """
+    decorator = self._create_decorator(
+        AgentType.ROUTER,
+        default_instruction="",
+        default_servers=[],
+        default_use_history=False,
+        wrapper_needed=True,
+    )(
+        name=name,
+        agents=agents,
+        model=model,
+        use_history=use_history,
+        request_params=request_params,
+        human_input=human_input,
+    )
+    return decorator
+
+
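A sketch of router registration; note that direct server delegation is commented out of the signature and flagged "currently not supported" in the docstring:

```python
@fast.router(
    name="dispatcher",
    agents=["code_helper", "general_helper"],  # delegation targets by name
)
async def main():
    ...
```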
+def chain(
+    self,
+    name: str = "Chain",
+    *,
+    sequence: List[str] = None,
+    agents: List[str] = None,  # Alias for sequence
+    instruction: str = None,
+    model: str | None = None,
+    use_history: bool = True,
+    request_params: Optional[Dict] = None,
+    continue_with_final: bool = True,
+    cumulative: bool = False,
+) -> Callable:
+    """
+    Decorator to create and register a chain of agents.
+
+    Args:
+        name: Name of the chain
+        sequence: List of agent names in order of execution (preferred name)
+        agents: Alias for sequence (backwards compatibility)
+        instruction: Optional custom instruction for the chain (if none provided, will autogenerate based on sequence)
+        model: Model specification string (not used directly in chain)
+        use_history: Whether to maintain conversation history
+        request_params: Additional request parameters
+        continue_with_final: When using prompt(), whether to continue with the final agent after processing chain (default: True)
+        cumulative: When True, each agent receives all previous agent responses concatenated (default: False)
+                    When False, each agent only gets the output of the previous agent (default behavior)
+    """
+    # Support both parameter names
+    agent_sequence = sequence or agents
+    if agent_sequence is None:
+        raise ValueError("Either 'sequence' or 'agents' parameter must be provided")
+
+    # Auto-generate instruction if not provided
+    if instruction is None:
+        # Generate an appropriate instruction based on mode
+        if cumulative:
+            instruction = f"Cumulative chain of agents: {', '.join(agent_sequence)}"
+        else:
+            instruction = f"Chain of agents: {', '.join(agent_sequence)}"
+
+    decorator = self._create_decorator(
+        AgentType.CHAIN,
+        default_name="Chain",
+        default_instruction=instruction,
+        default_use_history=True,
+        wrapper_needed=True,
+    )(
+        name=name,
+        sequence=agent_sequence,
+        instruction=instruction,
+        model=model,
+        use_history=use_history,
+        request_params=request_params,
+        continue_with_final=continue_with_final,
+        cumulative=cumulative,
+    )
+    return decorator
+
+
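A sketch showing the preferred `sequence` parameter and the cumulative flag (agent names are illustrative):

```python
@fast.chain(
    name="pipeline",
    sequence=["extract", "summarise"],  # 'agents=' works too (backwards compatibility)
    cumulative=False,                   # each agent sees only the previous agent's output
)
async def main():
    ...
```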
+def passthrough(
+    self, name: str = "Passthrough", use_history: bool = True, **kwargs
+) -> Callable:
+    """
+    Decorator to create and register a passthrough agent.
+    A passthrough agent simply returns any input message without modification.
+
+    This is useful for parallel workflows where no fan-in aggregation is needed
+    (the fan-in agent can be a passthrough that simply returns the combined outputs).
+
+    Args:
+        name: Name of the passthrough agent
+        use_history: Whether to maintain conversation history
+        **kwargs: Additional parameters (ignored, for compatibility)
+    """
+    decorator = self._create_decorator(
+        AgentType.BASIC,  # Using BASIC agent type since we'll use a regular agent with PassthroughLLM
+        default_name="Passthrough",
+        default_instruction="Passthrough agent that returns input without modification",
+        default_use_history=use_history,
+        wrapper_needed=True,
+    )(
+        name=name,
+        use_history=use_history,
+    )
+    return decorator
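A sketch of an explicit fan-in: register a passthrough and hand its name to `parallel` instead of relying on the auto-generated `{name}_fan_in` (decorator stacking and agent names are assumptions based on the package's examples):

```python
@fast.passthrough(name="collector")
@fast.parallel(name="spread", fan_out=["a", "b"], fan_in="collector")
async def main():
    ...
```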
mcp_agent/core/enhanced_prompt.py

@@ -50,14 +50,20 @@ class AgentCompleter(Completer):
         # Map commands to their descriptions for better completion hints
         self.commands = {
             "help": "Show available commands",
-            "clear": "Clear the screen",
+            "prompts": "List and select MCP prompts",  # Changed description
+            "prompt": "Apply a specific prompt by name (/prompt <name>)",  # New command
             "agents": "List available agents",
+            "clear": "Clear the screen",
             "STOP": "Stop this prompting session and move to next workflow step",
             "EXIT": "Exit fast-agent, terminating any running workflows",
             **(commands or {}),  # Allow custom commands to be passed in
         }
         if is_human_input:
             self.commands.pop("agents")
+            self.commands.pop("prompts")  # Remove prompts command in human input mode
+            self.commands.pop(
+                "prompt", None
+            )  # Remove prompt command in human input mode
         self.agent_types = agent_types or {}

     def get_completions(self, document, complete_event):

@@ -67,6 +73,7 @@ class AgentCompleter(Completer):
         # Complete commands
         if text.startswith("/"):
             cmd = text[1:]
+            # Simple command completion - match beginning of command
             for command, description in self.commands.items():
                 if command.lower().startswith(cmd):
                     yield Completion(

@@ -88,7 +95,6 @@ class AgentCompleter(Completer):
                         start_position=-len(agent_name),
                         display=agent,
                         display_meta=agent_type,
-                        # style="bg:ansiblack fg:ansiblue",
                     )

@@ -273,22 +279,30 @@ async def get_enhanced_input(
         )
     else:
         rich_print(
-            "[dim]Type /help for commands, @agent to switch agent. Ctrl+T toggles multiline mode."
+            "[dim]Type /help for commands, @agent to switch agent. Ctrl+T toggles multiline mode.[/dim]"
         )
         rich_print()
         help_message_shown = True

     # Process special commands
+
     def pre_process_input(text):
         # Command processing
         if text and text.startswith("/"):
-            cmd = text[1:].strip().lower()
+            cmd_parts = text[1:].strip().split(maxsplit=1)
+            cmd = cmd_parts[0].lower()
+
             if cmd == "help":
                 return "HELP"
             elif cmd == "clear":
                 return "CLEAR"
             elif cmd == "agents":
                 return "LIST_AGENTS"
+            elif cmd == "prompts":
+                return "SELECT_PROMPT"  # Changed from LIST_PROMPTS to directly launch selection UI
+            elif cmd == "prompt" and len(cmd_parts) > 1:
+                # Direct prompt selection with name
+                return f"SELECT_PROMPT:{cmd_parts[1].strip()}"
             elif cmd == "exit":
                 return "EXIT"
             elif cmd == "stop":

@@ -298,6 +312,8 @@ async def get_enhanced_input(
         if text and text.startswith("@"):
             return f"SWITCH:{text[1:].strip()}"

+        # Remove the # command handling completely
+
         return text

     # Get the input - using async version
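The sentinel strings produced by `pre_process_input` above are consumed by `handle_special_commands` in the next hunks. A sketch of the round trip; `pre_process_input` is actually nested inside `get_enhanced_input`, so calling it directly as shown is for illustration only, and the surrounding `agent_app` wiring is assumed:

```python
command = pre_process_input("/prompt sizing")      # -> "SELECT_PROMPT:sizing"
result = await handle_special_commands(command, agent_app)
if isinstance(result, dict) and result.get("select_prompt"):
    prompt_name = result["prompt_name"]            # "sizing"; None for a bare /prompts
    # the caller (AgentApp) is expected to open the prompt UI or apply the named prompt
```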
@@ -328,6 +344,8 @@ async def handle_special_commands(command, agent_app=None):
         rich_print(" /help - Show this help")
         rich_print(" /clear - Clear screen")
         rich_print(" /agents - List available agents")
+        rich_print(" /prompts - List and select MCP prompts")
+        rich_print(" /prompt <name> - Apply a specific prompt by name")
         rich_print(" @agent_name - Switch to agent")
         rich_print(" STOP - Return control back to the workflow")
         rich_print(

@@ -360,11 +378,59 @@ async def handle_special_commands(command, agent_app=None):
             rich_print("[yellow]No agents available[/yellow]")
         return True

+    elif command == "LIST_PROMPTS":
+        # Return a dictionary with a list_prompts action to be handled by the caller
+        # The actual prompt listing is implemented in the AgentApp class
+        if agent_app:
+            rich_print("\n[bold]Fetching available MCP prompts...[/bold]")
+            return {"list_prompts": True}
+        else:
+            rich_print(
+                "[yellow]Prompt listing is not available outside of an agent context[/yellow]"
+            )
+            return True
+
+    elif command == "SELECT_PROMPT" or (
+        isinstance(command, str) and command.startswith("SELECT_PROMPT:")
+    ):
+        # Handle prompt selection UI
+        if agent_app:
+            # If it's a specific prompt, extract the name
+            prompt_name = None
+            if isinstance(command, str) and command.startswith("SELECT_PROMPT:"):
+                prompt_name = command.split(":", 1)[1].strip()
+
+            # Return a dictionary with a select_prompt action to be handled by the caller
+            return {"select_prompt": True, "prompt_name": prompt_name}
+        else:
+            rich_print(
+                "[yellow]Prompt selection is not available outside of an agent context[/yellow]"
+            )
+            return True
+
+    elif command == "SELECT_PROMPT" or (
+        isinstance(command, str) and command.startswith("SELECT_PROMPT:")
+    ):
+        # Handle prompt selection UI (previously named "list_prompts" action)
+        if agent_app:
+            # If it's a specific prompt, extract the name
+            prompt_name = None
+            if isinstance(command, str) and command.startswith("SELECT_PROMPT:"):
+                prompt_name = command.split(":", 1)[1].strip()
+
+            # Return a dictionary with a select_prompt action to be handled by the caller
+            return {"select_prompt": True, "prompt_name": prompt_name}
+        else:
+            rich_print(
+                "[yellow]Prompt selection is not available outside of an agent context[/yellow]"
+            )
+            return True
+
     elif isinstance(command, str) and command.startswith("SWITCH:"):
         agent_name = command.split(":", 1)[1]
         if agent_name in available_agents:
             if agent_app:
-                rich_print(f"[green]Switching to agent: {agent_name}[/green]")
+                # rich_print(f"[green]Switching to agent: {agent_name}[/green]")
                 return {"switch_agent": agent_name}
             else:
                 rich_print(