fast-agent-mcp 0.2.16__py3-none-any.whl → 0.2.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. {fast_agent_mcp-0.2.16.dist-info → fast_agent_mcp-0.2.18.dist-info}/METADATA +6 -7
  2. {fast_agent_mcp-0.2.16.dist-info → fast_agent_mcp-0.2.18.dist-info}/RECORD +48 -47
  3. mcp_agent/agents/base_agent.py +50 -6
  4. mcp_agent/agents/workflow/orchestrator_agent.py +6 -7
  5. mcp_agent/agents/workflow/router_agent.py +70 -136
  6. mcp_agent/app.py +1 -124
  7. mcp_agent/cli/commands/go.py +133 -0
  8. mcp_agent/cli/commands/setup.py +2 -2
  9. mcp_agent/cli/main.py +5 -3
  10. mcp_agent/config.py +16 -13
  11. mcp_agent/context.py +4 -22
  12. mcp_agent/core/agent_types.py +2 -2
  13. mcp_agent/core/direct_decorators.py +2 -2
  14. mcp_agent/core/direct_factory.py +2 -1
  15. mcp_agent/core/enhanced_prompt.py +12 -7
  16. mcp_agent/core/fastagent.py +39 -5
  17. mcp_agent/core/interactive_prompt.py +6 -2
  18. mcp_agent/core/request_params.py +5 -1
  19. mcp_agent/core/validation.py +12 -1
  20. mcp_agent/executor/workflow_signal.py +0 -2
  21. mcp_agent/llm/augmented_llm.py +183 -57
  22. mcp_agent/llm/augmented_llm_passthrough.py +1 -1
  23. mcp_agent/llm/augmented_llm_playback.py +21 -1
  24. mcp_agent/llm/memory.py +3 -3
  25. mcp_agent/llm/model_factory.py +3 -1
  26. mcp_agent/llm/provider_key_manager.py +1 -0
  27. mcp_agent/llm/provider_types.py +2 -1
  28. mcp_agent/llm/providers/augmented_llm_anthropic.py +50 -10
  29. mcp_agent/llm/providers/augmented_llm_deepseek.py +1 -5
  30. mcp_agent/llm/providers/augmented_llm_google.py +30 -0
  31. mcp_agent/llm/providers/augmented_llm_openai.py +96 -159
  32. mcp_agent/llm/providers/multipart_converter_openai.py +10 -27
  33. mcp_agent/llm/providers/sampling_converter_openai.py +5 -6
  34. mcp_agent/mcp/interfaces.py +6 -1
  35. mcp_agent/mcp/mcp_aggregator.py +2 -8
  36. mcp_agent/mcp/prompt_message_multipart.py +25 -2
  37. mcp_agent/resources/examples/data-analysis/analysis-campaign.py +2 -2
  38. mcp_agent/resources/examples/in_dev/agent_build.py +1 -1
  39. mcp_agent/resources/examples/internal/job.py +1 -1
  40. mcp_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +1 -1
  41. mcp_agent/resources/examples/prompting/agent.py +0 -2
  42. mcp_agent/resources/examples/prompting/fastagent.config.yaml +2 -3
  43. mcp_agent/resources/examples/researcher/fastagent.config.yaml +1 -6
  44. mcp_agent/resources/examples/workflows/fastagent.config.yaml +0 -1
  45. mcp_agent/resources/examples/workflows/parallel.py +1 -1
  46. mcp_agent/executor/decorator_registry.py +0 -112
  47. {fast_agent_mcp-0.2.16.dist-info → fast_agent_mcp-0.2.18.dist-info}/WHEEL +0 -0
  48. {fast_agent_mcp-0.2.16.dist-info → fast_agent_mcp-0.2.18.dist-info}/entry_points.txt +0 -0
  49. {fast_agent_mcp-0.2.16.dist-info → fast_agent_mcp-0.2.18.dist-info}/licenses/LICENSE +0 -0
mcp_agent/agents/workflow/router_agent.py CHANGED
@@ -5,9 +5,8 @@ This provides a simplified implementation that routes messages to agents
  by determining the best agent for a request and dispatching to it.
  """

- from typing import TYPE_CHECKING, List, Optional, Tuple, Type
+ from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Type

- from mcp.types import TextContent
  from pydantic import BaseModel

  from mcp_agent.agents.agent import Agent
@@ -17,10 +16,12 @@ from mcp_agent.core.exceptions import AgentConfigError
  from mcp_agent.core.prompt import Prompt
  from mcp_agent.core.request_params import RequestParams
  from mcp_agent.logging.logger import get_logger
- from mcp_agent.mcp.interfaces import ModelT
+ from mcp_agent.mcp.interfaces import AugmentedLLMProtocol, ModelT
  from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart

  if TYPE_CHECKING:
+ from a2a_types.types import AgentCard
+
  from mcp_agent.context import Context

  logger = get_logger(__name__)
@@ -36,47 +37,17 @@ Follow these guidelines:
  - Provide your confidence level (high, medium, low) and brief reasoning for your selection
  """

- # Default routing instruction with placeholders for context and request
+ # Default routing instruction with placeholders for context (AgentCard JSON)
  DEFAULT_ROUTING_INSTRUCTION = """
- You are a highly accurate request router that directs incoming requests to the most appropriate agent.
-
- <fastagent:data>
+ Select from the following agents to handle the request:
  <fastagent:agents>
+ [
  {context}
+ ]
  </fastagent:agents>

- <fastagent:request>
- {request}
- </fastagent:request>
- </fastagent:data>
-
- Your task is to analyze the request and determine the most appropriate agent from the options above.
-
- <fastagent:instruction>
- Respond with JSON following the schema below:
- {{
- "type": "object",
- "required": ["agent", "confidence", "reasoning"],
- "properties": {{
- "agent": {{
- "type": "string",
- "description": "The exact name of the selected agent"
- }},
- "confidence": {{
- "type": "string",
- "enum": ["high", "medium", "low"],
- "description": "Your confidence level in this selection"
- }},
- "reasoning": {{
- "type": "string",
- "description": "Brief explanation for your selection"
- }}
- }}
- }}
-
- Supply only the JSON with no preamble. Use "reasoning" field to describe actions. NEVER EMIT CODE FENCES.
-
- </fastagent:instruction>
+ You must respond with the 'name' of one of the agents listed above.
+
  """


@@ -85,18 +56,7 @@ class RoutingResponse(BaseModel):

  agent: str
  confidence: str
- reasoning: Optional[str] = None
-
-
- class RouterResult(BaseModel):
- """Router result with agent reference and confidence rating."""
-
- result: BaseAgent
- confidence: str
- reasoning: Optional[str] = None
-
- # Allow Agent objects to be stored without serialization
- model_config = {"arbitrary_types_allowed": True}
+ reasoning: str | None = None


  class RouterAgent(BaseAgent):
@@ -142,9 +102,7 @@ class RouterAgent(BaseAgent):
  # Set up base router request parameters
  base_params = {"systemPrompt": ROUTING_SYSTEM_INSTRUCTION, "use_history": False}

- # Merge with provided defaults if any
  if default_request_params:
- # Start with defaults and override with router-specific settings
  merged_params = default_request_params.model_copy(update=base_params)
  else:
  merged_params = RequestParams(**base_params)
@@ -174,32 +132,16 @@ class RouterAgent(BaseAgent):
  except Exception as e:
  logger.warning(f"Error shutting down agent: {str(e)}")

- async def _get_routing_result(
+ async def attach_llm(
  self,
- messages: List[PromptMessageMultipart],
- ) -> Optional[RouterResult]:
- """
- Common method to extract request and get routing result.
-
- Args:
- messages: The messages to extract request from
-
- Returns:
- RouterResult containing the selected agent, or None if no suitable agent found
- """
- if not self.initialized:
- await self.initialize()
-
- # Extract the request text from the last message
- request = messages[-1].all_text() if messages else ""
-
- # Determine which agent to route to
- routing_result = await self._route_request(request)
-
- if not routing_result:
- logger.warning("Could not determine appropriate agent for this request")
-
- return routing_result
+ llm_factory: type[AugmentedLLMProtocol] | Callable[..., AugmentedLLMProtocol],
+ model: str | None = None,
+ request_params: RequestParams | None = None,
+ **additional_kwargs,
+ ) -> AugmentedLLMProtocol:
+ return await super().attach_llm(
+ llm_factory, model, request_params, verb="Routing", **additional_kwargs
+ )

  async def generate(
  self,
@@ -216,32 +158,21 @@ class RouterAgent(BaseAgent):
  Returns:
  The response from the selected agent
  """
- routing_result = await self._get_routing_result(multipart_messages)
-
- if not routing_result:
- return PromptMessageMultipart(
- role="assistant",
- content=[
- TextContent(
- type="text", text="Could not determine appropriate agent for this request."
- )
- ],
- )

- # Get the selected agent
- selected_agent = routing_result.result
+ route, warn = await self._route_request(multipart_messages[-1])

- # Log the routing decision
- logger.info(
- f"Routing request to agent: {selected_agent.name} (confidence: {routing_result.confidence})"
- )
+ if not route:
+ return Prompt.assistant(warn or "No routing result or warning received")
+
+ # Get the selected agent
+ agent: Agent = self.agent_map[route.agent]

  # Dispatch the request to the selected agent
- return await selected_agent.generate(multipart_messages, request_params)
+ return await agent.generate(multipart_messages, request_params)

  async def structured(
  self,
- prompt: List[PromptMessageMultipart],
+ multipart_messages: List[PromptMessageMultipart],
  model: Type[ModelT],
  request_params: Optional[RequestParams] = None,
  ) -> Tuple[ModelT | None, PromptMessageMultipart]:
@@ -256,23 +187,22 @@ class RouterAgent(BaseAgent):
  Returns:
  The parsed response from the selected agent, or None if parsing fails
  """
- routing_result = await self._get_routing_result(prompt)
+ route, warn = await self._route_request(multipart_messages[-1])

- if not routing_result:
- return None, Prompt.assistant("No routing result")
+ if not route:
+ return None, Prompt.assistant(
+ warn or "No routing result or warning received (structured)"
+ )

  # Get the selected agent
- selected_agent = routing_result.result
-
- # Log the routing decision
- logger.info(
- f"Routing structured request to agent: {selected_agent.name} (confidence: {routing_result.confidence})"
- )
+ agent: Agent = self.agent_map[route.agent]

  # Dispatch the request to the selected agent
- return await selected_agent.structured(prompt, model, request_params)
+ return await agent.structured(multipart_messages, model, request_params)

- async def _route_request(self, request: str) -> Optional[RouterResult]:
+ async def _route_request(
+ self, message: PromptMessageMultipart
+ ) -> Tuple[RoutingResponse | None, str | None]:
  """
  Determine which agent to route the request to.

@@ -283,49 +213,53 @@ class RouterAgent(BaseAgent):
  RouterResult containing the selected agent, or None if no suitable agent was found
  """
  if not self.agents:
- logger.warning("No agents available for routing")
- return None
+ logger.error("No agents available for routing")
+ raise AgentConfigError("No agents available for routing - fatal error")

  # If only one agent is available, use it directly
  if len(self.agents) == 1:
- return RouterResult(
- result=self.agents[0], confidence="high", reasoning="Only one agent available"
- )
+ return RoutingResponse(
+ agent=self.agents[0].name, confidence="high", reasoning="Only one agent available"
+ ), None

  # Generate agent descriptions for the context
  agent_descriptions = []
- for i, agent in enumerate(self.agents, 1):
- description = agent.instruction if isinstance(agent.instruction, str) else ""
- agent_descriptions.append(f"{i}. Name: {agent.name} - {description}")
+ for agent in self.agents:
+ agent_card: AgentCard = await agent.agent_card()
+ agent_descriptions.append(
+ agent_card.model_dump_json(
+ include={"name", "description", "skills"}, exclude_none=True
+ )
+ )

- context = "\n\n".join(agent_descriptions)
+ context = ",\n".join(agent_descriptions)

  # Format the routing prompt
  routing_instruction = self.routing_instruction or DEFAULT_ROUTING_INSTRUCTION
- prompt_text = routing_instruction.format(context=context, request=request)
-
- # Create multipart message for the router
- prompt = PromptMessageMultipart(
- role="user", content=[TextContent(type="text", text=prompt_text)]
- )
+ routing_instruction = routing_instruction.format(context=context)

- # Get structured response from LLM
  assert self._llm
+ mutated = message.model_copy(deep=True)
+ mutated.add_text(routing_instruction)
  response, _ = await self._llm.structured(
- [prompt], RoutingResponse, self._default_request_params
+ [mutated],
+ RoutingResponse,
+ self._default_request_params,
  )

+ warn: str | None = None
  if not response:
- logger.warning("No routing response received from LLM")
- return None
+ warn = "No routing response received from LLM"
+ elif response.agent not in self.agent_map:
+ warn = f"A response was received, but the agent {response.agent} was not known to the Router"

- # Look up the agent by name
- selected_agent = self.agent_map.get(response.agent)
-
- if not selected_agent:
- logger.warning(f"Agent '{response.agent}' not found in available agents")
- return None
+ if warn:
+ logger.warning(warn)
+ return None, warn
+ else:
+ assert response
+ logger.info(
+ f"Routing structured request to agent: {response.agent or 'error'} (confidence: {response.confidence or ''})"
+ )

- return RouterResult(
- result=selected_agent, confidence=response.confidence, reasoning=response.reasoning
- )
+ return response, None
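
Note: the sketch below is illustrative and not part of the package. It shows how the reworked routing prompt is assembled after this change: each agent's AgentCard is serialised to JSON (name, description, skills) and substituted into DEFAULT_ROUTING_INSTRUCTION, replacing the old request-plus-JSON-schema prompt. The two agent cards here are hypothetical stand-ins for real AgentCard objects.

import json

DEFAULT_ROUTING_INSTRUCTION = """
Select from the following agents to handle the request:
<fastagent:agents>
[
{context}
]
</fastagent:agents>

You must respond with the 'name' of one of the agents listed above.
"""

# Hypothetical agent cards standing in for the JSON produced by
# agent_card.model_dump_json(include={"name", "description", "skills"}).
cards = [
    {"name": "researcher", "description": "Finds and summarises sources", "skills": []},
    {"name": "coder", "description": "Writes and reviews Python", "skills": []},
]

# The router joins the card JSON with ",\n", formats it into the instruction,
# then appends the result to a copy of the user's last message before calling structured().
context = ",\n".join(json.dumps(card) for card in cards)
print(DEFAULT_ROUTING_INSTRUCTION.format(context=context))
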
mcp_agent/app.py CHANGED
@@ -1,7 +1,6 @@
  import asyncio
  from contextlib import asynccontextmanager
- from datetime import timedelta
- from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type, TypeVar
+ from typing import TYPE_CHECKING, Dict, Optional, Type, TypeVar

  from mcp_agent.config import Settings
  from mcp_agent.context import Context, cleanup_context, initialize_context
@@ -174,125 +173,3 @@ class MCPApp:
  yield self
  finally:
  await self.cleanup()
-
- def workflow(self, cls: Type, *args, workflow_id: str | None = None, **kwargs) -> Type:
- """
- Decorator for a workflow class. By default it's a no-op,
- but different executors can use this to customize behavior
- for workflow registration.
-
- Example:
- If Temporal is available & we use a TemporalExecutor,
- this decorator will wrap with temporal_workflow.defn.
- """
- decorator_registry = self.context.decorator_registry
- execution_engine = self.engine
- workflow_defn_decorator = decorator_registry.get_workflow_defn_decorator(execution_engine)
-
- if workflow_defn_decorator:
- return workflow_defn_decorator(cls, *args, **kwargs)
-
- cls._app = self
- self._workflows[workflow_id or cls.__name__] = cls
-
- # Default no-op
- return cls
-
- def workflow_run(self, fn: Callable[..., R]) -> Callable[..., R]:
- """
- Decorator for a workflow's main 'run' method.
- Different executors can use this to customize behavior for workflow execution.
-
- Example:
- If Temporal is in use, this gets converted to @workflow.run.
- """
-
- decorator_registry = self.context.decorator_registry
- execution_engine = self.engine
- workflow_run_decorator = decorator_registry.get_workflow_run_decorator(execution_engine)
-
- if workflow_run_decorator:
- return workflow_run_decorator(fn)
-
- # Default no-op
- def wrapper(*args, **kwargs):
- # no-op wrapper
- return fn(*args, **kwargs)
-
- return wrapper
-
- def workflow_task(
- self,
- name: str | None = None,
- schedule_to_close_timeout: timedelta | None = None,
- retry_policy: Dict[str, Any] | None = None,
- **kwargs: Any,
- ) -> Callable[[Callable[..., R]], Callable[..., R]]:
- """
- Decorator to mark a function as a workflow task,
- automatically registering it in the global activity registry.
-
- Args:
- name: Optional custom name for the activity
- schedule_to_close_timeout: Maximum time the task can take to complete
- retry_policy: Retry policy configuration
- **kwargs: Additional metadata passed to the activity registration
-
- Returns:
- Decorated function that preserves async and typing information
-
- Raises:
- TypeError: If the decorated function is not async
- ValueError: If the retry policy or timeout is invalid
- """
-
- def decorator(func: Callable[..., R]) -> Callable[..., R]:
- if not asyncio.iscoroutinefunction(func):
- raise TypeError(f"Function {func.__name__} must be async.")
-
- actual_name = name or f"{func.__module__}.{func.__qualname__}"
- timeout = schedule_to_close_timeout or timedelta(minutes=10)
- metadata = {
- "activity_name": actual_name,
- "schedule_to_close_timeout": timeout,
- "retry_policy": retry_policy or {},
- **kwargs,
- }
- activity_registry = self.context.task_registry
- activity_registry.register(actual_name, func, metadata)
-
- setattr(func, "is_workflow_task", True)
- setattr(func, "execution_metadata", metadata)
-
- # TODO: saqadri - determine if we need this
- # Preserve metadata through partial application
- # @functools.wraps(func)
- # async def wrapper(*args: Any, **kwargs: Any) -> R:
- # result = await func(*args, **kwargs)
- # return cast(R, result) # Ensure type checking works
-
- # # Add metadata that survives partial application
- # wrapper.is_workflow_task = True # type: ignore
- # wrapper.execution_metadata = metadata # type: ignore
-
- # # Make metadata accessible through partial
- # def __getattr__(name: str) -> Any:
- # if name == "is_workflow_task":
- # return True
- # if name == "execution_metadata":
- # return metadata
- # raise AttributeError(f"'{func.__name__}' has no attribute '{name}'")
-
- # wrapper.__getattr__ = __getattr__ # type: ignore
-
- # return wrapper
-
- return func
-
- return decorator
-
- def is_workflow_task(self, func: Callable[..., Any]) -> bool:
- """
- Check if a function is marked as a workflow task.
- This gets set for functions that are decorated with @workflow_task."""
- return bool(getattr(func, "is_workflow_task", False))
mcp_agent/cli/commands/go.py ADDED
@@ -0,0 +1,133 @@
+ """Run an interactive agent directly from the command line."""
+
+ import asyncio
+ import sys
+ from typing import List, Optional
+
+ import typer
+
+ from mcp_agent.core.fastagent import FastAgent
+
+ app = typer.Typer(
+ help="Run an interactive agent directly from the command line without creating an agent.py file"
+ )
+
+ async def _run_agent(
+ name: str = "FastAgent CLI",
+ instruction: str = "You are a helpful AI Agent.",
+ config_path: Optional[str] = None,
+ server_list: Optional[List[str]] = None,
+ model: Optional[str] = None,
+ ) -> None:
+ """Async implementation to run an interactive agent."""
+
+ # Create the FastAgent instance with CLI arg parsing enabled
+ # It will automatically parse args like --model, --quiet, etc.
+ fast_kwargs = {
+ "name": name,
+ "config_path": config_path,
+ "ignore_unknown_args": True,
+ }
+
+ fast = FastAgent(**fast_kwargs)
+
+ # Define the agent with specified parameters
+ agent_kwargs = {"instruction": instruction}
+ if server_list:
+ agent_kwargs["servers"] = server_list
+ if model:
+ agent_kwargs["model"] = model
+
+ @fast.agent(**agent_kwargs)
+ async def cli_agent():
+ async with fast.run() as agent:
+ await agent.interactive()
+
+ # Run the agent
+ await cli_agent()
+
+ def run_async_agent(
+ name: str,
+ instruction: str,
+ config_path: Optional[str] = None,
+ servers: Optional[str] = None,
+ model: Optional[str] = None
+ ):
+ """Run the async agent function with proper loop handling."""
+ server_list = servers.split(',') if servers else None
+
+ # Check if we're already in an event loop
+ try:
+ loop = asyncio.get_event_loop()
+ if loop.is_running():
+ # We're inside a running event loop, so we can't use asyncio.run
+ # Instead, create a new loop
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+ except RuntimeError:
+ # No event loop exists, so we'll create one
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+
+ try:
+ loop.run_until_complete(_run_agent(
+ name=name,
+ instruction=instruction,
+ config_path=config_path,
+ server_list=server_list,
+ model=model
+ ))
+ finally:
+ try:
+ # Clean up the loop
+ tasks = asyncio.all_tasks(loop)
+ for task in tasks:
+ task.cancel()
+
+ # Run the event loop until all tasks are done
+ if sys.version_info >= (3, 7):
+ loop.run_until_complete(asyncio.gather(*tasks, return_exceptions=True))
+ loop.run_until_complete(loop.shutdown_asyncgens())
+ loop.close()
+ except Exception:
+ pass
+
+ @app.callback(invoke_without_command=True)
+ def go(
+ ctx: typer.Context,
+ name: str = typer.Option("FastAgent CLI", "--name", help="Name for the agent"),
+ instruction: str = typer.Option(
+ "You are a helpful AI Agent.", "--instruction", "-i", help="Instruction for the agent"
+ ),
+ config_path: Optional[str] = typer.Option(
+ None, "--config-path", "-c", help="Path to config file"
+ ),
+ servers: Optional[str] = typer.Option(
+ None, "--servers", help="Comma-separated list of server names to enable from config"
+ ),
+ model: Optional[str] = typer.Option(
+ None, "--model", help="Override the default model (e.g., haiku, sonnet, gpt-4)"
+ ),
+ ) -> None:
+ """
+ Run an interactive agent directly from the command line.
+
+ Example:
+ fast-agent go --model=haiku --instruction="You are a coding assistant" --servers=fetch,filesystem
+
+ This will start an interactive session with the agent, using the specified model
+ and instruction. It will use the default configuration from fastagent.config.yaml
+ unless --config-path is specified.
+
+ Common options:
+ --model: Override the default model (e.g., --model=haiku)
+ --quiet: Disable progress display and logging
+ --servers: Comma-separated list of server names to enable from config
+ """
+ run_async_agent(
+ name=name,
+ instruction=instruction,
+ config_path=config_path,
+ servers=servers,
+ model=model
+ )
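
Note: the new go command is roughly the hand-written equivalent of the following agent script. This is an illustrative sketch derived from the code above (same FastAgent keyword arguments and decorator options); it is not shipped with the package, and it simplifies the event-loop handling to a plain asyncio.run. Running "fast-agent go --model=haiku --servers=fetch,filesystem" wires up approximately this:

import asyncio

from mcp_agent.core.fastagent import FastAgent

# Mirrors the defaults used by the go command.
fast = FastAgent(name="FastAgent CLI", ignore_unknown_args=True)

# The decorator arguments correspond to the --instruction, --servers and --model flags.
@fast.agent(instruction="You are a helpful AI Agent.", servers=["fetch", "filesystem"], model="haiku")
async def cli_agent():
    async with fast.run() as agent:
        await agent.interactive()

if __name__ == "__main__":
    asyncio.run(cli_agent())
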
mcp_agent/cli/commands/setup.py CHANGED
@@ -15,7 +15,7 @@ FASTAGENT_CONFIG_TEMPLATE = """
  # Takes format:
  # <provider>.<model_string>.<reasoning_effort?> (e.g. anthropic.claude-3-5-sonnet-20241022 or openai.o3-mini.low)
  # Accepts aliases for Anthropic Models: haiku, haiku3, sonnet, sonnet35, opus, opus3
- # and OpenAI Models: gpt-4o-mini, gpt-4o, o1, o1-mini, o3-mini
+ # and OpenAI Models: gpt-4.1, gpt-4.1-mini, o1, o1-mini, o3-mini
  #
  # If not specified, defaults to "haiku".
  # Can be overriden with a command line switch --model=<model>, or within the Agent constructor.
@@ -221,7 +221,7 @@ def init(
  if "fastagent.secrets.yaml" in created:
  console.print("\n[yellow]Important:[/yellow] Remember to:")
  console.print(
- "1. Add your API keys to fastagent.secrets.yaml or set OPENAI_API_KEY and ANTHROPIC_API_KEY environment variables"
+ "1. Add your API keys to fastagent.secrets.yaml, or set environment variables. Use [cyan]fast-agent check[/cyan] to verify."
  )
  console.print(
  "2. Keep fastagent.secrets.yaml secure and never commit it to version control"
mcp_agent/cli/main.py CHANGED
@@ -4,7 +4,7 @@ import typer
  from rich.console import Console
  from rich.table import Table

- from mcp_agent.cli.commands import check_config, quickstart, setup
+ from mcp_agent.cli.commands import check_config, go, quickstart, setup
  from mcp_agent.cli.terminal import Application

  app = typer.Typer(
@@ -13,6 +13,7 @@ app = typer.Typer(
  )

  # Subcommands
+ app.add_typer(go.app, name="go", help="Run an interactive agent directly from the command line")
  app.add_typer(setup.app, name="setup", help="Set up a new agent project")
  app.add_typer(check_config.app, name="check", help="Show or diagnose fast-agent configuration")
  app.add_typer(quickstart.app, name="bootstrap", help="Create example applications")
@@ -39,14 +40,15 @@ def show_welcome() -> None:
  table.add_column("Command", style="green")
  table.add_column("Description")

- table.add_row("setup", "Create a new agent and configuration files")
+ table.add_row("[bold]go[/bold]", "Start an interactive session with an agent")
+ table.add_row("setup", "Create a new agent template and configuration files")
  table.add_row("check", "Show or diagnose fast-agent configuration")
  table.add_row("quickstart", "Create example applications (workflow, researcher, etc.)")

  console.print(table)

  console.print(
- "\n[italic]get started with:[/italic] [cyan]fast-agent[/cyan] [green]setup[/green]"
+ "\n[italic]get started with:[/italic] [bold][cyan]fast-agent[/cyan][/bold] [green]setup[/green]"
  )
