fast-agent-mcp 0.2.39__py3-none-any.whl → 0.2.41__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of fast-agent-mcp has been flagged as potentially problematic.

Files changed (41)
  1. {fast_agent_mcp-0.2.39.dist-info → fast_agent_mcp-0.2.41.dist-info}/METADATA +1 -1
  2. {fast_agent_mcp-0.2.39.dist-info → fast_agent_mcp-0.2.41.dist-info}/RECORD +41 -37
  3. {fast_agent_mcp-0.2.39.dist-info → fast_agent_mcp-0.2.41.dist-info}/entry_points.txt +2 -2
  4. mcp_agent/cli/__main__.py +29 -3
  5. mcp_agent/cli/commands/check_config.py +140 -81
  6. mcp_agent/cli/commands/go.py +151 -38
  7. mcp_agent/cli/commands/quickstart.py +8 -4
  8. mcp_agent/cli/commands/server_helpers.py +106 -0
  9. mcp_agent/cli/constants.py +25 -0
  10. mcp_agent/cli/main.py +1 -1
  11. mcp_agent/config.py +94 -44
  12. mcp_agent/core/agent_app.py +104 -15
  13. mcp_agent/core/agent_types.py +1 -0
  14. mcp_agent/core/direct_decorators.py +9 -0
  15. mcp_agent/core/direct_factory.py +18 -4
  16. mcp_agent/core/enhanced_prompt.py +165 -13
  17. mcp_agent/core/fastagent.py +4 -0
  18. mcp_agent/core/interactive_prompt.py +37 -37
  19. mcp_agent/core/usage_display.py +11 -1
  20. mcp_agent/core/validation.py +21 -2
  21. mcp_agent/human_input/elicitation_form.py +55 -22
  22. mcp_agent/llm/augmented_llm.py +28 -9
  23. mcp_agent/llm/augmented_llm_silent.py +48 -0
  24. mcp_agent/llm/model_database.py +20 -0
  25. mcp_agent/llm/model_factory.py +12 -0
  26. mcp_agent/llm/provider_key_manager.py +22 -8
  27. mcp_agent/llm/provider_types.py +19 -12
  28. mcp_agent/llm/providers/augmented_llm_anthropic.py +7 -2
  29. mcp_agent/llm/providers/augmented_llm_azure.py +7 -1
  30. mcp_agent/llm/providers/augmented_llm_google_native.py +4 -1
  31. mcp_agent/llm/providers/augmented_llm_openai.py +9 -2
  32. mcp_agent/llm/providers/augmented_llm_xai.py +38 -0
  33. mcp_agent/llm/usage_tracking.py +28 -3
  34. mcp_agent/mcp/mcp_agent_client_session.py +2 -0
  35. mcp_agent/mcp/mcp_aggregator.py +38 -44
  36. mcp_agent/mcp/sampling.py +15 -11
  37. mcp_agent/resources/examples/mcp/elicitations/forms_demo.py +0 -6
  38. mcp_agent/resources/examples/workflows/router.py +9 -0
  39. mcp_agent/ui/console_display.py +125 -13
  40. {fast_agent_mcp-0.2.39.dist-info → fast_agent_mcp-0.2.41.dist-info}/WHEEL +0 -0
  41. {fast_agent_mcp-0.2.39.dist-info → fast_agent_mcp-0.2.41.dist-info}/licenses/LICENSE +0 -0
@@ -53,6 +53,7 @@ class PromptProvider(Protocol):
     async def apply_prompt(
         self,
         prompt_name: str,
+        prompt_title: Optional[str] = None,
         arguments: Optional[Dict[str, str]] = None,
         agent_name: Optional[str] = None,
         **kwargs,
@@ -243,9 +244,10 @@ class InteractivePrompt:
                             "server": server_name,
                             "name": prompt.name,
                             "namespaced_name": f"{server_name}{SEP}{prompt.name}",
-                            "description": getattr(prompt, "description", "No description"),
-                            "arg_count": len(getattr(prompt, "arguments", [])),
-                            "arguments": getattr(prompt, "arguments", []),
+                            "title": prompt.title or None,
+                            "description": prompt.description or "No description",
+                            "arg_count": len(prompt.arguments or []),
+                            "arguments": prompt.arguments or [],
                         }
                     )
             elif isinstance(prompts_info, list) and prompts_info:
@@ -256,6 +258,7 @@ class InteractivePrompt:
                             "server": server_name,
                             "name": prompt["name"],
                             "namespaced_name": f"{server_name}{SEP}{prompt['name']}",
+                            "title": prompt.get("title", None),
                             "description": prompt.get("description", "No description"),
                             "arg_count": len(prompt.get("arguments", [])),
                             "arguments": prompt.get("arguments", []),
@@ -263,17 +266,15 @@ class InteractivePrompt:
                         )
                     else:
                         # Handle Prompt objects from mcp.types
-                        prompt_name = getattr(prompt, "name", str(prompt))
-                        description = getattr(prompt, "description", "No description")
-                        arguments = getattr(prompt, "arguments", [])
                         all_prompts.append(
                             {
                                 "server": server_name,
-                                "name": prompt_name,
-                                "namespaced_name": f"{server_name}{SEP}{prompt_name}",
-                                "description": description,
-                                "arg_count": len(arguments),
-                                "arguments": arguments,
+                                "name": prompt.name,
+                                "namespaced_name": f"{server_name}{SEP}{prompt.name}",
+                                "title": prompt.title or None,
+                                "description": prompt.description or "No description",
+                                "arg_count": len(prompt.arguments or []),
+                                "arguments": prompt.arguments or [],
                             }
                         )
 
@@ -314,6 +315,7 @@ class InteractivePrompt:
         table.add_column("#", justify="right", style="cyan")
         table.add_column("Server", style="green")
         table.add_column("Prompt Name", style="bright_blue")
+        table.add_column("Title")
         table.add_column("Description")
         table.add_column("Args", justify="center")
 
@@ -323,6 +325,7 @@ class InteractivePrompt:
                 str(i + 1),
                 prompt["server"],
                 prompt["name"],
+                prompt["title"],
                 prompt["description"],
                 str(prompt["arg_count"]),
             )
@@ -378,7 +381,7 @@ class InteractivePrompt:
                     continue
 
                 # Extract prompts
-                prompts = []
+                prompts: List[Prompt] = []
                 if hasattr(prompts_info, "prompts"):
                     prompts = prompts_info.prompts
                 elif isinstance(prompts_info, list):
@@ -387,8 +390,9 @@ class InteractivePrompt:
                 # Process each prompt
                 for prompt in prompts:
                     # Get basic prompt info
-                    prompt_name = getattr(prompt, "name", "Unknown")
-                    prompt_description = getattr(prompt, "description", "No description")
+                    prompt_name = prompt.name
+                    prompt_title = prompt.title or None
+                    prompt_description = prompt.description or "No description"
 
                     # Extract argument information
                     arg_names = []
@@ -397,23 +401,19 @@ class InteractivePrompt:
                     arg_descriptions = {}
 
                     # Get arguments list
-                    arguments = getattr(prompt, "arguments", None)
-                    if arguments:
-                        for arg in arguments:
-                            name = getattr(arg, "name", None)
-                            if name:
-                                arg_names.append(name)
-
-                                # Store description if available
-                                description = getattr(arg, "description", None)
-                                if description:
-                                    arg_descriptions[name] = description
-
-                                # Check if required
-                                if getattr(arg, "required", False):
-                                    required_args.append(name)
-                                else:
-                                    optional_args.append(name)
+                    if prompt.arguments:
+                        for arg in prompt.arguments:
+                            arg_names.append(arg.name)
+
+                            # Store description if available
+                            if arg.description:
+                                arg_descriptions[arg.name] = arg.description
+
+                            # Check if required
+                            if arg.required:
+                                required_args.append(arg.name)
+                            else:
+                                optional_args.append(arg.name)
 
                     # Create namespaced version using the consistent separator
                     namespaced_name = f"{server_name}{SEP}{prompt_name}"
@@ -424,6 +424,7 @@ class InteractivePrompt:
                             "server": server_name,
                             "name": prompt_name,
                             "namespaced_name": namespaced_name,
+                            "title": prompt_title,
                             "description": prompt_description,
                             "arg_count": len(arg_names),
                             "arg_names": arg_names,
@@ -486,6 +487,7 @@ class InteractivePrompt:
         table.add_column("#", justify="right", style="cyan")
         table.add_column("Server", style="green")
         table.add_column("Prompt Name", style="bright_blue")
+        table.add_column("Title")
         table.add_column("Description")
         table.add_column("Args", justify="center")
 
@@ -508,6 +510,7 @@ class InteractivePrompt:
                 str(i + 1),
                 prompt["server"],
                 prompt["name"],
+                prompt["title"] or "No title",
                 prompt["description"] or "No description",
                 args_display,
             )
@@ -669,6 +672,7 @@ class InteractivePrompt:
             table = Table(title="Available MCP Tools")
             table.add_column("#", justify="right", style="cyan")
             table.add_column("Tool Name", style="bright_blue")
+            table.add_column("Title")
             table.add_column("Description")
 
             # Add tools to table
@@ -676,16 +680,12 @@ class InteractivePrompt:
                 table.add_row(
                     str(i + 1),
                     tool.name,
-                    getattr(tool, "description", "No description") or "No description",
+                    tool.title or "No title",
+                    tool.description or "No description",
                 )
 
             console.print(table)
 
-            # Add usage instructions
-            rich_print("\n[bold]Usage:[/bold]")
-            rich_print("  • Tools are automatically available in your conversation")
-            rich_print("  • Just ask the agent to use a tool by name or description")
-
         except Exception as e:
             import traceback
 
@@ -36,6 +36,7 @@ def display_usage_report(
     total_input = 0
     total_output = 0
     total_tokens = 0
+    total_tool_calls = 0
 
     for agent_name, agent in agents.items():
         if agent.usage_accumulator:
@@ -45,6 +46,7 @@
             output_tokens = summary["cumulative_output_tokens"]
             billing_tokens = summary["cumulative_billing_tokens"]
             turns = summary["turn_count"]
+            tool_calls = summary["cumulative_tool_calls"]
 
             # Get context percentage for this agent
             context_percentage = agent.usage_accumulator.context_usage_percentage
@@ -72,6 +74,7 @@
                     "output": output_tokens,
                     "total": billing_tokens,
                     "turns": turns,
+                    "tool_calls": tool_calls,
                     "context": context_percentage,
                 }
             )
@@ -79,6 +82,7 @@
             total_input += input_tokens
             total_output += output_tokens
             total_tokens += billing_tokens
+            total_tool_calls += tool_calls
 
     if not usage_data:
         return
@@ -94,7 +98,7 @@
 
     # Print header with proper spacing
     console.print(
-        f"[dim]{'Agent':<{agent_width}} {'Input':>9} {'Output':>9} {'Total':>9} {'Turns':>6} {'Context%':>9} {'Model':<25}[/dim]"
+        f"[dim]{'Agent':<{agent_width}} {'Input':>9} {'Output':>9} {'Total':>9} {'Turns':>6} {'Tools':>6} {'Context%':>9} {'Model':<25}[/dim]"
    )
 
    # Print agent rows - use styling based on subdued_colors flag
@@ -103,6 +107,7 @@
         output_str = f"{data['output']:,}"
         total_str = f"{data['total']:,}"
         turns_str = str(data["turns"])
+        tools_str = str(data["tool_calls"])
         context_str = f"{data['context']:.1f}%" if data["context"] is not None else "-"
 
         # Truncate agent name if needed
@@ -118,6 +123,7 @@
                 f"{output_str:>9} "
                 f"[bold]{total_str:>9}[/bold] "
                 f"{turns_str:>6} "
+                f"{tools_str:>6} "
                 f"{context_str:>9} "
                 f"{data['model']:<25}[/dim]"
             )
@@ -129,6 +135,7 @@
                 f"{output_str:>9} "
                 f"[bold]{total_str:>9}[/bold] "
                 f"{turns_str:>6} "
+                f"{tools_str:>6} "
                 f"{context_str:>9} "
                 f"[dim]{data['model']:<25}[/dim]"
             )
@@ -139,6 +146,7 @@
     total_input_str = f"{total_input:,}"
     total_output_str = f"{total_output:,}"
     total_tokens_str = f"{total_tokens:,}"
+    total_tools_str = str(total_tool_calls)
 
     if subdued_colors:
         # Original fastagent.py style with dim wrapper on bold
@@ -148,6 +156,7 @@
             f"{total_output_str:>9} "
             f"[bold]{total_tokens_str:>9}[/bold] "
             f"{'':<6} "
+            f"{total_tools_str:>6} "
             f"{'':<9} "
             f"{'':<25}[/bold dim]"
         )
@@ -159,6 +168,7 @@
             f"[bold]{total_output_str:>9}[/bold] "
             f"[bold]{total_tokens_str:>9}[/bold] "
             f"{'':<6} "
+            f"[bold]{total_tools_str:>6}[/bold] "
             f"{'':<9} "
             f"{'':<25}"
         )
@@ -52,7 +52,7 @@ def validate_workflow_references(agents: Dict[str, Dict[str, Any]]) -> None:
 
     for name, agent_data in agents.items():
         agent_type = agent_data["type"]  # This is a string from config
-
+
         # Note: Compare string values from config with the Enum's string value
         if agent_type == AgentType.PARALLEL.value:
             # Check fan_in exists
@@ -226,7 +226,7 @@ def get_dependencies_groups(
     # Build the dependency graph
     for name, agent_data in agents_dict.items():
         agent_type = agent_data["type"]  # This is a string from config
-
+
         # Note: Compare string values from config with the Enum's string value
         if agent_type == AgentType.PARALLEL.value:
             # Parallel agents depend on their fan-out and fan-in agents
@@ -305,3 +305,22 @@
         remaining -= current_level
 
     return result
+
+
+def validate_provider_keys_post_creation(active_agents: Dict[str, Any]) -> None:
+    """
+    Validate that API keys are available for all created agents with LLMs.
+
+    This runs after agent creation when we have actual agent instances.
+
+    Args:
+        active_agents: Dictionary of created agent instances
+
+    Raises:
+        ProviderKeyError: If any required API key is missing
+    """
+    for agent_name, agent in active_agents.items():
+        llm = getattr(agent, "_llm", None)
+        if llm:
+            # This throws a ProviderKeyError if the key is not present
+            llm._api_key()
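
The new validate_provider_keys_post_creation helper is designed to fail fast once agents exist. A minimal caller sketch; the factory entry point name and its signature are assumptions for illustration, not taken from this diff:

    # Hypothetical wiring - create_agents_in_dependency_order and its signature
    # are assumed; only validate_provider_keys_post_creation is from this release.
    from mcp_agent.core.validation import validate_provider_keys_post_creation

    async def build_agents(app, agents_dict, model_factory_func):
        active_agents = await create_agents_in_dependency_order(
            app, agents_dict, model_factory_func
        )
        # Raises ProviderKeyError if any agent's LLM cannot resolve a key
        validate_provider_keys_post_creation(active_agents)
        return active_agents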
@@ -16,7 +16,6 @@ from prompt_toolkit.validation import ValidationError, Validator
 from prompt_toolkit.widgets import (
     Button,
     Checkbox,
-    Dialog,
     Frame,
     Label,
     RadioList,
@@ -187,13 +186,31 @@ class ElicitationForm:
             height=len(self.message.split("\n")),
         )
 
-        # Create form fields - removed useless horizontal divider
-        form_fields = [
-            fastagent_header,  # Fast-agent info
-            Window(height=1),  # Spacing
-            mcp_header,  # MCP server message
-            Window(height=1),  # Spacing
-        ]
+        # Create sticky headers (outside scrollable area)
+        sticky_headers = HSplit(
+            [
+                Window(height=1),  # Top padding
+                VSplit(
+                    [
+                        Window(width=2),  # Left padding
+                        fastagent_header,  # Fast-agent info
+                        Window(width=2),  # Right padding
+                    ]
+                ),
+                Window(height=1),  # Spacing
+                VSplit(
+                    [
+                        Window(width=2),  # Left padding
+                        mcp_header,  # MCP server message
+                        Window(width=2),  # Right padding
+                    ]
+                ),
+                Window(height=1),  # Spacing
+            ]
+        )
+
+        # Create scrollable form fields (without headers)
+        form_fields = []
 
         for field_name, field_def in self.properties.items():
             field_widget = self._create_field(field_name, field_def)
@@ -228,18 +245,17 @@ class ElicitationForm:
             ]
         )
 
-        # Main layout
+        # Main scrollable content (form fields and buttons only)
         form_fields.extend([self.status_line, buttons])
-        content = HSplit(form_fields)
+        scrollable_form_content = HSplit(form_fields)
 
-        # Add padding around content using HSplit and VSplit with empty windows
-        padded_content = HSplit(
+        # Add padding around scrollable content
+        padded_scrollable_content = HSplit(
             [
-                Window(height=1),  # Top padding
                 VSplit(
                     [
                         Window(width=2),  # Left padding
-                        content,
+                        scrollable_form_content,
                         Window(width=2),  # Right padding
                     ]
                 ),
@@ -247,20 +263,36 @@ class ElicitationForm:
             ]
         )
 
-        # Wrap content in ScrollablePane to handle oversized forms
+        # Wrap only form fields in ScrollablePane (headers stay fixed)
         scrollable_content = ScrollablePane(
-            content=padded_content,
+            content=padded_scrollable_content,
             show_scrollbar=False,  # Only show when content exceeds available space
             display_arrows=False,  # Only show when content exceeds available space
             keep_cursor_visible=True,
             keep_focused_window_visible=True,
         )
 
-        # Dialog - formatted title with better styling and text
-        dialog = Dialog(
-            title=FormattedText([("class:title", "Elicitation Request")]),
-            body=scrollable_content,
-            with_background=True,  # Re-enable background for proper layout
+        # Create title bar manually
+        title_bar = Window(
+            FormattedTextControl(FormattedText([("class:title", "Elicitation Request")])),
+            height=1,
+            style="class:dialog.title",
+        )
+
+        # Combine title, sticky headers, and scrollable content
+        full_content = HSplit(
+            [
+                title_bar,
+                Window(height=1),  # Spacing after title
+                sticky_headers,  # Headers stay fixed at top
+                scrollable_content,  # Form fields can scroll
+            ]
+        )
+
+        # Create dialog frame manually to avoid Dialog's internal scrolling
+        dialog = Frame(
+            body=full_content,
+            style="class:dialog",
         )
 
         # Key bindings
@@ -302,7 +334,8 @@ class ElicitationForm:
        def submit_alt(event):
            self._accept()

-       @kb.add("escape", eager=True)
+       # ESC should ALWAYS cancel immediately, no matter what
+       @kb.add("escape", eager=True, is_global=True)
        def cancel(event):
            self._cancel()

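
The escape binding now passes is_global=True, so cancellation fires no matter which widget holds focus. A minimal standalone prompt_toolkit sketch of the same pattern (illustrative, not code from this package):

    from prompt_toolkit.key_binding import KeyBindings

    kb = KeyBindings()

    @kb.add("escape", eager=True, is_global=True)
    def _(event):
        # eager=True: fire immediately instead of waiting to disambiguate
        # escape sequences; is_global=True: fire regardless of focus.
        event.app.exit(result=None)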
@@ -36,7 +36,7 @@ from mcp_agent.llm.sampling_format_converter import (
     BasicFormatConverter,
     ProviderFormatConverter,
 )
-from mcp_agent.llm.usage_tracking import UsageAccumulator
+from mcp_agent.llm.usage_tracking import TurnUsage, UsageAccumulator
 from mcp_agent.logging.logger import get_logger
 from mcp_agent.mcp.helpers.content_helpers import get_text
 from mcp_agent.mcp.interfaces import (
@@ -123,6 +123,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         ] = BasicFormatConverter,
         context: Optional["Context"] = None,
         model: Optional[str] = None,
+        api_key: Optional[str] = None,
         **kwargs: dict[str, Any],
     ) -> None:
         """
@@ -158,6 +159,9 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         # Initialize the display component
         self.display = ConsoleDisplay(config=self.context.config)
 
+        # Tool call counter for current turn
+        self._current_turn_tool_calls = 0
+
         # Initialize default parameters, passing model info
         model_kwargs = kwargs.copy()
         if model:
@@ -173,6 +177,8 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         self.type_converter = type_converter
         self.verb = kwargs.get("verb")
 
+        self._init_api_key = api_key
+
         # Initialize usage tracking
         self.usage_accumulator = UsageAccumulator()
 
@@ -440,15 +446,25 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
 
     def show_tool_result(self, result: CallToolResult) -> None:
         """Display a tool result in a formatted panel."""
-        self.display.show_tool_result(result)
+        self.display.show_tool_result(result, name=self.name)
 
     def show_oai_tool_result(self, result: str) -> None:
         """Display a tool result in a formatted panel."""
-        self.display.show_oai_tool_result(result)
+        self.display.show_oai_tool_result(result, name=self.name)
 
     def show_tool_call(self, available_tools, tool_name, tool_args) -> None:
         """Display a tool call in a formatted panel."""
-        self.display.show_tool_call(available_tools, tool_name, tool_args)
+        self._current_turn_tool_calls += 1
+        self.display.show_tool_call(available_tools, tool_name, tool_args, name=self.name)
+
+    def _reset_turn_tool_calls(self) -> None:
+        """Reset tool call counter for new turn."""
+        self._current_turn_tool_calls = 0
+
+    def _finalize_turn_usage(self, turn_usage: "TurnUsage") -> None:
+        """Set tool call count on TurnUsage and add to accumulator."""
+        turn_usage.set_tool_calls(self._current_turn_tool_calls)
+        self.usage_accumulator.add_turn(turn_usage)
 
     async def show_assistant_message(
         self,
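
Together with the _current_turn_tool_calls counter added in __init__, these helpers form a per-turn protocol for provider subclasses: reset at the start of a turn, let show_tool_call increment the count, then stamp the total onto the TurnUsage record. A hedged illustration; _call_provider, available_tools, and build_turn_usage are placeholders, since real subclasses use provider-specific builders touched elsewhere in this diff:

    # Illustrative only - not code from this package.
    class ExampleProviderLLM(AugmentedLLM):
        async def _run_turn(self, request, available_tools) -> None:
            self._reset_turn_tool_calls()  # start the turn at zero
            response = await self._call_provider(request)  # placeholder call
            for call in response.tool_calls:
                # each display call also increments _current_turn_tool_calls
                self.show_tool_call(available_tools, call.name, call.args)
            turn_usage = build_turn_usage(response)  # placeholder constructor
            self._finalize_turn_usage(turn_usage)  # stamps count, accumulates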
@@ -556,12 +572,12 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
 
     def _update_streaming_progress(self, content: str, model: str, estimated_tokens: int) -> int:
         """Update streaming progress with token estimation and formatting.
-
+
         Args:
             content: The text content from the streaming event
             model: The model name
             estimated_tokens: Current token count to update
-
+
         Returns:
             Updated estimated token count
         """
@@ -569,10 +585,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         text_length = len(content)
         additional_tokens = max(1, text_length // 4)
         new_total = estimated_tokens + additional_tokens
-
+
         # Format token count for display
         token_str = str(new_total).rjust(5)
-
+
         # Emit progress event
         data = {
             "progress_action": ProgressAction.STREAMING,
@@ -582,7 +598,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             "details": token_str.strip(),  # Token count goes in details for STREAMING action
         }
         self.logger.info("Streaming progress", data=data)
-
+
         return new_total
 
     def _log_chat_finished(self, model: Optional[str] = None) -> None:
@@ -692,6 +708,9 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         return self._message_history
 
     def _api_key(self):
+        if self._init_api_key:
+            return self._init_api_key
+
         from mcp_agent.llm.provider_key_manager import ProviderKeyManager
 
         assert self.provider
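
An explicitly supplied key now short-circuits the ProviderKeyManager lookup entirely. A sketch of the resulting precedence; the construction is assumed from the api_key parameter added above, SomeProviderLLM stands in for any AugmentedLLM subclass, and the key string is obviously fake:

    # Assumed usage of the api_key constructor parameter from this diff.
    llm = SomeProviderLLM(model="example-model", api_key="sk-not-a-real-key")
    # _api_key() returns the injected key before consulting config or env vars
    assert llm._api_key() == "sk-not-a-real-key"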
@@ -0,0 +1,48 @@
+"""Silent LLM implementation that suppresses display output while maintaining functionality."""
+
+from typing import Any
+
+from mcp_agent.llm.augmented_llm_passthrough import PassthroughLLM
+from mcp_agent.llm.provider_types import Provider
+from mcp_agent.llm.usage_tracking import TurnUsage, UsageAccumulator
+
+
+class ZeroUsageAccumulator(UsageAccumulator):
+    """Usage accumulator that always reports zero usage."""
+
+    def add_turn(self, turn: TurnUsage) -> None:
+        """Override to do nothing - no usage accumulation."""
+        pass
+
+
+class SilentLLM(PassthroughLLM):
+    """
+    A specialized LLM that processes messages like PassthroughLLM but suppresses all display output.
+
+    This is particularly useful for parallel agent workflows where the fan-in agent
+    should aggregate results without polluting the console with intermediate output.
+    Token counting is disabled - the model always reports zero usage.
+    """
+
+    def __init__(
+        self, provider=Provider.FAST_AGENT, name: str = "Silent", **kwargs: dict[str, Any]
+    ) -> None:
+        super().__init__(name=name, provider=provider, **kwargs)
+        # Override with zero usage accumulator - silent model reports no usage
+        self.usage_accumulator = ZeroUsageAccumulator()
+
+    def show_user_message(self, message: Any, **kwargs) -> None:
+        """Override to suppress user message display."""
+        pass
+
+    async def show_assistant_message(self, message: Any, **kwargs) -> None:
+        """Override to suppress assistant message display."""
+        pass
+
+    def show_tool_calls(self, tool_calls: Any, **kwargs) -> None:
+        """Override to suppress tool call display."""
+        pass
+
+    def show_tool_results(self, tool_results: Any, **kwargs) -> None:
+        """Override to suppress tool result display."""
+        pass
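
The model database hunk below registers "silent" as an internal model string, so the intended entry point appears to be the model factory rather than direct construction. A hedged sketch of a parallel workflow whose fan-in stays quiet; the decorator signatures follow fast-agent's documented style and are assumed here:

    from mcp_agent.core.fastagent import FastAgent

    fast = FastAgent("example")

    # Fan-in agent uses the new "silent" model so aggregation does not
    # echo intermediate output to the console.
    @fast.agent(name="web")
    @fast.agent(name="docs")
    @fast.agent(name="collector", model="silent")
    @fast.parallel(name="research", fan_out=["web", "docs"], fan_in="collector")
    async def main():
        async with fast.run() as agents:
            await agents.research("summarize the findings")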
@@ -47,6 +47,7 @@ class ModelDatabase:
         "video/mp4",
     ]
     QWEN_MULTIMODAL = ["text/plain", "image/jpeg", "image/png", "image/webp"]
+    XAI_VISION = ["text/plain", "image/jpeg", "image/png", "image/webp"]
     TEXT_ONLY = ["text/plain"]
 
     # Common parameter configurations
@@ -128,10 +129,22 @@
         context_window=2097152, max_output_tokens=8192, tokenizes=GOOGLE_MULTIMODAL
     )
 
+    # FIXME: xAI has not documented the max output tokens for Grok 4. Using Grok 3 as a placeholder. Will need to update when available (if ever)
+    GROK_4 = ModelParameters(
+        context_window=256000, max_output_tokens=16385, tokenizes=XAI_VISION
+    )
+
+    # Source for Grok 3 max output: https://www.reddit.com/r/grok/comments/1j7209p/exploring_grok_3_beta_output_capacity_a_simple/
+    # xAI does not document Grok 3 max output tokens, using the above source as a reference.
+    GROK_3 = ModelParameters(
+        context_window=131072, max_output_tokens=16385, tokenizes=TEXT_ONLY
+    )
+
     # Model configuration database
     MODELS: Dict[str, ModelParameters] = {
         # internal models
         "passthrough": FAST_AGENT_STANDARD,
+        "silent": FAST_AGENT_STANDARD,
         "playback": FAST_AGENT_STANDARD,
         "slow": FAST_AGENT_STANDARD,
         # aliyun models
@@ -194,6 +207,13 @@
         "gemini-2.5-pro-preview": GEMINI_2_5_PRO,
         "gemini-2.5-flash-preview-05-20": GEMINI_FLASH,
         "gemini-2.5-pro-preview-05-06": GEMINI_PRO,
+        # xAI Grok Models
+        "grok-4": GROK_4,
+        "grok-4-0709": GROK_4,
+        "grok-3": GROK_3,
+        "grok-3-mini": GROK_3,
+        "grok-3-fast": GROK_3,
+        "grok-3-mini-fast": GROK_3,
     }
 
     @classmethod
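
With these entries, the new xAI models resolve through the same parameter database as other providers (the provider implementation itself lands in augmented_llm_xai.py, listed above). A quick lookup sketch using only the structures shown in this hunk:

    from mcp_agent.llm.model_database import ModelDatabase

    params = ModelDatabase.MODELS["grok-4"]
    # Per the placeholder values above: 256000 / 16385
    print(params.context_window, params.max_output_tokens)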