fast-agent-mcp 0.3.7__py3-none-any.whl → 0.3.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of fast-agent-mcp might be problematic.
Files changed (34)
  1. fast_agent/agents/llm_agent.py +30 -8
  2. fast_agent/agents/llm_decorator.py +2 -7
  3. fast_agent/agents/mcp_agent.py +9 -4
  4. fast_agent/cli/commands/auth.py +14 -1
  5. fast_agent/core/direct_factory.py +20 -8
  6. fast_agent/core/logging/listeners.py +2 -1
  7. fast_agent/interfaces.py +2 -2
  8. fast_agent/llm/model_database.py +7 -1
  9. fast_agent/llm/model_factory.py +2 -3
  10. fast_agent/llm/provider/anthropic/llm_anthropic.py +107 -62
  11. fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py +4 -3
  12. fast_agent/llm/provider/bedrock/llm_bedrock.py +1 -1
  13. fast_agent/llm/provider/google/google_converter.py +8 -41
  14. fast_agent/llm/provider/google/llm_google_native.py +1 -3
  15. fast_agent/llm/provider/openai/llm_azure.py +1 -1
  16. fast_agent/llm/provider/openai/llm_openai.py +3 -3
  17. fast_agent/llm/provider/openai/llm_tensorzero_openai.py +1 -1
  18. fast_agent/llm/request_params.py +1 -1
  19. fast_agent/mcp/mcp_agent_client_session.py +45 -2
  20. fast_agent/mcp/mcp_aggregator.py +282 -5
  21. fast_agent/mcp/mcp_connection_manager.py +86 -10
  22. fast_agent/mcp/stdio_tracking_simple.py +59 -0
  23. fast_agent/mcp/streamable_http_tracking.py +309 -0
  24. fast_agent/mcp/transport_tracking.py +598 -0
  25. fast_agent/resources/examples/data-analysis/analysis.py +7 -3
  26. fast_agent/ui/console_display.py +22 -1
  27. fast_agent/ui/enhanced_prompt.py +21 -1
  28. fast_agent/ui/interactive_prompt.py +5 -0
  29. fast_agent/ui/mcp_display.py +636 -0
  30. {fast_agent_mcp-0.3.7.dist-info → fast_agent_mcp-0.3.9.dist-info}/METADATA +6 -6
  31. {fast_agent_mcp-0.3.7.dist-info → fast_agent_mcp-0.3.9.dist-info}/RECORD +34 -30
  32. {fast_agent_mcp-0.3.7.dist-info → fast_agent_mcp-0.3.9.dist-info}/WHEEL +0 -0
  33. {fast_agent_mcp-0.3.7.dist-info → fast_agent_mcp-0.3.9.dist-info}/entry_points.txt +0 -0
  34. {fast_agent_mcp-0.3.7.dist-info → fast_agent_mcp-0.3.9.dist-info}/licenses/LICENSE +0 -0
fast_agent/agents/llm_agent.py CHANGED
@@ -27,7 +27,9 @@ from rich.text import Text
 
 from fast_agent.agents.agent_types import AgentConfig
 from fast_agent.agents.llm_decorator import LlmDecorator, ModelT
+from fast_agent.constants import FAST_AGENT_ERROR_CHANNEL
 from fast_agent.context import Context
+from fast_agent.mcp.helpers.content_helpers import get_text
 from fast_agent.types import PromptMessageExtended, RequestParams
 from fast_agent.types.llm_stop_reason import LlmStopReason
 from fast_agent.ui.console_display import ConsoleDisplay
@@ -45,7 +47,7 @@ class LlmAgent(LlmDecorator):
 
     This class provides LLM-specific functionality including UI display methods,
     tool call tracking, and chat interaction patterns while delegating core
-    LLM operations to the attached AugmentedLLMProtocol.
+    LLM operations to the attached FastAgentLLMProtocol.
     """
 
     def __init__(
@@ -123,6 +125,28 @@
                     Text("The assistant requested tool calls", style="dim green italic")
                 )
 
+            case LlmStopReason.ERROR:
+                # Check if there's detailed error information in the error channel
+                if message.channels and FAST_AGENT_ERROR_CHANNEL in message.channels:
+                    error_blocks = message.channels[FAST_AGENT_ERROR_CHANNEL]
+                    if error_blocks:
+                        # Extract text from the error block using the helper function
+                        error_text = get_text(error_blocks[0])
+                        if error_text:
+                            additional_segments.append(
+                                Text(f"\n\nError details: {error_text}", style="dim red italic")
+                            )
+                        else:
+                            # Fallback if we couldn't extract text
+                            additional_segments.append(
+                                Text(f"\n\nError details: {str(error_blocks[0])}", style="dim red italic")
+                            )
+                else:
+                    # Fallback if no detailed error is available
+                    additional_segments.append(
+                        Text("\n\nAn error occurred during generation.", style="dim red italic")
+                    )
+
             case _:
                 if message.stop_reason:
                     additional_segments.append(
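Note: the new `LlmStopReason.ERROR` branch reads error detail from a dedicated message channel. As a hedged sketch (not part of the diff), a consumer could pull the same detail out of a `PromptMessageExtended` like this; `error_detail` is a hypothetical helper name:

```python
from fast_agent.constants import FAST_AGENT_ERROR_CHANNEL
from fast_agent.mcp.helpers.content_helpers import get_text


def error_detail(message) -> str | None:
    # Return the text of the first error block, or None when no error was recorded.
    blocks = (message.channels or {}).get(FAST_AGENT_ERROR_CHANNEL) or []
    return get_text(blocks[0]) if blocks else None
```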
@@ -134,7 +158,9 @@
 
         if additional_message is not None:
             additional_segments.append(
-                additional_message if isinstance(additional_message, Text) else Text(str(additional_message))
+                additional_message
+                if isinstance(additional_message, Text)
+                else Text(str(additional_message))
             )
 
         additional_message_text = None
@@ -197,9 +223,7 @@
         # TODO - manage error catch, recovery, pause
         result, summary = await self._generate_with_summary(messages, request_params, tools)
 
-        summary_text = (
-            Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
-        )
+        summary_text = Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
 
         await self.show_assistant_message(result, additional_message=summary_text)
         return result
@@ -216,9 +240,7 @@
         (result, message), summary = await self._structured_with_summary(
             messages, model, request_params
         )
-        summary_text = (
-            Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
-        )
+        summary_text = Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
         await self.show_assistant_message(message=message, additional_message=summary_text)
         return result, message
 
fast_agent/agents/llm_decorator.py CHANGED
@@ -57,7 +57,6 @@ from fast_agent.types import PromptMessageExtended, RequestParams
 # Define a TypeVar for models
 ModelT = TypeVar("ModelT", bound=BaseModel)
 
-# Define a TypeVar for AugmentedLLM and its subclasses
 LLM = TypeVar("LLM", bound=FastAgentLLMProtocol)
 
 
@@ -533,9 +532,7 @@ class LlmDecorator(AgentProtocol):
         if isinstance(block, EmbeddedResource):
             resource = getattr(block, "resource", None)
             mime = getattr(resource, "mimeType", None)
-            if isinstance(resource, TextResourceContents) or (
-                mime and is_text_mime_type(mime)
-            ):
+            if isinstance(resource, TextResourceContents) or (mime and is_text_mime_type(mime)):
                 return mime or "text/plain", "text"
             if mime and mime.startswith("image/"):
                 return mime, "vision"
@@ -593,9 +590,7 @@
             entries.append(metadata_text)
         return entries
 
-    def _build_removed_summary(
-        self, removed: List[_RemovedBlock]
-    ) -> RemovedContentSummary | None:
+    def _build_removed_summary(self, removed: List[_RemovedBlock]) -> RemovedContentSummary | None:
         if not removed:
             return None
 
fast_agent/agents/mcp_agent.py CHANGED
@@ -2,7 +2,7 @@
 Base Agent class that implements the AgentProtocol interface.
 
 This class provides default implementations of the standard agent methods
-and delegates operations to an attached AugmentedLLMProtocol instance.
+and delegates operations to an attached FastAgentLLMProtocol instance.
 """
 
 import asyncio
@@ -41,7 +41,7 @@ from fast_agent.constants import HUMAN_INPUT_TOOL_NAME
 from fast_agent.core.exceptions import PromptExitError
 from fast_agent.core.logging.logger import get_logger
 from fast_agent.interfaces import FastAgentLLMProtocol
-from fast_agent.mcp.mcp_aggregator import MCPAggregator
+from fast_agent.mcp.mcp_aggregator import MCPAggregator, ServerStatus
 from fast_agent.tools.elicitation import (
     get_elicitation_tool,
     run_elicitation_form,
@@ -52,7 +52,6 @@ from fast_agent.types import PromptMessageExtended, RequestParams
 # Define a TypeVar for models
 ModelT = TypeVar("ModelT", bound=BaseModel)
 
-# Define a TypeVar for AugmentedLLM and its subclasses
 LLM = TypeVar("LLM", bound=FastAgentLLMProtocol)
 
 if TYPE_CHECKING:
@@ -67,7 +66,7 @@ class McpAgent(ABC, ToolAgent):
     A base Agent class that implements the AgentProtocol interface.
 
     This class provides default implementations of the standard agent methods
-    and delegates LLM operations to an attached AugmentedLLMProtocol instance.
+    and delegates LLM operations to an attached FastAgentLLMProtocol instance.
     """
 
     def __init__(
@@ -164,6 +163,12 @@
         """
         await self._aggregator.close()
 
+    async def get_server_status(self) -> Dict[str, ServerStatus]:
+        """Expose server status details for UI and diagnostics consumers."""
+        if not self._aggregator:
+            return {}
+        return await self._aggregator.collect_server_status()
+
     @property
     def initialized(self) -> bool:
         """Check if both the agent and aggregator are initialized."""
fast_agent/cli/commands/auth.py CHANGED
@@ -293,7 +293,9 @@ def main(
 
 @app.command()
 def login(
-    target: str = typer.Argument(..., help="Server name (from config) or identity (base URL)"),
+    target: Optional[str] = typer.Argument(
+        None, help="Server name (from config) or identity (base URL)"
+    ),
     transport: Optional[str] = typer.Option(
         None, "--transport", help="Transport for identity mode: http or sse"
     ),
@@ -311,6 +313,17 @@ def login(
     cfg = None
     resolved_transport = None
 
+    if target is None or not target.strip():
+        typer.echo("Provide a server name or identity URL to log in.")
+        typer.echo(
+            "Example: `fast-agent auth login my-server` "
+            "or `fast-agent auth login https://example.com`."
+        )
+        typer.echo("Run `fast-agent auth login --help` for more details.")
+        raise typer.Exit(1)
+
+    target = target.strip()
+
     if "://" in target:
         # Identity mode
         base = _derive_base_server_url(target)
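The guard above relies on the standard typer idiom for optional positional arguments: a `None` default stops typer from emitting its own usage error, so the command can print friendlier guidance. A standalone sketch of the idiom (illustrative names, not fast-agent code):

```python
from typing import Optional

import typer

app = typer.Typer()


@app.command()
def login(
    target: Optional[str] = typer.Argument(None, help="Server name or URL"),
) -> None:
    # With a None default, a missing argument reaches the function body
    # instead of triggering typer's built-in error.
    if target is None or not target.strip():
        typer.echo("Provide a server name or identity URL to log in.")
        raise typer.Exit(1)
    typer.echo(f"Logging in to {target.strip()}")
```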
fast_agent/core/direct_factory.py CHANGED
@@ -153,14 +153,6 @@ async def create_agents_by_type(
 
     # Get all agents of the specified type
     for name, agent_data in agents_dict.items():
-        logger.info(
-            f"Loaded {name}",
-            data={
-                "progress_action": ProgressAction.LOADED,
-                "agent_name": name,
-            },
-        )
-
         # Compare type string from config with Enum value
         if agent_data["type"] == agent_type.value:
             # Get common configuration
@@ -187,6 +179,16 @@
             )
             result_agents[name] = agent
 
+            # Log successful agent creation
+            logger.info(
+                f"Loaded {name}",
+                data={
+                    "progress_action": ProgressAction.LOADED,
+                    "agent_name": name,
+                    "target": name,
+                },
+            )
+
     elif agent_type == AgentType.CUSTOM:
         # Get the class to instantiate (support legacy 'agent_class' and new 'cls')
         cls = agent_data.get("agent_class") or agent_data.get("cls")
@@ -212,6 +214,16 @@
             )
             result_agents[name] = agent
 
+            # Log successful agent creation
+            logger.info(
+                f"Loaded {name}",
+                data={
+                    "progress_action": ProgressAction.LOADED,
+                    "agent_name": name,
+                    "target": name,
+                },
+            )
+
     elif agent_type == AgentType.ORCHESTRATOR or agent_type == AgentType.ITERATIVE_PLANNER:
         # Get base params configured with model settings
         base_params = (
fast_agent/core/logging/listeners.py CHANGED
@@ -55,7 +55,8 @@ def convert_log_event(event: Event) -> "ProgressEvent | None":
         if progress_message:  # Only override if message is non-empty
             details = progress_message
 
-    elif "augmented_llm" in namespace:
+    # TODO: there must be a better way :D?!
+    elif "llm" in namespace:
         model = event_data.get("model", "")
 
         # For all augmented_llm events, put model info in details column
fast_agent/interfaces.py CHANGED
@@ -47,7 +47,7 @@ ModelT = TypeVar("ModelT", bound=BaseModel)
 
 
 class LLMFactoryProtocol(Protocol):
-    """Protocol for LLM factory functions that create AugmentedLLM instances."""
+    """Protocol for LLM factory functions that create FastAgentLLM instances."""
 
     def __call__(self, agent: "LlmAgentProtocol", **kwargs: Any) -> "FastAgentLLMProtocol": ...
 
@@ -59,7 +59,7 @@ class ModelFactoryFunctionProtocol(Protocol):
 
 
 class FastAgentLLMProtocol(Protocol):
-    """Protocol defining the interface for augmented LLMs"""
+    """Protocol defining the interface for LLMs"""
 
     async def structured(
         self,
fast_agent/llm/model_database.py CHANGED
@@ -164,7 +164,11 @@ class ModelDatabase:
     )
 
     # FIXME: xAI has not documented the max output tokens for Grok 4. Using Grok 3 as a placeholder. Will need to update when available (if ever)
-    GROK_4 = ModelParameters(context_window=256000, max_output_tokens=16385, tokenizes=XAI_VISION)
+    GROK_4 = ModelParameters(context_window=256000, max_output_tokens=16385, tokenizes=TEXT_ONLY)
+
+    GROK_4_VLM = ModelParameters(
+        context_window=2000000, max_output_tokens=16385, tokenizes=XAI_VISION
+    )
 
     # Source for Grok 3 max output: https://www.reddit.com/r/grok/comments/1j7209p/exploring_grok_3_beta_output_capacity_a_simple/
     # xAI does not document Grok 3 max output tokens, using the above source as a reference.
@@ -240,6 +244,8 @@
         "gemini-2.5-flash-preview-05-20": GEMINI_FLASH,
         "gemini-2.5-pro-preview-05-06": GEMINI_PRO,
         # xAI Grok Models
+        "grok-4-fast-reasoning": GROK_4_VLM,
+        "grok-4-fast-non-reasoning": GROK_4_VLM,
         "grok-4": GROK_4,
         "grok-4-0709": GROK_4,
         "grok-3": GROK_3,
fast_agent/llm/model_factory.py CHANGED
@@ -12,9 +12,6 @@ from fast_agent.llm.internal.slow import SlowLLM
 from fast_agent.llm.provider_types import Provider
 from fast_agent.types import RequestParams
 
-# from fast_agent.workflows.llm.augmented_llm_deepseek import DeekSeekAugmentedLLM
-
-
 # Type alias for LLM classes
 LLMClass = Union[Type[PassthroughLLM], Type[PlaybackLLM], Type[SilentLLM], Type[SlowLLM], type]
 
@@ -123,6 +120,8 @@ class ModelFactory:
         "kimi": "groq.moonshotai/kimi-k2-instruct-0905",
         "gpt-oss": "groq.openai/gpt-oss-120b",
         "gpt-oss-20b": "groq.openai/gpt-oss-20b",
+        "grok-4-fast": "xai.grok-4-fast-non-reasoning",
+        "grok-4-fast-reasoning": "xai.grok-4-fast-reasoning",
     }
 
     @staticmethod
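The new entries map short aliases to provider-qualified model ids. A hedged sketch of how such a table is consumed (a plain dict lookup; `resolve_model` is illustrative, not the factory's actual API):

```python
MODEL_ALIASES = {
    "grok-4-fast": "xai.grok-4-fast-non-reasoning",
    "grok-4-fast-reasoning": "xai.grok-4-fast-reasoning",
}


def resolve_model(name: str) -> str:
    # Fall back to the raw name when no alias matches.
    return MODEL_ALIASES.get(name, name)


assert resolve_model("grok-4-fast") == "xai.grok-4-fast-non-reasoning"
```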
fast_agent/llm/provider/anthropic/llm_anthropic.py CHANGED
@@ -1,7 +1,7 @@
 import json
 from typing import Any, List, Tuple, Type, Union, cast
 
-from anthropic import AsyncAnthropic, AuthenticationError
+from anthropic import APIError, AsyncAnthropic, AuthenticationError
 from anthropic.lib.streaming import AsyncMessageStream
 from anthropic.types import (
     Message,
@@ -22,6 +22,7 @@ from mcp.types import (
     TextContent,
 )
 
+from fast_agent.constants import FAST_AGENT_ERROR_CHANNEL
 from fast_agent.core.exceptions import ProviderKeyError
 from fast_agent.core.logging.logger import get_logger
 from fast_agent.core.prompt import Prompt
@@ -36,6 +37,7 @@ from fast_agent.llm.provider.anthropic.multipart_converter_anthropic import (
 )
 from fast_agent.llm.provider_types import Provider
 from fast_agent.llm.usage_tracking import TurnUsage
+from fast_agent.mcp.helpers.content_helpers import text_content
 from fast_agent.types import PromptMessageExtended
 from fast_agent.types.llm_stop_reason import LlmStopReason
 
@@ -243,47 +245,102 @@ class AnthropicLLM(FastAgentLLM[MessageParam, Message]):
         # Track estimated output tokens by counting text chunks
         estimated_tokens = 0
 
-        # Process the raw event stream to get token counts
-        async for event in stream:
-            # Count tokens in real-time from content_block_delta events
-            if (
-                event.type == "content_block_delta"
-                and hasattr(event, "delta")
-                and event.delta.type == "text_delta"
-            ):
-                # Use base class method for token estimation and progress emission
-                estimated_tokens = self._update_streaming_progress(
-                    event.delta.text, model, estimated_tokens
+        try:
+            # Process the raw event stream to get token counts
+            async for event in stream:
+                # Count tokens in real-time from content_block_delta events
+                if (
+                    event.type == "content_block_delta"
+                    and hasattr(event, "delta")
+                    and event.delta.type == "text_delta"
+                ):
+                    # Use base class method for token estimation and progress emission
+                    estimated_tokens = self._update_streaming_progress(
+                        event.delta.text, model, estimated_tokens
+                    )
+
+                # Also check for final message_delta events with actual usage info
+                elif (
+                    event.type == "message_delta"
+                    and hasattr(event, "usage")
+                    and event.usage.output_tokens
+                ):
+                    actual_tokens = event.usage.output_tokens
+                    # Emit final progress with actual token count
+                    token_str = str(actual_tokens).rjust(5)
+                    data = {
+                        "progress_action": ProgressAction.STREAMING,
+                        "model": model,
+                        "agent_name": self.name,
+                        "chat_turn": self.chat_turn(),
+                        "details": token_str.strip(),
+                    }
+                    logger.info("Streaming progress", data=data)
+
+            # Get the final message with complete usage data
+            message = await stream.get_final_message()
+
+            # Log final usage information
+            if hasattr(message, "usage") and message.usage:
+                logger.info(
+                    f"Streaming complete - Model: {model}, Input tokens: {message.usage.input_tokens}, Output tokens: {message.usage.output_tokens}"
                 )
 
-            # Also check for final message_delta events with actual usage info
-            elif (
-                event.type == "message_delta"
-                and hasattr(event, "usage")
-                and event.usage.output_tokens
-            ):
-                actual_tokens = event.usage.output_tokens
-                # Emit final progress with actual token count
-                token_str = str(actual_tokens).rjust(5)
-                data = {
-                    "progress_action": ProgressAction.STREAMING,
-                    "model": model,
-                    "agent_name": self.name,
-                    "chat_turn": self.chat_turn(),
-                    "details": token_str.strip(),
-                }
-                logger.info("Streaming progress", data=data)
-
-        # Get the final message with complete usage data
-        message = await stream.get_final_message()
-
-        # Log final usage information
-        if hasattr(message, "usage") and message.usage:
-            logger.info(
-                f"Streaming complete - Model: {model}, Input tokens: {message.usage.input_tokens}, Output tokens: {message.usage.output_tokens}"
+            return message
+        except APIError as error:
+            logger.error("Streaming APIError during Anthropic completion", exc_info=error)
+            raise  # Re-raise to be handled by _anthropic_completion
+        except Exception as error:
+            logger.error("Unexpected error during Anthropic stream processing", exc_info=error)
+            # Convert to APIError for consistent handling
+            raise APIError(f"Stream processing error: {str(error)}") from error
+
+    def _stream_failure_response(self, error: APIError, model_name: str) -> PromptMessageExtended:
+        """Convert streaming API errors into a graceful assistant reply."""
+
+        provider_label = (
+            self.provider.value if isinstance(self.provider, Provider) else str(self.provider)
+        )
+        detail = getattr(error, "message", None) or str(error)
+        detail = detail.strip() if isinstance(detail, str) else ""
+
+        parts: list[str] = [f"{provider_label} request failed"]
+        if model_name:
+            parts.append(f"for model '{model_name}'")
+        code = getattr(error, "code", None)
+        if code:
+            parts.append(f"(code: {code})")
+        status = getattr(error, "status_code", None)
+        if status:
+            parts.append(f"(status={status})")
+
+        message = " ".join(parts)
+        if detail:
+            message = f"{message}: {detail}"
+
+        user_summary = " ".join(message.split()) if message else ""
+        if user_summary and len(user_summary) > 280:
+            user_summary = user_summary[:277].rstrip() + "..."
+
+        if user_summary:
+            assistant_text = f"I hit an internal error while calling the model: {user_summary}"
+            if not assistant_text.endswith((".", "!", "?")):
+                assistant_text += "."
+            assistant_text += " See fast-agent-error for additional details."
+        else:
+            assistant_text = (
+                "I hit an internal error while calling the model; see fast-agent-error for details."
             )
 
-        return message
+        assistant_block = text_content(assistant_text)
+        error_block = text_content(message)
+
+        return PromptMessageExtended(
+            role="assistant",
+            content=[assistant_block],
+            channels={FAST_AGENT_ERROR_CHANNEL: [error_block]},
+            stop_reason=LlmStopReason.ERROR,
+        )
 
     async def _anthropic_completion(
         self,
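For reference, `_stream_failure_response` produces an assistant reply with a short user-facing summary in `content` and the full error text in the error channel. A hedged sketch of the resulting shape (field values illustrative):

```python
reply = PromptMessageExtended(
    role="assistant",
    content=[text_content("I hit an internal error while calling the model: ...")],
    channels={FAST_AGENT_ERROR_CHANNEL: [text_content("anthropic request failed (status=529)")]},
    stop_reason=LlmStopReason.ERROR,
)

# Downstream code can branch on the stop reason instead of parsing text.
assert reply.stop_reason is LlmStopReason.ERROR
```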
@@ -369,9 +426,13 @@
 
         logger.debug(f"{arguments}")
         # Use streaming API with helper
-        async with anthropic.messages.stream(**arguments) as stream:
-            # Process the stream
-            response = await self._process_stream(stream, model)
+        try:
+            async with anthropic.messages.stream(**arguments) as stream:
+                # Process the stream
+                response = await self._process_stream(stream, model)
+        except APIError as error:
+            logger.error("Streaming APIError during Anthropic completion", exc_info=error)
+            return self._stream_failure_response(error, model)
 
         # Track usage if response is valid and has usage data
         if (
@@ -393,27 +454,11 @@
                 "The configured Anthropic API key was rejected.\nPlease check that your API key is valid and not expired.",
             ) from response
         elif isinstance(response, BaseException):
-            error_details = str(response)
-            logger.error(f"Error: {error_details}", data=BaseException)
-
-            # Try to extract more useful information for API errors
-            if hasattr(response, "status_code") and hasattr(response, "response"):
-                try:
-                    error_json = response.response.json()
-                    error_details = f"Error code: {response.status_code} - {error_json}"
-                except:  # noqa: E722
-                    error_details = f"Error code: {response.status_code} - {str(response)}"
-
-            # Convert other errors to text response
-            error_message = f"Error during generation: {error_details}"
-            response = Message(
-                id="error",
-                model="error",
-                role="assistant",
-                type="message",
-                content=[TextBlock(type="text", text=error_message)],
-                stop_reason="end_turn",
-                usage=Usage(input_tokens=0, output_tokens=0),
+            # This path shouldn't be reached anymore since we handle APIError above,
+            # but keeping for backward compatibility
+            logger.error(f"Unexpected error type: {type(response).__name__}", exc_info=response)
+            return self._stream_failure_response(
+                APIError(f"Unexpected error: {str(response)}"), model
             )
 
         logger.debug(
fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py CHANGED
@@ -273,7 +273,7 @@ class AnthropicConverter:
                 title=title,
                 source=URLPDFSourceParam(type="url", url=uri_str),
             )
-        elif hasattr(resource_content, "blob"):
+        elif isinstance(resource_content, BlobResourceContents):
             return DocumentBlockParam(
                 type="document",
                 title=title,
@@ -362,8 +362,9 @@
     Returns:
         A TextBlockParam with formatted SVG content
     """
-    if hasattr(resource_content, "text"):
-        svg_content = resource_content.text
+    # Use get_text helper to extract text from various content types
+    svg_content = get_text(resource_content)
+    if svg_content:
        return TextBlockParam(type="text", text=f"```xml\n{svg_content}\n```")
     return TextBlockParam(type="text", text="[SVG content could not be extracted]")
 
fast_agent/llm/provider/bedrock/llm_bedrock.py CHANGED
@@ -126,7 +126,7 @@ class ModelCapabilities:
 
 class BedrockLLM(FastAgentLLM[BedrockMessageParam, BedrockMessage]):
     """
-    AWS Bedrock implementation of AugmentedLLM using the Converse API.
+    AWS Bedrock implementation of FastAgentLLM using the Converse API.
     Supports all Bedrock models including Nova, Claude, Meta, etc.
     """
 
fast_agent/llm/provider/google/google_converter.py CHANGED
@@ -142,7 +142,6 @@ class GoogleConverter:
                 assert isinstance(part_content, EmbeddedResource)
                 if (
                     "application/pdf" == part_content.resource.mimeType
-                    and hasattr(part_content.resource, "blob")
                     and isinstance(part_content.resource, BlobResourceContents)
                 ):
                     pdf_bytes = base64.b64decode(part_content.resource.blob)
@@ -154,31 +153,15 @@
                     )
                 else:
                     # Check if the resource itself has text content
-                    resource_text = None
-                    if hasattr(part_content.resource, "text"):  # Direct text attribute
-                        resource_text = part_content.resource.text
-                    # Example: if EmbeddedResource wraps a TextContent-like object in its 'resource' field
-                    elif (
-                        hasattr(part_content.resource, "type")
-                        and part_content.resource.type == "text"
-                        and hasattr(part_content.resource, "text")
-                    ):
-                        resource_text = get_text(part_content.resource)
+                    # Use get_text helper to extract text from various content types
+                    resource_text = get_text(part_content.resource)
 
                     if resource_text is not None:
                         parts.append(types.Part.from_text(text=resource_text))
                     else:
                         # Fallback for other binary types or types without direct text
-                        uri_str = (
-                            part_content.resource.uri
-                            if hasattr(part_content.resource, "uri")
-                            else "unknown_uri"
-                        )
-                        mime_str = (
-                            part_content.resource.mimeType
-                            if hasattr(part_content.resource, "mimeType")
-                            else "unknown_mime"
-                        )
+                        uri_str = getattr(part_content.resource, "uri", "unknown_uri")
+                        mime_str = getattr(part_content.resource, "mimeType", "unknown_mime")
                         parts.append(
                             types.Part.from_text(
                                 text=f"[Resource: {uri_str}, MIME: {mime_str}]"
@@ -291,30 +274,14 @@
                         textual_outputs.append(f"[Error processing PDF from tool result: {e}]")
                 else:
                     # Check if the resource itself has text content
-                    resource_text = None
-                    if hasattr(item.resource, "text"):  # Direct text attribute
-                        resource_text = item.resource.text
-                    # Example: if EmbeddedResource wraps a TextContent-like object in its 'resource' field
-                    elif (
-                        hasattr(item.resource, "type")
-                        and item.resource.type == "text"
-                        and hasattr(item.resource, "text")
-                    ):
-                        resource_text = get_text(item.resource)
+                    # Use get_text helper to extract text from various content types
+                    resource_text = get_text(item.resource)
 
                     if resource_text is not None:
                         textual_outputs.append(resource_text)
                     else:
-                        uri_str = (
-                            item.resource.uri
-                            if hasattr(item.resource, "uri")
-                            else "unknown_uri"
-                        )
-                        mime_str = (
-                            item.resource.mimeType
-                            if hasattr(item.resource, "mimeType")
-                            else "unknown_mime"
-                        )
+                        uri_str = getattr(item.resource, "uri", "unknown_uri")
+                        mime_str = getattr(item.resource, "mimeType", "unknown_mime")
                         textual_outputs.append(
                             f"[Unhandled Resource in Tool: {uri_str}, MIME: {mime_str}]"
                         )
fast_agent/llm/provider/google/llm_google_native.py CHANGED
@@ -36,10 +36,8 @@ GOOGLE_EXCLUDE_FIELDS = {
     FastAgentLLM.PARAM_MESSAGES,  # Handled by contents
     FastAgentLLM.PARAM_MODEL,  # Handled during client/call setup
    FastAgentLLM.PARAM_SYSTEM_PROMPT,  # Handled by system_instruction in config
-    # AugmentedLLM.PARAM_PARALLEL_TOOL_CALLS, # Handled by tool_config in config
-    FastAgentLLM.PARAM_USE_HISTORY,  # Handled by AugmentedLLM base / this class's logic
+    FastAgentLLM.PARAM_USE_HISTORY,  # Handled by FastAgentLLM base / this class's logic
     FastAgentLLM.PARAM_MAX_ITERATIONS,  # Handled by this class's loop
-    # Add any other OpenAI-specific params not applicable to google.genai
     FastAgentLLM.PARAM_MCP_METADATA,
 }.union(FastAgentLLM.BASE_EXCLUDE_FIELDS)
 
fast_agent/llm/provider/openai/llm_azure.py CHANGED
@@ -23,7 +23,7 @@ DEFAULT_AZURE_API_VERSION = "2024-10-21"
 
 class AzureOpenAILLM(OpenAILLM):
     """
-    Azure OpenAI implementation extending OpenAIAugmentedLLM.
+    Azure OpenAI implementation extending OpenAILLM.
     Handles both API Key and DefaultAzureCredential authentication.
     """
 