fast-agent-mcp 0.3.8__py3-none-any.whl → 0.3.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of fast-agent-mcp might be problematic.
- fast_agent/agents/llm_agent.py +24 -0
- fast_agent/agents/mcp_agent.py +7 -1
- fast_agent/core/direct_factory.py +20 -8
- fast_agent/llm/provider/anthropic/llm_anthropic.py +107 -62
- fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py +4 -3
- fast_agent/llm/provider/google/google_converter.py +8 -41
- fast_agent/llm/provider/openai/llm_openai.py +3 -3
- fast_agent/mcp/mcp_agent_client_session.py +45 -2
- fast_agent/mcp/mcp_aggregator.py +314 -33
- fast_agent/mcp/mcp_connection_manager.py +86 -10
- fast_agent/mcp/stdio_tracking_simple.py +59 -0
- fast_agent/mcp/streamable_http_tracking.py +309 -0
- fast_agent/mcp/transport_tracking.py +600 -0
- fast_agent/resources/examples/data-analysis/analysis.py +7 -3
- fast_agent/ui/console_display.py +22 -1
- fast_agent/ui/elicitation_style.py +7 -7
- fast_agent/ui/enhanced_prompt.py +21 -1
- fast_agent/ui/interactive_prompt.py +5 -0
- fast_agent/ui/mcp_display.py +708 -0
- {fast_agent_mcp-0.3.8.dist-info → fast_agent_mcp-0.3.10.dist-info}/METADATA +5 -5
- {fast_agent_mcp-0.3.8.dist-info → fast_agent_mcp-0.3.10.dist-info}/RECORD +24 -20
- {fast_agent_mcp-0.3.8.dist-info → fast_agent_mcp-0.3.10.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.3.8.dist-info → fast_agent_mcp-0.3.10.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.3.8.dist-info → fast_agent_mcp-0.3.10.dist-info}/licenses/LICENSE +0 -0
fast_agent/agents/llm_agent.py
CHANGED
```diff
@@ -27,7 +27,9 @@ from rich.text import Text
 
 from fast_agent.agents.agent_types import AgentConfig
 from fast_agent.agents.llm_decorator import LlmDecorator, ModelT
+from fast_agent.constants import FAST_AGENT_ERROR_CHANNEL
 from fast_agent.context import Context
+from fast_agent.mcp.helpers.content_helpers import get_text
 from fast_agent.types import PromptMessageExtended, RequestParams
 from fast_agent.types.llm_stop_reason import LlmStopReason
 from fast_agent.ui.console_display import ConsoleDisplay
@@ -123,6 +125,28 @@ class LlmAgent(LlmDecorator):
                     Text("The assistant requested tool calls", style="dim green italic")
                 )
 
+            case LlmStopReason.ERROR:
+                # Check if there's detailed error information in the error channel
+                if message.channels and FAST_AGENT_ERROR_CHANNEL in message.channels:
+                    error_blocks = message.channels[FAST_AGENT_ERROR_CHANNEL]
+                    if error_blocks:
+                        # Extract text from the error block using the helper function
+                        error_text = get_text(error_blocks[0])
+                        if error_text:
+                            additional_segments.append(
+                                Text(f"\n\nError details: {error_text}", style="dim red italic")
+                            )
+                        else:
+                            # Fallback if we couldn't extract text
+                            additional_segments.append(
+                                Text(f"\n\nError details: {str(error_blocks[0])}", style="dim red italic")
+                            )
+                else:
+                    # Fallback if no detailed error is available
+                    additional_segments.append(
+                        Text("\n\nAn error occurred during generation.", style="dim red italic")
+                    )
+
             case _:
                 if message.stop_reason:
                     additional_segments.append(
```
fast_agent/agents/mcp_agent.py
CHANGED
```diff
@@ -41,7 +41,7 @@ from fast_agent.constants import HUMAN_INPUT_TOOL_NAME
 from fast_agent.core.exceptions import PromptExitError
 from fast_agent.core.logging.logger import get_logger
 from fast_agent.interfaces import FastAgentLLMProtocol
-from fast_agent.mcp.mcp_aggregator import MCPAggregator
+from fast_agent.mcp.mcp_aggregator import MCPAggregator, ServerStatus
 from fast_agent.tools.elicitation import (
     get_elicitation_tool,
     run_elicitation_form,
@@ -163,6 +163,12 @@ class McpAgent(ABC, ToolAgent):
         """
         await self._aggregator.close()
 
+    async def get_server_status(self) -> Dict[str, ServerStatus]:
+        """Expose server status details for UI and diagnostics consumers."""
+        if not self._aggregator:
+            return {}
+        return await self._aggregator.collect_server_status()
+
     @property
     def initialized(self) -> bool:
         """Check if both the agent and aggregator are initialized."""
```
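A usage sketch for the new diagnostics hook (`show_server_health` is hypothetical, and `repr` stands in since `ServerStatus` fields are not shown in this diff):

```python
async def show_server_health(agent) -> None:
    # Returns {} when the aggregator hasn't been created yet
    status_by_server = await agent.get_server_status()
    for server_name, status in status_by_server.items():
        print(f"{server_name}: {status!r}")
```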
fast_agent/core/direct_factory.py
CHANGED
```diff
@@ -153,14 +153,6 @@ async def create_agents_by_type(
 
     # Get all agents of the specified type
     for name, agent_data in agents_dict.items():
-        logger.info(
-            f"Loaded {name}",
-            data={
-                "progress_action": ProgressAction.LOADED,
-                "agent_name": name,
-            },
-        )
-
         # Compare type string from config with Enum value
         if agent_data["type"] == agent_type.value:
             # Get common configuration
@@ -187,6 +179,16 @@
             )
             result_agents[name] = agent
 
+            # Log successful agent creation
+            logger.info(
+                f"Loaded {name}",
+                data={
+                    "progress_action": ProgressAction.LOADED,
+                    "agent_name": name,
+                    "target": name,
+                },
+            )
+
         elif agent_type == AgentType.CUSTOM:
             # Get the class to instantiate (support legacy 'agent_class' and new 'cls')
             cls = agent_data.get("agent_class") or agent_data.get("cls")
@@ -212,6 +214,16 @@
             )
             result_agents[name] = agent
 
+            # Log successful agent creation
+            logger.info(
+                f"Loaded {name}",
+                data={
+                    "progress_action": ProgressAction.LOADED,
+                    "agent_name": name,
+                    "target": name,
+                },
+            )
+
         elif agent_type == AgentType.ORCHESTRATOR or agent_type == AgentType.ITERATIVE_PLANNER:
             # Get base params configured with model settings
             base_params = (
```
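Net effect of the `direct_factory.py` changes: the `Loaded {name}` progress event now fires only after an agent of a matching type is actually constructed, rather than once per entry in the config dictionary before the type check, and the event payload gains a `target` field alongside `agent_name`.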
fast_agent/llm/provider/anthropic/llm_anthropic.py
CHANGED
```diff
@@ -1,7 +1,7 @@
 import json
 from typing import Any, List, Tuple, Type, Union, cast
 
-from anthropic import AsyncAnthropic, AuthenticationError
+from anthropic import APIError, AsyncAnthropic, AuthenticationError
 from anthropic.lib.streaming import AsyncMessageStream
 from anthropic.types import (
     Message,
@@ -22,6 +22,7 @@ from mcp.types import (
     TextContent,
 )
 
+from fast_agent.constants import FAST_AGENT_ERROR_CHANNEL
 from fast_agent.core.exceptions import ProviderKeyError
 from fast_agent.core.logging.logger import get_logger
 from fast_agent.core.prompt import Prompt
@@ -36,6 +37,7 @@ from fast_agent.llm.provider.anthropic.multipart_converter_anthropic import (
 )
 from fast_agent.llm.provider_types import Provider
 from fast_agent.llm.usage_tracking import TurnUsage
+from fast_agent.mcp.helpers.content_helpers import text_content
 from fast_agent.types import PromptMessageExtended
 from fast_agent.types.llm_stop_reason import LlmStopReason
 
@@ -243,47 +245,102 @@ class AnthropicLLM(FastAgentLLM[MessageParam, Message]):
         # Track estimated output tokens by counting text chunks
         estimated_tokens = 0
 
-        [removed lines not captured in this view]
+        try:
+            # Process the raw event stream to get token counts
+            async for event in stream:
+                # Count tokens in real-time from content_block_delta events
+                if (
+                    event.type == "content_block_delta"
+                    and hasattr(event, "delta")
+                    and event.delta.type == "text_delta"
+                ):
+                    # Use base class method for token estimation and progress emission
+                    estimated_tokens = self._update_streaming_progress(
+                        event.delta.text, model, estimated_tokens
+                    )
+
+                # Also check for final message_delta events with actual usage info
+                elif (
+                    event.type == "message_delta"
+                    and hasattr(event, "usage")
+                    and event.usage.output_tokens
+                ):
+                    actual_tokens = event.usage.output_tokens
+                    # Emit final progress with actual token count
+                    token_str = str(actual_tokens).rjust(5)
+                    data = {
+                        "progress_action": ProgressAction.STREAMING,
+                        "model": model,
+                        "agent_name": self.name,
+                        "chat_turn": self.chat_turn(),
+                        "details": token_str.strip(),
+                    }
+                    logger.info("Streaming progress", data=data)
+
+            # Get the final message with complete usage data
+            message = await stream.get_final_message()
+
+            # Log final usage information
+            if hasattr(message, "usage") and message.usage:
+                logger.info(
+                    f"Streaming complete - Model: {model}, Input tokens: {message.usage.input_tokens}, Output tokens: {message.usage.output_tokens}"
                 )
 
-        [removed lines not captured in this view]
+            return message
+        except APIError as error:
+            logger.error("Streaming APIError during Anthropic completion", exc_info=error)
+            raise  # Re-raise to be handled by _anthropic_completion
+        except Exception as error:
+            logger.error("Unexpected error during Anthropic stream processing", exc_info=error)
+            # Convert to APIError for consistent handling
+            raise APIError(f"Stream processing error: {str(error)}") from error
+
+    def _stream_failure_response(self, error: APIError, model_name: str) -> PromptMessageExtended:
+        """Convert streaming API errors into a graceful assistant reply."""
+
+        provider_label = (
+            self.provider.value if isinstance(self.provider, Provider) else str(self.provider)
+        )
+        detail = getattr(error, "message", None) or str(error)
+        detail = detail.strip() if isinstance(detail, str) else ""
+
+        parts: list[str] = [f"{provider_label} request failed"]
+        if model_name:
+            parts.append(f"for model '{model_name}'")
+        code = getattr(error, "code", None)
+        if code:
+            parts.append(f"(code: {code})")
+        status = getattr(error, "status_code", None)
+        if status:
+            parts.append(f"(status={status})")
+
+        message = " ".join(parts)
+        if detail:
+            message = f"{message}: {detail}"
+
+        user_summary = " ".join(message.split()) if message else ""
+        if user_summary and len(user_summary) > 280:
+            user_summary = user_summary[:277].rstrip() + "..."
+
+        if user_summary:
+            assistant_text = f"I hit an internal error while calling the model: {user_summary}"
+            if not assistant_text.endswith((".", "!", "?")):
+                assistant_text += "."
+            assistant_text += " See fast-agent-error for additional details."
+        else:
+            assistant_text = (
+                "I hit an internal error while calling the model; see fast-agent-error for details."
             )
 
-        [removed line not captured in this view]
+        assistant_block = text_content(assistant_text)
+        error_block = text_content(message)
+
+        return PromptMessageExtended(
+            role="assistant",
+            content=[assistant_block],
+            channels={FAST_AGENT_ERROR_CHANNEL: [error_block]},
+            stop_reason=LlmStopReason.ERROR,
+        )
 
     async def _anthropic_completion(
         self,
@@ -369,9 +426,13 @@
 
         logger.debug(f"{arguments}")
         # Use streaming API with helper
-        [removed lines not captured in this view]
+        try:
+            async with anthropic.messages.stream(**arguments) as stream:
+                # Process the stream
+                response = await self._process_stream(stream, model)
+        except APIError as error:
+            logger.error("Streaming APIError during Anthropic completion", exc_info=error)
+            return self._stream_failure_response(error, model)
 
         # Track usage if response is valid and has usage data
         if (
@@ -393,27 +454,11 @@
                 "The configured Anthropic API key was rejected.\nPlease check that your API key is valid and not expired.",
             ) from response
         elif isinstance(response, BaseException):
-            [removed lines not captured in this view]
-            try:
-                error_json = response.response.json()
-                error_details = f"Error code: {response.status_code} - {error_json}"
-            except:  # noqa: E722
-                error_details = f"Error code: {response.status_code} - {str(response)}"
-
-            # Convert other errors to text response
-            error_message = f"Error during generation: {error_details}"
-            response = Message(
-                id="error",
-                model="error",
-                role="assistant",
-                type="message",
-                content=[TextBlock(type="text", text=error_message)],
-                stop_reason="end_turn",
-                usage=Usage(input_tokens=0, output_tokens=0),
+            # This path shouldn't be reached anymore since we handle APIError above,
+            # but keeping for backward compatibility
+            logger.error(f"Unexpected error type: {type(response).__name__}", exc_info=response)
+            return self._stream_failure_response(
+                APIError(f"Unexpected error: {str(response)}"), model
             )
 
         logger.debug(
```
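The core of the change, condensed to a sketch (not the library's verbatim code; the `llm` object, client handle, and argument dict are assumptions, while `_process_stream` and `_stream_failure_response` are the helpers shown in the diff):

```python
from anthropic import APIError, AsyncAnthropic


async def complete_or_degrade(llm, client: AsyncAnthropic, arguments: dict, model: str):
    """Sketch: turn provider failures into an assistant-visible reply."""
    try:
        async with client.messages.stream(**arguments) as stream:
            return await llm._process_stream(stream, model)
    except APIError as error:
        # One structured reply: short summary in the content, full detail in
        # the fast-agent-error channel, stop_reason set to ERROR.
        return llm._stream_failure_response(error, model)
```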
fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py
CHANGED
```diff
@@ -273,7 +273,7 @@ class AnthropicConverter:
                 title=title,
                 source=URLPDFSourceParam(type="url", url=uri_str),
             )
-        elif [condition not captured in this view]
+        elif isinstance(resource_content, BlobResourceContents):
             return DocumentBlockParam(
                 type="document",
                 title=title,
@@ -362,8 +362,9 @@ class AnthropicConverter:
         Returns:
             A TextBlockParam with formatted SVG content
         """
-        [removed lines not captured in this view]
+        # Use get_text helper to extract text from various content types
+        svg_content = get_text(resource_content)
+        if svg_content:
             return TextBlockParam(type="text", text=f"```xml\n{svg_content}\n```")
         return TextBlockParam(type="text", text="[SVG content could not be extracted]")
```
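Both this converter and the Google converter below now route text extraction through the shared `get_text` helper instead of probing resource attributes by hand. A small sketch of the idea (values are illustrative, and it assumes `get_text` handles `TextResourceContents` as these changes imply):

```python
from mcp.types import TextResourceContents

from fast_agent.mcp.helpers.content_helpers import get_text

resource = TextResourceContents(
    uri="file:///diagram.svg",
    mimeType="image/svg+xml",
    text="<svg>...</svg>",
)
# get_text understands the common MCP content shapes, so callers no longer
# need hasattr/type checks before reading `.text`.
assert get_text(resource) == "<svg>...</svg>"
```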
fast_agent/llm/provider/google/google_converter.py
CHANGED
```diff
@@ -142,7 +142,6 @@ class GoogleConverter:
                 assert isinstance(part_content, EmbeddedResource)
                 if (
                     "application/pdf" == part_content.resource.mimeType
-                    and hasattr(part_content.resource, "blob")
                     and isinstance(part_content.resource, BlobResourceContents)
                 ):
                     pdf_bytes = base64.b64decode(part_content.resource.blob)
@@ -154,31 +153,15 @@
                     )
                 else:
                     # Check if the resource itself has text content
-                    [removed lines not captured in this view]
-                        resource_text = part_content.resource.text
-                    # Example: if EmbeddedResource wraps a TextContent-like object in its 'resource' field
-                    elif (
-                        hasattr(part_content.resource, "type")
-                        and part_content.resource.type == "text"
-                        and hasattr(part_content.resource, "text")
-                    ):
-                        resource_text = get_text(part_content.resource)
+                    # Use get_text helper to extract text from various content types
+                    resource_text = get_text(part_content.resource)
 
                     if resource_text is not None:
                         parts.append(types.Part.from_text(text=resource_text))
                     else:
                         # Fallback for other binary types or types without direct text
-                        uri_str = (
-                            [expression not captured in this view]
-                            if hasattr(part_content.resource, "uri")
-                            else "unknown_uri"
-                        )
-                        mime_str = (
-                            part_content.resource.mimeType
-                            if hasattr(part_content.resource, "mimeType")
-                            else "unknown_mime"
-                        )
+                        uri_str = getattr(part_content.resource, "uri", "unknown_uri")
+                        mime_str = getattr(part_content.resource, "mimeType", "unknown_mime")
                         parts.append(
                             types.Part.from_text(
                                 text=f"[Resource: {uri_str}, MIME: {mime_str}]"
@@ -291,30 +274,14 @@
                             textual_outputs.append(f"[Error processing PDF from tool result: {e}]")
                         else:
                             # Check if the resource itself has text content
-                            [removed lines not captured in this view]
-                                resource_text = item.resource.text
-                            # Example: if EmbeddedResource wraps a TextContent-like object in its 'resource' field
-                            elif (
-                                hasattr(item.resource, "type")
-                                and item.resource.type == "text"
-                                and hasattr(item.resource, "text")
-                            ):
-                                resource_text = get_text(item.resource)
+                            # Use get_text helper to extract text from various content types
+                            resource_text = get_text(item.resource)
 
                             if resource_text is not None:
                                 textual_outputs.append(resource_text)
                             else:
-                                uri_str = (
-                                    [expression not captured in this view]
-                                    if hasattr(item.resource, "uri")
-                                    else "unknown_uri"
-                                )
-                                mime_str = (
-                                    item.resource.mimeType
-                                    if hasattr(item.resource, "mimeType")
-                                    else "unknown_mime"
-                                )
+                                uri_str = getattr(item.resource, "uri", "unknown_uri")
+                                mime_str = getattr(item.resource, "mimeType", "unknown_mime")
                                 textual_outputs.append(
                                     f"[Unhandled Resource in Tool: {uri_str}, MIME: {mime_str}]"
                                 )
```
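The `uri_str`/`mime_str` changes are behavior-preserving: each five-line `hasattr` ternary collapses to a single `getattr` with a default. A toy illustration (the `_Resource` class is invented for the example):

```python
class _Resource:
    mimeType = "application/pdf"  # stands in for item.resource


resource = _Resource()

# Before: conditional expression guarded by hasattr
mime_str = resource.mimeType if hasattr(resource, "mimeType") else "unknown_mime"
# After: getattr with a default, identical semantics
mime_str = getattr(resource, "mimeType", "unknown_mime")
```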
fast_agent/llm/provider/openai/llm_openai.py
CHANGED
```diff
@@ -344,12 +344,12 @@ class OpenAILLM(FastAgentLLM[ChatCompletionMessageParam, ChatCompletionMessage])
         model_name = self.default_request_params.model or DEFAULT_OPENAI_MODEL
 
         # Use basic streaming API
-        stream = await self._openai_client().chat.completions.create(**arguments)
-        # Process the stream
         try:
+            stream = await self._openai_client().chat.completions.create(**arguments)
+            # Process the stream
             response = await self._process_stream(stream, model_name)
         except APIError as error:
-            self.logger.error(" [rest of line not captured in this view]
+            self.logger.error("APIError during OpenAI completion", exc_info=error)
             return self._stream_failure_response(error, model_name)
         # Track usage if response is valid and has usage data
         if (
```
fast_agent/mcp/mcp_agent_client_session.py
CHANGED
```diff
@@ -37,6 +37,7 @@ from fast_agent.mcp.sampling import sample
 
 if TYPE_CHECKING:
     from fast_agent.config import MCPServerSettings
+    from fast_agent.mcp.transport_tracking import TransportChannelMetrics
 
 logger = get_logger(__name__)
 
@@ -90,6 +91,13 @@ class MCPAgentClientSession(ClientSession, ContextDependent):
         custom_elicitation_handler = kwargs.pop("elicitation_handler", None)
         # Extract optional context for ContextDependent mixin without passing it to ClientSession
         self._context = kwargs.pop("context", None)
+        # Extract transport metrics tracker if provided
+        self._transport_metrics: TransportChannelMetrics | None = kwargs.pop(
+            "transport_metrics", None
+        )
+
+        # Track the effective elicitation mode for diagnostics
+        self.effective_elicitation_mode: str | None = "none"
 
         version = version("fast-agent-mcp") or "dev"
         fast_agent: Implementation = Implementation(name="fast-agent-mcp", version=version)
@@ -131,7 +139,7 @@
             agent_config = AgentConfig(
                 name=self.agent_name or "unknown",
                 model=self.agent_model or "unknown",
-                elicitation_handler=None,
+                elicitation_handler=None,
             )
             elicitation_handler = resolve_elicitation_handler(
                 agent_config, context.config, self.server_config
@@ -141,12 +149,33 @@
                 pass
 
         # Fallback to forms handler only if factory resolution wasn't attempted
-        # If factory was attempted and returned None, respect that (means no elicitation capability)
         if elicitation_handler is None and not self.server_config:
             from fast_agent.mcp.elicitation_handlers import forms_elicitation_handler
 
             elicitation_handler = forms_elicitation_handler
 
+        # Determine effective elicitation mode for diagnostics
+        if self.server_config and getattr(self.server_config, "elicitation", None):
+            self.effective_elicitation_mode = self.server_config.elicitation.mode or "forms"
+        elif elicitation_handler is not None:
+            # Use global config if available to distinguish auto-cancel
+            try:
+                from fast_agent.context import get_current_context
+
+                context = get_current_context()
+                mode = None
+                if context and getattr(context, "config", None):
+                    elicitation_cfg = getattr(context.config, "elicitation", None)
+                    if isinstance(elicitation_cfg, dict):
+                        mode = elicitation_cfg.get("mode")
+                    else:
+                        mode = getattr(elicitation_cfg, "mode", None)
+                self.effective_elicitation_mode = (mode or "forms").lower()
+            except Exception:
+                self.effective_elicitation_mode = "forms"
+        else:
+            self.effective_elicitation_mode = "none"
+
         super().__init__(
             *args,
             **kwargs,
@@ -177,6 +206,7 @@
         progress_callback: ProgressFnT | None = None,
     ) -> ReceiveResultT:
         logger.debug("send_request: request=", data=request.model_dump())
+        request_id = getattr(self, "_request_id", None)
         try:
             result = await super().send_request(
                 request=request,
@@ -189,6 +219,7 @@
                 "send_request: response=",
                 data=result.model_dump() if result is not None else "no response returned",
             )
+            self._attach_transport_channel(request_id, result)
             return result
         except Exception as e:
             # Handle connection errors cleanly
@@ -207,6 +238,18 @@
             logger.error(f"send_request failed: {str(e)}")
             raise
 
+    def _attach_transport_channel(self, request_id, result) -> None:
+        if self._transport_metrics is None or request_id is None or result is None:
+            return
+        channel = self._transport_metrics.consume_response_channel(request_id)
+        if not channel:
+            return
+        try:
+            setattr(result, "transport_channel", channel)
+        except Exception:
+            # If result cannot be mutated, ignore silently
+            pass
+
     async def _received_notification(self, notification: ServerNotification) -> None:
         """
         Can be overridden by subclasses to handle a notification without needing
```