fast-agent-mcp 0.2.57__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fast_agent/__init__.py +127 -0
- fast_agent/agents/__init__.py +36 -0
- {mcp_agent/core → fast_agent/agents}/agent_types.py +2 -1
- fast_agent/agents/llm_agent.py +217 -0
- fast_agent/agents/llm_decorator.py +486 -0
- mcp_agent/agents/base_agent.py → fast_agent/agents/mcp_agent.py +377 -385
- fast_agent/agents/tool_agent.py +168 -0
- {mcp_agent → fast_agent}/agents/workflow/chain_agent.py +43 -33
- {mcp_agent → fast_agent}/agents/workflow/evaluator_optimizer.py +31 -35
- {mcp_agent → fast_agent}/agents/workflow/iterative_planner.py +56 -47
- {mcp_agent → fast_agent}/agents/workflow/orchestrator_models.py +4 -4
- {mcp_agent → fast_agent}/agents/workflow/parallel_agent.py +34 -41
- {mcp_agent → fast_agent}/agents/workflow/router_agent.py +54 -39
- {mcp_agent → fast_agent}/cli/__main__.py +5 -3
- {mcp_agent → fast_agent}/cli/commands/check_config.py +95 -66
- {mcp_agent → fast_agent}/cli/commands/go.py +20 -11
- {mcp_agent → fast_agent}/cli/commands/quickstart.py +4 -4
- {mcp_agent → fast_agent}/cli/commands/server_helpers.py +1 -1
- {mcp_agent → fast_agent}/cli/commands/setup.py +64 -134
- {mcp_agent → fast_agent}/cli/commands/url_parser.py +9 -8
- {mcp_agent → fast_agent}/cli/main.py +36 -16
- {mcp_agent → fast_agent}/cli/terminal.py +2 -2
- {mcp_agent → fast_agent}/config.py +13 -2
- fast_agent/constants.py +8 -0
- {mcp_agent → fast_agent}/context.py +24 -19
- {mcp_agent → fast_agent}/context_dependent.py +9 -5
- fast_agent/core/__init__.py +17 -0
- {mcp_agent → fast_agent}/core/agent_app.py +39 -36
- fast_agent/core/core_app.py +135 -0
- {mcp_agent → fast_agent}/core/direct_decorators.py +12 -26
- {mcp_agent → fast_agent}/core/direct_factory.py +95 -73
- {mcp_agent → fast_agent/core}/executor/executor.py +4 -5
- {mcp_agent → fast_agent}/core/fastagent.py +32 -32
- fast_agent/core/logging/__init__.py +5 -0
- {mcp_agent → fast_agent/core}/logging/events.py +3 -3
- {mcp_agent → fast_agent/core}/logging/json_serializer.py +1 -1
- {mcp_agent → fast_agent/core}/logging/listeners.py +85 -7
- {mcp_agent → fast_agent/core}/logging/logger.py +7 -7
- {mcp_agent → fast_agent/core}/logging/transport.py +10 -11
- fast_agent/core/prompt.py +9 -0
- {mcp_agent → fast_agent}/core/validation.py +4 -4
- fast_agent/event_progress.py +61 -0
- fast_agent/history/history_exporter.py +44 -0
- {mcp_agent → fast_agent}/human_input/__init__.py +9 -12
- {mcp_agent → fast_agent}/human_input/elicitation_handler.py +26 -8
- {mcp_agent → fast_agent}/human_input/elicitation_state.py +7 -7
- {mcp_agent → fast_agent}/human_input/simple_form.py +6 -4
- {mcp_agent → fast_agent}/human_input/types.py +1 -18
- fast_agent/interfaces.py +228 -0
- fast_agent/llm/__init__.py +9 -0
- mcp_agent/llm/augmented_llm.py → fast_agent/llm/fastagent_llm.py +128 -218
- fast_agent/llm/internal/passthrough.py +137 -0
- mcp_agent/llm/augmented_llm_playback.py → fast_agent/llm/internal/playback.py +29 -25
- mcp_agent/llm/augmented_llm_silent.py → fast_agent/llm/internal/silent.py +10 -17
- fast_agent/llm/internal/slow.py +38 -0
- {mcp_agent → fast_agent}/llm/memory.py +40 -30
- {mcp_agent → fast_agent}/llm/model_database.py +35 -2
- {mcp_agent → fast_agent}/llm/model_factory.py +103 -77
- fast_agent/llm/model_info.py +126 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/anthropic_utils.py +7 -7
- fast_agent/llm/provider/anthropic/llm_anthropic.py +603 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/multipart_converter_anthropic.py +79 -86
- fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
- fast_agent/llm/provider/bedrock/llm_bedrock.py +2192 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/google}/google_converter.py +66 -14
- fast_agent/llm/provider/google/llm_google_native.py +431 -0
- mcp_agent/llm/providers/augmented_llm_aliyun.py → fast_agent/llm/provider/openai/llm_aliyun.py +6 -7
- mcp_agent/llm/providers/augmented_llm_azure.py → fast_agent/llm/provider/openai/llm_azure.py +4 -4
- mcp_agent/llm/providers/augmented_llm_deepseek.py → fast_agent/llm/provider/openai/llm_deepseek.py +10 -11
- mcp_agent/llm/providers/augmented_llm_generic.py → fast_agent/llm/provider/openai/llm_generic.py +4 -4
- mcp_agent/llm/providers/augmented_llm_google_oai.py → fast_agent/llm/provider/openai/llm_google_oai.py +4 -4
- mcp_agent/llm/providers/augmented_llm_groq.py → fast_agent/llm/provider/openai/llm_groq.py +14 -16
- mcp_agent/llm/providers/augmented_llm_openai.py → fast_agent/llm/provider/openai/llm_openai.py +133 -206
- mcp_agent/llm/providers/augmented_llm_openrouter.py → fast_agent/llm/provider/openai/llm_openrouter.py +6 -6
- mcp_agent/llm/providers/augmented_llm_tensorzero_openai.py → fast_agent/llm/provider/openai/llm_tensorzero_openai.py +17 -16
- mcp_agent/llm/providers/augmented_llm_xai.py → fast_agent/llm/provider/openai/llm_xai.py +6 -6
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/multipart_converter_openai.py +125 -63
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_multipart.py +12 -12
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_utils.py +18 -16
- {mcp_agent → fast_agent}/llm/provider_key_manager.py +2 -2
- {mcp_agent → fast_agent}/llm/provider_types.py +2 -0
- {mcp_agent → fast_agent}/llm/sampling_converter.py +15 -12
- {mcp_agent → fast_agent}/llm/usage_tracking.py +23 -5
- fast_agent/mcp/__init__.py +43 -0
- {mcp_agent → fast_agent}/mcp/elicitation_factory.py +3 -3
- {mcp_agent → fast_agent}/mcp/elicitation_handlers.py +19 -10
- {mcp_agent → fast_agent}/mcp/gen_client.py +3 -3
- fast_agent/mcp/helpers/__init__.py +36 -0
- fast_agent/mcp/helpers/content_helpers.py +183 -0
- {mcp_agent → fast_agent}/mcp/helpers/server_config_helpers.py +8 -8
- {mcp_agent → fast_agent}/mcp/hf_auth.py +25 -23
- fast_agent/mcp/interfaces.py +93 -0
- {mcp_agent → fast_agent}/mcp/logger_textio.py +4 -4
- {mcp_agent → fast_agent}/mcp/mcp_agent_client_session.py +49 -44
- {mcp_agent → fast_agent}/mcp/mcp_aggregator.py +66 -115
- {mcp_agent → fast_agent}/mcp/mcp_connection_manager.py +16 -23
- {mcp_agent/core → fast_agent/mcp}/mcp_content.py +23 -15
- {mcp_agent → fast_agent}/mcp/mime_utils.py +39 -0
- fast_agent/mcp/prompt.py +159 -0
- mcp_agent/mcp/prompt_message_multipart.py → fast_agent/mcp/prompt_message_extended.py +27 -20
- {mcp_agent → fast_agent}/mcp/prompt_render.py +21 -19
- {mcp_agent → fast_agent}/mcp/prompt_serialization.py +46 -46
- fast_agent/mcp/prompts/__main__.py +7 -0
- {mcp_agent → fast_agent}/mcp/prompts/prompt_helpers.py +31 -30
- {mcp_agent → fast_agent}/mcp/prompts/prompt_load.py +8 -8
- {mcp_agent → fast_agent}/mcp/prompts/prompt_server.py +11 -19
- {mcp_agent → fast_agent}/mcp/prompts/prompt_template.py +18 -18
- {mcp_agent → fast_agent}/mcp/resource_utils.py +1 -1
- {mcp_agent → fast_agent}/mcp/sampling.py +31 -26
- {mcp_agent/mcp_server → fast_agent/mcp/server}/__init__.py +1 -1
- {mcp_agent/mcp_server → fast_agent/mcp/server}/agent_server.py +5 -6
- fast_agent/mcp/ui_agent.py +48 -0
- fast_agent/mcp/ui_mixin.py +209 -0
- fast_agent/mcp_server_registry.py +90 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis-campaign.py +5 -4
- {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_forms_server.py +25 -3
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/forms_demo.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character_handler.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/tool_call.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_one.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_two.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher-eval.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher-imp.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/tensorzero/agent.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/tensorzero/image_demo.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/tensorzero/simple_agent.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/chaining.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/evaluator.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/workflows/human_input.py +5 -3
- {mcp_agent → fast_agent}/resources/examples/workflows/orchestrator.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/parallel.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/workflows/router.py +5 -2
- fast_agent/resources/setup/.gitignore +24 -0
- fast_agent/resources/setup/agent.py +18 -0
- fast_agent/resources/setup/fastagent.config.yaml +44 -0
- fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
- fast_agent/tools/elicitation.py +369 -0
- fast_agent/types/__init__.py +32 -0
- fast_agent/types/llm_stop_reason.py +77 -0
- fast_agent/ui/__init__.py +38 -0
- fast_agent/ui/console_display.py +1005 -0
- {mcp_agent/human_input → fast_agent/ui}/elicitation_form.py +56 -39
- mcp_agent/human_input/elicitation_forms.py → fast_agent/ui/elicitation_style.py +1 -1
- {mcp_agent/core → fast_agent/ui}/enhanced_prompt.py +96 -25
- {mcp_agent/core → fast_agent/ui}/interactive_prompt.py +330 -125
- fast_agent/ui/mcp_ui_utils.py +224 -0
- {mcp_agent → fast_agent/ui}/progress_display.py +2 -2
- {mcp_agent/logging → fast_agent/ui}/rich_progress.py +4 -4
- {mcp_agent/core → fast_agent/ui}/usage_display.py +3 -8
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/METADATA +7 -7
- fast_agent_mcp-0.3.0.dist-info/RECORD +202 -0
- fast_agent_mcp-0.3.0.dist-info/entry_points.txt +5 -0
- fast_agent_mcp-0.2.57.dist-info/RECORD +0 -192
- fast_agent_mcp-0.2.57.dist-info/entry_points.txt +0 -6
- mcp_agent/__init__.py +0 -114
- mcp_agent/agents/agent.py +0 -92
- mcp_agent/agents/workflow/__init__.py +0 -1
- mcp_agent/agents/workflow/orchestrator_agent.py +0 -597
- mcp_agent/app.py +0 -175
- mcp_agent/core/__init__.py +0 -26
- mcp_agent/core/prompt.py +0 -191
- mcp_agent/event_progress.py +0 -134
- mcp_agent/human_input/handler.py +0 -81
- mcp_agent/llm/__init__.py +0 -2
- mcp_agent/llm/augmented_llm_passthrough.py +0 -232
- mcp_agent/llm/augmented_llm_slow.py +0 -53
- mcp_agent/llm/providers/__init__.py +0 -8
- mcp_agent/llm/providers/augmented_llm_anthropic.py +0 -717
- mcp_agent/llm/providers/augmented_llm_bedrock.py +0 -1788
- mcp_agent/llm/providers/augmented_llm_google_native.py +0 -495
- mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -57
- mcp_agent/llm/providers/sampling_converter_openai.py +0 -26
- mcp_agent/llm/sampling_format_converter.py +0 -37
- mcp_agent/logging/__init__.py +0 -0
- mcp_agent/mcp/__init__.py +0 -50
- mcp_agent/mcp/helpers/__init__.py +0 -25
- mcp_agent/mcp/helpers/content_helpers.py +0 -187
- mcp_agent/mcp/interfaces.py +0 -266
- mcp_agent/mcp/prompts/__init__.py +0 -0
- mcp_agent/mcp/prompts/__main__.py +0 -10
- mcp_agent/mcp_server_registry.py +0 -343
- mcp_agent/tools/tool_definition.py +0 -14
- mcp_agent/ui/console_display.py +0 -790
- mcp_agent/ui/console_display_legacy.py +0 -401
- {mcp_agent → fast_agent}/agents/workflow/orchestrator_prompts.py +0 -0
- {mcp_agent/agents → fast_agent/cli}/__init__.py +0 -0
- {mcp_agent → fast_agent}/cli/constants.py +0 -0
- {mcp_agent → fast_agent}/core/error_handling.py +0 -0
- {mcp_agent → fast_agent}/core/exceptions.py +0 -0
- {mcp_agent/cli → fast_agent/core/executor}/__init__.py +0 -0
- {mcp_agent → fast_agent/core}/executor/task_registry.py +0 -0
- {mcp_agent → fast_agent/core}/executor/workflow_signal.py +0 -0
- {mcp_agent → fast_agent}/human_input/form_fields.py +0 -0
- {mcp_agent → fast_agent}/llm/prompt_utils.py +0 -0
- {mcp_agent/core → fast_agent/llm}/request_params.py +0 -0
- {mcp_agent → fast_agent}/mcp/common.py +0 -0
- {mcp_agent/executor → fast_agent/mcp/prompts}/__init__.py +0 -0
- {mcp_agent → fast_agent}/mcp/prompts/prompt_constants.py +0 -0
- {mcp_agent → fast_agent}/py.typed +0 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_account_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_game_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
- {mcp_agent → fast_agent}/resources/examples/researcher/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/.env.sample +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/Makefile +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/README.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/crab.png +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/docker-compose.yml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/Dockerfile +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/entrypoint.sh +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/mcp_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/pyproject.toml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_schema.json +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/graded_report.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/short_story.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/short_story.txt +0 -0
- {mcp_agent → fast_agent/ui}/console.py +0 -0
- {mcp_agent/core → fast_agent/ui}/mermaid_utils.py +0 -0
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/licenses/LICENSE +0 -0

--- /dev/null
+++ b/fast_agent/llm/internal/passthrough.py
@@ -0,0 +1,137 @@
+import json  # Import at the module level
+from typing import Any, Dict, List, Optional
+
+from mcp import CallToolRequest, Tool
+from mcp.types import CallToolRequestParams, PromptMessage
+
+from fast_agent.core.logging.logger import get_logger
+from fast_agent.core.prompt import Prompt
+from fast_agent.llm.fastagent_llm import (
+    FastAgentLLM,
+    RequestParams,
+)
+from fast_agent.llm.provider_types import Provider
+from fast_agent.llm.usage_tracking import create_turn_usage_from_messages
+from fast_agent.mcp.helpers.content_helpers import get_text
+from fast_agent.types import PromptMessageExtended
+from fast_agent.types.llm_stop_reason import LlmStopReason
+
+CALL_TOOL_INDICATOR = "***CALL_TOOL"
+FIXED_RESPONSE_INDICATOR = "***FIXED_RESPONSE"
+
+
+class PassthroughLLM(FastAgentLLM):
+    """
+    A specialized LLM implementation that simply passes through input messages without modification.
+
+    This is useful for cases where you need an object with the AugmentedLLM interface
+    but want to preserve the original message without any processing, such as in a
+    parallel workflow where no fan-in aggregation is needed.
+    """
+
+    def __init__(
+        self, provider=Provider.FAST_AGENT, name: str = "Passthrough", **kwargs: dict[str, Any]
+    ) -> None:
+        super().__init__(name=name, provider=provider, **kwargs)
+        self.logger = get_logger(__name__)
+        self._messages = [PromptMessage]
+        self._fixed_response: str | None = None
+        self._correlation_id: int = 0
+
+    async def initialize(self) -> None:
+        pass
+
+    def _parse_tool_command(self, command: str) -> tuple[str, Optional[dict]]:
+        """
+        Parse a tool command string into tool name and arguments.
+
+        Args:
+            command: The command string in format "***CALL_TOOL <tool_name> [arguments_json]"
+
+        Returns:
+            Tuple of (tool_name, arguments_dict)
+
+        Raises:
+            ValueError: If command format is invalid
+        """
+        parts = command.split(" ", 2)
+        if len(parts) < 2:
+            raise ValueError("Invalid format. Expected '***CALL_TOOL <tool_name> [arguments_json]'")
+
+        tool_name = parts[1].strip()
+        arguments = None
+
+        if len(parts) > 2:
+            try:
+                arguments = json.loads(parts[2])
+            except json.JSONDecodeError:
+                raise ValueError(f"Invalid JSON arguments: {parts[2]}")
+
+        self.logger.info(f"Calling tool {tool_name} with arguments {arguments}")
+        return tool_name, arguments
+
+    async def _apply_prompt_provider_specific(
+        self,
+        multipart_messages: List["PromptMessageExtended"],
+        request_params: RequestParams | None = None,
+        tools: list[Tool] | None = None,
+        is_template: bool = False,
+    ) -> PromptMessageExtended:
+        # Add messages to history with proper is_prompt flag
+        self.history.extend(multipart_messages, is_prompt=is_template)
+
+        last_message = multipart_messages[-1]
+        tool_calls: Dict[str, CallToolRequest] = {}
+        stop_reason: LlmStopReason = LlmStopReason.END_TURN
+        if self.is_tool_call(last_message):
+            tool_name, arguments = self._parse_tool_command(last_message.first_text())
+            tool_calls["correlationId" + str(self._correlation_id)] = CallToolRequest(
+                method="tools/call",
+                params=CallToolRequestParams(name=tool_name, arguments=arguments),
+            )
+            self._correlation_id += 1
+            stop_reason = LlmStopReason.TOOL_USE
+
+        if last_message.first_text().startswith(FIXED_RESPONSE_INDICATOR):
+            self._fixed_response = (
+                last_message.first_text().split(FIXED_RESPONSE_INDICATOR, 1)[1].strip()
+            )
+
+        if len(last_message.tool_results or {}) > 0:
+            assert last_message.tool_results
+            concatenated_content = " ".join(
+                [
+                    (get_text(tool_result.content[0]) or "<empty>")
+                    for tool_result in last_message.tool_results.values()
+                ]
+            )
+            result = Prompt.assistant(concatenated_content, stop_reason=stop_reason)
+
+        elif self._fixed_response:
+            result = Prompt.assistant(
+                self._fixed_response, tool_calls=tool_calls, stop_reason=stop_reason
+            )
+        else:
+            concatenated_content = "\n".join(
+                [message.all_text() for message in multipart_messages if "user" == message.role]
+            )
+            result = Prompt.assistant(
+                concatenated_content,
+                tool_calls=tool_calls,
+                stop_reason=stop_reason,
+            )
+
+        turn_usage = create_turn_usage_from_messages(
+            input_content=multipart_messages[-1].all_text(),
+            output_content=result.all_text(),
+            model="passthrough",
+            model_type="passthrough",
+            tool_calls=len(tool_calls),
+            delay_seconds=0.0,
+        )
+        self.usage_accumulator.add_turn(turn_usage)
+
+        return result
+
+    def is_tool_call(self, message: PromptMessageExtended) -> bool:
+        return message.first_text().startswith(CALL_TOOL_INDICATOR)
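
Note on the hunk above: `PassthroughLLM` implements fast-agent's test-fixture command protocol, where a message starting with `***CALL_TOOL` becomes a real `tools/call` request and `***FIXED_RESPONSE` pins the reply text for later turns. A minimal standalone sketch of the command grammar, mirroring `_parse_tool_command` (the example tool name and JSON payload are made up):

```python
import json


def parse_tool_command(command: str) -> tuple[str, dict | None]:
    # "***CALL_TOOL <tool_name> [arguments_json]" -> (tool_name, arguments)
    parts = command.split(" ", 2)
    if len(parts) < 2:
        raise ValueError("Expected '***CALL_TOOL <tool_name> [arguments_json]'")
    # json.JSONDecodeError subclasses ValueError, so bad JSON also raises
    arguments = json.loads(parts[2]) if len(parts) > 2 else None
    return parts[1].strip(), arguments


print(parse_tool_command('***CALL_TOOL get_weather {"city": "Paris"}'))
# ('get_weather', {'city': 'Paris'})
```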

--- a/mcp_agent/llm/augmented_llm_playback.py
+++ b/fast_agent/llm/internal/playback.py
@@ -1,16 +1,19 @@
 from typing import Any, List, Type, Union
 
+from mcp import Tool
 from mcp.types import PromptMessage
 
-from
-from
-from
-from
-from
-from
-from
-from
-from
+from fast_agent.core.exceptions import ModelConfigError
+from fast_agent.core.prompt import Prompt
+from fast_agent.interfaces import ModelT
+from fast_agent.llm.internal.passthrough import PassthroughLLM
+from fast_agent.llm.provider_types import Provider
+from fast_agent.llm.usage_tracking import create_turn_usage_from_messages
+from fast_agent.mcp.helpers.content_helpers import normalize_to_extended_list
+from fast_agent.mcp.prompts.prompt_helpers import MessageContent
+from fast_agent.types import PromptMessageExtended, RequestParams
+
+# TODO -- support tool usage/replay
 
 
 class PlaybackLLM(PassthroughLLM):
@@ -28,11 +31,11 @@ class PlaybackLLM(PassthroughLLM):
 
     def __init__(self, name: str = "Playback", **kwargs: dict[str, Any]) -> None:
         super().__init__(name=name, provider=Provider.FAST_AGENT, **kwargs)
-        self._messages: List[
+        self._messages: List[PromptMessageExtended] = []
         self._current_index = -1
         self._overage = -1
 
-    def _get_next_assistant_message(self) ->
+    def _get_next_assistant_message(self) -> PromptMessageExtended:
         """
         Get the next assistant message from the loaded messages.
         Increments the current message index and skips user messages.
@@ -53,14 +56,23 @@ class PlaybackLLM(PassthroughLLM):
 
     async def generate(
         self,
-
+        messages: Union[
+            str,
+            PromptMessage,
+            PromptMessageExtended,
+            List[Union[str, PromptMessage, PromptMessageExtended]],
+        ],
         request_params: RequestParams | None = None,
-
+        tools: List[Tool] | None = None,
+    ) -> PromptMessageExtended:
         """
         Handle playback of messages in two modes:
         1. First call: store messages for playback and return "HISTORY LOADED"
         2. Subsequent calls: return the next assistant message
         """
+        # Normalize all input types to a list of PromptMessageExtended
+        multipart_messages = normalize_to_extended_list(messages)
+
         # If this is the first call (initialization) or we're loading a prompt template
         # with multiple messages (comes from apply_prompt)
         if -1 == self._current_index:
@@ -72,24 +84,16 @@ class PlaybackLLM(PassthroughLLM):
             # Reset the index to the beginning for proper playback
             self._current_index = 0
 
-            await self.show_assistant_message(
-                message_text=f"HISTORY LOADED ({len(self._messages)} messages)",
-                title="ASSISTANT/PLAYBACK",
-            )
-
             # In PlaybackLLM, we always return "HISTORY LOADED" on initialization,
             # regardless of the prompt content. The next call will return messages.
-            return Prompt.assistant("HISTORY LOADED")
+            return Prompt.assistant(f"HISTORY LOADED ({len(self._messages)}) messages")
 
         response = self._get_next_assistant_message()
-        await self.show_assistant_message(
-            message_text=MessageContent.get_first_text(response), title="ASSISTANT/PLAYBACK"
-        )
 
         # Track usage for this playback "turn"
         try:
             input_content = str(multipart_messages) if multipart_messages else ""
-            output_content = MessageContent.get_first_text(response)
+            output_content = MessageContent.get_first_text(response) or ""
 
             turn_usage = create_turn_usage_from_messages(
                 input_content=input_content,
@@ -108,10 +112,10 @@ class PlaybackLLM(PassthroughLLM):
 
     async def structured(
         self,
-
+        messages: List[PromptMessageExtended],
         model: Type[ModelT],
         request_params: RequestParams | None = None,
-    ) -> tuple[ModelT | None,
+    ) -> tuple[ModelT | None, PromptMessageExtended]:
         """
         Handle structured requests by returning the next assistant message.
         """
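
The playback hunks keep a two-phase contract: the first `generate()` call stores the supplied script and answers `HISTORY LOADED (<n>) messages`; each later call replays the next stored assistant message instead of invoking a model. A hypothetical usage sketch (it assumes a `Prompt.user` helper exists alongside the `Prompt.assistant` helper used above, and that `first_text()` is available on the returned message):

```python
import asyncio

from fast_agent.core.prompt import Prompt
from fast_agent.llm.internal.playback import PlaybackLLM


async def demo() -> None:
    llm = PlaybackLLM()
    script = [
        Prompt.user("hello"),
        Prompt.assistant("hi there"),
        Prompt.assistant("anything else?"),
    ]
    loaded = await llm.generate(script)  # first call: stores the script
    print(loaded.first_text())           # e.g. "HISTORY LOADED (3) messages"
    reply = await llm.generate("hello")  # later calls replay stored assistant turns
    print(reply.first_text())            # "hi there"


asyncio.run(demo())
```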

--- a/mcp_agent/llm/augmented_llm_silent.py
+++ b/fast_agent/llm/internal/silent.py
@@ -2,47 +2,40 @@
 
 from typing import Any
 
-from
-from
-from
+from fast_agent.llm.internal.passthrough import PassthroughLLM
+from fast_agent.llm.provider_types import Provider
+from fast_agent.llm.usage_tracking import TurnUsage, UsageAccumulator
 
 
 class ZeroUsageAccumulator(UsageAccumulator):
     """Usage accumulator that always reports zero usage."""
-
+
     def add_turn(self, turn: TurnUsage) -> None:
         """Override to do nothing - no usage accumulation."""
        pass
 
 
+# TODO -- this won't work anymore
 class SilentLLM(PassthroughLLM):
     """
     A specialized LLM that processes messages like PassthroughLLM but suppresses all display output.
-
+
     This is particularly useful for parallel agent workflows where the fan-in agent
     should aggregate results without polluting the console with intermediate output.
     Token counting is disabled - the model always reports zero usage.
     """
-
+
     def __init__(
         self, provider=Provider.FAST_AGENT, name: str = "Silent", **kwargs: dict[str, Any]
     ) -> None:
         super().__init__(name=name, provider=provider, **kwargs)
         # Override with zero usage accumulator - silent model reports no usage
         self.usage_accumulator = ZeroUsageAccumulator()
-
-    def show_user_message(self, message: Any, **kwargs) -> None:
-        """Override to suppress user message display."""
-        pass
-
-    async def show_assistant_message(self, message: Any, **kwargs) -> None:
-        """Override to suppress assistant message display."""
-        pass
-
+
     def show_tool_calls(self, tool_calls: Any, **kwargs) -> None:
         """Override to suppress tool call display."""
         pass
-
+
     def show_tool_results(self, tool_results: Any, **kwargs) -> None:
         """Override to suppress tool result display."""
-        pass
+        pass

--- /dev/null
+++ b/fast_agent/llm/internal/slow.py
@@ -0,0 +1,38 @@
+import asyncio
+from typing import Any, List
+
+from mcp import Tool
+
+from fast_agent.llm.fastagent_llm import (
+    RequestParams,
+)
+from fast_agent.llm.internal.passthrough import PassthroughLLM
+from fast_agent.llm.provider_types import Provider
+from fast_agent.types import PromptMessageExtended
+
+
+class SlowLLM(PassthroughLLM):
+    """
+    A specialized LLM implementation that sleeps for 3 seconds before responding like PassthroughLLM.
+
+    This is useful for testing scenarios where you want to simulate slow responses
+    or for debugging timing-related issues in parallel workflows.
+    """
+
+    def __init__(
+        self, provider=Provider.FAST_AGENT, name: str = "Slow", **kwargs: dict[str, Any]
+    ) -> None:
+        super().__init__(name=name, provider=provider, **kwargs)
+
+    async def _apply_prompt_provider_specific(
+        self,
+        multipart_messages: List["PromptMessageExtended"],
+        request_params: RequestParams | None = None,
+        tools: list[Tool] | None = None,
+        is_template: bool = False,
+    ) -> PromptMessageExtended:
+        """Sleep for 3 seconds then apply prompt like PassthroughLLM."""
+        await asyncio.sleep(3)
+        return await super()._apply_prompt_provider_specific(
+            multipart_messages, request_params, tools, is_template
+        )

--- a/mcp_agent/llm/memory.py
+++ b/fast_agent/llm/memory.py
@@ -35,7 +35,9 @@ class SimpleMemory(Memory, Generic[MessageParamT]):
     def __init__(self) -> None:
         self.history: List[MessageParamT] = []
         self.prompt_messages: List[MessageParamT] = []  # Always included
-        self.conversation_cache_positions: List[
+        self.conversation_cache_positions: List[
+            int
+        ] = []  # Track active conversation cache positions
         self.cache_walk_distance: int = 6  # Messages between cache blocks
         self.max_conversation_cache_blocks: int = 2  # Maximum conversation cache blocks
 
@@ -109,79 +111,85 @@ class SimpleMemory(Memory, Generic[MessageParamT]):
     def should_apply_conversation_cache(self) -> bool:
         """
         Determine if conversation caching should be applied based on walking algorithm.
-
+
         Returns:
             True if we should add or update cache blocks
         """
         total_messages = len(self.history)
-
+
         # Need at least cache_walk_distance messages to start caching
         if total_messages < self.cache_walk_distance:
             return False
-
+
         # Check if we need to add a new cache block
-        return len(self._calculate_cache_positions(total_messages)) != len(
-
+        return len(self._calculate_cache_positions(total_messages)) != len(
+            self.conversation_cache_positions
+        )
+
     def _calculate_cache_positions(self, total_conversation_messages: int) -> List[int]:
         """
         Calculate where cache blocks should be placed using walking algorithm.
-
+
         Args:
             total_conversation_messages: Number of conversation messages (not including prompts)
-
+
         Returns:
             List of positions (relative to conversation start) where cache should be placed
         """
         positions = []
-
+
         # Place cache blocks every cache_walk_distance messages
-        for i in range(
+        for i in range(
+            self.cache_walk_distance - 1, total_conversation_messages, self.cache_walk_distance
+        ):
             positions.append(i)
             if len(positions) >= self.max_conversation_cache_blocks:
                 break
-
+
         # Keep only the most recent cache blocks (walking behavior)
         if len(positions) > self.max_conversation_cache_blocks:
-            positions = positions[-self.max_conversation_cache_blocks:]
-
+            positions = positions[-self.max_conversation_cache_blocks :]
+
         return positions
-
+
     def get_conversation_cache_updates(self) -> dict:
         """
         Get cache position updates needed for the walking algorithm.
-
+
         Returns:
             Dict with 'add', 'remove', and 'active' position lists (relative to full message array)
         """
         total_conversation_messages = len(self.history)
         new_positions = self._calculate_cache_positions(total_conversation_messages)
-
+
         # Convert to absolute positions (including prompt messages)
         prompt_offset = len(self.prompt_messages)
         new_absolute_positions = [pos + prompt_offset for pos in new_positions]
-
+
         old_positions_set = set(self.conversation_cache_positions)
         new_positions_set = set(new_absolute_positions)
-
+
         return {
-
-
-
+            "add": sorted(new_positions_set - old_positions_set),
+            "remove": sorted(old_positions_set - new_positions_set),
+            "active": sorted(new_absolute_positions),
         }
-
+
     def apply_conversation_cache_updates(self, updates: dict) -> None:
         """
         Apply cache position updates.
-
+
         Args:
             updates: Dict from get_conversation_cache_updates()
         """
-        self.conversation_cache_positions = updates[
+        self.conversation_cache_positions = updates["active"].copy()
 
-    def remove_cache_control_from_messages(
+    def remove_cache_control_from_messages(
+        self, messages: List[MessageParamT], positions: List[int]
+    ) -> None:
         """
         Remove cache control from specified message positions.
-
+
         Args:
             messages: The message array to modify
             positions: List of positions to remove cache control from
@@ -196,14 +204,16 @@ class SimpleMemory(Memory, Generic[MessageParamT]):
             if isinstance(content_block, dict) and "cache_control" in content_block:
                 del content_block["cache_control"]
 
-    def add_cache_control_to_messages(
+    def add_cache_control_to_messages(
+        self, messages: List[MessageParamT], positions: List[int]
+    ) -> int:
         """
         Add cache control to specified message positions.
-
+
         Args:
-            messages: The message array to modify
+            messages: The message array to modify
             positions: List of positions to add cache control to
-
+
         Returns:
             Number of cache blocks successfully applied
         """
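
The memory hunks are mostly formatter churn plus completions of lines the old listing truncated; the behavior of interest is the cache "walking" placement, which drops a `cache_control` marker every `cache_walk_distance` messages until `max_conversation_cache_blocks` blocks have been placed. A standalone sketch of `_calculate_cache_positions` with the defaults from the hunk (walk distance 6, max 2 blocks; positions are relative to the start of the conversation):

```python
def calculate_cache_positions(total: int, walk: int = 6, max_blocks: int = 2) -> list[int]:
    # Mirror of SimpleMemory._calculate_cache_positions above.
    positions: list[int] = []
    for i in range(walk - 1, total, walk):
        positions.append(i)
        if len(positions) >= max_blocks:  # stop once enough blocks are placed
            break
    return positions[-max_blocks:]


print(calculate_cache_positions(4))   # [] -- below the walk distance, no caching yet
print(calculate_cache_positions(7))   # [5]
print(calculate_cache_positions(20))  # [5, 11] -- capped at max_blocks
```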

--- a/mcp_agent/llm/model_database.py
+++ b/fast_agent/llm/model_database.py
@@ -158,8 +158,9 @@ class ModelDatabase:
         context_window=2097152, max_output_tokens=8192, tokenizes=GOOGLE_MULTIMODAL
     )
 
+    # 31/08/25 switched to object mode (even though groq says schema supported and used to work..)
     KIMI_MOONSHOT = ModelParameters(
-        context_window=
+        context_window=262144, max_output_tokens=16384, tokenizes=TEXT_ONLY, json_mode="object"
     )
 
     # FIXME: xAI has not documented the max output tokens for Grok 4. Using Grok 3 as a placeholder. Will need to update when available (if ever)
@@ -245,7 +246,7 @@ class ModelDatabase:
         "grok-3-mini": GROK_3,
         "grok-3-fast": GROK_3,
         "grok-3-mini-fast": GROK_3,
-        "moonshotai/kimi-k2-instruct": KIMI_MOONSHOT,
+        "moonshotai/kimi-k2-instruct-0905": KIMI_MOONSHOT,
         "qwen/qwen3-32b": QWEN3_REASONER,
         "deepseek-r1-distill-llama-70b": DEEPSEEK_DISTILL,
         "openai/gpt-oss-120b": OPENAI_GPT_OSS_SERIES,
@@ -275,6 +276,38 @@ class ModelDatabase:
         params = cls.get_model_params(model)
         return params.tokenizes if params else None
 
+    @classmethod
+    def supports_mime(cls, model: str, mime_type: str) -> bool:
+        """
+        Return True if the given model supports the provided MIME type.
+
+        Normalizes common aliases (e.g., image/jpg->image/jpeg, document/pdf->application/pdf)
+        and also accepts bare extensions like "pdf" or "png".
+        """
+        from fast_agent.mcp.mime_utils import normalize_mime_type
+
+        tokenizes = cls.get_tokenizes(model) or []
+
+        # Normalize the candidate and the database entries to lowercase
+        normalized_supported = [t.lower() for t in tokenizes]
+
+        # Handle wildcard inputs like "image/*" quickly
+        mt = (mime_type or "").strip().lower()
+        if mt.endswith("/*") and "/" in mt:
+            prefix = mt.split("/", 1)[0] + "/"
+            return any(s.startswith(prefix) for s in normalized_supported)
+
+        normalized = normalize_mime_type(mime_type)
+        if not normalized:
+            return False
+
+        return normalized.lower() in normalized_supported
+
+    @classmethod
+    def supports_any_mime(cls, model: str, mime_types: List[str]) -> bool:
+        """Return True if the model supports any of the provided MIME types."""
+        return any(cls.supports_mime(model, m) for m in mime_types)
+
     @classmethod
     def get_json_mode(cls, model: str) -> str | None:
         """Get supported json mode (structured output) for a model"""