fast-agent-mcp 0.2.57__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff shows the changes between the two package versions as published to their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of fast-agent-mcp might be problematic.
- fast_agent/__init__.py +127 -0
- fast_agent/agents/__init__.py +36 -0
- {mcp_agent/core → fast_agent/agents}/agent_types.py +2 -1
- fast_agent/agents/llm_agent.py +217 -0
- fast_agent/agents/llm_decorator.py +486 -0
- mcp_agent/agents/base_agent.py → fast_agent/agents/mcp_agent.py +377 -385
- fast_agent/agents/tool_agent.py +168 -0
- {mcp_agent → fast_agent}/agents/workflow/chain_agent.py +43 -33
- {mcp_agent → fast_agent}/agents/workflow/evaluator_optimizer.py +31 -35
- {mcp_agent → fast_agent}/agents/workflow/iterative_planner.py +56 -47
- {mcp_agent → fast_agent}/agents/workflow/orchestrator_models.py +4 -4
- {mcp_agent → fast_agent}/agents/workflow/parallel_agent.py +34 -41
- {mcp_agent → fast_agent}/agents/workflow/router_agent.py +54 -39
- {mcp_agent → fast_agent}/cli/__main__.py +5 -3
- {mcp_agent → fast_agent}/cli/commands/check_config.py +95 -66
- {mcp_agent → fast_agent}/cli/commands/go.py +20 -11
- {mcp_agent → fast_agent}/cli/commands/quickstart.py +4 -4
- {mcp_agent → fast_agent}/cli/commands/server_helpers.py +1 -1
- {mcp_agent → fast_agent}/cli/commands/setup.py +64 -134
- {mcp_agent → fast_agent}/cli/commands/url_parser.py +9 -8
- {mcp_agent → fast_agent}/cli/main.py +36 -16
- {mcp_agent → fast_agent}/cli/terminal.py +2 -2
- {mcp_agent → fast_agent}/config.py +13 -2
- fast_agent/constants.py +8 -0
- {mcp_agent → fast_agent}/context.py +24 -19
- {mcp_agent → fast_agent}/context_dependent.py +9 -5
- fast_agent/core/__init__.py +17 -0
- {mcp_agent → fast_agent}/core/agent_app.py +39 -36
- fast_agent/core/core_app.py +135 -0
- {mcp_agent → fast_agent}/core/direct_decorators.py +12 -26
- {mcp_agent → fast_agent}/core/direct_factory.py +95 -73
- {mcp_agent → fast_agent/core}/executor/executor.py +4 -5
- {mcp_agent → fast_agent}/core/fastagent.py +32 -32
- fast_agent/core/logging/__init__.py +5 -0
- {mcp_agent → fast_agent/core}/logging/events.py +3 -3
- {mcp_agent → fast_agent/core}/logging/json_serializer.py +1 -1
- {mcp_agent → fast_agent/core}/logging/listeners.py +85 -7
- {mcp_agent → fast_agent/core}/logging/logger.py +7 -7
- {mcp_agent → fast_agent/core}/logging/transport.py +10 -11
- fast_agent/core/prompt.py +9 -0
- {mcp_agent → fast_agent}/core/validation.py +4 -4
- fast_agent/event_progress.py +61 -0
- fast_agent/history/history_exporter.py +44 -0
- {mcp_agent → fast_agent}/human_input/__init__.py +9 -12
- {mcp_agent → fast_agent}/human_input/elicitation_handler.py +26 -8
- {mcp_agent → fast_agent}/human_input/elicitation_state.py +7 -7
- {mcp_agent → fast_agent}/human_input/simple_form.py +6 -4
- {mcp_agent → fast_agent}/human_input/types.py +1 -18
- fast_agent/interfaces.py +228 -0
- fast_agent/llm/__init__.py +9 -0
- mcp_agent/llm/augmented_llm.py → fast_agent/llm/fastagent_llm.py +128 -218
- fast_agent/llm/internal/passthrough.py +137 -0
- mcp_agent/llm/augmented_llm_playback.py → fast_agent/llm/internal/playback.py +29 -25
- mcp_agent/llm/augmented_llm_silent.py → fast_agent/llm/internal/silent.py +10 -17
- fast_agent/llm/internal/slow.py +38 -0
- {mcp_agent → fast_agent}/llm/memory.py +40 -30
- {mcp_agent → fast_agent}/llm/model_database.py +35 -2
- {mcp_agent → fast_agent}/llm/model_factory.py +103 -77
- fast_agent/llm/model_info.py +126 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/anthropic_utils.py +7 -7
- fast_agent/llm/provider/anthropic/llm_anthropic.py +603 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/multipart_converter_anthropic.py +79 -86
- fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
- fast_agent/llm/provider/bedrock/llm_bedrock.py +2192 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/google}/google_converter.py +66 -14
- fast_agent/llm/provider/google/llm_google_native.py +431 -0
- mcp_agent/llm/providers/augmented_llm_aliyun.py → fast_agent/llm/provider/openai/llm_aliyun.py +6 -7
- mcp_agent/llm/providers/augmented_llm_azure.py → fast_agent/llm/provider/openai/llm_azure.py +4 -4
- mcp_agent/llm/providers/augmented_llm_deepseek.py → fast_agent/llm/provider/openai/llm_deepseek.py +10 -11
- mcp_agent/llm/providers/augmented_llm_generic.py → fast_agent/llm/provider/openai/llm_generic.py +4 -4
- mcp_agent/llm/providers/augmented_llm_google_oai.py → fast_agent/llm/provider/openai/llm_google_oai.py +4 -4
- mcp_agent/llm/providers/augmented_llm_groq.py → fast_agent/llm/provider/openai/llm_groq.py +14 -16
- mcp_agent/llm/providers/augmented_llm_openai.py → fast_agent/llm/provider/openai/llm_openai.py +133 -206
- mcp_agent/llm/providers/augmented_llm_openrouter.py → fast_agent/llm/provider/openai/llm_openrouter.py +6 -6
- mcp_agent/llm/providers/augmented_llm_tensorzero_openai.py → fast_agent/llm/provider/openai/llm_tensorzero_openai.py +17 -16
- mcp_agent/llm/providers/augmented_llm_xai.py → fast_agent/llm/provider/openai/llm_xai.py +6 -6
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/multipart_converter_openai.py +125 -63
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_multipart.py +12 -12
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_utils.py +18 -16
- {mcp_agent → fast_agent}/llm/provider_key_manager.py +2 -2
- {mcp_agent → fast_agent}/llm/provider_types.py +2 -0
- {mcp_agent → fast_agent}/llm/sampling_converter.py +15 -12
- {mcp_agent → fast_agent}/llm/usage_tracking.py +23 -5
- fast_agent/mcp/__init__.py +43 -0
- {mcp_agent → fast_agent}/mcp/elicitation_factory.py +3 -3
- {mcp_agent → fast_agent}/mcp/elicitation_handlers.py +19 -10
- {mcp_agent → fast_agent}/mcp/gen_client.py +3 -3
- fast_agent/mcp/helpers/__init__.py +36 -0
- fast_agent/mcp/helpers/content_helpers.py +183 -0
- {mcp_agent → fast_agent}/mcp/helpers/server_config_helpers.py +8 -8
- {mcp_agent → fast_agent}/mcp/hf_auth.py +25 -23
- fast_agent/mcp/interfaces.py +93 -0
- {mcp_agent → fast_agent}/mcp/logger_textio.py +4 -4
- {mcp_agent → fast_agent}/mcp/mcp_agent_client_session.py +49 -44
- {mcp_agent → fast_agent}/mcp/mcp_aggregator.py +66 -115
- {mcp_agent → fast_agent}/mcp/mcp_connection_manager.py +16 -23
- {mcp_agent/core → fast_agent/mcp}/mcp_content.py +23 -15
- {mcp_agent → fast_agent}/mcp/mime_utils.py +39 -0
- fast_agent/mcp/prompt.py +159 -0
- mcp_agent/mcp/prompt_message_multipart.py → fast_agent/mcp/prompt_message_extended.py +27 -20
- {mcp_agent → fast_agent}/mcp/prompt_render.py +21 -19
- {mcp_agent → fast_agent}/mcp/prompt_serialization.py +46 -46
- fast_agent/mcp/prompts/__main__.py +7 -0
- {mcp_agent → fast_agent}/mcp/prompts/prompt_helpers.py +31 -30
- {mcp_agent → fast_agent}/mcp/prompts/prompt_load.py +8 -8
- {mcp_agent → fast_agent}/mcp/prompts/prompt_server.py +11 -19
- {mcp_agent → fast_agent}/mcp/prompts/prompt_template.py +18 -18
- {mcp_agent → fast_agent}/mcp/resource_utils.py +1 -1
- {mcp_agent → fast_agent}/mcp/sampling.py +31 -26
- {mcp_agent/mcp_server → fast_agent/mcp/server}/__init__.py +1 -1
- {mcp_agent/mcp_server → fast_agent/mcp/server}/agent_server.py +5 -6
- fast_agent/mcp/ui_agent.py +48 -0
- fast_agent/mcp/ui_mixin.py +209 -0
- fast_agent/mcp_server_registry.py +90 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis-campaign.py +5 -4
- {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_forms_server.py +25 -3
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/forms_demo.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character_handler.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/tool_call.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_one.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_two.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher-eval.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher-imp.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/tensorzero/agent.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/tensorzero/image_demo.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/tensorzero/simple_agent.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/chaining.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/evaluator.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/workflows/human_input.py +5 -3
- {mcp_agent → fast_agent}/resources/examples/workflows/orchestrator.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/parallel.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/workflows/router.py +5 -2
- fast_agent/resources/setup/.gitignore +24 -0
- fast_agent/resources/setup/agent.py +18 -0
- fast_agent/resources/setup/fastagent.config.yaml +44 -0
- fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
- fast_agent/tools/elicitation.py +369 -0
- fast_agent/types/__init__.py +32 -0
- fast_agent/types/llm_stop_reason.py +77 -0
- fast_agent/ui/__init__.py +38 -0
- fast_agent/ui/console_display.py +1005 -0
- {mcp_agent/human_input → fast_agent/ui}/elicitation_form.py +56 -39
- mcp_agent/human_input/elicitation_forms.py → fast_agent/ui/elicitation_style.py +1 -1
- {mcp_agent/core → fast_agent/ui}/enhanced_prompt.py +96 -25
- {mcp_agent/core → fast_agent/ui}/interactive_prompt.py +330 -125
- fast_agent/ui/mcp_ui_utils.py +224 -0
- {mcp_agent → fast_agent/ui}/progress_display.py +2 -2
- {mcp_agent/logging → fast_agent/ui}/rich_progress.py +4 -4
- {mcp_agent/core → fast_agent/ui}/usage_display.py +3 -8
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/METADATA +7 -7
- fast_agent_mcp-0.3.0.dist-info/RECORD +202 -0
- fast_agent_mcp-0.3.0.dist-info/entry_points.txt +5 -0
- fast_agent_mcp-0.2.57.dist-info/RECORD +0 -192
- fast_agent_mcp-0.2.57.dist-info/entry_points.txt +0 -6
- mcp_agent/__init__.py +0 -114
- mcp_agent/agents/agent.py +0 -92
- mcp_agent/agents/workflow/__init__.py +0 -1
- mcp_agent/agents/workflow/orchestrator_agent.py +0 -597
- mcp_agent/app.py +0 -175
- mcp_agent/core/__init__.py +0 -26
- mcp_agent/core/prompt.py +0 -191
- mcp_agent/event_progress.py +0 -134
- mcp_agent/human_input/handler.py +0 -81
- mcp_agent/llm/__init__.py +0 -2
- mcp_agent/llm/augmented_llm_passthrough.py +0 -232
- mcp_agent/llm/augmented_llm_slow.py +0 -53
- mcp_agent/llm/providers/__init__.py +0 -8
- mcp_agent/llm/providers/augmented_llm_anthropic.py +0 -717
- mcp_agent/llm/providers/augmented_llm_bedrock.py +0 -1788
- mcp_agent/llm/providers/augmented_llm_google_native.py +0 -495
- mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -57
- mcp_agent/llm/providers/sampling_converter_openai.py +0 -26
- mcp_agent/llm/sampling_format_converter.py +0 -37
- mcp_agent/logging/__init__.py +0 -0
- mcp_agent/mcp/__init__.py +0 -50
- mcp_agent/mcp/helpers/__init__.py +0 -25
- mcp_agent/mcp/helpers/content_helpers.py +0 -187
- mcp_agent/mcp/interfaces.py +0 -266
- mcp_agent/mcp/prompts/__init__.py +0 -0
- mcp_agent/mcp/prompts/__main__.py +0 -10
- mcp_agent/mcp_server_registry.py +0 -343
- mcp_agent/tools/tool_definition.py +0 -14
- mcp_agent/ui/console_display.py +0 -790
- mcp_agent/ui/console_display_legacy.py +0 -401
- {mcp_agent → fast_agent}/agents/workflow/orchestrator_prompts.py +0 -0
- {mcp_agent/agents → fast_agent/cli}/__init__.py +0 -0
- {mcp_agent → fast_agent}/cli/constants.py +0 -0
- {mcp_agent → fast_agent}/core/error_handling.py +0 -0
- {mcp_agent → fast_agent}/core/exceptions.py +0 -0
- {mcp_agent/cli → fast_agent/core/executor}/__init__.py +0 -0
- {mcp_agent → fast_agent/core}/executor/task_registry.py +0 -0
- {mcp_agent → fast_agent/core}/executor/workflow_signal.py +0 -0
- {mcp_agent → fast_agent}/human_input/form_fields.py +0 -0
- {mcp_agent → fast_agent}/llm/prompt_utils.py +0 -0
- {mcp_agent/core → fast_agent/llm}/request_params.py +0 -0
- {mcp_agent → fast_agent}/mcp/common.py +0 -0
- {mcp_agent/executor → fast_agent/mcp/prompts}/__init__.py +0 -0
- {mcp_agent → fast_agent}/mcp/prompts/prompt_constants.py +0 -0
- {mcp_agent → fast_agent}/py.typed +0 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_account_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_game_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
- {mcp_agent → fast_agent}/resources/examples/researcher/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/.env.sample +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/Makefile +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/README.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/crab.png +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/docker-compose.yml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/Dockerfile +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/entrypoint.sh +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/mcp_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/pyproject.toml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_schema.json +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/graded_report.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/short_story.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/short_story.txt +0 -0
- {mcp_agent → fast_agent/ui}/console.py +0 -0
- {mcp_agent/core → fast_agent/ui}/mermaid_utils.py +0 -0
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.3.0.dist-info}/licenses/LICENSE +0 -0
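
The dominant change in 0.3.0 is the rename of the top-level package from mcp_agent to fast_agent, visible in almost every entry above, together with the renames AugmentedLLM → FastAgentLLM and PromptMessageMultipart → PromptMessageExtended. A minimal migration sketch, assuming the moved modules keep the symbols shown in the diff below (the exact public re-exports are not visible here):

# 0.2.57
# from mcp_agent.llm.augmented_llm import AugmentedLLM
# from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart

# 0.3.0
from fast_agent.llm.fastagent_llm import FastAgentLLM
from fast_agent.types import PromptMessageExtended

The hunks below are from mcp_agent/llm/augmented_llm.py → fast_agent/llm/fastagent_llm.py, the core of this refactor.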
--- mcp_agent/llm/augmented_llm.py
+++ fast_agent/llm/fastagent_llm.py
@@ -10,44 +10,32 @@ from typing import (
     Tuple,
     Type,
     TypeVar,
-    Union,
     cast,
 )
 
+from mcp import Tool
 from mcp.types import (
-    CallToolRequest,
-    CallToolResult,
     GetPromptResult,
     PromptMessage,
-    TextContent,
 )
 from openai import NotGiven
 from openai.lib._parsing import type_to_response_format_param as _type_to_response_format
 from pydantic_core import from_json
-
-
-from
-from
-from
-from
-
-from mcp_agent.llm.memory import Memory, SimpleMemory
-from mcp_agent.llm.model_database import ModelDatabase
-from mcp_agent.llm.provider_types import Provider
-from mcp_agent.llm.sampling_format_converter import (
-    BasicFormatConverter,
-    ProviderFormatConverter,
-)
-from mcp_agent.llm.usage_tracking import TurnUsage, UsageAccumulator
-from mcp_agent.logging.logger import get_logger
-from mcp_agent.mcp.helpers.content_helpers import get_text
-from mcp_agent.mcp.interfaces import (
-    AugmentedLLMProtocol,
+
+from fast_agent.context_dependent import ContextDependent
+from fast_agent.core.logging.logger import get_logger
+from fast_agent.core.prompt import Prompt
+from fast_agent.event_progress import ProgressAction
+from fast_agent.interfaces import (
+    FastAgentLLMProtocol,
     ModelT,
 )
-from
-from
-from
+from fast_agent.llm.memory import Memory, SimpleMemory
+from fast_agent.llm.model_database import ModelDatabase
+from fast_agent.llm.provider_types import Provider
+from fast_agent.llm.usage_tracking import TurnUsage, UsageAccumulator
+from fast_agent.mcp.helpers.content_helpers import get_text
+from fast_agent.types import PromptMessageExtended, RequestParams
 
 # Define type variables locally
 MessageParamT = TypeVar("MessageParamT")
@@ -55,15 +43,11 @@ MessageT = TypeVar("MessageT")
 
 # Forward reference for type annotations
 if TYPE_CHECKING:
-    from
-    from mcp_agent.context import Context
+    from fast_agent.context import Context
 
 
-# TODO -- move this to a constant
-HUMAN_INPUT_TOOL_NAME = "__human_input__"
-
 # Context variable for storing MCP metadata
-_mcp_metadata_var: ContextVar[Dict[str, Any] | None] = ContextVar(
+_mcp_metadata_var: ContextVar[Dict[str, Any] | None] = ContextVar("mcp_metadata", default=None)
 
 
 def deep_merge(dict1: Dict[Any, Any], dict2: Dict[Any, Any]) -> Dict[Any, Any]:
@@ -89,7 +73,7 @@ def deep_merge(dict1: Dict[Any, Any], dict2: Dict[Any, Any]) -> Dict[Any, Any]:
     return dict1
 
 
-class
+class FastAgentLLM(ContextDependent, FastAgentLLMProtocol, Generic[MessageParamT, MessageT]):
     # Common parameter names used across providers
     PARAM_MESSAGES = "messages"
     PARAM_MODEL = "model"
@@ -101,47 +85,34 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
     PARAM_USE_HISTORY = "use_history"
     PARAM_MAX_ITERATIONS = "max_iterations"
     PARAM_TEMPLATE_VARS = "template_vars"
+    PARAM_MCP_METADATA = "mcp_metadata"
 
     # Base set of fields that should always be excluded
     BASE_EXCLUDE_FIELDS = {PARAM_METADATA}
 
     """
-
-
-    Our current models can actively use these capabilities—generating their own search queries,
-    selecting appropriate tools, and determining what information to retain.
+    Implementation of the Llm Protocol - intended be subclassed for Provider
+    or behaviour specific reasons. Contains convenience and template methods.
     """
 
-    provider: Provider | None = None
-
     def __init__(
         self,
         provider: Provider,
-        agent: Optional["Agent"] = None,
-        server_names: List[str] | None = None,
         instruction: str | None = None,
         name: str | None = None,
         request_params: RequestParams | None = None,
-        type_converter: Type[
-            ProviderFormatConverter[MessageParamT, MessageT]
-        ] = BasicFormatConverter,
         context: Optional["Context"] = None,
         model: Optional[str] = None,
         api_key: Optional[str] = None,
         **kwargs: dict[str, Any],
     ) -> None:
         """
-        Initialize the LLM with a list of server names and an instruction.
-        If a name is provided, it will be used to identify the LLM.
-        If an agent is provided, all other properties are optional
 
         Args:
-
-            server_names: List of MCP server names to connect to
+            provider: LLM API Provider
             instruction: System prompt for the LLM
-            name:
-            request_params: RequestParams to configure LLM
-            type_converter: Provider-specific format converter class
+            name: Name for the LLM (usually attached Agent name)
+            request_params: RequestParams to configure LLM behaviour
             context: Application context
             model: Optional model name override
             **kwargs: Additional provider-specific parameters
@@ -151,24 +122,18 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         super().__init__(context=context, **kwargs)
         self.logger = get_logger(__name__)
         self.executor = self.context.executor
-        self.
-        self.
-        self.
-        self.provider = provider
+        self.name: str = name or "fast-agent"
+        self.instruction = instruction
+        self._provider = provider
         # memory contains provider specific API types.
         self.history: Memory[MessageParamT] = SimpleMemory[MessageParamT]()
 
-        self._message_history: List[
+        self._message_history: List[PromptMessageExtended] = []
 
         # Initialize the display component
-
-        from mcp_agent.ui.console_display_legacy import ConsoleDisplay
-        else:
-        from mcp_agent.ui.console_display import ConsoleDisplay
-        self.display = ConsoleDisplay(config=self.context.config)
+        from fast_agent.ui.console_display import ConsoleDisplay
 
-
-        self._current_turn_tool_calls = 0
+        self.display = ConsoleDisplay(config=self.context.config)
 
         # Initialize default parameters, passing model info
         model_kwargs = kwargs.copy()
@@ -182,13 +147,15 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             self.default_request_params, self._init_request_params
         )
 
-
+        # Cache effective model name for type-safe access
+        self._model_name: Optional[str] = getattr(self.default_request_params, "model", None)
+
         self.verb = kwargs.get("verb")
 
         self._init_api_key = api_key
 
         # Initialize usage tracking
-        self.
+        self._usage_accumulator = UsageAccumulator()
 
     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize default parameters for the LLM.
@@ -208,43 +175,47 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
 
     async def generate(
         self,
-
+        messages: List[PromptMessageExtended],
         request_params: RequestParams | None = None,
-
+        tools: List[Tool] | None = None,
+    ) -> PromptMessageExtended:
         """
-
-
-
-
+        Generate a completion using normalized message lists.
+
+        This is the primary LLM interface that works directly with
+        List[PromptMessageExtended] for efficient internal usage.
 
-
-
-
+        Args:
+            messages: List of PromptMessageExtended objects
+            request_params: Optional parameters to configure the LLM request
+            tools: Optional list of tools available to the LLM
 
+        Returns:
+            A PromptMessageExtended containing the Assistant response
+        """
         # TODO -- create a "fast-agent" control role rather than magic strings
 
-        if
-        parts: list[str] =
+        if messages[-1].first_text().startswith("***SAVE_HISTORY"):
+            parts: list[str] = messages[-1].first_text().split(" ", 1)
             filename: str = (
                 parts[1].strip() if len(parts) > 1 else f"{self.name or 'assistant'}_prompts.txt"
             )
             await self._save_history(filename)
-            self.show_user_message(
-                f"History saved to {filename}", model=self.default_request_params.model, chat_turn=0
-            )
             return Prompt.assistant(f"History saved to {filename}")
 
-        self._precall(
+        self._precall(messages)
 
         # Store MCP metadata in context variable
         final_request_params = self.get_request_params(request_params)
         if final_request_params.mcp_metadata:
             _mcp_metadata_var.set(final_request_params.mcp_metadata)
 
-        assistant_response:
-
+        assistant_response: PromptMessageExtended = await self._apply_prompt_provider_specific(
+            messages, request_params, tools
         )
 
+        self.usage_accumulator.count_tools(len(assistant_response.tool_calls or {}))
+
         # add generic error and termination reason handling/rollback
         self._message_history.append(assistant_response)
 
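
generate() now takes a normalized List[PromptMessageExtended] plus an optional tools list and returns a single PromptMessageExtended. A hedged usage sketch; llm stands for any concrete subclass, and Prompt.user() is assumed to exist alongside the Prompt.assistant() helper shown above:

from fast_agent.core.prompt import Prompt

response = await llm.generate(
    messages=[Prompt.user("Summarise this diff")],  # Prompt.user is an assumption
    request_params=None,
    tools=None,  # optionally a List[mcp.Tool]
)
print(response.first_text())  # first_text() is the accessor generate() itself uses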
@@ -253,10 +224,11 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
     @abstractmethod
     async def _apply_prompt_provider_specific(
         self,
-        multipart_messages: List["
+        multipart_messages: List["PromptMessageExtended"],
         request_params: RequestParams | None = None,
+        tools: List[Tool] | None = None,
         is_template: bool = False,
-    ) ->
+    ) -> PromptMessageExtended:
         """
         Provider-specific implementation of apply_prompt_template.
         This default implementation handles basic text content for any LLM type.
@@ -264,7 +236,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         multimodal content appropriately.
 
         Args:
-            multipart_messages: List of
+            multipart_messages: List of PromptMessageExtended objects parsed from the prompt template
 
         Returns:
             String representation of the assistant's response if generated,
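
Provider integrations subclass FastAgentLLM and implement _apply_prompt_provider_specific, which now also receives the tools list. A skeletal sketch under that reading; MyProviderLLM and the Provider.GENERIC member are illustrative, and only the base-class signatures come from this diff:

from typing import Any, List

from mcp import Tool

from fast_agent.core.prompt import Prompt
from fast_agent.llm.fastagent_llm import FastAgentLLM
from fast_agent.llm.provider_types import Provider
from fast_agent.types import PromptMessageExtended, RequestParams


class MyProviderLLM(FastAgentLLM):
    """Hypothetical provider subclass for illustration only."""

    def __init__(self, **kwargs: Any) -> None:
        # agent, server_names and type_converter were removed from __init__ in 0.3.0
        super().__init__(provider=Provider.GENERIC, **kwargs)  # enum member assumed

    async def _apply_prompt_provider_specific(
        self,
        multipart_messages: List["PromptMessageExtended"],
        request_params: RequestParams | None = None,
        tools: List[Tool] | None = None,
        is_template: bool = False,
    ) -> PromptMessageExtended:
        # a real subclass would call the provider API here and wrap the reply
        return Prompt.assistant("stub reply")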
@@ -273,25 +245,36 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
 
     async def structured(
         self,
-
+        messages: List[PromptMessageExtended],
         model: Type[ModelT],
         request_params: RequestParams | None = None,
-    ) -> Tuple[ModelT | None,
-        """
+    ) -> Tuple[ModelT | None, PromptMessageExtended]:
+        """
+        Generate a structured response using normalized message lists.
+
+        This is the primary LLM interface for structured output that works directly with
+        List[PromptMessageExtended] for efficient internal usage.
+
+        Args:
+            messages: List of PromptMessageExtended objects
+            model: The Pydantic model class to parse the response into
+            request_params: Optional parameters to configure the LLM request
+
+        Returns:
+            Tuple of (parsed model instance or None, assistant response message)
+        """
 
-
-        if multipart_messages and isinstance(multipart_messages[0], PromptMessage):
-            multipart_messages = PromptMessageMultipart.to_multipart(multipart_messages)
+        self._precall(messages)
 
-        self._precall(multipart_messages)
-
         # Store MCP metadata in context variable
         final_request_params = self.get_request_params(request_params)
+
+        # TODO -- this doesn't need to go here anymore.
         if final_request_params.mcp_metadata:
             _mcp_metadata_var.set(final_request_params.mcp_metadata)
-
+
         result, assistant_response = await self._apply_prompt_provider_specific_structured(
-
+            messages, model, request_params
         )
 
         self._message_history.append(assistant_response)
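
structured() mirrors generate() but parses the reply into a Pydantic model, returning a (instance or None, raw message) tuple. A sketch with a hypothetical schema; Prompt.user() is again assumed:

from pydantic import BaseModel

from fast_agent.core.prompt import Prompt


class WeatherReport(BaseModel):  # hypothetical schema for illustration
    city: str
    temperature_c: float


parsed, raw = await llm.structured(
    messages=[Prompt.user("Return the weather for Paris as JSON")],
    model=WeatherReport,
)
if parsed is None:
    # the base implementation falls back to JSON parsing and returns None on failure
    print("parse failed:", raw.first_text())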
@@ -338,10 +321,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
 
     async def _apply_prompt_provider_specific_structured(
         self,
-        multipart_messages: List[
+        multipart_messages: List[PromptMessageExtended],
         model: Type[ModelT],
         request_params: RequestParams | None = None,
-    ) -> Tuple[ModelT | None,
+    ) -> Tuple[ModelT | None, PromptMessageExtended]:
         """Base class attempts to parse JSON - subclasses can use provider specific functionality"""
 
         request_params = self.get_request_params(request_params)
@@ -351,14 +334,14 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         if schema is not NotGiven:
             request_params.response_format = schema
 
-        result:
+        result: PromptMessageExtended = await self._apply_prompt_provider_specific(
             multipart_messages, request_params
         )
         return self._structured_from_multipart(result, model)
 
     def _structured_from_multipart(
-        self, message:
-    ) -> Tuple[ModelT | None,
+        self, message: PromptMessageExtended, model: Type[ModelT]
+    ) -> Tuple[ModelT | None, PromptMessageExtended]:
         """Parse the content of a PromptMessage and return the structured model and message itself"""
         try:
             text = get_text(message.content[-1]) or ""
@@ -370,15 +353,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             logger.warning(f"Failed to parse structured response: {str(e)}")
             return None, message
 
-    def _precall(self, multipart_messages: List[
+    def _precall(self, multipart_messages: List[PromptMessageExtended]) -> None:
         """Pre-call hook to modify the message before sending it to the provider."""
+        # Ensure all messages are PromptMessageExtended before extending history
         self._message_history.extend(multipart_messages)
-        if multipart_messages[-1].role == "user":
-            self.show_user_message(
-                render_multipart_message(multipart_messages[-1]),
-                model=self.default_request_params.model,
-                chat_turn=self.chat_turn(),
-            )
 
     def chat_turn(self) -> int:
         """Return the current chat turn number"""
@@ -470,108 +448,9 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         # Many LLM implementations will allow the same type for input and output messages
         return cast("MessageParamT", message)
 
-    def show_tool_result(self, result: CallToolResult) -> None:
-        """Display a tool result in a formatted panel."""
-        self.display.show_tool_result(result, name=self.name)
-
-    def show_tool_call(self, available_tools, tool_name, tool_args) -> None:
-        """Display a tool call in a formatted panel."""
-        self._current_turn_tool_calls += 1
-        self.display.show_tool_call(available_tools, tool_name, tool_args, name=self.name)
-
-    def _reset_turn_tool_calls(self) -> None:
-        """Reset tool call counter for new turn."""
-        self._current_turn_tool_calls = 0
-
     def _finalize_turn_usage(self, turn_usage: "TurnUsage") -> None:
         """Set tool call count on TurnUsage and add to accumulator."""
-
-        self.usage_accumulator.add_turn(turn_usage)
-
-    async def show_assistant_message(
-        self,
-        message_text: str | Text | None,
-        highlight_namespaced_tool: str = "",
-        title: str = "ASSISTANT",
-    ) -> None:
-        if message_text is None:
-            message_text = Text("No content to display", style="dim green italic")
-        """Display an assistant message in a formatted panel."""
-        await self.display.show_assistant_message(
-            message_text,
-            aggregator=self.aggregator,
-            highlight_namespaced_tool=highlight_namespaced_tool,
-            title=title,
-            name=self.name,
-        )
-
-    def show_user_message(self, message, model: str | None, chat_turn: int) -> None:
-        """Display a user message in a formatted panel."""
-        self.display.show_user_message(message, model, chat_turn, name=self.name)
-
-    async def pre_tool_call(
-        self, tool_call_id: str | None, request: CallToolRequest
-    ) -> CallToolRequest | bool:
-        """Called before a tool is executed. Return False to prevent execution."""
-        return request
-
-    async def post_tool_call(
-        self, tool_call_id: str | None, request: CallToolRequest, result: CallToolResult
-    ) -> CallToolResult:
-        """Called after a tool execution. Can modify the result before it's returned."""
-        return result
-
-    async def call_tool(
-        self,
-        request: CallToolRequest,
-        tool_call_id: str | None = None,
-    ) -> CallToolResult:
-        """Call a tool with the given parameters and optional ID"""
-
-        try:
-            preprocess = await self.pre_tool_call(
-                tool_call_id=tool_call_id,
-                request=request,
-            )
-
-            if isinstance(preprocess, bool):
-                if not preprocess:
-                    return CallToolResult(
-                        isError=True,
-                        content=[
-                            TextContent(
-                                type="text",
-                                text=f"Error: Tool '{request.params.name}' was not allowed to run.",
-                            )
-                        ],
-                    )
-            else:
-                request = preprocess
-
-            tool_name = request.params.name
-            tool_args = request.params.arguments
-            result = await self.aggregator.call_tool(tool_name, tool_args)
-
-            postprocess = await self.post_tool_call(
-                tool_call_id=tool_call_id, request=request, result=result
-            )
-
-            if isinstance(postprocess, CallToolResult):
-                result = postprocess
-
-            return result
-        except PromptExitError:
-            raise
-        except Exception as e:
-            return CallToolResult(
-                isError=True,
-                content=[
-                    TextContent(
-                        type="text",
-                        text=f"Error executing tool '{request.params.name}': {str(e)}",
-                    )
-                ],
-            )
+        self._usage_accumulator.add_turn(turn_usage)
 
     def _log_chat_progress(
         self, chat_turn: Optional[int] = None, model: Optional[str] = None
@@ -660,7 +539,6 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             description=description,
             message_count=message_count,
             agent_name=self.name,
-            aggregator=self.aggregator,
             arguments=arguments,
         )
 
@@ -678,7 +556,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             String representation of the assistant's response if generated,
             or the last assistant message in the prompt
         """
-        from
+        from fast_agent.types import PromptMessageExtended
 
         # Check if we have any messages
         if not prompt_result.messages:
@@ -695,8 +573,8 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             arguments=arguments,
         )
 
-        # Convert to
-        multipart_messages =
+        # Convert to PromptMessageExtended objects
+        multipart_messages = PromptMessageExtended.parse_get_prompt_result(prompt_result)
 
         # Delegate to the provider-specific implementation
         result = await self._apply_prompt_provider_specific(
@@ -711,21 +589,21 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         Uses JSON format for .json files (MCP SDK compatible format) and
         delimited text format for other extensions.
         """
-        from
+        from fast_agent.mcp.prompt_serialization import save_messages_to_file
 
         # Save messages using the unified save function that auto-detects format
         save_messages_to_file(self._message_history, filename)
 
     @property
-    def message_history(self) -> List[
+    def message_history(self) -> List[PromptMessageExtended]:
         """
-        Return the agent's message history as
+        Return the agent's message history as PromptMessageExtended objects.
 
         This history can be used to transfer state between agents or for
         analysis and debugging purposes.
 
         Returns:
-            List of
+            List of PromptMessageExtended objects representing the conversation history
         """
         return self._message_history
 
@@ -733,11 +611,15 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         if self._init_api_key:
             return self._init_api_key
 
-        from
+        from fast_agent.llm.provider_key_manager import ProviderKeyManager
 
         assert self.provider
         return ProviderKeyManager.get_api_key(self.provider.value, self.context.config)
 
+    @property
+    def usage_accumulator(self):
+        return self._usage_accumulator
+
     def get_usage_summary(self) -> dict:
         """
         Get a summary of usage statistics for this LLM instance.
@@ -746,4 +628,32 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         Dictionary containing usage statistics including tokens, cache metrics,
         and context window utilization.
         """
-        return self.
+        return self._usage_accumulator.get_summary()
+
+    @property
+    def provider(self) -> Provider:
+        """
+        Return the LLM provider type.
+
+        Returns:
+            The Provider enum value representing the LLM provider
+        """
+        return self._provider
+
+    @property
+    def model_name(self) -> str | None:
+        """Return the effective model name, if set."""
+        return self._model_name
+
+    @property
+    def model_info(self):
+        """Return resolved model information with capabilities.
+
+        Uses a lightweight resolver backed by the ModelDatabase and provides
+        text/document/vision flags, context window, etc.
+        """
+        from fast_agent.llm.model_info import ModelInfo
+
+        if not self._model_name:
+            return None
+        return ModelInfo.from_name(self._model_name, self._provider)