fast-agent-mcp 0.2.58__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- fast_agent/__init__.py +127 -0
- fast_agent/agents/__init__.py +36 -0
- {mcp_agent/core → fast_agent/agents}/agent_types.py +2 -1
- fast_agent/agents/llm_agent.py +217 -0
- fast_agent/agents/llm_decorator.py +486 -0
- mcp_agent/agents/base_agent.py → fast_agent/agents/mcp_agent.py +377 -385
- fast_agent/agents/tool_agent.py +168 -0
- {mcp_agent → fast_agent}/agents/workflow/chain_agent.py +43 -33
- {mcp_agent → fast_agent}/agents/workflow/evaluator_optimizer.py +31 -35
- {mcp_agent → fast_agent}/agents/workflow/iterative_planner.py +56 -47
- {mcp_agent → fast_agent}/agents/workflow/orchestrator_models.py +4 -4
- {mcp_agent → fast_agent}/agents/workflow/parallel_agent.py +34 -41
- {mcp_agent → fast_agent}/agents/workflow/router_agent.py +54 -39
- {mcp_agent → fast_agent}/cli/__main__.py +5 -3
- {mcp_agent → fast_agent}/cli/commands/check_config.py +95 -66
- {mcp_agent → fast_agent}/cli/commands/go.py +20 -11
- {mcp_agent → fast_agent}/cli/commands/quickstart.py +4 -4
- {mcp_agent → fast_agent}/cli/commands/server_helpers.py +1 -1
- {mcp_agent → fast_agent}/cli/commands/setup.py +75 -134
- {mcp_agent → fast_agent}/cli/commands/url_parser.py +9 -8
- {mcp_agent → fast_agent}/cli/main.py +36 -16
- {mcp_agent → fast_agent}/cli/terminal.py +2 -2
- {mcp_agent → fast_agent}/config.py +10 -2
- fast_agent/constants.py +8 -0
- {mcp_agent → fast_agent}/context.py +24 -19
- {mcp_agent → fast_agent}/context_dependent.py +9 -5
- fast_agent/core/__init__.py +52 -0
- {mcp_agent → fast_agent}/core/agent_app.py +39 -36
- fast_agent/core/core_app.py +135 -0
- {mcp_agent → fast_agent}/core/direct_decorators.py +12 -26
- {mcp_agent → fast_agent}/core/direct_factory.py +95 -73
- {mcp_agent → fast_agent/core}/executor/executor.py +4 -5
- {mcp_agent → fast_agent}/core/fastagent.py +32 -32
- fast_agent/core/logging/__init__.py +5 -0
- {mcp_agent → fast_agent/core}/logging/events.py +3 -3
- {mcp_agent → fast_agent/core}/logging/json_serializer.py +1 -1
- {mcp_agent → fast_agent/core}/logging/listeners.py +85 -7
- {mcp_agent → fast_agent/core}/logging/logger.py +7 -7
- {mcp_agent → fast_agent/core}/logging/transport.py +10 -11
- fast_agent/core/prompt.py +9 -0
- {mcp_agent → fast_agent}/core/validation.py +4 -4
- fast_agent/event_progress.py +61 -0
- fast_agent/history/history_exporter.py +44 -0
- {mcp_agent → fast_agent}/human_input/__init__.py +9 -12
- {mcp_agent → fast_agent}/human_input/elicitation_handler.py +26 -8
- {mcp_agent → fast_agent}/human_input/elicitation_state.py +7 -7
- {mcp_agent → fast_agent}/human_input/simple_form.py +6 -4
- {mcp_agent → fast_agent}/human_input/types.py +1 -18
- fast_agent/interfaces.py +228 -0
- fast_agent/llm/__init__.py +9 -0
- mcp_agent/llm/augmented_llm.py → fast_agent/llm/fastagent_llm.py +127 -218
- fast_agent/llm/internal/passthrough.py +137 -0
- mcp_agent/llm/augmented_llm_playback.py → fast_agent/llm/internal/playback.py +29 -25
- mcp_agent/llm/augmented_llm_silent.py → fast_agent/llm/internal/silent.py +10 -17
- fast_agent/llm/internal/slow.py +38 -0
- {mcp_agent → fast_agent}/llm/memory.py +40 -30
- {mcp_agent → fast_agent}/llm/model_database.py +35 -2
- {mcp_agent → fast_agent}/llm/model_factory.py +103 -77
- fast_agent/llm/model_info.py +126 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/anthropic_utils.py +7 -7
- fast_agent/llm/provider/anthropic/llm_anthropic.py +603 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/multipart_converter_anthropic.py +79 -86
- {mcp_agent/llm/providers → fast_agent/llm/provider/bedrock}/bedrock_utils.py +3 -1
- mcp_agent/llm/providers/augmented_llm_bedrock.py → fast_agent/llm/provider/bedrock/llm_bedrock.py +833 -717
- {mcp_agent/llm/providers → fast_agent/llm/provider/google}/google_converter.py +66 -14
- fast_agent/llm/provider/google/llm_google_native.py +431 -0
- mcp_agent/llm/providers/augmented_llm_aliyun.py → fast_agent/llm/provider/openai/llm_aliyun.py +6 -7
- mcp_agent/llm/providers/augmented_llm_azure.py → fast_agent/llm/provider/openai/llm_azure.py +4 -4
- mcp_agent/llm/providers/augmented_llm_deepseek.py → fast_agent/llm/provider/openai/llm_deepseek.py +10 -11
- mcp_agent/llm/providers/augmented_llm_generic.py → fast_agent/llm/provider/openai/llm_generic.py +4 -4
- mcp_agent/llm/providers/augmented_llm_google_oai.py → fast_agent/llm/provider/openai/llm_google_oai.py +4 -4
- mcp_agent/llm/providers/augmented_llm_groq.py → fast_agent/llm/provider/openai/llm_groq.py +14 -16
- mcp_agent/llm/providers/augmented_llm_openai.py → fast_agent/llm/provider/openai/llm_openai.py +133 -207
- mcp_agent/llm/providers/augmented_llm_openrouter.py → fast_agent/llm/provider/openai/llm_openrouter.py +6 -6
- mcp_agent/llm/providers/augmented_llm_tensorzero_openai.py → fast_agent/llm/provider/openai/llm_tensorzero_openai.py +17 -16
- mcp_agent/llm/providers/augmented_llm_xai.py → fast_agent/llm/provider/openai/llm_xai.py +6 -6
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/multipart_converter_openai.py +125 -63
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_multipart.py +12 -12
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_utils.py +18 -16
- {mcp_agent → fast_agent}/llm/provider_key_manager.py +2 -2
- {mcp_agent → fast_agent}/llm/provider_types.py +2 -0
- {mcp_agent → fast_agent}/llm/sampling_converter.py +15 -12
- {mcp_agent → fast_agent}/llm/usage_tracking.py +23 -5
- fast_agent/mcp/__init__.py +54 -0
- {mcp_agent → fast_agent}/mcp/elicitation_factory.py +3 -3
- {mcp_agent → fast_agent}/mcp/elicitation_handlers.py +19 -10
- {mcp_agent → fast_agent}/mcp/gen_client.py +3 -3
- fast_agent/mcp/helpers/__init__.py +36 -0
- fast_agent/mcp/helpers/content_helpers.py +183 -0
- {mcp_agent → fast_agent}/mcp/helpers/server_config_helpers.py +8 -8
- {mcp_agent → fast_agent}/mcp/hf_auth.py +25 -23
- fast_agent/mcp/interfaces.py +93 -0
- {mcp_agent → fast_agent}/mcp/logger_textio.py +4 -4
- {mcp_agent → fast_agent}/mcp/mcp_agent_client_session.py +49 -44
- {mcp_agent → fast_agent}/mcp/mcp_aggregator.py +66 -115
- {mcp_agent → fast_agent}/mcp/mcp_connection_manager.py +16 -23
- {mcp_agent/core → fast_agent/mcp}/mcp_content.py +23 -15
- {mcp_agent → fast_agent}/mcp/mime_utils.py +39 -0
- fast_agent/mcp/prompt.py +159 -0
- mcp_agent/mcp/prompt_message_multipart.py → fast_agent/mcp/prompt_message_extended.py +27 -20
- {mcp_agent → fast_agent}/mcp/prompt_render.py +21 -19
- {mcp_agent → fast_agent}/mcp/prompt_serialization.py +46 -46
- fast_agent/mcp/prompts/__main__.py +7 -0
- {mcp_agent → fast_agent}/mcp/prompts/prompt_helpers.py +31 -30
- {mcp_agent → fast_agent}/mcp/prompts/prompt_load.py +8 -8
- {mcp_agent → fast_agent}/mcp/prompts/prompt_server.py +11 -19
- {mcp_agent → fast_agent}/mcp/prompts/prompt_template.py +18 -18
- {mcp_agent → fast_agent}/mcp/resource_utils.py +1 -1
- {mcp_agent → fast_agent}/mcp/sampling.py +31 -26
- {mcp_agent/mcp_server → fast_agent/mcp/server}/__init__.py +1 -1
- {mcp_agent/mcp_server → fast_agent/mcp/server}/agent_server.py +5 -6
- fast_agent/mcp/ui_agent.py +48 -0
- fast_agent/mcp/ui_mixin.py +209 -0
- fast_agent/mcp_server_registry.py +90 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis-campaign.py +5 -4
- {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/forms_demo.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character_handler.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/tool_call.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_one.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_two.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher-eval.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher-imp.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/tensorzero/agent.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/tensorzero/image_demo.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/tensorzero/simple_agent.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/chaining.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/evaluator.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/workflows/human_input.py +5 -3
- {mcp_agent → fast_agent}/resources/examples/workflows/orchestrator.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/parallel.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/workflows/router.py +5 -2
- fast_agent/resources/setup/.gitignore +24 -0
- fast_agent/resources/setup/agent.py +18 -0
- fast_agent/resources/setup/fastagent.config.yaml +44 -0
- fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
- fast_agent/resources/setup/pyproject.toml.tmpl +17 -0
- fast_agent/tools/elicitation.py +369 -0
- fast_agent/types/__init__.py +32 -0
- fast_agent/types/llm_stop_reason.py +77 -0
- fast_agent/ui/__init__.py +38 -0
- fast_agent/ui/console_display.py +1005 -0
- {mcp_agent/human_input → fast_agent/ui}/elicitation_form.py +17 -12
- mcp_agent/human_input/elicitation_forms.py → fast_agent/ui/elicitation_style.py +1 -1
- {mcp_agent/core → fast_agent/ui}/enhanced_prompt.py +96 -25
- {mcp_agent/core → fast_agent/ui}/interactive_prompt.py +330 -125
- fast_agent/ui/mcp_ui_utils.py +224 -0
- {mcp_agent → fast_agent/ui}/progress_display.py +2 -2
- {mcp_agent/logging → fast_agent/ui}/rich_progress.py +4 -4
- {mcp_agent/core → fast_agent/ui}/usage_display.py +3 -8
- {fast_agent_mcp-0.2.58.dist-info → fast_agent_mcp-0.3.1.dist-info}/METADATA +7 -7
- fast_agent_mcp-0.3.1.dist-info/RECORD +203 -0
- fast_agent_mcp-0.3.1.dist-info/entry_points.txt +5 -0
- fast_agent_mcp-0.2.58.dist-info/RECORD +0 -193
- fast_agent_mcp-0.2.58.dist-info/entry_points.txt +0 -6
- mcp_agent/__init__.py +0 -114
- mcp_agent/agents/agent.py +0 -92
- mcp_agent/agents/workflow/__init__.py +0 -1
- mcp_agent/agents/workflow/orchestrator_agent.py +0 -597
- mcp_agent/app.py +0 -175
- mcp_agent/core/__init__.py +0 -26
- mcp_agent/core/prompt.py +0 -191
- mcp_agent/event_progress.py +0 -134
- mcp_agent/human_input/handler.py +0 -81
- mcp_agent/llm/__init__.py +0 -2
- mcp_agent/llm/augmented_llm_passthrough.py +0 -232
- mcp_agent/llm/augmented_llm_slow.py +0 -53
- mcp_agent/llm/providers/__init__.py +0 -8
- mcp_agent/llm/providers/augmented_llm_anthropic.py +0 -718
- mcp_agent/llm/providers/augmented_llm_google_native.py +0 -496
- mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -57
- mcp_agent/llm/providers/sampling_converter_openai.py +0 -26
- mcp_agent/llm/sampling_format_converter.py +0 -37
- mcp_agent/logging/__init__.py +0 -0
- mcp_agent/mcp/__init__.py +0 -50
- mcp_agent/mcp/helpers/__init__.py +0 -25
- mcp_agent/mcp/helpers/content_helpers.py +0 -187
- mcp_agent/mcp/interfaces.py +0 -266
- mcp_agent/mcp/prompts/__init__.py +0 -0
- mcp_agent/mcp/prompts/__main__.py +0 -10
- mcp_agent/mcp_server_registry.py +0 -343
- mcp_agent/tools/tool_definition.py +0 -14
- mcp_agent/ui/console_display.py +0 -790
- mcp_agent/ui/console_display_legacy.py +0 -401
- {mcp_agent → fast_agent}/agents/workflow/orchestrator_prompts.py +0 -0
- {mcp_agent/agents → fast_agent/cli}/__init__.py +0 -0
- {mcp_agent → fast_agent}/cli/constants.py +0 -0
- {mcp_agent → fast_agent}/core/error_handling.py +0 -0
- {mcp_agent → fast_agent}/core/exceptions.py +0 -0
- {mcp_agent/cli → fast_agent/core/executor}/__init__.py +0 -0
- {mcp_agent → fast_agent/core}/executor/task_registry.py +0 -0
- {mcp_agent → fast_agent/core}/executor/workflow_signal.py +0 -0
- {mcp_agent → fast_agent}/human_input/form_fields.py +0 -0
- {mcp_agent → fast_agent}/llm/prompt_utils.py +0 -0
- {mcp_agent/core → fast_agent/llm}/request_params.py +0 -0
- {mcp_agent → fast_agent}/mcp/common.py +0 -0
- {mcp_agent/executor → fast_agent/mcp/prompts}/__init__.py +0 -0
- {mcp_agent → fast_agent}/mcp/prompts/prompt_constants.py +0 -0
- {mcp_agent → fast_agent}/py.typed +0 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_account_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_forms_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_game_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
- {mcp_agent → fast_agent}/resources/examples/researcher/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/.env.sample +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/Makefile +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/README.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/crab.png +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/docker-compose.yml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/Dockerfile +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/entrypoint.sh +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/mcp_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/pyproject.toml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_schema.json +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/graded_report.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/short_story.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/short_story.txt +0 -0
- {mcp_agent → fast_agent/ui}/console.py +0 -0
- {mcp_agent/core → fast_agent/ui}/mermaid_utils.py +0 -0
- {fast_agent_mcp-0.2.58.dist-info → fast_agent_mcp-0.3.1.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.58.dist-info → fast_agent_mcp-0.3.1.dist-info}/licenses/LICENSE +0 -0
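The headline change in this release is the move of the top-level package from `mcp_agent` to `fast_agent`, visible throughout the rename map above. As a rough guide to what this means for downstream code, here is a minimal migration sketch; the `FastAgent` symbol is assumed from the `core/fastagent.py` module path and is not confirmed by this diff, while `PromptMessageExtended` (the renamed `PromptMessageMultipart`) is imported from `fast_agent.types` exactly as the new code in the detailed diff below does.

```python
# Hypothetical migration sketch for the mcp_agent -> fast_agent rename.
# The FastAgent class name is an assumption based on the module path;
# PromptMessageExtended replaces PromptMessageMultipart (see the
# prompt_message_multipart.py -> prompt_message_extended.py rename above).

# fast-agent-mcp 0.2.58:
# from mcp_agent.core.fastagent import FastAgent
# from mcp_agent.mcp.prompt_message_multipart import PromptMessageMultipart

# fast-agent-mcp 0.3.1:
from fast_agent.core.fastagent import FastAgent
from fast_agent.types import PromptMessageExtended
```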
mcp_agent/llm/augmented_llm.py → fast_agent/llm/fastagent_llm.py (+127 -218)

```diff
@@ -10,44 +10,32 @@ from typing import (
     Tuple,
     Type,
     TypeVar,
-    Union,
     cast,
 )
 
+from mcp import Tool
 from mcp.types import (
-    CallToolRequest,
-    CallToolResult,
     GetPromptResult,
     PromptMessage,
-    TextContent,
 )
 from openai import NotGiven
 from openai.lib._parsing import type_to_response_format_param as _type_to_response_format
 from pydantic_core import from_json
-
-
-from …
-from …
-from …
-from …
-
-from mcp_agent.llm.memory import Memory, SimpleMemory
-from mcp_agent.llm.model_database import ModelDatabase
-from mcp_agent.llm.provider_types import Provider
-from mcp_agent.llm.sampling_format_converter import (
-    BasicFormatConverter,
-    ProviderFormatConverter,
-)
-from mcp_agent.llm.usage_tracking import TurnUsage, UsageAccumulator
-from mcp_agent.logging.logger import get_logger
-from mcp_agent.mcp.helpers.content_helpers import get_text
-from mcp_agent.mcp.interfaces import (
-    AugmentedLLMProtocol,
+
+from fast_agent.context_dependent import ContextDependent
+from fast_agent.core.logging.logger import get_logger
+from fast_agent.core.prompt import Prompt
+from fast_agent.event_progress import ProgressAction
+from fast_agent.interfaces import (
+    FastAgentLLMProtocol,
     ModelT,
 )
-from …
-from …
-from …
+from fast_agent.llm.memory import Memory, SimpleMemory
+from fast_agent.llm.model_database import ModelDatabase
+from fast_agent.llm.provider_types import Provider
+from fast_agent.llm.usage_tracking import TurnUsage, UsageAccumulator
+from fast_agent.mcp.helpers.content_helpers import get_text
+from fast_agent.types import PromptMessageExtended, RequestParams
 
 # Define type variables locally
 MessageParamT = TypeVar("MessageParamT")
@@ -55,15 +43,11 @@ MessageT = TypeVar("MessageT")
 
 # Forward reference for type annotations
 if TYPE_CHECKING:
-    from …
-    from mcp_agent.context import Context
+    from fast_agent.context import Context
 
 
-# TODO -- move this to a constant
-HUMAN_INPUT_TOOL_NAME = "__human_input__"
-
 # Context variable for storing MCP metadata
-_mcp_metadata_var: ContextVar[Dict[str, Any] | None] = ContextVar(
+_mcp_metadata_var: ContextVar[Dict[str, Any] | None] = ContextVar("mcp_metadata", default=None)
 
 
 def deep_merge(dict1: Dict[Any, Any], dict2: Dict[Any, Any]) -> Dict[Any, Any]:
@@ -89,7 +73,7 @@ def deep_merge(dict1: Dict[Any, Any], dict2: Dict[Any, Any]) -> Dict[Any, Any]:
     return dict1
 
 
-class …
+class FastAgentLLM(ContextDependent, FastAgentLLMProtocol, Generic[MessageParamT, MessageT]):
     # Common parameter names used across providers
     PARAM_MESSAGES = "messages"
     PARAM_MODEL = "model"
@@ -107,42 +91,28 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
     BASE_EXCLUDE_FIELDS = {PARAM_METADATA}
 
     """
-
-
-    Our current models can actively use these capabilities—generating their own search queries,
-    selecting appropriate tools, and determining what information to retain.
+    Implementation of the Llm Protocol - intended be subclassed for Provider
+    or behaviour specific reasons. Contains convenience and template methods.
     """
 
-    provider: Provider | None = None
-
     def __init__(
         self,
         provider: Provider,
-        agent: Optional["Agent"] = None,
-        server_names: List[str] | None = None,
         instruction: str | None = None,
         name: str | None = None,
         request_params: RequestParams | None = None,
-        type_converter: Type[
-            ProviderFormatConverter[MessageParamT, MessageT]
-        ] = BasicFormatConverter,
         context: Optional["Context"] = None,
         model: Optional[str] = None,
         api_key: Optional[str] = None,
         **kwargs: dict[str, Any],
     ) -> None:
         """
-        Initialize the LLM with a list of server names and an instruction.
-        If a name is provided, it will be used to identify the LLM.
-        If an agent is provided, all other properties are optional
 
         Args:
-
-            server_names: List of MCP server names to connect to
+            provider: LLM API Provider
             instruction: System prompt for the LLM
-            name: …
-            request_params: RequestParams to configure LLM
-            type_converter: Provider-specific format converter class
+            name: Name for the LLM (usually attached Agent name)
+            request_params: RequestParams to configure LLM behaviour
             context: Application context
             model: Optional model name override
             **kwargs: Additional provider-specific parameters
@@ -152,24 +122,18 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         super().__init__(context=context, **kwargs)
         self.logger = get_logger(__name__)
         self.executor = self.context.executor
-        self.…
-        self.…
-        self.…
-        self.provider = provider
+        self.name: str = name or "fast-agent"
+        self.instruction = instruction
+        self._provider = provider
         # memory contains provider specific API types.
         self.history: Memory[MessageParamT] = SimpleMemory[MessageParamT]()
 
-        self._message_history: List[…
+        self._message_history: List[PromptMessageExtended] = []
 
         # Initialize the display component
-        …
-            from mcp_agent.ui.console_display_legacy import ConsoleDisplay
-        else:
-            from mcp_agent.ui.console_display import ConsoleDisplay
-        self.display = ConsoleDisplay(config=self.context.config)
+        from fast_agent.ui.console_display import ConsoleDisplay
 
-
-        self._current_turn_tool_calls = 0
+        self.display = ConsoleDisplay(config=self.context.config)
 
         # Initialize default parameters, passing model info
         model_kwargs = kwargs.copy()
@@ -183,13 +147,15 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             self.default_request_params, self._init_request_params
         )
 
-
+        # Cache effective model name for type-safe access
+        self._model_name: Optional[str] = getattr(self.default_request_params, "model", None)
+
         self.verb = kwargs.get("verb")
 
         self._init_api_key = api_key
 
         # Initialize usage tracking
-        self.…
+        self._usage_accumulator = UsageAccumulator()
 
     def _initialize_default_params(self, kwargs: dict) -> RequestParams:
         """Initialize default parameters for the LLM.
@@ -209,43 +175,47 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
 
     async def generate(
         self,
-        …
+        messages: List[PromptMessageExtended],
         request_params: RequestParams | None = None,
-        …
+        tools: List[Tool] | None = None,
+    ) -> PromptMessageExtended:
         """
-
-
-
-
+        Generate a completion using normalized message lists.
+
+        This is the primary LLM interface that works directly with
+        List[PromptMessageExtended] for efficient internal usage.
 
-
-
-
+        Args:
+            messages: List of PromptMessageExtended objects
+            request_params: Optional parameters to configure the LLM request
+            tools: Optional list of tools available to the LLM
 
+        Returns:
+            A PromptMessageExtended containing the Assistant response
+        """
         # TODO -- create a "fast-agent" control role rather than magic strings
 
-        if …
-            parts: list[str] = …
+        if messages[-1].first_text().startswith("***SAVE_HISTORY"):
+            parts: list[str] = messages[-1].first_text().split(" ", 1)
             filename: str = (
                 parts[1].strip() if len(parts) > 1 else f"{self.name or 'assistant'}_prompts.txt"
             )
             await self._save_history(filename)
-            self.show_user_message(
-                f"History saved to {filename}", model=self.default_request_params.model, chat_turn=0
-            )
             return Prompt.assistant(f"History saved to {filename}")
 
-        self._precall(…
+        self._precall(messages)
 
         # Store MCP metadata in context variable
         final_request_params = self.get_request_params(request_params)
         if final_request_params.mcp_metadata:
             _mcp_metadata_var.set(final_request_params.mcp_metadata)
 
-        assistant_response: …
-
+        assistant_response: PromptMessageExtended = await self._apply_prompt_provider_specific(
+            messages, request_params, tools
         )
 
+        self.usage_accumulator.count_tools(len(assistant_response.tool_calls or {}))
+
         # add generic error and termination reason handling/rollback
         self._message_history.append(assistant_response)
 
@@ -254,10 +224,11 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
     @abstractmethod
     async def _apply_prompt_provider_specific(
         self,
-        multipart_messages: List["…
+        multipart_messages: List["PromptMessageExtended"],
         request_params: RequestParams | None = None,
+        tools: List[Tool] | None = None,
         is_template: bool = False,
-    ) -> …
+    ) -> PromptMessageExtended:
         """
         Provider-specific implementation of apply_prompt_template.
         This default implementation handles basic text content for any LLM type.
@@ -265,7 +236,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         multimodal content appropriately.
 
         Args:
-            multipart_messages: List of …
+            multipart_messages: List of PromptMessageExtended objects parsed from the prompt template
 
         Returns:
             String representation of the assistant's response if generated,
@@ -274,25 +245,36 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
 
     async def structured(
         self,
-        …
+        messages: List[PromptMessageExtended],
         model: Type[ModelT],
         request_params: RequestParams | None = None,
-    ) -> Tuple[ModelT | None, …
-        """
+    ) -> Tuple[ModelT | None, PromptMessageExtended]:
+        """
+        Generate a structured response using normalized message lists.
+
+        This is the primary LLM interface for structured output that works directly with
+        List[PromptMessageExtended] for efficient internal usage.
+
+        Args:
+            messages: List of PromptMessageExtended objects
+            model: The Pydantic model class to parse the response into
+            request_params: Optional parameters to configure the LLM request
+
+        Returns:
+            Tuple of (parsed model instance or None, assistant response message)
+        """
 
-
-        if multipart_messages and isinstance(multipart_messages[0], PromptMessage):
-            multipart_messages = PromptMessageMultipart.to_multipart(multipart_messages)
+        self._precall(messages)
 
-        self._precall(multipart_messages)
-
         # Store MCP metadata in context variable
         final_request_params = self.get_request_params(request_params)
+
+        # TODO -- this doesn't need to go here anymore.
         if final_request_params.mcp_metadata:
             _mcp_metadata_var.set(final_request_params.mcp_metadata)
-
+
         result, assistant_response = await self._apply_prompt_provider_specific_structured(
-            …
+            messages, model, request_params
         )
 
         self._message_history.append(assistant_response)
@@ -339,10 +321,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
 
     async def _apply_prompt_provider_specific_structured(
         self,
-        multipart_messages: List[…
+        multipart_messages: List[PromptMessageExtended],
         model: Type[ModelT],
         request_params: RequestParams | None = None,
-    ) -> Tuple[ModelT | None, …
+    ) -> Tuple[ModelT | None, PromptMessageExtended]:
         """Base class attempts to parse JSON - subclasses can use provider specific functionality"""
 
         request_params = self.get_request_params(request_params)
@@ -352,14 +334,14 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         if schema is not NotGiven:
             request_params.response_format = schema
 
-        result: …
+        result: PromptMessageExtended = await self._apply_prompt_provider_specific(
             multipart_messages, request_params
         )
         return self._structured_from_multipart(result, model)
 
     def _structured_from_multipart(
-        self, message: …
-    ) -> Tuple[ModelT | None, …
+        self, message: PromptMessageExtended, model: Type[ModelT]
+    ) -> Tuple[ModelT | None, PromptMessageExtended]:
         """Parse the content of a PromptMessage and return the structured model and message itself"""
         try:
             text = get_text(message.content[-1]) or ""
@@ -371,15 +353,10 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             logger.warning(f"Failed to parse structured response: {str(e)}")
             return None, message
 
-    def _precall(self, multipart_messages: List[…
+    def _precall(self, multipart_messages: List[PromptMessageExtended]) -> None:
         """Pre-call hook to modify the message before sending it to the provider."""
+        # Ensure all messages are PromptMessageExtended before extending history
         self._message_history.extend(multipart_messages)
-        if multipart_messages[-1].role == "user":
-            self.show_user_message(
-                render_multipart_message(multipart_messages[-1]),
-                model=self.default_request_params.model,
-                chat_turn=self.chat_turn(),
-            )
 
     def chat_turn(self) -> int:
         """Return the current chat turn number"""
@@ -471,108 +448,9 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         # Many LLM implementations will allow the same type for input and output messages
         return cast("MessageParamT", message)
 
-    def show_tool_result(self, result: CallToolResult) -> None:
-        """Display a tool result in a formatted panel."""
-        self.display.show_tool_result(result, name=self.name)
-
-    def show_tool_call(self, available_tools, tool_name, tool_args) -> None:
-        """Display a tool call in a formatted panel."""
-        self._current_turn_tool_calls += 1
-        self.display.show_tool_call(available_tools, tool_name, tool_args, name=self.name)
-
-    def _reset_turn_tool_calls(self) -> None:
-        """Reset tool call counter for new turn."""
-        self._current_turn_tool_calls = 0
-
     def _finalize_turn_usage(self, turn_usage: "TurnUsage") -> None:
         """Set tool call count on TurnUsage and add to accumulator."""
-
-        self.usage_accumulator.add_turn(turn_usage)
-
-    async def show_assistant_message(
-        self,
-        message_text: str | Text | None,
-        highlight_namespaced_tool: str = "",
-        title: str = "ASSISTANT",
-    ) -> None:
-        if message_text is None:
-            message_text = Text("No content to display", style="dim green italic")
-        """Display an assistant message in a formatted panel."""
-        await self.display.show_assistant_message(
-            message_text,
-            aggregator=self.aggregator,
-            highlight_namespaced_tool=highlight_namespaced_tool,
-            title=title,
-            name=self.name,
-        )
-
-    def show_user_message(self, message, model: str | None, chat_turn: int) -> None:
-        """Display a user message in a formatted panel."""
-        self.display.show_user_message(message, model, chat_turn, name=self.name)
-
-    async def pre_tool_call(
-        self, tool_call_id: str | None, request: CallToolRequest
-    ) -> CallToolRequest | bool:
-        """Called before a tool is executed. Return False to prevent execution."""
-        return request
-
-    async def post_tool_call(
-        self, tool_call_id: str | None, request: CallToolRequest, result: CallToolResult
-    ) -> CallToolResult:
-        """Called after a tool execution. Can modify the result before it's returned."""
-        return result
-
-    async def call_tool(
-        self,
-        request: CallToolRequest,
-        tool_call_id: str | None = None,
-    ) -> CallToolResult:
-        """Call a tool with the given parameters and optional ID"""
-
-        try:
-            preprocess = await self.pre_tool_call(
-                tool_call_id=tool_call_id,
-                request=request,
-            )
-
-            if isinstance(preprocess, bool):
-                if not preprocess:
-                    return CallToolResult(
-                        isError=True,
-                        content=[
-                            TextContent(
-                                type="text",
-                                text=f"Error: Tool '{request.params.name}' was not allowed to run.",
-                            )
-                        ],
-                    )
-            else:
-                request = preprocess
-
-            tool_name = request.params.name
-            tool_args = request.params.arguments
-            result = await self.aggregator.call_tool(tool_name, tool_args)
-
-            postprocess = await self.post_tool_call(
-                tool_call_id=tool_call_id, request=request, result=result
-            )
-
-            if isinstance(postprocess, CallToolResult):
-                result = postprocess
-
-            return result
-        except PromptExitError:
-            raise
-        except Exception as e:
-            return CallToolResult(
-                isError=True,
-                content=[
-                    TextContent(
-                        type="text",
-                        text=f"Error executing tool '{request.params.name}': {str(e)}",
-                    )
-                ],
-            )
+        self._usage_accumulator.add_turn(turn_usage)
 
     def _log_chat_progress(
         self, chat_turn: Optional[int] = None, model: Optional[str] = None
@@ -661,7 +539,6 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             description=description,
             message_count=message_count,
             agent_name=self.name,
-            aggregator=self.aggregator,
             arguments=arguments,
         )
 
@@ -679,7 +556,7 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
             String representation of the assistant's response if generated,
             or the last assistant message in the prompt
         """
-        from …
+        from fast_agent.types import PromptMessageExtended
 
         # Check if we have any messages
         if not prompt_result.messages:
@@ -696,8 +573,8 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
                 arguments=arguments,
             )
 
-        # Convert to …
-        multipart_messages = …
+        # Convert to PromptMessageExtended objects
+        multipart_messages = PromptMessageExtended.parse_get_prompt_result(prompt_result)
 
         # Delegate to the provider-specific implementation
         result = await self._apply_prompt_provider_specific(
@@ -712,21 +589,21 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         Uses JSON format for .json files (MCP SDK compatible format) and
         delimited text format for other extensions.
         """
-        from …
+        from fast_agent.mcp.prompt_serialization import save_messages_to_file
 
         # Save messages using the unified save function that auto-detects format
         save_messages_to_file(self._message_history, filename)
 
     @property
-    def message_history(self) -> List[…
+    def message_history(self) -> List[PromptMessageExtended]:
         """
-        Return the agent's message history as …
+        Return the agent's message history as PromptMessageExtended objects.
 
         This history can be used to transfer state between agents or for
         analysis and debugging purposes.
 
         Returns:
-            List of …
+            List of PromptMessageExtended objects representing the conversation history
         """
         return self._message_history
 
@@ -734,11 +611,15 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         if self._init_api_key:
             return self._init_api_key
 
-        from …
+        from fast_agent.llm.provider_key_manager import ProviderKeyManager
 
        assert self.provider
        return ProviderKeyManager.get_api_key(self.provider.value, self.context.config)
 
+    @property
+    def usage_accumulator(self):
+        return self._usage_accumulator
+
     def get_usage_summary(self) -> dict:
         """
         Get a summary of usage statistics for this LLM instance.
@@ -747,4 +628,32 @@ class AugmentedLLM(ContextDependent, AugmentedLLMProtocol, Generic[MessageParamT
         Dictionary containing usage statistics including tokens, cache metrics,
         and context window utilization.
         """
-        return self.…
+        return self._usage_accumulator.get_summary()
+
+    @property
+    def provider(self) -> Provider:
+        """
+        Return the LLM provider type.
+
+        Returns:
+            The Provider enum value representing the LLM provider
+        """
+        return self._provider
+
+    @property
+    def model_name(self) -> str | None:
+        """Return the effective model name, if set."""
+        return self._model_name
+
+    @property
+    def model_info(self):
+        """Return resolved model information with capabilities.
+
+        Uses a lightweight resolver backed by the ModelDatabase and provides
+        text/document/vision flags, context window, etc.
+        """
+        from fast_agent.llm.model_info import ModelInfo
+
+        if not self._model_name:
+            return None
+        return ModelInfo.from_name(self._model_name, self._provider)
```