fast-agent-mcp 0.2.58__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of fast-agent-mcp might be problematic. Click here for more details.
- fast_agent/__init__.py +127 -0
- fast_agent/agents/__init__.py +36 -0
- {mcp_agent/core → fast_agent/agents}/agent_types.py +2 -1
- fast_agent/agents/llm_agent.py +217 -0
- fast_agent/agents/llm_decorator.py +486 -0
- mcp_agent/agents/base_agent.py → fast_agent/agents/mcp_agent.py +377 -385
- fast_agent/agents/tool_agent.py +168 -0
- {mcp_agent → fast_agent}/agents/workflow/chain_agent.py +43 -33
- {mcp_agent → fast_agent}/agents/workflow/evaluator_optimizer.py +31 -35
- {mcp_agent → fast_agent}/agents/workflow/iterative_planner.py +56 -47
- {mcp_agent → fast_agent}/agents/workflow/orchestrator_models.py +4 -4
- {mcp_agent → fast_agent}/agents/workflow/parallel_agent.py +34 -41
- {mcp_agent → fast_agent}/agents/workflow/router_agent.py +54 -39
- {mcp_agent → fast_agent}/cli/__main__.py +5 -3
- {mcp_agent → fast_agent}/cli/commands/check_config.py +95 -66
- {mcp_agent → fast_agent}/cli/commands/go.py +20 -11
- {mcp_agent → fast_agent}/cli/commands/quickstart.py +4 -4
- {mcp_agent → fast_agent}/cli/commands/server_helpers.py +1 -1
- {mcp_agent → fast_agent}/cli/commands/setup.py +75 -134
- {mcp_agent → fast_agent}/cli/commands/url_parser.py +9 -8
- {mcp_agent → fast_agent}/cli/main.py +36 -16
- {mcp_agent → fast_agent}/cli/terminal.py +2 -2
- {mcp_agent → fast_agent}/config.py +10 -2
- fast_agent/constants.py +8 -0
- {mcp_agent → fast_agent}/context.py +24 -19
- {mcp_agent → fast_agent}/context_dependent.py +9 -5
- fast_agent/core/__init__.py +52 -0
- {mcp_agent → fast_agent}/core/agent_app.py +39 -36
- fast_agent/core/core_app.py +135 -0
- {mcp_agent → fast_agent}/core/direct_decorators.py +12 -26
- {mcp_agent → fast_agent}/core/direct_factory.py +95 -73
- {mcp_agent → fast_agent/core}/executor/executor.py +4 -5
- {mcp_agent → fast_agent}/core/fastagent.py +32 -32
- fast_agent/core/logging/__init__.py +5 -0
- {mcp_agent → fast_agent/core}/logging/events.py +3 -3
- {mcp_agent → fast_agent/core}/logging/json_serializer.py +1 -1
- {mcp_agent → fast_agent/core}/logging/listeners.py +85 -7
- {mcp_agent → fast_agent/core}/logging/logger.py +7 -7
- {mcp_agent → fast_agent/core}/logging/transport.py +10 -11
- fast_agent/core/prompt.py +9 -0
- {mcp_agent → fast_agent}/core/validation.py +4 -4
- fast_agent/event_progress.py +61 -0
- fast_agent/history/history_exporter.py +44 -0
- {mcp_agent → fast_agent}/human_input/__init__.py +9 -12
- {mcp_agent → fast_agent}/human_input/elicitation_handler.py +26 -8
- {mcp_agent → fast_agent}/human_input/elicitation_state.py +7 -7
- {mcp_agent → fast_agent}/human_input/simple_form.py +6 -4
- {mcp_agent → fast_agent}/human_input/types.py +1 -18
- fast_agent/interfaces.py +228 -0
- fast_agent/llm/__init__.py +9 -0
- mcp_agent/llm/augmented_llm.py → fast_agent/llm/fastagent_llm.py +127 -218
- fast_agent/llm/internal/passthrough.py +137 -0
- mcp_agent/llm/augmented_llm_playback.py → fast_agent/llm/internal/playback.py +29 -25
- mcp_agent/llm/augmented_llm_silent.py → fast_agent/llm/internal/silent.py +10 -17
- fast_agent/llm/internal/slow.py +38 -0
- {mcp_agent → fast_agent}/llm/memory.py +40 -30
- {mcp_agent → fast_agent}/llm/model_database.py +35 -2
- {mcp_agent → fast_agent}/llm/model_factory.py +103 -77
- fast_agent/llm/model_info.py +126 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/anthropic_utils.py +7 -7
- fast_agent/llm/provider/anthropic/llm_anthropic.py +603 -0
- {mcp_agent/llm/providers → fast_agent/llm/provider/anthropic}/multipart_converter_anthropic.py +79 -86
- {mcp_agent/llm/providers → fast_agent/llm/provider/bedrock}/bedrock_utils.py +3 -1
- mcp_agent/llm/providers/augmented_llm_bedrock.py → fast_agent/llm/provider/bedrock/llm_bedrock.py +833 -717
- {mcp_agent/llm/providers → fast_agent/llm/provider/google}/google_converter.py +66 -14
- fast_agent/llm/provider/google/llm_google_native.py +431 -0
- mcp_agent/llm/providers/augmented_llm_aliyun.py → fast_agent/llm/provider/openai/llm_aliyun.py +6 -7
- mcp_agent/llm/providers/augmented_llm_azure.py → fast_agent/llm/provider/openai/llm_azure.py +4 -4
- mcp_agent/llm/providers/augmented_llm_deepseek.py → fast_agent/llm/provider/openai/llm_deepseek.py +10 -11
- mcp_agent/llm/providers/augmented_llm_generic.py → fast_agent/llm/provider/openai/llm_generic.py +4 -4
- mcp_agent/llm/providers/augmented_llm_google_oai.py → fast_agent/llm/provider/openai/llm_google_oai.py +4 -4
- mcp_agent/llm/providers/augmented_llm_groq.py → fast_agent/llm/provider/openai/llm_groq.py +14 -16
- mcp_agent/llm/providers/augmented_llm_openai.py → fast_agent/llm/provider/openai/llm_openai.py +133 -207
- mcp_agent/llm/providers/augmented_llm_openrouter.py → fast_agent/llm/provider/openai/llm_openrouter.py +6 -6
- mcp_agent/llm/providers/augmented_llm_tensorzero_openai.py → fast_agent/llm/provider/openai/llm_tensorzero_openai.py +17 -16
- mcp_agent/llm/providers/augmented_llm_xai.py → fast_agent/llm/provider/openai/llm_xai.py +6 -6
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/multipart_converter_openai.py +125 -63
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_multipart.py +12 -12
- {mcp_agent/llm/providers → fast_agent/llm/provider/openai}/openai_utils.py +18 -16
- {mcp_agent → fast_agent}/llm/provider_key_manager.py +2 -2
- {mcp_agent → fast_agent}/llm/provider_types.py +2 -0
- {mcp_agent → fast_agent}/llm/sampling_converter.py +15 -12
- {mcp_agent → fast_agent}/llm/usage_tracking.py +23 -5
- fast_agent/mcp/__init__.py +54 -0
- {mcp_agent → fast_agent}/mcp/elicitation_factory.py +3 -3
- {mcp_agent → fast_agent}/mcp/elicitation_handlers.py +19 -10
- {mcp_agent → fast_agent}/mcp/gen_client.py +3 -3
- fast_agent/mcp/helpers/__init__.py +36 -0
- fast_agent/mcp/helpers/content_helpers.py +183 -0
- {mcp_agent → fast_agent}/mcp/helpers/server_config_helpers.py +8 -8
- {mcp_agent → fast_agent}/mcp/hf_auth.py +25 -23
- fast_agent/mcp/interfaces.py +93 -0
- {mcp_agent → fast_agent}/mcp/logger_textio.py +4 -4
- {mcp_agent → fast_agent}/mcp/mcp_agent_client_session.py +49 -44
- {mcp_agent → fast_agent}/mcp/mcp_aggregator.py +66 -115
- {mcp_agent → fast_agent}/mcp/mcp_connection_manager.py +16 -23
- {mcp_agent/core → fast_agent/mcp}/mcp_content.py +23 -15
- {mcp_agent → fast_agent}/mcp/mime_utils.py +39 -0
- fast_agent/mcp/prompt.py +159 -0
- mcp_agent/mcp/prompt_message_multipart.py → fast_agent/mcp/prompt_message_extended.py +27 -20
- {mcp_agent → fast_agent}/mcp/prompt_render.py +21 -19
- {mcp_agent → fast_agent}/mcp/prompt_serialization.py +46 -46
- fast_agent/mcp/prompts/__main__.py +7 -0
- {mcp_agent → fast_agent}/mcp/prompts/prompt_helpers.py +31 -30
- {mcp_agent → fast_agent}/mcp/prompts/prompt_load.py +8 -8
- {mcp_agent → fast_agent}/mcp/prompts/prompt_server.py +11 -19
- {mcp_agent → fast_agent}/mcp/prompts/prompt_template.py +18 -18
- {mcp_agent → fast_agent}/mcp/resource_utils.py +1 -1
- {mcp_agent → fast_agent}/mcp/sampling.py +31 -26
- {mcp_agent/mcp_server → fast_agent/mcp/server}/__init__.py +1 -1
- {mcp_agent/mcp_server → fast_agent/mcp/server}/agent_server.py +5 -6
- fast_agent/mcp/ui_agent.py +48 -0
- fast_agent/mcp/ui_mixin.py +209 -0
- fast_agent/mcp_server_registry.py +90 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis-campaign.py +5 -4
- {mcp_agent → fast_agent}/resources/examples/data-analysis/analysis.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/forms_demo.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/game_character_handler.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/tool_call.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_one.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/agent_two.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher-eval.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher-imp.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/researcher/researcher.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/tensorzero/agent.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/tensorzero/image_demo.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/tensorzero/simple_agent.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/chaining.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/evaluator.py +3 -3
- {mcp_agent → fast_agent}/resources/examples/workflows/human_input.py +5 -3
- {mcp_agent → fast_agent}/resources/examples/workflows/orchestrator.py +1 -1
- {mcp_agent → fast_agent}/resources/examples/workflows/parallel.py +2 -2
- {mcp_agent → fast_agent}/resources/examples/workflows/router.py +5 -2
- fast_agent/resources/setup/.gitignore +24 -0
- fast_agent/resources/setup/agent.py +18 -0
- fast_agent/resources/setup/fastagent.config.yaml +44 -0
- fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
- fast_agent/resources/setup/pyproject.toml.tmpl +17 -0
- fast_agent/tools/elicitation.py +369 -0
- fast_agent/types/__init__.py +32 -0
- fast_agent/types/llm_stop_reason.py +77 -0
- fast_agent/ui/__init__.py +38 -0
- fast_agent/ui/console_display.py +1005 -0
- {mcp_agent/human_input → fast_agent/ui}/elicitation_form.py +17 -12
- mcp_agent/human_input/elicitation_forms.py → fast_agent/ui/elicitation_style.py +1 -1
- {mcp_agent/core → fast_agent/ui}/enhanced_prompt.py +96 -25
- {mcp_agent/core → fast_agent/ui}/interactive_prompt.py +330 -125
- fast_agent/ui/mcp_ui_utils.py +224 -0
- {mcp_agent → fast_agent/ui}/progress_display.py +2 -2
- {mcp_agent/logging → fast_agent/ui}/rich_progress.py +4 -4
- {mcp_agent/core → fast_agent/ui}/usage_display.py +3 -8
- {fast_agent_mcp-0.2.58.dist-info → fast_agent_mcp-0.3.1.dist-info}/METADATA +7 -7
- fast_agent_mcp-0.3.1.dist-info/RECORD +203 -0
- fast_agent_mcp-0.3.1.dist-info/entry_points.txt +5 -0
- fast_agent_mcp-0.2.58.dist-info/RECORD +0 -193
- fast_agent_mcp-0.2.58.dist-info/entry_points.txt +0 -6
- mcp_agent/__init__.py +0 -114
- mcp_agent/agents/agent.py +0 -92
- mcp_agent/agents/workflow/__init__.py +0 -1
- mcp_agent/agents/workflow/orchestrator_agent.py +0 -597
- mcp_agent/app.py +0 -175
- mcp_agent/core/__init__.py +0 -26
- mcp_agent/core/prompt.py +0 -191
- mcp_agent/event_progress.py +0 -134
- mcp_agent/human_input/handler.py +0 -81
- mcp_agent/llm/__init__.py +0 -2
- mcp_agent/llm/augmented_llm_passthrough.py +0 -232
- mcp_agent/llm/augmented_llm_slow.py +0 -53
- mcp_agent/llm/providers/__init__.py +0 -8
- mcp_agent/llm/providers/augmented_llm_anthropic.py +0 -718
- mcp_agent/llm/providers/augmented_llm_google_native.py +0 -496
- mcp_agent/llm/providers/sampling_converter_anthropic.py +0 -57
- mcp_agent/llm/providers/sampling_converter_openai.py +0 -26
- mcp_agent/llm/sampling_format_converter.py +0 -37
- mcp_agent/logging/__init__.py +0 -0
- mcp_agent/mcp/__init__.py +0 -50
- mcp_agent/mcp/helpers/__init__.py +0 -25
- mcp_agent/mcp/helpers/content_helpers.py +0 -187
- mcp_agent/mcp/interfaces.py +0 -266
- mcp_agent/mcp/prompts/__init__.py +0 -0
- mcp_agent/mcp/prompts/__main__.py +0 -10
- mcp_agent/mcp_server_registry.py +0 -343
- mcp_agent/tools/tool_definition.py +0 -14
- mcp_agent/ui/console_display.py +0 -790
- mcp_agent/ui/console_display_legacy.py +0 -401
- {mcp_agent → fast_agent}/agents/workflow/orchestrator_prompts.py +0 -0
- {mcp_agent/agents → fast_agent/cli}/__init__.py +0 -0
- {mcp_agent → fast_agent}/cli/constants.py +0 -0
- {mcp_agent → fast_agent}/core/error_handling.py +0 -0
- {mcp_agent → fast_agent}/core/exceptions.py +0 -0
- {mcp_agent/cli → fast_agent/core/executor}/__init__.py +0 -0
- {mcp_agent → fast_agent/core}/executor/task_registry.py +0 -0
- {mcp_agent → fast_agent/core}/executor/workflow_signal.py +0 -0
- {mcp_agent → fast_agent}/human_input/form_fields.py +0 -0
- {mcp_agent → fast_agent}/llm/prompt_utils.py +0 -0
- {mcp_agent/core → fast_agent/llm}/request_params.py +0 -0
- {mcp_agent → fast_agent}/mcp/common.py +0 -0
- {mcp_agent/executor → fast_agent/mcp/prompts}/__init__.py +0 -0
- {mcp_agent → fast_agent}/mcp/prompts/prompt_constants.py +0 -0
- {mcp_agent → fast_agent}/py.typed +0 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_account_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_forms_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/elicitation_game_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +0 -0
- {mcp_agent → fast_agent}/resources/examples/researcher/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/.env.sample +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/Makefile +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/README.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/crab.png +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/docker-compose.yml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/Dockerfile +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/entrypoint.sh +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/mcp_server.py +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/mcp_server/pyproject.toml +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_schema.json +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +0 -0
- {mcp_agent → fast_agent}/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/fastagent.config.yaml +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/graded_report.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/short_story.md +0 -0
- {mcp_agent → fast_agent}/resources/examples/workflows/short_story.txt +0 -0
- {mcp_agent → fast_agent/ui}/console.py +0 -0
- {mcp_agent/core → fast_agent/ui}/mermaid_utils.py +0 -0
- {fast_agent_mcp-0.2.58.dist-info → fast_agent_mcp-0.3.1.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.58.dist-info → fast_agent_mcp-0.3.1.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,486 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Decorator for LlmAgent, normalizes PromptMessageExtended, allows easy extension of Agents
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from typing import (
|
|
6
|
+
TYPE_CHECKING,
|
|
7
|
+
Dict,
|
|
8
|
+
List,
|
|
9
|
+
Mapping,
|
|
10
|
+
Optional,
|
|
11
|
+
Sequence,
|
|
12
|
+
Tuple,
|
|
13
|
+
Type,
|
|
14
|
+
TypeVar,
|
|
15
|
+
Union,
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
if TYPE_CHECKING:
|
|
19
|
+
from rich.text import Text
|
|
20
|
+
|
|
21
|
+
from a2a.types import AgentCard
|
|
22
|
+
from mcp import Tool
|
|
23
|
+
from mcp.types import (
|
|
24
|
+
GetPromptResult,
|
|
25
|
+
Prompt,
|
|
26
|
+
PromptMessage,
|
|
27
|
+
ReadResourceResult,
|
|
28
|
+
)
|
|
29
|
+
from opentelemetry import trace
|
|
30
|
+
from pydantic import BaseModel
|
|
31
|
+
|
|
32
|
+
from fast_agent.agents.agent_types import AgentConfig, AgentType
|
|
33
|
+
from fast_agent.context import Context
|
|
34
|
+
from fast_agent.core.logging.logger import get_logger
|
|
35
|
+
from fast_agent.interfaces import (
|
|
36
|
+
AgentProtocol,
|
|
37
|
+
FastAgentLLMProtocol,
|
|
38
|
+
LLMFactoryProtocol,
|
|
39
|
+
)
|
|
40
|
+
from fast_agent.llm.provider_types import Provider
|
|
41
|
+
from fast_agent.llm.usage_tracking import UsageAccumulator
|
|
42
|
+
from fast_agent.mcp.helpers.content_helpers import normalize_to_extended_list
|
|
43
|
+
from fast_agent.types import PromptMessageExtended, RequestParams
|
|
44
|
+
|
|
45
|
+
logger = get_logger(__name__)

# TypeVar for the Pydantic model class passed to structured() /
# structured_impl(), so the parsed result keeps the caller's type.
ModelT = TypeVar("ModelT", bound=BaseModel)

# TypeVar for FastAgentLLM implementations (the class formerly named
# AugmentedLLM in the mcp_agent package) and its subclasses.
LLM = TypeVar("LLM", bound=FastAgentLLMProtocol)
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
class LlmDecorator(AgentProtocol):
    """
    A pure delegation wrapper around LlmAgent instances.

    This class provides simple delegation to an attached LLM without adding
    any LLM interaction behaviors. Subclasses can add specialized logic
    for stop reason handling, UI display, tool execution, etc.
    """

    def __init__(
        self,
        config: AgentConfig,
        context: Context | None = None,
    ) -> None:
        self.config = config

        self._context = context
        self._name = self.config.name
        self._tracer = trace.get_tracer(__name__)
        self.instruction = self.config.instruction

        # Store the default request params from config
        self._default_request_params = self.config.default_request_params

        # Initialize the LLM to None (will be set by attach_llm)
        self._llm: Optional[FastAgentLLMProtocol] = None
        self._initialized = False

    @property
    def initialized(self) -> bool:
        """Check if the agent is initialized."""
        return self._initialized

    @initialized.setter
    def initialized(self, value: bool) -> None:
        """Set the initialized state."""
        self._initialized = value

    async def initialize(self) -> None:
        """Mark the agent initialized. The base class performs no other setup."""
        self.initialized = True

    async def shutdown(self) -> None:
        """Mark the agent shut down. The base class performs no other teardown."""
        self.initialized = False

    @property
    def agent_type(self) -> AgentType:
        """
        Return the type of this agent.
        """
        return AgentType.LLM

    @property
    def name(self) -> str:
        """
        Return the name of this agent.
        """
        return self._name

    async def attach_llm(
        self,
        llm_factory: LLMFactoryProtocol,
        model: str | None = None,
        request_params: RequestParams | None = None,
        **additional_kwargs,
    ) -> FastAgentLLMProtocol:
        """
        Create and attach an LLM instance to this agent.

        Parameters have the following precedence (highest to lowest):
        1. Explicitly passed parameters to this method
        2. Agent's default_request_params
        3. LLM's default values

        Args:
            llm_factory: A factory function that constructs a FastAgentLLM
            model: Optional model name override
            request_params: Optional request parameters override
            **additional_kwargs: Additional parameters passed to the LLM constructor

        Returns:
            The created LLM instance
        """
        # Merge parameters with proper precedence
        effective_params = self._merge_request_params(
            self._default_request_params, request_params, model
        )

        # Create the LLM instance; any previously attached LLM is replaced.
        self._llm = llm_factory(
            agent=self, request_params=effective_params, context=self._context, **additional_kwargs
        )

        return self._llm

    async def __call__(
        self,
        message: Union[
            str,
            PromptMessage,
            PromptMessageExtended,
            Sequence[Union[str, PromptMessage, PromptMessageExtended]],
        ],
    ) -> str:
        """
        Make the agent callable to send messages.

        Args:
            message: Message (or sequence of messages) to send to the agent

        Returns:
            The agent's response as a string
        """
        return await self.send(message)

    async def send(
        self,
        message: Union[
            str,
            PromptMessage,
            PromptMessageExtended,
            Sequence[Union[str, PromptMessage, PromptMessageExtended]],
        ],
        request_params: RequestParams | None = None,
    ) -> str:
        """
        Convenience method to generate and return a string directly
        """
        response = await self.generate(message, request_params)
        return response.last_text() or ""

    async def generate(
        self,
        messages: Union[
            str,
            PromptMessage,
            PromptMessageExtended,
            Sequence[Union[str, PromptMessage, PromptMessageExtended]],
        ],
        request_params: RequestParams | None = None,
    ) -> PromptMessageExtended:
        """
        Create a completion with the LLM using the provided messages.

        This method provides the friendly agent interface by normalizing inputs
        and delegating to generate_impl.

        Args:
            messages: Message(s) in various formats:
                - String: Converted to a user PromptMessageExtended
                - PromptMessage: Converted to PromptMessageExtended
                - PromptMessageExtended: Used directly
                - List of any combination of the above
            request_params: Optional parameters to configure the request

        Returns:
            The LLM's response as a PromptMessageExtended
        """
        # Normalize all input types to a list of PromptMessageExtended
        multipart_messages = normalize_to_extended_list(messages)

        with self._tracer.start_as_current_span(f"Agent: '{self._name}' generate"):
            # The base class supplies no tools; subclasses pass them via generate_impl.
            return await self.generate_impl(multipart_messages, request_params, None)

    async def generate_impl(
        self,
        messages: List[PromptMessageExtended],
        request_params: RequestParams | None = None,
        tools: List[Tool] | None = None,
    ) -> PromptMessageExtended:
        """
        Implementation method for generate.

        Default implementation delegates to the attached LLM.
        Subclasses can override this to customize behavior while still
        benefiting from the message normalization in generate().

        Args:
            messages: Normalized list of PromptMessageExtended objects
            request_params: Optional parameters to configure the request
            tools: Optional list of tools available to the LLM

        Returns:
            The LLM's response as a PromptMessageExtended
        """
        assert self._llm, "LLM is not attached"
        return await self._llm.generate(messages, request_params, tools)

    async def apply_prompt_template(self, prompt_result: GetPromptResult, prompt_name: str) -> str:
        """
        Apply a prompt template as persistent context that will be included in all future conversations.
        Delegates to the attached LLM.

        Args:
            prompt_result: The GetPromptResult containing prompt messages
            prompt_name: The name of the prompt being applied

        Returns:
            String representation of the assistant's response if generated
        """
        assert self._llm
        return await self._llm.apply_prompt_template(prompt_result, prompt_name)

    async def apply_prompt(
        self,
        prompt: Union[str, GetPromptResult],
        arguments: Dict[str, str] | None = None,
        as_template: bool = False,
        namespace: str | None = None,
    ) -> str:
        """
        Default, provider-agnostic apply_prompt implementation.

        - If given a GetPromptResult, optionally store as template or generate once.
        - If given a string, treat it as plain user text and generate.

        Subclasses that integrate MCP servers should override this.
        """
        # If a prompt template object is provided
        if isinstance(prompt, GetPromptResult):
            namespaced_name = getattr(prompt, "namespaced_name", "template")
            if as_template:
                return await self.apply_prompt_template(prompt, namespaced_name)

            messages = PromptMessageExtended.from_get_prompt_result(prompt)
            # NOTE(review): unlike send(), there is no `or ""` guard here —
            # confirm first_text() cannot return None, or this breaks the -> str contract.
            response = await self.generate_impl(messages, None)
            return response.first_text()

        # Otherwise treat the string as plain content (ignore arguments here)
        return await self.send(prompt)

    async def structured(
        self,
        messages: Union[
            str,
            PromptMessage,
            PromptMessageExtended,
            Sequence[Union[str, PromptMessage, PromptMessageExtended]],
        ],
        model: Type[ModelT],
        request_params: RequestParams | None = None,
    ) -> Tuple[ModelT | None, PromptMessageExtended]:
        """
        Apply the prompt and return the result as a Pydantic model.

        This method provides the friendly agent interface by normalizing inputs
        and delegating to structured_impl.

        Args:
            messages: Message(s) in various formats:
                - String: Converted to a user PromptMessageExtended
                - PromptMessage: Converted to PromptMessageExtended
                - PromptMessageExtended: Used directly
                - List of any combination of the above
            model: The Pydantic model class to parse the result into
            request_params: Optional parameters to configure the LLM request

        Returns:
            A tuple of (parsed model instance or None, assistant response message)
        """
        # Normalize all input types to a list of PromptMessageExtended
        multipart_messages = normalize_to_extended_list(messages)

        with self._tracer.start_as_current_span(f"Agent: '{self._name}' structured"):
            return await self.structured_impl(multipart_messages, model, request_params)

    async def structured_impl(
        self,
        messages: List[PromptMessageExtended],
        model: Type[ModelT],
        request_params: RequestParams | None = None,
    ) -> Tuple[ModelT | None, PromptMessageExtended]:
        """
        Implementation method for structured.

        Default implementation delegates to the attached LLM.
        Subclasses can override this to customize behavior while still
        benefiting from the message normalization in structured().

        Args:
            messages: Normalized list of PromptMessageExtended objects
            model: The Pydantic model class to parse the result into
            request_params: Optional parameters to configure the LLM request

        Returns:
            A tuple of (parsed model instance or None, assistant response message)
        """
        assert self._llm, "LLM is not attached"
        return await self._llm.structured(messages, model, request_params)

    @property
    def message_history(self) -> List[PromptMessageExtended]:
        """
        Return the agent's message history as PromptMessageExtended objects.

        This history can be used to transfer state between agents or for
        analysis and debugging purposes.

        Returns:
            List of PromptMessageExtended objects representing the conversation
            history (empty list when no LLM is attached)
        """
        if self._llm:
            return self._llm.message_history
        return []

    @property
    def usage_accumulator(self) -> UsageAccumulator | None:
        """
        Return the usage accumulator for tracking token usage across turns.

        Returns:
            UsageAccumulator object if LLM is attached, None otherwise
        """
        if self._llm:
            return self._llm.usage_accumulator
        return None

    @property
    def llm(self) -> FastAgentLLMProtocol:
        """Return the attached LLM; raises AssertionError if none is attached."""
        assert self._llm, "LLM is not attached"
        return self._llm

    # --- Default MCP-facing convenience methods (no-op for plain LLM agents) ---

    async def list_prompts(self, namespace: str | None = None) -> Mapping[str, List[Prompt]]:
        """Default: no prompts; return empty mapping."""
        return {}

    async def get_prompt(
        self,
        prompt_name: str,
        arguments: Dict[str, str] | None = None,
        namespace: str | None = None,
    ) -> GetPromptResult:
        """Default: prompts unsupported; return empty GetPromptResult."""
        return GetPromptResult(description="", messages=[])

    async def list_resources(self, namespace: str | None = None) -> Mapping[str, List[str]]:
        """Default: no resources; return empty mapping."""
        return {}

    async def list_mcp_tools(self, namespace: str | None = None) -> Mapping[str, List[Tool]]:
        """Default: no tools; return empty mapping."""
        return {}

    async def get_resource(
        self, resource_uri: str, namespace: str | None = None
    ) -> ReadResourceResult:
        """Default: resources unsupported; raise capability error."""
        raise NotImplementedError("Resources are not supported by this agent")

    async def with_resource(
        self,
        prompt_content: Union[str, PromptMessage, PromptMessageExtended],
        resource_uri: str,
        namespace: str | None = None,
    ) -> str:
        """Default: ignore resource, just send the prompt content."""
        return await self.send(prompt_content)

    @property
    def provider(self) -> Provider:
        """Provider of the attached LLM; raises AssertionError if none is attached."""
        return self.llm.provider

    def _merge_request_params(
        self,
        base_params: RequestParams | None,
        override_params: RequestParams | None,
        model_override: str | None = None,
    ) -> RequestParams | None:
        """
        Merge request parameters with proper precedence.

        Args:
            base_params: Base parameters (lower precedence)
            override_params: Override parameters (higher precedence)
            model_override: Optional model name to override

        Returns:
            Merged RequestParams or None if both inputs are None
        """
        if not base_params and not override_params:
            return None

        if not base_params:
            result = override_params.model_copy() if override_params else None
        else:
            result = base_params.model_copy()
            if override_params:
                # Merge only the explicitly set values from override_params;
                # fields explicitly set to None are also skipped, so an override
                # cannot clear a base value back to None.
                for k, v in override_params.model_dump(exclude_unset=True).items():
                    if v is not None:
                        setattr(result, k, v)

        # Apply model override if specified
        if model_override and result:
            result.model = model_override

        return result

    async def agent_card(self) -> AgentCard:
        """
        Return an A2A card describing this Agent
        """
        # Imported here rather than at module level (llm_agent imports from this module).
        from fast_agent.agents.llm_agent import DEFAULT_CAPABILITIES

        return AgentCard(
            skills=[],
            name=self._name,
            description=self.instruction,
            url=f"fast-agent://agents/{self._name}/",
            version="0.1",
            capabilities=DEFAULT_CAPABILITIES,
            # TODO -- get these from the _llm
            default_input_modes=["text/plain"],
            default_output_modes=["text/plain"],
            provider=None,
            documentation_url=None,
        )

    async def run_tools(self, request: PromptMessageExtended) -> PromptMessageExtended:
        """Default: no tool execution; return the request unchanged."""
        return request

    async def show_assistant_message(
        self,
        message: PromptMessageExtended,
        bottom_items: List[str] | None = None,
        highlight_items: str | List[str] | None = None,
        max_item_length: int | None = None,
        name: str | None = None,
        model: str | None = None,
        additional_message: Optional["Text"] = None,
    ) -> None:
        """Default: no-op display hook; UI-capable subclasses override this."""
        pass
|