fast-agent-mcp 0.4.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fast_agent/__init__.py +183 -0
- fast_agent/acp/__init__.py +19 -0
- fast_agent/acp/acp_aware_mixin.py +304 -0
- fast_agent/acp/acp_context.py +437 -0
- fast_agent/acp/content_conversion.py +136 -0
- fast_agent/acp/filesystem_runtime.py +427 -0
- fast_agent/acp/permission_store.py +269 -0
- fast_agent/acp/server/__init__.py +5 -0
- fast_agent/acp/server/agent_acp_server.py +1472 -0
- fast_agent/acp/slash_commands.py +1050 -0
- fast_agent/acp/terminal_runtime.py +408 -0
- fast_agent/acp/tool_permission_adapter.py +125 -0
- fast_agent/acp/tool_permissions.py +474 -0
- fast_agent/acp/tool_progress.py +814 -0
- fast_agent/agents/__init__.py +85 -0
- fast_agent/agents/agent_types.py +64 -0
- fast_agent/agents/llm_agent.py +350 -0
- fast_agent/agents/llm_decorator.py +1139 -0
- fast_agent/agents/mcp_agent.py +1337 -0
- fast_agent/agents/tool_agent.py +271 -0
- fast_agent/agents/workflow/agents_as_tools_agent.py +849 -0
- fast_agent/agents/workflow/chain_agent.py +212 -0
- fast_agent/agents/workflow/evaluator_optimizer.py +380 -0
- fast_agent/agents/workflow/iterative_planner.py +652 -0
- fast_agent/agents/workflow/maker_agent.py +379 -0
- fast_agent/agents/workflow/orchestrator_models.py +218 -0
- fast_agent/agents/workflow/orchestrator_prompts.py +248 -0
- fast_agent/agents/workflow/parallel_agent.py +250 -0
- fast_agent/agents/workflow/router_agent.py +353 -0
- fast_agent/cli/__init__.py +0 -0
- fast_agent/cli/__main__.py +73 -0
- fast_agent/cli/commands/acp.py +159 -0
- fast_agent/cli/commands/auth.py +404 -0
- fast_agent/cli/commands/check_config.py +783 -0
- fast_agent/cli/commands/go.py +514 -0
- fast_agent/cli/commands/quickstart.py +557 -0
- fast_agent/cli/commands/serve.py +143 -0
- fast_agent/cli/commands/server_helpers.py +114 -0
- fast_agent/cli/commands/setup.py +174 -0
- fast_agent/cli/commands/url_parser.py +190 -0
- fast_agent/cli/constants.py +40 -0
- fast_agent/cli/main.py +115 -0
- fast_agent/cli/terminal.py +24 -0
- fast_agent/config.py +798 -0
- fast_agent/constants.py +41 -0
- fast_agent/context.py +279 -0
- fast_agent/context_dependent.py +50 -0
- fast_agent/core/__init__.py +92 -0
- fast_agent/core/agent_app.py +448 -0
- fast_agent/core/core_app.py +137 -0
- fast_agent/core/direct_decorators.py +784 -0
- fast_agent/core/direct_factory.py +620 -0
- fast_agent/core/error_handling.py +27 -0
- fast_agent/core/exceptions.py +90 -0
- fast_agent/core/executor/__init__.py +0 -0
- fast_agent/core/executor/executor.py +280 -0
- fast_agent/core/executor/task_registry.py +32 -0
- fast_agent/core/executor/workflow_signal.py +324 -0
- fast_agent/core/fastagent.py +1186 -0
- fast_agent/core/logging/__init__.py +5 -0
- fast_agent/core/logging/events.py +138 -0
- fast_agent/core/logging/json_serializer.py +164 -0
- fast_agent/core/logging/listeners.py +309 -0
- fast_agent/core/logging/logger.py +278 -0
- fast_agent/core/logging/transport.py +481 -0
- fast_agent/core/prompt.py +9 -0
- fast_agent/core/prompt_templates.py +183 -0
- fast_agent/core/validation.py +326 -0
- fast_agent/event_progress.py +62 -0
- fast_agent/history/history_exporter.py +49 -0
- fast_agent/human_input/__init__.py +47 -0
- fast_agent/human_input/elicitation_handler.py +123 -0
- fast_agent/human_input/elicitation_state.py +33 -0
- fast_agent/human_input/form_elements.py +59 -0
- fast_agent/human_input/form_fields.py +256 -0
- fast_agent/human_input/simple_form.py +113 -0
- fast_agent/human_input/types.py +40 -0
- fast_agent/interfaces.py +310 -0
- fast_agent/llm/__init__.py +9 -0
- fast_agent/llm/cancellation.py +22 -0
- fast_agent/llm/fastagent_llm.py +931 -0
- fast_agent/llm/internal/passthrough.py +161 -0
- fast_agent/llm/internal/playback.py +129 -0
- fast_agent/llm/internal/silent.py +41 -0
- fast_agent/llm/internal/slow.py +38 -0
- fast_agent/llm/memory.py +275 -0
- fast_agent/llm/model_database.py +490 -0
- fast_agent/llm/model_factory.py +388 -0
- fast_agent/llm/model_info.py +102 -0
- fast_agent/llm/prompt_utils.py +155 -0
- fast_agent/llm/provider/anthropic/anthropic_utils.py +84 -0
- fast_agent/llm/provider/anthropic/cache_planner.py +56 -0
- fast_agent/llm/provider/anthropic/llm_anthropic.py +796 -0
- fast_agent/llm/provider/anthropic/multipart_converter_anthropic.py +462 -0
- fast_agent/llm/provider/bedrock/bedrock_utils.py +218 -0
- fast_agent/llm/provider/bedrock/llm_bedrock.py +2207 -0
- fast_agent/llm/provider/bedrock/multipart_converter_bedrock.py +84 -0
- fast_agent/llm/provider/google/google_converter.py +466 -0
- fast_agent/llm/provider/google/llm_google_native.py +681 -0
- fast_agent/llm/provider/openai/llm_aliyun.py +31 -0
- fast_agent/llm/provider/openai/llm_azure.py +143 -0
- fast_agent/llm/provider/openai/llm_deepseek.py +76 -0
- fast_agent/llm/provider/openai/llm_generic.py +35 -0
- fast_agent/llm/provider/openai/llm_google_oai.py +32 -0
- fast_agent/llm/provider/openai/llm_groq.py +42 -0
- fast_agent/llm/provider/openai/llm_huggingface.py +85 -0
- fast_agent/llm/provider/openai/llm_openai.py +1195 -0
- fast_agent/llm/provider/openai/llm_openai_compatible.py +138 -0
- fast_agent/llm/provider/openai/llm_openrouter.py +45 -0
- fast_agent/llm/provider/openai/llm_tensorzero_openai.py +128 -0
- fast_agent/llm/provider/openai/llm_xai.py +38 -0
- fast_agent/llm/provider/openai/multipart_converter_openai.py +561 -0
- fast_agent/llm/provider/openai/openai_multipart.py +169 -0
- fast_agent/llm/provider/openai/openai_utils.py +67 -0
- fast_agent/llm/provider/openai/responses.py +133 -0
- fast_agent/llm/provider_key_manager.py +139 -0
- fast_agent/llm/provider_types.py +34 -0
- fast_agent/llm/request_params.py +61 -0
- fast_agent/llm/sampling_converter.py +98 -0
- fast_agent/llm/stream_types.py +9 -0
- fast_agent/llm/usage_tracking.py +445 -0
- fast_agent/mcp/__init__.py +56 -0
- fast_agent/mcp/common.py +26 -0
- fast_agent/mcp/elicitation_factory.py +84 -0
- fast_agent/mcp/elicitation_handlers.py +164 -0
- fast_agent/mcp/gen_client.py +83 -0
- fast_agent/mcp/helpers/__init__.py +36 -0
- fast_agent/mcp/helpers/content_helpers.py +352 -0
- fast_agent/mcp/helpers/server_config_helpers.py +25 -0
- fast_agent/mcp/hf_auth.py +147 -0
- fast_agent/mcp/interfaces.py +92 -0
- fast_agent/mcp/logger_textio.py +108 -0
- fast_agent/mcp/mcp_agent_client_session.py +411 -0
- fast_agent/mcp/mcp_aggregator.py +2175 -0
- fast_agent/mcp/mcp_connection_manager.py +723 -0
- fast_agent/mcp/mcp_content.py +262 -0
- fast_agent/mcp/mime_utils.py +108 -0
- fast_agent/mcp/oauth_client.py +509 -0
- fast_agent/mcp/prompt.py +159 -0
- fast_agent/mcp/prompt_message_extended.py +155 -0
- fast_agent/mcp/prompt_render.py +84 -0
- fast_agent/mcp/prompt_serialization.py +580 -0
- fast_agent/mcp/prompts/__init__.py +0 -0
- fast_agent/mcp/prompts/__main__.py +7 -0
- fast_agent/mcp/prompts/prompt_constants.py +18 -0
- fast_agent/mcp/prompts/prompt_helpers.py +238 -0
- fast_agent/mcp/prompts/prompt_load.py +186 -0
- fast_agent/mcp/prompts/prompt_server.py +552 -0
- fast_agent/mcp/prompts/prompt_template.py +438 -0
- fast_agent/mcp/resource_utils.py +215 -0
- fast_agent/mcp/sampling.py +200 -0
- fast_agent/mcp/server/__init__.py +4 -0
- fast_agent/mcp/server/agent_server.py +613 -0
- fast_agent/mcp/skybridge.py +44 -0
- fast_agent/mcp/sse_tracking.py +287 -0
- fast_agent/mcp/stdio_tracking_simple.py +59 -0
- fast_agent/mcp/streamable_http_tracking.py +309 -0
- fast_agent/mcp/tool_execution_handler.py +137 -0
- fast_agent/mcp/tool_permission_handler.py +88 -0
- fast_agent/mcp/transport_tracking.py +634 -0
- fast_agent/mcp/types.py +24 -0
- fast_agent/mcp/ui_agent.py +48 -0
- fast_agent/mcp/ui_mixin.py +209 -0
- fast_agent/mcp_server_registry.py +89 -0
- fast_agent/py.typed +0 -0
- fast_agent/resources/examples/data-analysis/analysis-campaign.py +189 -0
- fast_agent/resources/examples/data-analysis/analysis.py +68 -0
- fast_agent/resources/examples/data-analysis/fastagent.config.yaml +41 -0
- fast_agent/resources/examples/data-analysis/mount-point/WA_Fn-UseC_-HR-Employee-Attrition.csv +1471 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_account_server.py +88 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_forms_server.py +297 -0
- fast_agent/resources/examples/mcp/elicitations/elicitation_game_server.py +164 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.config.yaml +35 -0
- fast_agent/resources/examples/mcp/elicitations/fastagent.secrets.yaml.example +17 -0
- fast_agent/resources/examples/mcp/elicitations/forms_demo.py +107 -0
- fast_agent/resources/examples/mcp/elicitations/game_character.py +65 -0
- fast_agent/resources/examples/mcp/elicitations/game_character_handler.py +256 -0
- fast_agent/resources/examples/mcp/elicitations/tool_call.py +21 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_one.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/agent_two.py +18 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.config.yaml +27 -0
- fast_agent/resources/examples/mcp/state-transfer/fastagent.secrets.yaml.example +15 -0
- fast_agent/resources/examples/researcher/fastagent.config.yaml +61 -0
- fast_agent/resources/examples/researcher/researcher-eval.py +53 -0
- fast_agent/resources/examples/researcher/researcher-imp.py +189 -0
- fast_agent/resources/examples/researcher/researcher.py +36 -0
- fast_agent/resources/examples/tensorzero/.env.sample +2 -0
- fast_agent/resources/examples/tensorzero/Makefile +31 -0
- fast_agent/resources/examples/tensorzero/README.md +56 -0
- fast_agent/resources/examples/tensorzero/agent.py +35 -0
- fast_agent/resources/examples/tensorzero/demo_images/clam.jpg +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/crab.png +0 -0
- fast_agent/resources/examples/tensorzero/demo_images/shrimp.png +0 -0
- fast_agent/resources/examples/tensorzero/docker-compose.yml +105 -0
- fast_agent/resources/examples/tensorzero/fastagent.config.yaml +19 -0
- fast_agent/resources/examples/tensorzero/image_demo.py +67 -0
- fast_agent/resources/examples/tensorzero/mcp_server/Dockerfile +25 -0
- fast_agent/resources/examples/tensorzero/mcp_server/entrypoint.sh +35 -0
- fast_agent/resources/examples/tensorzero/mcp_server/mcp_server.py +31 -0
- fast_agent/resources/examples/tensorzero/mcp_server/pyproject.toml +11 -0
- fast_agent/resources/examples/tensorzero/simple_agent.py +25 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_schema.json +29 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/system_template.minijinja +11 -0
- fast_agent/resources/examples/tensorzero/tensorzero_config/tensorzero.toml +35 -0
- fast_agent/resources/examples/workflows/agents_as_tools_extended.py +73 -0
- fast_agent/resources/examples/workflows/agents_as_tools_simple.py +50 -0
- fast_agent/resources/examples/workflows/chaining.py +37 -0
- fast_agent/resources/examples/workflows/evaluator.py +77 -0
- fast_agent/resources/examples/workflows/fastagent.config.yaml +26 -0
- fast_agent/resources/examples/workflows/graded_report.md +89 -0
- fast_agent/resources/examples/workflows/human_input.py +28 -0
- fast_agent/resources/examples/workflows/maker.py +156 -0
- fast_agent/resources/examples/workflows/orchestrator.py +70 -0
- fast_agent/resources/examples/workflows/parallel.py +56 -0
- fast_agent/resources/examples/workflows/router.py +69 -0
- fast_agent/resources/examples/workflows/short_story.md +13 -0
- fast_agent/resources/examples/workflows/short_story.txt +19 -0
- fast_agent/resources/setup/.gitignore +30 -0
- fast_agent/resources/setup/agent.py +28 -0
- fast_agent/resources/setup/fastagent.config.yaml +65 -0
- fast_agent/resources/setup/fastagent.secrets.yaml.example +38 -0
- fast_agent/resources/setup/pyproject.toml.tmpl +23 -0
- fast_agent/skills/__init__.py +9 -0
- fast_agent/skills/registry.py +235 -0
- fast_agent/tools/elicitation.py +369 -0
- fast_agent/tools/shell_runtime.py +402 -0
- fast_agent/types/__init__.py +59 -0
- fast_agent/types/conversation_summary.py +294 -0
- fast_agent/types/llm_stop_reason.py +78 -0
- fast_agent/types/message_search.py +249 -0
- fast_agent/ui/__init__.py +38 -0
- fast_agent/ui/console.py +59 -0
- fast_agent/ui/console_display.py +1080 -0
- fast_agent/ui/elicitation_form.py +946 -0
- fast_agent/ui/elicitation_style.py +59 -0
- fast_agent/ui/enhanced_prompt.py +1400 -0
- fast_agent/ui/history_display.py +734 -0
- fast_agent/ui/interactive_prompt.py +1199 -0
- fast_agent/ui/markdown_helpers.py +104 -0
- fast_agent/ui/markdown_truncator.py +1004 -0
- fast_agent/ui/mcp_display.py +857 -0
- fast_agent/ui/mcp_ui_utils.py +235 -0
- fast_agent/ui/mermaid_utils.py +169 -0
- fast_agent/ui/message_primitives.py +50 -0
- fast_agent/ui/notification_tracker.py +205 -0
- fast_agent/ui/plain_text_truncator.py +68 -0
- fast_agent/ui/progress_display.py +10 -0
- fast_agent/ui/rich_progress.py +195 -0
- fast_agent/ui/streaming.py +774 -0
- fast_agent/ui/streaming_buffer.py +449 -0
- fast_agent/ui/tool_display.py +422 -0
- fast_agent/ui/usage_display.py +204 -0
- fast_agent/utils/__init__.py +5 -0
- fast_agent/utils/reasoning_stream_parser.py +77 -0
- fast_agent/utils/time.py +22 -0
- fast_agent/workflow_telemetry.py +261 -0
- fast_agent_mcp-0.4.7.dist-info/METADATA +788 -0
- fast_agent_mcp-0.4.7.dist-info/RECORD +261 -0
- fast_agent_mcp-0.4.7.dist-info/WHEEL +4 -0
- fast_agent_mcp-0.4.7.dist-info/entry_points.txt +7 -0
- fast_agent_mcp-0.4.7.dist-info/licenses/LICENSE +201 -0
|
@@ -0,0 +1,85 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Fast Agent - Agent implementations and workflow patterns.
|
|
3
|
+
|
|
4
|
+
This module re-exports agent classes with lazy imports to avoid circular
|
|
5
|
+
dependencies during package initialization while preserving a clean API:
|
|
6
|
+
|
|
7
|
+
from fast_agent.agents import McpAgent, ToolAgent, LlmAgent
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
from typing import TYPE_CHECKING
|
|
11
|
+
|
|
12
|
+
from fast_agent.agents.agent_types import AgentConfig
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def __getattr__(name: str):
|
|
16
|
+
"""Lazily resolve agent classes to avoid import cycles."""
|
|
17
|
+
if name == "LlmAgent":
|
|
18
|
+
from .llm_agent import LlmAgent
|
|
19
|
+
|
|
20
|
+
return LlmAgent
|
|
21
|
+
elif name == "LlmDecorator":
|
|
22
|
+
from .llm_decorator import LlmDecorator
|
|
23
|
+
|
|
24
|
+
return LlmDecorator
|
|
25
|
+
elif name == "ToolAgent":
|
|
26
|
+
from .tool_agent import ToolAgent
|
|
27
|
+
|
|
28
|
+
return ToolAgent
|
|
29
|
+
elif name == "McpAgent":
|
|
30
|
+
from .mcp_agent import McpAgent
|
|
31
|
+
|
|
32
|
+
return McpAgent
|
|
33
|
+
elif name == "ChainAgent":
|
|
34
|
+
from .workflow.chain_agent import ChainAgent
|
|
35
|
+
|
|
36
|
+
return ChainAgent
|
|
37
|
+
elif name == "EvaluatorOptimizerAgent":
|
|
38
|
+
from .workflow.evaluator_optimizer import EvaluatorOptimizerAgent
|
|
39
|
+
|
|
40
|
+
return EvaluatorOptimizerAgent
|
|
41
|
+
elif name == "IterativePlanner":
|
|
42
|
+
from .workflow.iterative_planner import IterativePlanner
|
|
43
|
+
|
|
44
|
+
return IterativePlanner
|
|
45
|
+
elif name == "ParallelAgent":
|
|
46
|
+
from .workflow.parallel_agent import ParallelAgent
|
|
47
|
+
|
|
48
|
+
return ParallelAgent
|
|
49
|
+
elif name == "RouterAgent":
|
|
50
|
+
from .workflow.router_agent import RouterAgent
|
|
51
|
+
|
|
52
|
+
return RouterAgent
|
|
53
|
+
else:
|
|
54
|
+
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
if TYPE_CHECKING: # pragma: no cover - type checking only
|
|
58
|
+
from .llm_agent import LlmAgent as LlmAgent # noqa: F401
|
|
59
|
+
from .llm_decorator import LlmDecorator as LlmDecorator # noqa: F401
|
|
60
|
+
from .mcp_agent import McpAgent as McpAgent # noqa: F401
|
|
61
|
+
from .tool_agent import ToolAgent as ToolAgent # noqa: F401
|
|
62
|
+
from .workflow.chain_agent import ChainAgent as ChainAgent # noqa: F401
|
|
63
|
+
from .workflow.evaluator_optimizer import (
|
|
64
|
+
EvaluatorOptimizerAgent as EvaluatorOptimizerAgent,
|
|
65
|
+
) # noqa: F401
|
|
66
|
+
from .workflow.iterative_planner import IterativePlanner as IterativePlanner # noqa: F401
|
|
67
|
+
from .workflow.parallel_agent import ParallelAgent as ParallelAgent # noqa: F401
|
|
68
|
+
from .workflow.router_agent import RouterAgent as RouterAgent # noqa: F401
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
__all__ = [
|
|
72
|
+
# Core agents
|
|
73
|
+
"LlmAgent",
|
|
74
|
+
"LlmDecorator",
|
|
75
|
+
"ToolAgent",
|
|
76
|
+
"McpAgent",
|
|
77
|
+
# Workflow agents
|
|
78
|
+
"ChainAgent",
|
|
79
|
+
"EvaluatorOptimizerAgent",
|
|
80
|
+
"IterativePlanner",
|
|
81
|
+
"ParallelAgent",
|
|
82
|
+
"RouterAgent",
|
|
83
|
+
# Types
|
|
84
|
+
"AgentConfig",
|
|
85
|
+
]
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Type definitions for agents and agent configurations.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass, field
|
|
6
|
+
from enum import StrEnum, auto
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
|
|
9
|
+
from mcp.client.session import ElicitationFnT
|
|
10
|
+
|
|
11
|
+
from fast_agent.constants import DEFAULT_AGENT_INSTRUCTION
|
|
12
|
+
from fast_agent.skills import SkillManifest, SkillRegistry
|
|
13
|
+
|
|
14
|
+
# Forward imports to avoid circular dependencies
|
|
15
|
+
from fast_agent.types import RequestParams
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class AgentType(StrEnum):
|
|
19
|
+
"""Enumeration of supported agent types."""
|
|
20
|
+
|
|
21
|
+
LLM = auto()
|
|
22
|
+
BASIC = auto()
|
|
23
|
+
CUSTOM = auto()
|
|
24
|
+
ORCHESTRATOR = auto()
|
|
25
|
+
PARALLEL = auto()
|
|
26
|
+
EVALUATOR_OPTIMIZER = auto()
|
|
27
|
+
ROUTER = auto()
|
|
28
|
+
CHAIN = auto()
|
|
29
|
+
ITERATIVE_PLANNER = auto()
|
|
30
|
+
MAKER = auto()
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
@dataclass
class AgentConfig:
    """Configuration for an Agent instance"""

    # Agent identifier; the only required field.
    name: str
    # System instruction; also copied into default_request_params.systemPrompt
    # by __post_init__ below.
    instruction: str = DEFAULT_AGENT_INSTRUCTION
    # MCP server names this agent attaches to.
    servers: list[str] = field(default_factory=list)
    tools: dict[str, list[str]] = field(default_factory=dict)  # filters for tools
    resources: dict[str, list[str]] = field(default_factory=dict)  # filters for resources
    prompts: dict[str, list[str]] = field(default_factory=dict)  # filters for prompts
    # Skills source: a manifest, a registry, a filesystem path, or None.
    skills: SkillManifest | SkillRegistry | Path | str | None = None
    # Resolved manifests; excluded from repr to keep debug output compact.
    skill_manifests: list[SkillManifest] = field(default_factory=list, repr=False)
    model: str | None = None
    # Mirrored into default_request_params.use_history by __post_init__.
    use_history: bool = True
    # Filled in by __post_init__ when left as None.
    default_request_params: RequestParams | None = None
    human_input: bool = False
    agent_type: AgentType = AgentType.BASIC
    # NOTE(review): presumably marks this agent as the app default — confirm
    # against the factory/registration code.
    default: bool = False
    elicitation_handler: ElicitationFnT | None = None
    api_key: str | None = None

    def __post_init__(self) -> None:
        """Ensure default_request_params exists with proper history setting"""
        if self.default_request_params is None:
            self.default_request_params = RequestParams(
                use_history=self.use_history, systemPrompt=self.instruction
            )
        else:
            # Override the request params history setting if explicitly configured
            self.default_request_params.use_history = self.use_history
            # Ensure instruction takes precedence over any existing systemPrompt
            self.default_request_params.systemPrompt = self.instruction
|
@@ -0,0 +1,350 @@
|
|
|
1
|
+
"""
|
|
2
|
+
LLM Agent class that adds interaction behaviors to LlmDecorator.
|
|
3
|
+
|
|
4
|
+
This class extends LlmDecorator with LLM-specific interaction behaviors including:
|
|
5
|
+
- UI display methods for messages, tools, and prompts
|
|
6
|
+
- Stop reason handling
|
|
7
|
+
- Tool call tracking
|
|
8
|
+
- Chat display integration
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from typing import Callable, List, Optional, Tuple
|
|
12
|
+
|
|
13
|
+
from a2a.types import AgentCapabilities
|
|
14
|
+
from mcp import Tool
|
|
15
|
+
from rich.text import Text
|
|
16
|
+
|
|
17
|
+
from fast_agent.agents.agent_types import AgentConfig
|
|
18
|
+
from fast_agent.agents.llm_decorator import LlmDecorator, ModelT
|
|
19
|
+
from fast_agent.constants import FAST_AGENT_ERROR_CHANNEL
|
|
20
|
+
from fast_agent.context import Context
|
|
21
|
+
from fast_agent.mcp.helpers.content_helpers import (
|
|
22
|
+
get_text,
|
|
23
|
+
is_image_content,
|
|
24
|
+
is_resource_content,
|
|
25
|
+
is_resource_link,
|
|
26
|
+
)
|
|
27
|
+
from fast_agent.types import PromptMessageExtended, RequestParams
|
|
28
|
+
from fast_agent.types.llm_stop_reason import LlmStopReason
|
|
29
|
+
from fast_agent.ui.console_display import ConsoleDisplay
|
|
30
|
+
from fast_agent.workflow_telemetry import (
|
|
31
|
+
NoOpWorkflowTelemetryProvider,
|
|
32
|
+
WorkflowTelemetryProvider,
|
|
33
|
+
)
|
|
34
|
+
|
|
35
|
+
# TODO -- decide what to do with type safety for model/chat_turn()

# Default A2A capability advertisement: everything disabled unless a caller
# overrides it.
DEFAULT_CAPABILITIES = AgentCapabilities(
    streaming=False, push_notifications=False, state_transition_history=False
)
+
|
|
41
|
+
|
|
42
|
+
class LlmAgent(LlmDecorator):
    """
    An LLM agent that adds interaction behaviors to the base LlmDecorator.

    This class provides LLM-specific functionality including UI display methods,
    tool call tracking, and chat interaction patterns while delegating core
    LLM operations to the attached FastAgentLLMProtocol.
    """

    def __init__(
        self,
        config: AgentConfig,
        context: Context | None = None,
    ) -> None:
        """Create the agent and wire up its console display and telemetry.

        Args:
            config: Agent configuration (name, instruction, model, ...).
            context: Optional application context; its config, when present,
                seeds the ConsoleDisplay.
        """
        super().__init__(config=config, context=context)

        # Initialize display component
        self._display = ConsoleDisplay(config=self._context.config if self._context else None)
        # Telemetry defaults to a no-op provider; replaceable via the
        # workflow_telemetry setter below.
        self._workflow_telemetry_provider: WorkflowTelemetryProvider = (
            NoOpWorkflowTelemetryProvider()
        )

    @property
    def display(self) -> ConsoleDisplay:
        """UI display helper for presenting messages and tool activity."""
        return self._display

    @display.setter
    def display(self, value: ConsoleDisplay) -> None:
        # Allows callers (e.g. hosts embedding the agent) to swap the display.
        self._display = value

    @property
    def workflow_telemetry(self) -> WorkflowTelemetryProvider:
        """Telemetry provider for emitting workflow delegation steps."""
        return self._workflow_telemetry_provider

    @workflow_telemetry.setter
    def workflow_telemetry(self, provider: WorkflowTelemetryProvider | None) -> None:
        # None resets to the no-op provider so callers never need null checks.
        if provider is None:
            provider = NoOpWorkflowTelemetryProvider()
        self._workflow_telemetry_provider = provider

    async def show_assistant_message(
        self,
        message: PromptMessageExtended,
        bottom_items: List[str] | None = None,
        highlight_items: str | List[str] | None = None,
        max_item_length: int | None = None,
        name: str | None = None,
        model: str | None = None,
        additional_message: Optional[Text] = None,
    ) -> None:
        """Display an assistant message with appropriate styling based on stop reason.

        Args:
            message: The message to display
            bottom_items: Optional items for bottom bar (e.g., servers, destinations)
            highlight_items: Items to highlight in bottom bar
            max_item_length: Max length for bottom items
            name: Optional agent name to display
            model: Optional model name to display
            additional_message: Optional additional message to display
        """

        # Determine display content based on stop reason if not provided
        additional_segments: List[Text] = []

        # Generate additional message based on stop reason
        match message.stop_reason:
            case LlmStopReason.END_TURN:
                # Normal completion: no footer annotation needed.
                pass

            case LlmStopReason.MAX_TOKENS:
                additional_segments.append(
                    Text(
                        "\n\nMaximum output tokens reached - generation stopped.",
                        style="dim red italic",
                    )
                )

            case LlmStopReason.SAFETY:
                additional_segments.append(
                    Text(
                        "\n\nContent filter activated - generation stopped.",
                        style="dim red italic",
                    )
                )

            case LlmStopReason.PAUSE:
                additional_segments.append(
                    Text("\n\nLLM has requested a pause.", style="dim green italic")
                )

            case LlmStopReason.STOP_SEQUENCE:
                additional_segments.append(
                    Text(
                        "\n\nStop Sequence activated - generation stopped.",
                        style="dim red italic",
                    )
                )

            case LlmStopReason.TOOL_USE:
                # Only annotate when the assistant produced no text alongside
                # the tool request (last_text() is None).
                if None is message.last_text():
                    additional_segments.append(
                        Text("The assistant requested tool calls", style="dim green italic")
                    )

            case LlmStopReason.ERROR:
                # Check if there's detailed error information in the error channel
                if message.channels and FAST_AGENT_ERROR_CHANNEL in message.channels:
                    error_blocks = message.channels[FAST_AGENT_ERROR_CHANNEL]
                    if error_blocks:
                        # Extract text from the error block using the helper function
                        error_text = get_text(error_blocks[0])
                        if error_text:
                            additional_segments.append(
                                Text(f"\n\nError details: {error_text}", style="dim red italic")
                            )
                        else:
                            # Fallback if we couldn't extract text
                            additional_segments.append(
                                Text(
                                    f"\n\nError details: {str(error_blocks[0])}",
                                    style="dim red italic",
                                )
                            )
                else:
                    # Fallback if no detailed error is available
                    additional_segments.append(
                        Text("\n\nAn error occurred during generation.", style="dim red italic")
                    )

            case LlmStopReason.CANCELLED:
                additional_segments.append(
                    Text("\n\nGeneration cancelled by user.", style="dim yellow italic")
                )

            case _:
                # Unknown-but-present stop reasons are surfaced verbatim; a
                # missing stop_reason is shown without any footer.
                if message.stop_reason:
                    additional_segments.append(
                        Text(
                            f"\n\nGeneration stopped for an unhandled reason ({message.stop_reason})",
                            style="dim red italic",
                        )
                    )

        # Caller-supplied footer text is appended after any stop-reason note;
        # non-Text values are coerced via str().
        if additional_message is not None:
            additional_segments.append(
                additional_message
                if isinstance(additional_message, Text)
                else Text(str(additional_message))
            )

        additional_message_text = None
        if additional_segments:
            # Concatenate all footer segments into a single rich Text object.
            combined = Text()
            for segment in additional_segments:
                combined += segment
            additional_message_text = combined

        # Alias: the full PromptMessageExtended is handed to the display layer.
        message_text = message

        # Use provided name/model or fall back to defaults
        display_name = name if name is not None else self.name
        display_model = model if model is not None else (self.llm.model_name if self.llm else None)

        # Convert highlight_items to highlight_index
        # NOTE: when a list is given, only its first element is honored.
        highlight_index = None
        if highlight_items and bottom_items:
            if isinstance(highlight_items, str):
                try:
                    highlight_index = bottom_items.index(highlight_items)
                except ValueError:
                    # Item not present in bottom bar: silently skip highlighting.
                    pass
            elif isinstance(highlight_items, list) and len(highlight_items) > 0:
                try:
                    highlight_index = bottom_items.index(highlight_items[0])
                except ValueError:
                    pass

        await self.display.show_assistant_message(
            message_text,
            bottom_items=bottom_items,
            highlight_index=highlight_index,
            max_item_length=max_item_length,
            name=display_name,
            model=display_model,
            additional_message=additional_message_text,
        )

    def show_user_message(self, message: PromptMessageExtended) -> None:
        """Display a user message in a formatted panel."""
        model = self.llm.model_name if self.llm else None
        chat_turn = self.llm.chat_turn() if self.llm else 0

        # Extract attachment descriptions from non-text content
        attachments: list[str] = []
        for content in message.content:
            if is_resource_link(content):
                # ResourceLink: show name or mime type
                from mcp.types import ResourceLink

                assert isinstance(content, ResourceLink)
                label = content.name or content.mimeType or "resource"
                attachments.append(label)
            elif is_image_content(content):
                attachments.append("image")
            elif is_resource_content(content):
                # EmbeddedResource: show name or uri
                from mcp.types import EmbeddedResource

                assert isinstance(content, EmbeddedResource)
                label = getattr(content.resource, "name", None) or str(content.resource.uri)
                attachments.append(label)

        # Only the last text block is rendered as the message body; non-text
        # content is summarized via the attachments list above.
        self.display.show_user_message(
            message.last_text() or "",
            model,
            chat_turn,
            name=self.name,
            attachments=attachments if attachments else None,
        )

    def _should_stream(self) -> bool:
        """Determine whether streaming display should be used."""
        # Defaults to True when no display is attached; otherwise defer to the
        # display's configured streaming preferences.
        if getattr(self, "display", None):
            enabled, _ = self.display.resolve_streaming_preferences()
            return enabled
        return True

    async def generate_impl(
        self,
        messages: List[PromptMessageExtended],
        request_params: RequestParams | None = None,
        tools: List[Tool] | None = None,
    ) -> PromptMessageExtended:
        """
        Enhanced generate implementation that resets tool call tracking.
        Messages are already normalized to List[PromptMessageExtended].

        Args:
            messages: Normalized conversation history; the trailing user
                message (if any) is echoed to the console first.
            request_params: Optional per-request overrides.
            tools: Optional tool definitions to offer the LLM.

        Returns:
            The assistant's response message.
        """
        if "user" == messages[-1].role:
            self.show_user_message(message=messages[-1])

        # TODO - manage error catch, recovery, pause
        summary_text: Text | None = None

        if self._should_stream():
            display_name = self.name
            display_model = self.llm.model_name if self.llm else None

            remove_listener: Callable[[], None] | None = None
            remove_tool_listener: Callable[[], None] | None = None

            with self.display.streaming_assistant_message(
                name=display_name,
                model=display_model,
            ) as stream_handle:
                # Listener attachment is best-effort: if the LLM does not
                # support stream listeners, fall through with both unset.
                try:
                    remove_listener = self.llm.add_stream_listener(stream_handle.update_chunk)
                    remove_tool_listener = self.llm.add_tool_stream_listener(
                        stream_handle.handle_tool_event
                    )
                except Exception:
                    remove_listener = None
                    remove_tool_listener = None

                try:
                    result, summary = await self._generate_with_summary(
                        messages, request_params, tools
                    )
                finally:
                    # Always detach listeners, even when generation raises.
                    if remove_listener:
                        remove_listener()
                    if remove_tool_listener:
                        remove_tool_listener()

                if summary:
                    summary_text = Text(f"\n\n{summary.message}", style="dim red italic")

                # Finalize inside the context manager so the stream panel is
                # completed before the static message is printed below.
                stream_handle.finalize(result)

            await self.show_assistant_message(result, additional_message=summary_text)
        else:
            # Non-streaming path: single call, then render once.
            result, summary = await self._generate_with_summary(
                messages, request_params, tools
            )

            summary_text = (
                Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
            )
            await self.show_assistant_message(result, additional_message=summary_text)

        return result

    async def structured_impl(
        self,
        messages: List[PromptMessageExtended],
        model: type[ModelT],
        request_params: RequestParams | None = None,
    ) -> Tuple[ModelT | None, PromptMessageExtended]:
        """Generate a structured (schema-validated) response.

        Args:
            messages: Normalized conversation history; the trailing user
                message (if any) is echoed to the console first.
            model: Target model type to parse the response into.
            request_params: Optional per-request overrides.

        Returns:
            Tuple of (parsed model instance or None, raw assistant message).
        """
        if "user" == messages[-1].role:
            self.show_user_message(message=messages[-1])

        (result, message), summary = await self._structured_with_summary(
            messages, model, request_params
        )
        summary_text = Text(f"\n\n{summary.message}", style="dim red italic") if summary else None
        await self.show_assistant_message(message=message, additional_message=summary_text)
        return result, message