fast-agent-mcp 0.2.57__py3-none-any.whl → 0.2.58__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: the registry has flagged this version of fast-agent-mcp; see the registry page for details.
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.2.58.dist-info}/METADATA +2 -2
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.2.58.dist-info}/RECORD +14 -13
- mcp_agent/config.py +3 -0
- mcp_agent/human_input/elicitation_form.py +45 -33
- mcp_agent/llm/augmented_llm.py +1 -0
- mcp_agent/llm/providers/augmented_llm_anthropic.py +1 -0
- mcp_agent/llm/providers/augmented_llm_bedrock.py +890 -602
- mcp_agent/llm/providers/augmented_llm_google_native.py +1 -0
- mcp_agent/llm/providers/augmented_llm_openai.py +1 -0
- mcp_agent/llm/providers/bedrock_utils.py +216 -0
- mcp_agent/resources/examples/mcp/elicitations/elicitation_forms_server.py +25 -3
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.2.58.dist-info}/WHEEL +0 -0
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.2.58.dist-info}/entry_points.txt +0 -0
- {fast_agent_mcp-0.2.57.dist-info → fast_agent_mcp-0.2.58.dist-info}/licenses/LICENSE +0 -0
mcp_agent/llm/providers/augmented_llm_google_native.py
@@ -160,6 +160,7 @@ class GoogleNativeAugmentedLLM(AugmentedLLM[types.Content, types.Content]):
         AugmentedLLM.PARAM_USE_HISTORY,  # Handled by AugmentedLLM base / this class's logic
         AugmentedLLM.PARAM_MAX_ITERATIONS,  # Handled by this class's loop
         # Add any other OpenAI-specific params not applicable to google.genai
+        AugmentedLLM.PARAM_MCP_METADATA,
     }.union(AugmentedLLM.BASE_EXCLUDE_FIELDS)
 
     def __init__(self, *args, **kwargs) -> None:
mcp_agent/llm/providers/augmented_llm_openai.py
@@ -59,6 +59,7 @@ class OpenAIAugmentedLLM(AugmentedLLM[ChatCompletionMessageParam, ChatCompletion
         AugmentedLLM.PARAM_USE_HISTORY,
         AugmentedLLM.PARAM_MAX_ITERATIONS,
         AugmentedLLM.PARAM_TEMPLATE_VARS,
+        AugmentedLLM.PARAM_MCP_METADATA,
     }
 
     def __init__(self, provider: Provider = Provider.OPENAI, *args, **kwargs) -> None:
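Note on the two hunks above: both add the new PARAM_MCP_METADATA key to the provider-specific exclude sets, so the MCP metadata introduced in this release never leaks into the arguments sent to the provider API. As a rough illustration of how such an exclude set is typically consumed when turning request parameters into provider call arguments (a minimal sketch; the class and field names below are illustrative stand-ins, not fast-agent's exact API):

    from typing import Optional
    from pydantic import BaseModel

    class RequestParams(BaseModel):  # illustrative stand-in for the real class
        use_history: bool = True
        max_iterations: int = 10
        mcp_metadata: Optional[dict] = None  # new in 0.2.58; must not reach the provider
        temperature: float = 0.7

    EXCLUDE_FIELDS = {"use_history", "max_iterations", "mcp_metadata"}

    def provider_arguments(params: RequestParams) -> dict:
        # Only fields the provider API understands survive the dump.
        return params.model_dump(exclude=EXCLUDE_FIELDS, exclude_none=True)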
mcp_agent/llm/providers/bedrock_utils.py
@@ -0,0 +1,216 @@
+from __future__ import annotations
+
+from typing import Collection, Dict, List, Literal, Optional, Set, TypedDict, cast
+
+# Lightweight, runtime-only loader for AWS Bedrock models.
+# - Fetches once per process via boto3 (region from session; env override supported)
+# - Memory cache only; no disk persistence
+# - Provides filtering and optional prefixing (default 'bedrock.') for model IDs
+
+try:
+    import boto3
+except Exception:  # pragma: no cover - import error path
+    boto3 = None  # type: ignore[assignment]
+
+
+Modality = Literal["TEXT", "IMAGE", "VIDEO", "SPEECH", "EMBEDDING"]
+Lifecycle = Literal["ACTIVE", "LEGACY"]
+InferenceType = Literal["ON_DEMAND", "PROVISIONED", "INFERENCE_PROFILE"]
+
+
+class ModelSummary(TypedDict, total=False):
+    modelId: str
+    modelName: str
+    providerName: str
+    inputModalities: List[Modality]
+    outputModalities: List[Modality]
+    responseStreamingSupported: bool
+    customizationsSupported: List[str]
+    inferenceTypesSupported: List[InferenceType]
+    modelLifecycle: Dict[str, Lifecycle]
+
+
+_MODELS_CACHE_BY_REGION: Dict[str, Dict[str, ModelSummary]] = {}
+
+
+def _resolve_region(region: Optional[str]) -> str:
+    if region:
+        return region
+    import os
+
+    env_region = os.getenv("BEDROCK_REGION")
+    if env_region:
+        return env_region
+    if boto3 is None:
+        raise RuntimeError(
+            "boto3 is required to load Bedrock models. Install boto3 or provide a static list."
+        )
+    session = boto3.Session()
+    if not session.region_name:
+        raise RuntimeError(
+            "AWS region could not be resolved. Configure your AWS SSO/profile or set BEDROCK_REGION."
+        )
+    return session.region_name
+
+
+def _strip_prefix(model_id: str, prefix: str) -> str:
+    return model_id[len(prefix) :] if prefix and model_id.startswith(prefix) else model_id
+
+
+def _ensure_loaded(region: Optional[str] = None) -> Dict[str, ModelSummary]:
+    resolved_region = _resolve_region(region)
+    cache = _MODELS_CACHE_BY_REGION.get(resolved_region)
+    if cache is not None:
+        return cache
+
+    if boto3 is None:
+        raise RuntimeError("boto3 is required to load Bedrock models. Install boto3.")
+
+    try:
+        client = boto3.client("bedrock", region_name=resolved_region)
+        resp = client.list_foundation_models()
+        summaries: List[ModelSummary] = resp.get("modelSummaries", [])  # type: ignore[assignment]
+    except Exception as exc:  # keep error simple and actionable
+        raise RuntimeError(
+            f"Failed to list Bedrock foundation models in region '{resolved_region}'. "
+            f"Ensure AWS credentials (SSO) and permissions (bedrock:ListFoundationModels) are configured. "
+            f"Original error: {exc}"
+        )
+
+    cache = {s.get("modelId", ""): s for s in summaries if s.get("modelId")}
+    _MODELS_CACHE_BY_REGION[resolved_region] = cache
+    return cache
+
+
+def refresh_bedrock_models(region: Optional[str] = None) -> None:
+    resolved_region = _resolve_region(region)
+    # drop and reload on next access
+    _MODELS_CACHE_BY_REGION.pop(resolved_region, None)
+    _ensure_loaded(resolved_region)
+
+
+def _matches_modalities(model_modalities: List[Modality], requested: Collection[Modality]) -> bool:
+    # include if all requested are present in the model's modalities
+    return set(requested).issubset(set(model_modalities))
+
+
+def all_model_summaries(
+    input_modalities: Optional[Collection[Modality]] = None,
+    output_modalities: Optional[Collection[Modality]] = None,
+    include_legacy: bool = False,
+    providers: Optional[Collection[str]] = None,
+    inference_types: Optional[Collection[InferenceType]] = None,
+    direct_invocation_only: bool = True,
+    region: Optional[str] = None,
+) -> List[ModelSummary]:
+    """Return filtered Bedrock model summaries.
+
+    Defaults: input_modalities={"TEXT"}, output_modalities={"TEXT"}, include_legacy=False,
+    inference_types={"ON_DEMAND"}, direct_invocation_only=True.
+    """
+
+    cache = _ensure_loaded(region)
+    results: List[ModelSummary] = []
+
+    effective_output: Set[Modality] = (
+        set(output_modalities) if output_modalities is not None else {cast("Modality", "TEXT")}
+    )
+    effective_input: Optional[Set[Modality]] = (
+        set(input_modalities) if input_modalities is not None else {cast("Modality", "TEXT")}
+    )
+    provider_filter: Optional[Set[str]] = set(providers) if providers is not None else None
+    effective_inference: Set[InferenceType] = (
+        set(inference_types) if inference_types is not None else {cast("InferenceType", "ON_DEMAND")}
+    )
+
+    for summary in cache.values():
+        lifecycle = (summary.get("modelLifecycle") or {}).get("status")
+        if not include_legacy and lifecycle == "LEGACY":
+            continue
+
+        if provider_filter is not None and summary.get("providerName") not in provider_filter:
+            continue
+
+        # direct invocation only: exclude profile variants like :0:24k or :mm
+        if direct_invocation_only:
+            mid = summary.get("modelId") or ""
+            if mid.count(":") > 1:
+                continue
+
+        # modalities
+        model_inputs: List[Modality] = summary.get("inputModalities", [])  # type: ignore[assignment]
+        model_outputs: List[Modality] = summary.get("outputModalities", [])  # type: ignore[assignment]
+
+        if effective_input is not None and not _matches_modalities(model_inputs, effective_input):
+            continue
+        if effective_output and not _matches_modalities(model_outputs, effective_output):
+            continue
+
+        # inference types
+        model_inference: List[InferenceType] = summary.get("inferenceTypesSupported", [])  # type: ignore[assignment]
+        if effective_inference and not set(effective_inference).issubset(set(model_inference)):
+            continue
+
+        results.append(summary)
+
+    return results
+
+
+def all_bedrock_models(
+    input_modalities: Optional[Collection[Modality]] = None,
+    output_modalities: Optional[Collection[Modality]] = None,
+    include_legacy: bool = False,
+    providers: Optional[Collection[str]] = None,
+    prefix: str = "bedrock.",
+    inference_types: Optional[Collection[InferenceType]] = None,
+    direct_invocation_only: bool = True,
+    region: Optional[str] = None,
+) -> List[str]:
+    """Return model IDs (optionally prefixed) filtered by the given criteria.
+
+    Defaults: output_modalities={"TEXT"}, exclude LEGACY,
+    inference_types={"ON_DEMAND"}, direct_invocation_only=True.
+    """
+
+    summaries = all_model_summaries(
+        input_modalities=input_modalities,
+        output_modalities=output_modalities,
+        include_legacy=include_legacy,
+        providers=providers,
+        inference_types=inference_types,
+        direct_invocation_only=direct_invocation_only,
+        region=region,
+    )
+    ids: List[str] = []
+    for s in summaries:
+        mid = s.get("modelId")
+        if mid:
+            ids.append(mid)
+    if prefix:
+        return [f"{prefix}{mid}" for mid in ids]
+    return ids
+
+
+def get_model_metadata(model_id: str, region: Optional[str] = None) -> Optional[ModelSummary]:
+    cache = _ensure_loaded(region)
+    # Accept either prefixed or plain model IDs
+    plain_id = _strip_prefix(model_id, "bedrock.")
+    return cache.get(plain_id)
+
+
+def list_providers(region: Optional[str] = None) -> List[str]:
+    cache = _ensure_loaded(region)
+    providers = {s.get("providerName") for s in cache.values() if s.get("providerName")}
+    return sorted(providers)  # type: ignore[arg-type]
+
+
+__all__ = [
+    "Modality",
+    "Lifecycle",
+    "ModelSummary",
+    "all_bedrock_models",
+    "all_model_summaries",
+    "get_model_metadata",
+    "list_providers",
+    "refresh_bedrock_models",
+]
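The new bedrock_utils module above exposes a small, memory-cached API for discovering Bedrock models at runtime. A quick usage sketch based on the signatures in the hunk (the region and model ID values are illustrative and depend on your AWS account):

    from mcp_agent.llm.providers.bedrock_utils import (
        all_bedrock_models,
        get_model_metadata,
        list_providers,
    )

    # Defaults: TEXT-in/TEXT-out, ON_DEMAND, non-LEGACY, directly invocable models,
    # with each ID prefixed 'bedrock.'.
    models = all_bedrock_models(region="us-east-1")

    # Narrow to one provider; valid names come from list_providers().
    print(list_providers(region="us-east-1"))
    anthropic_only = all_bedrock_models(providers={"Anthropic"}, region="us-east-1")

    # Metadata lookup accepts prefixed or plain IDs (this ID is illustrative).
    meta = get_model_metadata("bedrock.anthropic.claude-3-5-sonnet-20240620-v1:0")
    if meta:
        print(meta.get("providerName"), meta.get("responseStreamingSupported"))

The first such call in a process triggers one bedrock:ListFoundationModels request per region; results stay in the per-region in-memory cache until refresh_bedrock_models() drops it.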
mcp_agent/resources/examples/mcp/elicitations/elicitation_forms_server.py
@@ -100,11 +100,25 @@ async def product_review() -> ReadResourceResult:
             },
         )
         review_text: str = Field(
-            description="Tell us about your experience",
+            description="Tell us about your experience",
+            default="""Great product!
+Here's what I loved:
+
+- Excellent build quality
+- Fast shipping
+- Works as advertised
+
+One minor issue:
+- Instructions could be clearer
+
+Overall, highly recommended!""",
+            min_length=10,
+            max_length=1000
         )
 
     result = await mcp.get_context().elicit(
-        "Share your product review - Help others make informed decisions!",
+        "Share your product review - Help others make informed decisions!",
+        schema=ProductReview,
     )
 
     match result:
@@ -140,6 +154,7 @@ async def account_settings() -> ReadResourceResult:
         email_notifications: bool = Field(True, description="Receive email notifications?")
         marketing_emails: bool = Field(False, description="Subscribe to marketing emails?")
         theme: str = Field(
+            "dark",
             description="Choose your preferred theme",
             json_schema_extra={
                 "enum": ["light", "dark", "auto"],
@@ -147,7 +162,9 @@ async def account_settings() -> ReadResourceResult:
             },
         )
         privacy_public: bool = Field(False, description="Make your profile public?")
-        items_per_page: int = Field(
+        items_per_page: int = Field(
+            25, description="Items to show per page (10-100)", ge=10, le=100
+        )
 
     result = await mcp.get_context().elicit("Update your account settings", schema=AccountSettings)
@@ -182,7 +199,11 @@ async def service_appointment() -> ReadResourceResult:
 
     class ServiceAppointment(BaseModel):
         customer_name: str = Field(description="Your full name", min_length=2, max_length=50)
+        phone_number: str = Field(
+            "555-", description="Contact phone number", min_length=10, max_length=20
+        )
         vehicle_type: str = Field(
+            default="sedan",
             description="What type of vehicle do you have?",
             json_schema_extra={
                 "enum": ["sedan", "suv", "truck", "motorcycle", "other"],
@@ -205,6 +226,7 @@ async def service_appointment() -> ReadResourceResult:
             lines = [
                 "🔧 Service Appointment Scheduled!",
                 f"👤 Customer: {data.customer_name}",
+                f"📞 Phone: {data.phone_number}",
                 f"🚗 Vehicle: {data.vehicle_type.title()}",
                 f"🚙 Loaner needed: {'Yes' if data.needs_loaner else 'No'}",
                 f"📅 Appointment: {data.appointment_time}",
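For context, each example resource consumes its elicitation result with a match statement like the "match result:" lines visible above. A minimal sketch of that pattern, assuming the MCP SDK's elicitation result types (the response strings here are illustrative):

    from mcp.server.elicitation import (
        AcceptedElicitation,
        CancelledElicitation,
        DeclinedElicitation,
    )

    match result:
        case AcceptedElicitation(data=data):
            # data is a validated ServiceAppointment, so fields such as
            # data.phone_number are safe to read here.
            response = "\n".join(lines)
        case DeclinedElicitation():
            response = "Appointment request declined"
        case CancelledElicitation():
            response = "Appointment request cancelled"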