amazon_ads_mcp-0.2.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- amazon_ads_mcp/__init__.py +11 -0
- amazon_ads_mcp/auth/__init__.py +33 -0
- amazon_ads_mcp/auth/base.py +211 -0
- amazon_ads_mcp/auth/hooks.py +172 -0
- amazon_ads_mcp/auth/manager.py +791 -0
- amazon_ads_mcp/auth/oauth_state_store.py +277 -0
- amazon_ads_mcp/auth/providers/__init__.py +14 -0
- amazon_ads_mcp/auth/providers/direct.py +393 -0
- amazon_ads_mcp/auth/providers/example_auth0.py.example +216 -0
- amazon_ads_mcp/auth/providers/openbridge.py +512 -0
- amazon_ads_mcp/auth/registry.py +146 -0
- amazon_ads_mcp/auth/secure_token_store.py +297 -0
- amazon_ads_mcp/auth/token_store.py +723 -0
- amazon_ads_mcp/config/__init__.py +5 -0
- amazon_ads_mcp/config/sampling.py +111 -0
- amazon_ads_mcp/config/settings.py +366 -0
- amazon_ads_mcp/exceptions.py +314 -0
- amazon_ads_mcp/middleware/__init__.py +11 -0
- amazon_ads_mcp/middleware/authentication.py +1474 -0
- amazon_ads_mcp/middleware/caching.py +177 -0
- amazon_ads_mcp/middleware/oauth.py +175 -0
- amazon_ads_mcp/middleware/sampling.py +112 -0
- amazon_ads_mcp/models/__init__.py +320 -0
- amazon_ads_mcp/models/amc_models.py +837 -0
- amazon_ads_mcp/models/api_responses.py +847 -0
- amazon_ads_mcp/models/base_models.py +215 -0
- amazon_ads_mcp/models/builtin_responses.py +496 -0
- amazon_ads_mcp/models/dsp_models.py +556 -0
- amazon_ads_mcp/models/stores_brands.py +610 -0
- amazon_ads_mcp/server/__init__.py +6 -0
- amazon_ads_mcp/server/__main__.py +6 -0
- amazon_ads_mcp/server/builtin_prompts.py +269 -0
- amazon_ads_mcp/server/builtin_tools.py +962 -0
- amazon_ads_mcp/server/file_routes.py +547 -0
- amazon_ads_mcp/server/html_templates.py +149 -0
- amazon_ads_mcp/server/mcp_server.py +327 -0
- amazon_ads_mcp/server/openapi_utils.py +158 -0
- amazon_ads_mcp/server/sampling_handler.py +251 -0
- amazon_ads_mcp/server/server_builder.py +751 -0
- amazon_ads_mcp/server/sidecar_loader.py +178 -0
- amazon_ads_mcp/server/transform_executor.py +827 -0
- amazon_ads_mcp/tools/__init__.py +22 -0
- amazon_ads_mcp/tools/cache_management.py +105 -0
- amazon_ads_mcp/tools/download_tools.py +267 -0
- amazon_ads_mcp/tools/identity.py +236 -0
- amazon_ads_mcp/tools/oauth.py +598 -0
- amazon_ads_mcp/tools/profile.py +150 -0
- amazon_ads_mcp/tools/profile_listing.py +285 -0
- amazon_ads_mcp/tools/region.py +320 -0
- amazon_ads_mcp/tools/region_identity.py +175 -0
- amazon_ads_mcp/utils/__init__.py +6 -0
- amazon_ads_mcp/utils/async_compat.py +215 -0
- amazon_ads_mcp/utils/errors.py +452 -0
- amazon_ads_mcp/utils/export_content_type_resolver.py +249 -0
- amazon_ads_mcp/utils/export_download_handler.py +579 -0
- amazon_ads_mcp/utils/header_resolver.py +81 -0
- amazon_ads_mcp/utils/http/__init__.py +56 -0
- amazon_ads_mcp/utils/http/circuit_breaker.py +127 -0
- amazon_ads_mcp/utils/http/client_manager.py +329 -0
- amazon_ads_mcp/utils/http/request.py +207 -0
- amazon_ads_mcp/utils/http/resilience.py +512 -0
- amazon_ads_mcp/utils/http/resilient_client.py +195 -0
- amazon_ads_mcp/utils/http/retry.py +76 -0
- amazon_ads_mcp/utils/http_client.py +873 -0
- amazon_ads_mcp/utils/media/__init__.py +21 -0
- amazon_ads_mcp/utils/media/negotiator.py +243 -0
- amazon_ads_mcp/utils/media/types.py +199 -0
- amazon_ads_mcp/utils/openapi/__init__.py +16 -0
- amazon_ads_mcp/utils/openapi/json.py +55 -0
- amazon_ads_mcp/utils/openapi/loader.py +263 -0
- amazon_ads_mcp/utils/openapi/refs.py +46 -0
- amazon_ads_mcp/utils/region_config.py +200 -0
- amazon_ads_mcp/utils/response_wrapper.py +171 -0
- amazon_ads_mcp/utils/sampling_helpers.py +156 -0
- amazon_ads_mcp/utils/sampling_wrapper.py +173 -0
- amazon_ads_mcp/utils/security.py +630 -0
- amazon_ads_mcp/utils/tool_naming.py +137 -0
- amazon_ads_mcp-0.2.7.dist-info/METADATA +664 -0
- amazon_ads_mcp-0.2.7.dist-info/RECORD +82 -0
- amazon_ads_mcp-0.2.7.dist-info/WHEEL +4 -0
- amazon_ads_mcp-0.2.7.dist-info/entry_points.txt +3 -0
- amazon_ads_mcp-0.2.7.dist-info/licenses/LICENSE +21 -0
amazon_ads_mcp/server/sampling_handler.py
@@ -0,0 +1,251 @@
"""Server-side sampling handler for LLM fallback when client doesn't support sampling."""

import logging
from typing import Any, Dict, List, Optional

from mcp.types import (
    ContentBlock,
    CreateMessageRequestParams,
    SamplingMessage,
    TextContent,
)

from ..config.sampling import SamplingConfig

logger = logging.getLogger(__name__)


class ServerSamplingHandler:
    """
    Server-side sampling handler that provides fallback LLM sampling
    when the client doesn't support it.

    This handler is invoked by sample_with_fallback() when:
    1. The client's ctx.sample() fails with a "does not support sampling" error
    2. A server-side handler is available in the context
    """
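    # Note: sample_with_fallback() is not defined in this file; judging from
    # the file list above it plausibly lives in
    # amazon_ads_mcp/utils/sampling_wrapper.py (+173), which would wrap the
    # client-side ctx.sample() call with this server-side fallback.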

    def __init__(self, config: SamplingConfig):
        """
        Initialize the sampling handler with configuration.

        Args:
            config: Sampling configuration including provider, model, API key, etc.
        """
        self.config = config
        self._client = None

        if not config.is_valid():
            raise ValueError("Invalid sampling configuration")

        # Log configuration (with redacted API key)
        api_key_status = "configured" if config.api_key else "missing"
        logger.info(
            "Server-side sampling handler initialized: provider=%s, model=%s, api_key=%s",
            config.provider,
            config.model,
            api_key_status,
        )

    async def __call__(
        self,
        messages: List[SamplingMessage],
        params: CreateMessageRequestParams,
        request_context: Optional[Dict[str, Any]] = None,
    ) -> ContentBlock:
        """
        Handle a sampling request as a fallback when the client doesn't support sampling.

        This method signature matches what sample_with_fallback() expects.

        Args:
            messages: List of sampling messages to send to the LLM
            params: Request parameters including system prompt, temperature, etc.
            request_context: Optional request context from FastMCP

        Returns:
            ContentBlock with the LLM's response (TextContent)

        Raises:
            Exception: If sampling fails or provider is unavailable
        """
        try:
            # Initialize client if needed
            if self._client is None:
                self._client = await self._initialize_client()

            # Convert messages to provider format
            provider_messages = self._format_messages(messages)

            # Add system prompt if provided
            if params.systemPrompt:
                provider_messages.insert(
                    0, {"role": "system", "content": params.systemPrompt}
                )

            # Perform the LLM call based on provider
            if self.config.provider == "openai":
                # Use an explicit None check so a requested temperature of 0.0
                # is honored instead of falling through to the config default.
                temperature = (
                    params.temperature
                    if params.temperature is not None
                    else self.config.temperature
                )
                response_text = await self._sample_openai(
                    provider_messages,
                    temperature=temperature,
                    max_tokens=params.maxTokens or self.config.max_tokens,
                    model_preferences=params.modelPreferences,
                )
            else:
                raise ValueError(f"Unsupported provider: {self.config.provider}")

            # Return as TextContent block
            return TextContent(type="text", text=response_text)

        except Exception as e:
            logger.error(f"Server-side sampling failed: {e}")
            # Return a minimal error response rather than raising
            return TextContent(type="text", text=f"[Sampling failed: {str(e)}]")
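    # Design note: __call__ swallows provider errors and returns the failure
    # as inline text ("[Sampling failed: ...]") instead of raising, so a tool
    # call that fell back to server-side sampling degrades gracefully rather
    # than aborting. Callers that need to detect failure must inspect the
    # returned text.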

    async def _initialize_client(self):
        """Initialize the LLM provider client."""
        if self.config.provider == "openai":
            try:
                import openai
            except ImportError:
                raise ImportError(
                    "OpenAI package not installed. Install with: pip install openai"
                )

            # Create OpenAI client with optional base URL
            if self.config.base_url:
                client = openai.AsyncOpenAI(
                    api_key=self.config.api_key,
                    base_url=self.config.base_url,
                    timeout=self.config.timeout_ms / 1000,  # Convert ms to seconds
                )
            else:
                client = openai.AsyncOpenAI(
                    api_key=self.config.api_key,
                    timeout=self.config.timeout_ms / 1000,
                )

            logger.debug("OpenAI client initialized for server-side sampling")
            return client
        else:
            raise ValueError(f"Unknown provider: {self.config.provider}")

    def _format_messages(self, messages: List[SamplingMessage]) -> List[Dict[str, str]]:
        """
        Convert SamplingMessage objects to provider format.

        Args:
            messages: List of SamplingMessage objects

        Returns:
            List of message dicts in provider format
        """
        formatted = []
        for msg in messages:
            # Extract text content from the message
            if hasattr(msg.content, "text"):
                content = msg.content.text
            elif isinstance(msg.content, str):
                content = msg.content
            else:
                # Fall back to the string representation of the content block
                content = str(msg.content)

            formatted.append({"role": msg.role, "content": content})

        return formatted
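    # Illustrative mapping (assumes content is MCP TextContent or a plain str):
    #   SamplingMessage(role="user", content=TextContent(type="text", text="hi"))
    # is formatted as {"role": "user", "content": "hi"} for the provider API.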

    async def _sample_openai(
        self,
        messages: List[Dict[str, str]],
        temperature: float,
        max_tokens: int,
        model_preferences: Optional[Any] = None,
    ) -> str:
        """
        Perform sampling using the OpenAI API.

        Args:
            messages: Formatted messages for OpenAI
            temperature: Sampling temperature
            max_tokens: Maximum tokens to generate
            model_preferences: Optional model preferences

        Returns:
            Generated text response
        """
        # Determine model to use
        model = self.config.model
        if model_preferences:
            # Extract model hint if provided
            if isinstance(model_preferences, str):
                model = model_preferences
            elif isinstance(model_preferences, list) and len(model_preferences) > 0:
                model = model_preferences[0]
            elif isinstance(model_preferences, dict):
                hints = model_preferences.get("hints", [])
                if hints and len(hints) > 0:
                    if isinstance(hints[0], dict):
                        model = hints[0].get("name", model)
                    else:
                        model = str(hints[0])

        logger.debug(
            "Performing OpenAI sampling: model=%s, temperature=%.2f, max_tokens=%d",
            model,
            temperature,
            max_tokens,
        )

        try:
            response = await self._client.chat.completions.create(
                model=model,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
            )

            # message.content is Optional[str] in the openai SDK; coerce None
            # (e.g. a refusal or tool-call-only reply) to an empty string so
            # the declared -> str return type holds and len() cannot raise.
            result = response.choices[0].message.content or ""
            logger.debug(
                "Server-side sampling successful, response length: %d",
                len(result),
            )
            return result

        except Exception as e:
            logger.error(f"OpenAI API call failed: {e}")
            raise
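
# Illustrative model_preferences shapes accepted by _sample_openai above
# (model names are placeholders, not package defaults):
#   "gpt-4o-mini"                          -> model = "gpt-4o-mini"
#   ["gpt-4o-mini", "gpt-4o"]              -> model = "gpt-4o-mini"
#   {"hints": [{"name": "gpt-4o-mini"}]}   -> model = "gpt-4o-mini"  (MCP-style)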

def create_sampling_handler(
    config: Optional[SamplingConfig] = None,
) -> Optional[ServerSamplingHandler]:
    """
    Create a server-side sampling handler if configuration is valid.

    Args:
        config: Optional sampling configuration. If not provided, loads from environment.

    Returns:
        ServerSamplingHandler instance if configuration is valid, None otherwise
    """
    if config is None:
        config = SamplingConfig.from_environment()

    # Log configuration status
    config.log_status()

    # Only create handler if configuration is valid
    if not config.is_valid():
        logger.info(
            "Server-side sampling handler not created (disabled or invalid config)"
        )
        return None

    try:
        handler = ServerSamplingHandler(config)
        logger.info("Server-side sampling handler created successfully")
        return handler
    except Exception as e:
        logger.error(f"Failed to create sampling handler: {e}")
        return None
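
For context, a minimal usage sketch of the handler added above. This is illustrative and not part of the package: it assumes the environment variables read by SamplingConfig.from_environment() are set (the exact variable names live in amazon_ads_mcp/config/sampling.py, not shown in this diff) and that an OpenAI-compatible endpoint is reachable.

import asyncio

from mcp.types import CreateMessageRequestParams, SamplingMessage, TextContent

from amazon_ads_mcp.server.sampling_handler import create_sampling_handler


async def main() -> None:
    # Returns None when sampling is disabled or misconfigured.
    handler = create_sampling_handler()
    if handler is None:
        return

    messages = [
        SamplingMessage(
            role="user",
            content=TextContent(type="text", text="Say hello in one sentence."),
        )
    ]
    # maxTokens is a required field of CreateMessageRequestParams; the same
    # message list is also passed positionally to the handler.
    params = CreateMessageRequestParams(messages=messages, maxTokens=64)

    block = await handler(messages, params)
    # block is a TextContent; failures arrive as "[Sampling failed: ...]".
    print(block.text)


asyncio.run(main())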