tweek-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries; it is provided for informational purposes only and reflects the packages as they appear in those registries.
- tweek/__init__.py +16 -0
- tweek/cli.py +3390 -0
- tweek/cli_helpers.py +193 -0
- tweek/config/__init__.py +13 -0
- tweek/config/allowed_dirs.yaml +23 -0
- tweek/config/manager.py +1064 -0
- tweek/config/patterns.yaml +751 -0
- tweek/config/tiers.yaml +129 -0
- tweek/diagnostics.py +589 -0
- tweek/hooks/__init__.py +1 -0
- tweek/hooks/pre_tool_use.py +861 -0
- tweek/integrations/__init__.py +3 -0
- tweek/integrations/moltbot.py +243 -0
- tweek/licensing.py +398 -0
- tweek/logging/__init__.py +9 -0
- tweek/logging/bundle.py +350 -0
- tweek/logging/json_logger.py +150 -0
- tweek/logging/security_log.py +745 -0
- tweek/mcp/__init__.py +24 -0
- tweek/mcp/approval.py +456 -0
- tweek/mcp/approval_cli.py +356 -0
- tweek/mcp/clients/__init__.py +37 -0
- tweek/mcp/clients/chatgpt.py +112 -0
- tweek/mcp/clients/claude_desktop.py +203 -0
- tweek/mcp/clients/gemini.py +178 -0
- tweek/mcp/proxy.py +667 -0
- tweek/mcp/screening.py +175 -0
- tweek/mcp/server.py +317 -0
- tweek/platform/__init__.py +131 -0
- tweek/plugins/__init__.py +835 -0
- tweek/plugins/base.py +1080 -0
- tweek/plugins/compliance/__init__.py +30 -0
- tweek/plugins/compliance/gdpr.py +333 -0
- tweek/plugins/compliance/gov.py +324 -0
- tweek/plugins/compliance/hipaa.py +285 -0
- tweek/plugins/compliance/legal.py +322 -0
- tweek/plugins/compliance/pci.py +361 -0
- tweek/plugins/compliance/soc2.py +275 -0
- tweek/plugins/detectors/__init__.py +30 -0
- tweek/plugins/detectors/continue_dev.py +206 -0
- tweek/plugins/detectors/copilot.py +254 -0
- tweek/plugins/detectors/cursor.py +192 -0
- tweek/plugins/detectors/moltbot.py +205 -0
- tweek/plugins/detectors/windsurf.py +214 -0
- tweek/plugins/git_discovery.py +395 -0
- tweek/plugins/git_installer.py +491 -0
- tweek/plugins/git_lockfile.py +338 -0
- tweek/plugins/git_registry.py +503 -0
- tweek/plugins/git_security.py +482 -0
- tweek/plugins/providers/__init__.py +30 -0
- tweek/plugins/providers/anthropic.py +181 -0
- tweek/plugins/providers/azure_openai.py +289 -0
- tweek/plugins/providers/bedrock.py +248 -0
- tweek/plugins/providers/google.py +197 -0
- tweek/plugins/providers/openai.py +230 -0
- tweek/plugins/scope.py +130 -0
- tweek/plugins/screening/__init__.py +26 -0
- tweek/plugins/screening/llm_reviewer.py +149 -0
- tweek/plugins/screening/pattern_matcher.py +273 -0
- tweek/plugins/screening/rate_limiter.py +174 -0
- tweek/plugins/screening/session_analyzer.py +159 -0
- tweek/proxy/__init__.py +302 -0
- tweek/proxy/addon.py +223 -0
- tweek/proxy/interceptor.py +313 -0
- tweek/proxy/server.py +315 -0
- tweek/sandbox/__init__.py +71 -0
- tweek/sandbox/executor.py +382 -0
- tweek/sandbox/linux.py +278 -0
- tweek/sandbox/profile_generator.py +323 -0
- tweek/screening/__init__.py +13 -0
- tweek/screening/context.py +81 -0
- tweek/security/__init__.py +22 -0
- tweek/security/llm_reviewer.py +348 -0
- tweek/security/rate_limiter.py +682 -0
- tweek/security/secret_scanner.py +506 -0
- tweek/security/session_analyzer.py +600 -0
- tweek/vault/__init__.py +40 -0
- tweek/vault/cross_platform.py +251 -0
- tweek/vault/keychain.py +288 -0
- tweek-0.1.0.dist-info/METADATA +335 -0
- tweek-0.1.0.dist-info/RECORD +85 -0
- tweek-0.1.0.dist-info/WHEEL +5 -0
- tweek-0.1.0.dist-info/entry_points.txt +25 -0
- tweek-0.1.0.dist-info/licenses/LICENSE +190 -0
- tweek-0.1.0.dist-info/top_level.txt +1 -0
tweek/plugins/providers/azure_openai.py
@@ -0,0 +1,289 @@
+#!/usr/bin/env python3
+"""
+Tweek Azure OpenAI Provider Plugin
+
+Handles Azure OpenAI API format:
+- Endpoint: *.openai.azure.com
+- Same tool call format as OpenAI (Azure uses OpenAI-compatible API)
+- Supports custom deployment names
+"""
+
+import json
+from typing import Optional, List, Dict, Any
+from tweek.plugins.base import LLMProviderPlugin, ToolCall
+
+
+class AzureOpenAIProvider(LLMProviderPlugin):
+    """
+    Azure OpenAI API provider plugin.
+
+    Azure OpenAI uses the same API format as OpenAI but with:
+    - Different endpoint structure (*.openai.azure.com)
+    - Deployment-based model selection
+    - Different API versioning scheme
+
+    Supports:
+    - Chat Completions API
+    - Function calling (legacy)
+    - Tool use (current)
+    - Streaming responses
+    """
+
+    VERSION = "1.0.0"
+    DESCRIPTION = "Azure OpenAI API provider"
+    AUTHOR = "Tweek"
+    REQUIRES_LICENSE = "free"
+    TAGS = ["provider", "azure", "openai", "enterprise"]
+
+    def __init__(self, config: Optional[Dict[str, Any]] = None):
+        super().__init__(config)
+        # Allow custom Azure endpoints via config
+        self._custom_hosts = config.get("custom_hosts", []) if config else []
+
+    @property
+    def name(self) -> str:
+        return "azure_openai"
+
+    @property
+    def api_hosts(self) -> List[str]:
+        # Azure OpenAI endpoints follow pattern: *.openai.azure.com
+        # Include common patterns plus any custom hosts
+        default_hosts = [
+            "openai.azure.com",  # Will match via matches_endpoint logic
+        ]
+        return default_hosts + self._custom_hosts
+
+    def matches_endpoint(self, url: str) -> bool:
+        """
+        Check if URL matches Azure OpenAI API.
+
+        Azure OpenAI uses endpoints like:
+        - https://{resource-name}.openai.azure.com/...
+        - https://{custom-domain}/openai/...
+        """
+        # Extract hostname from URL
+        if "://" in url:
+            host = url.split("://")[1].split("/")[0]
+        else:
+            host = url.split("/")[0]
+
+        # Remove port if present
+        host = host.split(":")[0]
+
+        # Check for Azure OpenAI pattern (*.openai.azure.com)
+        if host.endswith(".openai.azure.com"):
+            return True
+
+        # Check for /openai/ path pattern (Azure uses this)
+        if "/openai/" in url:
+            # Likely an Azure endpoint with custom domain
+            return True
+
+        # Check custom hosts
+        return host in self._custom_hosts
+
+    def extract_tool_calls(self, response: Dict[str, Any]) -> List[ToolCall]:
+        """
+        Extract tool calls from Azure OpenAI API response.
+
+        Azure OpenAI uses the same format as OpenAI:
+        {
+            "choices": [
+                {
+                    "message": {
+                        "tool_calls": [
+                            {
+                                "id": "call_xxx",
+                                "type": "function",
+                                "function": {
+                                    "name": "tool_name",
+                                    "arguments": "{...}"
+                                }
+                            }
+                        ]
+                    }
+                }
+            ]
+        }
+        """
+        tool_calls = []
+
+        choices = response.get("choices", [])
+        if not isinstance(choices, list):
+            return tool_calls
+
+        for choice in choices:
+            if not isinstance(choice, dict):
+                continue
+
+            message = choice.get("message", {})
+            if not isinstance(message, dict):
+                continue
+
+            # Handle tool_calls (current format)
+            for tc in message.get("tool_calls", []):
+                if not isinstance(tc, dict):
+                    continue
+
+                func = tc.get("function", {})
+                if not isinstance(func, dict):
+                    continue
+
+                # Parse arguments JSON
+                args_str = func.get("arguments", "{}")
+                try:
+                    args = json.loads(args_str) if isinstance(args_str, str) else args_str
+                except json.JSONDecodeError:
+                    args = {"_raw": args_str}
+
+                tool_calls.append(ToolCall(
+                    id=tc.get("id", ""),
+                    name=func.get("name", ""),
+                    input=args if isinstance(args, dict) else {"_value": args},
+                    provider=self.name,
+                    raw=tc,
+                ))
+
+            # Handle function_call (legacy format)
+            function_call = message.get("function_call")
+            if isinstance(function_call, dict):
+                args_str = function_call.get("arguments", "{}")
+                try:
+                    args = json.loads(args_str) if isinstance(args_str, str) else args_str
+                except json.JSONDecodeError:
+                    args = {"_raw": args_str}
+
+                tool_calls.append(ToolCall(
+                    id="function_call",
+                    name=function_call.get("name", ""),
+                    input=args if isinstance(args, dict) else {"_value": args},
+                    provider=self.name,
+                    raw=function_call,
+                ))
+
+        return tool_calls
+
+    def extract_content(self, response: Dict[str, Any]) -> str:
+        """
+        Extract text content from Azure OpenAI API response.
+        """
+        choices = response.get("choices", [])
+        if not isinstance(choices, list) or not choices:
+            return ""
+
+        content_parts = []
+        for choice in choices:
+            if not isinstance(choice, dict):
+                continue
+
+            message = choice.get("message", {})
+            if isinstance(message, dict):
+                content = message.get("content")
+                if isinstance(content, str):
+                    content_parts.append(content)
+
+        return "\n".join(content_parts)
+
+    def extract_messages(self, request: Dict[str, Any]) -> List[Dict[str, Any]]:
+        """
+        Extract messages from Azure OpenAI API request.
+        """
+        return request.get("messages", [])
+
+    def get_system_prompt(self, request: Dict[str, Any]) -> Optional[str]:
+        """Extract system prompt from request."""
+        messages = request.get("messages", [])
+        for msg in messages:
+            if isinstance(msg, dict) and msg.get("role") == "system":
+                content = msg.get("content")
+                if isinstance(content, str):
+                    return content
+                elif isinstance(content, list):
+                    # Handle content array format
+                    parts = []
+                    for part in content:
+                        if isinstance(part, dict) and part.get("type") == "text":
+                            parts.append(part.get("text", ""))
+                    return "\n".join(parts)
+        return None
+
+    def get_deployment_name(self, url: str) -> Optional[str]:
+        """
+        Extract deployment name from Azure OpenAI URL.
+
+        Azure URLs follow pattern:
+        https://{resource}.openai.azure.com/openai/deployments/{deployment}/chat/completions
+        """
+        try:
+            parts = url.split("/")
+            if "deployments" in parts:
+                idx = parts.index("deployments")
+                if idx + 1 < len(parts):
+                    return parts[idx + 1]
+        except Exception:
+            pass
+        return None
+
+    def is_streaming_response(self, response: Dict[str, Any]) -> bool:
+        """Check if response is a streaming chunk."""
+        # Streaming responses have 'object': 'chat.completion.chunk'
+        return response.get("object") == "chat.completion.chunk"
+
+    def extract_streaming_tool_calls(
+        self,
+        chunks: List[Dict[str, Any]]
+    ) -> List[ToolCall]:
+        """
+        Extract tool calls from streaming chunks.
+
+        Reassembles tool calls from delta chunks.
+        """
+        tool_calls: Dict[int, Dict[str, Any]] = {}
+
+        for chunk in chunks:
+            choices = chunk.get("choices", [])
+            for choice in choices:
+                if not isinstance(choice, dict):
+                    continue
+
+                delta = choice.get("delta", {})
+                if not isinstance(delta, dict):
+                    continue
+
+                for tc in delta.get("tool_calls", []):
+                    if not isinstance(tc, dict):
+                        continue
+
+                    index = tc.get("index", 0)
+
+                    if index not in tool_calls:
+                        tool_calls[index] = {
+                            "id": tc.get("id", ""),
+                            "name": "",
+                            "arguments": "",
+                        }
+
+                    func = tc.get("function", {})
+                    if isinstance(func, dict):
+                        if func.get("name"):
+                            tool_calls[index]["name"] = func["name"]
+                        if func.get("arguments"):
+                            tool_calls[index]["arguments"] += func["arguments"]
+
+        # Convert to ToolCall objects
+        result = []
+        for index in sorted(tool_calls.keys()):
+            tc_data = tool_calls[index]
+            try:
+                args = json.loads(tc_data["arguments"]) if tc_data["arguments"] else {}
+            except json.JSONDecodeError:
+                args = {"_raw": tc_data["arguments"]}
+
+            result.append(ToolCall(
+                id=tc_data["id"],
+                name=tc_data["name"],
+                input=args,
+                provider=self.name,
+            ))
+
+        return result

tweek/plugins/providers/bedrock.py
@@ -0,0 +1,248 @@
+#!/usr/bin/env python3
+"""
+Tweek AWS Bedrock Provider Plugin
+
+Handles AWS Bedrock API format:
+- Endpoint: bedrock-runtime.{region}.amazonaws.com
+- Supports multiple underlying models (Claude, Titan, etc.)
+- Converse API and InvokeModel API
+"""
+
+import re
+from typing import Optional, List, Dict, Any
+from tweek.plugins.base import LLMProviderPlugin, ToolCall
+
+
+class BedrockProvider(LLMProviderPlugin):
+    """
+    AWS Bedrock API provider plugin.
+
+    Supports:
+    - Converse API (unified format)
+    - InvokeModel API (model-specific formats)
+    - Multiple model families (Anthropic, Amazon, Meta, etc.)
+    """
+
+    VERSION = "1.0.0"
+    DESCRIPTION = "AWS Bedrock API provider"
+    AUTHOR = "Tweek"
+    REQUIRES_LICENSE = "free"
+    TAGS = ["provider", "bedrock", "aws"]
+
+    # Bedrock endpoint pattern
+    ENDPOINT_PATTERN = re.compile(r"bedrock-runtime\.[\w-]+\.amazonaws\.com")
+
+    @property
+    def name(self) -> str:
+        return "bedrock"
+
+    @property
+    def api_hosts(self) -> List[str]:
+        # Bedrock uses regional endpoints
+        # Return empty list - we use matches_endpoint for pattern matching
+        return []
+
+    def matches_endpoint(self, url: str) -> bool:
+        """
+        Check if URL matches Bedrock's regional endpoint pattern.
+        """
+        # Extract hostname
+        if "://" in url:
+            host = url.split("://")[1].split("/")[0]
+        else:
+            host = url.split("/")[0]
+
+        host = host.split(":")[0]
+
+        return bool(self.ENDPOINT_PATTERN.match(host))
+
+    def extract_tool_calls(self, response: Dict[str, Any]) -> List[ToolCall]:
+        """
+        Extract tool calls from Bedrock API response.
+
+        Supports both Converse API and model-specific formats.
+        """
+        tool_calls = []
+
+        # Try Converse API format first
+        tool_calls.extend(self._extract_converse_tool_calls(response))
+
+        # Try Anthropic format (Claude on Bedrock)
+        if not tool_calls:
+            tool_calls.extend(self._extract_anthropic_tool_calls(response))
+
+        return tool_calls
+
+    def _extract_converse_tool_calls(
+        self,
+        response: Dict[str, Any]
+    ) -> List[ToolCall]:
+        """
+        Extract tool calls from Converse API format.
+
+        Converse API format:
+        {
+            "output": {
+                "message": {
+                    "content": [
+                        {
+                            "toolUse": {
+                                "toolUseId": "xxx",
+                                "name": "tool_name",
+                                "input": {...}
+                            }
+                        }
+                    ]
+                }
+            }
+        }
+        """
+        tool_calls = []
+
+        output = response.get("output", {})
+        if not isinstance(output, dict):
+            return tool_calls
+
+        message = output.get("message", {})
+        if not isinstance(message, dict):
+            return tool_calls
+
+        content = message.get("content", [])
+        if not isinstance(content, list):
+            return tool_calls
+
+        for block in content:
+            if not isinstance(block, dict):
+                continue
+
+            tool_use = block.get("toolUse")
+            if isinstance(tool_use, dict):
+                tool_calls.append(ToolCall(
+                    id=tool_use.get("toolUseId", ""),
+                    name=tool_use.get("name", ""),
+                    input=tool_use.get("input", {}),
+                    provider=self.name,
+                    raw=tool_use,
+                ))
+
+        return tool_calls
+
+    def _extract_anthropic_tool_calls(
+        self,
+        response: Dict[str, Any]
+    ) -> List[ToolCall]:
+        """
+        Extract tool calls from Anthropic format (Claude on Bedrock).
+
+        Uses the same format as Anthropic API.
+        """
+        tool_calls = []
+
+        content = response.get("content", [])
+        if not isinstance(content, list):
+            return tool_calls
+
+        for block in content:
+            if not isinstance(block, dict):
+                continue
+
+            if block.get("type") == "tool_use":
+                tool_calls.append(ToolCall(
+                    id=block.get("id", ""),
+                    name=block.get("name", ""),
+                    input=block.get("input", {}),
+                    provider=self.name,
+                    raw=block,
+                ))
+
+        return tool_calls
+
+    def extract_content(self, response: Dict[str, Any]) -> str:
+        """
+        Extract text content from Bedrock API response.
+        """
+        # Try Converse API format
+        output = response.get("output", {})
+        if isinstance(output, dict):
+            message = output.get("message", {})
+            if isinstance(message, dict):
+                content = message.get("content", [])
+                if isinstance(content, list):
+                    text_parts = []
+                    for block in content:
+                        if isinstance(block, dict) and "text" in block:
+                            text_parts.append(block["text"])
+                    if text_parts:
+                        return "\n".join(text_parts)
+
+        # Try Anthropic format
+        content = response.get("content", [])
+        if isinstance(content, list):
+            text_parts = []
+            for block in content:
+                if isinstance(block, dict) and block.get("type") == "text":
+                    text_parts.append(block.get("text", ""))
+            if text_parts:
+                return "\n".join(text_parts)
+
+        # Try Titan format
+        results = response.get("results", [])
+        if isinstance(results, list) and results:
+            return results[0].get("outputText", "")
+
+        return ""
+
+    def extract_messages(self, request: Dict[str, Any]) -> List[Dict[str, Any]]:
+        """
+        Extract messages from Bedrock API request.
+        """
+        # Converse API uses 'messages'
+        messages = request.get("messages", [])
+        if messages:
+            return messages
+
+        # InvokeModel with Anthropic format
+        anthropic_messages = request.get("messages", [])
+        if anthropic_messages:
+            return anthropic_messages
+
+        # InvokeModel with Titan format (prompt field)
+        prompt = request.get("inputText") or request.get("prompt")
+        if prompt:
+            return [{"role": "user", "content": prompt}]
+
+        return []
+
+    def get_system_prompt(self, request: Dict[str, Any]) -> Optional[str]:
+        """Extract system prompt from request."""
+        # Converse API
+        system = request.get("system")
+        if isinstance(system, list):
+            text_parts = []
+            for block in system:
+                if isinstance(block, dict) and "text" in block:
+                    text_parts.append(block["text"])
+            return "\n".join(text_parts) if text_parts else None
+        elif isinstance(system, str):
+            return system
+
+        # Anthropic format on Bedrock
+        anthropic_system = request.get("system")
+        if isinstance(anthropic_system, str):
+            return anthropic_system
+
+        return None
+
+    def get_model_id(self, request: Dict[str, Any]) -> Optional[str]:
+        """
+        Get the model ID from the request.
+
+        For Bedrock, this is typically in the URL path, but may also
+        be in the request body for some APIs.
+        """
+        return request.get("modelId")
+
+    def is_streaming_response(self, response: Dict[str, Any]) -> bool:
+        """Check if response is a streaming event."""
+        # Bedrock streaming uses event types
+        return "contentBlockDelta" in response or "contentBlockStart" in response