tweek-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tweek/__init__.py +16 -0
- tweek/cli.py +3390 -0
- tweek/cli_helpers.py +193 -0
- tweek/config/__init__.py +13 -0
- tweek/config/allowed_dirs.yaml +23 -0
- tweek/config/manager.py +1064 -0
- tweek/config/patterns.yaml +751 -0
- tweek/config/tiers.yaml +129 -0
- tweek/diagnostics.py +589 -0
- tweek/hooks/__init__.py +1 -0
- tweek/hooks/pre_tool_use.py +861 -0
- tweek/integrations/__init__.py +3 -0
- tweek/integrations/moltbot.py +243 -0
- tweek/licensing.py +398 -0
- tweek/logging/__init__.py +9 -0
- tweek/logging/bundle.py +350 -0
- tweek/logging/json_logger.py +150 -0
- tweek/logging/security_log.py +745 -0
- tweek/mcp/__init__.py +24 -0
- tweek/mcp/approval.py +456 -0
- tweek/mcp/approval_cli.py +356 -0
- tweek/mcp/clients/__init__.py +37 -0
- tweek/mcp/clients/chatgpt.py +112 -0
- tweek/mcp/clients/claude_desktop.py +203 -0
- tweek/mcp/clients/gemini.py +178 -0
- tweek/mcp/proxy.py +667 -0
- tweek/mcp/screening.py +175 -0
- tweek/mcp/server.py +317 -0
- tweek/platform/__init__.py +131 -0
- tweek/plugins/__init__.py +835 -0
- tweek/plugins/base.py +1080 -0
- tweek/plugins/compliance/__init__.py +30 -0
- tweek/plugins/compliance/gdpr.py +333 -0
- tweek/plugins/compliance/gov.py +324 -0
- tweek/plugins/compliance/hipaa.py +285 -0
- tweek/plugins/compliance/legal.py +322 -0
- tweek/plugins/compliance/pci.py +361 -0
- tweek/plugins/compliance/soc2.py +275 -0
- tweek/plugins/detectors/__init__.py +30 -0
- tweek/plugins/detectors/continue_dev.py +206 -0
- tweek/plugins/detectors/copilot.py +254 -0
- tweek/plugins/detectors/cursor.py +192 -0
- tweek/plugins/detectors/moltbot.py +205 -0
- tweek/plugins/detectors/windsurf.py +214 -0
- tweek/plugins/git_discovery.py +395 -0
- tweek/plugins/git_installer.py +491 -0
- tweek/plugins/git_lockfile.py +338 -0
- tweek/plugins/git_registry.py +503 -0
- tweek/plugins/git_security.py +482 -0
- tweek/plugins/providers/__init__.py +30 -0
- tweek/plugins/providers/anthropic.py +181 -0
- tweek/plugins/providers/azure_openai.py +289 -0
- tweek/plugins/providers/bedrock.py +248 -0
- tweek/plugins/providers/google.py +197 -0
- tweek/plugins/providers/openai.py +230 -0
- tweek/plugins/scope.py +130 -0
- tweek/plugins/screening/__init__.py +26 -0
- tweek/plugins/screening/llm_reviewer.py +149 -0
- tweek/plugins/screening/pattern_matcher.py +273 -0
- tweek/plugins/screening/rate_limiter.py +174 -0
- tweek/plugins/screening/session_analyzer.py +159 -0
- tweek/proxy/__init__.py +302 -0
- tweek/proxy/addon.py +223 -0
- tweek/proxy/interceptor.py +313 -0
- tweek/proxy/server.py +315 -0
- tweek/sandbox/__init__.py +71 -0
- tweek/sandbox/executor.py +382 -0
- tweek/sandbox/linux.py +278 -0
- tweek/sandbox/profile_generator.py +323 -0
- tweek/screening/__init__.py +13 -0
- tweek/screening/context.py +81 -0
- tweek/security/__init__.py +22 -0
- tweek/security/llm_reviewer.py +348 -0
- tweek/security/rate_limiter.py +682 -0
- tweek/security/secret_scanner.py +506 -0
- tweek/security/session_analyzer.py +600 -0
- tweek/vault/__init__.py +40 -0
- tweek/vault/cross_platform.py +251 -0
- tweek/vault/keychain.py +288 -0
- tweek-0.1.0.dist-info/METADATA +335 -0
- tweek-0.1.0.dist-info/RECORD +85 -0
- tweek-0.1.0.dist-info/WHEEL +5 -0
- tweek-0.1.0.dist-info/entry_points.txt +25 -0
- tweek-0.1.0.dist-info/licenses/LICENSE +190 -0
- tweek-0.1.0.dist-info/top_level.txt +1 -0

@@ -0,0 +1,482 @@ tweek/plugins/git_security.py

#!/usr/bin/env python3
"""
Tweek Git Plugin Security Validation

5-layer security pipeline for validating git-installed plugins before loading:

1. Registry Listing - Plugin must exist in curated registry with verified=true
2. Signature Verification - HMAC of CHECKSUMS.sha256 validated against Tweek key
3. Checksum Verification - SHA-256 of every .py file matches CHECKSUMS.sha256
4. AST Static Analysis - Parse .py files, reject forbidden patterns
5. Base Class Enforcement - Imported class must inherit from approved base class

This module runs BEFORE any plugin code is imported or executed.
"""

import ast
import hashlib
import hmac
import json
import logging
import os
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Type

logger = logging.getLogger(__name__)

# Signing key for plugin verification.
# In production, this would use asymmetric keys (Ed25519).
# The HMAC approach is simpler and sufficient for the curated model
# where Tweek controls both signing and verification.
TWEEK_SIGNING_KEY = os.environ.get(
    "TWEEK_PLUGIN_SIGNING_KEY",
    "tweek-plugin-signing-key-v1"
)

# Modules/functions that are forbidden in plugin code
FORBIDDEN_IMPORTS = frozenset({
    "subprocess",
    "os.system",
    "os.popen",
    "os.exec",
    "os.execl",
    "os.execle",
    "os.execlp",
    "os.execv",
    "os.execve",
    "os.execvp",
    "os.execvpe",
    "os.spawn",
    "os.spawnl",
    "os.spawnle",
    "os.spawnlp",
    "os.spawnlpe",
    "os.spawnv",
    "os.spawnve",
    "os.spawnvp",
    "os.spawnvpe",
    "ctypes",
    "multiprocessing",
})

FORBIDDEN_CALLS = frozenset({
    "eval",
    "exec",
    "compile",
    "__import__",
    "os.system",
    "os.popen",
    "os.remove",
    "os.unlink",
    "os.rmdir",
    "os.removedirs",
    "shutil.rmtree",
    "shutil.move",
})

# Modules that indicate network access
FORBIDDEN_NETWORK_IMPORTS = frozenset({
    "socket",
    "urllib",
    "urllib.request",
    "urllib.parse",
    "http.client",
    "http.server",
    "requests",
    "httpx",
    "aiohttp",
    "websockets",
    "paramiko",
    "ftplib",
    "smtplib",
    "telnetlib",
})

# Required manifest fields
REQUIRED_MANIFEST_FIELDS = {
    "name", "version", "category", "entry_point", "description",
}

VALID_CATEGORIES = {"compliance", "providers", "detectors", "screening"}

VALID_LICENSE_TIERS = {"free", "pro", "enterprise"}


class PluginSecurityError(Exception):
    """Raised when a plugin fails security validation."""
    pass


def validate_manifest(manifest_path: Path) -> Tuple[bool, Optional[dict], List[str]]:
    """
    Load and validate a tweek_plugin.json manifest.

    Args:
        manifest_path: Path to tweek_plugin.json

    Returns:
        (is_valid, manifest_dict_or_None, list_of_issues)
    """
    issues = []

    if not manifest_path.exists():
        return False, None, ["tweek_plugin.json not found"]

    try:
        with open(manifest_path) as f:
            manifest = json.load(f)
    except json.JSONDecodeError as e:
        return False, None, [f"Invalid JSON in manifest: {e}"]

    if not isinstance(manifest, dict):
        return False, None, ["Manifest must be a JSON object"]

    # Check required fields
    for field in REQUIRED_MANIFEST_FIELDS:
        if field not in manifest:
            issues.append(f"Missing required field: {field}")

    # Validate category
    category = manifest.get("category", "")
    if category not in VALID_CATEGORIES:
        issues.append(f"Invalid category '{category}'. Must be one of: {VALID_CATEGORIES}")

    # Validate license tier
    tier = manifest.get("requires_license_tier", "free")
    if tier not in VALID_LICENSE_TIERS:
        issues.append(f"Invalid license tier '{tier}'. Must be one of: {VALID_LICENSE_TIERS}")

    # Validate entry_point format (module:ClassName)
    entry_point = manifest.get("entry_point", "")
    if ":" not in entry_point:
        issues.append(f"Invalid entry_point '{entry_point}'. Must be 'module:ClassName' format")

    # Validate version format
    version = manifest.get("version", "")
    if version and not _is_valid_version(version):
        issues.append(f"Invalid version '{version}'. Must be semver (e.g., 1.2.3)")

    is_valid = len(issues) == 0
    return is_valid, manifest if is_valid else None, issues


def verify_checksums(plugin_dir: Path, expected_checksums: Dict[str, str]) -> Tuple[bool, List[str]]:
    """
    Verify SHA-256 checksums of all Python files in plugin directory.

    Args:
        plugin_dir: Path to the plugin directory
        expected_checksums: Dict mapping filename to "sha256:hexdigest"

    Returns:
        (all_valid, list_of_issues)
    """
    issues = []

    # Check all expected files exist and match
    for filename, expected_hash in expected_checksums.items():
        file_path = plugin_dir / filename
        if not file_path.exists():
            issues.append(f"Expected file missing: {filename}")
            continue

        # Parse hash format
        if expected_hash.startswith("sha256:"):
            expected_hex = expected_hash[7:]
        else:
            expected_hex = expected_hash

        # Compute actual hash
        actual_hex = _compute_file_sha256(file_path)
        if actual_hex != expected_hex:
            issues.append(f"Checksum mismatch for {filename}: expected {expected_hex[:16]}..., got {actual_hex[:16]}...")

    # Check for unexpected .py files not in checksums
    for py_file in plugin_dir.glob("*.py"):
        if py_file.name not in expected_checksums:
            issues.append(f"Unexpected Python file not in checksums: {py_file.name}")

    return len(issues) == 0, issues


def verify_checksum_signature(
    checksums_content: bytes,
    signature: str,
    signing_key: Optional[str] = None,
) -> bool:
    """
    Verify HMAC signature of checksum file using Tweek's signing key.

    Args:
        checksums_content: Raw bytes of the CHECKSUMS.sha256 file
        signature: Hex-encoded HMAC signature
        signing_key: Override signing key (default: TWEEK_SIGNING_KEY)

    Returns:
        True if signature is valid
    """
    key = (signing_key or TWEEK_SIGNING_KEY).encode()
    expected_sig = hmac.new(key, checksums_content, hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected_sig, signature)


def static_analyze_python_files(plugin_dir: Path) -> Tuple[bool, List[str]]:
    """
    AST-based static analysis of all Python files in a plugin directory.

    Scans for forbidden patterns:
    - Importing forbidden modules (subprocess, ctypes, etc.)
    - Calling forbidden functions (eval, exec, os.system, etc.)
    - Network access imports (socket, requests, etc.)

    Args:
        plugin_dir: Path to the plugin directory

    Returns:
        (is_safe, list_of_issues)
    """
    issues = []

    for py_file in plugin_dir.glob("**/*.py"):
        # Skip test files
        if "test" in py_file.parts:
            continue

        try:
            source = py_file.read_text()
            tree = ast.parse(source, filename=str(py_file))
        except SyntaxError as e:
            issues.append(f"{py_file.name}: Syntax error: {e}")
            continue

        file_issues = _analyze_ast(tree, py_file.name)
        issues.extend(file_issues)

    return len(issues) == 0, issues


def verify_base_class(plugin_class: Type, expected_category: str) -> Tuple[bool, str]:
    """
    Verify that a plugin class inherits from the correct Tweek base class.

    Args:
        plugin_class: The loaded plugin class
        expected_category: Category from manifest (compliance, providers, etc.)

    Returns:
        (is_valid, error_message_or_empty_string)
    """
    from tweek.plugins.base import (
        CompliancePlugin,
        LLMProviderPlugin,
        ToolDetectorPlugin,
        ScreeningPlugin,
    )

    category_base_map = {
        "compliance": CompliancePlugin,
        "providers": LLMProviderPlugin,
        "detectors": ToolDetectorPlugin,
        "screening": ScreeningPlugin,
    }

    expected_base = category_base_map.get(expected_category)
    if expected_base is None:
        return False, f"Unknown category: {expected_category}"

    if not issubclass(plugin_class, expected_base):
        return False, (
            f"Plugin class {plugin_class.__name__} does not inherit from "
            f"{expected_base.__name__} (required for category '{expected_category}')"
        )

    return True, ""


def validate_plugin_full(
    plugin_dir: Path,
    manifest: dict,
    registry_checksums: Optional[Dict[str, str]] = None,
    skip_signature: bool = False,
) -> Tuple[bool, List[str]]:
    """
    Run the full 5-layer security validation pipeline on a plugin.

    Layers:
    1. Manifest validation (format, required fields)
    2. Checksum signature verification (if not skipped)
    3. File checksum verification
    4. AST static analysis
    5. (Base class enforcement happens after import in discovery module)

    Args:
        plugin_dir: Path to the plugin directory
        manifest: Parsed manifest dict
        registry_checksums: Checksums from the registry for this version
        skip_signature: Skip signature verification (for development)

    Returns:
        (is_safe, list_of_all_issues)
    """
    all_issues = []

    # Layer 1: Manifest validation (already done before calling this)
    # Just verify category is valid
    if manifest.get("category") not in VALID_CATEGORIES:
        all_issues.append(f"Invalid category: {manifest.get('category')}")

    # Layer 2: Checksum signature verification
    if not skip_signature:
        checksums_file = plugin_dir / "CHECKSUMS.sha256"
        if checksums_file.exists():
            signature = manifest.get("checksum_signature", "")
            if not signature:
                all_issues.append("Missing checksum_signature in manifest")
            else:
                content = checksums_file.read_bytes()
                if not verify_checksum_signature(content, signature):
                    all_issues.append("Checksum signature verification failed - plugin may have been tampered with")
        else:
            all_issues.append("CHECKSUMS.sha256 file missing")

    # Layer 3: File checksum verification
    if registry_checksums:
        valid, checksum_issues = verify_checksums(plugin_dir, registry_checksums)
        all_issues.extend(checksum_issues)

    # Layer 4: AST static analysis
    safe, ast_issues = static_analyze_python_files(plugin_dir)
    all_issues.extend(ast_issues)

    is_safe = len(all_issues) == 0

    if not is_safe:
        logger.warning(
            f"Plugin {manifest.get('name', 'unknown')} failed security validation: "
            f"{len(all_issues)} issue(s) found"
        )
        for issue in all_issues:
            logger.warning(f"  - {issue}")

    return is_safe, all_issues


# =============================================================================
# INTERNAL HELPERS
# =============================================================================

def _compute_file_sha256(file_path: Path) -> str:
    """Compute SHA-256 hex digest of a file."""
    sha256 = hashlib.sha256()
    with open(file_path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            sha256.update(chunk)
    return sha256.hexdigest()


def _is_valid_version(version: str) -> bool:
    """Check if version string is valid semver-like format."""
    parts = version.split(".")
    if len(parts) < 2 or len(parts) > 3:
        return False
    try:
        for part in parts:
            int(part)
        return True
    except ValueError:
        return False


def _analyze_ast(tree: ast.AST, filename: str) -> List[str]:
    """
    Walk an AST tree and find forbidden patterns.

    Returns list of issues found.
    """
    issues = []

    for node in ast.walk(tree):
        # Check imports
        if isinstance(node, ast.Import):
            for alias in node.names:
                module_name = alias.name
                if module_name in FORBIDDEN_IMPORTS or module_name in FORBIDDEN_NETWORK_IMPORTS:
                    issues.append(
                        f"{filename}:{node.lineno}: Forbidden import '{module_name}'"
                    )

        elif isinstance(node, ast.ImportFrom):
            module = node.module or ""
            if module in FORBIDDEN_IMPORTS or module in FORBIDDEN_NETWORK_IMPORTS:
                issues.append(
                    f"{filename}:{node.lineno}: Forbidden import from '{module}'"
                )
            # Check for partial matches (e.g., "from os import system")
            for alias in (node.names or []):
                full_name = f"{module}.{alias.name}" if module else alias.name
                if full_name in FORBIDDEN_IMPORTS or full_name in FORBIDDEN_CALLS:
                    issues.append(
                        f"{filename}:{node.lineno}: Forbidden import '{full_name}'"
                    )

        # Check function calls
        elif isinstance(node, ast.Call):
            call_name = _get_call_name(node)
            if call_name in FORBIDDEN_CALLS:
                issues.append(
                    f"{filename}:{node.lineno}: Forbidden call to '{call_name}'"
                )

    return issues


def _get_call_name(node: ast.Call) -> str:
    """Extract the full dotted name of a function call."""
    if isinstance(node.func, ast.Name):
        return node.func.id
    elif isinstance(node.func, ast.Attribute):
        parts = []
        current = node.func
        while isinstance(current, ast.Attribute):
            parts.append(current.attr)
            current = current.value
        if isinstance(current, ast.Name):
            parts.append(current.id)
        return ".".join(reversed(parts))
    return ""


def generate_checksums(plugin_dir: Path) -> Dict[str, str]:
    """
    Generate SHA-256 checksums for all Python files in a plugin directory.

    Utility function for plugin developers.

    Args:
        plugin_dir: Path to plugin directory

    Returns:
        Dict mapping filename to "sha256:hexdigest"
    """
    checksums = {}
    for py_file in sorted(plugin_dir.glob("*.py")):
        hex_digest = _compute_file_sha256(py_file)
        checksums[py_file.name] = f"sha256:{hex_digest}"
    return checksums


def sign_checksums(checksums_content: bytes, signing_key: Optional[str] = None) -> str:
    """
    Sign checksum file content with Tweek's signing key.

    Utility function for registry administrators.

    Args:
        checksums_content: Raw bytes of CHECKSUMS.sha256
        signing_key: Override signing key (default: TWEEK_SIGNING_KEY)

    Returns:
        Hex-encoded HMAC signature
    """
    key = (signing_key or TWEEK_SIGNING_KEY).encode()
    return hmac.new(key, checksums_content, hashlib.sha256).hexdigest()
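
The signing utilities at the bottom of this file and the verification layers above them form a round trip. The sketch below walks that loop end to end; it is illustrative only: the directory path and manifest contents are invented, and the "hash  filename" line format of CHECKSUMS.sha256 is an assumption, since the module itself only signs and verifies the file's raw bytes.

# Illustrative round trip through tweek/plugins/git_security.py.
import json
from pathlib import Path

from tweek.plugins.git_security import (
    generate_checksums,
    sign_checksums,
    validate_manifest,
    validate_plugin_full,
)

plugin_dir = Path.home() / ".tweek" / "plugins" / "example-plugin"  # invented path

# Publisher side: hash every .py file, then HMAC-sign the checksum file.
# The "hash  filename" line format below is assumed for the example.
checksums = generate_checksums(plugin_dir)
blob = "\n".join(f"{digest}  {name}" for name, digest in checksums.items()).encode()
(plugin_dir / "CHECKSUMS.sha256").write_bytes(blob)

# The signature travels in the manifest under the field name that
# validate_plugin_full reads back ("checksum_signature").
manifest_path = plugin_dir / "tweek_plugin.json"
manifest = json.loads(manifest_path.read_text())
manifest["checksum_signature"] = sign_checksums(blob)
manifest_path.write_text(json.dumps(manifest, indent=2))

# Consumer side: layers 1-4 run before any plugin code is imported.
ok, manifest, issues = validate_manifest(manifest_path)
if ok:
    is_safe, all_issues = validate_plugin_full(
        plugin_dir, manifest, registry_checksums=checksums
    )
    print(is_safe, all_issues)
else:
    print(issues)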

@@ -0,0 +1,30 @@ tweek/plugins/providers/__init__.py

#!/usr/bin/env python3
"""
Tweek LLM Provider Plugins

Provider plugins handle API-specific formats for different LLM providers:
- Anthropic (Claude)
- OpenAI (GPT)
- Azure OpenAI (GPT on Azure)
- Google (Gemini)
- AWS Bedrock

Each provider plugin knows how to:
- Identify API endpoints
- Extract tool calls from responses
- Parse request/response formats
"""

from tweek.plugins.providers.anthropic import AnthropicProvider
from tweek.plugins.providers.openai import OpenAIProvider
from tweek.plugins.providers.azure_openai import AzureOpenAIProvider
from tweek.plugins.providers.google import GoogleProvider
from tweek.plugins.providers.bedrock import BedrockProvider

__all__ = [
    "AnthropicProvider",
    "OpenAIProvider",
    "AzureOpenAIProvider",
    "GoogleProvider",
    "BedrockProvider",
]
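
Each provider exposes an api_hosts property (see the Anthropic plugin below), which lets a proxy pick the right parser by request host. A hypothetical dispatch helper is sketched here; it assumes the provider classes are zero-argument constructible and that all of them implement api_hosts the way AnthropicProvider does, neither of which is shown in this diff.

# Hypothetical host-based dispatch; the real routing lives in the proxy and
# plugin-registry modules, which this diff section does not show.
from tweek.plugins.providers import (
    AnthropicProvider,
    OpenAIProvider,
    AzureOpenAIProvider,
    GoogleProvider,
    BedrockProvider,
)

# Assumption: provider plugins take no constructor arguments.
PROVIDERS = [
    AnthropicProvider(),
    OpenAIProvider(),
    AzureOpenAIProvider(),
    GoogleProvider(),
    BedrockProvider(),
]


def provider_for_host(host: str):
    """Return the first provider whose api_hosts matches the request host."""
    for provider in PROVIDERS:
        if any(host == h or host.endswith("." + h) for h in provider.api_hosts):
            return provider
    return None


print(provider_for_host("api.anthropic.com").name)  # -> "anthropic"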

@@ -0,0 +1,181 @@ tweek/plugins/providers/anthropic.py

#!/usr/bin/env python3
"""
Tweek Anthropic Provider Plugin

Handles Anthropic Claude API format:
- Endpoint: api.anthropic.com
- Tool calls in content blocks with type="tool_use"
- Messages API format
"""

from typing import Optional, List, Dict, Any
from tweek.plugins.base import LLMProviderPlugin, ToolCall


class AnthropicProvider(LLMProviderPlugin):
    """
    Anthropic Claude API provider plugin.

    Supports:
    - Messages API (v1)
    - Tool use blocks
    - Streaming responses
    """

    VERSION = "1.0.0"
    DESCRIPTION = "Anthropic Claude API provider"
    AUTHOR = "Tweek"
    REQUIRES_LICENSE = "free"
    TAGS = ["provider", "anthropic", "claude"]

    @property
    def name(self) -> str:
        return "anthropic"

    @property
    def api_hosts(self) -> List[str]:
        return [
            "api.anthropic.com",
        ]

    def extract_tool_calls(self, response: Dict[str, Any]) -> List[ToolCall]:
        """
        Extract tool calls from Anthropic API response.

        Anthropic format:
            {
                "content": [
                    {
                        "type": "tool_use",
                        "id": "toolu_xxx",
                        "name": "tool_name",
                        "input": {...}
                    }
                ]
            }
        """
        tool_calls = []

        content = response.get("content", [])
        if not isinstance(content, list):
            return tool_calls

        for block in content:
            if not isinstance(block, dict):
                continue

            if block.get("type") == "tool_use":
                tool_calls.append(ToolCall(
                    id=block.get("id", ""),
                    name=block.get("name", ""),
                    input=block.get("input", {}),
                    provider=self.name,
                    raw=block,
                ))

        return tool_calls

    def extract_content(self, response: Dict[str, Any]) -> str:
        """
        Extract text content from Anthropic API response.

        Concatenates all text blocks from the content array.
        """
        content = response.get("content", [])
        if not isinstance(content, list):
            return ""

        text_parts = []
        for block in content:
            if isinstance(block, dict) and block.get("type") == "text":
                text_parts.append(block.get("text", ""))
            elif isinstance(block, str):
                text_parts.append(block)

        return "\n".join(text_parts)

    def extract_messages(self, request: Dict[str, Any]) -> List[Dict[str, Any]]:
        """
        Extract messages from Anthropic API request.

        Returns list of message dicts with role and content.
        """
        return request.get("messages", [])

    def get_system_prompt(self, request: Dict[str, Any]) -> Optional[str]:
        """Extract system prompt from request."""
        system = request.get("system")
        if isinstance(system, str):
            return system
        elif isinstance(system, list):
            # System can be array of content blocks
            parts = []
            for block in system:
                if isinstance(block, dict) and block.get("type") == "text":
                    parts.append(block.get("text", ""))
                elif isinstance(block, str):
                    parts.append(block)
            return "\n".join(parts)
        return None

    def is_streaming_response(self, response: Dict[str, Any]) -> bool:
        """Check if response is a streaming event."""
        return response.get("type") in (
            "message_start",
            "content_block_start",
            "content_block_delta",
            "content_block_stop",
            "message_delta",
            "message_stop",
        )

    def extract_streaming_tool_call(
        self,
        events: List[Dict[str, Any]]
    ) -> List[ToolCall]:
        """
        Extract tool calls from streaming events.

        Reassembles tool_use blocks from streaming deltas.
        """
        tool_calls = []
        current_tools: Dict[int, Dict[str, Any]] = {}

        for event in events:
            event_type = event.get("type")

            if event_type == "content_block_start":
                index = event.get("index", 0)
                block = event.get("content_block", {})
                if block.get("type") == "tool_use":
                    current_tools[index] = {
                        "id": block.get("id", ""),
                        "name": block.get("name", ""),
                        "input_json": "",
                    }

            elif event_type == "content_block_delta":
                index = event.get("index", 0)
                delta = event.get("delta", {})
                if delta.get("type") == "input_json_delta":
                    if index in current_tools:
                        current_tools[index]["input_json"] += delta.get("partial_json", "")

            elif event_type == "content_block_stop":
                index = event.get("index", 0)
                if index in current_tools:
                    tool_data = current_tools.pop(index)
                    try:
                        import json
                        input_dict = json.loads(tool_data["input_json"]) if tool_data["input_json"] else {}
                    except (json.JSONDecodeError, TypeError):
                        input_dict = {"_raw": tool_data["input_json"]}

                    tool_calls.append(ToolCall(
                        id=tool_data["id"],
                        name=tool_data["name"],
                        input=input_dict,
                        provider=self.name,
                    ))

        return tool_calls
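
The streaming path is the subtle part of this provider: tool arguments arrive as input_json_delta fragments that are buffered per content-block index and parsed only at content_block_stop. The sketch below exercises that reassembly with hand-written events in the shapes the method expects. All event values are invented, and it assumes AnthropicProvider takes no constructor arguments and that ToolCall exposes its constructor fields as attributes.

# Hand-rolled streaming events mirroring Anthropic's SSE shapes.
from tweek.plugins.providers.anthropic import AnthropicProvider

provider = AnthropicProvider()

events = [
    {"type": "message_start"},
    {"type": "content_block_start", "index": 0,
     "content_block": {"type": "tool_use", "id": "toolu_01", "name": "read_file"}},
    {"type": "content_block_delta", "index": 0,
     "delta": {"type": "input_json_delta", "partial_json": '{"path": "/tm'}},
    {"type": "content_block_delta", "index": 0,
     "delta": {"type": "input_json_delta", "partial_json": 'p/x.txt"}'}},
    {"type": "content_block_stop", "index": 0},
    {"type": "message_stop"},
]

calls = provider.extract_streaming_tool_call(events)
assert len(calls) == 1
assert calls[0].name == "read_file"
assert calls[0].input == {"path": "/tmp/x.txt"}

# Non-streaming responses go through extract_tool_calls instead:
response = {"content": [{"type": "tool_use", "id": "toolu_02",
                         "name": "read_file", "input": {"path": "/etc/hosts"}}]}
assert provider.extract_tool_calls(response)[0].input == {"path": "/etc/hosts"}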