tweek 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. tweek/__init__.py +2 -2
  2. tweek/_keygen.py +53 -0
  3. tweek/audit.py +288 -0
  4. tweek/cli.py +5303 -2396
  5. tweek/cli_model.py +380 -0
  6. tweek/config/families.yaml +609 -0
  7. tweek/config/manager.py +42 -5
  8. tweek/config/patterns.yaml +1510 -8
  9. tweek/config/tiers.yaml +161 -11
  10. tweek/diagnostics.py +71 -2
  11. tweek/hooks/break_glass.py +163 -0
  12. tweek/hooks/feedback.py +223 -0
  13. tweek/hooks/overrides.py +531 -0
  14. tweek/hooks/post_tool_use.py +472 -0
  15. tweek/hooks/pre_tool_use.py +1024 -62
  16. tweek/integrations/openclaw.py +443 -0
  17. tweek/integrations/openclaw_server.py +385 -0
  18. tweek/licensing.py +14 -54
  19. tweek/logging/bundle.py +2 -2
  20. tweek/logging/security_log.py +56 -13
  21. tweek/mcp/approval.py +57 -16
  22. tweek/mcp/proxy.py +18 -0
  23. tweek/mcp/screening.py +5 -5
  24. tweek/mcp/server.py +4 -1
  25. tweek/memory/__init__.py +24 -0
  26. tweek/memory/queries.py +223 -0
  27. tweek/memory/safety.py +140 -0
  28. tweek/memory/schemas.py +80 -0
  29. tweek/memory/store.py +989 -0
  30. tweek/platform/__init__.py +4 -4
  31. tweek/plugins/__init__.py +40 -24
  32. tweek/plugins/base.py +1 -1
  33. tweek/plugins/detectors/__init__.py +3 -3
  34. tweek/plugins/detectors/{moltbot.py → openclaw.py} +30 -27
  35. tweek/plugins/git_discovery.py +16 -4
  36. tweek/plugins/git_registry.py +8 -2
  37. tweek/plugins/git_security.py +21 -9
  38. tweek/plugins/screening/__init__.py +10 -1
  39. tweek/plugins/screening/heuristic_scorer.py +477 -0
  40. tweek/plugins/screening/llm_reviewer.py +14 -6
  41. tweek/plugins/screening/local_model_reviewer.py +161 -0
  42. tweek/proxy/__init__.py +38 -37
  43. tweek/proxy/addon.py +22 -3
  44. tweek/proxy/interceptor.py +1 -0
  45. tweek/proxy/server.py +4 -2
  46. tweek/sandbox/__init__.py +11 -0
  47. tweek/sandbox/docker_bridge.py +143 -0
  48. tweek/sandbox/executor.py +9 -6
  49. tweek/sandbox/layers.py +97 -0
  50. tweek/sandbox/linux.py +1 -0
  51. tweek/sandbox/project.py +548 -0
  52. tweek/sandbox/registry.py +149 -0
  53. tweek/security/__init__.py +9 -0
  54. tweek/security/language.py +250 -0
  55. tweek/security/llm_reviewer.py +1146 -60
  56. tweek/security/local_model.py +331 -0
  57. tweek/security/local_reviewer.py +146 -0
  58. tweek/security/model_registry.py +371 -0
  59. tweek/security/rate_limiter.py +11 -6
  60. tweek/security/secret_scanner.py +70 -4
  61. tweek/security/session_analyzer.py +26 -2
  62. tweek/skill_template/SKILL.md +200 -0
  63. tweek/skill_template/__init__.py +0 -0
  64. tweek/skill_template/cli-reference.md +331 -0
  65. tweek/skill_template/overrides-reference.md +184 -0
  66. tweek/skill_template/scripts/__init__.py +0 -0
  67. tweek/skill_template/scripts/check_installed.py +170 -0
  68. tweek/skills/__init__.py +38 -0
  69. tweek/skills/config.py +150 -0
  70. tweek/skills/fingerprints.py +198 -0
  71. tweek/skills/guard.py +293 -0
  72. tweek/skills/isolation.py +469 -0
  73. tweek/skills/scanner.py +715 -0
  74. tweek/vault/__init__.py +0 -1
  75. tweek/vault/cross_platform.py +12 -1
  76. tweek/vault/keychain.py +87 -29
  77. tweek-0.2.0.dist-info/METADATA +281 -0
  78. tweek-0.2.0.dist-info/RECORD +121 -0
  79. {tweek-0.1.0.dist-info → tweek-0.2.0.dist-info}/entry_points.txt +8 -1
  80. {tweek-0.1.0.dist-info → tweek-0.2.0.dist-info}/licenses/LICENSE +80 -0
  81. tweek/integrations/moltbot.py +0 -243
  82. tweek-0.1.0.dist-info/METADATA +0 -335
  83. tweek-0.1.0.dist-info/RECORD +0 -85
  84. {tweek-0.1.0.dist-info → tweek-0.2.0.dist-info}/WHEEL +0 -0
  85. {tweek-0.1.0.dist-info → tweek-0.2.0.dist-info}/top_level.txt +0 -0
tweek/__init__.py CHANGED
@@ -1,5 +1,5 @@
1
1
  """
2
- Tweek - Security sandboxing for Claude Code skills.
2
+ Tweek - Security for AI agents.
3
3
 
4
4
  GAH! Because paranoia is a feature, not a bug.
5
5
 
@@ -10,7 +10,7 @@ Tweek provides:
10
10
  - Per-skill/per-tool security policies
11
11
  """
12
12
 
13
- __version__ = "0.1.0"
13
+ __version__ = "0.2.0"
14
14
  __author__ = "Tommy Mancino"
15
15
 
16
16
  # "TOO MUCH PRESSURE!" - Tweek Tweak
tweek/_keygen.py ADDED
@@ -0,0 +1,53 @@
1
+ """
2
+ Internal license key generation — NOT part of the client distribution.
3
+
4
+ This module exists solely for testing and admin use. In production,
5
+ license keys are generated by the license server (services/license-server/).
6
+ """
7
+
8
+ import base64
9
+ import hashlib
10
+ import hmac
11
+ import json
12
+ import time
13
+ from typing import Optional, List
14
+
15
+ from tweek.licensing import Tier, LICENSE_SECRET
16
+
17
+
18
def generate_license_key(
    tier: Tier,
    email: str,
    expires_at: Optional[int] = None,
    features: Optional[List[str]] = None,
) -> str:
    """
    Create a signed license key (admin/test use only).

    The key is a base64-encoded JSON payload plus an HMAC-SHA256
    signature over that payload, joined by a dot.

    Args:
        tier: License tier to embed in the key
        email: Customer email address
        expires_at: Unix timestamp when the key expires (None = never)
        features: Additional feature flags to grant

    Returns:
        License key string of the form "<payload_b64>.<hex signature>"
    """
    claims = dict(
        tier=tier.value,
        email=email,
        issued_at=int(time.time()),
        expires_at=expires_at,
        features=list(features) if features else [],
    )

    # Compact JSON (no whitespace) so the encoded payload is canonical.
    encoded = base64.b64encode(
        json.dumps(claims, separators=(",", ":")).encode()
    ).decode()

    mac = hmac.new(
        LICENSE_SECRET.encode(),
        encoded.encode(),
        hashlib.sha256,
    )

    return f"{encoded}.{mac.hexdigest()}"
tweek/audit.py ADDED
@@ -0,0 +1,288 @@
1
+ """
2
+ Tweek Skill Audit — Security analysis for skill files and tool descriptions.
3
+
4
+ Reads skill content, detects language, translates non-English content,
5
+ and runs the full 215-pattern regex analysis + LLM semantic review.
6
+ Designed for one-time evaluation of skills before installation.
7
+ """
8
+
9
+ import json
10
+ from dataclasses import dataclass, field
11
+ from pathlib import Path
12
+ from typing import List, Optional, Dict, Any
13
+
14
+
15
@dataclass
class AuditFinding:
    """A single finding from skill audit.

    Produced by the pattern-matching step of audit_content(); one
    instance is appended per matched pattern.
    """
    pattern_id: int      # numeric id of the matched pattern (0 if unknown)
    pattern_name: str    # human-readable pattern name
    severity: str  # critical, high, medium, low
    description: str     # pattern description text
    matched_text: str = ""  # excerpt (up to 100 chars) of the triggering text; may be empty
23
+
24
+
25
@dataclass
class AuditResult:
    """Aggregated outcome of auditing a single skill file or text blob."""
    skill_path: Path
    skill_name: str
    content_length: int
    findings: List[AuditFinding] = field(default_factory=list)
    risk_level: str = "safe"  # safe, suspicious, dangerous
    non_english_detected: bool = False
    detected_language: Optional[str] = None
    translated: bool = False
    translation_confidence: float = 0.0
    llm_review: Optional[Dict[str, Any]] = None
    error: Optional[str] = None

    @property
    def finding_count(self) -> int:
        """Total number of findings recorded for this audit."""
        return len(self.findings)

    @property
    def critical_count(self) -> int:
        """Number of findings whose severity is exactly "critical"."""
        # bool values sum as 0/1, so this counts the matches.
        return sum(f.severity == "critical" for f in self.findings)

    @property
    def high_count(self) -> int:
        """Number of findings whose severity is exactly "high"."""
        return sum(f.severity == "high" for f in self.findings)
51
+
52
+
53
# Default locations to scan for installed skills: the user-level Claude
# skills directory and the OpenClaw workspace skills directory. The
# project-local ./.claude/skills is appended at scan time when
# include_project=True.
SKILL_SCAN_LOCATIONS = [
    Path.home() / ".claude" / "skills",
    Path.home() / ".openclaw" / "workspace" / "skills",
]


def scan_installed_skills(
    extra_dirs: Optional[List[Path]] = None,
    include_project: bool = True,
) -> List[Dict[str, Any]]:
    """
    Scan known locations for installed SKILL.md files.

    Args:
        extra_dirs: Additional directories to scan
        include_project: Also scan ./.claude/skills/ in current directory

    Returns:
        List of dicts with path, name, and content for each skill found.
        Unreadable files are still listed, with content=None and an
        "error" key describing the failure.
    """
    search_dirs = list(SKILL_SCAN_LOCATIONS)
    if include_project:
        search_dirs.append(Path.cwd() / ".claude" / "skills")
    if extra_dirs:
        search_dirs.extend(extra_dirs)

    discovered: List[Dict[str, Any]] = []
    visited = set()  # resolved paths, so symlinked duplicates appear once

    for root in search_dirs:
        try:
            if not root.exists():
                continue
            for skill_md in root.rglob("SKILL.md"):
                real = skill_md.resolve()
                if real in visited:
                    continue
                visited.add(real)

                # Skill name convention: the directory containing SKILL.md.
                entry: Dict[str, Any] = {
                    "path": skill_md,
                    "name": skill_md.parent.name,
                    "content": None,
                    "source": str(root),
                }
                try:
                    entry["content"] = skill_md.read_text(encoding="utf-8")
                except (IOError, UnicodeDecodeError) as exc:
                    entry["error"] = str(exc)
                discovered.append(entry)
        except PermissionError:
            # Skip locations we cannot traverse.
            continue

    return discovered
116
+
117
+
118
def audit_content(
    content: str,
    name: str = "unknown",
    path: Optional[Path] = None,
    translate: bool = True,
    llm_review: bool = True,
) -> AuditResult:
    """
    Audit a piece of content (skill file, tool description, etc.) for security risks.

    Runs language detection, optional translation, pattern matching, and LLM review.

    Args:
        content: The text content to audit
        name: Name identifier for the content
        path: Optional file path
        translate: Whether to translate non-English content (requires API key)
        llm_review: Whether to run LLM semantic review (requires API key)

    Returns:
        AuditResult with findings and risk assessment
    """
    result = AuditResult(
        skill_path=path or Path(name),
        skill_name=name,
        content_length=len(content),
    )

    # Steps 1+2: language detection and (optional) translation. Patterns
    # run against the translated text when a confident translation exists.
    analysis_content = _detect_and_translate(content, translate, result)

    # Step 3: regex pattern matching against the (possibly translated) text.
    _run_pattern_matching(analysis_content, result)

    # Step 4: LLM semantic review (best-effort; requires an API key).
    if llm_review:
        _run_llm_review(analysis_content, result)

    # Step 5: combine the regex and LLM signals into an overall risk level.
    # BUGFIX: previously an LLM "dangerous" verdict was masked whenever any
    # non-critical regex finding existed, because the elif chain took the
    # "suspicious" branch before ever consulting the LLM result. The overall
    # risk is now the maximum of both signals.
    llm_risk = (result.llm_review or {}).get("risk_level")
    if result.critical_count > 0 or llm_risk == "dangerous":
        result.risk_level = "dangerous"
    elif result.finding_count > 0 or llm_risk == "suspicious":
        result.risk_level = "suspicious"
    else:
        result.risk_level = "safe"

    return result


def _detect_and_translate(content: str, translate: bool, result: AuditResult) -> str:
    """Detect non-English content and translate it when possible.

    Updates the language/translation fields on *result* in place and
    returns the text to run pattern analysis on: the translation when its
    confidence exceeds 0.3, otherwise the original content.
    """
    # Guarded import for consistency with the other optional dependencies
    # used in this audit pipeline; detection is skipped when unavailable.
    try:
        from tweek.security.language import detect_non_english
    except ImportError:
        return content

    lang_result = detect_non_english(content)
    result.non_english_detected = lang_result.has_non_english

    if not (lang_result.has_non_english and translate):
        return content

    try:
        from tweek.security.llm_reviewer import get_llm_reviewer

        reviewer = get_llm_reviewer()
        if reviewer.enabled:
            source_hint = ", ".join(lang_result.detected_scripts)
            translation = reviewer.translate(content, source_hint=source_hint)
            # Low-confidence translations are discarded; patterns then run
            # against the original text.
            if translation.get("confidence", 0) > 0.3:
                result.translated = True
                result.detected_language = translation.get("detected_language")
                result.translation_confidence = translation.get("confidence", 0.0)
                return translation["translated_text"]
    except ImportError:
        pass
    except Exception:
        # Translation is best-effort; never fail the audit because of it.
        pass
    return content


def _run_pattern_matching(text: str, result: AuditResult) -> None:
    """Run the full regex pattern set against *text*, appending findings to *result*."""
    import re

    try:
        from tweek.hooks.pre_tool_use import PatternMatcher
    except ImportError:
        result.error = "Pattern matcher not available"
        return

    matcher = PatternMatcher()
    for match in matcher.check_all(text):
        # Re-run the regex to capture a short excerpt for context.
        matched_text = ""
        try:
            m = re.search(match.get("regex", ""), text, re.IGNORECASE)
            if m:
                matched_text = m.group(0)[:100]
        except re.error:
            pass

        result.findings.append(AuditFinding(
            pattern_id=match.get("id", 0),
            pattern_name=match.get("name", "unknown"),
            severity=match.get("severity", "medium"),
            description=match.get("description", ""),
            matched_text=matched_text,
        ))


def _run_llm_review(text: str, result: AuditResult) -> None:
    """Run the LLM semantic review (best-effort) and record its verdict on *result*."""
    try:
        from tweek.security.llm_reviewer import get_llm_reviewer

        reviewer = get_llm_reviewer()
        if reviewer.enabled:
            review = reviewer.review(
                command=text[:500],
                tool="SkillAudit",
                tier="dangerous",
            )
            result.llm_review = {
                "risk_level": review.risk_level.value,
                "reason": review.reason,
                "confidence": review.confidence,
            }
    except ImportError:
        pass
    except Exception:
        # The review is advisory; a failure must not abort the audit.
        pass
241
+
242
+
243
def audit_skill(
    path: Path,
    translate: bool = True,
    llm_review: bool = True,
) -> AuditResult:
    """
    Audit a skill file at the given path.

    Args:
        path: Path to the skill file (SKILL.md or any text file)
        translate: Whether to translate non-English content
        llm_review: Whether to run LLM semantic review

    Returns:
        AuditResult with findings and risk assessment
    """
    path = Path(path)

    def _error_result(message: str) -> AuditResult:
        # Shared shape for the two unreadable-file outcomes.
        return AuditResult(
            skill_path=path,
            skill_name=path.stem,
            content_length=0,
            error=message,
        )

    if not path.exists():
        return _error_result(f"File not found: {path}")

    try:
        content = path.read_text(encoding="utf-8")
    except (IOError, UnicodeDecodeError) as exc:
        return _error_result(f"Failed to read file: {exc}")

    # SKILL.md files are named after their containing directory;
    # any other file is named after its own stem.
    skill_name = path.parent.name if path.name == "SKILL.md" else path.stem

    return audit_content(
        content=content,
        name=skill_name,
        path=path,
        translate=translate,
        llm_review=llm_review,
    )