tweek 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85) hide show
  1. tweek/__init__.py +16 -0
  2. tweek/cli.py +3390 -0
  3. tweek/cli_helpers.py +193 -0
  4. tweek/config/__init__.py +13 -0
  5. tweek/config/allowed_dirs.yaml +23 -0
  6. tweek/config/manager.py +1064 -0
  7. tweek/config/patterns.yaml +751 -0
  8. tweek/config/tiers.yaml +129 -0
  9. tweek/diagnostics.py +589 -0
  10. tweek/hooks/__init__.py +1 -0
  11. tweek/hooks/pre_tool_use.py +861 -0
  12. tweek/integrations/__init__.py +3 -0
  13. tweek/integrations/moltbot.py +243 -0
  14. tweek/licensing.py +398 -0
  15. tweek/logging/__init__.py +9 -0
  16. tweek/logging/bundle.py +350 -0
  17. tweek/logging/json_logger.py +150 -0
  18. tweek/logging/security_log.py +745 -0
  19. tweek/mcp/__init__.py +24 -0
  20. tweek/mcp/approval.py +456 -0
  21. tweek/mcp/approval_cli.py +356 -0
  22. tweek/mcp/clients/__init__.py +37 -0
  23. tweek/mcp/clients/chatgpt.py +112 -0
  24. tweek/mcp/clients/claude_desktop.py +203 -0
  25. tweek/mcp/clients/gemini.py +178 -0
  26. tweek/mcp/proxy.py +667 -0
  27. tweek/mcp/screening.py +175 -0
  28. tweek/mcp/server.py +317 -0
  29. tweek/platform/__init__.py +131 -0
  30. tweek/plugins/__init__.py +835 -0
  31. tweek/plugins/base.py +1080 -0
  32. tweek/plugins/compliance/__init__.py +30 -0
  33. tweek/plugins/compliance/gdpr.py +333 -0
  34. tweek/plugins/compliance/gov.py +324 -0
  35. tweek/plugins/compliance/hipaa.py +285 -0
  36. tweek/plugins/compliance/legal.py +322 -0
  37. tweek/plugins/compliance/pci.py +361 -0
  38. tweek/plugins/compliance/soc2.py +275 -0
  39. tweek/plugins/detectors/__init__.py +30 -0
  40. tweek/plugins/detectors/continue_dev.py +206 -0
  41. tweek/plugins/detectors/copilot.py +254 -0
  42. tweek/plugins/detectors/cursor.py +192 -0
  43. tweek/plugins/detectors/moltbot.py +205 -0
  44. tweek/plugins/detectors/windsurf.py +214 -0
  45. tweek/plugins/git_discovery.py +395 -0
  46. tweek/plugins/git_installer.py +491 -0
  47. tweek/plugins/git_lockfile.py +338 -0
  48. tweek/plugins/git_registry.py +503 -0
  49. tweek/plugins/git_security.py +482 -0
  50. tweek/plugins/providers/__init__.py +30 -0
  51. tweek/plugins/providers/anthropic.py +181 -0
  52. tweek/plugins/providers/azure_openai.py +289 -0
  53. tweek/plugins/providers/bedrock.py +248 -0
  54. tweek/plugins/providers/google.py +197 -0
  55. tweek/plugins/providers/openai.py +230 -0
  56. tweek/plugins/scope.py +130 -0
  57. tweek/plugins/screening/__init__.py +26 -0
  58. tweek/plugins/screening/llm_reviewer.py +149 -0
  59. tweek/plugins/screening/pattern_matcher.py +273 -0
  60. tweek/plugins/screening/rate_limiter.py +174 -0
  61. tweek/plugins/screening/session_analyzer.py +159 -0
  62. tweek/proxy/__init__.py +302 -0
  63. tweek/proxy/addon.py +223 -0
  64. tweek/proxy/interceptor.py +313 -0
  65. tweek/proxy/server.py +315 -0
  66. tweek/sandbox/__init__.py +71 -0
  67. tweek/sandbox/executor.py +382 -0
  68. tweek/sandbox/linux.py +278 -0
  69. tweek/sandbox/profile_generator.py +323 -0
  70. tweek/screening/__init__.py +13 -0
  71. tweek/screening/context.py +81 -0
  72. tweek/security/__init__.py +22 -0
  73. tweek/security/llm_reviewer.py +348 -0
  74. tweek/security/rate_limiter.py +682 -0
  75. tweek/security/secret_scanner.py +506 -0
  76. tweek/security/session_analyzer.py +600 -0
  77. tweek/vault/__init__.py +40 -0
  78. tweek/vault/cross_platform.py +251 -0
  79. tweek/vault/keychain.py +288 -0
  80. tweek-0.1.0.dist-info/METADATA +335 -0
  81. tweek-0.1.0.dist-info/RECORD +85 -0
  82. tweek-0.1.0.dist-info/WHEEL +5 -0
  83. tweek-0.1.0.dist-info/entry_points.txt +25 -0
  84. tweek-0.1.0.dist-info/licenses/LICENSE +190 -0
  85. tweek-0.1.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,861 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Tweek Pre-Tool-Use Hook for Claude Code
4
+
5
+ This hook intercepts tool calls before execution and applies tiered security screening.
6
+
7
+ Security Layers (Defense in Depth):
8
+ 1. Rate Limiting - Detect resource theft and burst attacks
9
+ 2. Pattern Matching - Regex patterns for known attack vectors
10
+ 3. LLM Review - Semantic analysis for risky/dangerous tiers
11
+ 4. Session Analysis - Cross-turn anomaly detection
12
+ 5. Sandbox Preview - Speculative execution (dangerous tier only)
13
+
14
+ Tiers:
15
+ - safe: No screening (trusted operations)
16
+ - default: Regex pattern matching only
17
+ - risky: Regex + LLM rules
18
+ - dangerous: Regex + LLM + Sandbox preview
19
+
20
+ Input (stdin): JSON with tool_name and tool_input
21
+ Output (stdout): JSON with decision (allow/block/ask) and optional message
22
+
23
+ Claude Code Hook Protocol:
24
+ - Empty response or {}: proceed with tool execution
25
+ - "permissionDecision": "ask" - prompt user for confirmation
26
+ - "permissionDecision": "deny" - block execution
27
+ """
28
+
29
+ import json
30
+ import os
31
+ import re
32
+ import sys
33
+ import uuid
34
+ from pathlib import Path
35
+ from typing import Optional, Dict, List, Any, Tuple
36
+
37
+ import yaml
38
+
39
+ # Add parent to path for imports
40
+ sys.path.insert(0, str(Path(__file__).parent.parent.parent))
41
+
42
+ from tweek.logging.security_log import (
43
+ SecurityLogger, SecurityEvent, EventType, get_logger
44
+ )
45
+
46
+
47
+ # =============================================================================
48
+ # PLUGIN SYSTEM INTEGRATION
49
+ # =============================================================================
50
+
51
+ def run_compliance_scans(
52
+ content: str,
53
+ direction: str,
54
+ logger: SecurityLogger,
55
+ session_id: Optional[str] = None,
56
+ tool_name: str = "unknown"
57
+ ) -> Tuple[bool, Optional[str], List[Dict]]:
58
+ """
59
+ Run all enabled compliance plugins on content.
60
+
61
+ Args:
62
+ content: Text content to scan
63
+ direction: "input" or "output"
64
+ logger: Security logger
65
+ session_id: Optional session ID
66
+ tool_name: Tool name for logging
67
+
68
+ Returns:
69
+ (should_block, message, findings)
70
+ """
71
+ try:
72
+ from tweek.plugins import get_registry, PluginCategory
73
+ from tweek.plugins.base import ScanDirection, ActionType
74
+
75
+ registry = get_registry()
76
+ direction_enum = ScanDirection(direction)
77
+
78
+ all_findings = []
79
+ messages = []
80
+ should_block = False
81
+
82
+ # Get all enabled compliance plugins
83
+ for plugin in registry.get_all(PluginCategory.COMPLIANCE):
84
+ try:
85
+ result = plugin.scan(content, direction_enum)
86
+
87
+ if result.findings:
88
+ all_findings.extend([f.to_dict() for f in result.findings])
89
+
90
+ if result.message:
91
+ messages.append(result.message)
92
+
93
+ # Log findings
94
+ logger.log_quick(
95
+ EventType.PATTERN_MATCH,
96
+ tool_name,
97
+ tier="compliance",
98
+ pattern_name=f"compliance_{plugin.name}",
99
+ pattern_severity=result.max_severity.value if result.max_severity else "medium",
100
+ decision="ask" if result.action != ActionType.BLOCK else "block",
101
+ decision_reason=f"Compliance scan ({plugin.name}): {len(result.findings)} finding(s)",
102
+ session_id=session_id,
103
+ metadata={
104
+ "plugin": plugin.name,
105
+ "direction": direction,
106
+ "findings": [f.pattern_name for f in result.findings],
107
+ "action": result.action.value,
108
+ }
109
+ )
110
+
111
+ if result.action == ActionType.BLOCK:
112
+ should_block = True
113
+
114
+ except Exception as e:
115
+ logger.log_quick(
116
+ EventType.ERROR,
117
+ tool_name,
118
+ decision_reason=f"Compliance plugin {plugin.name} error: {e}",
119
+ session_id=session_id
120
+ )
121
+
122
+ combined_message = "\n\n".join(messages) if messages else None
123
+ return should_block, combined_message, all_findings
124
+
125
+ except ImportError:
126
+ # Plugin system not available
127
+ return False, None, []
128
+ except Exception as e:
129
+ logger.log_quick(
130
+ EventType.ERROR,
131
+ tool_name,
132
+ decision_reason=f"Compliance scan error: {e}",
133
+ session_id=session_id
134
+ )
135
+ return False, None, []
136
+
137
+
138
+ def run_screening_plugins(
139
+ tool_name: str,
140
+ content: str,
141
+ context: Dict[str, Any],
142
+ logger: SecurityLogger
143
+ ) -> Tuple[bool, bool, Optional[str], List[Dict]]:
144
+ """
145
+ Run all enabled screening plugins.
146
+
147
+ Args:
148
+ tool_name: Name of the tool
149
+ content: Command/content to screen
150
+ context: Context dict (session_id, tier, etc.)
151
+ logger: Security logger
152
+
153
+ Returns:
154
+ (allowed, should_prompt, message, findings)
155
+ """
156
+ try:
157
+ from tweek.plugins import get_registry, PluginCategory
158
+
159
+ registry = get_registry()
160
+
161
+ allowed = True
162
+ should_prompt = False
163
+ messages = []
164
+ all_findings = []
165
+
166
+ for plugin in registry.get_all(PluginCategory.SCREENING):
167
+ try:
168
+ result = plugin.screen(tool_name, content, context)
169
+
170
+ if result.findings:
171
+ all_findings.extend([f.to_dict() for f in result.findings])
172
+
173
+ if not result.allowed:
174
+ allowed = False
175
+
176
+ if result.should_prompt:
177
+ should_prompt = True
178
+
179
+ if result.reason and (not result.allowed or result.should_prompt):
180
+ messages.append(f"[{plugin.name}] {result.reason}")
181
+
182
+ except Exception as e:
183
+ logger.log_quick(
184
+ EventType.ERROR,
185
+ tool_name,
186
+ decision_reason=f"Screening plugin {plugin.name} error: {e}",
187
+ session_id=context.get("session_id")
188
+ )
189
+
190
+ combined_message = "\n".join(messages) if messages else None
191
+ return allowed, should_prompt, combined_message, all_findings
192
+
193
+ except ImportError:
194
+ # Plugin system not available
195
+ return True, False, None, []
196
+
197
+
198
+ class TierManager:
199
+ """Manages security tier classification and escalation."""
200
+
201
+ def __init__(self, config_path: Optional[Path] = None):
202
+ if config_path is None:
203
+ config_path = Path(__file__).parent.parent / "config" / "tiers.yaml"
204
+
205
+ self.config = self._load_config(config_path)
206
+ self.tools = self.config.get("tools", {})
207
+ self.skills = self.config.get("skills", {})
208
+ self.escalations = self.config.get("escalations", [])
209
+ self.default_tier = self.config.get("default_tier", "default")
210
+ self.tiers = self.config.get("tiers", {})
211
+
212
+ def _load_config(self, path: Path) -> dict:
213
+ """Load tier configuration from YAML."""
214
+ if not path.exists():
215
+ return {}
216
+ with open(path) as f:
217
+ return yaml.safe_load(f) or {}
218
+
219
+ def get_base_tier(self, tool_name: str, skill_name: Optional[str] = None) -> str:
220
+ """Get the base tier for a tool or skill."""
221
+ # Skills override tools if specified
222
+ if skill_name and skill_name in self.skills:
223
+ return self.skills[skill_name]
224
+
225
+ if tool_name in self.tools:
226
+ return self.tools[tool_name]
227
+
228
+ return self.default_tier
229
+
230
+ def check_escalations(self, content: str) -> Optional[Dict]:
231
+ """Check if content triggers any escalation patterns.
232
+
233
+ Returns the highest-priority escalation match, or None.
234
+ """
235
+ tier_priority = {"safe": 0, "default": 1, "risky": 2, "dangerous": 3}
236
+ highest_match = None
237
+ highest_priority = -1
238
+
239
+ for escalation in self.escalations:
240
+ pattern = escalation.get("pattern", "")
241
+ try:
242
+ if re.search(pattern, content, re.IGNORECASE):
243
+ target_tier = escalation.get("escalate_to", "default")
244
+ priority = tier_priority.get(target_tier, 1)
245
+ if priority > highest_priority:
246
+ highest_priority = priority
247
+ highest_match = escalation
248
+ except re.error:
249
+ continue
250
+
251
+ return highest_match
252
+
253
+ def get_effective_tier(
254
+ self,
255
+ tool_name: str,
256
+ content: str,
257
+ skill_name: Optional[str] = None
258
+ ) -> tuple[str, Optional[Dict]]:
259
+ """Get the effective tier after checking escalations.
260
+
261
+ Returns (tier, escalation_match) where escalation_match is None
262
+ if no escalation occurred.
263
+ """
264
+ base_tier = self.get_base_tier(tool_name, skill_name)
265
+ escalation = self.check_escalations(content)
266
+
267
+ if escalation is None:
268
+ return base_tier, None
269
+
270
+ tier_priority = {"safe": 0, "default": 1, "risky": 2, "dangerous": 3}
271
+ base_priority = tier_priority.get(base_tier, 1)
272
+ escalated_tier = escalation.get("escalate_to", "default")
273
+ escalated_priority = tier_priority.get(escalated_tier, 1)
274
+
275
+ # Only escalate, never de-escalate
276
+ if escalated_priority > base_priority:
277
+ return escalated_tier, escalation
278
+
279
+ return base_tier, None
280
+
281
+ def get_screening_methods(self, tier: str) -> List[str]:
282
+ """Get the screening methods for a tier."""
283
+ tier_config = self.tiers.get(tier, {})
284
+ return tier_config.get("screening", [])
285
+
286
+
287
+ class PatternMatcher:
288
+ """Matches commands against hostile patterns."""
289
+
290
+ def __init__(self, patterns_path: Optional[Path] = None):
291
+ # Try user patterns first (~/.tweek/patterns/), fall back to bundled
292
+ user_patterns = Path.home() / ".tweek" / "patterns" / "patterns.yaml"
293
+ bundled_patterns = Path(__file__).parent.parent / "config" / "patterns.yaml"
294
+
295
+ if patterns_path is not None:
296
+ self.patterns = self._load_patterns(patterns_path)
297
+ elif user_patterns.exists():
298
+ self.patterns = self._load_patterns(user_patterns)
299
+ else:
300
+ self.patterns = self._load_patterns(bundled_patterns)
301
+
302
+ def _load_patterns(self, path: Path) -> List[dict]:
303
+ """Load patterns from YAML config.
304
+
305
+ All patterns and security features are available to all users (open source).
306
+ Pro (teams) and Enterprise (compliance) tiers coming soon.
307
+ """
308
+ if not path.exists():
309
+ return []
310
+
311
+ with open(path) as f:
312
+ config = yaml.safe_load(f) or {}
313
+
314
+ return config.get("patterns", [])
315
+
316
+ def check(self, content: str) -> Optional[dict]:
317
+ """Check content against all patterns.
318
+
319
+ Returns the first matching pattern, or None.
320
+ """
321
+ for pattern in self.patterns:
322
+ try:
323
+ if re.search(pattern.get("regex", ""), content, re.IGNORECASE):
324
+ return pattern
325
+ except re.error:
326
+ continue
327
+ return None
328
+
329
+ def check_all(self, content: str) -> List[dict]:
330
+ """Check content against all patterns, returning all matches."""
331
+ matches = []
332
+ for pattern in self.patterns:
333
+ try:
334
+ if re.search(pattern.get("regex", ""), content, re.IGNORECASE):
335
+ matches.append(pattern)
336
+ except re.error:
337
+ continue
338
+ return matches
339
+
340
+
341
+ def format_prompt_message(
342
+ pattern: Optional[dict],
343
+ escalation: Optional[dict],
344
+ command: str,
345
+ tier: str,
346
+ rate_limit_msg: Optional[str] = None,
347
+ llm_msg: Optional[str] = None,
348
+ session_msg: Optional[str] = None
349
+ ) -> str:
350
+ """Format the message shown to user when prompting for confirmation."""
351
+ severity_icons = {
352
+ "critical": "",
353
+ "high": " ",
354
+ "medium": "",
355
+ "low": "",
356
+ }
357
+
358
+ lines = []
359
+
360
+ # Header with tier info
361
+ tier_icons = {"safe": "", "default": "", "risky": "", "dangerous": ""}
362
+ lines.append(f"{tier_icons.get(tier, '')} TWEEK SECURITY CHECK")
363
+ lines.append("" * 45)
364
+
365
+ # Rate limit info if applicable
366
+ if rate_limit_msg:
367
+ lines.append(rate_limit_msg)
368
+ lines.append("")
369
+
370
+ # Session analysis if applicable
371
+ if session_msg:
372
+ lines.append(session_msg)
373
+ lines.append("")
374
+
375
+ # Escalation info if applicable
376
+ if escalation:
377
+ lines.append(f" Escalated to {tier.upper()} tier")
378
+ lines.append(f" Reason: {escalation.get('description', 'Content-based escalation')}")
379
+ lines.append("")
380
+
381
+ # Pattern match info
382
+ if pattern:
383
+ icon = severity_icons.get(pattern.get("severity", "medium"), "")
384
+ lines.append(f"{icon} Pattern Match: {pattern.get('name', 'unknown')}")
385
+ if pattern.get("id"):
386
+ lines.append(f" ID: {pattern.get('id')}")
387
+ lines.append(f" Severity: {pattern.get('severity', 'unknown').upper()}")
388
+ lines.append(f" {pattern.get('description', 'Suspicious command detected')}")
389
+ lines.append("")
390
+
391
+ # LLM review if applicable
392
+ if llm_msg:
393
+ lines.append(llm_msg)
394
+ lines.append("")
395
+
396
+ # Command preview
397
+ display_cmd = command if len(command) < 60 else command[:57] + "..."
398
+ lines.append(f"Command: {display_cmd}")
399
+ lines.append("" * 45)
400
+ lines.append("Allow this command?")
401
+
402
+ return "\n".join(lines)
403
+
404
+
405
+ def process_hook(input_data: dict, logger: SecurityLogger) -> dict:
406
+ """Main hook logic with tiered security screening.
407
+
408
+ Security Layers:
409
+ 1. Rate Limiting - Detect resource theft
410
+ 2. Pattern Matching - Known attack vectors
411
+ 3. LLM Review - Semantic analysis
412
+ 4. Session Analysis - Cross-turn anomalies
413
+ 5. Sandbox Preview - Speculative execution
414
+
415
+ Args:
416
+ input_data: Dict with tool_name, tool_input from Claude Code
417
+ logger: Security logger instance
418
+
419
+ Returns:
420
+ Dict with hookSpecificOutput for Claude Code hook protocol
421
+ """
422
+ tool_name = input_data.get("tool_name", "")
423
+ tool_input = input_data.get("tool_input", {})
424
+ session_id = input_data.get("session_id")
425
+ working_dir = input_data.get("cwd")
426
+
427
+ # Generate correlation ID to link all events in this screening pass
428
+ correlation_id = uuid.uuid4().hex[:12]
429
+
430
+ def _log(event_type, tool, **kwargs):
431
+ """Log with correlation_id, source, and session_id automatically included."""
432
+ logger.log_quick(
433
+ event_type, tool,
434
+ correlation_id=correlation_id, source="hooks",
435
+ session_id=session_id,
436
+ **kwargs
437
+ )
438
+
439
+ # Extract content to analyze (command for Bash, path for Read, etc.)
440
+ if tool_name == "Bash":
441
+ content = tool_input.get("command", "")
442
+ elif tool_name in ("Read", "Write", "Edit"):
443
+ content = tool_input.get("file_path", "")
444
+ elif tool_name == "WebFetch":
445
+ content = tool_input.get("url", "")
446
+ else:
447
+ content = json.dumps(tool_input)
448
+
449
+ if not content:
450
+ return {}
451
+
452
+ # =========================================================================
453
+ # LAYER 0: Compliance Scanning (INPUT direction)
454
+ # Scan incoming content for sensitive data before processing
455
+ # =========================================================================
456
+ compliance_block, compliance_msg, compliance_findings = run_compliance_scans(
457
+ content=content,
458
+ direction="input",
459
+ logger=logger,
460
+ session_id=session_id,
461
+ tool_name=tool_name
462
+ )
463
+
464
+ if compliance_block:
465
+ return {
466
+ "hookSpecificOutput": {
467
+ "hookEventName": "PreToolUse",
468
+ "permissionDecision": "deny",
469
+ "permissionDecisionReason": f" COMPLIANCE BLOCK\n{compliance_msg}",
470
+ }
471
+ }
472
+
473
+ # Initialize managers
474
+ tier_mgr = TierManager()
475
+ pattern_matcher = PatternMatcher()
476
+
477
+ # Determine effective tier
478
+ effective_tier, escalation = tier_mgr.get_effective_tier(tool_name, content)
479
+ screening_methods = tier_mgr.get_screening_methods(effective_tier)
480
+
481
+ # Log tool invocation
482
+ _log(
483
+ EventType.TOOL_INVOKED,
484
+ tool_name,
485
+ command=content if tool_name == "Bash" else None,
486
+ tier=effective_tier,
487
+ working_directory=working_dir,
488
+ metadata={"tool_input": tool_input}
489
+ )
490
+
491
+ # Log escalation if it occurred
492
+ if escalation:
493
+ _log(
494
+ EventType.ESCALATION,
495
+ tool_name,
496
+ command=content if tool_name == "Bash" else None,
497
+ tier=effective_tier,
498
+ decision_reason=escalation.get("description"),
499
+ metadata={"escalation": escalation}
500
+ )
501
+
502
+ # =========================================================================
503
+ # LAYER 1: Rate Limiting
504
+ # =========================================================================
505
+ rate_limit_msg = None
506
+ try:
507
+ from tweek.security.rate_limiter import get_rate_limiter
508
+
509
+ rate_limiter = get_rate_limiter()
510
+ rate_result = rate_limiter.check(
511
+ tool_name=tool_name,
512
+ command=content if tool_name == "Bash" else None,
513
+ session_id=session_id,
514
+ tier=effective_tier
515
+ )
516
+
517
+ if not rate_result.allowed:
518
+ rate_limit_msg = rate_limiter.format_violation_message(rate_result)
519
+ _log(
520
+ EventType.PATTERN_MATCH,
521
+ tool_name,
522
+ command=content if tool_name == "Bash" else None,
523
+ tier=effective_tier,
524
+ pattern_name="rate_limit",
525
+ pattern_severity="high",
526
+ decision="ask",
527
+ decision_reason=f"Rate limit violations: {rate_result.violations}",
528
+ metadata={"rate_limit": rate_result.details}
529
+ )
530
+
531
+ # Rate limit alone triggers prompt
532
+ return {
533
+ "hookSpecificOutput": {
534
+ "hookEventName": "PreToolUse",
535
+ "permissionDecision": "ask",
536
+ "permissionDecisionReason": format_prompt_message(
537
+ None, None, content, effective_tier,
538
+ rate_limit_msg=rate_limit_msg
539
+ ),
540
+ }
541
+ }
542
+ except ImportError:
543
+ pass # Rate limiter not available
544
+ except Exception as e:
545
+ _log(
546
+ EventType.ERROR,
547
+ tool_name,
548
+ decision_reason=f"Rate limiter error: {e}",
549
+ )
550
+
551
+ # Safe tier - no further screening
552
+ if not screening_methods:
553
+ _log(
554
+ EventType.ALLOWED,
555
+ tool_name,
556
+ tier=effective_tier,
557
+ decision="allow",
558
+ decision_reason="Safe tier - no screening required",
559
+ )
560
+ return {}
561
+
562
+ # =========================================================================
563
+ # LAYER 2: Pattern Matching
564
+ # =========================================================================
565
+ pattern_match = None
566
+ if "regex" in screening_methods:
567
+ pattern_match = pattern_matcher.check(content)
568
+
569
+ if pattern_match:
570
+ _log(
571
+ EventType.PATTERN_MATCH,
572
+ tool_name,
573
+ command=content if tool_name == "Bash" else None,
574
+ tier=effective_tier,
575
+ pattern_name=pattern_match.get("name"),
576
+ pattern_severity=pattern_match.get("severity"),
577
+ metadata={"pattern": pattern_match}
578
+ )
579
+
580
+ # =========================================================================
581
+ # LAYER 3: LLM Review (for risky/dangerous tiers)
582
+ # =========================================================================
583
+ llm_msg = None
584
+ llm_triggered = False
585
+ if "llm" in screening_methods and tool_name == "Bash":
586
+ try:
587
+ from tweek.security.llm_reviewer import get_llm_reviewer
588
+
589
+ llm_reviewer = get_llm_reviewer()
590
+ llm_result = llm_reviewer.review(
591
+ command=content,
592
+ tool=tool_name,
593
+ tier=effective_tier,
594
+ tool_input=tool_input,
595
+ session_context=f"session:{session_id}" if session_id else None
596
+ )
597
+
598
+ if llm_result.should_prompt:
599
+ llm_triggered = True
600
+ llm_msg = llm_reviewer.format_review_message(llm_result)
601
+ _log(
602
+ EventType.LLM_RULE_MATCH,
603
+ tool_name,
604
+ command=content,
605
+ tier=effective_tier,
606
+ pattern_name="llm_review",
607
+ pattern_severity=llm_result.risk_level.value,
608
+ decision="ask",
609
+ decision_reason=llm_result.reason,
610
+ metadata={
611
+ "llm_risk": llm_result.risk_level.value,
612
+ "llm_confidence": llm_result.confidence,
613
+ "llm_reason": llm_result.reason
614
+ }
615
+ )
616
+ except ImportError:
617
+ pass # LLM reviewer not available
618
+ except Exception as e:
619
+ _log(
620
+ EventType.ERROR,
621
+ tool_name,
622
+ decision_reason=f"LLM reviewer error: {e}",
623
+ )
624
+
625
+ # =========================================================================
626
+ # LAYER 4: Session Analysis (cross-turn anomaly detection)
627
+ # =========================================================================
628
+ session_msg = None
629
+ session_triggered = False
630
+ if session_id and effective_tier in ("risky", "dangerous"):
631
+ try:
632
+ from tweek.security.session_analyzer import get_session_analyzer
633
+
634
+ session_analyzer = get_session_analyzer()
635
+ session_result = session_analyzer.analyze(session_id)
636
+
637
+ if session_result.is_suspicious:
638
+ session_triggered = True
639
+ session_msg = session_analyzer.format_analysis_message(session_result)
640
+ _log(
641
+ EventType.PATTERN_MATCH,
642
+ tool_name,
643
+ command=content if tool_name == "Bash" else None,
644
+ tier=effective_tier,
645
+ pattern_name="session_anomaly",
646
+ pattern_severity="high" if session_result.is_high_risk else "medium",
647
+ decision="ask",
648
+ decision_reason=f"Session anomalies: {session_result.anomalies}",
649
+ metadata={
650
+ "risk_score": session_result.risk_score,
651
+ "anomalies": [a.value for a in session_result.anomalies]
652
+ }
653
+ )
654
+ except ImportError:
655
+ pass # Session analyzer not available
656
+ except Exception as e:
657
+ _log(
658
+ EventType.ERROR,
659
+ tool_name,
660
+ decision_reason=f"Session analyzer error: {e}",
661
+ )
662
+
663
+ # =========================================================================
664
+ # LAYER 5: Sandbox Preview (dangerous tier, Bash only)
665
+ # =========================================================================
666
+ sandbox_triggered = False
667
+ sandbox_msg = None
668
+ if "sandbox" in screening_methods and tool_name == "Bash":
669
+ try:
670
+ from tweek.sandbox.executor import SandboxExecutor
671
+
672
+ executor = SandboxExecutor()
673
+ preview = executor.preview_command(content, skill="hook-preview", timeout=3.0)
674
+
675
+ if preview.suspicious:
676
+ sandbox_triggered = True
677
+ _log(
678
+ EventType.SANDBOX_PREVIEW,
679
+ tool_name,
680
+ command=content,
681
+ tier=effective_tier,
682
+ pattern_name="sandbox_preview",
683
+ pattern_severity="high",
684
+ decision="ask",
685
+ decision_reason="Sandbox preview detected suspicious behavior",
686
+ metadata={"violations": preview.violations, "denied_ops": preview.denied_operations}
687
+ )
688
+
689
+ violation_text = "\n".join(f" * {v}" for v in preview.violations)
690
+ sandbox_msg = (
691
+ f" SANDBOX PREVIEW\n"
692
+ f"Speculative execution detected suspicious behavior:\n\n"
693
+ f"{violation_text}"
694
+ )
695
+ except ImportError:
696
+ pass # Sandbox not available
697
+ except Exception as e:
698
+ _log(
699
+ EventType.ERROR,
700
+ tool_name,
701
+ command=content,
702
+ tier=effective_tier,
703
+ decision_reason=f"Sandbox preview error: {e}",
704
+ )
705
+
706
+ # =========================================================================
707
+ # Decision: Prompt if any layer triggered
708
+ # =========================================================================
709
+ compliance_triggered = bool(compliance_findings)
710
+
711
+ if pattern_match or llm_triggered or session_triggered or sandbox_triggered or compliance_triggered:
712
+ _log(
713
+ EventType.USER_PROMPTED,
714
+ tool_name,
715
+ command=content if tool_name == "Bash" else None,
716
+ tier=effective_tier,
717
+ pattern_name=pattern_match.get("name") if pattern_match else "multi_layer",
718
+ pattern_severity=pattern_match.get("severity") if pattern_match else "high",
719
+ decision="ask",
720
+ decision_reason="Security check triggered",
721
+ metadata={
722
+ "pattern_triggered": pattern_match is not None,
723
+ "llm_triggered": llm_triggered,
724
+ "session_triggered": session_triggered,
725
+ "sandbox_triggered": sandbox_triggered,
726
+ "compliance_triggered": compliance_triggered,
727
+ "compliance_findings": len(compliance_findings) if compliance_findings else 0,
728
+ }
729
+ )
730
+
731
+ # Combine all messages
732
+ final_msg = format_prompt_message(
733
+ pattern_match, escalation, content, effective_tier,
734
+ rate_limit_msg=rate_limit_msg,
735
+ llm_msg=llm_msg,
736
+ session_msg=session_msg
737
+ )
738
+
739
+ # Add sandbox message if applicable
740
+ if sandbox_msg:
741
+ final_msg += f"\n\n{sandbox_msg}"
742
+
743
+ # Add compliance message if applicable
744
+ if compliance_msg:
745
+ final_msg += f"\n\n COMPLIANCE NOTICE\n{compliance_msg}"
746
+
747
+ return {
748
+ "hookSpecificOutput": {
749
+ "hookEventName": "PreToolUse",
750
+ "permissionDecision": "ask",
751
+ "permissionDecisionReason": final_msg,
752
+ }
753
+ }
754
+
755
+ # No issues found - allow
756
+ _log(
757
+ EventType.ALLOWED,
758
+ tool_name,
759
+ command=content if tool_name == "Bash" else None,
760
+ tier=effective_tier,
761
+ decision="allow",
762
+ decision_reason="Passed all screening layers",
763
+ )
764
+
765
+ return {}
766
+
767
+
768
+ def check_allowed_directory() -> bool:
769
+ """
770
+ Check if current working directory is in the allowed list.
771
+
772
+ This is a SAFETY CHECK to prevent Tweek from accidentally
773
+ running in production or other directories.
774
+
775
+ Returns:
776
+ True if Tweek should activate, False to pass through
777
+ """
778
+ config_path = Path(__file__).parent.parent / "config" / "allowed_dirs.yaml"
779
+
780
+ if not config_path.exists():
781
+ # No config = disabled everywhere (safe default)
782
+ return False
783
+
784
+ try:
785
+ with open(config_path) as f:
786
+ config = yaml.safe_load(f) or {}
787
+ except Exception:
788
+ return False
789
+
790
+ # Check if globally enabled (production mode)
791
+ if config.get("global_enabled", False):
792
+ return True
793
+
794
+ # Check allowed directories
795
+ allowed_dirs = config.get("allowed_directories", [])
796
+ cwd = Path.cwd().resolve()
797
+
798
+ for allowed in allowed_dirs:
799
+ allowed_path = Path(allowed).expanduser().resolve()
800
+ try:
801
+ # Check if cwd is the allowed dir or a subdirectory
802
+ cwd.relative_to(allowed_path)
803
+ return True
804
+ except ValueError:
805
+ continue
806
+
807
+ return False
808
+
809
+
810
+ def main():
811
+ """Entry point for the hook."""
812
+ # SAFETY CHECK: Only activate in allowed directories
813
+ if not check_allowed_directory():
814
+ # Not in allowed directory - pass through without screening
815
+ print("{}")
816
+ return
817
+
818
+ logger = get_logger()
819
+
820
+ try:
821
+ # Read JSON from stdin
822
+ input_text = sys.stdin.read()
823
+ if not input_text.strip():
824
+ print("{}")
825
+ return
826
+
827
+ input_data = json.loads(input_text)
828
+ result = process_hook(input_data, logger)
829
+
830
+ # Output JSON result
831
+ print(json.dumps(result))
832
+
833
+ except json.JSONDecodeError as e:
834
+ # Invalid JSON - fail open (allow) but log
835
+ logger.log_quick(
836
+ EventType.ERROR,
837
+ "unknown",
838
+ decision="allow",
839
+ decision_reason=f"JSON decode error: {e}"
840
+ )
841
+ print("{}")
842
+
843
+ except Exception as e:
844
+ # Any error - fail closed (block for safety)
845
+ logger.log_quick(
846
+ EventType.ERROR,
847
+ "unknown",
848
+ decision="deny",
849
+ decision_reason=f"Hook error: {e}"
850
+ )
851
+ print(json.dumps({
852
+ "hookSpecificOutput": {
853
+ "hookEventName": "PreToolUse",
854
+ "permissionDecision": "deny",
855
+ "permissionDecisionReason": f" TWEEK ERROR: {e}\nBlocking for safety.",
856
+ }
857
+ }))
858
+
859
+
860
+ if __name__ == "__main__":
861
+ main()