tweek 0.3.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. tweek/__init__.py +2 -2
  2. tweek/audit.py +2 -2
  3. tweek/cli.py +78 -6559
  4. tweek/cli_config.py +643 -0
  5. tweek/cli_configure.py +413 -0
  6. tweek/cli_core.py +718 -0
  7. tweek/cli_dry_run.py +390 -0
  8. tweek/cli_helpers.py +316 -0
  9. tweek/cli_install.py +1666 -0
  10. tweek/cli_logs.py +301 -0
  11. tweek/cli_mcp.py +148 -0
  12. tweek/cli_memory.py +343 -0
  13. tweek/cli_plugins.py +748 -0
  14. tweek/cli_protect.py +564 -0
  15. tweek/cli_proxy.py +405 -0
  16. tweek/cli_security.py +236 -0
  17. tweek/cli_skills.py +289 -0
  18. tweek/cli_uninstall.py +551 -0
  19. tweek/cli_vault.py +313 -0
  20. tweek/config/__init__.py +8 -0
  21. tweek/config/allowed_dirs.yaml +16 -17
  22. tweek/config/families.yaml +4 -1
  23. tweek/config/manager.py +49 -0
  24. tweek/config/models.py +307 -0
  25. tweek/config/patterns.yaml +29 -5
  26. tweek/config/templates/config.yaml.template +212 -0
  27. tweek/config/templates/env.template +45 -0
  28. tweek/config/templates/overrides.yaml.template +121 -0
  29. tweek/config/templates/tweek.yaml.template +20 -0
  30. tweek/config/templates.py +136 -0
  31. tweek/config/tiers.yaml +5 -4
  32. tweek/diagnostics.py +112 -32
  33. tweek/hooks/overrides.py +4 -0
  34. tweek/hooks/post_tool_use.py +46 -1
  35. tweek/hooks/pre_tool_use.py +149 -49
  36. tweek/integrations/openclaw.py +84 -0
  37. tweek/licensing.py +1 -1
  38. tweek/mcp/__init__.py +7 -9
  39. tweek/mcp/clients/chatgpt.py +2 -2
  40. tweek/mcp/clients/claude_desktop.py +2 -2
  41. tweek/mcp/clients/gemini.py +2 -2
  42. tweek/mcp/proxy.py +165 -1
  43. tweek/memory/provenance.py +438 -0
  44. tweek/memory/queries.py +2 -0
  45. tweek/memory/safety.py +23 -4
  46. tweek/memory/schemas.py +1 -0
  47. tweek/memory/store.py +101 -71
  48. tweek/plugins/screening/heuristic_scorer.py +1 -1
  49. tweek/security/integrity.py +77 -0
  50. tweek/security/llm_reviewer.py +162 -68
  51. tweek/security/local_reviewer.py +44 -2
  52. tweek/security/model_registry.py +73 -7
  53. tweek/skill_template/overrides-reference.md +1 -1
  54. tweek/skills/context.py +221 -0
  55. tweek/skills/scanner.py +2 -2
  56. {tweek-0.3.0.dist-info → tweek-0.4.0.dist-info}/METADATA +9 -7
  57. {tweek-0.3.0.dist-info → tweek-0.4.0.dist-info}/RECORD +62 -39
  58. tweek/mcp/server.py +0 -320
  59. {tweek-0.3.0.dist-info → tweek-0.4.0.dist-info}/WHEEL +0 -0
  60. {tweek-0.3.0.dist-info → tweek-0.4.0.dist-info}/entry_points.txt +0 -0
  61. {tweek-0.3.0.dist-info → tweek-0.4.0.dist-info}/licenses/LICENSE +0 -0
  62. {tweek-0.3.0.dist-info → tweek-0.4.0.dist-info}/licenses/NOTICE +0 -0
  63. {tweek-0.3.0.dist-info → tweek-0.4.0.dist-info}/top_level.txt +0 -0
tweek/config/templates/overrides.yaml.template ADDED
@@ -0,0 +1,121 @@
+ # ============================================================================
+ # Tweek Security Overrides (~/.tweek/overrides.yaml)
+ # ============================================================================
+ #
+ # Human-only security configuration. This file is protected by Tweek's
+ # self-protection — AI agents cannot modify it. Only a human editing
+ # directly can change these settings.
+ #
+ # Features:
+ #   - Whitelist: Exempt specific paths/tools/URLs from all screening
+ #   - Pattern Toggles: Enable/disable individual detection patterns
+ #   - Trust Levels: Control severity thresholds
+ #   - Enforcement: Customize severity+confidence decision matrix
+ #
+ # Edit this file: tweek config edit overrides
+ # Full reference: https://github.com/gettweek/tweek/blob/main/docs/CONFIGURATION.md
+ # ============================================================================
+
+
+ # ---------------------------------------------------------------------------
+ # Whitelist Rules
+ # ---------------------------------------------------------------------------
+ # Skip ALL screening for matching targets. Use sparingly — whitelisted
+ # content bypasses pattern matching, LLM review, and all other checks.
+ #
+ # whitelist:
+ #   # Exempt a specific file for specific tools
+ #   - path: ~/projects/my-app/templates.yaml
+ #     tools: [Read]
+ #     reason: "Known-safe templates file"
+ #
+ #   # Exempt an entire directory (prefix match)
+ #   - path: ~/projects/trusted-project
+ #     tools: [Read, Grep]
+ #     reason: "Trusted project directory"
+ #
+ #   # Exempt a URL prefix
+ #   - url_prefix: "https://api.example.com/"
+ #     tools: [WebFetch]
+ #     reason: "Internal API endpoint"
+ #
+ #   # Exempt a command prefix
+ #   - tool: Bash
+ #     command_prefix: "npm test"
+ #     reason: "Running project tests"
+
+
+ # ---------------------------------------------------------------------------
+ # Pattern Toggles
+ # ---------------------------------------------------------------------------
+ # Control which of the 262 detection patterns are active.
+ # Run 'tweek config list' to see all pattern names.
+ #
+ # patterns:
+ #   # Globally disable specific patterns
+ #   disabled:
+ #     - name: env_command
+ #       reason: "Used frequently in our workflow"
+ #     - name: docker_mount_sensitive
+ #       reason: "Our CI uses Docker volume mounts"
+ #
+ #   # Disable patterns only in specific directories
+ #   scoped_disables:
+ #     - name: hook_modification
+ #       paths:
+ #         - ~/projects/my-security-tool
+ #       reason: "This repo contains hook management code"
+ #
+ #   # Force-enable patterns (overrides any disable rule)
+ #   force_enabled:
+ #     - credential_theft_critical
+ #     - private_key_access
+
+
+ # ---------------------------------------------------------------------------
+ # Trust Level Configuration
+ # ---------------------------------------------------------------------------
+ # Controls how much screening is applied based on session type.
+ #
+ # Auto-detection: interactive sessions get full screening, automated
+ # sessions (CI, cron, systemd) can have different thresholds.
+ # Override with env var: TWEEK_TRUST_LEVEL=interactive|automated
+ #
+ # trust:
+ #   default_mode: interactive    # Default when auto-detect is ambiguous
+ #   interactive:
+ #     min_severity: low          # Prompt on all severities
+ #   automated:
+ #     min_severity: high         # Only prompt on high and critical
+ #     skip_llm_for_default_tier: true
+
+
+ # ---------------------------------------------------------------------------
+ # Enforcement Policy
+ # ---------------------------------------------------------------------------
+ # Customize the severity+confidence decision matrix.
+ # Decisions: deny (hard block) | ask (prompt user) | log (silent allow)
+ #
+ # Default matrix:
+ #   CRITICAL + deterministic        → deny
+ #   CRITICAL + heuristic/contextual → ask
+ #   HIGH/MEDIUM                     → ask
+ #   LOW                             → log
+ #
+ # enforcement:
+ #   critical:
+ #     deterministic: deny
+ #     heuristic: ask
+ #     contextual: ask
+ #   high:
+ #     deterministic: ask
+ #     heuristic: ask
+ #     contextual: ask
+ #   medium:
+ #     deterministic: ask
+ #     heuristic: ask
+ #     contextual: ask
+ #   low:
+ #     deterministic: log
+ #     heuristic: log
+ #     contextual: log
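The matrix above is just a (severity, confidence) lookup. A minimal sketch of how a caller could resolve a decision from these documented defaults; the names below are illustrative and this is not Tweek's internal code:

    # Illustrative only: mirrors the default matrix documented in the template.
    ENFORCEMENT = {
        "critical": {"deterministic": "deny", "heuristic": "ask", "contextual": "ask"},
        "high":     {"deterministic": "ask",  "heuristic": "ask", "contextual": "ask"},
        "medium":   {"deterministic": "ask",  "heuristic": "ask", "contextual": "ask"},
        "low":      {"deterministic": "log",  "heuristic": "log", "contextual": "log"},
    }

    def decide(severity: str, confidence: str) -> str:
        """Return deny, ask, or log for a finding (hypothetical helper)."""
        return ENFORCEMENT.get(severity.lower(), {}).get(confidence.lower(), "ask")

    assert decide("critical", "deterministic") == "deny"
    assert decide("low", "contextual") == "log"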
tweek/config/templates/tweek.yaml.template ADDED
@@ -0,0 +1,20 @@
+ # ============================================================================
+ # Tweek Hook Control (.tweek.yaml)
+ # ============================================================================
+ #
+ # Per-directory control for Tweek security screening hooks.
+ # Place this file in any directory to control whether hooks run there.
+ #
+ # This file is protected by Tweek's self-protection — only a human
+ # editing directly can change these settings.
+ #
+ # Hooks:
+ #   pre_tool_use    Screens tool calls BEFORE execution (can block)
+ #   post_tool_use   Screens tool responses AFTER execution (advisory)
+ #
+ # Set to false to disable screening in this directory.
+ # ============================================================================
+
+ hooks:
+   pre_tool_use: true
+   post_tool_use: true
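Later in this diff, post_tool_use.main() gates itself on check_hook_enabled() from tweek.hooks.pre_tool_use. The snippet below is a rough sketch of how such a per-directory lookup could behave, based only on what this template documents (walk up from the working directory, read the hooks mapping, default to enabled); it is an assumption, not the actual implementation:

    # Sketch under stated assumptions; Tweek's real check_hook_enabled() may differ.
    from pathlib import Path
    from typing import Optional

    import yaml  # assumes PyYAML is available

    def hook_enabled(hook_name: str, start: Optional[Path] = None) -> bool:
        """Find the nearest .tweek.yaml and read hooks.<hook_name>, defaulting to True."""
        current = (start or Path.cwd()).resolve()
        for directory in (current, *current.parents):
            candidate = directory / ".tweek.yaml"
            if candidate.exists():
                data = yaml.safe_load(candidate.read_text()) or {}
                return bool((data.get("hooks") or {}).get(hook_name, True))
        return True  # no .tweek.yaml found: screening stays on

    print(hook_enabled("post_tool_use"))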
tweek/config/templates.py ADDED
@@ -0,0 +1,136 @@
+ """
+ Tweek Configuration Templates
+
+ Provides template loading and deployment for self-documenting config files.
+ Templates are bundled with the package and deployed during installation.
+ Users get well-commented files with all options visible and sensible defaults.
+ """
+ from __future__ import annotations
+
+ from pathlib import Path
+ from typing import Optional
+
+ TEMPLATES_DIR = Path(__file__).parent / "templates"
+
+ # Registry of all user-facing configuration files.
+ # Used by `tweek config edit` and the install flow.
+ CONFIG_FILES = [
+     {
+         "id": "config",
+         "name": "Security Settings",
+         "template": "config.yaml.template",
+         "target_path": "~/.tweek/config.yaml",
+         "description": "LLM providers, tool tiers, rate limiting, session analysis",
+         "editable": True,
+     },
+     {
+         "id": "env",
+         "name": "API Keys",
+         "template": "env.template",
+         "target_path": "~/.tweek/.env",
+         "description": "LLM provider API keys (Google, OpenAI, xAI, Anthropic)",
+         "editable": True,
+     },
+     {
+         "id": "overrides",
+         "name": "Security Overrides",
+         "template": "overrides.yaml.template",
+         "target_path": "~/.tweek/overrides.yaml",
+         "description": "Whitelists, pattern toggles, trust levels (human-only)",
+         "editable": True,
+     },
+     {
+         "id": "hooks",
+         "name": "Hook Control",
+         "template": "tweek.yaml.template",
+         "target_path": ".tweek.yaml",
+         "description": "Per-directory enable/disable for pre and post screening",
+         "editable": True,
+     },
+     {
+         "id": "defaults",
+         "name": "Default Reference",
+         "template": None,
+         "target_path": str(Path(__file__).parent / "tiers.yaml"),
+         "description": "Bundled defaults — read-only reference for all options",
+         "editable": False,
+     },
+ ]
+
+
+ def get_template_content(template_name: str) -> str:
+     """Read a template file and return its content."""
+     template_path = TEMPLATES_DIR / template_name
+     if not template_path.exists():
+         raise FileNotFoundError(f"Template not found: {template_path}")
+     return template_path.read_text()
+
+
+ def deploy_template(
+     template_name: str,
+     target_path: Path,
+     overwrite: bool = False,
+ ) -> bool:
+     """Deploy a template to a target path.
+
+     Returns True if the file was created, False if skipped.
+     Does NOT overwrite existing files unless overwrite=True.
+     """
+     if target_path.exists() and not overwrite:
+         return False
+
+     content = get_template_content(template_name)
+     target_path.parent.mkdir(parents=True, exist_ok=True)
+     target_path.write_text(content)
+
+     # Set restrictive permissions for sensitive files
+     if ".env" in target_path.name:
+         target_path.chmod(0o600)
+
+     return True
+
+
+ def resolve_target_path(config_entry: dict, global_scope: bool = True) -> Path:
+     """Resolve the target path for a config file entry."""
+     path_str = config_entry["target_path"]
+     if path_str.startswith("~"):
+         return Path(path_str).expanduser()
+     if path_str.startswith("."):
+         if global_scope:
+             return Path.home() / path_str
+         return Path.cwd() / path_str
+     return Path(path_str)
+
+
+ def deploy_all_templates(global_scope: bool = True) -> list[tuple[str, Path, bool]]:
+     """Deploy all templates that don't already exist.
+
+     Returns list of (name, path, created) tuples.
+     """
+     results = []
+     for entry in CONFIG_FILES:
+         if entry["template"] is None:
+             continue
+         target = resolve_target_path(entry, global_scope)
+         created = deploy_template(entry["template"], target)
+         results.append((entry["name"], target, created))
+     return results
+
+
+ def append_active_section(target_path: Path, section_yaml: str) -> None:
+     """Append an active (uncommented) config section to a template-based file.
+
+     Used by the install flow to write user-selected LLM provider settings
+     below the template comments without destroying them (unlike yaml.dump).
+     """
+     existing = target_path.read_text() if target_path.exists() else ""
+     marker = "# --- Active Configuration (set during install) ---"
+
+     if marker in existing:
+         # Replace existing active section
+         parts = existing.split(marker)
+         existing = parts[0].rstrip()
+
+     target_path.write_text(
+         existing.rstrip() + "\n\n" + marker + "\n" + section_yaml + "\n"
+     )
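Since the module above is self-contained, it is easy to exercise directly. A small usage sketch; the llm_review snippet passed to append_active_section() is invented for illustration and only borrows key names that appear elsewhere in this diff:

    from pathlib import Path

    from tweek.config.templates import append_active_section, deploy_all_templates

    # Deploy any missing config files (existing ones are left untouched).
    for name, path, created in deploy_all_templates(global_scope=True):
        print(f"{name}: {path} ({'created' if created else 'already present'})")

    # Record a user-selected provider below the template comments,
    # the way the install flow is described as doing.
    append_active_section(
        Path("~/.tweek/config.yaml").expanduser(),
        "llm_review:\n  provider: openai\n  model: auto\n",
    )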
tweek/config/tiers.yaml CHANGED
@@ -9,7 +9,7 @@
  #
  # Security Layers:
  # 1. Rate Limiting - Detect resource theft and burst attacks
- # 2. Pattern Match - Regex patterns for known attack vectors (259 patterns)
+ # 2. Pattern Match - Regex patterns for known attack vectors (262 patterns)
  # 2.5 Heuristic Score - Signal-based scoring for confidence-gated LLM escalation
  # 3. LLM Review - Semantic analysis using Claude Haiku or local LLM
  # 4. Session Scan - Cross-turn anomaly detection
@@ -18,11 +18,11 @@
  version: 2

  # LLM Review Configuration
- # Supports: anthropic, openai, google, or any OpenAI-compatible endpoint
- # Provider "auto" checks: local (if enabled) ANTHROPIC_API_KEY → OPENAI_API_KEY → GOOGLE_API_KEY
+ # Supports: anthropic, openai, google, xai (Grok), or any OpenAI-compatible endpoint
+ # Provider "auto" checks: local ONNX → GOOGLE_API_KEY → OPENAI_API_KEY → XAI_API_KEY → ANTHROPIC_API_KEY
  llm_review:
    enabled: true
-   provider: auto      # auto | local | anthropic | openai | google | fallback
+   provider: auto      # auto | local | anthropic | openai | google | xai | fallback
    model: auto         # auto = provider default, or explicit model name
    base_url: null      # For OpenAI-compatible endpoints (Ollama, LM Studio, etc.)
    api_key_env: null   # Override env var name (default: provider-specific)
@@ -119,6 +119,7 @@ tools:
    # Dangerous - system commands, highest scrutiny
    Bash: dangerous
    Task: default       # Subagents inherit parent screening
+   Skill: safe         # Skill invocation — tracked for context, not screened itself

  # Skill classifications (user-defined skills)
  skills:
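The "auto" resolution order documented above (local ONNX model first, then GOOGLE_API_KEY, OPENAI_API_KEY, XAI_API_KEY, ANTHROPIC_API_KEY) is easy to express as a sketch. The helper below is illustrative only and is not Tweek's implementation:

    import os

    # Hypothetical helper mirroring the documented "auto" order.
    _CLOUD_PROVIDERS = [
        ("google", "GOOGLE_API_KEY"),
        ("openai", "OPENAI_API_KEY"),
        ("xai", "XAI_API_KEY"),
        ("anthropic", "ANTHROPIC_API_KEY"),
    ]

    def resolve_auto_provider(local_model_available: bool) -> str:
        if local_model_available:
            return "local"
        for provider, env_var in _CLOUD_PROVIDERS:
            if os.environ.get(env_var):
                return provider
        return "fallback"

    print(resolve_auto_provider(local_model_available=False))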
tweek/diagnostics.py CHANGED
@@ -54,6 +54,7 @@ def run_health_checks(verbose: bool = False) -> List[HealthCheck]:
          _check_mcp_available,
          _check_proxy_config,
          _check_plugin_integrity,
+         _check_local_model,
          _check_llm_review,
      ]

@@ -590,6 +591,108 @@ def _check_plugin_integrity(verbose: bool = False) -> HealthCheck:
      )


+ def _check_local_model(verbose: bool = False) -> HealthCheck:
+     """Check local classifier model status and verify inference works."""
+     try:
+         from tweek.security.local_model import LOCAL_MODEL_AVAILABLE, get_local_model
+         from tweek.security.model_registry import (
+             get_default_model_name,
+             get_model_size,
+             is_model_installed,
+             verify_model_hashes,
+         )
+     except ImportError:
+         return HealthCheck(
+             name="local_model",
+             label="Local Model",
+             status=CheckStatus.SKIPPED,
+             message="Local model module not available",
+             fix_hint="Install with: pip install tweek[local-models]",
+         )
+
+     if not LOCAL_MODEL_AVAILABLE:
+         return HealthCheck(
+             name="local_model",
+             label="Local Model",
+             status=CheckStatus.SKIPPED,
+             message="Dependencies not installed (optional)",
+             fix_hint="Install with: pip install tweek[local-models]",
+         )
+
+     default_name = get_default_model_name()
+
+     if not is_model_installed(default_name):
+         return HealthCheck(
+             name="local_model",
+             label="Local Model",
+             status=CheckStatus.WARNING,
+             message="Dependencies installed but model not downloaded",
+             fix_hint="Run: tweek model download",
+         )
+
+     # Model is installed — get size info
+     size = get_model_size(default_name)
+     size_str = f"{size / 1024 / 1024:.1f} MB" if size else "unknown size"
+
+     # Verify SHA-256 integrity before running inference
+     try:
+         hash_results = verify_model_hashes(default_name)
+         mismatched = [f for f, status in hash_results.items() if status == "mismatch"]
+         if mismatched:
+             files_str = ", ".join(mismatched)
+             return HealthCheck(
+                 name="local_model",
+                 label="Local Model",
+                 status=CheckStatus.ERROR,
+                 message=f"SHA-256 integrity check failed for: {files_str}",
+                 fix_hint="Run: tweek model download --force (to re-download)",
+             )
+     except Exception:
+         pass  # Hash verification is best-effort; don't block on it
+
+     # Run inference smoke test to verify the model actually works
+     try:
+         model = get_local_model(default_name)
+         if model is None:
+             return HealthCheck(
+                 name="local_model",
+                 label="Local Model",
+                 status=CheckStatus.ERROR,
+                 message=f"{default_name} installed ({size_str}) but failed to load",
+                 fix_hint="Try: tweek model download --force",
+             )
+
+         result = model.predict("hello world")
+         if result is None or not hasattr(result, "risk_level"):
+             return HealthCheck(
+                 name="local_model",
+                 label="Local Model",
+                 status=CheckStatus.ERROR,
+                 message=f"{default_name} installed ({size_str}) but inference returned no result",
+                 fix_hint="Try: tweek model download --force",
+             )
+
+         msg = f"{default_name} installed ({size_str}), inference OK"
+         if verbose:
+             msg += f" ({result.inference_time_ms:.0f}ms)"
+
+         return HealthCheck(
+             name="local_model",
+             label="Local Model",
+             status=CheckStatus.OK,
+             message=msg,
+         )
+
+     except Exception as e:
+         return HealthCheck(
+             name="local_model",
+             label="Local Model",
+             status=CheckStatus.ERROR,
+             message=f"{default_name} installed ({size_str}) but inference failed: {e}",
+             fix_hint="Try: tweek model download --force",
+         )
+
+
  def _check_llm_review(verbose: bool = False) -> HealthCheck:
      """Check LLM review provider availability and configuration."""
      try:
@@ -604,45 +707,22 @@ def _check_llm_review(verbose: bool = False) -> HealthCheck:
      reviewer = get_llm_reviewer()

      if not reviewer.enabled:
-         # Check which env vars are missing to give specific guidance
-         missing_keys = []
-         for provider, env_names in DEFAULT_API_KEY_ENVS.items():
-             if isinstance(env_names, list):
-                 if not any(os.environ.get(e) for e in env_names):
-                     missing_keys.append(f"{' or '.join(env_names)} ({provider})")
-             else:
-                 if not os.environ.get(env_names):
-                     missing_keys.append(f"{env_names} ({provider})")
-
          hint_parts = [
-             "To enable cloud LLM review for uncertain classifications:",
-             " Set one of: " + ", ".join(
-                 k.split(" (")[0] for k in missing_keys
-             ),
+             "Set one of: ANTHROPIC_API_KEY, OPENAI_API_KEY, GOOGLE_API_KEY,",
+             " GEMINI_API_KEY, XAI_API_KEY, or other LLM provider API key",
+             " as an environment variable, or configure a provider in",
+             " ~/.tweek/config.yaml.",
+             "",
+             " Alternatively, install a local model for offline review.",
+             "",
+             " See: docs/CONFIGURATION.md (LLM Review Provider section)",
          ]

-         if not LOCAL_MODEL_AVAILABLE:
-             hint_parts.append(
-                 " Or install the local model: pip install 'tweek[local]'"
-             )
-         else:
-             hint_parts.append(
-                 " Local ONNX model is available but could not initialize"
-             )
-
-         hint_parts.append(
-             " Or install Ollama (https://ollama.ai) for local LLM review"
-         )
-         hint_parts.append(
-             " See: docs/CONFIGURATION.md or ~/.tweek/config.yaml"
-         )
-
          return HealthCheck(
              name="llm_review",
              label="LLM Review",
              status=CheckStatus.WARNING,
-             message="No LLM provider available — review disabled, "
-                     "pattern matching and heuristic scoring still active",
+             message="No LLM provider configured — using pattern matching only",
              fix_hint="\n".join(hint_parts),
          )

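The new check is returned through the same run_health_checks() list as the existing ones. A minimal consumer, using only names visible in this diff (run_health_checks, the HealthCheck fields, CheckStatus); the import location of CheckStatus is assumed:

    # Sketch: print health-check results. Assumes CheckStatus is importable from
    # tweek.diagnostics alongside HealthCheck; the output formatting is invented.
    from tweek.diagnostics import CheckStatus, run_health_checks

    for check in run_health_checks(verbose=True):
        print(f"[{check.status}] {check.label}: {check.message}")
        if check.status != CheckStatus.OK and check.fix_hint:
            print(f"    fix: {check.fix_hint}")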
tweek/hooks/overrides.py CHANGED
@@ -346,6 +346,10 @@ def is_protected_config_file(file_path: str) -> bool:
              if part == ".tweek":
                  return True

+         # Protect .tweek.yaml (per-directory hook control)
+         if resolved.name == ".tweek.yaml":
+             return True
+
      except (OSError, ValueError):
          pass
      return False
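With this change, a project-level .tweek.yaml gets the same treatment as files under ~/.tweek/. A quick illustration of the expected behaviour (paths are hypothetical; is_protected_config_file is the function patched above):

    from tweek.hooks.overrides import is_protected_config_file

    print(is_protected_config_file("/home/user/.tweek/overrides.yaml"))  # True: lives under .tweek/
    print(is_protected_config_file("/home/user/project/.tweek.yaml"))    # True: added in this release
    print(is_protected_config_file("/home/user/project/app.py"))         # False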
tweek/hooks/post_tool_use.py CHANGED
@@ -11,7 +11,7 @@ web pages, documents, and other ingested content.

  Screening Pipeline:
    1. Language Detection — identify non-English content
-   2. Pattern Matching — 215 regex patterns for known attack vectors
+   2. Pattern Matching — 262 regex patterns for known attack vectors
    3. LLM Review — semantic analysis if non-English escalation triggers

  Claude Code PostToolUse Protocol:
@@ -284,6 +284,32 @@ def screen_content(
      except Exception:
          pass

+     # Provenance: escalate session taint based on findings
+     if (findings or llm_finding) and session_id:
+         try:
+             from tweek.memory.provenance import get_taint_store, severity_to_taint
+             _taint_store = get_taint_store()
+             if findings:
+                 _highest_sev = max(
+                     findings,
+                     key=lambda f: {"critical": 4, "high": 3, "medium": 2, "low": 1}.get(f["severity"], 0)
+                 )["severity"]
+             else:
+                 _highest_sev = llm_finding.get("risk_level", "medium")
+             _taint_level = severity_to_taint(_highest_sev)
+             _taint_source = tool_input.get("file_path") or tool_input.get("url") or "unknown"
+             _taint_reason = f"{len(findings)} pattern(s) found"
+             if findings:
+                 _taint_reason += f": {findings[0]['pattern_name']}"
+             _taint_store.record_taint(
+                 session_id=session_id,
+                 taint_level=_taint_level,
+                 source=f"{tool_name}:{_taint_source}",
+                 reason=_taint_reason,
+             )
+         except Exception:
+             pass  # Provenance is best-effort
+
      # Step 5: Content redaction for critical deterministic matches
      # Replace matched content with [REDACTED] to prevent AI from acting on it
      redacted_content = None
@@ -385,6 +411,16 @@ def process_hook(input_data: Dict[str, Any]) -> Dict[str, Any]:
      if whitelist_match:
          return {}

+     # Provenance: record external ingest for content-source tools
+     try:
+         from tweek.memory.provenance import get_taint_store, EXTERNAL_SOURCE_TOOLS
+         if session_id and tool_name in EXTERNAL_SOURCE_TOOLS:
+             _taint_store = get_taint_store()
+             _source = tool_input.get("file_path") or tool_input.get("url") or tool_name
+             _taint_store.record_external_ingest(session_id, f"{tool_name}:{_source}")
+     except Exception:
+         pass  # Provenance is best-effort
+
      # Extract text content from the response
      content = extract_response_content(tool_name, tool_response)

@@ -422,6 +458,15 @@ def process_hook(input_data: Dict[str, Any]) -> Dict[str, Any]:

  def main():
      """Read hook input from stdin, process, and output decision."""
+     # Check if post_tool_use hook is enabled for this directory
+     try:
+         from tweek.hooks.pre_tool_use import check_hook_enabled
+         if not check_hook_enabled("post_tool_use"):
+             print("{}")
+             return
+     except ImportError:
+         pass  # If import fails, default to enabled (fail safe)
+
      try:
          raw = sys.stdin.read()
          if not raw.strip():