@trac3er/oh-my-god 2.0.0 → 2.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +8 -8
- package/.claude-plugin/plugin.json +5 -4
- package/.claude-plugin/scripts/uninstall.sh +74 -3
- package/.claude-plugin/scripts/update.sh +78 -3
- package/.coveragerc +26 -0
- package/.mcp.json +4 -4
- package/CHANGELOG.md +14 -0
- package/CODE_OF_CONDUCT.md +27 -0
- package/CONTRIBUTING.md +62 -0
- package/OMG-setup.sh +1201 -355
- package/README.md +77 -56
- package/SECURITY.md +25 -0
- package/agents/__init__.py +1 -0
- package/agents/model_roles.py +196 -0
- package/agents/omg-architect-mode.md +3 -5
- package/agents/omg-backend-engineer.md +3 -5
- package/agents/omg-database-engineer.md +3 -5
- package/agents/omg-frontend-designer.md +4 -5
- package/agents/omg-implement-mode.md +4 -5
- package/agents/omg-infra-engineer.md +3 -5
- package/agents/omg-research-mode.md +4 -6
- package/agents/omg-security-auditor.md +3 -5
- package/agents/omg-testing-engineer.md +3 -5
- package/build/lib/yaml.py +321 -0
- package/commands/OMG:ai-commit.md +101 -14
- package/commands/OMG:arch.md +302 -19
- package/commands/OMG:ccg.md +12 -7
- package/commands/OMG:compat.md +25 -17
- package/commands/OMG:cost.md +173 -13
- package/commands/OMG:crazy.md +1 -1
- package/commands/OMG:create-agent.md +170 -20
- package/commands/OMG:deps.md +235 -17
- package/commands/OMG:domain-init.md +1 -1
- package/commands/OMG:escalate.md +41 -12
- package/commands/OMG:health-check.md +37 -13
- package/commands/OMG:init.md +122 -14
- package/commands/OMG:project-init.md +1 -1
- package/commands/OMG:session-branch.md +76 -9
- package/commands/OMG:session-fork.md +42 -5
- package/commands/OMG:session-merge.md +124 -8
- package/commands/OMG:setup.md +69 -12
- package/commands/OMG:stats.md +215 -14
- package/commands/OMG:teams.md +19 -10
- package/config/lsp_languages.yaml +8 -0
- package/hooks/__init__.py +0 -0
- package/hooks/_agent_registry.py +423 -0
- package/hooks/_analytics.py +291 -0
- package/hooks/_budget.py +31 -0
- package/hooks/_common.py +569 -0
- package/hooks/_compression_optimizer.py +119 -0
- package/hooks/_cost_ledger.py +176 -0
- package/hooks/_learnings.py +126 -0
- package/hooks/_memory.py +103 -0
- package/hooks/_protected_context.py +150 -0
- package/hooks/_token_counter.py +221 -0
- package/hooks/branch_manager.py +236 -0
- package/hooks/budget_governor.py +232 -0
- package/hooks/circuit-breaker.py +270 -0
- package/hooks/compression_feedback.py +254 -0
- package/hooks/config-guard.py +216 -0
- package/hooks/context_pressure.py +53 -0
- package/hooks/credential_store.py +1020 -0
- package/hooks/fetch-rate-limits.py +212 -0
- package/hooks/firewall.py +48 -0
- package/hooks/hashline-formatter-bridge.py +224 -0
- package/hooks/hashline-injector.py +273 -0
- package/hooks/hashline-validator.py +216 -0
- package/hooks/idle-detector.py +95 -0
- package/hooks/intentgate-keyword-detector.py +188 -0
- package/hooks/magic-keyword-router.py +195 -0
- package/hooks/policy_engine.py +505 -0
- package/hooks/post-tool-failure.py +19 -0
- package/hooks/post-write.py +219 -0
- package/hooks/post_write.py +46 -0
- package/hooks/pre-compact.py +398 -0
- package/hooks/pre-tool-inject.py +98 -0
- package/hooks/prompt-enhancer.py +672 -0
- package/hooks/quality-runner.py +191 -0
- package/hooks/query.py +512 -0
- package/hooks/secret-guard.py +61 -0
- package/hooks/secret_audit.py +144 -0
- package/hooks/session-end-capture.py +137 -0
- package/hooks/session-start.py +277 -0
- package/hooks/setup_wizard.py +582 -0
- package/hooks/shadow_manager.py +297 -0
- package/hooks/state_migration.py +225 -0
- package/hooks/stop-gate.py +7 -0
- package/hooks/stop_dispatcher.py +945 -0
- package/hooks/test-validator.py +361 -0
- package/hooks/test_generator_hook.py +123 -0
- package/hooks/todo-state-tracker.py +114 -0
- package/hooks/tool-ledger.py +149 -0
- package/hooks/trust_review.py +585 -0
- package/hud/omg-hud.mjs +31 -1
- package/lab/__init__.py +1 -0
- package/lab/pipeline.py +75 -0
- package/lab/policies.py +52 -0
- package/package.json +7 -18
- package/plugins/README.md +33 -61
- package/plugins/advanced/commands/OMG:deep-plan.md +3 -3
- package/plugins/advanced/commands/OMG:learn.md +1 -1
- package/plugins/advanced/commands/OMG:security-review.md +3 -3
- package/plugins/advanced/commands/OMG:ship.md +1 -1
- package/plugins/advanced/plugin.json +1 -1
- package/plugins/core/plugin.json +8 -3
- package/plugins/dephealth/__init__.py +0 -0
- package/plugins/dephealth/cve_scanner.py +188 -0
- package/plugins/dephealth/license_checker.py +135 -0
- package/plugins/dephealth/manifest_detector.py +423 -0
- package/plugins/dephealth/vuln_analyzer.py +169 -0
- package/plugins/testgen/__init__.py +0 -0
- package/plugins/testgen/codamosa_engine.py +402 -0
- package/plugins/testgen/edge_case_synthesizer.py +184 -0
- package/plugins/testgen/framework_detector.py +271 -0
- package/plugins/testgen/skeleton_generator.py +219 -0
- package/plugins/viz/__init__.py +0 -0
- package/plugins/viz/ast_parser.py +139 -0
- package/plugins/viz/diagram_generator.py +192 -0
- package/plugins/viz/graph_builder.py +444 -0
- package/plugins/viz/native_parsers.py +259 -0
- package/plugins/viz/regex_parser.py +112 -0
- package/pyproject.toml +81 -0
- package/rules/contextual/write-verify.md +2 -2
- package/rules/core/00-truth.md +1 -1
- package/rules/core/01-surgical.md +1 -1
- package/rules/core/02-circuit-breaker.md +2 -2
- package/rules/core/03-ensemble.md +3 -3
- package/rules/core/04-testing.md +3 -3
- package/runtime/__init__.py +32 -0
- package/runtime/adapters/__init__.py +13 -0
- package/runtime/adapters/claude.py +60 -0
- package/runtime/adapters/gpt.py +53 -0
- package/runtime/adapters/local.py +53 -0
- package/runtime/adoption.py +212 -0
- package/runtime/business_workflow.py +220 -0
- package/runtime/cli_provider.py +85 -0
- package/runtime/compat.py +1299 -0
- package/runtime/custom_agent_loader.py +366 -0
- package/runtime/dispatcher.py +47 -0
- package/runtime/ecosystem.py +371 -0
- package/runtime/legacy_compat.py +7 -0
- package/runtime/mcp_config_writers.py +115 -0
- package/runtime/mcp_lifecycle.py +153 -0
- package/runtime/mcp_memory_server.py +135 -0
- package/runtime/memory_parsers/__init__.py +0 -0
- package/runtime/memory_parsers/chatgpt_parser.py +257 -0
- package/runtime/memory_parsers/claude_import.py +107 -0
- package/runtime/memory_parsers/export.py +97 -0
- package/runtime/memory_parsers/gemini_import.py +91 -0
- package/runtime/memory_parsers/kimi_import.py +91 -0
- package/runtime/memory_store.py +215 -0
- package/runtime/omc_compat.py +7 -0
- package/runtime/providers/__init__.py +0 -0
- package/runtime/providers/codex_provider.py +112 -0
- package/runtime/providers/gemini_provider.py +128 -0
- package/runtime/providers/kimi_provider.py +151 -0
- package/runtime/providers/opencode_provider.py +144 -0
- package/runtime/subagent_dispatcher.py +362 -0
- package/runtime/team_router.py +1167 -0
- package/runtime/tmux_session_manager.py +169 -0
- package/scripts/check-omg-compat-contract-snapshot.py +137 -0
- package/scripts/check-omg-contract-snapshot.py +12 -0
- package/scripts/check-omg-public-ready.py +193 -0
- package/scripts/check-omg-standalone-clean.py +103 -0
- package/scripts/legacy_to_omg_migrate.py +29 -0
- package/scripts/migrate-legacy.py +464 -0
- package/scripts/omc_to_omg_migrate.py +12 -0
- package/scripts/omg.py +492 -0
- package/scripts/settings-merge.py +283 -0
- package/scripts/verify-standalone.sh +8 -4
- package/settings.json +126 -29
- package/templates/profile.yaml +1 -1
- package/tools/__init__.py +2 -0
- package/tools/browser_consent.py +289 -0
- package/tools/browser_stealth.py +481 -0
- package/tools/browser_tool.py +448 -0
- package/tools/changelog_generator.py +347 -0
- package/tools/commit_splitter.py +746 -0
- package/tools/config_discovery.py +151 -0
- package/tools/config_merger.py +449 -0
- package/tools/dashboard_generator.py +300 -0
- package/tools/git_inspector.py +298 -0
- package/tools/lsp_client.py +275 -0
- package/tools/lsp_discovery.py +231 -0
- package/tools/lsp_operations.py +392 -0
- package/tools/pr_generator.py +404 -0
- package/tools/python_repl.py +656 -0
- package/tools/python_sandbox.py +609 -0
- package/tools/search_providers/__init__.py +77 -0
- package/tools/search_providers/brave.py +115 -0
- package/tools/search_providers/exa.py +116 -0
- package/tools/search_providers/jina.py +104 -0
- package/tools/search_providers/perplexity.py +139 -0
- package/tools/search_providers/synthetic.py +74 -0
- package/tools/session_snapshot.py +736 -0
- package/tools/ssh_manager.py +912 -0
- package/tools/theme_engine.py +294 -0
- package/tools/theme_selector.py +137 -0
- package/tools/web_search.py +622 -0
- package/yaml.py +321 -0
- package/.claude-plugin/scripts/install.sh +0 -9
- package/bun.lock +0 -23
- package/bunfig.toml +0 -3
- package/hooks/_budget.ts +0 -1
- package/hooks/_common.ts +0 -63
- package/hooks/circuit-breaker.ts +0 -101
- package/hooks/config-guard.ts +0 -4
- package/hooks/firewall.ts +0 -20
- package/hooks/policy_engine.ts +0 -156
- package/hooks/post-tool-failure.ts +0 -22
- package/hooks/post-write.ts +0 -4
- package/hooks/pre-tool-inject.ts +0 -4
- package/hooks/prompt-enhancer.ts +0 -46
- package/hooks/quality-runner.ts +0 -24
- package/hooks/secret-guard.ts +0 -4
- package/hooks/session-end-capture.ts +0 -19
- package/hooks/session-start.ts +0 -19
- package/hooks/shadow_manager.ts +0 -81
- package/hooks/stop-gate.ts +0 -22
- package/hooks/stop_dispatcher.ts +0 -147
- package/hooks/test-generator-hook.ts +0 -4
- package/hooks/tool-ledger.ts +0 -27
- package/hooks/trust_review.ts +0 -175
- package/lab/pipeline.ts +0 -75
- package/lab/policies.ts +0 -68
- package/runtime/common.ts +0 -111
- package/runtime/compat.ts +0 -174
- package/runtime/dispatcher.ts +0 -25
- package/runtime/ecosystem.ts +0 -186
- package/runtime/provider_bootstrap.ts +0 -99
- package/runtime/provider_smoke.ts +0 -34
- package/runtime/release_readiness.ts +0 -186
- package/runtime/team_router.ts +0 -144
- package/scripts/check-omg-compat-contract-snapshot.ts +0 -20
- package/scripts/check-omg-standalone-clean.ts +0 -12
- package/scripts/check-runtime-clean.ts +0 -94
- package/scripts/omg.ts +0 -352
- package/scripts/settings-merge.ts +0 -93
- package/tools/commit_splitter.ts +0 -23
- package/tools/git_inspector.ts +0 -18
- package/tools/session_snapshot.ts +0 -47
- package/trac3er-oh-my-god-2.0.0.tgz +0 -0
- package/tsconfig.json +0 -15
|
@@ -0,0 +1,219 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
PostToolUse Hook (Write/Edit/MultiEdit): Auto-Format + Secret Scan (Enterprise)
|
|
4
|
+
1. Auto-format written files if opted-in via .omg/state/quality-gate.json (non-blocking)
|
|
5
|
+
2. Scan written content for hardcoded secrets (blocking: exit 2)
|
|
6
|
+
"""
|
|
7
|
+
import json, sys, os, re, subprocess
|
|
8
|
+
import contextlib
|
|
9
|
+
import importlib.util
|
|
10
|
+
from datetime import datetime, timezone
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def _load_local_attr(module_name, filename, attr_name):
    """Load attribute *attr_name* from a sibling hook module by file path.

    Hooks run as standalone scripts (no package context), so sibling modules
    in this directory must be loaded explicitly via importlib.

    Args:
        module_name: Name to register the loaded module under.
        filename: Sibling file name relative to this file's directory.
        attr_name: Attribute to fetch from the loaded module.

    Raises:
        ImportError: if no loadable spec can be built for the file.
        AttributeError: if the module lacks *attr_name* (via getattr).
    """
    module_path = os.path.join(os.path.dirname(__file__), filename)
    spec = importlib.util.spec_from_file_location(module_name, module_path)
    if spec is None or spec.loader is None:
        # Fix: previous message was the literal f"Unable to load (unknown)"
        # (an f-string with no placeholders) — name the failing module/path.
        raise ImportError(f"Unable to load {module_name} from {module_path}")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return getattr(module, attr_name)
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
# Pull shared helpers from sibling hook files via file-path import (hooks are
# executed as standalone scripts, so plain `import _common` is unreliable).
_resolve_project_dir = _load_local_attr("omg_hooks_common", "_common.py", "_resolve_project_dir")
resolve_state_file = _load_local_attr("omg_hooks_state_migration", "state_migration.py", "resolve_state_file")
detect_high_entropy_strings = _load_local_attr("omg_hooks_post_write", "post_write.py", "detect_high_entropy_strings")
|
|
26
|
+
|
|
27
|
+
# Hook input arrives as a JSON envelope on stdin; bail out quietly on
# malformed or empty input — a hook must never crash the tool call.
try:
    data = json.load(sys.stdin)
except (json.JSONDecodeError, EOFError):
    sys.exit(0)

# Path of the file the Write/Edit/MultiEdit tool just touched.
file_path = data.get("tool_input", {}).get("file_path", "")
if not file_path:
    sys.exit(0)

# Resolve relative paths against project dir
project_dir = _resolve_project_dir()
if not os.path.isabs(file_path):
    file_path = os.path.join(project_dir, file_path)

# The reported path may not exist (e.g. the edit was rejected upstream).
if not os.path.exists(file_path):
    sys.exit(0)

# Lower-cased extension drives the formatter map and the skip lists below.
ext = os.path.splitext(file_path)[1].lower()
|
|
45
|
+
|
|
46
|
+
# ── 1. AUTO-FORMAT (opt-in via quality-gate.json, non-blocking) ──
# §4.4: Auto-format only runs if the project has opted in via quality-gate.json.
# This avoids unintended tool execution (supply-chain risk) on projects without
# explicit formatter configuration.
format_enabled = False
qg_path = resolve_state_file(project_dir, "state/quality-gate.json", "quality-gate.json")
with contextlib.suppress(Exception):  # intentional: cleanup — format stays disabled on config error
    if os.path.exists(qg_path):
        with open(qg_path, "r") as f:
            qg = json.load(f)
        # "format" key must exist and not be null/empty
        if qg.get("format"):
            format_enabled = True

# Extension → formatter command; the written file's path is appended on invoke.
FORMAT_MAP = {
    ".ts": ["npx", "--no-install", "prettier", "--write"],
    ".tsx": ["npx", "--no-install", "prettier", "--write"],
    ".js": ["npx", "--no-install", "prettier", "--write"],
    ".jsx": ["npx", "--no-install", "prettier", "--write"],
    ".css": ["npx", "--no-install", "prettier", "--write"],
    ".json": ["npx", "--no-install", "prettier", "--write"],
    ".py": ["ruff", "format"], ".go": ["gofmt", "-w"], ".rs": ["rustfmt"],
}
if format_enabled and ext in FORMAT_MAP:
    fmt_cmd = FORMAT_MAP[ext]
    # Validate formatter binary exists before running (supply-chain defense)
    import shutil
    if shutil.which(fmt_cmd[0]):
        try:
            # Non-blocking: formatter output is swallowed; any failure is ignored.
            subprocess.run(fmt_cmd + [file_path], capture_output=True, timeout=15, cwd=project_dir)
        except (FileNotFoundError, subprocess.TimeoutExpired, OSError):
            pass
|
|
78
|
+
|
|
79
|
+
# ── 2. SECRET SCAN (blocking) ──
# Skip known non-secret file types first — the extension test is free,
# so do it before reading up to 1MB of file content (the original read
# the file and only then checked SKIP_EXTENSIONS).
SKIP_EXTENSIONS = {".lock", ".sum", ".svg", ".png", ".jpg", ".gif", ".ico", ".woff", ".woff2", ".ttf"}
if ext in SKIP_EXTENSIONS:
    sys.exit(0)

# Skip binary files and very large files
try:
    file_size = os.path.getsize(file_path)
    if file_size > 1_000_000:  # 1MB limit
        sys.exit(0)
    with open(file_path, "r", encoding="utf-8", errors="ignore") as f:
        content = f.read()
except Exception as e:
    # Unreadable file: log and pass — a scan failure must not block the write.
    print(f"[OMG] post-write.py: {type(e).__name__}: {e}", file=sys.stderr)
    sys.exit(0)
|
|
95
|
+
|
|
96
|
+
# (pattern, label) pairs. All patterns are applied per-line with re.IGNORECASE
# by the scan loop below; a match on any of them blocks the write signal.
SECRET_PATTERNS = [
    # AWS
    (r"AKIA[0-9A-Z]{16}", "AWS Access Key ID"),
    (r"(?:aws_secret_access_key|AWS_SECRET)\s*[:=]\s*['\"]?[A-Za-z0-9/+=]{40}['\"]?", "AWS Secret Key"),
    # Private keys
    (r"-----BEGIN (RSA |EC |DSA |OPENSSH )?PRIVATE KEY-----", "Private Key"),
    # Generic API keys/tokens (in assignment context)
    (r"""(?:api[_-]?key|api[_-]?secret|auth[_-]?token|access[_-]?token|secret[_-]?key)\s*[:=]\s*['"][A-Za-z0-9+/=_\-.]{20,}['"]""", "Hardcoded API Key/Token"),
    # GitHub
    (r"gh[ps]_[A-Za-z0-9_]{36,}", "GitHub Token"),
    (r"github_pat_[A-Za-z0-9_]{22,}", "GitHub Fine-grained PAT"),
    # Slack
    (r"xoxb-[0-9]{10,}-[A-Za-z0-9]{20,}", "Slack Bot Token"),
    (r"xoxp-[0-9]{10,}-[0-9]{10,}-[A-Za-z0-9]{20,}", "Slack User Token"),
    # Stripe
    (r"sk_live_[A-Za-z0-9]{20,}", "Stripe Live Secret Key"),
    (r"rk_live_[A-Za-z0-9]{20,}", "Stripe Restricted Key"),
    (r"pk_live_[A-Za-z0-9]{20,}", "Stripe Live Publishable Key (should use env)"),
    # Supabase / Firebase
    (r"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9\.[A-Za-z0-9_-]{20,}", "Supabase/Firebase Service Key"),
    # Google
    (r"AIza[A-Za-z0-9_-]{35}", "Google API Key"),
    # Twilio
    (r"SK[A-Za-z0-9]{32}", "Twilio API Key"),
    # SendGrid
    (r"SG\.[A-Za-z0-9_-]{22}\.[A-Za-z0-9_-]{43}", "SendGrid API Key"),
    # Passwords in config
    (r"""(?:password|passwd|pwd)\s*[:=]\s*['"][^'"]{8,}['"]""", "Hardcoded Password"),
    # Generic secret in env-like format
    (r"""(?:SECRET|TOKEN|PRIVATE_KEY|ENCRYPTION_KEY)\s*=\s*['"]?[A-Za-z0-9+/=_\-.]{16,}['"]?""", "Hardcoded Secret"),
    # Database connection strings with credentials
    (r"(?:postgres|mysql|mongodb|redis)://[^:]+:[^@]+@", "Database URL with credentials"),
    # JWT tokens (3 base64 segments separated by dots)
    (r"eyJ[A-Za-z0-9_-]{10,}\.eyJ[A-Za-z0-9_-]{10,}\.[A-Za-z0-9_-]{10,}", "JWT Token"),
    # Hardcoded URLs with credentials
    (r"https?://[^:]+:[^@]+@", "URL with embedded credentials"),
    # Webhook URLs (often secret)
    (r"""(?:webhook[_-]?url|slack[_-]?webhook|discord[_-]?webhook)\s*[:=]\s*['"]https?://""", "Hardcoded Webhook URL"),
]

# URI / Security anti-patterns (WARNING, not blocking)
# Matched per-line in section 3 below; these only print advisories to stderr.
SECURITY_WARNINGS = [
    (r"cors\s*\(\s*\{[^}]*origin\s*:\s*['\"]?\*['\"]?", "CORS wildcard origin in code — use whitelist in production"),
    (r"httpOnly\s*:\s*false", "Cookie httpOnly disabled — session cookies should be httpOnly"),
    (r"secure\s*:\s*false", "Cookie secure flag disabled — use HTTPS in production"),
    (r"eval\s*\(", "eval() usage — potential code injection risk"),
    (r"innerHTML\s*=", "innerHTML assignment — potential XSS risk"),
    (r"dangerouslySetInnerHTML", "dangerouslySetInnerHTML — verify input is sanitized"),
]
|
|
145
|
+
|
|
146
|
+
# Per-line findings ("  Line N: label") and the distinct labels that matched.
findings = []
patterns_matched = []
lowpath = file_path.lower()
# Test fixtures legitimately contain fake keys/tokens — skip them entirely.
is_test_file = any(d in lowpath for d in ["/__tests__/", "/test/", "/tests/"])
if not is_test_file:
    basename = os.path.basename(file_path).lower()
    is_test_file = any(p in basename for p in [".test.", ".spec."])

for i, line in enumerate(content.split("\n"), 1):
    stripped = line.strip()
    # Skip lines that are entirely comments (bare "*" removed — too broad)
    if stripped.startswith(("#", "//", "/*", "* ", "<!--", "%", ";")):
        continue
    if is_test_file:
        continue
    for pattern, label in SECRET_PATTERNS:
        if re.search(pattern, line, re.IGNORECASE):
            findings.append(f" Line {i}: {label}")
            if label not in patterns_matched:
                patterns_matched.append(label)
            break  # One finding per line is enough
    # NOTE: the entropy check below still runs even when a regex pattern
    # matched above, so a single line can contribute two findings.
    entropy_matches = detect_high_entropy_strings(line)
    if entropy_matches:
        findings.append(f" Line {i}: High-entropy potential secret")
        if "High-entropy potential secret" not in patterns_matched:
            patterns_matched.append("High-entropy potential secret")

if findings:
    # Best-effort: persist a machine-readable signal for downstream hooks
    # (e.g. stop gates) in .omg/state/secret-detected.json.
    try:
        proj_dir = _resolve_project_dir()
        state_dir = os.path.join(proj_dir, ".omg", "state")
        os.makedirs(state_dir, exist_ok=True)
        signal_path = os.path.join(state_dir, "secret-detected.json")
        signal_payload = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "file": file_path,
            "patterns_matched": patterns_matched,
            "action": "blocked",
        }
        with open(signal_path, "w", encoding="utf-8") as f:
            json.dump(signal_payload, f)
    except Exception as e:
        print(f"[OMG] post-write.py: {type(e).__name__}: {e}", file=sys.stderr)
    print(
        f"⚠ SECRET DETECTED in {file_path}. Signal written to .omg/state/secret-detected.json",
        file=sys.stderr,
    )
    # Human-readable summary, capped at the first 10 findings.
    msg = f"SECRET DETECTED in {file_path}:\n" + "\n".join(findings[:10])
    if len(findings) > 10:
        msg += f"\n ... and {len(findings) - 10} more"
    msg += "\n\nRemove hardcoded secrets. Use environment variables or a secret manager."
    print(msg, file=sys.stderr)
    # NOTE: exit(0), not exit(2). Non-zero exits crash sibling hooks
    # ("Sibling tool call errored"). The warning in stderr is still visible.
    sys.exit(0)
|
|
202
|
+
|
|
203
|
+
# ── 3. SECURITY WARNING SCAN (non-blocking, advisory) ──
# Advisory pass: report the first matching anti-pattern per line to stderr.
sec_warnings = []
_COMMENT_PREFIXES = ("#", "//", "/*", "*", "<!--")
for lineno, src_line in enumerate(content.split("\n"), 1):
    if src_line.strip().startswith(_COMMENT_PREFIXES):
        continue  # comment-only lines are not executable code
    hit = next(
        (label for pattern, label in SECURITY_WARNINGS if re.search(pattern, src_line, re.IGNORECASE)),
        None,
    )
    if hit is not None:
        sec_warnings.append(f" Line {lineno}: ⚠ {hit}")

if sec_warnings:
    # Cap the advisory at five lines; point to the full audit command.
    msg = f"SECURITY WARNINGS in {file_path}:\n" + "\n".join(sec_warnings[:5])
    msg += "\n\nConsider running /OMG:security-review for a full audit."
    print(msg, file=sys.stderr)

sys.exit(0)
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import math
|
|
4
|
+
import re
|
|
5
|
+
from collections import Counter
|
|
6
|
+
from typing import cast
|
|
7
|
+
|
|
8
|
+
# Token shapes worth entropy-testing: 21+ chars of base64/hex/path-safe chars.
# Note the class has no ":" — a candidate can never include a URI scheme prefix.
_CANDIDATE_PATTERN = re.compile(r"[A-Za-z0-9+/=_\-.]{21,}")
_UUID_PATTERN = re.compile(
    r"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$",
    re.IGNORECASE,
)
_HEX_HASH_PATTERN = re.compile(r"^[0-9a-f]{32,64}$", re.IGNORECASE)


def _shannon_entropy(value: str) -> float:
    """Shannon entropy of *value* in bits per character (0.0 for empty)."""
    if not value:
        return 0.0
    counts = Counter(value)
    length = len(value)
    return -sum((count / length) * math.log2(count / length) for count in counts.values())


def _is_allowlisted_candidate(candidate: str) -> bool:
    """True for token shapes that are high-entropy but not secrets.

    UUIDs and bare hex digests (MD5/SHA-1/SHA-256 length range) are common
    non-secret values that would otherwise trip the entropy threshold.
    """
    lowered = candidate.lower()
    if lowered.startswith("data:image/"):
        # Defensive only: _CANDIDATE_PATTERN cannot match ":", so a candidate
        # never carries the data-URI prefix itself. The real data-URI filter
        # is the context check in detect_high_entropy_strings().
        return True
    if _UUID_PATTERN.match(candidate):
        return True
    if _HEX_HASH_PATTERN.match(candidate):
        return True
    return False


def detect_high_entropy_strings(text: str, entropy_threshold: float = 4.5, min_length: int = 21) -> list[str]:
    """Return unique candidate tokens in *text* that look like secrets.

    A token is reported when it matches _CANDIDATE_PATTERN, is at least
    *min_length* characters, is not an allowlisted shape (UUID/hex digest),
    is not the payload of an inline data:image/ URI, and has Shannon entropy
    above *entropy_threshold*. Order of first occurrence is preserved.
    """
    findings: list[str] = []
    seen: set[str] = set()
    for match in _CANDIDATE_PATTERN.finditer(text):
        candidate = match.group(0)
        if candidate in seen:
            continue
        seen.add(candidate)
        if len(candidate) < min_length or _is_allowlisted_candidate(candidate):
            continue
        # Fix: the old allowlist tested candidate.startswith("data:image/"),
        # which never fired because candidates cannot contain ":". Check the
        # text immediately preceding the match for the data-URI prefix instead,
        # so inline-image base64 payloads stop producing false positives.
        context = text[max(0, match.start() - 64):match.start()].lower()
        if "data:image/" in context:
            continue
        if _shannon_entropy(candidate) > entropy_threshold:
            findings.append(candidate)
    return findings
|
|
@@ -0,0 +1,398 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""PreCompact Hook — OMG Standalone state preservation.
|
|
3
|
+
|
|
4
|
+
1) Snapshot key files from .omg/state (fallback .omc via migration)
|
|
5
|
+
2) Auto-generate handoff files in .omg/state
|
|
6
|
+
3) JetBrains hybrid summarization (feature-flagged: CONTEXT_MANAGER)
|
|
7
|
+
"""
|
|
8
|
+
import json
|
|
9
|
+
import importlib
|
|
10
|
+
import math
|
|
11
|
+
import os
|
|
12
|
+
import re
|
|
13
|
+
import shutil
|
|
14
|
+
import subprocess
|
|
15
|
+
import sys
|
|
16
|
+
from datetime import datetime
|
|
17
|
+
|
|
18
|
+
# Prefer package-relative imports (when installed as the `hooks` package);
# fall back to bare-module imports when run as a standalone hook script.
try:
    from hooks.state_migration import resolve_state_file, resolve_state_dir
    from hooks._common import _resolve_project_dir, get_feature_flag
    from hooks._protected_context import collect_protected_context
except ImportError:
    _state_migration = importlib.import_module("state_migration")
    _common = importlib.import_module("_common")
    resolve_state_file = _state_migration.resolve_state_file
    resolve_state_dir = _state_migration.resolve_state_dir
    _resolve_project_dir = _common._resolve_project_dir
    get_feature_flag = _common.get_feature_flag
    # _protected_context is optional — callers must handle None.
    try:
        _protected_ctx = importlib.import_module("_protected_context")
        collect_protected_context = _protected_ctx.collect_protected_context
    except Exception:
        collect_protected_context = None


# Snapshot size cap (bytes) and git-diff timeout (seconds); env-overridable.
MAX_SNAPSHOT_BYTES = int(os.environ.get("OMG_PRECOMPACT_MAX_SNAPSHOT_BYTES", "262144"))
GIT_DIFF_TIMEOUT_SEC = int(os.environ.get("OMG_PRECOMPACT_GIT_DIFF_TIMEOUT_SEC", "1"))
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
# ---------------------------------------------------------------------------
|
|
41
|
+
# Pure utility functions (importable for testing)
|
|
42
|
+
# ---------------------------------------------------------------------------
|
|
43
|
+
|
|
44
|
+
def read_file(path, max_lines=None):
    """Return the stripped text of *path*, optionally limited to max_lines.

    Returns None when the file is missing, empty after stripping, or
    unreadable for any reason (best-effort read for hook state files).
    """
    try:
        if not os.path.exists(path):
            return None
        with open(path, "r", encoding="utf-8", errors="ignore") as handle:
            text = handle.read().strip()
    except Exception:
        return None
    if not text:
        return None
    if max_lines:
        return "\n".join(text.split("\n")[:max_lines])
    return text


def read_cache(paths):
    """Map each path to read_file(path); missing/empty files map to None."""
    return {p: read_file(p) for p in paths}
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def first_lines(text, max_lines):
    """Return at most the first *max_lines* lines of *text*.

    Falsy text yields None; a falsy max_lines (0/None) returns text unchanged.
    """
    if not text:
        return None
    return "\n".join(text.splitlines()[:max_lines]) if max_lines else text
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def snapshot_file(src_path, dst_path, max_bytes):
    """Copy src_path to dst_path, truncating to at most max_bytes bytes.

    max_bytes <= 0 disables truncation. The destination's parent directory
    is created on demand. Returns True when a snapshot was written, False
    when the source could not be read or the copy failed — best-effort:
    a snapshot failure must not crash the pre-compact hook.
    """
    try:
        dst_dir = os.path.dirname(dst_path)
        if dst_dir:  # dirname is "" for a bare filename; makedirs("") raises
            os.makedirs(dst_dir, exist_ok=True)
        size = os.path.getsize(src_path)

        if max_bytes <= 0 or size <= max_bytes:
            # Under the cap (or no cap): straight copy, preserving metadata.
            shutil.copy2(src_path, dst_path)
            return True

        # Over the cap: keep the head and append an explicit truncation marker.
        with open(src_path, "rb") as src_f:
            raw = src_f.read(max_bytes)
        note = (
            f"\n\n[TRUNCATED by pre-compact: original_bytes={size}, kept_bytes={len(raw)}]"
        ).encode("utf-8")
        with open(dst_path, "wb") as dst_f:
            dst_f.write(raw)
            dst_f.write(note)
        return True
    except OSError:
        # Fix: previously only os.path.getsize was guarded — a failing
        # copy2/open raised out of the hook, and os.makedirs("") crashed
        # when dst_path had no directory component.
        return False
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
# ---------------------------------------------------------------------------
|
|
97
|
+
# JetBrains hybrid summarization (Dec 2025 empirical strategy)
|
|
98
|
+
# ---------------------------------------------------------------------------
|
|
99
|
+
|
|
100
|
+
# Regex for common source file extensions
# Matches optional directory prefix + basename ending in a known extension;
# used by _extract_entities to pull file references out of turn text.
_FILE_PATH_RE = re.compile(
    r"(?:[\w./-]+/)?[\w.-]+\."
    r"(?:py|ts|js|tsx|jsx|json|yaml|yml|md|txt|sh|toml|cfg|ini|sql|html|css|go|rs|java|rb|c|h|cpp)"
)

# Keywords indicating causal relationships / decisions
# Case-insensitive whole-word match; a sentence containing any of these is
# treated as a "decision" worth preserving in batch summaries.
_CAUSAL_RE = re.compile(
    r"\b(?:decided|chose|because|therefore|fixed|resolved|implemented|"
    r"created|added|removed|deleted|changed|updated|refactored)\b",
    re.IGNORECASE,
)
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def _extract_entities(text):
    """Extract file paths and causal decision sentences from text.

    Returns (file_paths: list[str], decisions: list[str]); file paths are
    deduplicated with first-seen order preserved.
    """
    ordered_paths = dict.fromkeys(_FILE_PATH_RE.findall(text))  # dedupe, keep order
    decisions = []
    for raw_sentence in re.split(r"[.!?\n]", text):
        trimmed = raw_sentence.strip()
        if len(trimmed) > 5 and _CAUSAL_RE.search(raw_sentence):
            decisions.append(trimmed)
    return list(ordered_paths), decisions
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
def _summarize_batch(batch, batch_num, start_idx, end_idx):
    """Summarize a batch of turns into a single line.

    Format: "Batch N (turns X-Y): [files: ...] [decisions: ...]", or a
    "[no notable entities]" marker when nothing was extracted.
    """
    joined = " ".join(turn.get("content", "") for turn in batch)
    files, decisions = _extract_entities(joined)

    pieces = [f"Batch {batch_num} (turns {start_idx}-{end_idx}):"]
    if files:
        pieces.append(f"[files: {', '.join(files[:10])}]")
    if decisions:
        # Keep at most 3 decision excerpts, truncated to 80 chars each.
        pieces.append(f"[decisions: {'; '.join(d[:80] for d in decisions[:3])}]")
    if not files and not decisions:
        pieces.append("[no notable entities]")

    return " ".join(pieces)
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
def _apply_hybrid_summarization(turns, config):
    """Apply JetBrains hybrid summarization strategy.

    Args:
        turns: List of turn dicts (index 0 = newest), each with 'role' and 'content'.
        config: Dict with keys:
            - full_turns: Number of most-recent turns to keep verbatim (default 10)
            - summarize_turns: Max turn index for summarization window (default 50)
            - batch_size: Number of turns per summary batch (default 21)

    Returns:
        Dict with:
            - full_turns: List of turn dicts kept verbatim
            - summaries: List of batch summary strings
            - discarded_count: Number of turns beyond the summarization window
    """
    full_n = config.get("full_turns", 10)
    summarize_n = config.get("summarize_turns", 50)
    batch_size = config.get("batch_size", 21)

    total = len(turns)

    # Empty conversation: nothing to keep, summarize, or discard.
    if total == 0:
        return {"full_turns": [], "summaries": [], "discarded_count": 0}

    # Latest turns kept verbatim
    full_end = min(full_n, total)
    full_turns = turns[:full_end]

    # Middle range to summarize: turns[full_end:summarize_n]
    summarize_end = min(summarize_n, total)
    middle_turns = turns[full_end:summarize_end]

    # Discarded: turns[summarize_n:]
    discarded_count = max(0, total - summarize_n)

    # Batch the middle turns
    # batch_size <= 0 disables summarization entirely (guards ceil/div below).
    summaries = []
    if middle_turns and batch_size > 0:
        num_batches = math.ceil(len(middle_turns) / batch_size)
        for b in range(num_batches):
            batch_start = b * batch_size
            batch_end = min((b + 1) * batch_size, len(middle_turns))
            batch = middle_turns[batch_start:batch_end]

            # Absolute indices (relative to original turns list)
            abs_start = full_end + batch_start
            abs_end = full_end + batch_end - 1

            summary = _summarize_batch(batch, b + 1, abs_start, abs_end)
            summaries.append(summary)

    return {
        "full_turns": full_turns,
        "summaries": summaries,
        "discarded_count": discarded_count,
    }
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
def _load_context_budget_config(project_dir):
|
|
210
|
+
"""Load context_budget config from settings.json, with defaults."""
|
|
211
|
+
defaults = {"full_turns": 10, "summarize_turns": 50, "batch_size": 21}
|
|
212
|
+
try:
|
|
213
|
+
settings_path = os.path.join(project_dir, "settings.json")
|
|
214
|
+
if os.path.exists(settings_path):
|
|
215
|
+
with open(settings_path, "r", encoding="utf-8") as f:
|
|
216
|
+
settings = json.load(f)
|
|
217
|
+
budget = settings.get("_omg", {}).get("context_budget", {})
|
|
218
|
+
return {
|
|
219
|
+
"full_turns": budget.get("full_turns", defaults["full_turns"]),
|
|
220
|
+
"summarize_turns": budget.get("summarize_turns", defaults["summarize_turns"]),
|
|
221
|
+
"batch_size": budget.get("batch_size", defaults["batch_size"]),
|
|
222
|
+
}
|
|
223
|
+
except Exception:
|
|
224
|
+
pass
|
|
225
|
+
return defaults
|
|
226
|
+
|
|
227
|
+
|
|
228
|
+
# ---------------------------------------------------------------------------
|
|
229
|
+
# Main hook execution (side-effects — only runs when invoked as script)
|
|
230
|
+
# ---------------------------------------------------------------------------
|
|
231
|
+
|
|
232
|
+
def main():
    """Pre-compact hook entry point.

    Reads the hook payload (JSON) from stdin, snapshots key state files,
    writes a condensed handoff document (plus a portable variant), prunes old
    snapshots, and — behind the CONTEXT_MANAGER feature flag — injects
    protected context and logs a hybrid conversation summary. All failure
    paths are swallowed so the hook never blocks compaction; always exits 0.
    """
    # Hook payload arrives on stdin as JSON; bail out quietly if absent/invalid.
    try:
        data = json.load(sys.stdin)
    except (json.JSONDecodeError, EOFError):
        sys.exit(0)

    project_dir = _resolve_project_dir()
    # Timestamp doubles as the snapshot directory name (sorts chronologically).
    ts = datetime.now().strftime("%Y%m%d_%H%M%S")
    state_dir = resolve_state_dir(project_dir, "state", "")
    snapshot_dir = os.path.join(state_dir, "snapshots", ts)

    # State files worth preserving across a context compaction.
    snapshot_files = [
        resolve_state_file(project_dir, "state/profile.yaml", "profile.yaml"),
        resolve_state_file(project_dir, "state/working-memory.md", "working-memory.md"),
        resolve_state_file(project_dir, "state/_plan.md", "_plan.md"),
        resolve_state_file(project_dir, "state/_checklist.md", "_checklist.md"),
        resolve_state_file(project_dir, "state/quality-gate.json", "quality-gate.json"),
        resolve_state_file(project_dir, "state/ledger/tool-ledger.jsonl", "ledger/tool-ledger.jsonl"),
        resolve_state_file(project_dir, "state/ledger/failure-tracker.json", "ledger/failure-tracker.json"),
        resolve_state_file(project_dir, "state/ralph-loop.json", "ralph-loop.json"),
    ]
    # read_cache returns per-path contents; None marks a file that wasn't readable.
    cached = read_cache(snapshot_files)
    saved = []
    for src in snapshot_files:
        if cached.get(src) is not None:
            dst = os.path.join(snapshot_dir, os.path.basename(src))
            # snapshot_file presumably copies with a size cap — TODO confirm semantics.
            if snapshot_file(src, dst, MAX_SNAPSHOT_BYTES):
                saved.append(os.path.basename(src))

    # Pull truncated previews of each state file for the handoff summary.
    profile = first_lines(cached.get(resolve_state_file(project_dir, "state/profile.yaml", "profile.yaml")), 20)
    wm = first_lines(cached.get(resolve_state_file(project_dir, "state/working-memory.md", "working-memory.md")), 15)
    plan = first_lines(cached.get(resolve_state_file(project_dir, "state/_plan.md", "_plan.md")), 10)
    checklist = first_lines(cached.get(resolve_state_file(project_dir, "state/_checklist.md", "_checklist.md")), 50)
    tracker = cached.get(resolve_state_file(project_dir, "state/ledger/failure-tracker.json", "ledger/failure-tracker.json"))
    ralph_loop = cached.get(resolve_state_file(project_dir, "state/ralph-loop.json", "ralph-loop.json"))

    # Assemble the handoff document section by section; empty sources are skipped.
    parts = [
        f"# Handoff -- {datetime.now().strftime('%Y-%m-%d %H:%M')}",
        "Auto-generated before context compaction.",
    ]
    if profile:
        parts.append("<!-- section: working-state -->")
        parts.append("## Project\n" + profile)
    if wm:
        parts.append("## Working State\n" + wm)
    if plan:
        parts.append("## Plan\n" + plan)
    if checklist:
        # Derive checklist progress: done vs total items, plus up to 3 pending next steps.
        lines = checklist.split("\n")
        done = sum(1 for l in lines if "[x]" in l.lower())
        total = sum(1 for l in lines if l.strip().startswith(("[", "- [")))
        pending = [l.strip() for l in lines if "[ ]" in l][:3]
        parts.append("<!-- section: progress -->")
        parts.append(f"## Progress: {done}/{total}")
        if pending:
            parts.append("Next:\n" + "\n".join(pending))
    if tracker:
        # Surface approaches that have failed repeatedly (count >= 2), capped at 5.
        try:
            t = json.loads(tracker)
            active = {k: v for k, v in t.items() if isinstance(v, dict) and v.get("count", 0) >= 2}
            if active:
                warns = [f"- {k}: {v['count']}x" for k, v in list(active.items())[:5]]
                parts.append("## Failed Approaches\n" + "\n".join(warns))
        except Exception:
            pass
    if ralph_loop:
        # Carry active Ralph-loop progress (iteration + truncated goal) into the handoff.
        try:
            rl = json.loads(ralph_loop)
            if rl.get("active"):
                rl_iter = rl.get("iteration", 0)
                rl_max = rl.get("max_iterations", 50)
                rl_goal = rl.get("original_prompt", "")[:80]
                parts.append(f"## Ralph Loop\nIteration: {rl_iter}/{rl_max} | Goal: {rl_goal}")
        except Exception:
            pass

    # Best-effort list of uncommitted changes (first 5 files); tolerate no git/timeout.
    try:
        diff_names = subprocess.run(
            ["git", "diff", "--name-only"],
            capture_output=True,
            text=True,
            timeout=GIT_DIFF_TIMEOUT_SEC,
            cwd=project_dir,
        )
        changed = [l for l in diff_names.stdout.strip().split("\n") if l]
        if changed:
            parts.append("## Uncommitted\n" + "\n".join(f"- {x}" for x in changed[:5]))
    except Exception:
        pass

    parts.append("## Resume Instructions")
    parts.append("Read .omg/state/profile.yaml + this file.")
    parts.append("\n---\n*Auto-generated before context compaction.*")
    handoff = "\n\n".join(parts)
    # Cap the handoff at 120 lines to keep the post-compaction context small.
    handoff_lines = handoff.split("\n")
    if len(handoff_lines) > 120:
        handoff = "\n".join(handoff_lines[:120]) + "\n\n(truncated)"

    os.makedirs(state_dir, exist_ok=True)
    with open(os.path.join(state_dir, "handoff.md"), "w", encoding="utf-8") as f:
        f.write(handoff)

    # Portable variant for other platforms, with a slightly larger 150-line cap.
    portable = handoff + "\n\nSelf-contained handoff for other platforms."
    portable_lines = portable.split("\n")
    if len(portable_lines) > 150:
        portable = "\n".join(portable_lines[:150]) + "\n\n(truncated)"
    with open(os.path.join(state_dir, "handoff-portable.md"), "w", encoding="utf-8") as f:
        f.write(portable)

    # Keep latest 5 snapshots (timestamp dir names sort chronologically).
    snapshots_parent = os.path.join(state_dir, "snapshots")
    try:
        if os.path.isdir(snapshots_parent):
            entries = sorted(
                [d for d in os.listdir(snapshots_parent) if os.path.isdir(os.path.join(snapshots_parent, d))]
            )
            for old in entries[:-5]:
                shutil.rmtree(os.path.join(snapshots_parent, old), ignore_errors=True)
    except Exception:
        pass

    print(f"[OMG pre-compact] Snapshotted {len(saved)} files -> {snapshot_dir}", file=sys.stderr)

    # --- Protected context registry (feature-flagged under CONTEXT_MANAGER) ---
    try:
        if collect_protected_context is not None and get_feature_flag("CONTEXT_MANAGER", default=False):
            protected = collect_protected_context(project_dir, context_text=handoff)
            if protected:
                # stdout JSON is the hook's channel back to the host (additionalContext).
                json.dump({"additionalContext": protected}, sys.stdout)
                print(f"[OMG pre-compact] Protected context injected ({len(protected)} chars)", file=sys.stderr)
    except Exception:
        pass  # crash isolation: never fail on protected context

    # --- Hybrid summarization (feature-flagged under CONTEXT_MANAGER) ---
    try:
        if get_feature_flag("CONTEXT_MANAGER", default=False):
            turns = data.get("conversation", [])
            if turns:
                config = _load_context_budget_config(project_dir)
                result = _apply_hybrid_summarization(turns, config)
                # Format as additionalContext supplement
                summary_parts = []
                if result["summaries"]:
                    summary_parts.append("## Conversation Context (Hybrid Summary)")
                    for s in result["summaries"]:
                        summary_parts.append(s)
                if result["discarded_count"] > 0:
                    summary_parts.append(
                        f"({result['discarded_count']} oldest turns discarded — see memory/handoff)"
                    )
                # NOTE(review): summary_text is assembled but never written to
                # stdout or disk — only the stderr stats line below is emitted.
                # The comment above suggests it was meant to be output as JSON
                # when no protected context was injected; confirm intent.
                summary_text = "\n".join(summary_parts)
                # Output as JSON if no protected context was already output
                print(
                    f"[OMG pre-compact] Hybrid summarization: "
                    f"{len(result['full_turns'])} full, "
                    f"{len(result['summaries'])} batches, "
                    f"{result['discarded_count']} discarded",
                    file=sys.stderr,
                )
    except Exception:
        pass  # crash isolation: never fail on hybrid summarization

    sys.exit(0)
|
|
395
|
+
|
|
396
|
+
|
|
397
|
+
# Script entry point: run the pre-compact hook only when executed directly
# (importing this module for its helpers stays side-effect free).
if __name__ == "__main__":
    main()
|