@trac3er/oh-my-god 2.0.0 → 2.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +8 -8
- package/.claude-plugin/plugin.json +5 -4
- package/.claude-plugin/scripts/uninstall.sh +74 -3
- package/.claude-plugin/scripts/update.sh +78 -3
- package/.coveragerc +26 -0
- package/.mcp.json +4 -4
- package/CHANGELOG.md +14 -0
- package/CODE_OF_CONDUCT.md +27 -0
- package/CONTRIBUTING.md +62 -0
- package/OMG-setup.sh +1201 -355
- package/README.md +77 -56
- package/SECURITY.md +25 -0
- package/agents/__init__.py +1 -0
- package/agents/model_roles.py +196 -0
- package/agents/omg-architect-mode.md +3 -5
- package/agents/omg-backend-engineer.md +3 -5
- package/agents/omg-database-engineer.md +3 -5
- package/agents/omg-frontend-designer.md +4 -5
- package/agents/omg-implement-mode.md +4 -5
- package/agents/omg-infra-engineer.md +3 -5
- package/agents/omg-research-mode.md +4 -6
- package/agents/omg-security-auditor.md +3 -5
- package/agents/omg-testing-engineer.md +3 -5
- package/build/lib/yaml.py +321 -0
- package/commands/OMG:ai-commit.md +101 -14
- package/commands/OMG:arch.md +302 -19
- package/commands/OMG:ccg.md +12 -7
- package/commands/OMG:compat.md +25 -17
- package/commands/OMG:cost.md +173 -13
- package/commands/OMG:crazy.md +1 -1
- package/commands/OMG:create-agent.md +170 -20
- package/commands/OMG:deps.md +235 -17
- package/commands/OMG:domain-init.md +1 -1
- package/commands/OMG:escalate.md +41 -12
- package/commands/OMG:health-check.md +37 -13
- package/commands/OMG:init.md +122 -14
- package/commands/OMG:project-init.md +1 -1
- package/commands/OMG:session-branch.md +76 -9
- package/commands/OMG:session-fork.md +42 -5
- package/commands/OMG:session-merge.md +124 -8
- package/commands/OMG:setup.md +69 -12
- package/commands/OMG:stats.md +215 -14
- package/commands/OMG:teams.md +19 -10
- package/config/lsp_languages.yaml +8 -0
- package/hooks/__init__.py +0 -0
- package/hooks/_agent_registry.py +423 -0
- package/hooks/_analytics.py +291 -0
- package/hooks/_budget.py +31 -0
- package/hooks/_common.py +569 -0
- package/hooks/_compression_optimizer.py +119 -0
- package/hooks/_cost_ledger.py +176 -0
- package/hooks/_learnings.py +126 -0
- package/hooks/_memory.py +103 -0
- package/hooks/_protected_context.py +150 -0
- package/hooks/_token_counter.py +221 -0
- package/hooks/branch_manager.py +236 -0
- package/hooks/budget_governor.py +232 -0
- package/hooks/circuit-breaker.py +270 -0
- package/hooks/compression_feedback.py +254 -0
- package/hooks/config-guard.py +216 -0
- package/hooks/context_pressure.py +53 -0
- package/hooks/credential_store.py +1020 -0
- package/hooks/fetch-rate-limits.py +212 -0
- package/hooks/firewall.py +48 -0
- package/hooks/hashline-formatter-bridge.py +224 -0
- package/hooks/hashline-injector.py +273 -0
- package/hooks/hashline-validator.py +216 -0
- package/hooks/idle-detector.py +95 -0
- package/hooks/intentgate-keyword-detector.py +188 -0
- package/hooks/magic-keyword-router.py +195 -0
- package/hooks/policy_engine.py +505 -0
- package/hooks/post-tool-failure.py +19 -0
- package/hooks/post-write.py +219 -0
- package/hooks/post_write.py +46 -0
- package/hooks/pre-compact.py +398 -0
- package/hooks/pre-tool-inject.py +98 -0
- package/hooks/prompt-enhancer.py +672 -0
- package/hooks/quality-runner.py +191 -0
- package/hooks/query.py +512 -0
- package/hooks/secret-guard.py +61 -0
- package/hooks/secret_audit.py +144 -0
- package/hooks/session-end-capture.py +137 -0
- package/hooks/session-start.py +277 -0
- package/hooks/setup_wizard.py +582 -0
- package/hooks/shadow_manager.py +297 -0
- package/hooks/state_migration.py +225 -0
- package/hooks/stop-gate.py +7 -0
- package/hooks/stop_dispatcher.py +945 -0
- package/hooks/test-validator.py +361 -0
- package/hooks/test_generator_hook.py +123 -0
- package/hooks/todo-state-tracker.py +114 -0
- package/hooks/tool-ledger.py +149 -0
- package/hooks/trust_review.py +585 -0
- package/hud/omg-hud.mjs +31 -1
- package/lab/__init__.py +1 -0
- package/lab/pipeline.py +75 -0
- package/lab/policies.py +52 -0
- package/package.json +7 -18
- package/plugins/README.md +33 -61
- package/plugins/advanced/commands/OMG:deep-plan.md +3 -3
- package/plugins/advanced/commands/OMG:learn.md +1 -1
- package/plugins/advanced/commands/OMG:security-review.md +3 -3
- package/plugins/advanced/commands/OMG:ship.md +1 -1
- package/plugins/advanced/plugin.json +1 -1
- package/plugins/core/plugin.json +8 -3
- package/plugins/dephealth/__init__.py +0 -0
- package/plugins/dephealth/cve_scanner.py +188 -0
- package/plugins/dephealth/license_checker.py +135 -0
- package/plugins/dephealth/manifest_detector.py +423 -0
- package/plugins/dephealth/vuln_analyzer.py +169 -0
- package/plugins/testgen/__init__.py +0 -0
- package/plugins/testgen/codamosa_engine.py +402 -0
- package/plugins/testgen/edge_case_synthesizer.py +184 -0
- package/plugins/testgen/framework_detector.py +271 -0
- package/plugins/testgen/skeleton_generator.py +219 -0
- package/plugins/viz/__init__.py +0 -0
- package/plugins/viz/ast_parser.py +139 -0
- package/plugins/viz/diagram_generator.py +192 -0
- package/plugins/viz/graph_builder.py +444 -0
- package/plugins/viz/native_parsers.py +259 -0
- package/plugins/viz/regex_parser.py +112 -0
- package/pyproject.toml +81 -0
- package/rules/contextual/write-verify.md +2 -2
- package/rules/core/00-truth.md +1 -1
- package/rules/core/01-surgical.md +1 -1
- package/rules/core/02-circuit-breaker.md +2 -2
- package/rules/core/03-ensemble.md +3 -3
- package/rules/core/04-testing.md +3 -3
- package/runtime/__init__.py +32 -0
- package/runtime/adapters/__init__.py +13 -0
- package/runtime/adapters/claude.py +60 -0
- package/runtime/adapters/gpt.py +53 -0
- package/runtime/adapters/local.py +53 -0
- package/runtime/adoption.py +212 -0
- package/runtime/business_workflow.py +220 -0
- package/runtime/cli_provider.py +85 -0
- package/runtime/compat.py +1299 -0
- package/runtime/custom_agent_loader.py +366 -0
- package/runtime/dispatcher.py +47 -0
- package/runtime/ecosystem.py +371 -0
- package/runtime/legacy_compat.py +7 -0
- package/runtime/mcp_config_writers.py +115 -0
- package/runtime/mcp_lifecycle.py +153 -0
- package/runtime/mcp_memory_server.py +135 -0
- package/runtime/memory_parsers/__init__.py +0 -0
- package/runtime/memory_parsers/chatgpt_parser.py +257 -0
- package/runtime/memory_parsers/claude_import.py +107 -0
- package/runtime/memory_parsers/export.py +97 -0
- package/runtime/memory_parsers/gemini_import.py +91 -0
- package/runtime/memory_parsers/kimi_import.py +91 -0
- package/runtime/memory_store.py +215 -0
- package/runtime/omc_compat.py +7 -0
- package/runtime/providers/__init__.py +0 -0
- package/runtime/providers/codex_provider.py +112 -0
- package/runtime/providers/gemini_provider.py +128 -0
- package/runtime/providers/kimi_provider.py +151 -0
- package/runtime/providers/opencode_provider.py +144 -0
- package/runtime/subagent_dispatcher.py +362 -0
- package/runtime/team_router.py +1167 -0
- package/runtime/tmux_session_manager.py +169 -0
- package/scripts/check-omg-compat-contract-snapshot.py +137 -0
- package/scripts/check-omg-contract-snapshot.py +12 -0
- package/scripts/check-omg-public-ready.py +193 -0
- package/scripts/check-omg-standalone-clean.py +103 -0
- package/scripts/legacy_to_omg_migrate.py +29 -0
- package/scripts/migrate-legacy.py +464 -0
- package/scripts/omc_to_omg_migrate.py +12 -0
- package/scripts/omg.py +492 -0
- package/scripts/settings-merge.py +283 -0
- package/scripts/verify-standalone.sh +8 -4
- package/settings.json +126 -29
- package/templates/profile.yaml +1 -1
- package/tools/__init__.py +2 -0
- package/tools/browser_consent.py +289 -0
- package/tools/browser_stealth.py +481 -0
- package/tools/browser_tool.py +448 -0
- package/tools/changelog_generator.py +347 -0
- package/tools/commit_splitter.py +746 -0
- package/tools/config_discovery.py +151 -0
- package/tools/config_merger.py +449 -0
- package/tools/dashboard_generator.py +300 -0
- package/tools/git_inspector.py +298 -0
- package/tools/lsp_client.py +275 -0
- package/tools/lsp_discovery.py +231 -0
- package/tools/lsp_operations.py +392 -0
- package/tools/pr_generator.py +404 -0
- package/tools/python_repl.py +656 -0
- package/tools/python_sandbox.py +609 -0
- package/tools/search_providers/__init__.py +77 -0
- package/tools/search_providers/brave.py +115 -0
- package/tools/search_providers/exa.py +116 -0
- package/tools/search_providers/jina.py +104 -0
- package/tools/search_providers/perplexity.py +139 -0
- package/tools/search_providers/synthetic.py +74 -0
- package/tools/session_snapshot.py +736 -0
- package/tools/ssh_manager.py +912 -0
- package/tools/theme_engine.py +294 -0
- package/tools/theme_selector.py +137 -0
- package/tools/web_search.py +622 -0
- package/yaml.py +321 -0
- package/.claude-plugin/scripts/install.sh +0 -9
- package/bun.lock +0 -23
- package/bunfig.toml +0 -3
- package/hooks/_budget.ts +0 -1
- package/hooks/_common.ts +0 -63
- package/hooks/circuit-breaker.ts +0 -101
- package/hooks/config-guard.ts +0 -4
- package/hooks/firewall.ts +0 -20
- package/hooks/policy_engine.ts +0 -156
- package/hooks/post-tool-failure.ts +0 -22
- package/hooks/post-write.ts +0 -4
- package/hooks/pre-tool-inject.ts +0 -4
- package/hooks/prompt-enhancer.ts +0 -46
- package/hooks/quality-runner.ts +0 -24
- package/hooks/secret-guard.ts +0 -4
- package/hooks/session-end-capture.ts +0 -19
- package/hooks/session-start.ts +0 -19
- package/hooks/shadow_manager.ts +0 -81
- package/hooks/stop-gate.ts +0 -22
- package/hooks/stop_dispatcher.ts +0 -147
- package/hooks/test-generator-hook.ts +0 -4
- package/hooks/tool-ledger.ts +0 -27
- package/hooks/trust_review.ts +0 -175
- package/lab/pipeline.ts +0 -75
- package/lab/policies.ts +0 -68
- package/runtime/common.ts +0 -111
- package/runtime/compat.ts +0 -174
- package/runtime/dispatcher.ts +0 -25
- package/runtime/ecosystem.ts +0 -186
- package/runtime/provider_bootstrap.ts +0 -99
- package/runtime/provider_smoke.ts +0 -34
- package/runtime/release_readiness.ts +0 -186
- package/runtime/team_router.ts +0 -144
- package/scripts/check-omg-compat-contract-snapshot.ts +0 -20
- package/scripts/check-omg-standalone-clean.ts +0 -12
- package/scripts/check-runtime-clean.ts +0 -94
- package/scripts/omg.ts +0 -352
- package/scripts/settings-merge.ts +0 -93
- package/tools/commit_splitter.ts +0 -23
- package/tools/git_inspector.ts +0 -18
- package/tools/session_snapshot.ts +0 -47
- package/trac3er-oh-my-god-2.0.0.tgz +0 -0
- package/tsconfig.json +0 -15
|
@@ -0,0 +1,945 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Stop Hook Dispatcher — Priority-based multiplexer for stop checks."""
|
|
3
|
+
|
|
4
|
+
import json
|
|
5
|
+
import importlib.util
|
|
6
|
+
import os
|
|
7
|
+
import re
|
|
8
|
+
import subprocess
|
|
9
|
+
import sys
|
|
10
|
+
import time
|
|
11
|
+
from datetime import datetime, timedelta, timezone
|
|
12
|
+
|
|
13
|
+
sys.path.insert(0, os.path.dirname(__file__))
|
|
14
|
+
|
|
15
|
+
from _common import ( # noqa: E402
|
|
16
|
+
atomic_json_write,
|
|
17
|
+
block_decision,
|
|
18
|
+
check_performance_budget,
|
|
19
|
+
get_feature_flag,
|
|
20
|
+
get_project_dir,
|
|
21
|
+
json_input,
|
|
22
|
+
log_hook_error,
|
|
23
|
+
record_stop_block,
|
|
24
|
+
reset_stop_block_tracker,
|
|
25
|
+
_resolve_project_dir,
|
|
26
|
+
setup_crash_handler,
|
|
27
|
+
should_skip_stop_hooks,
|
|
28
|
+
STOP_CHECK_MAX_MS,
|
|
29
|
+
)
|
|
30
|
+
from state_migration import resolve_state_file # noqa: E402
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
setup_crash_handler("stop_dispatcher")
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
# Substring patterns that classify a changed path as "non-source": tests,
# fixtures, scripts, build/config files, docs, repo metadata, and
# OMG-internal state. Consumed by _is_non_source_path and (inlined) by
# check_false_fix, which compare against a lowercased path via plain
# substring containment — broad tokens such as "spec", "mock", or "setup."
# will therefore also match source paths that merely contain the word.
# NOTE(review): the mixed-case entries ("CLAUDE.md", "AGENTS.md") only match
# when the caller lowercases the pattern as well — confirm each call site.
NON_SOURCE_PATTERNS = [
    # test files / directories
    ".test.",
    "__test",
    "_test.",
    "/tests/",
    "tests/",
    "/test/",
    "test/",
    "spec",
    "__tests__",
    ".spec.",
    # scripts and configuration
    "script/",
    "scripts/",
    "/config/",
    ".config.",
    "package.json",
    "tsconfig",
    "eslint",
    "prettier",
    ".env",
    # test-support artifacts
    "mock",
    "fixture",
    "snapshot",
    "__mocks__",
    "jest.",
    "vitest.",
    "setup.",
    # OMG-internal state and agent instruction files
    ".omg/",
    ".omc/",
    "omg-",
    "CLAUDE.md",
    "AGENTS.md",
    # documentation and repo metadata
    "readme",
    "changelog",
    "license",
    ".gitignore",
    ".dockerignore",
    "dockerfile",
    "docker-compose",
    "makefile",
    ".github/",
    ".vscode/",
    ".idea/",
]
|
|
80
|
+
|
|
81
|
+
# Paths written by the OMG machinery itself. Writes matching these are
# excluded from "material" change tracking (see _build_context and
# check_write_failures) so the plugin's own bookkeeping never triggers a
# stop block. Matched case-insensitively by _is_internal_control_path,
# which lowercases both the path and the pattern.
INTERNAL_CONTROL_PATH_PATTERNS = [
    ".omg/",      # OMG state directory
    ".omc/",      # legacy OMC state directory
    "hooks/",     # hook scripts (any path segment named hooks/)
    "CLAUDE.md",  # agent instruction file
    "AGENTS.md",  # agent instruction file
]
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
def _to_bool(value: str | None, default: bool) -> bool:
|
|
91
|
+
if value is None:
|
|
92
|
+
return default
|
|
93
|
+
normalized = value.strip().lower()
|
|
94
|
+
if normalized in {"1", "true", "yes", "on"}:
|
|
95
|
+
return True
|
|
96
|
+
if normalized in {"0", "false", "no", "off"}:
|
|
97
|
+
return False
|
|
98
|
+
return default
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def _read_policy_flags(project_root: str) -> tuple[str, bool]:
|
|
102
|
+
mode = "warn_and_run"
|
|
103
|
+
require_evidence_pack = False
|
|
104
|
+
policy_path = os.path.join(project_root, ".omg", "policy.yaml")
|
|
105
|
+
if not os.path.exists(policy_path):
|
|
106
|
+
return mode, require_evidence_pack
|
|
107
|
+
|
|
108
|
+
try:
|
|
109
|
+
with open(policy_path, "r", encoding="utf-8", errors="ignore") as f:
|
|
110
|
+
for raw in f:
|
|
111
|
+
line = raw.strip()
|
|
112
|
+
if not line or line.startswith("#"):
|
|
113
|
+
continue
|
|
114
|
+
if line.startswith("mode:"):
|
|
115
|
+
mode = line.split(":", 1)[1].strip().strip("'\"") or mode
|
|
116
|
+
elif line.startswith("require_evidence_pack:"):
|
|
117
|
+
value = line.split(":", 1)[1].strip().strip("'\"")
|
|
118
|
+
require_evidence_pack = _to_bool(value, require_evidence_pack)
|
|
119
|
+
except Exception as e: # security: policy enforcement
|
|
120
|
+
print(f"[OMG] stop_dispatcher: {type(e).__name__}: {e}", file=sys.stderr)
|
|
121
|
+
return mode, require_evidence_pack
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def _is_non_source_path(file_path: str) -> bool:
    """Return True when *file_path* matches any NON_SOURCE_PATTERNS entry.

    Fix: the pattern is now lowercased before matching, mirroring the sibling
    _is_internal_control_path. Previously the path was lowercased but the
    pattern was not, so the mixed-case entries "CLAUDE.md" and "AGENTS.md"
    could never match a (lowercased) path.
    """
    fl = str(file_path).lower()
    return any(p.lower() in fl for p in NON_SOURCE_PATTERNS)
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
def _is_internal_control_path(file_path: str) -> bool:
    """True when *file_path* points into OMG's own control/state files."""
    lowered = str(file_path).lower()
    for pattern in INTERNAL_CONTROL_PATH_PATTERNS:
        if pattern.lower() in lowered:
            return True
    return False
|
|
132
|
+
|
|
133
|
+
|
|
134
|
+
try:
|
|
135
|
+
from shadow_manager import has_recent_evidence # type: ignore
|
|
136
|
+
except Exception: # intentional: optional feature — shadow_manager may not exist
|
|
137
|
+
has_recent_evidence = None
|
|
138
|
+
|
|
139
|
+
# Import hyphenated modules via importlib
|
|
140
|
+
_test_validator_check = None
|
|
141
|
+
_quality_runner_check = None
|
|
142
|
+
try:
|
|
143
|
+
_tv_spec = importlib.util.spec_from_file_location(
|
|
144
|
+
"test_validator", os.path.join(os.path.dirname(__file__), "test-validator.py"))
|
|
145
|
+
if _tv_spec and _tv_spec.loader:
|
|
146
|
+
_tv_mod = importlib.util.module_from_spec(_tv_spec)
|
|
147
|
+
_tv_spec.loader.exec_module(_tv_mod)
|
|
148
|
+
_test_validator_check = getattr(_tv_mod, "check_test_quality", None)
|
|
149
|
+
except Exception: # intentional: crash isolation for optional module
|
|
150
|
+
pass
|
|
151
|
+
try:
|
|
152
|
+
_qr_spec = importlib.util.spec_from_file_location(
|
|
153
|
+
"quality_runner", os.path.join(os.path.dirname(__file__), "quality-runner.py"))
|
|
154
|
+
if _qr_spec and _qr_spec.loader:
|
|
155
|
+
_qr_mod = importlib.util.module_from_spec(_qr_spec)
|
|
156
|
+
_qr_spec.loader.exec_module(_qr_mod)
|
|
157
|
+
_quality_runner_check = getattr(_qr_mod, "check_quality_runner", None)
|
|
158
|
+
except Exception: # intentional: crash isolation for optional module
|
|
159
|
+
pass
|
|
160
|
+
|
|
161
|
+
def _build_context(project_dir: str) -> dict[str, object]:
    """Assemble the shared tool-ledger context consumed by every stop check.

    Reads the JSONL tool ledger (malformed lines are skipped), keeps only the
    entries from the last two hours (ISO-8601 timestamps compared as strings),
    and pre-computes the command/tool/write views the individual checks need.
    "Material" writes exclude OMG's own control paths; "source" writes further
    exclude non-source paths (tests, configs, docs).
    """
    ledger_path = resolve_state_file(
        project_dir,
        "state/ledger/tool-ledger.jsonl",
        "ledger/tool-ledger.jsonl",
    )
    ledger_entries = []
    if os.path.exists(ledger_path):
        try:
            with open(ledger_path, "r", encoding="utf-8", errors="ignore") as f:
                for raw in f:
                    stripped = raw.strip()
                    if not stripped:
                        continue
                    try:
                        ledger_entries.append(json.loads(stripped))
                    except json.JSONDecodeError:
                        pass  # intentional: skip malformed ledger lines
        except Exception as e:  # security: dispatch context building
            print(f"[OMG] stop_dispatcher: {type(e).__name__}: {e}", file=sys.stderr)

    # Entries missing "ts" get "" and always fall before the cutoff.
    cutoff = (datetime.now(timezone.utc) - timedelta(hours=2)).isoformat()
    recent_entries = [e for e in ledger_entries if e.get("ts", "") >= cutoff]

    recent_commands = [
        e.get("command", "").lower()[:300]
        for e in recent_entries
        if e.get("command")
    ]
    recent_tools = {e.get("tool", "") for e in recent_entries}
    recent_exit_codes = [
        (e.get("command", ""), e.get("exit_code"))
        for e in recent_entries
        if e.get("exit_code") is not None
    ]

    write_entries = [
        e for e in recent_entries if e.get("tool") in ("Write", "Edit", "MultiEdit")
    ]
    material_write_entries = [
        e for e in write_entries
        if not _is_internal_control_path(str(e.get("file", "")))
    ]
    source_write_entries = [
        e for e in material_write_entries
        if not _is_non_source_path(str(e.get("file", "")))
    ]

    return {
        "ledger_path": ledger_path,
        "ledger_entries": ledger_entries,
        "recent_entries": recent_entries,
        "recent_commands": recent_commands,
        "recent_tools": recent_tools,
        "recent_exit_codes": recent_exit_codes,
        "write_entries": write_entries,
        "material_write_entries": material_write_entries,
        "source_write_entries": source_write_entries,
        "has_writes": bool(write_entries),
        "has_material_writes": bool(material_write_entries),
        "has_source_writes": bool(source_write_entries),
    }
|
|
227
|
+
|
|
228
|
+
|
|
229
|
+
def check_verification(data: dict, project_dir: str) -> list[str]:
    """CHECK: verification gate — modified source requires proof of checking.

    Sniffs the recent command history (the shared "_stop_ctx" built by
    _build_context) for test/lint/build keywords. If source files were written
    and no verification command ran, a block message is emitted; when a
    quality-gate.json exists, the message names its configured steps.

    A second, policy-driven gate requires an EvidencePack when source was
    modified: strict policy modes make a missing pack a hard block, while the
    warn/advisory modes only append to "_stop_advisories".

    Side effects on *data*: sets "_has_test" (consumed later by
    check_test_execution) and may create/append "_stop_advisories".
    Returns a list of block messages (empty = no objection).
    """
    if not get_feature_flag("verification", True):
        return []

    context = data["_stop_ctx"]
    blocks = []
    # Advisory list is shared across checks; create it on first access.
    advisories = data.setdefault("_stop_advisories", [])

    recent_commands = context["recent_commands"]
    has_source_writes = context["has_source_writes"]

    # Keyword sniffing over lowercased, truncated command strings
    # (see _build_context). Substring match, so e.g. "make" also hits
    # commands that merely contain the word.
    has_test = any(
        any(kw in cmd for kw in ["test", "jest", "vitest", "pytest", "cargo test", "go test"])
        for cmd in recent_commands
    )
    has_lint = any(
        any(kw in cmd for kw in ["lint", "eslint", "ruff check", "golint", "clippy"])
        for cmd in recent_commands
    )
    has_build = any(
        any(kw in cmd for kw in ["build", "compile", "tsc", "cargo build", "go build", "make"])
        for cmd in recent_commands
    )
    has_any_verification = has_test or has_lint or has_build

    # Stashed for check_test_execution, which runs after this check.
    data["_has_test"] = has_test

    # Collect which quality-gate steps are configured (non-empty strings).
    qg_path = resolve_state_file(project_dir, "state/quality-gate.json", "quality-gate.json")
    expected_checks = []
    if os.path.exists(qg_path):
        try:
            with open(qg_path, "r", encoding="utf-8", errors="ignore") as f:
                qg = json.load(f)
                for step in ["format", "lint", "typecheck", "test"]:
                    cmd = qg.get(step)
                    if isinstance(cmd, str) and cmd.strip():
                        expected_checks.append(step)
        except Exception as e:  # security: quality gate loading
            print(f"[OMG] stop_dispatcher: {type(e).__name__}: {e}", file=sys.stderr)

    if has_source_writes and not has_any_verification:
        if expected_checks:
            blocks.append(
                "Code was modified but NO verification commands were run.\n"
                f"Quality gate expects: {', '.join(expected_checks)}.\n"
                "Run your verification commands before completing.\n"
                "If you can't run them, explicitly state what is **Unverified** and why."
            )
        else:
            blocks.append(
                "Code was modified but NO verification commands were run.\n"
                "No quality-gate.json configured, but at minimum run lint/test/build.\n"
                "Run /OMG:init to configure quality gates, or explicitly state\n"
                "what is **Unverified** and why."
            )

    # Evidence gate: env var OMG_EVIDENCE_REQUIRED overrides the policy file.
    policy_mode, policy_require_evidence = _read_policy_flags(project_dir)
    env_evidence_required = os.environ.get("OMG_EVIDENCE_REQUIRED")
    evidence_required = _to_bool(env_evidence_required, policy_require_evidence)
    # Any mode NOT in this allowlist is treated as strict (hard block).
    strict_evidence_gate = policy_mode.strip().lower() not in {
        "warn_and_run",
        "warn",
        "advisory",
        "report_only",
    }

    if has_source_writes and evidence_required:
        has_ev = False
        if has_recent_evidence is not None:
            try:
                # Optional shadow_manager hook; 24h window.
                has_ev = bool(has_recent_evidence(project_dir, hours=24))
            except Exception as e:  # security: evidence verification
                print(f"[OMG] stop_dispatcher: {type(e).__name__}: {e}", file=sys.stderr)
                has_ev = False
        else:
            # Fallback: any *.json file under .omg/evidence/ counts
            # (no freshness check in this path).
            ev_dir = os.path.join(project_dir, ".omg", "evidence")
            has_ev = os.path.isdir(ev_dir) and any(n.endswith(".json") for n in os.listdir(ev_dir))

        if not has_ev:
            message = (
                "OMG v1 evidence gate: source code was modified but no EvidencePack was found.\n"
                "Create .omg/evidence/<run-id>.json before completing.\n"
                "Required fields: tests, security_scans, diff_summary, reproducibility, unresolved_risks."
            )
            if strict_evidence_gate:
                blocks.append(message)
            else:
                advisories.append(
                    f"[OMG advisory] {message} (policy mode: {policy_mode or 'warn_and_run'})"
                )

    return blocks
|
|
321
|
+
|
|
322
|
+
|
|
323
|
+
def check_diff_budget(data, project_dir):
    """Block completion when the working-tree diff exceeds the change budget.

    The budget defaults to 3 files / 120 LOC and is raised by a
    CHANGE_BUDGET=medium|large marker in the plan file. Any git/IO failure is
    logged to stderr and treated as "within budget". Always stores the changed
    file list on data["_changed_files"] for the downstream checks.
    """
    if not get_feature_flag("diff_budget", True):
        return []

    blocks = []
    changed_files = []
    try:
        max_files, max_loc = 3, 120
        plan_path = resolve_state_file(project_dir, "state/_plan.md", "_plan.md")
        if os.path.exists(plan_path):
            with open(plan_path, "r", encoding="utf-8", errors="ignore") as fh:
                plan_text = fh.read()
            if "CHANGE_BUDGET=large" in plan_text:
                max_files, max_loc = 999, 999999
            elif "CHANGE_BUDGET=medium" in plan_text:
                max_files, max_loc = 8, 400

        def _git_diff(extra_args):
            # Unstaged diff only; bounded by a 10s timeout.
            return subprocess.run(
                ["git", "diff"] + extra_args,
                capture_output=True,
                text=True,
                timeout=10,
                cwd=project_dir,
            ).stdout

        changed_files = [
            name for name in _git_diff(["--name-only"]).strip().split("\n") if name
        ]
        files_changed = len(changed_files)

        total_loc = 0
        for numstat_line in _git_diff(["--numstat"]).strip().split("\n"):
            fields = numstat_line.split("\t")
            if len(fields) >= 2:
                try:
                    # "-" marks binary files; count them as 0 lines.
                    total_loc += sum(int(v) for v in fields[:2] if v != "-")
                except ValueError:
                    pass  # intentional: skip unparseable numstat lines

        if files_changed > max_files or total_loc > max_loc:
            blocks.append(
                f"Diff exceeds budget: {files_changed} files / {total_loc} LOC "
                f"(limit: {max_files} / {max_loc}).\n"
                "Reduce scope OR set CHANGE_BUDGET=medium/large in .omg/state/_plan.md."
            )
    except Exception as e:  # security: diff budget enforcement
        print(f"[OMG] stop_dispatcher: {type(e).__name__}: {e}", file=sys.stderr)

    data["_changed_files"] = changed_files
    return blocks
|
|
379
|
+
|
|
380
|
+
|
|
381
|
+
def check_recent_failures(data, project_dir):
    """Block when the three most recent Bash commands all exited non-zero."""
    if not get_feature_flag("recent_failures", True):
        return []

    del project_dir  # unused; kept for the uniform check signature

    bash_results = [
        (entry.get("command", "")[:80], entry.get("exit_code"))
        for entry in data["_stop_ctx"]["recent_entries"]
        if entry.get("tool") == "Bash" and entry.get("exit_code") is not None
    ]
    if len(bash_results) < 3:
        return []

    tail = bash_results[-3:]
    if any(code == 0 for _, code in tail):
        return []

    lines = [f" {cmd} (exit {code})" for cmd, code in tail]
    return [
        "Last 3 commands ALL FAILED:\n"
        + "\n".join(lines)
        + "\n"
        + "Do not claim completion with unresolved failures.\n"
        + "Fix the failures, or document them as **Unverified**."
    ]
|
|
407
|
+
|
|
408
|
+
|
|
409
|
+
def check_test_execution(data, project_dir):
    """Block when test files were changed but no test command was observed."""
    if not get_feature_flag("test_execution", True):
        return []

    del project_dir  # unused; kept for the uniform check signature

    if not data["_stop_ctx"]["has_material_writes"]:
        return []

    ran_tests = bool(data.get("_has_test", False))  # set by check_verification
    markers = ["test", "spec", "__tests__", ".test.", ".spec.", "_test."]
    touched_tests = False
    try:
        touched_tests = any(
            any(marker in path.lower() for marker in markers)
            for path in data.get("_changed_files", [])
        )
    except Exception as e:  # security: test execution check
        print(f"[OMG] stop_dispatcher: {type(e).__name__}: {e}", file=sys.stderr)

    if touched_tests and not ran_tests:
        return [
            "Test files were modified but test suite was never executed.\n"
            "Run your test command to verify the tests actually pass."
        ]
    return []
|
|
439
|
+
|
|
440
|
+
|
|
441
|
+
def check_test_validator_coverage(data, project_dir):
    """Block when source files changed without any accompanying test/QA change."""
    if not get_feature_flag("test_validator_coverage", True):
        return []

    del project_dir  # unused; kept for the uniform check signature

    changed = data.get("_changed_files", [])
    if not data["_stop_ctx"]["has_source_writes"] or not changed:
        return []

    test_tokens = ("test", "spec", "__tests__", ".test.", ".spec.", "qa", "quality", "e2e")

    saw_source = False
    saw_test_or_qa = False
    for path in changed:
        lowered = path.lower()
        if any(token in lowered for token in test_tokens):
            saw_test_or_qa = True
        elif not _is_non_source_path(lowered):
            saw_source = True

    if saw_source and not saw_test_or_qa:
        return [
            "TEST-VALIDATOR: Source changes detected without test/QA updates.\n"
            "Add or update user-journey tests (including edge/error cases) for every new behavior."
        ]
    return []
|
|
480
|
+
|
|
481
|
+
|
|
482
|
+
def check_false_fix(data, project_dir):
    """Flag "false fixes": every changed file is test/script/config, none is source."""
    if not get_feature_flag("false_fix", True):
        return []

    del project_dir  # unused; kept for the uniform check signature

    if not data["_stop_ctx"]["has_material_writes"]:
        return []

    changed = data.get("_changed_files", [])
    only_non_source = True
    try:
        for path in changed:
            lowered = path.lower()
            if not any(pattern in lowered for pattern in NON_SOURCE_PATTERNS):
                only_non_source = False
                break
    except Exception as e:  # security: false fix detection
        print(f"[OMG] stop_dispatcher: {type(e).__name__}: {e}", file=sys.stderr)
        only_non_source = False

    if only_non_source and changed:
        return [
            "⚠ FALSE FIX DETECTED: Only test/script/config files were modified.\n"
            "No actual source code was changed. If the task was to fix a bug or\n"
            "implement a feature, you likely changed test expectations to match\n"
            "broken behavior instead of fixing the real code.\n\n"
            "Go back and modify the actual SOURCE files, not just tests/configs."
        ]
    return []
|
|
515
|
+
|
|
516
|
+
|
|
517
|
+
def check_write_failures(data, project_dir):
    """Surface Write/Edit operations that reported failure so the agent re-verifies."""
    if not get_feature_flag("write_failures", True):
        return []

    del project_dir  # unused; kept for the uniform check signature

    ctx = data["_stop_ctx"]
    if not ctx["has_material_writes"]:
        return []

    failed = []
    # Only scan the 30 most recent entries; older failures are stale.
    for entry in ctx["recent_entries"][-30:]:
        if entry.get("tool") not in ("Write", "Edit", "MultiEdit"):
            continue
        target = entry.get("file", "unknown")
        if _is_internal_control_path(str(target)):
            continue
        if entry.get("success") is False:
            failed.append(target)

    if not failed:
        return []

    # De-duplicate preserving first-seen order; cap the list at 5 paths.
    shown = list(dict.fromkeys(failed))[:5]
    return [
        "⚠ WRITE/EDIT FAILURE DETECTED:\n"
        f"These file operations may have failed: {', '.join(shown)}\n\n"
        "Before claiming success, you MUST:\n"
        "1. Read the file to verify your changes are actually there\n"
        "2. If the file wasn't updated, retry with a different method:\n"
        " - If Write failed (file exists): use Edit or Bash heredoc\n"
        " - If Edit failed (hook error): verify file, then retry\n"
        " - If hook error from external plugin: the write may have succeeded —\n"
        " READ the file to check before retrying\n"
        "3. Report honestly: 'Write failed' not 'Updated successfully'"
    ]
|
|
553
|
+
|
|
554
|
+
|
|
555
|
+
def check_bare_done(data, project_dir):
    """CHECK: Bare completion detection — blocks lazy 'Done.' responses."""
    if not get_feature_flag("bare_done", True):
        return []

    del project_dir  # unused; kept for the common (data, project_dir) signature

    path = data.get("transcript_path", "")
    if not path or not os.path.isfile(path):
        return []

    # Walk the transcript and keep the text of the final assistant message.
    final_text = ""
    try:
        with open(path, "r", encoding="utf-8", errors="ignore") as fh:
            for raw in fh:
                raw = raw.strip()
                if not raw:
                    continue
                try:
                    record = json.loads(raw)
                except (json.JSONDecodeError, ValueError):
                    continue
                if record.get("type") != "assistant":
                    continue
                payload = record.get("message", {}).get("content", "")
                if isinstance(payload, str):
                    final_text = payload
                elif isinstance(payload, list):
                    for part in payload:
                        if isinstance(part, dict) and part.get("type") == "text":
                            final_text = part.get("text", "")
                        elif isinstance(part, str):
                            final_text = part
    except Exception:
        return []

    # Nothing to judge, or long enough to plausibly be a real report.
    if not final_text or len(final_text) >= 200:
        return []

    # Structured content markers indicate a genuine report — never flag.
    if any(marker in final_text
           for marker in ("##", "- ", "```", "**Checks**", "**Files**")):
        return []

    stripped = final_text.strip()
    for pattern in (
        r"^\s*done\.?\s*$",
        r"^\s*complete\.?\s*$",
        r"^\s*completed\.?\s*$",
        r"^\s*finished\.?\s*$",
        r"^\s*all\s+done\.?\s*$",
    ):
        if re.match(pattern, stripped, re.IGNORECASE):
            return [
                "Bare completion detected. Provide a structured report with: "
                "files modified, checks run, and confidence level."
            ]

    return []
|
|
623
|
+
|
|
624
|
+
def check_simplifier(data, project_dir):
    """CHECK 7: Code simplifier — advisory only, never blocks.

    Scans small (<=10KB) files written this session for three
    maintainability smells and prints one advisory per smell per file to
    stderr: a >40% comment-to-line ratio, a generic identifier on a
    definition line, and a "noise" comment that restates the code.
    Always returns [] so this check can never block a stop.
    """
    if not get_feature_flag("simplifier", True):
        return []

    context = data["_stop_ctx"]
    source_write_entries = context.get("source_write_entries", [])
    if not source_write_entries:
        return []

    generic_name_re = re.compile(
        r'\b(data|result|item|temp|val|obj|info|stuff|thing)\b'
    )
    noise_comment_re = re.compile(
        r'^\s*(#|//) (get|set|return|check|create|update|delete) '
    )
    def_line_re = re.compile(r'^\s*(def |let |const |var )')
    # Hoisted out of the per-line loop: the sibling patterns above are
    # precompiled, so the comment-line matcher should be too.
    comment_line_re = re.compile(r'^\s*(#|//|/\*|\*)')

    advisories = []
    seen = set()

    for entry in source_write_entries:
        file_path = str(entry.get("file", ""))
        if not file_path or file_path in seen:
            continue
        seen.add(file_path)

        full_path = (
            file_path
            if os.path.isabs(file_path)
            else os.path.join(project_dir, file_path)
        )

        try:
            if os.path.getsize(full_path) > 10240:  # Skip files >10KB
                continue
            with open(full_path, "r", encoding="utf-8", errors="ignore") as f:
                lines = f.readlines()
        except (OSError, IOError):
            continue  # intentional: skip unreadable files

        if not lines:
            continue

        total = len(lines)
        comment_count = sum(
            1 for line in lines
            if line.strip() and comment_line_re.match(line)
        )

        if total > 0 and comment_count / total > 0.40:
            pct = comment_count * 100 // total
            advisories.append(
                f"@simplifier: {file_path} — {pct}% comment lines ({comment_count}/{total})"
            )

        # Report only the first offending line of each kind per file.
        for line in lines:
            if def_line_re.match(line) and generic_name_re.search(line):
                advisories.append(
                    f"@simplifier: {file_path} — generic name: {line.strip()[:80]}"
                )
                break

        for line in lines:
            if noise_comment_re.match(line):
                advisories.append(
                    f"@simplifier: {file_path} — noise comment: {line.strip()[:60]}"
                )
                break

    for adv in advisories:
        print(adv, file=sys.stderr)

    return []  # Never blocks
|
|
700
|
+
|
|
701
|
+
|
|
702
|
+
def format_ralph_block_reason(state, project_dir):
    """Build the rich reason string that Claude sees as its next prompt.

    Args:
        state: Ralph-loop state dict; reads ``original_prompt``,
            ``iteration``, ``max_iterations``, ``checklist_path``.
        project_dir: Base directory the checklist path is resolved against.

    Returns:
        The continuation prompt, with checklist progress appended when a
        readable checklist exists.
    """
    original = state.get('original_prompt', '')
    iteration = state.get('iteration', 0)
    max_iter = state.get('max_iterations', 50)
    checklist_path = state.get('checklist_path', '')

    progress = ''
    if checklist_path:
        full = os.path.join(project_dir, checklist_path)
        if os.path.exists(full):
            try:
                with open(full) as f:
                    lines = f.readlines()
                # Anchored item patterns (same as check_planning_gate):
                # the previous unanchored r'\[x\]' counted prose lines that
                # merely mention "[x]", letting done exceed total.
                done = sum(
                    1 for l in lines
                    if re.search(r'^\s*-\s*\[x\]', l, re.IGNORECASE)
                )
                total = sum(
                    1 for l in lines
                    if re.search(r'^\s*-\s*\[[ x!]\]', l, re.IGNORECASE)
                )
                progress = f' | Progress: {done}/{total}'
            except OSError:
                pass  # intentional: progress display is optional

    return (
        f"Ralph loop iteration {iteration}/{max_iter}{progress}. "
        f"Continue working on: {original}\n"
        "If truly done, run: /OMG:ralph-stop"
    )
|
|
727
|
+
|
|
728
|
+
def check_ralph_loop(project_dir, data):
    """Drive the Ralph autonomous loop from the stop hook.

    Reads ``.omg/state/ralph-loop.json``; when the loop is active it either
    expires it (timeout or max iterations, returned as an advisory) or
    increments the iteration counter and returns a continuation prompt as a
    block reason.

    Returns:
        (block_reasons, advisories) — both lists of strings.
    """
    del data  # unused; kept for the common check signature

    if not get_feature_flag("ralph_loop"):
        return [], []
    ralph_path = os.path.join(project_dir, ".omg", "state", "ralph-loop.json")
    if not os.path.exists(ralph_path):
        return [], []
    try:
        with open(ralph_path, "r", encoding="utf-8") as f:
            state = json.load(f)
    except (json.JSONDecodeError, OSError):
        return [], []
    if not state.get("active"):
        return [], []

    # Check if Ralph loop has expired.
    try:
        timeout_minutes = int(os.environ.get("OMG_RALPH_TIMEOUT_MINUTES", "30"))
    except ValueError:
        # A malformed env var must not crash the hook — fall back to default.
        timeout_minutes = 30
    started_at_str = state.get("started_at")
    if started_at_str:
        try:
            # Accept a trailing "Z" (fromisoformat rejects it before 3.11).
            started_at = datetime.fromisoformat(started_at_str.replace("Z", "+00:00"))
            now = datetime.now(timezone.utc)
            elapsed = now - started_at
            if elapsed.total_seconds() > timeout_minutes * 60:
                state["active"] = False
                atomic_json_write(ralph_path, state)
                return [], [f"Ralph loop expired after {timeout_minutes} minutes. Stopping."]
        except (ValueError, TypeError):
            pass  # unparseable timestamp: skip the timeout check

    iteration = state.get("iteration", 0)
    max_iter = state.get("max_iterations", 50)
    if iteration >= max_iter:
        state["active"] = False
        atomic_json_write(ralph_path, state)
        return [], ["Ralph loop reached max iterations. Stopping."]
    state["iteration"] = iteration + 1
    atomic_json_write(ralph_path, state)
    reason = format_ralph_block_reason(state, project_dir)
    return [reason], []
|
|
769
|
+
|
|
770
|
+
|
|
771
|
+
def check_planning_gate(project_dir):
    """Gate completion on the session checklist.

    Counts checklist items ("- [ ]" pending, "- [x]" done, "- [!]" blocked)
    in the resolved _checklist.md. If items remain pending, blocks — unless
    context pressure is high, in which case the gate demotes to an advisory.

    Returns:
        (block_reasons, advisories) — both lists of strings.
    """
    if not get_feature_flag("planning_enforcement"):
        return [], []
    checklist = resolve_state_file(project_dir, "state/_checklist.md", "_checklist.md")
    if not os.path.exists(checklist):
        return [], []
    try:
        with open(checklist, "r", encoding="utf-8") as f:
            lines = f.readlines()
    except OSError:
        return [], []
    # IGNORECASE on `total` too: previously "- [X]" matched the
    # case-insensitive `done` count but not `total`, so `pending` could go
    # negative and the gate silently passed despite open items.
    total = sum(1 for l in lines if re.search(r"^\s*-\s*\[[ x!]\]", l, re.IGNORECASE))
    done = sum(1 for l in lines if re.search(r"^\s*-\s*\[x\]", l, re.IGNORECASE))
    blocked = sum(1 for l in lines if re.search(r"^\s*-\s*\[!\]", l))
    pending = total - done - blocked
    if pending > 0:
        # Check context pressure — demote to advisory if high.
        pressure_path = os.path.join(project_dir, ".omg", "state", ".context-pressure.json")
        is_high_pressure = False
        try:
            if os.path.exists(pressure_path):
                with open(pressure_path, "r") as pf:
                    is_high_pressure = json.load(pf).get("is_high", False)
        except Exception:
            pass  # best-effort: an unreadable pressure file means "not high"

        if is_high_pressure:
            # Demote to advisory — don't block when context is exhausted.
            return [], [f"[OMG advisory] Planning gate: {done}/{total} complete, {pending} pending. (demoted: context pressure high)"]

        return [
            f"Planning gate: {done}/{total} complete, {pending} pending. Complete checklist before finishing."
        ], []
    return [], []
|
|
806
|
+
|
|
807
|
+
|
|
808
|
+
def check_scope_drift(project_dir):
    """Advisory: flag when many changed files are absent from the plan."""
    try:
        proc = subprocess.run(
            ["git", "diff", "--name-only", "HEAD"],
            capture_output=True,
            text=True,
            timeout=5,
            cwd=project_dir,
        )
        changed = [name.strip() for name in proc.stdout.splitlines() if name.strip()]
        if not changed:
            return []

        plan_path = resolve_state_file(project_dir, "state/_plan.md", "_plan.md")
        if not os.path.exists(plan_path):
            return []
        with open(plan_path, "r", encoding="utf-8") as fh:
            plan_text = fh.read()

        # A file counts as "in plan" when its basename appears anywhere in
        # the plan text; more than 30% outside the plan triggers the notice.
        in_plan = sum(1 for name in changed if os.path.basename(name) in plan_text)
        outside = len(changed) - in_plan
        if outside / len(changed) > 0.3:
            return [f"Scope drift: {outside}/{len(changed)} changed files not in plan."]
    except Exception as e:  # security: scope drift detection
        print(f"[OMG] stop_dispatcher: {type(e).__name__}: {e}", file=sys.stderr)
    return []
|
|
832
|
+
|
|
833
|
+
|
|
834
|
+
|
|
835
|
+
def check_todo_continuation(data: dict) -> dict | None:
    """Check if agent should continue due to incomplete todos.

    Reads the idle-signal file written by the todo watcher and, when idle
    with incomplete items, returns a block decision naming up to three of
    them.

    Returns a dict with continuation response if idle, None otherwise.
    Budget: STOP_CHECK_MAX_MS (15s)
    Feature flag: OMG_TODO_ENFORCEMENT_ENABLED
    """
    if not get_feature_flag("TODO_ENFORCEMENT", default=False):
        return None

    project_dir = get_project_dir()
    signal_path = os.path.join(project_dir, ".omg", "state", "idle_signal.json")

    if not os.path.exists(signal_path):
        return None

    try:
        with open(signal_path, "r", encoding="utf-8") as f:
            idle_signal = json.load(f)
    except (json.JSONDecodeError, OSError):
        return None

    if not isinstance(idle_signal, dict):
        return None

    if not idle_signal.get("idle_detected", False):
        return None

    incomplete_count = idle_signal.get("incomplete_count", 0)
    incomplete_items = idle_signal.get("incomplete_items", [])
    # The signal file is external input: tolerate a non-list payload and
    # non-string items rather than letting join() crash the stop hook.
    if not isinstance(incomplete_items, list):
        incomplete_items = []
    preview = ", ".join(str(item) for item in incomplete_items[:3])

    return {
        "decision": "block",
        "reason": f"Incomplete todos detected ({incomplete_count} items). Please complete: {preview}"
    }
|
|
869
|
+
|
|
870
|
+
|
|
871
|
+
def main():
    """Stop-hook dispatcher entry point.

    Runs the gate checks in priority order (P1 Ralph loop, P2 planning
    gate + scope drift, P3 todo continuation), then the quality checks,
    then emits advisories and either blocks or exits cleanly.
    """
    data = json_input()

    # Unified guard: stop-hook loop, context-limit, and re-entry detection
    if should_skip_stop_hooks(data):
        sys.exit(0)

    project_dir = _resolve_project_dir()
    # Shared per-invocation state consumed by the individual check functions.
    data["_stop_ctx"] = _build_context(project_dir)
    data["_stop_advisories"] = []

    # P1: Ralph loop check (implemented in Task 18)
    block_reasons, advisories = check_ralph_loop(project_dir, data)
    if advisories:
        data["_stop_advisories"].extend(advisories)
    if block_reasons:
        record_stop_block(project_dir, reason="ralph_loop")
        # NOTE(review): block_decision presumably emits the decision and does
        # not return (later blocks are never reached) — confirm in its def.
        block_decision(block_reasons[0])

    # P2: Planning enforcement (implemented in Task 22)
    block_reasons, advisories = check_planning_gate(project_dir)
    if block_reasons:
        record_stop_block(project_dir, reason="planning_gate")
        block_decision(block_reasons[0])
    # Scope drift is advisory-only; its findings ride along with P2's.
    advisories.extend(check_scope_drift(project_dir))
    if advisories:
        data["_stop_advisories"].extend(advisories)

    # P3: Todo continuation enforcement (Task 1.6)
    _p3_start = time.monotonic()
    todo_result = check_todo_continuation(data)
    _p3_elapsed = (time.monotonic() - _p3_start) * 1000
    # Elapsed time is reported in milliseconds against the stop-check budget.
    check_performance_budget("check_todo_continuation", _p3_elapsed, STOP_CHECK_MAX_MS)
    if todo_result and todo_result.get("decision") == "block":
        record_stop_block(project_dir, reason="todo_continuation")
        block_decision(todo_result["reason"])

    # Quality checks: each is isolated so one failing check cannot take the
    # whole dispatcher down; failures are logged and the rest still run.
    blocks = []
    for check_fn in [
        check_verification,
        check_diff_budget,
        check_recent_failures,
        check_test_execution,
        check_test_validator_coverage,
        check_false_fix,
        check_write_failures,
        check_bare_done,
        _test_validator_check,
        _quality_runner_check,
    ]:
        # Optional checks may be None when their module failed to load.
        if check_fn is None:
            continue
        try:
            result = check_fn(data, project_dir)
            if result:
                blocks.extend(result)
        except Exception as exc:
            name = getattr(check_fn, "__name__", str(check_fn))
            log_hook_error("stop_dispatcher", exc, {"check": name})

    # Advisories never block; surface them on stderr.
    advisories = data.get("_stop_advisories", [])
    if advisories:
        print("\n".join(advisories), file=sys.stderr)

    if blocks:
        record_stop_block(project_dir, reason="quality_check")
        block_decision("\n\n---\n\n".join(blocks))

    # Clean stop: run the advisory-only simplifier, reset the block tracker.
    check_simplifier(data, project_dir)
    reset_stop_block_tracker(project_dir)
    sys.exit(0)
|
|
942
|
+
|
|
943
|
+
|
|
944
|
+
# Script entry point: run the stop-hook dispatcher when executed directly.
if __name__ == "__main__":
    main()