@trac3er/oh-my-god 2.0.0 → 2.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude-plugin/marketplace.json +8 -8
- package/.claude-plugin/plugin.json +5 -4
- package/.claude-plugin/scripts/uninstall.sh +74 -3
- package/.claude-plugin/scripts/update.sh +78 -3
- package/.coveragerc +26 -0
- package/.mcp.json +4 -4
- package/CHANGELOG.md +14 -0
- package/CODE_OF_CONDUCT.md +27 -0
- package/CONTRIBUTING.md +62 -0
- package/OMG-setup.sh +1201 -355
- package/README.md +77 -56
- package/SECURITY.md +25 -0
- package/agents/__init__.py +1 -0
- package/agents/model_roles.py +196 -0
- package/agents/omg-architect-mode.md +3 -5
- package/agents/omg-backend-engineer.md +3 -5
- package/agents/omg-database-engineer.md +3 -5
- package/agents/omg-frontend-designer.md +4 -5
- package/agents/omg-implement-mode.md +4 -5
- package/agents/omg-infra-engineer.md +3 -5
- package/agents/omg-research-mode.md +4 -6
- package/agents/omg-security-auditor.md +3 -5
- package/agents/omg-testing-engineer.md +3 -5
- package/build/lib/yaml.py +321 -0
- package/commands/OMG:ai-commit.md +101 -14
- package/commands/OMG:arch.md +302 -19
- package/commands/OMG:ccg.md +12 -7
- package/commands/OMG:compat.md +25 -17
- package/commands/OMG:cost.md +173 -13
- package/commands/OMG:crazy.md +1 -1
- package/commands/OMG:create-agent.md +170 -20
- package/commands/OMG:deps.md +235 -17
- package/commands/OMG:domain-init.md +1 -1
- package/commands/OMG:escalate.md +41 -12
- package/commands/OMG:health-check.md +37 -13
- package/commands/OMG:init.md +122 -14
- package/commands/OMG:project-init.md +1 -1
- package/commands/OMG:session-branch.md +76 -9
- package/commands/OMG:session-fork.md +42 -5
- package/commands/OMG:session-merge.md +124 -8
- package/commands/OMG:setup.md +69 -12
- package/commands/OMG:stats.md +215 -14
- package/commands/OMG:teams.md +19 -10
- package/config/lsp_languages.yaml +8 -0
- package/hooks/__init__.py +0 -0
- package/hooks/_agent_registry.py +423 -0
- package/hooks/_analytics.py +291 -0
- package/hooks/_budget.py +31 -0
- package/hooks/_common.py +569 -0
- package/hooks/_compression_optimizer.py +119 -0
- package/hooks/_cost_ledger.py +176 -0
- package/hooks/_learnings.py +126 -0
- package/hooks/_memory.py +103 -0
- package/hooks/_protected_context.py +150 -0
- package/hooks/_token_counter.py +221 -0
- package/hooks/branch_manager.py +236 -0
- package/hooks/budget_governor.py +232 -0
- package/hooks/circuit-breaker.py +270 -0
- package/hooks/compression_feedback.py +254 -0
- package/hooks/config-guard.py +216 -0
- package/hooks/context_pressure.py +53 -0
- package/hooks/credential_store.py +1020 -0
- package/hooks/fetch-rate-limits.py +212 -0
- package/hooks/firewall.py +48 -0
- package/hooks/hashline-formatter-bridge.py +224 -0
- package/hooks/hashline-injector.py +273 -0
- package/hooks/hashline-validator.py +216 -0
- package/hooks/idle-detector.py +95 -0
- package/hooks/intentgate-keyword-detector.py +188 -0
- package/hooks/magic-keyword-router.py +195 -0
- package/hooks/policy_engine.py +505 -0
- package/hooks/post-tool-failure.py +19 -0
- package/hooks/post-write.py +219 -0
- package/hooks/post_write.py +46 -0
- package/hooks/pre-compact.py +398 -0
- package/hooks/pre-tool-inject.py +98 -0
- package/hooks/prompt-enhancer.py +672 -0
- package/hooks/quality-runner.py +191 -0
- package/hooks/query.py +512 -0
- package/hooks/secret-guard.py +61 -0
- package/hooks/secret_audit.py +144 -0
- package/hooks/session-end-capture.py +137 -0
- package/hooks/session-start.py +277 -0
- package/hooks/setup_wizard.py +582 -0
- package/hooks/shadow_manager.py +297 -0
- package/hooks/state_migration.py +225 -0
- package/hooks/stop-gate.py +7 -0
- package/hooks/stop_dispatcher.py +945 -0
- package/hooks/test-validator.py +361 -0
- package/hooks/test_generator_hook.py +123 -0
- package/hooks/todo-state-tracker.py +114 -0
- package/hooks/tool-ledger.py +149 -0
- package/hooks/trust_review.py +585 -0
- package/hud/omg-hud.mjs +31 -1
- package/lab/__init__.py +1 -0
- package/lab/pipeline.py +75 -0
- package/lab/policies.py +52 -0
- package/package.json +7 -18
- package/plugins/README.md +33 -61
- package/plugins/advanced/commands/OMG:deep-plan.md +3 -3
- package/plugins/advanced/commands/OMG:learn.md +1 -1
- package/plugins/advanced/commands/OMG:security-review.md +3 -3
- package/plugins/advanced/commands/OMG:ship.md +1 -1
- package/plugins/advanced/plugin.json +1 -1
- package/plugins/core/plugin.json +8 -3
- package/plugins/dephealth/__init__.py +0 -0
- package/plugins/dephealth/cve_scanner.py +188 -0
- package/plugins/dephealth/license_checker.py +135 -0
- package/plugins/dephealth/manifest_detector.py +423 -0
- package/plugins/dephealth/vuln_analyzer.py +169 -0
- package/plugins/testgen/__init__.py +0 -0
- package/plugins/testgen/codamosa_engine.py +402 -0
- package/plugins/testgen/edge_case_synthesizer.py +184 -0
- package/plugins/testgen/framework_detector.py +271 -0
- package/plugins/testgen/skeleton_generator.py +219 -0
- package/plugins/viz/__init__.py +0 -0
- package/plugins/viz/ast_parser.py +139 -0
- package/plugins/viz/diagram_generator.py +192 -0
- package/plugins/viz/graph_builder.py +444 -0
- package/plugins/viz/native_parsers.py +259 -0
- package/plugins/viz/regex_parser.py +112 -0
- package/pyproject.toml +81 -0
- package/rules/contextual/write-verify.md +2 -2
- package/rules/core/00-truth.md +1 -1
- package/rules/core/01-surgical.md +1 -1
- package/rules/core/02-circuit-breaker.md +2 -2
- package/rules/core/03-ensemble.md +3 -3
- package/rules/core/04-testing.md +3 -3
- package/runtime/__init__.py +32 -0
- package/runtime/adapters/__init__.py +13 -0
- package/runtime/adapters/claude.py +60 -0
- package/runtime/adapters/gpt.py +53 -0
- package/runtime/adapters/local.py +53 -0
- package/runtime/adoption.py +212 -0
- package/runtime/business_workflow.py +220 -0
- package/runtime/cli_provider.py +85 -0
- package/runtime/compat.py +1299 -0
- package/runtime/custom_agent_loader.py +366 -0
- package/runtime/dispatcher.py +47 -0
- package/runtime/ecosystem.py +371 -0
- package/runtime/legacy_compat.py +7 -0
- package/runtime/mcp_config_writers.py +115 -0
- package/runtime/mcp_lifecycle.py +153 -0
- package/runtime/mcp_memory_server.py +135 -0
- package/runtime/memory_parsers/__init__.py +0 -0
- package/runtime/memory_parsers/chatgpt_parser.py +257 -0
- package/runtime/memory_parsers/claude_import.py +107 -0
- package/runtime/memory_parsers/export.py +97 -0
- package/runtime/memory_parsers/gemini_import.py +91 -0
- package/runtime/memory_parsers/kimi_import.py +91 -0
- package/runtime/memory_store.py +215 -0
- package/runtime/omc_compat.py +7 -0
- package/runtime/providers/__init__.py +0 -0
- package/runtime/providers/codex_provider.py +112 -0
- package/runtime/providers/gemini_provider.py +128 -0
- package/runtime/providers/kimi_provider.py +151 -0
- package/runtime/providers/opencode_provider.py +144 -0
- package/runtime/subagent_dispatcher.py +362 -0
- package/runtime/team_router.py +1167 -0
- package/runtime/tmux_session_manager.py +169 -0
- package/scripts/check-omg-compat-contract-snapshot.py +137 -0
- package/scripts/check-omg-contract-snapshot.py +12 -0
- package/scripts/check-omg-public-ready.py +193 -0
- package/scripts/check-omg-standalone-clean.py +103 -0
- package/scripts/legacy_to_omg_migrate.py +29 -0
- package/scripts/migrate-legacy.py +464 -0
- package/scripts/omc_to_omg_migrate.py +12 -0
- package/scripts/omg.py +492 -0
- package/scripts/settings-merge.py +283 -0
- package/scripts/verify-standalone.sh +8 -4
- package/settings.json +126 -29
- package/templates/profile.yaml +1 -1
- package/tools/__init__.py +2 -0
- package/tools/browser_consent.py +289 -0
- package/tools/browser_stealth.py +481 -0
- package/tools/browser_tool.py +448 -0
- package/tools/changelog_generator.py +347 -0
- package/tools/commit_splitter.py +746 -0
- package/tools/config_discovery.py +151 -0
- package/tools/config_merger.py +449 -0
- package/tools/dashboard_generator.py +300 -0
- package/tools/git_inspector.py +298 -0
- package/tools/lsp_client.py +275 -0
- package/tools/lsp_discovery.py +231 -0
- package/tools/lsp_operations.py +392 -0
- package/tools/pr_generator.py +404 -0
- package/tools/python_repl.py +656 -0
- package/tools/python_sandbox.py +609 -0
- package/tools/search_providers/__init__.py +77 -0
- package/tools/search_providers/brave.py +115 -0
- package/tools/search_providers/exa.py +116 -0
- package/tools/search_providers/jina.py +104 -0
- package/tools/search_providers/perplexity.py +139 -0
- package/tools/search_providers/synthetic.py +74 -0
- package/tools/session_snapshot.py +736 -0
- package/tools/ssh_manager.py +912 -0
- package/tools/theme_engine.py +294 -0
- package/tools/theme_selector.py +137 -0
- package/tools/web_search.py +622 -0
- package/yaml.py +321 -0
- package/.claude-plugin/scripts/install.sh +0 -9
- package/bun.lock +0 -23
- package/bunfig.toml +0 -3
- package/hooks/_budget.ts +0 -1
- package/hooks/_common.ts +0 -63
- package/hooks/circuit-breaker.ts +0 -101
- package/hooks/config-guard.ts +0 -4
- package/hooks/firewall.ts +0 -20
- package/hooks/policy_engine.ts +0 -156
- package/hooks/post-tool-failure.ts +0 -22
- package/hooks/post-write.ts +0 -4
- package/hooks/pre-tool-inject.ts +0 -4
- package/hooks/prompt-enhancer.ts +0 -46
- package/hooks/quality-runner.ts +0 -24
- package/hooks/secret-guard.ts +0 -4
- package/hooks/session-end-capture.ts +0 -19
- package/hooks/session-start.ts +0 -19
- package/hooks/shadow_manager.ts +0 -81
- package/hooks/stop-gate.ts +0 -22
- package/hooks/stop_dispatcher.ts +0 -147
- package/hooks/test-generator-hook.ts +0 -4
- package/hooks/tool-ledger.ts +0 -27
- package/hooks/trust_review.ts +0 -175
- package/lab/pipeline.ts +0 -75
- package/lab/policies.ts +0 -68
- package/runtime/common.ts +0 -111
- package/runtime/compat.ts +0 -174
- package/runtime/dispatcher.ts +0 -25
- package/runtime/ecosystem.ts +0 -186
- package/runtime/provider_bootstrap.ts +0 -99
- package/runtime/provider_smoke.ts +0 -34
- package/runtime/release_readiness.ts +0 -186
- package/runtime/team_router.ts +0 -144
- package/scripts/check-omg-compat-contract-snapshot.ts +0 -20
- package/scripts/check-omg-standalone-clean.ts +0 -12
- package/scripts/check-runtime-clean.ts +0 -94
- package/scripts/omg.ts +0 -352
- package/scripts/settings-merge.ts +0 -93
- package/tools/commit_splitter.ts +0 -23
- package/tools/git_inspector.ts +0 -18
- package/tools/session_snapshot.ts +0 -47
- package/trac3er-oh-my-god-2.0.0.tgz +0 -0
- package/tsconfig.json +0 -15
|
@@ -0,0 +1,361 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Stop Hook: Test Validator (v5) — Enhanced Anti-Pattern Detection
|
|
4
|
+
|
|
5
|
+
v5 additions (T32):
|
|
6
|
+
- Skip/ignore test detection (pytest.mark.skip, xit, xdescribe, etc.)
|
|
7
|
+
- Mock-heavy test detection (ratio-based: mocks vs assertions)
|
|
8
|
+
- Parameterized test gap detection (same function 3+ literal args)
|
|
9
|
+
- Assertion-free Python test detection (def test_* with no assert)
|
|
10
|
+
- Empty test body detection (pass, ..., comment-only)
|
|
11
|
+
- Coverage metrics persistence to .omg/state/test-metrics.json
|
|
12
|
+
|
|
13
|
+
Callable API:
|
|
14
|
+
check_test_quality(data, project_dir) -> list[str]
|
|
15
|
+
Returns list of block reasons (empty = pass).
|
|
16
|
+
analyze_test_content(content, filename) -> list[str]
|
|
17
|
+
Returns list of issue strings for a single file's content.
|
|
18
|
+
persist_metrics(project_dir, analysis) -> None
|
|
19
|
+
Writes test quality metrics to .omg/state/test-metrics.json.
|
|
20
|
+
"""
|
|
21
|
+
import json, sys, os, re
from collections import Counter  # NOTE(review): imported but unused in this file
from datetime import datetime, timezone

# Make sibling hook modules importable when this file runs as a standalone script.
HOOKS_DIR = os.path.dirname(__file__)
if HOOKS_DIR not in sys.path:
    sys.path.insert(0, HOOKS_DIR)

from _common import _resolve_project_dir, should_skip_stop_hooks

# --- Builtins excluded from parameterized-gap detection ---
# NOTE(review): callers compare with func.lower(), so the mixed-case entries
# below ("assertEqual", "Mock", "MagicMock") can never match as written —
# kept verbatim to preserve current behavior; see _detect_parameterized_gap.
_BUILTIN_FUNCS = frozenset({
    "test", "it", "describe", "print", "len", "range", "str", "int",
    "float", "list", "dict", "set", "tuple", "type", "isinstance",
    "assert_equal", "assertEqual", "patch", "mock", "Mock", "MagicMock",
    "expect", "require", "import", "open", "super", "getattr", "setattr",
    "hasattr", "sorted", "enumerate", "zip", "map", "filter", "min", "max",
})
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def analyze_test_content(content, filename="test.py"):
    """
    Analyze test file content for quality anti-patterns.

    Args:
        content: Raw source text of a test file (Python and/or JS style).
        filename: Accepted for interface compatibility; not used internally.

    Returns list of issue strings, each prefixed with a category label:
        FAKE:, BOILERPLATE:, HAPPY PATH ONLY:, EMPTY:, OVER-MOCKED:,
        SKIP:, ASSERTION-FREE:, MOCK-HEAVY:, PARAMETERIZED:
    """
    issues = []

    # === FAKE TEST PATTERNS (v3, kept) ===
    fake_patterns = [
        (r"expect\s*\(\s*true\s*\)\s*\.to(Be|Equal)\s*\(\s*true\s*\)", "assert true === true"),
        (r"expect\s*\(\s*1\s*\)\s*\.toBe\s*\(\s*1\s*\)", "assert 1 === 1"),
        (r"assert\s+True\b", "assert True (Python)"),
        (r"assert\s+1\s*==\s*1", "assert 1 == 1"),
    ]
    for pat, label in fake_patterns:
        if re.search(pat, content):
            issues.append(f"FAKE: {label}")

    # === BOILERPLATE-ONLY (v4, kept) ===
    type_checks = len(re.findall(
        r"(typeof\s+\w+|instanceof\s+\w+|toBeDefined|toBeInstanceOf|\.type\b)", content))
    behavior_checks = len(re.findall(
        r"(toEqual|toContain|toMatch|toThrow|rejects|resolves|toHaveBeenCalledWith|"
        r"toHaveProperty|toHaveLength|toBeGreaterThan|toBeLessThan|assert.*==|"
        r"assertEqual|assertIn|assertRaises|assert_called_with)", content))

    if type_checks > 3 and behavior_checks == 0:
        issues.append("BOILERPLATE: Only checks types/existence, never tests actual behavior")

    # === HAPPY PATH ONLY (v4, kept) ===
    has_error_tests = bool(re.search(
        r"(toThrow|rejects|assertRaises|error|invalid|empty|null|undefined|"
        r"edge.case|boundary|overflow|timeout|unauthorized|forbidden|not.found|"
        r"bad.request|missing|malformed)", content, re.IGNORECASE))
    test_count = len(re.findall(r"(test|it|describe)\s*\(", content))

    if test_count >= 3 and not has_error_tests:
        issues.append("HAPPY PATH ONLY: No error/edge case tests. "
                      "What happens with bad input? Unauthorized? Empty data?")

    # === NO ASSERTIONS — JS style (v3, kept) ===
    test_bodies = re.findall(
        r"(?:test|it)\s*\([^)]+,\s*(?:async\s*)?\(\)\s*=>\s*\{([^}]*)\}",
        content, re.DOTALL)
    for body in test_bodies:
        if body.strip() and not re.search(
                r"(expect|assert|should|verify|check|toBe|toEqual|toThrow|toHave)",
                body, re.IGNORECASE):
            issues.append("EMPTY: Test body has no assertions")
            break

    # === OVER-MOCKED (v3, kept) ===
    mock_count = len(re.findall(
        r"(jest\.mock|mock\(|patch\(|MagicMock|stub\(|sinon\.stub)", content))
    if mock_count > 5 and behavior_checks <= 1:
        issues.append("OVER-MOCKED: Heavy mocking but barely tests real behavior")

    # ============================================================
    # v5 NEW PATTERNS
    # ============================================================

    # === SKIP / IGNORE TESTS (v5) ===
    # BUGFIX: the old r"@pytest\.mark\.skip" pattern (no boundary) also
    # matched the prefix of @pytest.mark.skipif and mislabeled it, while the
    # old "skipIf" pattern could never fire against real pytest code (the
    # marker is lowercase "skipif"). Word boundaries keep the two markers
    # distinct and correctly labeled.
    skip_patterns = [
        (r"@pytest\.mark\.skip\b", "@pytest.mark.skip"),
        (r"@pytest\.mark\.skipif\b", "@pytest.mark.skipif"),
        (r"@unittest\.skip", "@unittest.skip"),
        (r"\bit\.skip\s*\(", "it.skip()"),
        (r"\bdescribe\.skip\s*\(", "describe.skip()"),
        (r"\bxit\s*\(", "xit()"),
        (r"\bxdescribe\s*\(", "xdescribe()"),
    ]
    for pat, label in skip_patterns:
        if re.search(pat, content):
            issues.append(f"SKIP: {label} — skipped tests hide failures")

    # === ASSERTION-FREE Python tests (v5) ===
    _detect_assertion_free_python(content, issues)

    # === EMPTY TEST BODY — Python (v5) ===
    _detect_empty_python_test_body(content, issues)

    # === MOCK-HEAVY (v5, ratio-based refinement) ===
    # Different from OVER-MOCKED: catches moderate mock counts (3-5) whose
    # assertion-to-mock ratio is poor.
    assertion_count = len(re.findall(
        r"(\bassert\b|\bexpect\s*\(|\.should\b|\bverify\s*\()", content))
    if 3 <= mock_count <= 5 and assertion_count < mock_count / 2:
        issues.append(
            f"MOCK-HEAVY: {mock_count} mocks but only {assertion_count} assertions "
            f"— tests should verify behavior, not just mock dependencies")

    # === PARAMETERIZED TEST GAP (v5) ===
    _detect_parameterized_gap(content, issues)

    return issues
|
|
138
|
+
|
|
139
|
+
|
|
140
|
+
def _extract_python_test_bodies(content):
|
|
141
|
+
"""
|
|
142
|
+
Extract Python test function names and their body text.
|
|
143
|
+
Returns list of (test_name, body_text) tuples.
|
|
144
|
+
"""
|
|
145
|
+
results = []
|
|
146
|
+
test_defs = list(re.finditer(r'def\s+(test_\w+)\s*\([^)]*\)\s*:', content))
|
|
147
|
+
|
|
148
|
+
for idx, m in enumerate(test_defs):
|
|
149
|
+
body_start = m.end()
|
|
150
|
+
# Body extends to next def at same indent level, or EOF
|
|
151
|
+
if idx + 1 < len(test_defs):
|
|
152
|
+
body_end = test_defs[idx + 1].start()
|
|
153
|
+
else:
|
|
154
|
+
body_end = len(content)
|
|
155
|
+
|
|
156
|
+
raw_body = content[body_start:body_end]
|
|
157
|
+
# Keep only indented lines (the actual function body)
|
|
158
|
+
body_lines = []
|
|
159
|
+
for line in raw_body.split('\n'):
|
|
160
|
+
stripped = line.strip()
|
|
161
|
+
if not stripped:
|
|
162
|
+
continue
|
|
163
|
+
if line and (line[0] == ' ' or line[0] == '\t'):
|
|
164
|
+
body_lines.append(stripped)
|
|
165
|
+
elif body_lines:
|
|
166
|
+
# Non-indented non-empty line after body started = end of function
|
|
167
|
+
break
|
|
168
|
+
|
|
169
|
+
results.append((m.group(1), body_lines))
|
|
170
|
+
|
|
171
|
+
return results
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
def _detect_assertion_free_python(content, issues):
    """Flag Python test functions whose bodies contain no assertion keywords."""
    assertion_re = re.compile(
        r'(\bassert\b|\bexpect\s*\(|\.should\b|\bverify\s*\()', re.IGNORECASE)

    for name, lines in _extract_python_test_bodies(content):
        if not lines:
            # Fully empty bodies are reported by _detect_empty_python_test_body.
            continue
        if assertion_re.search(' '.join(lines)):
            continue
        # Bodies that are only pass/ellipsis/comments belong to the empty-body
        # detector, not this one.
        meaningful = [ln for ln in lines
                      if ln not in ('pass', '...') and not ln.startswith('#')]
        if meaningful:
            issues.append(f"ASSERTION-FREE: {name} has no assertions")
|
|
190
|
+
|
|
191
|
+
|
|
192
|
+
def _detect_empty_python_test_body(content, issues):
    """Flag Python test functions whose bodies are only pass/ellipsis/comments."""
    for name, lines in _extract_python_test_bodies(content):
        has_real_code = any(
            ln not in ('pass', '...') and not ln.startswith('#')
            for ln in lines)
        if not has_real_code:
            issues.append(f"EMPTY: {name} has empty body (only pass/ellipsis/comments)")
|
|
199
|
+
|
|
200
|
+
|
|
201
|
+
def _detect_parameterized_gap(content, issues):
    """
    Detect functions called 3+ times with different literal arguments,
    suggesting @pytest.mark.parametrize would be more appropriate.

    Appends PARAMETERIZED: findings to *issues* in place.
    """
    # Find function calls whose first argument is a numeric or string literal.
    calls = re.findall(
        r'\b(\w+)\s*\(\s*(\d+(?:\.\d+)?|"[^"]*"|\'[^\']*\')\s*(?:\)|,)',
        content)

    # BUGFIX: call names are lowercased before the exclusion test, but
    # _BUILTIN_FUNCS contains mixed-case entries ("assertEqual", "MagicMock",
    # ...) that could therefore never match. Compare against a lowercased
    # view of the exclusion set so those entries actually exclude.
    excluded = {name.lower() for name in _BUILTIN_FUNCS}

    # Group by function name, collecting the distinct literal arguments seen.
    call_groups = {}
    for func, arg in calls:
        if func.lower() not in excluded:
            call_groups.setdefault(func, set()).add(arg)

    for func, args in call_groups.items():
        if len(args) >= 3:
            # BUGFIX: the message previously read "consider
            # @pytest.mark.parametrize or @pytest.mark.parametrize"
            # (the same suggestion twice).
            issues.append(
                f"PARAMETERIZED: '{func}' called with {len(args)} different "
                f"literal values — consider @pytest.mark.parametrize or "
                f"a table-driven test loop")
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
def persist_metrics(project_dir, analysis):
    """
    Write test quality metrics to .omg/state/test-metrics.json.

    Args:
        project_dir: Project root directory.
        analysis: Dict with keys: total_tests, fake_count, boilerplate_count,
            edge_case_count, skip_count, assertion_free_count.

    Best-effort: every failure is swallowed so the hook itself never fails.
    """
    try:
        state_dir = os.path.join(project_dir, ".omg", "state")
        os.makedirs(state_dir, exist_ok=True)

        counter_keys = ("total_tests", "fake_count", "boilerplate_count",
                        "edge_case_count", "skip_count", "assertion_free_count")
        counts = {key: analysis.get(key, 0) for key in counter_keys}

        # Quality score: 1.0 = perfect, 0.0 = all tests problematic.
        # NOTE: edge_case_count (happy-path findings) intentionally does not
        # reduce the score — matches the original issue_sum.
        flagged = (counts["fake_count"] + counts["boilerplate_count"]
                   + counts["skip_count"] + counts["assertion_free_count"])
        total = counts["total_tests"]
        score = round(max(0.0, 1.0 - flagged / total), 3) if total > 0 else 1.0

        metrics = {
            "ts": datetime.now(timezone.utc).isoformat(),
            **counts,
            "quality_score": score,
        }

        with open(os.path.join(state_dir, "test-metrics.json"),
                  "w", encoding="utf-8") as fh:
            json.dump(metrics, fh, separators=(",", ":"))
    except Exception:
        pass  # Crash isolation: never fail the hook
|
|
268
|
+
|
|
269
|
+
|
|
270
|
+
def check_test_quality(data, project_dir):
    """
    Core test-quality validation for recently modified test files.

    Args:
        data: Hook payload (unused here; kept for hook-interface parity).
        project_dir: Project root used for git lookups and state persistence.

    Returns:
        List of block-reason strings (empty list = pass).
    """
    import subprocess

    # Find recently modified test files via git (best-effort; any failure
    # just yields an empty list and the check passes).
    test_files = []
    try:
        result = subprocess.run(
            ["git", "diff", "--name-only", "--diff-filter=AM"],
            capture_output=True, text=True, timeout=10, cwd=project_dir
        )
        for f in result.stdout.strip().split("\n"):
            if f and any(p in f.lower() for p in
                         [".test.", ".spec.", "_test.", "test_", "__tests__", ".tests."]):
                full = os.path.join(project_dir, f)
                if os.path.exists(full):
                    test_files.append(full)
    except Exception:
        pass

    if not test_files:
        return []

    warnings = []
    # Aggregate metrics across all files
    agg = {
        "total_tests": 0, "fake_count": 0, "boilerplate_count": 0,
        "edge_case_count": 0, "skip_count": 0, "assertion_free_count": 0,
    }

    for tf in test_files:
        try:
            with open(tf, "r", encoding="utf-8", errors="ignore") as f:
                content = f.read()
        except Exception:
            continue

        filename = os.path.basename(tf)
        issues = analyze_test_content(content, filename)

        # Count tests in this file.
        py_tests = len(re.findall(r'def\s+test_\w+\s*\(', content))
        # BUGFIX: without \b the pattern matched "it(" inside identifiers
        # such as "split(" or "wait(", inflating total_tests.
        js_tests = len(re.findall(r'\b(test|it)\s*\(', content))
        agg["total_tests"] += py_tests + js_tests

        # Count issue categories
        for issue in issues:
            if issue.startswith("FAKE:"):
                agg["fake_count"] += 1
            elif issue.startswith("BOILERPLATE:"):
                agg["boilerplate_count"] += 1
            elif "HAPPY PATH" in issue:
                agg["edge_case_count"] += 1
            elif issue.startswith("SKIP:"):
                agg["skip_count"] += 1
            elif issue.startswith("ASSERTION-FREE:"):
                agg["assertion_free_count"] += 1

        if issues:
            # BUGFIX: previously emitted the literal placeholder "(unknown)"
            # even though the filename was already computed above.
            warnings.append(f"{filename}: " + "; ".join(issues))

    # Persist metrics (best-effort; persist_metrics also self-isolates).
    try:
        persist_metrics(project_dir, agg)
    except Exception:
        pass

    if warnings:
        msg = "TEST QUALITY ISSUES:\n" + "\n".join(f"  {w}" for w in warnings)
        msg += ("\n\nTests should verify what USERS need, not just that code exists.\n"
                "Ask: 'What does the user expect to happen? What could go wrong?'\n"
                "Write tests for those scenarios.")
        return [msg]

    return []
|
|
345
|
+
|
|
346
|
+
|
|
347
|
+
# Standalone execution (backward compat: invoked directly by hook runner)
if __name__ == "__main__":
    # The hook runner feeds the event payload as JSON on stdin; malformed
    # or missing input is treated as a no-op.
    try:
        payload = json.load(sys.stdin)
    except (json.JSONDecodeError, EOFError):
        sys.exit(0)

    if not should_skip_stop_hooks(payload):
        reasons = check_test_quality(payload, _resolve_project_dir())
        if reasons:
            # Emit the block decision in the runner's expected JSON shape.
            json.dump({"decision": "block", "reason": reasons[0]}, sys.stdout)
    sys.exit(0)
|
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
PostToolUse Hook: Test Generation Suggestion
|
|
4
|
+
Suggests test generation when source files are modified without corresponding tests.
|
|
5
|
+
Feature-gated under TEST_GENERATION flag.
|
|
6
|
+
"""
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import importlib.util
|
|
10
|
+
import json
|
|
11
|
+
import os
|
|
12
|
+
import re
|
|
13
|
+
import sys
|
|
14
|
+
|
|
15
|
+
HOOKS_DIR = os.path.dirname(os.path.abspath(__file__))
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _load_common():
    """Load hooks/_common.py by file path (no package install required)."""
    common_path = os.path.join(HOOKS_DIR, "_common.py")
    spec = importlib.util.spec_from_file_location("_common", common_path)
    if spec is None or spec.loader is None:
        raise RuntimeError("Unable to load _common.py")
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)
    return mod
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
# Load shared helpers dynamically so this hook works without hooks/ on sys.path.
_common = _load_common()
setup_crash_handler = _common.setup_crash_handler
json_input = _common.json_input
get_feature_flag = _common.get_feature_flag
get_project_dir = _common.get_project_dir

# Tool names that modify files
WRITE_TOOLS = frozenset({"Write", "Edit", "MultiEdit"})

# Patterns indicating a file is a test file
TEST_FILE_PATTERNS = [
    r"(?:^|/)test_",  # test_ prefix (Python convention)
    r"_test\.",  # _test suffix before extension
    r"\.spec\.",  # .spec. (JS/TS convention)
    r"\.test\.",  # .test. (JS/TS convention)
    r"(?:^|/)tests/",  # inside tests/ directory
    r"(?:^|/)test/",  # inside test/ directory
    r"(?:^|/)__tests__/",  # inside __tests__/ directory
]

# Compiled alternation of all test-path patterns (used with re.search).
_TEST_RE = re.compile("|".join(TEST_FILE_PATTERNS))
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def _is_test_file(file_path: str) -> bool:
    """Return True when *file_path* matches any known test-file convention.

    Backslashes are normalized to forward slashes so Windows-style paths
    are recognized by the same patterns.
    """
    return _TEST_RE.search(file_path.replace("\\", "/")) is not None
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def _find_corresponding_test(file_path: str, project_dir: str) -> bool:
|
|
58
|
+
"""Return True if a corresponding test file exists for the given source file."""
|
|
59
|
+
basename = os.path.basename(file_path)
|
|
60
|
+
name, ext = os.path.splitext(basename)
|
|
61
|
+
dir_part = os.path.dirname(file_path)
|
|
62
|
+
|
|
63
|
+
# Python: tests/test_{basename}
|
|
64
|
+
if ext == ".py":
|
|
65
|
+
candidates = [
|
|
66
|
+
os.path.join(project_dir, "tests", f"test_{basename}"),
|
|
67
|
+
os.path.join(project_dir, dir_part, "tests", f"test_{basename}"),
|
|
68
|
+
]
|
|
69
|
+
# JS/TS: same dir + .test.{ext} or .spec.{ext}
|
|
70
|
+
elif ext in (".js", ".ts", ".jsx", ".tsx", ".mjs"):
|
|
71
|
+
candidates = [
|
|
72
|
+
os.path.join(project_dir, dir_part, f"{name}.test{ext}"),
|
|
73
|
+
os.path.join(project_dir, dir_part, f"{name}.spec{ext}"),
|
|
74
|
+
]
|
|
75
|
+
else:
|
|
76
|
+
# Generic: check tests/test_{name}{ext}
|
|
77
|
+
candidates = [
|
|
78
|
+
os.path.join(project_dir, "tests", f"test_{basename}"),
|
|
79
|
+
]
|
|
80
|
+
|
|
81
|
+
return any(os.path.exists(c) for c in candidates)
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
def main() -> None:
    """Suggest /OMG:testgen when a modified source file has no test file."""
    setup_crash_handler("test-generator-hook", fail_closed=False)

    payload = json_input()

    # Gate 1: feature must be enabled.
    if not get_feature_flag("TEST_GENERATION", default=False):
        sys.exit(0)

    # Gate 2: only tools that write files are interesting.
    if payload.get("tool_name", "") not in WRITE_TOOLS:
        sys.exit(0)

    # Gate 3: need a target path (some tools use file_path, others path).
    tool_input = payload.get("tool_input", {})
    target = tool_input.get("file_path") or tool_input.get("path", "")
    if not target:
        sys.exit(0)

    # Gate 4: editing a test file itself needs no suggestion.
    if _is_test_file(target):
        sys.exit(0)

    # Gate 5: a matching test already exists.
    if _find_corresponding_test(target, get_project_dir()):
        sys.exit(0)

    # All gates passed: surface a suggestion to the agent.
    json.dump(
        {
            "additionalContext": (
                f"No test file found for {target}. "
                "Consider running /OMG:testgen to generate tests."
            )
        },
        sys.stdout,
    )
    sys.exit(0)
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
# Entry point when executed directly by the hook runner.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
PostToolUse Hook: Todo State Tracker (v1)
|
|
4
|
+
|
|
5
|
+
Parses todo lists from agent responses and tracks completion status.
|
|
6
|
+
Persists state to .omg/state/todo_progress.json for cross-turn tracking.
|
|
7
|
+
|
|
8
|
+
Feature flag: OMG_TODO_TRACKING_ENABLED (default: False)
|
|
9
|
+
"""
|
|
10
|
+
import json
|
|
11
|
+
import sys
|
|
12
|
+
import os
|
|
13
|
+
import re
|
|
14
|
+
from datetime import datetime, timezone
|
|
15
|
+
|
|
16
|
+
HOOKS_DIR = os.path.dirname(__file__)
|
|
17
|
+
if HOOKS_DIR not in sys.path:
|
|
18
|
+
sys.path.insert(0, HOOKS_DIR)
|
|
19
|
+
|
|
20
|
+
from _common import (
|
|
21
|
+
setup_crash_handler,
|
|
22
|
+
json_input,
|
|
23
|
+
get_project_dir,
|
|
24
|
+
get_feature_flag,
|
|
25
|
+
atomic_json_write,
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
setup_crash_handler("todo-state-tracker", fail_closed=False)

# Feature flag gate. NOTE(review): the module docstring names the flag
# OMG_TODO_TRACKING_ENABLED while the lookup key is "TODO_TRACKING" —
# presumably get_feature_flag adds the prefix/suffix; confirm in _common.
if not get_feature_flag("TODO_TRACKING", default=False):
    sys.exit(0)

data = json_input()

# Extract response text from the various fields a PostToolUse payload may use.
response_text = ""
if isinstance(data, dict):
    # PostToolUse hook may have response in different fields
    response_text = (
        data.get("response", "")
        or data.get("tool_response", "")
        or data.get("message", "")
        or ""
    )
    if isinstance(response_text, dict):
        # Some payloads nest the text under a "content" key.
        response_text = response_text.get("content", "")

if not isinstance(response_text, str):
    response_text = str(response_text) if response_text else ""

# Parse todo items: regex pattern for markdown todo format
# Matches: - [ ] task text or - [x] task text (IGNORECASE also accepts [X]).
TODO_PATTERN = r'- \[([ x])\] (.+)'
matches = re.findall(TODO_PATTERN, response_text, re.IGNORECASE)

if not matches:
    # No todos found, exit cleanly
    sys.exit(0)

# Separate incomplete and complete items
incomplete_items = []
complete_items = []

for status, task_text in matches:
    task_text = task_text.strip()
    if status.lower() == 'x':
        complete_items.append(task_text)
    else:
        incomplete_items.append(task_text)

# Load existing state (tolerating a missing or corrupt file).
project_dir = get_project_dir()
state_path = os.path.join(project_dir, ".omg", "state", "todo_progress.json")

existing_state = {}
if os.path.exists(state_path):
    try:
        with open(state_path, "r", encoding="utf-8") as f:
            existing_state = json.load(f)
    except Exception:
        existing_state = {}

# Guard against a valid-JSON-but-not-an-object state file.
if not isinstance(existing_state, dict):
    existing_state = {}

# Cross-turn merge strategy:
# - Keep existing complete items (don't regress)
# - Add new complete items
# - Update incomplete items (replace with current turn's findings)
# - Preserve session_id if available
merged_complete = list(set(existing_state.get("complete", []) + complete_items))
merged_incomplete = incomplete_items  # Replace with current turn's findings

# Build new state
new_state = {
    "incomplete": merged_incomplete,
    "complete": merged_complete,
    "total": len(merged_incomplete) + len(merged_complete),
    "last_updated": datetime.now(timezone.utc).isoformat(),
}

# Preserve session_id if available.
# BUGFIX: guard the payload lookup with isinstance — json_input() is not
# guaranteed to return a dict (the response-extraction code above already
# checks), and `"session_id" in data` raises TypeError on non-container
# payloads such as numbers.
if "session_id" in existing_state:
    new_state["session_id"] = existing_state["session_id"]
elif isinstance(data, dict) and "session_id" in data:
    new_state["session_id"] = data.get("session_id")

# Atomically write state so a concurrent reader never sees a partial file.
atomic_json_write(state_path, new_state)

sys.exit(0)
|