@hustle-together/api-dev-tools 3.10.1 → 3.11.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/api-dev-state.json +159 -0
- package/.claude/commands/README.md +185 -0
- package/.claude/commands/add-command.md +209 -0
- package/.claude/commands/api-create.md +499 -0
- package/.claude/commands/api-env.md +50 -0
- package/.claude/commands/api-interview.md +331 -0
- package/.claude/commands/api-research.md +331 -0
- package/.claude/commands/api-status.md +259 -0
- package/.claude/commands/api-verify.md +231 -0
- package/.claude/commands/beepboop.md +97 -0
- package/.claude/commands/busycommit.md +112 -0
- package/.claude/commands/commit.md +83 -0
- package/.claude/commands/cycle.md +142 -0
- package/.claude/commands/gap.md +86 -0
- package/.claude/commands/green.md +142 -0
- package/.claude/commands/issue.md +192 -0
- package/.claude/commands/plan.md +168 -0
- package/.claude/commands/pr.md +122 -0
- package/.claude/commands/red.md +142 -0
- package/.claude/commands/refactor.md +142 -0
- package/.claude/commands/spike.md +142 -0
- package/.claude/commands/summarize.md +94 -0
- package/.claude/commands/tdd.md +144 -0
- package/.claude/commands/worktree-add.md +315 -0
- package/.claude/commands/worktree-cleanup.md +281 -0
- package/.claude/hooks/api-workflow-check.py +227 -0
- package/.claude/hooks/enforce-deep-research.py +185 -0
- package/.claude/hooks/enforce-disambiguation.py +155 -0
- package/.claude/hooks/enforce-documentation.py +192 -0
- package/.claude/hooks/enforce-environment.py +253 -0
- package/.claude/hooks/enforce-external-research.py +328 -0
- package/.claude/hooks/enforce-interview.py +421 -0
- package/.claude/hooks/enforce-refactor.py +189 -0
- package/.claude/hooks/enforce-research.py +159 -0
- package/.claude/hooks/enforce-schema.py +186 -0
- package/.claude/hooks/enforce-scope.py +160 -0
- package/.claude/hooks/enforce-tdd-red.py +250 -0
- package/.claude/hooks/enforce-verify.py +186 -0
- package/.claude/hooks/periodic-reground.py +154 -0
- package/.claude/hooks/session-startup.py +151 -0
- package/.claude/hooks/track-tool-use.py +626 -0
- package/.claude/hooks/verify-after-green.py +282 -0
- package/.claude/hooks/verify-implementation.py +225 -0
- package/.claude/research/index.json +6 -0
- package/.claude/settings.json +93 -0
- package/.claude/settings.local.json +11 -0
- package/.claude-plugin/marketplace.json +112 -0
- package/.skills/README.md +291 -0
- package/.skills/_shared/convert-commands.py +192 -0
- package/.skills/_shared/hooks/api-workflow-check.py +227 -0
- package/.skills/_shared/hooks/enforce-deep-research.py +185 -0
- package/.skills/_shared/hooks/enforce-disambiguation.py +155 -0
- package/.skills/_shared/hooks/enforce-documentation.py +192 -0
- package/.skills/_shared/hooks/enforce-environment.py +253 -0
- package/.skills/_shared/hooks/enforce-external-research.py +328 -0
- package/.skills/_shared/hooks/enforce-interview.py +421 -0
- package/.skills/_shared/hooks/enforce-refactor.py +189 -0
- package/.skills/_shared/hooks/enforce-research.py +159 -0
- package/.skills/_shared/hooks/enforce-schema.py +186 -0
- package/.skills/_shared/hooks/enforce-scope.py +160 -0
- package/.skills/_shared/hooks/enforce-tdd-red.py +250 -0
- package/.skills/_shared/hooks/enforce-verify.py +186 -0
- package/.skills/_shared/hooks/periodic-reground.py +154 -0
- package/.skills/_shared/hooks/session-startup.py +151 -0
- package/.skills/_shared/hooks/track-tool-use.py +626 -0
- package/.skills/_shared/hooks/verify-after-green.py +282 -0
- package/.skills/_shared/hooks/verify-implementation.py +225 -0
- package/.skills/_shared/install.sh +114 -0
- package/.skills/_shared/settings.json +93 -0
- package/.skills/add-command/SKILL.md +222 -0
- package/.skills/api-create/SKILL.md +512 -0
- package/.skills/api-env/SKILL.md +63 -0
- package/.skills/api-interview/SKILL.md +344 -0
- package/.skills/api-research/SKILL.md +344 -0
- package/.skills/api-status/SKILL.md +272 -0
- package/.skills/api-verify/SKILL.md +244 -0
- package/.skills/beepboop/SKILL.md +110 -0
- package/.skills/busycommit/SKILL.md +125 -0
- package/.skills/commit/SKILL.md +96 -0
- package/.skills/cycle/SKILL.md +155 -0
- package/.skills/gap/SKILL.md +99 -0
- package/.skills/green/SKILL.md +155 -0
- package/.skills/issue/SKILL.md +205 -0
- package/.skills/plan/SKILL.md +181 -0
- package/.skills/pr/SKILL.md +135 -0
- package/.skills/red/SKILL.md +155 -0
- package/.skills/refactor/SKILL.md +155 -0
- package/.skills/spike/SKILL.md +155 -0
- package/.skills/summarize/SKILL.md +107 -0
- package/.skills/tdd/SKILL.md +157 -0
- package/.skills/update-todos/SKILL.md +228 -0
- package/.skills/worktree-add/SKILL.md +328 -0
- package/.skills/worktree-cleanup/SKILL.md +294 -0
- package/CHANGELOG.md +97 -0
- package/README.md +58 -17
- package/package.json +22 -11
|
@@ -0,0 +1,250 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Hook: PreToolUse for Write/Edit
|
|
4
|
+
Purpose: Block writing implementation if test matrix not approved WITH USER CONFIRMATION
|
|
5
|
+
|
|
6
|
+
Phase 8 (TDD Red) requires:
|
|
7
|
+
1. Propose test matrix based on interview + schema
|
|
8
|
+
2. SHOW test plan to user (scenarios, edge cases, coverage)
|
|
9
|
+
3. USE AskUserQuestion: "Test plan looks good? [Y/n]"
|
|
10
|
+
4. Loop back if user wants more tests
|
|
11
|
+
5. Only allow route.ts after user approves test matrix
|
|
12
|
+
|
|
13
|
+
Returns:
|
|
14
|
+
- {"permissionDecision": "allow"} - Let the tool run
|
|
15
|
+
- {"permissionDecision": "deny", "reason": "..."} - Block with explanation
|
|
16
|
+
"""
|
|
17
|
+
import json
|
|
18
|
+
import sys
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
|
|
21
|
+
# State file is in .claude/ directory (sibling to hooks/)
|
|
22
|
+
STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def find_test_file(route_path: str) -> tuple[bool, str]:
    """Locate an existing test file for the given route file.

    Args:
        route_path: Path to a ``route.ts`` implementation file.

    Returns:
        ``(True, path)`` for the first candidate test file that exists on
        disk, otherwise ``(False, expected_path)`` where ``expected_path``
        is the conventional sibling ``<name>.test.ts`` location the caller
        can tell the user to create.
    """
    route = Path(route_path)
    tests_dir = route.parent / "__tests__"

    # Candidate locations, in preference order: sibling .test.ts/.test.tsx/
    # .spec.ts, then __tests__/ beside the route, then a __tests__/ one
    # level up named after the route's directory.
    candidates = (
        route.with_suffix(".test.ts"),
        route.with_suffix(".test.tsx"),
        route.with_suffix(".spec.ts"),
        tests_dir / f"{route.stem}.test.ts",
        tests_dir / f"{route.stem}.test.tsx",
        route.parent.parent / "__tests__" / f"{route.parent.name}.test.ts",
    )

    existing = next((c for c in candidates if c.exists()), None)
    if existing is not None:
        return True, str(existing)

    # Nothing found: report the preferred (first) candidate as the path
    # where the test file is expected to live.
    return False, str(candidates[0])
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def main() -> None:
    """PreToolUse gate for Write/Edit on API route files (TDD Red phase).

    Reads the hook payload from stdin and denies writes to a non-test
    ``/api/.../route.ts`` until the ``tdd_red`` phase in the state file is
    complete AND the user has confirmed phase exit. Prints exactly one JSON
    permission decision to stdout and always exits 0 (hook convention:
    the decision is in the JSON, not the exit code).
    """
    # Read hook input from stdin
    try:
        input_data = json.load(sys.stdin)
    except json.JSONDecodeError:
        # Malformed payload: fail open so a broken hook never blocks work.
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    tool_input = input_data.get("tool_input", {})
    file_path = tool_input.get("file_path", "")

    # Only enforce for route.ts files in /api/ directories
    if not file_path.endswith("route.ts") or "/api/" not in file_path:
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    # Allow if this IS a test file (shouldn't match but safety check)
    if ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path:
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    # Check if state file exists
    if not STATE_FILE.exists():
        # Even without state, enforce TDD: require a test file on disk.
        test_exists, expected_path = find_test_file(file_path)
        if not test_exists:
            print(json.dumps({
                "permissionDecision": "deny",
                "reason": f"""❌ TDD VIOLATION: No test file found!

You're trying to write: {file_path}

But the test file doesn't exist: {expected_path}

═══════════════════════════════════════════════════════════
⚠️  WRITE TESTS FIRST (TDD Red Phase)
═══════════════════════════════════════════════════════════

TDD requires:
1. Write a FAILING test first
2. THEN write implementation to make it pass

Create the test file first:
{expected_path}

Example test structure:
import {{ describe, it, expect }} from 'vitest';

describe('POST /api/...', () => {{
  it('should return 200 with valid input', async () => {{
    // Test implementation
  }});

  it('should return 400 with invalid input', async () => {{
    // Test validation
  }});
}});"""
            }))
            sys.exit(0)
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    # Load state
    try:
        state = json.loads(STATE_FILE.read_text())
    except json.JSONDecodeError:
        # Corrupt state file: fail open rather than dead-lock the session.
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    phases = state.get("phases", {})
    tdd_red = phases.get("tdd_red", {})
    tdd_red_status = tdd_red.get("status", "not_started")
    test_count = tdd_red.get("test_count", 0)

    # Get user checkpoint fields
    user_question_asked = tdd_red.get("user_question_asked", False)
    user_approved = tdd_red.get("user_approved", False)
    matrix_shown = tdd_red.get("matrix_shown", False)
    test_scenarios = tdd_red.get("test_scenarios", [])
    phase_exit_confirmed = tdd_red.get("phase_exit_confirmed", False)

    # Check if TDD Red phase is complete; both conditions must hold before
    # implementation code may be written.
    if tdd_red_status != "complete" or not phase_exit_confirmed:
        test_exists, expected_path = find_test_file(file_path)

        # Check what's missing for user checkpoint
        missing = []
        if not test_exists:
            missing.append("Test file not created yet")
        if not matrix_shown:
            missing.append("Test matrix not shown to user")
        if not user_question_asked:
            missing.append("User approval question (AskUserQuestion not used)")
        if not user_approved:
            missing.append("User hasn't approved the test plan")
        if not phase_exit_confirmed:
            missing.append("Phase exit confirmation (user must explicitly approve to proceed)")

        print(json.dumps({
            "permissionDecision": "deny",
            "reason": f"""❌ BLOCKED: TDD Red phase (Phase 8) not complete.

Current status: {tdd_red_status}
Test count: {test_count}
Test file exists: {test_exists}
Matrix shown: {matrix_shown}
User question asked: {user_question_asked}
User approved: {user_approved}
Phase exit confirmed: {phase_exit_confirmed}
Scenarios: {len(test_scenarios)}

MISSING:
{chr(10).join(f"  • {m}" for m in missing)}

═══════════════════════════════════════════════════════════
⚠️  GET USER APPROVAL FOR TEST MATRIX
═══════════════════════════════════════════════════════════

REQUIRED STEPS:

1. PROPOSE test matrix based on interview + schema:
   ┌───────────────────────────────────────────────────────┐
   │  TEST MATRIX                                          │
   │                                                       │
   │  Based on your interview, I'll test:                  │
   │                                                       │
   │  ✅ Success Scenarios:                                │
   │  • GET with valid domain → 200 + brand data           │
   │  • POST with full payload → 200 + created             │
   │                                                       │
   │  ✅ Error Scenarios (your choice: return objects):    │
   │  • Invalid domain → 400 + error object                │
   │  • Missing API key → 401 + error object               │
   │  • Not found → 404 + error object                     │
   │                                                       │
   │  ✅ Edge Cases:                                       │
   │  • Rate limit exceeded → 429 + retry-after            │
   │  • Cache hit → 200 + cached: true                     │
   │  • Empty response → 200 + empty data                  │
   │                                                       │
   │  Total: 8 test scenarios                              │
   │                                                       │
   │  Test plan looks good? [Y]                            │
   │  Add more tests? [n] ____                             │
   └───────────────────────────────────────────────────────┘

2. USE AskUserQuestion:
   question: "This test plan cover your requirements?"
   options: [
     {{"value": "approve", "label": "Yes, write these tests"}},
     {{"value": "add", "label": "Add more - I also need [scenario]"}},
     {{"value": "modify", "label": "Change a scenario - [which one]"}}
   ]

3. If user says "add" or "modify":
   • Update test_scenarios list
   • LOOP BACK and show updated matrix

4. If user says "approve":
   • Create test file: {expected_path}
   • Write all approved test scenarios
   • Run tests to confirm they FAIL (red)
   • Set tdd_red.user_approved = true
   • Set tdd_red.user_question_asked = true
   • Set tdd_red.matrix_shown = true
   • Set tdd_red.test_count = N
   • Set tdd_red.status = "complete"

Based on interview decisions:
{_format_interview_hints(phases.get("interview", {}))}

WHY: User approves what gets tested BEFORE implementation."""
        }))
        sys.exit(0)

    # TDD Red complete - allow implementation
    print(json.dumps({
        "permissionDecision": "allow",
        "message": f"""✅ TDD Red phase complete.
{test_count} tests written and failing.
User approved {len(test_scenarios)} test scenarios.
Proceeding to Green phase - make them pass!"""
    }))
    sys.exit(0)
|
|
231
|
+
|
|
232
|
+
|
|
233
|
+
def _format_interview_hints(interview: dict) -> str:
|
|
234
|
+
"""Format interview decisions as test hints."""
|
|
235
|
+
decisions = interview.get("decisions", {})
|
|
236
|
+
if not decisions:
|
|
237
|
+
return " (no interview decisions recorded)"
|
|
238
|
+
|
|
239
|
+
hints = []
|
|
240
|
+
for key, data in list(decisions.items())[:5]:
|
|
241
|
+
value = data.get("value", data.get("response", ""))
|
|
242
|
+
if value:
|
|
243
|
+
short_value = str(value)[:50] + "..." if len(str(value)) > 50 else str(value)
|
|
244
|
+
hints.append(f" • {key}: {short_value}")
|
|
245
|
+
|
|
246
|
+
return "\n".join(hints) if hints else " (no interview decisions recorded)"
|
|
247
|
+
|
|
248
|
+
|
|
249
|
+
# Script entry point: invoked directly by Claude Code as a PreToolUse hook.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Hook: PreToolUse for Write/Edit
|
|
4
|
+
Purpose: Block refactoring until verification complete WITH USER GAP DECISION
|
|
5
|
+
|
|
6
|
+
Phase 10 (Verify) requires:
|
|
7
|
+
1. Re-read original documentation (after tests pass)
|
|
8
|
+
2. Compare implementation to docs - find gaps
|
|
9
|
+
3. SHOW gap analysis to user
|
|
10
|
+
4. USE AskUserQuestion: "Fix gaps? [Y] / Skip? [n]"
|
|
11
|
+
5. Loop back to Phase 8 if user wants fixes
|
|
12
|
+
6. Only proceed to refactor when user decides
|
|
13
|
+
|
|
14
|
+
Returns:
|
|
15
|
+
- {"permissionDecision": "allow"} - Let the tool run
|
|
16
|
+
- {"permissionDecision": "deny", "reason": "..."} - Block with explanation
|
|
17
|
+
"""
|
|
18
|
+
import json
|
|
19
|
+
import sys
|
|
20
|
+
from pathlib import Path
|
|
21
|
+
|
|
22
|
+
STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def main() -> None:
    """PreToolUse gate for Write/Edit on API route files (Verify phase).

    Reads the hook payload from stdin and denies edits to non-test
    ``/api/...*.ts`` files after TDD Green until the ``verify`` phase in
    the state file is complete (re-research done, gap analysis shown,
    user asked via AskUserQuestion and decided). Prints exactly one JSON
    permission decision to stdout and always exits 0 (hook convention:
    the decision is in the JSON, not the exit code).
    """
    try:
        input_data = json.load(sys.stdin)
    except json.JSONDecodeError:
        # Malformed payload: fail open so a broken hook never blocks work.
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    tool_input = input_data.get("tool_input", {})
    file_path = tool_input.get("file_path", "")

    # Only enforce for API route files
    is_api_file = "/api/" in file_path and file_path.endswith(".ts")

    if not is_api_file:
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    # Skip test files
    if ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path:
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    # Skip documentation/config files.
    # NOTE(review): unreachable in practice (file_path already ends with
    # ".ts" here); kept as a cheap safety net.
    if file_path.endswith(".md") or file_path.endswith(".json"):
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    if not STATE_FILE.exists():
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    try:
        state = json.loads(STATE_FILE.read_text())
    except json.JSONDecodeError:
        # Corrupt state file: fail open rather than dead-lock the session.
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    phases = state.get("phases", {})
    tdd_green = phases.get("tdd_green", {})
    verify = phases.get("verify", {})

    # Only enforce after TDD Green is complete
    if tdd_green.get("status") != "complete":
        # Let earlier hooks handle this
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    status = verify.get("status", "not_started")

    # BUG FIX: these reads previously lived inside the deny branch below,
    # so the success path referenced gaps_found / gaps_fixed / gaps_skipped
    # before assignment and crashed with NameError exactly when
    # verification WAS complete. Read them up front instead.
    user_question_asked = verify.get("user_question_asked", False)
    user_decided = verify.get("user_decided", False)
    gap_analysis_shown = verify.get("gap_analysis_shown", False)
    re_research_done = verify.get("re_research_done", False)
    gaps_found = verify.get("gaps_found", 0)
    gaps_fixed = verify.get("gaps_fixed", 0)
    gaps_skipped = verify.get("gaps_skipped", 0)
    user_decision = verify.get("user_decision", None)

    if status != "complete":
        missing = []
        if not re_research_done:
            missing.append("Re-research original docs not done")
        if not gap_analysis_shown:
            missing.append("Gap analysis not shown to user")
        if not user_question_asked:
            missing.append("User gap decision question (AskUserQuestion not used)")
        if not user_decided:
            missing.append("User hasn't decided on gaps")

        print(json.dumps({
            "permissionDecision": "deny",
            "reason": f"""❌ BLOCKED: Verification (Phase 10) not complete.

Status: {status}
Re-research done: {re_research_done}
Gap analysis shown: {gap_analysis_shown}
User question asked: {user_question_asked}
User decided: {user_decided}
User decision: {user_decision or "None yet"}
Gaps found: {gaps_found}
Gaps fixed: {gaps_fixed}
Gaps skipped: {gaps_skipped}

MISSING:
{chr(10).join(f"  • {m}" for m in missing)}

═══════════════════════════════════════════════════════════
⚠️  GET USER DECISION ON IMPLEMENTATION GAPS
═══════════════════════════════════════════════════════════

REQUIRED STEPS:

1. Re-read the ORIGINAL API documentation:
   • Use Context7 or WebSearch with SAME queries from Phase 3
   • Compare EVERY documented feature to your implementation
   • Don't rely on memory - actually re-read the docs

2. Create and SHOW gap analysis table:
   ┌───────────────────────────────────────────────────────┐
   │  VERIFICATION RESULTS                                 │
   │                                                       │
   │  │ Feature         │ In Docs │ Implemented │ Status   │
   │  ├─────────────────┼─────────┼─────────────┼──────────│
   │  │ domain param    │ Yes     │ Yes         │ ✓ Match  │
   │  │ format option   │ Yes     │ Yes         │ ✓ Match  │
   │  │ include_fonts   │ Yes     │ No          │ ❌ GAP   │
   │  │ webhook_url     │ No      │ Yes         │ ⚠ Extra  │
   │                                                       │
   │  Found 1 gap in implementation.                       │
   │                                                       │
   │  Fix the gap? [Y] - Loop back to add missing feature  │
   │  Skip? [n] - Document as intentional omission         │
   └───────────────────────────────────────────────────────┘

3. USE AskUserQuestion:
   question: "I found {gaps_found} gap(s). How should I proceed?"
   options: [
     {{"value": "fix", "label": "Fix gaps - loop back to Red phase"}},
     {{"value": "skip", "label": "Skip - these are intentional omissions"}},
     {{"value": "partial", "label": "Fix some, skip others - [specify]"}}
   ]

4. If user says "fix":
   • Loop back to Phase 8 (TDD Red)
   • Write new tests for missing features
   • Implement and verify again
   • REPEAT until no gaps or user says skip

5. If user says "skip":
   • Document each skipped gap with reason
   • Set verify.gaps_skipped = count
   • Proceed to refactor

6. After user decides:
   • Set verify.user_decided = true
   • Set verify.user_question_asked = true
   • Set verify.gap_analysis_shown = true
   • Set verify.re_research_done = true
   • Set verify.user_decision = "fix" or "skip" or "partial"
   • Set verify.status = "complete"

WHY: Catch memory-based implementation errors BEFORE refactoring."""
        }))
        sys.exit(0)

    # Verify complete - allow the edit and summarize the user's decision.
    user_decision = verify.get("user_decision", "unknown")
    print(json.dumps({
        "permissionDecision": "allow",
        "message": f"""✅ Verification complete.
User decision: {user_decision}
Gaps found: {gaps_found}
Gaps fixed: {gaps_fixed}
Gaps skipped (intentional): {gaps_skipped}
Proceeding to refactor phase."""
    }))
    sys.exit(0)
|
|
183
|
+
|
|
184
|
+
|
|
185
|
+
# Script entry point: invoked directly by Claude Code as a PreToolUse hook.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,154 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Hook: PostToolUse (for periodic re-grounding)
|
|
4
|
+
Purpose: Inject context reminders every N turns to prevent context dilution
|
|
5
|
+
|
|
6
|
+
This hook tracks turn count and periodically injects a summary of:
|
|
7
|
+
- Current endpoint and phase
|
|
8
|
+
- Key decisions from interview
|
|
9
|
+
- Research cache status
|
|
10
|
+
- Important file locations
|
|
11
|
+
|
|
12
|
+
The goal is to keep Claude grounded during long sessions where
|
|
13
|
+
the original CLAUDE.md context may get diluted.
|
|
14
|
+
|
|
15
|
+
Configuration:
|
|
16
|
+
- REGROUND_INTERVAL: Number of turns between re-grounding (default: 7)
|
|
17
|
+
|
|
18
|
+
Returns:
|
|
19
|
+
- {"continue": true} with optional additionalContext on reground turns
|
|
20
|
+
"""
|
|
21
|
+
import json
|
|
22
|
+
import sys
|
|
23
|
+
import os
|
|
24
|
+
from datetime import datetime
|
|
25
|
+
from pathlib import Path
|
|
26
|
+
|
|
27
|
+
# Configuration: how many turns between re-grounding reminders.
# The module docstring advertises REGROUND_INTERVAL as configuration and
# the file imports `os` (previously unused) — honor that by allowing an
# environment-variable override while keeping the original default of 7.
# Invalid or non-positive values fall back to 7 instead of crashing the hook.
try:
    REGROUND_INTERVAL = int(os.environ.get("REGROUND_INTERVAL", "7"))
except ValueError:
    REGROUND_INTERVAL = 7
if REGROUND_INTERVAL < 1:
    REGROUND_INTERVAL = 7

# State file is in .claude/ directory (sibling to hooks/)
STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def main() -> None:
    """PostToolUse hook: periodically inject a re-grounding summary.

    Increments a per-session turn counter persisted in the state file and,
    every REGROUND_INTERVAL turns (when an endpoint is active), emits a
    summary of the current endpoint, phase progress, key interview
    decisions, research freshness and key file locations via
    ``hookSpecificOutput.additionalContext``. Always prints a
    ``{"continue": true}`` JSON response and exits 0.
    """
    # Read hook input from stdin
    try:
        input_data = json.load(sys.stdin)
    except json.JSONDecodeError:
        # Malformed payload: continue silently; this hook is best-effort.
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Check if state file exists
    if not STATE_FILE.exists():
        print(json.dumps({"continue": True}))
        sys.exit(0)

    try:
        state = json.loads(STATE_FILE.read_text())
    except json.JSONDecodeError:
        # Corrupt state file: don't attempt to rewrite it; just continue.
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Increment turn count
    turn_count = state.get("turn_count", 0) + 1
    state["turn_count"] = turn_count
    # NOTE(review): naive local time, no tz — fine for display, but mixing
    # with aware timestamps elsewhere would be a bug; confirm consumers.
    state["last_turn_timestamp"] = datetime.now().isoformat()

    # Check if we should re-ground
    should_reground = turn_count % REGROUND_INTERVAL == 0

    if should_reground and state.get("endpoint"):
        # Build re-grounding context as markdown lines, joined at the end.
        context_parts = []
        context_parts.append(f"## Re-Grounding Reminder (Turn {turn_count})")
        context_parts.append("")

        endpoint = state.get("endpoint", "unknown")
        context_parts.append(f"**Active Endpoint:** {endpoint}")

        # Get current phase: first in_progress phase wins; completed phases
        # are collected for the summary line.
        phases = state.get("phases", {})
        phase_order = [
            "disambiguation", "scope", "research_initial", "interview",
            "research_deep", "schema_creation", "environment_check",
            "tdd_red", "tdd_green", "verify", "tdd_refactor", "documentation"
        ]

        current_phase = None
        completed_phases = []
        for phase_name in phase_order:
            phase = phases.get(phase_name, {})
            status = phase.get("status", "not_started")
            if status == "complete":
                completed_phases.append(phase_name)
            elif status == "in_progress" and not current_phase:
                current_phase = phase_name

        if not current_phase:
            # Find first not_started phase
            for phase_name in phase_order:
                phase = phases.get(phase_name, {})
                if phase.get("status", "not_started") == "not_started":
                    current_phase = phase_name
                    break

        context_parts.append(f"**Current Phase:** {current_phase or 'documentation'}")
        context_parts.append(f"**Completed:** {', '.join(completed_phases) if completed_phases else 'None'}")

        # Key decisions summary
        interview = phases.get("interview", {})
        decisions = interview.get("decisions", {})
        if decisions:
            context_parts.append("")
            context_parts.append("**Key Decisions:**")
            for key, value in list(decisions.items())[:5]:  # Limit to 5 key decisions
                # NOTE(review): assumes every decision entry is a dict; a
                # bare scalar value would raise AttributeError here — confirm
                # the state-file writer always stores dicts.
                response = value.get("value", value.get("response", "N/A"))
                if response:
                    context_parts.append(f"  - {key}: {str(response)[:50]}")

        # Research freshness warning
        research_index = state.get("research_index", {})
        if endpoint in research_index:
            entry = research_index[endpoint]
            days_old = entry.get("days_old", 0)
            if days_old > 7:
                context_parts.append("")
                context_parts.append(f"**WARNING:** Research is {days_old} days old. Consider re-researching.")

        # File reminders
        context_parts.append("")
        context_parts.append("**Key Files:** .claude/api-dev-state.json, .claude/research/")

        # Add to reground history
        reground_history = state.setdefault("reground_history", [])
        reground_history.append({
            "turn": turn_count,
            "timestamp": datetime.now().isoformat(),
            "phase": current_phase
        })
        # Keep only last 10 reground events
        state["reground_history"] = reground_history[-10:]

        # Save state (non-atomic write; concurrent hook runs could race —
        # acceptable for an advisory counter).
        STATE_FILE.write_text(json.dumps(state, indent=2))

        # Output with context injection
        output = {
            "continue": True,
            "hookSpecificOutput": {
                "hookEventName": "PostToolUse",
                "additionalContext": "\n".join(context_parts)
            }
        }
        print(json.dumps(output))
    else:
        # Just update turn count and continue
        STATE_FILE.write_text(json.dumps(state, indent=2))
        print(json.dumps({"continue": True}))

    sys.exit(0)
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
# Script entry point: invoked directly by Claude Code as a PostToolUse hook.
if __name__ == "__main__":
    main()
|