@hustle-together/api-dev-tools 2.0.7 → 3.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +343 -467
- package/bin/cli.js +229 -15
- package/commands/README.md +124 -251
- package/commands/api-create.md +318 -136
- package/commands/api-interview.md +252 -256
- package/commands/api-research.md +209 -234
- package/commands/api-verify.md +231 -0
- package/demo/audio/generate-all-narrations.js +581 -0
- package/demo/audio/generate-narration.js +120 -56
- package/demo/audio/generate-voice-previews.js +140 -0
- package/demo/audio/narration-adam-timing.json +4675 -0
- package/demo/audio/narration-adam.mp3 +0 -0
- package/demo/audio/narration-creature-timing.json +4675 -0
- package/demo/audio/narration-creature.mp3 +0 -0
- package/demo/audio/narration-gaming-timing.json +4675 -0
- package/demo/audio/narration-gaming.mp3 +0 -0
- package/demo/audio/narration-hope-timing.json +4675 -0
- package/demo/audio/narration-hope.mp3 +0 -0
- package/demo/audio/narration-mark-timing.json +4675 -0
- package/demo/audio/narration-mark.mp3 +0 -0
- package/demo/audio/previews/manifest.json +30 -0
- package/demo/audio/previews/preview-creature.mp3 +0 -0
- package/demo/audio/previews/preview-gaming.mp3 +0 -0
- package/demo/audio/previews/preview-hope.mp3 +0 -0
- package/demo/audio/previews/preview-mark.mp3 +0 -0
- package/demo/audio/voices-manifest.json +50 -0
- package/demo/hustle-together/blog/gemini-vs-claude-widgets.html +30 -28
- package/demo/hustle-together/blog/interview-driven-api-development.html +37 -23
- package/demo/hustle-together/index.html +142 -109
- package/demo/workflow-demo.html +2618 -1036
- package/hooks/api-workflow-check.py +2 -0
- package/hooks/enforce-deep-research.py +180 -0
- package/hooks/enforce-disambiguation.py +149 -0
- package/hooks/enforce-documentation.py +187 -0
- package/hooks/enforce-environment.py +249 -0
- package/hooks/enforce-refactor.py +187 -0
- package/hooks/enforce-research.py +93 -46
- package/hooks/enforce-schema.py +186 -0
- package/hooks/enforce-scope.py +156 -0
- package/hooks/enforce-tdd-red.py +246 -0
- package/hooks/enforce-verify.py +186 -0
- package/hooks/periodic-reground.py +154 -0
- package/hooks/session-startup.py +151 -0
- package/hooks/track-tool-use.py +109 -17
- package/hooks/verify-after-green.py +282 -0
- package/package.json +3 -2
- package/scripts/collect-test-results.ts +404 -0
- package/scripts/extract-parameters.ts +483 -0
- package/scripts/generate-test-manifest.ts +520 -0
- package/templates/CLAUDE-SECTION.md +84 -0
- package/templates/api-dev-state.json +83 -8
- package/templates/api-test/page.tsx +315 -0
- package/templates/api-test/test-structure/route.ts +269 -0
- package/templates/research-index.json +6 -0
- package/templates/settings.json +59 -0

package/hooks/enforce-environment.py (new file)

```diff
@@ -0,0 +1,249 @@
+#!/usr/bin/env python3
+"""
+Hook: PreToolUse for Write/Edit
+Purpose: Block writing if environment not verified WITH USER READINESS CONFIRMATION
+
+Phase 6 requires:
+1. Check required API keys based on endpoint/interview
+2. Report found/missing keys to user
+3. USE AskUserQuestion: "Ready for testing? [Y/n]"
+4. Only proceed to TDD when user confirms readiness
+
+Returns:
+- {"permissionDecision": "allow"} - Let the tool run
+- {"permissionDecision": "deny", "reason": "..."} - Block with explanation
+"""
+import json
+import os
+import sys
+from pathlib import Path
+
+# State file is in .claude/ directory (sibling to hooks/)
+STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+
+# Common API key patterns to check
+COMMON_KEY_PATTERNS = {
+    "openai": ["OPENAI_API_KEY", "NEXT_PUBLIC_OPENAI_API_KEY"],
+    "anthropic": ["ANTHROPIC_API_KEY", "NEXT_PUBLIC_ANTHROPIC_API_KEY"],
+    "google": ["GOOGLE_API_KEY", "GOOGLE_GENERATIVE_AI_API_KEY"],
+    "brandfetch": ["BRANDFETCH_API_KEY"],
+    "firecrawl": ["FIRECRAWL_API_KEY"],
+    "brave": ["BRAVE_SEARCH_API_KEY"],
+    "perplexity": ["PERPLEXITY_API_KEY"],
+    "exa": ["EXA_API_KEY"],
+    "cartesia": ["CARTESIA_API_KEY"],
+    "elevenlabs": ["ELEVENLABS_API_KEY"],
+    "unsplash": ["UNSPLASH_ACCESS_KEY"],
+    "pexels": ["PEXELS_API_KEY"],
+    "supabase": ["SUPABASE_URL", "SUPABASE_ANON_KEY", "SUPABASE_SERVICE_ROLE_KEY"],
+}
+
+
+def check_env_keys(required_keys: list) -> tuple[list, list]:
+    """Check which keys exist and which are missing."""
+    found = []
+    missing = []
+
+    for key in required_keys:
+        if os.environ.get(key):
+            found.append(key)
+        else:
+            missing.append(key)
+
+    return found, missing
+
+
+def infer_required_keys(endpoint: str, external_services: list) -> list:
+    """Infer required API keys from endpoint name and external services."""
+    required = []
+
+    # Check endpoint name against common patterns
+    endpoint_lower = endpoint.lower()
+    for service, keys in COMMON_KEY_PATTERNS.items():
+        if service in endpoint_lower:
+            required.extend(keys)
+
+    # Check external services list
+    for service in external_services:
+        service_lower = service.lower()
+        for pattern, keys in COMMON_KEY_PATTERNS.items():
+            if pattern in service_lower:
+                required.extend(keys)
+
+    return list(set(required))  # Deduplicate
+
+
+def main():
+    # Read hook input from stdin
+    try:
+        input_data = json.load(sys.stdin)
+    except json.JSONDecodeError:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    tool_input = input_data.get("tool_input", {})
+    file_path = tool_input.get("file_path", "")
+
+    # Only enforce for API route files (not tests - tests should fail if keys missing)
+    is_api_file = "/api/" in file_path and file_path.endswith(".ts")
+    is_route_file = file_path.endswith("route.ts")
+
+    if not is_api_file or not is_route_file:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Skip test files
+    if ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Check if state file exists
+    if not STATE_FILE.exists():
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Load state
+    try:
+        state = json.loads(STATE_FILE.read_text())
+    except json.JSONDecodeError:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    endpoint = state.get("endpoint")
+    if not endpoint:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    phases = state.get("phases", {})
+    schema_creation = phases.get("schema_creation", {})
+    environment_check = phases.get("environment_check", {})
+
+    # Only enforce after schema creation
+    if schema_creation.get("status") != "complete":
+        # Let earlier hooks handle this
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    env_status = environment_check.get("status", "not_started")
+    keys_required = environment_check.get("keys_required", [])
+    keys_found = environment_check.get("keys_found", [])
+    keys_missing = environment_check.get("keys_missing", [])
+    user_question_asked = environment_check.get("user_question_asked", False)
+    user_ready = environment_check.get("user_ready", False)
+    env_shown = environment_check.get("env_shown", False)
+
+    # Check if environment check is complete
+    if env_status != "complete":
+        # Infer required keys if not already set
+        if not keys_required:
+            interview = phases.get("interview", {})
+            decisions = interview.get("decisions", {})
+            external_services = decisions.get("external_services", {}).get("value", [])
+            if isinstance(external_services, str):
+                external_services = [external_services]
+            keys_required = infer_required_keys(endpoint, external_services)
+
+        # Check current environment
+        if keys_required:
+            found, missing = check_env_keys(keys_required)
+        else:
+            found, missing = [], []
+
+        # Check what's missing for user checkpoint
+        missing_steps = []
+        if not env_shown:
+            missing_steps.append("Environment status not shown to user")
+        if not user_question_asked:
+            missing_steps.append("User readiness question (AskUserQuestion not used)")
+        if not user_ready:
+            missing_steps.append("User hasn't confirmed readiness for TDD")
+
+        print(json.dumps({
+            "permissionDecision": "deny",
+            "reason": f"""❌ BLOCKED: Environment check (Phase 6) not complete.
+
+Current status: {env_status}
+Required keys: {len(keys_required)}
+Found: {len(found)}
+Missing: {len(missing)}
+User shown env: {env_shown}
+User question asked: {user_question_asked}
+User ready: {user_ready}
+
+MISSING:
+{chr(10).join(f"  • {m}" for m in missing_steps)}
+
+═══════════════════════════════════════════════════════════
+⚠️  GET USER READINESS CONFIRMATION
+═══════════════════════════════════════════════════════════
+
+REQUIRED STEPS:
+
+1. Check API keys and SHOW status to user:
+   ┌───────────────────────────────────────────────────────┐
+   │  ENVIRONMENT CHECK                                     │
+   │                                                        │
+   │  Required for {endpoint}:                              │
+   │                                                        │
+   │  API Keys:                                             │
+{chr(10).join(f"   │  {'✓' if k in found else '❌'} {k:<40} │" for k in keys_required) if keys_required else "   │  No API keys required                                  │"}
+   │                                                        │
+   │  Testing Setup:                                        │
+   │  • Schema file ready                                   │
+   │  • Test patterns defined                               │
+   │  • Mock data prepared (if needed)                      │
+   │                                                        │
+   │  Ready to begin TDD? [Y]                               │
+   │  Need to fix something? [n]                            │
+   └───────────────────────────────────────────────────────┘
+
+2. USE AskUserQuestion:
+   question: "Environment looks ready. Start TDD?"
+   options: [
+     {{"value": "ready", "label": "Yes, ready to write tests"}},
+     {{"value": "fix_keys", "label": "No, need to set up API keys first"}},
+     {{"value": "fix_other", "label": "No, need to fix something else"}}
+   ]
+
+3. If user says "fix_keys" or "fix_other":
+   • Help them resolve the issue
+   • Re-check environment
+   • LOOP BACK and show updated status
+
+4. If user says "ready":
+   • Set environment_check.user_ready = true
+   • Set environment_check.user_question_asked = true
+   • Set environment_check.env_shown = true
+   • Set environment_check.keys_found = [list]
+   • Set environment_check.keys_missing = [list]
+   • Set environment_check.status = "complete"
+
+{'API KEY ISSUES:' if missing else ''}
+{chr(10).join(f"  ❌ {k}" for k in missing) if missing else ''}
+
+WHY: Verify environment before writing tests that depend on it."""
+        }))
+        sys.exit(0)
+
+    # Environment check complete
+    if keys_missing and not user_ready:
+        print(json.dumps({
+            "permissionDecision": "deny",
+            "reason": f"""❌ Missing keys noted but user hasn't confirmed readiness.
+Use AskUserQuestion to confirm user is ready to proceed with missing keys:
+{chr(10).join(f"  ⚠️  {k}" for k in keys_missing[:3])}"""
+        }))
+        sys.exit(0)
+
+    print(json.dumps({
+        "permissionDecision": "allow",
+        "message": f"""✅ Environment check complete.
+User confirmed ready for TDD.
+Keys found: {len(keys_found)}
+Keys missing (acknowledged): {len(keys_missing)}"""
+    }))
+    sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
```
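
Like the other PreToolUse hooks in this release, the script communicates purely over stdin/stdout: it reads one JSON payload describing the pending tool call and prints one JSON object containing `permissionDecision`, plus a `reason` on deny or a `message` on allow. The sketch below shows one way to exercise it outside Claude Code; the install path under `.claude/hooks/` and the example route path are illustrative assumptions, not values taken from the package.

```python
# Minimal local smoke test for enforce-environment.py (a sketch, not part of the package).
# Assumes the hook was installed to .claude/hooks/ and is run from the project root,
# so that STATE_FILE resolves to .claude/api-dev-state.json.
import json
import subprocess

# PreToolUse-style payload shaped like the fields the hook reads (tool_input.file_path).
payload = {
    "tool_name": "Write",
    "tool_input": {"file_path": "app/api/openai-chat/route.ts"},  # hypothetical route
}

result = subprocess.run(
    ["python3", ".claude/hooks/enforce-environment.py"],
    input=json.dumps(payload),
    capture_output=True,
    text=True,
)

decision = json.loads(result.stdout)
print(decision["permissionDecision"])                     # "allow" or "deny"
print(decision.get("reason") or decision.get("message"))  # explanation, if any
```

If the state file is missing or unparseable the hook fails open and prints `allow`, so a bare checkout without `.claude/api-dev-state.json` is never blocked by it.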

package/hooks/enforce-refactor.py (new file)

```diff
@@ -0,0 +1,187 @@
+#!/usr/bin/env python3
+"""
+Hook: PreToolUse for Write/Edit
+Purpose: Block refactoring until verification phase is complete
+
+This hook ensures that after tests pass (Green phase), the implementation
+is verified against documentation before any refactoring begins.
+
+Phase 10 of the 12-phase workflow requires:
+- TDD Green phase complete (tests passing)
+- Verify phase complete (Phase 9)
+- All gaps found have been fixed or documented as intentional omissions
+
+Returns:
+- {"permissionDecision": "allow"} - Let the tool run
+- {"permissionDecision": "deny", "reason": "..."} - Block with explanation
+"""
+import json
+import sys
+from pathlib import Path
+
+# State file is in .claude/ directory (sibling to hooks/)
+STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+
+# Keywords that suggest refactoring intent
+REFACTOR_KEYWORDS = [
+    "refactor",
+    "cleanup",
+    "clean up",
+    "restructure",
+    "reorganize",
+    "optimize",
+    "simplify",
+    "extract",
+    "rename",
+    "move",
+]
+
+
+def is_refactoring_edit(tool_input: dict) -> bool:
+    """Detect if this edit appears to be a refactoring operation."""
+    # Check the content being written
+    new_string = tool_input.get("new_string", "")
+    old_string = tool_input.get("old_string", "")
+
+    # If both old and new exist and are similar length, might be refactoring
+    if old_string and new_string:
+        len_diff = abs(len(new_string) - len(old_string))
+        # Similar length suggests refactoring vs new features
+        if len_diff < len(old_string) * 0.3:  # Within 30% length
+            return True
+
+    return False
+
+
+def main():
+    # Read hook input from stdin
+    try:
+        input_data = json.load(sys.stdin)
+    except json.JSONDecodeError:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    tool_name = input_data.get("tool_name", "")
+    tool_input = input_data.get("tool_input", {})
+    file_path = tool_input.get("file_path", "")
+
+    # Only check Edit operations on API files (Write is for new content)
+    if tool_name != "Edit":
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Only enforce for API route files
+    if "/api/" not in file_path or not file_path.endswith(".ts"):
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Skip test files - can refactor tests anytime
+    if ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Check if state file exists
+    if not STATE_FILE.exists():
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Load state
+    try:
+        state = json.loads(STATE_FILE.read_text())
+    except json.JSONDecodeError:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    endpoint = state.get("endpoint")
+    if not endpoint:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    phases = state.get("phases", {})
+    tdd_green = phases.get("tdd_green", {})
+    verify = phases.get("verify", {})
+    tdd_refactor = phases.get("tdd_refactor", {})
+
+    # Only enforce after TDD Green is complete
+    if tdd_green.get("status") != "complete":
+        # Still in implementation phase, allow edits
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Check if this looks like a refactoring edit
+    if not is_refactoring_edit(tool_input):
+        # Doesn't look like refactoring, might be bug fix
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Check verify phase status
+    verify_status = verify.get("status", "not_started")
+    gaps_found = verify.get("gaps_found", 0)
+    gaps_fixed = verify.get("gaps_fixed", 0)
+    intentional_omissions = verify.get("intentional_omissions", [])
+
+    if verify_status != "complete":
+        print(json.dumps({
+            "permissionDecision": "deny",
+            "reason": f"""❌ BLOCKED: Verify phase (Phase 9) not complete.
+
+Current status: {verify_status}
+Gaps found: {gaps_found}
+Gaps fixed: {gaps_fixed}
+Intentional omissions: {len(intentional_omissions)}
+
+═══════════════════════════════════════════════════════════
+⚠️  VERIFY BEFORE REFACTORING
+═══════════════════════════════════════════════════════════
+
+Before refactoring, you must:
+
+1. Re-read the original documentation
+2. Compare implementation to docs feature-by-feature
+3. Fix any gaps OR document them as intentional omissions
+
+Current gaps not addressed:
+• {gaps_found - gaps_fixed} gaps still need attention
+
+Once verify phase is complete:
+• All gaps fixed OR documented as omissions
+• Implementation matches documented behavior
+• THEN you can safely refactor
+
+WHY THIS MATTERS:
+- Refactoring should not change behavior
+- Must verify behavior is CORRECT before preserving it
+- Otherwise you cement bugs into clean code"""
+        }))
+        sys.exit(0)
+
+    # Verify complete - check if all gaps addressed
+    unaddressed_gaps = gaps_found - gaps_fixed - len(intentional_omissions)
+    if unaddressed_gaps > 0:
+        print(json.dumps({
+            "permissionDecision": "deny",
+            "reason": f"""❌ BLOCKED: {unaddressed_gaps} gaps not addressed.
+
+Gaps found: {gaps_found}
+Gaps fixed: {gaps_fixed}
+Intentional omissions: {len(intentional_omissions)}
+Unaddressed: {unaddressed_gaps}
+
+Fix the remaining gaps or mark them as intentional omissions
+before refactoring."""
+        }))
+        sys.exit(0)
+
+    # All clear for refactoring
+    refactor_status = tdd_refactor.get("status", "not_started")
+    print(json.dumps({
+        "permissionDecision": "allow",
+        "message": f"""✅ Verification complete. Safe to refactor.
+Refactor phase status: {refactor_status}
+Remember: Tests must still pass after refactoring!"""
+    }))
+    sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
```
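
The refactor gate consults only a small slice of `.claude/api-dev-state.json`: the endpoint name plus the `tdd_green`, `verify`, and `tdd_refactor` phase entries. The sketch below shows one state shape, with illustrative values, under which a refactoring edit would be allowed; the field names come from the hook above, the endpoint name and gap counts do not.

```python
# Illustrative slice of api-dev-state.json as read by enforce-refactor.py.
# Field names mirror the hook; concrete values are placeholders.
state = {
    "endpoint": "openai-chat",  # hypothetical endpoint name
    "phases": {
        "tdd_green": {"status": "complete"},  # tests are passing
        "verify": {
            "status": "complete",
            "gaps_found": 2,
            "gaps_fixed": 1,
            "intentional_omissions": ["streaming mode deferred"],
        },
        "tdd_refactor": {"status": "not_started"},
    },
}

# The hook's final check: every gap must be fixed or listed as an intentional omission.
verify = state["phases"]["verify"]
unaddressed = verify["gaps_found"] - verify["gaps_fixed"] - len(verify["intentional_omissions"])
assert unaddressed == 0  # any positive remainder triggers a deny
```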

package/hooks/enforce-research.py (modified)

```diff
@@ -1,10 +1,13 @@
 #!/usr/bin/env python3
 """
 Hook: PreToolUse for Write/Edit
-Purpose: Block writing API code if research phase not complete
+Purpose: Block writing API code if research phase not complete WITH USER CHECKPOINT

-
-
+Phase 2 requires:
+1. Execute 2-3 initial searches (Context7, WebSearch)
+2. Present summary TABLE to user
+3. USE AskUserQuestion: "Proceed? [Y] / Search more? [n]"
+4. Loop back if user wants more research

 Returns:
 - {"permissionDecision": "allow"} - Let the tool run
@@ -14,97 +17,141 @@ import json
 import sys
 from pathlib import Path

-# State file is in .claude/ directory (sibling to hooks/)
 STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"

+# Minimum sources required
+MIN_SOURCES = 2
+

 def main():
-    # Read hook input from stdin (Claude Code passes tool info as JSON)
     try:
         input_data = json.load(sys.stdin)
     except json.JSONDecodeError:
-        # If we can't parse input, allow (fail open for safety)
         print(json.dumps({"permissionDecision": "allow"}))
         sys.exit(0)

-    tool_name = input_data.get("tool_name", "")
     tool_input = input_data.get("tool_input", {})
-
-    # Get the file path being written/edited
     file_path = tool_input.get("file_path", "")

     # Only enforce for API route files
-    # Check for both /api/ and /api-test/ patterns
     if "/api/" not in file_path and "/api-test/" not in file_path:
-        # Not an API file, allow without checking
         print(json.dumps({"permissionDecision": "allow"}))
         sys.exit(0)

-    #
-    # (TDD Red phase)
+    # Skip test files - TDD Red allows tests before research complete
     if ".test." in file_path or "/__tests__/" in file_path:
         print(json.dumps({"permissionDecision": "allow"}))
         sys.exit(0)

-    # Skip for documentation/config files
     if file_path.endswith(".md") or file_path.endswith(".json"):
         print(json.dumps({"permissionDecision": "allow"}))
         sys.exit(0)

-    # Check if state file exists
     if not STATE_FILE.exists():
         print(json.dumps({
             "permissionDecision": "deny",
             "reason": """❌ API development state not initialized.

-
-1. Run /api-create [endpoint-name] to start the workflow
-   OR
-2. Run /api-research [library-name] to research dependencies
-
-This ensures you're working with current documentation, not outdated training data."""
+Run /api-create [endpoint-name] to start the workflow."""
         }))
         sys.exit(0)

-    # Load and check state
     try:
         state = json.loads(STATE_FILE.read_text())
     except json.JSONDecodeError:
-        # Corrupted state file, allow but warn
         print(json.dumps({"permissionDecision": "allow"}))
         sys.exit(0)

-
+    endpoint = state.get("endpoint", "unknown")
     phases = state.get("phases", {})
     research = phases.get("research_initial", {})
-
+    status = research.get("status", "not_started")
+
+    if status != "complete":
+        sources = research.get("sources", [])
+        user_question_asked = research.get("user_question_asked", False)
+        user_approved = research.get("user_approved", False)
+        summary_shown = research.get("summary_shown", False)
+
+        missing = []
+        if len(sources) < MIN_SOURCES:
+            missing.append(f"Sources ({len(sources)}/{MIN_SOURCES} minimum)")
+        if not summary_shown:
+            missing.append("Research summary table not shown to user")
+        if not user_question_asked:
+            missing.append("User checkpoint (AskUserQuestion not used)")
+        if not user_approved:
+            missing.append("User approval to proceed")

-    if research_status != "complete":
-        sources_count = len(research.get("sources", []))
         print(json.dumps({
             "permissionDecision": "deny",
-            "reason": f"""❌
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            "reason": f"""❌ BLOCKED: Initial research (Phase 2) not complete.
+
+Status: {status}
+Sources consulted: {len(sources)}
+Summary shown: {summary_shown}
+User question asked: {user_question_asked}
+User approved: {user_approved}
+
+MISSING:
+{chr(10).join(f"  • {m}" for m in missing)}
+
+═══════════════════════════════════════════════════════════
+⚠️  COMPLETE RESEARCH WITH USER CHECKPOINT
+═══════════════════════════════════════════════════════════
+
+REQUIRED STEPS:
+
+1. Execute 2-3 initial searches:
+   • Context7: "{endpoint}"
+   • WebSearch: "{endpoint} official documentation"
+   • WebSearch: "site:[official-domain] {endpoint} API reference"
+
+2. Present summary TABLE to user:
+   ┌───────────────────────────────────────────────────────┐
+   │  RESEARCH SUMMARY                                      │
+   │                                                        │
+   │  │ Source         │ Found                              │
+   │  ├────────────────┼────────────────────────────────────│
+   │  │ Official docs  │ ✓ [URL]                            │
+   │  │ API Reference  │ ✓ REST v2                          │
+   │  │ Auth method    │ ✓ Bearer token                     │
+   │  │ NPM package    │ ? Not found                        │
+   │                                                        │
+   │  Proceed? [Y] / Search more? [n] ____                  │
+   └───────────────────────────────────────────────────────┘
+
+3. USE AskUserQuestion:
+   question: "Research summary above. Proceed or search more?"
+   options: [
+     {{"value": "proceed", "label": "Proceed to interview"}},
+     {{"value": "more", "label": "Search more - I need [topic]"}},
+     {{"value": "specific", "label": "Search for something specific"}}
+   ]
+
+4. If user says "more" or "specific":
+   • Ask what they want to research
+   • Execute additional searches
+   • LOOP BACK and show updated summary
+
+5. If user says "proceed":
+   • Set research_initial.user_approved = true
+   • Set research_initial.user_question_asked = true
+   • Set research_initial.summary_shown = true
+   • Set research_initial.status = "complete"
+
+WHY: Implementation must match CURRENT API documentation."""
         }))
         sys.exit(0)

-    # Research complete
-
+    # Research complete - inject context
+    sources = research.get("sources", [])
+    print(json.dumps({
+        "permissionDecision": "allow",
+        "message": f"""✅ Initial research complete.
+Sources: {len(sources)}
+User approved proceeding to interview."""
+    }))
     sys.exit(0)

```
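
The tightened research gate blocks writes to API files while `research_initial.status` is anything other than `complete`, and its deny message lists what the workflow is expected to populate first: at least `MIN_SOURCES` sources, the summary table shown, the AskUserQuestion checkpoint, and explicit user approval. The sketch below shows a `research_initial` entry consistent with those requirements; the source URLs and values are placeholders rather than data from the package.

```python
# Illustrative research_initial entry matching the checks in enforce-research.py.
# Values are placeholders; only the field names are taken from the hook.
MIN_SOURCES = 2  # mirrors the constant added in this release

research_initial = {
    "status": "complete",
    "sources": [
        "https://example.com/docs",           # placeholder documentation URL
        "https://example.com/api-reference",  # placeholder API reference URL
    ],
    "summary_shown": True,        # research summary table was presented
    "user_question_asked": True,  # AskUserQuestion checkpoint happened
    "user_approved": True,        # user chose to proceed to the interview
}

assert len(research_initial["sources"]) >= MIN_SOURCES
assert all(research_initial[flag] for flag in ("summary_shown", "user_question_asked", "user_approved"))
```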