@hustle-together/api-dev-tools 2.0.7 → 3.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +343 -467
- package/bin/cli.js +229 -15
- package/commands/README.md +124 -251
- package/commands/api-create.md +318 -136
- package/commands/api-interview.md +252 -256
- package/commands/api-research.md +209 -234
- package/commands/api-verify.md +231 -0
- package/demo/audio/generate-all-narrations.js +581 -0
- package/demo/audio/generate-narration.js +120 -56
- package/demo/audio/generate-voice-previews.js +140 -0
- package/demo/audio/narration-adam-timing.json +4675 -0
- package/demo/audio/narration-adam.mp3 +0 -0
- package/demo/audio/narration-creature-timing.json +4675 -0
- package/demo/audio/narration-creature.mp3 +0 -0
- package/demo/audio/narration-gaming-timing.json +4675 -0
- package/demo/audio/narration-gaming.mp3 +0 -0
- package/demo/audio/narration-hope-timing.json +4675 -0
- package/demo/audio/narration-hope.mp3 +0 -0
- package/demo/audio/narration-mark-timing.json +4675 -0
- package/demo/audio/narration-mark.mp3 +0 -0
- package/demo/audio/previews/manifest.json +30 -0
- package/demo/audio/previews/preview-creature.mp3 +0 -0
- package/demo/audio/previews/preview-gaming.mp3 +0 -0
- package/demo/audio/previews/preview-hope.mp3 +0 -0
- package/demo/audio/previews/preview-mark.mp3 +0 -0
- package/demo/audio/voices-manifest.json +50 -0
- package/demo/hustle-together/blog/gemini-vs-claude-widgets.html +30 -28
- package/demo/hustle-together/blog/interview-driven-api-development.html +37 -23
- package/demo/hustle-together/index.html +142 -109
- package/demo/workflow-demo.html +2618 -1036
- package/hooks/api-workflow-check.py +2 -0
- package/hooks/enforce-deep-research.py +180 -0
- package/hooks/enforce-disambiguation.py +149 -0
- package/hooks/enforce-documentation.py +187 -0
- package/hooks/enforce-environment.py +249 -0
- package/hooks/enforce-refactor.py +187 -0
- package/hooks/enforce-research.py +93 -46
- package/hooks/enforce-schema.py +186 -0
- package/hooks/enforce-scope.py +156 -0
- package/hooks/enforce-tdd-red.py +246 -0
- package/hooks/enforce-verify.py +186 -0
- package/hooks/periodic-reground.py +154 -0
- package/hooks/session-startup.py +151 -0
- package/hooks/track-tool-use.py +109 -17
- package/hooks/verify-after-green.py +282 -0
- package/package.json +3 -2
- package/scripts/collect-test-results.ts +404 -0
- package/scripts/extract-parameters.ts +483 -0
- package/scripts/generate-test-manifest.ts +520 -0
- package/templates/CLAUDE-SECTION.md +84 -0
- package/templates/api-dev-state.json +83 -8
- package/templates/api-test/page.tsx +315 -0
- package/templates/api-test/test-structure/route.ts +269 -0
- package/templates/research-index.json +6 -0
- package/templates/settings.json +59 -0
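
Note: the new enforce-* hooks added in the hunks below all share one PreToolUse contract (per their docstrings): Claude Code pipes a JSON description of the pending Write/Edit call to the hook on stdin, and the hook prints a single JSON permission decision to stdout. A minimal sketch of that contract follows; the gating rule and file path in it are illustrative, not taken from the package.

# Sketch of the shared PreToolUse hook contract (derived from the hook
# docstrings in the hunks below; the gating rule here is made up).
import json
import sys

def decide(tool_input: dict) -> dict:
    file_path = tool_input.get("file_path", "")
    # Hypothetical gate: block API route writes until earlier phases pass.
    if "/api/" in file_path and file_path.endswith("route.ts"):
        return {"permissionDecision": "deny", "reason": "Finish earlier workflow phases first."}
    return {"permissionDecision": "allow"}

if __name__ == "__main__":
    payload = json.load(sys.stdin)        # hook input arrives as JSON on stdin
    decision = decide(payload.get("tool_input", {}))
    print(json.dumps(decision))           # decision goes back as JSON on stdout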
package/hooks/enforce-schema.py
@@ -0,0 +1,186 @@
+#!/usr/bin/env python3
+"""
+Hook: PreToolUse for Write/Edit
+Purpose: Block writing implementation if schema not reviewed WITH USER CONFIRMATION
+
+Phase 5 requires:
+1. Create Zod schemas based on interview + research
+2. SHOW schema to user in formatted display
+3. USE AskUserQuestion: "Schema matches interview? [Y/n]"
+4. Loop back if user wants changes
+5. Only proceed when user confirms
+
+Returns:
+- {"permissionDecision": "allow"} - Let the tool run
+- {"permissionDecision": "deny", "reason": "..."} - Block with explanation
+"""
+import json
+import sys
+from pathlib import Path
+
+STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+
+
+def main():
+    try:
+        input_data = json.load(sys.stdin)
+    except json.JSONDecodeError:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    tool_input = input_data.get("tool_input", {})
+    file_path = tool_input.get("file_path", "")
+
+    # Only enforce for API route and schema files
+    is_api_file = "/api/" in file_path and file_path.endswith(".ts")
+    is_schema_file = "/schemas/" in file_path and file_path.endswith(".ts")
+
+    if not is_api_file and not is_schema_file:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Skip test files
+    if ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Skip documentation/config files
+    if file_path.endswith(".md") or file_path.endswith(".json"):
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    if not STATE_FILE.exists():
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    try:
+        state = json.loads(STATE_FILE.read_text())
+    except json.JSONDecodeError:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    endpoint = state.get("endpoint", "unknown")
+    phases = state.get("phases", {})
+    interview = phases.get("interview", {})
+    research_deep = phases.get("research_deep", {})
+    schema_creation = phases.get("schema_creation", {})
+
+    # Only enforce after interview is complete
+    if interview.get("status") != "complete":
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Only enforce after deep research is complete (or not needed)
+    deep_status = research_deep.get("status", "not_started")
+    proposed = research_deep.get("proposed_searches", [])
+    if proposed and deep_status != "complete":
+        # Let enforce-deep-research.py handle this
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    status = schema_creation.get("status", "not_started")
+
+    if status != "complete":
+        user_question_asked = schema_creation.get("user_question_asked", False)
+        user_confirmed = schema_creation.get("user_confirmed", False)
+        schema_shown = schema_creation.get("schema_shown", False)
+        schema_file = schema_creation.get("schema_file", None)
+        fields_count = schema_creation.get("fields_count", 0)
+
+        missing = []
+        if not schema_shown:
+            missing.append("Schema not shown to user")
+        if not user_question_asked:
+            missing.append("User review question (AskUserQuestion not used)")
+        if not user_confirmed:
+            missing.append("User hasn't confirmed schema matches interview")
+
+        print(json.dumps({
+            "permissionDecision": "deny",
+            "reason": f"""❌ BLOCKED: Schema creation (Phase 5) not complete.
+
+Status: {status}
+Schema shown: {schema_shown}
+User question asked: {user_question_asked}
+User confirmed: {user_confirmed}
+Schema file: {schema_file or "Not created yet"}
+Fields: {fields_count}
+
+MISSING:
+{chr(10).join(f" • {m}" for m in missing)}
+
+═══════════════════════════════════════════════════════════
+⚠️ GET USER CONFIRMATION FOR SCHEMA
+═══════════════════════════════════════════════════════════
+
+REQUIRED STEPS:
+
+1. Create Zod schemas based on:
+   • Interview answers (error handling, caching, etc.)
+   • Research findings (API parameters, response format)
+
+2. SHOW formatted schema to user:
+   ┌───────────────────────────────────────────────────────────┐
+   │  SCHEMA REVIEW                                             │
+   │                                                            │
+   │  Request Schema:                                           │
+   │  domain: z.string()             ← From interview: domain  │
+   │  mode: z.enum(["full", "logo"]) ← Your choice: full        │
+   │  includeColors: z.boolean().default(true)                  │
+   │                                                            │
+   │  Response Schema:                                          │
+   │  success: z.boolean()                                      │
+   │  data: BrandDataSchema                                     │
+   │  cached: z.boolean()            ← From interview: 24h      │
+   │  error: ErrorSchema.optional()                             │
+   │                                                            │
+   │  Based on YOUR interview answers:                          │
+   │  ✓ Error handling: Return objects                          │
+   │  ✓ Caching: 24h (long)                                     │
+   │  ✓ Mode: Full brand kit                                    │
+   │                                                            │
+   │  Does this match your requirements? [Y/n]                  │
+   └───────────────────────────────────────────────────────────┘
+
+3. USE AskUserQuestion:
+   question: "Does this schema match your interview answers?"
+   options: [
+     {{"value": "confirm", "label": "Yes, schema looks correct"}},
+     {{"value": "modify", "label": "No, I need changes - [describe]"}},
+     {{"value": "restart", "label": "Let's redo the interview"}}
+   ]
+
+4. If user says "modify":
+   • Ask what changes they need
+   • Update schema accordingly
+   • LOOP BACK and show updated schema
+
+5. If user says "restart":
+   • Reset interview phase
+   • Go back to Phase 3
+
+6. If user says "confirm":
+   • Set schema_creation.user_confirmed = true
+   • Set schema_creation.user_question_asked = true
+   • Set schema_creation.schema_shown = true
+   • Set schema_creation.status = "complete"
+
+WHY: Schema is the CONTRACT. User must approve before implementation."""
+        }))
+        sys.exit(0)
+
+    # Schema complete
+    schema_file = schema_creation.get("schema_file", "")
+    fields_count = schema_creation.get("fields_count", 0)
+    print(json.dumps({
+        "permissionDecision": "allow",
+        "message": f"""✅ Schema creation complete.
+Schema file: {schema_file}
+Fields: {fields_count}
+User confirmed schema matches interview requirements."""
+    }))
+    sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
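
Note: for orientation only, the fragment below shows the phase fields that enforce-schema.py reads from api-dev-state.json before it will allow an implementation write. The field names come from the hook above; the endpoint name, file path, and values are invented for illustration.

# Illustrative only: a state fragment that would satisfy enforce-schema.py's checks.
state = {
    "endpoint": "brand",                        # hypothetical endpoint name
    "phases": {
        "interview": {"status": "complete"},
        "research_deep": {"status": "complete", "proposed_searches": []},
        "schema_creation": {
            "status": "complete",
            "schema_shown": True,
            "user_question_asked": True,
            "user_confirmed": True,
            "schema_file": "schemas/brand.ts",  # hypothetical path
            "fields_count": 7,
        },
    },
}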
package/hooks/enforce-scope.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python3
+"""
+Hook: PreToolUse for Write/Edit
+Purpose: Block writing API code if scope not confirmed BY USER
+
+Phase 1 requires:
+1. Present scope understanding to user
+2. USE AskUserQuestion: "Is this correct? [Y/n]"
+3. Record any modifications user requests
+4. Loop back if user wants changes
+
+Returns:
+- {"permissionDecision": "allow"} - Let the tool run
+- {"permissionDecision": "deny", "reason": "..."} - Block with explanation
+"""
+import json
+import sys
+from pathlib import Path
+
+STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+
+
+def main():
+    try:
+        input_data = json.load(sys.stdin)
+    except json.JSONDecodeError:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    tool_input = input_data.get("tool_input", {})
+    file_path = tool_input.get("file_path", "")
+
+    # Only enforce for API route files
+    if "/api/" not in file_path:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Skip test files
+    if ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    if file_path.endswith(".md") or file_path.endswith(".json"):
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    if not STATE_FILE.exists():
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    try:
+        state = json.loads(STATE_FILE.read_text())
+    except json.JSONDecodeError:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    endpoint = state.get("endpoint")
+    if not endpoint:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    phases = state.get("phases", {})
+    disambiguation = phases.get("disambiguation", {})
+    scope = phases.get("scope", {})
+
+    # Check disambiguation is complete first
+    if disambiguation.get("status") != "complete":
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    status = scope.get("status", "not_started")
+    user_confirmed = scope.get("user_confirmed", False)
+    user_question_asked = scope.get("user_question_asked", False)
+
+    if status != "complete" or not user_confirmed:
+        endpoint_path = scope.get("endpoint_path", f"/api/v2/{endpoint}")
+        modifications = scope.get("modifications", [])
+
+        missing = []
+        if not user_question_asked:
+            missing.append("User question (AskUserQuestion not used)")
+        if not user_confirmed:
+            missing.append("User confirmation (user hasn't said 'yes')")
+
+        print(json.dumps({
+            "permissionDecision": "deny",
+            "reason": f"""❌ BLOCKED: Scope confirmation (Phase 1) not complete.
+
+Status: {status}
+User question asked: {user_question_asked}
+User confirmed: {user_confirmed}
+Proposed path: {endpoint_path}
+Modifications: {len(modifications)}
+
+MISSING:
+{chr(10).join(f" • {m}" for m in missing)}
+
+═══════════════════════════════════════════════════════════
+⚠️ GET USER CONFIRMATION OF SCOPE
+═══════════════════════════════════════════════════════════
+
+REQUIRED STEPS:
+
+1. Present your understanding:
+   ┌───────────────────────────────────────────────────────────┐
+   │  SCOPE CONFIRMATION                                        │
+   │                                                            │
+   │  I understand you want: {endpoint_path}                    │
+   │  Purpose: [describe inferred purpose]                      │
+   │  External API: [service name if any]                       │
+   │                                                            │
+   │  Is this correct? [Y/n]                                    │
+   │  Any modifications needed? ____                            │
+   └───────────────────────────────────────────────────────────┘
+
+2. USE AskUserQuestion:
+   question: "Is this scope correct? Any modifications?"
+   options: [
+     {{"value": "yes", "label": "Yes, proceed"}},
+     {{"value": "modify", "label": "I have modifications"}},
+     {{"value": "no", "label": "No, let me clarify"}}
+   ]
+
+3. If user says "modify" or "no":
+   • Ask for their modifications
+   • Record them in scope.modifications
+   • LOOP BACK and confirm again
+
+4. If user says "yes":
+   • Set scope.user_confirmed = true
+   • Set scope.user_question_asked = true
+   • Set scope.status = "complete"
+
+WHY: Prevents building the wrong thing."""
+        }))
+        sys.exit(0)
+
+    # Scope confirmed - inject context
+    endpoint_path = scope.get("endpoint_path", f"/api/v2/{endpoint}")
+    modifications = scope.get("modifications", [])
+
+    context = [f"✅ Scope confirmed: {endpoint_path}"]
+    if modifications:
+        context.append("User modifications:")
+        for mod in modifications[:3]:
+            context.append(f" • {mod}")
+
+    print(json.dumps({
+        "permissionDecision": "allow",
+        "message": "\n".join(context)
+    }))
+    sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
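
Note: since the hook above simply reads a tool call from stdin and prints one JSON object, it can be exercised outside Claude Code by piping it a fake payload. This is a rough sketch; the hook location and the route path in the payload are assumptions, not part of the package.

# Assumes the hook sits at hooks/enforce-scope.py relative to the working directory.
import json
import subprocess

fake_call = {"tool_input": {"file_path": "app/api/v2/brand/route.ts"}}  # illustrative path
result = subprocess.run(
    ["python3", "hooks/enforce-scope.py"],
    input=json.dumps(fake_call),
    capture_output=True,
    text=True,
)
decision = json.loads(result.stdout)
print(decision["permissionDecision"])  # "allow" unless the state file demands scope sign-off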
package/hooks/enforce-tdd-red.py
@@ -0,0 +1,246 @@
+#!/usr/bin/env python3
+"""
+Hook: PreToolUse for Write/Edit
+Purpose: Block writing implementation if test matrix not approved WITH USER CONFIRMATION
+
+Phase 7 (TDD Red) requires:
+1. Propose test matrix based on interview + schema
+2. SHOW test plan to user (scenarios, edge cases, coverage)
+3. USE AskUserQuestion: "Test plan looks good? [Y/n]"
+4. Loop back if user wants more tests
+5. Only allow route.ts after user approves test matrix
+
+Returns:
+- {"permissionDecision": "allow"} - Let the tool run
+- {"permissionDecision": "deny", "reason": "..."} - Block with explanation
+"""
+import json
+import sys
+from pathlib import Path
+
+# State file is in .claude/ directory (sibling to hooks/)
+STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+
+
+def find_test_file(route_path: str) -> tuple[bool, str]:
+    """Check if a test file exists for the given route file."""
+    route_file = Path(route_path)
+
+    # Common test file patterns
+    # route.ts -> route.test.ts, __tests__/route.test.ts, route.spec.ts
+    possible_tests = [
+        route_file.with_suffix(".test.ts"),
+        route_file.with_suffix(".test.tsx"),
+        route_file.with_suffix(".spec.ts"),
+        route_file.parent / "__tests__" / f"{route_file.stem}.test.ts",
+        route_file.parent / "__tests__" / f"{route_file.stem}.test.tsx",
+        route_file.parent.parent / "__tests__" / f"{route_file.parent.name}.test.ts",
+    ]
+
+    for test_path in possible_tests:
+        if test_path.exists():
+            return True, str(test_path)
+
+    return False, str(possible_tests[0])  # Return expected path
+
+
+def main():
+    # Read hook input from stdin
+    try:
+        input_data = json.load(sys.stdin)
+    except json.JSONDecodeError:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    tool_input = input_data.get("tool_input", {})
+    file_path = tool_input.get("file_path", "")
+
+    # Only enforce for route.ts files in /api/ directories
+    if not file_path.endswith("route.ts") or "/api/" not in file_path:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Allow if this IS a test file (shouldn't match but safety check)
+    if ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Check if state file exists
+    if not STATE_FILE.exists():
+        # Even without state, enforce TDD
+        test_exists, expected_path = find_test_file(file_path)
+        if not test_exists:
+            print(json.dumps({
+                "permissionDecision": "deny",
+                "reason": f"""❌ TDD VIOLATION: No test file found!
+
+You're trying to write: {file_path}
+
+But the test file doesn't exist: {expected_path}
+
+═══════════════════════════════════════════════════════════
+⚠️ WRITE TESTS FIRST (TDD Red Phase)
+═══════════════════════════════════════════════════════════
+
+TDD requires:
+1. Write a FAILING test first
+2. THEN write implementation to make it pass
+
+Create the test file first:
+  {expected_path}
+
+Example test structure:
+  import {{ describe, it, expect }} from 'vitest';
+
+  describe('POST /api/...', () => {{
+    it('should return 200 with valid input', async () => {{
+      // Test implementation
+    }});
+
+    it('should return 400 with invalid input', async () => {{
+      // Test validation
+    }});
+  }});"""
+            }))
+            sys.exit(0)
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Load state
+    try:
+        state = json.loads(STATE_FILE.read_text())
+    except json.JSONDecodeError:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    phases = state.get("phases", {})
+    tdd_red = phases.get("tdd_red", {})
+    tdd_red_status = tdd_red.get("status", "not_started")
+    test_count = tdd_red.get("test_count", 0)
+
+    # Get user checkpoint fields
+    user_question_asked = tdd_red.get("user_question_asked", False)
+    user_approved = tdd_red.get("user_approved", False)
+    matrix_shown = tdd_red.get("matrix_shown", False)
+    test_scenarios = tdd_red.get("test_scenarios", [])
+
+    # Check if TDD Red phase is complete
+    if tdd_red_status != "complete":
+        test_exists, expected_path = find_test_file(file_path)
+
+        # Check what's missing for user checkpoint
+        missing = []
+        if not test_exists:
+            missing.append("Test file not created yet")
+        if not matrix_shown:
+            missing.append("Test matrix not shown to user")
+        if not user_question_asked:
+            missing.append("User approval question (AskUserQuestion not used)")
+        if not user_approved:
+            missing.append("User hasn't approved the test plan")
+
+        print(json.dumps({
+            "permissionDecision": "deny",
+            "reason": f"""❌ BLOCKED: TDD Red phase (Phase 7) not complete.
+
+Current status: {tdd_red_status}
+Test count: {test_count}
+Test file exists: {test_exists}
+Matrix shown: {matrix_shown}
+User question asked: {user_question_asked}
+User approved: {user_approved}
+Scenarios: {len(test_scenarios)}
+
+MISSING:
+{chr(10).join(f" • {m}" for m in missing)}
+
+═══════════════════════════════════════════════════════════
+⚠️ GET USER APPROVAL FOR TEST MATRIX
+═══════════════════════════════════════════════════════════
+
+REQUIRED STEPS:
+
+1. PROPOSE test matrix based on interview + schema:
+   ┌───────────────────────────────────────────────────────────┐
+   │  TEST MATRIX                                               │
+   │                                                            │
+   │  Based on your interview, I'll test:                       │
+   │                                                            │
+   │  ✅ Success Scenarios:                                     │
+   │    • GET with valid domain → 200 + brand data              │
+   │    • POST with full payload → 200 + created                │
+   │                                                            │
+   │  ✅ Error Scenarios (your choice: return objects):         │
+   │    • Invalid domain → 400 + error object                   │
+   │    • Missing API key → 401 + error object                  │
+   │    • Not found → 404 + error object                        │
+   │                                                            │
+   │  ✅ Edge Cases:                                            │
+   │    • Rate limit exceeded → 429 + retry-after               │
+   │    • Cache hit → 200 + cached: true                        │
+   │    • Empty response → 200 + empty data                     │
+   │                                                            │
+   │  Total: 8 test scenarios                                   │
+   │                                                            │
+   │  Test plan looks good? [Y]                                 │
+   │  Add more tests? [n] ____                                  │
+   └───────────────────────────────────────────────────────────┘
+
+2. USE AskUserQuestion:
+   question: "Does this test plan cover your requirements?"
+   options: [
+     {{"value": "approve", "label": "Yes, write these tests"}},
+     {{"value": "add", "label": "Add more - I also need [scenario]"}},
+     {{"value": "modify", "label": "Change a scenario - [which one]"}}
+   ]
+
+3. If user says "add" or "modify":
+   • Update test_scenarios list
+   • LOOP BACK and show updated matrix
+
+4. If user says "approve":
+   • Create test file: {expected_path}
+   • Write all approved test scenarios
+   • Run tests to confirm they FAIL (red)
+   • Set tdd_red.user_approved = true
+   • Set tdd_red.user_question_asked = true
+   • Set tdd_red.matrix_shown = true
+   • Set tdd_red.test_count = N
+   • Set tdd_red.status = "complete"
+
+Based on interview decisions:
+{_format_interview_hints(phases.get("interview", {}))}
+
+WHY: User approves what gets tested BEFORE implementation."""
+        }))
+        sys.exit(0)
+
+    # TDD Red complete - allow implementation
+    print(json.dumps({
+        "permissionDecision": "allow",
+        "message": f"""✅ TDD Red phase complete.
+{test_count} tests written and failing.
+User approved {len(test_scenarios)} test scenarios.
+Proceeding to Green phase - make them pass!"""
+    }))
+    sys.exit(0)
+
+
+def _format_interview_hints(interview: dict) -> str:
+    """Format interview decisions as test hints."""
+    decisions = interview.get("decisions", {})
+    if not decisions:
+        return " (no interview decisions recorded)"
+
+    hints = []
+    for key, data in list(decisions.items())[:5]:
+        value = data.get("value", data.get("response", ""))
+        if value:
+            short_value = str(value)[:50] + "..." if len(str(value)) > 50 else str(value)
+            hints.append(f" • {key}: {short_value}")
+
+    return "\n".join(hints) if hints else " (no interview decisions recorded)"


if __name__ == "__main__":
    main()
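
Note: for reference, the sketch below spells out the candidate test locations that find_test_file() in the hook above probes, in order, for a hypothetical route path; the route path itself is illustrative.

# Hypothetical route path; the candidate order mirrors find_test_file() above.
from pathlib import Path

route_file = Path("app/api/brand/route.ts")
candidates = [
    route_file.with_suffix(".test.ts"),   # app/api/brand/route.test.ts
    route_file.with_suffix(".test.tsx"),  # app/api/brand/route.test.tsx
    route_file.with_suffix(".spec.ts"),   # app/api/brand/route.spec.ts
    route_file.parent / "__tests__" / f"{route_file.stem}.test.ts",   # app/api/brand/__tests__/route.test.ts
    route_file.parent / "__tests__" / f"{route_file.stem}.test.tsx",  # app/api/brand/__tests__/route.test.tsx
    route_file.parent.parent / "__tests__" / f"{route_file.parent.name}.test.ts",  # app/api/__tests__/brand.test.ts
]
print(any(p.exists() for p in candidates))  # True once any candidate test file exists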