@hustle-together/api-dev-tools 3.10.1 → 3.11.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/api-dev-state.json +159 -0
- package/.claude/commands/README.md +185 -0
- package/.claude/commands/add-command.md +209 -0
- package/.claude/commands/api-create.md +499 -0
- package/.claude/commands/api-env.md +50 -0
- package/.claude/commands/api-interview.md +331 -0
- package/.claude/commands/api-research.md +331 -0
- package/.claude/commands/api-status.md +259 -0
- package/.claude/commands/api-verify.md +231 -0
- package/.claude/commands/beepboop.md +97 -0
- package/.claude/commands/busycommit.md +112 -0
- package/.claude/commands/commit.md +83 -0
- package/.claude/commands/cycle.md +142 -0
- package/.claude/commands/gap.md +86 -0
- package/.claude/commands/green.md +142 -0
- package/.claude/commands/issue.md +192 -0
- package/.claude/commands/plan.md +168 -0
- package/.claude/commands/pr.md +122 -0
- package/.claude/commands/red.md +142 -0
- package/.claude/commands/refactor.md +142 -0
- package/.claude/commands/spike.md +142 -0
- package/.claude/commands/summarize.md +94 -0
- package/.claude/commands/tdd.md +144 -0
- package/.claude/commands/worktree-add.md +315 -0
- package/.claude/commands/worktree-cleanup.md +281 -0
- package/.claude/hooks/api-workflow-check.py +227 -0
- package/.claude/hooks/enforce-deep-research.py +185 -0
- package/.claude/hooks/enforce-disambiguation.py +155 -0
- package/.claude/hooks/enforce-documentation.py +192 -0
- package/.claude/hooks/enforce-environment.py +253 -0
- package/.claude/hooks/enforce-external-research.py +328 -0
- package/.claude/hooks/enforce-interview.py +421 -0
- package/.claude/hooks/enforce-refactor.py +189 -0
- package/.claude/hooks/enforce-research.py +159 -0
- package/.claude/hooks/enforce-schema.py +186 -0
- package/.claude/hooks/enforce-scope.py +160 -0
- package/.claude/hooks/enforce-tdd-red.py +250 -0
- package/.claude/hooks/enforce-verify.py +186 -0
- package/.claude/hooks/periodic-reground.py +154 -0
- package/.claude/hooks/session-startup.py +151 -0
- package/.claude/hooks/track-tool-use.py +626 -0
- package/.claude/hooks/verify-after-green.py +282 -0
- package/.claude/hooks/verify-implementation.py +225 -0
- package/.claude/research/index.json +6 -0
- package/.claude/settings.json +93 -0
- package/.claude/settings.local.json +11 -0
- package/.claude-plugin/marketplace.json +112 -0
- package/.skills/README.md +291 -0
- package/.skills/_shared/convert-commands.py +192 -0
- package/.skills/_shared/hooks/api-workflow-check.py +227 -0
- package/.skills/_shared/hooks/enforce-deep-research.py +185 -0
- package/.skills/_shared/hooks/enforce-disambiguation.py +155 -0
- package/.skills/_shared/hooks/enforce-documentation.py +192 -0
- package/.skills/_shared/hooks/enforce-environment.py +253 -0
- package/.skills/_shared/hooks/enforce-external-research.py +328 -0
- package/.skills/_shared/hooks/enforce-interview.py +421 -0
- package/.skills/_shared/hooks/enforce-refactor.py +189 -0
- package/.skills/_shared/hooks/enforce-research.py +159 -0
- package/.skills/_shared/hooks/enforce-schema.py +186 -0
- package/.skills/_shared/hooks/enforce-scope.py +160 -0
- package/.skills/_shared/hooks/enforce-tdd-red.py +250 -0
- package/.skills/_shared/hooks/enforce-verify.py +186 -0
- package/.skills/_shared/hooks/periodic-reground.py +154 -0
- package/.skills/_shared/hooks/session-startup.py +151 -0
- package/.skills/_shared/hooks/track-tool-use.py +626 -0
- package/.skills/_shared/hooks/verify-after-green.py +282 -0
- package/.skills/_shared/hooks/verify-implementation.py +225 -0
- package/.skills/_shared/install.sh +114 -0
- package/.skills/_shared/settings.json +93 -0
- package/.skills/add-command/SKILL.md +222 -0
- package/.skills/api-create/SKILL.md +512 -0
- package/.skills/api-env/SKILL.md +63 -0
- package/.skills/api-interview/SKILL.md +344 -0
- package/.skills/api-research/SKILL.md +344 -0
- package/.skills/api-status/SKILL.md +272 -0
- package/.skills/api-verify/SKILL.md +244 -0
- package/.skills/beepboop/SKILL.md +110 -0
- package/.skills/busycommit/SKILL.md +125 -0
- package/.skills/commit/SKILL.md +96 -0
- package/.skills/cycle/SKILL.md +155 -0
- package/.skills/gap/SKILL.md +99 -0
- package/.skills/green/SKILL.md +155 -0
- package/.skills/issue/SKILL.md +205 -0
- package/.skills/plan/SKILL.md +181 -0
- package/.skills/pr/SKILL.md +135 -0
- package/.skills/red/SKILL.md +155 -0
- package/.skills/refactor/SKILL.md +155 -0
- package/.skills/spike/SKILL.md +155 -0
- package/.skills/summarize/SKILL.md +107 -0
- package/.skills/tdd/SKILL.md +157 -0
- package/.skills/update-todos/SKILL.md +228 -0
- package/.skills/worktree-add/SKILL.md +328 -0
- package/.skills/worktree-cleanup/SKILL.md +294 -0
- package/CHANGELOG.md +97 -0
- package/README.md +58 -17
- package/package.json +22 -11
|
@@ -0,0 +1,421 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Hook: PreToolUse for Write/Edit
|
|
4
|
+
Purpose: Block proceeding to schema/TDD if interview has no USER answers
|
|
5
|
+
|
|
6
|
+
This hook ensures Claude actually asks the user questions and records
|
|
7
|
+
their answers, rather than self-answering the interview.
|
|
8
|
+
|
|
9
|
+
v1.8.0 MAJOR UPDATE: Now requires STRUCTURED questions with multiple-choice
|
|
10
|
+
options derived from research phase findings.
|
|
11
|
+
|
|
12
|
+
It checks:
|
|
13
|
+
1. Research phase is complete (questions must be based on research)
|
|
14
|
+
2. Interview status is "complete"
|
|
15
|
+
3. Questions used AskUserQuestion tool with STRUCTURED OPTIONS
|
|
16
|
+
4. At least MIN_STRUCTURED_QUESTIONS have multiple-choice or typed options
|
|
17
|
+
5. Answers don't look auto-generated (contain user-specific details)
|
|
18
|
+
|
|
19
|
+
The goal: Questions like Claude Code shows - with numbered options and
|
|
20
|
+
"Type something" at the end, all based on research findings.
|
|
21
|
+
|
|
22
|
+
Returns:
|
|
23
|
+
- {"permissionDecision": "allow"} - Let the tool run
|
|
24
|
+
- {"permissionDecision": "deny", "reason": "..."} - Block with explanation
|
|
25
|
+
"""
|
|
26
|
+
import json
|
|
27
|
+
import sys
|
|
28
|
+
from pathlib import Path
|
|
29
|
+
|
|
30
|
+
# State file is in .claude/ directory (sibling to hooks/)
|
|
31
|
+
STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
|
|
32
|
+
|
|
33
|
+
# Minimum questions required for a valid interview
|
|
34
|
+
MIN_QUESTIONS = 5 # Increased - need comprehensive interview
|
|
35
|
+
|
|
36
|
+
# Minimum questions that MUST have structured options (multiple-choice)
|
|
37
|
+
MIN_STRUCTURED_QUESTIONS = 3
|
|
38
|
+
|
|
39
|
+
# Phrases that indicate self-answered (not real user input)
|
|
40
|
+
SELF_ANSWER_INDICATORS = [
|
|
41
|
+
"based on common",
|
|
42
|
+
"self-answered",
|
|
43
|
+
"assumed",
|
|
44
|
+
"typical use case",
|
|
45
|
+
"standard implementation",
|
|
46
|
+
"common pattern",
|
|
47
|
+
"i'll assume",
|
|
48
|
+
"assuming",
|
|
49
|
+
"probably",
|
|
50
|
+
"most likely",
|
|
51
|
+
"default to",
|
|
52
|
+
"usually",
|
|
53
|
+
]
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def main() -> None:
    """Gatekeep Write/Edit calls on API/schema files behind the interview phase.

    Reads the hook payload from stdin, loads .claude/api-dev-state.json, and
    prints exactly one permissionDecision JSON object ("allow", or "deny"
    with a reason). Always exits 0 - the decision is carried in the JSON.
    Malformed input or state fails open ("allow").
    """
    # Read hook input from stdin
    try:
        input_data = json.load(sys.stdin)
    except json.JSONDecodeError:
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    tool_input = input_data.get("tool_input", {})
    file_path = tool_input.get("file_path", "")

    # Enforce for ANY file in /api/ directory (not just route.ts)
    # This forces Claude to stop and interview before ANY API work
    is_api_file = "/api/" in file_path and file_path.endswith(".ts")
    is_schema_file = "/schemas/" in file_path and file_path.endswith(".ts")

    # Skip test files - those are allowed during TDD
    is_test_file = ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path

    if is_test_file:
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    if not is_schema_file and not is_api_file:
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    # Check if state file exists
    if not STATE_FILE.exists():
        print(json.dumps({
            "permissionDecision": "deny",
            "reason": """❌ API workflow not started.

Run /api-create [endpoint-name] to begin the interview-driven workflow."""
        }))
        sys.exit(0)

    # Load state
    try:
        state = json.loads(STATE_FILE.read_text())
    except json.JSONDecodeError:
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    phases = state.get("phases", {})
    research = phases.get("research_initial", {})
    interview = phases.get("interview", {})
    interview_status = interview.get("status", "not_started")
    # Lowercased so SELF_ANSWER_INDICATORS substring matching is case-insensitive.
    interview_desc = interview.get("description", "").lower()
    questions = interview.get("questions", [])
    research_queries = state.get("research_queries", [])

    # Check 0: Research must be complete FIRST (questions based on research)
    research_status = research.get("status", "not_started")
    if research_status != "complete":
        sources_count = len(research.get("sources", []))
        print(json.dumps({
            "permissionDecision": "deny",
            "reason": f"""❌ BLOCKED: Research phase must complete BEFORE interview.

Research status: {research_status}
Sources consulted: {sources_count}
Research queries: {len(research_queries)}

═══════════════════════════════════════════════════════════
⚠️  COMPLETE RESEARCH FIRST - THEN ASK QUESTIONS
═══════════════════════════════════════════════════════════

The interview questions MUST be based on research findings:
1. Use Context7 to get SDK/API documentation
2. Use WebSearch (2-3 searches) for official docs
3. THEN generate interview questions with STRUCTURED OPTIONS
   based on what you discovered

Example: If research found 5 available models, ask:
"Which model should this endpoint use?"
  1. gpt-4o (fastest, cheapest)
  2. claude-sonnet-4-20250514 (best reasoning)
  3. gemini-pro (multimodal)
  4. Type something else...

Research INFORMS the options. No research = no good options."""
        }))
        sys.exit(0)

    # Check 1: Interview must be complete
    if interview_status != "complete":
        # Build example based on actual research
        research_based_example = _build_research_based_example(research_queries)

        print(json.dumps({
            "permissionDecision": "deny",
            "reason": f"""❌ BLOCKED: Interview phase not complete.

Current status: {interview_status}
AskUserQuestion calls: {interview.get('user_question_count', 0)}
Structured questions: {interview.get('structured_question_count', 0)}

═══════════════════════════════════════════════════════════
⚠️  USE STRUCTURED QUESTIONS WITH OPTIONS
═══════════════════════════════════════════════════════════

Based on your research, ask questions using AskUserQuestion with
the 'options' parameter to provide multiple-choice selections:

{research_based_example}

REQUIRED FORMAT for AskUserQuestion:
- question: "Your question text"
- options: [
    {{"value": "option1", "label": "Option 1 description"}},
    {{"value": "option2", "label": "Option 2 description"}},
    {{"value": "custom", "label": "Type something..."}}
  ]

You need at least {MIN_STRUCTURED_QUESTIONS} structured questions with options.
Current: {interview.get('structured_question_count', 0)}

DO NOT:
❌ Ask open-ended questions without options
❌ Make up options not based on research
❌ Skip the AskUserQuestion tool
❌ Self-answer questions"""
        }))
        sys.exit(0)

    # Check 2: Must have minimum questions
    if len(questions) < MIN_QUESTIONS:
        print(json.dumps({
            "permissionDecision": "deny",
            "reason": f"""❌ Interview incomplete - not enough questions asked.

Questions recorded: {len(questions)}
Minimum required: {MIN_QUESTIONS}

You must ask the user more questions about their requirements.
Use AskUserQuestion with structured options based on your research."""
        }))
        sys.exit(0)

    # Check 3: Verify AskUserQuestion tool was actually used
    # NOTE(review): user_question_count is assigned but never read here;
    # tool_used_count below is what actually gates this check.
    user_question_count = interview.get("user_question_count", 0)
    tool_used_count = sum(1 for q in questions if q.get("tool_used", False))

    if tool_used_count < MIN_QUESTIONS:
        print(json.dumps({
            "permissionDecision": "deny",
            "reason": f"""❌ Interview not conducted properly.

AskUserQuestion tool uses tracked: {tool_used_count}
Minimum required: {MIN_QUESTIONS}

You MUST use the AskUserQuestion tool to ask the user directly.
Do NOT make up answers or mark the interview as complete without
actually asking the user and receiving their responses."""
        }))
        sys.exit(0)

    # Check 4: Verify structured questions were used.
    # Takes the max of the explicit counter and a recount of per-question
    # flags, so either tracking mechanism can satisfy the requirement.
    structured_count = interview.get("structured_question_count", 0)
    questions_with_options = sum(1 for q in questions if q.get("has_options", False))
    actual_structured = max(structured_count, questions_with_options)

    if actual_structured < MIN_STRUCTURED_QUESTIONS:
        print(json.dumps({
            "permissionDecision": "deny",
            "reason": f"""❌ Not enough STRUCTURED questions with options.

Structured questions (with options): {actual_structured}
Minimum required: {MIN_STRUCTURED_QUESTIONS}

You MUST use AskUserQuestion with the 'options' parameter to
provide multiple-choice answers based on your research.

Example:
AskUserQuestion(
  question="Which AI provider should this endpoint support?",
  options=[
    {{"value": "openai", "label": "OpenAI (GPT-4o)"}},
    {{"value": "anthropic", "label": "Anthropic (Claude)"}},
    {{"value": "google", "label": "Google (Gemini)"}},
    {{"value": "all", "label": "All of the above"}},
    {{"value": "custom", "label": "Type something else..."}}
  ]
)

This gives the user clear choices based on what you researched."""
        }))
        sys.exit(0)

    # Check 5: Look for self-answer indicators
    for indicator in SELF_ANSWER_INDICATORS:
        if indicator in interview_desc:
            print(json.dumps({
                "permissionDecision": "deny",
                "reason": f"""❌ Interview appears to be self-answered.

Detected: "{indicator}" in interview description.

You MUST actually ask the user questions using AskUserQuestion
with structured options. Self-answering defeats the purpose.

Reset the interview and ask with options based on research."""
            }))
            sys.exit(0)

    # Check 6: FINAL USER CONFIRMATION - must confirm interview is complete
    user_question_asked_final = interview.get("user_question_asked", False)
    user_completed = interview.get("user_completed", False)
    phase_exit_confirmed = interview.get("phase_exit_confirmed", False)
    decisions = interview.get("decisions", {})

    if not user_completed or not user_question_asked_final or not phase_exit_confirmed:
        decision_summary = _build_decision_summary(decisions)
        missing = []
        if not user_question_asked_final:
            missing.append("Final confirmation question (AskUserQuestion not used)")
        if not user_completed:
            missing.append("User hasn't confirmed interview complete")
        if not phase_exit_confirmed:
            missing.append("Phase exit confirmation (user must explicitly approve to proceed)")

        print(json.dumps({
            "permissionDecision": "deny",
            "reason": f"""❌ BLOCKED: Interview needs FINAL USER CONFIRMATION.

Questions asked: {len(questions)}
Structured questions: {actual_structured}
User final confirmation: {user_completed}
Phase exit confirmed: {phase_exit_confirmed}

MISSING:
{chr(10).join(f"  • {m}" for m in missing)}

═══════════════════════════════════════════════════════════
⚠️  GET USER CONFIRMATION BEFORE PROCEEDING
═══════════════════════════════════════════════════════════

REQUIRED STEPS:

1. SHOW interview summary to user:
   ┌───────────────────────────────────────────────────────┐
   │  INTERVIEW COMPLETE                                   │
   │                                                       │
   │  Your decisions:                                      │
{chr(10).join(f"   │  • {line:<49} │" for line in decision_summary.split(chr(10))[:8]) if decision_summary else "   │  (no decisions recorded yet)                        │"}
   │                                                       │
   │  These will guide the schema, tests, and implementation│
   │                                                       │
   │  All correct? [Y]                                     │
   │  Change an answer? [n] ____                           │
   └───────────────────────────────────────────────────────┘

2. USE AskUserQuestion:
   question: "Interview decisions correct? Ready to proceed?"
   options: [
     {{"value": "confirm", "label": "Yes, proceed to schema creation"}},
     {{"value": "change", "label": "No, I want to change [which question]"}},
     {{"value": "add", "label": "Add another question about [topic]"}}
   ]

3. If user says "change" or "add":
   • Ask which question/topic
   • Re-ask with AskUserQuestion
   • Update decisions
   • LOOP BACK and show updated summary

4. If user says "confirm":
   • Set interview.user_question_asked = true
   • Set interview.user_completed = true
   • Set interview.status = "complete"

WHY: User must approve their decisions before they drive implementation."""
        }))
        sys.exit(0)

    if decisions:
        # Build a reminder of what the user decided
        decision_summary = _build_decision_summary(decisions)

        # Allow but inject context about user decisions
        print(json.dumps({
            "permissionDecision": "allow",
            "message": f"""✅ Interview complete. REMEMBER THE USER'S DECISIONS:

{decision_summary}

Your implementation MUST align with these choices.
The state file tracks these for consistency verification."""
        }))
    else:
        print(json.dumps({"permissionDecision": "allow"}))

    sys.exit(0)
|
|
350
|
+
|
|
351
|
+
|
|
352
|
+
def _build_decision_summary(decisions: dict) -> str:
|
|
353
|
+
"""Build a human-readable summary of user decisions from the interview."""
|
|
354
|
+
if not decisions:
|
|
355
|
+
return "No key decisions recorded."
|
|
356
|
+
|
|
357
|
+
lines = []
|
|
358
|
+
decision_labels = {
|
|
359
|
+
"provider": "AI Provider",
|
|
360
|
+
"purpose": "Primary Purpose",
|
|
361
|
+
"response_format": "Response Format",
|
|
362
|
+
"required_params": "Required Parameters",
|
|
363
|
+
"optional_params": "Optional Parameters",
|
|
364
|
+
"error_handling": "Error Handling",
|
|
365
|
+
"api_key_handling": "API Key Handling",
|
|
366
|
+
"external_services": "External Services",
|
|
367
|
+
}
|
|
368
|
+
|
|
369
|
+
for key, data in decisions.items():
|
|
370
|
+
label = decision_labels.get(key, key.replace("_", " ").title())
|
|
371
|
+
response = data.get("response", "")
|
|
372
|
+
value = data.get("value", "")
|
|
373
|
+
|
|
374
|
+
if value:
|
|
375
|
+
lines.append(f"• {label}: {value}")
|
|
376
|
+
elif response:
|
|
377
|
+
# Truncate long responses
|
|
378
|
+
short_response = response[:80] + "..." if len(response) > 80 else response
|
|
379
|
+
lines.append(f"• {label}: {short_response}")
|
|
380
|
+
|
|
381
|
+
return "\n".join(lines) if lines else "No key decisions recorded."
|
|
382
|
+
|
|
383
|
+
|
|
384
|
+
def _build_research_based_example(research_queries: list) -> str:
|
|
385
|
+
"""Build an example question based on actual research queries."""
|
|
386
|
+
if not research_queries:
|
|
387
|
+
return """Example (generic - do research first!):
|
|
388
|
+
"What is the main use case for this endpoint?"
|
|
389
|
+
1. Data retrieval
|
|
390
|
+
2. Data transformation
|
|
391
|
+
3. AI processing
|
|
392
|
+
4. Type something..."""
|
|
393
|
+
|
|
394
|
+
# Extract terms from research to suggest relevant options
|
|
395
|
+
all_terms = []
|
|
396
|
+
for query in research_queries[-5:]: # Last 5 queries
|
|
397
|
+
terms = query.get("terms", [])
|
|
398
|
+
all_terms.extend(terms)
|
|
399
|
+
|
|
400
|
+
# Deduplicate and get top terms
|
|
401
|
+
unique_terms = list(dict.fromkeys(all_terms))[:4]
|
|
402
|
+
|
|
403
|
+
if unique_terms:
|
|
404
|
+
options_example = "\n ".join([
|
|
405
|
+
f"{i+1}. {term.title()}" for i, term in enumerate(unique_terms)
|
|
406
|
+
])
|
|
407
|
+
return f"""Example based on your research:
|
|
408
|
+
"Which of these should be the primary focus?"
|
|
409
|
+
{options_example}
|
|
410
|
+
{len(unique_terms)+1}. Type something else..."""
|
|
411
|
+
|
|
412
|
+
return """Example:
|
|
413
|
+
"What capability is most important?"
|
|
414
|
+
1. Option based on research finding 1
|
|
415
|
+
2. Option based on research finding 2
|
|
416
|
+
3. Option based on research finding 3
|
|
417
|
+
4. Type something..."""
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
if __name__ == "__main__":
|
|
421
|
+
main()
|
|
@@ -0,0 +1,189 @@
|
|
|
1
|
+
#!/usr/bin/env python3
"""
Hook: PreToolUse for Write/Edit
Purpose: Block refactoring until verification phase is complete

This hook ensures that after tests pass (Green phase), the implementation
is verified against documentation before any refactoring begins.

The refactor phase of the workflow requires:
- TDD Green phase complete (tests passing)
- Verify phase complete (Phase 10)
- All gaps found have been fixed or documented as intentional omissions

NOTE(review): the original text said "Phase 20 of the 13-phase workflow",
which is internally inconsistent - confirm the intended phase number
against the workflow documentation.

Returns:
- {"permissionDecision": "allow"} - Let the tool run
- {"permissionDecision": "deny", "reason": "..."} - Block with explanation
"""
import json
import sys
from pathlib import Path

# State file is in .claude/ directory (sibling to hooks/)
STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"

# Keywords that suggest refactoring intent.
# NOTE(review): not referenced anywhere in this file's visible code -
# presumably kept for other tooling or future use; confirm before removing.
REFACTOR_KEYWORDS = [
    "refactor",
    "cleanup",
    "clean up",
    "restructure",
    "reorganize",
    "optimize",
    "simplify",
    "extract",
    "rename",
    "move",
]
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def is_refactoring_edit(tool_input: dict) -> bool:
    """Heuristically decide whether an Edit looks like a refactoring.

    An edit counts as refactoring when it replaces existing text with new
    text of roughly similar size (within 30% of the original length),
    suggesting a rewrite of existing code rather than a new feature.
    """
    before = tool_input.get("old_string", "")
    after = tool_input.get("new_string", "")

    # Need both sides of the edit to compare; pure insertions/deletions
    # are not treated as refactoring.
    if not before or not after:
        return False

    # Similar length suggests refactoring vs new features.
    return abs(len(after) - len(before)) < len(before) * 0.3
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def main() -> None:
    """Gatekeep Edit calls on API route files behind the verify phase.

    Reads the hook payload from stdin and the shared state file, then prints
    exactly one permissionDecision JSON object. Fails open ("allow") on any
    missing or malformed input/state. Always exits 0 - the decision is
    carried in the printed JSON.
    """
    # Read hook input from stdin
    try:
        input_data = json.load(sys.stdin)
    except json.JSONDecodeError:
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})
    file_path = tool_input.get("file_path", "")

    # Only check Edit operations on API files (Write is for new content)
    if tool_name != "Edit":
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    # Only enforce for API route files
    if "/api/" not in file_path or not file_path.endswith(".ts"):
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    # Skip test files - can refactor tests anytime
    if ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path:
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    # Check if state file exists; no state means the workflow isn't active.
    if not STATE_FILE.exists():
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    # Load state; unparseable state also fails open.
    try:
        state = json.loads(STATE_FILE.read_text())
    except json.JSONDecodeError:
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    # No tracked endpoint means this workflow isn't governing the edit.
    endpoint = state.get("endpoint")
    if not endpoint:
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    phases = state.get("phases", {})
    tdd_green = phases.get("tdd_green", {})
    verify = phases.get("verify", {})
    tdd_refactor = phases.get("tdd_refactor", {})

    # Only enforce after TDD Green is complete
    if tdd_green.get("status") != "complete":
        # Still in implementation phase, allow edits
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    # Check if this looks like a refactoring edit
    if not is_refactoring_edit(tool_input):
        # Doesn't look like refactoring, might be bug fix
        print(json.dumps({"permissionDecision": "allow"}))
        sys.exit(0)

    # Check verify phase status
    verify_status = verify.get("status", "not_started")
    gaps_found = verify.get("gaps_found", 0)
    gaps_fixed = verify.get("gaps_fixed", 0)
    intentional_omissions = verify.get("intentional_omissions", [])
    phase_exit_confirmed = verify.get("phase_exit_confirmed", False)

    if verify_status != "complete" or not phase_exit_confirmed:
        print(json.dumps({
            "permissionDecision": "deny",
            "reason": f"""❌ BLOCKED: Verify phase (Phase 10) not complete.

Current status: {verify_status}
Gaps found: {gaps_found}
Gaps fixed: {gaps_fixed}
Intentional omissions: {len(intentional_omissions)}
Phase exit confirmed: {phase_exit_confirmed}

═══════════════════════════════════════════════════════════
⚠️  VERIFY BEFORE REFACTORING
═══════════════════════════════════════════════════════════

Before refactoring, you must:

1. Re-read the original documentation
2. Compare implementation to docs feature-by-feature
3. Fix any gaps OR document them as intentional omissions

Current gaps not addressed:
• {gaps_found - gaps_fixed} gaps still need attention

Once verify phase is complete:
• All gaps fixed OR documented as omissions
• Implementation matches documented behavior
• THEN you can safely refactor

WHY THIS MATTERS:
- Refactoring should not change behavior
- Must verify behavior is CORRECT before preserving it
- Otherwise you cement bugs into clean code"""
        }))
        sys.exit(0)

    # Verify complete - check if all gaps addressed.
    # NOTE(review): assumes gaps_fixed and intentional_omissions are
    # disjoint; a gap both fixed and listed as an omission would be
    # double-counted here - confirm against the state writer.
    unaddressed_gaps = gaps_found - gaps_fixed - len(intentional_omissions)
    if unaddressed_gaps > 0:
        print(json.dumps({
            "permissionDecision": "deny",
            "reason": f"""❌ BLOCKED: {unaddressed_gaps} gaps not addressed.

Gaps found: {gaps_found}
Gaps fixed: {gaps_fixed}
Intentional omissions: {len(intentional_omissions)}
Unaddressed: {unaddressed_gaps}

Fix the remaining gaps or mark them as intentional omissions
before refactoring."""
        }))
        sys.exit(0)

    # All clear for refactoring
    refactor_status = tdd_refactor.get("status", "not_started")
    print(json.dumps({
        "permissionDecision": "allow",
        "message": f"""✅ Verification complete. Safe to refactor.
Refactor phase status: {refactor_status}
Remember: Tests must still pass after refactoring!"""
    }))
    sys.exit(0)


if __name__ == "__main__":
    main()
|