@hustle-together/api-dev-tools 1.3.0 → 1.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,183 @@
+ #!/usr/bin/env python3
+ """
+ Hook: PreToolUse for Write/Edit
+ Purpose: Block proceeding to schema/TDD if interview has no USER answers
+
+ This hook ensures Claude actually asks the user questions and records
+ their answers, rather than self-answering the interview.
+
+ It checks:
+ 1. Interview status is "complete"
+ 2. There are actual questions with answers
+ 3. Answers don't look auto-generated (contain user-specific details)
+
+ Returns:
+ - {"permissionDecision": "allow"} - Let the tool run
+ - {"permissionDecision": "deny", "reason": "..."} - Block with explanation
+ """
+ import json
+ import sys
+ from pathlib import Path
+
+ # State file is in .claude/ directory (sibling to hooks/)
+ STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+
+ # Minimum questions required for a valid interview
+ MIN_QUESTIONS = 3
+
+ # Phrases that indicate self-answered (not real user input)
+ SELF_ANSWER_INDICATORS = [
+     "based on common",
+     "self-answered",
+     "assumed",
+     "typical use case",
+     "standard implementation",
+     "common pattern",
+ ]
+
+
+ def main():
+     # Read hook input from stdin
+     try:
+         input_data = json.load(sys.stdin)
+     except json.JSONDecodeError:
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+
+     tool_input = input_data.get("tool_input", {})
+     file_path = tool_input.get("file_path", "")
+
+     # Enforce for ANY file in /api/ directory (not just route.ts)
+     # This forces Claude to stop and interview before ANY API work
+     is_api_file = "/api/" in file_path and file_path.endswith(".ts")
+     is_schema_file = "/schemas/" in file_path and file_path.endswith(".ts")
+
+     # Skip test files - those are allowed during TDD
+     is_test_file = ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path
+
+     if is_test_file:
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+
+     if not is_schema_file and not is_api_file:
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+
+     # Check if state file exists
+     if not STATE_FILE.exists():
+         print(json.dumps({
+             "permissionDecision": "deny",
+             "reason": """❌ API workflow not started.
+
+ Run /api-create [endpoint-name] to begin the interview-driven workflow."""
+         }))
+         sys.exit(0)
+
+     # Load state
+     try:
+         state = json.loads(STATE_FILE.read_text())
+     except json.JSONDecodeError:
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+
+     phases = state.get("phases", {})
+     interview = phases.get("interview", {})
+     interview_status = interview.get("status", "not_started")
+     interview_desc = interview.get("description", "").lower()
+     questions = interview.get("questions", [])
+
+     # Check 1: Interview must be complete
+     if interview_status != "complete":
+         print(json.dumps({
+             "permissionDecision": "deny",
+             "reason": f"""❌ BLOCKED: Interview phase not complete.
+
+ Current status: {interview_status}
+ AskUserQuestion calls: {interview.get('user_question_count', 0)}
+
+ ═══════════════════════════════════════════════════════════
+ ⚠️ YOU MUST STOP AND ASK THE USER QUESTIONS NOW
+ ═══════════════════════════════════════════════════════════
+
+ Use the AskUserQuestion tool to ask EACH of these questions ONE AT A TIME:
+
+ 1. "What is the primary purpose of this endpoint?"
+ 2. "Who will use it and how?"
+ 3. "What parameters are essential vs optional?"
+
+ WAIT for the user's response after EACH question before continuing.
+
+ DO NOT:
+ ❌ Make up answers yourself
+ ❌ Assume what the user wants
+ ❌ Mark the interview as complete without asking
+ ❌ Try to write any code until you have real answers
+
+ The system is tracking your AskUserQuestion calls. You need at least 3
+ actual calls with user responses to proceed."""
+         }))
+         sys.exit(0)
+
+     # Check 2: Must have minimum questions
+     if len(questions) < MIN_QUESTIONS:
+         print(json.dumps({
+             "permissionDecision": "deny",
+             "reason": f"""❌ Interview incomplete - not enough questions asked.
+
+ Questions recorded: {len(questions)}
+ Minimum required: {MIN_QUESTIONS}
+
+ You must ask the user more questions about their requirements.
+ DO NOT proceed without understanding the user's actual needs."""
+         }))
+         sys.exit(0)
+
+     # Check 2.5: Verify AskUserQuestion tool was actually used
+     user_question_count = interview.get("user_question_count", 0)
+     tool_used_count = sum(1 for q in questions if q.get("tool_used", False))
+
+     if tool_used_count < MIN_QUESTIONS:
+         print(json.dumps({
+             "permissionDecision": "deny",
+             "reason": f"""❌ Interview not conducted properly.
+
+ AskUserQuestion tool uses tracked: {tool_used_count}
+ Minimum required: {MIN_QUESTIONS}
+
+ You MUST use the AskUserQuestion tool to ask the user directly.
+ Do NOT make up answers or mark the interview as complete without
+ actually asking the user and receiving their responses.
+
+ The system tracks when AskUserQuestion is used. Self-answering
+ will be detected and blocked."""
+         }))
+         sys.exit(0)
+
+     # Check 3: Look for self-answer indicators
+     for indicator in SELF_ANSWER_INDICATORS:
+         if indicator in interview_desc:
+             print(json.dumps({
+                 "permissionDecision": "deny",
+                 "reason": f"""❌ Interview appears to be self-answered.
+
+ Detected: "{indicator}" in interview description.
+
+ You MUST actually ask the user questions using AskUserQuestion.
+ Self-answering the interview defeats its purpose.
+
+ Reset the interview phase and ask the user directly:
+ 1. What do you want this endpoint to do?
+ 2. Which providers/models should it support?
+ 3. What parameters matter most to you?
+
+ Wait for their real answers before proceeding."""
+             }))
+             sys.exit(0)
+
+     # All checks passed
+     print(json.dumps({"permissionDecision": "allow"}))
+     sys.exit(0)
+
+
+ if __name__ == "__main__":
+     main()
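For reference, the phases.interview block that satisfies all three checks in the hook above looks roughly like the following. This is a sketch, not part of the package: the field names come from the checks in the code, while the answer text and counts are illustrative only.

# Illustrative only: an interview block this hook would accept
# (field names taken from the checks above; values are made up).
passing_interview = {
    "status": "complete",                                   # Check 1
    "user_question_count": 3,
    "description": "User wants a chat endpoint routed through a single gateway",
    "questions": [                                          # Check 2: at least MIN_QUESTIONS entries
        {"question": "What is the primary purpose of this endpoint?", "tool_used": True},
        {"question": "Who will use it and how?", "tool_used": True},
        {"question": "What parameters are essential vs optional?", "tool_used": True},
    ],                                                      # Check 2.5: >= 3 entries with tool_used
}
# Check 3 scans the lower-cased description for phrases such as "assumed" or
# "typical use case"; none appear here, so a Write/Edit to an /api/ file would be allowed.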
@@ -34,11 +34,12 @@ def main():
      tool_input = input_data.get("tool_input", {})
      tool_output = input_data.get("tool_output", {})

-     # Only track research-related tools
+     # Track research tools AND user questions
      research_tools = ["WebSearch", "WebFetch", "mcp__context7"]
      is_research_tool = any(t in tool_name for t in research_tools)
+     is_user_question = tool_name == "AskUserQuestion"

-     if not is_research_tool:
+     if not is_research_tool and not is_user_question:
          print(json.dumps({"continue": True}))
          sys.exit(0)

@@ -51,8 +52,42 @@ def main():
      else:
          state = create_initial_state()

-     # Get or create research phase
+     # Get phases
      phases = state.setdefault("phases", {})
+
+     # Handle AskUserQuestion separately - track in interview phase
+     if is_user_question:
+         interview = phases.setdefault("interview", {
+             "status": "not_started",
+             "questions": [],
+             "user_question_count": 0
+         })
+
+         # Track the question
+         questions = interview.setdefault("questions", [])
+         user_count = interview.get("user_question_count", 0) + 1
+         interview["user_question_count"] = user_count
+
+         question_entry = {
+             "question": tool_input.get("question", ""),
+             "timestamp": datetime.now().isoformat(),
+             "tool_used": True  # Proves AskUserQuestion was actually called
+         }
+         questions.append(question_entry)
+
+         # Update interview status
+         if interview.get("status") == "not_started":
+             interview["status"] = "in_progress"
+             interview["started_at"] = datetime.now().isoformat()
+
+         interview["last_activity"] = datetime.now().isoformat()
+
+         # Save and exit
+         STATE_FILE.write_text(json.dumps(state, indent=2))
+         print(json.dumps({"continue": True}))
+         sys.exit(0)
+
+     # Get or create research phase (for research tools)
      research = phases.setdefault("research_initial", {
          "status": "in_progress",
          "sources": [],
@@ -112,6 +147,29 @@ def main():
      # Add to sources list
      sources.append(source_entry)

+     # Also add to research_queries for prompt verification
+     research_queries = state.setdefault("research_queries", [])
+     query_entry = {
+         "timestamp": timestamp,
+         "tool": tool_name,
+     }
+
+     # Extract query/term based on tool type
+     if tool_name == "WebSearch":
+         query_entry["query"] = tool_input.get("query", "")
+         query_entry["terms"] = extract_terms(tool_input.get("query", ""))
+     elif tool_name == "WebFetch":
+         query_entry["url"] = tool_input.get("url", "")
+         query_entry["terms"] = extract_terms_from_url(tool_input.get("url", ""))
+     elif "context7" in tool_name.lower():
+         query_entry["library"] = tool_input.get("libraryName", tool_input.get("libraryId", ""))
+         query_entry["terms"] = [tool_input.get("libraryName", "").lower()]
+
+     research_queries.append(query_entry)
+
+     # Keep only last 50 queries
+     state["research_queries"] = research_queries[-50:]
+
      # Update last activity timestamp
      research["last_activity"] = timestamp
      research["source_count"] = len(sources)
@@ -155,7 +213,7 @@ def main():
  def create_initial_state():
      """Create initial state structure"""
      return {
-         "version": "1.0.0",
+         "version": "1.1.0",
          "created_at": datetime.now().isoformat(),
          "phases": {
              "scope": {"status": "not_started"},
@@ -174,7 +232,9 @@ def create_initial_state():
              "schema_matches_docs": False,
              "tests_cover_params": False,
              "all_tests_passing": False
-         }
+         },
+         "research_queries": [],
+         "prompt_detections": []
      }


@@ -190,5 +250,48 @@ def sanitize_input(tool_input):
      return sanitized


+ def extract_terms(query: str) -> list:
+     """Extract searchable terms from a query string."""
+     import re
+     # Remove common words and extract meaningful terms
+     stop_words = {"the", "a", "an", "is", "are", "was", "were", "be", "been",
+                   "how", "to", "do", "does", "what", "which", "for", "and", "or",
+                   "in", "on", "at", "with", "from", "this", "that", "it"}
+
+     # Extract words
+     words = re.findall(r'\b[\w@/-]+\b', query.lower())
+
+     # Filter and return
+     terms = [w for w in words if w not in stop_words and len(w) > 2]
+     return terms[:10]  # Limit to 10 terms
+
+
+ def extract_terms_from_url(url: str) -> list:
+     """Extract meaningful terms from a URL."""
+     import re
+     from urllib.parse import urlparse
+
+     try:
+         parsed = urlparse(url)
+         # Get domain parts and path parts
+         domain_parts = parsed.netloc.replace("www.", "").split(".")
+         path_parts = [p for p in parsed.path.split("/") if p]
+
+         # Combine and filter
+         all_parts = domain_parts + path_parts
+         terms = []
+         for part in all_parts:
+             # Split by common separators
+             sub_parts = re.split(r'[-_.]', part.lower())
+             terms.extend(sub_parts)
+
+         # Filter short/common terms
+         stop_terms = {"com", "org", "io", "dev", "api", "docs", "www", "http", "https"}
+         terms = [t for t in terms if t not in stop_terms and len(t) > 2]
+         return terms[:10]
+     except Exception:
+         return []
+
+
  if __name__ == "__main__":
      main()
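A minimal sketch of how to inspect what this tracker records, assuming it is run from the project root so the path matches STATE_FILE above and the tracker has already fired at least once. The printed fields are the ones the enforcement hooks later read.

# Minimal sketch: read back what the PostToolUse tracker above has written.
import json
from pathlib import Path

state = json.loads(Path(".claude/api-dev-state.json").read_text())

interview = state.get("phases", {}).get("interview", {})
print(interview.get("status"))                  # "in_progress" after the first AskUserQuestion
print(interview.get("user_question_count", 0))  # incremented once per tracked call

for entry in state.get("research_queries", [])[-3:]:
    # WebSearch entries carry "query"/"terms", WebFetch "url"/"terms",
    # context7 entries "library"/"terms" (see the branches above)
    print(entry.get("tool"), entry.get("terms"))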
@@ -0,0 +1,225 @@
+ #!/usr/bin/env python3
+ """
+ Hook: PreToolUse for Write/Edit (runs AFTER enforce-research and enforce-interview)
+ Purpose: Verify implementation matches interview requirements
+
+ This hook addresses these gaps:
+ 1. AI uses exact user terminology when researching (not paraphrasing)
+ 2. All changed files are tracked and verified
+ 3. Test files use same patterns as production code
+
+ Returns:
+ - {"permissionDecision": "allow"} - Let the tool run
+ - {"permissionDecision": "deny", "reason": "..."} - Block with explanation
+ """
+ import json
+ import sys
+ import re
+ from pathlib import Path
+
+ # State file is in .claude/ directory (sibling to hooks/)
+ STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+
+
+ def extract_key_terms(text: str) -> list[str]:
+     """Extract likely important terms from interview answers.
+
+     These are terms that should appear in research and implementation:
+     - Proper nouns (capitalized multi-word phrases)
+     - Technical terms (SDK names, API names, etc.)
+     - Specific patterns (e.g., "via X", "using X", "with X")
+     """
+     terms = []
+
+     # Look for "via X", "using X", "with X" patterns
+     via_patterns = re.findall(r'(?:via|using|with|through)\s+([A-Z][A-Za-z0-9\s]+?)(?:[,.\n]|$)', text)
+     terms.extend(via_patterns)
+
+     # Look for capitalized phrases (likely proper nouns/product names)
+     # e.g., "Vercel AI Gateway", "OpenAI API"
+     proper_nouns = re.findall(r'[A-Z][a-z]+(?:\s+[A-Z][a-z]+)+', text)
+     terms.extend(proper_nouns)
+
+     # Clean up and dedupe
+     terms = [t.strip() for t in terms if len(t.strip()) > 3]
+     return list(set(terms))
+
+
+ def check_research_used_exact_terms(state: dict) -> list[str]:
+     """Verify research sources used the exact terms from interview.
+
+     Gap 1 Fix: When user provides a term, use THAT EXACT TERM to search.
+     """
+     issues = []
+
+     interview = state.get("phases", {}).get("interview", {})
+     research = state.get("phases", {}).get("research_initial", {})
+     deep_research = state.get("phases", {}).get("research_deep", {})
+
+     questions = interview.get("questions", [])
+     if isinstance(questions, list) and len(questions) > 0:
+         # Extract key terms from all interview answers
+         all_text = " ".join(str(q) for q in questions)
+         key_terms = extract_key_terms(all_text)
+
+         # Check if these terms appear in research sources
+         research_sources = research.get("sources", []) + deep_research.get("sources", [])
+         research_text = " ".join(str(s) for s in research_sources).lower()
+
+         missing_terms = []
+         for term in key_terms:
+             # Check if term or close variant appears in research
+             term_lower = term.lower()
+             if term_lower not in research_text:
+                 # Check for partial matches (e.g., "AI Gateway" in "Vercel AI Gateway")
+                 words = term_lower.split()
+                 if not all(w in research_text for w in words):
+                     missing_terms.append(term)
+
+         if missing_terms:
+             issues.append(
+                 f"⚠️ Gap 1 Warning: User-specified terms not found in research:\n"
+                 f" Terms from interview: {missing_terms}\n"
+                 f" These EXACT terms should have been searched."
+             )
+
+     return issues
+
+
+ def check_files_tracked(state: dict, file_path: str) -> list[str]:
+     """Verify we're tracking all files being modified.
+
+     Gap 2 Fix: Track files as they're modified, not after claiming completion.
+     """
+     issues = []
+
+     files_created = state.get("files_created", [])
+     files_modified = state.get("files_modified", [])
+     all_tracked = files_created + files_modified
+
+     # Normalize paths for comparison
+     normalized_path = file_path.replace("\\", "/")
+
+     # Check if this file is a test file
+     is_test = ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path
+
+     # For non-test files in api/ or lib/, they should be tracked
+     is_trackable = ("/api/" in file_path or "/lib/" in file_path) and file_path.endswith(".ts")
+
+     if is_trackable and not is_test:
+         # Check if any tracked file matches this one
+         found = False
+         for tracked in all_tracked:
+             if normalized_path.endswith(tracked) or tracked in normalized_path:
+                 found = True
+                 break
+
+         # Don't block, but log that this file should be tracked
+         if not found:
+             state.setdefault("files_modified", []).append(normalized_path.split("/src/")[-1] if "/src/" in normalized_path else normalized_path)
+             STATE_FILE.write_text(json.dumps(state, indent=2))
+
+     return issues
+
+
+ def check_test_production_alignment(state: dict, file_path: str, content: str = "") -> list[str]:
+     """Verify test files use same patterns as production code.
+
+     Gap 5 Fix: Test files must use the same patterns as production code.
+     """
+     issues = []
+
+     is_test = ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path
+
+     if not is_test:
+         return issues
+
+     # Check interview for key configuration patterns
+     interview = state.get("phases", {}).get("interview", {})
+     questions = interview.get("questions", [])
+     all_text = " ".join(str(q) for q in questions)
+
+     # Look for environment variable patterns mentioned in interview
+     env_patterns = re.findall(r'[A-Z_]+_(?:KEY|API_KEY|TOKEN|SECRET)', all_text)
+
+     if env_patterns and content:
+         # If interview mentions specific env vars, test should check those
+         for pattern in env_patterns:
+             if pattern in content:
+                 # Good - test is checking the right env var
+                 pass
+
+     # Look for mismatches - e.g., checking OPENAI_API_KEY when we said "single gateway key"
+     if "gateway" in all_text.lower() or "single key" in all_text.lower():
+         # Interview mentioned gateway/single key - tests shouldn't check individual provider keys
+         old_patterns = ["OPENAI_API_KEY", "ANTHROPIC_API_KEY", "GOOGLE_API_KEY", "PERPLEXITY_API_KEY"]
+         found_old = [p for p in old_patterns if p in content]
+
+         if found_old and "AI_GATEWAY" not in content:
+             issues.append(
+                 f"⚠️ Gap 5 Warning: Test may be checking wrong environment variables.\n"
+                 f" Interview mentioned: gateway/single key pattern\n"
+                 f" Test checks: {found_old}\n"
+                 f" Consider: Should test check AI_GATEWAY_API_KEY instead?"
+             )
+
+     return issues
+
+
+ def main():
+     # Read hook input from stdin
+     try:
+         input_data = json.load(sys.stdin)
+     except json.JSONDecodeError:
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+
+     tool_input = input_data.get("tool_input", {})
+     file_path = tool_input.get("file_path", "")
+     new_content = tool_input.get("content", "") or tool_input.get("new_string", "")
+
+     # Only check for API/schema/lib files
+     is_api_file = "/api/" in file_path and file_path.endswith(".ts")
+     is_lib_file = "/lib/" in file_path and file_path.endswith(".ts")
+
+     if not is_api_file and not is_lib_file:
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+
+     # Load state
+     if not STATE_FILE.exists():
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+
+     try:
+         state = json.loads(STATE_FILE.read_text())
+     except json.JSONDecodeError:
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+
+     # Run verification checks
+     all_issues = []
+
+     # Check 1: Research used exact terms from interview
+     all_issues.extend(check_research_used_exact_terms(state))
+
+     # Check 2: Track this file
+     all_issues.extend(check_files_tracked(state, file_path))
+
+     # Check 5: Test/production alignment
+     all_issues.extend(check_test_production_alignment(state, file_path, new_content))
+
+     # If there are issues, warn but don't block (these are warnings)
+     # The user can review these in the state file
+     if all_issues:
+         # Store warnings in state for later review
+         state.setdefault("verification_warnings", []).extend(all_issues)
+         STATE_FILE.write_text(json.dumps(state, indent=2))
+
+     # Allow the operation - these are warnings, not blockers
+     print(json.dumps({"permissionDecision": "allow"}))
+     sys.exit(0)
+
+
+ if __name__ == "__main__":
+     main()
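The term extraction in the verification hook above is regex-driven, so it is worth knowing what it will and will not pick up. A quick check of the two patterns on an illustrative interview answer (the sentence is made up; the expected outputs were worked out by hand):

# Quick check of the two key-term regexes used by the verification hook above.
import re

answer = "Stream Anthropic Claude responses, routed via Vercel AI Gateway, with a single key."

# "via/using/with/through <Capitalized phrase>", captured up to the next comma/period
print(re.findall(r'(?:via|using|with|through)\s+([A-Z][A-Za-z0-9\s]+?)(?:[,.\n]|$)', answer))
# -> ['Vercel AI Gateway']

# Runs of capitalized words; all-caps tokens like "AI" do not match [a-z]+, so
# "Vercel AI Gateway" is only caught by the via/using/with pattern above
print(re.findall(r'[A-Z][a-z]+(?:\s+[A-Z][a-z]+)+', answer))
# -> ['Stream Anthropic Claude']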
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@hustle-together/api-dev-tools",
-   "version": "1.3.0",
+   "version": "1.7.0",
    "description": "Interview-driven API development workflow for Claude Code - Automates research, testing, and documentation",
    "main": "bin/cli.js",
    "bin": {
@@ -11,6 +11,7 @@
      "commands/",
      "hooks/",
      "templates/",
+     "demo/",
      "README.md",
      "LICENSE"
    ],
@@ -1,8 +1,10 @@
  {
-   "version": "1.0.0",
+   "version": "1.1.0",
    "created_at": null,
    "endpoint": null,
    "library": null,
+   "research_queries": [],
+   "prompt_detections": [],
    "phases": {
      "scope": {
        "status": "not_started",
@@ -1,5 +1,31 @@
  {
+   "permissions": {
+     "allow": [
+       "WebSearch",
+       "WebFetch",
+       "mcp__context7",
+       "mcp__context7__resolve-library-id",
+       "mcp__context7__get-library-docs",
+       "mcp__github",
+       "Bash(claude mcp:*)",
+       "Bash(pnpm test:*)",
+       "Bash(npm test:*)",
+       "Bash(git status:*)",
+       "Bash(git diff:*)",
+       "Bash(git log:*)"
+     ]
+   },
    "hooks": {
+     "UserPromptSubmit": [
+       {
+         "hooks": [
+           {
+             "type": "command",
+             "command": "$CLAUDE_PROJECT_DIR/.claude/hooks/enforce-external-research.py"
+           }
+         ]
+       }
+     ],
      "PreToolUse": [
        {
          "matcher": "Write|Edit",
@@ -7,13 +33,21 @@
            {
              "type": "command",
              "command": "$CLAUDE_PROJECT_DIR/.claude/hooks/enforce-research.py"
+           },
+           {
+             "type": "command",
+             "command": "$CLAUDE_PROJECT_DIR/.claude/hooks/enforce-interview.py"
+           },
+           {
+             "type": "command",
+             "command": "$CLAUDE_PROJECT_DIR/.claude/hooks/verify-implementation.py"
            }
          ]
        }
      ],
      "PostToolUse": [
        {
-         "matcher": "WebSearch|WebFetch|mcp__context7.*",
+         "matcher": "WebSearch|WebFetch|mcp__context7.*|AskUserQuestion",
          "hooks": [
            {
              "type": "command",