@hustle-together/api-dev-tools 1.7.1 → 1.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -6,10 +6,18 @@ Purpose: Block proceeding to schema/TDD if interview has no USER answers
6
6
  This hook ensures Claude actually asks the user questions and records
7
7
  their answers, rather than self-answering the interview.
8
8
 
9
+ v1.8.0 MAJOR UPDATE: Now requires STRUCTURED questions with multiple-choice
10
+ options derived from research phase findings.
11
+
9
12
  It checks:
10
- 1. Interview status is "complete"
11
- 2. There are actual questions with answers
12
- 3. Answers don't look auto-generated (contain user-specific details)
13
+ 1. Research phase is complete (questions must be based on research)
14
+ 2. Interview status is "complete"
15
+ 3. Questions used AskUserQuestion tool with STRUCTURED OPTIONS
16
+ 4. At least MIN_STRUCTURED_QUESTIONS have multiple-choice or typed options
17
+ 5. Answers don't look auto-generated (contain user-specific details)
18
+
19
+ The goal: Questions like Claude Code shows - with numbered options and
20
+ "Type something" at the end, all based on research findings.
13
21
 
14
22
  Returns:
15
23
  - {"permissionDecision": "allow"} - Let the tool run
@@ -23,7 +31,10 @@ from pathlib import Path
# Shared workflow state file; lives two directories above this hook script.
STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"

# Minimum questions required for a valid interview
MIN_QUESTIONS = 5  # Increased - need comprehensive interview

# Minimum questions that MUST have structured options (multiple-choice)
MIN_STRUCTURED_QUESTIONS = 3
27
38
 
28
39
  # Phrases that indicate self-answered (not real user input)
29
40
  SELF_ANSWER_INDICATORS = [
@@ -33,6 +44,12 @@ SELF_ANSWER_INDICATORS = [
33
44
  "typical use case",
34
45
  "standard implementation",
35
46
  "common pattern",
47
+ "i'll assume",
48
+ "assuming",
49
+ "probably",
50
+ "most likely",
51
+ "default to",
52
+ "usually",
36
53
  ]
37
54
 
38
55
 
@@ -81,40 +98,84 @@ Run /api-create [endpoint-name] to begin the interview-driven workflow."""
81
98
  sys.exit(0)
82
99
 
83
100
  phases = state.get("phases", {})
101
+ research = phases.get("research_initial", {})
84
102
  interview = phases.get("interview", {})
85
103
  interview_status = interview.get("status", "not_started")
86
104
  interview_desc = interview.get("description", "").lower()
87
105
  questions = interview.get("questions", [])
106
+ research_queries = state.get("research_queries", [])
107
+
108
+ # Check 0: Research must be complete FIRST (questions based on research)
109
+ research_status = research.get("status", "not_started")
110
+ if research_status != "complete":
111
+ sources_count = len(research.get("sources", []))
112
+ print(json.dumps({
113
+ "permissionDecision": "deny",
114
+ "reason": f"""❌ BLOCKED: Research phase must complete BEFORE interview.
115
+
116
+ Research status: {research_status}
117
+ Sources consulted: {sources_count}
118
+ Research queries: {len(research_queries)}
119
+
120
+ ═══════════════════════════════════════════════════════════
121
+ ⚠️ COMPLETE RESEARCH FIRST - THEN ASK QUESTIONS
122
+ ═══════════════════════════════════════════════════════════
123
+
124
+ The interview questions MUST be based on research findings:
125
+ 1. Use Context7 to get SDK/API documentation
126
+ 2. Use WebSearch (2-3 searches) for official docs
127
+ 3. THEN generate interview questions with STRUCTURED OPTIONS
128
+ based on what you discovered
129
+
130
+ Example: If research found 5 available models, ask:
131
+ "Which model should this endpoint use?"
132
+ 1. gpt-4o (fastest, cheapest)
133
+ 2. claude-sonnet-4-20250514 (best reasoning)
134
+ 3. gemini-pro (multimodal)
135
+ 4. Type something else...
136
+
137
+ Research INFORMS the options. No research = no good options."""
138
+ }))
139
+ sys.exit(0)
88
140
 
89
141
  # Check 1: Interview must be complete
90
142
  if interview_status != "complete":
143
+ # Build example based on actual research
144
+ research_based_example = _build_research_based_example(research_queries)
145
+
91
146
  print(json.dumps({
92
147
  "permissionDecision": "deny",
93
148
  "reason": f"""❌ BLOCKED: Interview phase not complete.
94
149
 
95
150
  Current status: {interview_status}
96
151
  AskUserQuestion calls: {interview.get('user_question_count', 0)}
152
+ Structured questions: {interview.get('structured_question_count', 0)}
97
153
 
98
154
  ═══════════════════════════════════════════════════════════
99
- ⚠️ YOU MUST STOP AND ASK THE USER QUESTIONS NOW
155
+ ⚠️ USE STRUCTURED QUESTIONS WITH OPTIONS
100
156
  ═══════════════════════════════════════════════════════════
101
157
 
102
- Use the AskUserQuestion tool to ask EACH of these questions ONE AT A TIME:
158
+ Based on your research, ask questions using AskUserQuestion with
159
+ the 'options' parameter to provide multiple-choice selections:
103
160
 
104
- 1. "What is the primary purpose of this endpoint?"
105
- 2. "Who will use it and how?"
106
- 3. "What parameters are essential vs optional?"
161
+ {research_based_example}
107
162
 
108
- WAIT for the user's response after EACH question before continuing.
163
+ REQUIRED FORMAT for AskUserQuestion:
164
+ - question: "Your question text"
165
+ - options: [
166
+ {{"value": "option1", "label": "Option 1 description"}},
167
+ {{"value": "option2", "label": "Option 2 description"}},
168
+ {{"value": "custom", "label": "Type something..."}}
169
+ ]
109
170
 
110
- DO NOT:
111
- Make up answers yourself
112
- ❌ Assume what the user wants
113
- ❌ Mark the interview as complete without asking
114
- ❌ Try to write any code until you have real answers
171
+ You need at least {MIN_STRUCTURED_QUESTIONS} structured questions with options.
172
+ Current: {interview.get('structured_question_count', 0)}
115
173
 
116
- The system is tracking your AskUserQuestion calls. You need at least 3
117
- actual calls with user responses to proceed."""
174
+ DO NOT:
175
+ Ask open-ended questions without options
176
+ ❌ Make up options not based on research
177
+ ❌ Skip the AskUserQuestion tool
178
+ ❌ Self-answer questions"""
118
179
  }))
119
180
  sys.exit(0)
120
181
 
@@ -128,11 +189,11 @@ Questions recorded: {len(questions)}
128
189
  Minimum required: {MIN_QUESTIONS}
129
190
 
130
191
  You must ask the user more questions about their requirements.
131
- DO NOT proceed without understanding the user's actual needs."""
192
+ Use AskUserQuestion with structured options based on your research."""
132
193
  }))
133
194
  sys.exit(0)
134
195
 
135
- # Check 2.5: Verify AskUserQuestion tool was actually used
196
+ # Check 3: Verify AskUserQuestion tool was actually used
136
197
  user_question_count = interview.get("user_question_count", 0)
137
198
  tool_used_count = sum(1 for q in questions if q.get("tool_used", False))
138
199
 
@@ -146,14 +207,43 @@ Minimum required: {MIN_QUESTIONS}
146
207
 
147
208
  You MUST use the AskUserQuestion tool to ask the user directly.
148
209
  Do NOT make up answers or mark the interview as complete without
149
- actually asking the user and receiving their responses.
210
+ actually asking the user and receiving their responses."""
211
+ }))
212
+ sys.exit(0)
150
213
 
151
- The system tracks when AskUserQuestion is used. Self-answering
152
- will be detected and blocked."""
214
+ # Check 4: Verify structured questions were used
215
+ structured_count = interview.get("structured_question_count", 0)
216
+ questions_with_options = sum(1 for q in questions if q.get("has_options", False))
217
+ actual_structured = max(structured_count, questions_with_options)
218
+
219
+ if actual_structured < MIN_STRUCTURED_QUESTIONS:
220
+ print(json.dumps({
221
+ "permissionDecision": "deny",
222
+ "reason": f"""❌ Not enough STRUCTURED questions with options.
223
+
224
+ Structured questions (with options): {actual_structured}
225
+ Minimum required: {MIN_STRUCTURED_QUESTIONS}
226
+
227
+ You MUST use AskUserQuestion with the 'options' parameter to
228
+ provide multiple-choice answers based on your research.
229
+
230
+ Example:
231
+ AskUserQuestion(
232
+ question="Which AI provider should this endpoint support?",
233
+ options=[
234
+ {{"value": "openai", "label": "OpenAI (GPT-4o)"}},
235
+ {{"value": "anthropic", "label": "Anthropic (Claude)"}},
236
+ {{"value": "google", "label": "Google (Gemini)"}},
237
+ {{"value": "all", "label": "All of the above"}},
238
+ {{"value": "custom", "label": "Type something else..."}}
239
+ ]
240
+ )
241
+
242
+ This gives the user clear choices based on what you researched."""
153
243
  }))
154
244
  sys.exit(0)
155
245
 
156
- # Check 3: Look for self-answer indicators
246
+ # Check 5: Look for self-answer indicators
157
247
  for indicator in SELF_ANSWER_INDICATORS:
158
248
  if indicator in interview_desc:
159
249
  print(json.dumps({
@@ -162,22 +252,103 @@ will be detected and blocked."""
162
252
 
163
253
  Detected: "{indicator}" in interview description.
164
254
 
165
- You MUST actually ask the user questions using AskUserQuestion.
166
- Self-answering the interview defeats its purpose.
167
-
168
- Reset the interview phase and ask the user directly:
169
- 1. What do you want this endpoint to do?
170
- 2. Which providers/models should it support?
171
- 3. What parameters matter most to you?
255
+ You MUST actually ask the user questions using AskUserQuestion
256
+ with structured options. Self-answering defeats the purpose.
172
257
 
173
- Wait for their real answers before proceeding."""
258
+ Reset the interview and ask with options based on research."""
174
259
  }))
175
260
  sys.exit(0)
176
261
 
177
- # All checks passed
178
- print(json.dumps({"permissionDecision": "allow"}))
262
+ # All checks passed - inject interview decisions as context reminder
263
+ decisions = interview.get("decisions", {})
264
+
265
+ if decisions:
266
+ # Build a reminder of what the user decided
267
+ decision_summary = _build_decision_summary(decisions)
268
+
269
+ # Allow but inject context about user decisions
270
+ print(json.dumps({
271
+ "permissionDecision": "allow",
272
+ "message": f"""✅ Interview complete. REMEMBER THE USER'S DECISIONS:
273
+
274
+ {decision_summary}
275
+
276
+ Your implementation MUST align with these choices.
277
+ The state file tracks these for consistency verification."""
278
+ }))
279
+ else:
280
+ print(json.dumps({"permissionDecision": "allow"}))
281
+
179
282
  sys.exit(0)
180
283
 
181
284
 
285
+ def _build_decision_summary(decisions: dict) -> str:
286
+ """Build a human-readable summary of user decisions from the interview."""
287
+ if not decisions:
288
+ return "No key decisions recorded."
289
+
290
+ lines = []
291
+ decision_labels = {
292
+ "provider": "AI Provider",
293
+ "purpose": "Primary Purpose",
294
+ "response_format": "Response Format",
295
+ "required_params": "Required Parameters",
296
+ "optional_params": "Optional Parameters",
297
+ "error_handling": "Error Handling",
298
+ "api_key_handling": "API Key Handling",
299
+ "external_services": "External Services",
300
+ }
301
+
302
+ for key, data in decisions.items():
303
+ label = decision_labels.get(key, key.replace("_", " ").title())
304
+ response = data.get("response", "")
305
+ value = data.get("value", "")
306
+
307
+ if value:
308
+ lines.append(f"• {label}: {value}")
309
+ elif response:
310
+ # Truncate long responses
311
+ short_response = response[:80] + "..." if len(response) > 80 else response
312
+ lines.append(f"• {label}: {short_response}")
313
+
314
+ return "\n".join(lines) if lines else "No key decisions recorded."
315
+
316
+
317
+ def _build_research_based_example(research_queries: list) -> str:
318
+ """Build an example question based on actual research queries."""
319
+ if not research_queries:
320
+ return """Example (generic - do research first!):
321
+ "What is the main use case for this endpoint?"
322
+ 1. Data retrieval
323
+ 2. Data transformation
324
+ 3. AI processing
325
+ 4. Type something..."""
326
+
327
+ # Extract terms from research to suggest relevant options
328
+ all_terms = []
329
+ for query in research_queries[-5:]: # Last 5 queries
330
+ terms = query.get("terms", [])
331
+ all_terms.extend(terms)
332
+
333
+ # Deduplicate and get top terms
334
+ unique_terms = list(dict.fromkeys(all_terms))[:4]
335
+
336
+ if unique_terms:
337
+ options_example = "\n ".join([
338
+ f"{i+1}. {term.title()}" for i, term in enumerate(unique_terms)
339
+ ])
340
+ return f"""Example based on your research:
341
+ "Which of these should be the primary focus?"
342
+ {options_example}
343
+ {len(unique_terms)+1}. Type something else..."""
344
+
345
+ return """Example:
346
+ "What capability is most important?"
347
+ 1. Option based on research finding 1
348
+ 2. Option based on research finding 2
349
+ 3. Option based on research finding 3
350
+ 4. Type something..."""
351
+
352
+
182
353
# Script entry point: run the hook check when executed directly.
if __name__ == "__main__":
    main()
@@ -60,7 +60,9 @@ def main():
60
60
  interview = phases.setdefault("interview", {
61
61
  "status": "not_started",
62
62
  "questions": [],
63
- "user_question_count": 0
63
+ "user_question_count": 0,
64
+ "structured_question_count": 0,
65
+ "decisions": {} # Track key decisions for consistency checking
64
66
  })
65
67
 
66
68
  # Track the question
@@ -68,13 +70,71 @@ def main():
68
70
  user_count = interview.get("user_question_count", 0) + 1
69
71
  interview["user_question_count"] = user_count
70
72
 
73
+ # Check if this question has structured options (multiple-choice)
74
+ options = tool_input.get("options", [])
75
+ has_options = len(options) > 0
76
+
77
+ # Track structured questions count
78
+ if has_options:
79
+ structured_count = interview.get("structured_question_count", 0) + 1
80
+ interview["structured_question_count"] = structured_count
81
+
82
+ # IMPORTANT: Capture the user's response from tool_output
83
+ # PostToolUse runs AFTER the tool completes, so we have the response
84
+ user_response = None
85
+ selected_value = None
86
+
87
+ # tool_output contains the user's response
88
+ if isinstance(tool_output, str):
89
+ user_response = tool_output
90
+ elif isinstance(tool_output, dict):
91
+ user_response = tool_output.get("response", tool_output.get("result", str(tool_output)))
92
+
93
+ # Try to match response to an option value
94
+ if has_options and user_response:
95
+ response_lower = user_response.lower().strip()
96
+ for opt in options:
97
+ opt_value = opt.get("value", "").lower()
98
+ opt_label = opt.get("label", "").lower()
99
+ # Check if response matches value or label
100
+ if opt_value in response_lower or response_lower in opt_label or opt_label in response_lower:
101
+ selected_value = opt.get("value")
102
+ break
103
+
71
104
  question_entry = {
72
105
  "question": tool_input.get("question", ""),
73
106
  "timestamp": datetime.now().isoformat(),
74
- "tool_used": True # Proves AskUserQuestion was actually called
107
+ "tool_used": True, # Proves AskUserQuestion was actually called
108
+ "has_options": has_options,
109
+ "options_count": len(options),
110
+ "options": [opt.get("label", opt.get("value", "")) for opt in options[:5]] if options else [],
111
+ "user_response": user_response[:500] if user_response else None, # Capture actual response
112
+ "selected_value": selected_value # Matched option value if applicable
75
113
  }
76
114
  questions.append(question_entry)
77
115
 
116
+ # Track key decisions in a summary dict for easy reference during implementation
117
+ decisions = interview.setdefault("decisions", {})
118
+ question_text = tool_input.get("question", "").lower()
119
+
120
+ # Categorize common decision types
121
+ if "provider" in question_text or "ai provider" in question_text:
122
+ decisions["provider"] = {"response": user_response, "value": selected_value}
123
+ elif "purpose" in question_text or "primary purpose" in question_text:
124
+ decisions["purpose"] = {"response": user_response, "value": selected_value}
125
+ elif "format" in question_text or "response format" in question_text:
126
+ decisions["response_format"] = {"response": user_response, "value": selected_value}
127
+ elif "parameter" in question_text and "required" in question_text:
128
+ decisions["required_params"] = {"response": user_response, "value": selected_value}
129
+ elif "parameter" in question_text and "optional" in question_text:
130
+ decisions["optional_params"] = {"response": user_response, "value": selected_value}
131
+ elif "error" in question_text:
132
+ decisions["error_handling"] = {"response": user_response, "value": selected_value}
133
+ elif "api key" in question_text or "key" in question_text:
134
+ decisions["api_key_handling"] = {"response": user_response, "value": selected_value}
135
+ elif "service" in question_text or "external" in question_text:
136
+ decisions["external_services"] = {"response": user_response, "value": selected_value}
137
+
78
138
  # Update interview status
79
139
  if interview.get("status") == "not_started":
80
140
  interview["status"] = "in_progress"
@@ -82,6 +142,16 @@ def main():
82
142
 
83
143
  interview["last_activity"] = datetime.now().isoformat()
84
144
 
145
+ # Log for visibility
146
+ if has_options:
147
+ interview["last_structured_question"] = {
148
+ "question": tool_input.get("question", "")[:100],
149
+ "options_count": len(options),
150
+ "user_response": user_response[:100] if user_response else None,
151
+ "selected_value": selected_value,
152
+ "timestamp": datetime.now().isoformat()
153
+ }
154
+
85
155
  # Save and exit
86
156
  STATE_FILE.write_text(json.dumps(state, indent=2))
87
157
  print(json.dumps({"continue": True}))
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@hustle-together/api-dev-tools",
3
- "version": "1.7.1",
3
+ "version": "1.9.0",
4
4
  "description": "Interview-driven API development workflow for Claude Code - Automates research, testing, and documentation",
5
5
  "main": "bin/cli.js",
6
6
  "bin": {