@hustle-together/api-dev-tools 3.0.0 → 3.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. package/README.md +71 -0
  2. package/bin/cli.js +184 -14
  3. package/demo/audio/generate-all-narrations.js +124 -59
  4. package/demo/audio/generate-narration.js +120 -56
  5. package/demo/audio/narration-adam-timing.json +3086 -2077
  6. package/demo/audio/narration-adam.mp3 +0 -0
  7. package/demo/audio/narration-creature-timing.json +3094 -2085
  8. package/demo/audio/narration-creature.mp3 +0 -0
  9. package/demo/audio/narration-gaming-timing.json +3091 -2082
  10. package/demo/audio/narration-gaming.mp3 +0 -0
  11. package/demo/audio/narration-hope-timing.json +3072 -2063
  12. package/demo/audio/narration-hope.mp3 +0 -0
  13. package/demo/audio/narration-mark-timing.json +3090 -2081
  14. package/demo/audio/narration-mark.mp3 +0 -0
  15. package/demo/audio/voices-manifest.json +16 -16
  16. package/demo/workflow-demo.html +1528 -411
  17. package/hooks/api-workflow-check.py +2 -0
  18. package/hooks/enforce-deep-research.py +180 -0
  19. package/hooks/enforce-disambiguation.py +149 -0
  20. package/hooks/enforce-documentation.py +187 -0
  21. package/hooks/enforce-environment.py +249 -0
  22. package/hooks/enforce-interview.py +64 -1
  23. package/hooks/enforce-refactor.py +187 -0
  24. package/hooks/enforce-research.py +93 -46
  25. package/hooks/enforce-schema.py +186 -0
  26. package/hooks/enforce-scope.py +156 -0
  27. package/hooks/enforce-tdd-red.py +246 -0
  28. package/hooks/enforce-verify.py +186 -0
  29. package/hooks/verify-after-green.py +136 -6
  30. package/package.json +2 -1
  31. package/scripts/collect-test-results.ts +404 -0
  32. package/scripts/extract-parameters.ts +483 -0
  33. package/scripts/generate-test-manifest.ts +520 -0
  34. package/templates/CLAUDE-SECTION.md +84 -0
  35. package/templates/api-dev-state.json +45 -5
  36. package/templates/api-test/page.tsx +315 -0
  37. package/templates/api-test/test-structure/route.ts +269 -0
  38. package/templates/settings.json +36 -0
package/hooks/enforce-tdd-red.py ADDED
@@ -0,0 +1,246 @@
+ #!/usr/bin/env python3
+ """
+ Hook: PreToolUse for Write/Edit
+ Purpose: Block writing implementation if test matrix not approved WITH USER CONFIRMATION
+ 
+ Phase 7 (TDD Red) requires:
+ 1. Propose test matrix based on interview + schema
+ 2. SHOW test plan to user (scenarios, edge cases, coverage)
+ 3. USE AskUserQuestion: "Test plan looks good? [Y/n]"
+ 4. Loop back if user wants more tests
+ 5. Only allow route.ts after user approves test matrix
+ 
+ Returns:
+ - {"permissionDecision": "allow"} - Let the tool run
+ - {"permissionDecision": "deny", "reason": "..."} - Block with explanation
+ """
+ import json
+ import sys
+ from pathlib import Path
+ 
+ # State file is in .claude/ directory (sibling to hooks/)
+ STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+ 
+ 
+ def find_test_file(route_path: str) -> tuple[bool, str]:
+     """Check if a test file exists for the given route file."""
+     route_file = Path(route_path)
+ 
+     # Common test file patterns
+     # route.ts -> route.test.ts, __tests__/route.test.ts, route.spec.ts
+     possible_tests = [
+         route_file.with_suffix(".test.ts"),
+         route_file.with_suffix(".test.tsx"),
+         route_file.with_suffix(".spec.ts"),
+         route_file.parent / "__tests__" / f"{route_file.stem}.test.ts",
+         route_file.parent / "__tests__" / f"{route_file.stem}.test.tsx",
+         route_file.parent.parent / "__tests__" / f"{route_file.parent.name}.test.ts",
+     ]
+ 
+     for test_path in possible_tests:
+         if test_path.exists():
+             return True, str(test_path)
+ 
+     return False, str(possible_tests[0])  # Return expected path
+ 
+ 
+ def main():
+     # Read hook input from stdin
+     try:
+         input_data = json.load(sys.stdin)
+     except json.JSONDecodeError:
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+ 
+     tool_input = input_data.get("tool_input", {})
+     file_path = tool_input.get("file_path", "")
+ 
+     # Only enforce for route.ts files in /api/ directories
+     if not file_path.endswith("route.ts") or "/api/" not in file_path:
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+ 
+     # Allow if this IS a test file (shouldn't match but safety check)
+     if ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path:
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+ 
+     # Check if state file exists
+     if not STATE_FILE.exists():
+         # Even without state, enforce TDD
+         test_exists, expected_path = find_test_file(file_path)
+         if not test_exists:
+             print(json.dumps({
+                 "permissionDecision": "deny",
+                 "reason": f"""❌ TDD VIOLATION: No test file found!
+ 
+ You're trying to write: {file_path}
+ 
+ But the test file doesn't exist: {expected_path}
+ 
+ ═══════════════════════════════════════════════════════════
+ ⚠️ WRITE TESTS FIRST (TDD Red Phase)
+ ═══════════════════════════════════════════════════════════
+ 
+ TDD requires:
+ 1. Write a FAILING test first
+ 2. THEN write implementation to make it pass
+ 
+ Create the test file first:
+ {expected_path}
+ 
+ Example test structure:
+ import {{ describe, it, expect }} from 'vitest';
+ 
+ describe('POST /api/...', () => {{
+ it('should return 200 with valid input', async () => {{
+ // Test implementation
+ }});
+ 
+ it('should return 400 with invalid input', async () => {{
+ // Test validation
+ }});
+ }});"""
+             }))
+             sys.exit(0)
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+ 
+     # Load state
+     try:
+         state = json.loads(STATE_FILE.read_text())
+     except json.JSONDecodeError:
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+ 
+     phases = state.get("phases", {})
+     tdd_red = phases.get("tdd_red", {})
+     tdd_red_status = tdd_red.get("status", "not_started")
+     test_count = tdd_red.get("test_count", 0)
+ 
+     # Get user checkpoint fields
+     user_question_asked = tdd_red.get("user_question_asked", False)
+     user_approved = tdd_red.get("user_approved", False)
+     matrix_shown = tdd_red.get("matrix_shown", False)
+     test_scenarios = tdd_red.get("test_scenarios", [])
+ 
+     # Check if TDD Red phase is complete
+     if tdd_red_status != "complete":
+         test_exists, expected_path = find_test_file(file_path)
+ 
+         # Check what's missing for user checkpoint
+         missing = []
+         if not test_exists:
+             missing.append("Test file not created yet")
+         if not matrix_shown:
+             missing.append("Test matrix not shown to user")
+         if not user_question_asked:
+             missing.append("User approval question (AskUserQuestion not used)")
+         if not user_approved:
+             missing.append("User hasn't approved the test plan")
+ 
+         print(json.dumps({
+             "permissionDecision": "deny",
+             "reason": f"""❌ BLOCKED: TDD Red phase (Phase 7) not complete.
+ 
+ Current status: {tdd_red_status}
+ Test count: {test_count}
+ Test file exists: {test_exists}
+ Matrix shown: {matrix_shown}
+ User question asked: {user_question_asked}
+ User approved: {user_approved}
+ Scenarios: {len(test_scenarios)}
+ 
+ MISSING:
+ {chr(10).join(f" • {m}" for m in missing)}
+ 
+ ═══════════════════════════════════════════════════════════
+ ⚠️ GET USER APPROVAL FOR TEST MATRIX
+ ═══════════════════════════════════════════════════════════
+ 
+ REQUIRED STEPS:
+ 
+ 1. PROPOSE test matrix based on interview + schema:
+ ┌───────────────────────────────────────────────────────┐
+ │ TEST MATRIX │
+ │ │
+ │ Based on your interview, I'll test: │
+ │ │
+ │ ✅ Success Scenarios: │
+ │ • GET with valid domain → 200 + brand data │
+ │ • POST with full payload → 200 + created │
+ │ │
+ │ ✅ Error Scenarios (your choice: return objects): │
+ │ • Invalid domain → 400 + error object │
+ │ • Missing API key → 401 + error object │
+ │ • Not found → 404 + error object │
+ │ │
+ │ ✅ Edge Cases: │
+ │ • Rate limit exceeded → 429 + retry-after │
+ │ • Cache hit → 200 + cached: true │
+ │ • Empty response → 200 + empty data │
+ │ │
+ │ Total: 8 test scenarios │
+ │ │
+ │ Test plan looks good? [Y] │
+ │ Add more tests? [n] ____ │
+ └───────────────────────────────────────────────────────┘
+ 
+ 2. USE AskUserQuestion:
+ question: "Does this test plan cover your requirements?"
+ options: [
+ {{"value": "approve", "label": "Yes, write these tests"}},
+ {{"value": "add", "label": "Add more - I also need [scenario]"}},
+ {{"value": "modify", "label": "Change a scenario - [which one]"}}
+ ]
+ 
+ 3. If user says "add" or "modify":
+ • Update test_scenarios list
+ • LOOP BACK and show updated matrix
+ 
+ 4. If user says "approve":
+ • Create test file: {expected_path}
+ • Write all approved test scenarios
+ • Run tests to confirm they FAIL (red)
+ • Set tdd_red.user_approved = true
+ • Set tdd_red.user_question_asked = true
+ • Set tdd_red.matrix_shown = true
+ • Set tdd_red.test_count = N
+ • Set tdd_red.status = "complete"
+ 
+ Based on interview decisions:
+ {_format_interview_hints(phases.get("interview", {}))}
+ 
+ WHY: User approves what gets tested BEFORE implementation."""
+         }))
+         sys.exit(0)
+ 
+     # TDD Red complete - allow implementation
+     print(json.dumps({
+         "permissionDecision": "allow",
+         "message": f"""✅ TDD Red phase complete.
+ {test_count} tests written and failing.
+ User approved {len(test_scenarios)} test scenarios.
+ Proceeding to Green phase - make them pass!"""
+     }))
+     sys.exit(0)
+ 
+ 
+ def _format_interview_hints(interview: dict) -> str:
+     """Format interview decisions as test hints."""
+     decisions = interview.get("decisions", {})
+     if not decisions:
+         return " (no interview decisions recorded)"
+ 
+     hints = []
+     for key, data in list(decisions.items())[:5]:
+         value = data.get("value", data.get("response", ""))
+         if value:
+             short_value = str(value)[:50] + "..." if len(str(value)) > 50 else str(value)
+             hints.append(f" • {key}: {short_value}")
+ 
+     return "\n".join(hints) if hints else " (no interview decisions recorded)"
+ 
+ 
+ if __name__ == "__main__":
+     main()
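For readers wiring this hook up, a minimal sketch of exercising enforce-tdd-red.py by hand follows. The stdin payload shape (tool_input.file_path) and the phases.tdd_red fields mirror what the hook above reads; the .claude/hooks/ install location, the example endpoint path, and the counts are assumptions, not something taken from the package.

    # Hypothetical local check of the hook's allow path (run from a project root
    # where the hooks live under .claude/hooks/). All concrete values are examples.
    import json
    import subprocess
    from pathlib import Path

    state = {
        "phases": {
            "tdd_red": {
                "status": "complete",         # anything else makes the hook deny
                "test_count": 8,
                "matrix_shown": True,
                "user_question_asked": True,  # AskUserQuestion was used
                "user_approved": True,        # user signed off on the matrix
                "test_scenarios": ["valid domain", "invalid domain", "missing API key"],
            }
        }
    }
    Path(".claude/api-dev-state.json").write_text(json.dumps(state, indent=2))

    hook_input = {"tool_input": {"file_path": "app/api/brand/route.ts"}}
    result = subprocess.run(
        ["python3", ".claude/hooks/enforce-tdd-red.py"],
        input=json.dumps(hook_input),
        capture_output=True,
        text=True,
    )
    print(result.stdout)  # expect {"permissionDecision": "allow", "message": "..."}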
package/hooks/enforce-verify.py ADDED
@@ -0,0 +1,186 @@
+ #!/usr/bin/env python3
+ """
+ Hook: PreToolUse for Write/Edit
+ Purpose: Block refactoring until verification complete WITH USER GAP DECISION
+ 
+ Phase 9 (Verify) requires:
+ 1. Re-read original documentation (after tests pass)
+ 2. Compare implementation to docs - find gaps
+ 3. SHOW gap analysis to user
+ 4. USE AskUserQuestion: "Fix gaps? [Y] / Skip? [n]"
+ 5. Loop back to Phase 7 if user wants fixes
+ 6. Only proceed to refactor when user decides
+ 
+ Returns:
+ - {"permissionDecision": "allow"} - Let the tool run
+ - {"permissionDecision": "deny", "reason": "..."} - Block with explanation
+ """
+ import json
+ import sys
+ from pathlib import Path
+ 
+ STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+ 
+ 
+ def main():
+     try:
+         input_data = json.load(sys.stdin)
+     except json.JSONDecodeError:
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+ 
+     tool_input = input_data.get("tool_input", {})
+     file_path = tool_input.get("file_path", "")
+ 
+     # Only enforce for API route files
+     is_api_file = "/api/" in file_path and file_path.endswith(".ts")
+ 
+     if not is_api_file:
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+ 
+     # Skip test files
+     if ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path:
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+ 
+     # Skip documentation/config files
+     if file_path.endswith(".md") or file_path.endswith(".json"):
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+ 
+     if not STATE_FILE.exists():
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+ 
+     try:
+         state = json.loads(STATE_FILE.read_text())
+     except json.JSONDecodeError:
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+ 
+     endpoint = state.get("endpoint", "unknown")
+     phases = state.get("phases", {})
+     tdd_green = phases.get("tdd_green", {})
+     verify = phases.get("verify", {})
+ 
+     # Only enforce after TDD Green is complete
+     if tdd_green.get("status") != "complete":
+         # Let earlier hooks handle this
+         print(json.dumps({"permissionDecision": "allow"}))
+         sys.exit(0)
+ 
+     status = verify.get("status", "not_started")
+     gaps_found = verify.get("gaps_found", 0)
+     gaps_fixed = verify.get("gaps_fixed", 0)
+     gaps_skipped = verify.get("gaps_skipped", 0)
+ 
+     if status != "complete":
+         user_question_asked = verify.get("user_question_asked", False)
+         user_decided = verify.get("user_decided", False)
+         gap_analysis_shown = verify.get("gap_analysis_shown", False)
+         re_research_done = verify.get("re_research_done", False)
+         user_decision = verify.get("user_decision", None)
+ 
+         missing = []
+         if not re_research_done:
+             missing.append("Re-research original docs not done")
+         if not gap_analysis_shown:
+             missing.append("Gap analysis not shown to user")
+         if not user_question_asked:
+             missing.append("User gap decision question (AskUserQuestion not used)")
+         if not user_decided:
+             missing.append("User hasn't decided on gaps")
+ 
+         print(json.dumps({
+             "permissionDecision": "deny",
+             "reason": f"""❌ BLOCKED: Verification (Phase 9) not complete.
+ 
+ Status: {status}
+ Re-research done: {re_research_done}
+ Gap analysis shown: {gap_analysis_shown}
+ User question asked: {user_question_asked}
+ User decided: {user_decided}
+ User decision: {user_decision or "None yet"}
+ Gaps found: {gaps_found}
+ Gaps fixed: {gaps_fixed}
+ Gaps skipped: {gaps_skipped}
+ 
+ MISSING:
+ {chr(10).join(f" • {m}" for m in missing)}
+ 
+ ═══════════════════════════════════════════════════════════
+ ⚠️ GET USER DECISION ON IMPLEMENTATION GAPS
+ ═══════════════════════════════════════════════════════════
+ 
+ REQUIRED STEPS:
+ 
+ 1. Re-read the ORIGINAL API documentation:
+ • Use Context7 or WebSearch with SAME queries from Phase 2
+ • Compare EVERY documented feature to your implementation
+ • Don't rely on memory - actually re-read the docs
+ 
+ 2. Create and SHOW gap analysis table:
+ ┌───────────────────────────────────────────────────────┐
+ │ VERIFICATION RESULTS │
+ │ │
+ │ │ Feature │ In Docs │ Implemented │ Status │
+ │ ├─────────────────┼─────────┼─────────────┼──────────│
+ │ │ domain param │ Yes │ Yes │ ✓ Match │
+ │ │ format option │ Yes │ Yes │ ✓ Match │
+ │ │ include_fonts │ Yes │ No │ ❌ GAP │
+ │ │ webhook_url │ No │ Yes │ ⚠ Extra │
+ │ │
+ │ Found 1 gap in implementation. │
+ │ │
+ │ Fix the gap? [Y] - Loop back to add missing feature │
+ │ Skip? [n] - Document as intentional omission │
+ └───────────────────────────────────────────────────────┘
+ 
+ 3. USE AskUserQuestion:
+ question: "I found {gaps_found} gap(s). How should I proceed?"
+ options: [
+ {{"value": "fix", "label": "Fix gaps - loop back to Red phase"}},
+ {{"value": "skip", "label": "Skip - these are intentional omissions"}},
+ {{"value": "partial", "label": "Fix some, skip others - [specify]"}}
+ ]
+ 
+ 4. If user says "fix":
+ • Loop back to Phase 7 (TDD Red)
+ • Write new tests for missing features
+ • Implement and verify again
+ • REPEAT until no gaps or user says skip
+ 
+ 5. If user says "skip":
+ • Document each skipped gap with reason
+ • Set verify.gaps_skipped = count
+ • Proceed to refactor
+ 
+ 6. After user decides:
+ • Set verify.user_decided = true
+ • Set verify.user_question_asked = true
+ • Set verify.gap_analysis_shown = true
+ • Set verify.re_research_done = true
+ • Set verify.user_decision = "fix" or "skip" or "partial"
+ • Set verify.status = "complete"
+ 
+ WHY: Catch memory-based implementation errors BEFORE refactoring."""
+         }))
+         sys.exit(0)
+ 
+     # Verify complete
+     user_decision = verify.get("user_decision", "unknown")
+     print(json.dumps({
+         "permissionDecision": "allow",
+         "message": f"""✅ Verification complete.
+ User decision: {user_decision}
+ Gaps found: {gaps_found}
+ Gaps fixed: {gaps_fixed}
+ Gaps skipped (intentional): {gaps_skipped}
+ Proceeding to refactor phase."""
+     }))
+     sys.exit(0)
+ 
+ 
+ if __name__ == "__main__":
+     main()
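The verify hook reads a parallel set of flags from phases.verify, and only enforces once phases.tdd_green.status is "complete". A sketch of the state that lets refactoring proceed follows; the field names come from the hook above, while the counts and the decision value are purely illustrative, and the state-file path assumes the same .claude/ layout as before.

    # Illustrative verify-phase state; field names match the hook above,
    # counts and the decision value are placeholders.
    import json
    from pathlib import Path

    state_path = Path(".claude/api-dev-state.json")
    state = json.loads(state_path.read_text()) if state_path.exists() else {}
    phases = state.setdefault("phases", {})
    phases.setdefault("tdd_green", {})["status"] = "complete"  # prerequisite for this hook
    phases.setdefault("verify", {}).update({
        "status": "complete",          # anything else blocks edits to /api/ *.ts files
        "re_research_done": True,      # docs were re-read, not recalled from memory
        "gap_analysis_shown": True,    # comparison table shown to the user
        "user_question_asked": True,   # AskUserQuestion used for the decision
        "user_decided": True,
        "user_decision": "fix",        # or "skip" / "partial"
        "gaps_found": 1,
        "gaps_fixed": 1,
        "gaps_skipped": 0,
    })
    state_path.write_text(json.dumps(state, indent=2))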
package/hooks/verify-after-green.py CHANGED
@@ -1,15 +1,17 @@
  #!/usr/bin/env python3
  """
  Hook: PostToolUse (after test runs)
- Purpose: Trigger Phase 9 (Verify) after tests pass - force re-research
+ Purpose: Trigger Phase 9 (Verify) + Manifest Generation after tests pass
 
  This hook detects when tests pass (TDD Green phase complete) and:
- 1. Reminds Claude to re-research the original documentation
- 2. Compares implemented features to documented features
- 3. Requires user confirmation before proceeding
+ 1. Runs the programmatic manifest generation scripts
+ 2. Reminds Claude to re-research the original documentation
+ 3. Compares implemented features to documented features
+ 4. Requires user confirmation before proceeding
 
- The goal is to catch cases where Claude implemented from memory
- instead of from the researched documentation.
+ The goal is to:
+ - Automatically generate api-tests-manifest.json from test files (programmatic, not LLM)
+ - Catch cases where Claude implemented from memory instead of from researched docs
 
  Triggers on: Bash commands containing "test" that exit successfully
 
@@ -19,11 +21,106 @@ Returns:
  import json
  import sys
  import os
+ import subprocess
  from datetime import datetime
  from pathlib import Path
 
  # State file is in .claude/ directory (sibling to hooks/)
  STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+ # Scripts locations (try in order):
+ # 1. Installed in project: scripts/api-dev-tools/
+ # 2. In node_modules (if running from package)
+ # 3. Package root (development)
+ PROJECT_ROOT = Path(__file__).parent.parent.parent
+ SCRIPTS_LOCATIONS = [
+     PROJECT_ROOT / "scripts" / "api-dev-tools",  # CLI-installed location
+     PROJECT_ROOT / "node_modules" / "@hustle-together" / "api-dev-tools" / "scripts",
+     Path(__file__).parent.parent.parent / "scripts",  # Development fallback
+ ]
+ 
+ 
+ def run_manifest_scripts() -> dict:
+     """
+     Run the programmatic manifest generation scripts.
+ 
+     These scripts are 100% deterministic - they parse source files,
+     extract parameters from Zod schemas, and generate the manifest.
+     NO LLM involvement.
+ 
+     Returns dict with results of each script.
+     """
+     results = {
+         "manifest_generated": False,
+         "parameters_extracted": False,
+         "results_collected": False,
+         "errors": []
+     }
+ 
+     # Find the scripts directory (try multiple locations)
+     scripts_dir = None
+     for loc in SCRIPTS_LOCATIONS:
+         if loc.exists():
+             scripts_dir = loc
+             break
+ 
+     if scripts_dir is None:
+         results["errors"].append("Scripts directory not found in any expected location")
+         return results
+ 
+     project_root = PROJECT_ROOT
+ 
+     # Run generate-test-manifest.ts
+     manifest_script = scripts_dir / "generate-test-manifest.ts"
+     if manifest_script.exists():
+         try:
+             subprocess.run(
+                 ["npx", "tsx", str(manifest_script), str(project_root)],
+                 cwd=str(project_root),
+                 capture_output=True,
+                 text=True,
+                 timeout=60
+             )
+             results["manifest_generated"] = True
+         except subprocess.TimeoutExpired:
+             results["errors"].append("Manifest generation timed out")
+         except Exception as e:
+             results["errors"].append(f"Manifest generation failed: {e}")
+ 
+     # Run extract-parameters.ts
+     params_script = scripts_dir / "extract-parameters.ts"
+     if params_script.exists():
+         try:
+             subprocess.run(
+                 ["npx", "tsx", str(params_script), str(project_root)],
+                 cwd=str(project_root),
+                 capture_output=True,
+                 text=True,
+                 timeout=60
+             )
+             results["parameters_extracted"] = True
+         except subprocess.TimeoutExpired:
+             results["errors"].append("Parameter extraction timed out")
+         except Exception as e:
+             results["errors"].append(f"Parameter extraction failed: {e}")
+ 
+     # Run collect-test-results.ts (optional - only if tests were just run)
+     results_script = scripts_dir / "collect-test-results.ts"
+     if results_script.exists():
+         try:
+             subprocess.run(
+                 ["npx", "tsx", str(results_script), str(project_root)],
+                 cwd=str(project_root),
+                 capture_output=True,
+                 text=True,
+                 timeout=120  # Test collection can take longer
+             )
+             results["results_collected"] = True
+         except subprocess.TimeoutExpired:
+             results["errors"].append("Test results collection timed out")
+         except Exception as e:
+             results["errors"].append(f"Test results collection failed: {e}")
+ 
+     return results
 
 
  def main():
@@ -72,6 +169,9 @@ def main():
          print(json.dumps({"continue": True}))
          sys.exit(0)
 
+     # Tests passed - run manifest generation scripts
+     manifest_output = run_manifest_scripts()
+ 
      # Tests passed - check state file
      if not STATE_FILE.exists():
          print(json.dumps({"continue": True}))
@@ -106,6 +206,15 @@ def main():
      verify["status"] = "in_progress"
      verify["started_at"] = datetime.now().isoformat()
 
+     # Update manifest_generation section in state
+     if "manifest_generation" not in state:
+         state["manifest_generation"] = {}
+ 
+     state["manifest_generation"]["last_run"] = datetime.now().isoformat()
+     state["manifest_generation"]["manifest_generated"] = manifest_output.get("manifest_generated", False)
+     state["manifest_generation"]["parameters_extracted"] = manifest_output.get("parameters_extracted", False)
+     state["manifest_generation"]["test_results_collected"] = manifest_output.get("results_collected", False)
+ 
      # Save state
      STATE_FILE.write_text(json.dumps(state, indent=2))
 
@@ -113,6 +222,27 @@ def main():
      endpoint = state.get("endpoint", "the endpoint")
 
      context_parts = []
+ 
+     # Report manifest generation results
+     if manifest_output.get("manifest_generated"):
+         context_parts.append("## ✅ Manifest Generation Complete")
+         context_parts.append("")
+         context_parts.append("Programmatically generated from test files (no LLM):")
+         if manifest_output.get("manifest_generated"):
+             context_parts.append(" - ✓ api-tests-manifest.json")
+         if manifest_output.get("parameters_extracted"):
+             context_parts.append(" - ✓ parameter-matrix.json")
+         if manifest_output.get("results_collected"):
+             context_parts.append(" - ✓ test-results.json")
+         if manifest_output.get("errors"):
+             context_parts.append("")
+             context_parts.append("⚠️ Some scripts had issues:")
+             for err in manifest_output["errors"]:
+                 context_parts.append(f" - {err}")
+         context_parts.append("")
+         context_parts.append("---")
+         context_parts.append("")
+ 
      context_parts.append("## Phase 9: Implementation Verification Required")
      context_parts.append("")
      context_parts.append("Tests are passing. Before proceeding, you MUST verify your implementation:")
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@hustle-together/api-dev-tools",
-   "version": "3.0.0",
+   "version": "3.2.0",
    "description": "Interview-driven, research-first API development workflow with continuous verification loops for Claude Code",
    "main": "bin/cli.js",
    "bin": {
@@ -10,6 +10,7 @@
      "bin/",
      "commands/",
      "hooks/",
+     "scripts/",
      "templates/",
      "demo/",
      "README.md",