@hustle-together/api-dev-tools 2.0.7 → 3.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/README.md +343 -467
  2. package/bin/cli.js +229 -15
  3. package/commands/README.md +124 -251
  4. package/commands/api-create.md +318 -136
  5. package/commands/api-interview.md +252 -256
  6. package/commands/api-research.md +209 -234
  7. package/commands/api-verify.md +231 -0
  8. package/demo/audio/generate-all-narrations.js +581 -0
  9. package/demo/audio/generate-narration.js +120 -56
  10. package/demo/audio/generate-voice-previews.js +140 -0
  11. package/demo/audio/narration-adam-timing.json +4675 -0
  12. package/demo/audio/narration-adam.mp3 +0 -0
  13. package/demo/audio/narration-creature-timing.json +4675 -0
  14. package/demo/audio/narration-creature.mp3 +0 -0
  15. package/demo/audio/narration-gaming-timing.json +4675 -0
  16. package/demo/audio/narration-gaming.mp3 +0 -0
  17. package/demo/audio/narration-hope-timing.json +4675 -0
  18. package/demo/audio/narration-hope.mp3 +0 -0
  19. package/demo/audio/narration-mark-timing.json +4675 -0
  20. package/demo/audio/narration-mark.mp3 +0 -0
  21. package/demo/audio/previews/manifest.json +30 -0
  22. package/demo/audio/previews/preview-creature.mp3 +0 -0
  23. package/demo/audio/previews/preview-gaming.mp3 +0 -0
  24. package/demo/audio/previews/preview-hope.mp3 +0 -0
  25. package/demo/audio/previews/preview-mark.mp3 +0 -0
  26. package/demo/audio/voices-manifest.json +50 -0
  27. package/demo/hustle-together/blog/gemini-vs-claude-widgets.html +30 -28
  28. package/demo/hustle-together/blog/interview-driven-api-development.html +37 -23
  29. package/demo/hustle-together/index.html +142 -109
  30. package/demo/workflow-demo.html +2618 -1036
  31. package/hooks/api-workflow-check.py +2 -0
  32. package/hooks/enforce-deep-research.py +180 -0
  33. package/hooks/enforce-disambiguation.py +149 -0
  34. package/hooks/enforce-documentation.py +187 -0
  35. package/hooks/enforce-environment.py +249 -0
  36. package/hooks/enforce-refactor.py +187 -0
  37. package/hooks/enforce-research.py +93 -46
  38. package/hooks/enforce-schema.py +186 -0
  39. package/hooks/enforce-scope.py +156 -0
  40. package/hooks/enforce-tdd-red.py +246 -0
  41. package/hooks/enforce-verify.py +186 -0
  42. package/hooks/periodic-reground.py +154 -0
  43. package/hooks/session-startup.py +151 -0
  44. package/hooks/track-tool-use.py +109 -17
  45. package/hooks/verify-after-green.py +282 -0
  46. package/package.json +3 -2
  47. package/scripts/collect-test-results.ts +404 -0
  48. package/scripts/extract-parameters.ts +483 -0
  49. package/scripts/generate-test-manifest.ts +520 -0
  50. package/templates/CLAUDE-SECTION.md +84 -0
  51. package/templates/api-dev-state.json +83 -8
  52. package/templates/api-test/page.tsx +315 -0
  53. package/templates/api-test/test-structure/route.ts +269 -0
  54. package/templates/research-index.json +6 -0
  55. package/templates/settings.json +59 -0
package/hooks/verify-after-green.py ADDED
@@ -0,0 +1,282 @@
+ #!/usr/bin/env python3
+ """
+ Hook: PostToolUse (after test runs)
+ Purpose: Trigger Phase 9 (Verify) + Manifest Generation after tests pass
+
+ This hook detects when tests pass (TDD Green phase complete) and:
+ 1. Runs the programmatic manifest generation scripts
+ 2. Reminds Claude to re-research the original documentation
+ 3. Compares implemented features to documented features
+ 4. Requires user confirmation before proceeding
+
+ The goal is to:
+ - Automatically generate api-tests-manifest.json from test files (programmatic, not LLM)
+ - Catch cases where Claude implemented from memory instead of from researched docs
+
+ Triggers on: Bash commands containing "test" that exit successfully
+
+ Returns:
+ - {"continue": true} with additionalContext prompting verification
+ """
+ import json
+ import sys
+ import subprocess
+ from datetime import datetime
+ from pathlib import Path
+
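+ # Illustrative PostToolUse payload read from stdin (keys mirror those
+ # accessed in main() below; the values here are hypothetical):
+ #   {"tool_name": "Bash",
+ #    "tool_input": {"command": "pnpm test"},
+ #    "tool_output": {"stdout": "Tests  12 passed (12)"}}
+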
+ # State file is in .claude/ directory (sibling to hooks/)
+ STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+ # Scripts locations (try in order):
+ # 1. Installed in project: scripts/api-dev-tools/
+ # 2. In node_modules (if running from package)
+ # 3. Package root (development)
+ PROJECT_ROOT = Path(__file__).parent.parent.parent
+ SCRIPTS_LOCATIONS = [
+     PROJECT_ROOT / "scripts" / "api-dev-tools",  # CLI-installed location
+     PROJECT_ROOT / "node_modules" / "@hustle-together" / "api-dev-tools" / "scripts",
+     PROJECT_ROOT / "scripts",  # Development fallback
+ ]
+
+
+ def run_manifest_scripts() -> dict:
+     """
+     Run the programmatic manifest generation scripts.
+
+     These scripts are 100% deterministic - they parse source files,
+     extract parameters from Zod schemas, and generate the manifest.
+     NO LLM involvement.
+
+     Returns dict with results of each script.
+     """
+     results = {
+         "manifest_generated": False,
+         "parameters_extracted": False,
+         "results_collected": False,
+         "errors": []
+     }
+
+     # Find the scripts directory (try multiple locations)
+     scripts_dir = None
+     for loc in SCRIPTS_LOCATIONS:
+         if loc.exists():
+             scripts_dir = loc
+             break
+
+     if scripts_dir is None:
+         results["errors"].append("Scripts directory not found in any expected location")
+         return results
+
+     project_root = PROJECT_ROOT
+
+     # Run generate-test-manifest.ts
+     manifest_script = scripts_dir / "generate-test-manifest.ts"
+     if manifest_script.exists():
+         try:
+             proc = subprocess.run(
+                 ["npx", "tsx", str(manifest_script), str(project_root)],
+                 cwd=str(project_root),
+                 capture_output=True,
+                 text=True,
+                 timeout=60
+             )
+             # run() without check=True does not raise on a non-zero exit,
+             # so check the return code instead of assuming success
+             results["manifest_generated"] = proc.returncode == 0
+             if proc.returncode != 0:
+                 results["errors"].append(f"Manifest generation exited with code {proc.returncode}")
+         except subprocess.TimeoutExpired:
+             results["errors"].append("Manifest generation timed out")
+         except Exception as e:
+             results["errors"].append(f"Manifest generation failed: {e}")
+
+     # Run extract-parameters.ts
+     params_script = scripts_dir / "extract-parameters.ts"
+     if params_script.exists():
+         try:
+             proc = subprocess.run(
+                 ["npx", "tsx", str(params_script), str(project_root)],
+                 cwd=str(project_root),
+                 capture_output=True,
+                 text=True,
+                 timeout=60
+             )
+             results["parameters_extracted"] = proc.returncode == 0
+             if proc.returncode != 0:
+                 results["errors"].append(f"Parameter extraction exited with code {proc.returncode}")
+         except subprocess.TimeoutExpired:
+             results["errors"].append("Parameter extraction timed out")
+         except Exception as e:
+             results["errors"].append(f"Parameter extraction failed: {e}")
+
+     # Run collect-test-results.ts (optional - only if tests were just run)
+     results_script = scripts_dir / "collect-test-results.ts"
+     if results_script.exists():
+         try:
+             proc = subprocess.run(
+                 ["npx", "tsx", str(results_script), str(project_root)],
+                 cwd=str(project_root),
+                 capture_output=True,
+                 text=True,
+                 timeout=120  # Test collection can take longer
+             )
+             results["results_collected"] = proc.returncode == 0
+             if proc.returncode != 0:
+                 results["errors"].append(f"Test results collection exited with code {proc.returncode}")
+         except subprocess.TimeoutExpired:
+             results["errors"].append("Test results collection timed out")
+         except Exception as e:
+             results["errors"].append(f"Test results collection failed: {e}")
+
+     return results
+
+
+ def main():
+     # Read hook input from stdin
+     try:
+         input_data = json.load(sys.stdin)
+     except json.JSONDecodeError:
+         print(json.dumps({"continue": True}))
+         sys.exit(0)
+
+     tool_name = input_data.get("tool_name", "")
+     tool_input = input_data.get("tool_input", {})
+     tool_output = input_data.get("tool_output", {})
+
+     # Only trigger on Bash commands
+     if tool_name != "Bash":
+         print(json.dumps({"continue": True}))
+         sys.exit(0)
+
+     # Check if this is a test command
+     command = tool_input.get("command", "")
+     is_test_command = any(test_keyword in command.lower() for test_keyword in [
+         "pnpm test", "npm test", "vitest", "jest", "pytest", "test:run"
+     ])
+
+     if not is_test_command:
+         print(json.dumps({"continue": True}))
+         sys.exit(0)
+
+     # Check if tests passed (exit code 0 or output indicates success)
+     output_text = ""
+     if isinstance(tool_output, str):
+         output_text = tool_output
+     elif isinstance(tool_output, dict):
+         output_text = tool_output.get("output", tool_output.get("stdout", ""))
+
+     # Look for success indicators. "0 failed" counts as success, so strip it
+     # before scanning for failure keywords - otherwise its "failed" substring
+     # would misclassify a passing run.
+     lowered = output_text.lower()
+     success_seen = any(indicator in lowered for indicator in [
+         "tests passed", "all tests passed", "test suites passed",
+         "✓", "passed", "0 failed", "pass"
+     ])
+     failure_seen = any(fail in lowered.replace("0 failed", "") for fail in [
+         "failed", "error", "fail"
+     ])
+     tests_passed = success_seen and not failure_seen
+
+     if not tests_passed:
+         print(json.dumps({"continue": True}))
+         sys.exit(0)
+
+     # Tests passed - run manifest generation scripts
+     manifest_output = run_manifest_scripts()
+
+     # Check the workflow state file
+     if not STATE_FILE.exists():
+         print(json.dumps({"continue": True}))
+         sys.exit(0)
+
+     try:
+         state = json.loads(STATE_FILE.read_text())
+     except json.JSONDecodeError:
+         print(json.dumps({"continue": True}))
+         sys.exit(0)
+
+     phases = state.get("phases", {})
+     tdd_green = phases.get("tdd_green", {})
+     verify = phases.get("verify", {})
+
+     # Check if we're in TDD Green phase
+     if tdd_green.get("status") != "in_progress":
+         print(json.dumps({"continue": True}))
+         sys.exit(0)
+
+     # Check if verify phase is already done
+     if verify.get("status") == "complete":
+         print(json.dumps({"continue": True}))
+         sys.exit(0)
+
+     # Mark TDD Green as complete
+     tdd_green["status"] = "complete"
+     tdd_green["all_tests_passing"] = True
+     tdd_green["completed_at"] = datetime.now().isoformat()
+
+     # Start verify phase
+     verify["status"] = "in_progress"
+     verify["started_at"] = datetime.now().isoformat()
+
+     # Update manifest_generation section in state
+     if "manifest_generation" not in state:
+         state["manifest_generation"] = {}
+
+     state["manifest_generation"]["last_run"] = datetime.now().isoformat()
+     state["manifest_generation"]["manifest_generated"] = manifest_output.get("manifest_generated", False)
+     state["manifest_generation"]["parameters_extracted"] = manifest_output.get("parameters_extracted", False)
+     state["manifest_generation"]["test_results_collected"] = manifest_output.get("results_collected", False)
+
+     # Save state
+     STATE_FILE.write_text(json.dumps(state, indent=2))
+
+     # Build verification prompt
+     endpoint = state.get("endpoint", "the endpoint")
+
+     context_parts = []
+
+     # Report manifest generation results
+     if any(manifest_output.get(key) for key in
+            ("manifest_generated", "parameters_extracted", "results_collected")):
+         context_parts.append("## ✅ Manifest Generation Complete")
+         context_parts.append("")
+         context_parts.append("Programmatically generated from test files (no LLM):")
+         if manifest_output.get("manifest_generated"):
+             context_parts.append(" - ✓ api-tests-manifest.json")
+         if manifest_output.get("parameters_extracted"):
+             context_parts.append(" - ✓ parameter-matrix.json")
+         if manifest_output.get("results_collected"):
+             context_parts.append(" - ✓ test-results.json")
+     # Report errors even when every script failed
+     if manifest_output.get("errors"):
+         context_parts.append("")
+         context_parts.append("⚠️ Some scripts had issues:")
+         for err in manifest_output["errors"]:
+             context_parts.append(f" - {err}")
+     context_parts.append("")
+     context_parts.append("---")
+     context_parts.append("")
+
+     context_parts.append("## Phase 9: Implementation Verification Required")
+     context_parts.append("")
+     context_parts.append(f"Tests are passing for {endpoint}. Before proceeding, you MUST verify your implementation:")
+     context_parts.append("")
+     context_parts.append("**Required Actions:**")
+     context_parts.append("1. Re-read the original API documentation (use Context7 or WebSearch)")
+     context_parts.append("2. Compare EVERY documented parameter/feature to your implementation")
+     context_parts.append("3. Report any discrepancies in this format:")
+     context_parts.append("")
+     context_parts.append("```")
+     context_parts.append("| Feature          | In Docs | Implemented | Status          |")
+     context_parts.append("|------------------|---------|-------------|-----------------|")
+     context_parts.append("| param_name       | Yes     | Yes         | Match           |")
+     context_parts.append("| missing_param    | Yes     | No          | MISSING         |")
+     context_parts.append("| extra_param      | No      | Yes         | EXTRA (OK)      |")
+     context_parts.append("```")
+     context_parts.append("")
+     context_parts.append("**After comparison, ask the user:**")
+     context_parts.append("- Fix gaps? [Y] - Loop back to Red phase")
+     context_parts.append("- Skip (intentional omissions)? [n] - Document and proceed")
+     context_parts.append("")
+     context_parts.append("DO NOT proceed to Refactor until verification is complete.")
+
+     output = {
+         "continue": True,
+         "hookSpecificOutput": {
+             "hookEventName": "PostToolUse",
+             "additionalContext": "\n".join(context_parts)
+         }
+     }
+
+     print(json.dumps(output))
+     sys.exit(0)
+
+
+ if __name__ == "__main__":
+     main()
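
To sanity-check the hook outside a live session, pipe a synthetic PostToolUse payload into it. A minimal sketch, assuming the hook is installed at .claude/hooks/verify-after-green.py (the field names mirror the keys the hook reads; the payload values are hypothetical):

    import json
    import subprocess

    payload = {
        "tool_name": "Bash",
        "tool_input": {"command": "pnpm test"},
        "tool_output": {"stdout": "Tests  12 passed (12)"},
    }
    proc = subprocess.run(
        ["python3", ".claude/hooks/verify-after-green.py"],
        input=json.dumps(payload), capture_output=True, text=True,
    )
    print(json.loads(proc.stdout))  # {'continue': True} when no state file exists

A passing payload will also attempt the manifest scripts via npx tsx when they are present; without a state file the hook simply emits {"continue": true} and exits 0.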
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
    "name": "@hustle-together/api-dev-tools",
-   "version": "2.0.7",
-   "description": "Interview-driven API development workflow for Claude Code - Automates research, testing, and documentation",
+   "version": "3.1.0",
+   "description": "Interview-driven, research-first API development workflow with continuous verification loops for Claude Code",
    "main": "bin/cli.js",
    "bin": {
      "api-dev-tools": "./bin/cli.js"
@@ -10,6 +10,7 @@
      "bin/",
      "commands/",
      "hooks/",
+     "scripts/",
      "templates/",
      "demo/",
      "README.md",
package/scripts/collect-test-results.ts ADDED
@@ -0,0 +1,404 @@
+ #!/usr/bin/env npx tsx
+ /**
+  * Collect Test Results Script
+  *
+  * Runs Vitest and collects results programmatically.
+  * Updates the manifest with actual pass/fail status.
+  *
+  * IMPORTANT: This is 100% programmatic - NO LLM involvement.
+  * Tests are executed and results are collected automatically.
+  *
+  * @generated by @hustle-together/api-dev-tools v3.0
+  */
+
+ import { execSync } from 'child_process';
+ import fs from 'fs';
+ import path from 'path';
+
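+ // Usage sketch (argument order matches main() below; the paths shown are
+ // illustrative):
+ //   npx tsx collect-test-results.ts [baseDir] [testFilter] [outputPath]
+ //   npx tsx collect-test-results.ts . widgets src/app/api-test/test-results.json
+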
+ // ============================================
+ // Types
+ // ============================================
+
+ interface TestResult {
+   name: string;
+   file: string;
+   status: 'passed' | 'failed' | 'skipped';
+   duration: number;
+   error?: string;
+ }
+
+ interface TestSuiteResult {
+   file: string;
+   tests: TestResult[];
+   passed: number;
+   failed: number;
+   skipped: number;
+   duration: number;
+ }
+
+ interface CollectedResults {
+   version: string;
+   collectedAt: string;
+   suites: TestSuiteResult[];
+   summary: {
+     totalSuites: number;
+     totalTests: number;
+     passed: number;
+     failed: number;
+     skipped: number;
+     duration: number;
+     success: boolean;
+   };
+ }
+
+ // ============================================
+ // Vitest Output Parser
+ // ============================================
+
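+ // Abridged sketch of the JSON-reporter payload consumed below, showing only
+ // the fields this parser reads (shape inferred from the accesses; values
+ // are hypothetical):
+ //   { "testResults": [ { "name": "src/foo.test.ts", "duration": 12,
+ //       "assertionResults": [ { "title": "returns 200", "status": "passed",
+ //         "duration": 3, "failureMessages": [] } ] } ] }
+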
+ function parseVitestJson(jsonOutput: string): CollectedResults {
+   try {
+     const data = JSON.parse(jsonOutput);
+
+     const suites: TestSuiteResult[] = [];
+     let totalPassed = 0;
+     let totalFailed = 0;
+     let totalSkipped = 0;
+     let totalDuration = 0;
+
+     // Parse Vitest JSON reporter output
+     if (data.testResults) {
+       for (const fileResult of data.testResults) {
+         const suite: TestSuiteResult = {
+           file: fileResult.name || fileResult.filepath,
+           tests: [],
+           passed: 0,
+           failed: 0,
+           skipped: 0,
+           duration: fileResult.duration || 0
+         };
+
+         if (fileResult.assertionResults) {
+           for (const test of fileResult.assertionResults) {
+             const result: TestResult = {
+               name: test.title || test.fullName,
+               file: suite.file,
+               status: test.status === 'passed' ? 'passed' :
+                       test.status === 'failed' ? 'failed' : 'skipped',
+               duration: test.duration || 0
+             };
+
+             if (test.failureMessages && test.failureMessages.length > 0) {
+               result.error = test.failureMessages.join('\n');
+             }
+
+             suite.tests.push(result);
+
+             if (result.status === 'passed') suite.passed++;
+             else if (result.status === 'failed') suite.failed++;
+             else suite.skipped++;
+           }
+         }
+
+         totalPassed += suite.passed;
+         totalFailed += suite.failed;
+         totalSkipped += suite.skipped;
+         totalDuration += suite.duration;
+
+         suites.push(suite);
+       }
+     }
+
+     return {
+       version: '3.0.0',
+       collectedAt: new Date().toISOString(),
+       suites,
+       summary: {
+         totalSuites: suites.length,
+         totalTests: totalPassed + totalFailed + totalSkipped,
+         passed: totalPassed,
+         failed: totalFailed,
+         skipped: totalSkipped,
+         duration: totalDuration,
+         success: totalFailed === 0
+       }
+     };
+   } catch (error) {
+     throw new Error(`Failed to parse Vitest JSON output: ${error}`);
+   }
+ }
+
+ // ============================================
+ // Console Output Parser (Fallback)
+ // ============================================
+
+ function parseVitestConsole(output: string): CollectedResults {
+   const suites: TestSuiteResult[] = [];
+   let currentSuite: TestSuiteResult | null = null;
+
+   const lines = output.split('\n');
+
+   for (const line of lines) {
+     // Match file header: ✓ src/path/file.test.ts (5 tests) 123ms
+     const fileMatch = line.match(/[✓✗◯]\s+([^\s]+\.(?:test|spec)\.tsx?)\s+\((\d+)\s+tests?\)/);
+     if (fileMatch) {
+       if (currentSuite) {
+         suites.push(currentSuite);
+       }
+
+       const durationMatch = line.match(/(\d+)ms$/);
+
+       currentSuite = {
+         file: fileMatch[1],
+         tests: [],
+         passed: 0,
+         failed: 0,
+         skipped: 0,
+         duration: durationMatch ? parseInt(durationMatch[1], 10) : 0
+       };
+       continue;
+     }
+
+     // Match test result: ✓ should do something (5ms)
+     const testMatch = line.match(/^\s*([✓✗◯⊘])\s+(.+?)(?:\s+\((\d+)ms\))?$/);
+     if (testMatch && currentSuite) {
+       const [, icon, name, duration] = testMatch;
+
+       const status: 'passed' | 'failed' | 'skipped' =
+         icon === '✓' ? 'passed' :
+         icon === '✗' ? 'failed' : 'skipped';
+
+       currentSuite.tests.push({
+         name,
+         file: currentSuite.file,
+         status,
+         duration: duration ? parseInt(duration, 10) : 0
+       });
+
+       if (status === 'passed') currentSuite.passed++;
+       else if (status === 'failed') currentSuite.failed++;
+       else currentSuite.skipped++;
+     }
+   }
+
+   if (currentSuite) {
+     suites.push(currentSuite);
+   }
+
+   // Calculate summary
+   const summary = suites.reduce((acc, suite) => ({
+     totalSuites: acc.totalSuites + 1,
+     totalTests: acc.totalTests + suite.tests.length,
+     passed: acc.passed + suite.passed,
+     failed: acc.failed + suite.failed,
+     skipped: acc.skipped + suite.skipped,
+     duration: acc.duration + suite.duration,
+     success: acc.success && suite.failed === 0
+   }), {
+     totalSuites: 0,
+     totalTests: 0,
+     passed: 0,
+     failed: 0,
+     skipped: 0,
+     duration: 0,
+     success: true
+   });
+
+   return {
+     version: '3.0.0',
+     collectedAt: new Date().toISOString(),
+     suites,
+     summary
+   };
+ }
+
+ // ============================================
+ // Test Runner
+ // ============================================
+
+ function runVitest(baseDir: string, filter?: string): CollectedResults {
+   console.log('🧪 Running Vitest...');
+
+   const vitestArgs = ['vitest', 'run', '--reporter=json'];
+   if (filter) {
+     vitestArgs.push(filter);
+   }
+
+   try {
+     // Try running with JSON reporter
+     const result = execSync(`npx ${vitestArgs.join(' ')}`, {
+       cwd: baseDir,
+       encoding: 'utf-8',
+       stdio: ['pipe', 'pipe', 'pipe'],
+       maxBuffer: 50 * 1024 * 1024 // 50MB buffer
+     });
+
+     return parseVitestJson(result);
+   } catch (error: unknown) {
+     // Vitest may exit with non-zero on test failures;
+     // try to parse the output anyway
+     const execError = error as { stdout?: string; stderr?: string };
+     if (execError.stdout) {
+       try {
+         return parseVitestJson(execError.stdout);
+       } catch {
+         // Fall back to console parsing
+         return parseVitestConsole(execError.stdout);
+       }
+     }
+
+     // Try fallback: run without JSON reporter
+     console.log(' ⚠️ JSON reporter failed, trying console output...');
+
+     try {
+       const consoleResult = execSync(`npx vitest run ${filter || ''}`, {
+         cwd: baseDir,
+         encoding: 'utf-8',
+         stdio: ['pipe', 'pipe', 'pipe']
+       });
+
+       return parseVitestConsole(consoleResult);
+     } catch (fallbackError: unknown) {
+       const fbError = fallbackError as { stdout?: string };
+       if (fbError.stdout) {
+         return parseVitestConsole(fbError.stdout);
+       }
+       throw error;
+     }
+   }
+ }
+
+ // ============================================
+ // Manifest Updater
+ // ============================================
+
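+ // Manifest fields this updater touches (shape inferred from the property
+ // accesses below; values are hypothetical):
+ //   { "endpoints": [ { "testFile": "route.test.ts",
+ //       "testResults": { "passed": 5, "failed": 0, "skipped": 0,
+ //         "duration": 42, "lastRun": "2025-01-01T00:00:00.000Z" } } ],
+ //     "lastTestRun": { "passed": 5, "failed": 0,
+ //       "timestamp": "2025-01-01T00:00:00.000Z" } }
+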
+ function updateManifest(manifestPath: string, results: CollectedResults): void {
+   if (!fs.existsSync(manifestPath)) {
+     console.log(' ⚠️ Manifest not found, skipping update');
+     return;
+   }
+
+   const manifest = JSON.parse(fs.readFileSync(manifestPath, 'utf-8'));
+
+   // Create a map of test results by file
+   const resultsByFile = new Map<string, TestSuiteResult>();
+   for (const suite of results.suites) {
+     const basename = path.basename(suite.file);
+     resultsByFile.set(basename, suite);
+   }
+
+   // Update each endpoint's test status
+   if (manifest.endpoints) {
+     for (const endpoint of manifest.endpoints) {
+       const testBasename = path.basename(endpoint.testFile || '');
+       const suiteResult = resultsByFile.get(testBasename);
+
+       if (suiteResult) {
+         endpoint.testResults = {
+           passed: suiteResult.passed,
+           failed: suiteResult.failed,
+           skipped: suiteResult.skipped,
+           duration: suiteResult.duration,
+           lastRun: results.collectedAt
+         };
+       }
+     }
+   }
+
+   // Update summary
+   manifest.lastTestRun = {
+     ...results.summary,
+     timestamp: results.collectedAt
+   };
+
+   fs.writeFileSync(manifestPath, JSON.stringify(manifest, null, 2));
+   console.log(` ✅ Updated manifest with test results`);
+ }
+
+ // ============================================
+ // CLI Entry Point
+ // ============================================
+
+ function main() {
+   const args = process.argv.slice(2);
+   const baseDir = args[0] || process.cwd();
+   const filter = args[1] || undefined;
+   const outputPath = args[2] || path.join(baseDir, 'src', 'app', 'api-test', 'test-results.json');
+   const manifestPath = path.join(baseDir, 'src', 'app', 'api-test', 'api-tests-manifest.json');
+
+   console.log('═══════════════════════════════════════════════════════════════');
+   console.log(' 🧪 Test Results Collector');
+   console.log(' @hustle-together/api-dev-tools v3.0');
+   console.log('═══════════════════════════════════════════════════════════════');
+   console.log(`\n📁 Base directory: ${baseDir}`);
+   if (filter) {
+     console.log(`🔍 Filter: ${filter}`);
+   }
+   console.log(`📄 Output file: ${outputPath}\n`);
+
+   try {
+     const results = runVitest(baseDir, filter);
+
+     // Ensure output directory exists
+     const outputDir = path.dirname(outputPath);
+     if (!fs.existsSync(outputDir)) {
+       fs.mkdirSync(outputDir, { recursive: true });
+     }
+
+     // Write results
+     fs.writeFileSync(outputPath, JSON.stringify(results, null, 2));
+
+     // Update manifest with results
+     updateManifest(manifestPath, results);
+
+     console.log('\n═══════════════════════════════════════════════════════════════');
+     if (results.summary.success) {
+       console.log(' ✅ All tests passed!');
+     } else {
+       console.log(' ❌ Some tests failed');
+     }
+     console.log('═══════════════════════════════════════════════════════════════');
+
+     console.log(`\n📊 Summary:`);
+     console.log(` • Suites: ${results.summary.totalSuites}`);
+     console.log(` • Tests: ${results.summary.totalTests}`);
+     console.log(` • Passed: ${results.summary.passed} ✓`);
+     if (results.summary.failed > 0) {
+       console.log(` • Failed: ${results.summary.failed} ✗`);
+     }
+     if (results.summary.skipped > 0) {
+       console.log(` • Skipped: ${results.summary.skipped} ⊘`);
+     }
+     console.log(` • Duration: ${results.summary.duration}ms`);
+
+     // List failed tests
+     const failedTests = results.suites.flatMap(suite =>
+       suite.tests.filter(t => t.status === 'failed').map(t => ({
+         file: suite.file,
+         name: t.name,
+         error: t.error
+       }))
+     );
+
+     if (failedTests.length > 0) {
+       console.log(`\n❌ Failed tests:`);
+       for (const test of failedTests) {
+         console.log(` • ${test.file}: ${test.name}`);
+         if (test.error) {
+           console.log(`   ${test.error.split('\n')[0]}`);
+         }
+       }
+     }
+
+     console.log(`\n📄 Results: ${outputPath}`);
+     console.log(`📄 Manifest: ${manifestPath}\n`);
+
+     // Exit with appropriate code
+     process.exit(results.summary.success ? 0 : 1);
+
+   } catch (error) {
+     console.error('\n❌ Failed to collect test results:', error);
+     process.exit(1);
+   }
+ }
+
+ main();