@hustle-together/api-dev-tools 3.0.0 → 3.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +71 -0
- package/bin/cli.js +184 -14
- package/demo/audio/generate-all-narrations.js +124 -59
- package/demo/audio/generate-narration.js +120 -56
- package/demo/audio/narration-adam-timing.json +3086 -2077
- package/demo/audio/narration-adam.mp3 +0 -0
- package/demo/audio/narration-creature-timing.json +3094 -2085
- package/demo/audio/narration-creature.mp3 +0 -0
- package/demo/audio/narration-gaming-timing.json +3091 -2082
- package/demo/audio/narration-gaming.mp3 +0 -0
- package/demo/audio/narration-hope-timing.json +3072 -2063
- package/demo/audio/narration-hope.mp3 +0 -0
- package/demo/audio/narration-mark-timing.json +3090 -2081
- package/demo/audio/narration-mark.mp3 +0 -0
- package/demo/audio/voices-manifest.json +16 -16
- package/demo/workflow-demo.html +1528 -411
- package/hooks/api-workflow-check.py +2 -0
- package/hooks/enforce-deep-research.py +180 -0
- package/hooks/enforce-disambiguation.py +149 -0
- package/hooks/enforce-documentation.py +187 -0
- package/hooks/enforce-environment.py +249 -0
- package/hooks/enforce-refactor.py +187 -0
- package/hooks/enforce-research.py +93 -46
- package/hooks/enforce-schema.py +186 -0
- package/hooks/enforce-scope.py +156 -0
- package/hooks/enforce-tdd-red.py +246 -0
- package/hooks/enforce-verify.py +186 -0
- package/hooks/verify-after-green.py +136 -6
- package/package.json +2 -1
- package/scripts/collect-test-results.ts +404 -0
- package/scripts/extract-parameters.ts +483 -0
- package/scripts/generate-test-manifest.ts +520 -0
- package/templates/CLAUDE-SECTION.md +84 -0
- package/templates/api-dev-state.json +45 -5
- package/templates/api-test/page.tsx +315 -0
- package/templates/api-test/test-structure/route.ts +269 -0
- package/templates/settings.json +36 -0
package/hooks/enforce-verify.py
ADDED

@@ -0,0 +1,186 @@
+#!/usr/bin/env python3
+"""
+Hook: PreToolUse for Write/Edit
+Purpose: Block refactoring until verification complete WITH USER GAP DECISION
+
+Phase 9 (Verify) requires:
+1. Re-read original documentation (after tests pass)
+2. Compare implementation to docs - find gaps
+3. SHOW gap analysis to user
+4. USE AskUserQuestion: "Fix gaps? [Y] / Skip? [n]"
+5. Loop back to Phase 7 if user wants fixes
+6. Only proceed to refactor when user decides
+
+Returns:
+- {"permissionDecision": "allow"} - Let the tool run
+- {"permissionDecision": "deny", "reason": "..."} - Block with explanation
+"""
+import json
+import sys
+from pathlib import Path
+
+STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+
+
+def main():
+    try:
+        input_data = json.load(sys.stdin)
+    except json.JSONDecodeError:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    tool_input = input_data.get("tool_input", {})
+    file_path = tool_input.get("file_path", "")
+
+    # Only enforce for API route files
+    is_api_file = "/api/" in file_path and file_path.endswith(".ts")
+
+    if not is_api_file:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Skip test files
+    if ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Skip documentation/config files
+    if file_path.endswith(".md") or file_path.endswith(".json"):
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    if not STATE_FILE.exists():
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    try:
+        state = json.loads(STATE_FILE.read_text())
+    except json.JSONDecodeError:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    endpoint = state.get("endpoint", "unknown")
+    phases = state.get("phases", {})
+    tdd_green = phases.get("tdd_green", {})
+    verify = phases.get("verify", {})
+
+    # Only enforce after TDD Green is complete
+    if tdd_green.get("status") != "complete":
+        # Let earlier hooks handle this
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    status = verify.get("status", "not_started")
+    # Read gap fields up front so BOTH the deny and allow branches can report them
+    user_question_asked = verify.get("user_question_asked", False)
+    user_decided = verify.get("user_decided", False)
+    gap_analysis_shown = verify.get("gap_analysis_shown", False)
+    re_research_done = verify.get("re_research_done", False)
+    gaps_found = verify.get("gaps_found", 0)
+    gaps_fixed = verify.get("gaps_fixed", 0)
+    gaps_skipped = verify.get("gaps_skipped", 0)
+    user_decision = verify.get("user_decision", None)
+
+    if status != "complete":
+        missing = []
+        if not re_research_done:
+            missing.append("Re-research original docs not done")
+        if not gap_analysis_shown:
+            missing.append("Gap analysis not shown to user")
+        if not user_question_asked:
+            missing.append("User gap decision question (AskUserQuestion not used)")
+        if not user_decided:
+            missing.append("User hasn't decided on gaps")
+
+        print(json.dumps({
+            "permissionDecision": "deny",
+            "reason": f"""❌ BLOCKED: Verification (Phase 9) not complete.
+
+Status: {status}
+Re-research done: {re_research_done}
+Gap analysis shown: {gap_analysis_shown}
+User question asked: {user_question_asked}
+User decided: {user_decided}
+User decision: {user_decision or "None yet"}
+Gaps found: {gaps_found}
+Gaps fixed: {gaps_fixed}
+Gaps skipped: {gaps_skipped}
+
+MISSING:
+{chr(10).join(f"  • {m}" for m in missing)}
+
+═══════════════════════════════════════════════════════════
+⚠️ GET USER DECISION ON IMPLEMENTATION GAPS
+═══════════════════════════════════════════════════════════
+
+REQUIRED STEPS:
+
+1. Re-read the ORIGINAL API documentation:
+   • Use Context7 or WebSearch with SAME queries from Phase 2
+   • Compare EVERY documented feature to your implementation
+   • Don't rely on memory - actually re-read the docs
+
+2. Create and SHOW gap analysis table:
+   ┌────────────────────────────────────────────────────────┐
+   │ VERIFICATION RESULTS                                   │
+   │                                                        │
+   │ │ Feature         │ In Docs │ Implemented │ Status   │ │
+   │ ├─────────────────┼─────────┼─────────────┼──────────┤ │
+   │ │ domain param    │ Yes     │ Yes         │ ✓ Match  │ │
+   │ │ format option   │ Yes     │ Yes         │ ✓ Match  │ │
+   │ │ include_fonts   │ Yes     │ No          │ ❌ GAP   │ │
+   │ │ webhook_url     │ No      │ Yes         │ ⚠ Extra  │ │
+   │                                                        │
+   │ Found 1 gap in implementation.                         │
+   │                                                        │
+   │ Fix the gap? [Y] - Loop back to add missing feature    │
+   │ Skip? [n] - Document as intentional omission           │
+   └────────────────────────────────────────────────────────┘
+
+3. USE AskUserQuestion:
+   question: "I found {gaps_found} gap(s). How should I proceed?"
+   options: [
+     {{"value": "fix", "label": "Fix gaps - loop back to Red phase"}},
+     {{"value": "skip", "label": "Skip - these are intentional omissions"}},
+     {{"value": "partial", "label": "Fix some, skip others - [specify]"}}
+   ]
+
+4. If user says "fix":
+   • Loop back to Phase 7 (TDD Red)
+   • Write new tests for missing features
+   • Implement and verify again
+   • REPEAT until no gaps or user says skip
+
+5. If user says "skip":
+   • Document each skipped gap with reason
+   • Set verify.gaps_skipped = count
+   • Proceed to refactor
+
+6. After user decides:
+   • Set verify.user_decided = true
+   • Set verify.user_question_asked = true
+   • Set verify.gap_analysis_shown = true
+   • Set verify.re_research_done = true
+   • Set verify.user_decision = "fix" or "skip" or "partial"
+   • Set verify.status = "complete"
+
+WHY: Catch memory-based implementation errors BEFORE refactoring."""
+        }))
+        sys.exit(0)
+
+    # Verify complete
+    user_decision = verify.get("user_decision", "unknown")
+    print(json.dumps({
+        "permissionDecision": "allow",
+        "message": f"""✅ Verification complete.
+User decision: {user_decision}
+Gaps found: {gaps_found}
+Gaps fixed: {gaps_fixed}
+Gaps skipped (intentional): {gaps_skipped}
+Proceeding to refactor phase."""
+    }))
+    sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
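The docstring above is the whole contract: Claude Code pipes the pending tool call to the hook as JSON on stdin, and the hook prints a JSON permission decision on stdout. A minimal local harness for exercising the hook (a sketch only: the .claude/hooks/ install path and the route path are illustrative assumptions, not documented behavior) could look like:

import json
import subprocess

# Simulate a Write/Edit tool call on an API route file (hypothetical path).
event = {"tool_input": {"file_path": "app/api/screenshot/route.ts"}}

proc = subprocess.run(
    ["python3", ".claude/hooks/enforce-verify.py"],  # assumed install location
    input=json.dumps(event),
    capture_output=True,
    text=True,
)

decision = json.loads(proc.stdout)
print(decision["permissionDecision"])  # "allow", or "deny" plus a "reason"

With no api-dev-state.json present (or with tdd_green incomplete) the hook allows the write; the deny branch only fires once the state file reports TDD Green complete but Phase 9 unfinished.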
package/hooks/verify-after-green.py
CHANGED

@@ -1,15 +1,17 @@
 #!/usr/bin/env python3
 """
 Hook: PostToolUse (after test runs)
-Purpose: Trigger Phase 9 (Verify) after tests pass
+Purpose: Trigger Phase 9 (Verify) + Manifest Generation after tests pass
 
 This hook detects when tests pass (TDD Green phase complete) and:
-1.
-2.
-3.
+1. Runs the programmatic manifest generation scripts
+2. Reminds Claude to re-research the original documentation
+3. Compares implemented features to documented features
+4. Requires user confirmation before proceeding
 
-The goal is to
-
+The goal is to:
+- Automatically generate api-tests-manifest.json from test files (programmatic, not LLM)
+- Catch cases where Claude implemented from memory instead of from researched docs
 
 Triggers on: Bash commands containing "test" that exit successfully
 
@@ -19,11 +21,106 @@ Returns:
 import json
 import sys
 import os
+import subprocess
 from datetime import datetime
 from pathlib import Path
 
 # State file is in .claude/ directory (sibling to hooks/)
 STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+# Scripts locations (try in order):
+# 1. Installed in project: scripts/api-dev-tools/
+# 2. In node_modules (if running from package)
+# 3. Package root (development)
+PROJECT_ROOT = Path(__file__).parent.parent.parent
+SCRIPTS_LOCATIONS = [
+    PROJECT_ROOT / "scripts" / "api-dev-tools",  # CLI-installed location
+    PROJECT_ROOT / "node_modules" / "@hustle-together" / "api-dev-tools" / "scripts",
+    Path(__file__).parent.parent.parent / "scripts",  # Development fallback
+]
+
+
+def run_manifest_scripts() -> dict:
+    """
+    Run the programmatic manifest generation scripts.
+
+    These scripts are 100% deterministic - they parse source files,
+    extract parameters from Zod schemas, and generate the manifest.
+    NO LLM involvement.
+
+    Returns dict with results of each script.
+    """
+    results = {
+        "manifest_generated": False,
+        "parameters_extracted": False,
+        "results_collected": False,
+        "errors": []
+    }
+
+    # Find the scripts directory (try multiple locations)
+    scripts_dir = None
+    for loc in SCRIPTS_LOCATIONS:
+        if loc.exists():
+            scripts_dir = loc
+            break
+
+    if scripts_dir is None:
+        results["errors"].append("Scripts directory not found in any expected location")
+        return results
+
+    project_root = PROJECT_ROOT
+
+    # Run generate-test-manifest.ts
+    manifest_script = scripts_dir / "generate-test-manifest.ts"
+    if manifest_script.exists():
+        try:
+            subprocess.run(
+                ["npx", "tsx", str(manifest_script), str(project_root)],
+                cwd=str(project_root),
+                capture_output=True,
+                text=True,
+                timeout=60
+            )
+            results["manifest_generated"] = True
+        except subprocess.TimeoutExpired:
+            results["errors"].append("Manifest generation timed out")
+        except Exception as e:
+            results["errors"].append(f"Manifest generation failed: {e}")
+
+    # Run extract-parameters.ts
+    params_script = scripts_dir / "extract-parameters.ts"
+    if params_script.exists():
+        try:
+            subprocess.run(
+                ["npx", "tsx", str(params_script), str(project_root)],
+                cwd=str(project_root),
+                capture_output=True,
+                text=True,
+                timeout=60
+            )
+            results["parameters_extracted"] = True
+        except subprocess.TimeoutExpired:
+            results["errors"].append("Parameter extraction timed out")
+        except Exception as e:
+            results["errors"].append(f"Parameter extraction failed: {e}")
+
+    # Run collect-test-results.ts (optional - only if tests were just run)
+    results_script = scripts_dir / "collect-test-results.ts"
+    if results_script.exists():
+        try:
+            subprocess.run(
+                ["npx", "tsx", str(results_script), str(project_root)],
+                cwd=str(project_root),
+                capture_output=True,
+                text=True,
+                timeout=120  # Test collection can take longer
+            )
+            results["results_collected"] = True
+        except subprocess.TimeoutExpired:
+            results["errors"].append("Test results collection timed out")
+        except Exception as e:
+            results["errors"].append(f"Test results collection failed: {e}")
+
+    return results
 
 
 def main():
@@ -72,6 +169,9 @@ def main():
         print(json.dumps({"continue": True}))
         sys.exit(0)
 
+    # Tests passed - run manifest generation scripts
+    manifest_output = run_manifest_scripts()
+
     # Tests passed - check state file
     if not STATE_FILE.exists():
         print(json.dumps({"continue": True}))
@@ -106,6 +206,15 @@ def main():
     verify["status"] = "in_progress"
     verify["started_at"] = datetime.now().isoformat()
 
+    # Update manifest_generation section in state
+    if "manifest_generation" not in state:
+        state["manifest_generation"] = {}
+
+    state["manifest_generation"]["last_run"] = datetime.now().isoformat()
+    state["manifest_generation"]["manifest_generated"] = manifest_output.get("manifest_generated", False)
+    state["manifest_generation"]["parameters_extracted"] = manifest_output.get("parameters_extracted", False)
+    state["manifest_generation"]["test_results_collected"] = manifest_output.get("results_collected", False)
+
     # Save state
     STATE_FILE.write_text(json.dumps(state, indent=2))
 
@@ -113,6 +222,27 @@ def main():
     endpoint = state.get("endpoint", "the endpoint")
 
     context_parts = []
+
+    # Report manifest generation results
+    if manifest_output.get("manifest_generated"):
+        context_parts.append("## ✅ Manifest Generation Complete")
+        context_parts.append("")
+        context_parts.append("Programmatically generated from test files (no LLM):")
+        if manifest_output.get("manifest_generated"):
+            context_parts.append("  - ✓ api-tests-manifest.json")
+        if manifest_output.get("parameters_extracted"):
+            context_parts.append("  - ✓ parameter-matrix.json")
+        if manifest_output.get("results_collected"):
+            context_parts.append("  - ✓ test-results.json")
+    if manifest_output.get("errors"):
+        context_parts.append("")
+        context_parts.append("⚠️ Some scripts had issues:")
+        for err in manifest_output["errors"]:
+            context_parts.append(f"  - {err}")
+    context_parts.append("")
+    context_parts.append("---")
+    context_parts.append("")
+
     context_parts.append("## Phase 9: Implementation Verification Required")
     context_parts.append("")
     context_parts.append("Tests are passing. Before proceeding, you MUST verify your implementation:")
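One subtlety in run_manifest_scripts(): subprocess.run() without check=True does not raise on a nonzero exit code, so each success flag is set as long as the script launches and finishes within the timeout, even if it fails. A stricter variant (an illustrative sketch, not what the package ships) would gate the flag on the exit code:

import subprocess
from pathlib import Path

def run_ts_script(script: Path, project_root: Path, timeout: int = 60) -> tuple[bool, str]:
    """Run a TypeScript script via npx tsx; count success only on exit code 0."""
    try:
        proc = subprocess.run(
            ["npx", "tsx", str(script), str(project_root)],
            cwd=str(project_root),
            capture_output=True,
            text=True,
            timeout=timeout,
        )
    except subprocess.TimeoutExpired:
        return False, f"{script.name} timed out"
    except OSError as exc:  # e.g. npx not on PATH
        return False, f"{script.name} failed to launch: {exc}"
    if proc.returncode != 0:
        return False, f"{script.name} exited {proc.returncode}: {proc.stderr.strip()}"
    return True, ""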
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "@hustle-together/api-dev-tools",
-  "version": "3.0.0",
+  "version": "3.1.0",
   "description": "Interview-driven, research-first API development workflow with continuous verification loops for Claude Code",
   "main": "bin/cli.js",
   "bin": {
@@ -10,6 +10,7 @@
     "bin/",
     "commands/",
     "hooks/",
+    "scripts/",
     "templates/",
     "demo/",
     "README.md",
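The "scripts/" entry in files is what makes the new hook wiring work after install: it puts generate-test-manifest.ts, extract-parameters.ts, and collect-test-results.ts into the published tarball, so verify-after-green.py can find them under node_modules/@hustle-together/api-dev-tools/scripts and invoke them as npx tsx <script> <project-root>. Presumably the same invocation works by hand from a project root, though that is inferred from the hook's subprocess calls rather than from a documented CLI.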