@hustle-together/api-dev-tools 3.10.0 → 3.11.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (97)
  1. package/.claude/api-dev-state.json +159 -0
  2. package/.claude/commands/README.md +185 -0
  3. package/.claude/commands/add-command.md +209 -0
  4. package/.claude/commands/api-create.md +499 -0
  5. package/.claude/commands/api-env.md +50 -0
  6. package/.claude/commands/api-interview.md +331 -0
  7. package/.claude/commands/api-research.md +331 -0
  8. package/.claude/commands/api-status.md +259 -0
  9. package/.claude/commands/api-verify.md +231 -0
  10. package/.claude/commands/beepboop.md +97 -0
  11. package/.claude/commands/busycommit.md +112 -0
  12. package/.claude/commands/commit.md +83 -0
  13. package/.claude/commands/cycle.md +142 -0
  14. package/.claude/commands/gap.md +86 -0
  15. package/.claude/commands/green.md +142 -0
  16. package/.claude/commands/issue.md +192 -0
  17. package/.claude/commands/plan.md +168 -0
  18. package/.claude/commands/pr.md +122 -0
  19. package/.claude/commands/red.md +142 -0
  20. package/.claude/commands/refactor.md +142 -0
  21. package/.claude/commands/spike.md +142 -0
  22. package/.claude/commands/summarize.md +94 -0
  23. package/.claude/commands/tdd.md +144 -0
  24. package/.claude/commands/worktree-add.md +315 -0
  25. package/.claude/commands/worktree-cleanup.md +281 -0
  26. package/.claude/hooks/api-workflow-check.py +227 -0
  27. package/.claude/hooks/enforce-deep-research.py +185 -0
  28. package/.claude/hooks/enforce-disambiguation.py +155 -0
  29. package/.claude/hooks/enforce-documentation.py +192 -0
  30. package/.claude/hooks/enforce-environment.py +253 -0
  31. package/.claude/hooks/enforce-external-research.py +328 -0
  32. package/.claude/hooks/enforce-interview.py +421 -0
  33. package/.claude/hooks/enforce-refactor.py +189 -0
  34. package/.claude/hooks/enforce-research.py +159 -0
  35. package/.claude/hooks/enforce-schema.py +186 -0
  36. package/.claude/hooks/enforce-scope.py +160 -0
  37. package/.claude/hooks/enforce-tdd-red.py +250 -0
  38. package/.claude/hooks/enforce-verify.py +186 -0
  39. package/.claude/hooks/periodic-reground.py +154 -0
  40. package/.claude/hooks/session-startup.py +151 -0
  41. package/.claude/hooks/track-tool-use.py +626 -0
  42. package/.claude/hooks/verify-after-green.py +282 -0
  43. package/.claude/hooks/verify-implementation.py +225 -0
  44. package/.claude/research/index.json +6 -0
  45. package/.claude/settings.json +93 -0
  46. package/.claude/settings.local.json +11 -0
  47. package/.claude-plugin/marketplace.json +112 -0
  48. package/.skills/README.md +291 -0
  49. package/.skills/_shared/convert-commands.py +192 -0
  50. package/.skills/_shared/hooks/api-workflow-check.py +227 -0
  51. package/.skills/_shared/hooks/enforce-deep-research.py +185 -0
  52. package/.skills/_shared/hooks/enforce-disambiguation.py +155 -0
  53. package/.skills/_shared/hooks/enforce-documentation.py +192 -0
  54. package/.skills/_shared/hooks/enforce-environment.py +253 -0
  55. package/.skills/_shared/hooks/enforce-external-research.py +328 -0
  56. package/.skills/_shared/hooks/enforce-interview.py +421 -0
  57. package/.skills/_shared/hooks/enforce-refactor.py +189 -0
  58. package/.skills/_shared/hooks/enforce-research.py +159 -0
  59. package/.skills/_shared/hooks/enforce-schema.py +186 -0
  60. package/.skills/_shared/hooks/enforce-scope.py +160 -0
  61. package/.skills/_shared/hooks/enforce-tdd-red.py +250 -0
  62. package/.skills/_shared/hooks/enforce-verify.py +186 -0
  63. package/.skills/_shared/hooks/periodic-reground.py +154 -0
  64. package/.skills/_shared/hooks/session-startup.py +151 -0
  65. package/.skills/_shared/hooks/track-tool-use.py +626 -0
  66. package/.skills/_shared/hooks/verify-after-green.py +282 -0
  67. package/.skills/_shared/hooks/verify-implementation.py +225 -0
  68. package/.skills/_shared/install.sh +114 -0
  69. package/.skills/_shared/settings.json +93 -0
  70. package/.skills/add-command/SKILL.md +222 -0
  71. package/.skills/api-create/SKILL.md +512 -0
  72. package/.skills/api-env/SKILL.md +63 -0
  73. package/.skills/api-interview/SKILL.md +344 -0
  74. package/.skills/api-research/SKILL.md +344 -0
  75. package/.skills/api-status/SKILL.md +272 -0
  76. package/.skills/api-verify/SKILL.md +244 -0
  77. package/.skills/beepboop/SKILL.md +110 -0
  78. package/.skills/busycommit/SKILL.md +125 -0
  79. package/.skills/commit/SKILL.md +96 -0
  80. package/.skills/cycle/SKILL.md +155 -0
  81. package/.skills/gap/SKILL.md +99 -0
  82. package/.skills/green/SKILL.md +155 -0
  83. package/.skills/issue/SKILL.md +205 -0
  84. package/.skills/plan/SKILL.md +181 -0
  85. package/.skills/pr/SKILL.md +135 -0
  86. package/.skills/red/SKILL.md +155 -0
  87. package/.skills/refactor/SKILL.md +155 -0
  88. package/.skills/spike/SKILL.md +155 -0
  89. package/.skills/summarize/SKILL.md +107 -0
  90. package/.skills/tdd/SKILL.md +157 -0
  91. package/.skills/update-todos/SKILL.md +228 -0
  92. package/.skills/worktree-add/SKILL.md +328 -0
  93. package/.skills/worktree-cleanup/SKILL.md +294 -0
  94. package/CHANGELOG.md +97 -0
  95. package/README.md +66 -20
  96. package/bin/cli.js +7 -6
  97. package/package.json +22 -11
@@ -0,0 +1,282 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Hook: PostToolUse (after test runs)
4
+ Purpose: Trigger Phase 10 (Verify) + Manifest Generation after tests pass
5
+
6
+ This hook detects when tests pass (TDD Green phase complete) and:
7
+ 1. Runs the programmatic manifest generation scripts
8
+ 2. Reminds Claude to re-research the original documentation
9
+ 3. Compares implemented features to documented features
10
+ 4. Requires user confirmation before proceeding
11
+
12
+ The goal is to:
13
+ - Automatically generate api-tests-manifest.json from test files (programmatic, not LLM)
14
+ - Catch cases where Claude implemented from memory instead of from researched docs
15
+
16
+ Triggers on: Bash commands containing "test" that exit successfully
17
+
18
+ Returns:
19
+ - {"continue": true} with additionalContext prompting verification
20
+ """
21
+ import json
22
+ import sys
23
+ import os
24
+ import subprocess
25
+ from datetime import datetime
26
+ from pathlib import Path
27
+
28
# State file is in .claude/ directory (sibling to hooks/); shared with the
# other api-dev hooks.
STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
# Candidate locations for the manifest-generation scripts, tried in order:
# 1. Installed in project: scripts/api-dev-tools/
# 2. In node_modules (if running from the package)
# 3. Package root (development)
PROJECT_ROOT = Path(__file__).parent.parent.parent  # hooks/ -> .claude/ -> project root
SCRIPTS_LOCATIONS = [
    PROJECT_ROOT / "scripts" / "api-dev-tools",  # CLI-installed location
    PROJECT_ROOT / "node_modules" / "@hustle-together" / "api-dev-tools" / "scripts",
    Path(__file__).parent.parent.parent / "scripts",  # Development fallback
]
40
+
41
+
42
def run_manifest_scripts() -> dict:
    """
    Run the programmatic manifest generation scripts.

    These scripts are 100% deterministic - they parse source files,
    extract parameters from Zod schemas, and generate the manifest.
    NO LLM involvement.

    Returns:
        dict with keys:
            manifest_generated (bool): generate-test-manifest.ts ran cleanly
            parameters_extracted (bool): extract-parameters.ts ran cleanly
            results_collected (bool): collect-test-results.ts ran cleanly
            errors (list[str]): human-readable failure descriptions
    """
    results = {
        "manifest_generated": False,
        "parameters_extracted": False,
        "results_collected": False,
        "errors": []
    }

    # Find the scripts directory (first existing candidate wins)
    scripts_dir = next((loc for loc in SCRIPTS_LOCATIONS if loc.exists()), None)
    if scripts_dir is None:
        results["errors"].append("Scripts directory not found in any expected location")
        return results

    project_root = PROJECT_ROOT

    def _run_script(filename: str, label: str, timeout: int) -> bool:
        """Run one tsx script; return True only on a clean (exit 0) run.

        Appends a description to results["errors"] on timeout, launch
        failure, or non-zero exit.
        """
        script = scripts_dir / filename
        if not script.exists():
            return False
        try:
            proc = subprocess.run(
                ["npx", "tsx", str(script), str(project_root)],
                cwd=str(project_root),
                capture_output=True,
                text=True,
                timeout=timeout
            )
        except subprocess.TimeoutExpired:
            results["errors"].append(f"{label} timed out")
            return False
        except Exception as e:  # e.g. npx not on PATH
            results["errors"].append(f"{label} failed: {e}")
            return False
        # BUGFIX: a non-zero exit code was previously still reported as
        # success because the return code was never inspected.
        if proc.returncode != 0:
            results["errors"].append(
                f"{label} exited with code {proc.returncode}: "
                f"{proc.stderr.strip()[:200]}"
            )
            return False
        return True

    results["manifest_generated"] = _run_script(
        "generate-test-manifest.ts", "Manifest generation", 60)
    results["parameters_extracted"] = _run_script(
        "extract-parameters.ts", "Parameter extraction", 60)
    # Test collection can take longer than the other scripts
    results["results_collected"] = _run_script(
        "collect-test-results.ts", "Test results collection", 120)

    return results
124
+
125
+
126
+ def main():
127
+ # Read hook input from stdin
128
+ try:
129
+ input_data = json.load(sys.stdin)
130
+ except json.JSONDecodeError:
131
+ print(json.dumps({"continue": True}))
132
+ sys.exit(0)
133
+
134
+ tool_name = input_data.get("tool_name", "")
135
+ tool_input = input_data.get("tool_input", {})
136
+ tool_output = input_data.get("tool_output", {})
137
+
138
+ # Only trigger on Bash commands
139
+ if tool_name != "Bash":
140
+ print(json.dumps({"continue": True}))
141
+ sys.exit(0)
142
+
143
+ # Check if this is a test command
144
+ command = tool_input.get("command", "")
145
+ is_test_command = any(test_keyword in command.lower() for test_keyword in [
146
+ "pnpm test", "npm test", "vitest", "jest", "pytest", "test:run"
147
+ ])
148
+
149
+ if not is_test_command:
150
+ print(json.dumps({"continue": True}))
151
+ sys.exit(0)
152
+
153
+ # Check if tests passed (exit code 0 or output indicates success)
154
+ output_text = ""
155
+ if isinstance(tool_output, str):
156
+ output_text = tool_output
157
+ elif isinstance(tool_output, dict):
158
+ output_text = tool_output.get("output", tool_output.get("stdout", ""))
159
+
160
+ # Look for success indicators
161
+ tests_passed = any(indicator in output_text.lower() for indicator in [
162
+ "tests passed", "all tests passed", "test suites passed",
163
+ "✓", "passed", "0 failed", "pass"
164
+ ]) and not any(fail in output_text.lower() for fail in [
165
+ "failed", "error", "fail"
166
+ ])
167
+
168
+ if not tests_passed:
169
+ print(json.dumps({"continue": True}))
170
+ sys.exit(0)
171
+
172
+ # Tests passed - run manifest generation scripts
173
+ manifest_output = run_manifest_scripts()
174
+
175
+ # Tests passed - check state file
176
+ if not STATE_FILE.exists():
177
+ print(json.dumps({"continue": True}))
178
+ sys.exit(0)
179
+
180
+ try:
181
+ state = json.loads(STATE_FILE.read_text())
182
+ except json.JSONDecodeError:
183
+ print(json.dumps({"continue": True}))
184
+ sys.exit(0)
185
+
186
+ phases = state.get("phases", {})
187
+ tdd_green = phases.get("tdd_green", {})
188
+ verify = phases.get("verify", {})
189
+
190
+ # Check if we're in TDD Green phase
191
+ if tdd_green.get("status") != "in_progress":
192
+ print(json.dumps({"continue": True}))
193
+ sys.exit(0)
194
+
195
+ # Check if verify phase already done
196
+ if verify.get("status") == "complete":
197
+ print(json.dumps({"continue": True}))
198
+ sys.exit(0)
199
+
200
+ # Mark TDD Green as complete
201
+ tdd_green["status"] = "complete"
202
+ tdd_green["all_tests_passing"] = True
203
+ tdd_green["completed_at"] = datetime.now().isoformat()
204
+
205
+ # Start verify phase
206
+ verify["status"] = "in_progress"
207
+ verify["started_at"] = datetime.now().isoformat()
208
+
209
+ # Update manifest_generation section in state
210
+ if "manifest_generation" not in state:
211
+ state["manifest_generation"] = {}
212
+
213
+ state["manifest_generation"]["last_run"] = datetime.now().isoformat()
214
+ state["manifest_generation"]["manifest_generated"] = manifest_output.get("manifest_generated", False)
215
+ state["manifest_generation"]["parameters_extracted"] = manifest_output.get("parameters_extracted", False)
216
+ state["manifest_generation"]["test_results_collected"] = manifest_output.get("results_collected", False)
217
+
218
+ # Save state
219
+ STATE_FILE.write_text(json.dumps(state, indent=2))
220
+
221
+ # Build verification prompt
222
+ endpoint = state.get("endpoint", "the endpoint")
223
+
224
+ context_parts = []
225
+
226
+ # Report manifest generation results
227
+ if manifest_output.get("manifest_generated"):
228
+ context_parts.append("## ✅ Manifest Generation Complete")
229
+ context_parts.append("")
230
+ context_parts.append("Programmatically generated from test files (no LLM):")
231
+ if manifest_output.get("manifest_generated"):
232
+ context_parts.append(" - ✓ api-tests-manifest.json")
233
+ if manifest_output.get("parameters_extracted"):
234
+ context_parts.append(" - ✓ parameter-matrix.json")
235
+ if manifest_output.get("results_collected"):
236
+ context_parts.append(" - ✓ test-results.json")
237
+ if manifest_output.get("errors"):
238
+ context_parts.append("")
239
+ context_parts.append("⚠️ Some scripts had issues:")
240
+ for err in manifest_output["errors"]:
241
+ context_parts.append(f" - {err}")
242
+ context_parts.append("")
243
+ context_parts.append("---")
244
+ context_parts.append("")
245
+
246
+ context_parts.append("## Phase 10: Implementation Verification Required")
247
+ context_parts.append("")
248
+ context_parts.append("Tests are passing. Before proceeding, you MUST verify your implementation:")
249
+ context_parts.append("")
250
+ context_parts.append("**Required Actions:**")
251
+ context_parts.append("1. Re-read the original API documentation (use Context7 or WebSearch)")
252
+ context_parts.append("2. Compare EVERY documented parameter/feature to your implementation")
253
+ context_parts.append("3. Report any discrepancies in this format:")
254
+ context_parts.append("")
255
+ context_parts.append("```")
256
+ context_parts.append("| Feature | In Docs | Implemented | Status |")
257
+ context_parts.append("|------------------|---------|-------------|-----------------|")
258
+ context_parts.append("| param_name | Yes | Yes | Match |")
259
+ context_parts.append("| missing_param | Yes | No | MISSING |")
260
+ context_parts.append("| extra_param | No | Yes | EXTRA (OK) |")
261
+ context_parts.append("```")
262
+ context_parts.append("")
263
+ context_parts.append("**After comparison, ask the user:**")
264
+ context_parts.append("- Fix gaps? [Y] - Loop back to Red phase")
265
+ context_parts.append("- Skip (intentional omissions)? [n] - Document and proceed")
266
+ context_parts.append("")
267
+ context_parts.append("DO NOT proceed to Refactor until verification is complete.")
268
+
269
+ output = {
270
+ "continue": True,
271
+ "hookSpecificOutput": {
272
+ "hookEventName": "PostToolUse",
273
+ "additionalContext": "\n".join(context_parts)
274
+ }
275
+ }
276
+
277
+ print(json.dumps(output))
278
+ sys.exit(0)
279
+
280
+
281
+ if __name__ == "__main__":
282
+ main()
@@ -0,0 +1,225 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Hook: PreToolUse for Write/Edit (runs AFTER enforce-research and enforce-interview)
4
+ Purpose: Verify implementation matches interview requirements
5
+
6
+ This hook addresses these gaps:
7
+ 1. AI uses exact user terminology when researching (not paraphrasing)
8
+ 2. All changed files are tracked and verified
9
+ 3. Test files use same patterns as production code
10
+
11
+ Returns:
12
+ - {"permissionDecision": "allow"} - Let the tool run
13
+ - {"permissionDecision": "deny", "reason": "..."} - Block with explanation
14
+ """
15
+ import json
16
+ import sys
17
+ import re
18
+ from pathlib import Path
19
+
20
# State file is in .claude/ directory (sibling to hooks/); the same file is
# read and written by the other api-dev hooks.
STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
22
+
23
+
24
def extract_key_terms(text: str) -> list[str]:
    """Extract likely important terms from interview answers.

    These are terms that should appear in research and implementation:
    - Proper nouns (capitalized multi-word phrases)
    - Technical terms (SDK names, API names, etc.)
    - Specific patterns (e.g., "via X", "using X", "with X")

    Returns:
        Deduplicated terms in first-seen order.  BUGFIX: the previous
        list(set(...)) made the result order nondeterministic across runs,
        which made the warnings written to the state file unstable.
    """
    terms = []

    # "via X", "using X", "with X", "through X" — capture the capitalized
    # phrase up to the next comma, period, or newline.
    via_patterns = re.findall(r'(?:via|using|with|through)\s+([A-Z][A-Za-z0-9\s]+?)(?:[,.\n]|$)', text)
    terms.extend(via_patterns)

    # Capitalized multi-word phrases (likely proper nouns/product names),
    # e.g. "Vercel AI Gateway", "OpenAI API".
    proper_nouns = re.findall(r'[A-Z][a-z]+(?:\s+[A-Z][a-z]+)+', text)
    terms.extend(proper_nouns)

    # Trim whitespace, drop very short fragments, dedupe preserving order.
    cleaned = [t.strip() for t in terms if len(t.strip()) > 3]
    return list(dict.fromkeys(cleaned))
46
+
47
+
48
def check_research_used_exact_terms(state: dict) -> list[str]:
    """Verify research sources used the exact terms from the interview.

    Gap 1 Fix: When user provides a term, use THAT EXACT TERM to search.

    Args:
        state: Parsed api-dev-state.json contents.

    Returns:
        List of human-readable warning strings (empty when no gaps found).
    """
    issues = []

    interview = state.get("phases", {}).get("interview", {})
    research = state.get("phases", {}).get("research_initial", {})
    deep_research = state.get("phases", {}).get("research_deep", {})

    questions = interview.get("questions", [])
    if isinstance(questions, list) and len(questions) > 0:
        # Extract key terms from all interview answers
        all_text = " ".join(str(q) for q in questions)
        key_terms = extract_key_terms(all_text)

        # Check if these terms appear in research sources
        research_sources = research.get("sources", []) + deep_research.get("sources", [])
        research_text = " ".join(str(s) for s in research_sources).lower()

        missing_terms = []
        for term in key_terms:
            term_lower = term.lower()
            if term_lower not in research_text:
                # Accept a partial match where every word of the term appears
                # somewhere in the research text (e.g. "AI Gateway" inside
                # "Vercel AI Gateway").  BUGFIX: the previous
                # `any(all(...) for _ in [1])` was a no-op wrapper around
                # this same all() check.
                words = term_lower.split()
                if not all(w in research_text for w in words):
                    missing_terms.append(term)

        if missing_terms:
            issues.append(
                f"⚠️ Gap 1 Warning: User-specified terms not found in research:\n"
                f"  Terms from interview: {missing_terms}\n"
                f"  These EXACT terms should have been searched."
            )

    return issues
87
+
88
+
89
def check_files_tracked(state: dict, file_path: str) -> list[str]:
    """Verify we're tracking all files being modified.

    Gap 2 Fix: Track files as they're modified, not after claiming completion.

    Side effect: appends an untracked production file to
    state["files_modified"] and persists the state file immediately.

    Returns:
        Always an empty list — tracking is silent and never blocks.
    """
    issues = []

    files_created = state.get("files_created", [])
    files_modified = state.get("files_modified", [])
    all_tracked = files_created + files_modified

    # Normalize Windows separators once, and use the normalized form for
    # EVERY check below.  BUGFIX: the raw path was previously tested against
    # "/api/" and "/lib/", which can never match backslash-separated paths.
    normalized_path = file_path.replace("\\", "/")

    # Test files are excluded from production-file tracking.
    is_test = ".test." in normalized_path or "/__tests__/" in normalized_path or ".spec." in normalized_path

    # Only TypeScript files under api/ or lib/ are tracked.
    is_trackable = ("/api/" in normalized_path or "/lib/" in normalized_path) and normalized_path.endswith(".ts")

    if is_trackable and not is_test:
        # A file counts as tracked when any recorded entry is a suffix or
        # substring of the normalized path.
        found = any(
            normalized_path.endswith(tracked) or tracked in normalized_path
            for tracked in all_tracked
        )

        # Don't block, but record that this file was touched.  Store the
        # path relative to src/ when possible, then persist immediately.
        if not found:
            relative = normalized_path.split("/src/")[-1] if "/src/" in normalized_path else normalized_path
            state.setdefault("files_modified", []).append(relative)
            STATE_FILE.write_text(json.dumps(state, indent=2))

    return issues
123
+
124
+
125
def check_test_production_alignment(state: dict, file_path: str, content: str = "") -> list[str]:
    """Verify test files use same patterns as production code.

    Gap 5 Fix: Test files must use the same patterns as production code.

    Args:
        state: Parsed api-dev-state.json contents.
        file_path: Path of the file about to be written/edited.
        content: New file content (or edit replacement string); may be "".

    Returns:
        List of warning strings (empty for non-test files or no mismatch).
    """
    issues = []

    is_test = ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path
    if not is_test:
        return issues

    # The interview transcript tells us which configuration style
    # (e.g. a single gateway key) the user actually asked for.
    interview = state.get("phases", {}).get("interview", {})
    questions = interview.get("questions", [])
    all_text = " ".join(str(q) for q in questions)

    # NOTE: a previous revision also scanned the interview for
    # *_KEY/*_TOKEN/*_SECRET env-var names here, but the loop body was a
    # no-op (`if pattern in content: pass`) and has been removed.

    # Mismatch check: interview said "gateway"/"single key", but the test
    # still probes individual provider keys.
    if "gateway" in all_text.lower() or "single key" in all_text.lower():
        old_patterns = ["OPENAI_API_KEY", "ANTHROPIC_API_KEY", "GOOGLE_API_KEY", "PERPLEXITY_API_KEY"]
        found_old = [p for p in old_patterns if p in content]

        if found_old and "AI_GATEWAY" not in content:
            issues.append(
                f"⚠️ Gap 5 Warning: Test may be checking wrong environment variables.\n"
                f"  Interview mentioned: gateway/single key pattern\n"
                f"  Test checks: {found_old}\n"
                f"  Consider: Should test check AI_GATEWAY_API_KEY instead?"
            )

    return issues
167
+
168
+
169
def _emit_allow_and_exit():
    """Print the standard allow decision and terminate the hook."""
    print(json.dumps({"permissionDecision": "allow"}))
    sys.exit(0)


def main():
    """PreToolUse entry point for Write/Edit.

    Runs advisory verification checks (exact-term research, file tracking,
    test/production alignment).  Never blocks the tool call; any findings
    are appended to the state file under "verification_warnings" so the
    user can review them later.
    """
    # Malformed hook input -> allow the tool to proceed.
    try:
        payload = json.load(sys.stdin)
    except json.JSONDecodeError:
        _emit_allow_and_exit()

    tool_input = payload.get("tool_input", {})
    target = tool_input.get("file_path", "")
    new_content = tool_input.get("content", "") or tool_input.get("new_string", "")

    # Only TypeScript files under /api/ or /lib/ are interesting here.
    relevant = target.endswith(".ts") and ("/api/" in target or "/lib/" in target)
    if not relevant:
        _emit_allow_and_exit()

    # Without a readable state file there is nothing to verify against.
    if not STATE_FILE.exists():
        _emit_allow_and_exit()
    try:
        state = json.loads(STATE_FILE.read_text())
    except json.JSONDecodeError:
        _emit_allow_and_exit()

    findings = []
    findings.extend(check_research_used_exact_terms(state))               # Gap 1
    findings.extend(check_files_tracked(state, target))                   # Gap 2
    findings.extend(check_test_production_alignment(state, target, new_content))  # Gap 5

    # These are warnings, not blockers — persist them for later review.
    if findings:
        state.setdefault("verification_warnings", []).extend(findings)
        STATE_FILE.write_text(json.dumps(state, indent=2))

    _emit_allow_and_exit()


if __name__ == "__main__":
    main()
@@ -0,0 +1,114 @@
1
#!/bin/bash
# API Dev Tools - Post-Install Setup
# Installs hooks, settings, and initializes state files.
#
# Paths are resolved relative to this script's own location, so the script
# works both when run directly and when invoked by the package manager.

set -e

# BUGFIX: banner previously said v3.11.0 while the package version is 3.11.1.
echo "🚀 Installing API Dev Tools v3.11.1..."
echo ""

# Determine installation directory: prefer a project-local .claude/,
# otherwise fall back to the per-user ~/.claude/.
if [ -d ".claude" ]; then
  INSTALL_DIR=".claude"
  INSTALL_TYPE="project"
  echo "📁 Installing to project: .claude/"
else
  INSTALL_DIR="$HOME/.claude"
  INSTALL_TYPE="user"
  echo "📁 Installing to user directory: ~/.claude/"
fi

# Create directories
echo "📂 Creating directories..."
mkdir -p "$INSTALL_DIR/hooks"
mkdir -p "$INSTALL_DIR/research"
mkdir -p "$INSTALL_DIR/commands"

# Resolve the package root: install.sh lives in .skills/_shared/, so two
# dirname hops reach the package root.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
SKILLS_DIR="$(dirname "$(dirname "$SCRIPT_DIR")")"

# Copy hooks.  BUGFIX: failures were previously silenced with
# '2>/dev/null || true' while an unconditional success message was printed;
# now the outcome is reported honestly.
if [ -d "$SKILLS_DIR/.skills/_shared/hooks" ]; then
  echo "🔗 Installing enforcement hooks..."
  if cp -r "$SKILLS_DIR/.skills/_shared/hooks"/* "$INSTALL_DIR/hooks/"; then
    chmod +x "$INSTALL_DIR/hooks/"*.py 2>/dev/null || true
    echo "  ✓ 18 enforcement hooks installed"
  else
    echo "  ⚠️ Failed to copy hooks; check permissions on $INSTALL_DIR/hooks/" >&2
  fi
fi

# Copy settings (ask before overwriting an existing settings.json)
if [ -f "$SKILLS_DIR/.skills/_shared/settings.json" ]; then
  if [ -f "$INSTALL_DIR/settings.json" ]; then
    echo "⚙️ Settings.json already exists"
    read -p "  Overwrite? (y/N): " -n 1 -r
    echo
    if [[ $REPLY =~ ^[Yy]$ ]]; then
      cp "$SKILLS_DIR/.skills/_shared/settings.json" "$INSTALL_DIR/settings.json"
      echo "  ✓ Settings updated"
    else
      echo "  ⏭️ Skipped settings.json"
    fi
  else
    echo "⚙️ Installing settings..."
    cp "$SKILLS_DIR/.skills/_shared/settings.json" "$INSTALL_DIR/settings.json"
    echo "  ✓ Settings installed"
  fi
fi

# Initialize state file (never clobber an existing one).
# NOTE: the closing EOF delimiters below must stay at column 0 — with
# << 'EOF' (no dash) an indented terminator would never be recognized and
# the heredoc would swallow the rest of the script.
if [ ! -f "$INSTALL_DIR/api-dev-state.json" ]; then
  echo "📊 Initializing state file..."
  cat > "$INSTALL_DIR/api-dev-state.json" << 'EOF'
{
  "version": "3.0.0",
  "endpoint": null,
  "turn_count": 0,
  "phases": {},
  "research_index": {}
}
EOF
  echo "  ✓ State file created"
else
  echo "📊 State file already exists"
fi

# Initialize research index (never clobber an existing cache)
if [ ! -f "$INSTALL_DIR/research/index.json" ]; then
  echo "🔍 Initializing research cache..."
  cat > "$INSTALL_DIR/research/index.json" << 'EOF'
{
  "version": "1.0.0",
  "cache": {}
}
EOF
  echo "  ✓ Research index created"
else
  echo "🔍 Research index already exists"
fi

echo ""
echo "✅ Installation complete!"
echo ""
echo "📚 Next steps:"
echo ""
echo "  1. Install MCP servers in Claude Code:"
echo "     • Context7 (for documentation search)"
echo "     • GitHub (for PR and issue integration)"
echo ""
echo "  2. Verify installation:"
echo "     /api-status"
echo ""
echo "  3. Start creating APIs:"
echo "     /api-create my-endpoint"
echo ""
echo "  4. Read documentation:"
echo "     • Quick start: .claude/commands/README.md"
echo "     • Skills guide: .skills/README.md"
echo "     • Full roadmap: ENHANCEMENT_ROADMAP_v3.11.0.md"
echo ""
echo "📦 Installed to: $INSTALL_DIR ($INSTALL_TYPE)"
echo "🎯 23 skills available"
echo "🔗 18 enforcement hooks active"
echo ""
echo "💡 Tip: Run '/help' in Claude Code to see all available skills"
echo ""