@hustle-together/api-dev-tools 3.6.5 → 3.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5599 -258
- package/bin/cli.js +395 -20
- package/commands/README.md +459 -71
- package/commands/hustle-api-continue.md +158 -0
- package/commands/{api-create.md → hustle-api-create.md} +35 -15
- package/commands/{api-env.md → hustle-api-env.md} +4 -4
- package/commands/{api-interview.md → hustle-api-interview.md} +1 -1
- package/commands/{api-research.md → hustle-api-research.md} +3 -3
- package/commands/hustle-api-sessions.md +149 -0
- package/commands/{api-status.md → hustle-api-status.md} +16 -16
- package/commands/{api-verify.md → hustle-api-verify.md} +2 -2
- package/commands/hustle-combine.md +763 -0
- package/commands/hustle-ui-create-page.md +933 -0
- package/commands/hustle-ui-create.md +825 -0
- package/hooks/api-workflow-check.py +545 -21
- package/hooks/cache-research.py +337 -0
- package/hooks/check-api-routes.py +168 -0
- package/hooks/check-playwright-setup.py +103 -0
- package/hooks/check-storybook-setup.py +81 -0
- package/hooks/detect-interruption.py +165 -0
- package/hooks/enforce-a11y-audit.py +202 -0
- package/hooks/enforce-brand-guide.py +241 -0
- package/hooks/enforce-documentation.py +60 -8
- package/hooks/enforce-freshness.py +184 -0
- package/hooks/enforce-page-components.py +186 -0
- package/hooks/enforce-page-data-schema.py +155 -0
- package/hooks/enforce-questions-sourced.py +146 -0
- package/hooks/enforce-schema-from-interview.py +248 -0
- package/hooks/enforce-ui-disambiguation.py +108 -0
- package/hooks/enforce-ui-interview.py +130 -0
- package/hooks/generate-manifest-entry.py +1161 -0
- package/hooks/session-logger.py +297 -0
- package/hooks/session-startup.py +160 -15
- package/hooks/track-scope-coverage.py +220 -0
- package/hooks/track-tool-use.py +81 -1
- package/hooks/update-api-showcase.py +149 -0
- package/hooks/update-registry.py +352 -0
- package/hooks/update-ui-showcase.py +212 -0
- package/package.json +8 -3
- package/templates/BRAND_GUIDE.md +299 -0
- package/templates/CLAUDE-SECTION.md +56 -24
- package/templates/SPEC.json +640 -0
- package/templates/api-dev-state.json +217 -161
- package/templates/api-showcase/_components/APICard.tsx +153 -0
- package/templates/api-showcase/_components/APIModal.tsx +375 -0
- package/templates/api-showcase/_components/APIShowcase.tsx +231 -0
- package/templates/api-showcase/_components/APITester.tsx +522 -0
- package/templates/api-showcase/page.tsx +41 -0
- package/templates/component/Component.stories.tsx +172 -0
- package/templates/component/Component.test.tsx +237 -0
- package/templates/component/Component.tsx +86 -0
- package/templates/component/Component.types.ts +55 -0
- package/templates/component/index.ts +15 -0
- package/templates/dev-tools/_components/DevToolsLanding.tsx +320 -0
- package/templates/dev-tools/page.tsx +10 -0
- package/templates/page/page.e2e.test.ts +218 -0
- package/templates/page/page.tsx +42 -0
- package/templates/performance-budgets.json +58 -0
- package/templates/registry.json +13 -0
- package/templates/settings.json +90 -0
- package/templates/shared/HeroHeader.tsx +261 -0
- package/templates/shared/index.ts +1 -0
- package/templates/ui-showcase/_components/PreviewCard.tsx +315 -0
- package/templates/ui-showcase/_components/PreviewModal.tsx +676 -0
- package/templates/ui-showcase/_components/UIShowcase.tsx +262 -0
- package/templates/ui-showcase/page.tsx +26 -0
- package/demo/hustle-together/blog/gemini-vs-claude-widgets.html +0 -959
- package/demo/hustle-together/blog/interview-driven-api-development.html +0 -1146
- package/demo/hustle-together/blog/tdd-for-ai.html +0 -982
- package/demo/hustle-together/index.html +0 -1312
- package/demo/workflow-demo-v3.5-backup.html +0 -5008
- package/demo/workflow-demo.html +0 -6202
package/hooks/cache-research.py
@@ -0,0 +1,337 @@

```python
#!/usr/bin/env python3
"""
Hook: PostToolUse for Write/Edit
Purpose: Create research cache files from state when documentation phase starts

This hook creates the following files that enforce-documentation.py expects:
- .claude/research/{endpoint}/sources.json - Research sources with URLs
- .claude/research/{endpoint}/interview.json - Interview decisions
- .claude/research/{endpoint}/schema.json - Schema snapshot
- .claude/research/{endpoint}/CURRENT.md - Aggregated research (if not exists)
- .claude/research/index.json - Updates the freshness index

Added in v3.6.7 to fix critical gap where these files were expected but never created.

Returns:
- JSON with cacheCreated info
"""
import json
import sys
import os
from datetime import datetime
from pathlib import Path

STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
RESEARCH_DIR = Path(__file__).parent.parent / "research"
RESEARCH_INDEX = RESEARCH_DIR / "index.json"


def get_active_endpoint(state):
    """Get active endpoint - supports both old and new state formats."""
    # New format (v3.6.7+): endpoints object with active_endpoint pointer
    if "endpoints" in state and "active_endpoint" in state:
        active = state.get("active_endpoint")
        if active and active in state["endpoints"]:
            return active, state["endpoints"][active]
        return None, None

    # Old format: single endpoint field
    endpoint = state.get("endpoint")
    if endpoint:
        return endpoint, state

    return None, None


def create_sources_json(endpoint_dir, state, endpoint_data):
    """Create sources.json from research queries in state."""
    sources_file = endpoint_dir / "sources.json"

    # Collect sources from various places in state
    sources = []

    # From research_queries array
    for query in state.get("research_queries", []):
        source = {
            "query": query.get("query", ""),
            "tool": query.get("tool", "unknown"),
            "timestamp": query.get("timestamp", ""),
            "url": query.get("url", ""),
            "summary": query.get("summary", "")
        }
        sources.append(source)

    # From initial research phase
    initial_research = endpoint_data.get("phases", {}).get("research_initial", {})
    for src in initial_research.get("sources", []):
        if isinstance(src, dict):
            sources.append(src)
        elif isinstance(src, str):
            sources.append({"url": src, "summary": ""})

    # From deep research phase
    deep_research = endpoint_data.get("phases", {}).get("research_deep", {})
    for src in deep_research.get("sources", []):
        if isinstance(src, dict):
            sources.append(src)
        elif isinstance(src, str):
            sources.append({"url": src, "summary": ""})

    # Deduplicate by URL
    seen_urls = set()
    unique_sources = []
    for src in sources:
        url = src.get("url", src.get("query", ""))
        if url and url not in seen_urls:
            seen_urls.add(url)
            unique_sources.append(src)

    data = {
        "created_at": datetime.now().isoformat(),
        "updated_at": datetime.now().isoformat(),
        "endpoint": endpoint_data.get("endpoint", state.get("endpoint", "")),
        "source_count": len(unique_sources),
        "sources": unique_sources
    }

    sources_file.write_text(json.dumps(data, indent=2))
    return True


def create_interview_json(endpoint_dir, endpoint_data):
    """Create interview.json from interview decisions in state."""
    interview_file = endpoint_dir / "interview.json"

    interview = endpoint_data.get("phases", {}).get("interview", {})
    decisions = interview.get("decisions", {})
    questions = interview.get("questions", [])

    data = {
        "created_at": datetime.now().isoformat(),
        "updated_at": datetime.now().isoformat(),
        "question_count": len(questions),
        "decision_count": len(decisions),
        "questions": questions,
        "decisions": decisions
    }

    interview_file.write_text(json.dumps(data, indent=2))
    return True


def create_schema_json(endpoint_dir, endpoint_data, state):
    """Create schema.json from schema creation phase in state."""
    schema_json_file = endpoint_dir / "schema.json"

    schema_phase = endpoint_data.get("phases", {}).get("schema_creation", {})
    schema_file = schema_phase.get("schema_file", schema_phase.get("file", ""))
    fields_count = schema_phase.get("fields_count", 0)

    # Try to read actual schema file if it exists
    schema_content = None
    if schema_file:
        schema_path = Path(schema_file)
        if schema_path.exists():
            try:
                schema_content = schema_path.read_text()
            except IOError:
                pass

    data = {
        "created_at": datetime.now().isoformat(),
        "updated_at": datetime.now().isoformat(),
        "schema_file": schema_file,
        "fields_count": fields_count,
        "schema_content": schema_content
    }

    schema_json_file.write_text(json.dumps(data, indent=2))
    return True


def create_current_md(endpoint_dir, endpoint, endpoint_data, state):
    """Create CURRENT.md if it doesn't exist."""
    current_md = endpoint_dir / "CURRENT.md"

    # Only create if doesn't exist (don't overwrite manual research)
    if current_md.exists():
        return False

    # Build aggregated research content
    lines = [
        f"# Research: {endpoint}",
        "",
        f"*Generated: {datetime.now().isoformat()}*",
        "",
        "## Sources",
        ""
    ]

    # Add sources
    sources_file = endpoint_dir / "sources.json"
    if sources_file.exists():
        try:
            sources = json.loads(sources_file.read_text())
            for src in sources.get("sources", []):
                url = src.get("url", "")
                summary = src.get("summary", "")
                if url:
                    lines.append(f"- {url}")
                if summary:
                    lines.append(f" - {summary}")
        except (json.JSONDecodeError, IOError):
            pass

    lines.extend(["", "## Interview Decisions", ""])

    # Add interview decisions
    interview_file = endpoint_dir / "interview.json"
    if interview_file.exists():
        try:
            interview = json.loads(interview_file.read_text())
            for key, value in interview.get("decisions", {}).items():
                response = value.get("response", value.get("value", "N/A"))
                lines.append(f"- **{key}**: {response}")
        except (json.JSONDecodeError, IOError):
            pass

    lines.extend(["", "## Schema", ""])

    # Add schema info
    schema_file = endpoint_dir / "schema.json"
    if schema_file.exists():
        try:
            schema = json.loads(schema_file.read_text())
            lines.append(f"- File: `{schema.get('schema_file', 'N/A')}`")
            lines.append(f"- Fields: {schema.get('fields_count', 0)}")
        except (json.JSONDecodeError, IOError):
            pass

    current_md.write_text("\n".join(lines))
    return True


def update_research_index(endpoint):
    """Update the research index with this endpoint."""
    RESEARCH_DIR.mkdir(parents=True, exist_ok=True)

    # Load existing index or create new
    if RESEARCH_INDEX.exists():
        try:
            index = json.loads(RESEARCH_INDEX.read_text())
        except json.JSONDecodeError:
            index = {"version": "3.6.7", "apis": {}}
    else:
        index = {"version": "3.6.7", "apis": {}}

    # Ensure apis object exists
    if "apis" not in index:
        index["apis"] = {}

    # Update this endpoint's entry
    now = datetime.now().isoformat()
    index["apis"][endpoint] = {
        "last_updated": now,
        "freshness_days": 0,
        "cache_path": f".claude/research/{endpoint}/",
        "files": ["sources.json", "interview.json", "schema.json", "CURRENT.md"]
    }

    RESEARCH_INDEX.write_text(json.dumps(index, indent=2))
    return True


def main():
    try:
        input_data = json.load(sys.stdin)
    except json.JSONDecodeError:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})
    tool_result = input_data.get("tool_result", {})
    file_path = tool_input.get("file_path", "")

    # Only trigger on Write/Edit to documentation-related files
    if tool_name not in ["Write", "Edit"]:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Check if this is a documentation-related write
    is_manifest = "api-tests-manifest.json" in file_path
    is_readme = file_path.endswith("README.md") and "/api/" in file_path
    is_state = "api-dev-state.json" in file_path

    # Also trigger when documentation phase is in progress
    if not STATE_FILE.exists():
        print(json.dumps({"continue": True}))
        sys.exit(0)

    try:
        state = json.loads(STATE_FILE.read_text())
    except json.JSONDecodeError:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    endpoint, endpoint_data = get_active_endpoint(state)
    if not endpoint or not endpoint_data:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Check if documentation phase is in progress or we're writing doc files
    doc_phase = endpoint_data.get("phases", {}).get("documentation", {})
    doc_status = doc_phase.get("status", "not_started")

    if doc_status not in ["in_progress", "complete"] and not is_manifest and not is_readme:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Create research cache directory
    endpoint_dir = RESEARCH_DIR / endpoint
    endpoint_dir.mkdir(parents=True, exist_ok=True)

    # Create cache files
    files_created = []

    sources_created = create_sources_json(endpoint_dir, state, endpoint_data)
    if sources_created:
        files_created.append("sources.json")

    interview_created = create_interview_json(endpoint_dir, endpoint_data)
    if interview_created:
        files_created.append("interview.json")

    schema_created = create_schema_json(endpoint_dir, endpoint_data, state)
    if schema_created:
        files_created.append("schema.json")

    current_created = create_current_md(endpoint_dir, endpoint, endpoint_data, state)
    if current_created:
        files_created.append("CURRENT.md")

    # Update index
    index_updated = update_research_index(endpoint)
    if index_updated:
        files_created.append("index.json")

    # Update state to indicate research is cached
    if files_created:
        doc_phase["research_cached"] = True
        STATE_FILE.write_text(json.dumps(state, indent=2))

    output = {
        "hookSpecificOutput": {
            "cacheCreated": True,
            "endpoint": endpoint,
            "files": files_created,
            "cacheDir": str(endpoint_dir)
        }
    }

    print(json.dumps(output))
    sys.exit(0)


if __name__ == "__main__":
    main()
```
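For readers wiring this up by hand: the hook reads a single JSON payload from stdin (`tool_name`, `tool_input.file_path`) and prints a JSON result. Below is a minimal smoke-test sketch, assuming the hook is installed at `.claude/hooks/cache-research.py` and that the sample payload is representative of a PostToolUse event; both the path and the payload shape are assumptions, not part of this diff.

```python
# Illustrative local harness (not shipped with the package): feed a
# PostToolUse-style payload to cache-research.py and inspect its output.
import json
import subprocess

payload = {
    "tool_name": "Write",
    "tool_input": {"file_path": "src/app/api/users/README.md"},  # hypothetical file
}

result = subprocess.run(
    ["python3", ".claude/hooks/cache-research.py"],  # assumed install location
    input=json.dumps(payload),
    capture_output=True,
    text=True,
)

# Expect either {"continue": true} (nothing to cache yet) or a
# hookSpecificOutput object listing the cache files that were created.
print(result.stdout)
```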
package/hooks/check-api-routes.py
@@ -0,0 +1,168 @@

```python
#!/usr/bin/env python3
"""
Hook: check-api-routes.py
Trigger: PreToolUse (Write|Edit)
Purpose: Verify required API routes exist before page implementation

For ui-create-page workflow, ensures Phase 7 (ENVIRONMENT) has verified
that required API routes are available.
"""

import json
import sys
import os
import glob

def load_state():
    """Load the api-dev-state.json file"""
    state_paths = [
        ".claude/api-dev-state.json",
        os.path.join(os.environ.get("CLAUDE_PROJECT_DIR", ""), ".claude/api-dev-state.json")
    ]

    for path in state_paths:
        if os.path.exists(path):
            with open(path, 'r') as f:
                return json.load(f)
    return None

def is_page_workflow(state):
    """Check if current workflow is ui-create-page"""
    workflow = state.get("workflow", "")
    return workflow == "ui-create-page"

def get_active_element(state):
    """Get the active element being worked on"""
    active = state.get("active_element", "")
    if not active:
        active = state.get("endpoint", "")
    return active

def is_page_implementation(file_path, element_name):
    """Check if the file is a page implementation file"""
    if not file_path or not element_name:
        return False

    patterns = [
        f"src/app/{element_name}/page.tsx",
        f"app/{element_name}/page.tsx",
    ]

    return any(pattern in file_path for pattern in patterns)

def check_environment_phase(state, element_name):
    """Check if environment phase is complete"""
    elements = state.get("elements", {})
    element = elements.get(element_name, {})
    phases = element.get("phases", {})

    environment = phases.get("environment_check", {})
    return environment.get("status") == "complete"

def get_required_api_routes(state, element_name):
    """Get list of required API routes from interview decisions"""
    elements = state.get("elements", {})
    element = elements.get(element_name, {})
    ui_config = element.get("ui_config", {})

    # Check if data sources were defined
    data_sources = ui_config.get("data_sources", [])
    return data_sources

def find_existing_api_routes():
    """Find all existing API routes in the project"""
    routes = []

    # Check src/app/api paths
    api_patterns = [
        "src/app/api/**/*.ts",
        "src/app/api/**/*.tsx",
        "app/api/**/*.ts",
        "app/api/**/*.tsx",
    ]

    for pattern in api_patterns:
        for file_path in glob.glob(pattern, recursive=True):
            if "route.ts" in file_path or "route.tsx" in file_path:
                # Extract route name from path
                route = file_path.replace("src/app/api/", "/api/")
                route = route.replace("app/api/", "/api/")
                route = route.replace("/route.ts", "")
                route = route.replace("/route.tsx", "")
                routes.append(route)

    return routes

def main():
    try:
        # Read tool input from stdin
        input_data = json.loads(sys.stdin.read())
        tool_name = input_data.get("tool_name", "")
        tool_input = input_data.get("tool_input", {})

        # Only check Write tool
        if tool_name != "Write":
            print(json.dumps({"decision": "allow"}))
            return

        file_path = tool_input.get("file_path", "")

        # Load state
        state = load_state()
        if not state:
            print(json.dumps({"decision": "allow"}))
            return

        # Only apply to ui-create-page workflow
        if not is_page_workflow(state):
            print(json.dumps({"decision": "allow"}))
            return

        element_name = get_active_element(state)
        if not element_name:
            print(json.dumps({"decision": "allow"}))
            return

        # Check if writing main page file
        if is_page_implementation(file_path, element_name):
            # Verify environment phase is complete
            if not check_environment_phase(state, element_name):
                # Find existing API routes for reference
                existing_routes = find_existing_api_routes()
                routes_list = "\n".join([f" - {r}" for r in existing_routes[:15]])
                if len(existing_routes) > 15:
                    routes_list += f"\n ... and {len(existing_routes) - 15} more"

                print(json.dumps({
                    "decision": "block",
                    "reason": f"""
ENVIRONMENT CHECK REQUIRED (Phase 7)

You are implementing the main page, but the Environment phase is not complete.

Before implementing page.tsx:
1. Verify required API routes exist
2. Check authentication configuration
3. Verify required packages are installed
4. Update state: phases.environment_check.status = "complete"

Existing API Routes Found:
{routes_list if existing_routes else " (No API routes found)"}

If you need new API routes, use /api-create to create them first.
"""
                }))
                return

        # Allow everything else
        print(json.dumps({"decision": "allow"}))

    except Exception as e:
        # On error, allow to avoid blocking workflow
        print(json.dumps({
            "decision": "allow",
            "error": str(e)
        }))

if __name__ == "__main__":
    main()
```
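The block/allow decision above hinges entirely on fields in `.claude/api-dev-state.json`. Here is a sketch of a state fragment that would satisfy the check, using a hypothetical `dashboard` element; the field names mirror the hook, while the element name and route are made up for illustration.

```python
# Minimal sketch of the state shape check-api-routes.py inspects before
# allowing a page.tsx Write. "dashboard" and "/api/users" are hypothetical.
import json

state = {
    "workflow": "ui-create-page",
    "active_element": "dashboard",
    "elements": {
        "dashboard": {
            "phases": {
                # anything other than "complete" blocks page.tsx writes
                "environment_check": {"status": "complete"}
            },
            "ui_config": {"data_sources": ["/api/users"]},
        }
    },
}

# Written to .claude/api-dev-state.json, this state lets the hook emit
# {"decision": "allow"} when src/app/dashboard/page.tsx is written.
print(json.dumps(state, indent=2))
```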
package/hooks/check-playwright-setup.py
@@ -0,0 +1,103 @@

```python
#!/usr/bin/env python3
"""
Hook: PreToolUse for Write
Purpose: Verify Playwright is configured before writing E2E test files

This hook runs before writing E2E test files. It checks that:
- playwright.config.ts or playwright.config.js exists
- @playwright/test is in package.json dependencies

If Playwright is not configured, it blocks and suggests installation.

Version: 3.9.0

Returns:
- {"continue": true} - If Playwright is configured or not an E2E test file
- {"continue": false, "reason": "..."} - If Playwright is not configured
"""
import json
import sys
from pathlib import Path


def main():
    # Read hook input from stdin
    try:
        input_data = json.load(sys.stdin)
    except json.JSONDecodeError:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})

    # Only check Write operations
    if tool_name != "Write":
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Check if writing an E2E test file
    file_path = tool_input.get("file_path", "")

    # Common E2E test patterns
    is_e2e_test = (
        file_path.endswith(".spec.ts") or
        file_path.endswith(".spec.tsx") or
        file_path.endswith(".e2e.ts") or
        file_path.endswith(".e2e.tsx") or
        "/e2e/" in file_path or
        "/tests/e2e/" in file_path
    )

    if not is_e2e_test:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Look for playwright config in common locations
    cwd = Path.cwd()
    config_files = [
        cwd / "playwright.config.ts",
        cwd / "playwright.config.js",
        cwd.parent / "playwright.config.ts",
        cwd.parent / "playwright.config.js",
    ]

    playwright_found = False
    for config_file in config_files:
        if config_file.exists():
            playwright_found = True
            break

    # Also check package.json for @playwright/test
    if not playwright_found:
        package_json = cwd / "package.json"
        if package_json.exists():
            try:
                pkg = json.loads(package_json.read_text())
                deps = pkg.get("devDependencies", {})
                deps.update(pkg.get("dependencies", {}))
                if "@playwright/test" in deps:
                    playwright_found = True
            except (json.JSONDecodeError, IOError):
                pass

    if not playwright_found:
        print(json.dumps({
            "continue": False,
            "reason": (
                "Playwright is not configured in this project.\n\n"
                "Before writing E2E test files, please install Playwright:\n\n"
                "  npm init playwright@latest\n\n"
                "This will create playwright.config.ts and install browsers.\n"
                "After installation, run 'npx playwright test' to run tests."
            )
        }))
        sys.exit(0)

    # Playwright is configured
    print(json.dumps({"continue": True}))
    sys.exit(0)


if __name__ == "__main__":
    main()
```
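As with the other hooks, this guard can be exercised outside Claude Code by piping a payload to it. A rough sketch follows, assuming the hook is installed at `.claude/hooks/check-playwright-setup.py`; the path, payload shape, and test file name are illustrative assumptions.

```python
# Illustrative smoke test (not shipped with the package): confirm the hook
# blocks an E2E test write while Playwright is missing.
import json
import subprocess

payload = {
    "tool_name": "Write",
    "tool_input": {"file_path": "tests/e2e/login.spec.ts"},  # hypothetical test file
}

result = subprocess.run(
    ["python3", ".claude/hooks/check-playwright-setup.py"],  # assumed install location
    input=json.dumps(payload),
    capture_output=True,
    text=True,
)

response = json.loads(result.stdout)
# response["continue"] is False (with an install hint in "reason") until
# playwright.config.ts exists or @playwright/test appears in package.json.
print(response)
```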
package/hooks/check-storybook-setup.py
@@ -0,0 +1,81 @@

```python
#!/usr/bin/env python3
"""
Hook: PreToolUse for Write
Purpose: Verify Storybook is configured before writing story files

This hook runs before writing .stories.tsx files. It checks that:
- .storybook/ directory exists
- main.ts or main.js config exists

If Storybook is not configured, it blocks and suggests installation.

Version: 3.9.0

Returns:
- {"continue": true} - If Storybook is configured or not a story file
- {"continue": false, "reason": "..."} - If Storybook is not configured
"""
import json
import sys
from pathlib import Path


def main():
    # Read hook input from stdin
    try:
        input_data = json.load(sys.stdin)
    except json.JSONDecodeError:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})

    # Only check Write operations
    if tool_name != "Write":
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Check if writing a story file
    file_path = tool_input.get("file_path", "")
    if not file_path.endswith(".stories.tsx") and not file_path.endswith(".stories.ts"):
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Look for .storybook directory in common locations
    cwd = Path.cwd()
    storybook_dirs = [
        cwd / ".storybook",
        cwd.parent / ".storybook",  # In case running from subdirectory
    ]

    storybook_found = False
    for storybook_dir in storybook_dirs:
        if storybook_dir.exists():
            # Check for main config file
            main_ts = storybook_dir / "main.ts"
            main_js = storybook_dir / "main.js"
            if main_ts.exists() or main_js.exists():
                storybook_found = True
                break

    if not storybook_found:
        print(json.dumps({
            "continue": False,
            "reason": (
                "Storybook is not configured in this project.\n\n"
                "Before writing story files, please install Storybook:\n\n"
                "  npx storybook@latest init\n\n"
                "This will create the .storybook/ directory and configuration.\n"
                "After installation, run 'pnpm storybook' to start the dev server."
            )
        }))
        sys.exit(0)

    # Storybook is configured
    print(json.dumps({"continue": True}))
    sys.exit(0)


if __name__ == "__main__":
    main()
```
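A similar sketch for the Storybook guard, contrasting a story file with an ordinary component write; the hook path and file paths are illustrative assumptions, not values from this diff.

```python
# Quick illustrative check: story files are gated by check-storybook-setup.py,
# while other writes pass straight through.
import json
import subprocess

HOOK = ".claude/hooks/check-storybook-setup.py"  # assumed install location

for path in ["src/components/Button/Button.stories.tsx",
             "src/components/Button/Button.tsx"]:
    payload = {"tool_name": "Write", "tool_input": {"file_path": path}}
    out = subprocess.run(
        ["python3", HOOK],
        input=json.dumps(payload),
        capture_output=True,
        text=True,
    ).stdout
    # Non-story files always continue; story files continue only once a
    # .storybook/ directory with main.ts or main.js exists.
    print(path, "->", out.strip())
```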