@hustle-together/api-dev-tools 3.12.3 → 3.12.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/commands/hustle-build.md +259 -0
- package/.claude/commands/hustle-combine.md +1089 -0
- package/.claude/commands/hustle-ui-create-page.md +1078 -0
- package/.claude/commands/hustle-ui-create.md +1058 -0
- package/.claude/hooks/auto-answer.py +305 -0
- package/.claude/hooks/cache-research.py +337 -0
- package/.claude/hooks/check-api-routes.py +168 -0
- package/.claude/hooks/check-playwright-setup.py +103 -0
- package/.claude/hooks/check-storybook-setup.py +81 -0
- package/.claude/hooks/check-update.py +132 -0
- package/.claude/hooks/completion-promise-detector.py +293 -0
- package/.claude/hooks/context-capacity-warning.py +171 -0
- package/.claude/hooks/detect-interruption.py +165 -0
- package/.claude/hooks/docs-update-check.py +120 -0
- package/.claude/hooks/enforce-a11y-audit.py +202 -0
- package/.claude/hooks/enforce-brand-guide.py +241 -0
- package/.claude/hooks/enforce-component-type-confirm.py +97 -0
- package/.claude/hooks/enforce-dry-run.py +134 -0
- package/.claude/hooks/enforce-freshness.py +184 -0
- package/.claude/hooks/enforce-page-components.py +186 -0
- package/.claude/hooks/enforce-page-data-schema.py +155 -0
- package/.claude/hooks/enforce-questions-sourced.py +146 -0
- package/.claude/hooks/enforce-schema-from-interview.py +248 -0
- package/.claude/hooks/enforce-ui-disambiguation.py +108 -0
- package/.claude/hooks/enforce-ui-interview.py +130 -0
- package/.claude/hooks/generate-adr-options.py +282 -0
- package/.claude/hooks/generate-manifest-entry.py +1161 -0
- package/.claude/hooks/hook_utils.py +609 -0
- package/.claude/hooks/lib/__init__.py +1 -0
- package/.claude/hooks/lib/__pycache__/__init__.cpython-314.pyc +0 -0
- package/.claude/hooks/lib/__pycache__/greptile.cpython-314.pyc +0 -0
- package/.claude/hooks/lib/__pycache__/ntfy.cpython-314.pyc +0 -0
- package/.claude/hooks/lib/greptile.py +355 -0
- package/.claude/hooks/lib/ntfy.py +209 -0
- package/.claude/hooks/notify-input-needed.py +73 -0
- package/.claude/hooks/notify-phase-complete.py +90 -0
- package/.claude/hooks/ntfy-on-question.py +240 -0
- package/.claude/hooks/orchestrator-completion.py +313 -0
- package/.claude/hooks/orchestrator-handoff.py +267 -0
- package/.claude/hooks/orchestrator-session-startup.py +146 -0
- package/.claude/hooks/parallel-orchestrator.py +451 -0
- package/.claude/hooks/project-document-prompt.py +302 -0
- package/.claude/hooks/remote-question-proxy.py +284 -0
- package/.claude/hooks/remote-question-server.py +1224 -0
- package/.claude/hooks/run-code-review.py +393 -0
- package/.claude/hooks/run-visual-qa.py +338 -0
- package/.claude/hooks/session-logger.py +323 -0
- package/.claude/hooks/test-orchestrator-reground.py +248 -0
- package/.claude/hooks/track-scope-coverage.py +220 -0
- package/.claude/hooks/track-token-usage.py +121 -0
- package/.claude/hooks/update-adr-decision.py +236 -0
- package/.claude/hooks/update-api-showcase.py +161 -0
- package/.claude/hooks/update-registry.py +352 -0
- package/.claude/hooks/update-testing-checklist.py +195 -0
- package/.claude/hooks/update-ui-showcase.py +224 -0
- package/.claude/settings.local.json +7 -1
- package/.claude/test-auto-answer-bot.py +183 -0
- package/.claude/test-completion-detector.py +263 -0
- package/.claude/test-orchestrator-state.json +20 -0
- package/.claude/test-orchestrator.sh +271 -0
- package/.skills/api-create/SKILL.md +88 -3
- package/.skills/docs-sync/SKILL.md +260 -0
- package/.skills/hustle-build/SKILL.md +459 -0
- package/.skills/hustle-build-review/SKILL.md +518 -0
- package/CHANGELOG.md +87 -0
- package/README.md +86 -9
- package/bin/cli.js +1302 -88
- package/commands/hustle-api-create.md +22 -0
- package/commands/hustle-combine.md +81 -2
- package/commands/hustle-ui-create-page.md +84 -2
- package/commands/hustle-ui-create.md +82 -2
- package/hooks/auto-answer.py +228 -0
- package/hooks/check-update.py +132 -0
- package/hooks/ntfy-on-question.py +227 -0
- package/hooks/orchestrator-completion.py +313 -0
- package/hooks/orchestrator-handoff.py +189 -0
- package/hooks/orchestrator-session-startup.py +146 -0
- package/hooks/periodic-reground.py +230 -67
- package/hooks/update-api-showcase.py +13 -1
- package/hooks/update-ui-showcase.py +13 -1
- package/package.json +7 -3
- package/scripts/extract-schema-docs.cjs +322 -0
- package/templates/CLAUDE-SECTION.md +89 -64
- package/templates/api-showcase/_components/APIModal.tsx +100 -8
- package/templates/api-showcase/_components/APIShowcase.tsx +36 -4
- package/templates/api-showcase/_components/APITester.tsx +367 -58
- package/templates/docs/page.tsx +230 -0
- package/templates/hustle-build-defaults.json +84 -0
- package/templates/hustle-dev-dashboard/page.tsx +365 -0
- package/templates/playwright-report/page.tsx +258 -0
- package/templates/settings.json +88 -7
- package/templates/test-results/page.tsx +237 -0
- package/templates/typedoc.json +19 -0
- package/templates/ui-showcase/_components/UIShowcase.tsx +1 -1
- package/templates/ui-showcase/page.tsx +1 -1
- package/.claude/api-dev-state.json +0 -466
|
@@ -0,0 +1,338 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Visual QA Hook (Ralph Wiggum Loop Pattern)
|
|
4
|
+
|
|
5
|
+
Runs visual analysis with AI (Haiku) and LOOPS until all issues are fixed.
|
|
6
|
+
This ensures visual quality before proceeding to next phase.
|
|
7
|
+
|
|
8
|
+
Hook Type: PostToolUse (triggers after Storybook/visual tests)
|
|
9
|
+
|
|
10
|
+
Ralph Wiggum Pattern:
|
|
11
|
+
1. Run visual tests / capture screenshots
|
|
12
|
+
2. Analyze with AI (Haiku subagent)
|
|
13
|
+
3. If issues found → inject context for agent to fix
|
|
14
|
+
4. Agent fixes CSS/layout
|
|
15
|
+
5. Re-run visual tests → hook triggers again
|
|
16
|
+
6. Re-analyze with Haiku
|
|
17
|
+
7. Loop until clean OR max iterations
|
|
18
|
+
8. Emit <promise>VISUAL_CLEAN</promise>
|
|
19
|
+
|
|
20
|
+
Environment Variables:
|
|
21
|
+
VISUAL_QA_ENABLED: Set to 'true' to enable (default: true)
|
|
22
|
+
VISUAL_QA_MAX_ITERATIONS: Max QA cycles (default: 5)
|
|
23
|
+
|
|
24
|
+
Version: 1.0.0
|
|
25
|
+
"""
|
|
26
|
+
import os
|
|
27
|
+
import sys
|
|
28
|
+
import json
|
|
29
|
+
from pathlib import Path
|
|
30
|
+
from datetime import datetime
|
|
31
|
+
|
|
32
|
+
# State file for tracking visual QA loops
|
|
33
|
+
VISUAL_STATE_FILE = ".claude/visual-qa-state.json"
|
|
34
|
+
MAX_ITERATIONS = int(os.environ.get("VISUAL_QA_MAX_ITERATIONS", "5"))
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def load_visual_state() -> dict:
    """Return the persisted visual-QA loop state, or a fresh default.

    Reads VISUAL_STATE_FILE relative to the current working directory.
    A missing, unreadable, or corrupt state file falls back to the
    pristine default (iteration 0, status "pending").
    """
    default_state = {
        "iteration": 0,
        "issues_found": [],
        "components_checked": [],
        "viewports_passed": [],
        "status": "pending",
        "started_at": None,
        "last_check_at": None
    }
    path = Path.cwd() / VISUAL_STATE_FILE
    if not path.exists():
        return default_state
    try:
        return json.loads(path.read_text())
    except (json.JSONDecodeError, IOError):
        # Corrupt or unreadable state: restart the loop from scratch.
        return default_state
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def save_visual_state(state: dict):
    """Persist the visual-QA loop state; write failures are ignored."""
    target = Path.cwd() / VISUAL_STATE_FILE
    target.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(state, indent=2)
    try:
        target.write_text(serialized)
    except IOError:
        # Best-effort persistence: a failed write must not kill the hook.
        pass
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def clear_visual_state():
    """Delete the visual-QA state file once the loop has finished."""
    # No exists() pre-check needed: a missing file raises
    # FileNotFoundError, which is an OSError/IOError and is swallowed.
    try:
        (Path.cwd() / VISUAL_STATE_FILE).unlink()
    except IOError:
        pass
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
def load_workflow_state() -> dict:
    """Load the current workflow state.

    Probes .claude/api-dev-state.json first, then the hustle-build state
    file; the first candidate that exists and parses wins. Returns an
    empty dict when neither is usable.
    """
    candidates = (
        Path.cwd() / ".claude" / "api-dev-state.json",
        Path.cwd() / ".claude" / "hustle-build-state.json",
    )
    for candidate in candidates:
        if not candidate.exists():
            continue
        try:
            return json.loads(candidate.read_text())
        except (json.JSONDecodeError, IOError):
            # Fall through to the next candidate, matching the original
            # two-step probe behavior.
            continue
    return {}
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def update_workflow_state(issues_count: int, iteration: int) -> None:
    """Record the latest visual-QA result in the workflow state file.

    Args:
        issues_count: Number of visual issues found this iteration.
        iteration: Current Ralph-Wiggum loop iteration (1-based).

    Writes a ``visual_qa`` entry under ``phases`` and persists the whole
    state back to ``.claude/api-dev-state.json``; write failures are
    silently ignored (best-effort bookkeeping).
    """
    state_file = Path.cwd() / ".claude" / "api-dev-state.json"
    # NOTE(review): load_workflow_state() may have read
    # .claude/hustle-build-state.json instead (when api-dev-state.json is
    # absent or corrupt); that state would then be written into
    # api-dev-state.json here — confirm this cross-file write is intended.
    state = load_workflow_state()

    if "phases" not in state:
        state["phases"] = {}

    state["phases"]["visual_qa"] = {
        # "complete" only when this iteration found zero issues.
        "status": "in_progress" if issues_count > 0 else "complete",
        "iteration": iteration,
        "issues_found": issues_count,
        "checked_at": datetime.now().isoformat()
    }

    try:
        state_file.write_text(json.dumps(state, indent=2))
    except IOError:
        # Best-effort: a failed write must not break the hook pipeline.
        pass
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
def should_run_visual_qa(hook_input: dict) -> bool:
    """Decide whether this hook invocation should trigger a visual-QA pass.

    Args:
        hook_input: Parsed hook payload (``tool_name`` / ``tool_input``).

    Returns:
        True when the finished tool call looks like a visual/Storybook
        test run, or was a ``visual-analyzer`` subagent Task; False
        otherwise, or when VISUAL_QA_ENABLED is explicitly "false".

    Fix: removed the dead ``tool_result``/``stdout`` locals the original
    assigned but never used.
    """
    # Global kill switch (enabled by default).
    if os.environ.get("VISUAL_QA_ENABLED", "true").lower() == "false":
        return False

    tool_name = hook_input.get("tool_name", "")
    tool_input = hook_input.get("tool_input", {})

    # Run after Bash commands that look like visual/storybook test runs.
    if tool_name == "Bash":
        command = tool_input.get("command", "")

        # Substring triggers; note "visual" and "screenshot" are broad and
        # match any command containing those words.
        visual_triggers = [
            "storybook",
            "test-storybook",
            "chromatic",
            "playwright test --project=visual",
            "visual",
            "screenshot"
        ]

        if any(trigger in command.lower() for trigger in visual_triggers):
            return True

    # Run after a Task that used the visual-analyzer subagent.
    if tool_name == "Task":
        subagent_type = tool_input.get("subagent_type", "")
        if subagent_type == "visual-analyzer":
            return True

    return False
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
def parse_visual_issues(hook_input: dict) -> list:
    """Extract candidate visual-issue lines from the tool output.

    Pass 1 keeps lines mentioning a known visual concern together with an
    issue/warning/error/fail marker (duplicates allowed). Pass 2 adds any
    line carrying a severity emoji or the word "warning" that was not
    already collected. The result is capped at 10 entries.
    """
    tool_result = hook_input.get("tool_result", {})
    combined = tool_result.get("stdout", "") + "\n" + tool_result.get("message", "")

    keywords = (
        "touch target",
        "contrast",
        "overflow",
        "clipping",
        "alignment",
        "spacing",
        "typography",
        "safe area",
        "layout issue",
        "responsive",
        "accessibility",
        "wcag",
    )
    markers = ("issue", "warning", "error", "fail")

    found = []
    all_lines = combined.split("\n")

    # Pass 1: concern keyword AND a problem marker on the same line.
    for raw in all_lines:
        lowered = raw.lower()
        if any(kw in lowered for kw in keywords) and any(m in lowered for m in markers):
            found.append(raw.strip())

    # Pass 2: severity markers, deduplicated against what pass 1 found.
    for raw in all_lines:
        if "⚠️" in raw or "❌" in raw or "warning" in raw.lower():
            stripped = raw.strip()
            if stripped and stripped not in found:
                found.append(stripped)

    return found[:10]  # Limit to 10 issues
|
|
194
|
+
|
|
195
|
+
|
|
196
|
+
def format_issues_for_context(issues: list, iteration: int) -> str:
    """Render found issues as a fix-it prompt for injection into agent context.

    Returns an empty string when there is nothing to fix.
    """
    if not issues:
        return ""

    divider = "=" * 60
    numbered = [f"  {idx}. {item}" for idx, item in enumerate(issues, 1)]

    header = [
        "",
        divider,
        "VISUAL QA ISSUES TO FIX (Ralph Wiggum Loop)",
        divider,
        "",
        f"Iteration {iteration}/{MAX_ITERATIONS}",
        "",
        "The following visual issues were found by AI analysis.",
        "Please fix ALL issues, then re-run visual tests.",
        "The QA will re-run automatically.",
        "",
        "ISSUES:",
    ]
    footer = [
        "",
        "Common fixes:",
        "  - Touch targets: Add min-h-[44px] min-w-[44px]",
        "  - Contrast: Check text color against background",
        "  - Safe areas: Use safe-area-inset-* CSS",
        "  - Overflow: Add overflow-hidden or adjust sizing",
        "",
        "After fixing, run: /test-visual",
        divider,
        ""
    ]

    return "\n".join(header + numbered + footer)
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
def main():
    """Main hook entry point with Ralph Wiggum loop pattern.

    Reads the PostToolUse hook payload from stdin and either passes
    through ({"continue": True}), declares the loop clean, or blocks the
    workflow ({"continue": False}) with a formatted list of issues to fix.
    """
    # Read hook input; malformed stdin degrades to an empty payload
    # (which should_run_visual_qa treats as "do not run").
    try:
        hook_input = json.loads(sys.stdin.read())
    except json.JSONDecodeError:
        hook_input = {}

    # Check if we should run
    if not should_run_visual_qa(hook_input):
        print(json.dumps({"continue": True}))
        return

    # Load current visual QA state
    visual_state = load_visual_state()

    # Increment iteration
    visual_state["iteration"] += 1
    iteration = visual_state["iteration"]

    # Check max iterations: give up gracefully, emit the completion
    # promise anyway, and reset the loop state.
    if iteration > MAX_ITERATIONS:
        output = f"""
================================================================================
VISUAL QA - MAX ITERATIONS REACHED ({MAX_ITERATIONS})
================================================================================
Proceeding with remaining warnings. Consider reviewing manually.

<promise>VISUAL_CLEAN</promise>
"""
        print(json.dumps({
            "continue": True,
            "message": output
        }))
        clear_visual_state()
        return

    # Track timing
    if iteration == 1:
        visual_state["started_at"] = datetime.now().isoformat()
    visual_state["last_check_at"] = datetime.now().isoformat()

    # Parse issues from the visual test output
    issues = parse_visual_issues(hook_input)
    issue_count = len(issues)

    update_workflow_state(issue_count, iteration)

    if issue_count == 0:
        # All clean! Emit promise and proceed
        visual_state["status"] = "complete"
        # NOTE(review): this save is immediately undone by
        # clear_visual_state() below — presumably harmless, but confirm.
        save_visual_state(visual_state)

        output = f"""
================================================================================
VISUAL QA LOOP COMPLETE (Iteration {iteration}/{MAX_ITERATIONS})
================================================================================
All visual checks passed!
- Layout: ✅
- Typography: ✅
- Touch Targets: ✅
- Safe Areas: ✅
- Brand Consistency: ✅

Proceeding to next phase.

<promise>VISUAL_CLEAN</promise>
"""
        print(json.dumps({
            "continue": True,
            "message": output
        }))
        clear_visual_state()
        return

    # Issues found - save state and provide context for fixes
    visual_state["status"] = "needs_fixing"
    visual_state["issues_found"] = issues
    save_visual_state(visual_state)

    # Format issues as context
    issues_context = format_issues_for_context(issues, iteration)

    output = f"""
================================================================================
VISUAL QA LOOP - ITERATION {iteration}/{MAX_ITERATIONS}
================================================================================
{issue_count} visual issue(s) found. Fix them and re-run visual tests.
{issues_context}
"""

    # Block workflow - agent needs to fix issues
    print(json.dumps({
        "continue": False,  # Block until fixed
        "message": output,
        "issues_count": issue_count,
        "iteration": iteration,
        "action_required": True,
        "next_action": "Fix the visual issues above, then run /test-visual"
    }))
|
|
335
|
+
|
|
336
|
+
|
|
337
|
+
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
|
@@ -0,0 +1,323 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Hook: Stop
|
|
4
|
+
Purpose: Save session to .claude/api-sessions/ for later review
|
|
5
|
+
|
|
6
|
+
This hook runs when a Claude Code session ends (Stop event).
|
|
7
|
+
It saves the session data for the completed workflow including:
|
|
8
|
+
- State snapshot at completion
|
|
9
|
+
- Files created during the workflow
|
|
10
|
+
- Summary of phases completed
|
|
11
|
+
- Research sources used
|
|
12
|
+
- Interview decisions made
|
|
13
|
+
|
|
14
|
+
Added in v3.6.7 for session logging support.
|
|
15
|
+
|
|
16
|
+
Returns:
|
|
17
|
+
- JSON with session save info
|
|
18
|
+
"""
|
|
19
|
+
import json
|
|
20
|
+
import sys
|
|
21
|
+
import os
|
|
22
|
+
from datetime import datetime
|
|
23
|
+
from pathlib import Path
|
|
24
|
+
import shutil
|
|
25
|
+
|
|
26
|
+
# Import shared utilities for NTFY
|
|
27
|
+
try:
|
|
28
|
+
from hook_utils import send_ntfy_notification
|
|
29
|
+
HAS_NTFY = True
|
|
30
|
+
except ImportError:
|
|
31
|
+
HAS_NTFY = False
|
|
32
|
+
|
|
33
|
+
STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
|
|
34
|
+
SESSIONS_DIR = Path(__file__).parent.parent / "api-sessions"
|
|
35
|
+
RESEARCH_DIR = Path(__file__).parent.parent / "research"
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def get_active_endpoint(state):
    """Get the active endpoint - supports both old and new state formats.

    Returns a ``(name, data)`` pair, or ``(None, None)`` when no endpoint
    is active. New-format states (v3.6.7+) keep per-endpoint data under
    ``endpoints`` with an ``active_endpoint`` pointer; old-format states
    store a single ``endpoint`` name with the data inline.
    """
    # New format: dedicated endpoints map plus an active pointer. The
    # old-format fallback is deliberately skipped for new-format states,
    # even when the pointer is stale or missing from the map.
    if "endpoints" in state and "active_endpoint" in state:
        pointer = state.get("active_endpoint")
        registry = state["endpoints"]
        if pointer and pointer in registry:
            return pointer, registry[pointer]
        return None, None

    # Old format: the state dict itself is the endpoint data.
    name = state.get("endpoint")
    return (name, state) if name else (None, None)
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def get_completed_phases(endpoint_data):
    """Return completed phase names, in canonical workflow order."""
    phases = endpoint_data.get("phases", {})
    phase_order = (
        "disambiguation", "scope", "research_initial", "interview",
        "research_deep", "schema_creation", "environment_check",
        "tdd_red", "tdd_green", "verify", "tdd_refactor", "documentation", "completion",
    )
    return [
        name
        for name in phase_order
        if phases.get(name, {}).get("status") == "complete"
    ]
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def get_files_created(endpoint_data):
    """Return the files recorded as created during this workflow.

    Collects paths from the completion, schema_creation, tdd_red and
    tdd_green phases, deduplicated while preserving first-seen order.

    Fix: the original used ``list(set(files))``, which produced a
    nondeterministic ordering in files-created.txt and summary.md;
    ``dict.fromkeys`` dedupes while keeping insertion order.
    """
    phases = endpoint_data.get("phases", {})
    files = []

    # From completion phase (may list many files)
    files.extend(phases.get("completion", {}).get("files_created", []))

    # Single-file entries recorded by the schema and TDD phases
    for phase_name, key in (
        ("schema_creation", "schema_file"),
        ("tdd_red", "test_file"),
        ("tdd_green", "implementation_file"),
    ):
        value = phases.get(phase_name, {}).get(key)
        if value:
            files.append(value)

    # Order-preserving deduplication
    return list(dict.fromkeys(files))
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def generate_summary(endpoint, endpoint_data, state, timestamp=None):
    """Generate a markdown summary of the session.

    Args:
        endpoint: Active endpoint name.
        endpoint_data: Per-endpoint state (phases, decisions, metadata).
        state: Full workflow state (currently unused; kept for interface
            stability).
        timestamp: Optional session timestamp to embed in the footer path.
            Defaults to None, which keeps the literal "{timestamp}"
            placeholder — the original code's escaped ``{{timestamp}}``
            always rendered literally, so the footer never showed the
            real timestamp. Callers that know it can now pass it.

    Returns:
        The summary as a markdown string.
    """
    completed = get_completed_phases(endpoint_data)
    files = get_files_created(endpoint_data)
    decisions = endpoint_data.get("phases", {}).get("interview", {}).get("decisions", {})

    lines = [
        f"# Session Summary: {endpoint}",
        "",
        f"*Generated: {datetime.now().isoformat()}*",
        "",
        "## Overview",
        "",
        f"- **Endpoint:** {endpoint}",
        f"- **Library:** {endpoint_data.get('library', 'N/A')}",
        f"- **Started:** {endpoint_data.get('started_at', 'N/A')}",
        f"- **Completed Phases:** {len(completed)}/13",
        f"- **Status:** {endpoint_data.get('status', 'unknown')}",
        "",
        "## Phases Completed",
        ""
    ]

    for i, phase in enumerate(completed, 1):
        lines.append(f"{i}. {phase.replace('_', ' ').title()}")

    lines.extend([
        "",
        "## Files Created",
        ""
    ])

    for f in files:
        lines.append(f"- `{f}`")

    if decisions:
        lines.extend([
            "",
            "## Interview Decisions",
            ""
        ])
        for key, value in decisions.items():
            # Decisions store their answer under "response" or "value".
            response = value.get("response", value.get("value", "N/A"))
            lines.append(f"- **{key}:** {response}")

    lines.extend([
        "",
        "## Research Sources",
        ""
    ])

    # Check for research cache
    research_path = RESEARCH_DIR / endpoint / "sources.json"
    if research_path.exists():
        try:
            sources = json.loads(research_path.read_text())
            for src in sources.get("sources", [])[:10]:  # Limit to 10
                url = src.get("url", src.get("query", ""))
                if url:
                    lines.append(f"- {url}")
        except (json.JSONDecodeError, IOError):
            lines.append("- (sources.json not readable)")
    else:
        lines.append("- (no sources.json found)")

    # Embed the real timestamp when supplied; otherwise keep the
    # historical literal "{timestamp}" placeholder for compatibility.
    footer_stamp = timestamp if timestamp is not None else "{timestamp}"
    lines.extend([
        "",
        "---",
        "",
        f"*Session saved to: .claude/api-sessions/{endpoint}_{footer_stamp}/*"
    ])

    return "\n".join(lines)
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
def save_session(endpoint, endpoint_data, state):
    """Persist a session snapshot under .claude/api-sessions/.

    Writes a state snapshot, the created-files list, a markdown summary,
    and a copy of any cached research, then registers the session in the
    index. Returns the session directory Path.
    """
    stamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    session_dir = SESSIONS_DIR / f"{endpoint}_{stamp}"
    session_dir.mkdir(parents=True, exist_ok=True)

    # 1. State snapshot: endpoint data plus a few global counters.
    snapshot = {
        "saved_at": datetime.now().isoformat(),
        "endpoint": endpoint,
        "endpoint_data": endpoint_data,
        "turn_count": state.get("turn_count", 0),
        "research_queries": state.get("research_queries", [])
    }
    (session_dir / "state-snapshot.json").write_text(json.dumps(snapshot, indent=2))

    # 2. Plain-text list of files created during the workflow.
    (session_dir / "files-created.txt").write_text("\n".join(get_files_created(endpoint_data)))

    # 3. Human-readable markdown summary.
    (session_dir / "summary.md").write_text(generate_summary(endpoint, endpoint_data, state))

    # 4. Copy the research cache (top-level files only), if present.
    research_src = RESEARCH_DIR / endpoint
    if research_src.exists():
        research_dst = session_dir / "research-cache"
        research_dst.mkdir(exist_ok=True)
        for entry in research_src.iterdir():
            if entry.is_file():
                shutil.copy2(entry, research_dst / entry.name)

    # 5. Register the session in the index.
    update_sessions_index(endpoint, stamp, endpoint_data)

    return session_dir
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
def update_sessions_index(endpoint, timestamp, endpoint_data):
    """Append this session to the sessions index (index.json)."""
    index_file = SESSIONS_DIR / "index.json"

    # Start from the existing index; fall back to an empty one when the
    # file is absent or holds invalid JSON.
    index = {"version": "3.6.7", "sessions": []}
    if index_file.exists():
        try:
            index = json.loads(index_file.read_text())
        except json.JSONDecodeError:
            pass

    index["sessions"].append({
        "endpoint": endpoint,
        "timestamp": timestamp,
        "folder": f"{endpoint}_{timestamp}",
        "status": endpoint_data.get("status", "unknown"),
        "phases_completed": len(get_completed_phases(endpoint_data)),
        "created_at": datetime.now().isoformat()
    })

    index_file.write_text(json.dumps(index, indent=2))
|
|
238
|
+
|
|
239
|
+
|
|
240
|
+
def main():
    """Stop-hook entry point: snapshot the session if it made progress.

    Reads the hook payload from stdin (only to validate it), bails out
    with {"continue": True} when there is no usable state, and otherwise
    saves the session and reports the outcome via hookSpecificOutput.
    Always exits 0 so the hook never blocks session shutdown.
    """
    # NOTE(review): input_data is never used after parsing — presumably
    # this read only drains/validates the hook payload; confirm.
    try:
        input_data = json.load(sys.stdin)
    except json.JSONDecodeError:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Check if state file exists
    if not STATE_FILE.exists():
        print(json.dumps({"continue": True}))
        sys.exit(0)

    try:
        state = json.loads(STATE_FILE.read_text())
    except json.JSONDecodeError:
        # Corrupt state: nothing sensible to save.
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Get active endpoint
    endpoint, endpoint_data = get_active_endpoint(state)
    if not endpoint or not endpoint_data:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Only save if there's meaningful progress
    completed = get_completed_phases(endpoint_data)
    if len(completed) < 2:
        # Not enough progress to save
        print(json.dumps({
            "hookSpecificOutput": {
                "sessionSaved": False,
                "reason": "Not enough progress to save (need at least 2 completed phases)"
            }
        }))
        sys.exit(0)

    # Save the session; any failure below is reported, never raised.
    try:
        session_dir = save_session(endpoint, endpoint_data, state)

        # Send NTFY notification on session end (only when hook_utils
        # imported successfully at module load).
        if HAS_NTFY:
            status = endpoint_data.get("status", "unknown")
            if status == "complete":
                send_ntfy_notification(
                    title=f"✅ Session Complete: {endpoint}",
                    message=f"Completed {len(completed)}/13 phases. Session saved.",
                    priority="default",
                    tags=["white_check_mark", "robot"]
                )
            else:
                send_ntfy_notification(
                    title=f"📋 Session Ended: {endpoint}",
                    message=f"Completed {len(completed)}/13 phases. Status: {status}",
                    priority="low",
                    tags=["clipboard", "robot"]
                )

        output = {
            "hookSpecificOutput": {
                "sessionSaved": True,
                "endpoint": endpoint,
                "sessionDir": str(session_dir),
                "phasesCompleted": len(completed),
                "notificationSent": HAS_NTFY
            }
        }

        print(json.dumps(output))
        sys.exit(0)

    except Exception as e:
        # Broad catch is deliberate at this top-level hook boundary: the
        # error is surfaced in the hook output instead of crashing Stop.
        output = {
            "hookSpecificOutput": {
                "sessionSaved": False,
                "error": str(e)
            }
        }
        print(json.dumps(output))
        sys.exit(0)
|
|
320
|
+
|
|
321
|
+
|
|
322
|
+
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()
|