@hustle-together/api-dev-tools 3.12.3 → 3.12.16
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/commands/hustle-build.md +259 -0
- package/.claude/commands/hustle-combine.md +1089 -0
- package/.claude/commands/hustle-ui-create-page.md +1078 -0
- package/.claude/commands/hustle-ui-create.md +1058 -0
- package/.claude/hooks/auto-answer.py +305 -0
- package/.claude/hooks/cache-research.py +337 -0
- package/.claude/hooks/check-api-routes.py +168 -0
- package/.claude/hooks/check-playwright-setup.py +103 -0
- package/.claude/hooks/check-storybook-setup.py +81 -0
- package/.claude/hooks/check-update.py +132 -0
- package/.claude/hooks/completion-promise-detector.py +293 -0
- package/.claude/hooks/context-capacity-warning.py +171 -0
- package/.claude/hooks/detect-interruption.py +165 -0
- package/.claude/hooks/docs-update-check.py +120 -0
- package/.claude/hooks/enforce-a11y-audit.py +202 -0
- package/.claude/hooks/enforce-brand-guide.py +241 -0
- package/.claude/hooks/enforce-component-type-confirm.py +97 -0
- package/.claude/hooks/enforce-dry-run.py +134 -0
- package/.claude/hooks/enforce-freshness.py +184 -0
- package/.claude/hooks/enforce-page-components.py +186 -0
- package/.claude/hooks/enforce-page-data-schema.py +155 -0
- package/.claude/hooks/enforce-questions-sourced.py +146 -0
- package/.claude/hooks/enforce-schema-from-interview.py +248 -0
- package/.claude/hooks/enforce-ui-disambiguation.py +108 -0
- package/.claude/hooks/enforce-ui-interview.py +130 -0
- package/.claude/hooks/generate-adr-options.py +282 -0
- package/.claude/hooks/generate-manifest-entry.py +1161 -0
- package/.claude/hooks/hook_utils.py +609 -0
- package/.claude/hooks/lib/__init__.py +1 -0
- package/.claude/hooks/lib/__pycache__/__init__.cpython-314.pyc +0 -0
- package/.claude/hooks/lib/__pycache__/greptile.cpython-314.pyc +0 -0
- package/.claude/hooks/lib/__pycache__/ntfy.cpython-314.pyc +0 -0
- package/.claude/hooks/lib/greptile.py +355 -0
- package/.claude/hooks/lib/ntfy.py +209 -0
- package/.claude/hooks/notify-input-needed.py +73 -0
- package/.claude/hooks/notify-phase-complete.py +90 -0
- package/.claude/hooks/ntfy-on-question.py +240 -0
- package/.claude/hooks/orchestrator-completion.py +313 -0
- package/.claude/hooks/orchestrator-handoff.py +267 -0
- package/.claude/hooks/orchestrator-session-startup.py +146 -0
- package/.claude/hooks/parallel-orchestrator.py +451 -0
- package/.claude/hooks/project-document-prompt.py +302 -0
- package/.claude/hooks/remote-question-proxy.py +284 -0
- package/.claude/hooks/remote-question-server.py +1224 -0
- package/.claude/hooks/run-code-review.py +393 -0
- package/.claude/hooks/run-visual-qa.py +338 -0
- package/.claude/hooks/session-logger.py +323 -0
- package/.claude/hooks/test-orchestrator-reground.py +248 -0
- package/.claude/hooks/track-scope-coverage.py +220 -0
- package/.claude/hooks/track-token-usage.py +121 -0
- package/.claude/hooks/update-adr-decision.py +236 -0
- package/.claude/hooks/update-api-showcase.py +161 -0
- package/.claude/hooks/update-registry.py +352 -0
- package/.claude/hooks/update-testing-checklist.py +195 -0
- package/.claude/hooks/update-ui-showcase.py +224 -0
- package/.claude/settings.local.json +7 -1
- package/.claude/test-auto-answer-bot.py +183 -0
- package/.claude/test-completion-detector.py +263 -0
- package/.claude/test-orchestrator-state.json +20 -0
- package/.claude/test-orchestrator.sh +271 -0
- package/.skills/api-create/SKILL.md +88 -3
- package/.skills/docs-sync/SKILL.md +260 -0
- package/.skills/hustle-build/SKILL.md +459 -0
- package/.skills/hustle-build-review/SKILL.md +518 -0
- package/CHANGELOG.md +87 -0
- package/README.md +86 -9
- package/bin/cli.js +1302 -88
- package/commands/hustle-api-create.md +22 -0
- package/commands/hustle-combine.md +81 -2
- package/commands/hustle-ui-create-page.md +84 -2
- package/commands/hustle-ui-create.md +82 -2
- package/hooks/auto-answer.py +228 -0
- package/hooks/check-update.py +132 -0
- package/hooks/ntfy-on-question.py +227 -0
- package/hooks/orchestrator-completion.py +313 -0
- package/hooks/orchestrator-handoff.py +189 -0
- package/hooks/orchestrator-session-startup.py +146 -0
- package/hooks/periodic-reground.py +230 -67
- package/hooks/update-api-showcase.py +13 -1
- package/hooks/update-ui-showcase.py +13 -1
- package/package.json +7 -3
- package/scripts/extract-schema-docs.cjs +322 -0
- package/templates/CLAUDE-SECTION.md +89 -64
- package/templates/api-showcase/_components/APIModal.tsx +100 -8
- package/templates/api-showcase/_components/APIShowcase.tsx +36 -4
- package/templates/api-showcase/_components/APITester.tsx +367 -58
- package/templates/docs/page.tsx +230 -0
- package/templates/hustle-build-defaults.json +84 -0
- package/templates/hustle-dev-dashboard/page.tsx +365 -0
- package/templates/playwright-report/page.tsx +258 -0
- package/templates/settings.json +88 -7
- package/templates/test-results/page.tsx +237 -0
- package/templates/typedoc.json +19 -0
- package/templates/ui-showcase/_components/UIShowcase.tsx +1 -1
- package/templates/ui-showcase/page.tsx +1 -1
- package/.claude/api-dev-state.json +0 -466
|
@@ -0,0 +1,189 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Orchestrator handoff hook.
|
|
4
|
+
|
|
5
|
+
When a Skill is invoked, this hook checks if we're in an orchestrated build
|
|
6
|
+
and injects shared_decisions into the sub-workflow's state.
|
|
7
|
+
|
|
8
|
+
Hook Type: PreToolUse (matcher: Skill)
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import json
|
|
12
|
+
import os
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
from datetime import datetime
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def load_build_state():
    """Return the hustle-build orchestration state, or None if unavailable.

    Best-effort read of .claude/hustle-build-state.json beneath
    CLAUDE_PROJECT_DIR (default "."); any read or parse failure yields
    None rather than raising.
    """
    base = Path(os.environ.get("CLAUDE_PROJECT_DIR", "."))
    try:
        return json.loads((base / ".claude" / "hustle-build-state.json").read_text())
    except Exception:
        return None
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def load_api_state():
    """Return the api-dev workflow state dict.

    Reads .claude/api-dev-state.json beneath CLAUDE_PROJECT_DIR; a
    missing, unreadable, or invalid file yields an empty dict so callers
    can always treat the result as mutable state.
    """
    base = Path(os.environ.get("CLAUDE_PROJECT_DIR", "."))
    try:
        return json.loads((base / ".claude" / "api-dev-state.json").read_text())
    except Exception:
        return {}
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
def save_api_state(state):
    """Persist *state* as pretty-printed JSON to .claude/api-dev-state.json.

    Returns True on success, False on any serialization or write failure
    (e.g. the .claude directory does not exist).
    """
    target = (
        Path(os.environ.get("CLAUDE_PROJECT_DIR", "."))
        / ".claude"
        / "api-dev-state.json"
    )
    try:
        target.write_text(json.dumps(state, indent=2))
    except Exception:
        return False
    return True
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def get_skill_name(tool_input):
    """Return the "skill" field from a JSON tool-input string.

    Any failure — malformed JSON, or a payload that is not an object —
    yields the empty string.
    """
    try:
        parsed = json.loads(tool_input)
        skill = parsed.get("skill", "")
    except Exception:
        skill = ""
    return skill
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
def main():
    """PreToolUse hook (matcher: Skill): inject orchestrator decisions.

    When a workflow Skill is invoked inside an active /hustle-build run,
    pre-fill the api-dev interview state with the build's shared
    decisions, append the handoff to the per-build log, and tell Claude
    (via additionalContext) which questions not to re-ask. Always prints
    a hook-result JSON object to stdout.
    """
    tool_input = os.environ.get("CLAUDE_TOOL_INPUT", "{}")

    # Only act on skills that start a tracked sub-workflow.
    skill_name = get_skill_name(tool_input)
    workflow_skills = [
        "api-create", "hustle-ui-create", "hustle-ui-create-page",
        "hustle-combine", "red", "green", "refactor", "cycle"
    ]
    if skill_name not in workflow_skills:
        print(json.dumps({"continue": True}))
        return

    # Only act when an orchestrated build is actually running.
    build_state = load_build_state()
    if not build_state or build_state.get("status") != "in_progress":
        print(json.dumps({"continue": True}))
        return

    shared_decisions = build_state.get("shared_decisions", {})
    mode = build_state.get("mode", "interactive")

    # Nothing to hand off: no shared decisions and not auto mode.
    if not shared_decisions and mode != "auto":
        print(json.dumps({"continue": True}))
        return

    # Inject orchestration metadata into the sub-workflow's state.
    api_state = load_api_state()
    api_state["orchestrated"] = True
    api_state["build_id"] = build_state.get("build_id")
    api_state["mode"] = mode

    api_state.setdefault("phases", {})
    api_state["phases"].setdefault(
        "interview", {"status": "not_started", "decisions": {}}
    )

    # Orchestrator decision key -> interview decision key.
    decision_mappings = {
        "auth_required": "authentication",
        "error_handling": "error_strategy",
        "brand_guide": "use_brand_guide",
        "testing_level": "testing_thoroughness",
        "caching_strategy": "caching"
    }
    for shared_key, interview_key in decision_mappings.items():
        if shared_key in shared_decisions:
            api_state["phases"]["interview"]["decisions"][interview_key] = \
                shared_decisions[shared_key]

    # Record which decisions were injected so the sub-workflow knows
    # not to re-ask them.
    api_state["shared_decisions_applied"] = list(shared_decisions.keys())
    save_api_state(api_state)

    # NOTE: a previous revision scanned build_state["decomposition"] here
    # to find the workflow being started, but the loop had no effect
    # (the invoked skill updates its own status) — removed as dead code.

    # Append this handoff to the per-build log (best-effort: a corrupt
    # or unwritable log must not block the skill invocation).
    project_dir = os.environ.get("CLAUDE_PROJECT_DIR", ".")
    logs_dir = Path(project_dir) / ".claude" / "workflow-logs"
    logs_dir.mkdir(parents=True, exist_ok=True)
    log_file = logs_dir / f"{build_state.get('build_id', 'unknown')}.json"
    try:
        if log_file.exists():
            log = json.loads(log_file.read_text())
        else:
            log = {"handoffs": []}

        log["handoffs"].append({
            "timestamp": datetime.now().isoformat(),
            "skill": skill_name,
            "shared_decisions_applied": list(shared_decisions.keys()),
            "mode": mode
        })

        log_file.write_text(json.dumps(log, indent=2))
    except Exception:
        pass

    # Tell Claude which decisions are pre-answered.
    context = f"""
## Orchestrated Workflow

This workflow is part of a larger build: **{build_state.get('build_id')}**

### Pre-Filled Decisions (from orchestrator):
{json.dumps(shared_decisions, indent=2)}

These decisions are already applied. **Do not re-ask** questions about:
{', '.join(shared_decisions.keys())}

Only ask workflow-specific questions not covered above.
"""

    result = {
        "continue": True,
        "additionalContext": context
    }

    print(json.dumps(result))
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
if __name__ == "__main__":
|
|
189
|
+
main()
|
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Orchestrator session startup hook.
|
|
4
|
+
|
|
5
|
+
Injects hustle-build orchestration state into context at session start.
|
|
6
|
+
This ensures Claude has awareness of multi-workflow builds in progress.
|
|
7
|
+
|
|
8
|
+
Hook Type: SessionStart
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import json
|
|
12
|
+
import os
|
|
13
|
+
from pathlib import Path
|
|
14
|
+
from datetime import datetime
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def load_build_state():
    """Load the hustle-build orchestration state.

    Returns the parsed JSON dict from .claude/hustle-build-state.json
    (relative to CLAUDE_PROJECT_DIR), or None when the file is absent
    or cannot be read/parsed.
    """
    state_path = (
        Path(os.environ.get("CLAUDE_PROJECT_DIR", "."))
        / ".claude"
        / "hustle-build-state.json"
    )
    if not state_path.exists():
        return None
    try:
        return json.loads(state_path.read_text())
    except Exception:
        return None
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
def format_workflow_status(workflows):
    """Render the sub-workflow list as indented status lines.

    Each entry shows a status emoji, bracketed type, name, and any
    declared dependencies. Returns a placeholder string when the list
    is empty.
    """
    if not workflows:
        return "No sub-workflows defined yet."

    emoji_for = {
        "complete": "✅",
        "in_progress": "🔄",
        "pending": "⏳",
        "failed": "❌",
    }

    rendered = []
    for entry in workflows:
        # Unknown statuses render like "pending".
        emoji = emoji_for.get(entry.get("status", "pending"), "⏳")
        text = f" {emoji} [{entry.get('type', 'unknown')}] {entry.get('name', 'unnamed')}"
        deps = entry.get("depends_on", [])
        if deps:
            text = f"{text} (depends on: {', '.join(deps)})"
        rendered.append(text)
    return "\n".join(rendered)
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def format_shared_decisions(decisions):
    """Render the shared-decisions mapping as "- key: value" bullet lines.

    Returns a placeholder string when the mapping is empty or falsy.
    """
    if not decisions:
        return "No shared decisions configured."
    return "\n".join(f" - {key}: {value}" for key, value in decisions.items())
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def main():
    """SessionStart hook: surface an in-progress hustle-build to Claude.

    If .claude/hustle-build-state.json records a build that is
    in_progress or paused, print a hook result whose additionalContext
    summarizes the build (progress, active workflow, shared decisions);
    otherwise just continue silently. Always prints exactly one
    hook-result JSON object to stdout.
    """
    state = load_build_state()
    if not state:
        # No active build, continue normally.
        print(json.dumps({"continue": True}))
        return

    status = state.get("status", "unknown")
    if status not in ["in_progress", "paused"]:
        print(json.dumps({"continue": True}))
        return

    build_id = state.get("build_id", "unknown")
    mode = state.get("mode", "interactive")
    request = state.get("request", {}).get("original", "Unknown request")

    # Flatten the decomposition into one list, tagging each workflow
    # with its singular category name. Copies are used so the loaded
    # state dicts are not mutated as a side effect.
    decomposition = state.get("decomposition", {})
    all_workflows = []
    for wf_type in ["apis", "components", "combined_apis", "pages"]:
        for wf in decomposition.get(wf_type, []):
            all_workflows.append({**wf, "type": wf_type.rstrip("s")})

    completed = len([w for w in all_workflows if w.get("status") == "complete"])
    total = len(all_workflows)

    active = state.get("active_sub_workflow", {})
    active_name = active.get("name", "None")
    active_type = active.get("type", "unknown")

    shared_decisions = state.get("shared_decisions", {})

    # NOTE: the pause check above reads the "status" field, so the
    # instruction below says status (not mode) — previously misleading.
    context = f"""
## Hustle Build In Progress

**Build ID:** {build_id}
**Mode:** {mode}
**Original Request:** "{request}"

### Progress: {completed}/{total} workflows complete

**Currently Active:** [{active_type}] {active_name}

### Sub-Workflows:
{format_workflow_status(all_workflows)}

### Shared Decisions (applied to all):
{format_shared_decisions(shared_decisions)}

---

**Commands:**
- Continue current workflow
- `/hustle-build-review {build_id}` - View build log
- Set `status: "paused"` in state to pause

"""

    result = {
        "continue": True,
        "additionalContext": context
    }

    print(json.dumps(result))
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
if __name__ == "__main__":
|
|
146
|
+
main()
|
|
@@ -3,14 +3,22 @@
|
|
|
3
3
|
Hook: PostToolUse (for periodic re-grounding)
|
|
4
4
|
Purpose: Inject context reminders every N turns to prevent context dilution
|
|
5
5
|
|
|
6
|
-
This hook tracks turn count and periodically injects a summary of:
|
|
6
|
+
This hook tracks turn count and periodically injects a comprehensive summary of:
|
|
7
7
|
- Current endpoint and phase
|
|
8
8
|
- Key decisions from interview
|
|
9
|
+
- Existing registry elements (APIs, components, pages)
|
|
10
|
+
- Deferred features (don't re-suggest)
|
|
11
|
+
- Last test status
|
|
12
|
+
- Brand guide status
|
|
9
13
|
- Research cache status
|
|
10
|
-
-
|
|
14
|
+
- Orchestrator context (if in /hustle-build)
|
|
11
15
|
|
|
12
16
|
The goal is to keep Claude grounded during long sessions where
|
|
13
|
-
the original CLAUDE.md context may get diluted.
|
|
17
|
+
the original CLAUDE.md context may get diluted ("lost in the middle").
|
|
18
|
+
|
|
19
|
+
Based on best practices from:
|
|
20
|
+
- Manus: "Manipulate Attention Through Recitation"
|
|
21
|
+
- Sankalp: "Context as limited attention budget"
|
|
14
22
|
|
|
15
23
|
Configuration:
|
|
16
24
|
- REGROUND_INTERVAL: Number of turns between re-grounding (default: 7)
|
|
@@ -27,8 +35,221 @@ from pathlib import Path
|
|
|
27
35
|
# Configuration
|
|
28
36
|
REGROUND_INTERVAL = 7 # Re-ground every N turns
|
|
29
37
|
|
|
30
|
-
# State
|
|
31
|
-
|
|
38
|
+
# State files (in .claude/ directory)
|
|
39
|
+
PROJECT_DIR = Path(os.environ.get("CLAUDE_PROJECT_DIR", "."))
|
|
40
|
+
STATE_FILE = PROJECT_DIR / ".claude" / "api-dev-state.json"
|
|
41
|
+
REGISTRY_FILE = PROJECT_DIR / ".claude" / "registry.json"
|
|
42
|
+
BUILD_STATE_FILE = PROJECT_DIR / ".claude" / "hustle-build-state.json"
|
|
43
|
+
BRAND_GUIDE_FILE = PROJECT_DIR / ".claude" / "BRAND_GUIDE.md"
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def load_json_file(filepath):
    """Best-effort JSON load.

    Returns the parsed object from *filepath* (a Path), or None when the
    file is missing, unreadable, or not valid JSON.
    """
    if filepath.exists():
        try:
            return json.loads(filepath.read_text())
        # Catch-all by design: a hook must never crash the session over
        # a bad state file. (Previously `(json.JSONDecodeError, Exception)`
        # — the tuple was redundant since Exception subsumes it.)
        except Exception:
            pass
    return None
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
def format_list(items, max_items=5, max_chars=80):
    """Join items into a short comma-separated preview string.

    Shows at most *max_items* entries (each capped at 20 chars), appends
    a "(+N more)" suffix for any overflow, and hard-caps the whole
    result at *max_chars*. Empty/falsy input yields the literal string
    "None".
    """
    if not items:
        return "None"
    shown = [str(entry)[:20] for entry in list(items)[:max_items]]
    preview = ", ".join(shown)
    overflow = len(items) - max_items
    if overflow > 0:
        preview = f"{preview} (+{overflow} more)"
    return preview[:max_chars]
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def get_registry_summary(registry):
    """Map each non-empty registry category to its list of entry names.

    Considers the "apis", "components", "pages" and "combined"
    categories. Returns None when the registry is falsy or no category
    has entries.
    """
    if not registry:
        return None
    summary = {
        category: list(registry[category].keys())
        for category in ("apis", "components", "pages", "combined")
        if registry.get(category)
    }
    return summary or None
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
def get_test_status(state):
    """Summarize the most recent test run recorded in *state*.

    Returns a dict with pass/fail counts, total, an overall GREEN/RED
    status (GREEN only when failed == 0), and the run timestamp — or
    None when no run is recorded or both counts are zero/falsy.
    """
    run = state.get("last_test_run", {})
    if not run:
        return None

    passed = run.get("passed", 0)
    failed = run.get("failed", 0)
    if not (passed or failed):
        return None

    return {
        "passed": passed,
        "failed": failed,
        "total": passed + failed,
        "status": "GREEN" if failed == 0 else "RED",
        "timestamp": run.get("timestamp", ""),
    }
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def get_brand_guide_status():
    """Report whether the brand guide exists and mentions a primary color.

    Returns None when BRAND_GUIDE_FILE is absent; otherwise a dict with
    "exists" and (when readable) a "has_colors" flag set if any line
    contains both "primary" (case-insensitive) and a "#" — a heuristic
    for a primary hex-color definition.
    """
    if not BRAND_GUIDE_FILE.exists():
        return None
    try:
        text = BRAND_GUIDE_FILE.read_text()
    except Exception:
        # Unreadable file: still report existence, but nothing else.
        return {"exists": True}
    has_primary = any(
        "primary" in line.lower() and "#" in line
        for line in text.split("\n")
    )
    return {"exists": True, "has_colors": has_primary}
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def get_orchestrator_status(build_state):
    """Summarize an active hustle-build: id, progress ratio, active workflow.

    Returns None unless *build_state* is truthy and its status is
    "in_progress" or "paused".
    """
    if not build_state:
        return None
    if build_state.get("status") not in ("in_progress", "paused"):
        return None

    decomposition = build_state.get("decomposition", {})
    total = 0
    completed = 0
    for group in ("apis", "components", "combined_apis", "pages"):
        entries = decomposition.get(group, [])
        total += len(entries)
        completed += sum(1 for w in entries if w.get("status") == "complete")

    active = build_state.get("active_sub_workflow", {})
    return {
        "build_id": build_state.get("build_id", "unknown"),
        "progress": f"{completed}/{total}",
        "active_type": active.get("type", "none"),
        "active_name": active.get("name", "none"),
    }
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
def build_reground_context(state, turn_count):
    """Assemble the periodic re-grounding reminder injected into context.

    Pulls together the active endpoint/phase, interview decisions,
    registry contents, deferred features, test and brand-guide status,
    research freshness, and any orchestrator build info into a single
    markdown string.
    """
    lines = [f"## Re-Grounding Reminder (Turn {turn_count})", ""]

    def add_section(*rows):
        # Optional sections are separated from the previous one by a blank line.
        lines.append("")
        lines.extend(rows)

    # --- Current workflow ---
    endpoint = state.get("endpoint", "unknown")
    lines.append(f"**Active Endpoint:** `{endpoint}`")

    phases = state.get("phases", {})
    phase_order = [
        "disambiguation", "scope", "research_initial", "interview",
        "research_deep", "schema_creation", "environment_check",
        "tdd_red", "tdd_green", "verify", "code_review", "tdd_refactor",
        "documentation", "completion"
    ]

    done = [
        name for name in phase_order
        if phases.get(name, {}).get("status", "not_started") == "complete"
    ]
    # The first in_progress phase wins; otherwise fall back to the first
    # phase that has not been started yet.
    active_phase = next(
        (name for name in phase_order
         if phases.get(name, {}).get("status", "not_started") == "in_progress"),
        None,
    )
    if active_phase is None:
        active_phase = next(
            (name for name in phase_order
             if phases.get(name, {}).get("status", "not_started") == "not_started"),
            None,
        )

    lines.append(f"**Current Phase:** {active_phase or 'completion'}")
    lines.append(f"**Completed:** {len(done)}/{len(phase_order)} phases")

    # --- Key decisions (first 5 from the interview phase) ---
    decisions = phases.get("interview", {}).get("decisions", {})
    if decisions:
        rows = ["**Key Decisions:**"]
        for key, value in list(decisions.items())[:5]:
            answer = value.get("value", value.get("response", "N/A"))
            if answer:
                rows.append(f" - {key}: {str(answer)[:40]}")
        add_section(*rows)

    # --- Registry summary ---
    summary = get_registry_summary(load_json_file(REGISTRY_FILE))
    if summary:
        rows = ["**Existing Elements (don't recreate):**"]
        if summary.get("apis"):
            rows.append(f" - APIs: {format_list(summary['apis'])}")
        if summary.get("components"):
            rows.append(f" - Components: {format_list(summary['components'])}")
        if summary.get("pages"):
            rows.append(f" - Pages: {format_list(summary['pages'])}")
        add_section(*rows)

    # --- Deferred features ---
    deferred = state.get("deferred_features", [])
    if deferred:
        add_section(f"**Deferred (don't re-suggest):** {format_list(deferred, max_items=3)}")

    # --- Last test run ---
    tests = get_test_status(state)
    if tests:
        label = "GREEN" if tests["status"] == "GREEN" else "RED"
        add_section(f"**Last Tests:** {label} ({tests['passed']} passed, {tests['failed']} failed)")

    # --- Brand guide ---
    brand = get_brand_guide_status()
    if brand and brand.get("exists"):
        add_section("**Brand Guide:** Active - use `.claude/BRAND_GUIDE.md` for styling")

    # --- Research freshness ---
    research_index = state.get("research_index", {})
    if endpoint in research_index:
        days_old = research_index[endpoint].get("days_old", 0)
        if days_old > 7:
            add_section(f"**WARNING:** Research is {days_old} days old. Consider `/api-research`.")

    # --- Orchestrator build (if any) ---
    orchestrator = get_orchestrator_status(load_json_file(BUILD_STATE_FILE))
    if orchestrator:
        add_section(
            f"**Orchestrated Build:** {orchestrator['build_id']}",
            f" - Progress: {orchestrator['progress']} workflows",
            f" - Active: [{orchestrator['active_type']}] {orchestrator['active_name']}",
        )

    # --- Standing reminders ---
    add_section("**Remember:** Research-first | Questions FROM findings | Verify after green")

    return "\n".join(lines)
|
|
32
253
|
|
|
33
254
|
|
|
34
255
|
def main():
|
|
@@ -59,73 +280,15 @@ def main():
|
|
|
59
280
|
should_reground = turn_count % REGROUND_INTERVAL == 0
|
|
60
281
|
|
|
61
282
|
if should_reground and state.get("endpoint"):
|
|
62
|
-
# Build re-grounding context
|
|
63
|
-
|
|
64
|
-
context_parts.append(f"## Re-Grounding Reminder (Turn {turn_count})")
|
|
65
|
-
context_parts.append("")
|
|
66
|
-
|
|
67
|
-
endpoint = state.get("endpoint", "unknown")
|
|
68
|
-
context_parts.append(f"**Active Endpoint:** {endpoint}")
|
|
69
|
-
|
|
70
|
-
# Get current phase
|
|
71
|
-
phases = state.get("phases", {})
|
|
72
|
-
phase_order = [
|
|
73
|
-
"disambiguation", "scope", "research_initial", "interview",
|
|
74
|
-
"research_deep", "schema_creation", "environment_check",
|
|
75
|
-
"tdd_red", "tdd_green", "verify", "tdd_refactor", "documentation"
|
|
76
|
-
]
|
|
77
|
-
|
|
78
|
-
current_phase = None
|
|
79
|
-
completed_phases = []
|
|
80
|
-
for phase_name in phase_order:
|
|
81
|
-
phase = phases.get(phase_name, {})
|
|
82
|
-
status = phase.get("status", "not_started")
|
|
83
|
-
if status == "complete":
|
|
84
|
-
completed_phases.append(phase_name)
|
|
85
|
-
elif status == "in_progress" and not current_phase:
|
|
86
|
-
current_phase = phase_name
|
|
87
|
-
|
|
88
|
-
if not current_phase:
|
|
89
|
-
# Find first not_started phase
|
|
90
|
-
for phase_name in phase_order:
|
|
91
|
-
phase = phases.get(phase_name, {})
|
|
92
|
-
if phase.get("status", "not_started") == "not_started":
|
|
93
|
-
current_phase = phase_name
|
|
94
|
-
break
|
|
95
|
-
|
|
96
|
-
context_parts.append(f"**Current Phase:** {current_phase or 'documentation'}")
|
|
97
|
-
context_parts.append(f"**Completed:** {', '.join(completed_phases) if completed_phases else 'None'}")
|
|
98
|
-
|
|
99
|
-
# Key decisions summary
|
|
100
|
-
interview = phases.get("interview", {})
|
|
101
|
-
decisions = interview.get("decisions", {})
|
|
102
|
-
if decisions:
|
|
103
|
-
context_parts.append("")
|
|
104
|
-
context_parts.append("**Key Decisions:**")
|
|
105
|
-
for key, value in list(decisions.items())[:5]: # Limit to 5 key decisions
|
|
106
|
-
response = value.get("value", value.get("response", "N/A"))
|
|
107
|
-
if response:
|
|
108
|
-
context_parts.append(f" - {key}: {str(response)[:50]}")
|
|
109
|
-
|
|
110
|
-
# Research freshness warning
|
|
111
|
-
research_index = state.get("research_index", {})
|
|
112
|
-
if endpoint in research_index:
|
|
113
|
-
entry = research_index[endpoint]
|
|
114
|
-
days_old = entry.get("days_old", 0)
|
|
115
|
-
if days_old > 7:
|
|
116
|
-
context_parts.append("")
|
|
117
|
-
context_parts.append(f"**WARNING:** Research is {days_old} days old. Consider re-researching.")
|
|
118
|
-
|
|
119
|
-
# File reminders
|
|
120
|
-
context_parts.append("")
|
|
121
|
-
context_parts.append("**Key Files:** .claude/api-dev-state.json, .claude/research/")
|
|
283
|
+
# Build comprehensive re-grounding context
|
|
284
|
+
context = build_reground_context(state, turn_count)
|
|
122
285
|
|
|
123
286
|
# Add to reground history
|
|
124
287
|
reground_history = state.setdefault("reground_history", [])
|
|
125
288
|
reground_history.append({
|
|
126
289
|
"turn": turn_count,
|
|
127
290
|
"timestamp": datetime.now().isoformat(),
|
|
128
|
-
"phase": current_phase
|
|
291
|
+
"phase": state.get("phases", {}).get("current_phase", "unknown")
|
|
129
292
|
})
|
|
130
293
|
# Keep only last 10 reground events
|
|
131
294
|
state["reground_history"] = reground_history[-10:]
|
|
@@ -138,7 +301,7 @@ def main():
|
|
|
138
301
|
"continue": True,
|
|
139
302
|
"hookSpecificOutput": {
|
|
140
303
|
"hookEventName": "PostToolUse",
|
|
141
|
-
"additionalContext":
|
|
304
|
+
"additionalContext": context
|
|
142
305
|
}
|
|
143
306
|
}
|
|
144
307
|
print(json.dumps(output))
|