@hustle-together/api-dev-tools 3.6.5 → 3.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5599 -258
- package/bin/cli.js +395 -20
- package/commands/README.md +459 -71
- package/commands/hustle-api-continue.md +158 -0
- package/commands/{api-create.md → hustle-api-create.md} +35 -15
- package/commands/{api-env.md → hustle-api-env.md} +4 -4
- package/commands/{api-interview.md → hustle-api-interview.md} +1 -1
- package/commands/{api-research.md → hustle-api-research.md} +3 -3
- package/commands/hustle-api-sessions.md +149 -0
- package/commands/{api-status.md → hustle-api-status.md} +16 -16
- package/commands/{api-verify.md → hustle-api-verify.md} +2 -2
- package/commands/hustle-combine.md +763 -0
- package/commands/hustle-ui-create-page.md +933 -0
- package/commands/hustle-ui-create.md +825 -0
- package/hooks/api-workflow-check.py +545 -21
- package/hooks/cache-research.py +337 -0
- package/hooks/check-api-routes.py +168 -0
- package/hooks/check-playwright-setup.py +103 -0
- package/hooks/check-storybook-setup.py +81 -0
- package/hooks/detect-interruption.py +165 -0
- package/hooks/enforce-a11y-audit.py +202 -0
- package/hooks/enforce-brand-guide.py +241 -0
- package/hooks/enforce-documentation.py +60 -8
- package/hooks/enforce-freshness.py +184 -0
- package/hooks/enforce-page-components.py +186 -0
- package/hooks/enforce-page-data-schema.py +155 -0
- package/hooks/enforce-questions-sourced.py +146 -0
- package/hooks/enforce-schema-from-interview.py +248 -0
- package/hooks/enforce-ui-disambiguation.py +108 -0
- package/hooks/enforce-ui-interview.py +130 -0
- package/hooks/generate-manifest-entry.py +1161 -0
- package/hooks/session-logger.py +297 -0
- package/hooks/session-startup.py +160 -15
- package/hooks/track-scope-coverage.py +220 -0
- package/hooks/track-tool-use.py +81 -1
- package/hooks/update-api-showcase.py +149 -0
- package/hooks/update-registry.py +352 -0
- package/hooks/update-ui-showcase.py +212 -0
- package/package.json +8 -3
- package/templates/BRAND_GUIDE.md +299 -0
- package/templates/CLAUDE-SECTION.md +56 -24
- package/templates/SPEC.json +640 -0
- package/templates/api-dev-state.json +217 -161
- package/templates/api-showcase/_components/APICard.tsx +153 -0
- package/templates/api-showcase/_components/APIModal.tsx +375 -0
- package/templates/api-showcase/_components/APIShowcase.tsx +231 -0
- package/templates/api-showcase/_components/APITester.tsx +522 -0
- package/templates/api-showcase/page.tsx +41 -0
- package/templates/component/Component.stories.tsx +172 -0
- package/templates/component/Component.test.tsx +237 -0
- package/templates/component/Component.tsx +86 -0
- package/templates/component/Component.types.ts +55 -0
- package/templates/component/index.ts +15 -0
- package/templates/dev-tools/_components/DevToolsLanding.tsx +320 -0
- package/templates/dev-tools/page.tsx +10 -0
- package/templates/page/page.e2e.test.ts +218 -0
- package/templates/page/page.tsx +42 -0
- package/templates/performance-budgets.json +58 -0
- package/templates/registry.json +13 -0
- package/templates/settings.json +90 -0
- package/templates/shared/HeroHeader.tsx +261 -0
- package/templates/shared/index.ts +1 -0
- package/templates/ui-showcase/_components/PreviewCard.tsx +315 -0
- package/templates/ui-showcase/_components/PreviewModal.tsx +676 -0
- package/templates/ui-showcase/_components/UIShowcase.tsx +262 -0
- package/templates/ui-showcase/page.tsx +26 -0
- package/demo/hustle-together/blog/gemini-vs-claude-widgets.html +0 -959
- package/demo/hustle-together/blog/interview-driven-api-development.html +0 -1146
- package/demo/hustle-together/blog/tdd-for-ai.html +0 -982
- package/demo/hustle-together/index.html +0 -1312
- package/demo/workflow-demo-v3.5-backup.html +0 -5008
- package/demo/workflow-demo.html +0 -6202
package/hooks/session-logger.py
ADDED
@@ -0,0 +1,297 @@
+#!/usr/bin/env python3
+"""
+Hook: Stop
+Purpose: Save session to .claude/api-sessions/ for later review
+
+This hook runs when a Claude Code session ends (Stop event).
+It saves the session data for the completed workflow including:
+- State snapshot at completion
+- Files created during the workflow
+- Summary of phases completed
+- Research sources used
+- Interview decisions made
+
+Added in v3.6.7 for session logging support.
+
+Returns:
+- JSON with session save info
+"""
+import json
+import sys
+import os
+from datetime import datetime
+from pathlib import Path
+import shutil
+
+STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+SESSIONS_DIR = Path(__file__).parent.parent / "api-sessions"
+RESEARCH_DIR = Path(__file__).parent.parent / "research"
+
+
+def get_active_endpoint(state):
+    """Get active endpoint - supports both old and new state formats."""
+    # New format (v3.6.7+): endpoints object with active_endpoint pointer
+    if "endpoints" in state and "active_endpoint" in state:
+        active = state.get("active_endpoint")
+        if active and active in state["endpoints"]:
+            return active, state["endpoints"][active]
+        return None, None
+
+    # Old format: single endpoint field
+    endpoint = state.get("endpoint")
+    if endpoint:
+        return endpoint, state
+
+    return None, None
+
+
+def get_completed_phases(endpoint_data):
+    """Get list of completed phases."""
+    completed = []
+    phases = endpoint_data.get("phases", {})
+
+    phase_order = [
+        "disambiguation", "scope", "research_initial", "interview",
+        "research_deep", "schema_creation", "environment_check",
+        "tdd_red", "tdd_green", "verify", "tdd_refactor", "documentation", "completion"
+    ]
+
+    for phase_name in phase_order:
+        phase = phases.get(phase_name, {})
+        if phase.get("status") == "complete":
+            completed.append(phase_name)
+
+    return completed
+
+
+def get_files_created(endpoint_data):
+    """Get list of files created during this workflow."""
+    files = []
+
+    # From completion phase
+    completion = endpoint_data.get("phases", {}).get("completion", {})
+    files.extend(completion.get("files_created", []))
+
+    # From schema phase
+    schema = endpoint_data.get("phases", {}).get("schema_creation", {})
+    if schema.get("schema_file"):
+        files.append(schema.get("schema_file"))
+
+    # From TDD phases
+    tdd_red = endpoint_data.get("phases", {}).get("tdd_red", {})
+    if tdd_red.get("test_file"):
+        files.append(tdd_red.get("test_file"))
+
+    tdd_green = endpoint_data.get("phases", {}).get("tdd_green", {})
+    if tdd_green.get("implementation_file"):
+        files.append(tdd_green.get("implementation_file"))
+
+    return list(set(files)) # Deduplicate
+
+
+def generate_summary(endpoint, endpoint_data, state):
+    """Generate a markdown summary of the session."""
+    completed = get_completed_phases(endpoint_data)
+    files = get_files_created(endpoint_data)
+    decisions = endpoint_data.get("phases", {}).get("interview", {}).get("decisions", {})
+
+    lines = [
+        f"# Session Summary: {endpoint}",
+        "",
+        f"*Generated: {datetime.now().isoformat()}*",
+        "",
+        "## Overview",
+        "",
+        f"- **Endpoint:** {endpoint}",
+        f"- **Library:** {endpoint_data.get('library', 'N/A')}",
+        f"- **Started:** {endpoint_data.get('started_at', 'N/A')}",
+        f"- **Completed Phases:** {len(completed)}/13",
+        f"- **Status:** {endpoint_data.get('status', 'unknown')}",
+        "",
+        "## Phases Completed",
+        ""
+    ]
+
+    for i, phase in enumerate(completed, 1):
+        lines.append(f"{i}. {phase.replace('_', ' ').title()}")
+
+    lines.extend([
+        "",
+        "## Files Created",
+        ""
+    ])
+
+    for f in files:
+        lines.append(f"- `{f}`")
+
+    if decisions:
+        lines.extend([
+            "",
+            "## Interview Decisions",
+            ""
+        ])
+        for key, value in decisions.items():
+            response = value.get("response", value.get("value", "N/A"))
+            lines.append(f"- **{key}:** {response}")
+
+    lines.extend([
+        "",
+        "## Research Sources",
+        ""
+    ])
+
+    # Check for research cache
+    research_path = RESEARCH_DIR / endpoint / "sources.json"
+    if research_path.exists():
+        try:
+            sources = json.loads(research_path.read_text())
+            for src in sources.get("sources", [])[:10]: # Limit to 10
+                url = src.get("url", src.get("query", ""))
+                if url:
+                    lines.append(f"- {url}")
+        except (json.JSONDecodeError, IOError):
+            lines.append("- (sources.json not readable)")
+    else:
+        lines.append("- (no sources.json found)")
+
+    lines.extend([
+        "",
+        "---",
+        "",
+        f"*Session saved to: .claude/api-sessions/{endpoint}_{{timestamp}}/*"
+    ])
+
+    return "\n".join(lines)
+
+
+def save_session(endpoint, endpoint_data, state):
+    """Save session to .claude/api-sessions/."""
+    # Create timestamp
+    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+    session_dir = SESSIONS_DIR / f"{endpoint}_{timestamp}"
+    session_dir.mkdir(parents=True, exist_ok=True)
+
+    # 1. Save state snapshot
+    state_snapshot = {
+        "saved_at": datetime.now().isoformat(),
+        "endpoint": endpoint,
+        "endpoint_data": endpoint_data,
+        "turn_count": state.get("turn_count", 0),
+        "research_queries": state.get("research_queries", [])
+    }
+    (session_dir / "state-snapshot.json").write_text(json.dumps(state_snapshot, indent=2))
+
+    # 2. Save files list
+    files = get_files_created(endpoint_data)
+    (session_dir / "files-created.txt").write_text("\n".join(files))
+
+    # 3. Generate and save summary
+    summary = generate_summary(endpoint, endpoint_data, state)
+    (session_dir / "summary.md").write_text(summary)
+
+    # 4. Copy research cache if exists
+    research_src = RESEARCH_DIR / endpoint
+    if research_src.exists():
+        research_dst = session_dir / "research-cache"
+        research_dst.mkdir(exist_ok=True)
+        for f in research_src.iterdir():
+            if f.is_file():
+                shutil.copy2(f, research_dst / f.name)
+
+    # 5. Update sessions index
+    update_sessions_index(endpoint, timestamp, endpoint_data)
+
+    return session_dir
+
+
+def update_sessions_index(endpoint, timestamp, endpoint_data):
+    """Update the sessions index file."""
+    index_file = SESSIONS_DIR / "index.json"
+
+    if index_file.exists():
+        try:
+            index = json.loads(index_file.read_text())
+        except json.JSONDecodeError:
+            index = {"version": "3.6.7", "sessions": []}
+    else:
+        index = {"version": "3.6.7", "sessions": []}
+
+    # Add this session
+    completed = get_completed_phases(endpoint_data)
+    index["sessions"].append({
+        "endpoint": endpoint,
+        "timestamp": timestamp,
+        "folder": f"{endpoint}_{timestamp}",
+        "status": endpoint_data.get("status", "unknown"),
+        "phases_completed": len(completed),
+        "created_at": datetime.now().isoformat()
+    })
+
+    index_file.write_text(json.dumps(index, indent=2))
+
+
+def main():
+    try:
+        input_data = json.load(sys.stdin)
+    except json.JSONDecodeError:
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    # Check if state file exists
+    if not STATE_FILE.exists():
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    try:
+        state = json.loads(STATE_FILE.read_text())
+    except json.JSONDecodeError:
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    # Get active endpoint
+    endpoint, endpoint_data = get_active_endpoint(state)
+    if not endpoint or not endpoint_data:
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    # Only save if there's meaningful progress
+    completed = get_completed_phases(endpoint_data)
+    if len(completed) < 2:
+        # Not enough progress to save
+        print(json.dumps({
+            "hookSpecificOutput": {
+                "sessionSaved": False,
+                "reason": "Not enough progress to save (need at least 2 completed phases)"
+            }
+        }))
+        sys.exit(0)
+
+    # Save the session
+    try:
+        session_dir = save_session(endpoint, endpoint_data, state)
+
+        output = {
+            "hookSpecificOutput": {
+                "sessionSaved": True,
+                "endpoint": endpoint,
+                "sessionDir": str(session_dir),
+                "phasesCompleted": len(completed)
+            }
+        }

+        print(json.dumps(output))
+        sys.exit(0)
+
+    except Exception as e:
+        output = {
+            "hookSpecificOutput": {
+                "sessionSaved": False,
+                "error": str(e)
+            }
+        }
+        print(json.dumps(output))
+        sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
package/hooks/session-startup.py
CHANGED
@@ -13,6 +13,11 @@ helping to re-ground Claude on:

 Returns:
 - JSON with additionalContext to inject into Claude's context
+
+Updated in v3.6.7:
+- Support multi-API state structure (endpoints object)
+- Read research index from .claude/research/index.json file
+- Calculate freshness from timestamps
 """
 import json
 import sys
@@ -25,6 +30,75 @@ STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
 RESEARCH_INDEX = Path(__file__).parent.parent / "research" / "index.json"


+def get_workflow_type(state):
+    """Detect the workflow type from state."""
+    workflow = state.get("workflow", "")
+    if workflow:
+        return workflow
+
+    # Infer from state structure
+    if state.get("combine_config"):
+        return "combine-api"
+    if state.get("ui_config"):
+        mode = state.get("ui_config", {}).get("mode", "")
+        return f"ui-create-{mode}" if mode else "ui-create-component"
+
+    return "api-create"
+
+
+def get_active_endpoint(state):
+    """Get active endpoint - supports both old and new state formats."""
+    # New format (v3.6.7+): endpoints object with active_endpoint pointer
+    if "endpoints" in state and "active_endpoint" in state:
+        active = state.get("active_endpoint")
+        if active and active in state["endpoints"]:
+            return active, state["endpoints"][active]
+        return None, None
+
+    # Support for elements (UI workflow)
+    if "elements" in state and "active_element" in state:
+        active = state.get("active_element")
+        if active and active in state["elements"]:
+            return active, state["elements"][active]
+        return None, None
+
+    # Old format: single endpoint field
+    endpoint = state.get("endpoint")
+    if endpoint:
+        # Return endpoint name and the entire state as endpoint data
+        return endpoint, state
+
+    # Try active_element without elements dict
+    active = state.get("active_element")
+    if active:
+        return active, state
+
+    return None, None
+
+
+def load_research_index():
+    """Load research index from .claude/research/index.json file."""
+    if not RESEARCH_INDEX.exists():
+        return {}
+    try:
+        index = json.loads(RESEARCH_INDEX.read_text())
+        return index.get("apis", {})
+    except (json.JSONDecodeError, IOError):
+        return {}
+
+
+def calculate_days_old(timestamp_str):
+    """Calculate how many days old a timestamp is."""
+    if not timestamp_str:
+        return 0
+    try:
+        last_updated = datetime.fromisoformat(timestamp_str.replace('Z', '+00:00'))
+        now = datetime.now(last_updated.tzinfo) if last_updated.tzinfo else datetime.now()
+        return (now - last_updated).days
+    except (ValueError, TypeError):
+        return 0
+
+
 def main():
     # Read hook input from stdin
     try:
@@ -46,21 +120,74 @@ def main():
         print(json.dumps({"continue": True}))
         sys.exit(0)

-    #
-    endpoint = state
-    if not endpoint:
+    # Get active endpoint (supports both old and new formats)
+    endpoint, endpoint_data = get_active_endpoint(state)
+    if not endpoint or not endpoint_data:
         # No active endpoint - just continue
         print(json.dumps({"continue": True}))
         sys.exit(0)

+    # Detect workflow type
+    workflow_type = get_workflow_type(state)
+
     # Build context summary
     context_parts = []
-
+
+    # Header based on workflow type
+    if workflow_type == "combine-api":
+        context_parts.append("## Combined API Development Session Context")
+    elif workflow_type.startswith("ui-create"):
+        mode = "Page" if "page" in workflow_type else "Component"
+        context_parts.append(f"## UI {mode} Development Session Context")
+    else:
+        context_parts.append("## API Development Session Context")
+
     context_parts.append("")
-    context_parts.append(f"**
+    context_parts.append(f"**Workflow:** {workflow_type}")
+    context_parts.append(f"**Active Element:** {endpoint}")
+
+    # Add combine-specific context
+    if workflow_type == "combine-api":
+        combine_config = state.get("combine_config", {})
+        source_elements = combine_config.get("source_elements", [])
+        flow_type = combine_config.get("flow_type", "sequential")
+        error_strategy = combine_config.get("error_strategy", "fail-fast")
+
+        if source_elements:
+            source_names = []
+            for elem in source_elements:
+                if isinstance(elem, dict):
+                    source_names.append(elem.get("name", "unknown"))
+                else:
+                    source_names.append(str(elem))
+
+            context_parts.append("")
+            context_parts.append("**Combining APIs:**")
+            for name in source_names:
+                context_parts.append(f" - {name}")
+            context_parts.append(f" Flow: {flow_type}")
+            context_parts.append(f" Error Strategy: {error_strategy}")
+
+    # Add UI-specific context
+    elif workflow_type.startswith("ui-create"):
+        ui_config = state.get("ui_config", {})
+        if not ui_config and endpoint_data:
+            ui_config = endpoint_data.get("ui_config", {})
+
+        if ui_config:
+            context_parts.append("")
+            context_parts.append("**UI Configuration:**")
+            if ui_config.get("use_brand_guide"):
+                context_parts.append(" - Brand guide: Applied")
+            if ui_config.get("component_type"):
+                context_parts.append(f" - Type: {ui_config['component_type']}")
+            if ui_config.get("accessibility_level"):
+                context_parts.append(f" - A11y: {ui_config['accessibility_level']}")
+            if ui_config.get("data_sources"):
+                context_parts.append(f" - Data sources: {len(ui_config['data_sources'])}")

-    # Get phase status
-    phases =
+    # Get phase status (from endpoint_data for multi-API, or state for legacy)
+    phases = endpoint_data.get("phases", {})
     completed = []
     in_progress = []
     not_started = []
@@ -103,17 +230,26 @@ def main():
         if response:
             context_parts.append(f" - {key}: {str(response)[:100]}")

-    # Research cache info
-    research_index =
+    # Research cache info - READ FROM index.json FILE (v3.6.7 fix)
+    research_index = load_research_index()
     if endpoint in research_index:
         entry = research_index[endpoint]
-
+        last_updated = entry.get("last_updated", "")
+        days_old = calculate_days_old(last_updated)
         context_parts.append("")
         context_parts.append("**Research Cache:**")
         context_parts.append(f" - Location: .claude/research/{endpoint}/CURRENT.md")
-        context_parts.append(f" - Last Updated: {
+        context_parts.append(f" - Last Updated: {last_updated or 'Unknown'}")
         if days_old > 7:
-            context_parts.append(f" - WARNING: Research is {days_old} days old. Consider re-researching.")
+            context_parts.append(f" - ⚠️ WARNING: Research is {days_old} days old. Consider re-researching.")
+    else:
+        # Check if research directory exists even without index entry
+        research_dir = Path(__file__).parent.parent / "research" / endpoint
+        if research_dir.exists():
+            context_parts.append("")
+            context_parts.append("**Research Cache:**")
+            context_parts.append(f" - Location: .claude/research/{endpoint}/")
+            context_parts.append(f" - ⚠️ Not indexed - run /api-research to update")

     # Turn count for re-grounding awareness
     turn_count = state.get("turn_count", 0)
@@ -128,10 +264,19 @@ def main():
     context_parts.append(" - Research: .claude/research/")
     context_parts.append(" - Manifest: src/app/api-test/api-tests-manifest.json (if exists)")

-    # Workflow reminder
+    # Workflow reminder based on type
     context_parts.append("")
-
-
+    if workflow_type == "combine-api":
+        context_parts.append("**Workflow Reminder:** This is a combined API workflow.")
+        context_parts.append("Ensure all source APIs exist in registry before orchestration.")
+        context_parts.append("Test both individual APIs and the combined flow.")
+    elif workflow_type.startswith("ui-create"):
+        context_parts.append("**Workflow Reminder:** This is a UI development workflow.")
+        context_parts.append("Check registry for reusable components before creating new ones.")
+        context_parts.append("Ensure brand guide compliance and accessibility requirements.")
+    else:
+        context_parts.append("**Workflow Reminder:** This project uses interview-driven API development.")
+        context_parts.append("Phases loop back if verification fails. Research before answering API questions.")

     # Build the output
     additional_context = "\n".join(context_parts)