@hustle-together/api-dev-tools 3.6.4 → 3.9.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5307 -258
- package/bin/cli.js +348 -20
- package/commands/README.md +459 -71
- package/commands/hustle-api-continue.md +158 -0
- package/commands/{api-create.md → hustle-api-create.md} +22 -2
- package/commands/{api-env.md → hustle-api-env.md} +4 -4
- package/commands/{api-interview.md → hustle-api-interview.md} +1 -1
- package/commands/{api-research.md → hustle-api-research.md} +3 -3
- package/commands/hustle-api-sessions.md +149 -0
- package/commands/{api-status.md → hustle-api-status.md} +16 -16
- package/commands/{api-verify.md → hustle-api-verify.md} +2 -2
- package/commands/hustle-combine.md +763 -0
- package/commands/hustle-ui-create.md +825 -0
- package/hooks/api-workflow-check.py +385 -19
- package/hooks/cache-research.py +337 -0
- package/hooks/check-playwright-setup.py +103 -0
- package/hooks/check-storybook-setup.py +81 -0
- package/hooks/detect-interruption.py +165 -0
- package/hooks/enforce-brand-guide.py +131 -0
- package/hooks/enforce-documentation.py +60 -8
- package/hooks/enforce-freshness.py +184 -0
- package/hooks/enforce-questions-sourced.py +146 -0
- package/hooks/enforce-schema-from-interview.py +248 -0
- package/hooks/enforce-ui-disambiguation.py +108 -0
- package/hooks/enforce-ui-interview.py +130 -0
- package/hooks/generate-manifest-entry.py +981 -0
- package/hooks/session-logger.py +297 -0
- package/hooks/session-startup.py +65 -10
- package/hooks/track-scope-coverage.py +220 -0
- package/hooks/track-tool-use.py +81 -1
- package/hooks/update-api-showcase.py +149 -0
- package/hooks/update-registry.py +352 -0
- package/hooks/update-ui-showcase.py +148 -0
- package/package.json +8 -2
- package/templates/BRAND_GUIDE.md +299 -0
- package/templates/CLAUDE-SECTION.md +56 -24
- package/templates/SPEC.json +640 -0
- package/templates/api-dev-state.json +179 -161
- package/templates/api-showcase/APICard.tsx +153 -0
- package/templates/api-showcase/APIModal.tsx +375 -0
- package/templates/api-showcase/APIShowcase.tsx +231 -0
- package/templates/api-showcase/APITester.tsx +522 -0
- package/templates/api-showcase/page.tsx +41 -0
- package/templates/component/Component.stories.tsx +172 -0
- package/templates/component/Component.test.tsx +237 -0
- package/templates/component/Component.tsx +86 -0
- package/templates/component/Component.types.ts +55 -0
- package/templates/component/index.ts +15 -0
- package/templates/dev-tools/_components/DevToolsLanding.tsx +320 -0
- package/templates/dev-tools/page.tsx +10 -0
- package/templates/page/page.e2e.test.ts +218 -0
- package/templates/page/page.tsx +42 -0
- package/templates/performance-budgets.json +58 -0
- package/templates/registry.json +13 -0
- package/templates/settings.json +74 -0
- package/templates/shared/HeroHeader.tsx +261 -0
- package/templates/shared/index.ts +1 -0
- package/templates/ui-showcase/PreviewCard.tsx +315 -0
- package/templates/ui-showcase/PreviewModal.tsx +676 -0
- package/templates/ui-showcase/UIShowcase.tsx +262 -0
- package/templates/ui-showcase/page.tsx +26 -0
package/hooks/session-logger.py
ADDED
@@ -0,0 +1,297 @@
+#!/usr/bin/env python3
+"""
+Hook: Stop
+Purpose: Save session to .claude/api-sessions/ for later review
+
+This hook runs when a Claude Code session ends (Stop event).
+It saves the session data for the completed workflow including:
+- State snapshot at completion
+- Files created during the workflow
+- Summary of phases completed
+- Research sources used
+- Interview decisions made
+
+Added in v3.6.7 for session logging support.
+
+Returns:
+- JSON with session save info
+"""
+import json
+import sys
+import os
+from datetime import datetime
+from pathlib import Path
+import shutil
+
+STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+SESSIONS_DIR = Path(__file__).parent.parent / "api-sessions"
+RESEARCH_DIR = Path(__file__).parent.parent / "research"
+
+
+def get_active_endpoint(state):
+    """Get active endpoint - supports both old and new state formats."""
+    # New format (v3.6.7+): endpoints object with active_endpoint pointer
+    if "endpoints" in state and "active_endpoint" in state:
+        active = state.get("active_endpoint")
+        if active and active in state["endpoints"]:
+            return active, state["endpoints"][active]
+        return None, None
+
+    # Old format: single endpoint field
+    endpoint = state.get("endpoint")
+    if endpoint:
+        return endpoint, state
+
+    return None, None
+
+
+def get_completed_phases(endpoint_data):
+    """Get list of completed phases."""
+    completed = []
+    phases = endpoint_data.get("phases", {})
+
+    phase_order = [
+        "disambiguation", "scope", "research_initial", "interview",
+        "research_deep", "schema_creation", "environment_check",
+        "tdd_red", "tdd_green", "verify", "tdd_refactor", "documentation", "completion"
+    ]
+
+    for phase_name in phase_order:
+        phase = phases.get(phase_name, {})
+        if phase.get("status") == "complete":
+            completed.append(phase_name)
+
+    return completed
+
+
+def get_files_created(endpoint_data):
+    """Get list of files created during this workflow."""
+    files = []
+
+    # From completion phase
+    completion = endpoint_data.get("phases", {}).get("completion", {})
+    files.extend(completion.get("files_created", []))
+
+    # From schema phase
+    schema = endpoint_data.get("phases", {}).get("schema_creation", {})
+    if schema.get("schema_file"):
+        files.append(schema.get("schema_file"))
+
+    # From TDD phases
+    tdd_red = endpoint_data.get("phases", {}).get("tdd_red", {})
+    if tdd_red.get("test_file"):
+        files.append(tdd_red.get("test_file"))
+
+    tdd_green = endpoint_data.get("phases", {}).get("tdd_green", {})
+    if tdd_green.get("implementation_file"):
+        files.append(tdd_green.get("implementation_file"))
+
+    return list(set(files))  # Deduplicate
+
+
+def generate_summary(endpoint, endpoint_data, state):
+    """Generate a markdown summary of the session."""
+    completed = get_completed_phases(endpoint_data)
+    files = get_files_created(endpoint_data)
+    decisions = endpoint_data.get("phases", {}).get("interview", {}).get("decisions", {})
+
+    lines = [
+        f"# Session Summary: {endpoint}",
+        "",
+        f"*Generated: {datetime.now().isoformat()}*",
+        "",
+        "## Overview",
+        "",
+        f"- **Endpoint:** {endpoint}",
+        f"- **Library:** {endpoint_data.get('library', 'N/A')}",
+        f"- **Started:** {endpoint_data.get('started_at', 'N/A')}",
+        f"- **Completed Phases:** {len(completed)}/13",
+        f"- **Status:** {endpoint_data.get('status', 'unknown')}",
+        "",
+        "## Phases Completed",
+        ""
+    ]
+
+    for i, phase in enumerate(completed, 1):
+        lines.append(f"{i}. {phase.replace('_', ' ').title()}")
+
+    lines.extend([
+        "",
+        "## Files Created",
+        ""
+    ])
+
+    for f in files:
+        lines.append(f"- `{f}`")
+
+    if decisions:
+        lines.extend([
+            "",
+            "## Interview Decisions",
+            ""
+        ])
+        for key, value in decisions.items():
+            response = value.get("response", value.get("value", "N/A"))
+            lines.append(f"- **{key}:** {response}")
+
+    lines.extend([
+        "",
+        "## Research Sources",
+        ""
+    ])
+
+    # Check for research cache
+    research_path = RESEARCH_DIR / endpoint / "sources.json"
+    if research_path.exists():
+        try:
+            sources = json.loads(research_path.read_text())
+            for src in sources.get("sources", [])[:10]:  # Limit to 10
+                url = src.get("url", src.get("query", ""))
+                if url:
+                    lines.append(f"- {url}")
+        except (json.JSONDecodeError, IOError):
+            lines.append("- (sources.json not readable)")
+    else:
+        lines.append("- (no sources.json found)")
+
+    lines.extend([
+        "",
+        "---",
+        "",
+        f"*Session saved to: .claude/api-sessions/{endpoint}_{{timestamp}}/*"
+    ])
+
+    return "\n".join(lines)
+
+
+def save_session(endpoint, endpoint_data, state):
+    """Save session to .claude/api-sessions/."""
+    # Create timestamp
+    timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+    session_dir = SESSIONS_DIR / f"{endpoint}_{timestamp}"
+    session_dir.mkdir(parents=True, exist_ok=True)
+
+    # 1. Save state snapshot
+    state_snapshot = {
+        "saved_at": datetime.now().isoformat(),
+        "endpoint": endpoint,
+        "endpoint_data": endpoint_data,
+        "turn_count": state.get("turn_count", 0),
+        "research_queries": state.get("research_queries", [])
+    }
+    (session_dir / "state-snapshot.json").write_text(json.dumps(state_snapshot, indent=2))
+
+    # 2. Save files list
+    files = get_files_created(endpoint_data)
+    (session_dir / "files-created.txt").write_text("\n".join(files))
+
+    # 3. Generate and save summary
+    summary = generate_summary(endpoint, endpoint_data, state)
+    (session_dir / "summary.md").write_text(summary)
+
+    # 4. Copy research cache if exists
+    research_src = RESEARCH_DIR / endpoint
+    if research_src.exists():
+        research_dst = session_dir / "research-cache"
+        research_dst.mkdir(exist_ok=True)
+        for f in research_src.iterdir():
+            if f.is_file():
+                shutil.copy2(f, research_dst / f.name)
+
+    # 5. Update sessions index
+    update_sessions_index(endpoint, timestamp, endpoint_data)
+
+    return session_dir
+
+
+def update_sessions_index(endpoint, timestamp, endpoint_data):
+    """Update the sessions index file."""
+    index_file = SESSIONS_DIR / "index.json"
+
+    if index_file.exists():
+        try:
+            index = json.loads(index_file.read_text())
+        except json.JSONDecodeError:
+            index = {"version": "3.6.7", "sessions": []}
+    else:
+        index = {"version": "3.6.7", "sessions": []}
+
+    # Add this session
+    completed = get_completed_phases(endpoint_data)
+    index["sessions"].append({
+        "endpoint": endpoint,
+        "timestamp": timestamp,
+        "folder": f"{endpoint}_{timestamp}",
+        "status": endpoint_data.get("status", "unknown"),
+        "phases_completed": len(completed),
+        "created_at": datetime.now().isoformat()
+    })
+
+    index_file.write_text(json.dumps(index, indent=2))
+
+
+def main():
+    try:
+        input_data = json.load(sys.stdin)
+    except json.JSONDecodeError:
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    # Check if state file exists
+    if not STATE_FILE.exists():
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    try:
+        state = json.loads(STATE_FILE.read_text())
+    except json.JSONDecodeError:
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    # Get active endpoint
+    endpoint, endpoint_data = get_active_endpoint(state)
+    if not endpoint or not endpoint_data:
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    # Only save if there's meaningful progress
+    completed = get_completed_phases(endpoint_data)
+    if len(completed) < 2:
+        # Not enough progress to save
+        print(json.dumps({
+            "hookSpecificOutput": {
+                "sessionSaved": False,
+                "reason": "Not enough progress to save (need at least 2 completed phases)"
+            }
+        }))
+        sys.exit(0)
+
+    # Save the session
+    try:
+        session_dir = save_session(endpoint, endpoint_data, state)
+
+        output = {
+            "hookSpecificOutput": {
+                "sessionSaved": True,
+                "endpoint": endpoint,
+                "sessionDir": str(session_dir),
+                "phasesCompleted": len(completed)
+            }
+        }
+
+        print(json.dumps(output))
+        sys.exit(0)
+
+    except Exception as e:
+        output = {
+            "hookSpecificOutput": {
+                "sessionSaved": False,
+                "error": str(e)
+            }
+        }
+        print(json.dumps(output))
+        sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
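
Note on exercising this hook: like the other hooks in this package, session-logger.py reads the hook payload as JSON on stdin and prints a JSON result to stdout, so it can be smoke-tested outside the agent. A minimal sketch, assuming the hooks are installed under .claude/hooks/ as the state-file paths above imply (an empty payload is enough here, because the script only parses stdin and then works from api-dev-state.json):

    import json
    import subprocess

    # Run the Stop hook the way the agent would: JSON in on stdin, JSON out on stdout.
    # With no active endpoint it simply prints {"continue": true}; with fewer than
    # two completed phases it reports sessionSaved: false; otherwise it writes a
    # .claude/api-sessions/<endpoint>_<timestamp>/ folder and reports its path.
    result = subprocess.run(
        ["python3", ".claude/hooks/session-logger.py"],
        input=json.dumps({}),
        capture_output=True,
        text=True,
    )
    print(result.stdout)
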
package/hooks/session-startup.py
CHANGED
@@ -13,6 +13,11 @@ helping to re-ground Claude on:
 
 Returns:
 - JSON with additionalContext to inject into Claude's context
+
+Updated in v3.6.7:
+- Support multi-API state structure (endpoints object)
+- Read research index from .claude/research/index.json file
+- Calculate freshness from timestamps
 """
 import json
 import sys
@@ -25,6 +30,47 @@ STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
 RESEARCH_INDEX = Path(__file__).parent.parent / "research" / "index.json"
 
 
+def get_active_endpoint(state):
+    """Get active endpoint - supports both old and new state formats."""
+    # New format (v3.6.7+): endpoints object with active_endpoint pointer
+    if "endpoints" in state and "active_endpoint" in state:
+        active = state.get("active_endpoint")
+        if active and active in state["endpoints"]:
+            return active, state["endpoints"][active]
+        return None, None
+
+    # Old format: single endpoint field
+    endpoint = state.get("endpoint")
+    if endpoint:
+        # Return endpoint name and the entire state as endpoint data
+        return endpoint, state
+
+    return None, None
+
+
+def load_research_index():
+    """Load research index from .claude/research/index.json file."""
+    if not RESEARCH_INDEX.exists():
+        return {}
+    try:
+        index = json.loads(RESEARCH_INDEX.read_text())
+        return index.get("apis", {})
+    except (json.JSONDecodeError, IOError):
+        return {}
+
+
+def calculate_days_old(timestamp_str):
+    """Calculate how many days old a timestamp is."""
+    if not timestamp_str:
+        return 0
+    try:
+        last_updated = datetime.fromisoformat(timestamp_str.replace('Z', '+00:00'))
+        now = datetime.now(last_updated.tzinfo) if last_updated.tzinfo else datetime.now()
+        return (now - last_updated).days
+    except (ValueError, TypeError):
+        return 0
+
+
 def main():
     # Read hook input from stdin
     try:
@@ -46,9 +92,9 @@ def main():
         print(json.dumps({"continue": True}))
         sys.exit(0)
 
-    #
-    endpoint = state
-    if not endpoint:
+    # Get active endpoint (supports both old and new formats)
+    endpoint, endpoint_data = get_active_endpoint(state)
+    if not endpoint or not endpoint_data:
         # No active endpoint - just continue
         print(json.dumps({"continue": True}))
         sys.exit(0)
@@ -59,8 +105,8 @@ def main():
     context_parts.append("")
     context_parts.append(f"**Active Endpoint:** {endpoint}")
 
-    # Get phase status
-    phases =
+    # Get phase status (from endpoint_data for multi-API, or state for legacy)
+    phases = endpoint_data.get("phases", {})
     completed = []
     in_progress = []
     not_started = []
@@ -103,17 +149,26 @@ def main():
             if response:
                 context_parts.append(f" - {key}: {str(response)[:100]}")
 
-    # Research cache info
-    research_index =
+    # Research cache info - READ FROM index.json FILE (v3.6.7 fix)
+    research_index = load_research_index()
     if endpoint in research_index:
         entry = research_index[endpoint]
-
+        last_updated = entry.get("last_updated", "")
+        days_old = calculate_days_old(last_updated)
         context_parts.append("")
         context_parts.append("**Research Cache:**")
         context_parts.append(f" - Location: .claude/research/{endpoint}/CURRENT.md")
-        context_parts.append(f" - Last Updated: {
+        context_parts.append(f" - Last Updated: {last_updated or 'Unknown'}")
         if days_old > 7:
-            context_parts.append(f" - WARNING: Research is {days_old} days old. Consider re-researching.")
+            context_parts.append(f" - ⚠️ WARNING: Research is {days_old} days old. Consider re-researching.")
+    else:
+        # Check if research directory exists even without index entry
+        research_dir = Path(__file__).parent.parent / "research" / endpoint
+        if research_dir.exists():
+            context_parts.append("")
+            context_parts.append("**Research Cache:**")
+            context_parts.append(f" - Location: .claude/research/{endpoint}/")
+            context_parts.append(f" - ⚠️ Not indexed - run /api-research to update")
 
     # Turn count for re-grounding awareness
     turn_count = state.get("turn_count", 0)
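
The freshness warning above depends on the shape that load_research_index() expects from .claude/research/index.json: a top-level "apis" object keyed by endpoint, with a "last_updated" ISO timestamp per entry. A small sketch of how the check plays out, mirroring the calculate_days_old() logic shown above (the "send-email" entry is an invented example, not taken from the package):

    from datetime import datetime, timedelta, timezone

    # Hypothetical index content in the shape the hook reads, ~10 days stale.
    index = {
        "apis": {
            "send-email": {
                "last_updated": (datetime.now(timezone.utc) - timedelta(days=10)).isoformat()
            }
        }
    }

    entry = index["apis"]["send-email"]
    last_updated = datetime.fromisoformat(entry["last_updated"].replace("Z", "+00:00"))
    now = datetime.now(last_updated.tzinfo) if last_updated.tzinfo else datetime.now()
    days_old = (now - last_updated).days
    print(days_old)  # 10, which exceeds the 7-day threshold and triggers the warning
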
package/hooks/track-scope-coverage.py
ADDED
@@ -0,0 +1,220 @@
+#!/usr/bin/env python3
+"""
+Hook: PostToolUse for AskUserQuestion
+Purpose: Track implemented vs deferred features for scope coverage
+
+This hook tracks which features discovered during research are:
+- Implemented (user chose to include)
+- Deferred (user chose to skip for later)
+- Discovered (found in docs but not yet decided)
+
+Added in v3.6.7 for feature scope tracking.
+
+Returns:
+- JSON with scope coverage update info
+"""
+import json
+import sys
+from datetime import datetime
+from pathlib import Path
+
+STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+
+
+def get_active_endpoint(state):
+    """Get active endpoint - supports both old and new state formats."""
+    if "endpoints" in state and "active_endpoint" in state:
+        active = state.get("active_endpoint")
+        if active and active in state["endpoints"]:
+            return active, state["endpoints"][active]
+        return None, None
+
+    endpoint = state.get("endpoint")
+    if endpoint:
+        return endpoint, state
+
+    return None, None
+
+
+def extract_feature_from_question(question, options):
+    """Try to extract a feature name from the question."""
+    # Look for common patterns
+    patterns = [
+        "implement",
+        "include",
+        "support",
+        "enable",
+        "add"
+    ]
+
+    question_lower = question.lower()
+    for pattern in patterns:
+        if pattern in question_lower:
+            # Extract the words after the pattern
+            idx = question_lower.find(pattern)
+            after = question_lower[idx:].split("?")[0]
+            # Clean up
+            words = after.split()[1:4]  # Get 1-3 words after pattern
+            if words:
+                return " ".join(words).strip(",.?")
+
+    return None
+
+
+def is_feature_decision(question, answer, options):
+    """Determine if this was a feature implementation decision."""
+    question_lower = question.lower()
+
+    # Keywords suggesting feature decision
+    feature_keywords = [
+        "implement", "include", "support", "enable", "add",
+        "feature", "functionality", "capability"
+    ]
+
+    has_keyword = any(k in question_lower for k in feature_keywords)
+
+    # Check if answer indicates yes/no/defer decision
+    answer_lower = str(answer).lower() if answer else ""
+    is_decision = any(word in answer_lower for word in [
+        "yes", "no", "skip", "defer", "later", "include", "exclude",
+        "implement", "confirm", "reject"
+    ])
+
+    return has_keyword and is_decision
+
+
+def categorize_decision(answer):
+    """Categorize the decision as implement/defer/skip."""
+    answer_lower = str(answer).lower() if answer else ""
+
+    if any(word in answer_lower for word in ["yes", "include", "implement", "confirm"]):
+        return "implement"
+    elif any(word in answer_lower for word in ["defer", "later", "phase 2", "future"]):
+        return "defer"
+    elif any(word in answer_lower for word in ["no", "skip", "exclude", "reject"]):
+        return "skip"
+
+    return "unknown"
+
+
+def main():
+    try:
+        input_data = json.load(sys.stdin)
+    except json.JSONDecodeError:
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    tool_name = input_data.get("tool_name", "")
+    tool_input = input_data.get("tool_input", {})
+    tool_result = input_data.get("tool_result", {})
+
+    if tool_name != "AskUserQuestion":
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    if not STATE_FILE.exists():
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    try:
+        state = json.loads(STATE_FILE.read_text())
+    except json.JSONDecodeError:
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    endpoint, endpoint_data = get_active_endpoint(state)
+    if not endpoint or not endpoint_data:
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    # Get question and answer
+    question = tool_input.get("question", "")
+    options = tool_input.get("options", [])
+
+    # Get user's answer from result
+    answer = None
+    if isinstance(tool_result, dict):
+        answer = tool_result.get("answer", tool_result.get("value", ""))
+    elif isinstance(tool_result, str):
+        answer = tool_result
+
+    # Check if this is a feature decision
+    if not is_feature_decision(question, answer, options):
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    # Extract feature name
+    feature = extract_feature_from_question(question, options)
+    if not feature:
+        feature = f"feature_{datetime.now().strftime('%H%M%S')}"
+
+    # Categorize decision
+    category = categorize_decision(answer)
+
+    # Ensure scope object exists
+    if "endpoints" in state:
+        if "scope" not in state["endpoints"][endpoint]:
+            state["endpoints"][endpoint]["scope"] = {
+                "discovered_features": [],
+                "implemented_features": [],
+                "deferred_features": [],
+                "coverage_percent": 0
+            }
+        scope = state["endpoints"][endpoint]["scope"]
+    else:
+        if "scope" not in state:
+            state["scope"] = {
+                "discovered_features": [],
+                "implemented_features": [],
+                "deferred_features": [],
+                "coverage_percent": 0
+            }
+        scope = state["scope"]
+
+    # Add to discovered if not already there
+    feature_entry = {
+        "name": feature,
+        "discovered_at": datetime.now().isoformat(),
+        "question": question[:100],
+        "decision": category
+    }
+
+    if feature not in [f.get("name") if isinstance(f, dict) else f for f in scope["discovered_features"]]:
+        scope["discovered_features"].append(feature_entry)
+
+    # Add to appropriate category
+    if category == "implement":
+        if feature not in scope["implemented_features"]:
+            scope["implemented_features"].append(feature)
+    elif category == "defer":
+        defer_entry = {
+            "name": feature,
+            "reason": f"User chose to defer: {str(answer)[:50]}",
+            "deferred_at": datetime.now().isoformat()
+        }
+        if feature not in [f.get("name") if isinstance(f, dict) else f for f in scope["deferred_features"]]:
+            scope["deferred_features"].append(defer_entry)
+
+    # Calculate coverage
+    total = len(scope["discovered_features"])
+    implemented = len(scope["implemented_features"])
+    if total > 0:
+        scope["coverage_percent"] = round((implemented / total) * 100, 1)
+
+    # Save state
+    STATE_FILE.write_text(json.dumps(state, indent=2))
+
+    output = {
+        "hookSpecificOutput": {
+            "featureTracked": feature,
+            "decision": category,
+            "coveragePercent": scope["coverage_percent"]
+        }
+    }
+
+    print(json.dumps(output))
+    sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
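
A quick illustration of how the decision categorization and coverage math in this hook behave, using categorize_decision() exactly as defined above. This is a sketch, assuming the hook is installed at .claude/hooks/track-scope-coverage.py (the hyphenated filename means it has to be loaded by path rather than imported); the answer strings and feature names are invented examples:

    import importlib.util

    # Load the hook module by path (hypothetical install location).
    spec = importlib.util.spec_from_file_location(
        "track_scope_coverage", ".claude/hooks/track-scope-coverage.py"
    )
    hook = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(hook)  # safe: main() only runs under __main__

    print(hook.categorize_decision("Yes, include rate limiting"))  # "implement"
    print(hook.categorize_decision("Defer webhooks to phase 2"))   # "defer"
    print(hook.categorize_decision("No, skip batch mode"))         # "skip"

    # Coverage mirrors the calculation in main(): implemented / discovered.
    discovered = ["rate limiting", "webhooks", "batch mode"]
    implemented = ["rate limiting"]
    print(round((len(implemented) / len(discovered)) * 100, 1))    # 33.3
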