@hustle-together/api-dev-tools 2.0.7 → 3.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +343 -467
- package/bin/cli.js +229 -15
- package/commands/README.md +124 -251
- package/commands/api-create.md +318 -136
- package/commands/api-interview.md +252 -256
- package/commands/api-research.md +209 -234
- package/commands/api-verify.md +231 -0
- package/demo/audio/generate-all-narrations.js +581 -0
- package/demo/audio/generate-narration.js +120 -56
- package/demo/audio/generate-voice-previews.js +140 -0
- package/demo/audio/narration-adam-timing.json +4675 -0
- package/demo/audio/narration-adam.mp3 +0 -0
- package/demo/audio/narration-creature-timing.json +4675 -0
- package/demo/audio/narration-creature.mp3 +0 -0
- package/demo/audio/narration-gaming-timing.json +4675 -0
- package/demo/audio/narration-gaming.mp3 +0 -0
- package/demo/audio/narration-hope-timing.json +4675 -0
- package/demo/audio/narration-hope.mp3 +0 -0
- package/demo/audio/narration-mark-timing.json +4675 -0
- package/demo/audio/narration-mark.mp3 +0 -0
- package/demo/audio/previews/manifest.json +30 -0
- package/demo/audio/previews/preview-creature.mp3 +0 -0
- package/demo/audio/previews/preview-gaming.mp3 +0 -0
- package/demo/audio/previews/preview-hope.mp3 +0 -0
- package/demo/audio/previews/preview-mark.mp3 +0 -0
- package/demo/audio/voices-manifest.json +50 -0
- package/demo/hustle-together/blog/gemini-vs-claude-widgets.html +30 -28
- package/demo/hustle-together/blog/interview-driven-api-development.html +37 -23
- package/demo/hustle-together/index.html +142 -109
- package/demo/workflow-demo.html +2618 -1036
- package/hooks/api-workflow-check.py +2 -0
- package/hooks/enforce-deep-research.py +180 -0
- package/hooks/enforce-disambiguation.py +149 -0
- package/hooks/enforce-documentation.py +187 -0
- package/hooks/enforce-environment.py +249 -0
- package/hooks/enforce-refactor.py +187 -0
- package/hooks/enforce-research.py +93 -46
- package/hooks/enforce-schema.py +186 -0
- package/hooks/enforce-scope.py +156 -0
- package/hooks/enforce-tdd-red.py +246 -0
- package/hooks/enforce-verify.py +186 -0
- package/hooks/periodic-reground.py +154 -0
- package/hooks/session-startup.py +151 -0
- package/hooks/track-tool-use.py +109 -17
- package/hooks/verify-after-green.py +282 -0
- package/package.json +3 -2
- package/scripts/collect-test-results.ts +404 -0
- package/scripts/extract-parameters.ts +483 -0
- package/scripts/generate-test-manifest.ts +520 -0
- package/templates/CLAUDE-SECTION.md +84 -0
- package/templates/api-dev-state.json +83 -8
- package/templates/api-test/page.tsx +315 -0
- package/templates/api-test/test-structure/route.ts +269 -0
- package/templates/research-index.json +6 -0
- package/templates/settings.json +59 -0
@@ -0,0 +1,186 @@
+#!/usr/bin/env python3
+"""
+Hook: PreToolUse for Write/Edit
+Purpose: Block refactoring until verification complete WITH USER GAP DECISION
+
+Phase 9 (Verify) requires:
+1. Re-read original documentation (after tests pass)
+2. Compare implementation to docs - find gaps
+3. SHOW gap analysis to user
+4. USE AskUserQuestion: "Fix gaps? [Y] / Skip? [n]"
+5. Loop back to Phase 7 if user wants fixes
+6. Only proceed to refactor when user decides
+
+Returns:
+- {"permissionDecision": "allow"} - Let the tool run
+- {"permissionDecision": "deny", "reason": "..."} - Block with explanation
+"""
+import json
+import sys
+from pathlib import Path
+
+STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+
+
+def main():
+    try:
+        input_data = json.load(sys.stdin)
+    except json.JSONDecodeError:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    tool_input = input_data.get("tool_input", {})
+    file_path = tool_input.get("file_path", "")
+
+    # Only enforce for API route files
+    is_api_file = "/api/" in file_path and file_path.endswith(".ts")
+
+    if not is_api_file:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Skip test files
+    if ".test." in file_path or "/__tests__/" in file_path or ".spec." in file_path:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    # Skip documentation/config files
+    if file_path.endswith(".md") or file_path.endswith(".json"):
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    if not STATE_FILE.exists():
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    try:
+        state = json.loads(STATE_FILE.read_text())
+    except json.JSONDecodeError:
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    endpoint = state.get("endpoint", "unknown")
+    phases = state.get("phases", {})
+    tdd_green = phases.get("tdd_green", {})
+    verify = phases.get("verify", {})
+
+    # Only enforce after TDD Green is complete
+    if tdd_green.get("status") != "complete":
+        # Let earlier hooks handle this
+        print(json.dumps({"permissionDecision": "allow"}))
+        sys.exit(0)
+
+    status = verify.get("status", "not_started")
+
+    if status != "complete":
+        user_question_asked = verify.get("user_question_asked", False)
+        user_decided = verify.get("user_decided", False)
+        gap_analysis_shown = verify.get("gap_analysis_shown", False)
+        re_research_done = verify.get("re_research_done", False)
+        gaps_found = verify.get("gaps_found", 0)
+        gaps_fixed = verify.get("gaps_fixed", 0)
+        gaps_skipped = verify.get("gaps_skipped", 0)
+        user_decision = verify.get("user_decision", None)
+
+        missing = []
+        if not re_research_done:
+            missing.append("Re-research original docs not done")
+        if not gap_analysis_shown:
+            missing.append("Gap analysis not shown to user")
+        if not user_question_asked:
+            missing.append("User gap decision question (AskUserQuestion not used)")
+        if not user_decided:
+            missing.append("User hasn't decided on gaps")
+
+        print(json.dumps({
+            "permissionDecision": "deny",
+            "reason": f"""❌ BLOCKED: Verification (Phase 9) not complete.
+
+Status: {status}
+Re-research done: {re_research_done}
+Gap analysis shown: {gap_analysis_shown}
+User question asked: {user_question_asked}
+User decided: {user_decided}
+User decision: {user_decision or "None yet"}
+Gaps found: {gaps_found}
+Gaps fixed: {gaps_fixed}
+Gaps skipped: {gaps_skipped}
+
+MISSING:
+{chr(10).join(f"  • {m}" for m in missing)}
+
+═══════════════════════════════════════════════════════════
+⚠️ GET USER DECISION ON IMPLEMENTATION GAPS
+═══════════════════════════════════════════════════════════
+
+REQUIRED STEPS:
+
+1. Re-read the ORIGINAL API documentation:
+   • Use Context7 or WebSearch with SAME queries from Phase 2
+   • Compare EVERY documented feature to your implementation
+   • Don't rely on memory - actually re-read the docs
+
+2. Create and SHOW gap analysis table:
+   ┌───────────────────────────────────────────────────────┐
+   │ VERIFICATION RESULTS                                   │
+   │                                                        │
+   │ │ Feature         │ In Docs │ Implemented │ Status   │ │
+   │ ├─────────────────┼─────────┼─────────────┼──────────│ │
+   │ │ domain param    │ Yes     │ Yes         │ ✓ Match  │ │
+   │ │ format option   │ Yes     │ Yes         │ ✓ Match  │ │
+   │ │ include_fonts   │ Yes     │ No          │ ❌ GAP   │ │
+   │ │ webhook_url     │ No      │ Yes         │ ⚠ Extra  │ │
+   │                                                        │
+   │ Found 1 gap in implementation.                         │
+   │                                                        │
+   │ Fix the gap? [Y] - Loop back to add missing feature    │
+   │ Skip? [n] - Document as intentional omission           │
+   └───────────────────────────────────────────────────────┘
+
+3. USE AskUserQuestion:
+   question: "I found {gaps_found} gap(s). How should I proceed?"
+   options: [
+     {{"value": "fix", "label": "Fix gaps - loop back to Red phase"}},
+     {{"value": "skip", "label": "Skip - these are intentional omissions"}},
+     {{"value": "partial", "label": "Fix some, skip others - [specify]"}}
+   ]
+
+4. If user says "fix":
+   • Loop back to Phase 7 (TDD Red)
+   • Write new tests for missing features
+   • Implement and verify again
+   • REPEAT until no gaps or user says skip
+
+5. If user says "skip":
+   • Document each skipped gap with reason
+   • Set verify.gaps_skipped = count
+   • Proceed to refactor
+
+6. After user decides:
+   • Set verify.user_decided = true
+   • Set verify.user_question_asked = true
+   • Set verify.gap_analysis_shown = true
+   • Set verify.re_research_done = true
+   • Set verify.user_decision = "fix" or "skip" or "partial"
+   • Set verify.status = "complete"
+
+WHY: Catch memory-based implementation errors BEFORE refactoring."""
+        }))
+        sys.exit(0)
+
+    # Verify complete
+    user_decision = verify.get("user_decision", "unknown")
+    print(json.dumps({
+        "permissionDecision": "allow",
+        "message": f"""✅ Verification complete.
+User decision: {user_decision}
+Gaps found: {gaps_found}
+Gaps fixed: {gaps_fixed}
+Gaps skipped (intentional): {gaps_skipped}
+Proceeding to refactor phase."""
+    }))
+    sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
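To see this gate in action without running a full session, the sketch below seeds a state file in which TDD Green is complete but Phase 9 has not run, then pipes the hook a fake PreToolUse payload for an API route. It is a sketch only: the hook path (.claude/hooks/enforce-verify.py), running from the project root, and the "convert" endpoint are assumptions inferred from the file list above, not stated in the hunk itself.

# Sketch only: drive the verification gate by hand.
# Assumptions: hook installed at .claude/hooks/enforce-verify.py, script run
# from the project root, placeholder endpoint/file names.
import json
import subprocess
from pathlib import Path

Path(".claude").mkdir(exist_ok=True)
state = {
    "endpoint": "convert",
    "phases": {
        "tdd_green": {"status": "complete"},   # tests are green...
        "verify": {"status": "not_started"},   # ...but Phase 9 has not been done
    },
}
Path(".claude/api-dev-state.json").write_text(json.dumps(state, indent=2))

# Fake PreToolUse payload for an API route file, fed to the hook on stdin.
payload = json.dumps({"tool_input": {"file_path": "src/app/api/convert/route.ts"}})
result = subprocess.run(
    ["python3", ".claude/hooks/enforce-verify.py"],
    input=payload, capture_output=True, text=True,
)
print(json.loads(result.stdout)["permissionDecision"])  # "deny" until verify is marked complete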
@@ -0,0 +1,154 @@
+#!/usr/bin/env python3
+"""
+Hook: PostToolUse (for periodic re-grounding)
+Purpose: Inject context reminders every N turns to prevent context dilution
+
+This hook tracks turn count and periodically injects a summary of:
+- Current endpoint and phase
+- Key decisions from interview
+- Research cache status
+- Important file locations
+
+The goal is to keep Claude grounded during long sessions where
+the original CLAUDE.md context may get diluted.
+
+Configuration:
+- REGROUND_INTERVAL: Number of turns between re-grounding (default: 7)
+
+Returns:
+- {"continue": true} with optional additionalContext on reground turns
+"""
+import json
+import sys
+import os
+from datetime import datetime
+from pathlib import Path
+
+# Configuration
+REGROUND_INTERVAL = 7  # Re-ground every N turns
+
+# State file is in .claude/ directory (sibling to hooks/)
+STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+
+
+def main():
+    # Read hook input from stdin
+    try:
+        input_data = json.load(sys.stdin)
+    except json.JSONDecodeError:
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    # Check if state file exists
+    if not STATE_FILE.exists():
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    try:
+        state = json.loads(STATE_FILE.read_text())
+    except json.JSONDecodeError:
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    # Increment turn count
+    turn_count = state.get("turn_count", 0) + 1
+    state["turn_count"] = turn_count
+    state["last_turn_timestamp"] = datetime.now().isoformat()
+
+    # Check if we should re-ground
+    should_reground = turn_count % REGROUND_INTERVAL == 0
+
+    if should_reground and state.get("endpoint"):
+        # Build re-grounding context
+        context_parts = []
+        context_parts.append(f"## Re-Grounding Reminder (Turn {turn_count})")
+        context_parts.append("")
+
+        endpoint = state.get("endpoint", "unknown")
+        context_parts.append(f"**Active Endpoint:** {endpoint}")
+
+        # Get current phase
+        phases = state.get("phases", {})
+        phase_order = [
+            "disambiguation", "scope", "research_initial", "interview",
+            "research_deep", "schema_creation", "environment_check",
+            "tdd_red", "tdd_green", "verify", "tdd_refactor", "documentation"
+        ]
+
+        current_phase = None
+        completed_phases = []
+        for phase_name in phase_order:
+            phase = phases.get(phase_name, {})
+            status = phase.get("status", "not_started")
+            if status == "complete":
+                completed_phases.append(phase_name)
+            elif status == "in_progress" and not current_phase:
+                current_phase = phase_name
+
+        if not current_phase:
+            # Find first not_started phase
+            for phase_name in phase_order:
+                phase = phases.get(phase_name, {})
+                if phase.get("status", "not_started") == "not_started":
+                    current_phase = phase_name
+                    break
+
+        context_parts.append(f"**Current Phase:** {current_phase or 'documentation'}")
+        context_parts.append(f"**Completed:** {', '.join(completed_phases) if completed_phases else 'None'}")
+
+        # Key decisions summary
+        interview = phases.get("interview", {})
+        decisions = interview.get("decisions", {})
+        if decisions:
+            context_parts.append("")
+            context_parts.append("**Key Decisions:**")
+            for key, value in list(decisions.items())[:5]:  # Limit to 5 key decisions
+                response = value.get("value", value.get("response", "N/A"))
+                if response:
+                    context_parts.append(f"  - {key}: {str(response)[:50]}")
+
+        # Research freshness warning
+        research_index = state.get("research_index", {})
+        if endpoint in research_index:
+            entry = research_index[endpoint]
+            days_old = entry.get("days_old", 0)
+            if days_old > 7:
+                context_parts.append("")
+                context_parts.append(f"**WARNING:** Research is {days_old} days old. Consider re-researching.")
+
+        # File reminders
+        context_parts.append("")
+        context_parts.append("**Key Files:** .claude/api-dev-state.json, .claude/research/")
+
+        # Add to reground history
+        reground_history = state.setdefault("reground_history", [])
+        reground_history.append({
+            "turn": turn_count,
+            "timestamp": datetime.now().isoformat(),
+            "phase": current_phase
+        })
+        # Keep only last 10 reground events
+        state["reground_history"] = reground_history[-10:]
+
+        # Save state
+        STATE_FILE.write_text(json.dumps(state, indent=2))
+
+        # Output with context injection
+        output = {
+            "continue": True,
+            "hookSpecificOutput": {
+                "hookEventName": "PostToolUse",
+                "additionalContext": "\n".join(context_parts)
+            }
+        }
+        print(json.dumps(output))
+    else:
+        # Just update turn count and continue
+        STATE_FILE.write_text(json.dumps(state, indent=2))
+        print(json.dumps({"continue": True}))

+    sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
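The same kind of manual check works for the re-grounding hook: seed a state whose turn_count is one short of a multiple of REGROUND_INTERVAL and the next tracked turn should come back with an additionalContext block. Again a sketch only; the hook path (.claude/hooks/periodic-reground.py), running from the project root, and the placeholder values are assumptions, not taken from the diff.

# Sketch only: trigger one re-grounding turn by hand.
# Assumptions: hook installed at .claude/hooks/periodic-reground.py, script
# run from the project root, placeholder endpoint name.
import json
import subprocess
from pathlib import Path

Path(".claude").mkdir(exist_ok=True)
state = {
    "endpoint": "convert",
    "turn_count": 6,  # the next tracked turn is 7, and 7 % REGROUND_INTERVAL == 0
    "phases": {"interview": {"status": "in_progress"}},
}
Path(".claude/api-dev-state.json").write_text(json.dumps(state, indent=2))

out = subprocess.run(
    ["python3", ".claude/hooks/periodic-reground.py"],
    input="{}", capture_output=True, text=True,
)
result = json.loads(out.stdout)
print(result["hookSpecificOutput"]["additionalContext"])  # the injected reminder text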
@@ -0,0 +1,151 @@
+#!/usr/bin/env python3
+"""
+Hook: SessionStart
+Purpose: Inject current state and context at the beginning of each session
+
+This hook runs when Claude Code starts a new session or resumes.
+It reads the api-dev-state.json and injects a summary into Claude's context,
+helping to re-ground Claude on:
+- Current endpoint being developed
+- Which phases are complete/in-progress
+- Key decisions from interviews
+- Research cache location and freshness
+
+Returns:
+- JSON with additionalContext to inject into Claude's context
+"""
+import json
+import sys
+import os
+from datetime import datetime
+from pathlib import Path
+
+# State file is in .claude/ directory (sibling to hooks/)
+STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
+RESEARCH_INDEX = Path(__file__).parent.parent / "research" / "index.json"
+
+
+def main():
+    # Read hook input from stdin
+    try:
+        input_data = json.load(sys.stdin)
+    except json.JSONDecodeError:
+        input_data = {}
+
+    cwd = input_data.get("cwd", os.getcwd())
+
+    # Check if state file exists
+    if not STATE_FILE.exists():
+        # No active workflow - just continue without injection
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    try:
+        state = json.loads(STATE_FILE.read_text())
+    except json.JSONDecodeError:
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    # Check if there's an active workflow
+    endpoint = state.get("endpoint")
+    if not endpoint:
+        # No active endpoint - just continue
+        print(json.dumps({"continue": True}))
+        sys.exit(0)
+
+    # Build context summary
+    context_parts = []
+    context_parts.append("## API Development Session Context")
+    context_parts.append("")
+    context_parts.append(f"**Active Endpoint:** {endpoint}")
+
+    # Get phase status
+    phases = state.get("phases", {})
+    completed = []
+    in_progress = []
+    not_started = []
+
+    phase_order = [
+        "disambiguation", "scope", "research_initial", "interview",
+        "research_deep", "schema_creation", "environment_check",
+        "tdd_red", "tdd_green", "verify", "tdd_refactor", "documentation"
+    ]
+
+    for phase_name in phase_order:
+        phase = phases.get(phase_name, {})
+        status = phase.get("status", "not_started")
+        if status == "complete":
+            completed.append(phase_name)
+        elif status == "in_progress":
+            in_progress.append(phase_name)
+        else:
+            not_started.append(phase_name)
+
+    context_parts.append("")
+    context_parts.append("**Phase Status:**")
+    if completed:
+        context_parts.append(f"  - Completed: {', '.join(completed)}")
+    if in_progress:
+        context_parts.append(f"  - In Progress: {', '.join(in_progress)}")
+
+    # Current phase (first in_progress or first not_started)
+    current_phase = in_progress[0] if in_progress else (not_started[0] if not_started else "documentation")
+    context_parts.append(f"  - Current: **{current_phase}**")
+
+    # Key decisions from interview
+    interview = phases.get("interview", {})
+    decisions = interview.get("decisions", {})
+    if decisions:
+        context_parts.append("")
+        context_parts.append("**Key Interview Decisions:**")
+        for key, value in decisions.items():
+            response = value.get("response", value.get("value", "N/A"))
+            if response:
+                context_parts.append(f"  - {key}: {str(response)[:100]}")
+
+    # Research cache info
+    research_index = state.get("research_index", {})
+    if endpoint in research_index:
+        entry = research_index[endpoint]
+        days_old = entry.get("days_old", 0)
+        context_parts.append("")
+        context_parts.append("**Research Cache:**")
+        context_parts.append(f"  - Location: .claude/research/{endpoint}/CURRENT.md")
+        context_parts.append(f"  - Last Updated: {entry.get('last_updated', 'Unknown')}")
+        if days_old > 7:
+            context_parts.append(f"  - WARNING: Research is {days_old} days old. Consider re-researching.")
+
+    # Turn count for re-grounding awareness
+    turn_count = state.get("turn_count", 0)
+    if turn_count > 0:
+        context_parts.append("")
+        context_parts.append(f"**Session Info:** Turn {turn_count} of previous session")
+
+    # Important file locations
+    context_parts.append("")
+    context_parts.append("**Key Files:**")
+    context_parts.append("  - State: .claude/api-dev-state.json")
+    context_parts.append("  - Research: .claude/research/")
+    context_parts.append("  - Manifest: src/app/api-test/api-tests-manifest.json (if exists)")
+
+    # Workflow reminder
+    context_parts.append("")
+    context_parts.append("**Workflow Reminder:** This project uses interview-driven API development.")
+    context_parts.append("Phases loop back if verification fails. Research before answering API questions.")
+
+    # Build the output
+    additional_context = "\n".join(context_parts)
+
+    output = {
+        "hookSpecificOutput": {
+            "hookEventName": "SessionStart",
+            "additionalContext": additional_context
+        }
+    }
+
+    print(json.dumps(output))
+    sys.exit(0)
+
+
+if __name__ == "__main__":
+    main()
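None of these hooks fires until Claude Code is told to run them; that wiring ships in templates/settings.json (+59 lines in this release), whose contents are not shown in this diff. The snippet below is therefore only a guess at what such a registration could look like, written as a Python dict for consistency with the hooks above; every event name, matcher, and path here is an assumption, and the shipped template should be treated as authoritative.

# Sketch only: one possible shape for a .claude/settings.json that wires the
# hooks above. Event names, matchers, and hook paths are assumptions; the
# package's own templates/settings.json is the real wiring.
import json
from pathlib import Path

settings = {
    "hooks": {
        "SessionStart": [
            {"hooks": [{"type": "command", "command": "python3 .claude/hooks/session-startup.py"}]},
        ],
        "PreToolUse": [
            {"matcher": "Write|Edit",
             "hooks": [{"type": "command", "command": "python3 .claude/hooks/enforce-verify.py"}]},
        ],
        "PostToolUse": [
            {"matcher": "WebSearch|WebFetch",
             "hooks": [{"type": "command", "command": "python3 .claude/hooks/track-tool-use.py"}]},
            {"hooks": [{"type": "command", "command": "python3 .claude/hooks/periodic-reground.py"}]},
        ],
    }
}
Path(".claude/settings.json").write_text(json.dumps(settings, indent=2))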
package/hooks/track-tool-use.py
CHANGED
@@ -1,13 +1,16 @@
 #!/usr/bin/env python3
 """
-Hook: PostToolUse for WebSearch, WebFetch, Context7 MCP
-Purpose: Track all research activity in the state file
+Hook: PostToolUse for WebSearch, WebFetch, Context7 MCP, AskUserQuestion
+Purpose: Track all research activity and turn counts in the state file
 
 This hook runs AFTER Claude uses research tools (WebSearch, WebFetch, Context7).
 It logs each research action to api-dev-state.json for:
 - Auditing what research was done
 - Verifying prerequisites before allowing implementation
 - Providing visibility to the user
+- Tracking turn counts for periodic re-grounding
+
+Version: 3.0.0
 
 Returns:
 - {"continue": true} - Always continues (logging only, no blocking)
@@ -20,6 +23,9 @@ from pathlib import Path
 # State file is in .claude/ directory (sibling to hooks/)
 STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
 
+# Re-grounding interval (also used by periodic-reground.py)
+REGROUND_INTERVAL = 7
+
 
 def main():
     # Read hook input from stdin
@@ -52,6 +58,14 @@ def main():
     else:
         state = create_initial_state()
 
+    # ========================================
+    # TURN COUNTING (for periodic re-grounding)
+    # ========================================
+    # Increment turn count on every tracked tool use
+    turn_count = state.get("turn_count", 0) + 1
+    state["turn_count"] = turn_count
+    state["last_turn_timestamp"] = datetime.now().isoformat()
+
     # Get phases
     phases = state.setdefault("phases", {})
 
@@ -281,30 +295,108 @@
 
 
 def create_initial_state():
-    """Create initial state structure"""
+    """Create initial state structure (v3.0.0)"""
     return {
-        "version": "
+        "version": "3.0.0",
         "created_at": datetime.now().isoformat(),
+        "endpoint": None,
+        "library": None,
+        "session_id": None,
+        "turn_count": 0,
+        "last_turn_timestamp": None,
+        "research_queries": [],
+        "prompt_detections": [],
         "phases": {
-            "
-
-
-
-
-            "
-
-
-
+            "disambiguation": {
+                "status": "not_started",
+                "clarified": None,
+                "search_variations": [],
+                "description": "Pre-research disambiguation to clarify ambiguous requests"
+            },
+            "scope": {
+                "status": "not_started",
+                "confirmed": False,
+                "description": "Initial scope understanding and confirmation"
+            },
+            "research_initial": {
+                "status": "not_started",
+                "sources": [],
+                "summary_approved": False,
+                "description": "Context7/WebSearch research for live documentation"
+            },
+            "interview": {
+                "status": "not_started",
+                "questions": [],
+                "user_question_count": 0,
+                "structured_question_count": 0,
+                "decisions": {},
+                "description": "Structured interview about requirements (generated FROM research)"
+            },
+            "research_deep": {
+                "status": "not_started",
+                "sources": [],
+                "proposed_searches": [],
+                "approved_searches": [],
+                "skipped_searches": [],
+                "description": "Deep dive based on interview answers (adaptive, not shotgun)"
+            },
+            "schema_creation": {
+                "status": "not_started",
+                "schema_file": None,
+                "schema_approved": False,
+                "description": "Zod schema creation from research"
+            },
+            "environment_check": {
+                "status": "not_started",
+                "keys_verified": [],
+                "keys_missing": [],
+                "confirmed": False,
+                "description": "API key and environment verification"
+            },
+            "tdd_red": {
+                "status": "not_started",
+                "test_file": None,
+                "test_count": 0,
+                "test_matrix_approved": False,
+                "description": "Write failing tests first"
+            },
+            "tdd_green": {
+                "status": "not_started",
+                "implementation_file": None,
+                "all_tests_passing": False,
+                "description": "Minimal implementation to pass tests"
+            },
+            "verify": {
+                "status": "not_started",
+                "gaps_found": 0,
+                "gaps_fixed": 0,
+                "intentional_omissions": [],
+                "re_research_done": False,
+                "description": "Re-research after Green to verify implementation matches docs"
+            },
+            "tdd_refactor": {
+                "status": "not_started",
+                "description": "Code cleanup while keeping tests green"
+            },
+            "documentation": {
+                "status": "not_started",
+                "files_updated": [],
+                "manifest_updated": False,
+                "openapi_updated": False,
+                "research_cached": False,
+                "description": "Update manifests, OpenAPI, cache research"
+            }
         },
         "verification": {
             "all_sources_fetched": False,
             "schema_matches_docs": False,
             "tests_cover_params": False,
-            "all_tests_passing": False
+            "all_tests_passing": False,
+            "coverage_percent": None,
+            "post_green_verification": False
         },
-        "
-        "
+        "research_index": {},
+        "reground_history": []
     }
 
 