@hustle-together/api-dev-tools 3.12.16 → 4.5.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/adr-requests/.gitkeep +10 -0
- package/.claude/agents/adr-researcher.md +109 -0
- package/.claude/agents/visual-analyzer.md +183 -0
- package/.claude/api-dev-state.json +10 -0
- package/.claude/documentation-audit.json +114 -0
- package/.claude/registry.json +289 -0
- package/.claude/settings.json +45 -1
- package/.claude/settings.local.json +1 -7
- package/.claude/workflow-logs/None.json +49 -0
- package/.claude/workflow-logs/session-20251230-143727.json +106 -0
- package/.skills/adr-deep-research/SKILL.md +351 -0
- package/.skills/api-create/SKILL.md +34 -20
- package/.skills/api-research/SKILL.md +130 -0
- package/.skills/docs-update/SKILL.md +205 -0
- package/.skills/hustle-brand/SKILL.md +368 -0
- package/.skills/hustle-build/SKILL.md +365 -38
- package/.skills/parallel-spawn/SKILL.md +212 -0
- package/.skills/ralph-continue/SKILL.md +151 -0
- package/.skills/ralph-loop/SKILL.md +341 -0
- package/.skills/ralph-status/SKILL.md +87 -0
- package/.skills/refactor/SKILL.md +59 -0
- package/.skills/shadcn/SKILL.md +522 -0
- package/.skills/test-all/SKILL.md +210 -0
- package/.skills/test-builds/SKILL.md +208 -0
- package/.skills/test-debug/SKILL.md +212 -0
- package/.skills/test-e2e/SKILL.md +168 -0
- package/.skills/test-review/SKILL.md +707 -0
- package/.skills/test-unit/SKILL.md +143 -0
- package/.skills/test-visual/SKILL.md +301 -0
- package/.skills/token-report/SKILL.md +132 -0
- package/CHANGELOG.md +488 -0
- package/README.md +346 -53
- package/bin/cli.js +359 -123
- package/hooks/__pycache__/api-workflow-check.cpython-314.pyc +0 -0
- package/hooks/__pycache__/auto-answer.cpython-314.pyc +0 -0
- package/hooks/__pycache__/cache-research.cpython-314.pyc +0 -0
- package/hooks/__pycache__/check-api-routes.cpython-314.pyc +0 -0
- package/hooks/__pycache__/check-playwright-setup.cpython-314.pyc +0 -0
- package/hooks/__pycache__/check-storybook-setup.cpython-314.pyc +0 -0
- package/hooks/__pycache__/check-update.cpython-314.pyc +0 -0
- package/hooks/__pycache__/completion-promise-detector.cpython-314.pyc +0 -0
- package/hooks/__pycache__/context-capacity-warning.cpython-314.pyc +0 -0
- package/hooks/__pycache__/detect-interruption.cpython-314.pyc +0 -0
- package/hooks/__pycache__/docs-update-check.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-a11y-audit.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-brand-guide.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-component-type-confirm.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-deep-research.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-disambiguation.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-documentation.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-dry-run.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-environment.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-external-research.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-freshness.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-interview.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-page-components.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-page-data-schema.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-questions-sourced.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-refactor.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-research.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-schema-from-interview.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-schema.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-scope.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-tdd-red.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-ui-disambiguation.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-ui-interview.cpython-314.pyc +0 -0
- package/hooks/__pycache__/enforce-verify.cpython-314.pyc +0 -0
- package/hooks/__pycache__/generate-adr-options.cpython-314.pyc +0 -0
- package/hooks/__pycache__/generate-manifest-entry.cpython-314.pyc +0 -0
- package/hooks/__pycache__/hook_utils.cpython-314.pyc +0 -0
- package/hooks/__pycache__/notify-input-needed.cpython-314.pyc +0 -0
- package/hooks/__pycache__/notify-phase-complete.cpython-314.pyc +0 -0
- package/hooks/__pycache__/ntfy-on-question.cpython-314.pyc +0 -0
- package/hooks/__pycache__/orchestrator-completion.cpython-314.pyc +0 -0
- package/hooks/__pycache__/orchestrator-handoff.cpython-314.pyc +0 -0
- package/hooks/__pycache__/orchestrator-session-startup.cpython-314.pyc +0 -0
- package/hooks/__pycache__/parallel-orchestrator.cpython-314.pyc +0 -0
- package/hooks/__pycache__/periodic-reground.cpython-314.pyc +0 -0
- package/hooks/__pycache__/project-document-prompt.cpython-314.pyc +0 -0
- package/hooks/__pycache__/remote-question-proxy.cpython-314.pyc +0 -0
- package/hooks/__pycache__/remote-question-server.cpython-314.pyc +0 -0
- package/hooks/__pycache__/run-code-review.cpython-314.pyc +0 -0
- package/hooks/__pycache__/run-visual-qa.cpython-314.pyc +0 -0
- package/hooks/__pycache__/session-logger.cpython-314.pyc +0 -0
- package/hooks/__pycache__/session-startup.cpython-314.pyc +0 -0
- package/hooks/__pycache__/track-scope-coverage.cpython-314.pyc +0 -0
- package/hooks/__pycache__/track-token-usage.cpython-314.pyc +0 -0
- package/hooks/__pycache__/track-tool-use.cpython-314.pyc +0 -0
- package/hooks/__pycache__/update-adr-decision.cpython-314.pyc +0 -0
- package/hooks/__pycache__/update-api-showcase.cpython-314.pyc +0 -0
- package/hooks/__pycache__/update-registry.cpython-314.pyc +0 -0
- package/hooks/__pycache__/update-ui-showcase.cpython-314.pyc +0 -0
- package/hooks/__pycache__/verify-after-green.cpython-314.pyc +0 -0
- package/hooks/__pycache__/verify-implementation.cpython-314.pyc +0 -0
- package/hooks/api-workflow-check.py +34 -0
- package/hooks/auto-answer.py +97 -20
- package/{.claude/hooks → hooks}/completion-promise-detector.py +0 -0
- package/{.claude/hooks → hooks}/context-capacity-warning.py +0 -0
- package/{.claude/hooks → hooks}/docs-update-check.py +0 -0
- package/{.claude/hooks → hooks}/enforce-dry-run.py +0 -0
- package/hooks/enforce-external-research.py +25 -0
- package/hooks/enforce-interview.py +20 -0
- package/{.claude/hooks → hooks}/generate-adr-options.py +0 -0
- package/{.claude/hooks → hooks}/hook_utils.py +0 -0
- package/hooks/ntfy-on-question.py +15 -2
- package/hooks/orchestrator-handoff.py +81 -3
- package/{.claude/hooks → hooks}/parallel-orchestrator.py +0 -0
- package/hooks/periodic-reground.py +40 -0
- package/{.claude/hooks → hooks}/remote-question-server.py +0 -0
- package/hooks/run-code-review.py +176 -29
- package/{.claude/hooks → hooks}/run-visual-qa.py +0 -0
- package/hooks/session-logger.py +27 -1
- package/hooks/session-startup.py +113 -0
- package/{.claude/hooks → hooks}/update-adr-decision.py +0 -0
- package/package.json +1 -1
- package/templates/.skills/hustle-interview/SKILL.md +174 -0
- package/templates/adr-viewer/_components/ADRViewer.tsx +326 -0
- package/templates/api-dev-state.json +33 -1
- package/templates/brand-page/page.tsx +645 -0
- package/templates/component/Component.visual.spec.ts +30 -24
- package/templates/eslint-plugin-zod-schema/index.js +446 -0
- package/templates/eslint-plugin-zod-schema/package.json +26 -0
- package/templates/github-workflows/security.yml +274 -0
- package/templates/hustle-build-defaults.json +53 -1
- package/templates/page/page.e2e.test.ts +30 -26
- package/templates/performance-budgets.json +63 -5
- package/templates/registry.json +279 -3
- package/templates/review-dashboard/page.tsx +510 -0
- package/templates/settings.json +74 -7
- package/templates/ui-showcase/_components/UIShowcase.tsx +47 -0
- package/templates/ui-showcase/_components/VisualTestingDashboard.tsx +579 -0
- package/.claude/commands/hustle-combine.md +0 -1089
- package/.claude/commands/hustle-ui-create-page.md +0 -1078
- package/.claude/commands/hustle-ui-create.md +0 -1058
- package/.claude/hooks/auto-answer.py +0 -305
- package/.claude/hooks/cache-research.py +0 -337
- package/.claude/hooks/check-api-routes.py +0 -168
- package/.claude/hooks/check-playwright-setup.py +0 -103
- package/.claude/hooks/check-storybook-setup.py +0 -81
- package/.claude/hooks/check-update.py +0 -132
- package/.claude/hooks/detect-interruption.py +0 -165
- package/.claude/hooks/enforce-a11y-audit.py +0 -202
- package/.claude/hooks/enforce-brand-guide.py +0 -241
- package/.claude/hooks/enforce-component-type-confirm.py +0 -97
- package/.claude/hooks/enforce-freshness.py +0 -184
- package/.claude/hooks/enforce-page-components.py +0 -186
- package/.claude/hooks/enforce-page-data-schema.py +0 -155
- package/.claude/hooks/enforce-questions-sourced.py +0 -146
- package/.claude/hooks/enforce-schema-from-interview.py +0 -248
- package/.claude/hooks/enforce-ui-disambiguation.py +0 -108
- package/.claude/hooks/enforce-ui-interview.py +0 -130
- package/.claude/hooks/generate-manifest-entry.py +0 -1161
- package/.claude/hooks/lib/__init__.py +0 -1
- package/.claude/hooks/lib/greptile.py +0 -355
- package/.claude/hooks/lib/ntfy.py +0 -209
- package/.claude/hooks/notify-input-needed.py +0 -73
- package/.claude/hooks/notify-phase-complete.py +0 -90
- package/.claude/hooks/ntfy-on-question.py +0 -240
- package/.claude/hooks/orchestrator-completion.py +0 -313
- package/.claude/hooks/orchestrator-handoff.py +0 -267
- package/.claude/hooks/orchestrator-session-startup.py +0 -146
- package/.claude/hooks/run-code-review.py +0 -393
- package/.claude/hooks/session-logger.py +0 -323
- package/.claude/hooks/test-orchestrator-reground.py +0 -248
- package/.claude/hooks/track-scope-coverage.py +0 -220
- package/.claude/hooks/track-token-usage.py +0 -121
- package/.claude/hooks/update-api-showcase.py +0 -161
- package/.claude/hooks/update-registry.py +0 -352
- package/.claude/hooks/update-ui-showcase.py +0 -224
- package/.claude/test-auto-answer-bot.py +0 -183
- package/.claude/test-completion-detector.py +0 -263
- package/.claude/test-orchestrator-state.json +0 -20
- package/.claude/test-orchestrator.sh +0 -271
- /package/{.claude/commands → commands}/hustle-build.md +0 -0
- /package/{.claude/hooks → hooks}/lib/__pycache__/__init__.cpython-314.pyc +0 -0
- /package/{.claude/hooks → hooks}/lib/__pycache__/greptile.cpython-314.pyc +0 -0
- /package/{.claude/hooks → hooks}/lib/__pycache__/ntfy.cpython-314.pyc +0 -0
- /package/{.claude/hooks → hooks}/project-document-prompt.py +0 -0
- /package/{.claude/hooks → hooks}/remote-question-proxy.py +0 -0
- /package/{.claude/hooks → hooks}/update-testing-checklist.py +0 -0
|
@@ -1,305 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
"""
|
|
3
|
-
Auto-answer hook for --auto mode.
|
|
4
|
-
|
|
5
|
-
This hook intercepts AskUserQuestion calls when running in auto-mode
|
|
6
|
-
and either:
|
|
7
|
-
1. Uses pre-configured defaults from hustle-build-defaults.json
|
|
8
|
-
2. Spawns a Haiku sub-agent to pick the most comprehensive option
|
|
9
|
-
|
|
10
|
-
Hook Type: PreToolUse (matcher: AskUserQuestion)
|
|
11
|
-
|
|
12
|
-
Updated in v4.5.0:
|
|
13
|
-
- Use shared hook_utils for logging
|
|
14
|
-
- Log all auto-answered questions to workflow logs
|
|
15
|
-
"""
|
|
16
|
-
|
|
17
|
-
import json
|
|
18
|
-
import os
|
|
19
|
-
import sys
|
|
20
|
-
from pathlib import Path
|
|
21
|
-
|
|
22
|
-
# Import shared utilities
|
|
23
|
-
try:
|
|
24
|
-
from hook_utils import log_workflow_event
|
|
25
|
-
UTILS_AVAILABLE = True
|
|
26
|
-
except ImportError:
|
|
27
|
-
UTILS_AVAILABLE = False
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
def load_state():
    """Load workflow state to check if in auto mode.

    Returns a (state_dict, kind) pair where kind is "build" for
    hustle-build-state.json or "workflow" for api-dev-state.json, or
    (None, None) when neither file declares mode == "auto".
    """
    project_dir = os.environ.get("CLAUDE_PROJECT_DIR", ".")
    claude_dir = Path(project_dir) / ".claude"

    # hustle-build state takes precedence over api-dev state.
    candidates = (
        ("hustle-build-state.json", "build"),
        ("api-dev-state.json", "workflow"),
    )
    for filename, kind in candidates:
        state_path = claude_dir / filename
        if not state_path.exists():
            continue
        try:
            parsed = json.loads(state_path.read_text())
            if parsed.get("mode") == "auto":
                return parsed, kind
        except Exception:
            # Unreadable or malformed state is treated as absent.
            pass

    return None, None
|
57
|
-
def load_defaults():
    """Load pre-configured default answers.

    Project-specific .claude/hustle-build-defaults.json wins; the shipped
    templates/ copy is the fallback. Returns {} when neither is readable.
    """
    project_dir = Path(os.environ.get("CLAUDE_PROJECT_DIR", "."))

    candidates = (
        project_dir / ".claude" / "hustle-build-defaults.json",
        project_dir / "templates" / "hustle-build-defaults.json",
    )
    for candidate in candidates:
        if not candidate.exists():
            continue
        try:
            return json.loads(candidate.read_text())
        except Exception:
            # Unreadable/corrupt file: fall through to the next candidate.
            continue

    return {}
|
80
|
-
def is_autonomous_enabled():
    """Check if autonomous mode is enabled by default in settings.

    Autonomy requires BOTH flags in the "autonomous" section of the
    defaults file: "enabled" and "skip_interviews".
    """
    cfg = load_defaults().get("autonomous", {})
    return cfg.get("enabled", False) and cfg.get("skip_interviews", False)
|
87
|
-
def find_comprehensive_option(options):
    """Pick the index of the most comprehensive option.

    Scoring favors:
    - affirmative wording ("yes", "proceed", ...) for phase-exit questions (+25 each)
    - comprehensive wording ("all", "full", "complete", ...) (+10 each)
    - an explicit "(Recommended)" marker in the label (+20)
    - longer descriptions (more listed features) as a small tiebreaker
    and heavily penalizes negative wording ("no", "skip", ...) (-50 each).

    Args:
        options: List of {"label": str, "description": str} dicts.

    Returns:
        0-based index of the highest-scoring option, or None when
        options is empty/None. Ties keep the earliest option.
    """
    import re  # local import: only this scorer needs regex matching

    if not options:
        return None

    comprehensive_keywords = [
        "all", "full", "complete", "comprehensive", "everything",
        "maximum", "extensive", "detailed", "thorough", "wcag-aa"
    ]

    # Affirmative keywords for phase exit questions
    affirmative_keywords = [
        "yes", "proceed", "continue", "approve", "confirm",
        "accept", "ready", "go ahead", "move forward",
        "auto", "defaults", "use auto", "use defaults"
    ]

    # Negative keywords to avoid
    negative_keywords = [
        "no", "skip", "cancel", "stop", "more research", "not ready"
    ]

    def count_hits(keywords, text):
        # Bug fix: whole-word matching. The previous substring test
        # mis-scored options: "no" matched "now"/"innovative", "all"
        # matched "install"/"small", "yes" matched "bayes".
        return sum(
            1 for kw in keywords
            if re.search(r"\b" + re.escape(kw) + r"\b", text)
        )

    # Score each option
    scored = []
    for i, opt in enumerate(options):
        label = opt.get("label", "").lower()
        description = opt.get("description", "").lower()
        text = f"{label} {description}"

        score = 0.0
        score -= 50 * count_hits(negative_keywords, text)
        score += 10 * count_hits(comprehensive_keywords, text)
        score += 25 * count_hits(affirmative_keywords, text)

        # Authors mark the preferred choice with a "(Recommended)" suffix.
        # (label is already lowercased above.)
        if "recommended" in label:
            score += 20

        # Prefer options with more content (longer descriptions = more features)
        score += len(description) / 50

        scored.append((i, score))

    # Highest score wins; Python's stable sort keeps earlier options on ties.
    scored.sort(key=lambda item: item[1], reverse=True)
    return scored[0][0]
|
162
|
-
def get_question_key(questions):
    """Derive a defaults-lookup key from the first question's header.

    The header is lowercased with spaces replaced by underscores
    (e.g. "API Type" -> "api_type"). Returns None for an empty list.
    """
    if not questions:
        return None
    first = questions[0]
    return first.get("header", "").lower().replace(" ", "_")
|
172
|
-
def main():
    """PreToolUse hook body for AskUserQuestion.

    Reads the pending tool call from CLAUDE_TOOL_INPUT. When auto mode is
    active (explicit workflow state, or autonomy enabled via defaults), it
    answers the question itself and emits {"continue": False, "reason": ...}
    so the question UI never appears; otherwise it emits {"continue": True}.
    """
    # Get tool input from environment
    tool_input = os.environ.get("CLAUDE_TOOL_INPUT", "{}")

    try:
        input_data = json.loads(tool_input)
    except Exception:
        # Malformed input: never block the tool call.
        print(json.dumps({"continue": True}))
        return

    # Check if in auto mode (explicit flag OR defaults enabled)
    # NOTE(review): state_type ("build"/"workflow") is unused below.
    state, state_type = load_state()
    autonomous_by_default = is_autonomous_enabled()

    if not state and not autonomous_by_default:
        # Not in auto mode and autonomous not enabled, continue normally
        print(json.dumps({"continue": True}))
        return

    # If no state but autonomous is enabled, create a minimal state
    if not state and autonomous_by_default:
        state = {"mode": "auto", "source": "defaults"}

    # Load defaults
    defaults = load_defaults()

    questions = input_data.get("questions", [])
    if not questions:
        print(json.dumps({"continue": True}))
        return

    # Try to find pre-configured answer.
    # question_key is derived from the FIRST question's header only.
    question_key = get_question_key(questions)
    answers = {}

    for q in questions:
        header = q.get("header", "")
        options = q.get("options", [])
        question_text = q.get("question", "")

        # Check defaults first: the first-question key, then this
        # question's own normalized header.
        default_answer = None
        if question_key and question_key in defaults:
            default_answer = defaults[question_key]
        elif header.lower().replace(" ", "_") in defaults:
            default_answer = defaults[header.lower().replace(" ", "_")]

        if default_answer is not None:
            # Use pre-configured default
            answers[question_text] = default_answer
        else:
            # Auto-select comprehensive option.
            # NOTE(review): answers is keyed by question text, so duplicate
            # question strings overwrite each other — confirm acceptable.
            best_idx = find_comprehensive_option(options)
            if best_idx is not None and options:
                answers[question_text] = options[best_idx].get("label", "")

    if answers:
        # Log the auto-answer (best-effort; never raises)
        log_auto_answer(state, questions, answers)

        # Get the first question and answer for display
        first_question = questions[0] if questions else {}
        header = first_question.get("header", "Question")
        question_text = first_question.get("question", "")
        answer = list(answers.values())[0] if answers else "Unknown"

        # BLOCK the tool and provide the answer in the reason
        # This prevents the question UI from showing and tells the AI to use this answer
        result = {
            "continue": False,
            "reason": f"""## 🤖 Auto-Selected

**{header}:** {answer}

_Question: {question_text}_

---

Autonomous mode is active. The workflow will proceed with this answer.

To review auto-selected answers: `.claude/workflow-logs/`
To disable: Set `autonomous.enabled: false` in `.claude/hustle-build-defaults.json`
"""
        }
        print(json.dumps(result))
    else:
        print(json.dumps({"continue": True}))
|
261
|
-
def log_auto_answer(state, questions, answers):
    """Log auto-answered questions to the workflow log (v4.5.0).

    Prefers the shared hook_utils.log_workflow_event helper; falls back to
    appending into .claude/workflow-logs/<build_id>.json directly. Logging
    is best-effort: all failures are swallowed so the hook never blocks.

    Args:
        state: Workflow state dict (may be None).
        questions: List of AskUserQuestion question dicts.
        answers: Mapping of question text -> selected answer.
    """
    # Use shared utility if available
    if UTILS_AVAILABLE:
        try:
            log_workflow_event("auto_answer", {
                "questions": [q.get("question") for q in questions],
                "headers": [q.get("header") for q in questions],
                "answers": answers,
                "reason": "auto-comprehensive",
                "mode": state.get("mode", "auto") if state else "auto"
            })
            return
        except Exception:
            pass  # fall through to legacy file logging

    # Fallback to legacy logging
    project_dir = os.environ.get("CLAUDE_PROJECT_DIR", ".")
    logs_dir = Path(project_dir) / ".claude" / "workflow-logs"
    logs_dir.mkdir(parents=True, exist_ok=True)

    # Bug fix: dict.get(key, default) returns None when the key is present
    # with a null value, which produced a literal "None.json" log file.
    # `or`-chaining makes any falsy id fall back to "unknown".
    if state:
        build_id = state.get("build_id") or state.get("workflow_id") or "unknown"
    else:
        build_id = "unknown"
    log_file = logs_dir / f"{build_id}.json"

    try:
        if log_file.exists():
            log = json.loads(log_file.read_text())
        else:
            log = {"auto_answers": [], "events": []}

        from datetime import datetime
        log["auto_answers"].append({
            "timestamp": datetime.now().isoformat(),
            "questions": [q.get("question") for q in questions],
            "answers": answers,
            "reason": "auto-comprehensive"
        })

        log_file.write_text(json.dumps(log, indent=2))
    except Exception:
        pass  # logging must never break the hook
|
|
304
|
-
# Entry point when executed directly as a Claude Code hook.
if __name__ == "__main__":
    main()
|
|
@@ -1,337 +0,0 @@
|
|
|
1
|
-
#!/usr/bin/env python3
|
|
2
|
-
"""
|
|
3
|
-
Hook: PostToolUse for Write/Edit
|
|
4
|
-
Purpose: Create research cache files from state when documentation phase starts
|
|
5
|
-
|
|
6
|
-
This hook creates the following files that enforce-documentation.py expects:
|
|
7
|
-
- .claude/research/{endpoint}/sources.json - Research sources with URLs
|
|
8
|
-
- .claude/research/{endpoint}/interview.json - Interview decisions
|
|
9
|
-
- .claude/research/{endpoint}/schema.json - Schema snapshot
|
|
10
|
-
- .claude/research/{endpoint}/CURRENT.md - Aggregated research (if not exists)
|
|
11
|
-
- .claude/research/index.json - Updates the freshness index
|
|
12
|
-
|
|
13
|
-
Added in v3.6.7 to fix critical gap where these files were expected but never created.
|
|
14
|
-
|
|
15
|
-
Returns:
|
|
16
|
-
- JSON with cacheCreated info
|
|
17
|
-
"""
|
|
18
|
-
import json
|
|
19
|
-
import sys
|
|
20
|
-
import os
|
|
21
|
-
from datetime import datetime
|
|
22
|
-
from pathlib import Path
|
|
23
|
-
|
|
24
|
-
# Paths resolved relative to this hook file: with hooks under
# <project>/.claude/hooks/, parent.parent is the .claude directory.
# NOTE(review): the package listing shows hooks moving to <project>/hooks/;
# there parent.parent would be the project root — verify install layout.
STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
RESEARCH_DIR = Path(__file__).parent.parent / "research"
RESEARCH_INDEX = RESEARCH_DIR / "index.json"
|
|
29
|
-
def get_active_endpoint(state):
    """Return (endpoint_name, endpoint_data) for the active endpoint.

    Supports both state formats:
    - new (v3.6.7+): an "endpoints" map plus an "active_endpoint" pointer;
    - old: a single top-level "endpoint" field, where the whole state dict
      doubles as the endpoint data.
    Returns (None, None) when no active endpoint can be resolved.
    """
    # New format: pointer into the endpoints map.
    if "endpoints" in state and "active_endpoint" in state:
        name = state.get("active_endpoint")
        endpoints = state["endpoints"]
        if name and name in endpoints:
            return name, endpoints[name]
        # New-format state with a dangling/missing pointer yields nothing.
        return None, None

    # Old format: single endpoint field.
    name = state.get("endpoint")
    return (name, state) if name else (None, None)
|
46
|
-
def create_sources_json(endpoint_dir, state, endpoint_data):
    """Write sources.json aggregating research sources recorded in state.

    Collects entries from the top-level research_queries array plus the
    research_initial and research_deep phases, de-duplicates by URL
    (falling back to the query text), and snapshots them with timestamps.
    Always returns True after writing.
    """
    phases = endpoint_data.get("phases", {})
    collected = []

    # research_queries entries are normalized into full source records.
    for query in state.get("research_queries", []):
        collected.append({
            "query": query.get("query", ""),
            "tool": query.get("tool", "unknown"),
            "timestamp": query.get("timestamp", ""),
            "url": query.get("url", ""),
            "summary": query.get("summary", ""),
        })

    # Phase-level sources may be dicts or bare URL strings.
    for phase_name in ("research_initial", "research_deep"):
        for src in phases.get(phase_name, {}).get("sources", []):
            if isinstance(src, dict):
                collected.append(src)
            elif isinstance(src, str):
                collected.append({"url": src, "summary": ""})

    # De-duplicate by URL, falling back to the query text as the key;
    # entries with neither are dropped.
    seen_keys = set()
    unique_sources = []
    for src in collected:
        key = src.get("url", src.get("query", ""))
        if key and key not in seen_keys:
            seen_keys.add(key)
            unique_sources.append(src)

    now = datetime.now().isoformat()
    payload = {
        "created_at": now,
        "updated_at": now,
        "endpoint": endpoint_data.get("endpoint", state.get("endpoint", "")),
        "source_count": len(unique_sources),
        "sources": unique_sources,
    }
    (endpoint_dir / "sources.json").write_text(json.dumps(payload, indent=2))
    return True
|
101
|
-
def create_interview_json(endpoint_dir, endpoint_data):
    """Snapshot interview questions and decisions from state into interview.json.

    Missing interview data produces an empty (but valid) snapshot.
    Always returns True after writing.
    """
    interview = endpoint_data.get("phases", {}).get("interview", {})
    questions = interview.get("questions", [])
    decisions = interview.get("decisions", {})

    now = datetime.now().isoformat()
    payload = {
        "created_at": now,
        "updated_at": now,
        "question_count": len(questions),
        "decision_count": len(decisions),
        "questions": questions,
        "decisions": decisions,
    }
    (endpoint_dir / "interview.json").write_text(json.dumps(payload, indent=2))
    return True
|
122
|
-
def create_schema_json(endpoint_dir, endpoint_data, state):
    """Snapshot the schema-creation phase into schema.json.

    Records the schema file path, field count, and (when the referenced
    file is readable) the schema source text. Always returns True.
    The `state` parameter is kept for interface compatibility.
    """
    phase = endpoint_data.get("phases", {}).get("schema_creation", {})
    # Older states used "file" instead of "schema_file".
    schema_file = phase.get("schema_file", phase.get("file", ""))

    # Inline the schema source when the referenced file is readable;
    # missing/unreadable files leave the content as None.
    schema_content = None
    if schema_file:
        try:
            schema_content = Path(schema_file).read_text()
        except OSError:
            schema_content = None

    now = datetime.now().isoformat()
    payload = {
        "created_at": now,
        "updated_at": now,
        "schema_file": schema_file,
        "fields_count": phase.get("fields_count", 0),
        "schema_content": schema_content,
    }
    (endpoint_dir / "schema.json").write_text(json.dumps(payload, indent=2))
    return True
|
152
|
-
def create_current_md(endpoint_dir, endpoint, endpoint_data, state):
    """Build CURRENT.md by aggregating the cached sources/interview/schema files.

    Returns False without writing anything when CURRENT.md already exists,
    so manually-written research is never overwritten; otherwise writes the
    aggregated markdown and returns True. The endpoint_data/state parameters
    are kept for interface compatibility.
    """
    current_md = endpoint_dir / "CURRENT.md"
    if current_md.exists():
        return False

    def read_cache(name):
        # Best-effort read of a sibling cache file; missing/corrupt -> None.
        path = endpoint_dir / name
        if not path.exists():
            return None
        try:
            return json.loads(path.read_text())
        except (json.JSONDecodeError, IOError):
            return None

    lines = [
        f"# Research: {endpoint}",
        "",
        f"*Generated: {datetime.now().isoformat()}*",
        "",
        "## Sources",
        "",
    ]

    sources = read_cache("sources.json")
    if sources is not None:
        for src in sources.get("sources", []):
            url = src.get("url", "")
            summary = src.get("summary", "")
            if url:
                lines.append(f"- {url}")
                if summary:
                    lines.append(f"  - {summary}")

    lines.extend(["", "## Interview Decisions", ""])

    interview = read_cache("interview.json")
    if interview is not None:
        for key, value in interview.get("decisions", {}).items():
            response = value.get("response", value.get("value", "N/A"))
            lines.append(f"- **{key}**: {response}")

    lines.extend(["", "## Schema", ""])

    schema = read_cache("schema.json")
    if schema is not None:
        lines.append(f"- File: `{schema.get('schema_file', 'N/A')}`")
        lines.append(f"- Fields: {schema.get('fields_count', 0)}")

    current_md.write_text("\n".join(lines))
    return True
|
214
|
-
def update_research_index(endpoint):
    """Register or refresh this endpoint's entry in the research index.

    Loads .claude/research/index.json (recreating the skeleton when missing
    or corrupt), stamps the endpoint's entry with the current time and a
    freshness of 0 days, and writes the index back. Always returns True.
    """
    RESEARCH_DIR.mkdir(parents=True, exist_ok=True)

    # Load the existing index, or start from the empty skeleton.
    index = {"version": "3.6.7", "apis": {}}
    if RESEARCH_INDEX.exists():
        try:
            index = json.loads(RESEARCH_INDEX.read_text())
        except json.JSONDecodeError:
            pass  # corrupt index: keep the fresh skeleton
    index.setdefault("apis", {})

    # Freshly cached research is 0 days old by definition.
    index["apis"][endpoint] = {
        "last_updated": datetime.now().isoformat(),
        "freshness_days": 0,
        "cache_path": f".claude/research/{endpoint}/",
        "files": ["sources.json", "interview.json", "schema.json", "CURRENT.md"],
    }

    RESEARCH_INDEX.write_text(json.dumps(index, indent=2))
    return True
|
244
|
-
def main():
    """PostToolUse hook body for Write/Edit.

    When the documentation phase is underway (or a tests manifest / API
    README is being written), materializes the research cache files that
    enforce-documentation.py expects, updates the research index, and marks
    research_cached in the workflow state. Always exits 0 with a JSON
    response on stdout; any early-out prints {"continue": True}.
    """
    try:
        input_data = json.load(sys.stdin)
    except json.JSONDecodeError:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})
    # NOTE(review): tool_result is read but never used below.
    tool_result = input_data.get("tool_result", {})
    file_path = tool_input.get("file_path", "")

    # Only trigger on Write/Edit to documentation-related files
    if tool_name not in ["Write", "Edit"]:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Check if this is a documentation-related write
    is_manifest = "api-tests-manifest.json" in file_path
    is_readme = file_path.endswith("README.md") and "/api/" in file_path
    # NOTE(review): is_state is computed but never used in the gate below.
    is_state = "api-dev-state.json" in file_path

    # Also trigger when documentation phase is in progress
    if not STATE_FILE.exists():
        print(json.dumps({"continue": True}))
        sys.exit(0)

    try:
        state = json.loads(STATE_FILE.read_text())
    except json.JSONDecodeError:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    endpoint, endpoint_data = get_active_endpoint(state)
    if not endpoint or not endpoint_data:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Check if documentation phase is in progress or we're writing doc files
    doc_phase = endpoint_data.get("phases", {}).get("documentation", {})
    doc_status = doc_phase.get("status", "not_started")

    if doc_status not in ["in_progress", "complete"] and not is_manifest and not is_readme:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Create research cache directory
    endpoint_dir = RESEARCH_DIR / endpoint
    endpoint_dir.mkdir(parents=True, exist_ok=True)

    # Create cache files
    files_created = []

    sources_created = create_sources_json(endpoint_dir, state, endpoint_data)
    if sources_created:
        files_created.append("sources.json")

    interview_created = create_interview_json(endpoint_dir, endpoint_data)
    if interview_created:
        files_created.append("interview.json")

    schema_created = create_schema_json(endpoint_dir, endpoint_data, state)
    if schema_created:
        files_created.append("schema.json")

    # CURRENT.md is only created when absent, so this flag can be False.
    current_created = create_current_md(endpoint_dir, endpoint, endpoint_data, state)
    if current_created:
        files_created.append("CURRENT.md")

    # Update index
    index_updated = update_research_index(endpoint)
    if index_updated:
        files_created.append("index.json")

    # Update state to indicate research is cached.
    # NOTE(review): if "phases"/"documentation" were absent, doc_phase is a
    # fresh dict not attached to state, so the flag would not persist —
    # confirm against the state schema.
    if files_created:
        doc_phase["research_cached"] = True
        STATE_FILE.write_text(json.dumps(state, indent=2))

    output = {
        "hookSpecificOutput": {
            "cacheCreated": True,
            "endpoint": endpoint,
            "files": files_created,
            "cacheDir": str(endpoint_dir)
        }
    }

    print(json.dumps(output))
    sys.exit(0)
|
336
|
-
# Entry point when executed directly as a Claude Code hook.
if __name__ == "__main__":
    main()
|