@hustle-together/api-dev-tools 3.10.0 → 3.11.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/api-dev-state.json +159 -0
- package/.claude/commands/README.md +185 -0
- package/.claude/commands/add-command.md +209 -0
- package/.claude/commands/api-create.md +499 -0
- package/.claude/commands/api-env.md +50 -0
- package/.claude/commands/api-interview.md +331 -0
- package/.claude/commands/api-research.md +331 -0
- package/.claude/commands/api-status.md +259 -0
- package/.claude/commands/api-verify.md +231 -0
- package/.claude/commands/beepboop.md +97 -0
- package/.claude/commands/busycommit.md +112 -0
- package/.claude/commands/commit.md +83 -0
- package/.claude/commands/cycle.md +142 -0
- package/.claude/commands/gap.md +86 -0
- package/.claude/commands/green.md +142 -0
- package/.claude/commands/issue.md +192 -0
- package/.claude/commands/plan.md +168 -0
- package/.claude/commands/pr.md +122 -0
- package/.claude/commands/red.md +142 -0
- package/.claude/commands/refactor.md +142 -0
- package/.claude/commands/spike.md +142 -0
- package/.claude/commands/summarize.md +94 -0
- package/.claude/commands/tdd.md +144 -0
- package/.claude/commands/worktree-add.md +315 -0
- package/.claude/commands/worktree-cleanup.md +281 -0
- package/.claude/hooks/api-workflow-check.py +227 -0
- package/.claude/hooks/enforce-deep-research.py +185 -0
- package/.claude/hooks/enforce-disambiguation.py +155 -0
- package/.claude/hooks/enforce-documentation.py +192 -0
- package/.claude/hooks/enforce-environment.py +253 -0
- package/.claude/hooks/enforce-external-research.py +328 -0
- package/.claude/hooks/enforce-interview.py +421 -0
- package/.claude/hooks/enforce-refactor.py +189 -0
- package/.claude/hooks/enforce-research.py +159 -0
- package/.claude/hooks/enforce-schema.py +186 -0
- package/.claude/hooks/enforce-scope.py +160 -0
- package/.claude/hooks/enforce-tdd-red.py +250 -0
- package/.claude/hooks/enforce-verify.py +186 -0
- package/.claude/hooks/periodic-reground.py +154 -0
- package/.claude/hooks/session-startup.py +151 -0
- package/.claude/hooks/track-tool-use.py +626 -0
- package/.claude/hooks/verify-after-green.py +282 -0
- package/.claude/hooks/verify-implementation.py +225 -0
- package/.claude/research/index.json +6 -0
- package/.claude/settings.json +93 -0
- package/.claude/settings.local.json +11 -0
- package/.claude-plugin/marketplace.json +112 -0
- package/.skills/README.md +291 -0
- package/.skills/_shared/convert-commands.py +192 -0
- package/.skills/_shared/hooks/api-workflow-check.py +227 -0
- package/.skills/_shared/hooks/enforce-deep-research.py +185 -0
- package/.skills/_shared/hooks/enforce-disambiguation.py +155 -0
- package/.skills/_shared/hooks/enforce-documentation.py +192 -0
- package/.skills/_shared/hooks/enforce-environment.py +253 -0
- package/.skills/_shared/hooks/enforce-external-research.py +328 -0
- package/.skills/_shared/hooks/enforce-interview.py +421 -0
- package/.skills/_shared/hooks/enforce-refactor.py +189 -0
- package/.skills/_shared/hooks/enforce-research.py +159 -0
- package/.skills/_shared/hooks/enforce-schema.py +186 -0
- package/.skills/_shared/hooks/enforce-scope.py +160 -0
- package/.skills/_shared/hooks/enforce-tdd-red.py +250 -0
- package/.skills/_shared/hooks/enforce-verify.py +186 -0
- package/.skills/_shared/hooks/periodic-reground.py +154 -0
- package/.skills/_shared/hooks/session-startup.py +151 -0
- package/.skills/_shared/hooks/track-tool-use.py +626 -0
- package/.skills/_shared/hooks/verify-after-green.py +282 -0
- package/.skills/_shared/hooks/verify-implementation.py +225 -0
- package/.skills/_shared/install.sh +114 -0
- package/.skills/_shared/settings.json +93 -0
- package/.skills/add-command/SKILL.md +222 -0
- package/.skills/api-create/SKILL.md +512 -0
- package/.skills/api-env/SKILL.md +63 -0
- package/.skills/api-interview/SKILL.md +344 -0
- package/.skills/api-research/SKILL.md +344 -0
- package/.skills/api-status/SKILL.md +272 -0
- package/.skills/api-verify/SKILL.md +244 -0
- package/.skills/beepboop/SKILL.md +110 -0
- package/.skills/busycommit/SKILL.md +125 -0
- package/.skills/commit/SKILL.md +96 -0
- package/.skills/cycle/SKILL.md +155 -0
- package/.skills/gap/SKILL.md +99 -0
- package/.skills/green/SKILL.md +155 -0
- package/.skills/issue/SKILL.md +205 -0
- package/.skills/plan/SKILL.md +181 -0
- package/.skills/pr/SKILL.md +135 -0
- package/.skills/red/SKILL.md +155 -0
- package/.skills/refactor/SKILL.md +155 -0
- package/.skills/spike/SKILL.md +155 -0
- package/.skills/summarize/SKILL.md +107 -0
- package/.skills/tdd/SKILL.md +157 -0
- package/.skills/update-todos/SKILL.md +228 -0
- package/.skills/worktree-add/SKILL.md +328 -0
- package/.skills/worktree-cleanup/SKILL.md +294 -0
- package/CHANGELOG.md +97 -0
- package/README.md +66 -20
- package/bin/cli.js +7 -6
- package/package.json +22 -11
package/.claude/hooks/track-tool-use.py
@@ -0,0 +1,626 @@
#!/usr/bin/env python3
"""
Hook: PostToolUse for WebSearch, WebFetch, Context7 MCP, AskUserQuestion
Purpose: Track all research activity and turn counts in the state file

This hook runs AFTER Claude uses research tools (WebSearch, WebFetch, Context7).
It logs each research action to api-dev-state.json for:
- Auditing what research was done
- Verifying prerequisites before allowing implementation
- Providing visibility to the user
- Tracking turn counts for periodic re-grounding

Version: 3.0.0

Returns:
- {"continue": true} - Always continues (logging only, no blocking)
"""
import json
import re
import sys
from datetime import datetime
from pathlib import Path
from urllib.parse import urlparse

# State file is in .claude/ directory (sibling to hooks/)
STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"

# Re-grounding interval (also used by periodic-reground.py)
REGROUND_INTERVAL = 7


def main():
    # Read hook input from stdin
    try:
        input_data = json.load(sys.stdin)
    except json.JSONDecodeError:
        # Can't parse, just continue
        print(json.dumps({"continue": True}))
        sys.exit(0)

    tool_name = input_data.get("tool_name", "")
    tool_input = input_data.get("tool_input", {})
    tool_output = input_data.get("tool_output", {})

    # Track research tools AND user questions
    research_tools = ["WebSearch", "WebFetch", "mcp__context7"]
    is_research_tool = any(t in tool_name for t in research_tools)
    is_user_question = tool_name == "AskUserQuestion"

    if not is_research_tool and not is_user_question:
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Load or create state file
    if STATE_FILE.exists():
        try:
            state = json.loads(STATE_FILE.read_text())
        except json.JSONDecodeError:
            state = create_initial_state()
    else:
        state = create_initial_state()

    # ========================================
    # TURN COUNTING (for periodic re-grounding)
    # ========================================
    # Increment turn count on every tracked tool use
    turn_count = state.get("turn_count", 0) + 1
    state["turn_count"] = turn_count
    state["last_turn_timestamp"] = datetime.now().isoformat()

    # Get phases
    phases = state.setdefault("phases", {})

    # Handle AskUserQuestion separately - track in interview phase
    if is_user_question:
        interview = phases.setdefault("interview", {
            "status": "not_started",
            "questions": [],
            "user_question_count": 0,
            "structured_question_count": 0,
            "decisions": {}  # Track key decisions for consistency checking
        })

        # Track the question
        questions = interview.setdefault("questions", [])
        user_count = interview.get("user_question_count", 0) + 1
        interview["user_question_count"] = user_count

        # Check if this question has structured options (multiple-choice)
        options = tool_input.get("options", [])
        has_options = len(options) > 0

        # Track structured questions count
        if has_options:
            structured_count = interview.get("structured_question_count", 0) + 1
            interview["structured_question_count"] = structured_count

        # IMPORTANT: Capture the user's response from tool_output.
        # PostToolUse runs AFTER the tool completes, so we have the response.
        user_response = None
        selected_value = None

        # tool_output contains the user's response
        if isinstance(tool_output, str):
            user_response = tool_output
        elif isinstance(tool_output, dict):
            user_response = tool_output.get("response", tool_output.get("result", str(tool_output)))

        # Try to match the response to an option value or label
        if has_options and user_response:
            response_lower = user_response.lower().strip()
            for opt in options:
                opt_value = opt.get("value", "").lower()
                opt_label = opt.get("label", "").lower()
                if opt_value in response_lower or response_lower in opt_label or opt_label in response_lower:
                    selected_value = opt.get("value")
                    break

        question_entry = {
            "question": tool_input.get("question", ""),
            "timestamp": datetime.now().isoformat(),
            "tool_used": True,  # Proves AskUserQuestion was actually called
            "has_options": has_options,
            "options_count": len(options),
            "options": [opt.get("label", opt.get("value", "")) for opt in options[:5]],
            "user_response": user_response[:500] if user_response else None,  # Actual response
            "selected_value": selected_value  # Matched option value if applicable
        }
        questions.append(question_entry)

        # Track key decisions in a summary dict for easy reference during implementation
        decisions = interview.setdefault("decisions", {})
        question_text = tool_input.get("question", "").lower()

        # Categorize common decision types (substring match on the question)
        if "provider" in question_text:
            decisions["provider"] = {"response": user_response, "value": selected_value}
        elif "purpose" in question_text:
            decisions["purpose"] = {"response": user_response, "value": selected_value}
        elif "format" in question_text:
            decisions["response_format"] = {"response": user_response, "value": selected_value}
        elif "parameter" in question_text and "required" in question_text:
            decisions["required_params"] = {"response": user_response, "value": selected_value}
        elif "parameter" in question_text and "optional" in question_text:
            decisions["optional_params"] = {"response": user_response, "value": selected_value}
        elif "error" in question_text:
            decisions["error_handling"] = {"response": user_response, "value": selected_value}
        elif "key" in question_text:
            decisions["api_key_handling"] = {"response": user_response, "value": selected_value}
        elif "service" in question_text or "external" in question_text:
            decisions["external_services"] = {"response": user_response, "value": selected_value}

        # Update interview status
        if interview.get("status") == "not_started":
            interview["status"] = "in_progress"
            interview["started_at"] = datetime.now().isoformat()

        interview["last_activity"] = datetime.now().isoformat()

        # ========================================
        # CRITICAL: Set user_question_asked flags
        # This is what the enforcement hooks check!
        # ========================================
        interview["user_question_asked"] = True

        # Also update the CURRENT phase based on workflow state.
        # setdefault() guards against state files that predate a phase;
        # indexing phases[current_phase] directly could raise KeyError.
        current_phase = _determine_current_phase(phases)
        phase_state = phases.setdefault(current_phase, {"status": "not_started"})
        phase_state["user_question_asked"] = True
        # If user responded, also track that
        if user_response:
            phase_state["last_user_response"] = user_response[:200]
            phase_state["last_question_timestamp"] = datetime.now().isoformat()

        # ========================================
        # CRITICAL: Detect phase exit confirmations
        # This prevents Claude from self-answering
        # ========================================
        question_type = _detect_question_type(question_text, options)
        phase_state["last_question_type"] = question_type

        # If this is an exit confirmation question AND the user responded
        # affirmatively, mark the phase as confirmed for exit
        if question_type == "exit_confirmation":
            if user_response and _is_affirmative_response(user_response, options):
                phase_state["phase_exit_confirmed"] = True

        # Log for visibility
        if has_options:
            interview["last_structured_question"] = {
                "question": tool_input.get("question", "")[:100],
                "options_count": len(options),
                "user_response": user_response[:100] if user_response else None,
                "selected_value": selected_value,
                "timestamp": datetime.now().isoformat()
            }

        # Save and exit
        STATE_FILE.write_text(json.dumps(state, indent=2))
        print(json.dumps({"continue": True}))
        sys.exit(0)

    # Get or create research phase (for research tools)
    research = phases.setdefault("research_initial", {
        "status": "in_progress",
        "sources": [],
        "started_at": datetime.now().isoformat()
    })

    # Update status if not started
    if research.get("status") == "not_started":
        research["status"] = "in_progress"
        research["started_at"] = datetime.now().isoformat()

    # Get sources list
    sources = research.setdefault("sources", [])

    # Create source entry based on tool type
    timestamp = datetime.now().isoformat()

    if "context7" in tool_name.lower():
        source_entry = {
            "type": "context7",
            "tool": tool_name,
            "input": sanitize_input(tool_input),
            "timestamp": timestamp,
            "success": True
        }
        # Extract library info if available
        if "libraryName" in tool_input:
            source_entry["library"] = tool_input["libraryName"]
        if "libraryId" in tool_input:
            source_entry["library_id"] = tool_input["libraryId"]

    elif tool_name == "WebSearch":
        source_entry = {
            "type": "websearch",
            "query": tool_input.get("query", ""),
            "timestamp": timestamp,
            "success": True
        }

    elif tool_name == "WebFetch":
        source_entry = {
            "type": "webfetch",
            "url": tool_input.get("url", ""),
            "timestamp": timestamp,
            "success": True
        }

    else:
        # Generic research tool
        source_entry = {
            "type": "other",
            "tool": tool_name,
            "timestamp": timestamp,
            "success": True
        }

    # Add to sources list
    sources.append(source_entry)

    # Also add to research_queries for prompt verification
    research_queries = state.setdefault("research_queries", [])
    query_entry = {
        "timestamp": timestamp,
        "tool": tool_name,
    }

    # Extract query/terms based on tool type
    if tool_name == "WebSearch":
        query_entry["query"] = tool_input.get("query", "")
        query_entry["terms"] = extract_terms(tool_input.get("query", ""))
    elif tool_name == "WebFetch":
        query_entry["url"] = tool_input.get("url", "")
        query_entry["terms"] = extract_terms_from_url(tool_input.get("url", ""))
    elif "context7" in tool_name.lower():
        query_entry["library"] = tool_input.get("libraryName", tool_input.get("libraryId", ""))
        query_entry["terms"] = [tool_input.get("libraryName", "").lower()]

    research_queries.append(query_entry)

    # Keep only the last 50 queries
    state["research_queries"] = research_queries[-50:]

    # Update last activity timestamp
    research["last_activity"] = timestamp
    research["source_count"] = len(sources)

    # Check if we have enough sources to consider research "complete".
    # Criteria:
    # - At least 2 sources total (prevents a single accidental search from completing)
    # - At least one docs source: a Context7 call or a WebFetch of a docs page
    # - At least one search: a WebSearch or a Context7 resolve
    context7_count = sum(1 for s in sources if s.get("type") == "context7")
    websearch_count = sum(1 for s in sources if s.get("type") == "websearch")
    webfetch_count = sum(1 for s in sources if s.get("type") == "webfetch")
    total_sources = len(sources)

    # Minimum threshold: 2+ sources with at least one being docs-related
    has_docs = webfetch_count >= 1 or context7_count >= 1
    has_search = websearch_count >= 1 or context7_count >= 1
    sufficient = total_sources >= 2 and has_docs and has_search

    # Auto-complete research once sources are sufficient
    if sufficient and research.get("status") == "in_progress":
        research["status"] = "complete"
        research["completed_at"] = timestamp
        research["completion_reason"] = "sufficient_sources"
        research["completion_summary"] = {
            "total_sources": total_sources,
            "context7_calls": context7_count,
            "web_searches": websearch_count,
            "doc_fetches": webfetch_count
        }

    # Save state file
    STATE_FILE.write_text(json.dumps(state, indent=2))

    # Return success
    print(json.dumps({"continue": True}))
    sys.exit(0)

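# Illustrative payload (editor's sketch, not captured output): given stdin like
#
#   {
#     "tool_name": "AskUserQuestion",
#     "tool_input": {
#       "question": "Which AI provider should this endpoint use?",
#       "options": [{"label": "OpenAI", "value": "openai"},
#                   {"label": "Anthropic", "value": "anthropic"}]
#     },
#     "tool_output": "OpenAI"
#   }
#
# main() increments turn_count, appends the question to the interview phase,
# matches "OpenAI" to the "openai" option, and records the choice under
# decisions["provider"].
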
def _detect_question_type(question_text: str, options: list) -> str:
    """
    Detect the type of question being asked.
    Returns: 'exit_confirmation', 'data_collection', 'clarification', or 'unknown'
    """
    question_lower = question_text.lower()

    # Exit confirmation patterns - questions asking to proceed/continue/move to next phase
    exit_patterns = [
        "proceed",
        "continue",
        "ready to",
        "move to",
        "is this correct",
        "all correct",
        "looks correct",
        "approve",
        "approved",
        "confirm",
        "complete",
        "shall i",
        "should i proceed",
        "does this match",
        "ready for",
        "start tdd",
        "start tests",
        "begin",
        "next phase",
        "move on",
        "go ahead"
    ]

    # Check options for exit-like labels
    option_labels = [opt.get("label", "").lower() for opt in options] if options else []
    exit_option_patterns = [
        "yes", "proceed", "continue", "approve", "confirm",
        "ready", "looks good", "correct", "done", "complete"
    ]

    # If question matches exit patterns
    for pattern in exit_patterns:
        if pattern in question_lower:
            return "exit_confirmation"

    # If options suggest it's an exit confirmation
    for opt_label in option_labels:
        for pattern in exit_option_patterns:
            if pattern in opt_label:
                return "exit_confirmation"

    # Data collection - asking for choices about implementation
    data_patterns = [
        "which", "what", "how should", "prefer", "want",
        "format", "handling", "strategy", "method"
    ]
    for pattern in data_patterns:
        if pattern in question_lower:
            return "data_collection"

    # Clarification - asking for more info
    clarify_patterns = [
        "clarify", "explain", "more detail", "what do you mean"
    ]
    for pattern in clarify_patterns:
        if pattern in question_lower:
            return "clarification"

    return "unknown"

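# Worked examples against the patterns above (hypothetical inputs):
#   _detect_question_type("ready to proceed to tdd?", [])            -> "exit_confirmation"
#   _detect_question_type("which response format do you want?", [])  -> "data_collection"
#   _detect_question_type("can you clarify the input shape?", [])    -> "clarification"
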
def _is_affirmative_response(response: str, options: list) -> bool:
    """
    Check if the user's response indicates approval/confirmation.
    """
    response_lower = response.lower().strip()
    response_words = set(re.findall(r"[a-z]+", response_lower))

    # Check negative responses FIRST so they are never shadowed by an
    # affirmative substring ("not yet" must not read as approval)
    negative_words = ["no", "change", "modify", "add more", "not yet", "wait"]
    for word in negative_words:
        if word in response_words or (" " in word and word in response_lower):
            return False

    # Direct affirmative words/phrases. Single words are matched on word
    # boundaries; a bare substring test would make "y" match almost anything.
    affirmative_words = [
        "yes", "y", "proceed", "continue", "approve", "confirm",
        "correct", "ready", "go", "ok", "okay", "looks good",
        "sounds good", "perfect", "great", "fine", "done",
        "all good", "looks correct", "is correct", "all correct"
    ]
    for word in affirmative_words:
        if word in response_words or (" " in word and word in response_lower):
            return True

    # Check if the response matches an option whose label sounds affirmative
    if options:
        for opt in options:
            opt_label = opt.get("label", "").lower()
            if opt_label and (opt_label in response_lower or response_lower in opt_label):
                for aff in affirmative_words:
                    if aff in opt_label:
                        return True

    return False

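# Worked examples (hypothetical inputs; negatives are checked first, so a
# response like "not yet" can never count as approval):
#   _is_affirmative_response("Yes, proceed", [])  -> True
#   _is_affirmative_response("Not yet", [])       -> False
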
def _determine_current_phase(phases: dict) -> str:
    """Determine which phase is currently active based on status."""
    # Phase order - return first incomplete phase
    phase_order = [
        "disambiguation",
        "scope",
        "research_initial",
        "interview",
        "research_deep",
        "schema_creation",
        "environment_check",
        "tdd_red",
        "tdd_green",
        "verify",
        "tdd_refactor",
        "documentation"
    ]

    for phase_name in phase_order:
        phase = phases.get(phase_name, {})
        status = phase.get("status", "not_started")
        if status != "complete":
            return phase_name

    # All complete, return documentation
    return "documentation"

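# Example (hypothetical state): if disambiguation and scope are "complete"
# but research_initial is "in_progress", this returns "research_initial".
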
def create_initial_state():
    """Create initial state structure (v3.0.0)"""
    return {
        "version": "3.0.0",
        "created_at": datetime.now().isoformat(),
        "endpoint": None,
        "library": None,
        "session_id": None,
        "turn_count": 0,
        "last_turn_timestamp": None,
        "research_queries": [],
        "prompt_detections": [],
        "phases": {
            "disambiguation": {
                "status": "not_started",
                "clarified": None,
                "search_variations": [],
                "description": "Pre-research disambiguation to clarify ambiguous requests"
            },
            "scope": {
                "status": "not_started",
                "confirmed": False,
                "description": "Initial scope understanding and confirmation"
            },
            "research_initial": {
                "status": "not_started",
                "sources": [],
                "summary_approved": False,
                "description": "Context7/WebSearch research for live documentation"
            },
            "interview": {
                "status": "not_started",
                "questions": [],
                "user_question_count": 0,
                "structured_question_count": 0,
                "decisions": {},
                "description": "Structured interview about requirements (generated FROM research)"
            },
            "research_deep": {
                "status": "not_started",
                "sources": [],
                "proposed_searches": [],
                "approved_searches": [],
                "skipped_searches": [],
                "description": "Deep dive based on interview answers (adaptive, not shotgun)"
            },
            "schema_creation": {
                "status": "not_started",
                "schema_file": None,
                "schema_approved": False,
                "description": "Zod schema creation from research"
            },
            "environment_check": {
                "status": "not_started",
                "keys_verified": [],
                "keys_missing": [],
                "confirmed": False,
                "description": "API key and environment verification"
            },
            "tdd_red": {
                "status": "not_started",
                "test_file": None,
                "test_count": 0,
                "test_matrix_approved": False,
                "description": "Write failing tests first"
            },
            "tdd_green": {
                "status": "not_started",
                "implementation_file": None,
                "all_tests_passing": False,
                "description": "Minimal implementation to pass tests"
            },
            "verify": {
                "status": "not_started",
                "gaps_found": 0,
                "gaps_fixed": 0,
                "intentional_omissions": [],
                "re_research_done": False,
                "description": "Re-research after Green to verify implementation matches docs"
            },
            "tdd_refactor": {
                "status": "not_started",
                "description": "Code cleanup while keeping tests green"
            },
            "documentation": {
                "status": "not_started",
                "files_updated": [],
                "manifest_updated": False,
                "openapi_updated": False,
                "research_cached": False,
                "description": "Update manifests, OpenAPI, cache research"
            }
        },
        "verification": {
            "all_sources_fetched": False,
            "schema_matches_docs": False,
            "tests_cover_params": False,
            "all_tests_passing": False,
            "coverage_percent": None,
            "post_green_verification": False
        },
        "research_index": {},
        "reground_history": []
    }

def sanitize_input(tool_input):
    """Remove potentially sensitive data from input before logging"""
    sanitized = {}
    for key, value in tool_input.items():
        # Redact values whose key looks like an API key, token, or secret
        if any(sensitive in key.lower() for sensitive in ["key", "token", "secret", "password"]):
            sanitized[key] = "[REDACTED]"
        else:
            sanitized[key] = value
    return sanitized

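# Example (hypothetical input):
#   sanitize_input({"query": "stripe api", "apiKey": "sk-123"})
#   -> {"query": "stripe api", "apiKey": "[REDACTED]"}
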
def extract_terms(query: str) -> list:
    """Extract searchable terms from a query string."""
    # Remove common words and extract meaningful terms
    stop_words = {"the", "a", "an", "is", "are", "was", "were", "be", "been",
                  "how", "to", "do", "does", "what", "which", "for", "and", "or",
                  "in", "on", "at", "with", "from", "this", "that", "it"}

    # Extract words (re is imported at module level)
    words = re.findall(r'\b[\w@/-]+\b', query.lower())

    # Filter and return
    terms = [w for w in words if w not in stop_words and len(w) > 2]
    return terms[:10]  # Limit to 10 terms

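# Example (hypothetical query):
#   extract_terms("How to call the OpenAI chat completions API")
#   -> ["call", "openai", "chat", "completions", "api"]
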
def extract_terms_from_url(url: str) -> list:
    """Extract meaningful terms from a URL."""
    try:
        parsed = urlparse(url)
        # Get domain parts and path parts
        domain_parts = parsed.netloc.replace("www.", "").split(".")
        path_parts = [p for p in parsed.path.split("/") if p]

        # Combine and split by common separators
        all_parts = domain_parts + path_parts
        terms = []
        for part in all_parts:
            sub_parts = re.split(r'[-_.]', part.lower())
            terms.extend(sub_parts)

        # Filter short/common terms
        stop_terms = {"com", "org", "io", "dev", "api", "docs", "www", "http", "https"}
        terms = [t for t in terms if t not in stop_terms and len(t) > 2]
        return terms[:10]
    except Exception:
        return []

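# Example (hypothetical URL):
#   extract_terms_from_url("https://platform.openai.com/docs/api-reference/chat")
#   -> ["platform", "openai", "reference", "chat"]
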
if __name__ == "__main__":
    main()
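
A quick way to smoke-test a PostToolUse hook like this one locally is to pipe a JSON envelope into it and check the stdout decision. The sketch below assumes only what the file above documents: the hook reads one JSON object on stdin and always prints {"continue": true}. The payload values are illustrative, and the path refers to the .claude/hooks copy (the same file also ships under .skills/_shared/hooks/).

    import json
    import subprocess

    # Hypothetical envelope; the hook reads tool_name/tool_input/tool_output.
    payload = {
        "tool_name": "WebSearch",
        "tool_input": {"query": "zod schema validation"},
        "tool_output": {},
    }

    result = subprocess.run(
        ["python3", ".claude/hooks/track-tool-use.py"],
        input=json.dumps(payload),
        capture_output=True,
        text=True,
    )
    print(result.stdout.strip())  # expected: {"continue": true}

Note that each run writes .claude/api-dev-state.json next to the hooks directory, so run this in a scratch checkout. After two qualifying sources (for example a WebSearch plus a WebFetch of a docs page), the state file should show research_initial marked "complete".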