@hustle-together/api-dev-tools 1.3.0 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +71 -3
- package/commands/api-research.md +77 -0
- package/demo/workflow-demo.html +1945 -0
- package/hooks/api-workflow-check.py +135 -26
- package/hooks/enforce-external-research.py +318 -0
- package/hooks/enforce-interview.py +183 -0
- package/hooks/track-tool-use.py +108 -5
- package/hooks/verify-implementation.py +225 -0
- package/package.json +2 -1
- package/templates/api-dev-state.json +3 -1
- package/templates/settings.json +35 -1
|
@@ -6,33 +6,104 @@ Purpose: Check if all required phases are complete before allowing stop
|
|
|
6
6
|
This hook runs when Claude tries to stop/end the conversation.
|
|
7
7
|
It checks api-dev-state.json to ensure critical workflow phases completed.
|
|
8
8
|
|
|
9
|
+
Gap Fixes Applied:
|
|
10
|
+
- Gap 2: Shows files_created vs files_modified to verify all claimed changes
|
|
11
|
+
- Gap 3: Warns if there are verification_warnings that weren't addressed
|
|
12
|
+
- Gap 4: Requires explicit verification that implementation matches interview
|
|
13
|
+
|
|
9
14
|
Returns:
|
|
10
15
|
- {"decision": "approve"} - Allow stopping
|
|
11
16
|
- {"decision": "block", "reason": "..."} - Prevent stopping with explanation
|
|
12
17
|
"""
|
|
13
18
|
import json
|
|
14
19
|
import sys
|
|
20
|
+
import subprocess
|
|
15
21
|
from pathlib import Path
|
|
16
22
|
|
|
17
23
|
# State file is in .claude/ directory (sibling to hooks/)
|
|
18
24
|
STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
|
|
19
25
|
|
|
20
26
|
# Phases that MUST be complete before stopping
|
|
21
|
-
# These are the critical phases - others are optional
|
|
22
27
|
REQUIRED_PHASES = [
|
|
23
28
|
("research_initial", "Initial research (Context7/WebSearch)"),
|
|
29
|
+
("interview", "User interview"),
|
|
24
30
|
("tdd_red", "TDD Red phase (failing tests written)"),
|
|
25
31
|
("tdd_green", "TDD Green phase (tests passing)"),
|
|
26
32
|
]
|
|
27
33
|
|
|
28
34
|
# Phases that SHOULD be complete (warning but don't block)
|
|
29
35
|
RECOMMENDED_PHASES = [
|
|
30
|
-
("interview", "User interview"),
|
|
31
36
|
("schema_creation", "Schema creation"),
|
|
37
|
+
("tdd_refactor", "TDD Refactor phase"),
|
|
32
38
|
("documentation", "Documentation updates"),
|
|
33
39
|
]
|
|
34
40
|
|
|
35
41
|
|
|
42
|
+
def get_git_modified_files() -> list[str]:
    """Return the paths of files modified relative to HEAD, per `git diff`.

    Gap 2 Fix: used to verify which files actually changed versus the
    files the workflow state claims were created/modified.

    Returns:
        A list of repo-relative paths, or [] when git is unavailable,
        the command fails, or the project is not a git repository.
    """
    try:
        # STATE_FILE lives in .claude/, so its grandparent is the project root.
        result = subprocess.run(
            ["git", "diff", "--name-only", "HEAD"],
            capture_output=True,
            text=True,
            cwd=STATE_FILE.parent.parent,  # Project root
            timeout=10,  # Don't hang the stop-hook on a wedged git process
        )
        if result.returncode == 0:
            return [f.strip() for f in result.stdout.strip().split("\n") if f.strip()]
    except (subprocess.SubprocessError, OSError):
        # Best-effort: a missing git binary or non-repo directory must
        # never block the user from stopping.
        pass
    return []
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def check_verification_warnings(state: dict) -> list[str]:
    """Report any unaddressed verification warnings recorded in *state*.

    Gap 3 Fix: Don't accept "skipped" or warnings without explanation.

    Returns a formatted message block (header, up to the 5 most recent
    warnings, and a call to action), or [] when there are none.
    """
    pending = state.get("verification_warnings", [])
    if not pending:
        return []

    lines = ["⚠️ Unaddressed verification warnings:"]
    lines.extend(f"  - {w}" for w in pending[-5:])  # Show last 5
    lines.append("")
    lines.append("Please review and address these warnings before completing.")
    return lines
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def check_interview_implementation_match(state: dict) -> list[str]:
    """Verify implementation matches interview requirements.

    Gap 4 Fix: Define specific "done" criteria based on interview.

    Returns a list of warning strings; empty when nothing looks amiss.
    """
    problems: list[str] = []

    # Flatten all interview questions into one searchable string.
    interview = state.get("phases", {}).get("interview", {})
    interview_text = " ".join(
        str(q) for q in interview.get("questions", [])
    ).lower()

    created = state.get("files_created", [])

    # Interview talked about endpoints -> expect at least one route.ts file.
    mentions_endpoints = "endpoint" in interview_text or "/api/" in interview_text
    if mentions_endpoints and not any("route.ts" in f for f in created):
        problems.append("⚠️ Interview mentioned endpoints but no route.ts files were created")

    # TDD workflow -> expect at least one tracked test file.
    has_tests = any(".test." in f or "__tests__" in f for f in created)
    if not has_tests:
        problems.append("⚠️ No test files tracked in files_created")

    return problems
|
|
105
|
+
|
|
106
|
+
|
|
36
107
|
def main():
|
|
37
108
|
# If no state file, we're not in an API workflow - allow stop
|
|
38
109
|
if not STATE_FILE.exists():
|
|
@@ -56,6 +127,9 @@ def main():
|
|
|
56
127
|
print(json.dumps({"decision": "approve"}))
|
|
57
128
|
sys.exit(0)
|
|
58
129
|
|
|
130
|
+
# Collect all issues
|
|
131
|
+
all_issues = []
|
|
132
|
+
|
|
59
133
|
# Check required phases
|
|
60
134
|
incomplete_required = []
|
|
61
135
|
for phase_key, phase_name in REQUIRED_PHASES:
|
|
@@ -64,6 +138,10 @@ def main():
|
|
|
64
138
|
if status != "complete":
|
|
65
139
|
incomplete_required.append(f" - {phase_name} ({status})")
|
|
66
140
|
|
|
141
|
+
if incomplete_required:
|
|
142
|
+
all_issues.append("❌ REQUIRED phases incomplete:")
|
|
143
|
+
all_issues.extend(incomplete_required)
|
|
144
|
+
|
|
67
145
|
# Check recommended phases
|
|
68
146
|
incomplete_recommended = []
|
|
69
147
|
for phase_key, phase_name in RECOMMENDED_PHASES:
|
|
@@ -72,42 +150,73 @@ def main():
|
|
|
72
150
|
if status != "complete":
|
|
73
151
|
incomplete_recommended.append(f" - {phase_name} ({status})")
|
|
74
152
|
|
|
153
|
+
# Gap 2: Check git diff vs tracked files
|
|
154
|
+
git_files = get_git_modified_files()
|
|
155
|
+
tracked_files = state.get("files_created", []) + state.get("files_modified", [])
|
|
156
|
+
|
|
157
|
+
if git_files and tracked_files:
|
|
158
|
+
# Find files in git but not tracked
|
|
159
|
+
untracked_changes = []
|
|
160
|
+
for gf in git_files:
|
|
161
|
+
if not any(gf.endswith(tf) or tf in gf for tf in tracked_files):
|
|
162
|
+
if gf.endswith(".ts") and ("/api/" in gf or "/lib/" in gf):
|
|
163
|
+
untracked_changes.append(gf)
|
|
164
|
+
|
|
165
|
+
if untracked_changes:
|
|
166
|
+
all_issues.append("\n⚠️ Gap 2: Files changed but not tracked:")
|
|
167
|
+
all_issues.extend([f" - {f}" for f in untracked_changes[:5]])
|
|
168
|
+
|
|
169
|
+
# Gap 3: Check for unaddressed warnings
|
|
170
|
+
warning_issues = check_verification_warnings(state)
|
|
171
|
+
if warning_issues:
|
|
172
|
+
all_issues.append("\n" + "\n".join(warning_issues))
|
|
173
|
+
|
|
174
|
+
# Gap 4: Check interview-implementation match
|
|
175
|
+
match_issues = check_interview_implementation_match(state)
|
|
176
|
+
if match_issues:
|
|
177
|
+
all_issues.append("\n⚠️ Gap 4: Implementation verification:")
|
|
178
|
+
all_issues.extend([f" {i}" for i in match_issues])
|
|
179
|
+
|
|
75
180
|
# Block if required phases incomplete
|
|
76
181
|
if incomplete_required:
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
reason_parts.append("\n\n⚠️ Also recommended but not complete:")
|
|
82
|
-
reason_parts.extend(incomplete_recommended)
|
|
83
|
-
|
|
84
|
-
reason_parts.append("\n\nTo continue:")
|
|
85
|
-
reason_parts.append(" 1. Complete required phases above")
|
|
86
|
-
reason_parts.append(" 2. Use /api-status to see detailed progress")
|
|
87
|
-
reason_parts.append(" 3. Or manually mark phases complete in .claude/api-dev-state.json")
|
|
182
|
+
all_issues.append("\n\nTo continue:")
|
|
183
|
+
all_issues.append(" 1. Complete required phases above")
|
|
184
|
+
all_issues.append(" 2. Use /api-status to see detailed progress")
|
|
185
|
+
all_issues.append(" 3. Run `git diff --name-only` to verify changes")
|
|
88
186
|
|
|
89
187
|
print(json.dumps({
|
|
90
188
|
"decision": "block",
|
|
91
|
-
"reason": "\n".join(
|
|
189
|
+
"reason": "\n".join(all_issues)
|
|
92
190
|
}))
|
|
93
191
|
sys.exit(0)
|
|
94
192
|
|
|
95
|
-
#
|
|
96
|
-
|
|
97
|
-
# Allow but the reason will be shown to user
|
|
98
|
-
print(json.dumps({
|
|
99
|
-
"decision": "approve",
|
|
100
|
-
"message": f"""⚠️ API workflow completing with optional phases pending:
|
|
101
|
-
{chr(10).join(incomplete_recommended)}
|
|
193
|
+
# Build completion message
|
|
194
|
+
message_parts = ["✅ API workflow completing"]
|
|
102
195
|
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
196
|
+
if incomplete_recommended:
|
|
197
|
+
message_parts.append("\n⚠️ Optional phases skipped:")
|
|
198
|
+
message_parts.extend(incomplete_recommended)
|
|
199
|
+
|
|
200
|
+
# Show summary of tracked files
|
|
201
|
+
files_created = state.get("files_created", [])
|
|
202
|
+
if files_created:
|
|
203
|
+
message_parts.append(f"\n📁 Files created: {len(files_created)}")
|
|
204
|
+
for f in files_created[:5]:
|
|
205
|
+
message_parts.append(f" - {f}")
|
|
206
|
+
if len(files_created) > 5:
|
|
207
|
+
message_parts.append(f" ... and {len(files_created) - 5} more")
|
|
208
|
+
|
|
209
|
+
# Show any remaining warnings
|
|
210
|
+
if warning_issues or match_issues:
|
|
211
|
+
message_parts.append("\n⚠️ Review suggested:")
|
|
212
|
+
if warning_issues:
|
|
213
|
+
message_parts.extend(warning_issues[:3])
|
|
214
|
+
if match_issues:
|
|
215
|
+
message_parts.extend(match_issues[:3])
|
|
106
216
|
|
|
107
|
-
# All phases complete
|
|
108
217
|
print(json.dumps({
|
|
109
218
|
"decision": "approve",
|
|
110
|
-
"message": "
|
|
219
|
+
"message": "\n".join(message_parts)
|
|
111
220
|
}))
|
|
112
221
|
sys.exit(0)
|
|
113
222
|
|
|
@@ -0,0 +1,318 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""
|
|
3
|
+
Hook: UserPromptSubmit
|
|
4
|
+
Purpose: Enforce research before answering external API/SDK questions
|
|
5
|
+
|
|
6
|
+
This hook runs BEFORE Claude processes the user's prompt. It detects
|
|
7
|
+
questions about external APIs, SDKs, or services and injects context
|
|
8
|
+
requiring Claude to research first before answering.
|
|
9
|
+
|
|
10
|
+
Philosophy: "When in doubt, research. Training data is ALWAYS potentially outdated."
|
|
11
|
+
|
|
12
|
+
Returns:
|
|
13
|
+
- Prints context to stdout (injected into conversation)
|
|
14
|
+
- Exit 0 to allow the prompt to proceed
|
|
15
|
+
"""
|
|
16
|
+
import json
|
|
17
|
+
import sys
|
|
18
|
+
import re
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
from datetime import datetime
|
|
21
|
+
|
|
22
|
+
# State file is in .claude/ directory (sibling to hooks/)
|
|
23
|
+
STATE_FILE = Path(__file__).parent.parent / "api-dev-state.json"
|
|
24
|
+
|
|
25
|
+
# ============================================================================
|
|
26
|
+
# PATTERN-BASED DETECTION
|
|
27
|
+
# ============================================================================
|
|
28
|
+
|
|
29
|
+
# Patterns that indicate external service/API mentions
|
|
30
|
+
EXTERNAL_SERVICE_PATTERNS = [
|
|
31
|
+
# Package names
|
|
32
|
+
r"@[\w-]+/[\w-]+", # @scope/package
|
|
33
|
+
r"\b[\w-]+-(?:sdk|api|js|ts|py)\b", # something-sdk, something-api, something-js
|
|
34
|
+
|
|
35
|
+
# API/SDK keywords
|
|
36
|
+
r"\b(?:api|sdk|library|package|module|framework)\b",
|
|
37
|
+
|
|
38
|
+
# Technical implementation terms
|
|
39
|
+
r"\b(?:endpoint|route|webhook|oauth|auth|token)\b",
|
|
40
|
+
|
|
41
|
+
# Version references
|
|
42
|
+
r"\bv?\d+\.\d+(?:\.\d+)?\b", # version numbers like v1.2.3, 2.0
|
|
43
|
+
|
|
44
|
+
# Import/require patterns
|
|
45
|
+
r"(?:import|require|from)\s+['\"][\w@/-]+['\"]",
|
|
46
|
+
]
|
|
47
|
+
|
|
48
|
+
# Patterns that indicate asking about features/capabilities
|
|
49
|
+
CAPABILITY_QUESTION_PATTERNS = [
|
|
50
|
+
# "What does X support/have/do"
|
|
51
|
+
r"what\s+(?:does|can|are|is)\s+\w+",
|
|
52
|
+
r"what\s+\w+\s+(?:support|have|provide|offer)",
|
|
53
|
+
|
|
54
|
+
# "Does X support/have"
|
|
55
|
+
r"(?:does|can|will)\s+\w+\s+(?:support|have|handle|do|work)",
|
|
56
|
+
|
|
57
|
+
# "How to/do" questions
|
|
58
|
+
r"how\s+(?:to|do|does|can|should)\s+",
|
|
59
|
+
|
|
60
|
+
# Lists and availability
|
|
61
|
+
r"(?:list|show)\s+(?:of|all|available)",
|
|
62
|
+
r"which\s+\w+\s+(?:are|is)\s+(?:available|supported)",
|
|
63
|
+
r"all\s+(?:available|supported)\s+\w+",
|
|
64
|
+
|
|
65
|
+
# Examples and implementation
|
|
66
|
+
r"example\s+(?:of|for|using|with)",
|
|
67
|
+
r"how\s+to\s+(?:use|implement|integrate|connect|setup|configure)",
|
|
68
|
+
]
|
|
69
|
+
|
|
70
|
+
# Common external service/company names (partial list - patterns catch the rest)
|
|
71
|
+
KNOWN_SERVICES = [
|
|
72
|
+
# AI/ML
|
|
73
|
+
"openai", "anthropic", "google", "gemini", "gpt", "claude", "llama",
|
|
74
|
+
"groq", "perplexity", "mistral", "cohere", "huggingface", "replicate",
|
|
75
|
+
|
|
76
|
+
# Cloud/Infrastructure
|
|
77
|
+
"aws", "azure", "gcp", "vercel", "netlify", "cloudflare", "supabase",
|
|
78
|
+
"firebase", "mongodb", "postgres", "redis", "elasticsearch",
|
|
79
|
+
|
|
80
|
+
# APIs/Services
|
|
81
|
+
"stripe", "twilio", "sendgrid", "mailchimp", "slack", "discord",
|
|
82
|
+
"github", "gitlab", "bitbucket", "jira", "notion", "airtable",
|
|
83
|
+
"shopify", "salesforce", "hubspot", "zendesk",
|
|
84
|
+
|
|
85
|
+
# Data/Analytics
|
|
86
|
+
"segment", "mixpanel", "amplitude", "datadog", "sentry", "grafana",
|
|
87
|
+
|
|
88
|
+
# Media/Content
|
|
89
|
+
"cloudinary", "imgix", "mux", "brandfetch", "unsplash", "pexels",
|
|
90
|
+
|
|
91
|
+
# Auth
|
|
92
|
+
"auth0", "okta", "clerk", "nextauth", "passport",
|
|
93
|
+
|
|
94
|
+
# Payments
|
|
95
|
+
"paypal", "square", "braintree", "adyen",
|
|
96
|
+
]
|
|
97
|
+
|
|
98
|
+
# ============================================================================
|
|
99
|
+
# DETECTION LOGIC
|
|
100
|
+
# ============================================================================
|
|
101
|
+
|
|
102
|
+
def detect_external_api_question(prompt: str) -> dict:
    """
    Detect if the prompt is asking about external APIs/SDKs.

    Returns:
        {
            "detected": bool,
            "terms": list of detected terms,
            "patterns_matched": list of pattern types matched,
            "confidence": "high" | "medium" | "low"
        }
    """
    lowered = prompt.lower()
    terms: list = []
    matched: list = []

    # Known service names are the strongest single signal.
    for service in KNOWN_SERVICES:
        if service in lowered:
            terms.append(service)
            matched.append("known_service")

    # Generic package-name / API-keyword / version shapes.
    for pattern in EXTERNAL_SERVICE_PATTERNS:
        hits = re.findall(pattern, lowered, re.IGNORECASE)
        if hits:
            terms.extend(hits)
            matched.append("external_service_pattern")

    # Is the user asking what something can do / how to use it?
    if any(re.search(p, lowered, re.IGNORECASE) for p in CAPABILITY_QUESTION_PATTERNS):
        matched.append("capability_question")

    # Deduplicate
    terms = list(set(terms))
    matched = list(set(matched))

    # Confidence heuristic: a named service plus a capability question is
    # the strongest combination; bare pattern hits alone are weak.
    if "known_service" in matched and "capability_question" in matched:
        confidence = "high"
    elif "known_service" in matched or len(terms) >= 2:
        confidence = "medium"
    elif matched:
        confidence = "low"
    else:
        confidence = "none"

    return {
        "detected": confidence in ["high", "medium"],
        "terms": terms[:10],  # Limit to 10 terms
        "patterns_matched": matched,
        "confidence": confidence,
    }
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
def check_active_workflow() -> bool:
    """Check if there's an active API development workflow.

    A workflow counts as active when the state file exists and any phase
    entry is still "in_progress" or "pending".

    Returns:
        True when at least one phase is unfinished; False otherwise
        (including when the state file is missing or unreadable).
    """
    if not STATE_FILE.exists():
        return False

    try:
        state = json.loads(STATE_FILE.read_text())
    # Fix: the original caught `(json.JSONDecodeError, Exception)` — the
    # tuple is redundant since Exception subsumes JSONDecodeError. Catch
    # the actual failure modes: I/O errors and parse/decode errors
    # (JSONDecodeError and UnicodeDecodeError are both ValueError).
    except (OSError, ValueError):
        # Unreadable/corrupt state: treat as no active workflow rather
        # than failing the hook.
        return False

    # Active if any phase dict reports an unfinished status.
    return any(
        phase_data.get("status", "") in ("in_progress", "pending")
        for phase_data in state.get("phases", {}).values()
        if isinstance(phase_data, dict)
    )
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
def check_already_researched(terms: list) -> list:
    """Return the subset of *terms* that already appear in recorded research.

    Looks at both top-level "research_queries" and the per-phase "sources"
    lists in the state file. Matching is a case-insensitive substring test.

    Returns:
        Terms from *terms* found in the recorded research text; [] when
        the state file is missing or unreadable.
    """
    if not STATE_FILE.exists():
        return []

    try:
        state = json.loads(STATE_FILE.read_text())
    # Fix: original caught the redundant `(json.JSONDecodeError, Exception)`;
    # narrowed to actual I/O and parse/decode failures.
    except (OSError, ValueError):
        return []

    research_queries = state.get("research_queries", [])

    # Collect research sources recorded on any phase.
    all_sources = []
    for phase_data in state.get("phases", {}).values():
        if isinstance(phase_data, dict):
            all_sources.extend(phase_data.get("sources", []))

    # Combine all research text. Bug fix: the original did
    # `text = " ".join(sources); text += " ".join(queries)` with no
    # separator between the two joins, so the last source and first query
    # could fuse into one token and produce false substring matches.
    pieces = [str(s) for s in all_sources]
    pieces.extend(
        str(q.get("query", "")) + " " + str(q.get("term", ""))
        for q in research_queries
        if isinstance(q, dict)
    )
    all_research_text = " ".join(pieces).lower()

    # Find which terms were already researched.
    return [term for term in terms if term.lower() in all_research_text]
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
def log_detection(prompt: str, detection: dict) -> None:
    """Record a detection event in the state file for debugging/auditing.

    Appends a timestamped entry (with a 100-char prompt preview) to
    "prompt_detections", capped at the 20 most recent entries.
    """
    if not STATE_FILE.exists():
        return

    try:
        state = json.loads(STATE_FILE.read_text())

        preview = prompt[:100] + "..." if len(prompt) > 100 else prompt
        entry = {
            "timestamp": datetime.now().isoformat(),
            "prompt_preview": preview,
            "detection": detection,
        }

        history = state.setdefault("prompt_detections", [])
        history.append(entry)
        # Keep only last 20 detections
        state["prompt_detections"] = history[-20:]

        STATE_FILE.write_text(json.dumps(state, indent=2))
    except Exception:
        pass  # Don't fail the hook on logging errors
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
# ============================================================================
|
|
243
|
+
# MAIN
|
|
244
|
+
# ============================================================================
|
|
245
|
+
|
|
246
|
+
def main():
    """Hook entry point (UserPromptSubmit).

    Reads the hook payload from stdin, detects external API/SDK mentions
    in the user's prompt, and prints a research-first directive to stdout
    (which the harness injects into the conversation) when warranted.
    Always exits 0 — this hook only injects context; it never blocks.
    """
    # Read hook input from stdin
    try:
        input_data = json.load(sys.stdin)
    except json.JSONDecodeError:
        # If we can't parse input, allow without injection
        sys.exit(0)

    prompt = input_data.get("prompt", "")

    if not prompt:
        sys.exit(0)

    # Check if in active workflow mode (stricter enforcement)
    active_workflow = check_active_workflow()

    # Detect external API questions
    detection = detect_external_api_question(prompt)

    # Log for debugging
    if detection["detected"] or active_workflow:
        log_detection(prompt, detection)

    # Determine if we should inject research requirement
    should_inject = False
    # NOTE(review): inject_reason is assigned below but never read or
    # logged afterwards — presumably intended for log_detection; confirm.
    inject_reason = ""

    if active_workflow:
        # In active workflow, ALWAYS inject for technical questions
        # (any non-"none" confidence level qualifies).
        if detection["confidence"] in ["high", "medium", "low"]:
            should_inject = True
            inject_reason = "active_workflow"
    elif detection["detected"]:
        # Check if already researched
        already_researched = check_already_researched(detection["terms"])
        unresearched_terms = [t for t in detection["terms"] if t not in already_researched]

        if unresearched_terms:
            should_inject = True
            inject_reason = "unresearched_terms"
            detection["unresearched"] = unresearched_terms

    # Inject context if needed
    if should_inject:
        # Prefer the not-yet-researched subset; show at most 5 terms.
        terms_str = ", ".join(detection.get("unresearched", detection["terms"])[:5])

        injection = f"""
<user-prompt-submit-hook>
EXTERNAL API/SDK DETECTED: {terms_str}
Confidence: {detection["confidence"]}
{"Mode: Active API Development Workflow" if active_workflow else ""}

MANDATORY RESEARCH REQUIREMENT:
Before answering this question, you MUST:

1. Use Context7 (mcp__context7__resolve-library-id + get-library-docs) to look up current documentation
2. Use WebSearch to find official documentation and recent updates
3. NEVER answer from training data alone - it may be outdated

Training data can be months or years old. APIs change constantly.
Research first. Then answer with verified, current information.

After researching, cite your sources in your response.
</user-prompt-submit-hook>
"""
        print(injection)

    # Always allow the prompt to proceed
    sys.exit(0)
|
|
315
|
+
|
|
316
|
+
|
|
317
|
+
if __name__ == "__main__":
|
|
318
|
+
main()
|