speclock 1.6.0 → 2.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +47 -13
- package/package.json +2 -2
- package/src/cli/index.js +150 -13
- package/src/core/engine.js +243 -70
- package/src/core/git.js +6 -0
- package/src/core/hooks.js +87 -0
- package/src/core/llm-checker.js +239 -0
- package/src/core/semantics.js +1096 -0
- package/src/core/storage.js +18 -0
- package/src/core/templates.js +114 -0
- package/src/mcp/http-server.js +44 -4
- package/src/mcp/server.js +119 -2
|
@@ -0,0 +1,239 @@
|
|
|
1
|
+
// ===================================================================
|
|
2
|
+
// SpecLock LLM-Powered Conflict Checker (Optional)
|
|
3
|
+
// Uses OpenAI or Anthropic APIs for enterprise-grade detection.
|
|
4
|
+
// Zero mandatory dependencies — uses built-in fetch().
|
|
5
|
+
// Falls back gracefully if no API key is configured.
|
|
6
|
+
// ===================================================================
|
|
7
|
+
|
|
8
|
+
import { readBrain } from "./storage.js";
|
|
9
|
+
|
|
10
|
+
// --- In-memory LRU cache ---
const CACHE_MAX = 200;
const CACHE_TTL_MS = 5 * 60 * 1000; // 5 minutes
const cache = new Map();

/** Build a deterministic key from the action plus the sorted lock texts. */
function cacheKey(action, locks) {
  const sortedTexts = locks.map((lock) => lock.text).sort();
  return `${action}::${sortedTexts.join("|")}`;
}

/** Return the cached value for `key`, or null when absent or past its TTL. */
function cacheGet(key) {
  const entry = cache.get(key);
  if (entry === undefined) return null;
  const expired = Date.now() - entry.ts > CACHE_TTL_MS;
  if (!expired) return entry.value;
  // Lazily expire stale entries on read.
  cache.delete(key);
  return null;
}

/** Store `value` under `key`, evicting the oldest entry when at capacity. */
function cacheSet(key, value) {
  if (cache.size >= CACHE_MAX) {
    // Map iteration order is insertion order, so the first key is the oldest.
    const oldestKey = cache.keys().next().value;
    cache.delete(oldestKey);
  }
  cache.set(key, { value, ts: Date.now() });
}
|
|
37
|
+
|
|
38
|
+
// --- Configuration ---

/**
 * Resolve the LLM credentials to use for this project.
 * Environment variables win over any config persisted in brain.json.
 * @param {string} root - Project root path
 * @returns {{apiKey: string, provider: string}|null} null when nothing is configured
 */
function getConfig(root) {
  // Priority: env var > brain.json config
  const envKey =
    process.env.SPECLOCK_LLM_KEY ||
    process.env.OPENAI_API_KEY ||
    process.env.ANTHROPIC_API_KEY;
  const envProvider = process.env.SPECLOCK_LLM_PROVIDER || "openai"; // "openai" or "anthropic"

  if (envKey) {
    return { apiKey: envKey, provider: envProvider };
  }

  // Check brain.json for LLM config; any read/parse failure means "unconfigured".
  try {
    const llm = readBrain(root)?.facts?.llm;
    if (llm) {
      return { apiKey: llm.apiKey, provider: llm.provider || "openai" };
    }
  } catch (_) {}

  return null;
}
|
|
62
|
+
|
|
63
|
+
// --- System prompt ---

// Shared system prompt for both providers. The numbered rules target known
// evasion patterns (euphemism, jargon, temporal softeners, context dilution),
// while rules 6-7 suppress false positives for positive or read-only actions.
// The model is instructed to reply with bare JSON matching the schema below;
// the API callers still defensively strip a markdown fence if one appears.
const SYSTEM_PROMPT = `You are a security constraint checker for SpecLock, an AI constraint engine.

Your job: determine if a proposed action conflicts with any active SpecLock constraints (locks).

Rules:
1. A lock like "Never X" means the action MUST NOT do X, regardless of phrasing.
2. Watch for EUPHEMISMS: "clean up data" = delete, "streamline" = remove, "sunset" = deprecate/remove.
3. Watch for TECHNICAL JARGON: "truncate table" = delete records, "flash firmware" = overwrite, "bridge segments" = connect.
4. Watch for TEMPORAL SOFTENERS: "temporarily disable" is still disabling. "Just for testing" is still doing it.
5. Watch for CONTEXT DILUTION: "update UI and also delete patient records" — the second part conflicts even if buried.
6. POSITIVE actions do NOT conflict: "Enable audit logging" does NOT conflict with "Never disable audit logging".
7. Read-only actions do NOT conflict: "View patient records" does NOT conflict with "Never delete patient records".

Respond with ONLY valid JSON (no markdown, no explanation):
{
  "hasConflict": true/false,
  "conflicts": [
    {
      "lockText": "the lock text",
      "confidence": 0-100,
      "level": "HIGH/MEDIUM/LOW",
      "reasons": ["reason1", "reason2"]
    }
  ],
  "analysis": "one-line summary"
}`;
|
|
91
|
+
|
|
92
|
+
// --- API callers ---

/**
 * Ask OpenAI's chat completions API to evaluate the user prompt against
 * SYSTEM_PROMPT. Returns the parsed JSON verdict, or null on any HTTP or
 * parse failure so the caller can fall back to the heuristic checker.
 * @param {string} apiKey - OpenAI API key
 * @param {string} userPrompt - Pre-built user message (locks + proposed action)
 * @returns {Promise<Object|null>}
 */
async function callOpenAI(apiKey, userPrompt) {
  const resp = await fetch("https://api.openai.com/v1/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${apiKey}`,
    },
    body: JSON.stringify({
      model: "gpt-4o-mini",
      messages: [
        { role: "system", content: SYSTEM_PROMPT },
        { role: "user", content: userPrompt },
      ],
      temperature: 0.1, // near-deterministic: this is a classification task
      max_tokens: 1000,
    }),
  });

  if (!resp.ok) return null;
  const data = await resp.json();
  const content = data.choices?.[0]?.message?.content;
  if (!content) return null;

  try {
    return JSON.parse(content);
  } catch (_) {
    // The model sometimes wraps JSON in a markdown code fence despite
    // instructions. Guard the second parse too: a malformed fenced payload
    // must return null like every other failure path, not throw.
    const match = content.match(/```(?:json)?\s*([\s\S]*?)```/);
    if (match) {
      try {
        return JSON.parse(match[1]);
      } catch (_) {
        return null;
      }
    }
    return null;
  }
}
|
|
126
|
+
|
|
127
|
+
/**
 * Ask Anthropic's messages API to evaluate the user prompt against
 * SYSTEM_PROMPT. Returns the parsed JSON verdict, or null on any HTTP or
 * parse failure so the caller can fall back to the heuristic checker.
 * @param {string} apiKey - Anthropic API key
 * @param {string} userPrompt - Pre-built user message (locks + proposed action)
 * @returns {Promise<Object|null>}
 */
async function callAnthropic(apiKey, userPrompt) {
  const resp = await fetch("https://api.anthropic.com/v1/messages", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "x-api-key": apiKey,
      "anthropic-version": "2023-06-01",
    },
    body: JSON.stringify({
      model: "claude-sonnet-4-20250514",
      max_tokens: 1000,
      system: SYSTEM_PROMPT,
      messages: [
        { role: "user", content: userPrompt },
      ],
    }),
  });

  if (!resp.ok) return null;
  const data = await resp.json();
  const content = data.content?.[0]?.text;
  if (!content) return null;

  try {
    return JSON.parse(content);
  } catch (_) {
    // The model sometimes wraps JSON in a markdown code fence despite
    // instructions. Guard the second parse too: a malformed fenced payload
    // must return null like every other failure path, not throw.
    const match = content.match(/```(?:json)?\s*([\s\S]*?)```/);
    if (match) {
      try {
        return JSON.parse(match[1]);
      } catch (_) {
        return null;
      }
    }
    return null;
  }
}
|
|
158
|
+
|
|
159
|
+
// --- Main export ---

/**
 * Check conflicts using LLM. Returns null on any failure (caller should fall back to heuristic).
 * @param {string} root - Project root path
 * @param {string} proposedAction - The action to check
 * @param {Array} [activeLocks] - Optional pre-fetched locks
 * @returns {Promise<Object|null>} - Same shape as checkConflict() return, or null
 */
export async function llmCheckConflict(root, proposedAction, activeLocks) {
  const cfg = getConfig(root);
  if (!cfg) return null;

  // Load active locks from the brain when the caller did not supply them.
  if (!activeLocks) {
    try {
      const items = readBrain(root)?.specLock?.items;
      activeLocks = items?.filter((lock) => lock.active !== false) || [];
    } catch (_) {
      return null;
    }
  }

  if (activeLocks.length === 0) {
    return {
      hasConflict: false,
      conflictingLocks: [],
      analysis: "No active locks. No constraints to check against.",
    };
  }

  // Serve a recent identical query straight from the cache.
  const key = cacheKey(proposedAction, activeLocks);
  const hit = cacheGet(key);
  if (hit) return hit;

  // Build the user prompt: numbered lock list followed by the action.
  const numberedLocks = activeLocks
    .map((lock, idx) => `${idx + 1}. "${lock.text}"`)
    .join("\n");
  const userPrompt = `Active SpecLocks:\n${numberedLocks}\n\nProposed Action: "${proposedAction}"\n\nDoes this action conflict with any lock?`;

  // Ask the configured provider; any transport failure means "no answer".
  let verdict = null;
  try {
    verdict =
      cfg.provider === "anthropic"
        ? await callAnthropic(cfg.apiKey, userPrompt)
        : await callOpenAI(cfg.apiKey, userPrompt);
  } catch (_) {
    return null;
  }
  if (!verdict) return null;

  // Convert the LLM response into the same shape checkConflict() returns,
  // dropping low-confidence (< 25) hits.
  const conflictingLocks = [];
  for (const c of verdict.conflicts || []) {
    if (!(c.confidence >= 25)) continue;
    // Map the reported text back to a known lock where possible.
    const lock =
      activeLocks.find((l) => l.text === c.lockText) ||
      { id: "unknown", text: c.lockText };
    conflictingLocks.push({
      id: lock.id,
      text: c.lockText,
      matchedKeywords: [],
      confidence: c.confidence,
      level:
        c.level ||
        (c.confidence >= 70 ? "HIGH" : c.confidence >= 40 ? "MEDIUM" : "LOW"),
      reasons: c.reasons || [],
    });
  }

  const result = {
    hasConflict: conflictingLocks.length > 0,
    conflictingLocks,
    analysis:
      verdict.analysis ||
      (conflictingLocks.length > 0
        ? `LLM detected ${conflictingLocks.length} conflict(s). Review before proceeding.`
        : `LLM checked against ${activeLocks.length} lock(s). No conflicts detected.`),
  };

  cacheSet(key, result);
  return result;
}
|