speclock 4.5.7 → 5.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +145 -9
- package/package.json +67 -131
- package/src/cli/index.js +1 -1
- package/src/core/code-graph.js +635 -0
- package/src/core/compliance.js +1 -1
- package/src/core/conflict.js +1 -0
- package/src/core/engine.js +32 -2
- package/src/core/llm-checker.js +3 -156
- package/src/core/llm-provider.js +208 -0
- package/src/core/memory.js +115 -0
- package/src/core/spec-compiler.js +315 -0
- package/src/core/typed-constraints.js +408 -0
- package/src/dashboard/index.html +5 -4
- package/src/mcp/http-server.js +596 -7
- package/src/mcp/server.js +383 -1
package/src/core/llm-checker.js
CHANGED
|
@@ -8,6 +8,7 @@
|
|
|
8
8
|
// ===================================================================
|
|
9
9
|
|
|
10
10
|
import { readBrain } from "./storage.js";
|
|
11
|
+
import { getConfig, callLLM } from "./llm-provider.js";
|
|
11
12
|
|
|
12
13
|
// --- In-memory LRU cache ---
|
|
13
14
|
const CACHE_MAX = 200;
|
|
@@ -37,44 +38,6 @@ function cacheSet(key, value) {
|
|
|
37
38
|
cache.set(key, { value, ts: Date.now() });
|
|
38
39
|
}
|
|
39
40
|
|
|
40
|
-
// --- Configuration ---
|
|
41
|
-
|
|
42
|
-
function getConfig(root) {
|
|
43
|
-
// Priority: explicit SPECLOCK key > provider-specific keys > brain.json
|
|
44
|
-
const apiKey =
|
|
45
|
-
process.env.SPECLOCK_LLM_KEY ||
|
|
46
|
-
process.env.GEMINI_API_KEY ||
|
|
47
|
-
process.env.GOOGLE_API_KEY ||
|
|
48
|
-
process.env.OPENAI_API_KEY ||
|
|
49
|
-
process.env.ANTHROPIC_API_KEY;
|
|
50
|
-
|
|
51
|
-
// Auto-detect provider from which env var is set
|
|
52
|
-
const provider =
|
|
53
|
-
process.env.SPECLOCK_LLM_PROVIDER ||
|
|
54
|
-
(process.env.SPECLOCK_LLM_KEY ? "gemini" : null) ||
|
|
55
|
-
(process.env.GEMINI_API_KEY || process.env.GOOGLE_API_KEY ? "gemini" : null) ||
|
|
56
|
-
(process.env.OPENAI_API_KEY ? "openai" : null) ||
|
|
57
|
-
(process.env.ANTHROPIC_API_KEY ? "anthropic" : null) ||
|
|
58
|
-
"gemini"; // default to gemini (cheapest, free tier)
|
|
59
|
-
|
|
60
|
-
if (apiKey) {
|
|
61
|
-
return { apiKey, provider };
|
|
62
|
-
}
|
|
63
|
-
|
|
64
|
-
// Check brain.json for LLM config
|
|
65
|
-
try {
|
|
66
|
-
const brain = readBrain(root);
|
|
67
|
-
if (brain?.facts?.llm) {
|
|
68
|
-
return {
|
|
69
|
-
apiKey: brain.facts.llm.apiKey,
|
|
70
|
-
provider: brain.facts.llm.provider || "gemini",
|
|
71
|
-
};
|
|
72
|
-
}
|
|
73
|
-
} catch (_) {}
|
|
74
|
-
|
|
75
|
-
return null;
|
|
76
|
-
}
|
|
77
|
-
|
|
78
41
|
// --- System prompt ---
|
|
79
42
|
|
|
80
43
|
const SYSTEM_PROMPT = `You are a security constraint checker for SpecLock, an AI constraint engine.
|
|
@@ -104,111 +67,6 @@ Respond with ONLY valid JSON (no markdown, no explanation):
|
|
|
104
67
|
"analysis": "one-line summary"
|
|
105
68
|
}`;
|
|
106
69
|
|
|
107
|
-
// --- API callers ---
|
|
108
|
-
|
|
109
|
-
async function callOpenAI(apiKey, userPrompt) {
|
|
110
|
-
const resp = await fetch("https://api.openai.com/v1/chat/completions", {
|
|
111
|
-
method: "POST",
|
|
112
|
-
headers: {
|
|
113
|
-
"Content-Type": "application/json",
|
|
114
|
-
"Authorization": `Bearer ${apiKey}`,
|
|
115
|
-
},
|
|
116
|
-
body: JSON.stringify({
|
|
117
|
-
model: "gpt-4o-mini",
|
|
118
|
-
messages: [
|
|
119
|
-
{ role: "system", content: SYSTEM_PROMPT },
|
|
120
|
-
{ role: "user", content: userPrompt },
|
|
121
|
-
],
|
|
122
|
-
temperature: 0.1,
|
|
123
|
-
max_tokens: 1000,
|
|
124
|
-
}),
|
|
125
|
-
});
|
|
126
|
-
|
|
127
|
-
if (!resp.ok) return null;
|
|
128
|
-
const data = await resp.json();
|
|
129
|
-
const content = data.choices?.[0]?.message?.content;
|
|
130
|
-
if (!content) return null;
|
|
131
|
-
|
|
132
|
-
try {
|
|
133
|
-
return JSON.parse(content);
|
|
134
|
-
} catch (_) {
|
|
135
|
-
// Try to extract JSON from markdown code block
|
|
136
|
-
const match = content.match(/```(?:json)?\s*([\s\S]*?)```/);
|
|
137
|
-
if (match) return JSON.parse(match[1]);
|
|
138
|
-
return null;
|
|
139
|
-
}
|
|
140
|
-
}
|
|
141
|
-
|
|
142
|
-
async function callAnthropic(apiKey, userPrompt) {
|
|
143
|
-
const resp = await fetch("https://api.anthropic.com/v1/messages", {
|
|
144
|
-
method: "POST",
|
|
145
|
-
headers: {
|
|
146
|
-
"Content-Type": "application/json",
|
|
147
|
-
"x-api-key": apiKey,
|
|
148
|
-
"anthropic-version": "2023-06-01",
|
|
149
|
-
},
|
|
150
|
-
body: JSON.stringify({
|
|
151
|
-
model: "claude-sonnet-4-20250514",
|
|
152
|
-
max_tokens: 1000,
|
|
153
|
-
system: SYSTEM_PROMPT,
|
|
154
|
-
messages: [
|
|
155
|
-
{ role: "user", content: userPrompt },
|
|
156
|
-
],
|
|
157
|
-
}),
|
|
158
|
-
});
|
|
159
|
-
|
|
160
|
-
if (!resp.ok) return null;
|
|
161
|
-
const data = await resp.json();
|
|
162
|
-
const content = data.content?.[0]?.text;
|
|
163
|
-
if (!content) return null;
|
|
164
|
-
|
|
165
|
-
try {
|
|
166
|
-
return JSON.parse(content);
|
|
167
|
-
} catch (_) {
|
|
168
|
-
const match = content.match(/```(?:json)?\s*([\s\S]*?)```/);
|
|
169
|
-
if (match) return JSON.parse(match[1]);
|
|
170
|
-
return null;
|
|
171
|
-
}
|
|
172
|
-
}
|
|
173
|
-
|
|
174
|
-
async function callGemini(apiKey, userPrompt) {
|
|
175
|
-
const resp = await fetch(
|
|
176
|
-
`https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent?key=${apiKey}`,
|
|
177
|
-
{
|
|
178
|
-
method: "POST",
|
|
179
|
-
headers: { "Content-Type": "application/json" },
|
|
180
|
-
body: JSON.stringify({
|
|
181
|
-
contents: [
|
|
182
|
-
{
|
|
183
|
-
parts: [
|
|
184
|
-
{ text: SYSTEM_PROMPT + "\n\n" + userPrompt },
|
|
185
|
-
],
|
|
186
|
-
},
|
|
187
|
-
],
|
|
188
|
-
generationConfig: {
|
|
189
|
-
temperature: 0.1,
|
|
190
|
-
maxOutputTokens: 1000,
|
|
191
|
-
},
|
|
192
|
-
}),
|
|
193
|
-
signal: AbortSignal.timeout(3000), // 3s timeout for Gemini calls
|
|
194
|
-
}
|
|
195
|
-
);
|
|
196
|
-
|
|
197
|
-
if (!resp.ok) return null;
|
|
198
|
-
const data = await resp.json();
|
|
199
|
-
const content = data.candidates?.[0]?.content?.parts?.[0]?.text;
|
|
200
|
-
if (!content) return null;
|
|
201
|
-
|
|
202
|
-
try {
|
|
203
|
-
return JSON.parse(content);
|
|
204
|
-
} catch (_) {
|
|
205
|
-
// Try to extract JSON from markdown code block
|
|
206
|
-
const match = content.match(/```(?:json)?\s*([\s\S]*?)```/);
|
|
207
|
-
if (match) return JSON.parse(match[1]);
|
|
208
|
-
return null;
|
|
209
|
-
}
|
|
210
|
-
}
|
|
211
|
-
|
|
212
70
|
// --- Main export ---
|
|
213
71
|
|
|
214
72
|
/**
|
|
@@ -249,19 +107,8 @@ export async function llmCheckConflict(root, proposedAction, activeLocks) {
|
|
|
249
107
|
const lockList = activeLocks.map((l, i) => `${i + 1}. "${l.text}"`).join("\n");
|
|
250
108
|
const userPrompt = `Active SpecLocks:\n${lockList}\n\nProposed Action: "${proposedAction}"\n\nDoes this action conflict with any lock?`;
|
|
251
109
|
|
|
252
|
-
// Call LLM
|
|
253
|
-
|
|
254
|
-
try {
|
|
255
|
-
if (config.provider === "gemini") {
|
|
256
|
-
llmResult = await callGemini(config.apiKey, userPrompt);
|
|
257
|
-
} else if (config.provider === "anthropic") {
|
|
258
|
-
llmResult = await callAnthropic(config.apiKey, userPrompt);
|
|
259
|
-
} else {
|
|
260
|
-
llmResult = await callOpenAI(config.apiKey, userPrompt);
|
|
261
|
-
}
|
|
262
|
-
} catch (_) {
|
|
263
|
-
return null;
|
|
264
|
-
}
|
|
110
|
+
// Call LLM via shared provider
|
|
111
|
+
const llmResult = await callLLM(root, SYSTEM_PROMPT, userPrompt, { timeout: 3000 });
|
|
265
112
|
|
|
266
113
|
if (!llmResult) return null;
|
|
267
114
|
|
|
@@ -0,0 +1,208 @@
|
|
|
1
|
+
// ===================================================================
|
|
2
|
+
// SpecLock LLM Provider — Shared LLM Calling Utilities
|
|
3
|
+
// Supports Gemini, OpenAI, and Anthropic APIs.
|
|
4
|
+
// Zero mandatory dependencies — uses built-in fetch().
|
|
5
|
+
// Falls back gracefully if no API key is configured.
|
|
6
|
+
//
|
|
7
|
+
// Developed by Sandeep Roy (https://github.com/sgroy10)
|
|
8
|
+
// ===================================================================
|
|
9
|
+
|
|
10
|
+
import { readBrain } from "./storage.js";
|
|
11
|
+
|
|
12
|
+
// --- Configuration ---
|
|
13
|
+
|
|
14
|
+
/**
 * Resolve the LLM configuration (API key + provider name).
 *
 * Lookup order: the explicit SPECLOCK_LLM_KEY, then provider-specific
 * environment variables, and finally the `facts.llm` section of brain.json.
 * The provider is auto-detected from whichever environment key supplied the
 * value unless SPECLOCK_LLM_PROVIDER overrides it; "gemini" is the fallback.
 *
 * @param {string} root - Project root path (used for the brain.json lookup)
 * @returns {{ apiKey: string, provider: string } | null} Config, or null when no key is configured
 */
export function getConfig(root) {
  const env = process.env;

  const apiKey =
    env.SPECLOCK_LLM_KEY ||
    env.GEMINI_API_KEY ||
    env.GOOGLE_API_KEY ||
    env.OPENAI_API_KEY ||
    env.ANTHROPIC_API_KEY;

  if (apiKey) {
    // Detect the provider from whichever env var is set, mirroring the
    // priority order used for the key itself.
    let detected = null;
    if (env.SPECLOCK_LLM_KEY) {
      detected = "gemini";
    } else if (env.GEMINI_API_KEY || env.GOOGLE_API_KEY) {
      detected = "gemini";
    } else if (env.OPENAI_API_KEY) {
      detected = "openai";
    } else if (env.ANTHROPIC_API_KEY) {
      detected = "anthropic";
    }
    // Default to gemini (cheapest, free tier) when nothing matched.
    const provider = env.SPECLOCK_LLM_PROVIDER || detected || "gemini";
    return { apiKey, provider };
  }

  // No environment key — fall back to brain.json's facts.llm entry.
  try {
    const llm = readBrain(root)?.facts?.llm;
    if (llm) {
      return { apiKey: llm.apiKey, provider: llm.provider || "gemini" };
    }
  } catch (_) {
    // brain.json missing or unreadable — treat as unconfigured.
  }

  return null;
}
|
|
53
|
+
|
|
54
|
+
// --- API callers ---
|
|
55
|
+
|
|
56
|
+
/**
 * Call the OpenAI chat-completions API and parse its JSON reply.
 *
 * @param {string} apiKey - OpenAI API key (sent as a Bearer token)
 * @param {string} systemPrompt - System-role message content
 * @param {string} userPrompt - User-role message content
 * @param {{ timeout?: number, maxTokens?: number }} options - timeout defaults to 5000 ms, maxTokens to 1000
 * @returns {Promise<Object|null>} Parsed JSON object, or null on HTTP error or unparseable reply
 */
export async function callOpenAI(apiKey, systemPrompt, userPrompt, options = {}) {
  const { timeout = 5000, maxTokens = 1000 } = options;

  const payload = {
    model: "gpt-4o-mini",
    messages: [
      { role: "system", content: systemPrompt },
      { role: "user", content: userPrompt },
    ],
    temperature: 0.1,
    max_tokens: maxTokens,
  };

  const response = await fetch("https://api.openai.com/v1/chat/completions", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${apiKey}`,
    },
    body: JSON.stringify(payload),
    signal: AbortSignal.timeout(timeout),
  });

  if (!response.ok) return null;
  const body = await response.json();
  return parseJsonResponse(body.choices?.[0]?.message?.content);
}
|
|
89
|
+
|
|
90
|
+
/**
 * Call the Anthropic messages API and parse its JSON reply.
 *
 * @param {string} apiKey - Anthropic API key (sent via the x-api-key header)
 * @param {string} systemPrompt - System prompt (top-level `system` field)
 * @param {string} userPrompt - User message content
 * @param {{ timeout?: number, maxTokens?: number }} options - timeout defaults to 5000 ms, maxTokens to 1000
 * @returns {Promise<Object|null>} Parsed JSON object, or null on HTTP error or unparseable reply
 */
export async function callAnthropic(apiKey, systemPrompt, userPrompt, options = {}) {
  const { timeout = 5000, maxTokens = 1000 } = options;

  const payload = {
    model: "claude-sonnet-4-20250514",
    max_tokens: maxTokens,
    system: systemPrompt,
    messages: [{ role: "user", content: userPrompt }],
  };

  const response = await fetch("https://api.anthropic.com/v1/messages", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      "x-api-key": apiKey,
      "anthropic-version": "2023-06-01",
    },
    body: JSON.stringify(payload),
    signal: AbortSignal.timeout(timeout),
  });

  if (!response.ok) return null;
  const body = await response.json();
  return parseJsonResponse(body.content?.[0]?.text);
}
|
|
123
|
+
|
|
124
|
+
/**
 * Call the Gemini generateContent API and parse its JSON reply.
 *
 * @param {string} apiKey - Google AI API key (sent via the x-goog-api-key header)
 * @param {string} systemPrompt - System instructions (prepended to the user prompt)
 * @param {string} userPrompt - User prompt text
 * @param {{ timeout?: number, maxTokens?: number }} options - timeout defaults to 3000 ms, maxTokens to 1000
 * @returns {Promise<Object|null>} Parsed JSON object, or null on HTTP error or unparseable reply
 */
export async function callGemini(apiKey, systemPrompt, userPrompt, options = {}) {
  const { timeout = 3000, maxTokens = 1000 } = options;

  // Send the key via the x-goog-api-key header (officially supported) rather
  // than a `?key=` query parameter, so the secret cannot leak into URL-based
  // access logs or proxies.
  const resp = await fetch(
    "https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:generateContent",
    {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "x-goog-api-key": apiKey,
      },
      body: JSON.stringify({
        contents: [
          {
            parts: [
              // Gemini v1beta has no separate system role here; prepend it.
              { text: systemPrompt + "\n\n" + userPrompt },
            ],
          },
        ],
        generationConfig: {
          temperature: 0.1,
          maxOutputTokens: maxTokens,
        },
      }),
      signal: AbortSignal.timeout(timeout),
    }
  );

  if (!resp.ok) return null;
  const data = await resp.json();
  const content = data.candidates?.[0]?.content?.parts?.[0]?.text;
  return parseJsonResponse(content);
}
|
|
161
|
+
|
|
162
|
+
/**
 * Call whichever LLM provider is configured for this project.
 *
 * Resolves config via getConfig() (env vars, then brain.json) and dispatches
 * to the matching provider; any provider error yields null rather than
 * propagating.
 *
 * @param {string} root - Project root (for config lookup)
 * @param {string} systemPrompt - System prompt text
 * @param {string} userPrompt - User prompt text
 * @param {{ timeout?: number, maxTokens?: number }} options - Forwarded to the provider caller
 * @returns {Promise<Object|null>} Parsed JSON result, or null when unconfigured or on failure
 */
export async function callLLM(root, systemPrompt, userPrompt, options = {}) {
  const config = getConfig(root);
  if (!config) return null;

  try {
    switch (config.provider) {
      case "gemini":
        return await callGemini(config.apiKey, systemPrompt, userPrompt, options);
      case "anthropic":
        return await callAnthropic(config.apiKey, systemPrompt, userPrompt, options);
      default:
        // Any other value (including "openai") goes to the OpenAI caller.
        return await callOpenAI(config.apiKey, systemPrompt, userPrompt, options);
    }
  } catch (_) {
    // Network failure, timeout, or malformed reply — degrade to "no answer".
    return null;
  }
}
|
|
186
|
+
|
|
187
|
+
// --- JSON response parser ---
|
|
188
|
+
|
|
189
|
+
/**
 * Parse a JSON object out of a raw LLM response.
 *
 * Tries, in order:
 *   1. the whole response as JSON,
 *   2. the contents of a ```json fenced markdown block,
 *   3. the outermost `{ ... }` span embedded in surrounding prose
 *      (models frequently wrap JSON in explanatory text without a fence).
 *
 * @param {string} content - Raw LLM response text
 * @returns {Object|null} Parsed value, or null when nothing parseable is found
 */
export function parseJsonResponse(content) {
  if (!content) return null;

  try {
    return JSON.parse(content);
  } catch (_) {}

  // Try to extract JSON from a markdown code block.
  const match = content.match(/```(?:json)?\s*([\s\S]*?)```/);
  if (match) {
    try {
      return JSON.parse(match[1]);
    } catch (_) {}
  }

  // Last resort: grab the outermost brace-delimited span.
  const start = content.indexOf("{");
  const end = content.lastIndexOf("}");
  if (start !== -1 && end > start) {
    try {
      return JSON.parse(content.slice(start, end + 1));
    } catch (_) {}
  }

  return null;
}
|
package/src/core/memory.js
CHANGED
|
@@ -22,6 +22,7 @@ import {
|
|
|
22
22
|
import { hasGit, getHead, getDefaultBranch } from "./git.js";
|
|
23
23
|
import { ensureAuditKeyGitignored } from "./audit.js";
|
|
24
24
|
import { normalizeLock } from "./lock-author.js";
|
|
25
|
+
import { validateTypedLock, formatTypedLockText } from "./typed-constraints.js";
|
|
25
26
|
|
|
26
27
|
// --- Internal helpers ---
|
|
27
28
|
|
|
@@ -173,6 +174,120 @@ export function addNote(root, text, pinned = true) {
|
|
|
173
174
|
return { brain, noteId };
|
|
174
175
|
}
|
|
175
176
|
|
|
177
|
+
/**
 * Add a typed constraint lock (numerical, range, state, temporal).
 * These are for autonomous systems governance — real-time value/state
 * checking. Existing text locks use addLock() and are unaffected.
 *
 * @param {string} root - Project root
 * @param {Object} constraint - Typed constraint definition:
 *   { constraintType, metric?, operator?, value?, min?, max?, entity?, forbidden?, unit?, requireApproval? }
 * @param {string[]} tags - Category tags
 * @param {string} source - "user" or "agent"
 * @param {string} description - Human-readable description (auto-generated when omitted)
 * @returns {{ brain: Object, lockId: string, constraintType: string } | { brain: null, lockId: null, error: string }}
 */
export function addTypedLock(root, constraint, tags, source, description) {
  const validation = validateTypedLock(constraint);
  if (!validation.valid) {
    return { brain: null, lockId: null, error: validation.error };
  }

  const brain = ensureInit(root);
  const lockId = newId("lock");
  const text = description || formatTypedLockText(constraint);

  // Build the lock entry, copying only the fields this constraint carries.
  const entry = {
    id: lockId,
    text,
    constraintType: constraint.constraintType,
  };
  if (constraint.metric) entry.metric = constraint.metric;
  if (constraint.operator) entry.operator = constraint.operator;
  if (constraint.value !== undefined) entry.value = constraint.value;
  if (constraint.min !== undefined) entry.min = constraint.min;
  if (constraint.max !== undefined) entry.max = constraint.max;
  if (constraint.unit) entry.unit = constraint.unit;
  if (constraint.entity) entry.entity = constraint.entity;
  if (constraint.forbidden) entry.forbidden = constraint.forbidden;
  if (constraint.requireApproval !== undefined) entry.requireApproval = constraint.requireApproval;
  entry.createdAt = nowIso();
  entry.source = source || "user";
  entry.tags = tags || [];
  entry.active = true;

  brain.specLock.items.unshift(entry);

  // Audit trail: record the addition.
  const eventId = newId("evt");
  recordEvent(root, brain, {
    eventId,
    type: "lock_added",
    at: nowIso(),
    files: [],
    summary: `Typed lock added (${constraint.constraintType}): ${text.substring(0, 80)}`,
    patchPath: "",
  });

  return { brain, lockId, constraintType: constraint.constraintType };
}
|
|
231
|
+
|
|
232
|
+
/**
 * Update a typed lock's threshold value (for numerical/range/temporal).
 * Records the change in the audit trail.
 *
 * Which fields may change depends on the lock's constraintType:
 *   - numerical / temporal: `value`, `operator`
 *   - range:                `min`, `max`
 *   - state:                `forbidden`
 *
 * @param {string} root - Project root
 * @param {string} lockId - Id of the lock to modify
 * @param {Object} updates - New field values (unrecognized fields are ignored)
 * @returns {{ brain: Object, lockId: string, oldValues: Object, newValues: Object } | { brain: null, error: string }}
 */
export function updateTypedLockThreshold(root, lockId, updates) {
  const brain = ensureInit(root);
  const lock = brain.specLock.items.find((l) => l.id === lockId);

  if (!lock) {
    return { brain: null, error: `Lock not found: ${lockId}` };
  }
  if (!lock.constraintType) {
    return { brain: null, error: `Lock ${lockId} is a text lock, not a typed constraint` };
  }

  const oldValues = {};

  // Apply one field update, remembering its previous value for the audit log.
  const applyField = (field, shouldApply) => {
    if (!shouldApply) return;
    oldValues[field] = lock[field];
    lock[field] = updates[field];
  };

  switch (lock.constraintType) {
    case "numerical":
    case "temporal":
      applyField("value", updates.value !== undefined);
      applyField("operator", Boolean(updates.operator));
      break;
    case "range":
      applyField("min", updates.min !== undefined);
      applyField("max", updates.max !== undefined);
      break;
    case "state":
      applyField("forbidden", Boolean(updates.forbidden));
      break;
  }

  // Keep the human-readable text in sync with the new thresholds.
  lock.text = formatTypedLockText(lock);

  const eventId = newId("evt");
  recordEvent(root, brain, {
    eventId,
    type: "lock_updated",
    at: nowIso(),
    files: [],
    summary: `Typed lock ${lockId} threshold updated: ${JSON.stringify(oldValues)} -> ${JSON.stringify(updates)}`,
    patchPath: "",
  });

  return { brain, lockId, oldValues, newValues: updates };
}
|
|
290
|
+
|
|
176
291
|
export function updateDeployFacts(root, payload) {
|
|
177
292
|
const brain = ensureInit(root);
|
|
178
293
|
const deploy = brain.facts.deploy;
|