speclock 4.3.1 → 4.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "speclock",
3
- "version": "4.3.1",
3
+ "version": "4.3.3",
4
4
  "description": "AI constraint engine with Gemini LLM universal detection, Policy-as-Code DSL, OAuth/OIDC SSO, admin dashboard, telemetry, API key auth, RBAC, AES-256-GCM encryption, hard enforcement, semantic pre-commit, HMAC audit chain, SOC 2/HIPAA compliance. Cross-platform: MCP + direct API. 31 MCP tools + CLI. Enterprise platform.",
5
5
  "type": "module",
6
6
  "main": "src/mcp/server.js",
package/src/cli/index.js CHANGED
@@ -116,7 +116,7 @@ function refreshContext(root) {
116
116
 
117
117
  function printHelp() {
118
118
  console.log(`
119
- SpecLock v4.3.1 — AI Constraint Engine (Gemini LLM + Policy-as-Code + SSO + Dashboard + Telemetry + Auth + RBAC + Encryption)
119
+ SpecLock v4.3.3 — AI Constraint Engine (Gemini LLM + Policy-as-Code + SSO + Dashboard + Telemetry + Auth + RBAC + Encryption)
120
120
  Developed by Sandeep Roy (github.com/sgroy10)
121
121
 
122
122
  Usage: speclock <command> [options]
@@ -9,7 +9,7 @@
9
9
  import { readBrain, readEvents } from "./storage.js";
10
10
  import { verifyAuditChain } from "./audit.js";
11
11
 
12
- const VERSION = "4.3.1";
12
+ const VERSION = "4.3.3";
13
13
 
14
14
  // PHI-related keywords for HIPAA filtering
15
15
  const PHI_KEYWORDS = [
@@ -204,12 +204,69 @@ export function checkConflict(rootOrAction, proposedActionOrLock) {
204
204
  return result;
205
205
  }
206
206
 
207
+ /**
208
+ * Default proxy URL for npm-install users who don't have their own LLM API key.
209
+ * The Railway-hosted SpecLock server provides Gemini LLM checking via /api/check.
210
+ * Disable with SPECLOCK_NO_PROXY=true. Override with SPECLOCK_PROXY_URL.
211
+ */
212
+ const DEFAULT_PROXY_URL = "https://speclock-mcp-production.up.railway.app/api/check";
213
+
214
+ /**
215
+ * Call the Railway proxy for LLM-powered conflict checking.
216
+ * Used when no local LLM API key is available.
217
+ * @returns {Object|null} Proxy result or null on failure
218
+ */
219
+ async function callProxy(actionText, lockTexts) {
220
+ if (process.env.SPECLOCK_NO_PROXY === "true") return null;
221
+ const proxyUrl = process.env.SPECLOCK_PROXY_URL || DEFAULT_PROXY_URL;
222
+
223
+ try {
224
+ const controller = new AbortController();
225
+ const timeout = setTimeout(() => controller.abort(), 10000); // 10s timeout
226
+
227
+ const resp = await fetch(proxyUrl, {
228
+ method: "POST",
229
+ headers: { "Content-Type": "application/json" },
230
+ body: JSON.stringify({ action: actionText, locks: lockTexts }),
231
+ signal: controller.signal,
232
+ });
233
+ clearTimeout(timeout);
234
+
235
+ if (!resp.ok) return null;
236
+ const data = await resp.json();
237
+
238
+ if (!data || typeof data.hasConflict !== "boolean") return null;
239
+
240
+ // Convert proxy response to internal format
241
+ const conflicts = (data.conflicts || []).map((c) => ({
242
+ id: "proxy",
243
+ text: c.lockText,
244
+ matchedKeywords: [],
245
+ confidence: c.confidence,
246
+ level: c.level || "MEDIUM",
247
+ reasons: c.reasons || [],
248
+ }));
249
+
250
+ return {
251
+ hasConflict: data.hasConflict,
252
+ conflictingLocks: conflicts,
253
+ analysis: data.hasConflict
254
+ ? `${conflicts.length} conflict(s) detected via proxy (${data.source}).`
255
+ : `Proxy verified as safe (${data.source}). No conflicts.`,
256
+ };
257
+ } catch (_) {
258
+ // Proxy unavailable — graceful degradation
259
+ return null;
260
+ }
261
+ }
262
+
207
263
  /**
208
264
  * Async conflict check with LLM fallback for grey-zone cases.
209
265
  * Supports both brain mode and direct mode (same as checkConflict).
210
266
  * Strategy: Run heuristic first (fast, free, offline).
211
267
  * - Score > 70% on ALL conflicts → trust heuristic (skip LLM)
212
268
  * - Everything else → call LLM for universal domain coverage
269
+ * - If no local LLM key → call Railway proxy for Gemini coverage
213
270
  */
214
271
  export async function checkConflictAsync(rootOrAction, proposedActionOrLock) {
215
272
  // 1. Always run the fast heuristic first (handles both brain + direct mode)
@@ -224,12 +281,9 @@ export async function checkConflictAsync(rootOrAction, proposedActionOrLock) {
224
281
  return heuristicResult;
225
282
  }
226
283
 
227
- // 3. Call LLM for everything else including score 0.
228
- // Score 0 means "heuristic vocabulary doesn't cover this domain",
229
- // which is EXACTLY when an LLM (which knows every domain) adds value.
284
+ // 3. Try local LLM first (if user has their own API key)
230
285
  try {
231
286
  const { llmCheckConflict } = await import("./llm-checker.js");
232
- // In direct mode, build activeLocks from the lock text(s) passed directly
233
287
  let llmResult;
234
288
  if (isDirect) {
235
289
  const lockTexts = Array.isArray(proposedActionOrLock)
@@ -246,45 +300,77 @@ export async function checkConflictAsync(rootOrAction, proposedActionOrLock) {
246
300
  }
247
301
 
248
302
  if (llmResult) {
249
- // Keep HIGH heuristic conflicts (>70%) — they're already certain
250
- const highConfidence = heuristicResult.conflictingLocks.filter(
251
- (c) => c.confidence > 70
252
- );
253
- const llmConflicts = llmResult.conflictingLocks || [];
254
- const merged = [...highConfidence, ...llmConflicts];
255
-
256
- // Deduplicate by lock text, keeping the higher-confidence entry
257
- const byText = new Map();
258
- for (const c of merged) {
259
- const existing = byText.get(c.text);
260
- if (!existing || c.confidence > existing.confidence) {
261
- byText.set(c.text, c);
262
- }
263
- }
264
- const unique = [...byText.values()];
265
-
266
- if (unique.length === 0) {
267
- return {
268
- hasConflict: false,
269
- conflictingLocks: [],
270
- analysis: `Heuristic had partial signal, LLM verified as safe. No conflicts.`,
271
- };
272
- }
303
+ return mergeLLMResult(heuristicResult, llmResult);
304
+ }
305
+ } catch (_) {
306
+ // Local LLM not available
307
+ }
273
308
 
274
- unique.sort((a, b) => b.confidence - a.confidence);
275
- return {
276
- hasConflict: true,
277
- conflictingLocks: unique,
278
- analysis: `${unique.length} conflict(s) confirmed (${highConfidence.length} heuristic + ${llmConflicts.length} LLM-verified).`,
279
- };
309
+ // 4. No local LLM — call Railway proxy for Gemini coverage
310
+ try {
311
+ let lockTexts;
312
+ if (isDirect) {
313
+ lockTexts = Array.isArray(proposedActionOrLock)
314
+ ? proposedActionOrLock
315
+ : [proposedActionOrLock];
316
+ } else {
317
+ // Brain mode: extract lock texts from brain
318
+ const brain = ensureInit(rootOrAction);
319
+ const activeLocks = (brain.specLock?.items || []).filter((l) => l.active !== false);
320
+ lockTexts = activeLocks.map((l) => l.text);
321
+ }
322
+
323
+ const actionText = isDirect ? rootOrAction : proposedActionOrLock;
324
+ if (lockTexts.length > 0) {
325
+ const proxyResult = await callProxy(actionText, lockTexts);
326
+ if (proxyResult) {
327
+ return mergeLLMResult(heuristicResult, proxyResult);
328
+ }
280
329
  }
281
330
  } catch (_) {
282
- // LLM not available — return heuristic result as-is
331
+ // Proxy unavailable — graceful degradation
283
332
  }
284
333
 
285
334
  return heuristicResult;
286
335
  }
287
336
 
337
+ /**
338
+ * Merge heuristic result with LLM/proxy result.
339
+ * Keeps HIGH heuristic conflicts + all LLM conflicts, deduplicates, takes MAX.
340
+ */
341
+ function mergeLLMResult(heuristicResult, llmResult) {
342
+ const highConfidence = heuristicResult.conflictingLocks.filter(
343
+ (c) => c.confidence > 70
344
+ );
345
+ const llmConflicts = llmResult.conflictingLocks || [];
346
+ const merged = [...highConfidence, ...llmConflicts];
347
+
348
+ // Deduplicate by lock text, keeping the higher-confidence entry
349
+ const byText = new Map();
350
+ for (const c of merged) {
351
+ const existing = byText.get(c.text);
352
+ if (!existing || c.confidence > existing.confidence) {
353
+ byText.set(c.text, c);
354
+ }
355
+ }
356
+ const unique = [...byText.values()];
357
+
358
+ if (unique.length === 0) {
359
+ return {
360
+ hasConflict: false,
361
+ conflictingLocks: [],
362
+ analysis: `Heuristic had partial signal, LLM verified as safe. No conflicts.`,
363
+ };
364
+ }
365
+
366
+ unique.sort((a, b) => b.confidence - a.confidence);
367
+ return {
368
+ hasConflict: true,
369
+ conflictingLocks: unique,
370
+ analysis: `${unique.length} conflict(s) confirmed (${highConfidence.length} heuristic + ${llmConflicts.length} LLM-verified).`,
371
+ };
372
+ }
373
+
288
374
  export function suggestLocks(root) {
289
375
  const brain = ensureInit(root);
290
376
  const suggestions = [];
@@ -257,7 +257,7 @@ export async function flushToRemote(root) {
257
257
  // Build anonymized payload
258
258
  const payload = {
259
259
  instanceId: summary.instanceId,
260
- version: "4.3.1",
260
+ version: "4.3.3",
261
261
  totalCalls: summary.totalCalls,
262
262
  avgResponseMs: summary.avgResponseMs,
263
263
  conflicts: summary.conflicts,
@@ -89,7 +89,7 @@
89
89
  <div class="header">
90
90
  <div>
91
91
  <h1><span>SpecLock</span> Dashboard</h1>
92
- <div class="meta">v4.3.1 &mdash; AI Constraint Engine</div>
92
+ <div class="meta">v4.3.3 &mdash; AI Constraint Engine</div>
93
93
  </div>
94
94
  <div style="display:flex;align-items:center;gap:12px;">
95
95
  <span id="health-badge" class="status-badge healthy">Loading...</span>
@@ -182,7 +182,7 @@
182
182
  </div>
183
183
 
184
184
  <div style="text-align:center;padding:24px;color:var(--muted);font-size:12px;">
185
- SpecLock v4.3.1 &mdash; Developed by Sandeep Roy &mdash; <a href="https://github.com/sgroy10/speclock" style="color:var(--accent)">GitHub</a>
185
+ SpecLock v4.3.3 &mdash; Developed by Sandeep Roy &mdash; <a href="https://github.com/sgroy10/speclock" style="color:var(--accent)">GitHub</a>
186
186
  </div>
187
187
 
188
188
  <script>
@@ -91,7 +91,7 @@ import { fileURLToPath } from "url";
91
91
  import _path from "path";
92
92
 
93
93
  const PROJECT_ROOT = process.env.SPECLOCK_PROJECT_ROOT || process.cwd();
94
- const VERSION = "4.3.1";
94
+ const VERSION = "4.3.3";
95
95
  const AUTHOR = "Sandeep Roy";
96
96
  const START_TIME = Date.now();
97
97
 
@@ -201,7 +201,7 @@ function createSpecLockServer() {
201
201
  server.tool("speclock_add_lock", "Add a non-negotiable constraint (SpecLock).", { text: z.string().min(1).describe("The constraint text"), tags: z.array(z.string()).default([]).describe("Category tags"), source: z.enum(["user", "agent"]).default("agent").describe("Who created this lock") }, async ({ text, tags, source }) => {
202
202
  ensureInit(PROJECT_ROOT);
203
203
  const lock = addLock(PROJECT_ROOT, text, tags, source);
204
- return { content: [{ type: "text", text: `Lock added [${lock.id}]: ${text}` }] };
204
+ return { content: [{ type: "text", text: `Lock added [${lock.lockId}]: ${text}` }] };
205
205
  });
206
206
 
207
207
  // Tool 5: speclock_remove_lock
@@ -215,7 +215,7 @@ function createSpecLockServer() {
215
215
  server.tool("speclock_add_decision", "Record an architectural or design decision.", { text: z.string().min(1).describe("The decision text"), tags: z.array(z.string()).default([]), source: z.enum(["user", "agent"]).default("agent") }, async ({ text, tags, source }) => {
216
216
  ensureInit(PROJECT_ROOT);
217
217
  const d = addDecision(PROJECT_ROOT, text, tags, source);
218
- return { content: [{ type: "text", text: `Decision recorded [${d.id}]: ${text}` }] };
218
+ return { content: [{ type: "text", text: `Decision recorded [${d.decId}]: ${text}` }] };
219
219
  });
220
220
 
221
221
  // Tool 7: speclock_add_note
@@ -656,6 +656,110 @@ app.delete("/mcp", async (req, res) => {
656
656
  res.writeHead(405).end(JSON.stringify({ jsonrpc: "2.0", error: { code: -32000, message: "Method not allowed." }, id: null }));
657
657
  });
658
658
 
659
+ // ========================================
660
+ // PUBLIC PROXY API (v4.3 — for npm-install users)
661
+ // Allows npm-install users to get Gemini LLM coverage without
662
+ // needing their own API key. Heuristic runs locally, grey-zone
663
+ // cases are proxied here for LLM verification.
664
+ // ========================================
665
+
666
+ app.post("/api/check", async (req, res) => {
667
+ setCorsHeaders(res);
668
+
669
+ // Rate limiting
670
+ const clientIp = req.headers["x-forwarded-for"]?.split(",")[0]?.trim() || req.socket?.remoteAddress || "unknown";
671
+ if (!checkRateLimit(clientIp)) {
672
+ return res.status(429).json({ error: "Rate limit exceeded. Try again later." });
673
+ }
674
+
675
+ const { action, locks } = req.body || {};
676
+ if (!action || typeof action !== "string") {
677
+ return res.status(400).json({ error: "Missing required field: action (string)" });
678
+ }
679
+ if (!locks || !Array.isArray(locks) || locks.length === 0) {
680
+ return res.status(400).json({ error: "Missing required field: locks (non-empty array of strings)" });
681
+ }
682
+ if (locks.length > 50) {
683
+ return res.status(400).json({ error: "Too many locks (max 50)" });
684
+ }
685
+
686
+ try {
687
+ // Build lock objects for the LLM checker
688
+ const activeLocks = locks.map((text, i) => ({
689
+ id: `proxy-${i}`,
690
+ text: String(text),
691
+ active: true,
692
+ }));
693
+
694
+ // Run heuristic first (same as local)
695
+ const { analyzeConflict } = await import("../core/semantics.js");
696
+ const heuristicConflicts = [];
697
+ for (const lock of activeLocks) {
698
+ const result = analyzeConflict(action, lock.text);
699
+ if (result.isConflict) {
700
+ heuristicConflicts.push({
701
+ lockText: lock.text,
702
+ confidence: result.confidence,
703
+ level: result.level,
704
+ reasons: result.reasons,
705
+ source: "heuristic",
706
+ });
707
+ }
708
+ }
709
+
710
+ // If all heuristic conflicts are HIGH (>70%), return immediately
711
+ if (heuristicConflicts.length > 0 && heuristicConflicts.every(c => c.confidence > 70)) {
712
+ return res.json({
713
+ hasConflict: true,
714
+ conflicts: heuristicConflicts,
715
+ source: "heuristic",
716
+ });
717
+ }
718
+
719
+ // Call LLM for full coverage
720
+ const { llmCheckConflict } = await import("../core/llm-checker.js");
721
+ const llmResult = await llmCheckConflict(null, action, activeLocks);
722
+
723
+ if (llmResult) {
724
+ // Merge: keep HIGH heuristic + all LLM conflicts
725
+ const highHeuristic = heuristicConflicts.filter(c => c.confidence > 70);
726
+ const llmConflicts = (llmResult.conflictingLocks || []).map(c => ({
727
+ lockText: c.text,
728
+ confidence: c.confidence,
729
+ level: c.level,
730
+ reasons: c.reasons || [],
731
+ source: "gemini",
732
+ }));
733
+ const merged = [...highHeuristic, ...llmConflicts];
734
+
735
+ // Deduplicate by lock text
736
+ const byText = new Map();
737
+ for (const c of merged) {
738
+ const existing = byText.get(c.lockText);
739
+ if (!existing || c.confidence > existing.confidence) {
740
+ byText.set(c.lockText, c);
741
+ }
742
+ }
743
+ const unique = [...byText.values()];
744
+
745
+ return res.json({
746
+ hasConflict: unique.length > 0,
747
+ conflicts: unique,
748
+ source: unique.some(c => c.source === "gemini") ? "hybrid" : "heuristic",
749
+ });
750
+ }
751
+
752
+ // LLM unavailable — return heuristic result
753
+ return res.json({
754
+ hasConflict: heuristicConflicts.length > 0,
755
+ conflicts: heuristicConflicts,
756
+ source: "heuristic-only",
757
+ });
758
+ } catch (err) {
759
+ return res.status(500).json({ error: `Check failed: ${err.message}` });
760
+ }
761
+ });
762
+
659
763
  // Health check endpoint (enhanced for enterprise)
660
764
  app.get("/health", (req, res) => {
661
765
  setCorsHeaders(res);
package/src/mcp/server.js CHANGED
@@ -100,7 +100,7 @@ const PROJECT_ROOT =
100
100
  args.project || process.env.SPECLOCK_PROJECT_ROOT || process.cwd();
101
101
 
102
102
  // --- MCP Server ---
103
- const VERSION = "4.3.1";
103
+ const VERSION = "4.3.3";
104
104
  const AUTHOR = "Sandeep Roy";
105
105
 
106
106
  const server = new McpServer(