speclock 4.3.2 → 4.3.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "speclock",
3
- "version": "4.3.2",
3
+ "version": "4.3.4",
4
4
  "description": "AI constraint engine with Gemini LLM universal detection, Policy-as-Code DSL, OAuth/OIDC SSO, admin dashboard, telemetry, API key auth, RBAC, AES-256-GCM encryption, hard enforcement, semantic pre-commit, HMAC audit chain, SOC 2/HIPAA compliance. Cross-platform: MCP + direct API. 31 MCP tools + CLI. Enterprise platform.",
5
5
  "type": "module",
6
6
  "main": "src/mcp/server.js",
package/src/cli/index.js CHANGED
@@ -116,7 +116,7 @@ function refreshContext(root) {
116
116
 
117
117
  function printHelp() {
118
118
  console.log(`
119
- SpecLock v4.3.2 — AI Constraint Engine (Gemini LLM + Policy-as-Code + SSO + Dashboard + Telemetry + Auth + RBAC + Encryption)
119
+ SpecLock v4.3.4 — AI Constraint Engine (Gemini LLM + Policy-as-Code + SSO + Dashboard + Telemetry + Auth + RBAC + Encryption)
120
120
  Developed by Sandeep Roy (github.com/sgroy10)
121
121
 
122
122
  Usage: speclock <command> [options]
@@ -9,7 +9,7 @@
9
9
  import { readBrain, readEvents } from "./storage.js";
10
10
  import { verifyAuditChain } from "./audit.js";
11
11
 
12
- const VERSION = "4.3.2";
12
+ const VERSION = "4.3.4";
13
13
 
14
14
  // PHI-related keywords for HIPAA filtering
15
15
  const PHI_KEYWORDS = [
@@ -59,13 +59,18 @@ const GUARD_TAG = "SPECLOCK-GUARD";
59
59
  /**
60
60
  * Detect if the first argument is a file-system path (brain mode)
61
61
  * or natural text (direct mode for cross-platform usage).
62
+ * Must be strict: "spay/neuter" is NOT a path, "/app" IS a path.
62
63
  */
63
64
  function isDirectoryPath(str) {
64
65
  if (!str || typeof str !== "string") return false;
65
66
  // Absolute paths: /foo, C:\foo, \\server
66
- if (str.startsWith("/") || str.startsWith("\\") || /^[A-Z]:/i.test(str)) return true;
67
- // Relative path with separator or current dir
68
- if (str === "." || str === ".." || str.includes("/") || str.includes("\\")) return true;
67
+ if (/^[A-Z]:/i.test(str)) return true; // C:\Users\...
68
+ if (str.startsWith("\\\\")) return true; // \\server\share
69
+ if (str.startsWith("/") && !str.includes(" ")) return true; // /app, /usr/local (no spaces = likely path)
70
+ // Relative path starting with . or ..
71
+ if (/^\.\.?[/\\]/.test(str)) return true; // ./foo, ../bar
72
+ if (str === "." || str === "..") return true;
73
+ // Natural language with / in the middle (spay/neuter, TCP/IP, etc.) is NOT a path
69
74
  return false;
70
75
  }
71
76
 
@@ -204,12 +209,69 @@ export function checkConflict(rootOrAction, proposedActionOrLock) {
204
209
  return result;
205
210
  }
206
211
 
212
+ /**
213
+ * Default proxy URL for npm-install users who don't have their own LLM API key.
214
+ * The Railway-hosted SpecLock server provides Gemini LLM checking via /api/check.
215
+ * Disable with SPECLOCK_NO_PROXY=true. Override with SPECLOCK_PROXY_URL.
216
+ */
217
+ const DEFAULT_PROXY_URL = "https://speclock-mcp-production.up.railway.app/api/check";
218
+
219
+ /**
220
+ * Call the Railway proxy for LLM-powered conflict checking.
221
+ * Used when no local LLM API key is available.
222
+ * @returns {Object|null} Proxy result or null on failure
223
+ */
224
+ async function callProxy(actionText, lockTexts) {
225
+ if (process.env.SPECLOCK_NO_PROXY === "true") return null;
226
+ const proxyUrl = process.env.SPECLOCK_PROXY_URL || DEFAULT_PROXY_URL;
227
+
228
+ try {
229
+ const controller = new AbortController();
230
+ const timeout = setTimeout(() => controller.abort(), 10000); // 10s timeout
231
+
232
+ const resp = await fetch(proxyUrl, {
233
+ method: "POST",
234
+ headers: { "Content-Type": "application/json" },
235
+ body: JSON.stringify({ action: actionText, locks: lockTexts }),
236
+ signal: controller.signal,
237
+ });
238
+ clearTimeout(timeout);
239
+
240
+ if (!resp.ok) return null;
241
+ const data = await resp.json();
242
+
243
+ if (!data || typeof data.hasConflict !== "boolean") return null;
244
+
245
+ // Convert proxy response to internal format
246
+ const conflicts = (data.conflicts || []).map((c) => ({
247
+ id: "proxy",
248
+ text: c.lockText,
249
+ matchedKeywords: [],
250
+ confidence: c.confidence,
251
+ level: c.level || "MEDIUM",
252
+ reasons: c.reasons || [],
253
+ }));
254
+
255
+ return {
256
+ hasConflict: data.hasConflict,
257
+ conflictingLocks: conflicts,
258
+ analysis: data.hasConflict
259
+ ? `${conflicts.length} conflict(s) detected via proxy (${data.source}).`
260
+ : `Proxy verified as safe (${data.source}). No conflicts.`,
261
+ };
262
+ } catch (_) {
263
+ // Proxy unavailable — graceful degradation
264
+ return null;
265
+ }
266
+ }
267
+
207
268
  /**
208
269
  * Async conflict check with LLM fallback for grey-zone cases.
209
270
  * Supports both brain mode and direct mode (same as checkConflict).
210
271
  * Strategy: Run heuristic first (fast, free, offline).
211
272
  * - Score > 70% on ALL conflicts → trust heuristic (skip LLM)
212
273
  * - Everything else → call LLM for universal domain coverage
274
+ * - If no local LLM key → call Railway proxy for Gemini coverage
213
275
  */
214
276
  export async function checkConflictAsync(rootOrAction, proposedActionOrLock) {
215
277
  // 1. Always run the fast heuristic first (handles both brain + direct mode)
@@ -224,12 +286,9 @@ export async function checkConflictAsync(rootOrAction, proposedActionOrLock) {
224
286
  return heuristicResult;
225
287
  }
226
288
 
227
- // 3. Call LLM for everything else including score 0.
228
- // Score 0 means "heuristic vocabulary doesn't cover this domain",
229
- // which is EXACTLY when an LLM (which knows every domain) adds value.
289
+ // 3. Try local LLM first (if user has their own API key)
230
290
  try {
231
291
  const { llmCheckConflict } = await import("./llm-checker.js");
232
- // In direct mode, build activeLocks from the lock text(s) passed directly
233
292
  let llmResult;
234
293
  if (isDirect) {
235
294
  const lockTexts = Array.isArray(proposedActionOrLock)
@@ -246,45 +305,77 @@ export async function checkConflictAsync(rootOrAction, proposedActionOrLock) {
246
305
  }
247
306
 
248
307
  if (llmResult) {
249
- // Keep HIGH heuristic conflicts (>70%) — they're already certain
250
- const highConfidence = heuristicResult.conflictingLocks.filter(
251
- (c) => c.confidence > 70
252
- );
253
- const llmConflicts = llmResult.conflictingLocks || [];
254
- const merged = [...highConfidence, ...llmConflicts];
255
-
256
- // Deduplicate by lock text, keeping the higher-confidence entry
257
- const byText = new Map();
258
- for (const c of merged) {
259
- const existing = byText.get(c.text);
260
- if (!existing || c.confidence > existing.confidence) {
261
- byText.set(c.text, c);
262
- }
263
- }
264
- const unique = [...byText.values()];
265
-
266
- if (unique.length === 0) {
267
- return {
268
- hasConflict: false,
269
- conflictingLocks: [],
270
- analysis: `Heuristic had partial signal, LLM verified as safe. No conflicts.`,
271
- };
272
- }
308
+ return mergeLLMResult(heuristicResult, llmResult);
309
+ }
310
+ } catch (_) {
311
+ // Local LLM not available
312
+ }
273
313
 
274
- unique.sort((a, b) => b.confidence - a.confidence);
275
- return {
276
- hasConflict: true,
277
- conflictingLocks: unique,
278
- analysis: `${unique.length} conflict(s) confirmed (${highConfidence.length} heuristic + ${llmConflicts.length} LLM-verified).`,
279
- };
314
+ // 4. No local LLM — call Railway proxy for Gemini coverage
315
+ try {
316
+ let lockTexts;
317
+ if (isDirect) {
318
+ lockTexts = Array.isArray(proposedActionOrLock)
319
+ ? proposedActionOrLock
320
+ : [proposedActionOrLock];
321
+ } else {
322
+ // Brain mode: extract lock texts from brain
323
+ const brain = ensureInit(rootOrAction);
324
+ const activeLocks = (brain.specLock?.items || []).filter((l) => l.active !== false);
325
+ lockTexts = activeLocks.map((l) => l.text);
326
+ }
327
+
328
+ const actionText = isDirect ? rootOrAction : proposedActionOrLock;
329
+ if (lockTexts.length > 0) {
330
+ const proxyResult = await callProxy(actionText, lockTexts);
331
+ if (proxyResult) {
332
+ return mergeLLMResult(heuristicResult, proxyResult);
333
+ }
280
334
  }
281
335
  } catch (_) {
282
- // LLM not available — return heuristic result as-is
336
+ // Proxy unavailable — graceful degradation
283
337
  }
284
338
 
285
339
  return heuristicResult;
286
340
  }
287
341
 
342
+ /**
343
+ * Merge heuristic result with LLM/proxy result.
344
+ * Keeps HIGH heuristic conflicts + all LLM conflicts, deduplicates, takes MAX.
345
+ */
346
+ function mergeLLMResult(heuristicResult, llmResult) {
347
+ const highConfidence = heuristicResult.conflictingLocks.filter(
348
+ (c) => c.confidence > 70
349
+ );
350
+ const llmConflicts = llmResult.conflictingLocks || [];
351
+ const merged = [...highConfidence, ...llmConflicts];
352
+
353
+ // Deduplicate by lock text, keeping the higher-confidence entry
354
+ const byText = new Map();
355
+ for (const c of merged) {
356
+ const existing = byText.get(c.text);
357
+ if (!existing || c.confidence > existing.confidence) {
358
+ byText.set(c.text, c);
359
+ }
360
+ }
361
+ const unique = [...byText.values()];
362
+
363
+ if (unique.length === 0) {
364
+ return {
365
+ hasConflict: false,
366
+ conflictingLocks: [],
367
+ analysis: `Heuristic had partial signal, LLM verified as safe. No conflicts.`,
368
+ };
369
+ }
370
+
371
+ unique.sort((a, b) => b.confidence - a.confidence);
372
+ return {
373
+ hasConflict: true,
374
+ conflictingLocks: unique,
375
+ analysis: `${unique.length} conflict(s) confirmed (${highConfidence.length} heuristic + ${llmConflicts.length} LLM-verified).`,
376
+ };
377
+ }
378
+
288
379
  export function suggestLocks(root) {
289
380
  const brain = ensureInit(root);
290
381
  const suggestions = [];
@@ -257,7 +257,7 @@ export async function flushToRemote(root) {
257
257
  // Build anonymized payload
258
258
  const payload = {
259
259
  instanceId: summary.instanceId,
260
- version: "4.3.2",
260
+ version: "4.3.4",
261
261
  totalCalls: summary.totalCalls,
262
262
  avgResponseMs: summary.avgResponseMs,
263
263
  conflicts: summary.conflicts,
@@ -89,7 +89,7 @@
89
89
  <div class="header">
90
90
  <div>
91
91
  <h1><span>SpecLock</span> Dashboard</h1>
92
- <div class="meta">v4.3.2 &mdash; AI Constraint Engine</div>
92
+ <div class="meta">v4.3.4 &mdash; AI Constraint Engine</div>
93
93
  </div>
94
94
  <div style="display:flex;align-items:center;gap:12px;">
95
95
  <span id="health-badge" class="status-badge healthy">Loading...</span>
@@ -182,7 +182,7 @@
182
182
  </div>
183
183
 
184
184
  <div style="text-align:center;padding:24px;color:var(--muted);font-size:12px;">
185
- SpecLock v4.3.2 &mdash; Developed by Sandeep Roy &mdash; <a href="https://github.com/sgroy10/speclock" style="color:var(--accent)">GitHub</a>
185
+ SpecLock v4.3.4 &mdash; Developed by Sandeep Roy &mdash; <a href="https://github.com/sgroy10/speclock" style="color:var(--accent)">GitHub</a>
186
186
  </div>
187
187
 
188
188
  <script>
@@ -91,7 +91,7 @@ import { fileURLToPath } from "url";
91
91
  import _path from "path";
92
92
 
93
93
  const PROJECT_ROOT = process.env.SPECLOCK_PROJECT_ROOT || process.cwd();
94
- const VERSION = "4.3.2";
94
+ const VERSION = "4.3.4";
95
95
  const AUTHOR = "Sandeep Roy";
96
96
  const START_TIME = Date.now();
97
97
 
@@ -656,6 +656,110 @@ app.delete("/mcp", async (req, res) => {
656
656
  res.writeHead(405).end(JSON.stringify({ jsonrpc: "2.0", error: { code: -32000, message: "Method not allowed." }, id: null }));
657
657
  });
658
658
 
659
+ // ========================================
660
+ // PUBLIC PROXY API (v4.3 — for npm-install users)
661
+ // Allows npm-install users to get Gemini LLM coverage without
662
+ // needing their own API key. Heuristic runs locally, grey-zone
663
+ // cases are proxied here for LLM verification.
664
+ // ========================================
665
+
666
+ app.post("/api/check", async (req, res) => {
667
+ setCorsHeaders(res);
668
+
669
+ // Rate limiting
670
+ const clientIp = req.headers["x-forwarded-for"]?.split(",")[0]?.trim() || req.socket?.remoteAddress || "unknown";
671
+ if (!checkRateLimit(clientIp)) {
672
+ return res.status(429).json({ error: "Rate limit exceeded. Try again later." });
673
+ }
674
+
675
+ const { action, locks } = req.body || {};
676
+ if (!action || typeof action !== "string") {
677
+ return res.status(400).json({ error: "Missing required field: action (string)" });
678
+ }
679
+ if (!locks || !Array.isArray(locks) || locks.length === 0) {
680
+ return res.status(400).json({ error: "Missing required field: locks (non-empty array of strings)" });
681
+ }
682
+ if (locks.length > 50) {
683
+ return res.status(400).json({ error: "Too many locks (max 50)" });
684
+ }
685
+
686
+ try {
687
+ // Build lock objects for the LLM checker
688
+ const activeLocks = locks.map((text, i) => ({
689
+ id: `proxy-${i}`,
690
+ text: String(text),
691
+ active: true,
692
+ }));
693
+
694
+ // Run heuristic first (same as local)
695
+ const { analyzeConflict } = await import("../core/semantics.js");
696
+ const heuristicConflicts = [];
697
+ for (const lock of activeLocks) {
698
+ const result = analyzeConflict(action, lock.text);
699
+ if (result.isConflict) {
700
+ heuristicConflicts.push({
701
+ lockText: lock.text,
702
+ confidence: result.confidence,
703
+ level: result.level,
704
+ reasons: result.reasons,
705
+ source: "heuristic",
706
+ });
707
+ }
708
+ }
709
+
710
+ // If all heuristic conflicts are HIGH (>70%), return immediately
711
+ if (heuristicConflicts.length > 0 && heuristicConflicts.every(c => c.confidence > 70)) {
712
+ return res.json({
713
+ hasConflict: true,
714
+ conflicts: heuristicConflicts,
715
+ source: "heuristic",
716
+ });
717
+ }
718
+
719
+ // Call LLM for full coverage
720
+ const { llmCheckConflict } = await import("../core/llm-checker.js");
721
+ const llmResult = await llmCheckConflict(null, action, activeLocks);
722
+
723
+ if (llmResult) {
724
+ // Merge: keep HIGH heuristic + all LLM conflicts
725
+ const highHeuristic = heuristicConflicts.filter(c => c.confidence > 70);
726
+ const llmConflicts = (llmResult.conflictingLocks || []).map(c => ({
727
+ lockText: c.text,
728
+ confidence: c.confidence,
729
+ level: c.level,
730
+ reasons: c.reasons || [],
731
+ source: "gemini",
732
+ }));
733
+ const merged = [...highHeuristic, ...llmConflicts];
734
+
735
+ // Deduplicate by lock text
736
+ const byText = new Map();
737
+ for (const c of merged) {
738
+ const existing = byText.get(c.lockText);
739
+ if (!existing || c.confidence > existing.confidence) {
740
+ byText.set(c.lockText, c);
741
+ }
742
+ }
743
+ const unique = [...byText.values()];
744
+
745
+ return res.json({
746
+ hasConflict: unique.length > 0,
747
+ conflicts: unique,
748
+ source: unique.some(c => c.source === "gemini") ? "hybrid" : "heuristic",
749
+ });
750
+ }
751
+
752
+ // LLM unavailable — return heuristic result
753
+ return res.json({
754
+ hasConflict: heuristicConflicts.length > 0,
755
+ conflicts: heuristicConflicts,
756
+ source: "heuristic-only",
757
+ });
758
+ } catch (err) {
759
+ return res.status(500).json({ error: `Check failed: ${err.message}` });
760
+ }
761
+ });
762
+
659
763
  // Health check endpoint (enhanced for enterprise)
660
764
  app.get("/health", (req, res) => {
661
765
  setCorsHeaders(res);
package/src/mcp/server.js CHANGED
@@ -100,7 +100,7 @@ const PROJECT_ROOT =
100
100
  args.project || process.env.SPECLOCK_PROJECT_ROOT || process.cwd();
101
101
 
102
102
  // --- MCP Server ---
103
- const VERSION = "4.3.2";
103
+ const VERSION = "4.3.4";
104
104
  const AUTHOR = "Sandeep Roy";
105
105
 
106
106
  const server = new McpServer(