speclock 5.0.2 → 5.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,565 @@
1
+ // ===================================================================
2
+ // SpecLock Patch Gateway — Change Decision Engine
3
+ // Combines semantic conflict detection, lock-to-file mapping, blast
4
+ // radius analysis, and typed constraints into a single ALLOW/WARN/BLOCK
5
+ // verdict for any proposed change.
6
+ //
7
+ // Developed by Sandeep Roy (https://github.com/sgroy10)
8
+ // ===================================================================
9
+
10
+ import { readBrain } from "./storage.js";
11
+ import { ensureInit } from "./memory.js";
12
+ import { analyzeConflict } from "./semantics.js";
13
+ import { checkAllTypedConstraints } from "./typed-constraints.js";
14
+ import { getOrBuildGraph, getBlastRadius, mapLocksToFiles } from "./code-graph.js";
15
+
16
// --- Thresholds ---
// Shared confidence/risk thresholds used by every review path in this module.

const BLOCK_CONFIDENCE = 70; // Semantic confidence >= 70 → BLOCK
const WARN_CONFIDENCE = 40; // Semantic confidence >= 40 → WARN
const HIGH_BLAST_RADIUS = 20; // > 20% impact → adds to risk
const MED_BLAST_RADIUS = 10; // > 10% impact → moderate risk
22
+
23
+ // --- Main Gateway ---
24
+
25
/**
 * Review a proposed change against all active constraints.
 *
 * Pipeline:
 *   1. Semantic conflict detection against every active text lock.
 *   2. Lock-to-file mapping (do the changed files touch locked zones?).
 *   3. Blast radius analysis for each changed file (optional).
 *   4. Typed-constraint relevance hints (informational only).
 *   5. Risk scoring and ALLOW / WARN / BLOCK verdict.
 *   6. Human-readable summary.
 *
 * @param {string} root - Project root
 * @param {object} opts
 * @param {string} opts.description - What the change does (natural language)
 * @param {string[]} [opts.files] - Files being changed (project-relative paths)
 * @param {boolean} [opts.includeGraph=true] - Whether to run blast radius analysis
 * @returns {object} Verdict with risk score, reasons, and summary
 */
export function reviewPatch(root, { description, files = [], includeGraph = true }) {
  if (!description || typeof description !== "string" || !description.trim()) {
    return {
      verdict: "ERROR",
      riskScore: 0,
      error: "description is required (describe what the change does)",
      reasons: [],
      summary: "No change description provided.",
    };
  }

  // Confidence assigned when a lock matches only through file overlap
  // (no semantic signal). Named once instead of repeating the literal.
  const OVERLAP_CONFIDENCE = 60;

  const brain = ensureInit(root);
  const activeLocks = (brain.specLock?.items || []).filter(l => l.active !== false);
  const textLocks = activeLocks.filter(l => !l.constraintType);
  const typedLocks = activeLocks.filter(l => l.constraintType);

  const reasons = [];
  let maxConfidence = 0;

  // --- Step 1: Semantic conflict check against all text locks ---
  for (const lock of textLocks) {
    const result = analyzeConflict(description, lock.text);
    if (result.isConflict) {
      const confidence = result.confidence || 0;
      if (confidence > maxConfidence) maxConfidence = confidence;
      reasons.push({
        type: "semantic_conflict",
        severity: confidence >= BLOCK_CONFIDENCE ? "block" : "warn",
        lockId: lock.id,
        lockText: lock.text,
        confidence,
        level: result.level || "MEDIUM",
        details: result.reasons || [],
      });
    }
  }

  // --- Step 2: Lock-to-file mapping (do changed files touch locked zones?) ---
  const lockFileMatches = [];
  if (files.length > 0) {
    try {
      const lockMap = mapLocksToFiles(root);
      const normalizedFiles = files.map(f => f.replace(/\\/g, "/"));

      for (const mapping of lockMap) {
        const overlapping = mapping.matchedFiles.filter(mf =>
          normalizedFiles.some(cf => {
            const cfNorm = cf.toLowerCase();
            const mfNorm = mf.toLowerCase();
            // Exact match, or one path is a suffix of the other at a
            // path-segment boundary (tolerates differing path roots).
            return cfNorm === mfNorm || cfNorm.endsWith("/" + mfNorm) || mfNorm.endsWith("/" + cfNorm);
          })
        );
        if (overlapping.length > 0) {
          lockFileMatches.push({
            lockId: mapping.lockId,
            lockText: mapping.lockText,
            overlappingFiles: overlapping,
          });
          const existingSemantic = reasons.find(r => r.lockId === mapping.lockId);
          if (!existingSemantic) {
            // This lock wasn't caught by semantic analysis but the files overlap.
            reasons.push({
              type: "lock_file_overlap",
              severity: "warn",
              lockId: mapping.lockId,
              lockText: mapping.lockText,
              confidence: OVERLAP_CONFIDENCE,
              level: "MEDIUM",
              details: [`Changed files overlap with locked zone: ${overlapping.join(", ")}`],
            });
            if (OVERLAP_CONFIDENCE > maxConfidence) maxConfidence = OVERLAP_CONFIDENCE;
          } else {
            // Boost existing semantic match — file evidence confirms it.
            existingSemantic.confidence = Math.min(100, existingSemantic.confidence + 15);
            existingSemantic.details.push(`File-level confirmation: ${overlapping.join(", ")}`);
            if (existingSemantic.confidence > maxConfidence) maxConfidence = existingSemantic.confidence;
            if (existingSemantic.confidence >= BLOCK_CONFIDENCE) existingSemantic.severity = "block";
          }
        }
      }
    } catch (_) {
      // Lock mapping failed (no graph); best-effort — continue without it.
    }
  }

  // --- Step 3: Blast radius for each changed file ---
  const blastDetails = [];
  let maxImpactPercent = 0;
  if (includeGraph && files.length > 0) {
    try {
      for (const file of files) {
        const br = getBlastRadius(root, file);
        if (br.found) {
          blastDetails.push({
            file: br.file,
            directDependents: br.directDependents?.length || 0,
            transitiveDependents: br.blastRadius || 0,
            impactPercent: br.impactPercent || 0,
            depth: br.depth || 0,
          });
          if (br.impactPercent > maxImpactPercent) maxImpactPercent = br.impactPercent;
        }
      }

      if (maxImpactPercent > HIGH_BLAST_RADIUS) {
        reasons.push({
          type: "high_blast_radius",
          severity: "warn",
          confidence: Math.min(90, 50 + maxImpactPercent),
          level: "HIGH",
          details: [`Change affects ${maxImpactPercent.toFixed(1)}% of the codebase`],
        });
      } else if (maxImpactPercent > MED_BLAST_RADIUS) {
        reasons.push({
          type: "moderate_blast_radius",
          severity: "info",
          confidence: 30 + maxImpactPercent,
          level: "MEDIUM",
          details: [`Change affects ${maxImpactPercent.toFixed(1)}% of the codebase`],
        });
      }
    } catch (_) {
      // Graph not available; best-effort — skip blast radius.
    }
  }

  // --- Step 4: Typed constraint awareness ---
  const typedWarnings = [];
  if (typedLocks.length > 0) {
    const descLower = description.toLowerCase();
    for (const lock of typedLocks) {
      const metric = (lock.metric || lock.description || lock.text || "").toLowerCase();
      // Crude relevance check: does the description mention the metric's
      // first word? Informational only — severity "info" never escalates
      // the verdict on its own.
      if (metric && descLower.includes(metric.split(" ")[0])) {
        typedWarnings.push({
          lockId: lock.id,
          constraintType: lock.constraintType,
          metric: lock.metric || lock.description,
          text: lock.text,
        });
        reasons.push({
          type: "typed_constraint_relevant",
          severity: "info",
          lockId: lock.id,
          lockText: lock.text,
          confidence: 30,
          level: "LOW",
          details: [`Typed constraint (${lock.constraintType}) may be relevant: ${lock.text}`],
        });
      }
    }
  }

  // --- Step 5: Calculate risk score & verdict ---
  // Base risk is the strongest single conflict signal.
  let riskScore = maxConfidence;

  // Boost from lock-file overlaps: floor of 55, plus 5 per overlapping lock.
  if (lockFileMatches.length > 0) {
    riskScore = Math.max(riskScore, 55);
    riskScore = Math.min(100, riskScore + lockFileMatches.length * 5);
  }

  // Boost from blast radius.
  if (maxImpactPercent > HIGH_BLAST_RADIUS) {
    riskScore = Math.min(100, riskScore + 15);
  } else if (maxImpactPercent > MED_BLAST_RADIUS) {
    riskScore = Math.min(100, riskScore + 8);
  }

  // Any "block"-severity reason forces BLOCK regardless of aggregate score.
  let verdict;
  const hasBlockSeverity = reasons.some(r => r.severity === "block");
  if (hasBlockSeverity || riskScore >= BLOCK_CONFIDENCE) {
    verdict = "BLOCK";
  } else if (riskScore >= WARN_CONFIDENCE || reasons.some(r => r.severity === "warn")) {
    verdict = "WARN";
  } else {
    verdict = "ALLOW";
  }

  // --- Step 6: Build human-readable summary ---
  const summary = buildSummary(verdict, riskScore, reasons, files, blastDetails, lockFileMatches);

  return {
    verdict,
    riskScore,
    description,
    fileCount: files.length,
    lockCount: activeLocks.length,
    reasons,
    blastRadius: blastDetails.length > 0 ? {
      files: blastDetails,
      maxImpactPercent,
    } : undefined,
    lockFileOverlaps: lockFileMatches.length > 0 ? lockFileMatches : undefined,
    typedConstraints: typedWarnings.length > 0 ? typedWarnings : undefined,
    summary,
  };
}
240
+
241
/**
 * Async version — adds LLM-powered conflict checking for grey-zone decisions.
 * High-confidence heuristic verdicts (BLOCK, or ALLOW with risk < 20) are
 * returned as-is; everything else may be refined by the LLM checker.
 */
export async function reviewPatchAsync(root, opts) {
  const result = reviewPatch(root, opts);

  // Clear-cut heuristic outcomes skip the LLM round-trip entirely.
  const isClearCut =
    result.verdict === "BLOCK" ||
    (result.verdict === "ALLOW" && result.riskScore < 20);
  if (isClearCut) {
    result.source = "heuristic";
    return result;
  }

  // Best-effort LLM enhancement: any failure falls back to heuristics only.
  try {
    const { llmCheckConflict } = await import("./llm-checker.js");
    const brain = readBrain(root);
    const activeLocks = (brain?.specLock?.items || [])
      .filter(l => l.active !== false && !l.constraintType);

    if (activeLocks.length > 0) {
      const llmResult = await llmCheckConflict(root, opts.description, activeLocks);
      if (llmResult?.hasConflict) {
        for (const lc of llmResult.conflictingLocks || []) {
          const confidence = lc.confidence || 50;
          const existing = result.reasons.find(r => r.lockId === lc.id);
          if (existing) {
            // LLM agrees with the heuristic — raise confidence, never lower it.
            existing.confidence = Math.max(existing.confidence, confidence);
            existing.details.push("LLM confirmed conflict");
            if (existing.confidence >= BLOCK_CONFIDENCE) existing.severity = "block";
          } else {
            // Conflict the heuristic pass missed entirely.
            result.reasons.push({
              type: "llm_conflict",
              severity: confidence >= BLOCK_CONFIDENCE ? "block" : "warn",
              lockId: lc.id,
              lockText: lc.text,
              confidence,
              level: lc.level || "MEDIUM",
              details: lc.reasons || ["LLM-detected conflict"],
            });
          }
          if (confidence > result.riskScore) result.riskScore = confidence;
        }

        // Re-evaluate verdict with the LLM evidence folded in.
        const anyBlock = result.reasons.some(r => r.severity === "block");
        if (anyBlock || result.riskScore >= BLOCK_CONFIDENCE) {
          result.verdict = "BLOCK";
        } else if (result.riskScore >= WARN_CONFIDENCE) {
          result.verdict = "WARN";
        }

        result.summary = buildSummary(
          result.verdict,
          result.riskScore,
          result.reasons,
          opts.files || [],
          result.blastRadius?.files || [],
          result.lockFileOverlaps || []
        );
      }
    }
    result.source = "hybrid";
  } catch (_) {
    result.source = "heuristic-only";
  }

  return result;
}
308
+
309
+ // --- Summary builder ---
310
+
311
/**
 * Assemble a one-line human-readable summary of a review outcome.
 *
 * @param {string} verdict - "ALLOW" | "WARN" | "BLOCK"
 * @param {number} riskScore - Aggregate risk, 0-100
 * @param {object[]} reasons - Reason entries gathered during review
 * @param {string[]} files - Changed files (unused here; kept for signature stability)
 * @param {object[]} blastDetails - Per-file blast radius entries
 * @param {object[]} lockFileMatches - Lock/file overlap entries
 * @returns {string} Period-separated summary, terminated with "."
 */
function buildSummary(verdict, riskScore, reasons, files, blastDetails, lockFileMatches) {
  const headlineByVerdict = { BLOCK: "BLOCKED", WARN: "WARNING" };
  const headline = headlineByVerdict[verdict] ?? "ALLOWED";
  const parts = [`${headline} (risk: ${riskScore}/100)`];

  // Constraint conflicts (heuristic or LLM-detected), quoted by lock text.
  const conflicts = reasons.filter(
    r => r.type === "semantic_conflict" || r.type === "llm_conflict"
  );
  if (conflicts.length > 0) {
    const quoted = conflicts.map(r => `"${r.lockText}"`).join(", ");
    parts.push(`${conflicts.length} constraint conflict(s): ${quoted}`);
  }

  // Count every changed file that falls inside a locked zone.
  if (lockFileMatches.length > 0) {
    let lockedFileCount = 0;
    for (const match of lockFileMatches) {
      lockedFileCount += match.overlappingFiles.length;
    }
    parts.push(`${lockedFileCount} file(s) in locked zones`);
  }

  // Blast radius: total transitive dependents and the worst impact percent.
  if (blastDetails.length > 0) {
    let maxImpact = -Infinity;
    let totalDeps = 0;
    for (const entry of blastDetails) {
      if (entry.impactPercent > maxImpact) maxImpact = entry.impactPercent;
      totalDeps += entry.transitiveDependents;
    }
    if (maxImpact > 0) {
      parts.push(`blast radius: ${totalDeps} dependent file(s), ${maxImpact.toFixed(1)}% impact`);
    }
  }

  // Typed constraints flagged as possibly relevant.
  const typedCount = reasons.filter(r => r.type === "typed_constraint_relevant").length;
  if (typedCount > 0) {
    parts.push(`${typedCount} typed constraint(s) may be affected`);
  }

  return parts.join(". ") + ".";
}
347
+
348
+ // ===================================================================
349
+ // DIFF-NATIVE REVIEW (v5.2) — Actual patch analysis
350
+ // ===================================================================
351
+
352
+ import { parseDiff } from "./diff-parser.js";
353
+ import { analyzeDiff, calculateVerdict } from "./diff-analyzer.js";
354
+
355
/**
 * Review a proposed change using actual diff content.
 * Combines diff-level signal extraction with project constraints.
 *
 * @param {string} root - Project root
 * @param {object} opts
 * @param {string} opts.description - What the change does
 * @param {string[]} [opts.files] - Files being changed; derived from the diff when omitted
 * @param {string} opts.diff - Raw unified diff (git diff output)
 * @param {object} [opts.options] - Analysis options forwarded to the analyzer
 * @returns {object} Diff-native review result
 */
export function reviewPatchDiff(root, { description, files = [], diff, options = {} }) {
  if (!description || typeof description !== "string" || !description.trim()) {
    return {
      verdict: "ERROR",
      riskScore: 0,
      reviewMode: "diff-native",
      error: "description is required",
      signals: {},
      reasons: [],
      summary: "No change description provided.",
    };
  }

  if (!diff || typeof diff !== "string" || !diff.trim()) {
    return {
      verdict: "ERROR",
      riskScore: 0,
      reviewMode: "diff-native",
      error: "diff is required (provide git diff output)",
      signals: {},
      reasons: [],
      summary: "No diff content provided.",
    };
  }

  // Parse the diff.
  const parsedDiff = parseDiff(diff);

  // Prefer the caller-supplied file list; fall back to paths extracted from
  // the diff. Kept in a local instead of reassigning the parameter.
  const changedFiles = files.length > 0 ? files : parsedDiff.files.map(f => f.path);

  // Run all signal analyzers over the parsed diff.
  const { signals, reasons } = analyzeDiff(root, parsedDiff, description, options);

  // Collapse signals + reasons into a verdict and risk score.
  const { verdict, riskScore, recommendation } = calculateVerdict(signals, reasons);

  // Build a compact human-readable summary.
  const summaryParts = [`${verdict} (risk: ${riskScore}/100)`];
  const criticalReasons = reasons.filter(r => r.severity === "critical");
  const highReasons = reasons.filter(r => r.severity === "high");
  if (criticalReasons.length > 0) summaryParts.push(`${criticalReasons.length} critical issue(s)`);
  if (highReasons.length > 0) summaryParts.push(`${highReasons.length} high-severity issue(s)`);
  if (parsedDiff.stats.filesChanged > 0) {
    summaryParts.push(`${parsedDiff.stats.filesChanged} file(s), +${parsedDiff.stats.additions}/-${parsedDiff.stats.deletions}`);
  }

  return {
    verdict,
    riskScore,
    reviewMode: "diff-native",
    description,
    files: changedFiles,
    signals,
    reasons,
    parsedDiff: parsedDiff.stats,
    recommendation,
    summary: summaryParts.join(". ") + ".",
    api_version: "v2",
  };
}
430
+
431
/**
 * Async diff review — adds LLM conflict checking for ambiguous cases.
 * ERROR and BLOCK results from the synchronous pass are returned untouched;
 * WARN/ALLOW results may be escalated by LLM-detected conflicts.
 */
export async function reviewPatchDiffAsync(root, opts) {
  const result = reviewPatchDiff(root, opts);

  if (result.verdict === "ERROR" || result.verdict === "BLOCK") {
    result.source = result.verdict === "BLOCK" ? "diff-native" : "error";
    return result;
  }

  // For WARN / ALLOW, try LLM enhancement (best-effort: failures fall back
  // to the pure diff-native result).
  try {
    const { llmCheckConflict } = await import("./llm-checker.js");
    const brain = readBrain(root);
    const activeLocks = (brain?.specLock?.items || []).filter(l => l.active !== false && !l.constraintType);

    if (activeLocks.length > 0) {
      const llmResult = await llmCheckConflict(root, opts.description, activeLocks);
      if (llmResult && llmResult.hasConflict) {
        // analyzeDiff presumably seeds signals.llmConflict — guard anyway so
        // a missing slot cannot throw inside this try and silently discard
        // every LLM finding via the catch below.
        result.signals.llmConflict ??= { used: false, score: 0 };
        for (const lc of (llmResult.conflictingLocks || [])) {
          // Normalize LLM confidence from 0-100 to 0-1 for diff-native reasons.
          const confidence = (lc.confidence || 50) / 100;
          result.signals.llmConflict.used = true;
          // Signal score is on a 0-10 scale, capped by CAPS_LLM.
          result.signals.llmConflict.score = Math.min(CAPS_LLM, Math.round(confidence * 10));
          result.reasons.push({
            type: "llm_conflict",
            severity: confidence >= 0.7 ? "critical" : "high",
            confidence,
            message: `LLM detected conflict with: "${lc.text}"`,
            details: { lockId: lc.id, lockText: lc.text },
          });
        }
        // Recalculate verdict with the LLM signal included.
        const recalc = calculateVerdict(result.signals, result.reasons);
        result.verdict = recalc.verdict;
        result.riskScore = recalc.riskScore;
        result.recommendation = recalc.recommendation;
      }
    }
    result.source = "diff-native+llm";
  } catch (_) {
    result.source = "diff-native";
  }

  return result;
}
477
+
478
// Upper bound for the llmConflict signal score (clamps
// Math.round(confidence * 10) in reviewPatchDiffAsync). Declared after its
// use site in source order; safe because module-level consts are initialized
// at load time, before any exported function can run.
const CAPS_LLM = 10;
479
+
480
/**
 * Unified review — runs both intent review (v5.1) and diff review (v5.2),
 * then merges results. Takes the stronger verdict.
 *
 * @param {string} root - Project root
 * @param {object} opts - Same as reviewPatchDiff but diff is optional
 * @returns {object} Unified review result
 */
export function reviewPatchUnified(root, opts) {
  const hasDiff = opts.diff && typeof opts.diff === "string" && opts.diff.trim();

  // Intent review (v5.1) always runs.
  const intentResult = reviewPatch(root, {
    description: opts.description,
    files: opts.files || [],
    includeGraph: true,
  });

  // No diff available — the intent review is all we have.
  if (!hasDiff) {
    return { ...intentResult, reviewMode: "intent-only", source: "v5.1-intent" };
  }

  const diffResult = reviewPatchDiff(root, opts);
  if (diffResult.verdict === "ERROR") {
    // Diff parsing failed — degrade gracefully to the intent-only result.
    return { ...intentResult, reviewMode: "intent-only", source: "v5.1-intent (diff parse failed)" };
  }

  // Weighted risk merge — the actual diff carries more signal (65%) than
  // the natural-language intent (35%).
  const intentWeight = 0.35;
  const diffWeight = 0.65;
  const mergedRisk = Math.min(
    100,
    Math.round(intentResult.riskScore * intentWeight + diffResult.riskScore * diffWeight)
  );

  // The stricter of the two verdicts wins; ties go to the diff side.
  const verdictRank = { ALLOW: 0, WARN: 1, BLOCK: 2 };
  const finalVerdict =
    verdictRank[diffResult.verdict] >= verdictRank[intentResult.verdict]
      ? diffResult.verdict
      : intentResult.verdict;

  // Merge reasons, deduplicating by (type, lockId). Diff reasons keep their
  // lockId under `details`; intent reasons carry it at the top level.
  // NOTE(review): reasons without any lockId compare undefined === undefined,
  // so two same-type lock-less reasons would dedupe — confirm intended.
  const mergedReasons = [...diffResult.reasons];
  for (const intentReason of intentResult.reasons) {
    const alreadyPresent = mergedReasons.some(
      r => r.type === intentReason.type && r.details?.lockId === intentReason.lockId
    );
    if (alreadyPresent) continue;
    // Intent confidences are on a 0-100 scale; diff-native ones are 0-1.
    const normalizedConfidence =
      typeof intentReason.confidence === "number" && intentReason.confidence > 1
        ? intentReason.confidence / 100
        : intentReason.confidence;
    mergedReasons.push({ ...intentReason, confidence: normalizedConfidence });
  }

  return {
    verdict: finalVerdict,
    riskScore: mergedRisk,
    reviewMode: "unified",
    description: opts.description,
    files: diffResult.files,
    signals: diffResult.signals,
    reasons: mergedReasons,
    parsedDiff: diffResult.parsedDiff,
    blastRadius: intentResult.blastRadius,
    recommendation: diffResult.recommendation,
    summary: `${finalVerdict} (risk: ${mergedRisk}/100). Intent: ${intentResult.verdict}(${intentResult.riskScore}). Diff: ${diffResult.verdict}(${diffResult.riskScore}).`,
    intentVerdict: intentResult.verdict,
    intentRisk: intentResult.riskScore,
    diffVerdict: diffResult.verdict,
    diffRisk: diffResult.riskScore,
    api_version: "v2",
  };
}
@@ -89,7 +89,7 @@
89
89
  <div class="header">
90
90
  <div>
91
91
  <h1><span>SpecLock</span> Dashboard</h1>
92
- <div class="meta">v5.0.0 &mdash; AI Constraint Engine</div>
92
+ <div class="meta">v5.2.0 &mdash; AI Constraint Engine</div>
93
93
  </div>
94
94
  <div style="display:flex;align-items:center;gap:12px;">
95
95
  <span id="health-badge" class="status-badge healthy">Loading...</span>
@@ -182,7 +182,7 @@
182
182
  </div>
183
183
 
184
184
  <div style="text-align:center;padding:24px;color:var(--muted);font-size:12px;">
185
- SpecLock v5.0.0 &mdash; Developed by Sandeep Roy &mdash; <a href="https://github.com/sgroy10/speclock" style="color:var(--accent)">GitHub</a>
185
+ SpecLock v5.2.0 &mdash; Developed by Sandeep Roy &mdash; <a href="https://github.com/sgroy10/speclock" style="color:var(--accent)">GitHub</a>
186
186
  </div>
187
187
 
188
188
  <script>