@nforma.ai/nforma 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +22 -0
- package/README.md +1024 -0
- package/agents/qgsd-codebase-mapper.md +764 -0
- package/agents/qgsd-debugger.md +1201 -0
- package/agents/qgsd-executor.md +472 -0
- package/agents/qgsd-integration-checker.md +443 -0
- package/agents/qgsd-phase-researcher.md +502 -0
- package/agents/qgsd-plan-checker.md +643 -0
- package/agents/qgsd-planner.md +1182 -0
- package/agents/qgsd-project-researcher.md +621 -0
- package/agents/qgsd-quorum-orchestrator.md +628 -0
- package/agents/qgsd-quorum-slot-worker.md +41 -0
- package/agents/qgsd-quorum-synthesizer.md +133 -0
- package/agents/qgsd-quorum-test-worker.md +37 -0
- package/agents/qgsd-quorum-worker.md +161 -0
- package/agents/qgsd-research-synthesizer.md +239 -0
- package/agents/qgsd-roadmapper.md +660 -0
- package/agents/qgsd-verifier.md +628 -0
- package/bin/accept-debug-invariant.cjs +165 -0
- package/bin/account-manager.cjs +719 -0
- package/bin/aggregate-requirements.cjs +466 -0
- package/bin/analyze-assumptions.cjs +757 -0
- package/bin/analyze-state-space.cjs +921 -0
- package/bin/attribute-trace-divergence.cjs +150 -0
- package/bin/auth-drivers/gh-cli.cjs +93 -0
- package/bin/auth-drivers/index.cjs +46 -0
- package/bin/auth-drivers/pool.cjs +67 -0
- package/bin/auth-drivers/simple.cjs +95 -0
- package/bin/autoClosePtoF.cjs +110 -0
- package/bin/blessed-terminal.cjs +350 -0
- package/bin/build-phase-index.cjs +472 -0
- package/bin/call-quorum-slot.cjs +541 -0
- package/bin/ccr-secure-config.cjs +99 -0
- package/bin/ccr-secure-start.cjs +83 -0
- package/bin/check-bundled-sdks.cjs +177 -0
- package/bin/check-coverage-guard.cjs +112 -0
- package/bin/check-liveness-fairness.cjs +95 -0
- package/bin/check-mcp-health.cjs +123 -0
- package/bin/check-provider-health.cjs +395 -0
- package/bin/check-results-exit.cjs +24 -0
- package/bin/check-spec-sync.cjs +360 -0
- package/bin/check-trace-redaction.cjs +271 -0
- package/bin/check-trace-schema-drift.cjs +99 -0
- package/bin/compareDrift.cjs +21 -0
- package/bin/conformance-schema.cjs +12 -0
- package/bin/count-scenarios.cjs +420 -0
- package/bin/debt-dedup.cjs +144 -0
- package/bin/debt-ledger.cjs +61 -0
- package/bin/debt-retention.cjs +76 -0
- package/bin/debt-state-machine.cjs +80 -0
- package/bin/detect-coverage-gaps.cjs +204 -0
- package/bin/detect-project-intent.cjs +362 -0
- package/bin/export-prism-constants.cjs +164 -0
- package/bin/extract-annotations.cjs +633 -0
- package/bin/extractFormalExpected.cjs +104 -0
- package/bin/fingerprint-drift.cjs +24 -0
- package/bin/fingerprint-issue.cjs +46 -0
- package/bin/formal-core.cjs +519 -0
- package/bin/formal-ref-linker.cjs +141 -0
- package/bin/formal-test-sync.cjs +788 -0
- package/bin/generate-formal-specs.cjs +588 -0
- package/bin/generate-petri-net.cjs +397 -0
- package/bin/generate-phase-spec.cjs +249 -0
- package/bin/generate-proposed-changes.cjs +194 -0
- package/bin/generate-tla-cfg.cjs +122 -0
- package/bin/generate-traceability-matrix.cjs +701 -0
- package/bin/generate-triage-bundle.cjs +300 -0
- package/bin/gh-account-rotate.cjs +34 -0
- package/bin/initialize-model-registry.cjs +105 -0
- package/bin/install-formal-tools.cjs +382 -0
- package/bin/install.js +2424 -0
- package/bin/isNumericThreshold.cjs +34 -0
- package/bin/issue-classifier.cjs +151 -0
- package/bin/levenshtein.cjs +74 -0
- package/bin/lint-formal-models.cjs +580 -0
- package/bin/load-baseline-requirements.cjs +275 -0
- package/bin/manage-agents-core.cjs +815 -0
- package/bin/migrate-formal-dir.cjs +172 -0
- package/bin/migrate-planning.cjs +206 -0
- package/bin/migrate-to-slots.cjs +255 -0
- package/bin/nForma.cjs +2726 -0
- package/bin/observe-config.cjs +353 -0
- package/bin/observe-debt-writer.cjs +140 -0
- package/bin/observe-handler-grafana.cjs +128 -0
- package/bin/observe-handler-internal.cjs +301 -0
- package/bin/observe-handler-logstash.cjs +153 -0
- package/bin/observe-handler-prometheus.cjs +185 -0
- package/bin/observe-handlers.cjs +436 -0
- package/bin/observe-registry.cjs +131 -0
- package/bin/observe-render.cjs +168 -0
- package/bin/planning-paths.cjs +167 -0
- package/bin/polyrepo.cjs +560 -0
- package/bin/prism-priority.cjs +153 -0
- package/bin/probe-quorum-slots.cjs +167 -0
- package/bin/promote-model.cjs +225 -0
- package/bin/propose-debug-invariants.cjs +165 -0
- package/bin/providers.json +392 -0
- package/bin/pty-proxy.py +129 -0
- package/bin/qgsd-solve.cjs +2477 -0
- package/bin/quorum-consensus-gate.cjs +238 -0
- package/bin/quorum-formal-context.cjs +183 -0
- package/bin/quorum-slot-dispatch.cjs +934 -0
- package/bin/read-policy.cjs +60 -0
- package/bin/requirement-map.cjs +63 -0
- package/bin/requirements-core.cjs +247 -0
- package/bin/resolve-cli.cjs +101 -0
- package/bin/review-mcp-logs.cjs +294 -0
- package/bin/run-account-manager-tlc.cjs +188 -0
- package/bin/run-account-pool-alloy.cjs +158 -0
- package/bin/run-alloy.cjs +153 -0
- package/bin/run-audit-alloy.cjs +187 -0
- package/bin/run-breaker-tlc.cjs +181 -0
- package/bin/run-formal-check.cjs +395 -0
- package/bin/run-formal-verify.cjs +701 -0
- package/bin/run-installer-alloy.cjs +188 -0
- package/bin/run-oauth-rotation-prism.cjs +132 -0
- package/bin/run-oscillation-tlc.cjs +202 -0
- package/bin/run-phase-tlc.cjs +228 -0
- package/bin/run-prism.cjs +446 -0
- package/bin/run-protocol-tlc.cjs +201 -0
- package/bin/run-quorum-composition-alloy.cjs +155 -0
- package/bin/run-sensitivity-sweep.cjs +231 -0
- package/bin/run-stop-hook-tlc.cjs +188 -0
- package/bin/run-tlc.cjs +467 -0
- package/bin/run-transcript-alloy.cjs +173 -0
- package/bin/run-uppaal.cjs +264 -0
- package/bin/secrets.cjs +134 -0
- package/bin/sensitivity-report.cjs +219 -0
- package/bin/sensitivity-sweep-feedback.cjs +194 -0
- package/bin/set-secret.cjs +29 -0
- package/bin/setup-telemetry-cron.sh +36 -0
- package/bin/sweepPtoF.cjs +63 -0
- package/bin/sync-baseline-requirements.cjs +290 -0
- package/bin/task-envelope.cjs +360 -0
- package/bin/telemetry-collector.cjs +229 -0
- package/bin/unified-mcp-server.mjs +735 -0
- package/bin/update-agents.cjs +369 -0
- package/bin/update-scoreboard.cjs +1134 -0
- package/bin/validate-debt-entry.cjs +207 -0
- package/bin/validate-invariant.cjs +419 -0
- package/bin/validate-memory.cjs +389 -0
- package/bin/validate-requirements-haiku.cjs +435 -0
- package/bin/validate-traces.cjs +438 -0
- package/bin/verify-formal-results.cjs +124 -0
- package/bin/verify-quorum-health.cjs +273 -0
- package/bin/write-check-result.cjs +106 -0
- package/bin/xstate-to-tla.cjs +483 -0
- package/bin/xstate-trace-walker.cjs +205 -0
- package/commands/qgsd/add-phase.md +43 -0
- package/commands/qgsd/add-requirement.md +24 -0
- package/commands/qgsd/add-todo.md +47 -0
- package/commands/qgsd/audit-milestone.md +37 -0
- package/commands/qgsd/check-todos.md +45 -0
- package/commands/qgsd/cleanup.md +18 -0
- package/commands/qgsd/close-formal-gaps.md +33 -0
- package/commands/qgsd/complete-milestone.md +136 -0
- package/commands/qgsd/debug.md +166 -0
- package/commands/qgsd/discuss-phase.md +83 -0
- package/commands/qgsd/execute-phase.md +117 -0
- package/commands/qgsd/fix-tests.md +27 -0
- package/commands/qgsd/formal-test-sync.md +32 -0
- package/commands/qgsd/health.md +22 -0
- package/commands/qgsd/help.md +22 -0
- package/commands/qgsd/insert-phase.md +32 -0
- package/commands/qgsd/join-discord.md +18 -0
- package/commands/qgsd/list-phase-assumptions.md +46 -0
- package/commands/qgsd/map-codebase.md +71 -0
- package/commands/qgsd/map-requirements.md +20 -0
- package/commands/qgsd/mcp-restart.md +176 -0
- package/commands/qgsd/mcp-set-model.md +134 -0
- package/commands/qgsd/mcp-setup.md +1371 -0
- package/commands/qgsd/mcp-status.md +274 -0
- package/commands/qgsd/mcp-update.md +238 -0
- package/commands/qgsd/new-milestone.md +44 -0
- package/commands/qgsd/new-project.md +42 -0
- package/commands/qgsd/observe.md +260 -0
- package/commands/qgsd/pause-work.md +38 -0
- package/commands/qgsd/plan-milestone-gaps.md +34 -0
- package/commands/qgsd/plan-phase.md +44 -0
- package/commands/qgsd/polyrepo.md +50 -0
- package/commands/qgsd/progress.md +24 -0
- package/commands/qgsd/queue.md +54 -0
- package/commands/qgsd/quick.md +133 -0
- package/commands/qgsd/quorum-test.md +275 -0
- package/commands/qgsd/quorum.md +707 -0
- package/commands/qgsd/reapply-patches.md +110 -0
- package/commands/qgsd/remove-phase.md +31 -0
- package/commands/qgsd/research-phase.md +189 -0
- package/commands/qgsd/resume-work.md +40 -0
- package/commands/qgsd/set-profile.md +34 -0
- package/commands/qgsd/settings.md +39 -0
- package/commands/qgsd/solve.md +565 -0
- package/commands/qgsd/sync-baselines.md +119 -0
- package/commands/qgsd/triage.md +233 -0
- package/commands/qgsd/update.md +37 -0
- package/commands/qgsd/verify-work.md +38 -0
- package/hooks/dist/config-loader.js +297 -0
- package/hooks/dist/conformance-schema.cjs +12 -0
- package/hooks/dist/gsd-context-monitor.js +64 -0
- package/hooks/dist/qgsd-check-update.js +62 -0
- package/hooks/dist/qgsd-circuit-breaker.js +682 -0
- package/hooks/dist/qgsd-precompact.js +156 -0
- package/hooks/dist/qgsd-prompt.js +653 -0
- package/hooks/dist/qgsd-session-start.js +122 -0
- package/hooks/dist/qgsd-slot-correlator.js +58 -0
- package/hooks/dist/qgsd-spec-regen.js +86 -0
- package/hooks/dist/qgsd-statusline.js +91 -0
- package/hooks/dist/qgsd-stop.js +553 -0
- package/hooks/dist/qgsd-token-collector.js +133 -0
- package/hooks/dist/unified-mcp-server.mjs +669 -0
- package/package.json +95 -0
- package/scripts/build-hooks.js +46 -0
- package/scripts/postinstall.js +48 -0
- package/scripts/secret-audit.sh +45 -0
- package/templates/qgsd.json +49 -0
|
@@ -0,0 +1,757 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
'use strict';
|
|
3
|
+
// bin/analyze-assumptions.cjs
|
|
4
|
+
// Assumption-to-instrumentation analysis CLI
|
|
5
|
+
// Parses formal models (TLA+, Alloy, PRISM) to extract assumptions/thresholds,
|
|
6
|
+
// cross-references against observe handlers and debt ledger,
|
|
7
|
+
// outputs a gap report with proposed metrics and instrumentation snippets.
|
|
8
|
+
|
|
9
|
+
const fs = require('node:fs');
|
|
10
|
+
const path = require('node:path');
|
|
11
|
+
|
|
12
|
+
// ── TLA+ Parser ─────────────────────────────────────────────────────────────
|
|
13
|
+
|
|
14
|
+
/**
|
|
15
|
+
* Extract assumptions from a TLA+ .tla file
|
|
16
|
+
* Parses ASSUME statements, CONSTANTS declarations
|
|
17
|
+
* @param {string} filePath - Path to .tla file
|
|
18
|
+
* @returns {Array<object>} Array of assumption objects
|
|
19
|
+
*/
|
|
20
|
+
function extractTlaAssumptions(filePath) {
|
|
21
|
+
try {
|
|
22
|
+
const stat = fs.statSync(filePath);
|
|
23
|
+
if (stat.size === 0) {
|
|
24
|
+
process.stderr.write(`[warn] Skipping empty TLA+ file: ${filePath}\n`);
|
|
25
|
+
return [];
|
|
26
|
+
}
|
|
27
|
+
const content = fs.readFileSync(filePath, 'utf8');
|
|
28
|
+
const results = [];
|
|
29
|
+
const relFile = filePath;
|
|
30
|
+
|
|
31
|
+
// Extract ASSUME statements
|
|
32
|
+
// e.g., ASSUME MaxDeliberation \in Nat /\ MaxDeliberation > 0
|
|
33
|
+
const assumeRe = /^ASSUME\s+(.+)$/gm;
|
|
34
|
+
let m;
|
|
35
|
+
while ((m = assumeRe.exec(content)) !== null) {
|
|
36
|
+
const rawText = m[0].trim();
|
|
37
|
+
const body = m[1].trim();
|
|
38
|
+
// Try to extract the variable name (first identifier)
|
|
39
|
+
const nameMatch = body.match(/^(\w+)/);
|
|
40
|
+
const name = nameMatch ? nameMatch[1] : 'unknown';
|
|
41
|
+
// Try to extract a threshold value
|
|
42
|
+
const valMatch = body.match(/(?:>=?|<=?|=)\s*(\d+)/);
|
|
43
|
+
const value = valMatch ? parseInt(valMatch[1], 10) : null;
|
|
44
|
+
results.push({
|
|
45
|
+
source: 'tla', file: relFile, name, type: 'assume', value, rawText
|
|
46
|
+
});
|
|
47
|
+
}
|
|
48
|
+
|
|
49
|
+
// Extract CONSTANTS declarations
|
|
50
|
+
// e.g., CONSTANTS\n Agents,\n MaxDeliberation,\n MaxSize
|
|
51
|
+
// Only grab indented continuation lines (lines starting with whitespace)
|
|
52
|
+
const constantsRe = /^CONSTANTS?\s*\n((?:\s{2,}\S.*\n)*)/gm;
|
|
53
|
+
while ((m = constantsRe.exec(content)) !== null) {
|
|
54
|
+
const block = m[1];
|
|
55
|
+
// Extract identifiers from the block, filtering out comment text
|
|
56
|
+
// Each line typically looks like: " MaxDeliberation, \* comment"
|
|
57
|
+
const lines = block.split('\n').filter(l => l.trim());
|
|
58
|
+
for (const line of lines) {
|
|
59
|
+
// Remove TLA+ comments (\* ...)
|
|
60
|
+
const noComment = line.split('\\*')[0].trim();
|
|
61
|
+
// Extract identifiers (comma-separated)
|
|
62
|
+
const ids = noComment.split(/[,\s]+/).filter(id => /^[A-Z]\w*$/.test(id));
|
|
63
|
+
for (const cname of ids) {
|
|
64
|
+
results.push({
|
|
65
|
+
source: 'tla', file: relFile, name: cname, type: 'constant', value: null, rawText: `CONSTANTS ... ${cname}`
|
|
66
|
+
});
|
|
67
|
+
}
|
|
68
|
+
}
|
|
69
|
+
}
|
|
70
|
+
|
|
71
|
+
// Extract INVARIANT definitions (name == expression patterns)
|
|
72
|
+
// e.g., TypeOK == ...
|
|
73
|
+
// We look for identifiers that end with a == pattern and contain INVARIANT-like names
|
|
74
|
+
const invariantRe = /^(\w+)\s*==\s*(.+)$/gm;
|
|
75
|
+
while ((m = invariantRe.exec(content)) !== null) {
|
|
76
|
+
const name = m[1];
|
|
77
|
+
// Skip non-invariant definitions (look for common invariant naming patterns)
|
|
78
|
+
if (/(?:Invariant|TypeOK|Safety|Bounded|Met|Valid|Monotone)/i.test(name)) {
|
|
79
|
+
results.push({
|
|
80
|
+
source: 'tla', file: relFile, name, type: 'invariant', value: null, rawText: m[0].trim()
|
|
81
|
+
});
|
|
82
|
+
}
|
|
83
|
+
}
|
|
84
|
+
|
|
85
|
+
return results;
|
|
86
|
+
} catch (err) {
|
|
87
|
+
process.stderr.write(`[warn] Failed to parse TLA+ file ${filePath}: ${err.message}\n`);
|
|
88
|
+
return [];
|
|
89
|
+
}
|
|
90
|
+
}
|
|
91
|
+
|
|
92
|
+
/**
|
|
93
|
+
* Extract constant values and invariant names from a TLA+ .cfg file
|
|
94
|
+
* @param {string} filePath - Path to .cfg file
|
|
95
|
+
* @returns {Array<object>} Array of assumption objects
|
|
96
|
+
*/
|
|
97
|
+
function extractTlaCfgValues(filePath) {
|
|
98
|
+
try {
|
|
99
|
+
const stat = fs.statSync(filePath);
|
|
100
|
+
if (stat.size === 0) {
|
|
101
|
+
process.stderr.write(`[warn] Skipping empty cfg file: ${filePath}\n`);
|
|
102
|
+
return [];
|
|
103
|
+
}
|
|
104
|
+
const content = fs.readFileSync(filePath, 'utf8');
|
|
105
|
+
const results = [];
|
|
106
|
+
const relFile = filePath;
|
|
107
|
+
|
|
108
|
+
// Extract key = value patterns (concrete constant assignments)
|
|
109
|
+
// e.g., MaxDeliberation = 9
|
|
110
|
+
const kvRe = /^\s*(\w+)\s*=\s*(\d+)\s*$/gm;
|
|
111
|
+
let m;
|
|
112
|
+
while ((m = kvRe.exec(content)) !== null) {
|
|
113
|
+
const name = m[1];
|
|
114
|
+
const value = parseInt(m[2], 10);
|
|
115
|
+
// Skip agent assignments like a1 = a1
|
|
116
|
+
if (/^a\d+$/.test(name)) continue;
|
|
117
|
+
results.push({
|
|
118
|
+
source: 'tla', file: relFile, name, type: 'constant', value, rawText: m[0].trim()
|
|
119
|
+
});
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
// Extract INVARIANT names
|
|
123
|
+
const invSection = content.match(/^INVARIANT\s+([\s\S]*?)(?=^(?:SPECIFICATION|CONSTANTS|SYMMETRY|PROPERTY|CHECK_DEADLOCK|$))/gm);
|
|
124
|
+
if (invSection) {
|
|
125
|
+
for (const section of invSection) {
|
|
126
|
+
const names = section.replace(/^INVARIANT\s*/, '').trim().split(/\s+/);
|
|
127
|
+
for (const name of names) {
|
|
128
|
+
if (name && /^\w+$/.test(name)) {
|
|
129
|
+
results.push({
|
|
130
|
+
source: 'tla', file: relFile, name, type: 'invariant', value: null, rawText: `INVARIANT ${name}`
|
|
131
|
+
});
|
|
132
|
+
}
|
|
133
|
+
}
|
|
134
|
+
}
|
|
135
|
+
}
|
|
136
|
+
|
|
137
|
+
// Extract PROPERTY names
|
|
138
|
+
const propRe = /^PROPERTY\s+(\w+)/gm;
|
|
139
|
+
while ((m = propRe.exec(content)) !== null) {
|
|
140
|
+
results.push({
|
|
141
|
+
source: 'tla', file: relFile, name: m[1], type: 'invariant', value: null, rawText: m[0].trim()
|
|
142
|
+
});
|
|
143
|
+
}
|
|
144
|
+
|
|
145
|
+
return results;
|
|
146
|
+
} catch (err) {
|
|
147
|
+
process.stderr.write(`[warn] Failed to parse cfg file ${filePath}: ${err.message}\n`);
|
|
148
|
+
return [];
|
|
149
|
+
}
|
|
150
|
+
}
|
|
151
|
+
|
|
152
|
+
// ── Alloy Parser ────────────────────────────────────────────────────────────
|
|
153
|
+
|
|
154
|
+
/**
 * Extract assumptions from an Alloy .als file.
 * Parses fact blocks, assert blocks, and numeric constraints inside pred bodies.
 *
 * Fix: the fact/assert/pred regexes are anchored with \b so the keywords no
 * longer match inside longer identifiers (previously a line such as
 * `artifact Name { ... }` matched the `fact` pattern and produced a bogus entry).
 *
 * @param {string} filePath - Path to .als file
 * @returns {Array<object>} Array of assumption objects
 */
function extractAlloyAssumptions(filePath) {
  try {
    const stat = fs.statSync(filePath);
    if (stat.size === 0) {
      process.stderr.write(`[warn] Skipping empty Alloy file: ${filePath}\n`);
      return [];
    }
    const content = fs.readFileSync(filePath, 'utf8');
    const results = [];
    const relFile = filePath;

    // Extract fact blocks: fact Name { ... }
    // \b prevents matching the tail of a longer word (e.g. "artifact").
    const factRe = /\bfact\s+(\w+)\s*\{([^}]*)\}/gs;
    let m;
    while ((m = factRe.exec(content)) !== null) {
      const name = m[1];
      const body = m[2].trim();
      // Try to extract a numeric constraint (e.g. a cardinality bound).
      const numMatch = body.match(/(?:=|>=?|<=?)\s*(\d+)/);
      const value = numMatch ? parseInt(numMatch[1], 10) : null;
      results.push({
        source: 'alloy', file: relFile, name, type: 'fact', value, rawText: m[0].trim()
      });
    }

    // Extract assert blocks: assert Name { ... }
    const assertRe = /\bassert\s+(\w+)\s*\{([^}]*)\}/gs;
    while ((m = assertRe.exec(content)) !== null) {
      const name = m[1];
      const body = m[2].trim();
      const numMatch = body.match(/(?:>=?|<=?)\s*(\d+)/);
      const value = numMatch ? parseInt(numMatch[1], 10) : null;
      results.push({
        source: 'alloy', file: relFile, name, type: 'assert', value, rawText: m[0].trim()
      });
    }

    // Extract numeric constraints from predicates: pred Name [...] { ... >= N ... }
    // Each numeric comparison inside the body becomes one 'constraint' entry
    // named `<predName>_<lhs>`.
    const predRe = /\bpred\s+(\w+)\s*(?:\[[^\]]*\])?\s*\{([^}]*)\}/gs;
    while ((m = predRe.exec(content)) !== null) {
      const body = m[2].trim();
      const constraints = body.match(/(\w[\w.#]*)\s*(>=?|<=?|=)\s*(\d+)/g);
      if (constraints) {
        for (const constraint of constraints) {
          const parts = constraint.match(/(\w[\w.#]*)\s*(>=?|<=?|=)\s*(\d+)/);
          if (parts) {
            results.push({
              source: 'alloy', file: relFile, name: `${m[1]}_${parts[1]}`,
              type: 'constraint', value: parseInt(parts[3], 10),
              rawText: constraint.trim()
            });
          }
        }
      }
    }

    return results;
  } catch (err) {
    process.stderr.write(`[warn] Failed to parse Alloy file ${filePath}: ${err.message}\n`);
    return [];
  }
}
|
|
222
|
+
|
|
223
|
+
// ── PRISM Parser ────────────────────────────────────────────────────────────
|
|
224
|
+
|
|
225
|
+
/**
|
|
226
|
+
* Extract assumptions from a PRISM .pm file
|
|
227
|
+
* Parses const declarations, module variable bounds, transition rate expressions
|
|
228
|
+
* @param {string} filePath - Path to .pm file
|
|
229
|
+
* @returns {Array<object>} Array of assumption objects
|
|
230
|
+
*/
|
|
231
|
+
function extractPrismAssumptions(filePath) {
|
|
232
|
+
try {
|
|
233
|
+
const stat = fs.statSync(filePath);
|
|
234
|
+
if (stat.size === 0) {
|
|
235
|
+
process.stderr.write(`[warn] Skipping empty PRISM file: ${filePath}\n`);
|
|
236
|
+
return [];
|
|
237
|
+
}
|
|
238
|
+
const content = fs.readFileSync(filePath, 'utf8');
|
|
239
|
+
const results = [];
|
|
240
|
+
const relFile = filePath;
|
|
241
|
+
|
|
242
|
+
// Extract const declarations: const double tp_rate; or const int max_rounds = 9;
|
|
243
|
+
const constRe = /const\s+(?:double|int)\s+(\w+)(?:\s*=\s*([^;]+))?;/g;
|
|
244
|
+
let m;
|
|
245
|
+
while ((m = constRe.exec(content)) !== null) {
|
|
246
|
+
const name = m[1];
|
|
247
|
+
const rawVal = m[2] ? m[2].trim() : null;
|
|
248
|
+
const value = rawVal !== null ? (isNaN(Number(rawVal)) ? rawVal : Number(rawVal)) : null;
|
|
249
|
+
results.push({
|
|
250
|
+
source: 'prism', file: relFile, name, type: 'const', value, rawText: m[0].trim()
|
|
251
|
+
});
|
|
252
|
+
}
|
|
253
|
+
|
|
254
|
+
// Extract module variable bounds: s : [0..2] init 0;
|
|
255
|
+
const boundRe = /(\w+)\s*:\s*\[(\d+)\.\.(\d+)\]\s*init\s*(\d+)/g;
|
|
256
|
+
while ((m = boundRe.exec(content)) !== null) {
|
|
257
|
+
results.push({
|
|
258
|
+
source: 'prism', file: relFile, name: m[1], type: 'bound',
|
|
259
|
+
value: `[${m[2]}..${m[3]}]`,
|
|
260
|
+
rawText: m[0].trim()
|
|
261
|
+
});
|
|
262
|
+
}
|
|
263
|
+
|
|
264
|
+
// Extract props from paired .props file
|
|
265
|
+
const propsPath = filePath.replace(/\.pm$/, '.props');
|
|
266
|
+
try {
|
|
267
|
+
if (fs.existsSync(propsPath)) {
|
|
268
|
+
const propsContent = fs.readFileSync(propsPath, 'utf8');
|
|
269
|
+
// Extract property thresholds: F<=9, F<=10, P>=0.95, etc.
|
|
270
|
+
const propRe = /([PR])([>=<]+)([\d.]+)\s*\[\s*(?:F([>=<]+)(\d+))?\s*(?:"([^"]+)")?\s*\]/g;
|
|
271
|
+
while ((m = propRe.exec(propsContent)) !== null) {
|
|
272
|
+
const propType = m[1]; // P or R
|
|
273
|
+
const propOp = m[2];
|
|
274
|
+
const propVal = m[3];
|
|
275
|
+
const stepOp = m[4] || null;
|
|
276
|
+
const stepVal = m[5] || null;
|
|
277
|
+
const label = m[6] || null;
|
|
278
|
+
|
|
279
|
+
if (stepVal) {
|
|
280
|
+
results.push({
|
|
281
|
+
source: 'prism', file: propsPath, name: label || `step_bound_${stepVal}`,
|
|
282
|
+
type: 'property', value: parseInt(stepVal, 10),
|
|
283
|
+
rawText: m[0].trim()
|
|
284
|
+
});
|
|
285
|
+
}
|
|
286
|
+
if (propVal && propType === 'P' && propOp !== '=?') {
|
|
287
|
+
results.push({
|
|
288
|
+
source: 'prism', file: propsPath,
|
|
289
|
+
name: label ? `prob_${label}` : `prob_threshold_${propVal}`,
|
|
290
|
+
type: 'property', value: parseFloat(propVal),
|
|
291
|
+
rawText: m[0].trim()
|
|
292
|
+
});
|
|
293
|
+
}
|
|
294
|
+
}
|
|
295
|
+
}
|
|
296
|
+
} catch (err) {
|
|
297
|
+
process.stderr.write(`[warn] Failed to parse PRISM props file ${propsPath}: ${err.message}\n`);
|
|
298
|
+
}
|
|
299
|
+
|
|
300
|
+
return results;
|
|
301
|
+
} catch (err) {
|
|
302
|
+
process.stderr.write(`[warn] Failed to parse PRISM file ${filePath}: ${err.message}\n`);
|
|
303
|
+
return [];
|
|
304
|
+
}
|
|
305
|
+
}
|
|
306
|
+
|
|
307
|
+
// ── Scanner ─────────────────────────────────────────────────────────────────
|
|
308
|
+
|
|
309
|
+
/**
|
|
310
|
+
* Scan all formal models under a root directory
|
|
311
|
+
* @param {string} root - Root directory (default: process.cwd())
|
|
312
|
+
* @returns {Array<object>} Flat array of all extracted assumptions
|
|
313
|
+
*/
|
|
314
|
+
function scanAllFormalModels(root) {
|
|
315
|
+
const baseDir = root || process.cwd();
|
|
316
|
+
const formalDir = path.join(baseDir, '.planning', 'formal');
|
|
317
|
+
|
|
318
|
+
// Check if .planning/formal/ directory exists
|
|
319
|
+
if (!fs.existsSync(formalDir) || !fs.statSync(formalDir).isDirectory()) {
|
|
320
|
+
process.stderr.write(`[warn] .planning/formal/ directory not found at ${baseDir} — returning empty results\n`);
|
|
321
|
+
return [];
|
|
322
|
+
}
|
|
323
|
+
|
|
324
|
+
const results = [];
|
|
325
|
+
|
|
326
|
+
// Scan TLA+ files (excluding TTrace files)
|
|
327
|
+
const tlaDir = path.join(formalDir, 'tla');
|
|
328
|
+
if (fs.existsSync(tlaDir)) {
|
|
329
|
+
const tlaFiles = fs.readdirSync(tlaDir).filter(f => f.endsWith('.tla') && !f.includes('_TTrace_'));
|
|
330
|
+
for (const f of tlaFiles) {
|
|
331
|
+
results.push(...extractTlaAssumptions(path.join(tlaDir, f)));
|
|
332
|
+
}
|
|
333
|
+
// Also scan cfg files
|
|
334
|
+
const cfgFiles = fs.readdirSync(tlaDir).filter(f => f.endsWith('.cfg'));
|
|
335
|
+
for (const f of cfgFiles) {
|
|
336
|
+
results.push(...extractTlaCfgValues(path.join(tlaDir, f)));
|
|
337
|
+
}
|
|
338
|
+
}
|
|
339
|
+
|
|
340
|
+
// Scan Alloy files
|
|
341
|
+
const alloyDir = path.join(formalDir, 'alloy');
|
|
342
|
+
if (fs.existsSync(alloyDir)) {
|
|
343
|
+
const alsFiles = fs.readdirSync(alloyDir).filter(f => f.endsWith('.als'));
|
|
344
|
+
for (const f of alsFiles) {
|
|
345
|
+
results.push(...extractAlloyAssumptions(path.join(alloyDir, f)));
|
|
346
|
+
}
|
|
347
|
+
}
|
|
348
|
+
|
|
349
|
+
// Scan PRISM files
|
|
350
|
+
const prismDir = path.join(formalDir, 'prism');
|
|
351
|
+
if (fs.existsSync(prismDir)) {
|
|
352
|
+
const pmFiles = fs.readdirSync(prismDir).filter(f => f.endsWith('.pm'));
|
|
353
|
+
for (const f of pmFiles) {
|
|
354
|
+
results.push(...extractPrismAssumptions(path.join(prismDir, f)));
|
|
355
|
+
}
|
|
356
|
+
}
|
|
357
|
+
|
|
358
|
+
return results;
|
|
359
|
+
}
|
|
360
|
+
|
|
361
|
+
// ── Tier Classification ─────────────────────────────────────────────────────
|
|
362
|
+
|
|
363
|
+
/**
|
|
364
|
+
* Classify an assumption into a priority tier (1, 2, or 3).
|
|
365
|
+
*
|
|
366
|
+
* Tier 1 (directly monitorable): Numeric constants with concrete values
|
|
367
|
+
* sourced from .cfg files or PRISM const with numeric value.
|
|
368
|
+
* Tier 2 (indirectly monitorable): Named invariants and assertions
|
|
369
|
+
* that can be checked via periodic probes.
|
|
370
|
+
* Tier 3 (not runtime-observable): Structural type constraints,
|
|
371
|
+
* state-space bounds, assumptions without concrete numeric values.
|
|
372
|
+
*
|
|
373
|
+
* @param {object} assumption - Assumption object with type and value fields
|
|
374
|
+
* @returns {number} 1, 2, or 3
|
|
375
|
+
*/
|
|
376
|
+
function classifyTier(assumption) {
|
|
377
|
+
const { type, value } = assumption;
|
|
378
|
+
const isNumeric = value !== null && value !== undefined && !isNaN(Number(value));
|
|
379
|
+
const tier1Types = ['constant', 'const', 'property', 'constraint'];
|
|
380
|
+
|
|
381
|
+
// NOTE: 'constraint' with numeric value is tier 1 because the numeric value
|
|
382
|
+
// represents a cardinality bound that can be monitored at runtime (e.g.,
|
|
383
|
+
// max replicas). Alloy constraints that express relational/structural rules
|
|
384
|
+
// without numeric values fall through to tier 3. This is a deliberate
|
|
385
|
+
// classification choice -- not all constraints are monitorable, only those
|
|
386
|
+
// with numeric thresholds.
|
|
387
|
+
|
|
388
|
+
// Tier 1: numeric value AND monitorable type
|
|
389
|
+
if (isNumeric && tier1Types.includes(type)) {
|
|
390
|
+
return 1;
|
|
391
|
+
}
|
|
392
|
+
|
|
393
|
+
// Edge case: 'assume' with numeric value is tier 1 (has a monitorable threshold)
|
|
394
|
+
if (type === 'assume' && isNumeric) {
|
|
395
|
+
return 1;
|
|
396
|
+
}
|
|
397
|
+
|
|
398
|
+
// Tier 2: invariants, assertions, facts (checkable via periodic probes)
|
|
399
|
+
if (['invariant', 'assert', 'fact'].includes(type)) {
|
|
400
|
+
return 2;
|
|
401
|
+
}
|
|
402
|
+
|
|
403
|
+
// Tier 3: everything else
|
|
404
|
+
return 3;
|
|
405
|
+
}
|
|
406
|
+
|
|
407
|
+
// ── Cross-reference ─────────────────────────────────────────────────────────
|
|
408
|
+
|
|
409
|
+
/**
|
|
410
|
+
* Cross-reference assumptions against debt ledger and observe handlers.
|
|
411
|
+
*
|
|
412
|
+
* Matching strategy (two-tier):
|
|
413
|
+
* 1. Primary: debt entry has formal_ref matching "spec:{file}:{name}"
|
|
414
|
+
* 2. Fallback fuzzy: If formal_ref is null, check if debt entry id or title
|
|
415
|
+
* contains the assumption name (case-insensitive substring match).
|
|
416
|
+
* This handles the common case where debt entries exist but weren't linked via formal_ref.
|
|
417
|
+
*
|
|
418
|
+
* Handler matching:
|
|
419
|
+
* - Numeric thresholds -> look for bash/internal handlers that emit gauge/counter
|
|
420
|
+
* - State invariants -> look for internal/bash handlers
|
|
421
|
+
* - If handler type exists but no specific config targets assumption -> partial
|
|
422
|
+
*
|
|
423
|
+
* @param {Array<object>} assumptions - Array of assumption objects
|
|
424
|
+
* @param {object} [options] - { root?: string }
|
|
425
|
+
* @returns {Array<object>} Assumptions with coverage classification
|
|
426
|
+
*/
|
|
427
|
+
function crossReference(assumptions, options = {}) {
|
|
428
|
+
const root = options.root || process.cwd();
|
|
429
|
+
|
|
430
|
+
// Load debt ledger
|
|
431
|
+
let debtEntries = [];
|
|
432
|
+
try {
|
|
433
|
+
const ledgerPath = path.join(root, '.planning', 'formal', 'debt.json');
|
|
434
|
+
const { readDebtLedger } = require('./debt-ledger.cjs');
|
|
435
|
+
const ledger = readDebtLedger(ledgerPath);
|
|
436
|
+
debtEntries = ledger.debt_entries || [];
|
|
437
|
+
} catch {
|
|
438
|
+
// No debt ledger available
|
|
439
|
+
}
|
|
440
|
+
|
|
441
|
+
// Load observe handlers (must require observe-handlers to register them first)
|
|
442
|
+
let handlerTypes = [];
|
|
443
|
+
try {
|
|
444
|
+
require('./observe-handlers.cjs');
|
|
445
|
+
const { listHandlers } = require('./observe-registry.cjs');
|
|
446
|
+
handlerTypes = listHandlers();
|
|
447
|
+
} catch {
|
|
448
|
+
// Observe handlers not available
|
|
449
|
+
}
|
|
450
|
+
|
|
451
|
+
return assumptions.map(assumption => {
|
|
452
|
+
let coverage = 'uncovered';
|
|
453
|
+
let matchSource = null;
|
|
454
|
+
|
|
455
|
+
// Check debt ledger coverage
|
|
456
|
+
for (const entry of debtEntries) {
|
|
457
|
+
// Primary match: formal_ref matches spec:{file}:{name}
|
|
458
|
+
if (entry.formal_ref) {
|
|
459
|
+
const refPattern = `spec:${assumption.file}:${assumption.name}`;
|
|
460
|
+
if (entry.formal_ref === refPattern || entry.formal_ref.includes(assumption.name)) {
|
|
461
|
+
coverage = 'covered';
|
|
462
|
+
matchSource = `debt:${entry.id}`;
|
|
463
|
+
break;
|
|
464
|
+
}
|
|
465
|
+
}
|
|
466
|
+
// Fallback fuzzy match: id or title contains assumption name
|
|
467
|
+
if (!entry.formal_ref || entry.formal_ref === null) {
|
|
468
|
+
const nameLower = assumption.name.toLowerCase();
|
|
469
|
+
const idMatch = entry.id && entry.id.toLowerCase().includes(nameLower);
|
|
470
|
+
const titleMatch = entry.title && entry.title.toLowerCase().includes(nameLower);
|
|
471
|
+
if (idMatch || titleMatch) {
|
|
472
|
+
coverage = 'covered';
|
|
473
|
+
matchSource = `debt:${entry.id}(fuzzy)`;
|
|
474
|
+
break;
|
|
475
|
+
}
|
|
476
|
+
}
|
|
477
|
+
}
|
|
478
|
+
|
|
479
|
+
// If not covered by debt, check observe handlers
|
|
480
|
+
if (coverage === 'uncovered' && handlerTypes.length > 0) {
|
|
481
|
+
// Numeric thresholds -> bash/internal handlers
|
|
482
|
+
if (['constant', 'bound', 'property', 'constraint'].includes(assumption.type)) {
|
|
483
|
+
if (handlerTypes.includes('bash') || handlerTypes.includes('internal')) {
|
|
484
|
+
coverage = 'partial';
|
|
485
|
+
matchSource = 'handler:bash/internal(generic)';
|
|
486
|
+
}
|
|
487
|
+
}
|
|
488
|
+
// State invariants -> internal/bash handlers
|
|
489
|
+
if (['invariant', 'assert'].includes(assumption.type)) {
|
|
490
|
+
if (handlerTypes.includes('internal') || handlerTypes.includes('bash')) {
|
|
491
|
+
coverage = 'partial';
|
|
492
|
+
matchSource = 'handler:internal/bash(generic)';
|
|
493
|
+
}
|
|
494
|
+
}
|
|
495
|
+
}
|
|
496
|
+
|
|
497
|
+
return { ...assumption, coverage, matchSource };
|
|
498
|
+
});
|
|
499
|
+
}
|
|
500
|
+
|
|
501
|
+
// ── Gap Report ──────────────────────────────────────────────────────────────

/**
 * Generate a gap report with proposed metrics and instrumentation snippets.
 *
 * Counts coverage buckets from the `coverage` field on each assumption, then
 * turns every non-covered assumption into a "gap" entry carrying a canonical
 * `qgsd_`-prefixed metric name, a tier (via classifyTier), a Prometheus metric
 * type, and a ready-to-paste instrumentation snippet (via generateSnippet).
 *
 * @param {Array<object>} crossRefResults - Assumptions with coverage classification
 * @returns {object} Structured report: { total_assumptions, covered, partial, uncovered, gaps }
 */
function generateGapReport(crossRefResults) {
  const total_assumptions = crossRefResults.length;
  const covered = crossRefResults.filter(a => a.coverage === 'covered').length;
  const partial = crossRefResults.filter(a => a.coverage === 'partial').length;
  const uncovered = crossRefResults.filter(a => a.coverage === 'uncovered').length;

  // Generate metric names with qgsd_ prefix and collision detection
  const metricNameCounts = new Map();
  const gaps = crossRefResults
    .filter(a => a.coverage !== 'covered')
    .map(a => {
      // Canonical metric name: qgsd_ + lowercase, runs of non-alnum collapsed
      // to '_', then a single stray leading/trailing '_' stripped.
      const baseName = 'qgsd_' + a.name.toLowerCase().replace(/[^a-z0-9]+/g, '_').replace(/^_|_$/g, '');
      metricNameCounts.set(baseName, (metricNameCounts.get(baseName) || 0) + 1);
      return { ...a, _baseName: baseName };
    });

  // Apply collision suffix, classify tier, pick metric type, build snippet
  for (const gap of gaps) {
    // Disambiguate duplicate base names by appending the source model.
    // NOTE(review): two same-named assumptions from the SAME source would
    // still collide -- assumed not to occur in practice; confirm upstream.
    gap.metric_name = metricNameCounts.get(gap._baseName) > 1
      ? `${gap._baseName}__${gap.source}`
      : gap._baseName;
    delete gap._baseName;

    // Classify tier
    gap.tier = classifyTier(gap);

    // Invariant/assert violations are counted events; every other assumption
    // type (constant, bound, property, assume, fact, constraint, unknown)
    // maps to a sampled gauge.
    gap.metric_type = ['invariant', 'assert'].includes(gap.type) ? 'counter' : 'gauge';

    // Generate instrumentation snippet
    gap.instrumentation_snippet = generateSnippet(gap);
  }

  // Sort by tier ascending (tier 1 first); Array#sort is stable, so original
  // order is preserved within the same tier.
  gaps.sort((a, b) => a.tier - b.tier);

  return {
    total_assumptions,
    covered,
    partial,
    uncovered,
    gaps
  };
}
|
|
567
|
+
|
|
568
|
+
/**
 * Generate an instrumentation code snippet for an uncovered assumption.
 * Uses the observe handler pattern from observe-handlers.cjs.
 *
 * Tier 1 gaps get direct Prometheus client code (Histogram for probability-like
 * property thresholds in (0, 1), Gauge otherwise); all other tiers get an
 * observe-handler JSON stanza (internal counter for violation counts, bash
 * source for everything else).
 *
 * @param {object} gap - Gap object with metric_name, metric_type, source, type, name
 * @returns {string} Code snippet
 */
function generateSnippet(gap) {
  const metric = gap.metric_name;
  const kind = gap.metric_type;
  const helpText = `Monitors ${gap.name} from ${gap.source} model`;
  const header = `// Observe handler for ${gap.name} (${gap.source}/${gap.type})`;

  // Tier 1: Prometheus gauge/histogram instrumentation snippets
  if (gap.tier === 1) {
    const threshold = (gap.value === null || gap.value === undefined) ? 0 : Number(gap.value);
    const probabilityLike = gap.type === 'property' && threshold > 0 && threshold < 1;

    // Probability-like property thresholds (0 < value < 1) get a histogram.
    if (probabilityLike) {
      return [
        `// Prometheus histogram for ${gap.name} (${gap.source}/${gap.type})`,
        `# HELP ${metric} ${helpText}`,
        `# TYPE ${metric} histogram`,
        `const ${metric} = new Histogram({`,
        `  name: '${metric}',`,
        `  help: '${helpText}',`,
        `  labelNames: ['model', 'file'],`,
        `  buckets: [0.5, 0.75, 0.9, 0.95, 0.99, 1.0]`,
        `});`,
        `${metric}.observe({ model: 'formal', file: '${gap.file}' }, ${threshold});`
      ].join('\n');
    }

    // Everything else at tier 1: plain Prometheus gauge.
    return [
      `// Prometheus gauge for ${gap.name} (${gap.source}/${gap.type})`,
      `# HELP ${metric} ${helpText}`,
      `# TYPE ${metric} gauge`,
      `const ${metric} = new Gauge({`,
      `  name: '${metric}',`,
      `  help: '${helpText}',`,
      `  labelNames: ['model', 'file']`,
      `});`,
      `${metric}.set({ model: 'formal', file: '${gap.file}' }, ${threshold});`
    ].join('\n');
  }

  // Tier 2/3 (or undefined tier -- defensive default): observe handler JSON
  if (kind === 'counter') {
    return [
      header,
      `// Add to observe config sources:`,
      `{`,
      `  "type": "internal",`,
      `  "label": "${metric}",`,
      `  "check": "formal-invariant",`,
      `  "config": {`,
      `    "metric": "${metric}",`,
      `    "type": "counter",`,
      `    "formal_ref": "spec:${gap.file}:${gap.name}",`,
      `    "description": "Counts violations of ${gap.name}"`,
      `  }`,
      `}`
    ].join('\n');
  }

  const threshold = (gap.value === null || gap.value === undefined) ? 'null' : JSON.stringify(gap.value);
  return [
    header,
    `// Add to observe config sources:`,
    `{`,
    `  "type": "bash",`,
    `  "label": "${metric}",`,
    `  "command": "echo '{\\"value\\": 0, \\"status\\": \\"ok\\"}'",`,
    `  "parser": "json",`,
    `  "config": {`,
    `    "metric": "${metric}",`,
    `    "type": "${kind}",`,
    `    "formal_ref": "spec:${gap.file}:${gap.name}",`,
    `    "threshold": ${threshold},`,
    `    "description": "${helpText}"`,
    `  }`,
    `}`
  ].join('\n');
}
|
|
648
|
+
|
|
649
|
+
/**
 * Generate a markdown gap report.
 *
 * Emits a summary table, then (when gaps exist) a per-gap table sorted the way
 * the report ordered them, followed by one fenced code block per gap with its
 * instrumentation snippet. Output is nondeterministic only in the generation
 * timestamp line.
 *
 * @param {object} report - Gap report object from generateGapReport
 * @returns {string} Markdown content
 */
function formatMarkdownReport(report) {
  const lines = [
    '# Assumption-to-Instrumentation Gap Report',
    '',
    `Generated: ${new Date().toISOString()}`,
    '',
    '## Summary',
    '',
    `| Metric | Count |`,
    `|--------|-------|`,
    `| Total assumptions | ${report.total_assumptions} |`,
    `| Covered | ${report.covered} |`,
    `| Partial | ${report.partial} |`,
    `| Uncovered | ${report.uncovered} |`,
    '',
  ];

  if (report.gaps.length === 0) {
    lines.push('All assumptions are covered by debt ledger entries or observe handlers.');
    return lines.join('\n');
  }

  lines.push('## Gaps');
  lines.push('');
  lines.push('| # | Source | Name | Type | Tier | Coverage | Proposed Metric | Metric Type |');
  lines.push('|---|--------|------|------|------|----------|-----------------|-------------|');

  report.gaps.forEach((gap, idx) => {
    lines.push(`| ${idx + 1} | ${gap.source} | ${gap.name} | ${gap.type} | ${gap.tier} | ${gap.coverage} | \`${gap.metric_name}\` | ${gap.metric_type} |`);
  });

  lines.push('');
  lines.push('## Instrumentation Snippets');
  lines.push('');

  for (const gap of report.gaps) {
    lines.push(`### ${gap.metric_name}`);
    lines.push('');
    // Snippets mix JavaScript (tier 1) and JSON-with-comments (tier 2/3), so
    // a "json" info string would mislabel them; use an untagged fence.
    lines.push('```');
    lines.push(gap.instrumentation_snippet);
    lines.push('```');
    lines.push('');
  }

  return lines.join('\n');
}
|
|
700
|
+
|
|
701
|
+
// ── CLI entrypoint ──────────────────────────────────────────────────────────
//
// Flags:
//   --json         print report JSON to stdout only (no markdown file)
//   --output=PATH  markdown destination (default .planning/formal/assumption-gaps.md)
//   --actionable   restrict to tier-1 assumptions before reporting
//   --verbose      print summary counts to stderr
// Exit code is 1 when any assumption remains uncovered, else 0.

if (require.main === module) {
  const args = process.argv.slice(2);
  const outputArg = args.find(a => a.startsWith('--output='));
  const jsonOnly = args.includes('--json');
  const verbose = args.includes('--verbose');
  const actionable = args.includes('--actionable');

  const root = process.cwd();
  const assumptions = scanAllFormalModels(root);
  let crossRefed = crossReference(assumptions, { root });

  // --actionable: filter to tier 1 assumptions only before gap report generation
  if (actionable) {
    crossRefed = crossRefed.filter(a => classifyTier(a) === 1);
  }

  const report = generateGapReport(crossRefed);

  if (jsonOnly) {
    process.stdout.write(JSON.stringify(report, null, 2) + '\n');
  } else {
    // Write markdown report. Strip only the flag prefix (slice, not
    // split('=')[1]) so output paths containing '=' are not truncated.
    const mdPath = outputArg
      ? outputArg.slice('--output='.length)
      : path.join(root, '.planning', 'formal', 'assumption-gaps.md');

    const mdContent = formatMarkdownReport(report);
    fs.mkdirSync(path.dirname(mdPath), { recursive: true });
    fs.writeFileSync(mdPath, mdContent);
    process.stderr.write(`[analyze-assumptions] Gap report written to ${mdPath}\n`);

    // Also output JSON to stdout
    process.stdout.write(JSON.stringify(report, null, 2) + '\n');
  }

  if (verbose) {
    process.stderr.write(`[analyze-assumptions] Total: ${report.total_assumptions}, Covered: ${report.covered}, Partial: ${report.partial}, Uncovered: ${report.uncovered}\n`);
  }

  // Exit code: 0 if no uncovered, 1 if uncovered gaps exist
  process.exitCode = report.uncovered > 0 ? 1 : 0;
}
|
|
746
|
+
|
|
747
|
+
// Public API surface: per-formalism extractors, the directory scanner,
// cross-referencing, and report generation/formatting. The CLI entrypoint
// above is the standalone path; these exports serve programmatic callers.
// NOTE(review): the extractor definitions are above this excerpt -- their
// exact signatures are not visible here.
module.exports = {
  classifyTier,
  extractTlaAssumptions,
  extractTlaCfgValues,
  extractAlloyAssumptions,
  extractPrismAssumptions,
  scanAllFormalModels,
  crossReference,
  generateGapReport,
  formatMarkdownReport
};
|