@svrnsec/pulse 0.3.1 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/svrnsec-pulse.js +7 -0
- package/index.d.ts +130 -0
- package/package.json +70 -25
- package/src/analysis/audio.js +213 -0
- package/src/analysis/coherence.js +502 -0
- package/src/analysis/heuristic.js +428 -0
- package/src/analysis/jitter.js +446 -0
- package/src/analysis/llm.js +472 -0
- package/src/analysis/populationEntropy.js +403 -0
- package/src/analysis/provider.js +248 -0
- package/src/analysis/trustScore.js +356 -0
- package/src/cli/args.js +36 -0
- package/src/cli/commands/scan.js +192 -0
- package/src/cli/runner.js +157 -0
- package/src/collector/adaptive.js +200 -0
- package/src/collector/bio.js +287 -0
- package/src/collector/canvas.js +239 -0
- package/src/collector/dram.js +203 -0
- package/src/collector/enf.js +311 -0
- package/src/collector/entropy.js +195 -0
- package/src/collector/gpu.js +245 -0
- package/src/collector/idleAttestation.js +480 -0
- package/src/collector/sabTimer.js +191 -0
- package/src/fingerprint.js +475 -0
- package/src/index.js +342 -0
- package/src/integrations/react-native.js +459 -0
- package/src/proof/challenge.js +249 -0
- package/src/proof/engagementToken.js +394 -0
- package/src/terminal.js +263 -0
- package/src/update-notifier.js +264 -0
|
@@ -0,0 +1,502 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @sovereign/pulse — Zero-Latency Second-Stage Coherence Analysis
|
|
3
|
+
*
|
|
4
|
+
* Runs entirely on data already collected by the entropy probe, bio
|
|
5
|
+
* collector, canvas fingerprinter, and audio analyser.
|
|
6
|
+
* Adds approximately 1–3 ms of CPU time. Zero WASM, zero network.
|
|
7
|
+
*
|
|
8
|
+
* Architecture:
|
|
9
|
+
* Stage 1 — classifyJitter() → rawScore [0, 1]
|
|
10
|
+
* Stage 2 — runHeuristicEngine() → netAdjustment (physics coherence)
|
|
11
|
+
* Stage 3 — runCoherenceAnalysis() → THIS MODULE
|
|
12
|
+
* ↳ small score refinement [-0.15, +0.18]
|
|
13
|
+
* ↳ dynamic threshold [0.55, 0.67]
|
|
14
|
+
* ↳ hard override 'vm' | null
|
|
15
|
+
*
|
|
16
|
+
* Why a third stage?
|
|
17
|
+
* Stage 1 checks individual metrics in isolation.
|
|
18
|
+
* Stage 2 checks pairwise relationships between metrics.
|
|
19
|
+
* Stage 3 checks STRUCTURAL properties of the entire time-series and
|
|
20
|
+
* signal evolution that require the full dataset to evaluate — and that
|
|
21
|
+
* a sophisticated attacker cannot spoof without also spoofing every
|
|
22
|
+
* other correlated signal simultaneously.
|
|
23
|
+
*
|
|
24
|
+
* The six checks cover orthogonal signal dimensions so they are hard to
|
|
25
|
+
* spoof together even if one is individually defeated:
|
|
26
|
+
*
|
|
27
|
+
* 1. Timing distinctness — frequency domain (quantization density)
|
|
28
|
+
* 2. AC decay shape — temporal domain (Brownian vs harmonic)
|
|
29
|
+
* 3. Chunk CV stability — stationarity axis (thermal non-stationarity)
|
|
30
|
+
* 4. Level-dependent noise — noise model axis (multiplicative vs additive)
|
|
31
|
+
* 5. Batch convergence — measurement stability (adaptive mode)
|
|
32
|
+
* 6. Phase trajectory — EJR monotonicity (thermal sequence integrity)
|
|
33
|
+
*
|
|
34
|
+
* Dynamic threshold:
|
|
35
|
+
* The evidence weight reflects how much data was actually collected.
|
|
36
|
+
* An early-exit proof with 50 iterations and no bio activity has far less
|
|
37
|
+
* support than a 200-iteration proof with bio, audio, and phased data.
|
|
38
|
+
* The threshold rises automatically as evidence decreases:
|
|
39
|
+
* Full evidence → threshold 0.55 (standard)
|
|
40
|
+
* Minimal proof → threshold 0.67 (conservative gate)
|
|
41
|
+
* This prevents low-evidence proofs from passing the same bar as full ones.
|
|
42
|
+
*/
|
|
43
|
+
|
|
44
|
+
// ---------------------------------------------------------------------------
|
|
45
|
+
// runCoherenceAnalysis
|
|
46
|
+
// ---------------------------------------------------------------------------
|
|
47
|
+
|
|
48
|
+
/**
|
|
49
|
+
* @param {object} p
|
|
50
|
+
* @param {number[]} p.timings — raw timing array (already collected)
|
|
51
|
+
* @param {object} p.jitter — JitterAnalysis from classifyJitter()
|
|
52
|
+
* @param {object|null} p.phases — phased entropy result (optional)
|
|
53
|
+
* @param {object[]|null} p.batches — adaptive batch snapshots (optional)
|
|
54
|
+
* @param {object} p.bio — bio snapshot
|
|
55
|
+
* @param {object} p.canvas — canvas fingerprint
|
|
56
|
+
* @param {object} p.audio — audio jitter result
|
|
57
|
+
* @returns {CoherenceReport}
|
|
58
|
+
*/
|
|
59
|
+
export function runCoherenceAnalysis({ timings, jitter, phases, batches, bio, canvas, audio }) {
  if (!timings || timings.length < 10) {
    // Insufficient data — return a conservative threshold and no adjustments.
    return _empty(0.64);
  }

  const checks = []; // anomalies found (each may carry a penalty)
  const bonuses = []; // physical properties confirmed (each carries a bonus)
  let penalty = 0;
  let bonus = 0;
  let hardOverride = null; // 'vm' | null

  const n = timings.length;
  // Missing autocorrelation data degrades gracefully: `??` yields {}, so each
  // lag below reads as 0 and Check 2 simply finds neither decay nor revival.
  const ac = jitter.autocorrelations ?? {};

  // ── Check 1: Timing Distinctness Ratio ──────────────────────────────────────
  // VM quantized timers repeat the same integer-millisecond values.
  // Real silicon at sub-ms resolution produces mostly unique values.
  //
  // Bin width: 0.2 ms (matches detectQuantizationEntropy for consistency).
  // Normalized by sample count → iteration-count-independent.
  //
  // distinctRatio > 0.65 → sub-ms resolution confirmed → physical bonus
  // distinctRatio < 0.30 → heavy quantization → VM penalty
  // distinctRatio < 0.45 at n ≥ 100 → mild VM penalty
  {
    const bins = new Set(timings.map(t => Math.round(t / 0.2)));
    const distinctRatio = bins.size / n;

    // Below 50 samples the ratio is too noisy to act on either way.
    if (n >= 50) {
      if (distinctRatio > 0.65) {
        bonuses.push({
          id: 'HIGH_TIMING_DISTINCTNESS',
          label: 'Timer produces mostly unique values — sub-ms resolution confirmed',
          detail: `ratio=${distinctRatio.toFixed(3)} (${bins.size}/${n} distinct 0.2ms bins)`,
          value: 0.06,
        });
        bonus += 0.06;

      } else if (distinctRatio < 0.30) {
        // Severely quantized — VM with integer-ms timer emulation
        checks.push({
          id: 'LOW_TIMING_DISTINCTNESS',
          label: 'Heavy timer quantization — integer-ms VM timer suspected',
          detail: `ratio=${distinctRatio.toFixed(3)} (only ${bins.size}/${n} distinct 0.2ms bins)`,
          severity: 'high',
          penalty: 0.12,
        });
        penalty += 0.12;

      } else if (distinctRatio < 0.45 && n >= 100) {
        // At 100+ iterations we expect more spread; below 0.45 is suspicious
        checks.push({
          id: 'BORDERLINE_TIMING_DISTINCTNESS',
          label: 'Below-expected timer resolution — coarse-grained timer suspected',
          detail: `ratio=${distinctRatio.toFixed(3)} at n=${n}`,
          severity: 'medium',
          penalty: 0.05,
        });
        penalty += 0.05;
      }
    }
  }

  // ── Check 2: Autocorrelation Decay Shape ────────────────────────────────────
  // Genuine Brownian noise decays monotonically: lag1 > lag2 > lag5 > lag10 > …
  // VM scheduler rhythms create harmonic revivals: lag25 or lag50 elevated
  // above lag10 because steal-time bursts recur at the scheduler quantum period.
  //
  // This is structurally orthogonal to the Picket Fence detector (stage 2),
  // which checks absolute magnitude — this checks the SHAPE of the decay curve.
  {
    const l1 = Math.abs(ac.lag1 ?? 0);
    const l2 = Math.abs(ac.lag2 ?? 0);
    // NOTE(review): l3 is reported in the bonus detail string below but does
    // not participate in the isBrownianDecay predicate — confirm intentional.
    const l3 = Math.abs(ac.lag3 ?? 0);
    const l5 = Math.abs(ac.lag5 ?? 0);
    const l10 = Math.abs(ac.lag10 ?? 0);
    const l25 = Math.abs(ac.lag25 ?? 0);
    const l50 = Math.abs(ac.lag50 ?? 0);

    // Strict Brownian decay: each successive lag is no higher than the previous
    // (+0.03 tolerance for estimation noise)
    const isBrownianDecay =
      l1 < 0.20 &&
      l2 <= l1 + 0.03 &&
      l5 <= l2 + 0.03 &&
      l10 <= l5 + 0.03 &&
      l25 <= l10 + 0.05 &&
      l50 <= l10 + 0.05;

    // Harmonic revival: a long lag significantly exceeds medium lags
    // (scheduler quantum footprint)
    const revival25 = l25 > l5 + 0.12 && l25 > 0.18;
    const revival50 = l50 > l5 + 0.12 && l50 > 0.18;

    // Bonus requires the tighter l1 < 0.15 bound on top of the decay shape.
    if (isBrownianDecay && l1 < 0.15) {
      bonuses.push({
        id: 'BROWNIAN_DECAY_SHAPE',
        label: 'AC decays monotonically at all measured lags — genuine Brownian noise structure',
        detail: `lag1=${l1.toFixed(3)} lag3=${l3.toFixed(3)} lag5=${l5.toFixed(3)} lag10=${l10.toFixed(3)} lag50=${l50.toFixed(3)}`,
        value: 0.09,
      });
      bonus += 0.09;
    }

    // Note: decay bonus and revival penalty are not mutually exclusive in
    // code, though the thresholds make co-occurrence practically impossible.
    if (revival25 || revival50) {
      const peakLag = revival25 ? 25 : 50;
      const peakVal = revival25 ? l25 : l50;
      checks.push({
        id: 'HARMONIC_AUTOCORR_REVIVAL',
        label: `Long-lag AC revival at lag ${peakLag} — VM scheduler harmonic footprint`,
        detail: `lag5=${l5.toFixed(3)} lag${peakLag}=${peakVal.toFixed(3)} Δ=${(peakVal - l5).toFixed(3)}`,
        severity: 'high',
        penalty: 0.10,
      });
      penalty += 0.10;
    }
  }

  // ── Check 3: Chunk CV Stability (temporal stationarity test) ─────────────────
  // Split the timing series into 4 equal windows and compute CV per window.
  // Real hardware: CV varies across chunks — CPU temperature changes, workload
  // varies, OS scheduling fluctuates — making the process non-stationary.
  // VM hypervisor: CV is nearly identical in every chunk because the hypervisor's
  // scheduling behaviour is constant — a stationary process.
  //
  // Metric: CV of the 4 chunk CVs (CV-of-CVs).
  //   > 0.15 → non-stationary noise → physical bonus
  //   < 0.06 → suspiciously constant → VM penalty
  if (n >= 40) {
    // floor(n/4) per chunk: up to 3 trailing samples are dropped.
    const chunkSize = Math.floor(n / 4);
    const chunkCVs = [];

    for (let c = 0; c < 4; c++) {
      const chunk = timings.slice(c * chunkSize, (c + 1) * chunkSize);
      const m = chunk.reduce((a, b) => a + b, 0) / chunk.length;
      const s = Math.sqrt(chunk.reduce((acc, v) => acc + (v - m) ** 2, 0) / chunk.length);
      if (m > 0) chunkCVs.push(s / m);
    }

    // Require all 4 chunks to have positive means; otherwise skip entirely.
    if (chunkCVs.length === 4) {
      const cvMean = chunkCVs.reduce((a, b) => a + b, 0) / 4;
      const cvStd = Math.sqrt(chunkCVs.reduce((s, v) => s + (v - cvMean) ** 2, 0) / 4);
      const cvOfCVs = cvMean > 1e-9 ? cvStd / cvMean : 0;

      if (cvOfCVs > 0.15) {
        bonuses.push({
          id: 'TEMPORAL_NON_STATIONARITY',
          label: 'Noise level varies across time windows — thermal non-stationarity confirmed',
          detail: `CV-of-CVs=${cvOfCVs.toFixed(3)} windows=[${chunkCVs.map(v => v.toFixed(3)).join(', ')}]`,
          value: 0.07,
        });
        bonus += 0.07;

      } else if (cvOfCVs < 0.06 && cvMean > 0.01) {
        // cvMean > 0.01 guard: an essentially-zero CV everywhere is degenerate
        // data, not evidence of stationarity.
        checks.push({
          id: 'STATIONARY_NOISE_PROCESS',
          label: 'Noise level constant across all time windows — hypervisor stationarity suspected',
          detail: `CV-of-CVs=${cvOfCVs.toFixed(3)} windows=[${chunkCVs.map(v => v.toFixed(3)).join(', ')}]`,
          severity: 'high',
          penalty: 0.09,
        });
        penalty += 0.09;
      }
    }
  }

  // ── Check 4: Level-Dependent Volatility (noise model test) ──────────────────
  // Thermal noise is multiplicative: the physical process that adds jitter
  // (electron thermal motion, gate capacitance variation) scales with the
  // operating conditions that also drive longer execution times.
  // Consequence: larger timing values tend to have more incremental variance.
  // This produces a positive Pearson correlation between:
  //   — timing[i]               (level — how long that iteration took)
  //   — |timing[i+1]-timing[i]| (volatility — how much it changed)
  //
  // VM hypervisor noise is additive: a constant scheduling jitter is applied
  // regardless of the iteration's timing level → near-zero correlation.
  //
  //   r > 0.15          → multiplicative noise → physical bonus
  //   r < 0.04 at n ≥ 80 → additive noise → VM penalty
  if (n >= 30) {
    const levels = timings.slice(0, n - 1);
    const deltas = timings.slice(1).map((v, i) => Math.abs(v - timings[i]));
    const lMean = levels.reduce((a, b) => a + b, 0) / levels.length;
    const dMean = deltas.reduce((a, b) => a + b, 0) / deltas.length;

    // Pearson correlation between level and next-step volatility.
    let cov = 0, lVar = 0, dVar = 0;
    for (let i = 0; i < levels.length; i++) {
      const ld = levels[i] - lMean;
      const dd = deltas[i] - dMean;
      cov += ld * dd;
      lVar += ld * ld;
      dVar += dd * dd;
    }
    const denom = Math.sqrt(lVar * dVar);
    const levelVolCorr = denom < 1e-14 ? 0 : cov / denom;

    if (levelVolCorr > 0.15) {
      bonuses.push({
        id: 'MULTIPLICATIVE_NOISE_MODEL',
        label: 'Timing variance scales with level — multiplicative thermal noise confirmed',
        detail: `level-volatility r=${levelVolCorr.toFixed(3)} (expected >0.15 for real silicon)`,
        value: 0.07,
      });
      bonus += 0.07;

    } else if (levelVolCorr < 0.04 && n >= 80) {
      // Enough samples to trust the estimate; near-zero = additive hypervisor noise
      checks.push({
        id: 'ADDITIVE_NOISE_MODEL',
        label: 'Timing variance independent of level — additive hypervisor noise suspected',
        detail: `level-volatility r=${levelVolCorr.toFixed(3)} (expected >0.15 for real silicon)`,
        severity: 'medium',
        penalty: 0.07,
      });
      penalty += 0.07;
    }
  }

  // ── Check 5: Batch Convergence Variance (adaptive mode only) ─────────────────
  // In adaptive mode each batch of 25 iterations produces a vmConf estimate.
  // Real hardware: these estimates wander batch-to-batch because the underlying
  // physical source is genuinely stochastic.
  // VM hypervisor: estimates lock in immediately — deterministic scheduling means
  // each batch produces essentially the same picture.
  //
  // Most diagnostic in the ambiguous zone (vmConf 0.25–0.70) where stability
  // is suspicious. A clearly obvious VM (vmConf 0.90 every batch) is expected
  // to be stable. A borderline device (vmConf 0.45 across 6 identical batches)
  // is exhibiting VM-like stability despite claiming ambiguity.
  //
  // Uses only batches collected after iteration 75 to avoid early-sample noise.
  if (batches && batches.length >= 4) {
    const stableBatches = batches.filter(b => b.iterations >= 75);

    if (stableBatches.length >= 3) {
      const vmConfs = stableBatches.map(b => b.vmConf);
      const hwConfs = stableBatches.map(b => b.hwConf);
      const vmMean = vmConfs.reduce((a, b) => a + b, 0) / vmConfs.length;
      const hwMean = hwConfs.reduce((a, b) => a + b, 0) / hwConfs.length;
      const vmStd = Math.sqrt(
        vmConfs.reduce((s, v) => s + (v - vmMean) ** 2, 0) / vmConfs.length
      );

      // Only meaningful in the ambiguous zone
      const isAmbiguous = vmMean > 0.25 && vmMean < 0.70 && hwMean < 0.55;

      if (isAmbiguous) {
        if (vmStd > 0.06) {
          bonuses.push({
            id: 'SIGNAL_FLUCTUATES_STOCHASTICALLY',
            label: 'Batch-by-batch signal variance confirms genuine stochastic noise source',
            detail: `vmConf σ=${vmStd.toFixed(3)} across ${stableBatches.length} stable batches (μ=${vmMean.toFixed(3)})`,
            value: 0.05,
          });
          bonus += 0.05;

        } else if (vmStd < 0.025 && stableBatches.length >= 4) {
          // Penalty needs one more stable batch (≥ 4) than the bonus path (≥ 3).
          checks.push({
            id: 'SIGNAL_DETERMINISTICALLY_STABLE',
            label: 'Signal locked-in immediately — deterministic hypervisor suspected',
            detail: `vmConf σ=${vmStd.toFixed(3)} across ${stableBatches.length} stable batches (μ=${vmMean.toFixed(3)})`,
            severity: 'medium',
            penalty: 0.06,
          });
          penalty += 0.06;
        }
      }
    }
  }

  // ── Check 6: Phase Entropy Trajectory ────────────────────────────────────────
  // The EJR (hot_QE / cold_QE) captures the endpoint ratio, but misses the
  // intermediate trajectory. We additionally verify monotonic growth:
  //   cold_QE < load_QE < hot_QE
  //
  // If all three phases are available, monotonic growth is a strong bonus.
  // Non-monotonic trajectory (entropy dropped then recovered) is suspicious.
  //
  // HARD OVERRIDE: if EJR ≥ 1.08 (claims entropy grew from cold to hot)
  // but cold_QE ≥ hot_QE (entropy provably didn't grow), the proof is
  // mathematically self-contradictory → forgery attempt.
  if (phases) {
    const coldQE = phases.cold?.qe ?? null;
    const loadQE = phases.load?.qe ?? null;
    const hotQE = phases.hot?.qe ?? null;
    const ejr = phases.entropyJitterRatio ?? null;

    // Mathematical contradiction: EJR = hot_QE / cold_QE, so EJR ≥ 1.08
    // implies hot_QE ≥ 1.08 × cold_QE > cold_QE. Violation = tampered proof.
    if (ejr !== null && ejr >= 1.08 && coldQE !== null && hotQE !== null) {
      if (coldQE >= hotQE) {
        hardOverride = 'vm';
        checks.push({
          id: 'EJR_QE_CONTRADICTION',
          label: 'HARD OVERRIDE: EJR claims entropy growth but cold_QE ≥ hot_QE — mathematically impossible',
          detail: `ejr=${ejr.toFixed(4)} cold_QE=${coldQE.toFixed(3)} hot_QE=${hotQE.toFixed(3)} (ejr=hot/cold requires hot>cold)`,
          severity: 'critical',
          penalty: 0.60, // overwhelms any bonus
        });
        penalty += 0.60;
      }
    }

    // Monotonic trajectory check (requires all three phases)
    if (!hardOverride && coldQE !== null && loadQE !== null && hotQE !== null) {
      if (coldQE < loadQE && loadQE < hotQE) {
        bonuses.push({
          id: 'MONOTONIC_ENTROPY_TRAJECTORY',
          label: 'QE increased continuously cold→load→hot — unbroken thermal feedback confirmed',
          detail: `${coldQE.toFixed(3)} → ${loadQE.toFixed(3)} → ${hotQE.toFixed(3)}`,
          value: 0.09,
        });
        bonus += 0.09;

      } else if (coldQE >= loadQE || loadQE >= hotQE) {
        // Entropy stalled or reversed mid-run — unusual for real silicon
        // (this condition is the exact negation of the branch above, so the
        // else-if always takes it; kept explicit for readability).
        checks.push({
          id: 'NON_MONOTONIC_ENTROPY_TRAJECTORY',
          label: 'Entropy did not increase monotonically across load phases',
          detail: `cold=${coldQE.toFixed(3)} load=${loadQE.toFixed(3)} hot=${hotQE.toFixed(3)}`,
          severity: 'medium',
          penalty: 0.06,
        });
        penalty += 0.06;
      }
    }
  }

  // ── Dynamic threshold ─────────────────────────────────────────────────────────
  // A proof built from more evidence earns a more permissive (lower) passing bar.
  // Weights:
  //   iterations (0→200): up to 0.65 of the evidence score
  //   phased collection:  +0.15 (gold standard of thermal measurement)
  //   bio activity:       +0.10 (human presence confirmed)
  //   audio available:    +0.05 (additional timing channel)
  //   canvas available:   +0.05 (hardware renderer identified)
  //
  // dynamicThreshold = 0.55 + (1 − evidenceWeight) × 0.12
  //   Full evidence → 0.55 (standard gate)
  //   Minimal proof → 0.67 (tightened gate for low-evidence submissions)
  const iterFraction = Math.min(1.0, n / 200);
  const phasedBonus = phases ? 0.15 : 0.0;
  const bioBonus = bio?.hasActivity ? 0.10 : 0.0;
  const audioBonus = audio?.available ? 0.05 : 0.0;
  const canvasBonus = canvas?.available ? 0.05 : 0.0;

  const evidenceWeight = Math.min(1.0,
    iterFraction * 0.65 + phasedBonus + bioBonus + audioBonus + canvasBonus
  );

  const dynamicThreshold = +(0.55 + (1 - evidenceWeight) * 0.12).toFixed(4);

  // ── Stage-3 caps ─────────────────────────────────────────────────────────────
  // Stage 3 is a REFINEMENT, not the primary classifier. The caps are smaller
  // than stage 2 to prevent triple-compounding across all three stages on
  // legitimate hardware with multiple marginal-but-not-damning signals.
  //
  // NOTE(review): the 0.60 contradiction penalty is capped to 0.15 here too;
  // presumably the separate hardOverride flag is what actually forces the
  // rejection downstream — confirm against the caller in trustScore.js.
  const totalPenalty = Math.min(0.15, penalty); // hard floor: stage 3 can't reject alone
  const totalBonus = Math.min(0.18, bonus);

  return {
    penalty: totalPenalty,
    bonus: totalBonus,
    netAdjustment: +(totalBonus - totalPenalty).toFixed(4),
    checks,
    bonuses,
    hardOverride, // 'vm' | null
    dynamicThreshold, // [0.55, 0.67]
    evidenceWeight: +evidenceWeight.toFixed(4),
    coherenceFlags: checks.map(c => c.id),
    physicalFlags: bonuses.map(b => b.id),
  };
}
|
|
433
|
+
|
|
434
|
+
// ---------------------------------------------------------------------------
|
|
435
|
+
// computeServerDynamicThreshold
|
|
436
|
+
// ---------------------------------------------------------------------------
|
|
437
|
+
|
|
438
|
+
/**
|
|
439
|
+
* Server-side recomputation of the dynamic threshold.
|
|
440
|
+
* The server NEVER trusts the client's dynamicThreshold value; it recomputes
|
|
441
|
+
* from known payload fields.
|
|
442
|
+
*
|
|
443
|
+
* @param {object} payload - validated ProofPayload
|
|
444
|
+
* @returns {number} - minimum passing score for this proof [0.50, 0.62]
|
|
445
|
+
*/
|
|
446
|
+
export function computeServerDynamicThreshold(payload) {
  const signals = payload?.signals;

  // Evidence indicators, each contributing a fixed weight to the total.
  const iterations = signals?.entropy?.iterations ?? 0;
  const iterFraction = Math.min(1.0, iterations / 200);

  // Accumulate the evidence score term by term (same order as the client's
  // computation so the floating-point result matches exactly).
  let weight = iterFraction * 0.65;
  if (payload?.heuristic?.entropyJitterRatio != null) weight += 0.15; // phased collection present
  if (signals?.bio?.hasActivity === true) weight += 0.10;             // human presence confirmed
  if (signals?.audio?.available === true) weight += 0.05;             // extra timing channel
  if (signals?.canvas?.available === true) weight += 0.05;            // renderer identified

  const evidenceWeight = Math.min(1.0, weight);

  // Server threshold: [0.50, 0.62]
  // Slightly more lenient than client [0.55, 0.67] because the server already
  // applies minJitterScore as an independent check. The dynamic component
  // adds an ADDITIONAL evidence-proportional tightening on top.
  return +(0.50 + (1 - evidenceWeight) * 0.12).toFixed(4);
}
|
|
473
|
+
|
|
474
|
+
// ---------------------------------------------------------------------------
|
|
475
|
+
// Internal helpers
|
|
476
|
+
// ---------------------------------------------------------------------------
|
|
477
|
+
|
|
478
|
+
/**
 * Builds a neutral CoherenceReport: zero adjustments, no flags, no override —
 * carrying only the supplied conservative threshold. Used when too little
 * timing data was collected to run any structural check.
 *
 * @param {number} threshold - dynamic threshold to report unchanged
 * @returns {CoherenceReport}
 */
function _empty(threshold) {
  const report = {
    penalty: 0,
    bonus: 0,
    netAdjustment: 0,
    checks: [],
    bonuses: [],
    hardOverride: null,
    dynamicThreshold: threshold,
    evidenceWeight: 0,
    coherenceFlags: [],
    physicalFlags: [],
  };
  return report;
}
|
|
489
|
+
|
|
490
|
+
/**
|
|
491
|
+
* @typedef {object} CoherenceReport
|
|
492
|
+
* @property {number} penalty - total score penalty [0, 0.15]
|
|
493
|
+
* @property {number} bonus - total score bonus [0, 0.18]
|
|
494
|
+
* @property {number} netAdjustment - bonus − penalty [-0.15, +0.18]
|
|
495
|
+
* @property {object[]} checks - anomalies found (with penalty values)
|
|
496
|
+
* @property {object[]} bonuses - physical properties confirmed
|
|
497
|
+
* @property {'vm'|null} hardOverride - overrides score when set
|
|
498
|
+
* @property {number} dynamicThreshold - computed passing threshold [0.55, 0.67]
|
|
499
|
+
* @property {number} evidenceWeight - how much evidence was collected [0, 1]
|
|
500
|
+
* @property {string[]} coherenceFlags - check IDs for logging
|
|
501
|
+
* @property {string[]} physicalFlags - bonus IDs for logging
|
|
502
|
+
*/
|