@svrnsec/pulse 0.7.0 → 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -21
- package/README.md +883 -782
- package/SECURITY.md +86 -86
- package/bin/svrnsec-pulse.js +7 -7
- package/dist/{pulse.cjs.js → pulse.cjs} +6378 -6419
- package/dist/pulse.cjs.map +1 -0
- package/dist/pulse.esm.js +6379 -6420
- package/dist/pulse.esm.js.map +1 -1
- package/index.d.ts +895 -846
- package/package.json +185 -184
- package/pkg/pulse_core.js +174 -173
- package/src/analysis/audio.js +213 -213
- package/src/analysis/authenticityAudit.js +408 -393
- package/src/analysis/coherence.js +502 -502
- package/src/analysis/coordinatedBehavior.js +825 -804
- package/src/analysis/heuristic.js +428 -428
- package/src/analysis/jitter.js +446 -446
- package/src/analysis/llm.js +473 -472
- package/src/analysis/populationEntropy.js +404 -403
- package/src/analysis/provider.js +248 -248
- package/src/analysis/refraction.js +392 -391
- package/src/analysis/trustScore.js +356 -356
- package/src/cli/args.js +36 -36
- package/src/cli/commands/scan.js +192 -192
- package/src/cli/runner.js +157 -157
- package/src/collector/adaptive.js +200 -200
- package/src/collector/bio.js +297 -287
- package/src/collector/canvas.js +247 -239
- package/src/collector/dram.js +203 -203
- package/src/collector/enf.js +311 -311
- package/src/collector/entropy.js +195 -195
- package/src/collector/gpu.js +248 -245
- package/src/collector/idleAttestation.js +480 -480
- package/src/collector/sabTimer.js +189 -191
- package/src/fingerprint.js +475 -475
- package/src/index.js +342 -342
- package/src/integrations/react-native.js +462 -459
- package/src/integrations/react.js +184 -185
- package/src/middleware/express.js +155 -155
- package/src/middleware/next.js +174 -175
- package/src/proof/challenge.js +249 -249
- package/src/proof/engagementToken.js +426 -394
- package/src/proof/fingerprint.js +268 -268
- package/src/proof/validator.js +82 -142
- package/src/registry/serializer.js +349 -349
- package/src/terminal.js +263 -263
- package/src/update-notifier.js +259 -264
- package/dist/pulse.cjs.js.map +0 -1
package/src/collector/dram.js
CHANGED
|
@@ -1,203 +1,203 @@
|
|
|
1
|
-
/**
|
|
2
|
-
* @svrnsec/pulse — DRAM Refresh Cycle Detector
|
|
3
|
-
*
|
|
4
|
-
* DDR4 DRAM refreshes every 7.8 ms (tREFI per JEDEC JESD79-4). During a
|
|
5
|
-
* refresh, the memory controller stalls all access requests for ~350 ns.
|
|
6
|
-
* In a tight sequential memory access loop this appears as a periodic
|
|
7
|
-
* slowdown — detectable as a ~128Hz peak in the autocorrelation of access
|
|
8
|
-
* timings.
|
|
9
|
-
*
|
|
10
|
-
* Virtual machines do not have physical DRAM. The hypervisor's memory
|
|
11
|
-
* subsystem does not reproduce the refresh cycle because:
|
|
12
|
-
* 1. The guest never touches real DRAM directly — there is always a
|
|
13
|
-
* hypervisor-controlled indirection layer.
|
|
14
|
-
* 2. EPT/NPT (Extended/Nested Page Tables) absorb the timing.
|
|
15
|
-
* 3. The hypervisor's memory balloon driver further smooths access latency.
|
|
16
|
-
*
|
|
17
|
-
* What we detect
|
|
18
|
-
* ──────────────
|
|
19
|
-
* refreshPeriodMs estimated DRAM refresh period (should be ~7.8ms on real DDR4)
|
|
20
|
-
* refreshPresent true if the ~7.8ms periodicity is statistically significant
|
|
21
|
-
* peakLag autocorrelation lag with the highest power (units: sample index)
|
|
22
|
-
* peakPower autocorrelation power at peakLag (0–1)
|
|
23
|
-
* verdict 'dram' | 'virtual' | 'ambiguous'
|
|
24
|
-
*
|
|
25
|
-
* Calibration
|
|
26
|
-
* ───────────
|
|
27
|
-
* We allocate a buffer large enough to exceed all CPU caches (typically
|
|
28
|
-
* L3 = 8–32 MB on consumer parts). Sequential reads then go to DRAM, not
|
|
29
|
-
* cache. The refresh stall is only visible when we're actually hitting DRAM —
|
|
30
|
-
* a cache-resident access loop shows no refresh signal.
|
|
31
|
-
*
|
|
32
|
-
* Buffer size: 64 MB — comfortably above L3 on all tested platforms.
|
|
33
|
-
* Sampling interval: ~1 ms per iteration (chosen to resolve 7.8ms at ≥8 pts).
|
|
34
|
-
* Total probe time: ~400 ms — well within the fingerprint collection window.
|
|
35
|
-
*/
|
|
36
|
-
|
|
37
|
-
const DRAM_REFRESH_MS = 7.8; // JEDEC DDR4 nominal
|
|
38
|
-
const DRAM_REFRESH_SLACK = 1.5; // ±1.5 ms acceptable range for real hardware
|
|
39
|
-
const BUFFER_MB = 64; // must exceed L3 cache
|
|
40
|
-
const PROBE_ITERATIONS = 400; // ~400 ms total
|
|
41
|
-
|
|
42
|
-
/* ─── collectDramTimings ─────────────────────────────────────────────────── */
|
|
43
|
-
|
|
44
|
-
/**
|
|
45
|
-
* @param {object} [opts]
|
|
46
|
-
* @param {number} [opts.iterations=400]
|
|
47
|
-
* @param {number} [opts.bufferMb=64]
|
|
48
|
-
* @returns {{ timings: number[], refreshPeriodMs: number|null,
|
|
49
|
-
* refreshPresent: boolean, peakLag: number, peakPower: number,
|
|
50
|
-
* verdict: string }}
|
|
51
|
-
*/
|
|
52
|
-
export function collectDramTimings(opts = {}) {
|
|
53
|
-
const {
|
|
54
|
-
iterations = PROBE_ITERATIONS,
|
|
55
|
-
bufferMb = BUFFER_MB,
|
|
56
|
-
} = opts;
|
|
57
|
-
|
|
58
|
-
// ── Allocate cache-busting buffer ────────────────────────────────────────
|
|
59
|
-
const nElements = (bufferMb * 1024 * 1024) / 8; // 64-bit doubles
|
|
60
|
-
let buf;
|
|
61
|
-
|
|
62
|
-
try {
|
|
63
|
-
buf = new Float64Array(nElements);
|
|
64
|
-
// Touch every cache line to ensure OS actually maps the pages
|
|
65
|
-
const stride = 64 / 8; // 64-byte cache lines, 8 bytes per element
|
|
66
|
-
for (let i = 0; i < nElements; i += stride) buf[i] = i;
|
|
67
|
-
} catch {
|
|
68
|
-
// Allocation failure (memory constrained) — cannot run this probe
|
|
69
|
-
return _noSignal('buffer allocation failed');
|
|
70
|
-
}
|
|
71
|
-
|
|
72
|
-
// ── Sequential access loop ───────────────────────────────────────────────
|
|
73
|
-
// Each iteration does a full sequential pass over `passElements` worth of
|
|
74
|
-
// the buffer. Pass size is tuned so each iteration takes ~1 ms wall-clock,
|
|
75
|
-
// giving us enough resolution to see the 7.8 ms refresh cycle.
|
|
76
|
-
//
|
|
77
|
-
// We start with a small pass and auto-calibrate to hit the 1 ms target.
|
|
78
|
-
const passElements = _calibratePassSize(buf);
|
|
79
|
-
|
|
80
|
-
const timings = new Float64Array(iterations);
|
|
81
|
-
let checksum = 0;
|
|
82
|
-
|
|
83
|
-
for (let iter = 0; iter < iterations; iter++) {
|
|
84
|
-
const t0 = performance.now();
|
|
85
|
-
for (let i = 0; i < passElements; i++) checksum += buf[i];
|
|
86
|
-
timings[iter] = performance.now() - t0;
|
|
87
|
-
}
|
|
88
|
-
|
|
89
|
-
// Prevent dead-code elimination
|
|
90
|
-
if (checksum === 0) buf[0] = 1;
|
|
91
|
-
|
|
92
|
-
// ── Autocorrelation over timings ─────────────────────────────────────────
|
|
93
|
-
// The refresh stall appears as elevated autocorrelation at lag ≈ 7.8 / Δt
|
|
94
|
-
// where Δt is the mean iteration time in ms.
|
|
95
|
-
const meanIterMs = _mean(timings);
|
|
96
|
-
if (meanIterMs <= 0) return _noSignal('zero mean iteration time');
|
|
97
|
-
|
|
98
|
-
const targetLag = Math.round(DRAM_REFRESH_MS / meanIterMs);
|
|
99
|
-
const maxLag = Math.min(Math.round(50 / meanIterMs), iterations >> 1);
|
|
100
|
-
|
|
101
|
-
const ac = _autocorr(Array.from(timings), maxLag);
|
|
102
|
-
|
|
103
|
-
// Find the peak in the range [targetLag ± slack]
|
|
104
|
-
const slackLags = Math.round(DRAM_REFRESH_SLACK / meanIterMs);
|
|
105
|
-
const lagLo = Math.max(1, targetLag - slackLags);
|
|
106
|
-
const lagHi = Math.min(maxLag, targetLag + slackLags);
|
|
107
|
-
|
|
108
|
-
let peakPower = -Infinity;
|
|
109
|
-
let peakLag = targetLag;
|
|
110
|
-
for (let l = lagLo; l <= lagHi; l++) {
|
|
111
|
-
if (ac[l - 1] > peakPower) {
|
|
112
|
-
peakPower = ac[l - 1];
|
|
113
|
-
peakLag = l;
|
|
114
|
-
}
|
|
115
|
-
}
|
|
116
|
-
|
|
117
|
-
// Baseline: average autocorrelation outside the refresh window
|
|
118
|
-
const baseline = _mean(
|
|
119
|
-
Array.from({ length: maxLag }, (_, i) => ac[i])
|
|
120
|
-
.filter((_, i) => i + 1 < lagLo || i + 1 > lagHi)
|
|
121
|
-
);
|
|
122
|
-
|
|
123
|
-
const snr = baseline > 0 ? peakPower / baseline : 0;
|
|
124
|
-
const refreshPresent = peakPower > 0.15 && snr > 1.8;
|
|
125
|
-
const refreshPeriodMs = refreshPresent ? peakLag * meanIterMs : null;
|
|
126
|
-
|
|
127
|
-
const verdict =
|
|
128
|
-
refreshPresent && refreshPeriodMs !== null &&
|
|
129
|
-
Math.abs(refreshPeriodMs - DRAM_REFRESH_MS) < DRAM_REFRESH_SLACK
|
|
130
|
-
? 'dram'
|
|
131
|
-
: peakPower < 0.05
|
|
132
|
-
? 'virtual'
|
|
133
|
-
: 'ambiguous';
|
|
134
|
-
|
|
135
|
-
return {
|
|
136
|
-
timings: Array.from(timings),
|
|
137
|
-
refreshPeriodMs,
|
|
138
|
-
refreshPresent,
|
|
139
|
-
peakLag,
|
|
140
|
-
peakPower: +peakPower.toFixed(4),
|
|
141
|
-
snr: +snr.toFixed(2),
|
|
142
|
-
meanIterMs: +meanIterMs.toFixed(3),
|
|
143
|
-
verdict,
|
|
144
|
-
};
|
|
145
|
-
}
|
|
146
|
-
|
|
147
|
-
/* ─── helpers ────────────────────────────────────────────────────────────── */
|
|
148
|
-
|
|
149
|
-
function _noSignal(reason) {
|
|
150
|
-
return {
|
|
151
|
-
timings: [], refreshPeriodMs: null, refreshPresent: false,
|
|
152
|
-
peakLag: 0, peakPower: 0, snr: 0, meanIterMs: 0,
|
|
153
|
-
verdict: 'ambiguous', reason,
|
|
154
|
-
};
|
|
155
|
-
}
|
|
156
|
-
|
|
157
|
-
/**
|
|
158
|
-
* Run a quick calibration pass to find how many elements to read per
|
|
159
|
-
* iteration so each iteration takes approximately 1 ms.
|
|
160
|
-
*/
|
|
161
|
-
function _calibratePassSize(buf) {
|
|
162
|
-
const target = 1.0; // ms
|
|
163
|
-
let n = Math.min(100_000, buf.length);
|
|
164
|
-
let elapsed = 0;
|
|
165
|
-
let dummy = 0;
|
|
166
|
-
|
|
167
|
-
// Warm up
|
|
168
|
-
for (let i = 0; i < n; i++) dummy += buf[i];
|
|
169
|
-
|
|
170
|
-
// Measure
|
|
171
|
-
const t0 = performance.now();
|
|
172
|
-
for (let i = 0; i < n; i++) dummy += buf[i];
|
|
173
|
-
elapsed = performance.now() - t0;
|
|
174
|
-
if (dummy === 0) buf[0] = 1; // prevent DCE
|
|
175
|
-
|
|
176
|
-
if (elapsed <= 0) return n;
|
|
177
|
-
return Math.min(buf.length, Math.round(n * (target / elapsed)));
|
|
178
|
-
}
|
|
179
|
-
|
|
180
|
-
function _mean(arr) {
|
|
181
|
-
if (!arr.length) return 0;
|
|
182
|
-
return arr.reduce((s, v) => s + v, 0) / arr.length;
|
|
183
|
-
}
|
|
184
|
-
|
|
185
|
-
function _autocorr(data, maxLag) {
|
|
186
|
-
const n = data.length;
|
|
187
|
-
const mean = _mean(data);
|
|
188
|
-
let v = 0;
|
|
189
|
-
for (let i = 0; i < n; i++) v += (data[i] - mean) ** 2;
|
|
190
|
-
v /= n;
|
|
191
|
-
|
|
192
|
-
const result = new Float64Array(maxLag);
|
|
193
|
-
if (v < 1e-14) return result;
|
|
194
|
-
|
|
195
|
-
for (let lag = 1; lag <= maxLag; lag++) {
|
|
196
|
-
let cov = 0;
|
|
197
|
-
for (let i = 0; i < n - lag; i++) {
|
|
198
|
-
cov += (data[i] - mean) * (data[i + lag] - mean);
|
|
199
|
-
}
|
|
200
|
-
result[lag - 1] = cov / ((n - lag) * v);
|
|
201
|
-
}
|
|
202
|
-
return result;
|
|
203
|
-
}
|
|
1
|
+
/**
|
|
2
|
+
* @svrnsec/pulse — DRAM Refresh Cycle Detector
|
|
3
|
+
*
|
|
4
|
+
* DDR4 DRAM refreshes every 7.8 ms (tREFI per JEDEC JESD79-4). During a
|
|
5
|
+
* refresh, the memory controller stalls all access requests for ~350 ns.
|
|
6
|
+
* In a tight sequential memory access loop this appears as a periodic
|
|
7
|
+
* slowdown — detectable as a ~128Hz peak in the autocorrelation of access
|
|
8
|
+
* timings.
|
|
9
|
+
*
|
|
10
|
+
* Virtual machines do not have physical DRAM. The hypervisor's memory
|
|
11
|
+
* subsystem does not reproduce the refresh cycle because:
|
|
12
|
+
* 1. The guest never touches real DRAM directly — there is always a
|
|
13
|
+
* hypervisor-controlled indirection layer.
|
|
14
|
+
* 2. EPT/NPT (Extended/Nested Page Tables) absorb the timing.
|
|
15
|
+
* 3. The hypervisor's memory balloon driver further smooths access latency.
|
|
16
|
+
*
|
|
17
|
+
* What we detect
|
|
18
|
+
* ──────────────
|
|
19
|
+
* refreshPeriodMs estimated DRAM refresh period (should be ~7.8ms on real DDR4)
|
|
20
|
+
* refreshPresent true if the ~7.8ms periodicity is statistically significant
|
|
21
|
+
* peakLag autocorrelation lag with the highest power (units: sample index)
|
|
22
|
+
* peakPower autocorrelation power at peakLag (0–1)
|
|
23
|
+
* verdict 'dram' | 'virtual' | 'ambiguous'
|
|
24
|
+
*
|
|
25
|
+
* Calibration
|
|
26
|
+
* ───────────
|
|
27
|
+
* We allocate a buffer large enough to exceed all CPU caches (typically
|
|
28
|
+
* L3 = 8–32 MB on consumer parts). Sequential reads then go to DRAM, not
|
|
29
|
+
* cache. The refresh stall is only visible when we're actually hitting DRAM —
|
|
30
|
+
* a cache-resident access loop shows no refresh signal.
|
|
31
|
+
*
|
|
32
|
+
* Buffer size: 64 MB — comfortably above L3 on all tested platforms.
|
|
33
|
+
* Sampling interval: ~1 ms per iteration (chosen to resolve 7.8ms at ≥8 pts).
|
|
34
|
+
* Total probe time: ~400 ms — well within the fingerprint collection window.
|
|
35
|
+
*/
|
|
36
|
+
|
|
37
|
+
// Probe tuning constants — see the file header comment for the detection rationale.
const DRAM_REFRESH_MS = 7.8; // JEDEC DDR4 nominal tREFI refresh interval
const DRAM_REFRESH_SLACK = 1.5; // ±1.5 ms acceptable deviation on real hardware
const BUFFER_MB = 64; // probe buffer size (MB) — must exceed L3 so reads hit DRAM
const PROBE_ITERATIONS = 400; // timed passes; ~1 ms each → ~400 ms total probe time
|
|
41
|
+
|
|
42
|
+
/* ─── collectDramTimings ─────────────────────────────────────────────────── */
|
|
43
|
+
|
|
44
|
+
/**
 * Probe DRAM refresh periodicity via a cache-busting sequential read loop.
 *
 * Allocates a buffer larger than L3, times repeated ~1 ms sequential passes
 * over it, then looks for a statistically significant autocorrelation peak
 * near the DDR4 refresh period (~7.8 ms).
 *
 * @param {object} [opts]
 * @param {number} [opts.iterations=400] number of timed passes (~1 ms each)
 * @param {number} [opts.bufferMb=64]    probe buffer size in MB (must exceed L3)
 * @returns {{ timings: number[], refreshPeriodMs: number|null,
 *   refreshPresent: boolean, peakLag: number, peakPower: number,
 *   snr: number, meanIterMs: number, verdict: string }}
 */
export function collectDramTimings(opts = {}) {
  const {
    iterations = PROBE_ITERATIONS,
    bufferMb = BUFFER_MB,
  } = opts;

  // ── Allocate cache-busting buffer ────────────────────────────────────────
  const nElements = (bufferMb * 1024 * 1024) / 8; // 64-bit doubles
  let buf;

  try {
    buf = new Float64Array(nElements);
    // Touch every cache line so the OS actually maps (commits) the pages.
    const stride = 64 / 8; // 64-byte cache lines, 8 bytes per element
    for (let i = 0; i < nElements; i += stride) buf[i] = i;
  } catch {
    // Allocation failure (memory constrained) — cannot run this probe.
    return _noSignal('buffer allocation failed');
  }

  // ── Sequential access loop ───────────────────────────────────────────────
  // Each iteration sweeps `passElements` elements of the buffer; the pass
  // size is auto-calibrated so one iteration takes ~1 ms, giving ≥8 samples
  // per 7.8 ms refresh period.
  const passElements = _calibratePassSize(buf);

  const timings = new Float64Array(iterations);
  let checksum = 0;

  for (let iter = 0; iter < iterations; iter++) {
    const t0 = performance.now();
    for (let i = 0; i < passElements; i++) checksum += buf[i];
    timings[iter] = performance.now() - t0;
  }

  // Prevent dead-code elimination of the read loop.
  if (checksum === 0) buf[0] = 1;

  // ── Autocorrelation over timings ─────────────────────────────────────────
  // The refresh stall appears as elevated autocorrelation at lag ≈ 7.8 / Δt
  // where Δt is the mean iteration time in ms.
  const meanIterMs = _mean(timings);
  if (meanIterMs <= 0) return _noSignal('zero mean iteration time');

  const targetLag = Math.round(DRAM_REFRESH_MS / meanIterMs);
  const maxLag = Math.min(Math.round(50 / meanIterMs), iterations >> 1);

  // Search window [targetLag ± slack]. If iterations are so slow that the
  // refresh period cannot be resolved (targetLag < 1) or the window is empty
  // (lagHi < lagLo), bail out: scanning an empty range would leave peakPower
  // at -Infinity and mislabel the host as 'virtual'.
  const slackLags = Math.round(DRAM_REFRESH_SLACK / meanIterMs);
  const lagLo = Math.max(1, targetLag - slackLags);
  const lagHi = Math.min(maxLag, targetLag + slackLags);
  if (targetLag < 1 || lagHi < lagLo) {
    return _noSignal('insufficient timing resolution');
  }

  const ac = _autocorr(Array.from(timings), maxLag);

  let peakPower = -Infinity;
  let peakLag = targetLag;
  for (let l = lagLo; l <= lagHi; l++) {
    if (ac[l - 1] > peakPower) { // ac[k] holds the value for lag k+1
      peakPower = ac[l - 1];
      peakLag = l;
    }
  }

  // Baseline: average autocorrelation outside the refresh window.
  const baseline = _mean(
    Array.from({ length: maxLag }, (_, i) => ac[i])
      .filter((_, i) => i + 1 < lagLo || i + 1 > lagHi)
  );

  const snr = baseline > 0 ? peakPower / baseline : 0;
  const refreshPresent = peakPower > 0.15 && snr > 1.8;
  const refreshPeriodMs = refreshPresent ? peakLag * meanIterMs : null;

  const verdict =
    refreshPresent && refreshPeriodMs !== null &&
    Math.abs(refreshPeriodMs - DRAM_REFRESH_MS) < DRAM_REFRESH_SLACK
      ? 'dram'
      : peakPower < 0.05
        ? 'virtual'
        : 'ambiguous';

  return {
    timings: Array.from(timings),
    refreshPeriodMs,
    refreshPresent,
    peakLag,
    peakPower: +peakPower.toFixed(4),
    snr: +snr.toFixed(2),
    meanIterMs: +meanIterMs.toFixed(3),
    verdict,
  };
}
|
|
146
|
+
|
|
147
|
+
/* ─── helpers ────────────────────────────────────────────────────────────── */
|
|
148
|
+
|
|
149
|
+
/**
 * Build the degenerate "no measurable signal" result.
 * Matches the shape of collectDramTimings' normal return, with every
 * measurement zeroed/nulled, verdict 'ambiguous', plus a `reason` string.
 */
function _noSignal(reason) {
  const empty = {
    timings: [],
    refreshPeriodMs: null,
    refreshPresent: false,
    peakLag: 0,
    peakPower: 0,
    snr: 0,
    meanIterMs: 0,
    verdict: 'ambiguous',
  };
  empty.reason = reason;
  return empty;
}
|
|
156
|
+
|
|
157
|
+
/**
 * Determine how many buffer elements one timed pass should read so that a
 * single iteration lasts roughly 1 ms — enough temporal resolution to see
 * a 7.8 ms periodicity. Result is clamped to [1, buf.length].
 */
function _calibratePassSize(buf) {
  const TARGET_MS = 1.0;
  const sampleLen = Math.min(100_000, buf.length);

  // One untimed sweep to warm caches/TLB, then one timed sweep.
  let sink = 0;
  for (let i = 0; i < sampleLen; i++) sink += buf[i];

  const start = performance.now();
  for (let i = 0; i < sampleLen; i++) sink += buf[i];
  const elapsed = performance.now() - start;

  if (sink === 0) buf[0] = 1; // keep the reads observable (defeat DCE)

  if (elapsed <= 0) return sampleLen; // timer too coarse — keep sample size
  const scaled = Math.round(sampleLen * (TARGET_MS / elapsed));
  return Math.max(1, Math.min(buf.length, scaled));
}
|
|
179
|
+
|
|
180
|
+
/**
 * Arithmetic mean of an array-like of numbers (plain array or typed array).
 * Returns 0 for an empty input.
 */
function _mean(arr) {
  const count = arr.length;
  if (count === 0) return 0;
  let total = 0;
  for (const value of arr) total += value;
  return total / count;
}
|
|
184
|
+
|
|
185
|
+
/**
 * Normalized autocorrelation of `data` for lags 1..maxLag.
 * result[k] is the correlation coefficient at lag k+1 (roughly -1..1).
 * A numerically constant series (variance < 1e-14) yields all zeros.
 */
function _autocorr(data, maxLag) {
  const n = data.length;

  // Centering term (mean, inlined) and population variance.
  let sum = 0;
  for (const x of data) sum += x;
  const mean = n > 0 ? sum / n : 0;

  let variance = 0;
  for (let i = 0; i < n; i++) {
    const d = data[i] - mean;
    variance += d * d;
  }
  variance /= n;

  const result = new Float64Array(maxLag);
  if (variance < 1e-14) return result; // flat series — nothing to correlate

  for (let lag = 1; lag <= maxLag; lag++) {
    let cov = 0;
    for (let i = 0; i + lag < n; i++) {
      cov += (data[i] - mean) * (data[i + lag] - mean);
    }
    result[lag - 1] = cov / ((n - lag) * variance);
  }
  return result;
}
|