@svrnsec/pulse 0.6.0 → 0.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/LICENSE +21 -21
  2. package/README.md +883 -622
  3. package/SECURITY.md +86 -86
  4. package/bin/svrnsec-pulse.js +7 -7
  5. package/dist/{pulse.cjs.js → pulse.cjs} +6379 -6420
  6. package/dist/pulse.cjs.map +1 -0
  7. package/dist/pulse.esm.js +6380 -6421
  8. package/dist/pulse.esm.js.map +1 -1
  9. package/index.d.ts +895 -846
  10. package/package.json +185 -165
  11. package/pkg/pulse_core.js +174 -173
  12. package/src/analysis/audio.js +213 -213
  13. package/src/analysis/authenticityAudit.js +408 -390
  14. package/src/analysis/coherence.js +502 -502
  15. package/src/analysis/coordinatedBehavior.js +825 -0
  16. package/src/analysis/heuristic.js +428 -428
  17. package/src/analysis/jitter.js +446 -446
  18. package/src/analysis/llm.js +473 -472
  19. package/src/analysis/populationEntropy.js +404 -403
  20. package/src/analysis/provider.js +248 -248
  21. package/src/analysis/refraction.js +392 -0
  22. package/src/analysis/trustScore.js +356 -356
  23. package/src/cli/args.js +36 -36
  24. package/src/cli/commands/scan.js +192 -192
  25. package/src/cli/runner.js +157 -157
  26. package/src/collector/adaptive.js +200 -200
  27. package/src/collector/bio.js +297 -287
  28. package/src/collector/canvas.js +247 -239
  29. package/src/collector/dram.js +203 -203
  30. package/src/collector/enf.js +311 -311
  31. package/src/collector/entropy.js +195 -195
  32. package/src/collector/gpu.js +248 -245
  33. package/src/collector/idleAttestation.js +480 -480
  34. package/src/collector/sabTimer.js +189 -191
  35. package/src/fingerprint.js +475 -475
  36. package/src/index.js +342 -342
  37. package/src/integrations/react-native.js +462 -459
  38. package/src/integrations/react.js +184 -185
  39. package/src/middleware/express.js +155 -155
  40. package/src/middleware/next.js +174 -175
  41. package/src/proof/challenge.js +249 -249
  42. package/src/proof/engagementToken.js +426 -394
  43. package/src/proof/fingerprint.js +268 -268
  44. package/src/proof/validator.js +83 -143
  45. package/src/registry/serializer.js +349 -349
  46. package/src/terminal.js +263 -263
  47. package/src/update-notifier.js +259 -264
  48. package/dist/pulse.cjs.js.map +0 -1
package/pkg/pulse_core.js CHANGED
@@ -1,173 +1,174 @@
1
- /**
2
- * pulse_core — pure-JavaScript probe engine
3
- *
4
- * This module ships the entropy probe as portable JS so the package works
5
- * out-of-the-box without a Rust toolchain. When a compiled .wasm binary is
6
- * present (dropped in via `build.sh`) this file is replaced by the wasm-pack
7
- * output and the native engine runs instead.
8
- *
9
- * Physics model
10
- * ─────────────
11
- * Real silicon: DRAM refresh cycles, branch-predictor misses, and L3-cache
12
- * evictions inject sub-microsecond noise into any tight compute loop.
13
- * Hypervisors virtualise the TSC and smooth those interrupts out, leaving
14
- * a near-flat timing distribution that our QE/EJR checks catch.
15
- *
16
- * The JS loop below is a faithful port of the Rust matrix-multiply probe:
17
- * same work unit (N×N DGEMM-style loop), same checksum accumulation to
18
- * prevent dead-code elimination, same resolution micro-probe.
19
- */
20
-
21
- /* ─── clock ─────────────────────────────────────────────────────────────── */
22
-
23
- const _now = (typeof performance !== 'undefined' && typeof performance.now === 'function')
24
- ? () => performance.now()
25
- : (() => {
26
- // Node.js fallback: process.hrtime.bigint() → milliseconds
27
- const _hr = process.hrtime.bigint;
28
- return () => Number(_hr()) / 1_000_000;
29
- })();
30
-
31
- /* ─── init (no-op for the JS engine) ───────────────────────────────────── */
32
-
33
- /**
34
- * Initialise the engine. When a real .wasm binary is supplied the wasm-pack
35
- * glue calls WebAssembly.instantiateStreaming here. The JS engine is already
36
- * "compiled", so we return immediately.
37
- *
38
- * @param {string|URL|Request|BufferSource|WebAssembly.Module} [_source]
39
- * @returns {Promise<void>}
40
- */
41
- export default async function init(_source) {
42
- // JS engine is ready synchronously — nothing to stream or compile.
43
- }
44
-
45
- /* ─── run_entropy_probe ─────────────────────────────────────────────────── */
46
-
47
- /**
48
- * Run N iterations of a matrix-multiply work unit and record wall-clock time
49
- * per iteration. The distribution of those times is what the heuristic
50
- * engine analyses.
51
- *
52
- * @param {number} iterations – number of timing samples to collect
53
- * @param {number} matrixSize – N for the N×N multiply (default 64)
54
- * @returns {{ timings: Float64Array, checksum: number, resolution_probe: Float64Array }}
55
- */
56
- export function run_entropy_probe(iterations, matrixSize = 64) {
57
- const N = matrixSize | 0;
58
-
59
- // Persistent working matrices — allocated once per probe to avoid GC noise.
60
- const A = new Float64Array(N * N);
61
- const B = new Float64Array(N * N);
62
- const C = new Float64Array(N * N);
63
-
64
- // Seed matrices with pseudo-random data (deterministic per call for
65
- // reproducibility, but different each run due to xorshift seeding from time).
66
- let seed = (_now() * 1e6) | 0 || 0xdeadbeef;
67
- const xr = () => { seed ^= seed << 13; seed ^= seed >> 17; seed ^= seed << 5; return (seed >>> 0) / 4294967296; };
68
- for (let i = 0; i < N * N; i++) { A[i] = xr(); B[i] = xr(); }
69
-
70
- const timings = new Float64Array(iterations);
71
- const resolution_probe = new Float64Array(32);
72
- let checksum = 0;
73
-
74
- for (let iter = 0; iter < iterations; iter++) {
75
- // Zero accumulator each round (realistic cache pressure).
76
- C.fill(0);
77
-
78
- const t0 = _now();
79
-
80
- // N×N matrix multiply: C = A · B (ikj loop order for cache friendliness)
81
- for (let i = 0; i < N; i++) {
82
- const rowA = i * N;
83
- const rowC = i * N;
84
- for (let k = 0; k < N; k++) {
85
- const aik = A[rowA + k];
86
- const rowBk = k * N;
87
- for (let j = 0; j < N; j++) {
88
- C[rowC + j] += aik * B[rowBk + j];
89
- }
90
- }
91
- }
92
-
93
- const t1 = _now();
94
- timings[iter] = t1 - t0;
95
-
96
- // Accumulate one element so the compiler cannot eliminate the work.
97
- checksum += C[0];
98
- }
99
-
100
- // Resolution micro-probe: fire 32 back-to-back timestamps.
101
- // The minimum non-zero delta reveals timer granularity.
102
- for (let i = 0; i < resolution_probe.length; i++) {
103
- resolution_probe[i] = _now();
104
- }
105
-
106
- return { timings, checksum, resolution_probe };
107
- }
108
-
109
- /* ─── run_memory_probe ──────────────────────────────────────────────────── */
110
-
111
- /**
112
- * Sequential read/write bandwidth probe over a large buffer.
113
- * Memory latency variance is a secondary signal (NUMA, DRAM refresh).
114
- *
115
- * @param {number} memSizeKb – buffer size in kibibytes
116
- * @param {number} memIterations
117
- * @returns {{ timings: Float64Array, checksum: number }}
118
- */
119
- export function run_memory_probe(memSizeKb = 512, memIterations = 50) {
120
- const len = (memSizeKb * 1024 / 8) | 0; // 64-bit elements
121
- const buf = new Float64Array(len);
122
- const timings = new Float64Array(memIterations);
123
- let checksum = 0;
124
-
125
- // Warm-up pass (fills TLB, avoids first-access bias)
126
- for (let i = 0; i < len; i++) buf[i] = i;
127
-
128
- for (let iter = 0; iter < memIterations; iter++) {
129
- const t0 = _now();
130
- // Sequential read-modify-write
131
- for (let i = 0; i < len; i++) buf[i] = buf[i] * 1.0000001;
132
- const t1 = _now();
133
-
134
- timings[iter] = t1 - t0;
135
- checksum += buf[0];
136
- }
137
-
138
- return { timings, checksum };
139
- }
140
-
141
- /* ─── compute_autocorrelation ───────────────────────────────────────────── */
142
-
143
- /**
144
- * Pearson autocorrelation for lags 1..maxLag.
145
- * O(n·maxLag) kept cheap by the adaptive early-exit cap.
146
- *
147
- * @param {ArrayLike<number>} data
148
- * @param {number} maxLag
149
- * @returns {Float64Array} length = maxLag, index 0 = lag-1
150
- */
151
- export function compute_autocorrelation(data, maxLag) {
152
- const n = data.length;
153
- let mean = 0;
154
- for (let i = 0; i < n; i++) mean += data[i];
155
- mean /= n;
156
-
157
- let variance = 0;
158
- for (let i = 0; i < n; i++) variance += (data[i] - mean) ** 2;
159
- variance /= n;
160
-
161
- const result = new Float64Array(maxLag);
162
- if (variance < 1e-14) return result; // degenerate — all identical
163
-
164
- for (let lag = 1; lag <= maxLag; lag++) {
165
- let cov = 0;
166
- for (let i = 0; i < n - lag; i++) {
167
- cov += (data[i] - mean) * (data[i + lag] - mean);
168
- }
169
- result[lag - 1] = cov / ((n - lag) * variance);
170
- }
171
-
172
- return result;
173
- }
1
+ /**
2
+ * pulse_core — pure-JavaScript probe engine
3
+ *
4
+ * This module ships the entropy probe as portable JS so the package works
5
+ * out-of-the-box without a Rust toolchain. When a compiled .wasm binary is
6
+ * present (dropped in via `build.sh`) this file is replaced by the wasm-pack
7
+ * output and the native engine runs instead.
8
+ *
9
+ * Physics model
10
+ * ─────────────
11
+ * Real silicon: DRAM refresh cycles, branch-predictor misses, and L3-cache
12
+ * evictions inject sub-microsecond noise into any tight compute loop.
13
+ * Hypervisors virtualise the TSC and smooth those interrupts out, leaving
14
+ * a near-flat timing distribution that our QE/EJR checks catch.
15
+ *
16
+ * The JS loop below is a faithful port of the Rust matrix-multiply probe:
17
+ * same work unit (N×N DGEMM-style loop), same checksum accumulation to
18
+ * prevent dead-code elimination, same resolution micro-probe.
19
+ */
20
+
21
/* ─── clock ─────────────────────────────────────────────────────────────── */

// Monotonic high-resolution clock, milliseconds. Browsers/workers expose
// performance.now(); under Node.js we derive the same unit from
// process.hrtime.bigint() (nanoseconds → ms). Selected once at module load.
const _now = (() => {
  if (typeof performance !== 'undefined' && typeof performance.now === 'function') {
    return () => performance.now();
  }
  // Node.js fallback: hrtime nanoseconds scaled down to milliseconds.
  const hrtime = process.hrtime.bigint;
  return () => Number(hrtime()) / 1_000_000;
})();
30
+
31
/* ─── init (no-op for the JS engine) ───────────────────────────────────── */

/**
 * Initialise the probe engine.
 *
 * When a compiled .wasm artefact is supplied, the wasm-pack glue that
 * replaces this file runs WebAssembly.instantiateStreaming here. The pure-JS
 * engine has no compilation step, so the returned promise resolves at once.
 *
 * @param {string|URL|Request|BufferSource|WebAssembly.Module} [_source]
 * @returns {Promise<void>}
 */
export default async function init(_source) {
  // Nothing to fetch, stream, or compile for the JS implementation.
}
44
+
45
/* ─── run_entropy_probe ─────────────────────────────────────────────────── */

/**
 * Run N iterations of a matrix-multiply work unit and record wall-clock time
 * per iteration. The distribution of those times is what the heuristic
 * engine analyses.
 *
 * @param {number} iterations – number of timing samples to collect
 * @param {number} matrixSize – N for the N×N multiply (default 64)
 * @returns {{ timings: Float64Array, checksum: number, resolution_probe: Float64Array }}
 */
export function run_entropy_probe(iterations, matrixSize = 64) {
  const N = matrixSize | 0;

  // Persistent working matrices — allocated once per probe to avoid GC noise.
  const A = new Float64Array(N * N);
  const B = new Float64Array(N * N);
  const C = new Float64Array(N * N);

  // Seed matrices with pseudo-random data (deterministic per call for
  // reproducibility, but different each run due to xorshift seeding from time).
  // Zero-seed protection: xorshift has 0 as a fixed point, so we fall back to
  // 0xdeadbeef if _now() truncates to 0.
  let seed = (_now() * 1e6) | 0 || 0xdeadbeef;
  // Canonical Marsaglia xorshift32. The 17-bit shift must be LOGICAL (>>>):
  // a signed >> replicates the sign bit into the state whenever seed goes
  // negative, which degrades the generator's period/uniformity.
  const xr = () => {
    seed ^= seed << 13;
    seed ^= seed >>> 17;
    seed ^= seed << 5;
    return (seed >>> 0) / 4294967296;
  };
  for (let i = 0; i < N * N; i++) { A[i] = xr(); B[i] = xr(); }

  const timings = new Float64Array(iterations);
  const resolution_probe = new Float64Array(32);
  let checksum = 0;

  for (let iter = 0; iter < iterations; iter++) {
    // Zero accumulator each round (realistic cache pressure).
    C.fill(0);

    const t0 = _now();

    // N×N matrix multiply: C = A · B (ikj loop order for cache friendliness)
    for (let i = 0; i < N; i++) {
      const rowA = i * N;
      const rowC = i * N;
      for (let k = 0; k < N; k++) {
        const aik = A[rowA + k];
        const rowBk = k * N;
        for (let j = 0; j < N; j++) {
          C[rowC + j] += aik * B[rowBk + j];
        }
      }
    }

    const t1 = _now();
    timings[iter] = t1 - t0;

    // Accumulate one element so the compiler cannot eliminate the work.
    checksum += C[0];
  }

  // Resolution micro-probe: fire 32 back-to-back timestamps.
  // The minimum non-zero delta reveals timer granularity.
  for (let i = 0; i < resolution_probe.length; i++) {
    resolution_probe[i] = _now();
  }

  return { timings, checksum, resolution_probe };
}
109
+
110
/* ─── run_memory_probe ──────────────────────────────────────────────────── */

/**
 * Sequential read/write bandwidth probe over a large buffer.
 * Memory latency variance is a secondary signal (NUMA, DRAM refresh).
 *
 * @param {number} memSizeKb – buffer size in kibibytes
 * @param {number} memIterations
 * @returns {{ timings: Float64Array, checksum: number }}
 */
export function run_memory_probe(memSizeKb = 512, memIterations = 50) {
  // Element count: kibibytes → bytes → 8-byte (64-bit) floats.
  const count = (memSizeKb * 1024 / 8) | 0;
  const data = new Float64Array(count);
  const timings = new Float64Array(memIterations);
  let checksum = 0;

  // Warm-up sweep fills the TLB so first-touch cost stays out of the samples.
  for (let idx = 0; idx < count; idx++) {
    data[idx] = idx;
  }

  for (let round = 0; round < memIterations; round++) {
    const start = _now();
    // Sequential read-modify-write across the whole buffer.
    for (let idx = 0; idx < count; idx++) {
      data[idx] = data[idx] * 1.0000001;
    }
    timings[round] = _now() - start;
    checksum += data[0];
  }

  return { timings, checksum };
}
141
+
142
/* ─── compute_autocorrelation ───────────────────────────────────────────── */

/**
 * Pearson autocorrelation for lags 1..maxLag.
 * O(n·maxLag) — kept cheap by the adaptive early-exit cap.
 *
 * Lags with no overlapping window (lag >= n) are left at 0 rather than
 * producing NaN (0/0 at lag === n) or -0 (0/negative beyond it), and inputs
 * with fewer than two samples return all zeros instead of propagating NaN
 * from an undefined mean/variance.
 *
 * @param {ArrayLike<number>} data
 * @param {number} maxLag
 * @returns {Float64Array} length = maxLag, index 0 = lag-1
 */
export function compute_autocorrelation(data, maxLag) {
  const n = data.length;
  const result = new Float64Array(maxLag);

  // Fewer than two samples: mean/variance are undefined — report zeros.
  if (n < 2) return result;

  let mean = 0;
  for (let i = 0; i < n; i++) mean += data[i];
  mean /= n;

  let variance = 0;
  for (let i = 0; i < n; i++) variance += (data[i] - mean) ** 2;
  variance /= n;

  if (variance < 1e-14) return result; // degenerate — all identical

  // Cap at n-1: past that the lagged window is empty and the quotient
  // below would divide by a zero or negative window length.
  const lagCap = Math.min(maxLag, n - 1);
  for (let lag = 1; lag <= lagCap; lag++) {
    let cov = 0;
    for (let i = 0; i < n - lag; i++) {
      cov += (data[i] - mean) * (data[i + lag] - mean);
    }
    result[lag - 1] = cov / ((n - lag) * variance);
  }

  return result;
}