@svrnsec/pulse 0.3.1 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/svrnsec-pulse.js +7 -0
- package/index.d.ts +130 -0
- package/package.json +70 -25
- package/src/analysis/audio.js +213 -0
- package/src/analysis/coherence.js +502 -0
- package/src/analysis/heuristic.js +428 -0
- package/src/analysis/jitter.js +446 -0
- package/src/analysis/llm.js +472 -0
- package/src/analysis/populationEntropy.js +403 -0
- package/src/analysis/provider.js +248 -0
- package/src/analysis/trustScore.js +356 -0
- package/src/cli/args.js +36 -0
- package/src/cli/commands/scan.js +192 -0
- package/src/cli/runner.js +157 -0
- package/src/collector/adaptive.js +200 -0
- package/src/collector/bio.js +287 -0
- package/src/collector/canvas.js +239 -0
- package/src/collector/dram.js +203 -0
- package/src/collector/enf.js +311 -0
- package/src/collector/entropy.js +195 -0
- package/src/collector/gpu.js +245 -0
- package/src/collector/idleAttestation.js +480 -0
- package/src/collector/sabTimer.js +191 -0
- package/src/fingerprint.js +475 -0
- package/src/index.js +342 -0
- package/src/integrations/react-native.js +459 -0
- package/src/proof/challenge.js +249 -0
- package/src/proof/engagementToken.js +394 -0
- package/src/terminal.js +263 -0
- package/src/update-notifier.js +264 -0
|
@@ -0,0 +1,472 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @sovereign/pulse — LLM / AI Agent Behavioral Fingerprint
|
|
3
|
+
*
|
|
4
|
+
* Detects automation driven by large language models, headless browsers
|
|
5
|
+
* controlled by AI agents (AutoGPT, CrewAI, browser-use, Playwright+LLM,
|
|
6
|
+
* Selenium+GPT-4), and synthetic user emulators.
|
|
7
|
+
*
|
|
8
|
+
* Why LLMs are detectable at the behavioral layer
|
|
9
|
+
* ───────────────────────────────────────────────
|
|
10
|
+
* A human interacting with a browser produces a signal shaped by:
|
|
11
|
+
* – Motor control noise (Fitts' Law, signal-dependent noise in arm movement)
|
|
12
|
+
* – Cognitive processing time (fixation → decision → motor initiation)
|
|
13
|
+
* – Error and correction cycles (overshooting, backspacing, re-reading)
|
|
14
|
+
* – Physiological rhythms (micro-tremor at 8–12 Hz, respiration at 0.2–0.3 Hz)
|
|
15
|
+
*
|
|
16
|
+
* An LLM agent produces:
|
|
17
|
+
* – Think-time spikes at multiples of the model's token generation latency
|
|
18
|
+
* (GPT-4 Turbo: ~50ms/token; Claude 3: ~30ms/token)
|
|
19
|
+
* – Mouse paths generated by a trajectory model (no signal-dependent noise)
|
|
20
|
+
* – Keystrokes at WPM limited by the agent's typing function, not human anatomy
|
|
21
|
+
* – Absent micro-corrections (humans correct 7–12% of keystrokes; agents: 0%)
|
|
22
|
+
*
|
|
23
|
+
* Signals
|
|
24
|
+
* ───────
|
|
25
|
+
* thinkTimePattern peak in inter-event timing at known LLM token latencies
|
|
26
|
+
* mousePathSmoothness human paths are fractal; LLM paths are piecewise linear
|
|
27
|
+
* correctionRate keystrokes followed by Backspace (human: 7–12%, LLM: <1%)
|
|
28
|
+
* pauseDistribution human pauses are Pareto-distributed; LLM pauses are uniform
|
|
29
|
+
* rhythmicity presence of physiological tremor (8–12 Hz) in pointer data
|
|
30
|
+
* eventGapCV coefficient of variation of inter-event gaps
|
|
31
|
+
*
|
|
32
|
+
* Scoring
|
|
33
|
+
* ───────
|
|
34
|
+
* Each signal contributes a weight to an overall `aiConf` score (0–1).
|
|
35
|
+
* A score above 0.70 indicates likely AI agent. Above 0.85 is high confidence.
|
|
36
|
+
* The score is designed to be combined with the physics layer — AI agents running
|
|
37
|
+
* on real hardware (a human's machine being remote-controlled) will pass the
|
|
38
|
+
* physics check but fail the behavioral check.
|
|
39
|
+
*/
|
|
40
|
+
|
|
41
|
+
// ── Known LLM token latency ranges (ms per token, observed empirically) ──────
// These appear as periodic peaks in inter-event timing when the LLM is
// "thinking" between actions.
// Frozen (table and rows) so the shared reference data cannot be mutated at
// runtime by any consumer of this module.
const LLM_LATENCY_RANGES = Object.freeze([
  Object.freeze({ name: 'gpt-4-turbo', minMs: 40, maxMs: 80 }),
  Object.freeze({ name: 'gpt-4o', minMs: 20, maxMs: 50 }),
  Object.freeze({ name: 'claude-3-sonnet', minMs: 25, maxMs: 60 }),
  Object.freeze({ name: 'claude-3-opus', minMs: 50, maxMs: 120 }),
  Object.freeze({ name: 'gemini-1.5-pro', minMs: 30, maxMs: 70 }),
  Object.freeze({ name: 'llama-3-70b', minMs: 15, maxMs: 45 }),
]);
|
|
52
|
+
|
|
53
|
+
// ── Human physiological constants ─────────────────────────────────────────────
// Empirical bounds consumed by the tremor and correction-rate checks below.
const HUMAN_TREMOR_HZ_LO = 8; // involuntary micro-tremor band, low edge (Hz)
const HUMAN_TREMOR_HZ_HI = 12; // involuntary micro-tremor band, high edge (Hz)
const HUMAN_CORRECTION_MIN = 0.05; // minimum plausible human backspace rate
const HUMAN_CORRECTION_MAX = 0.18; // maximum plausible human backspace rate
|
|
58
|
+
|
|
59
|
+
/* ─── Public API ─────────────────────────────────────────────────────────── */
|
|
60
|
+
|
|
61
|
+
/**
 * Analyse collected behavioral signals for AI agent indicators.
 *
 * @param {object} signals
 * @param {Array<{t:number, x:number, y:number}>} [signals.mouseEvents] – {t ms, x, y}
 * @param {Array<{t:number, key:string}>} [signals.keyEvents] – {t ms, key}
 * @param {Array<number>} [signals.interEventGaps] – ms between any UI events
 * @returns {LlmFingerprint}
 */
export function detectLlmAgent(signals = {}) {
  const {
    mouseEvents = [],
    keyEvents = [],
    interEventGaps = [],
  } = signals;

  const dataPoints = {
    mouseEvents: mouseEvents.length,
    keyEvents: keyEvents.length,
    gaps: interEventGaps.length,
  };

  // Each check runs only when enough raw data exists for it to be meaningful.
  const checks = [];
  if (interEventGaps.length >= 20) checks.push(_analyseThinkTime(interEventGaps)); // 1. think-time pattern
  if (mouseEvents.length >= 30) checks.push(_analyseMousePath(mouseEvents)); // 2. path smoothness
  if (keyEvents.length >= 15) checks.push(_analyseCorrectionRate(keyEvents)); // 3. correction rate
  if (mouseEvents.length >= 50) checks.push(_analyseTremor(mouseEvents)); // 4. physiological tremor
  if (interEventGaps.length >= 30) checks.push(_analyseGapDistribution(interEventGaps)); // 5. gap distribution

  if (checks.length === 0) {
    return { aiConf: 0, humanConf: 0, checks: [], verdict: 'insufficient_data', dataPoints };
  }

  // Weighted average — each check's weight reflects how much data backed it.
  let weightSum = 0;
  let aiSum = 0;
  let humanSum = 0;
  for (const check of checks) {
    weightSum += check.weight;
    aiSum += check.aiScore * check.weight;
    humanSum += check.humanScore * check.weight;
  }
  const aiConf = aiSum / weightSum;
  const humanConf = humanSum / weightSum;

  let verdict = 'ambiguous';
  if (aiConf > 0.85) {
    verdict = 'ai_agent_high_confidence';
  } else if (aiConf > 0.70) {
    verdict = 'ai_agent_likely';
  } else if (humanConf > 0.75) {
    verdict = 'human_likely';
  }

  return {
    aiConf: +aiConf.toFixed(3),
    humanConf: +humanConf.toFixed(3),
    checks,
    verdict,
    dataPoints,
  };
}
|
|
139
|
+
|
|
140
|
+
/* ─── Signal collectors (attach to the live DOM) ─────────────────────────── */
|
|
141
|
+
|
|
142
|
+
/**
 * Create a collector that listens to DOM events and returns a snapshot
 * of raw signals. Call `.stop()` when done collecting.
 *
 * @param {object} [opts]
 * @param {EventTarget} [opts.target=window]
 * @param {number} [opts.maxEvents=2000]
 * @returns {{ stop: () => LlmSignals, isCollecting: boolean }}
 */
export function createBehaviorCollector(opts = {}) {
  const { target = globalThis, maxEvents = 2000 } = opts;

  const mouseEvents = [];
  const keyEvents = [];
  const interEventGaps = [];
  let previousTs = null;
  let active = true;

  // Record the gap since the previous UI event (strictly forward gaps only).
  const noteGap = (ts) => {
    if (previousTs !== null && ts > previousTs) {
      interEventGaps.push(ts - previousTs);
    }
    previousTs = ts;
  };

  const handleMouse = (e) => {
    if (!active || mouseEvents.length >= maxEvents) return;
    mouseEvents.push({ t: e.timeStamp, x: e.clientX, y: e.clientY });
    noteGap(e.timeStamp);
  };

  const handleKey = (e) => {
    if (!active || keyEvents.length >= maxEvents) return;
    keyEvents.push({ t: e.timeStamp, key: e.key });
    noteGap(e.timeStamp);
  };

  const handleClick = (e) => noteGap(e.timeStamp);
  const handleScroll = () => noteGap(performance.now());

  // Guarded so the collector is a no-op outside DOM-like environments.
  if (typeof target.addEventListener === 'function') {
    target.addEventListener('mousemove', handleMouse, { passive: true });
    target.addEventListener('keydown', handleKey, { passive: true });
    target.addEventListener('click', handleClick, { passive: true });
    target.addEventListener('scroll', handleScroll, { passive: true });
  }

  return {
    get isCollecting() { return active; },
    stop() {
      active = false;
      if (typeof target.removeEventListener === 'function') {
        target.removeEventListener('mousemove', handleMouse);
        target.removeEventListener('keydown', handleKey);
        target.removeEventListener('click', handleClick);
        target.removeEventListener('scroll', handleScroll);
      }
      // Snapshot copies so callers cannot mutate internal buffers.
      return {
        mouseEvents: [...mouseEvents],
        keyEvents: [...keyEvents],
        interEventGaps: [...interEventGaps],
      };
    },
  };
}
|
|
205
|
+
|
|
206
|
+
/* ─── Internal checks ────────────────────────────────────────────────────── */
|
|
207
|
+
|
|
208
|
+
// Detect "think-time" spikes: inter-event gaps that pile up inside a known
// LLM token-latency window instead of following a human heavy-tailed spread.
function _analyseThinkTime(gaps) {
  const histogram = _histogram(gaps, 200); // 200 bins over the gap range

  const matched = [];
  let matchScore = 0;
  LLM_LATENCY_RANGES.forEach((llm) => {
    const binPower = _histogramPowerInRange(histogram, llm.minMs, llm.maxMs);
    // >15% of all gaps inside one model's latency window counts as a match.
    if (binPower > 0.15) {
      matched.push(llm.name);
      if (binPower > matchScore) matchScore = binPower;
    }
  });

  // Human think times follow a Pareto distribution: many short pauses,
  // exponentially fewer long ones. Pareto CV is always > 1, so a spike at a
  // fixed latency range is anomalous.
  const isPareto = _cv(gaps) > 1.0;

  return {
    name: 'think_time',
    aiScore: matchScore > 0.20 ? Math.min(1, matchScore * 3) : 0,
    humanScore: isPareto && matchScore < 0.10 ? 0.8 : 0.2,
    weight: Math.min(gaps.length / 50, 1),
    detail: { matchedLlms: matched, peakBinPower: matchScore, isPareto },
  };
}
|
|
236
|
+
|
|
237
|
+
// Human mouse paths are fractal (self-similar at multiple scales) and move at
// highly variable speed; scripted paths are near-straight, uniformly curved
// splines traversed at near-constant velocity.
function _analyseMousePath(events) {
  // Curvature at each interior point: 0 = perfectly straight, 1 = full reversal.
  const curvatures = [];
  for (let i = 1; i + 1 < events.length; i++) {
    const a = events[i - 1];
    const b = events[i];
    const c = events[i + 1];
    const legAB = Math.hypot(b.x - a.x, b.y - a.y);
    const legBC = Math.hypot(c.x - b.x, c.y - b.y);
    const chord = Math.hypot(c.x - a.x, c.y - a.y);
    const pathLen = legAB + legBC;
    if (pathLen > 0) curvatures.push(1 - chord / pathLen);
  }

  if (curvatures.length === 0) {
    return { name: 'mouse_path', aiScore: 0, humanScore: 0.5, weight: 0.1, detail: {} };
  }

  const meanCurv = _mean(curvatures);
  const cvCurv = _cv(curvatures);

  // Human: moderate mean curvature (0.05–0.25) with highly varying turns.
  // AI agent: near-straight segments with suspiciously consistent curvature.
  const isTooSmooth = meanCurv < 0.02 && cvCurv < 0.3;
  const isTooRegular = cvCurv < 0.2 && meanCurv > 0 && meanCurv < 0.05;

  // Velocity profile: human acceleration is bell-shaped (minimum-jerk model);
  // scripted movement interpolates linearly between waypoints.
  const speeds = [];
  for (let i = 1; i < events.length; i++) {
    const dt = events[i].t - events[i - 1].t;
    if (dt > 0) {
      speeds.push(Math.hypot(events[i].x - events[i - 1].x, events[i].y - events[i - 1].y) / dt);
    }
  }
  const speedCV = _cv(speeds);

  // Human speed is highly variable (CV > 0.5); AI speed is consistent (CV < 0.3).
  let aiScore = 0;
  if (isTooSmooth) aiScore += 0.40;
  if (isTooRegular) aiScore += 0.30;
  if (speedCV < 0.25) aiScore += 0.30;
  else if (speedCV < 0.40) aiScore += 0.15;

  return {
    name: 'mouse_path',
    aiScore: Math.min(1, aiScore),
    humanScore: aiScore < 0.2 ? 0.8 : 0.2,
    weight: Math.min(events.length / 100, 1),
    detail: { meanCurvature: +meanCurv.toFixed(4), curvatureCV: +cvCurv.toFixed(3), speedCV: +speedCV.toFixed(3) },
  };
}
|
|
292
|
+
|
|
293
|
+
// Humans mistype and edit; LLM-driven agents emit final text directly.
// Compares the observed Backspace rate against the known human range.
function _analyseCorrectionRate(keyEvents) {
  const total = keyEvents.length;
  // Guard: with no keystrokes the rate is undefined (0/0 → NaN, which would
  // poison the weighted average upstream). Report a neutral zero-weight result.
  if (total === 0) {
    return {
      name: 'correction_rate',
      aiScore: 0,
      humanScore: 0.2,
      weight: 0,
      detail: { correctionRate: 0, backspaces: 0, total: 0 },
    };
  }

  const backspaces = keyEvents.filter((e) => e.key === 'Backspace').length;
  const rate = backspaces / total;

  // Human: 5–18% correction rate (typos, editing).
  // AI agent: <1% (generates correct text directly from LLM output).
  const isTooClean = rate < HUMAN_CORRECTION_MIN;
  const isHuman = rate >= HUMAN_CORRECTION_MIN && rate <= HUMAN_CORRECTION_MAX;

  return {
    name: 'correction_rate',
    aiScore: isTooClean ? 0.75 : 0,
    humanScore: isHuman ? 0.85 : 0.2,
    weight: Math.min(total / 30, 1),
    detail: { correctionRate: +rate.toFixed(3), backspaces, total },
  };
}
|
|
311
|
+
|
|
312
|
+
// Look for the 8–12 Hz involuntary micro-tremor that human hands leave in the
// pointer velocity signal; scripted pointers are silent in that band.
function _analyseTremor(mouseEvents) {
  if (mouseEvents.length < 50) {
    return { name: 'tremor', aiScore: 0, humanScore: 0.5, weight: 0.1, detail: {} };
  }

  // Mean sampling interval → effective sample rate.
  const span = mouseEvents[mouseEvents.length - 1].t - mouseEvents[0].t;
  const meanDt = span / (mouseEvents.length - 1);
  const sampleHz = meanDt > 0 ? 1000 / meanDt : 0;

  // Need roughly 30 Hz sampling to resolve a 12 Hz component at all.
  if (sampleHz < 30) {
    return { name: 'tremor', aiScore: 0, humanScore: 0.5, weight: 0.1, detail: { reason: 'low_sample_rate' } };
  }

  // Horizontal velocity series (px/ms).
  const vx = [];
  for (let i = 1; i < mouseEvents.length; i++) {
    const step = mouseEvents[i].t - mouseEvents[i - 1].t;
    vx.push(step > 0 ? (mouseEvents[i].x - mouseEvents[i - 1].x) / step : 0);
  }

  // Rough spectral estimate on a bounded segment (direct DFT is O(n²), so n ≤ 256).
  const segment = vx.slice(0, Math.min(vx.length, 256));
  const tremorPower = _bandPower(segment, sampleHz, HUMAN_TREMOR_HZ_LO, HUMAN_TREMOR_HZ_HI);
  const totalPower = _bandPower(segment, sampleHz, 0, sampleHz / 2);
  const tremorRatio = totalPower > 0 ? tremorPower / totalPower : 0;

  // Human: some tremor-band power present (ratio > 0.03); AI: essentially none.
  const hasTremor = tremorRatio > 0.03;

  return {
    name: 'tremor',
    aiScore: hasTremor ? 0 : 0.55,
    humanScore: hasTremor ? 0.75 : 0.1,
    weight: 0.6,
    detail: { tremorRatio: +tremorRatio.toFixed(4), sampleHz: +sampleHz.toFixed(1), hasTremor },
  };
}
|
|
351
|
+
|
|
352
|
+
// Human inter-event gaps are heavy-tailed (Pareto/lognormal); agent gaps
// cluster around fixed API latencies, giving a multimodal, low-entropy spread.
function _analyseGapDistribution(gaps) {
  const cv = _cv(gaps);
  const skew = _skewness(gaps);
  const entropy = _shannonEntropy(gaps, 50);

  // Human: high CV (>0.8), right-skewed (skew > 1), decent entropy (>3 bits).
  let humanScore = 0;
  if (cv > 0.8) humanScore += 0.35;
  else if (cv > 0.5) humanScore += 0.15;
  if (skew > 1.0) humanScore += 0.35;
  else if (skew > 0.5) humanScore += 0.15;
  if (entropy > 3.5) humanScore += 0.30;
  else if (entropy > 2.5) humanScore += 0.15;

  // AI: moderate CV, low skew, low entropy (gaps pinned at API latency values).
  let aiScore = 0;
  if (cv < 0.4) aiScore += 0.40;
  if (skew < 0.3) aiScore += 0.30;
  if (entropy < 2.0) aiScore += 0.30;

  return {
    name: 'gap_distribution',
    aiScore: Math.min(1, aiScore),
    humanScore: Math.min(1, humanScore),
    weight: Math.min(gaps.length / 60, 1),
    detail: { cv: +cv.toFixed(3), skewness: +skew.toFixed(3), entropyBits: +entropy.toFixed(2) },
  };
}
|
|
382
|
+
|
|
383
|
+
/* ─── Math helpers ───────────────────────────────────────────────────────── */
|
|
384
|
+
|
|
385
|
+
// Arithmetic mean; 0 for an empty array.
function _mean(arr) {
  if (arr.length === 0) return 0;
  let total = 0;
  for (const v of arr) total += v;
  return total / arr.length;
}
|
|
388
|
+
|
|
389
|
+
// Population standard deviation; 0 for an empty array.
// The original returned NaN on empty input (0/0), which would poison any
// downstream ratio; the mean is inlined so the helper is self-contained.
function _std(arr) {
  const n = arr.length;
  if (n === 0) return 0;
  const m = arr.reduce((s, v) => s + v, 0) / n;
  return Math.sqrt(arr.reduce((s, v) => s + (v - m) ** 2, 0) / n);
}
|
|
393
|
+
|
|
394
|
+
// Coefficient of variation (std / mean); 0 when the mean is not positive.
function _cv(arr) {
  const avg = _mean(arr);
  if (avg <= 0) return 0;
  return _std(arr) / avg;
}
|
|
398
|
+
|
|
399
|
+
// Fisher–Pearson sample skewness (biased estimator); 0 for constant input.
function _skewness(arr) {
  const m = _mean(arr);
  const s = _std(arr);
  if (s === 0) return 0;
  let acc = 0;
  for (const v of arr) {
    acc += ((v - m) / s) ** 3;
  }
  return acc / arr.length;
}
|
|
406
|
+
|
|
407
|
+
// Shannon entropy (bits) of a fixed-width histogram of `values` over
// [min, max]; 0 for empty or constant input.
function _shannonEntropy(values, bins) {
  if (values.length === 0) return 0;
  const lo = Math.min(...values);
  const hi = Math.max(...values);
  if (hi === lo) return 0;

  const width = (hi - lo) / bins;
  const counts = new Array(bins).fill(0);
  for (const v of values) {
    // Clamp so the maximum value lands in the last bin.
    counts[Math.min(bins - 1, Math.floor((v - lo) / width))] += 1;
  }

  const n = values.length;
  let bits = 0;
  for (const c of counts) {
    if (c > 0) {
      const p = c / n;
      bits -= p * Math.log2(p);
    }
  }
  return bits;
}
|
|
425
|
+
|
|
426
|
+
// Fixed-width histogram over [min, max]. The tiny epsilon added to max keeps
// the largest value strictly inside the final bin.
function _histogram(values, bins) {
  if (values.length === 0) return { bins: [], min: 0, max: 0, binWidth: 0 };
  const lo = Math.min(...values);
  const hi = Math.max(...values) + 1e-9;
  const binWidth = (hi - lo) / bins;
  const counts = new Array(bins).fill(0);
  for (const v of values) {
    counts[Math.floor((v - lo) / binWidth)] += 1;
  }
  return { bins: counts, min: lo, max: hi, binWidth };
}
|
|
435
|
+
|
|
436
|
+
// Fraction of all histogram mass whose bin center lies in [lo, hi].
function _histogramPowerInRange(hist, lo, hi) {
  const total = hist.bins.reduce((s, c) => s + c, 0);
  if (total === 0) return 0;
  let inRange = 0;
  hist.bins.forEach((count, i) => {
    const center = hist.min + (i + 0.5) * hist.binWidth;
    if (center >= lo && center <= hi) inRange += count;
  });
  return inRange / total;
}
|
|
446
|
+
|
|
447
|
+
/**
 * Power of `signal` contained in the [fLo, fHi] Hz band, via a direct DFT.
 * O(n²), which is acceptable — callers cap n at 256.
 */
function _bandPower(signal, sampleHz, fLo, fHi) {
  const n = signal.length;
  let power = 0;
  for (let k = 0; k < n / 2; k++) {
    const freq = (k * sampleHz) / n;
    // Only accumulate DFT bins whose frequency falls inside the band.
    if (freq >= fLo && freq <= fHi) {
      let re = 0;
      let im = 0;
      signal.forEach((s, t) => {
        const angle = (2 * Math.PI * k * t) / n;
        re += s * Math.cos(angle);
        im -= s * Math.sin(angle);
      });
      power += (re * re + im * im) / (n * n);
    }
  }
  return power;
}
|
|
464
|
+
|
|
465
|
+
/**
|
|
466
|
+
* @typedef {object} LlmFingerprint
|
|
467
|
+
* @property {number} aiConf 0–1 AI agent confidence
|
|
468
|
+
* @property {number} humanConf 0–1 human confidence
|
|
469
|
+
* @property {object[]} checks per-signal breakdown
|
|
470
|
+
* @property {string} verdict
|
|
471
|
+
* @property {object} dataPoints
|
|
472
|
+
*/
|