aura-glass 2.0.20 → 2.0.22
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +3 -3
- package/dist/index.mjs +3 -3
- package/package.json +4 -3
- package/workers/biometricWorker.js +91 -0
- package/workers/biometricWorker.js.map +7 -0
- package/workers/eyeTrackingWorker.js +52 -0
- package/workers/eyeTrackingWorker.js.map +7 -0
- package/workers/predictiveWorker.js +136 -0
- package/workers/predictiveWorker.js.map +7 -0
- package/workers/biometricWorker.ts +0 -162
- package/workers/eyeTrackingWorker.ts +0 -89
- package/workers/predictiveWorker.ts +0 -218
package/dist/index.js
CHANGED
|
@@ -119946,11 +119946,11 @@ class ConsciousnessResourcePool {
|
|
|
119946
119946
|
if (typeof Worker !== "undefined") {
|
|
119947
119947
|
try {
|
|
119948
119948
|
// Create workers with error handling to prevent build failures
|
|
119949
|
-
const eyeWorker = new Worker(new URL("../workers/eyeTrackingWorker.
|
|
119949
|
+
const eyeWorker = new Worker(new URL("../workers/eyeTrackingWorker.js", (typeof document === 'undefined' ? require('u' + 'rl').pathToFileURL(__filename).href : (_documentCurrentScript && _documentCurrentScript.tagName.toUpperCase() === 'SCRIPT' && _documentCurrentScript.src || new URL('index.js', document.baseURI).href))));
|
|
119950
119950
|
this.eyeTrackingWorkers.push(eyeWorker);
|
|
119951
|
-
const bioWorker = new Worker(new URL("../workers/biometricWorker.
|
|
119951
|
+
const bioWorker = new Worker(new URL("../workers/biometricWorker.js", (typeof document === 'undefined' ? require('u' + 'rl').pathToFileURL(__filename).href : (_documentCurrentScript && _documentCurrentScript.tagName.toUpperCase() === 'SCRIPT' && _documentCurrentScript.src || new URL('index.js', document.baseURI).href))));
|
|
119952
119952
|
this.biometricProcessors.push(bioWorker);
|
|
119953
|
-
const predWorker = new Worker(new URL("../workers/predictiveWorker.
|
|
119953
|
+
const predWorker = new Worker(new URL("../workers/predictiveWorker.js", (typeof document === 'undefined' ? require('u' + 'rl').pathToFileURL(__filename).href : (_documentCurrentScript && _documentCurrentScript.tagName.toUpperCase() === 'SCRIPT' && _documentCurrentScript.src || new URL('index.js', document.baseURI).href))));
|
|
119954
119954
|
this.predictiveAnalyzers.push(predWorker);
|
|
119955
119955
|
} catch (error) {
|
|
119956
119956
|
// Workers not available in this environment (e.g., SSR, old browsers)
|
package/dist/index.mjs
CHANGED
|
@@ -119922,11 +119922,11 @@ class ConsciousnessResourcePool {
|
|
|
119922
119922
|
if (typeof Worker !== "undefined") {
|
|
119923
119923
|
try {
|
|
119924
119924
|
// Create workers with error handling to prevent build failures
|
|
119925
|
-
const eyeWorker = new Worker(new URL("../workers/eyeTrackingWorker.
|
|
119925
|
+
const eyeWorker = new Worker(new URL("../workers/eyeTrackingWorker.js", import.meta.url));
|
|
119926
119926
|
this.eyeTrackingWorkers.push(eyeWorker);
|
|
119927
|
-
const bioWorker = new Worker(new URL("../workers/biometricWorker.
|
|
119927
|
+
const bioWorker = new Worker(new URL("../workers/biometricWorker.js", import.meta.url));
|
|
119928
119928
|
this.biometricProcessors.push(bioWorker);
|
|
119929
|
-
const predWorker = new Worker(new URL("../workers/predictiveWorker.
|
|
119929
|
+
const predWorker = new Worker(new URL("../workers/predictiveWorker.js", import.meta.url));
|
|
119930
119930
|
this.predictiveAnalyzers.push(predWorker);
|
|
119931
119931
|
} catch (error) {
|
|
119932
119932
|
// Workers not available in this environment (e.g., SSR, old browsers)
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "aura-glass",
|
|
3
|
-
"version": "2.0.
|
|
3
|
+
"version": "2.0.22",
|
|
4
4
|
"description": "A comprehensive glassmorphism design system for React applications with 142+ production-ready components",
|
|
5
5
|
"main": "dist/index.js",
|
|
6
6
|
"module": "dist/index.mjs",
|
|
@@ -27,8 +27,8 @@
|
|
|
27
27
|
],
|
|
28
28
|
"scripts": {
|
|
29
29
|
"dev": "rollup -c --watch",
|
|
30
|
-
"
|
|
31
|
-
"build": "npm run
|
|
30
|
+
"build:workers": "node scripts/build-workers.js",
|
|
31
|
+
"build": "npm run build:workers && rollup -c",
|
|
32
32
|
"build:server": "tsc --project tsconfig.server.json",
|
|
33
33
|
"typecheck": "tsc --noEmit",
|
|
34
34
|
"lint": "eslint src --fix",
|
|
@@ -162,6 +162,7 @@
|
|
|
162
162
|
"@typescript-eslint/parser": "^6.0.0",
|
|
163
163
|
"@vitejs/plugin-react": "^5.0.2",
|
|
164
164
|
"bundlesize": "^0.18.0",
|
|
165
|
+
"esbuild": "^0.25.12",
|
|
165
166
|
"eslint": "^8.45.0",
|
|
166
167
|
"eslint-plugin-jsx-a11y": "^6.7.0",
|
|
167
168
|
"eslint-plugin-react": "^7.33.0",
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
(() => {
|
|
3
|
+
function processBiometricBatch(batch) {
|
|
4
|
+
if (batch.length === 0) {
|
|
5
|
+
return {
|
|
6
|
+
stressLevel: 0,
|
|
7
|
+
engagementLevel: 0,
|
|
8
|
+
emotionalState: "neutral",
|
|
9
|
+
recommendations: [],
|
|
10
|
+
timestamp: Date.now()
|
|
11
|
+
};
|
|
12
|
+
}
|
|
13
|
+
const avgHeartRate = batch.reduce((sum, d) => sum + (d.heartRate || 0), 0) / batch.length;
|
|
14
|
+
const avgHRV = batch.reduce((sum, d) => sum + (d.heartRateVariability || 0), 0) / batch.length;
|
|
15
|
+
const avgSkinConductance = batch.reduce((sum, d) => sum + (d.skinConductance || 0), 0) / batch.length;
|
|
16
|
+
let stressLevel = 0;
|
|
17
|
+
if (avgHeartRate > 80 && avgHRV < 50) {
|
|
18
|
+
stressLevel += 0.4;
|
|
19
|
+
}
|
|
20
|
+
if (avgSkinConductance > 0.5) {
|
|
21
|
+
stressLevel += 0.3;
|
|
22
|
+
}
|
|
23
|
+
const stressedExpressions = batch.filter(
|
|
24
|
+
(d) => d.facialExpression === "frown" || d.facialExpression === "tense"
|
|
25
|
+
).length;
|
|
26
|
+
stressLevel += stressedExpressions / batch.length * 0.3;
|
|
27
|
+
stressLevel = Math.min(1, stressLevel);
|
|
28
|
+
let engagementLevel = 0;
|
|
29
|
+
if (avgHeartRate > 70 && avgHeartRate < 90 && avgHRV > 50) {
|
|
30
|
+
engagementLevel += 0.5;
|
|
31
|
+
}
|
|
32
|
+
const focusedExpressions = batch.filter(
|
|
33
|
+
(d) => d.facialExpression === "focused" || d.facialExpression === "smile"
|
|
34
|
+
).length;
|
|
35
|
+
engagementLevel += focusedExpressions / batch.length * 0.5;
|
|
36
|
+
engagementLevel = Math.min(1, engagementLevel);
|
|
37
|
+
let emotionalState = "neutral";
|
|
38
|
+
if (stressLevel > 0.7) {
|
|
39
|
+
emotionalState = "stressed";
|
|
40
|
+
} else if (engagementLevel > 0.7) {
|
|
41
|
+
emotionalState = "focused";
|
|
42
|
+
} else if (avgHeartRate > 90) {
|
|
43
|
+
emotionalState = "excited";
|
|
44
|
+
} else if (stressLevel < 0.3 && avgHeartRate < 70) {
|
|
45
|
+
emotionalState = "calm";
|
|
46
|
+
}
|
|
47
|
+
const recommendations = [];
|
|
48
|
+
if (stressLevel > 0.6) {
|
|
49
|
+
recommendations.push("Consider taking a break");
|
|
50
|
+
recommendations.push("Try deep breathing exercises");
|
|
51
|
+
}
|
|
52
|
+
if (engagementLevel < 0.4) {
|
|
53
|
+
recommendations.push("Content may not be engaging enough");
|
|
54
|
+
recommendations.push("Consider adding interactive elements");
|
|
55
|
+
}
|
|
56
|
+
if (avgHeartRate < 60 && engagementLevel < 0.3) {
|
|
57
|
+
recommendations.push("User may be disengaged or fatigued");
|
|
58
|
+
}
|
|
59
|
+
return {
|
|
60
|
+
stressLevel,
|
|
61
|
+
engagementLevel,
|
|
62
|
+
emotionalState,
|
|
63
|
+
recommendations,
|
|
64
|
+
timestamp: Date.now()
|
|
65
|
+
};
|
|
66
|
+
}
|
|
67
|
+
function analyzeSingleDataPoint(dataPoint) {
|
|
68
|
+
return processBiometricBatch([dataPoint]);
|
|
69
|
+
}
|
|
70
|
+
self.addEventListener("message", (event) => {
|
|
71
|
+
const { command, data } = event.data;
|
|
72
|
+
switch (command) {
|
|
73
|
+
case "processBatch":
|
|
74
|
+
if (Array.isArray(data)) {
|
|
75
|
+
const analysis = processBiometricBatch(data);
|
|
76
|
+
self.postMessage(analysis);
|
|
77
|
+
}
|
|
78
|
+
break;
|
|
79
|
+
case "analyze":
|
|
80
|
+
if (data && !Array.isArray(data)) {
|
|
81
|
+
const analysis = analyzeSingleDataPoint(data);
|
|
82
|
+
self.postMessage(analysis);
|
|
83
|
+
}
|
|
84
|
+
break;
|
|
85
|
+
default:
|
|
86
|
+
self.postMessage({ error: "Unknown command" });
|
|
87
|
+
}
|
|
88
|
+
});
|
|
89
|
+
self.postMessage({ status: "ready" });
|
|
90
|
+
})();
|
|
91
|
+
//# sourceMappingURL=biometricWorker.js.map
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
{
|
|
2
|
+
"version": 3,
|
|
3
|
+
"sources": ["../src/workers/biometricWorker.ts"],
|
|
4
|
+
"sourcesContent": ["/**\n * Biometric Processing Web Worker\n * Handles batch processing of biometric data (heart rate, stress, engagement)\n */\n\ninterface BiometricMessage {\n command: \"processBatch\" | \"analyze\";\n data?: BiometricDataPoint[] | BiometricDataPoint;\n}\n\ninterface BiometricDataPoint {\n heartRate?: number;\n heartRateVariability?: number;\n skinConductance?: number;\n facialExpression?: string;\n timestamp: number;\n}\n\ninterface BiometricAnalysis {\n stressLevel: number; // 0-1\n engagementLevel: number; // 0-1\n emotionalState: \"calm\" | \"focused\" | \"stressed\" | \"excited\" | \"neutral\";\n recommendations: string[];\n timestamp: number;\n}\n\n/**\n * Analyze batch of biometric data\n */\nfunction processBiometricBatch(batch: BiometricDataPoint[]): BiometricAnalysis {\n if (batch.length === 0) {\n return {\n stressLevel: 0,\n engagementLevel: 0,\n emotionalState: \"neutral\",\n recommendations: [],\n timestamp: Date.now(),\n };\n }\n\n // Calculate average metrics\n const avgHeartRate =\n batch.reduce((sum, d) => sum + (d.heartRate || 0), 0) / batch.length;\n\n const avgHRV =\n batch.reduce((sum, d) => sum + (d.heartRateVariability || 0), 0) /\n batch.length;\n\n const avgSkinConductance =\n batch.reduce((sum, d) => sum + (d.skinConductance || 0), 0) / batch.length;\n\n // Analyze stress level (simplified algorithm)\n // In production, use ML model trained on biometric data\n let stressLevel = 0;\n\n // High heart rate + low HRV = stress\n if (avgHeartRate > 80 && avgHRV < 50) {\n stressLevel += 0.4;\n }\n\n // High skin conductance = stress/arousal\n if (avgSkinConductance > 0.5) {\n stressLevel += 0.3;\n }\n\n // Facial expression analysis\n const stressedExpressions = batch.filter(\n (d) => d.facialExpression === \"frown\" || d.facialExpression === \"tense\"\n ).length;\n stressLevel += (stressedExpressions / batch.length) * 0.3;\n\n stressLevel = Math.min(1, stressLevel);\n\n // Calculate engagement level\n // Higher heart rate + 
higher HRV = engagement\n let engagementLevel = 0;\n\n if (avgHeartRate > 70 && avgHeartRate < 90 && avgHRV > 50) {\n engagementLevel += 0.5;\n }\n\n const focusedExpressions = batch.filter(\n (d) => d.facialExpression === \"focused\" || d.facialExpression === \"smile\"\n ).length;\n engagementLevel += (focusedExpressions / batch.length) * 0.5;\n\n engagementLevel = Math.min(1, engagementLevel);\n\n // Determine emotional state\n let emotionalState: BiometricAnalysis[\"emotionalState\"] = \"neutral\";\n\n if (stressLevel > 0.7) {\n emotionalState = \"stressed\";\n } else if (engagementLevel > 0.7) {\n emotionalState = \"focused\";\n } else if (avgHeartRate > 90) {\n emotionalState = \"excited\";\n } else if (stressLevel < 0.3 && avgHeartRate < 70) {\n emotionalState = \"calm\";\n }\n\n // Generate recommendations\n const recommendations: string[] = [];\n\n if (stressLevel > 0.6) {\n recommendations.push(\"Consider taking a break\");\n recommendations.push(\"Try deep breathing exercises\");\n }\n\n if (engagementLevel < 0.4) {\n recommendations.push(\"Content may not be engaging enough\");\n recommendations.push(\"Consider adding interactive elements\");\n }\n\n if (avgHeartRate < 60 && engagementLevel < 0.3) {\n recommendations.push(\"User may be disengaged or fatigued\");\n }\n\n return {\n stressLevel,\n engagementLevel,\n emotionalState,\n recommendations,\n timestamp: Date.now(),\n };\n}\n\n/**\n * Analyze single biometric data point\n */\nfunction analyzeSingleDataPoint(\n dataPoint: BiometricDataPoint\n): BiometricAnalysis {\n return processBiometricBatch([dataPoint]);\n}\n\n// Listen for messages from main thread\nself.addEventListener(\"message\", (event: MessageEvent<BiometricMessage>) => {\n const { command, data } = event.data;\n\n switch (command) {\n case \"processBatch\":\n if (Array.isArray(data)) {\n const analysis = processBiometricBatch(data);\n self.postMessage(analysis);\n }\n break;\n\n case \"analyze\":\n if (data && !Array.isArray(data)) {\n 
const analysis = analyzeSingleDataPoint(data);\n self.postMessage(analysis);\n }\n break;\n\n default:\n self.postMessage({ error: \"Unknown command\" });\n }\n});\n\n// Notify main thread that worker is ready\nself.postMessage({ status: \"ready\" });\n"],
|
|
5
|
+
"mappings": ";;AA6BA,WAAS,sBAAsB,OAAgD;AAC7E,QAAI,MAAM,WAAW,GAAG;AACtB,aAAO;AAAA,QACL,aAAa;AAAA,QACb,iBAAiB;AAAA,QACjB,gBAAgB;AAAA,QAChB,iBAAiB,CAAC;AAAA,QAClB,WAAW,KAAK,IAAI;AAAA,MACtB;AAAA,IACF;AAGA,UAAM,eACJ,MAAM,OAAO,CAAC,KAAK,MAAM,OAAO,EAAE,aAAa,IAAI,CAAC,IAAI,MAAM;AAEhE,UAAM,SACJ,MAAM,OAAO,CAAC,KAAK,MAAM,OAAO,EAAE,wBAAwB,IAAI,CAAC,IAC/D,MAAM;AAER,UAAM,qBACJ,MAAM,OAAO,CAAC,KAAK,MAAM,OAAO,EAAE,mBAAmB,IAAI,CAAC,IAAI,MAAM;AAItE,QAAI,cAAc;AAGlB,QAAI,eAAe,MAAM,SAAS,IAAI;AACpC,qBAAe;AAAA,IACjB;AAGA,QAAI,qBAAqB,KAAK;AAC5B,qBAAe;AAAA,IACjB;AAGA,UAAM,sBAAsB,MAAM;AAAA,MAChC,CAAC,MAAM,EAAE,qBAAqB,WAAW,EAAE,qBAAqB;AAAA,IAClE,EAAE;AACF,mBAAgB,sBAAsB,MAAM,SAAU;AAEtD,kBAAc,KAAK,IAAI,GAAG,WAAW;AAIrC,QAAI,kBAAkB;AAEtB,QAAI,eAAe,MAAM,eAAe,MAAM,SAAS,IAAI;AACzD,yBAAmB;AAAA,IACrB;AAEA,UAAM,qBAAqB,MAAM;AAAA,MAC/B,CAAC,MAAM,EAAE,qBAAqB,aAAa,EAAE,qBAAqB;AAAA,IACpE,EAAE;AACF,uBAAoB,qBAAqB,MAAM,SAAU;AAEzD,sBAAkB,KAAK,IAAI,GAAG,eAAe;AAG7C,QAAI,iBAAsD;AAE1D,QAAI,cAAc,KAAK;AACrB,uBAAiB;AAAA,IACnB,WAAW,kBAAkB,KAAK;AAChC,uBAAiB;AAAA,IACnB,WAAW,eAAe,IAAI;AAC5B,uBAAiB;AAAA,IACnB,WAAW,cAAc,OAAO,eAAe,IAAI;AACjD,uBAAiB;AAAA,IACnB;AAGA,UAAM,kBAA4B,CAAC;AAEnC,QAAI,cAAc,KAAK;AACrB,sBAAgB,KAAK,yBAAyB;AAC9C,sBAAgB,KAAK,8BAA8B;AAAA,IACrD;AAEA,QAAI,kBAAkB,KAAK;AACzB,sBAAgB,KAAK,oCAAoC;AACzD,sBAAgB,KAAK,sCAAsC;AAAA,IAC7D;AAEA,QAAI,eAAe,MAAM,kBAAkB,KAAK;AAC9C,sBAAgB,KAAK,oCAAoC;AAAA,IAC3D;AAEA,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,WAAW,KAAK,IAAI;AAAA,IACtB;AAAA,EACF;AAKA,WAAS,uBACP,WACmB;AACnB,WAAO,sBAAsB,CAAC,SAAS,CAAC;AAAA,EAC1C;AAGA,OAAK,iBAAiB,WAAW,CAAC,UAA0C;AAC1E,UAAM,EAAE,SAAS,KAAK,IAAI,MAAM;AAEhC,YAAQ,SAAS;AAAA,MACf,KAAK;AACH,YAAI,MAAM,QAAQ,IAAI,GAAG;AACvB,gBAAM,WAAW,sBAAsB,IAAI;AAC3C,eAAK,YAAY,QAAQ;AAAA,QAC3B;AACA;AAAA,MAEF,KAAK;AACH,YAAI,QAAQ,CAAC,MAAM,QAAQ,IAAI,GAAG;AAChC,gBAAM,WAAW,uBAAuB,IAAI;AAC5C,eAAK,YAAY,QAAQ;AAAA,QAC3B;AACA;AAAA,MAEF;AACE,aAAK,YAAY,EAAE,OAAO,kBAAkB,CAAC;AAAA,IACjD;AAAA,EACF,CAAC;AAGD,OAAK,YAAY,EAAE,QAAQ,QAAQ,CAAC;",
|
|
6
|
+
"names": []
|
|
7
|
+
}
|
|
@@ -0,0 +1,52 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
(() => {
|
|
3
|
+
let isTracking = false;
|
|
4
|
+
let processingInterval = null;
|
|
5
|
+
function processEyeTracking(frame) {
|
|
6
|
+
const timestamp = Date.now();
|
|
7
|
+
const baseX = 0.5 + Math.sin(timestamp / 1e3) * 0.2;
|
|
8
|
+
const baseY = 0.5 + Math.cos(timestamp / 1500) * 0.2;
|
|
9
|
+
const x = Math.max(0, Math.min(1, baseX + (Math.random() - 0.5) * 0.05));
|
|
10
|
+
const y = Math.max(0, Math.min(1, baseY + (Math.random() - 0.5) * 0.05));
|
|
11
|
+
return {
|
|
12
|
+
x,
|
|
13
|
+
y,
|
|
14
|
+
confidence: 0.85 + Math.random() * 0.15,
|
|
15
|
+
// 85-100% confidence
|
|
16
|
+
timestamp
|
|
17
|
+
};
|
|
18
|
+
}
|
|
19
|
+
self.addEventListener("message", (event) => {
|
|
20
|
+
const { command, data } = event.data;
|
|
21
|
+
switch (command) {
|
|
22
|
+
case "start":
|
|
23
|
+
isTracking = true;
|
|
24
|
+
processingInterval = setInterval(() => {
|
|
25
|
+
if (isTracking) {
|
|
26
|
+
const gazeData = processEyeTracking();
|
|
27
|
+
self.postMessage(gazeData);
|
|
28
|
+
}
|
|
29
|
+
}, 16.67);
|
|
30
|
+
self.postMessage({ status: "started" });
|
|
31
|
+
break;
|
|
32
|
+
case "stop":
|
|
33
|
+
isTracking = false;
|
|
34
|
+
if (processingInterval) {
|
|
35
|
+
clearInterval(processingInterval);
|
|
36
|
+
processingInterval = null;
|
|
37
|
+
}
|
|
38
|
+
self.postMessage({ status: "stopped" });
|
|
39
|
+
break;
|
|
40
|
+
case "process":
|
|
41
|
+
if (data?.videoFrame) {
|
|
42
|
+
const gazeData = processEyeTracking(data.videoFrame);
|
|
43
|
+
self.postMessage(gazeData);
|
|
44
|
+
}
|
|
45
|
+
break;
|
|
46
|
+
default:
|
|
47
|
+
self.postMessage({ error: "Unknown command" });
|
|
48
|
+
}
|
|
49
|
+
});
|
|
50
|
+
self.postMessage({ status: "ready" });
|
|
51
|
+
})();
|
|
52
|
+
//# sourceMappingURL=eyeTrackingWorker.js.map
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
{
|
|
2
|
+
"version": 3,
|
|
3
|
+
"sources": ["../src/workers/eyeTrackingWorker.ts"],
|
|
4
|
+
"sourcesContent": ["/**\n * Eye Tracking Web Worker\n * Handles computationally intensive eye tracking calculations off the main thread\n */\n\ninterface EyeTrackingMessage {\n command: \"start\" | \"stop\" | \"process\";\n data?: {\n videoFrame?: ImageData;\n timestamp?: number;\n };\n}\n\ninterface GazeData {\n x: number;\n y: number;\n confidence: number;\n timestamp: number;\n}\n\nlet isTracking = false;\nlet processingInterval: ReturnType<typeof setInterval> | null = null;\n\n// Simplified eye tracking algorithm (placeholder for actual ML model)\nfunction processEyeTracking(frame?: ImageData): GazeData {\n // In production, this would use TensorFlow.js or similar\n // For now, return mock data with realistic patterns\n const timestamp = Date.now();\n\n // Simulate natural eye movement patterns\n const baseX = 0.5 + Math.sin(timestamp / 1000) * 0.2;\n const baseY = 0.5 + Math.cos(timestamp / 1500) * 0.2;\n\n // Add small random variations (saccades)\n const x = Math.max(0, Math.min(1, baseX + (Math.random() - 0.5) * 0.05));\n const y = Math.max(0, Math.min(1, baseY + (Math.random() - 0.5) * 0.05));\n\n return {\n x,\n y,\n confidence: 0.85 + Math.random() * 0.15, // 85-100% confidence\n timestamp,\n };\n}\n\n// Listen for messages from main thread\nself.addEventListener(\"message\", (event: MessageEvent<EyeTrackingMessage>) => {\n const { command, data } = event.data;\n\n switch (command) {\n case \"start\":\n isTracking = true;\n\n // Process eye tracking at 60fps\n processingInterval = setInterval(() => {\n if (isTracking) {\n const gazeData = processEyeTracking();\n self.postMessage(gazeData);\n }\n }, 16.67); // ~60fps\n\n self.postMessage({ status: \"started\" });\n break;\n\n case \"stop\":\n isTracking = false;\n\n if (processingInterval) {\n clearInterval(processingInterval);\n processingInterval = null;\n }\n\n self.postMessage({ status: \"stopped\" });\n break;\n\n case \"process\":\n if (data?.videoFrame) {\n const gazeData = 
processEyeTracking(data.videoFrame);\n self.postMessage(gazeData);\n }\n break;\n\n default:\n self.postMessage({ error: \"Unknown command\" });\n }\n});\n\n// Notify main thread that worker is ready\nself.postMessage({ status: \"ready\" });\n"],
|
|
5
|
+
"mappings": ";;AAoBA,MAAI,aAAa;AACjB,MAAI,qBAA4D;AAGhE,WAAS,mBAAmB,OAA6B;AAGvD,UAAM,YAAY,KAAK,IAAI;AAG3B,UAAM,QAAQ,MAAM,KAAK,IAAI,YAAY,GAAI,IAAI;AACjD,UAAM,QAAQ,MAAM,KAAK,IAAI,YAAY,IAAI,IAAI;AAGjD,UAAM,IAAI,KAAK,IAAI,GAAG,KAAK,IAAI,GAAG,SAAS,KAAK,OAAO,IAAI,OAAO,IAAI,CAAC;AACvE,UAAM,IAAI,KAAK,IAAI,GAAG,KAAK,IAAI,GAAG,SAAS,KAAK,OAAO,IAAI,OAAO,IAAI,CAAC;AAEvE,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA,YAAY,OAAO,KAAK,OAAO,IAAI;AAAA;AAAA,MACnC;AAAA,IACF;AAAA,EACF;AAGA,OAAK,iBAAiB,WAAW,CAAC,UAA4C;AAC5E,UAAM,EAAE,SAAS,KAAK,IAAI,MAAM;AAEhC,YAAQ,SAAS;AAAA,MACf,KAAK;AACH,qBAAa;AAGb,6BAAqB,YAAY,MAAM;AACrC,cAAI,YAAY;AACd,kBAAM,WAAW,mBAAmB;AACpC,iBAAK,YAAY,QAAQ;AAAA,UAC3B;AAAA,QACF,GAAG,KAAK;AAER,aAAK,YAAY,EAAE,QAAQ,UAAU,CAAC;AACtC;AAAA,MAEF,KAAK;AACH,qBAAa;AAEb,YAAI,oBAAoB;AACtB,wBAAc,kBAAkB;AAChC,+BAAqB;AAAA,QACvB;AAEA,aAAK,YAAY,EAAE,QAAQ,UAAU,CAAC;AACtC;AAAA,MAEF,KAAK;AACH,YAAI,MAAM,YAAY;AACpB,gBAAM,WAAW,mBAAmB,KAAK,UAAU;AACnD,eAAK,YAAY,QAAQ;AAAA,QAC3B;AACA;AAAA,MAEF;AACE,aAAK,YAAY,EAAE,OAAO,kBAAkB,CAAC;AAAA,IACjD;AAAA,EACF,CAAC;AAGD,OAAK,YAAY,EAAE,QAAQ,QAAQ,CAAC;",
|
|
6
|
+
"names": []
|
|
7
|
+
}
|
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
(() => {
|
|
3
|
+
function analyzePatternsAndPredict(patterns) {
|
|
4
|
+
if (patterns.length === 0) {
|
|
5
|
+
return {
|
|
6
|
+
predictions: [],
|
|
7
|
+
confidence: 0,
|
|
8
|
+
patterns: {
|
|
9
|
+
mostCommonAction: "none",
|
|
10
|
+
averageSessionDuration: 0,
|
|
11
|
+
engagementScore: 0
|
|
12
|
+
},
|
|
13
|
+
recommendations: []
|
|
14
|
+
};
|
|
15
|
+
}
|
|
16
|
+
const actionCounts = /* @__PURE__ */ new Map();
|
|
17
|
+
const actionSequences = [];
|
|
18
|
+
patterns.forEach((pattern) => {
|
|
19
|
+
const action = `${pattern.type}:${pattern.target || "unknown"}`;
|
|
20
|
+
actionCounts.set(action, (actionCounts.get(action) || 0) + 1);
|
|
21
|
+
actionSequences.push(action);
|
|
22
|
+
});
|
|
23
|
+
let mostCommonAction = "none";
|
|
24
|
+
let maxCount = 0;
|
|
25
|
+
actionCounts.forEach((count, action) => {
|
|
26
|
+
if (count > maxCount) {
|
|
27
|
+
maxCount = count;
|
|
28
|
+
mostCommonAction = action;
|
|
29
|
+
}
|
|
30
|
+
});
|
|
31
|
+
const sessionStart = patterns[0].timestamp;
|
|
32
|
+
const sessionEnd = patterns[patterns.length - 1].timestamp;
|
|
33
|
+
const averageSessionDuration = sessionEnd - sessionStart;
|
|
34
|
+
const interactionRate = patterns.length / (averageSessionDuration / 1e3) * 60;
|
|
35
|
+
const engagementScore = Math.min(1, interactionRate / 20);
|
|
36
|
+
const predictions = [];
|
|
37
|
+
const recentPatterns = patterns.slice(-5);
|
|
38
|
+
const recentSequence = recentPatterns.map(
|
|
39
|
+
(p) => `${p.type}:${p.target || "unknown"}`
|
|
40
|
+
);
|
|
41
|
+
const sequenceMap = /* @__PURE__ */ new Map();
|
|
42
|
+
for (let i = 0; i < actionSequences.length - 1; i++) {
|
|
43
|
+
const current = actionSequences[i];
|
|
44
|
+
const next = actionSequences[i + 1];
|
|
45
|
+
if (!sequenceMap.has(current)) {
|
|
46
|
+
sequenceMap.set(current, /* @__PURE__ */ new Map());
|
|
47
|
+
}
|
|
48
|
+
const nextMap = sequenceMap.get(current);
|
|
49
|
+
nextMap.set(next, (nextMap.get(next) || 0) + 1);
|
|
50
|
+
}
|
|
51
|
+
const lastAction = actionSequences[actionSequences.length - 1];
|
|
52
|
+
const nextActionMap = sequenceMap.get(lastAction);
|
|
53
|
+
if (nextActionMap) {
|
|
54
|
+
const totalNext = Array.from(nextActionMap.values()).reduce(
|
|
55
|
+
(sum, count) => sum + count,
|
|
56
|
+
0
|
|
57
|
+
);
|
|
58
|
+
nextActionMap.forEach((count, action) => {
|
|
59
|
+
const confidence = count / totalNext;
|
|
60
|
+
if (confidence > 0.2) {
|
|
61
|
+
predictions.push({
|
|
62
|
+
action: action.replace(":", " on "),
|
|
63
|
+
confidence,
|
|
64
|
+
suggestedOptimization: generateOptimization(action, confidence)
|
|
65
|
+
});
|
|
66
|
+
}
|
|
67
|
+
});
|
|
68
|
+
}
|
|
69
|
+
predictions.sort((a, b) => b.confidence - a.confidence);
|
|
70
|
+
const recommendations = [];
|
|
71
|
+
if (engagementScore < 0.3) {
|
|
72
|
+
recommendations.push(
|
|
73
|
+
"Low engagement detected. Consider adding interactive elements."
|
|
74
|
+
);
|
|
75
|
+
}
|
|
76
|
+
if (engagementScore > 0.8) {
|
|
77
|
+
recommendations.push(
|
|
78
|
+
"High engagement! Current UX patterns are working well."
|
|
79
|
+
);
|
|
80
|
+
}
|
|
81
|
+
const repetitiveActions = Array.from(actionCounts.entries()).filter(
|
|
82
|
+
([_, count]) => count > patterns.length * 0.3
|
|
83
|
+
);
|
|
84
|
+
if (repetitiveActions.length > 0) {
|
|
85
|
+
recommendations.push(
|
|
86
|
+
`Repetitive action detected: ${repetitiveActions[0][0]}. User may be stuck or confused.`
|
|
87
|
+
);
|
|
88
|
+
}
|
|
89
|
+
const scrollCount = patterns.filter((p) => p.type === "scroll").length;
|
|
90
|
+
if (scrollCount > patterns.length * 0.5) {
|
|
91
|
+
recommendations.push(
|
|
92
|
+
"High scroll rate. Consider improving content visibility or adding sticky navigation."
|
|
93
|
+
);
|
|
94
|
+
}
|
|
95
|
+
return {
|
|
96
|
+
predictions: predictions.slice(0, 3),
|
|
97
|
+
// Top 3 predictions
|
|
98
|
+
confidence: predictions.length > 0 ? predictions[0].confidence : 0,
|
|
99
|
+
patterns: {
|
|
100
|
+
mostCommonAction,
|
|
101
|
+
averageSessionDuration,
|
|
102
|
+
engagementScore
|
|
103
|
+
},
|
|
104
|
+
recommendations
|
|
105
|
+
};
|
|
106
|
+
}
|
|
107
|
+
function generateOptimization(action, confidence) {
|
|
108
|
+
if (confidence > 0.7) {
|
|
109
|
+
return `Preload resources for "${action}" (high confidence)`;
|
|
110
|
+
} else if (confidence > 0.5) {
|
|
111
|
+
return `Consider prefetching data for "${action}"`;
|
|
112
|
+
} else {
|
|
113
|
+
return `Monitor pattern for "${action}"`;
|
|
114
|
+
}
|
|
115
|
+
}
|
|
116
|
+
self.addEventListener("message", (event) => {
|
|
117
|
+
const { patterns } = event.data;
|
|
118
|
+
if (patterns && Array.isArray(patterns)) {
|
|
119
|
+
const result = analyzePatternsAndPredict(patterns);
|
|
120
|
+
self.postMessage(result);
|
|
121
|
+
} else {
|
|
122
|
+
self.postMessage({
|
|
123
|
+
predictions: [],
|
|
124
|
+
confidence: 0,
|
|
125
|
+
patterns: {
|
|
126
|
+
mostCommonAction: "none",
|
|
127
|
+
averageSessionDuration: 0,
|
|
128
|
+
engagementScore: 0
|
|
129
|
+
},
|
|
130
|
+
recommendations: ["No pattern data provided"]
|
|
131
|
+
});
|
|
132
|
+
}
|
|
133
|
+
});
|
|
134
|
+
self.postMessage({ status: "ready" });
|
|
135
|
+
})();
|
|
136
|
+
//# sourceMappingURL=predictiveWorker.js.map
|
|
@@ -0,0 +1,7 @@
|
|
|
1
|
+
{
|
|
2
|
+
"version": 3,
|
|
3
|
+
"sources": ["../src/workers/predictiveWorker.ts"],
|
|
4
|
+
"sourcesContent": ["/**\n * Predictive Analysis Web Worker\n * Analyzes user behavior patterns to predict future actions and optimize UX\n */\n\ninterface PredictiveMessage {\n command?: string;\n patterns?: UserInteractionPattern[];\n}\n\ninterface UserInteractionPattern {\n type: \"click\" | \"hover\" | \"scroll\" | \"focus\" | \"gesture\";\n target?: string;\n position?: { x: number; y: number };\n timestamp: number;\n duration?: number;\n}\n\ninterface Prediction {\n action: string;\n confidence: number;\n suggestedOptimization?: string;\n}\n\ninterface PredictiveAnalysisResult {\n predictions: Prediction[];\n confidence: number;\n patterns: {\n mostCommonAction: string;\n averageSessionDuration: number;\n engagementScore: number;\n };\n recommendations: string[];\n}\n\n/**\n * Analyze user interaction patterns and make predictions\n */\nfunction analyzePatternsAndPredict(\n patterns: UserInteractionPattern[]\n): PredictiveAnalysisResult {\n if (patterns.length === 0) {\n return {\n predictions: [],\n confidence: 0,\n patterns: {\n mostCommonAction: \"none\",\n averageSessionDuration: 0,\n engagementScore: 0,\n },\n recommendations: [],\n };\n }\n\n // Calculate pattern statistics\n const actionCounts = new Map<string, number>();\n const actionSequences: string[] = [];\n\n patterns.forEach((pattern) => {\n const action = `${pattern.type}:${pattern.target || \"unknown\"}`;\n actionCounts.set(action, (actionCounts.get(action) || 0) + 1);\n actionSequences.push(action);\n });\n\n // Find most common action\n let mostCommonAction = \"none\";\n let maxCount = 0;\n\n actionCounts.forEach((count, action) => {\n if (count > maxCount) {\n maxCount = count;\n mostCommonAction = action;\n }\n });\n\n // Calculate session metrics\n const sessionStart = patterns[0].timestamp;\n const sessionEnd = patterns[patterns.length - 1].timestamp;\n const averageSessionDuration = sessionEnd - sessionStart;\n\n // Calculate engagement score based on interaction density\n const 
interactionRate =\n (patterns.length / (averageSessionDuration / 1000)) * 60; // interactions per minute\n const engagementScore = Math.min(1, interactionRate / 20); // normalized to 0-1\n\n // Sequence analysis for predictions\n const predictions: Prediction[] = [];\n\n // Analyze last 5 interactions to predict next action\n const recentPatterns = patterns.slice(-5);\n const recentSequence = recentPatterns.map(\n (p) => `${p.type}:${p.target || \"unknown\"}`\n );\n\n // Simple Markov chain prediction\n const sequenceMap = new Map<string, Map<string, number>>();\n\n for (let i = 0; i < actionSequences.length - 1; i++) {\n const current = actionSequences[i];\n const next = actionSequences[i + 1];\n\n if (!sequenceMap.has(current)) {\n sequenceMap.set(current, new Map());\n }\n\n const nextMap = sequenceMap.get(current)!;\n nextMap.set(next, (nextMap.get(next) || 0) + 1);\n }\n\n // Predict next actions based on last action\n const lastAction = actionSequences[actionSequences.length - 1];\n const nextActionMap = sequenceMap.get(lastAction);\n\n if (nextActionMap) {\n const totalNext = Array.from(nextActionMap.values()).reduce(\n (sum, count) => sum + count,\n 0\n );\n\n nextActionMap.forEach((count, action) => {\n const confidence = count / totalNext;\n\n if (confidence > 0.2) {\n // Only include predictions with >20% confidence\n predictions.push({\n action: action.replace(\":\", \" on \"),\n confidence,\n suggestedOptimization: generateOptimization(action, confidence),\n });\n }\n });\n }\n\n // Sort predictions by confidence\n predictions.sort((a, b) => b.confidence - a.confidence);\n\n // Generate recommendations\n const recommendations: string[] = [];\n\n if (engagementScore < 0.3) {\n recommendations.push(\n \"Low engagement detected. Consider adding interactive elements.\"\n );\n }\n\n if (engagementScore > 0.8) {\n recommendations.push(\n \"High engagement! 
Current UX patterns are working well.\"\n );\n }\n\n // Check for repetitive actions (potential frustration)\n const repetitiveActions = Array.from(actionCounts.entries()).filter(\n ([_, count]) => count > patterns.length * 0.3\n );\n\n if (repetitiveActions.length > 0) {\n recommendations.push(\n `Repetitive action detected: ${repetitiveActions[0][0]}. User may be stuck or confused.`\n );\n }\n\n // Check for scroll-heavy behavior\n const scrollCount = patterns.filter((p) => p.type === \"scroll\").length;\n if (scrollCount > patterns.length * 0.5) {\n recommendations.push(\n \"High scroll rate. Consider improving content visibility or adding sticky navigation.\"\n );\n }\n\n return {\n predictions: predictions.slice(0, 3), // Top 3 predictions\n confidence: predictions.length > 0 ? predictions[0].confidence : 0,\n patterns: {\n mostCommonAction,\n averageSessionDuration,\n engagementScore,\n },\n recommendations,\n };\n}\n\n/**\n * Generate UX optimization suggestions based on predicted action\n */\nfunction generateOptimization(action: string, confidence: number): string {\n if (confidence > 0.7) {\n return `Preload resources for \"${action}\" (high confidence)`;\n } else if (confidence > 0.5) {\n return `Consider prefetching data for \"${action}\"`;\n } else {\n return `Monitor pattern for \"${action}\"`;\n }\n}\n\n// Listen for messages from main thread\nself.addEventListener(\"message\", (event: MessageEvent<PredictiveMessage>) => {\n const { patterns } = event.data;\n\n if (patterns && Array.isArray(patterns)) {\n const result = analyzePatternsAndPredict(patterns);\n self.postMessage(result);\n } else {\n self.postMessage({\n predictions: [],\n confidence: 0,\n patterns: {\n mostCommonAction: \"none\",\n averageSessionDuration: 0,\n engagementScore: 0,\n },\n recommendations: [\"No pattern data provided\"],\n });\n }\n});\n\n// Notify main thread that worker is ready\nself.postMessage({ status: \"ready\" });\n"],
|
|
5
|
+
"mappings": ";;AAsCA,WAAS,0BACP,UAC0B;AAC1B,QAAI,SAAS,WAAW,GAAG;AACzB,aAAO;AAAA,QACL,aAAa,CAAC;AAAA,QACd,YAAY;AAAA,QACZ,UAAU;AAAA,UACR,kBAAkB;AAAA,UAClB,wBAAwB;AAAA,UACxB,iBAAiB;AAAA,QACnB;AAAA,QACA,iBAAiB,CAAC;AAAA,MACpB;AAAA,IACF;AAGA,UAAM,eAAe,oBAAI,IAAoB;AAC7C,UAAM,kBAA4B,CAAC;AAEnC,aAAS,QAAQ,CAAC,YAAY;AAC5B,YAAM,SAAS,GAAG,QAAQ,IAAI,IAAI,QAAQ,UAAU,SAAS;AAC7D,mBAAa,IAAI,SAAS,aAAa,IAAI,MAAM,KAAK,KAAK,CAAC;AAC5D,sBAAgB,KAAK,MAAM;AAAA,IAC7B,CAAC;AAGD,QAAI,mBAAmB;AACvB,QAAI,WAAW;AAEf,iBAAa,QAAQ,CAAC,OAAO,WAAW;AACtC,UAAI,QAAQ,UAAU;AACpB,mBAAW;AACX,2BAAmB;AAAA,MACrB;AAAA,IACF,CAAC;AAGD,UAAM,eAAe,SAAS,CAAC,EAAE;AACjC,UAAM,aAAa,SAAS,SAAS,SAAS,CAAC,EAAE;AACjD,UAAM,yBAAyB,aAAa;AAG5C,UAAM,kBACH,SAAS,UAAU,yBAAyB,OAAS;AACxD,UAAM,kBAAkB,KAAK,IAAI,GAAG,kBAAkB,EAAE;AAGxD,UAAM,cAA4B,CAAC;AAGnC,UAAM,iBAAiB,SAAS,MAAM,EAAE;AACxC,UAAM,iBAAiB,eAAe;AAAA,MACpC,CAAC,MAAM,GAAG,EAAE,IAAI,IAAI,EAAE,UAAU,SAAS;AAAA,IAC3C;AAGA,UAAM,cAAc,oBAAI,IAAiC;AAEzD,aAAS,IAAI,GAAG,IAAI,gBAAgB,SAAS,GAAG,KAAK;AACnD,YAAM,UAAU,gBAAgB,CAAC;AACjC,YAAM,OAAO,gBAAgB,IAAI,CAAC;AAElC,UAAI,CAAC,YAAY,IAAI,OAAO,GAAG;AAC7B,oBAAY,IAAI,SAAS,oBAAI,IAAI,CAAC;AAAA,MACpC;AAEA,YAAM,UAAU,YAAY,IAAI,OAAO;AACvC,cAAQ,IAAI,OAAO,QAAQ,IAAI,IAAI,KAAK,KAAK,CAAC;AAAA,IAChD;AAGA,UAAM,aAAa,gBAAgB,gBAAgB,SAAS,CAAC;AAC7D,UAAM,gBAAgB,YAAY,IAAI,UAAU;AAEhD,QAAI,eAAe;AACjB,YAAM,YAAY,MAAM,KAAK,cAAc,OAAO,CAAC,EAAE;AAAA,QACnD,CAAC,KAAK,UAAU,MAAM;AAAA,QACtB;AAAA,MACF;AAEA,oBAAc,QAAQ,CAAC,OAAO,WAAW;AACvC,cAAM,aAAa,QAAQ;AAE3B,YAAI,aAAa,KAAK;AAEpB,sBAAY,KAAK;AAAA,YACf,QAAQ,OAAO,QAAQ,KAAK,MAAM;AAAA,YAClC;AAAA,YACA,uBAAuB,qBAAqB,QAAQ,UAAU;AAAA,UAChE,CAAC;AAAA,QACH;AAAA,MACF,CAAC;AAAA,IACH;AAGA,gBAAY,KAAK,CAAC,GAAG,MAAM,EAAE,aAAa,EAAE,UAAU;AAGtD,UAAM,kBAA4B,CAAC;AAEnC,QAAI,kBAAkB,KAAK;AACzB,sBAAgB;AAAA,QACd;AAAA,MACF;AAAA,IACF;AAEA,QAAI,kBAAkB,KAAK;AACzB,sBAAgB;AAAA,QACd;AAAA,MACF;AAAA,IACF;AAGA,UAAM,oBAAoB,MAAM,KAAK,aAAa,QAAQ,CAAC,EAAE;AAAA,MAC3D,CAAC,CAAC,GAAG,KAAK,MAAM,QAAQ,SAAS,SAAS;AAAA,IAC5C;AAEA,QAAI,kBAAkB,SAAS,GAAG;AAChC,sBAAgB;AAAA,QACd,+BAA+B,k
BAAkB,CAAC,EAAE,CAAC,CAAC;AAAA,MACxD;AAAA,IACF;AAGA,UAAM,cAAc,SAAS,OAAO,CAAC,MAAM,EAAE,SAAS,QAAQ,EAAE;AAChE,QAAI,cAAc,SAAS,SAAS,KAAK;AACvC,sBAAgB;AAAA,QACd;AAAA,MACF;AAAA,IACF;AAEA,WAAO;AAAA,MACL,aAAa,YAAY,MAAM,GAAG,CAAC;AAAA;AAAA,MACnC,YAAY,YAAY,SAAS,IAAI,YAAY,CAAC,EAAE,aAAa;AAAA,MACjE,UAAU;AAAA,QACR;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAKA,WAAS,qBAAqB,QAAgB,YAA4B;AACxE,QAAI,aAAa,KAAK;AACpB,aAAO,0BAA0B,MAAM;AAAA,IACzC,WAAW,aAAa,KAAK;AAC3B,aAAO,kCAAkC,MAAM;AAAA,IACjD,OAAO;AACL,aAAO,wBAAwB,MAAM;AAAA,IACvC;AAAA,EACF;AAGA,OAAK,iBAAiB,WAAW,CAAC,UAA2C;AAC3E,UAAM,EAAE,SAAS,IAAI,MAAM;AAE3B,QAAI,YAAY,MAAM,QAAQ,QAAQ,GAAG;AACvC,YAAM,SAAS,0BAA0B,QAAQ;AACjD,WAAK,YAAY,MAAM;AAAA,IACzB,OAAO;AACL,WAAK,YAAY;AAAA,QACf,aAAa,CAAC;AAAA,QACd,YAAY;AAAA,QACZ,UAAU;AAAA,UACR,kBAAkB;AAAA,UAClB,wBAAwB;AAAA,UACxB,iBAAiB;AAAA,QACnB;AAAA,QACA,iBAAiB,CAAC,0BAA0B;AAAA,MAC9C,CAAC;AAAA,IACH;AAAA,EACF,CAAC;AAGD,OAAK,YAAY,EAAE,QAAQ,QAAQ,CAAC;",
|
|
6
|
+
"names": []
|
|
7
|
+
}
|
|
@@ -1,162 +0,0 @@
|
|
|
1
|
-
/**
 * Biometric Processing Web Worker
 * Handles batch processing of biometric data (heart rate, stress, engagement)
 */

// Message shape received from the main thread.
// "processBatch" expects an array of data points; "analyze" expects one point.
interface BiometricMessage {
  command: "processBatch" | "analyze";
  data?: BiometricDataPoint[] | BiometricDataPoint;
}

// One raw biometric sample; every sensor field is optional, only the
// capture timestamp is required.
interface BiometricDataPoint {
  heartRate?: number;
  heartRateVariability?: number;
  skinConductance?: number;
  facialExpression?: string;
  timestamp: number;
}

// Analysis result posted back to the main thread.
interface BiometricAnalysis {
  stressLevel: number; // 0-1
  engagementLevel: number; // 0-1
  emotionalState: "calm" | "focused" | "stressed" | "excited" | "neutral";
  recommendations: string[];
  timestamp: number;
}
|
|
26
|
-
|
|
27
|
-
/**
|
|
28
|
-
* Analyze batch of biometric data
|
|
29
|
-
*/
|
|
30
|
-
function processBiometricBatch(batch: BiometricDataPoint[]): BiometricAnalysis {
|
|
31
|
-
if (batch.length === 0) {
|
|
32
|
-
return {
|
|
33
|
-
stressLevel: 0,
|
|
34
|
-
engagementLevel: 0,
|
|
35
|
-
emotionalState: "neutral",
|
|
36
|
-
recommendations: [],
|
|
37
|
-
timestamp: Date.now(),
|
|
38
|
-
};
|
|
39
|
-
}
|
|
40
|
-
|
|
41
|
-
// Calculate average metrics
|
|
42
|
-
const avgHeartRate =
|
|
43
|
-
batch.reduce((sum, d) => sum + (d.heartRate || 0), 0) / batch.length;
|
|
44
|
-
|
|
45
|
-
const avgHRV =
|
|
46
|
-
batch.reduce((sum, d) => sum + (d.heartRateVariability || 0), 0) /
|
|
47
|
-
batch.length;
|
|
48
|
-
|
|
49
|
-
const avgSkinConductance =
|
|
50
|
-
batch.reduce((sum, d) => sum + (d.skinConductance || 0), 0) / batch.length;
|
|
51
|
-
|
|
52
|
-
// Analyze stress level (simplified algorithm)
|
|
53
|
-
// In production, use ML model trained on biometric data
|
|
54
|
-
let stressLevel = 0;
|
|
55
|
-
|
|
56
|
-
// High heart rate + low HRV = stress
|
|
57
|
-
if (avgHeartRate > 80 && avgHRV < 50) {
|
|
58
|
-
stressLevel += 0.4;
|
|
59
|
-
}
|
|
60
|
-
|
|
61
|
-
// High skin conductance = stress/arousal
|
|
62
|
-
if (avgSkinConductance > 0.5) {
|
|
63
|
-
stressLevel += 0.3;
|
|
64
|
-
}
|
|
65
|
-
|
|
66
|
-
// Facial expression analysis
|
|
67
|
-
const stressedExpressions = batch.filter(
|
|
68
|
-
(d) => d.facialExpression === "frown" || d.facialExpression === "tense"
|
|
69
|
-
).length;
|
|
70
|
-
stressLevel += (stressedExpressions / batch.length) * 0.3;
|
|
71
|
-
|
|
72
|
-
stressLevel = Math.min(1, stressLevel);
|
|
73
|
-
|
|
74
|
-
// Calculate engagement level
|
|
75
|
-
// Higher heart rate + higher HRV = engagement
|
|
76
|
-
let engagementLevel = 0;
|
|
77
|
-
|
|
78
|
-
if (avgHeartRate > 70 && avgHeartRate < 90 && avgHRV > 50) {
|
|
79
|
-
engagementLevel += 0.5;
|
|
80
|
-
}
|
|
81
|
-
|
|
82
|
-
const focusedExpressions = batch.filter(
|
|
83
|
-
(d) => d.facialExpression === "focused" || d.facialExpression === "smile"
|
|
84
|
-
).length;
|
|
85
|
-
engagementLevel += (focusedExpressions / batch.length) * 0.5;
|
|
86
|
-
|
|
87
|
-
engagementLevel = Math.min(1, engagementLevel);
|
|
88
|
-
|
|
89
|
-
// Determine emotional state
|
|
90
|
-
let emotionalState: BiometricAnalysis["emotionalState"] = "neutral";
|
|
91
|
-
|
|
92
|
-
if (stressLevel > 0.7) {
|
|
93
|
-
emotionalState = "stressed";
|
|
94
|
-
} else if (engagementLevel > 0.7) {
|
|
95
|
-
emotionalState = "focused";
|
|
96
|
-
} else if (avgHeartRate > 90) {
|
|
97
|
-
emotionalState = "excited";
|
|
98
|
-
} else if (stressLevel < 0.3 && avgHeartRate < 70) {
|
|
99
|
-
emotionalState = "calm";
|
|
100
|
-
}
|
|
101
|
-
|
|
102
|
-
// Generate recommendations
|
|
103
|
-
const recommendations: string[] = [];
|
|
104
|
-
|
|
105
|
-
if (stressLevel > 0.6) {
|
|
106
|
-
recommendations.push("Consider taking a break");
|
|
107
|
-
recommendations.push("Try deep breathing exercises");
|
|
108
|
-
}
|
|
109
|
-
|
|
110
|
-
if (engagementLevel < 0.4) {
|
|
111
|
-
recommendations.push("Content may not be engaging enough");
|
|
112
|
-
recommendations.push("Consider adding interactive elements");
|
|
113
|
-
}
|
|
114
|
-
|
|
115
|
-
if (avgHeartRate < 60 && engagementLevel < 0.3) {
|
|
116
|
-
recommendations.push("User may be disengaged or fatigued");
|
|
117
|
-
}
|
|
118
|
-
|
|
119
|
-
return {
|
|
120
|
-
stressLevel,
|
|
121
|
-
engagementLevel,
|
|
122
|
-
emotionalState,
|
|
123
|
-
recommendations,
|
|
124
|
-
timestamp: Date.now(),
|
|
125
|
-
};
|
|
126
|
-
}
|
|
127
|
-
|
|
128
|
-
/**
|
|
129
|
-
* Analyze single biometric data point
|
|
130
|
-
*/
|
|
131
|
-
function analyzeSingleDataPoint(
|
|
132
|
-
dataPoint: BiometricDataPoint
|
|
133
|
-
): BiometricAnalysis {
|
|
134
|
-
return processBiometricBatch([dataPoint]);
|
|
135
|
-
}
|
|
136
|
-
|
|
137
|
-
// Listen for messages from main thread
|
|
138
|
-
self.addEventListener("message", (event: MessageEvent<BiometricMessage>) => {
|
|
139
|
-
const { command, data } = event.data;
|
|
140
|
-
|
|
141
|
-
switch (command) {
|
|
142
|
-
case "processBatch":
|
|
143
|
-
if (Array.isArray(data)) {
|
|
144
|
-
const analysis = processBiometricBatch(data);
|
|
145
|
-
self.postMessage(analysis);
|
|
146
|
-
}
|
|
147
|
-
break;
|
|
148
|
-
|
|
149
|
-
case "analyze":
|
|
150
|
-
if (data && !Array.isArray(data)) {
|
|
151
|
-
const analysis = analyzeSingleDataPoint(data);
|
|
152
|
-
self.postMessage(analysis);
|
|
153
|
-
}
|
|
154
|
-
break;
|
|
155
|
-
|
|
156
|
-
default:
|
|
157
|
-
self.postMessage({ error: "Unknown command" });
|
|
158
|
-
}
|
|
159
|
-
});
|
|
160
|
-
|
|
161
|
-
// Notify main thread that worker is ready
|
|
162
|
-
self.postMessage({ status: "ready" });
|
|
@@ -1,89 +0,0 @@
|
|
|
1
|
-
/**
 * Eye Tracking Web Worker
 * Handles computationally intensive eye tracking calculations off the main thread
 */

// Command message received from the main thread; "process" optionally
// carries a video frame to analyze on demand.
interface EyeTrackingMessage {
  command: "start" | "stop" | "process";
  data?: {
    videoFrame?: ImageData;
    timestamp?: number;
  };
}

// Gaze estimate; x/y are normalized screen coordinates in [0, 1].
interface GazeData {
  x: number;
  y: number;
  confidence: number;
  timestamp: number;
}

// Worker-local state: whether the ~60fps sampling loop is active,
// and the interval handle used to stop it.
let isTracking = false;
let processingInterval: ReturnType<typeof setInterval> | null = null;
|
|
23
|
-
|
|
24
|
-
// Simplified eye tracking algorithm (placeholder for actual ML model)
|
|
25
|
-
function processEyeTracking(frame?: ImageData): GazeData {
|
|
26
|
-
// In production, this would use TensorFlow.js or similar
|
|
27
|
-
// For now, return mock data with realistic patterns
|
|
28
|
-
const timestamp = Date.now();
|
|
29
|
-
|
|
30
|
-
// Simulate natural eye movement patterns
|
|
31
|
-
const baseX = 0.5 + Math.sin(timestamp / 1000) * 0.2;
|
|
32
|
-
const baseY = 0.5 + Math.cos(timestamp / 1500) * 0.2;
|
|
33
|
-
|
|
34
|
-
// Add small random variations (saccades)
|
|
35
|
-
const x = Math.max(0, Math.min(1, baseX + (Math.random() - 0.5) * 0.05));
|
|
36
|
-
const y = Math.max(0, Math.min(1, baseY + (Math.random() - 0.5) * 0.05));
|
|
37
|
-
|
|
38
|
-
return {
|
|
39
|
-
x,
|
|
40
|
-
y,
|
|
41
|
-
confidence: 0.85 + Math.random() * 0.15, // 85-100% confidence
|
|
42
|
-
timestamp,
|
|
43
|
-
};
|
|
44
|
-
}
|
|
45
|
-
|
|
46
|
-
// Listen for messages from main thread
|
|
47
|
-
self.addEventListener("message", (event: MessageEvent<EyeTrackingMessage>) => {
|
|
48
|
-
const { command, data } = event.data;
|
|
49
|
-
|
|
50
|
-
switch (command) {
|
|
51
|
-
case "start":
|
|
52
|
-
isTracking = true;
|
|
53
|
-
|
|
54
|
-
// Process eye tracking at 60fps
|
|
55
|
-
processingInterval = setInterval(() => {
|
|
56
|
-
if (isTracking) {
|
|
57
|
-
const gazeData = processEyeTracking();
|
|
58
|
-
self.postMessage(gazeData);
|
|
59
|
-
}
|
|
60
|
-
}, 16.67); // ~60fps
|
|
61
|
-
|
|
62
|
-
self.postMessage({ status: "started" });
|
|
63
|
-
break;
|
|
64
|
-
|
|
65
|
-
case "stop":
|
|
66
|
-
isTracking = false;
|
|
67
|
-
|
|
68
|
-
if (processingInterval) {
|
|
69
|
-
clearInterval(processingInterval);
|
|
70
|
-
processingInterval = null;
|
|
71
|
-
}
|
|
72
|
-
|
|
73
|
-
self.postMessage({ status: "stopped" });
|
|
74
|
-
break;
|
|
75
|
-
|
|
76
|
-
case "process":
|
|
77
|
-
if (data?.videoFrame) {
|
|
78
|
-
const gazeData = processEyeTracking(data.videoFrame);
|
|
79
|
-
self.postMessage(gazeData);
|
|
80
|
-
}
|
|
81
|
-
break;
|
|
82
|
-
|
|
83
|
-
default:
|
|
84
|
-
self.postMessage({ error: "Unknown command" });
|
|
85
|
-
}
|
|
86
|
-
});
|
|
87
|
-
|
|
88
|
-
// Notify main thread that worker is ready
|
|
89
|
-
self.postMessage({ status: "ready" });
|
|
@@ -1,218 +0,0 @@
|
|
|
1
|
-
/**
 * Predictive Analysis Web Worker
 * Analyzes user behavior patterns to predict future actions and optimize UX
 */

// Message received from the main thread; only `patterns` is consumed
// by the listener below.
interface PredictiveMessage {
  command?: string;
  patterns?: UserInteractionPattern[];
}

// One recorded user interaction event.
interface UserInteractionPattern {
  type: "click" | "hover" | "scroll" | "focus" | "gesture";
  target?: string;
  position?: { x: number; y: number };
  timestamp: number;
  duration?: number;
}

// A single predicted next action with its confidence (0-1).
interface Prediction {
  action: string;
  confidence: number;
  suggestedOptimization?: string;
}

// Full analysis result posted back to the main thread.
interface PredictiveAnalysisResult {
  predictions: Prediction[];
  confidence: number;
  patterns: {
    mostCommonAction: string;
    averageSessionDuration: number;
    engagementScore: number;
  };
  recommendations: string[];
}
|
|
35
|
-
|
|
36
|
-
/**
|
|
37
|
-
* Analyze user interaction patterns and make predictions
|
|
38
|
-
*/
|
|
39
|
-
function analyzePatternsAndPredict(
|
|
40
|
-
patterns: UserInteractionPattern[]
|
|
41
|
-
): PredictiveAnalysisResult {
|
|
42
|
-
if (patterns.length === 0) {
|
|
43
|
-
return {
|
|
44
|
-
predictions: [],
|
|
45
|
-
confidence: 0,
|
|
46
|
-
patterns: {
|
|
47
|
-
mostCommonAction: "none",
|
|
48
|
-
averageSessionDuration: 0,
|
|
49
|
-
engagementScore: 0,
|
|
50
|
-
},
|
|
51
|
-
recommendations: [],
|
|
52
|
-
};
|
|
53
|
-
}
|
|
54
|
-
|
|
55
|
-
// Calculate pattern statistics
|
|
56
|
-
const actionCounts = new Map<string, number>();
|
|
57
|
-
const actionSequences: string[] = [];
|
|
58
|
-
|
|
59
|
-
patterns.forEach((pattern) => {
|
|
60
|
-
const action = `${pattern.type}:${pattern.target || "unknown"}`;
|
|
61
|
-
actionCounts.set(action, (actionCounts.get(action) || 0) + 1);
|
|
62
|
-
actionSequences.push(action);
|
|
63
|
-
});
|
|
64
|
-
|
|
65
|
-
// Find most common action
|
|
66
|
-
let mostCommonAction = "none";
|
|
67
|
-
let maxCount = 0;
|
|
68
|
-
|
|
69
|
-
actionCounts.forEach((count, action) => {
|
|
70
|
-
if (count > maxCount) {
|
|
71
|
-
maxCount = count;
|
|
72
|
-
mostCommonAction = action;
|
|
73
|
-
}
|
|
74
|
-
});
|
|
75
|
-
|
|
76
|
-
// Calculate session metrics
|
|
77
|
-
const sessionStart = patterns[0].timestamp;
|
|
78
|
-
const sessionEnd = patterns[patterns.length - 1].timestamp;
|
|
79
|
-
const averageSessionDuration = sessionEnd - sessionStart;
|
|
80
|
-
|
|
81
|
-
// Calculate engagement score based on interaction density
|
|
82
|
-
const interactionRate =
|
|
83
|
-
(patterns.length / (averageSessionDuration / 1000)) * 60; // interactions per minute
|
|
84
|
-
const engagementScore = Math.min(1, interactionRate / 20); // normalized to 0-1
|
|
85
|
-
|
|
86
|
-
// Sequence analysis for predictions
|
|
87
|
-
const predictions: Prediction[] = [];
|
|
88
|
-
|
|
89
|
-
// Analyze last 5 interactions to predict next action
|
|
90
|
-
const recentPatterns = patterns.slice(-5);
|
|
91
|
-
const recentSequence = recentPatterns.map(
|
|
92
|
-
(p) => `${p.type}:${p.target || "unknown"}`
|
|
93
|
-
);
|
|
94
|
-
|
|
95
|
-
// Simple Markov chain prediction
|
|
96
|
-
const sequenceMap = new Map<string, Map<string, number>>();
|
|
97
|
-
|
|
98
|
-
for (let i = 0; i < actionSequences.length - 1; i++) {
|
|
99
|
-
const current = actionSequences[i];
|
|
100
|
-
const next = actionSequences[i + 1];
|
|
101
|
-
|
|
102
|
-
if (!sequenceMap.has(current)) {
|
|
103
|
-
sequenceMap.set(current, new Map());
|
|
104
|
-
}
|
|
105
|
-
|
|
106
|
-
const nextMap = sequenceMap.get(current)!;
|
|
107
|
-
nextMap.set(next, (nextMap.get(next) || 0) + 1);
|
|
108
|
-
}
|
|
109
|
-
|
|
110
|
-
// Predict next actions based on last action
|
|
111
|
-
const lastAction = actionSequences[actionSequences.length - 1];
|
|
112
|
-
const nextActionMap = sequenceMap.get(lastAction);
|
|
113
|
-
|
|
114
|
-
if (nextActionMap) {
|
|
115
|
-
const totalNext = Array.from(nextActionMap.values()).reduce(
|
|
116
|
-
(sum, count) => sum + count,
|
|
117
|
-
0
|
|
118
|
-
);
|
|
119
|
-
|
|
120
|
-
nextActionMap.forEach((count, action) => {
|
|
121
|
-
const confidence = count / totalNext;
|
|
122
|
-
|
|
123
|
-
if (confidence > 0.2) {
|
|
124
|
-
// Only include predictions with >20% confidence
|
|
125
|
-
predictions.push({
|
|
126
|
-
action: action.replace(":", " on "),
|
|
127
|
-
confidence,
|
|
128
|
-
suggestedOptimization: generateOptimization(action, confidence),
|
|
129
|
-
});
|
|
130
|
-
}
|
|
131
|
-
});
|
|
132
|
-
}
|
|
133
|
-
|
|
134
|
-
// Sort predictions by confidence
|
|
135
|
-
predictions.sort((a, b) => b.confidence - a.confidence);
|
|
136
|
-
|
|
137
|
-
// Generate recommendations
|
|
138
|
-
const recommendations: string[] = [];
|
|
139
|
-
|
|
140
|
-
if (engagementScore < 0.3) {
|
|
141
|
-
recommendations.push(
|
|
142
|
-
"Low engagement detected. Consider adding interactive elements."
|
|
143
|
-
);
|
|
144
|
-
}
|
|
145
|
-
|
|
146
|
-
if (engagementScore > 0.8) {
|
|
147
|
-
recommendations.push(
|
|
148
|
-
"High engagement! Current UX patterns are working well."
|
|
149
|
-
);
|
|
150
|
-
}
|
|
151
|
-
|
|
152
|
-
// Check for repetitive actions (potential frustration)
|
|
153
|
-
const repetitiveActions = Array.from(actionCounts.entries()).filter(
|
|
154
|
-
([_, count]) => count > patterns.length * 0.3
|
|
155
|
-
);
|
|
156
|
-
|
|
157
|
-
if (repetitiveActions.length > 0) {
|
|
158
|
-
recommendations.push(
|
|
159
|
-
`Repetitive action detected: ${repetitiveActions[0][0]}. User may be stuck or confused.`
|
|
160
|
-
);
|
|
161
|
-
}
|
|
162
|
-
|
|
163
|
-
// Check for scroll-heavy behavior
|
|
164
|
-
const scrollCount = patterns.filter((p) => p.type === "scroll").length;
|
|
165
|
-
if (scrollCount > patterns.length * 0.5) {
|
|
166
|
-
recommendations.push(
|
|
167
|
-
"High scroll rate. Consider improving content visibility or adding sticky navigation."
|
|
168
|
-
);
|
|
169
|
-
}
|
|
170
|
-
|
|
171
|
-
return {
|
|
172
|
-
predictions: predictions.slice(0, 3), // Top 3 predictions
|
|
173
|
-
confidence: predictions.length > 0 ? predictions[0].confidence : 0,
|
|
174
|
-
patterns: {
|
|
175
|
-
mostCommonAction,
|
|
176
|
-
averageSessionDuration,
|
|
177
|
-
engagementScore,
|
|
178
|
-
},
|
|
179
|
-
recommendations,
|
|
180
|
-
};
|
|
181
|
-
}
|
|
182
|
-
|
|
183
|
-
/**
|
|
184
|
-
* Generate UX optimization suggestions based on predicted action
|
|
185
|
-
*/
|
|
186
|
-
function generateOptimization(action: string, confidence: number): string {
|
|
187
|
-
if (confidence > 0.7) {
|
|
188
|
-
return `Preload resources for "${action}" (high confidence)`;
|
|
189
|
-
} else if (confidence > 0.5) {
|
|
190
|
-
return `Consider prefetching data for "${action}"`;
|
|
191
|
-
} else {
|
|
192
|
-
return `Monitor pattern for "${action}"`;
|
|
193
|
-
}
|
|
194
|
-
}
|
|
195
|
-
|
|
196
|
-
// Listen for messages from main thread
|
|
197
|
-
self.addEventListener("message", (event: MessageEvent<PredictiveMessage>) => {
|
|
198
|
-
const { patterns } = event.data;
|
|
199
|
-
|
|
200
|
-
if (patterns && Array.isArray(patterns)) {
|
|
201
|
-
const result = analyzePatternsAndPredict(patterns);
|
|
202
|
-
self.postMessage(result);
|
|
203
|
-
} else {
|
|
204
|
-
self.postMessage({
|
|
205
|
-
predictions: [],
|
|
206
|
-
confidence: 0,
|
|
207
|
-
patterns: {
|
|
208
|
-
mostCommonAction: "none",
|
|
209
|
-
averageSessionDuration: 0,
|
|
210
|
-
engagementScore: 0,
|
|
211
|
-
},
|
|
212
|
-
recommendations: ["No pattern data provided"],
|
|
213
|
-
});
|
|
214
|
-
}
|
|
215
|
-
});
|
|
216
|
-
|
|
217
|
-
// Notify main thread that worker is ready
|
|
218
|
-
self.postMessage({ status: "ready" });
|