solana-age-verify-sdk 2.0.0-beta.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/README.md +185 -0
  2. package/dist/adapters/blazeface.d.ts +15 -0
  3. package/dist/adapters/blazeface.js +258 -0
  4. package/dist/adapters/mediapipe.d.ts +7 -0
  5. package/dist/adapters/mediapipe.js +55 -0
  6. package/dist/adapters/onnx.d.ts +10 -0
  7. package/dist/adapters/onnx.js +171 -0
  8. package/dist/camera.d.ts +15 -0
  9. package/dist/camera.js +76 -0
  10. package/dist/embedding/descriptor.d.ts +22 -0
  11. package/dist/embedding/descriptor.js +134 -0
  12. package/dist/hashing/facehash.d.ts +3 -0
  13. package/dist/hashing/facehash.js +27 -0
  14. package/dist/hashing/webcrypto.d.ts +1 -0
  15. package/dist/hashing/webcrypto.js +1 -0
  16. package/dist/index.d.ts +6 -0
  17. package/dist/index.js +7 -0
  18. package/dist/liveness/challenges.d.ts +3 -0
  19. package/dist/liveness/challenges.js +34 -0
  20. package/dist/liveness/scorer.d.ts +1 -0
  21. package/dist/liveness/scorer.js +3 -0
  22. package/dist/liveness/texture.d.ts +72 -0
  23. package/dist/liveness/texture.js +266 -0
  24. package/dist/types.d.ts +86 -0
  25. package/dist/types.js +9 -0
  26. package/dist/verify.d.ts +4 -0
  27. package/dist/verify.js +892 -0
  28. package/dist/worker/frame.d.ts +5 -0
  29. package/dist/worker/frame.js +1 -0
  30. package/dist/worker/infer.d.ts +4 -0
  31. package/dist/worker/infer.js +22 -0
  32. package/dist/worker/worker.d.ts +0 -0
  33. package/dist/worker/worker.js +61 -0
  34. package/package.json +46 -0
  35. package/public/models/age_gender.onnx +1446 -0
  36. package/public/models/age_gender_model-weights_manifest.json +62 -0
  37. package/public/models/age_gender_model.shard1 +1447 -0
  38. package/public/models/face_landmark_68_model-weights_manifest.json +60 -0
  39. package/public/models/face_landmark_68_model.shard1 +1447 -0
  40. package/public/models/face_recognition_model-weights_manifest.json +128 -0
  41. package/public/models/face_recognition_model.shard1 +1447 -0
  42. package/public/models/face_recognition_model.shard2 +1447 -0
  43. package/public/models/ort-wasm-simd-threaded.asyncify.wasm +0 -0
  44. package/public/models/ort-wasm-simd-threaded.jsep.wasm +0 -0
  45. package/public/models/ort-wasm-simd-threaded.wasm +0 -0
  46. package/public/models/tiny_face_detector_model-weights_manifest.json +30 -0
  47. package/public/models/tiny_face_detector_model.shard1 +1447 -0
package/dist/adapters/onnx.js ADDED
@@ -0,0 +1,171 @@
+ import * as ort from 'onnxruntime-web';
+ export class ONNXAgeEstimator {
+ constructor() {
+ Object.defineProperty(this, "session", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: null
+ });
+ Object.defineProperty(this, "inputName", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: 'input_1'
+ });
+ }
+ async load(modelUrl) {
+ try {
+ // Set WASM paths to point to root (where vite-plugin-static-copy puts them)
+ // In a worker, self.location is available.
+ const isWorker = typeof self.importScripts === 'function';
+ const origin = isWorker ? self.location.origin : (typeof window !== 'undefined' ? window.location.origin : '');
+ ort.env.wasm.wasmPaths = origin + '/';
+ console.log(`ONNX: Loading model from ${modelUrl}...`);
+ this.session = await ort.InferenceSession.create(modelUrl, {
+ executionProviders: ['wasm', 'webgl']
+ });
+ this.inputName = this.session.inputNames[0];
+ console.log(`ONNX: Model loaded successfully. Input: ${this.inputName}`);
+ }
+ catch (e) {
+ console.error('ONNX: Failed to load model', e);
+ // If WebGL fails, try WASM only as fallback
+ if (e.message && (e.message.includes('WebGL') || e.message.includes('backend'))) {
+ try {
+ console.log('ONNX: Retrying with WASM-only backend...');
+ this.session = await ort.InferenceSession.create(modelUrl, {
+ executionProviders: ['wasm']
+ });
+ this.inputName = this.session.inputNames[0];
+ return;
+ }
+ catch (e2) {
+ console.error('ONNX: WASM fallback also failed', e2);
+ }
+ }
+ throw e;
+ }
+ }
+ async estimateAge(faceCanvas) {
+ if (!this.session)
+ return { age: 0, confidence: 0 };
+ try {
+ // 1. Preprocess: Resize to 64x64 (typical for genderage.onnx) and NCHW
+ const inputTensor = await this.preprocess(faceCanvas);
+ // 2. Run Inference
+ const feeds = {};
+ feeds[this.inputName] = inputTensor;
+ const results = await this.session.run(feeds);
+ // 3. Postprocess: Get Age
+ // Model typically outputs: [gender_prob, age_score] or similar
+ // For yakyo/genderage.onnx, output is often:
+ // Output 0: Gender (1x2)
+ // Output 1: Age (1x1)? Or 1x100 (distribution)?
+ // We'll inspect standard usage: typically it's a regression or classification
+ // Let's assume standard output for now and log it to debug if needed
+ // If it's the specific 'genderage.onnx' from yakyo:
+ // It has 'dense_1' (gender) and 'dense_2' (age)
+ const outputNames = this.session.outputNames;
+ // Assuming one of them is age. Usually the second one for multi-task models.
+ // But let's look for "age" keyword or "dense_2"
+ // Simple heuristic for now: find output with bigger shape or just take the explicit one if known
+ // yakyo model: output 0 is gender, output 1 is age * 100? or age class?
+ // Actually yakyo model outputs age as a regression value (0-1) * 100?
+ let ageVal = 25;
+ let confVal = 0.5;
+ for (const name of outputNames) {
+ const tensor = results[name];
+ const data = tensor.data;
+ const isLikelyAge = name.toLowerCase().includes('age') || name.includes('dense_2');
+ // If single value, likely regression
+ if (data.length === 1) {
+ const val = data[0];
+ console.log(`ONNX: Raw regression output (${name}):`, val);
+ // If it's 0-1, it's likely normalized age / 100
+ // Higher probability of being normalized if < 1.0 but some models use 0.0-1.0 to map to 0-80 or 0-100
+ ageVal = val < 1.0 ? val * 100 : val;
+ confVal = 0.85;
+ if (isLikelyAge)
+ break; // Found it!
+ }
+ // If vector ~100 length, likely distribution (Softmax expectation)
+ else if (data.length >= 80 && data.length <= 110) {
+ console.log(`ONNX: Distribution output (${name}), size ${data.length}`);
+ let expectedAge = 0;
+ // ... (rest of logic remains same but we check summing)
+ // Check if it's already softmaxed (sums to ~1)
+ let sum = 0;
+ for (let i = 0; i < data.length; i++)
+ sum += data[i];
+ let probs = data;
+ if (Math.abs(sum - 1.0) > 0.1) {
+ // Needs Softmax
+ const expData = Array.from(data).map(v => Math.exp(v));
+ const expSum = expData.reduce((a, b) => a + b, 0);
+ probs = new Float32Array(expData.map(v => v / expSum));
+ }
+ for (let i = 0; i < probs.length; i++) {
+ expectedAge += i * probs[i];
+ }
+ // Confidence is the max probability (peakiness)
+ confVal = Math.max(...Array.from(probs));
+ ageVal = expectedAge;
+ if (isLikelyAge)
+ break;
+ }
+ }
+ return {
+ age: Math.max(1, Math.min(100, ageVal)),
+ confidence: confVal
+ };
+ }
+ catch (e) {
+ console.warn('ONNX Inference failed:', e);
+ return { age: 0, confidence: 0 };
+ }
+ }
+ async preprocess(canvas) {
+ // Create a temporary implementation for resizing and normalization
+ // This expects 64x64 input usually
+ const width = 64;
+ const height = 64;
+ // Resize logic (using OffscreenCanvas or temp canvas)
+ let ctx;
+ // Simple resize using a temp canvas
+ let tempCanvas;
+ if (typeof OffscreenCanvas !== 'undefined') {
+ tempCanvas = new OffscreenCanvas(width, height);
+ ctx = tempCanvas.getContext('2d');
+ }
+ else {
+ tempCanvas = document.createElement('canvas');
+ tempCanvas.width = width;
+ tempCanvas.height = height;
+ ctx = tempCanvas.getContext('2d');
+ }
+ if (!ctx)
+ throw new Error('No context');
+ ctx.drawImage(canvas, 0, 0, width, height);
+ const imageData = ctx.getImageData(0, 0, width, height);
+ const { data } = imageData; // RGBA
+ // Convert to Float32 NCHW [1, 3, 64, 64]
+ const float32Data = new Float32Array(1 * 3 * width * height);
+ for (let i = 0; i < width * height; i++) {
+ // Normalize: (x - mean) / std ? Or just 0-1?
+ // Common for these models: 0-1 or -1 to 1
+ // yakyo model usually checks for 0-1
+ const r = data[i * 4] / 255.0;
+ const g = data[i * 4 + 1] / 255.0;
+ const b = data[i * 4 + 2] / 255.0;
+ // NCHW
+ // R plane
+ float32Data[i] = r;
+ // G plane
+ float32Data[width * height + i] = g;
+ // B plane
+ float32Data[2 * width * height + i] = b;
+ }
+ return new ort.Tensor('float32', float32Data, [1, 3, height, width]);
+ }
+ }
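A minimal usage sketch for the adapter above, assuming the bundled model is served at /models/age_gender.onnx (per the file list) and that the dist subpath is directly importable; the function name and the 0.5 confidence cut-off are illustrative only:

import { ONNXAgeEstimator } from 'solana-age-verify-sdk/dist/adapters/onnx';

// Hypothetical caller: loads the model once, then returns an age estimate,
// or null when the model reports low confidence.
async function estimateFromCanvas(faceCanvas: HTMLCanvasElement) {
  const estimator = new ONNXAgeEstimator();
  await estimator.load('/models/age_gender.onnx'); // also pulls the ort-wasm binaries from the site root
  const { age, confidence } = await estimator.estimateAge(faceCanvas);
  return confidence > 0.5 ? age : null; // threshold is an assumption, not from the SDK
}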
package/dist/camera.d.ts ADDED
@@ -0,0 +1,15 @@
+ export declare class Camera {
+ private stream;
+ private video;
+ private canvas;
+ private ctx;
+ constructor(videoElement?: HTMLVideoElement);
+ start(): Promise<void>;
+ stop(): Promise<void>;
+ captureFrame(): {
+ data: Uint8ClampedArray;
+ width: number;
+ height: number;
+ timestamp: number;
+ };
+ }
package/dist/camera.js ADDED
@@ -0,0 +1,76 @@
+ export class Camera {
+ constructor(videoElement) {
+ Object.defineProperty(this, "stream", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: null
+ });
+ Object.defineProperty(this, "video", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "canvas", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ Object.defineProperty(this, "ctx", {
+ enumerable: true,
+ configurable: true,
+ writable: true,
+ value: void 0
+ });
+ this.video = videoElement || document.createElement('video');
+ this.video.setAttribute('playsinline', 'true');
+ this.video.style.display = videoElement ? 'block' : 'none';
+ this.canvas = document.createElement('canvas');
+ const ctx = this.canvas.getContext('2d');
+ if (!ctx)
+ throw new Error('Could not get 2D context');
+ this.ctx = ctx;
+ }
+ async start() {
+ if (this.stream)
+ return;
+ this.stream = await navigator.mediaDevices.getUserMedia({
+ video: {
+ width: { ideal: 640 },
+ height: { ideal: 480 },
+ facingMode: 'user'
+ },
+ audio: false
+ });
+ this.video.srcObject = this.stream;
+ await new Promise((resolve) => {
+ this.video.onloadedmetadata = () => {
+ this.video.play();
+ resolve();
+ };
+ });
+ this.canvas.width = this.video.videoWidth;
+ this.canvas.height = this.video.videoHeight;
+ }
+ async stop() {
+ if (this.stream) {
+ this.stream.getTracks().forEach(track => track.stop());
+ this.stream = null;
+ }
+ this.video.srcObject = null;
+ }
+ captureFrame() {
+ if (!this.stream)
+ throw new Error('Camera not started');
+ this.ctx.drawImage(this.video, 0, 0);
+ const imageData = this.ctx.getImageData(0, 0, this.canvas.width, this.canvas.height);
+ return {
+ data: imageData.data,
+ width: imageData.width,
+ height: imageData.height,
+ timestamp: Date.now()
+ };
+ }
+ }
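A minimal sketch of driving the Camera wrapper above from the package entry point (the camera module is re-exported from the index):

import { Camera } from 'solana-age-verify-sdk';

// Prompts for webcam access, grabs one RGBA frame, then releases the device.
async function grabSingleFrame() {
  const camera = new Camera();          // with no element passed, a hidden <video> is created
  await camera.start();                 // requests 640x480 from the front ('user') camera
  const frame = camera.captureFrame();  // { data, width, height, timestamp }
  await camera.stop();
  return frame;
}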
package/dist/embedding/descriptor.d.ts ADDED
@@ -0,0 +1,22 @@
+ /**
+ * Generates a deterministic 128-dimensional face embedding from landmarks.
+ * This is a geometric descriptor based on normalized facial feature relationships.
+ *
+ * IMPORTANT: This is NOT a deep learning embedding like FaceNet.
+ * It's a deterministic geometric feature vector for uniqueness/hashing purposes.
+ */
+ export declare class FaceDescriptor {
+ /**
+ * Generate a 128-dimensional embedding from facial landmarks.
+ * The embedding is deterministic - same landmarks = same embedding.
+ *
+ * @param landmarks Flat array of [x, y, z, x, y, z, ...] coordinates
+ * @returns 128-dimensional float array
+ */
+ static generate(landmarks: number[]): number[];
+ /**
+ * Quantize embedding to reduce precision for more stable hashing.
+ * Rounds each dimension to 3 decimal places.
+ */
+ static quantize(embedding: number[]): number[];
+ }
package/dist/embedding/descriptor.js ADDED
@@ -0,0 +1,134 @@
+ /**
+ * Generates a deterministic 128-dimensional face embedding from landmarks.
+ * This is a geometric descriptor based on normalized facial feature relationships.
+ *
+ * IMPORTANT: This is NOT a deep learning embedding like FaceNet.
+ * It's a deterministic geometric feature vector for uniqueness/hashing purposes.
+ */
+ export class FaceDescriptor {
+ /**
+ * Generate a 128-dimensional embedding from facial landmarks.
+ * The embedding is deterministic - same landmarks = same embedding.
+ *
+ * @param landmarks Flat array of [x, y, z, x, y, z, ...] coordinates
+ * @returns 128-dimensional float array
+ */
+ static generate(landmarks) {
+ if (!landmarks || landmarks.length < 18) {
+ // Return zero vector if insufficient data
+ return new Array(128).fill(0);
+ }
+ // Parse landmarks into points (assuming groups of 3: x, y, z)
+ const points = [];
+ for (let i = 0; i < landmarks.length; i += 3) {
+ points.push({
+ x: landmarks[i],
+ y: landmarks[i + 1],
+ z: landmarks[i + 2] || 0
+ });
+ }
+ if (points.length < 6) {
+ return new Array(128).fill(0);
+ }
+ // BlazeFace 6 keypoints: R-Eye, L-Eye, Nose, Mouth, R-Ear, L-Ear
+ const eyeR = points[0];
+ const eyeL = points[1];
+ const nose = points[2];
+ const mouth = points[3];
+ const earR = points[4];
+ const earL = points[5];
+ // Normalize coordinates relative to face center and scale
+ const centerX = (eyeR.x + eyeL.x + nose.x + mouth.x) / 4;
+ const centerY = (eyeR.y + eyeL.y + nose.y + mouth.y) / 4;
+ // Use inter-eye distance as scale reference
+ const eyeDist = Math.sqrt(Math.pow(eyeL.x - eyeR.x, 2) +
+ Math.pow(eyeL.y - eyeR.y, 2));
+ const scale = eyeDist > 0 ? eyeDist : 1;
+ // Normalize all points
+ const normalized = points.map(p => ({
+ x: (p.x - centerX) / scale,
+ y: (p.y - centerY) / scale,
+ z: p.z / scale
+ }));
+ // Build 128-dimensional feature vector
+ const embedding = [];
+ // 1. Normalized coordinates (6 points × 3 = 18 features)
+ normalized.forEach(p => {
+ embedding.push(p.x, p.y, p.z);
+ });
+ // 2. Pairwise distances (15 unique pairs from 6 points)
+ const pairwiseDistances = [];
+ for (let i = 0; i < normalized.length; i++) {
+ for (let j = i + 1; j < normalized.length; j++) {
+ const dist = Math.sqrt(Math.pow(normalized[j].x - normalized[i].x, 2) +
+ Math.pow(normalized[j].y - normalized[i].y, 2) +
+ Math.pow(normalized[j].z - normalized[i].z, 2));
+ pairwiseDistances.push(dist);
+ }
+ }
+ embedding.push(...pairwiseDistances);
+ // 3. Angular features (angles between key feature vectors)
+ const angles = [];
+ // Eye-to-nose angles
+ const eyeRToNose = {
+ x: nose.x - eyeR.x,
+ y: nose.y - eyeR.y
+ };
+ const eyeLToNose = {
+ x: nose.x - eyeL.x,
+ y: nose.y - eyeL.y
+ };
+ angles.push(Math.atan2(eyeRToNose.y, eyeRToNose.x));
+ angles.push(Math.atan2(eyeLToNose.y, eyeLToNose.x));
+ // Nose-to-mouth angle
+ const noseToMouth = {
+ x: mouth.x - nose.x,
+ y: mouth.y - nose.y
+ };
+ angles.push(Math.atan2(noseToMouth.y, noseToMouth.x));
+ // Eye-to-ear angles
+ const eyeRToEarR = {
+ x: earR.x - eyeR.x,
+ y: earR.y - eyeR.y
+ };
+ const eyeLToEarL = {
+ x: earL.x - eyeL.x,
+ y: earL.y - eyeL.y
+ };
+ angles.push(Math.atan2(eyeRToEarR.y, eyeRToEarR.x));
+ angles.push(Math.atan2(eyeLToEarL.y, eyeLToEarL.x));
+ embedding.push(...angles);
+ // 4. Ratios and geometric features
+ const faceWidth = Math.sqrt(Math.pow(earL.x - earR.x, 2) +
+ Math.pow(earL.y - earR.y, 2));
+ const faceHeight = Math.sqrt(Math.pow(mouth.y - ((eyeR.y + eyeL.y) / 2), 2));
+ embedding.push(eyeDist / faceWidth, // Eye spacing ratio
+ faceHeight / faceWidth, // Face aspect ratio
+ (nose.y - eyeR.y) / faceHeight, // Nose position ratio
+ (mouth.y - nose.y) / faceHeight // Mouth position ratio
+ );
+ // 5. Symmetry features
+ const leftSideWidth = Math.sqrt(Math.pow(earL.x - eyeL.x, 2) +
+ Math.pow(earL.y - eyeL.y, 2));
+ const rightSideWidth = Math.sqrt(Math.pow(eyeR.x - earR.x, 2) +
+ Math.pow(eyeR.y - earR.y, 2));
+ embedding.push(leftSideWidth / rightSideWidth); // Symmetry ratio
+ // 6. Pad to exactly 128 dimensions with deterministic derived features
+ while (embedding.length < 128) {
+ // Use polynomial combinations of existing features for padding
+ const idx = embedding.length % (embedding.length > 0 ? embedding.length : 1);
+ const val = embedding[idx] || 0;
+ // Deterministic transformation
+ embedding.push(Math.sin(val * Math.PI) * 0.1);
+ }
+ // Ensure exactly 128 dimensions
+ return embedding.slice(0, 128);
+ }
+ /**
+ * Quantize embedding to reduce precision for more stable hashing.
+ * Rounds each dimension to 3 decimal places.
+ */
+ static quantize(embedding) {
+ return embedding.map(v => Math.round(v * 1000) / 1000);
+ }
+ }
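A small sketch of feeding the generator above with the six BlazeFace keypoints; the pixel coordinates are invented for illustration and the dist import path is assumed to be reachable:

import { FaceDescriptor } from 'solana-age-verify-sdk/dist/embedding/descriptor';

// Flat [x, y, z, ...] array in keypoint order: R-eye, L-eye, nose, mouth, R-ear, L-ear
const landmarks = [
  220, 180, 0,   // right eye
  300, 182, 0,   // left eye
  260, 230, 0,   // nose
  262, 280, 0,   // mouth
  180, 200, 0,   // right ear
  340, 204, 0,   // left ear
];
const embedding = FaceDescriptor.generate(landmarks);  // deterministic, always 128 numbers
const quantized = FaceDescriptor.quantize(embedding);  // rounded to 3 decimals before hashing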
package/dist/hashing/facehash.d.ts ADDED
@@ -0,0 +1,3 @@
+ export declare function computeFaceHash(walletPubkey: string, salt: Uint8Array, embedding: number[]): Promise<string>;
+ export declare function generateSalt(): Uint8Array;
+ export declare function toHex(buffer: Uint8Array): string;
package/dist/hashing/facehash.js ADDED
@@ -0,0 +1,27 @@
+ export async function computeFaceHash(walletPubkey, salt, embedding) {
+ const encoder = new TextEncoder();
+ const prefixBytes = encoder.encode("talkchain-verify:v1");
+ // In production use bs58 to decode. Use raw utf8 bytes of string for now to satisfy mock requirements without extra deps
+ const pubkeyBytes = encoder.encode(walletPubkey);
+ const embeddingFloat32 = new Float32Array(embedding);
+ const embeddingBytes = new Uint8Array(embeddingFloat32.buffer);
+ const totalLength = prefixBytes.length + pubkeyBytes.length + salt.length + embeddingBytes.length;
+ const msg = new Uint8Array(totalLength);
+ let offset = 0;
+ msg.set(prefixBytes, offset);
+ offset += prefixBytes.length;
+ msg.set(pubkeyBytes, offset);
+ offset += pubkeyBytes.length;
+ msg.set(salt, offset);
+ offset += salt.length;
+ msg.set(embeddingBytes, offset);
+ const hashBuffer = await crypto.subtle.digest('SHA-256', msg);
+ const hashArray = Array.from(new Uint8Array(hashBuffer));
+ return hashArray.map(b => b.toString(16).padStart(2, '0')).join('');
+ }
+ export function generateSalt() {
+ return crypto.getRandomValues(new Uint8Array(16));
+ }
+ export function toHex(buffer) {
+ return Array.from(buffer).map(b => b.toString(16).padStart(2, '0')).join('');
+ }
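A sketch of binding a quantized embedding to a wallet with the helpers above (all three are re-exported from the index); persisting the salt is the caller's responsibility, since the same salt is needed to reproduce the hash:

import { computeFaceHash, generateSalt, toHex } from 'solana-age-verify-sdk';

// walletPubkey is a base58 Solana address; note the hash currently uses its
// raw UTF-8 bytes rather than a bs58 decode (see the comment in the source).
async function bindFaceToWallet(walletPubkey: string, quantizedEmbedding: number[]) {
  const salt = generateSalt();                                                // 16 random bytes
  const hash = await computeFaceHash(walletPubkey, salt, quantizedEmbedding); // hex SHA-256
  return { hash, salt: toHex(salt) };
}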
package/dist/hashing/webcrypto.d.ts ADDED
@@ -0,0 +1 @@
+ export declare const crypto: Crypto;
package/dist/hashing/webcrypto.js ADDED
@@ -0,0 +1 @@
+ export const crypto = globalThis.crypto;
package/dist/index.d.ts ADDED
@@ -0,0 +1,6 @@
+ export * from './verify';
+ export * from './types';
+ export * from './camera';
+ export * from './hashing/facehash';
+ export * from './adapters/blazeface';
+ export { createVerificationUI, setExecutionBackend } from './verify';
package/dist/index.js ADDED
@@ -0,0 +1,7 @@
+ export * from './verify';
+ export * from './types';
+ export * from './camera';
+ export * from './hashing/facehash';
+ export * from './adapters/blazeface';
+ // Re-export specific UI helper
+ export { createVerificationUI, setExecutionBackend } from './verify';
package/dist/liveness/challenges.d.ts ADDED
@@ -0,0 +1,3 @@
+ export type ChallengeType = 'turn_left' | 'turn_right' | 'look_up' | 'look_down' | 'nod_yes' | 'shake_no';
+ export declare function generateChallengeSequence(length?: number): ChallengeType[];
+ export declare const CHALLENGES: ChallengeType[];
package/dist/liveness/challenges.js ADDED
@@ -0,0 +1,34 @@
+ // Basic set of challenges for random selection
+ const AVAILABLE_CHALLENGES = [
+ 'turn_left', 'turn_right', 'look_up', 'look_down', 'nod_yes', 'shake_no'
+ ];
+ export function generateChallengeSequence(length = 5) {
+ const sequence = [];
+ const counts = {};
+ AVAILABLE_CHALLENGES.forEach(c => counts[c] = 0);
+ for (let i = 0; i < length; i++) {
+ let candidates = AVAILABLE_CHALLENGES.filter(c => {
+ // Constraint 2: No more than 2 of the same motion
+ if (counts[c] >= 2)
+ return false;
+ // Constraint 3: No consecutive challenges
+ if (i > 0 && sequence[i - 1] === c)
+ return false;
+ return true;
+ });
+ // Fallback if constraints lock us out
+ if (candidates.length === 0) {
+ candidates = AVAILABLE_CHALLENGES.filter(c => i === 0 || sequence[i - 1] !== c);
+ }
+ // Final safety check to ensure we definitely have candidates
+ if (candidates.length === 0) {
+ // This mathematically shouldn't happen with > 1 available challenges
+ candidates = AVAILABLE_CHALLENGES;
+ }
+ const choice = candidates[Math.floor(Math.random() * candidates.length)];
+ sequence.push(choice);
+ counts[choice]++;
+ }
+ return sequence;
+ }
+ export const CHALLENGES = generateChallengeSequence(5);
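A short sketch of requesting a fresh sequence at verification time rather than reusing the module-level CHALLENGES constant (which is fixed once at import); the dist import path is assumed:

import { generateChallengeSequence } from 'solana-age-verify-sdk/dist/liveness/challenges';

const steps = generateChallengeSequence(5);
// e.g. ['turn_left', 'nod_yes', 'look_up', 'turn_right', 'look_down']
// no motion appears twice in a row and none appears more than twice overall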
package/dist/liveness/scorer.d.ts ADDED
@@ -0,0 +1 @@
+ export declare function scoreLiveness(): number;
package/dist/liveness/scorer.js ADDED
@@ -0,0 +1,3 @@
+ export function scoreLiveness() {
+ return 0;
+ }
package/dist/liveness/texture.d.ts ADDED
@@ -0,0 +1,72 @@
+ /**
+ * Texture Analysis for Passive Liveness Detection
+ *
+ * This module analyzes the surface characteristics of a face to distinguish between:
+ * - Real human faces (3D, natural skin texture, pores, wrinkles)
+ * - Spoofing attempts (2D photos, screens, masks)
+ *
+ * Techniques used:
+ * 1. Local Binary Patterns (LBP) - Detects local texture patterns
+ * 2. Frequency Analysis - Identifies print/screen artifacts
+ * 3. Moiré Pattern Detection - Detects screen recapture
+ * 4. Reflectance Analysis - Analyzes light interaction with surface
+ */
+ export interface TextureAnalysisResult {
+ isReal: boolean;
+ confidence: number;
+ features: {
+ skinComplexity: number;
+ moireDetected: boolean;
+ frequencyScore: number;
+ reflectancePattern: 'natural' | 'artificial' | 'unknown';
+ };
+ debugInfo?: {
+ lbpHistogram: number[];
+ dominantFrequencies: number[];
+ };
+ }
+ /**
+ * Analyzes texture from a face region to detect liveness
+ */
+ export declare class TextureAnalyzer {
+ private canvas;
+ private ctx;
+ constructor();
+ /**
+ * Main analysis function
+ */
+ analyze(frame: ImageData | HTMLCanvasElement | OffscreenCanvas, faceRegion?: {
+ x: number;
+ y: number;
+ width: number;
+ height: number;
+ }): Promise<TextureAnalysisResult>;
+ /**
+ * Extract region of interest from frame
+ */
+ private extractRegion;
+ /**
+ * Local Binary Patterns (LBP) Analysis
+ * Detects micro-texture patterns in skin
+ */
+ private analyzeLBP;
+ /**
+ * Frequency Domain Analysis
+ * Detects print patterns and screen grids
+ */
+ private analyzeFrequency;
+ /**
+ * Moiré Pattern Detection
+ * Detects interference patterns from screen recapture
+ */
+ private detectMoire;
+ /**
+ * Reflectance Pattern Analysis
+ * Analyzes how light interacts with the surface
+ */
+ private analyzeReflectance;
+ /**
+ * Convert RGBA to grayscale
+ */
+ private toGrayscale;
+ }
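A sketch of running the passive texture check declared above on a captured frame; the face box and the 0.6 confidence cut-off are illustrative assumptions, and the dist import path is assumed:

import { TextureAnalyzer } from 'solana-age-verify-sdk/dist/liveness/texture';

// Hypothetical helper: reject obvious screen recaptures, then require a
// reasonably confident "real" verdict from the analyzer.
async function passesTextureCheck(frame: ImageData) {
  const analyzer = new TextureAnalyzer();
  const result = await analyzer.analyze(frame, { x: 100, y: 80, width: 240, height: 240 });
  if (result.features.moireDetected) return false;
  return result.isReal && result.confidence > 0.6;
}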