@facesmash/sdk 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/react.cjs ADDED
@@ -0,0 +1,1068 @@
1
+ 'use strict';
2
+
3
+ var PocketBase = require('pocketbase');
4
+ var faceapi = require('@vladmandic/face-api');
5
+ var react = require('react');
6
+ var jsxRuntime = require('react/jsx-runtime');
7
+
8
/**
 * CJS/ESM interop shim: an ES module is returned unchanged, while a plain
 * CommonJS export is wrapped so it can be consumed via `.default`.
 */
function _interopDefault (e) {
  if (e && e.__esModule) {
    return e;
  }
  return { default: e };
}
9
+
10
/**
 * CJS/ESM interop shim for namespace imports: mirrors every named export of
 * `e` onto a frozen, prototype-less object via live getters, and exposes the
 * original module as `default`. An ES module is returned unchanged.
 */
function _interopNamespace(e) {
  if (e && e.__esModule) return e;
  var n = Object.create(null);
  if (e) {
    for (const key of Object.keys(e)) {
      if (key === 'default') continue;
      const descriptor = Object.getOwnPropertyDescriptor(e, key);
      // Preserve accessor properties as-is; wrap data properties in a
      // getter so later mutations of the source module stay visible.
      Object.defineProperty(n, key, descriptor.get ? descriptor : {
        enumerable: true,
        get: function () { return e[key]; }
      });
    }
  }
  n.default = e;
  return Object.freeze(n);
}
27
+
28
// Interop wrappers: PocketBase is consumed via its default export,
// face-api as a namespace of named exports.
var PocketBase__default = /*#__PURE__*/_interopDefault(PocketBase);
var faceapi__namespace = /*#__PURE__*/_interopNamespace(faceapi);
30
+
31
// src/core/client.ts
// Module-level guard: model weights are downloaded at most once per session.
var modelsLoaded = false;
/**
 * Download and initialize all face-api model weights from `config.modelUrl`.
 * Progress is reported coarsely via `onProgress`: 10 after backend setup,
 * 100 when all networks are ready (or immediately if already loaded).
 * Resolves `true` on success, `false` on any load failure — never throws.
 */
async function loadModels(config, onProgress) {
  if (modelsLoaded) {
    onProgress?.(100);
    return true;
  }
  try {
    // Best-effort TF.js backend tuning; any failure here is swallowed and
    // model loading proceeds with whatever backend is active.
    try {
      const tf2 = faceapi__namespace.tf;
      if (tf2) {
        await tf2.setBackend("webgl");
        await tf2.ready();
        // Each flag is guarded: not every TF.js build registers these.
        if (tf2.env().flagRegistry?.CANVAS2D_WILL_READ_FREQUENTLY) {
          tf2.env().set("CANVAS2D_WILL_READ_FREQUENTLY", true);
        }
        if (tf2.env().flagRegistry?.WEBGL_EXP_CONV) {
          tf2.env().set("WEBGL_EXP_CONV", true);
        }
        if (config.debug) {
          console.log(`[FaceSmash] TF.js backend: ${tf2.getBackend()}`);
        }
      }
    } catch {
    }
    onProgress?.(10);
    // All five networks load in parallel from the same base URL; a single
    // failure rejects the whole batch and lands in the outer catch.
    await Promise.all([
      faceapi__namespace.nets.ssdMobilenetv1.loadFromUri(config.modelUrl),
      faceapi__namespace.nets.tinyFaceDetector.loadFromUri(config.modelUrl),
      faceapi__namespace.nets.faceLandmark68Net.loadFromUri(config.modelUrl),
      faceapi__namespace.nets.faceRecognitionNet.loadFromUri(config.modelUrl),
      faceapi__namespace.nets.faceExpressionNet.loadFromUri(config.modelUrl)
    ]);
    modelsLoaded = true;
    onProgress?.(100);
    if (config.debug) {
      console.log("[FaceSmash] Models loaded successfully");
    }
    return true;
  } catch (error) {
    if (config.debug) {
      console.error("[FaceSmash] Failed to load models:", error);
    }
    return false;
  }
}
77
/** True once loadModels() has completed successfully in this session. */
function areModelsLoaded() {
  return modelsLoaded;
}
80
/** Detector options for the primary SSD MobileNet V1 detection pass. */
function getSsdOptions(minConfidence) {
  return new faceapi__namespace.SsdMobilenetv1Options({ minConfidence });
}
83
/** Fallback detector options: TinyFaceDetector at 224px input, 0.4 score threshold. */
function getTinyOptions() {
  return new faceapi__namespace.TinyFaceDetectorOptions({ inputSize: 224, scoreThreshold: 0.4 });
}
86
/**
 * Extract a 128-d face descriptor from an image URL/data-URL or a media
 * element. Tries the SSD detector first, then falls back to the tiny
 * detector. Returns null when no face is found or on any error.
 */
async function extractDescriptor(input, config) {
  try {
    const media = typeof input === "string"
      ? await faceapi__namespace.fetchImage(input)
      : input;
    const primary = await faceapi__namespace
      .detectSingleFace(media, getSsdOptions(config.minDetectionConfidence))
      .withFaceLandmarks()
      .withFaceDescriptor();
    // Only run the fallback detector when the primary pass found nothing.
    const detection = primary ?? await faceapi__namespace
      .detectSingleFace(media, getTinyOptions())
      .withFaceLandmarks()
      .withFaceDescriptor();
    return detection?.descriptor ?? null;
  } catch (error) {
    if (config.debug) {
      console.error("[FaceSmash] Descriptor extraction failed:", error);
    }
    return null;
  }
}
101
/**
 * Full single-face analysis of an image (URL or data-URL): detection with
 * SSD-then-tiny fallback, 68-point landmarks, 128-d descriptor, plus
 * quality / lighting / head-pose / face-size heuristics.
 * Returns null when no face is found or on any internal error.
 */
async function analyzeFace(imageData, config) {
  try {
    const img = await faceapi__namespace.fetchImage(imageData);
    let detection = await faceapi__namespace.detectSingleFace(img, getSsdOptions(config.minDetectionConfidence)).withFaceLandmarks().withFaceDescriptor();
    if (!detection) {
      detection = await faceapi__namespace.detectSingleFace(img, getTinyOptions()).withFaceLandmarks().withFaceDescriptor();
    }
    if (!detection) return null;
    const headPose = estimateHeadPose(detection.landmarks, detection.detection.box);
    const imgWidth = img.width || 640;
    const imgHeight = img.height || 480;
    const faceSizeCheck = validateFaceSize(detection.detection.box, imgWidth, imgHeight);
    if (!faceSizeCheck.isValid) {
      // Face found but unusable (too near/far/small): return the raw
      // descriptor with zeroed quality scores and a rejection reason.
      return {
        descriptor: detection.descriptor,
        normalizedDescriptor: normalizeDescriptor(detection.descriptor),
        confidence: detection.detection.score,
        qualityScore: 0,
        lightingScore: 0,
        headPose,
        faceSizeCheck,
        eyeAspectRatio: 0,
        rejectionReason: faceSizeCheck.reason
      };
    }
    const { avgEAR } = getEyeAspectRatios(detection.landmarks);
    let lightingAnalysis;
    try {
      lightingAnalysis = analyzeLighting(detection, img);
    } catch {
      // Canvas unavailable: assume neutral lighting rather than failing.
      lightingAnalysis = {
        score: 0.5,
        brightness: 0.5,
        contrast: 0.5,
        evenness: 0.5,
        conditions: { tooDark: false, tooBright: false, uneven: false, optimal: false }
      };
    }
    // Quality = detector score, damped by lighting, face size, and pose.
    let qualityScore = Math.min(detection.detection.score, 1);
    qualityScore *= 0.7 + lightingAnalysis.score * 0.3;
    const faceArea = detection.detection.box.width * detection.detection.box.height;
    // FIX: the size ratio was previously computed against a hard-coded
    // 640x640 frame even though the actual image dimensions are known
    // (imgWidth/imgHeight above); use the real image area.
    const imageArea = imgWidth * imgHeight;
    const sizeRatio = Math.min(faceArea / imageArea, 0.3) / 0.3;
    qualityScore *= 0.8 + sizeRatio * 0.2;
    if (!headPose.isFrontal) {
      // Penalize off-axis poses, but never below half the base quality.
      const anglePenalty = Math.max(0.5, 1 - (Math.abs(headPose.yaw) + Math.abs(headPose.pitch)) * 0.3);
      qualityScore *= anglePenalty;
    }
    qualityScore = Math.max(0, Math.min(1, qualityScore));
    return {
      descriptor: detection.descriptor,
      normalizedDescriptor: normalizeDescriptor(detection.descriptor),
      confidence: detection.detection.score,
      qualityScore,
      lightingScore: lightingAnalysis.score,
      headPose,
      faceSizeCheck,
      eyeAspectRatio: avgEAR
    };
  } catch (error) {
    if (config.debug) {
      console.error("[FaceSmash] Face analysis failed:", error);
    }
    return null;
  }
}
167
/**
 * Extract a single descriptor from a batch of images. With one image this
 * is a plain extraction; with several, descriptors are extracted per image
 * and averaged component-wise. Returns null when nothing was extracted.
 */
async function processImages(images, config) {
  if (images.length === 1) {
    return extractDescriptor(images[0], config);
  }
  const descriptors = [];
  for (const image of images) {
    const descriptor = await extractDescriptor(image, config);
    if (descriptor) descriptors.push(descriptor);
  }
  if (descriptors.length === 0) return null;
  // Average in double precision, then store back into float32.
  const count = descriptors.length;
  const averaged = new Float32Array(descriptors[0].length);
  for (let i = 0; i < averaged.length; i++) {
    let total = 0;
    for (const descriptor of descriptors) total += descriptor[i];
    averaged[i] = total / count;
  }
  return averaged;
}
185
/** Euclidean distance between two 2-D landmark points. */
function euclidean(a, b) {
  const dx = a.x - b.x;
  const dy = a.y - b.y;
  return Math.sqrt(dx * dx + dy * dy);
}
/**
 * Eye aspect ratio from the 6 eye landmarks: (v1 + v2) / (2h), where v1/v2
 * are the vertical lid distances and h the horizontal eye width.
 * Returns a neutral 0.3 when fewer than 6 points are supplied, and 0 for a
 * degenerate (zero-width) eye.
 */
function calculateEAR(eye) {
  if (eye.length < 6) return 0.3;
  const vertical1 = euclidean(eye[1], eye[5]);
  const vertical2 = euclidean(eye[2], eye[4]);
  const horizontal = euclidean(eye[0], eye[3]);
  if (horizontal === 0) return 0;
  return (vertical1 + vertical2) / (2 * horizontal);
}
/** EARs for both eyes plus their mean. */
function getEyeAspectRatios(landmarks) {
  const leftEAR = calculateEAR(landmarks.getLeftEye());
  const rightEAR = calculateEAR(landmarks.getRightEye());
  const avgEAR = (leftEAR + rightEAR) / 2;
  return { leftEAR, rightEAR, avgEAR };
}
200
/**
 * Rough head-pose estimate from landmarks and the detection box.
 * Yaw/pitch are the nose-tip offset from the box center, normalized to
 * half-box units; roll is the jaw-line angle in radians. `isFrontal`
 * applies fixed tolerances (|yaw| < 0.35, |pitch| < 0.4, |roll| < 0.25).
 */
function estimateHeadPose(landmarks, box) {
  const noseTip = landmarks.getNose()[3];
  const jaw = landmarks.getJawOutline();
  const centerX = box.x + box.width / 2;
  const centerY = box.y + box.height / 2;
  const yaw = (noseTip.x - centerX) / (box.width / 2);
  const pitch = (noseTip.y - centerY) / (box.height / 2);
  const jawStart = jaw[0];
  const jawEnd = jaw[jaw.length - 1];
  const roll = Math.atan2(jawEnd.y - jawStart.y, jawEnd.x - jawStart.x);
  const isFrontal =
    Math.abs(yaw) < 0.35 &&
    Math.abs(pitch) < 0.4 &&
    Math.abs(roll) < 0.25;
  return { yaw, pitch, roll, isFrontal };
}
214
/**
 * Check that a detected face box occupies a usable share of the frame:
 * area ratio in [0.02, 0.65] and at least 80px on each side.
 * Returns { isValid, ratio } plus a human-readable `reason` on failure.
 */
function validateFaceSize(box, frameWidth = 640, frameHeight = 480) {
  const ratio = (box.width * box.height) / (frameWidth * frameHeight);
  const fail = (reason) => ({ isValid: false, ratio, reason });
  if (ratio < 0.02) return fail("Face too far from camera");
  if (ratio > 0.65) return fail("Face too close to camera");
  if (box.width < 80 || box.height < 80) return fail("Face too small for reliable recognition");
  return { isValid: true, ratio };
}
221
/**
 * L2-normalize a descriptor into a new Float32Array. A zero vector is
 * returned unchanged (same object) to avoid division by zero.
 */
function normalizeDescriptor(descriptor) {
  let sumOfSquares = 0;
  for (const value of descriptor) sumOfSquares += value * value;
  const norm = Math.sqrt(sumOfSquares);
  if (norm === 0) return descriptor;
  return Float32Array.from(descriptor, (value) => value / norm);
}
230
/**
 * Estimate lighting quality over the detected face region (plus a 20px
 * margin). Draws the source onto an offscreen canvas, samples RGB pixel
 * brightness, and scores brightness/contrast/evenness against fixed
 * thresholds. Throws when a 2D canvas context is unavailable.
 */
function analyzeLighting(detection, imageElement) {
  const canvas = document.createElement("canvas");
  const ctx = canvas.getContext("2d");
  if (!ctx) throw new Error("Cannot get canvas context");
  canvas.width = imageElement.width || 640;
  canvas.height = imageElement.height || 640;
  if (imageElement instanceof HTMLImageElement) {
    ctx.drawImage(imageElement, 0, 0, canvas.width, canvas.height);
  } else {
    ctx.drawImage(imageElement, 0, 0);
  }
  const faceBox = detection.detection.box;
  // FIX: compute the sample rect width/height from the *clamped* origin.
  // Previously the origin was clamped to 0 but the extent was measured from
  // the unclamped faceBox.x/y, so a face near the top/left edge made
  // getImageData read past the canvas, padding with transparent black and
  // skewing the brightness statistics toward "too dark".
  const x0 = Math.max(0, faceBox.x - 20);
  const y0 = Math.max(0, faceBox.y - 20);
  const sampleWidth = Math.min(canvas.width - x0, faceBox.width + 40);
  const sampleHeight = Math.min(canvas.height - y0, faceBox.height + 40);
  const faceImageData = ctx.getImageData(x0, y0, sampleWidth, sampleHeight);
  const pixels = faceImageData.data;
  let totalBrightness = 0;
  const brightnessValues = [];
  // RGBA stride of 4; brightness is the plain RGB mean (alpha ignored).
  for (let i = 0; i < pixels.length; i += 4) {
    const brightness = (pixels[i] + pixels[i + 1] + pixels[i + 2]) / 3;
    totalBrightness += brightness;
    brightnessValues.push(brightness);
  }
  const avgBrightness = totalBrightness / (pixels.length / 4);
  const variance = brightnessValues.reduce((acc, val) => acc + (val - avgBrightness) ** 2, 0) / brightnessValues.length;
  const contrast = Math.sqrt(variance);
  const evenness = Math.max(0, 1 - contrast / 128);
  const tooDark = avgBrightness < 80;
  const tooBright = avgBrightness > 200;
  const uneven = evenness < 0.6;
  const optimal = !tooDark && !tooBright && !uneven;
  let score = 0.5;
  if (optimal) score = 0.9;
  else if (tooDark) score = Math.max(0.2, avgBrightness / 160);
  else if (tooBright) score = Math.max(0.2, (255 - avgBrightness) / 110);
  else if (uneven) score = Math.max(0.3, evenness);
  return {
    score,
    brightness: avgBrightness / 255,
    contrast: Math.min(contrast / 64, 1),
    evenness,
    conditions: { tooDark, tooBright, uneven, optimal }
  };
}
277
/** Similarity in (-inf, 1]: 1 minus the euclidean distance between descriptors. */
function calculateSimilarity(d1, d2) {
  return 1 - faceapi__namespace.euclideanDistance(d1, d2);
}
280
/** True when descriptor similarity meets the threshold (default 0.45). */
function facesMatch(d1, d2, threshold = 0.45) {
  return calculateSimilarity(d1, d2) >= threshold;
}
283
/**
 * Match two descriptors against a lighting- and confidence-adapted
 * threshold. Poor lighting (< 0.4) loosens the threshold slightly; very
 * good lighting (> 0.8) tightens it; `confidenceBoost` further lowers it,
 * floored at 0.35. Mismatched descriptor lengths never match.
 */
function enhancedMatch(descriptor1, descriptor2, baseThreshold = 0.45, confidenceBoost = 0, lightingScore = 0.5) {
  if (descriptor1.length !== descriptor2.length) {
    return { isMatch: false, similarity: 0, adaptedThreshold: baseThreshold };
  }
  const similarity = calculateSimilarity(descriptor1, descriptor2);
  let threshold = baseThreshold;
  if (lightingScore < 0.4) {
    threshold = Math.max(0.35, threshold - 0.05);
  } else if (lightingScore > 0.8) {
    threshold = Math.min(0.6, threshold + 0.02);
  }
  threshold = Math.max(0.35, threshold - confidenceBoost * 0.05);
  return {
    isMatch: similarity >= threshold,
    similarity,
    adaptedThreshold: threshold
  };
}
301
/**
 * Match a descriptor against a user's stored templates. Tracks the single
 * best similarity, a quality*weight-weighted average, and how many
 * templates individually matched. Overall match requires either the best
 * similarity to clear `baseThreshold`, or >= 60% of templates to match
 * (the fraction is over ALL templates, including ones skipped for having
 * no descriptor).
 */
function multiTemplateMatch(newDescriptor, templates, baseThreshold, lightingScore = 0.5) {
  if (templates.length === 0) {
    return { isMatch: false, bestSimilarity: 0, avgSimilarity: 0, matchCount: 0 };
  }
  let bestSimilarity = 0;
  let matchCount = 0;
  let weightedSum = 0;
  let totalWeight = 0;
  for (const template of templates) {
    if (!template.descriptor || template.descriptor.length === 0) continue;
    const match = enhancedMatch(
      newDescriptor,
      template.descriptor,
      baseThreshold,
      template.weight,
      lightingScore
    );
    bestSimilarity = Math.max(bestSimilarity, match.similarity);
    const contribution = template.quality * template.weight;
    weightedSum += match.similarity * contribution;
    totalWeight += contribution;
    if (match.isMatch) matchCount += 1;
  }
  const avgSimilarity = totalWeight > 0 ? weightedSum / totalWeight : 0;
  const isMatch = bestSimilarity >= baseThreshold || matchCount / templates.length >= 0.6;
  return { isMatch, bestSimilarity, avgSimilarity, matchCount };
}
330
/**
 * Weight a login sample's influence on the stored embedding. High-quality,
 * well-lit, confident samples are amplified; poor ones are damped. The
 * result is clamped to [0.1, 3].
 */
function calculateLearningWeight(qualityScore, lightingScore, confidence) {
  const qualityFactor =
    qualityScore > 0.8 ? 1.5 :
    qualityScore > 0.6 ? 1.2 :
    qualityScore < 0.4 ? 0.5 : 1;
  const lightingFactor =
    lightingScore > 0.7 ? 1.3 :
    lightingScore < 0.4 ? 0.7 : 1;
  const confidenceFactor =
    confidence > 0.8 ? 1.2 :
    confidence < 0.5 ? 0.8 : 1;
  const weight = 1 * qualityFactor * lightingFactor * confidenceFactor;
  return Math.max(0.1, Math.min(weight, 3));
}
341
+
342
// src/core/types.ts
// Baseline SDK configuration; any caller-supplied field overrides these.
var DEFAULT_CONFIG = {
  apiUrl: "https://api.facesmash.app",
  modelUrl: "https://cdn.jsdelivr.net/npm/@vladmandic/face-api/model",
  minDetectionConfidence: 0.3,
  matchThreshold: 0.45,
  minQualityScore: 0.2,
  maxTemplatesPerUser: 10,
  debug: false
};
/** Shallow-merge a partial user config over the defaults. */
function resolveConfig(config) {
  return Object.assign({}, DEFAULT_CONFIG, config);
}
355
+
356
// src/core/client.ts
/**
 * Core SDK client: wraps a PocketBase backend with face-based login and
 * registration. Emits typed events ("login-start", "models-loaded", ...)
 * to subscribers registered via on(). All public async methods resolve a
 * { success, ... } result object and never reject for expected failures.
 */
var FaceSmashClient = class {
  constructor(config) {
    this.listeners = [];
    this._modelsLoaded = false;
    this.config = resolveConfig(config);
    this.pb = new PocketBase__default.default(this.config.apiUrl);
    // Allow overlapping requests (PocketBase cancels duplicates by default).
    this.pb.autoCancellation(false);
  }
  // ─── Event System ───────────────────────────────────────────
  /** Subscribe to client events; returns an unsubscribe function. */
  on(listener) {
    this.listeners.push(listener);
    return () => {
      this.listeners = this.listeners.filter((l) => l !== listener);
    };
  }
  /** Notify all listeners; a throwing listener is ignored. */
  emit(event) {
    for (const listener of this.listeners) {
      try {
        listener(event);
      } catch {
      }
    }
  }
  // ─── Model Loading ──────────────────────────────────────────
  get isReady() {
    return this._modelsLoaded;
  }
  /**
   * Load face-api models, forwarding progress to `onProgress` and to
   * "models-loading" events. Resolves true on success. Idempotent.
   */
  async init(onProgress) {
    if (this._modelsLoaded) return true;
    this.emit({ type: "models-loading", progress: 0 });
    const success = await loadModels(this.config, (progress) => {
      onProgress?.(progress);
      this.emit({ type: "models-loading", progress });
    });
    if (success) {
      this._modelsLoaded = true;
      this.emit({ type: "models-loaded" });
    } else {
      this.emit({ type: "models-error", error: "Failed to load face recognition models" });
    }
    return success;
  }
  // ─── Face Analysis ──────────────────────────────────────────
  /** Analyze one image; emits "face-detected" or "face-lost". */
  async analyzeFace(imageData) {
    this.ensureReady();
    const result = await analyzeFace(imageData, this.config);
    if (result) {
      this.emit({ type: "face-detected", analysis: result });
    } else {
      this.emit({ type: "face-lost" });
    }
    return result;
  }
  // ─── Login ──────────────────────────────────────────────────
  /**
   * Attempt face login from one or more captured frames. Picks the
   * highest-quality analysis, then compares it against every stored
   * profile embedding (plus that user's saved templates). Returns on the
   * FIRST profile that matches — profiles are checked in list order, not
   * globally ranked.
   */
  async login(images) {
    this.ensureReady();
    this.emit({ type: "login-start" });
    try {
      // Keep only the best-quality, non-rejected analysis across frames.
      let bestAnalysis = null;
      for (const img of images) {
        const analysis = await analyzeFace(img, this.config);
        if (analysis && !analysis.rejectionReason) {
          if (!bestAnalysis || analysis.qualityScore > bestAnalysis.qualityScore) {
            bestAnalysis = analysis;
          }
        }
      }
      if (!bestAnalysis) {
        const error2 = "No face detected in any image";
        this.emit({ type: "login-failed", error: error2 });
        return { success: false, error: error2 };
      }
      if (bestAnalysis.qualityScore < this.config.minQualityScore) {
        const error2 = "Face quality too low. Improve lighting and face the camera directly.";
        this.emit({ type: "login-failed", error: error2 });
        return { success: false, error: error2 };
      }
      // NOTE(review): fetches every profile on each login attempt — O(users)
      // network and matching cost per login.
      const profiles = await this.pb.collection("user_profiles").getFullList();
      if (profiles.length === 0) {
        const error2 = "No registered users found";
        this.emit({ type: "login-failed", error: error2 });
        return { success: false, error: error2 };
      }
      let bestMatch = { user: null, similarity: 0 };
      for (const profile of profiles) {
        if (!profile.face_embedding) continue;
        const storedEmbedding = new Float32Array(profile.face_embedding);
        // Baseline: match against the profile's primary embedding.
        let matchResult = enhancedMatch(
          bestAnalysis.descriptor,
          storedEmbedding,
          this.config.matchThreshold,
          0,
          bestAnalysis.lightingScore
        );
        // Then try the user's stored templates; template lookup is
        // best-effort and any backend error falls through silently.
        try {
          // NOTE(review): profile.email is interpolated directly into the
          // PocketBase filter string — an email containing `"` could break
          // or alter the filter. Escape/parameterize before shipping.
          const templates = await this.pb.collection("face_templates").getList(1, 50, {
            filter: `user_email="${profile.email}"`,
            sort: "-quality_score"
          });
          if (templates.items.length > 0) {
            const templateData = templates.items.filter((t) => t.descriptor && t.descriptor.length > 0).map((t) => ({
              descriptor: new Float32Array(t.descriptor),
              quality: t.quality_score || 0.5,
              weight: 1
            }));
            if (templateData.length > 0) {
              const multiResult = multiTemplateMatch(
                bestAnalysis.descriptor,
                templateData,
                this.config.matchThreshold,
                bestAnalysis.lightingScore
              );
              // Template result only replaces the baseline when strictly better.
              if (multiResult.bestSimilarity > matchResult.similarity) {
                matchResult = {
                  isMatch: multiResult.isMatch,
                  similarity: multiResult.bestSimilarity,
                  adaptedThreshold: this.config.matchThreshold
                };
              }
            }
          }
        } catch {
        }
        const userProfile = {
          id: profile.id,
          name: profile.name,
          email: profile.email,
          face_embedding: profile.face_embedding,
          created: profile.created,
          updated: profile.updated
        };
        if (matchResult.similarity > bestMatch.similarity) {
          bestMatch = { user: userProfile, similarity: matchResult.similarity };
        }
        if (matchResult.isMatch) {
          // Persist the scan / adaptive-learning side effects; a storage
          // failure must not block a successful login.
          try {
            await this.storeLoginScan(userProfile, bestAnalysis);
          } catch {
          }
          this.emit({
            type: "login-success",
            user: userProfile,
            similarity: matchResult.similarity
          });
          return { success: true, user: userProfile, similarity: matchResult.similarity };
        }
      }
      const error = bestMatch.similarity > 0.4 ? "Face partially matched but did not meet security threshold." : "Face not recognized.";
      this.emit({ type: "login-failed", error, bestSimilarity: bestMatch.similarity });
      return { success: false, error, similarity: bestMatch.similarity };
    } catch (err) {
      const error = err instanceof Error ? err.message : "Unknown error during login";
      this.emit({ type: "login-failed", error });
      return { success: false, error };
    }
  }
  // ─── Registration ───────────────────────────────────────────
  /**
   * Register a new user from captured frames: pick the best analysis,
   * reject faces already registered (similarity >= 0.75 to any stored
   * embedding), then create the profile, an initial template, and a
   * registration scan record.
   */
  async register(name, images, email) {
    this.ensureReady();
    this.emit({ type: "register-start" });
    try {
      let bestAnalysis = null;
      let bestImageIdx = 0; // tracked but currently unused below
      for (let i = 0; i < images.length; i++) {
        const analysis = await analyzeFace(images[i], this.config);
        if (analysis && !analysis.rejectionReason) {
          if (!bestAnalysis || analysis.qualityScore > bestAnalysis.qualityScore) {
            bestAnalysis = analysis;
            bestImageIdx = i;
          }
        }
      }
      if (!bestAnalysis) {
        const error = "No face detected in any image";
        this.emit({ type: "register-failed", error });
        return { success: false, error };
      }
      if (bestAnalysis.qualityScore < this.config.minQualityScore) {
        const error = "Face quality too low for registration.";
        this.emit({ type: "register-failed", error });
        return { success: false, error };
      }
      // Duplicate-face guard against every existing profile embedding.
      const existingProfiles = await this.pb.collection("user_profiles").getFullList();
      for (const profile of existingProfiles) {
        if (!profile.face_embedding) continue;
        const stored = new Float32Array(profile.face_embedding);
        if (stored.length !== bestAnalysis.descriptor.length) continue;
        // NOTE(review): dynamic import() of '@vladmandic/face-api' inside a
        // CJS bundle, although faceapi__namespace is already in scope —
        // presumably a bundler artifact; verify it resolves at runtime.
        const similarity = 1 - (await import('@vladmandic/face-api')).euclideanDistance(bestAnalysis.descriptor, stored);
        if (similarity >= 0.75) {
          const error = `This face is already registered to ${profile.name || profile.email}`;
          this.emit({ type: "register-failed", error });
          return { success: false, error };
        }
      }
      const embeddingArray = Array.from(bestAnalysis.descriptor);
      // Synthesize an email from the name when none was provided.
      const record = await this.pb.collection("user_profiles").create({
        name,
        email: email || `${name.toLowerCase().replace(/\s+/g, ".")}@facesmash.app`,
        face_embedding: embeddingArray
      });
      await this.pb.collection("face_templates").create({
        user_email: record.email,
        descriptor: embeddingArray,
        quality_score: bestAnalysis.qualityScore,
        label: "registration"
      });
      await this.pb.collection("face_scans").create({
        user_email: record.email,
        face_embedding: JSON.stringify(embeddingArray),
        confidence: String(bestAnalysis.confidence),
        scan_type: "registration",
        quality_score: String(bestAnalysis.qualityScore)
      });
      const user = {
        id: record.id,
        name: record.name,
        email: record.email,
        face_embedding: embeddingArray,
        created: record.created,
        updated: record.updated
      };
      this.emit({ type: "register-success", user });
      return { success: true, user };
    } catch (err) {
      const error = err instanceof Error ? err.message : "Unknown error during registration";
      this.emit({ type: "register-failed", error });
      return { success: false, error };
    }
  }
  // ─── Helpers ────────────────────────────────────────────────
  /** Throw unless loadModels() has completed (module-level flag). */
  ensureReady() {
    if (!areModelsLoaded()) {
      throw new Error(
        "FaceSmash models not loaded. Call client.init() first."
      );
    }
  }
  /**
   * Persist a successful login: a sign-in log, a scan record, and — for
   * good samples — adaptive learning: blend the new descriptor into the
   * stored embedding (EMA, rate <= 0.3) and save it as a new template,
   * evicting one when the per-user cap is reached.
   */
  async storeLoginScan(user, analysis) {
    const embeddingArray = Array.from(analysis.descriptor);
    await this.pb.collection("sign_in_logs").create({
      user_email: user.email,
      success: true
    });
    await this.pb.collection("face_scans").create({
      user_email: user.email,
      face_embedding: JSON.stringify(embeddingArray),
      confidence: String(analysis.confidence),
      scan_type: "login",
      quality_score: String(analysis.qualityScore)
    });
    if (analysis.qualityScore > 0.5) {
      const weight = calculateLearningWeight(
        analysis.qualityScore,
        analysis.lightingScore,
        analysis.confidence
      );
      // Exponential moving average of the stored embedding.
      const learningRate = Math.min(weight * 0.1, 0.3);
      const current = new Float32Array(user.face_embedding);
      const updated = new Float32Array(current.length);
      for (let i = 0; i < current.length; i++) {
        updated[i] = current[i] * (1 - learningRate) + analysis.descriptor[i] * learningRate;
      }
      await this.pb.collection("user_profiles").update(user.id, {
        face_embedding: Array.from(updated)
      });
    }
    if (analysis.qualityScore > 0.6) {
      // Ascending quality sort — items[0] is presumably the lowest-quality
      // template (verify PocketBase sort semantics), evicted when at cap.
      const existing = await this.pb.collection("face_templates").getList(1, 50, {
        filter: `user_email="${user.email}"`,
        sort: "quality_score"
      });
      if (existing.items.length >= this.config.maxTemplatesPerUser) {
        await this.pb.collection("face_templates").delete(existing.items[0].id);
      }
      await this.pb.collection("face_templates").create({
        user_email: user.email,
        descriptor: embeddingArray,
        quality_score: analysis.qualityScore,
        label: "auto"
      });
    }
  }
};
640
+
641
// src/index.ts
/** Factory for non-React usage: build a standalone client from a config. */
function createFaceSmash(config) {
  return new FaceSmashClient(config);
}
// React context carrying { client, isReady, isLoading, loadProgress, error,
// retryInit }; null outside a <FaceSmashProvider>.
var FaceSmashContext = react.createContext(null);
646
/**
 * React provider: creates one FaceSmashClient per mount, loads the models
 * on mount, and exposes { client, isReady, isLoading, loadProgress, error,
 * retryInit } through FaceSmashContext. Optional callbacks: onReady,
 * onError(message), onEvent(clientEvent).
 */
function FaceSmashProvider({
  children,
  config,
  onReady,
  onError,
  onEvent
}) {
  // Lazily create the client once; config changes after mount are ignored.
  const clientRef = react.useRef(null);
  if (!clientRef.current) {
    clientRef.current = new FaceSmashClient(config);
  }
  const client = clientRef.current;
  const [isReady, setIsReady] = react.useState(false);
  const [isLoading, setIsLoading] = react.useState(true);
  const [loadProgress, setLoadProgress] = react.useState(0);
  const [error, setError] = react.useState(null);
  // NOTE(review): initModels depends on onReady/onError identity — callers
  // passing inline arrow callbacks will re-trigger the effect below every
  // render (client.init() is idempotent, so this re-runs but does not
  // re-download models).
  const initModels = react.useCallback(async () => {
    setIsLoading(true);
    setError(null);
    setLoadProgress(0);
    const success = await client.init((progress) => {
      setLoadProgress(progress);
    });
    if (success) {
      setIsReady(true);
      setIsLoading(false);
      onReady?.();
    } else {
      const msg = "Failed to load face recognition models";
      setError(msg);
      setIsLoading(false);
      onError?.(msg);
    }
  }, [client, onReady, onError]);
  react.useEffect(() => {
    initModels();
  }, [initModels]);
  // Forward client events to the optional onEvent callback; client.on
  // returns the unsubscribe used as effect cleanup.
  react.useEffect(() => {
    if (!onEvent) return;
    return client.on(onEvent);
  }, [client, onEvent]);
  const retryInit = react.useCallback(() => {
    initModels();
  }, [initModels]);
  return /* @__PURE__ */ jsxRuntime.jsx(
    FaceSmashContext.Provider,
    {
      value: { client, isReady, isLoading, loadProgress, error, retryInit },
      children
    }
  );
}
698
/**
 * Access the FaceSmash context ({ client, isReady, isLoading, loadProgress,
 * error, retryInit }). Throws when called outside a <FaceSmashProvider>.
 */
function useFaceSmash() {
  const context = react.useContext(FaceSmashContext);
  if (!context) {
    throw new Error("useFaceSmash must be used within a <FaceSmashProvider>");
  }
  return context;
}
705
/**
 * Drop-in login component: opens the user-facing camera, captures
 * `captureCount` frames `captureDelay` ms apart, runs client.login(), and
 * reports the result through `onResult`. With `autoStart` (default) the
 * scan begins ~2s after the camera is ready. `overlay`, `loadingContent`
 * and `errorContent(error, retry)` allow custom UI.
 */
function FaceLogin({
  onResult,
  captureCount = 3,
  captureDelay = 500,
  autoStart = true,
  className,
  overlay,
  loadingContent,
  errorContent
}) {
  const { client, isReady, isLoading, error: initError } = useFaceSmash();
  const videoRef = react.useRef(null);
  const canvasRef = react.useRef(null);
  const streamRef = react.useRef(null);
  const [cameraError, setCameraError] = react.useState(null);
  const [isScanning, setIsScanning] = react.useState(false);
  const [status, setStatus] = react.useState("loading");
  // Request the front camera at 640x480 and attach it to the video element.
  const startCamera = react.useCallback(async () => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({
        video: { width: 640, height: 480, facingMode: "user" }
      });
      streamRef.current = stream;
      if (videoRef.current) {
        videoRef.current.srcObject = stream;
        await videoRef.current.play();
      }
      setCameraError(null);
    } catch {
      setCameraError("Camera access denied or not available");
      setStatus("error");
    }
  }, []);
  const stopCamera = react.useCallback(() => {
    streamRef.current?.getTracks().forEach((t) => t.stop());
    streamRef.current = null;
  }, []);
  // Snapshot the current video frame into the hidden canvas as a JPEG data URL.
  const captureFrame = react.useCallback(() => {
    const video = videoRef.current;
    const canvas = canvasRef.current;
    if (!video || !canvas) return null;
    canvas.width = video.videoWidth || 640;
    canvas.height = video.videoHeight || 480;
    const ctx = canvas.getContext("2d");
    if (!ctx) return null;
    ctx.drawImage(video, 0, 0);
    return canvas.toDataURL("image/jpeg", 0.9);
  }, []);
  // Capture N frames, then hand them to client.login(); onResult receives
  // either a capture failure or the login result.
  const scan = react.useCallback(async () => {
    if (!isReady || isScanning) return;
    setIsScanning(true);
    setStatus("scanning");
    const images = [];
    for (let i = 0; i < captureCount; i++) {
      const frame = captureFrame();
      if (frame) images.push(frame);
      if (i < captureCount - 1) {
        await new Promise((r) => setTimeout(r, captureDelay));
      }
    }
    if (images.length === 0) {
      const result2 = { success: false, error: "Failed to capture images from camera" };
      onResult(result2);
      setIsScanning(false);
      setStatus("error");
      return;
    }
    const result = await client.login(images);
    onResult(result);
    setIsScanning(false);
    setStatus("done");
  }, [isReady, isScanning, captureCount, captureDelay, captureFrame, client, onResult]);
  react.useEffect(() => {
    if (isReady) {
      startCamera();
      setStatus("ready");
    }
    return () => stopCamera();
  }, [isReady, startCamera, stopCamera]);
  // Auto-start: give the camera ~2s to warm up before the first scan.
  react.useEffect(() => {
    if (autoStart && status === "ready" && !isScanning) {
      const timer = setTimeout(scan, 2e3);
      return () => clearTimeout(timer);
    }
  }, [autoStart, status, isScanning, scan]);
  // NOTE(review): startCamera never rejects (it catches internally), so the
  // .then below always fires — a failed retry briefly sets status "error"
  // and then overwrites it with "ready". Confirm intended.
  const retry = react.useCallback(() => {
    setCameraError(null);
    setStatus("loading");
    startCamera().then(() => setStatus("ready"));
  }, [startCamera]);
  const displayError = cameraError || initError;
  if (displayError && errorContent) {
    return /* @__PURE__ */ jsxRuntime.jsx(jsxRuntime.Fragment, { children: errorContent(displayError, retry) });
  }
  if (isLoading && loadingContent) {
    return /* @__PURE__ */ jsxRuntime.jsx(jsxRuntime.Fragment, { children: loadingContent });
  }
  // Default UI: mirrored video preview, hidden capture canvas, optional
  // overlay, and a built-in error panel with a Retry button.
  return /* @__PURE__ */ jsxRuntime.jsxs("div", { className, style: { position: "relative" }, children: [
    /* @__PURE__ */ jsxRuntime.jsx(
      "video",
      {
        ref: videoRef,
        autoPlay: true,
        playsInline: true,
        muted: true,
        style: {
          width: "100%",
          height: "100%",
          objectFit: "cover",
          transform: "scaleX(-1)"
        }
      }
    ),
    /* @__PURE__ */ jsxRuntime.jsx("canvas", { ref: canvasRef, style: { display: "none" } }),
    overlay,
    displayError && /* @__PURE__ */ jsxRuntime.jsx(
      "div",
      {
        style: {
          position: "absolute",
          inset: 0,
          display: "flex",
          alignItems: "center",
          justifyContent: "center",
          backgroundColor: "rgba(0,0,0,0.8)",
          color: "white",
          padding: "1rem",
          textAlign: "center"
        },
        children: /* @__PURE__ */ jsxRuntime.jsxs("div", { children: [
          /* @__PURE__ */ jsxRuntime.jsx("p", { children: displayError }),
          /* @__PURE__ */ jsxRuntime.jsx("button", { onClick: retry, style: { marginTop: "0.5rem", cursor: "pointer" }, children: "Retry" })
        ] })
      }
    )
  ] });
}
842
+ function FaceRegister({
843
+ name,
844
+ email,
845
+ onResult,
846
+ captureCount = 3,
847
+ captureDelay = 500,
848
+ autoStart = true,
849
+ className,
850
+ overlay,
851
+ loadingContent,
852
+ errorContent
853
+ }) {
854
+ const { client, isReady, isLoading, error: initError } = useFaceSmash();
855
+ const videoRef = react.useRef(null);
856
+ const canvasRef = react.useRef(null);
857
+ const streamRef = react.useRef(null);
858
+ const [cameraError, setCameraError] = react.useState(null);
859
+ const [isCapturing, setIsCapturing] = react.useState(false);
860
+ const [status, setStatus] = react.useState("loading");
861
+ const startCamera = react.useCallback(async () => {
862
+ try {
863
+ const stream = await navigator.mediaDevices.getUserMedia({
864
+ video: { width: 640, height: 480, facingMode: "user" }
865
+ });
866
+ streamRef.current = stream;
867
+ if (videoRef.current) {
868
+ videoRef.current.srcObject = stream;
869
+ await videoRef.current.play();
870
+ }
871
+ setCameraError(null);
872
+ } catch {
873
+ setCameraError("Camera access denied or not available");
874
+ setStatus("error");
875
+ }
876
+ }, []);
877
+ const stopCamera = react.useCallback(() => {
878
+ streamRef.current?.getTracks().forEach((t) => t.stop());
879
+ streamRef.current = null;
880
+ }, []);
881
+ const captureFrame = react.useCallback(() => {
882
+ const video = videoRef.current;
883
+ const canvas = canvasRef.current;
884
+ if (!video || !canvas) return null;
885
+ canvas.width = video.videoWidth || 640;
886
+ canvas.height = video.videoHeight || 480;
887
+ const ctx = canvas.getContext("2d");
888
+ if (!ctx) return null;
889
+ ctx.drawImage(video, 0, 0);
890
+ return canvas.toDataURL("image/jpeg", 0.9);
891
+ }, []);
892
+ const capture = react.useCallback(async () => {
893
+ if (!isReady || isCapturing) return;
894
+ setIsCapturing(true);
895
+ setStatus("capturing");
896
+ const images = [];
897
+ for (let i = 0; i < captureCount; i++) {
898
+ const frame = captureFrame();
899
+ if (frame) images.push(frame);
900
+ if (i < captureCount - 1) {
901
+ await new Promise((r) => setTimeout(r, captureDelay));
902
+ }
903
+ }
904
+ if (images.length === 0) {
905
+ const result2 = { success: false, error: "Failed to capture images" };
906
+ onResult(result2);
907
+ setIsCapturing(false);
908
+ setStatus("error");
909
+ return;
910
+ }
911
+ const result = await client.register(name, images, email);
912
+ onResult(result);
913
+ setIsCapturing(false);
914
+ setStatus("done");
915
+ }, [isReady, isCapturing, captureCount, captureDelay, captureFrame, client, name, email, onResult]);
916
+ react.useEffect(() => {
917
+ if (isReady) {
918
+ startCamera();
919
+ setStatus("ready");
920
+ }
921
+ return () => stopCamera();
922
+ }, [isReady, startCamera, stopCamera]);
923
+ react.useEffect(() => {
924
+ if (autoStart && status === "ready" && !isCapturing) {
925
+ const timer = setTimeout(capture, 2e3);
926
+ return () => clearTimeout(timer);
927
+ }
928
+ }, [autoStart, status, isCapturing, capture]);
929
+ const retry = react.useCallback(() => {
930
+ setCameraError(null);
931
+ setStatus("loading");
932
+ startCamera().then(() => setStatus("ready"));
933
+ }, [startCamera]);
934
+ const displayError = cameraError || initError;
935
+ if (displayError && errorContent) {
936
+ return /* @__PURE__ */ jsxRuntime.jsx(jsxRuntime.Fragment, { children: errorContent(displayError, retry) });
937
+ }
938
+ if (isLoading && loadingContent) {
939
+ return /* @__PURE__ */ jsxRuntime.jsx(jsxRuntime.Fragment, { children: loadingContent });
940
+ }
941
+ return /* @__PURE__ */ jsxRuntime.jsxs("div", { className, style: { position: "relative" }, children: [
942
+ /* @__PURE__ */ jsxRuntime.jsx(
943
+ "video",
944
+ {
945
+ ref: videoRef,
946
+ autoPlay: true,
947
+ playsInline: true,
948
+ muted: true,
949
+ style: {
950
+ width: "100%",
951
+ height: "100%",
952
+ objectFit: "cover",
953
+ transform: "scaleX(-1)"
954
+ }
955
+ }
956
+ ),
957
+ /* @__PURE__ */ jsxRuntime.jsx("canvas", { ref: canvasRef, style: { display: "none" } }),
958
+ overlay,
959
+ displayError && /* @__PURE__ */ jsxRuntime.jsx(
960
+ "div",
961
+ {
962
+ style: {
963
+ position: "absolute",
964
+ inset: 0,
965
+ display: "flex",
966
+ alignItems: "center",
967
+ justifyContent: "center",
968
+ backgroundColor: "rgba(0,0,0,0.8)",
969
+ color: "white",
970
+ padding: "1rem",
971
+ textAlign: "center"
972
+ },
973
+ children: /* @__PURE__ */ jsxRuntime.jsxs("div", { children: [
974
+ /* @__PURE__ */ jsxRuntime.jsx("p", { children: displayError }),
975
+ /* @__PURE__ */ jsxRuntime.jsx("button", { onClick: retry, style: { marginTop: "0.5rem", cursor: "pointer" }, children: "Retry" })
976
+ ] })
977
+ }
978
+ )
979
+ ] });
980
+ }
981
/**
 * Hook exposing face-login state around `client.login`.
 *
 * @returns {{login: Function, isScanning: boolean, result: object|null,
 *            reset: Function, isReady: boolean}}
 *   `login(images)` resolves to the client's login result; `reset` clears
 *   the scanning flag and last result.
 */
function useFaceLogin() {
  const { client, isReady } = useFaceSmash();
  const [isScanning, setIsScanning] = react.useState(false);
  const [result, setResult] = react.useState(null);
  const login = react.useCallback(
    async (images) => {
      if (!isReady) {
        return { success: false, error: "Models not loaded yet" };
      }
      setIsScanning(true);
      setResult(null);
      try {
        const loginResult = await client.login(images);
        setResult(loginResult);
        return loginResult;
      } finally {
        // FIX: clear the flag even if client.login rejects; previously a
        // rejection left isScanning stuck at true.
        setIsScanning(false);
      }
    },
    [client, isReady]
  );
  const reset = react.useCallback(() => {
    setIsScanning(false);
    setResult(null);
  }, []);
  return { login, isScanning, result, reset, isReady };
}
1005
/**
 * Hook exposing face-registration state around `client.register`.
 *
 * @returns {{register: Function, isRegistering: boolean, result: object|null,
 *            reset: Function, isReady: boolean}}
 *   `register(name, images, email)` resolves to the client's registration
 *   result; `reset` clears the registering flag and last result.
 */
function useFaceRegister() {
  const { client, isReady } = useFaceSmash();
  const [isRegistering, setIsRegistering] = react.useState(false);
  const [result, setResult] = react.useState(null);
  const register = react.useCallback(
    async (name, images, email) => {
      if (!isReady) {
        return { success: false, error: "Models not loaded yet" };
      }
      setIsRegistering(true);
      setResult(null);
      try {
        const regResult = await client.register(name, images, email);
        setResult(regResult);
        return regResult;
      } finally {
        // FIX: clear the flag even if client.register rejects; previously a
        // rejection left isRegistering stuck at true.
        setIsRegistering(false);
      }
    },
    [client, isReady]
  );
  const reset = react.useCallback(() => {
    setIsRegistering(false);
    setResult(null);
  }, []);
  return { register, isRegistering, result, reset, isReady };
}
1029
/**
 * Hook exposing face-analysis state around `client.analyzeFace`.
 *
 * @returns {{analyze: Function, analysis: object|null,
 *            isAnalyzing: boolean, isReady: boolean}}
 *   `analyze(imageData)` resolves to the analysis result, or null when the
 *   models are not loaded yet.
 */
function useFaceAnalysis() {
  const { client, isReady } = useFaceSmash();
  const [analysis, setAnalysis] = react.useState(null);
  const [isAnalyzing, setIsAnalyzing] = react.useState(false);
  const analyze = react.useCallback(
    async (imageData) => {
      if (!isReady) return null;
      setIsAnalyzing(true);
      try {
        const result = await client.analyzeFace(imageData);
        setAnalysis(result);
        return result;
      } finally {
        // FIX: clear the flag even if client.analyzeFace rejects; previously
        // a rejection left isAnalyzing stuck at true.
        setIsAnalyzing(false);
      }
    },
    [client, isReady]
  );
  return { analyze, analysis, isAnalyzing, isReady };
}
1046
+
1047
// Public CommonJS surface of @facesmash/sdk/react: components, provider,
// core client, and hooks re-exported on `exports`.
exports.FaceLogin = FaceLogin;
exports.FaceRegister = FaceRegister;
exports.FaceSmashClient = FaceSmashClient;
exports.FaceSmashProvider = FaceSmashProvider;
exports.analyzeFace = analyzeFace;
exports.areModelsLoaded = areModelsLoaded;
exports.calculateLearningWeight = calculateLearningWeight;
exports.calculateSimilarity = calculateSimilarity;
exports.createFaceSmash = createFaceSmash;
exports.enhancedMatch = enhancedMatch;
exports.extractDescriptor = extractDescriptor;
exports.facesMatch = facesMatch;
exports.loadModels = loadModels;
exports.multiTemplateMatch = multiTemplateMatch;
exports.normalizeDescriptor = normalizeDescriptor;
exports.processImages = processImages;
exports.useFaceAnalysis = useFaceAnalysis;
exports.useFaceLogin = useFaceLogin;
exports.useFaceRegister = useFaceRegister;
exports.useFaceSmash = useFaceSmash;
// FIX: the sourceMappingURL directive was duplicated; keep exactly one.
//# sourceMappingURL=react.cjs.map