@jupitermetalabs/face-zk-sdk 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +201 -0
- package/README.md +181 -0
- package/assets/README.md +22 -0
- package/assets/face-guidance/face-logic.js.txt +77 -0
- package/assets/face-guidance/index.html +173 -0
- package/assets/face-guidance/pose-guidance.js.txt +403 -0
- package/assets/liveness/antispoof.js.txt +143 -0
- package/assets/liveness/index.html +451 -0
- package/assets/liveness/liveness.js.txt +1003 -0
- package/assets/mediapipe/face_mesh.js.txt +131 -0
- package/assets/mediapipe/face_mesh_solution_packed_assets.data +0 -0
- package/assets/mediapipe/face_mesh_solution_simd_wasm_bin.wasm +0 -0
- package/assets/mediapipe/face_mesh_solution_wasm_bin.wasm +0 -0
- package/assets/onnx/ort-wasm-simd.wasm +0 -0
- package/assets/onnx/ort-wasm.wasm +0 -0
- package/assets/onnx/ort.min.js.txt +7 -0
- package/assets/wasm/zk_face_wasm_bg.wasm +0 -0
- package/assets/zk-worker.html +472 -0
- package/cli/copy-ort-assets.js +65 -0
- package/cli/setup.js +266 -0
- package/dist/FaceZkSdk.d.ts +69 -0
- package/dist/FaceZkSdk.js +132 -0
- package/dist/assets/onnx/ort-min.d.ts +1 -0
- package/dist/assets/onnx/ort-min.js +8 -0
- package/dist/config/defaults.d.ts +49 -0
- package/dist/config/defaults.js +55 -0
- package/dist/config/types.d.ts +123 -0
- package/dist/config/types.js +16 -0
- package/dist/core/enrollment-core.d.ts +68 -0
- package/dist/core/enrollment-core.js +202 -0
- package/dist/core/matching.d.ts +69 -0
- package/dist/core/matching.js +96 -0
- package/dist/core/types.d.ts +365 -0
- package/dist/core/types.js +34 -0
- package/dist/core/verification-core.d.ts +120 -0
- package/dist/core/verification-core.js +434 -0
- package/dist/core/zk-core.d.ts +69 -0
- package/dist/core/zk-core.js +240 -0
- package/dist/index.d.ts +29 -0
- package/dist/index.js +39 -0
- package/dist/react-native/adapters/faceEmbeddingProvider.d.ts +38 -0
- package/dist/react-native/adapters/faceEmbeddingProvider.js +41 -0
- package/dist/react-native/adapters/imageDataProvider.d.ts +53 -0
- package/dist/react-native/adapters/imageDataProvider.js +97 -0
- package/dist/react-native/adapters/livenessProvider.d.ts +133 -0
- package/dist/react-native/adapters/livenessProvider.js +144 -0
- package/dist/react-native/adapters/zkProofEngine-webview.d.ts +73 -0
- package/dist/react-native/adapters/zkProofEngine-webview.js +129 -0
- package/dist/react-native/components/FacePoseGuidanceWebView.d.ts +30 -0
- package/dist/react-native/components/FacePoseGuidanceWebView.js +474 -0
- package/dist/react-native/components/LivenessWebView.d.ts +39 -0
- package/dist/react-native/components/LivenessWebView.js +348 -0
- package/dist/react-native/components/OnnxRuntimeWebView.d.ts +54 -0
- package/dist/react-native/components/OnnxRuntimeWebView.js +394 -0
- package/dist/react-native/components/ZkProofWebView.d.ts +59 -0
- package/dist/react-native/components/ZkProofWebView.js +259 -0
- package/dist/react-native/dependencies.d.ts +144 -0
- package/dist/react-native/dependencies.js +123 -0
- package/dist/react-native/hooks/useOnnxLoader.d.ts +38 -0
- package/dist/react-native/hooks/useOnnxLoader.js +81 -0
- package/dist/react-native/hooks/useWasmLoader.d.ts +30 -0
- package/dist/react-native/hooks/useWasmLoader.js +122 -0
- package/dist/react-native/index.d.ts +59 -0
- package/dist/react-native/index.js +96 -0
- package/dist/react-native/services/FaceRecognition.d.ts +70 -0
- package/dist/react-native/services/FaceRecognition.js +517 -0
- package/dist/react-native/ui/FaceZkVerificationFlow.d.ts +97 -0
- package/dist/react-native/ui/FaceZkVerificationFlow.js +433 -0
- package/dist/react-native/ui/ReferenceEnrollmentFlow.d.ts +72 -0
- package/dist/react-native/ui/ReferenceEnrollmentFlow.js +321 -0
- package/dist/react-native/utils/faceAlignment.d.ts +37 -0
- package/dist/react-native/utils/faceAlignment.js +182 -0
- package/dist/react-native/utils/modelInitialisationChecks.d.ts +36 -0
- package/dist/react-native/utils/modelInitialisationChecks.js +92 -0
- package/dist/react-native/utils/resolveModelUri.d.ts +55 -0
- package/dist/react-native/utils/resolveModelUri.js +172 -0
- package/dist/react-native/utils/resolveUiConfig.d.ts +41 -0
- package/dist/react-native/utils/resolveUiConfig.js +76 -0
- package/dist/storage/defaultStorageAdapter.d.ts +44 -0
- package/dist/storage/defaultStorageAdapter.js +299 -0
- package/dist/tsconfig.tsbuildinfo +1 -0
- package/face-zk.config.example.js +88 -0
- package/package.json +76 -0

package/dist/react-native/services/FaceRecognition.js
@@ -0,0 +1,517 @@
+/**
+ * Copyright 2026 JupiterMeta Labs
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import { Asset } from "expo-asset";
+import * as FileSystem from "expo-file-system/legacy";
+import * as ImageManipulator from "expo-image-manipulator";
+import * as jpeg from "jpeg-js";
+import { estimateUmeyama, warpAffine } from "../utils/faceAlignment";
+import { FaceZkSdk } from "../../FaceZkSdk";
+import { resolveModelUri } from "../utils/resolveModelUri";
+/**
+ * Unified Face Recognition Service for iOS and Android
+ * Uses ONNX Runtime Web in WebView for cross-platform compatibility
+ * Models: buffalo_sc (SCRFD detection + MobileFaceNet recognition)
+ */
+export class FaceRecognitionService {
+    bridge = null;
+    modelsLoaded = false;
+    setBridge(bridge) {
+        console.log("[FaceRecognition] setBridge called with:", bridge ? "valid bridge" : "null bridge");
+        this.bridge = bridge;
+        console.log("[FaceRecognition] Bridge set, this.bridge:", this.bridge ? "set" : "still null");
+    }
+    isBridgeSet() {
+        return !!this.bridge;
+    }
+    async loadModels() {
+        console.log("[FaceRecognition] loadModels called, this.bridge:", this.bridge ? "exists" : "NULL");
+        if (!this.bridge) {
+            throw new Error("WebView bridge not initialized. Call setBridge() first.");
+        }
+        try {
+            let detUrl;
+            let recUrl;
+            if (FaceZkSdk.isInitialized()) {
+                // ── SDK-configured model sources ───────────────────────────────────
+                // Supports bundled modules, CDN URLs, or pre-downloaded local URIs.
+                const sdkConfig = FaceZkSdk.getConfig();
+                console.log("[FaceRecognition] Step 1: Resolving detection model from SDK config");
+                detUrl = await resolveModelUri(sdkConfig.models.detection, undefined, sdkConfig.allowedDomains);
+                console.log("[FaceRecognition] Detection model URI:", detUrl);
+                console.log("[FaceRecognition] Step 2: Resolving recognition model from SDK config");
+                recUrl = await resolveModelUri(sdkConfig.models.recognition, undefined, sdkConfig.allowedDomains);
+                console.log("[FaceRecognition] Recognition model URI:", recUrl);
+            }
+            else {
+                // ── Bundled fallback (in-repo / monorepo usage) ────────────────────
+                // Static require() calls resolved by Metro at build time.
+                console.log("[FaceRecognition] Step 1: Loading detection model asset (bundled fallback)");
+                const detAsset = Asset.fromModule(require("../../assets/models/det_500m.onnx"));
+                await detAsset.downloadAsync();
+                detUrl = detAsset.localUri || detAsset.uri;
+                console.log("[FaceRecognition] Detection model URL:", detUrl);
+                console.log("[FaceRecognition] Step 2: Loading recognition model asset (bundled fallback)");
+                const recAsset = Asset.fromModule(require("../../assets/models/w600k_mbf.onnx"));
+                await recAsset.downloadAsync();
+                recUrl = recAsset.localUri || recAsset.uri;
+                console.log("[FaceRecognition] Recognition model URL:", recUrl);
+            }
+            console.log("[FaceRecognition] Step 2.5: Loading ONNX WASM asset");
+            const wasmAsset = Asset.fromModule(require("../../assets/onnx/ort-wasm-simd.wasm"));
+            await wasmAsset.downloadAsync();
+            const wasmUrl = wasmAsset.localUri || wasmAsset.uri;
+            console.log("[FaceRecognition] ONNX WASM URL:", wasmUrl);
+            console.log("[FaceRecognition] Step 3: Reading model files as base64");
+            // Read models as base64 to send to WebView
+            const detBase64 = await FileSystem.readAsStringAsync(detUrl, {
+                encoding: FileSystem.EncodingType.Base64,
+            });
+            console.log("[FaceRecognition] Detection model size:", Math.round(detBase64.length / 1024), "KB");
+            const recBase64 = await FileSystem.readAsStringAsync(recUrl, {
+                encoding: FileSystem.EncodingType.Base64,
+            });
+            console.log("[FaceRecognition] Recognition model size:", Math.round(recBase64.length / 1024), "KB");
+            const wasmBase64 = await FileSystem.readAsStringAsync(wasmUrl, {
+                encoding: FileSystem.EncodingType.Base64,
+            });
+            console.log("[FaceRecognition] ONNX WASM size:", Math.round(wasmBase64.length / 1024), "KB");
+            console.log("[FaceRecognition] Step 4: Sending model data to WebView");
+            // Send base64 data to WebView - it will convert to Blob URLs
+            const loadPromise = this.bridge.loadModels(detBase64, recBase64, wasmBase64);
+            console.log("[FaceRecognition] Step 5: Waiting for WebView to load models...");
+            await loadPromise;
+            console.log("[FaceRecognition] Step 6: WebView confirmed models loaded!");
+            this.modelsLoaded = true;
+            console.log("[FaceRecognition] ✅ Models loaded successfully via WebView");
+        }
+        catch (e) {
+            console.error("[FaceRecognition] ❌ Error loading models:", e);
+            throw e;
+        }
+    }
+    async processImageForEmbedding(imageUri) {
+        if (!this.bridge || !this.modelsLoaded) {
+            throw new Error("Models not loaded");
+        }
+        try {
+            console.log("[FaceRecognition] 📸 Processing image:", imageUri);
+            // 1. Resize and preprocess image for detection
+            console.log("[FaceRecognition] Step 1: Preprocessing image to 640x640...");
+            const { processedUri, data: processedData } = await this.preprocessImage(imageUri, 640, 640);
+            console.log("[FaceRecognition] Preprocessed image data size:", processedData.length);
+            // 2. Run face detection
+            console.log("[FaceRecognition] Step 2: Running face detection via WebView...");
+            const detectionResult = await this.bridge.runDetection(processedData, 640, 640);
+            console.log("[FaceRecognition] Detection result outputs:", Object.keys(detectionResult.outputs).length);
+            // 3. Parse detection results
+            console.log("[FaceRecognition] Step 3: Parsing detection output...");
+            const boxes = this.parseDetectionOutput(detectionResult.outputs);
+            console.log("[FaceRecognition] Detected boxes count:", boxes.length);
+            if (boxes.length > 0) {
+                console.log("[FaceRecognition] First box:", boxes[0]);
+            }
+            if (boxes.length === 0) {
+                console.warn("[FaceRecognition] ⚠️ No faces detected in image");
+                return {
+                    status: "no_face",
+                    message: "No face detected. Please ensure your face is clearly visible.",
+                };
+            }
+            if (boxes.length > 1) {
+                console.warn(`[FaceRecognition] ⚠️ ${boxes.length} faces detected — rejecting.`);
+                return {
+                    status: "multiple_faces",
+                    message: "Multiple faces detected. Please ensure only one face is visible.",
+                };
+            }
+            const box = boxes[0];
+            console.log("[FaceRecognition] ✅ Single face detected:", box);
+            // 4. Align face using 5-point landmarks (Umeyama + WarpAffine)
+            console.log("[FaceRecognition] Step 4: Aligning face using landmarks...");
+            // Calculate Similarity Transform Matrix
+            const matrix = estimateUmeyama(box.landmarks);
+            console.log("[FaceRecognition] Affine Matrix estimated:", matrix);
+            // Warp affine to get 112x112 aligned face
+            const faceImage = warpAffine(processedData, 640, 640, matrix, 112);
+            console.log("[FaceRecognition] Face aligned and warped. Data size:", faceImage.length);
+            // 5. Run recognition to get embedding
+            console.log("[FaceRecognition] Step 5: Running recognition to get embedding...");
+            const embeddingResult = await this.bridge.runRecognition(faceImage, 112, 112);
+            console.log("[FaceRecognition] Embedding dims:", embeddingResult.dims);
+            console.log("[FaceRecognition] Embedding size:", embeddingResult.data.length);
+            const embedding = this.normalizeEmbedding(Array.from(embeddingResult.data));
+            console.log("[FaceRecognition] ✅ Final normalized embedding sample (first 10):", embedding.slice(0, 10));
+            // 6. Estimate Pose
+            const pose = this.estimatePoseFromLandmarks(box.landmarks);
+            console.log("[FaceRecognition] Estimated Pose:", pose);
+            return {
+                status: "ok",
+                embedding,
+                box,
+                pose,
+            };
+        }
+        catch (error) {
+            console.error("[FaceRecognition] ❌ Error:", error);
+            const message = error instanceof Error ? error.message : "Unknown error";
+            if (message.startsWith("NO_FACE:")) {
+                return { status: "no_face", message: "No usable face detected in the image" };
+            }
+            return { status: "error", message };
+        }
+    }
+    async getEmbeddings(imageUri) {
+        const result = await this.processImageForEmbedding(imageUri);
+        return result.status === "ok" ? result.embedding || null : null;
+    }
+    /**
+     * Process a pre-cropped reference image without face detection.
+     * This skips bounding box detection and directly extracts the embedding.
+     * Use this for small, already-cropped document photos to preserve quality.
+     */
+    async processPreCroppedImage(imageUri) {
+        if (!this.bridge || !this.modelsLoaded) {
+            throw new Error("Models not loaded");
+        }
+        try {
+            console.log("[FaceRecognition] 📸 Processing pre-cropped image (no detection):", imageUri);
+            // Get image dimensions first
+            const imageInfo = await ImageManipulator.manipulateAsync(imageUri, [], {
+                format: ImageManipulator.SaveFormat.JPEG,
+            });
+            // Center crop to square to avoid aspect ratio distortion
+            console.log("[FaceRecognition] Step 1: Center-cropping to square...");
+            const size = Math.min(imageInfo.width, imageInfo.height);
+            const originX = (imageInfo.width - size) / 2;
+            const originY = (imageInfo.height - size) / 2;
+            const croppedResult = await ImageManipulator.manipulateAsync(imageUri, [
+                {
+                    crop: {
+                        originX,
+                        originY,
+                        width: size,
+                        height: size,
+                    },
+                },
+                { resize: { width: 112, height: 112 } },
+            ], { format: ImageManipulator.SaveFormat.JPEG, compress: 1 });
+            console.log("[FaceRecognition] Step 2: Converting to tensor format...");
+            const base64 = await FileSystem.readAsStringAsync(croppedResult.uri, {
+                encoding: FileSystem.EncodingType.Base64,
+            });
+            const binaryString = atob(base64);
+            const bytes = new Uint8Array(binaryString.length);
+            for (let i = 0; i < binaryString.length; i++) {
+                bytes[i] = binaryString.charCodeAt(i);
+            }
+            const rawImageData = jpeg.decode(bytes, { useTArray: true });
+            const data = new Float32Array(3 * 112 * 112);
+            const pixelData = rawImageData.data;
+            // Convert from HWC to CHW and normalize
+            for (let h = 0; h < 112; h++) {
+                for (let w = 0; w < 112; w++) {
+                    const srcIdx = (h * 112 + w) * 4;
+                    const dstIdxR = 0 * 112 * 112 + h * 112 + w;
+                    const dstIdxG = 1 * 112 * 112 + h * 112 + w;
+                    const dstIdxB = 2 * 112 * 112 + h * 112 + w;
+                    data[dstIdxR] = (pixelData[srcIdx + 0] - 127.5) / 128.0;
+                    data[dstIdxG] = (pixelData[srcIdx + 1] - 127.5) / 128.0;
+                    data[dstIdxB] = (pixelData[srcIdx + 2] - 127.5) / 128.0;
+                }
+            }
+            console.log("[FaceRecognition] Face image data size:", data.length);
+            // Run recognition to get embedding
+            console.log("[FaceRecognition] Step 3: Running recognition to get embedding...");
+            const embeddingResult = await this.bridge.runRecognition(data, 112, 112);
+            console.log("[FaceRecognition] Embedding dims:", embeddingResult.dims);
+            console.log("[FaceRecognition] Embedding size:", embeddingResult.data.length);
+            // Normalize embedding
+            console.log("[FaceRecognition] Step 4: Normalizing embedding...");
+            const embedding = this.normalizeEmbedding(Array.from(embeddingResult.data));
+            console.log("[FaceRecognition] ✅ Final normalized embedding sample (first 10):", embedding.slice(0, 10));
+            // Step 5: Run detection to extract the reference pose
+            console.log("[FaceRecognition] Step 5: Running detection on cropped image for POSE extraction...");
+            // Pose estimation needs the detector's 5-point landmarks; the
+            // 112x112 recognition tensor built above (`data`) is too small to
+            // localize landmarks accurately, so it cannot be reused here.
+            // The detection model expects 640x640 input for best results.
+            // Therefore: preprocess the original image URI to a 640x640 tensor,
+            // run detection on it, and derive yaw/pitch/roll from the landmarks
+            // of the highest-scoring box.
+            const detectionInput = await this.preprocessImage(imageUri, 640, 640);
+            const detResult = await this.bridge.runDetection(detectionInput.data, 640, 640);
+            const boxes = this.parseDetectionOutput(detResult.outputs);
+            let pose = { yaw: 0, pitch: 0, roll: 0 };
+            if (boxes.length > 0) {
+                pose = this.estimatePoseFromLandmarks(boxes[0].landmarks);
+                console.log("[FaceRecognition] ✅ Pose extracted from reference:", pose);
+            }
+            else {
+                console.warn("[FaceRecognition] ⚠️ No face detected for pose extraction, defaulting to 0");
+            }
+            return {
+                status: "ok",
+                embedding,
+                pose,
+                // No bounding box is returned here, since the pre-cropped flow skips detection on the crop itself
+            };
+        }
+        catch (error) {
+            console.error("[FaceRecognition] ❌ Error:", error);
+            return {
+                status: "error",
+                message: error instanceof Error ? error.message : "Unknown error",
+            };
+        }
+    }
+    // Helper methods (same as iOS version)
+    async preprocessImage(imageUri, targetWidth, targetHeight) {
+        // Implementation similar to iOS version
+        const manipResult = await ImageManipulator.manipulateAsync(imageUri, [{ resize: { width: targetWidth, height: targetHeight } }], { format: ImageManipulator.SaveFormat.JPEG, compress: 1 });
+        const base64 = await FileSystem.readAsStringAsync(manipResult.uri, {
+            encoding: FileSystem.EncodingType.Base64,
+        });
+        // Convert base64 to Uint8Array (React Native compatible)
+        const binaryString = atob(base64);
+        const bytes = new Uint8Array(binaryString.length);
+        for (let i = 0; i < binaryString.length; i++) {
+            bytes[i] = binaryString.charCodeAt(i);
+        }
+        const rawImageData = jpeg.decode(bytes, { useTArray: true });
+        // Convert to CHW format and normalize
+        // Input: HWC (height x width x channels) RGB
+        // Output: CHW (channels x height x width) normalized
+        const data = new Float32Array(3 * targetHeight * targetWidth);
+        const pixelData = rawImageData.data; // RGBA format
+        // Convert from HWC to CHW and normalize
+        // Mean: [127.5, 127.5, 127.5], Std: [128, 128, 128]
+        for (let h = 0; h < targetHeight; h++) {
+            for (let w = 0; w < targetWidth; w++) {
+                const srcIdx = (h * targetWidth + w) * 4; // RGBA, so *4
+                const dstIdxR = 0 * targetHeight * targetWidth + h * targetWidth + w;
+                const dstIdxG = 1 * targetHeight * targetWidth + h * targetWidth + w;
+                const dstIdxB = 2 * targetHeight * targetWidth + h * targetWidth + w;
+                // Normalize: (pixel - 127.5) / 128.0
+                data[dstIdxR] = (pixelData[srcIdx + 0] - 127.5) / 128.0;
+                data[dstIdxG] = (pixelData[srcIdx + 1] - 127.5) / 128.0;
+                data[dstIdxB] = (pixelData[srcIdx + 2] - 127.5) / 128.0;
+            }
+        }
+        return {
+            processedUri: manipResult.uri,
+            data,
+            width: targetWidth,
+            height: targetHeight,
+        };
+    }
+    parseDetectionOutput(outputs) {
+        console.log("[FaceRecognition] Parsing SCRFD detection output...");
+        console.log("[FaceRecognition] Number of output tensors:", Object.keys(outputs).length);
+        const boxes = [];
+        const scoreThreshold = 0.5; // Raised from 0.25 — eliminates spurious detections that cause false "multiple faces"
+        // Group outputs by type (scores, bboxes, landmarks)
+        const scoreTensors = [];
+        const bboxTensors = [];
+        const landmarkTensors = [];
+        Object.keys(outputs).forEach((key) => {
+            const tensor = outputs[key];
+            const lastDim = tensor.dims[tensor.dims.length - 1];
+            if (lastDim === 1) {
+                scoreTensors.push(tensor);
+            }
+            else if (lastDim === 4) {
+                bboxTensors.push(tensor);
+            }
+            else if (lastDim === 10) {
+                landmarkTensors.push(tensor);
+            }
+        });
+        console.log(`[FaceRecognition] Found ${scoreTensors.length} score tensors, ${bboxTensors.length} bbox tensors`);
+        // Process each scale
+        for (let scaleIdx = 0; scaleIdx < Math.min(scoreTensors.length, bboxTensors.length); scaleIdx++) {
+            const scores = scoreTensors[scaleIdx];
+            const bboxes = bboxTensors[scaleIdx];
+            if (!scores || !bboxes)
+                continue;
+            const numAnchors = scores.dims[0];
+            const stride = scaleIdx === 0 ? 8 : scaleIdx === 1 ? 16 : 32;
+            const height = Math.floor(640 / stride);
+            const width = Math.floor(640 / stride);
+            console.log(`[FaceRecognition] Scale ${scaleIdx}: stride=${stride}, grid=${height}x${width}, anchors=${numAnchors}`);
+            // Generate anchor centers (2 anchors per grid point)
+            const numAnchorsPerPoint = 2;
+            const anchorCenters = [];
+            for (let y = 0; y < height; y++) {
+                for (let x = 0; x < width; x++) {
+                    for (let a = 0; a < numAnchorsPerPoint; a++) {
+                        anchorCenters.push([x * stride, y * stride]);
+                    }
+                }
+            }
+            // Parse detections
+            for (let i = 0; i < numAnchors; i++) {
+                const score = scores.data[i];
+                if (score >= scoreThreshold) {
+                    // Get bbox distance predictions and multiply by stride
+                    const distLeft = bboxes.data[i * 4 + 0] * stride;
+                    const distTop = bboxes.data[i * 4 + 1] * stride;
+                    const distRight = bboxes.data[i * 4 + 2] * stride;
+                    const distBottom = bboxes.data[i * 4 + 3] * stride;
+                    // Get anchor center
+                    const [anchorX, anchorY] = anchorCenters[i];
+                    // distance2bbox decoding
+                    const x1 = anchorX - distLeft;
+                    const y1 = anchorY - distTop;
+                    const x2 = anchorX + distRight;
+                    const y2 = anchorY + distBottom;
+                    // Clamp to image bounds
+                    const clampedX1 = Math.max(0, Math.min(640, x1));
+                    const clampedY1 = Math.max(0, Math.min(640, y1));
+                    const clampedX2 = Math.max(0, Math.min(640, x2));
+                    const clampedY2 = Math.max(0, Math.min(640, y2));
+                    // Extract Landmarks
+                    const landmarks = [];
+                    // Check if we have landmark tensors for this scale
+                    if (scaleIdx < landmarkTensors.length) {
+                        const lmkTensor = landmarkTensors[scaleIdx];
+                        // 10 values per anchor (5 points x 2 coords)
+                        const lmkStart = i * 10;
+                        for (let k = 0; k < 5; k++) {
+                            const predX = lmkTensor.data[lmkStart + k * 2];
+                            const predY = lmkTensor.data[lmkStart + k * 2 + 1];
+                            const lmX = anchorX + predX * stride;
+                            const lmY = anchorY + predY * stride;
+                            landmarks.push([lmX, lmY]);
+                        }
+                    }
+                    // Sanity check
+                    if (clampedX2 > clampedX1 && clampedY2 > clampedY1) {
+                        const boxWidth = clampedX2 - clampedX1;
+                        const boxHeight = clampedY2 - clampedY1;
+                        if (boxWidth >= 20 && boxHeight >= 20) {
+                            boxes.push({
+                                x1: clampedX1,
+                                y1: clampedY1,
+                                x2: clampedX2,
+                                y2: clampedY2,
+                                score,
+                                landmarks,
+                            });
+                        }
+                    }
+                }
+            }
+        }
+        console.log(`[FaceRecognition] Found ${boxes.length} boxes above threshold`);
+        // Apply NMS
+        const nmsBoxes = this.applyNMS(boxes, 0.4);
+        console.log(`[FaceRecognition] After NMS: ${nmsBoxes.length} boxes`);
+        // Return boxes sorted by score (no Y-axis adjustment to match InsightFace)
+        return nmsBoxes.sort((a, b) => b.score - a.score);
+    }
+    applyNMS(boxes, iouThreshold) {
+        if (boxes.length === 0)
+            return [];
+        // Sort by score
+        boxes.sort((a, b) => b.score - a.score);
+        const selected = [];
+        const suppressed = new Set();
+        for (let i = 0; i < boxes.length; i++) {
+            if (suppressed.has(i))
+                continue;
+            selected.push(boxes[i]);
+            for (let j = i + 1; j < boxes.length; j++) {
+                if (suppressed.has(j))
+                    continue;
+                const iou = this.calculateIOU(boxes[i], boxes[j]);
+                if (iou > iouThreshold) {
+                    suppressed.add(j);
+                }
+            }
+        }
+        return selected;
+    }
+    calculateIOU(box1, box2) {
+        const x1 = Math.max(box1.x1, box2.x1);
+        const y1 = Math.max(box1.y1, box2.y1);
+        const x2 = Math.min(box1.x2, box2.x2);
+        const y2 = Math.min(box1.y2, box2.y2);
+        const intersection = Math.max(0, x2 - x1) * Math.max(0, y2 - y1);
+        const area1 = (box1.x2 - box1.x1) * (box1.y2 - box1.y1);
+        const area2 = (box2.x2 - box2.x1) * (box2.y2 - box2.y1);
+        const union = area1 + area2 - intersection;
+        return union === 0 ? 0 : intersection / union;
+    }
+    expandBox(box, margin = 0.2, imageWidth = 640, imageHeight = 640) {
+        // Expand bounding box by margin percentage to preserve more context
+        const width = box.x2 - box.x1;
+        const height = box.y2 - box.y1;
+        const expandX = width * margin;
+        const expandY = height * margin;
+        return {
+            x1: Math.max(0, box.x1 - expandX),
+            y1: Math.max(0, box.y1 - expandY),
+            x2: Math.min(imageWidth, box.x2 + expandX),
+            y2: Math.min(imageHeight, box.y2 + expandY),
+            score: box.score,
+            landmarks: box.landmarks,
+        };
+    }
+    normalizeEmbedding(embedding) {
+        const norm = Math.sqrt(embedding.reduce((sum, val) => sum + val * val, 0));
+        if (norm === 0)
+            throw new Error("NO_FACE: model returned a zero-vector — face crop may be empty or invalid");
+        return embedding.map((val) => val / norm);
+    }
+    /**
+     * Estimate Pose (Yaw, Pitch, Roll) from 5 landmarks (SCRFD)
+     * Landmarks: [LeftEye, RightEye, Nose, LeftMouth, RightMouth]
+     */
+    estimatePoseFromLandmarks(landmarks) {
+        if (!landmarks || landmarks.length !== 5) {
+            return { yaw: 0, pitch: 0, roll: 0 };
+        }
+        const [leftEye, rightEye, nose, leftMouth, rightMouth] = landmarks;
+        // 1. Roll: Angle between eyes
+        const dy = rightEye[1] - leftEye[1];
+        const dx = rightEye[0] - leftEye[0];
+        const roll = (Math.atan2(dy, dx) * 180) / Math.PI;
+        // 2. Yaw: Ratio of nose to eyes
+        // Midpoint of eyes
+        const eyeMidX = (leftEye[0] + rightEye[0]) / 2;
+        // Yaw is estimated from the horizontal offset of the nose relative to
+        // the midpoint between the eyes: a nose left of the midpoint means the
+        // head is turned to one side. This mirrors the heuristic in
+        // face-logic.js, which uses (nose.x - midPointX) * 200 on normalized
+        // coordinates; here the coordinates are absolute pixels, so the offset
+        // is normalized by the eye distance instead.
+        const eyeDist = Math.hypot(dx, dy);
+        if (eyeDist === 0)
+            return { yaw: 0, pitch: 0, roll: 0 };
+        // Normalize deviation by face scale (eye distance)
+        const yawRatio = (nose[0] - eyeMidX) / eyeDist;
+        const yaw = yawRatio * 90; // Approx degrees scaling
+        // 3. Pitch: Ratio of nose to eyes/mouth center
+        const mouthMidY = (leftMouth[1] + rightMouth[1]) / 2;
+        const eyeMidY = (leftEye[1] + rightEye[1]) / 2;
+        const midFaceY = (eyeMidY + mouthMidY) / 2;
+        const faceHeight = Math.hypot(mouthMidY - eyeMidY, (leftMouth[0] + rightMouth[0]) / 2 - eyeMidX);
+        const pitchRatio = faceHeight === 0 ? 0 : (nose[1] - midFaceY) / faceHeight;
+        const pitch = pitchRatio * 90;
+        return { yaw, pitch, roll };
+    }
+}
+export const faceRecognitionService = new FaceRecognitionService();
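
For orientation, the service above is driven in three steps: register the WebView bridge, load the models into the WebView, then process images into embeddings. A minimal consumer sketch follows; it assumes the bridge object is obtained from the SDK's OnnxRuntimeWebView component (how that component hands the bridge back is not shown in this diff), and the import path is illustrative only.

    // Hypothetical import path; the actual export location is defined by the package's react-native entry point.
    import { faceRecognitionService } from "@jupitermetalabs/face-zk-sdk";

    async function embedSelfie(bridge, photoUri) {
        // 1. Wire the WebView bridge first; loadModels() throws without it.
        faceRecognitionService.setBridge(bridge);
        // 2. Push the detection/recognition models and the ONNX WASM binary into the WebView as base64.
        await faceRecognitionService.loadModels();
        // 3. Detect, align (Umeyama + warpAffine), and embed a single face.
        const result = await faceRecognitionService.processImageForEmbedding(photoUri);
        if (result.status !== "ok") {
            // status is "no_face", "multiple_faces", or "error"; result.message is user-facing.
            throw new Error(result.message);
        }
        return { embedding: result.embedding, pose: result.pose };
    }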

package/dist/react-native/ui/FaceZkVerificationFlow.d.ts
@@ -0,0 +1,97 @@
+/**
+ * Copyright 2026 JupiterMeta Labs
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Face+ZK Verification Flow Component
+ *
+ * A pre-built React Native UI component for face verification with optional ZK proofs.
+ * This component handles:
+ * - Reference resolution (template, input, or ID from storage)
+ * - Liveness detection (via ZkFaceAuth WebView)
+ * - Live face capture and embedding extraction
+ * - Face matching against reference
+ * - Optional ZK proof generation and verification
+ */
+import React from "react";
+import type { ReferenceTemplate, ReferenceTemplateInput, ReferenceId, VerificationOutcome, FaceZkRuntimeConfig, VerificationOptions, UiConfig, VerificationStage } from "../../core/types";
+import type { FaceEmbeddingProvider, LivenessProvider } from "../../core/verification-core";
+/**
+ * Props for FaceZkVerificationFlow component
+ */
+export interface FaceZkVerificationFlowProps {
+    /** SDK configuration */
+    sdkConfig: FaceZkRuntimeConfig;
+    /** Reference to verify against (template, input, or ID) */
+    reference: ReferenceTemplate | ReferenceTemplateInput | ReferenceId;
+    /** Verification mode */
+    mode: "verify-only" | "verify-with-proof";
+    /** Face embedding provider */
+    embeddingProvider: FaceEmbeddingProvider;
+    /** Optional liveness provider */
+    livenessProvider?: LivenessProvider;
+    /** Per-call verification options */
+    verificationOptions?: VerificationOptions;
+    /** UI customization config */
+    uiConfig?: UiConfig;
+    /**
+     * Optional reference pose for guided liveness.
+     * When provided, the liveness check will ask the user to match this pose
+     * (extracted from the enrolled reference template).
+     */
+    referencePose?: {
+        yaw: number;
+        pitch: number;
+        roll: number;
+    };
+    /** Called when verification completes (success or failure) */
+    onComplete: (outcome: VerificationOutcome) => void;
+    /** Called when user cancels */
+    onCancel?: () => void;
+    /** Called on stage changes */
+    onStageChange?: (stage: VerificationStage) => void;
+    /** Whether to show the flow as a modal */
+    modal?: boolean;
+    /** Modal visibility (if modal=true) */
+    visible?: boolean;
+    /** Custom overlay renderer for liveness */
+    renderOverlay?: (state: unknown) => React.ReactNode;
+}
+/**
+ * A drop-in React Native UI component orchestrating the complete Face+ZK verification lifecycle.
+ *
+ * This component handles the complex choreography between:
+ * 1. Loading reference templates and initializing cryptographic WASM engines.
+ * 2. Mounting the `ZkFaceAuth` camera view to capture liveness and extract embeddings.
+ * 3. Delegating the matched vectors to the background ZK engine to cryptographically prove identity.
+ *
+ * **UI Customization:** You can extensively customize this flow via the `uiConfig` prop, supplying your own brand colors and localized strings, or entirely replacing how individual stages (Loading, Success, Error) are rendered.
+ *
+ * @param {FaceZkVerificationFlowProps} props - Configuration for the UI and required platform adapters.
+ * @returns {React.FC} A safely encapsulated verification modal or inline view.
+ *
+ * @example
+ * <FaceZkVerificationFlow
+ *   sdkConfig={config}
+ *   reference={refId}
+ *   mode="verify-with-proof"
+ *   embeddingProvider={provider}
+ *   onComplete={(outcome) => {
+ *     if (outcome.success) {
+ *       Alert.alert("Authorized", `Hash: ${outcome.zkProof.hash}`);
+ *     }
+ *   }}
+ * />
+ */
+export declare const FaceZkVerificationFlow: React.FC<FaceZkVerificationFlowProps>;
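
The referencePose prop above pairs naturally with the pose returned by FaceRecognitionService in the first hunk: the yaw/pitch/roll captured at enrollment can be replayed as the target pose during guided liveness. A rough sketch under that assumption (config, refId, provider, and the storage of the enrollment result are placeholders, as in the JSDoc example above):

    // Enrollment time: embed the pre-cropped document photo and keep its pose.
    const ref = await faceRecognitionService.processPreCroppedImage(documentPhotoUri);
    // ...persist ref.embedding and ref.pose alongside the reference template...

    // Verification time: ask the user to match the enrolled pose.
    <FaceZkVerificationFlow
        sdkConfig={config}
        reference={refId}
        mode="verify-with-proof"
        embeddingProvider={provider}
        referencePose={ref.pose}
        onComplete={(outcome) => console.log("verified:", outcome.success)}
    />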