@jupitermetalabs/face-zk-sdk 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83) hide show
  1. package/LICENSE +201 -0
  2. package/README.md +181 -0
  3. package/assets/README.md +22 -0
  4. package/assets/face-guidance/face-logic.js.txt +77 -0
  5. package/assets/face-guidance/index.html +173 -0
  6. package/assets/face-guidance/pose-guidance.js.txt +403 -0
  7. package/assets/liveness/antispoof.js.txt +143 -0
  8. package/assets/liveness/index.html +451 -0
  9. package/assets/liveness/liveness.js.txt +1003 -0
  10. package/assets/mediapipe/face_mesh.js.txt +131 -0
  11. package/assets/mediapipe/face_mesh_solution_packed_assets.data +0 -0
  12. package/assets/mediapipe/face_mesh_solution_simd_wasm_bin.wasm +0 -0
  13. package/assets/mediapipe/face_mesh_solution_wasm_bin.wasm +0 -0
  14. package/assets/onnx/ort-wasm-simd.wasm +0 -0
  15. package/assets/onnx/ort-wasm.wasm +0 -0
  16. package/assets/onnx/ort.min.js.txt +7 -0
  17. package/assets/wasm/zk_face_wasm_bg.wasm +0 -0
  18. package/assets/zk-worker.html +472 -0
  19. package/cli/copy-ort-assets.js +65 -0
  20. package/cli/setup.js +266 -0
  21. package/dist/FaceZkSdk.d.ts +69 -0
  22. package/dist/FaceZkSdk.js +132 -0
  23. package/dist/assets/onnx/ort-min.d.ts +1 -0
  24. package/dist/assets/onnx/ort-min.js +8 -0
  25. package/dist/config/defaults.d.ts +49 -0
  26. package/dist/config/defaults.js +55 -0
  27. package/dist/config/types.d.ts +123 -0
  28. package/dist/config/types.js +16 -0
  29. package/dist/core/enrollment-core.d.ts +68 -0
  30. package/dist/core/enrollment-core.js +202 -0
  31. package/dist/core/matching.d.ts +69 -0
  32. package/dist/core/matching.js +96 -0
  33. package/dist/core/types.d.ts +365 -0
  34. package/dist/core/types.js +34 -0
  35. package/dist/core/verification-core.d.ts +120 -0
  36. package/dist/core/verification-core.js +434 -0
  37. package/dist/core/zk-core.d.ts +69 -0
  38. package/dist/core/zk-core.js +240 -0
  39. package/dist/index.d.ts +29 -0
  40. package/dist/index.js +39 -0
  41. package/dist/react-native/adapters/faceEmbeddingProvider.d.ts +38 -0
  42. package/dist/react-native/adapters/faceEmbeddingProvider.js +41 -0
  43. package/dist/react-native/adapters/imageDataProvider.d.ts +53 -0
  44. package/dist/react-native/adapters/imageDataProvider.js +97 -0
  45. package/dist/react-native/adapters/livenessProvider.d.ts +133 -0
  46. package/dist/react-native/adapters/livenessProvider.js +144 -0
  47. package/dist/react-native/adapters/zkProofEngine-webview.d.ts +73 -0
  48. package/dist/react-native/adapters/zkProofEngine-webview.js +129 -0
  49. package/dist/react-native/components/FacePoseGuidanceWebView.d.ts +30 -0
  50. package/dist/react-native/components/FacePoseGuidanceWebView.js +474 -0
  51. package/dist/react-native/components/LivenessWebView.d.ts +39 -0
  52. package/dist/react-native/components/LivenessWebView.js +348 -0
  53. package/dist/react-native/components/OnnxRuntimeWebView.d.ts +54 -0
  54. package/dist/react-native/components/OnnxRuntimeWebView.js +394 -0
  55. package/dist/react-native/components/ZkProofWebView.d.ts +59 -0
  56. package/dist/react-native/components/ZkProofWebView.js +259 -0
  57. package/dist/react-native/dependencies.d.ts +144 -0
  58. package/dist/react-native/dependencies.js +123 -0
  59. package/dist/react-native/hooks/useOnnxLoader.d.ts +38 -0
  60. package/dist/react-native/hooks/useOnnxLoader.js +81 -0
  61. package/dist/react-native/hooks/useWasmLoader.d.ts +30 -0
  62. package/dist/react-native/hooks/useWasmLoader.js +122 -0
  63. package/dist/react-native/index.d.ts +59 -0
  64. package/dist/react-native/index.js +96 -0
  65. package/dist/react-native/services/FaceRecognition.d.ts +70 -0
  66. package/dist/react-native/services/FaceRecognition.js +517 -0
  67. package/dist/react-native/ui/FaceZkVerificationFlow.d.ts +97 -0
  68. package/dist/react-native/ui/FaceZkVerificationFlow.js +433 -0
  69. package/dist/react-native/ui/ReferenceEnrollmentFlow.d.ts +72 -0
  70. package/dist/react-native/ui/ReferenceEnrollmentFlow.js +321 -0
  71. package/dist/react-native/utils/faceAlignment.d.ts +37 -0
  72. package/dist/react-native/utils/faceAlignment.js +182 -0
  73. package/dist/react-native/utils/modelInitialisationChecks.d.ts +36 -0
  74. package/dist/react-native/utils/modelInitialisationChecks.js +92 -0
  75. package/dist/react-native/utils/resolveModelUri.d.ts +55 -0
  76. package/dist/react-native/utils/resolveModelUri.js +172 -0
  77. package/dist/react-native/utils/resolveUiConfig.d.ts +41 -0
  78. package/dist/react-native/utils/resolveUiConfig.js +76 -0
  79. package/dist/storage/defaultStorageAdapter.d.ts +44 -0
  80. package/dist/storage/defaultStorageAdapter.js +299 -0
  81. package/dist/tsconfig.tsbuildinfo +1 -0
  82. package/face-zk.config.example.js +88 -0
  83. package/package.json +76 -0
@@ -0,0 +1,321 @@
1
+ /**
2
+ * Copyright 2026 JupiterMeta Labs
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+ /**
17
+ * Reference Enrollment Flow Component
18
+ *
19
+ * A pre-built React Native UI component for enrolling a reference template.
20
+ * This component handles:
21
+ * - Camera capture with pose guidance
22
+ * - Face detection and embedding extraction
23
+ * - Reference template creation
24
+ * - Optional persistence via storage adapter
25
+ */
26
+ import React, { useEffect, useState } from "react";
27
+ import { ActivityIndicator, Modal, SafeAreaView, StatusBar, StyleSheet, Text, TouchableOpacity, View, } from "react-native";
28
+ import { createReferenceFromImage } from "../../core/enrollment-core";
29
+ import { getSdkDependencies } from "../dependencies";
30
+ import { resolveUiConfig } from "../utils/resolveUiConfig";
31
+ import { FaceZkSdk } from "../../FaceZkSdk";
32
+ /**
33
+ * A drop-in React Native UI component orchestrating face capture and template enrollment.
34
+ *
35
+ * This component guides a user through positioning their face correctly using the `FacePoseGuidanceWebView`, captures an optimal frame, extracts their facial embedding, and saves the resulting `ReferenceTemplate` directly to local storage (if `persist: true` is provided).
36
+ *
37
+ * **Context:** Once a reference is created via this UI, its `referenceId` is the key needed by `FaceZkVerificationFlow` to authenticate the user in the future.
38
+ *
39
+ * @param {ReferenceEnrollmentFlowProps} props - Configuration and dependencies.
40
+ * @returns {React.FC} An encapsulated camera flow for enrollment.
41
+ *
42
+ * @example
43
+ * <ReferenceEnrollmentFlow
44
+ * sdkConfig={config}
45
+ * embeddingProvider={provider}
46
+ * enrollmentOptions={{ persist: true, metadata: { role: "admin" } }}
47
+ * onComplete={(template) => {
48
+ * myBackend.saveRefId(template.referenceId);
49
+ * }}
50
+ * />
51
+ */
52
export const ReferenceEnrollmentFlow = ({ sdkConfig, embeddingProvider, enrollmentOptions = {}, uiConfig = {}, onComplete, onCancel, onError, modal = false, visible = true, }) => {
    // UI state machine: INIT -> BRIDGE_LOADING -> CAPTURING -> PROCESSING -> SUCCESS | ERROR
    const [stage, setStage] = useState("INIT");
    // Last SDK-shaped error ({ code, message, details }) rendered in the ERROR stage.
    const [error, setError] = useState(null);
    // Flips to true once the hidden ONNX WebView hands over its bridge object.
    const [bridgeReady, setBridgeReady] = useState(false);
    // Resolve theme + strings from uiConfig
    const ui = resolveUiConfig(uiConfig);
    const { theme, strings } = ui;
    // Get injected dependencies
    const deps = getSdkDependencies();
    const { OnnxRuntimeWebView, FacePoseGuidanceWebView, faceRecognitionService } = deps;
    // Load models when bridge is ready
    useEffect(() => {
        if (bridgeReady && faceRecognitionService.isBridgeSet()) {
            setStage("BRIDGE_LOADING");
            faceRecognitionService
                .loadModels()
                .then(() => {
                console.log("[ReferenceEnrollmentFlow] Models loaded, ready to capture");
                setStage("CAPTURING");
            })
                .catch((err) => {
                console.error("[ReferenceEnrollmentFlow] Model loading failed:", err);
                // Surface model-load failures both via local state and the onError callback.
                const sdkError = {
                    code: "SYSTEM_ERROR",
                    message: "Failed to load face recognition models",
                    details: { error: String(err) },
                };
                setError(sdkError);
                setStage("ERROR");
                onError?.(sdkError);
            });
        }
    }, [bridgeReady, faceRecognitionService, onError]);
    // Guard: SDK must be initialized before rendering
    // NOTE: this early return sits after all hook calls, so the Rules of Hooks hold.
    if (!FaceZkSdk.isInitialized()) {
        return (<View style={{ flex: 1, justifyContent: "center", alignItems: "center", padding: 24 }}>
        <Text style={{ color: "#f97316", fontSize: 16, textAlign: "center" }}>
          FaceZkSdk is not initialized.{"\n"}Call initializeSdk() from '@jupitermetalabs/face-zk-sdk/react-native' before rendering this component.
        </Text>
      </View>);
    }
    // Initialize bridge for face recognition
    const handleBridgeReady = (bridge) => {
        console.log("[ReferenceEnrollmentFlow] ONNX bridge ready");
        faceRecognitionService.setBridge(bridge);
        setBridgeReady(true);
    };
    // Handle image capture from pose guidance
    const handleCaptureSuccess = async (imageUri) => {
        console.log("[ReferenceEnrollmentFlow] Image captured:", imageUri);
        setStage("PROCESSING");
        try {
            // Create reference template using SDK
            const template = await createReferenceFromImage(imageUri, sdkConfig, embeddingProvider, enrollmentOptions);
            console.log("[ReferenceEnrollmentFlow] Reference created:", template.referenceId);
            setStage("SUCCESS");
            onComplete(template);
        }
        catch (err) {
            console.error("[ReferenceEnrollmentFlow] Enrollment failed:", err);
            // Pass SDK-shaped errors (with a `code`) through unchanged; wrap anything
            // else in a generic SYSTEM_ERROR envelope.
            const sdkError = err && typeof err === "object" && "code" in err
                ? err
                : {
                    code: "SYSTEM_ERROR",
                    message: err instanceof Error ? err.message : "Enrollment failed",
                    details: { error: String(err) },
                };
            setError(sdkError);
            setStage("ERROR");
            onError?.(sdkError);
        }
    };
    // Failures reported by the pose-guidance WebView during capture.
    const handleCaptureError = (message) => {
        console.error("[ReferenceEnrollmentFlow] Capture error:", message);
        const sdkError = {
            code: "SYSTEM_ERROR",
            message,
            details: { stage: "capture" },
        };
        setError(sdkError);
        setStage("ERROR");
        onError?.(sdkError);
    };
    // Clear the error and return to the capture stage.
    const handleRetry = () => {
        setError(null);
        setStage("CAPTURING");
    };
    const handleCancel = () => {
        onCancel?.();
    };
    const content = (<SafeAreaView style={[styles.container, { backgroundColor: theme.colors.background }]}>
      <StatusBar barStyle="light-content"/>

      {/* Hidden ONNX Runtime WebView for face recognition */}
      <OnnxRuntimeWebView onReady={handleBridgeReady} onError={(err) => {
            console.error("[ReferenceEnrollmentFlow] Bridge error:", err);
            const sdkError = {
                code: "SYSTEM_ERROR",
                message: "Face recognition initialization failed",
                details: { error: err },
            };
            setError(sdkError);
            setStage("ERROR");
            onError?.(sdkError);
        }}/>

      {/* Loading States */}
      {(stage === "INIT" || stage === "BRIDGE_LOADING") && (ui.renderLoading ? ui.renderLoading(stage, strings.loadingModels) : (<View style={[styles.loadingContainer, { backgroundColor: theme.colors.background }]}>
          <ActivityIndicator size="large" color={theme.colors.primary}/>
          <Text style={[styles.loadingText, { color: theme.colors.text }]}>
            {strings.loadingModels}
          </Text>
        </View>))}

      {/* Capture State */}
      {stage === "CAPTURING" && (<View style={styles.captureContainer}>
          <FacePoseGuidanceWebView onSuccess={handleCaptureSuccess} onError={handleCaptureError} headless={false}/>
          <TouchableOpacity style={[styles.cancelButton, {
                backgroundColor: theme.colors.surface,
                borderRadius: theme.borderRadius,
            }]} onPress={handleCancel}>
            <Text style={[styles.cancelButtonText, { color: theme.colors.text }]}>
              {strings.cancelButton}
            </Text>
          </TouchableOpacity>
        </View>)}

      {/* Processing State */}
      {stage === "PROCESSING" && (ui.renderLoading ? ui.renderLoading(stage, strings.loadingProcessing) : (<View style={[styles.loadingContainer, { backgroundColor: theme.colors.background }]}>
          <ActivityIndicator size="large" color={theme.colors.primary}/>
          <Text style={[styles.loadingText, { color: theme.colors.text }]}>
            {strings.loadingProcessing}
          </Text>
        </View>))}

      {/* Success State */}
      {stage === "SUCCESS" && (ui.renderSuccess ? ui.renderSuccess({ success: true, score: 100 }) : (<View style={[styles.resultContainer, { backgroundColor: theme.colors.background }]}>
          <Text style={[styles.successIcon, { color: theme.colors.primary }]}>✓</Text>
          <Text style={[styles.successTitle, { color: theme.colors.text }]}>
            {strings.enrollmentSuccessTitle}
          </Text>
          <Text style={[styles.successText, { color: theme.colors.textMuted }]}>
            {strings.enrollmentSuccessSubtitle}
          </Text>
        </View>))}

      {/* Error State */}
      {stage === "ERROR" && error && (ui.renderError ? ui.renderError(error, { onRetry: handleRetry, onCancel: handleCancel }) : (<View style={[styles.resultContainer, { backgroundColor: theme.colors.background }]}>
          <Text style={[styles.errorIcon, { color: theme.colors.error }]}>✕</Text>
          <Text style={[styles.errorTitle, { color: theme.colors.text }]}>
            {strings.enrollmentErrorTitle}
          </Text>
          <Text style={[styles.errorText, { color: theme.colors.textMuted }]}>{error.message}</Text>
          <View style={styles.buttonRow}>
            <TouchableOpacity style={[styles.retryButton, {
                backgroundColor: theme.colors.primary,
                borderRadius: theme.borderRadius,
            }]} onPress={handleRetry}>
              <Text style={[styles.retryButtonText, { color: theme.colors.text }]}>
                {strings.retryButton}
              </Text>
            </TouchableOpacity>
            <TouchableOpacity style={[styles.cancelButton, {
                backgroundColor: theme.colors.surface,
                borderRadius: theme.borderRadius,
            }]} onPress={handleCancel}>
              <Text style={[styles.cancelButtonText, { color: theme.colors.text }]}>
                {strings.cancelButton}
              </Text>
            </TouchableOpacity>
          </View>
        </View>))}
    </SafeAreaView>);
    // Modal mode wraps the flow in a full-screen Modal; hardware back /
    // swipe-dismiss is routed through handleCancel via onRequestClose.
    if (modal) {
        return (<Modal visible={visible} animationType="slide" presentationStyle="fullScreen" onRequestClose={handleCancel}>
        {content}
      </Modal>);
    }
    return content;
};
232
// Static base styles for the enrollment flow.
// The hard-coded colors below act as fallbacks: at render time the component
// layers theme-driven colors on top via inline style arrays
// (e.g. [styles.loadingText, { color: theme.colors.text }]).
const styles = StyleSheet.create({
    // Full-screen root container.
    container: {
        flex: 1,
        backgroundColor: "#000",
    },
    // Centered spinner + label shown during INIT / BRIDGE_LOADING / PROCESSING.
    loadingContainer: {
        flex: 1,
        justifyContent: "center",
        alignItems: "center",
        backgroundColor: "#000",
    },
    loadingText: {
        marginTop: 16,
        color: "#fff",
        fontSize: 16,
    },
    // Hosts the pose-guidance WebView during the CAPTURING stage.
    captureContainer: {
        flex: 1,
    },
    // Shared layout for both the SUCCESS and ERROR result screens.
    resultContainer: {
        flex: 1,
        justifyContent: "center",
        alignItems: "center",
        backgroundColor: "#000",
        padding: 24,
    },
    successIcon: {
        fontSize: 72,
        color: "#4CAF50",
        marginBottom: 24,
    },
    successTitle: {
        fontSize: 24,
        fontWeight: "bold",
        color: "#fff",
        marginBottom: 12,
    },
    successText: {
        fontSize: 16,
        color: "#aaa",
        textAlign: "center",
    },
    errorIcon: {
        fontSize: 72,
        color: "#F44336",
        marginBottom: 24,
    },
    errorTitle: {
        fontSize: 24,
        fontWeight: "bold",
        color: "#fff",
        marginBottom: 12,
    },
    errorText: {
        fontSize: 16,
        color: "#aaa",
        textAlign: "center",
        marginBottom: 32,
    },
    buttonRow: {
        flexDirection: "row",
        gap: 16,
    },
    retryButton: {
        backgroundColor: "#4CAF50",
        paddingHorizontal: 32,
        paddingVertical: 12,
        borderRadius: 8,
    },
    retryButtonText: {
        color: "#fff",
        fontSize: 16,
        fontWeight: "600",
    },
    // Anchored to the bottom of the screen, overlaying the capture WebView.
    cancelButton: {
        position: "absolute",
        bottom: 40,
        left: 24,
        right: 24,
        backgroundColor: "rgba(255, 255, 255, 0.1)",
        paddingVertical: 16,
        borderRadius: 8,
        alignItems: "center",
    },
    cancelButtonText: {
        color: "#fff",
        fontSize: 16,
        fontWeight: "600",
    },
});
@@ -0,0 +1,37 @@
1
+ /**
2
+ * Copyright 2026 JupiterMeta Labs
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
/** A 2D landmark coordinate as an [x, y] pair. */
export type Point = [number, number];
/**
 * Calculates the similarity transform matrix (2x3) that maps src points to dst points.
 * Uses the Least Squares method (Umeyama's algorithm simplified for 2D similarity).
 *
 * Matrix format: [a, b, tx, c, d, ty]
 * where:
 *   x' = a*x + b*y + tx
 *   y' = c*x + d*y + ty
 *
 * Both point sets must contain exactly 5 landmarks; `dst` defaults to the
 * standard ArcFace 112x112 reference landmarks (eyes, nose, mouth corners).
 * Throws when a point set is not 5 points or the source landmarks are
 * degenerate (coincident / zero covariance).
 */
export declare function estimateUmeyama(src: Point[], dst?: Point[]): number[];
/**
 * Applies affine transformation to an image buffer using bilinear interpolation.
 *
 * Destination pixels that map outside the source image are left as 0.0
 * (zero padding in the normalized value space).
 *
 * @param srcData Float32Array containing source image data (CHW format: RRR...GGG...BBB...)
 * @param srcWidth Width of source image
 * @param srcHeight Height of source image
 * @param matrix The 2x3 Affine Matrix computed by estimateUmeyama
 * @param dstSize Output size (default 112)
 * @returns Float32Array (CHW format) of size 3 * dstSize * dstSize, normalized
 */
export declare function warpAffine(srcData: Float32Array, srcWidth: number, srcHeight: number, matrix: number[], dstSize?: number): Float32Array;
@@ -0,0 +1,182 @@
1
+ /**
2
+ * Copyright 2026 JupiterMeta Labs
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
// Standard ArcFace reference points (112x112)
// Source: insightface/utils/face_align.py
const ARCFACE_DST = [
    [38.2946, 51.6963], // Left Eye
    [73.5318, 51.6963], // Right Eye
    [56.0252, 71.7366], // Nose
    [41.5493, 92.3655], // Left Mouth
    [70.7299, 92.3655], // Right Mouth
];
/**
 * Calculates the similarity transform matrix (2x3) that maps src points to dst points.
 * Uses the Least Squares method (Umeyama's algorithm simplified for 2D similarity).
 *
 * Matrix format: [a, b, tx, c, d, ty]
 * where:
 *   x' = a*x + b*y + tx
 *   y' = c*x + d*y + ty
 *
 * @param {Point[]} src - Exactly 5 detected landmarks ([x, y] pairs).
 * @param {Point[]} [dst=ARCFACE_DST] - Exactly 5 target landmarks; defaults to
 *   the standard ArcFace 112x112 template.
 * @returns {number[]} The 2x3 matrix flattened as [a, b, tx, c, d, ty].
 * @throws {Error} If either point set is not exactly 5 points, or the source
 *   landmarks are degenerate (coincident or zero covariance).
 */
export function estimateUmeyama(src, dst = ARCFACE_DST) {
    const num = src.length;
    if (num !== 5 || dst.length !== 5) {
        throw new Error("Umeyama expects 5 points");
    }
    // Centroids of both point sets.
    let srcMeanX = 0, srcMeanY = 0, dstMeanX = 0, dstMeanY = 0;
    for (let i = 0; i < num; i++) {
        srcMeanX += src[i][0];
        srcMeanY += src[i][1];
        dstMeanX += dst[i][0];
        dstMeanY += dst[i][1];
    }
    srcMeanX /= num;
    srcMeanY /= num;
    dstMeanX /= num;
    dstMeanY /= num;
    // Accumulate the source variance and the two cross-covariance terms that
    // jointly encode rotation + scale: Sum(x*x' + y*y') and Sum(x*y' - y*x').
    let srcVar = 0;
    let crossCovarianceX = 0; // term 1 of numerator
    let crossCovarianceY = 0; // term 2 of numerator
    for (let i = 0; i < num; i++) {
        const srcDiffX = src[i][0] - srcMeanX;
        const srcDiffY = src[i][1] - srcMeanY;
        const dstDiffX = dst[i][0] - dstMeanX;
        const dstDiffY = dst[i][1] - dstMeanY;
        srcVar += srcDiffX * srcDiffX + srcDiffY * srcDiffY;
        crossCovarianceX += srcDiffX * dstDiffX + srcDiffY * dstDiffY;
        crossCovarianceY += srcDiffX * dstDiffY - srcDiffY * dstDiffX;
    }
    if (srcVar === 0) {
        throw new Error("estimateUmeyama: all landmarks are coincident — degenerate detection");
    }
    // Covariance norm: computed once and reused for both scale and rotation
    // (the original code evaluated the same square root twice).
    const norm = Math.sqrt(crossCovarianceX * crossCovarianceX + crossCovarianceY * crossCovarianceY);
    if (norm === 0) {
        throw new Error("estimateUmeyama: zero covariance norm — landmarks may be collinear or degenerate");
    }
    // Scale = ||covariance|| / variance (algebraically sqrt(||cov||^2 / var^2)).
    const scale = norm / srcVar;
    // Rotation (cos theta, sin theta)
    const cosTheta = crossCovarianceX / norm;
    const sinTheta = crossCovarianceY / norm;
    // Combined similarity parameters: [[a, b], [c, d]] = scale * R(theta).
    const a = scale * cosTheta;
    const b = -scale * sinTheta; // note: standard affine usually -sin
    const c = scale * sinTheta;
    const d = scale * cosTheta;
    // Translation maps the source centroid onto the destination centroid.
    const tx = dstMeanX - (a * srcMeanX + b * srcMeanY);
    const ty = dstMeanY - (c * srcMeanX + d * srcMeanY);
    // M = [[a, b, tx], [c, d, ty]]
    return [a, b, tx, c, d, ty];
}
89
/**
 * Inverts a 2x3 affine matrix M = [[a, b, tx], [c, d, ty]].
 * The inverse is needed so destination pixels can be mapped back to source
 * pixels for sampling.
 */
function invertAffineMatrix(m) {
    const [a, b, tx, c, d, ty] = m;
    const det = a * d - b * c;
    if (Math.abs(det) < 1e-6) {
        throw new Error("Matrix not invertible");
    }
    const s = 1.0 / det;
    // Inverse of the 2x2 linear part.
    const ia = d * s;
    const ib = -b * s;
    const ic = -c * s;
    const id = a * s;
    // Inverse translation: -(linearInverse * t).
    return [ia, ib, -(ia * tx + ib * ty), ic, id, -(ic * tx + id * ty)];
}
/**
 * Applies affine transformation to an image buffer using bilinear interpolation.
 *
 * The input is CHW float data (already normalized by the caller's
 * preprocessing), so interpolation weights apply directly to stored values.
 * Destination pixels that map outside the source image are left at 0.0
 * (the output buffer's zero initialization serves as padding).
 *
 * @param srcData Float32Array containing source image data (CHW format: RRR...GGG...BBB...)
 * @param srcWidth Width of source image
 * @param srcHeight Height of source image
 * @param matrix The 2x3 Affine Matrix computed by estimateUmeyama
 * @param dstSize Output size (default 112)
 * @returns Float32Array (CHW format) of size 3 * dstSize * dstSize, normalized
 */
export function warpAffine(srcData, srcWidth, srcHeight, matrix, dstSize = 112) {
    // Sample backwards: invert the forward matrix so each destination pixel
    // asks which source location it came from.
    const [ia, ib, itx, ic, id, ity] = invertAffineMatrix(matrix);
    const out = new Float32Array(3 * dstSize * dstSize);
    const dstPlane = dstSize * dstSize;
    const srcPlane = srcWidth * srcHeight;
    for (let row = 0; row < dstSize; row++) {
        for (let col = 0; col < dstSize; col++) {
            // Back-project this destination pixel into source coordinates.
            const sx = ia * col + ib * row + itx;
            const sy = ic * col + id * row + ity;
            // Skip anything outside the source image — the zero-initialized
            // output acts as black/zero padding.
            const inBounds = sx >= 0 &&
                sx <= srcWidth - 1 &&
                sy >= 0 &&
                sy <= srcHeight - 1;
            if (!inBounds) {
                continue;
            }
            // Bilinear interpolation between the four neighbouring pixels.
            const x0 = Math.floor(sx);
            const y0 = Math.floor(sy);
            const x1 = Math.min(x0 + 1, srcWidth - 1);
            const y1 = Math.min(y0 + 1, srcHeight - 1);
            const fx = sx - x0;
            const fy = sy - y0;
            const w00 = (1 - fx) * (1 - fy);
            const w10 = fx * (1 - fy);
            const w01 = (1 - fx) * fy;
            const w11 = fx * fy;
            const dstIdx = row * dstSize + col;
            const i00 = y0 * srcWidth + x0;
            const i10 = y0 * srcWidth + x1;
            const i01 = y1 * srcWidth + x0;
            const i11 = y1 * srcWidth + x1;
            // Blend each of the R, G, B planes with the same weights.
            for (let ch = 0; ch < 3; ch++) {
                const so = ch * srcPlane;
                out[ch * dstPlane + dstIdx] =
                    srcData[so + i00] * w00 +
                        srcData[so + i10] * w10 +
                        srcData[so + i01] * w01 +
                        srcData[so + i11] * w11;
            }
        }
    }
    return out;
}
@@ -0,0 +1,36 @@
1
+ /**
2
+ * Copyright 2026 JupiterMeta Labs
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
import type { FaceZkModelsConfig } from "../../config/types";
/** Keys corresponding to each entry in FaceZkModelsConfig. */
export type ModelKey = "detection" | "recognition" | "antispoof" | "wasm" | "zkWorkerHtml";
export interface ModelReadinessResult {
    /** True when every configured model source is present on device. */
    ready: boolean;
    /** Models that are configured but not yet available locally. */
    missing: ModelKey[];
    /** Models that are configured and already available locally. */
    present: ModelKey[];
}
/**
 * Check whether all configured model sources are already resolved locally.
 *
 * Does NOT download anything. Safe to call on every app launch — fast when
 * all models are present (only stat calls, no network).
 *
 * Models that are not configured at all are skipped as optional, so they
 * appear in neither `missing` nor `present`.
 *
 * @param models The same FaceZkModelsConfig you intend to pass to initializeSdk().
 * @returns Readiness result with `ready` flag and `missing`/`present` arrays.
 */
export declare function modelInitialisationChecks(models: FaceZkModelsConfig): Promise<ModelReadinessResult>;
@@ -0,0 +1,92 @@
1
+ /**
2
+ * Copyright 2026 JupiterMeta Labs
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+ /**
17
+ * Face+ZK SDK – Model Initialisation Checks
18
+ *
19
+ * Pre-flight utility that inspects whether each configured model source is
20
+ * already available on device without downloading anything.
21
+ *
22
+ * Call this before mounting any SDK screen to decide whether a download step
23
+ * is needed. The result tells you exactly which models are present and which
24
+ * need to be fetched, so your app can show an appropriate UI (progress bar,
25
+ * "first-time setup" screen, etc.) before calling initializeSdk().
26
+ *
27
+ * Usage:
28
+ * const result = await modelInitialisationChecks(modelConfig);
29
+ * if (!result.ready) {
30
+ * // download missing models, show progress, then proceed
31
+ * }
32
+ */
33
+ import * as FileSystem from "expo-file-system/legacy";
34
+ import { deriveStorePath } from "./resolveModelUri";
35
+ // ── Implementation ─────────────────────────────────────────────────────────
36
/**
 * Reports whether one ModelSource is already available on this device.
 * Purely a read-only probe — nothing is downloaded or resolved.
 */
async function isSourceReady(source) {
    const { localUri, module: bundledModule, url } = source;
    // Explicit local file: trust it only if it actually exists on disk.
    if (localUri) {
        const stat = await FileSystem.getInfoAsync(localUri);
        return stat.exists;
    }
    // Metro-bundled assets ship inside the app binary, so they are always present.
    if (bundledModule != null) {
        return true;
    }
    // Remote URL: ready only when a previously-downloaded copy sits at the
    // derived store path.
    if (url) {
        const stat = await FileSystem.getInfoAsync(deriveStorePath(url));
        return stat.exists;
    }
    // Nothing resolvable on this source — report it as not ready.
    return false;
}
56
/**
 * Check whether all configured model sources are already resolved locally.
 *
 * Does NOT download anything. Safe to call on every app launch — fast when
 * all models are present (only stat calls, no network). Unconfigured
 * (null/undefined) models are optional and are skipped entirely.
 *
 * All readiness probes are independent filesystem stats, so they are run in
 * parallel via Promise.all instead of being awaited one at a time.
 *
 * @param models The same FaceZkModelsConfig you intend to pass to initializeSdk().
 * @returns Readiness result with `ready` flag and `missing`/`present` arrays.
 */
export async function modelInitialisationChecks(models) {
    const entries = [
        ["detection", models.detection],
        ["recognition", models.recognition],
        ["antispoof", models.antispoof],
        ["wasm", models.wasm],
        ["zkWorkerHtml", models.zkWorkerHtml],
    ];
    // Optional models that are not configured are not required — drop them.
    const configured = entries.filter(([, source]) => source != null);
    // Probe every configured source concurrently; order of results matches
    // `configured`, so missing/present keep the original key order.
    const readiness = await Promise.all(configured.map(([, source]) => isSourceReady(source)));
    const missing = [];
    const present = [];
    configured.forEach(([key], i) => {
        (readiness[i] ? present : missing).push(key);
    });
    return {
        ready: missing.length === 0,
        missing,
        present,
    };
}