@incodetech/core 0.0.0-dev-20260126-4504c5b
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/Manager-Co-PsiG9.d.ts +19 -0
- package/dist/OpenViduLogger-BLxxXoyF.esm.js +803 -0
- package/dist/OpenViduLogger-DyqID_-7.esm.js +3 -0
- package/dist/api-DfRLAneb.esm.js +53 -0
- package/dist/chunk-V5DOKNPJ.esm.js +49 -0
- package/dist/deepsightLoader-BMT0FSg6.esm.js +24 -0
- package/dist/deepsightService-j5zMt6wf.esm.js +236 -0
- package/dist/email.d.ts +264 -0
- package/dist/email.esm.js +478 -0
- package/dist/endpoints-BUsSVoJV.esm.js +3288 -0
- package/dist/events-B8ZkhAZo.esm.js +285 -0
- package/dist/flow.d.ts +278 -0
- package/dist/flow.esm.js +638 -0
- package/dist/getDeviceClass-DkfbtsIJ.esm.js +41 -0
- package/dist/id-r1mw9zBM.esm.js +1827 -0
- package/dist/id.d.ts +5 -0
- package/dist/id.esm.js +9 -0
- package/dist/index-CJMK8K5u.d.ts +614 -0
- package/dist/index.d.ts +445 -0
- package/dist/index.esm.js +163 -0
- package/dist/lib-CbAibJlt.esm.js +11700 -0
- package/dist/phone.d.ts +292 -0
- package/dist/phone.esm.js +552 -0
- package/dist/selfie.d.ts +592 -0
- package/dist/selfie.esm.js +1221 -0
- package/dist/src-DYtpbFY5.esm.js +2781 -0
- package/dist/stats-DnU4uUFv.esm.js +16 -0
- package/dist/stats.d.ts +12 -0
- package/dist/stats.esm.js +4 -0
- package/dist/streamingEvents-CfEJv3xH.esm.js +96 -0
- package/dist/types-CMR6NkxW.d.ts +359 -0
- package/dist/types-CRVSv38Q.d.ts +344 -0
- package/package.json +58 -0
package/dist/selfie.d.ts
ADDED
@@ -0,0 +1,592 @@
+import { a as CameraStream, d as IMLProviderCapability, f as MLProviderConfig, i as StreamCanvasCapture, l as MotionPermissionState, n as PermissionStatus, o as IRecordingCapability, p as IncodeCanvas, r as BaseWasmProvider, t as PermissionResult, u as MotionStatus } from "./types-CRVSv38Q.js";
+import { a as AnyStateMachine, o as StateMachine, r as FlowModuleConfig, s as WasmPipeline, t as Flow } from "./types-CMR6NkxW.js";
+import { t as Manager } from "./Manager-Co-PsiG9.js";
+
+//#region ../infra/src/capabilities/IFaceDetectionCapability.d.ts
+
+/**
+ * Configuration for face detection provider.
+ * Extends base ML provider config with face-detection specific options.
+ */
+interface FaceDetectionConfig extends MLProviderConfig {
+  autocaptureInterval?: number;
+}
+interface FacePositionConstraints {
+  minX: number;
+  minY: number;
+  maxX: number;
+  maxY: number;
+}
+interface FaceDetectionThresholds {
+  brightnessThreshold: number;
+  blurrinessThreshold: number;
+  tiltRotationAngleThreshold: number;
+  minMagicCropSize: number;
+  autocaptureInterval: number;
+  minFaceQualityScore: number;
+  faceOcclusionThreshold: number;
+}
+interface FaceAttributesThresholds {
+  headwearThreshold: number;
+  lensesThreshold: number;
+  closedEyesThreshold: number;
+  maskThreshold: number;
+}
+interface FaceChecksConfig {
+  lenses: boolean;
+  mask: boolean;
+  closedEyes: boolean;
+  headWear: boolean;
+  occlusion: boolean;
+}
+interface FaceData {
+  rect: {
+    x: number;
+    y: number;
+    width: number;
+    height: number;
+  };
+  rightEye: {
+    x: number;
+    y: number;
+  };
+  leftEye: {
+    x: number;
+    y: number;
+  };
+  noseTip: {
+    x: number;
+    y: number;
+  };
+  rightMouthCorner: {
+    x: number;
+    y: number;
+  };
+  leftMouthCorner: {
+    x: number;
+    y: number;
+  };
+  pitch: number;
+  yaw: number;
+  roll: number;
+}
+type FaceCoordinates = {
+  rightEyeX: number;
+  rightEyeY: number;
+  leftEyeX: number;
+  leftEyeY: number;
+  noseTipX: number;
+  noseTipY: number;
+  rightMouthX: number;
+  rightMouthY: number;
+  mouthX: number;
+  mouthY: number;
+  x: number;
+  y: number;
+  width: number;
+  height: number;
+};
+interface FaceDetectionCallbacks {
+  onFarAway?: () => void;
+  onTooClose?: () => void;
+  onTooManyFaces?: () => void;
+  onNoFace?: () => void;
+  onCapture?: (canvas: IncodeCanvas, faceCoordinates: FaceCoordinates) => void;
+  onGetReady?: () => void;
+  onGetReadyFinished?: () => void;
+  onCenterFace?: () => void;
+  onDark?: () => void;
+  onBlur?: () => void;
+  onFaceAngle?: () => void;
+  onBestShot?: (face: FaceData) => void;
+  onLenses?: () => void;
+  onMask?: () => void;
+  onEyesClosed?: () => void;
+  onHeadWear?: () => void;
+  onSwitchToManualCapture?: () => void;
+  onFaceOccluded?: () => void;
+}
+/**
+ * Capability interface for face detection and selfie capture.
+ * Extends the base ML provider capability with face-detection specific methods.
+ */
+interface IFaceDetectionCapability extends IMLProviderCapability<FaceDetectionConfig> {
+  /**
+   * Sets callbacks for face detection events.
+   * @param callbacks - Object containing callback functions for various detection events
+   */
+  setCallbacks(callbacks: FaceDetectionCallbacks): void;
+  /**
+   * Sets position constraints for face detection.
+   * @param constraints - Bounding box constraints for valid face position
+   */
+  setPositionConstraints(constraints: FacePositionConstraints): void;
+  /**
+   * Sets detection thresholds for quality checks.
+   * @param thresholds - Threshold values for various quality metrics
+   */
+  setThresholds(thresholds: FaceDetectionThresholds): void;
+  /**
+   * Sets thresholds for face attribute detection.
+   * @param thresholds - Threshold values for attribute detection (headwear, lenses, etc.)
+   */
+  setAttributesThresholds(thresholds: FaceAttributesThresholds): void;
+  /**
+   * Enables or disables specific face checks.
+   * @param config - Configuration for which checks to enable
+   */
+  setChecksEnabled(config: FaceChecksConfig): void;
+  /**
+   * Sets video selfie mode.
+   * @param enabled - Whether to enable video selfie mode
+   */
+  setVideoSelfieMode(enabled: boolean): void;
+}
+//#endregion
+//#region ../infra/src/capabilities/IStorageCapability.d.ts
+/**
+ * Storage capability interface for abstracting storage operations.
+ * Enables swapping between browser localStorage and future WASM-based storage.
+ */
+interface IStorageCapability {
+  /**
+   * Retrieves a value from storage.
+   * @param key - The storage key
+   * @returns The stored value or null if not found
+   */
+  get<T>(key: string): Promise<T | null>;
+  /**
+   * Stores a value in storage.
+   * @param key - The storage key
+   * @param value - The value to store (will be serialized)
+   */
+  set<T>(key: string, value: T): Promise<void>;
+  /**
+   * Removes a value from storage.
+   * @param key - The storage key to remove
+   */
+  remove(key: string): Promise<void>;
+  /**
+   * Clears all values from storage.
+   */
+  clear(): Promise<void>;
+}
+//#endregion
+//#region ../infra/src/capabilities/IWasmUtilCapability.d.ts
+type WasmUtilConfig = MLProviderConfig & {
+  pipelines?: WasmPipeline[];
+};
+type VirtualCameraCheckOutput = {
+  canvas: HTMLCanvasElement | null;
+  itr: boolean | null;
+  skipped: boolean | null;
+};
+type IWasmUtilCapability = {
+  /**
+   * Whether the provider has been initialized and is ready to be used.
+   */
+  readonly initialized: boolean;
+  /**
+   * Initializes the provider with the given configuration.
+   * If WASM was already warmed up via `warmupWasm()`, this returns almost instantly.
+   * @param config - Provider configuration including WASM paths
+   */
+  initialize(config: WasmUtilConfig): Promise<void>;
+  /**
+   * Encrypts a base64 image using the WASM utility API.
+   * @param image - Base64 image string (no data URL prefix)
+   * @returns Encrypted string
+   */
+  encryptImage(image: string): string;
+  setSdkVersion(version: string): void;
+  setSdkPlatform(platform: string): void;
+  setDeviceInfo(deviceInfo: object, overrideExisting?: boolean): void;
+  setBrowserInfo(browserInfo: object, overrideExisting?: boolean): void;
+  setCameraInfo(cameraInfo: object, overrideExisting?: boolean): void;
+  setMotionStatus(status: string): void;
+  setBackgroundMode(backgroundMode: boolean): void;
+  setZc(zc: string): void;
+  setInspectorOpened(opened: boolean): void;
+  getMetadata(): string;
+  analyzeFrame(image: ImageData): Promise<void>;
+  getCheck(): string;
+  estimatePerformance(): string;
+  isVirtualCamera(label: string | null): boolean;
+  prc(): Promise<void>;
+  poc(output: VirtualCameraCheckOutput): Promise<void>;
+  ckvcks(data: ArrayBuffer): void;
+  getVersions(): Promise<unknown>;
+  /**
+   * Disposes of resources and resets initialization state.
+   */
+  dispose(): Promise<void>;
+};
+//#endregion
+//#region ../infra/src/providers/wasm/FaceDetectionProvider.d.ts
+declare class FaceDetectionProvider extends BaseWasmProvider implements IFaceDetectionCapability {
+  private defaultThresholds;
+  private currentThresholds;
+  private currentFrame;
+  private bestCanvas;
+  private bestFace;
+  constructor();
+  processFrame(image: ImageData): Promise<void>;
+  initialize(config: FaceDetectionConfig): Promise<void>;
+  setCallbacks(callbacks: FaceDetectionCallbacks): void;
+  setPositionConstraints(constraints: FacePositionConstraints): void;
+  applyDefaults(autocaptureInterval?: number): void;
+  setAutocaptureInterval(interval: number): void;
+  setThresholds(thresholds: FaceDetectionThresholds): void;
+  setAttributesThresholds(thresholds: FaceAttributesThresholds): void;
+  setChecksEnabled(config: FaceChecksConfig): void;
+  setVideoSelfieMode(enabled: boolean): void;
+  reset(): void;
+  private createDefaultFaceCoordinates;
+  private formatFaceCoordinates;
+}
+//#endregion
+//#region src/internal/deepsight/metadataService.d.ts
+type MetadataService = {
+  initialize(sdkVersion: string, disableIpify?: boolean): Promise<void>;
+  updateCameraInfo(videoTrack: MediaStreamTrack): void;
+  checkForVirtualCameraByLabel(videoTrack: MediaStreamTrack | null): Promise<boolean>;
+  analyzeFrame(imageData: ImageData): Promise<void>;
+  setMotionStatus(status: string): void;
+  setBackgroundMode(backgroundMode: boolean): void;
+  estimatePerformance(): string;
+  getMetadata(): string;
+  getCheck(): string;
+};
+//#endregion
+//#region src/internal/deepsight/motionStatusService.d.ts
+type MotionStatusService = {
+  requestPermission(): Promise<MotionPermissionState>;
+  start(): Promise<void>;
+  stop(): void;
+  check(): MotionStatus;
+  readonly isRunning: boolean;
+  readonly hasPermission: boolean;
+};
+//#endregion
+//#region src/internal/virtualCameraCheck.d.ts
+declare const frameSources: {
+  readonly front: "FRONT_ID";
+  readonly back: "BACK_ID";
+  readonly selfie: "SELFIE";
+};
+type FrameSource = (typeof frameSources)[keyof typeof frameSources];
+//#endregion
+//#region src/internal/deepsight/deepsightService.d.ts
+type DeepsightService = {
+  readonly metadata: MetadataService;
+  readonly motion: MotionStatusService;
+  initialize(disableIpify?: boolean): Promise<void>;
+  requestMotionPermission(): Promise<'granted' | 'denied' | 'not-required'>;
+  startMotionSensors(): Promise<void>;
+  stopMotionSensors(): void;
+  checkVirtualCamera(videoTrack: MediaStreamTrack): Promise<boolean>;
+  performVirtualCameraCheck(sessionToken: string | null, source: FrameSource): Promise<void>;
+  /**
+   * Runs PRC warmup and stores DS flag using infrastructure providers.
+   */
+  performPrcCheck(params: DeepsightPrcCheckParams): Promise<void>;
+  analyzeFrame(imageData: ImageData): Promise<void>;
+  getMetadata(): string;
+  cleanup(): void;
+};
+type DeepsightPrcCheckParams = {
+  constraints: MediaStreamConstraints;
+  ds?: boolean;
+  storage: IStorageCapability;
+};
+//#endregion
+//#region src/modules/selfie/types.d.ts
+type SelfieConfig = FlowModuleConfig['SELFIE'] & {
+  recording?: {
+    capability?: IRecordingCapability;
+  };
+  ds?: Flow['ds'];
+};
+type SelfieDependencies = {
+  storage: IStorageCapability;
+  getWasmUtil: () => Promise<IWasmUtilCapability>;
+};
+type DetectionStatus = 'idle' | 'detecting' | 'noFace' | 'tooManyFaces' | 'tooClose' | 'tooFar' | 'blur' | 'dark' | 'faceAngle' | 'headWear' | 'lenses' | 'eyesClosed' | 'faceMask' | 'centerFace' | 'manualCapture' | 'success' | 'error' | 'capturing' | 'getReady' | 'getReadyFinished' | 'offline';
+type SendFaceImageResponse = {
+  age: number;
+  confidence: number;
+  hasClosedEyes: boolean;
+  hasFaceMask: boolean;
+  hasHeadCover: boolean;
+  hasLenses: boolean;
+  isBright: boolean;
+  liveness: boolean;
+  imageBase64: string;
+  sessionStatus: string;
+};
+declare const FACE_ERROR_CODES: {
+  readonly FACE_OCCLUDED: "FACE_OCCLUDED";
+  readonly LIVENESS: "LIVENESS_ERROR";
+  readonly BRIGHTNESS: "BRIGHTNESS_ERROR";
+  readonly LENSES: "LENSES_ERROR";
+  readonly MASK: "MASK_ERROR";
+  readonly CLOSED_EYES: "CLOSED_EYES_ERROR";
+  readonly HEAD_COVER: "HEAD_COVER_ERROR";
+  readonly SERVER: "SERVER_ERROR";
+  readonly FACE_NOT_FOUND: "FACE_NOT_FOUND";
+  readonly MULTIPLE_FACES: "MULTIPLE_FACES";
+  readonly TOO_BLURRY: "TOO_BLURRY_ERROR";
+  readonly TOO_DARK: "TOO_DARK_ERROR";
+  readonly USER_IS_NOT_RECOGNIZED: "USER_IS_NOT_RECOGNIZED";
+  readonly SPOOF_ATTEMPT_DETECTED: "SPOOF_ATTEMPT_DETECTED";
+  readonly FACE_TOO_DARK: "FACE_TOO_DARK";
+  readonly LENSES_DETECTED: "LENSES_DETECTED";
+  readonly FACE_MASK_DETECTED: "FACE_MASK_DETECTED";
+  readonly CLOSED_EYES_DETECTED: "CLOSED_EYES_DETECTED";
+  readonly HEAD_COVER_DETECTED: "HEAD_COVER_DETECTED";
+  readonly FACE_CROPPING_FAILED: "FACE_CROPPING_FAILED";
+  readonly FACE_TOO_SMALL: "FACE_TOO_SMALL";
+  readonly FACE_TOO_BLURRY: "FACE_TOO_BLURRY";
+  readonly BAD_PHOTO_QUALITY: "BAD_PHOTO_QUALITY";
+  readonly PROCESSING_ERROR: "PROCESSING_ERROR";
+  readonly BAD_REQUEST: "BAD_REQUEST";
+  readonly NONEXISTENT_CUSTOMER: "NONEXISTENT_CUSTOMER";
+  readonly HINT_NOT_PROVIDED: "HINT_NOT_PROVIDED";
+  readonly SELFIE_IMAGE_LOW_QUALITY: "SELFIE_IMAGE_LOW_QUALITY";
+};
+type FaceErrorCode = (typeof FACE_ERROR_CODES)[keyof typeof FACE_ERROR_CODES];
+//#endregion
+//#region src/modules/selfie/selfieUploadService.d.ts
+type ProcessFaceImageType = 'selfie' | 'videoSelfie';
+type ProcessFaceResponse = {
+  faceMatch: boolean;
+  confidence: number;
+  existingUser: boolean;
+};
+declare function processFace(imageType?: ProcessFaceImageType, signal?: AbortSignal): Promise<ProcessFaceResponse>;
+//#endregion
+//#region src/modules/selfie/recordingService.d.ts
+type RecordingService = {
+  start(stream: MediaStream): Promise<void>;
+  stop(): Promise<{
+    recordingId: string | null;
+  }>;
+  cleanup(): void;
+};
+//#endregion
+//#region src/modules/selfie/selfieStateMachine.d.ts
+type SelfieContext = {
+  config: SelfieConfig;
+  dependencies: SelfieDependencies;
+  stream: CameraStream | undefined;
+  provider: FaceDetectionProvider | undefined;
+  frameCapturer: StreamCanvasCapture | undefined;
+  error: string | undefined;
+  detectionStatus: DetectionStatus;
+  debugFrame: ImageData | undefined;
+  capturedImage: IncodeCanvas | undefined;
+  faceCoordinates: FaceCoordinates | undefined;
+  uploadResponse: SendFaceImageResponse | undefined;
+  processResponse: ProcessFaceResponse | undefined;
+  recordingService: RecordingService | undefined;
+  attemptsRemaining: number;
+  uploadError: FaceErrorCode | undefined;
+  permissionResult: PermissionResult | 'refresh' | undefined;
+  resetDetection: (() => void) | undefined;
+  deepsightService: DeepsightService | undefined;
+};
+type SelfieEvent = {
+  type: 'LOAD';
+} | {
+  type: 'NEXT_STEP';
+} | {
+  type: 'REQUEST_PERMISSION';
+} | {
+  type: 'GO_TO_LEARN_MORE';
+} | {
+  type: 'BACK';
+} | {
+  type: 'QUIT';
+} | {
+  type: 'RESET';
+} | {
+  type: 'MANUAL_CAPTURE';
+} | {
+  type: 'DETECTION_UPDATE';
+  status: DetectionStatus;
+} | {
+  type: 'DETECTION_FRAME';
+  frame: ImageData;
+} | {
+  type: 'DETECTION_SUCCESS';
+  canvas: IncodeCanvas;
+  faceCoordinates?: FaceCoordinates;
+} | {
+  type: 'DETECTION_RESET_READY';
+  reset: () => void;
+} | {
+  type: 'RETRY_CAPTURE';
+};
+type SelfieInput = {
+  config: SelfieConfig;
+  dependencies: SelfieDependencies;
+};
+/**
+ * The selfie capture state machine.
+ *
+ * Note: Uses AnyStateMachine type for declaration file portability.
+ * Type safety is ensured via the machine configuration.
+ */
+declare const selfieMachine: AnyStateMachine;
+/**
+ * Type representing the selfie machine.
+ * For advanced use cases requiring specific machine types.
+ */
+type SelfieMachine = StateMachine<SelfieContext, SelfieEvent, any, any, any, any, any, any, any, SelfieInput, any, any, any, any>;
+//#endregion
+//#region src/modules/selfie/selfieActor.d.ts
+type CreateSelfieActorOptions = {
+  config: SelfieConfig;
+  dependencies?: SelfieDependencies;
+};
+//#endregion
+//#region src/modules/selfie/selfieManager.d.ts
+type CaptureStatus = 'initializing' | 'detecting' | 'capturing' | 'uploading' | 'uploadError' | 'success';
+/** Selfie manager is waiting to be started */
+type SelfieIdleState = {
+  status: 'idle';
+};
+/** Checking camera permissions (when no tutorial) */
+type SelfieLoadingState = {
+  status: 'loading';
+};
+/** Showing selfie tutorial */
+type SelfieTutorialState = {
+  status: 'tutorial';
+};
+/** Handling camera permissions */
+type SelfiePermissionsState = {
+  status: 'permissions';
+  /** Current permission sub-state: initial, requesting, denied, or learnMore */
+  permissionStatus: PermissionStatus;
+};
+/** Camera is ready for selfie capture */
+type SelfieCaptureState = {
+  status: 'capture';
+  /** Current capture sub-state */
+  captureStatus: CaptureStatus;
+  /** The active camera stream */
+  stream: CameraStream | undefined;
+  /** Current face detection status */
+  detectionStatus: DetectionStatus;
+  /** Latest frame processed in the detection loop (for UI debug rendering) */
+  debugFrame: ImageData | undefined;
+  /** Number of capture attempts remaining */
+  attemptsRemaining: number;
+  /** Error message from failed upload */
+  uploadError: string | undefined;
+};
+/** Processing the captured selfie */
+type SelfieProcessingState = {
+  status: 'processing';
+};
+/** Selfie capture completed successfully */
+type SelfieFinishedState = {
+  status: 'finished';
+  /** Face processing result (face match, confidence, existing user) */
+  processResponse: ProcessFaceResponse | undefined;
+};
+/** User closed the selfie flow */
+type SelfieClosedState = {
+  status: 'closed';
+};
+/** An error occurred during the flow */
+type SelfieErrorState = {
+  status: 'error';
+  /** The error message */
+  error: string;
+};
+/** Union of all possible selfie states */
+type SelfieState = SelfieIdleState | SelfieLoadingState | SelfieTutorialState | SelfiePermissionsState | SelfieCaptureState | SelfieProcessingState | SelfieFinishedState | SelfieClosedState | SelfieErrorState;
+/**
+ * Creates a selfie manager instance for handling selfie capture flow.
+ *
+ * The selfie manager provides:
+ * - State management with statuses: `idle`, `loading`, `tutorial`, `permissions`, `capture`, `finished`, `closed`, `error`
+ * - Permission handling with nested states: `idle`, `requesting`, `denied`, `learnMore`
+ * - Capture handling with nested states: `initializing`, `startingRecorder`, `recordingActive`, `detecting`, `capturing`, `uploading`, `uploadError`, `success`
+ * - Camera stream access when in `capture` state
+ * - Detection status feedback during face detection
+ * - Attempt tracking with `attemptsRemaining`
+ *
+ * @param options - Configuration for the selfie actor
+ * @param options.config - The selfie module configuration from the flow
+ * @returns A manager instance with state subscription, API methods, and lifecycle controls
+ *
+ * @example
+ * ```ts
+ * const selfieManager = createSelfieManager({ config: selfieConfig });
+ *
+ * selfieManager.subscribe((state) => {
+ *   if (state.status === 'capture') {
+ *     console.log('Camera ready:', state.stream);
+ *     console.log('Detection status:', state.detectionStatus);
+ *   }
+ * });
+ *
+ * selfieManager.load();
+ * ```
+ */
+declare function createSelfieManager(options: CreateSelfieActorOptions): Manager<SelfieState> & {
+  /**
+   * Starts the selfie flow.
+   * Goes to `tutorial` if showTutorial is true, otherwise to `loading`.
+   * Requires setup() to have been called with a token first.
+   */
+  load(): void;
+  /**
+   * Advances to the next step.
+   * From `tutorial` → permissions or capture (based on permission status).
+   * From `capture` → finished.
+   */
+  nextStep(): void;
+  /**
+   * Requests camera permission via getUserMedia.
+   * Only effective when in `permissions.idle` or `permissions.learnMore` state.
+   */
+  requestPermission(): void;
+  /**
+   * Navigates to the "learn more" permission screen.
+   * Only effective when in `permissions.idle` state.
+   */
+  goToLearnMore(): void;
+  /**
+   * Goes back from "learn more" to the initial permission screen.
+   * Only effective when in `permissions.learnMore` state.
+   */
+  back(): void;
+  /**
+   * Closes the selfie flow and transitions to `closed` state.
+   * Can be called from any state.
+   */
+  close(): void;
+  /**
+   * Resets the selfie manager to its initial `idle` state.
+   * Can be called from `error` state. Not available from `finished` (final state).
+   */
+  reset(): void;
+  /**
+   * Retries the capture after an upload error.
+   * Only effective when in `capture.uploadError` state and `attemptsRemaining > 0`.
+   * If no attempts remaining, the transition is blocked.
+   */
+  retryCapture(): void;
+  /**
+   * Captures a selfie in manual capture mode.
+   * Only effective when in `capture.detecting` state and `detectionStatus === 'manualCapture'`.
+   */
+  capture(): void;
+};
+type SelfieManager = ReturnType<typeof createSelfieManager>;
+//#endregion
+export { type CameraStream, type DeepsightService, type DetectionStatus, type FaceErrorCode, type PermissionResult, type PermissionStatus, type ProcessFaceResponse, type SelfieConfig, type SelfieMachine, type SelfieManager, type SelfieState, createSelfieManager, processFace, selfieMachine };
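
For orientation, here is a minimal consumer sketch built only from the declarations above. The `@incodetech/core/selfie` import specifier is an assumption inferred from the dist layout (the actual subpath exports live in package.json, which is listed but not shown here), and `selfieConfig` is a placeholder for a real `SelfieConfig` taken from the flow configuration.

```ts
// Sketch only — import path assumed from the dist layout; not confirmed by this diff.
import { createSelfieManager, type SelfieConfig, type SelfieState } from '@incodetech/core/selfie';

// Placeholder: a real SelfieConfig comes from the flow's SELFIE module configuration.
declare const selfieConfig: SelfieConfig;

const selfieManager = createSelfieManager({ config: selfieConfig });

selfieManager.subscribe((state: SelfieState) => {
  switch (state.status) {
    case 'capture':
      // Camera stream and live detection feedback are only present in this state.
      console.log(state.captureStatus, state.detectionStatus, state.attemptsRemaining);
      if (state.captureStatus === 'uploadError' && state.attemptsRemaining > 0) {
        selfieManager.retryCapture(); // per the docs, only allowed while attempts remain
      }
      break;
    case 'finished':
      // ProcessFaceResponse carries faceMatch, confidence, and existingUser.
      console.log('Face match:', state.processResponse?.faceMatch);
      break;
    case 'error':
      console.error('Selfie flow failed:', state.error);
      break;
  }
});

// Starts the flow: tutorial first if configured, otherwise straight to loading.
selfieManager.load();
```

Per `CreateSelfieActorOptions`, the `dependencies` object (an `IStorageCapability` plus a `getWasmUtil` factory) is optional, so the sketch omits it and relies on the package defaults.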