@incodetech/core 2.0.0-alpha.1 → 2.0.0-alpha.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. package/dist/Manager-6BwbaI_H.d.ts +19 -0
  2. package/dist/StateMachine-7c1gcu94.d.ts +2 -0
  3. package/dist/addEvent-1Mi5CEiq.esm.js +16 -0
  4. package/dist/chunk-C_Yo44FK.esm.js +49 -0
  5. package/dist/email.d.ts +264 -0
  6. package/dist/email.esm.js +479 -0
  7. package/dist/endpoints-D_pUMaqA.esm.js +1701 -0
  8. package/dist/flow.d.ts +578 -0
  9. package/dist/flow.esm.js +628 -0
  10. package/dist/index.d.ts +226 -0
  11. package/dist/index.esm.js +155 -0
  12. package/dist/lib-CyIAFRfr.esm.js +12499 -0
  13. package/dist/permissionServices-CVR0Pq38.esm.js +72 -0
  14. package/dist/phone.d.ts +292 -0
  15. package/dist/phone.esm.js +550 -0
  16. package/dist/selfie.d.ts +758 -0
  17. package/dist/selfie.esm.js +978 -0
  18. package/dist/types-tq1ypYSL.d.ts +5 -0
  19. package/dist/warmup-Dr7OcFND.d.ts +55 -0
  20. package/dist/xstate.esm-B_rda9yU.esm.js +3261 -0
  21. package/package.json +14 -11
  22. package/src/camera/cameraActor.ts +0 -21
  23. package/src/camera/cameraService.test.ts +0 -437
  24. package/src/camera/cameraService.ts +0 -165
  25. package/src/camera/cameraServices.test.ts +0 -66
  26. package/src/camera/cameraServices.ts +0 -26
  27. package/src/camera/cameraStateMachine.test.ts +0 -602
  28. package/src/camera/cameraStateMachine.ts +0 -264
  29. package/src/camera/index.ts +0 -5
  30. package/src/camera/types.ts +0 -17
  31. package/src/device/getBrowser.ts +0 -31
  32. package/src/device/getDeviceClass.ts +0 -29
  33. package/src/device/index.ts +0 -2
  34. package/src/email/__mocks__/emailMocks.ts +0 -59
  35. package/src/email/emailActor.ts +0 -15
  36. package/src/email/emailManager.test.ts +0 -573
  37. package/src/email/emailManager.ts +0 -427
  38. package/src/email/emailServices.ts +0 -66
  39. package/src/email/emailStateMachine.test.ts +0 -741
  40. package/src/email/emailStateMachine.ts +0 -367
  41. package/src/email/index.ts +0 -39
  42. package/src/email/types.ts +0 -60
  43. package/src/events/addEvent.ts +0 -20
  44. package/src/events/types.ts +0 -7
  45. package/src/flow/__mocks__/flowMocks.ts +0 -84
  46. package/src/flow/flowActor.ts +0 -13
  47. package/src/flow/flowAnalyzer.test.ts +0 -266
  48. package/src/flow/flowAnalyzer.ts +0 -37
  49. package/src/flow/flowCompletionService.ts +0 -21
  50. package/src/flow/flowManager.test.ts +0 -560
  51. package/src/flow/flowManager.ts +0 -235
  52. package/src/flow/flowServices.test.ts +0 -109
  53. package/src/flow/flowServices.ts +0 -13
  54. package/src/flow/flowStateMachine.test.ts +0 -334
  55. package/src/flow/flowStateMachine.ts +0 -182
  56. package/src/flow/index.ts +0 -21
  57. package/src/flow/moduleLoader.test.ts +0 -136
  58. package/src/flow/moduleLoader.ts +0 -73
  59. package/src/flow/orchestratedFlowManager.test.ts +0 -240
  60. package/src/flow/orchestratedFlowManager.ts +0 -231
  61. package/src/flow/orchestratedFlowStateMachine.test.ts +0 -199
  62. package/src/flow/orchestratedFlowStateMachine.ts +0 -325
  63. package/src/flow/types.ts +0 -434
  64. package/src/http/__mocks__/api.ts +0 -88
  65. package/src/http/api.test.ts +0 -231
  66. package/src/http/api.ts +0 -90
  67. package/src/http/endpoints.ts +0 -17
  68. package/src/index.ts +0 -33
  69. package/src/permissions/index.ts +0 -2
  70. package/src/permissions/permissionServices.ts +0 -31
  71. package/src/permissions/types.ts +0 -3
  72. package/src/phone/__mocks__/phoneMocks.ts +0 -71
  73. package/src/phone/index.ts +0 -39
  74. package/src/phone/phoneActor.ts +0 -15
  75. package/src/phone/phoneManager.test.ts +0 -393
  76. package/src/phone/phoneManager.ts +0 -458
  77. package/src/phone/phoneServices.ts +0 -98
  78. package/src/phone/phoneStateMachine.test.ts +0 -918
  79. package/src/phone/phoneStateMachine.ts +0 -422
  80. package/src/phone/types.ts +0 -83
  81. package/src/recordings/recordingsRepository.test.ts +0 -87
  82. package/src/recordings/recordingsRepository.ts +0 -48
  83. package/src/recordings/streamingEvents.ts +0 -10
  84. package/src/selfie/__mocks__/selfieMocks.ts +0 -26
  85. package/src/selfie/index.ts +0 -14
  86. package/src/selfie/selfieActor.ts +0 -17
  87. package/src/selfie/selfieErrorUtils.test.ts +0 -116
  88. package/src/selfie/selfieErrorUtils.ts +0 -66
  89. package/src/selfie/selfieManager.test.ts +0 -297
  90. package/src/selfie/selfieManager.ts +0 -301
  91. package/src/selfie/selfieServices.ts +0 -362
  92. package/src/selfie/selfieStateMachine.test.ts +0 -283
  93. package/src/selfie/selfieStateMachine.ts +0 -804
  94. package/src/selfie/selfieUploadService.test.ts +0 -90
  95. package/src/selfie/selfieUploadService.ts +0 -81
  96. package/src/selfie/types.ts +0 -103
  97. package/src/session/index.ts +0 -5
  98. package/src/session/sessionService.ts +0 -78
  99. package/src/setup.test.ts +0 -61
  100. package/src/setup.ts +0 -171
  101. package/tsconfig.json +0 -13
  102. package/tsdown.config.ts +0 -22
  103. package/vitest.config.ts +0 -37
  104. package/vitest.setup.ts +0 -135
package/dist/selfie.d.ts
@@ -0,0 +1,758 @@
+ import { t as WasmPipeline } from "./warmup-Dr7OcFND.js";
+ import { t as Manager } from "./Manager-6BwbaI_H.js";
+ import { n as StateMachine, t as AnyStateMachine } from "./StateMachine-7c1gcu94.js";
+ import { n as PermissionStatus, t as PermissionResult } from "./types-tq1ypYSL.js";
+
+ //#region ../infra/src/media/canvas.d.ts
+ /**
+ * Class representing a canvas element for image capture and manipulation.
+ */
+ declare class IncodeCanvas {
+ canvas: HTMLCanvasElement;
+ private base64Image;
+ private blobData;
+ /**
+ * Creates an {@link IncodeCanvas} from a raw {@link ImageData} frame.
+ * @param imageData - Frame pixels in RGBA format
+ * @returns An {@link IncodeCanvas} containing the provided pixels
+ */
+ static fromImageData(imageData: ImageData): IncodeCanvas;
+ /**
+ * Create a new canvas element.
+ * @param canvas_ - The canvas element to clone.
+ */
+ constructor(canvas_: HTMLCanvasElement);
+ /**
+ * Check if the current canvas is valid.
+ */
+ private checkCanvas;
+ /**
+ * Release the data stored by IncodeCanvas.
+ */
+ release(): void;
+ /**
+ * Get the width of the canvas.
+ */
+ width(): number | null;
+ /**
+ * Get the height of the canvas.
+ */
+ height(): number | null;
+ /**
+ * Set the width of the canvas.
+ */
+ setWidth(width: number): void;
+ /**
+ * Set the height of the canvas.
+ */
+ setHeight(height: number): void;
+ /**
+ * Clone the current canvas.
+ */
+ clone(): IncodeCanvas | null;
+ /**
+ * Deep clone the current IncodeCanvas including blob data.
+ */
+ deepClone(): Promise<IncodeCanvas | null>;
+ /**
+ * Returns the drawing context on the canvas.
+ */
+ getContext(contextId: '2d', contextAttributes?: CanvasRenderingContext2DSettings): CanvasRenderingContext2D | null;
+ /**
+ * Retrieves the image data from the canvas.
+ */
+ getImageData(): ImageData | null;
+ /**
+ * Updates the base64 representation of the current canvas image.
+ */
+ updateBase64Image(jpegQuality?: number): void;
+ /**
+ * Converts the current canvas element to a base64 string.
+ */
+ getBase64Image(jpegQuality?: number, includeDataURLPrefix?: boolean): string | null;
+ /**
+ * Sets the base64 representation of the current canvas image.
+ */
+ setBase64Image(base64Image: string | null): void;
+ /**
+ * Updates the Blob representation of the current canvas image.
+ */
+ updateBlob(jpegQuality?: number, includeDataURLPrefix?: boolean): void;
+ /**
+ * Converts a base64 string to a Blob and creates a URL for it.
+ */
+ static base64ToBlob(base64: string): {
+ blob: Blob;
+ url: string;
+ } | null;
+ /**
+ * Retrieves the Blob data and its URL from the current canvas.
+ */
+ getBlobData(jpegQuality?: number, includeDataURLPrefix?: boolean): {
+ blob: Blob;
+ url: string;
+ } | null;
+ /**
+ * Sets the Blob data of the current canvas image.
+ */
+ setBlobData(blobData: {
+ blob: Blob;
+ url: string;
+ }): Promise<void>;
+ /**
+ * Returns a resized canvas according to video element size.
+ */
+ getResizedCanvas(videoElementWidth: number, videoElementHeight: number): IncodeCanvas | null;
+ }
+ //#endregion
+ //#region ../infra/src/capabilities/IMLProviderCapability.d.ts
+ /**
+ * Base configuration shared by all ML provider capabilities.
+ */
+ interface MLProviderConfig {
+ /** Path to the WASM binary */
+ wasmPath?: string;
+ /** Path to the SIMD-optimized WASM binary (optional) */
+ wasmSimdPath?: string;
+ /** Path to the WASM glue code */
+ glueCodePath?: string;
+ /** Whether to use SIMD optimizations (default: true) */
+ useSimd?: boolean;
+ /**
+ * Base path for ML model files. Models will be loaded from `${modelsBasePath}/${modelFileName}`.
+ * If not provided, models are expected in a 'models' subdirectory relative to the WASM binary.
+ */
+ modelsBasePath?: string;
+ }
+ /**
+ * Base interface for ML provider capabilities.
+ * Provides common lifecycle and frame processing methods shared by
+ * FaceDetectionCapability and IdCaptureCapability.
+ */
+ interface IMLProviderCapability<TConfig extends MLProviderConfig> {
+ /**
+ * Whether the provider has been initialized and is ready to process frames.
+ */
+ readonly initialized: boolean;
+ /**
+ * Initializes the provider with the given configuration.
+ * If WASM was already warmed up via `setup()` or `warmupWasm()`, this returns almost instantly.
+ * @param config - Provider configuration including WASM paths
+ */
+ initialize(config: TConfig): Promise<void>;
+ /**
+ * Processes a frame through the ML pipeline.
+ * Callbacks set via `setCallbacks()` will be invoked based on the analysis results.
+ * @param image - Image data to process
+ * @throws Error if provider is not initialized
+ */
+ processFrame(image: ImageData): Promise<void>;
+ /**
+ * Resets the pipeline to its initial state.
+ * Safe to call even if not initialized (no-op in that case).
+ */
+ reset(): void;
+ /**
+ * Disposes of resources and resets initialization state.
+ * Safe to call even if not initialized.
+ */
+ dispose(): Promise<void>;
+ }
+ //#endregion
+ //#region ../infra/src/capabilities/IFaceDetectionCapability.d.ts
+ /**
+ * Configuration for face detection provider.
+ * Extends base ML provider config with face-detection specific options.
+ */
+ interface FaceDetectionConfig extends MLProviderConfig {
+ autocaptureInterval?: number;
+ }
+ interface FacePositionConstraints {
+ minX: number;
+ minY: number;
+ maxX: number;
+ maxY: number;
+ }
+ interface FaceDetectionThresholds {
+ brightnessThreshold: number;
+ blurrinessThreshold: number;
+ tiltRotationAngleThreshold: number;
+ minMagicCropSize: number;
+ autocaptureInterval: number;
+ minFaceQualityScore: number;
+ faceOcclusionThreshold: number;
+ }
+ interface FaceAttributesThresholds {
+ headwearThreshold: number;
+ lensesThreshold: number;
+ closedEyesThreshold: number;
+ maskThreshold: number;
+ }
+ interface FaceChecksConfig {
+ lenses: boolean;
+ mask: boolean;
+ closedEyes: boolean;
+ headWear: boolean;
+ occlusion: boolean;
+ }
+ interface FaceData {
+ rect: {
+ x: number;
+ y: number;
+ width: number;
+ height: number;
+ };
+ rightEye: {
+ x: number;
+ y: number;
+ };
+ leftEye: {
+ x: number;
+ y: number;
+ };
+ noseTip: {
+ x: number;
+ y: number;
+ };
+ rightMouthCorner: {
+ x: number;
+ y: number;
+ };
+ leftMouthCorner: {
+ x: number;
+ y: number;
+ };
+ pitch: number;
+ yaw: number;
+ roll: number;
+ }
+ type FaceCoordinates = {
+ rightEyeX: number;
+ rightEyeY: number;
+ leftEyeX: number;
+ leftEyeY: number;
+ noseTipX: number;
+ noseTipY: number;
+ rightMouthX: number;
+ rightMouthY: number;
+ mouthX: number;
+ mouthY: number;
+ x: number;
+ y: number;
+ width: number;
+ height: number;
+ };
+ interface FaceDetectionCallbacks {
+ onFarAway?: () => void;
+ onTooClose?: () => void;
+ onTooManyFaces?: () => void;
+ onNoFace?: () => void;
+ onCapture?: (canvas: IncodeCanvas, faceCoordinates: FaceCoordinates) => void;
+ onGetReady?: () => void;
+ onGetReadyFinished?: () => void;
+ onCenterFace?: () => void;
+ onDark?: () => void;
+ onBlur?: () => void;
+ onFaceAngle?: () => void;
+ onBestShot?: (face: FaceData) => void;
+ onLenses?: () => void;
+ onMask?: () => void;
+ onEyesClosed?: () => void;
+ onHeadWear?: () => void;
+ onSwitchToManualCapture?: () => void;
+ onFaceOccluded?: () => void;
+ }
+ /**
+ * Capability interface for face detection and selfie capture.
+ * Extends the base ML provider capability with face-detection specific methods.
+ */
+ interface IFaceDetectionCapability extends IMLProviderCapability<FaceDetectionConfig> {
+ /**
+ * Sets callbacks for face detection events.
+ * @param callbacks - Object containing callback functions for various detection events
+ */
+ setCallbacks(callbacks: FaceDetectionCallbacks): void;
+ /**
+ * Sets position constraints for face detection.
+ * @param constraints - Bounding box constraints for valid face position
+ */
+ setPositionConstraints(constraints: FacePositionConstraints): void;
+ /**
+ * Sets detection thresholds for quality checks.
+ * @param thresholds - Threshold values for various quality metrics
+ */
+ setThresholds(thresholds: FaceDetectionThresholds): void;
+ /**
+ * Sets thresholds for face attribute detection.
+ * @param thresholds - Threshold values for attribute detection (headwear, lenses, etc.)
+ */
+ setAttributesThresholds(thresholds: FaceAttributesThresholds): void;
+ /**
+ * Enables or disables specific face checks.
+ * @param config - Configuration for which checks to enable
+ */
+ setChecksEnabled(config: FaceChecksConfig): void;
+ /**
+ * Sets video selfie mode.
+ * @param enabled - Whether to enable video selfie mode
+ */
+ setVideoSelfieMode(enabled: boolean): void;
+ }
+ //#endregion
+ //#region ../infra/src/capabilities/IRecordingCapability.d.ts
+ type RecordingPublisher = {
+ getStreamId: () => string | undefined;
+ replaceVideoTrack: (track: MediaStreamTrack) => Promise<void>;
+ destroy: () => void;
+ };
+ type RecordingConnection = {
+ sessionId: string | undefined;
+ publisher: RecordingPublisher;
+ disconnect: () => Promise<void>;
+ };
+ type RecordingConnectionEvents = {
+ onSessionConnected?: (sessionId: string | undefined) => void;
+ onSessionDisconnected?: (sessionId: string | undefined) => void;
+ onSessionException?: (params: {
+ name?: string;
+ message?: string;
+ sessionId?: string;
+ }) => void;
+ onPublisherCreated?: (params: {
+ streamId?: string;
+ sessionId?: string;
+ }) => void;
+ onPublisherError?: (params: {
+ message?: string;
+ sessionId?: string;
+ streamId?: string;
+ }) => void;
+ };
+ type ConnectRecordingParams = {
+ sessionToken: string;
+ stream: MediaStream;
+ events?: RecordingConnectionEvents;
+ };
+ type IRecordingCapability = {
+ /**
+ * Connects to a recording session and publishes the provided media stream.
+ * Returns a connection handle that can be disconnected and used to manage the publisher.
+ */
+ connect: (params: ConnectRecordingParams) => Promise<RecordingConnection>;
+ };
+ //#endregion
+ //#region ../infra/src/media/camera.d.ts
+ type CameraStream = MediaStream;
+ //#endregion
+ //#region ../infra/src/media/StreamCanvasCapture.d.ts
+ type StreamCanvasCaptureOptions = {
+ fps?: number;
+ width?: number;
+ height?: number;
+ };
+ type StreamCanvasCaptureEventMap = {
+ frame: Event;
+ };
+ declare class StreamCanvasCapture {
+ private video;
+ private canvas;
+ private ctx;
+ private rafId;
+ private lastFrameTimeSeconds;
+ private lastTickTimeMs;
+ private hasFrame;
+ private disposed;
+ private eventTarget;
+ constructor(stream: CameraStream, options?: StreamCanvasCaptureOptions);
+ addEventListener(type: keyof StreamCanvasCaptureEventMap, listener: EventListenerOrEventListenerObject | null, options?: boolean | AddEventListenerOptions): void;
+ removeEventListener(type: keyof StreamCanvasCaptureEventMap, listener: EventListenerOrEventListenerObject | null, options?: boolean | EventListenerOptions): void;
+ /**
+ * Returns the latest cached frame as an {@link IncodeCanvas}.
+ */
+ getLatestCanvas(): IncodeCanvas | null;
+ /**
+ * Returns the latest cached frame as raw {@link ImageData}.
+ */
+ getLatestFrame(): ImageData | null;
+ /**
+ * Disposes internal resources and stops the capture loop.
+ */
+ dispose(): void;
+ private rafLoop;
+ private tick;
+ }
+ //#endregion
+ //#region ../infra/src/wasm/WasmPipelineType.d.ts
+ declare enum WasmPipelineType {
+ IdBlurGlarePipeline = 0,
+ IdBarcodeAndTextQualityPipeline = 1,
+ IdVideoSelfiePipeline = 2,
+ SelfieWithAggregationMetrics = 3,
+ SelfieWithQualityMetrics = 4,
+ IdFaceDetectionPipeline = 5,
+ }
+ //#endregion
+ //#region ../infra/src/providers/wasm/BaseWasmProvider.d.ts
+ /**
+ * Base configuration for WASM providers
+ */
+ interface BaseWasmConfig {
+ /** Path to the WASM binary */
+ wasmPath?: string;
+ /** Path to the SIMD-optimized WASM binary (optional) */
+ wasmSimdPath?: string;
+ /** Path to the WASM glue code */
+ glueCodePath?: string;
+ /** Whether to use SIMD optimizations (default: true) */
+ useSimd?: boolean;
+ /**
+ * Base path for ML model files. Models will be loaded from `${modelsBasePath}/${modelFileName}`.
+ * If not provided, models are expected in a 'models' subdirectory relative to the WASM binary.
+ */
+ modelsBasePath?: string;
+ }
+ /**
+ * Base provider class that abstracts common WASM functionality.
+ * This serves as a foundation for specific ML capability providers
+ * like FaceDetectionProvider and IdCaptureProvider.
+ */
+ declare abstract class BaseWasmProvider {
+ private _isInitialized;
+ protected pipelineType: WasmPipelineType | undefined;
+ /**
+ * Creates a new BaseWasmProvider
+ * @param pipelineType - The WASM pipeline type this provider uses
+ */
+ constructor(pipelineType?: WasmPipelineType);
+ /**
+ * Returns whether this provider has been initialized.
+ */
+ get initialized(): boolean;
+ protected getPipelineType(): WasmPipelineType;
+ /**
+ * Initializes the provider by ensuring WASM is loaded
+ * @param config - Provider configuration
+ * @param pipeline - The pipeline type to warm up ('selfie', 'idCapture', etc.)
+ */
+ protected initializeBase(config: BaseWasmConfig, pipeline: WasmPipeline): Promise<void>;
+ /**
+ * Ensures the provider is initialized before performing operations.
+ * @throws Error if not initialized
+ */
+ protected ensureInitialized(): void;
+ /**
+ * Processes a frame through the WASM pipeline
+ * @param image - Image data to process
+ */
+ processFrame(image: ImageData): Promise<void>;
+ /**
+ * Resets the pipeline to its initial state.
+ * Safe to call even if not initialized (no-op in that case).
+ */
+ reset(): void;
+ /**
+ * Disposes of resources and resets initialization state.
+ * Safe to call even if not initialized.
+ */
+ dispose(): Promise<void>;
+ }
+ //#endregion
+ //#region ../infra/src/providers/wasm/FaceDetectionProvider.d.ts
+ declare class FaceDetectionProvider extends BaseWasmProvider implements IFaceDetectionCapability {
+ private defaultThresholds;
+ private currentThresholds;
+ private currentFrame;
+ private bestCanvas;
+ private bestFace;
+ constructor();
+ processFrame(image: ImageData): Promise<void>;
+ initialize(config: FaceDetectionConfig): Promise<void>;
+ setCallbacks(callbacks: FaceDetectionCallbacks): void;
+ setPositionConstraints(constraints: FacePositionConstraints): void;
+ applyDefaults(autocaptureInterval?: number): void;
+ setAutocaptureInterval(interval: number): void;
+ setThresholds(thresholds: FaceDetectionThresholds): void;
+ setAttributesThresholds(thresholds: FaceAttributesThresholds): void;
+ setChecksEnabled(config: FaceChecksConfig): void;
+ setVideoSelfieMode(enabled: boolean): void;
+ reset(): void;
+ private createDefaultFaceCoordinates;
+ private formatFaceCoordinates;
+ }
+ //#endregion
+ //#region src/selfie/types.d.ts
+ type SelfieConfig = {
+ showTutorial: boolean;
+ showPreview: boolean;
+ assistedOnboarding: boolean;
+ enableFaceRecording: boolean;
+ recording?: {
+ capability?: IRecordingCapability;
+ };
+ autoCaptureTimeout: number;
+ captureAttempts: number;
+ validateLenses: boolean;
+ validateFaceMask: boolean;
+ validateHeadCover: boolean;
+ validateClosedEyes: boolean;
+ validateBrightness: boolean;
+ deepsightLiveness: 'SINGLE_FRAME' | 'MULTIMODAL' | 'VIDEOLIVENESS';
+ };
+ type DetectionStatus = 'idle' | 'detecting' | 'noFace' | 'tooManyFaces' | 'tooClose' | 'tooFar' | 'blur' | 'dark' | 'faceAngle' | 'headWear' | 'lenses' | 'eyesClosed' | 'faceMask' | 'centerFace' | 'manualCapture' | 'success' | 'error' | 'capturing' | 'getReady' | 'getReadyFinished' | 'offline';
+ type SendFaceImageResponse = {
+ age: number;
+ confidence: number;
+ hasClosedEyes: boolean;
+ hasFaceMask: boolean;
+ hasHeadCover: boolean;
+ hasLenses: boolean;
+ isBright: boolean;
+ liveness: boolean;
+ imageBase64: string;
+ sessionStatus: string;
+ };
+ declare const FACE_ERROR_CODES: {
+ readonly FACE_OCCLUDED: "FACE_OCCLUDED";
+ readonly LIVENESS: "LIVENESS_ERROR";
+ readonly BRIGHTNESS: "BRIGHTNESS_ERROR";
+ readonly LENSES: "LENSES_ERROR";
+ readonly MASK: "MASK_ERROR";
+ readonly CLOSED_EYES: "CLOSED_EYES_ERROR";
+ readonly HEAD_COVER: "HEAD_COVER_ERROR";
+ readonly SERVER: "SERVER_ERROR";
+ readonly FACE_NOT_FOUND: "FACE_NOT_FOUND";
+ readonly MULTIPLE_FACES: "MULTIPLE_FACES";
+ readonly TOO_BLURRY: "TOO_BLURRY_ERROR";
+ readonly TOO_DARK: "TOO_DARK_ERROR";
+ readonly USER_IS_NOT_RECOGNIZED: "USER_IS_NOT_RECOGNIZED";
+ readonly SPOOF_ATTEMPT_DETECTED: "SPOOF_ATTEMPT_DETECTED";
+ readonly FACE_TOO_DARK: "FACE_TOO_DARK";
+ readonly LENSES_DETECTED: "LENSES_DETECTED";
+ readonly FACE_MASK_DETECTED: "FACE_MASK_DETECTED";
+ readonly CLOSED_EYES_DETECTED: "CLOSED_EYES_DETECTED";
+ readonly HEAD_COVER_DETECTED: "HEAD_COVER_DETECTED";
+ readonly FACE_CROPPING_FAILED: "FACE_CROPPING_FAILED";
+ readonly FACE_TOO_SMALL: "FACE_TOO_SMALL";
+ readonly FACE_TOO_BLURRY: "FACE_TOO_BLURRY";
+ readonly BAD_PHOTO_QUALITY: "BAD_PHOTO_QUALITY";
+ readonly PROCESSING_ERROR: "PROCESSING_ERROR";
+ readonly BAD_REQUEST: "BAD_REQUEST";
+ readonly NONEXISTENT_CUSTOMER: "NONEXISTENT_CUSTOMER";
+ readonly HINT_NOT_PROVIDED: "HINT_NOT_PROVIDED";
+ readonly SELFIE_IMAGE_LOW_QUALITY: "SELFIE_IMAGE_LOW_QUALITY";
+ };
+ type FaceErrorCode = (typeof FACE_ERROR_CODES)[keyof typeof FACE_ERROR_CODES];
+ type RecordingSession = {
+ token: string;
+ sessionId: string;
+ videoRecordingId: string;
+ connection: RecordingConnection;
+ resolution?: string;
+ hasAudio: boolean;
+ };
+ //#endregion
+ //#region src/selfie/selfieStateMachine.d.ts
+ type SelfieContext = {
+ config: SelfieConfig;
+ stream: CameraStream | undefined;
+ provider: FaceDetectionProvider | undefined;
+ frameCapturer: StreamCanvasCapture | undefined;
+ error: string | undefined;
+ detectionStatus: DetectionStatus;
+ debugFrame: ImageData | undefined;
+ capturedImage: IncodeCanvas | undefined;
+ faceCoordinates: FaceCoordinates | undefined;
+ uploadResponse: SendFaceImageResponse | undefined;
+ recordingSession: RecordingSession | undefined;
+ attemptsRemaining: number;
+ uploadError: FaceErrorCode | undefined;
+ permissionResult: PermissionResult | 'refresh' | undefined;
+ resetDetection: (() => void) | undefined;
+ };
+ type SelfieEvent = {
+ type: 'LOAD';
+ } | {
+ type: 'NEXT_STEP';
+ } | {
+ type: 'REQUEST_PERMISSION';
+ } | {
+ type: 'GO_TO_LEARN_MORE';
+ } | {
+ type: 'BACK';
+ } | {
+ type: 'QUIT';
+ } | {
+ type: 'RESET';
+ } | {
+ type: 'MANUAL_CAPTURE';
+ } | {
+ type: 'DETECTION_UPDATE';
+ status: DetectionStatus;
+ } | {
+ type: 'DETECTION_FRAME';
+ frame: ImageData;
+ } | {
+ type: 'DETECTION_SUCCESS';
+ canvas: IncodeCanvas;
+ faceCoordinates?: FaceCoordinates;
+ } | {
+ type: 'DETECTION_RESET_READY';
+ reset: () => void;
+ } | {
+ type: 'RETRY_CAPTURE';
+ };
+ type SelfieInput = {
+ config: SelfieConfig;
+ };
+ /**
+ * The selfie capture state machine.
+ *
+ * Note: Uses AnyStateMachine type for declaration file portability.
+ * Type safety is ensured via the machine configuration.
+ */
+ declare const selfieMachine: AnyStateMachine;
+ /**
+ * Type representing the selfie machine.
+ * For advanced use cases requiring specific machine types.
+ */
+ type SelfieMachine = StateMachine<SelfieContext, SelfieEvent, any, any, any, any, any, any, any, SelfieInput, any, any, any, any>;
+ //#endregion
+ //#region src/selfie/selfieActor.d.ts
+ type CreateSelfieActorOptions = {
+ config: SelfieConfig;
+ };
+ //#endregion
+ //#region src/selfie/selfieManager.d.ts
+ type CaptureStatus = 'initializing' | 'detecting' | 'capturing' | 'uploading' | 'uploadError' | 'success';
+ /** Selfie manager is waiting to be started */
+ type SelfieIdleState = {
+ status: 'idle';
+ };
+ /** Checking camera permissions (when no tutorial) */
+ type SelfieLoadingState = {
+ status: 'loading';
+ };
+ /** Showing selfie tutorial */
+ type SelfieTutorialState = {
+ status: 'tutorial';
+ };
+ /** Handling camera permissions */
+ type SelfiePermissionsState = {
+ status: 'permissions';
+ /** Current permission sub-state: initial, requesting, denied, or learnMore */
+ permissionStatus: PermissionStatus;
+ };
+ /** Camera is ready for selfie capture */
+ type SelfieCaptureState = {
+ status: 'capture';
+ /** Current capture sub-state */
+ captureStatus: CaptureStatus;
+ /** The active camera stream */
+ stream: CameraStream | undefined;
+ /** Current face detection status */
+ detectionStatus: DetectionStatus;
+ /** Latest frame processed in the detection loop (for UI debug rendering) */
+ debugFrame: ImageData | undefined;
+ /** Number of capture attempts remaining */
+ attemptsRemaining: number;
+ /** Error message from failed upload */
+ uploadError: string | undefined;
+ };
+ /** Selfie capture completed successfully */
+ type SelfieFinishedState = {
+ status: 'finished';
+ };
+ /** User closed the selfie flow */
+ type SelfieClosedState = {
+ status: 'closed';
+ };
+ /** An error occurred during the flow */
+ type SelfieErrorState = {
+ status: 'error';
+ /** The error message */
+ error: string;
+ };
+ /** Union of all possible selfie states */
+ type SelfieState = SelfieIdleState | SelfieLoadingState | SelfieTutorialState | SelfiePermissionsState | SelfieCaptureState | SelfieFinishedState | SelfieClosedState | SelfieErrorState;
+ /**
+ * Creates a selfie manager instance for handling selfie capture flow.
+ *
+ * The selfie manager provides:
+ * - State management with statuses: `idle`, `loading`, `tutorial`, `permissions`, `capture`, `finished`, `closed`, `error`
+ * - Permission handling with nested states: `idle`, `requesting`, `denied`, `learnMore`
+ * - Capture handling with nested states: `initializing`, `startingRecorder`, `recordingActive`, `detecting`, `capturing`, `uploading`, `uploadError`, `success`
+ * - Camera stream access when in `capture` state
+ * - Detection status feedback during face detection
+ * - Attempt tracking with `attemptsRemaining`
+ *
+ * @param options - Configuration for the selfie actor
+ * @param options.config - The selfie module configuration from the flow
+ * @returns A manager instance with state subscription, API methods, and lifecycle controls
+ *
+ * @example
+ * ```ts
+ * const selfieManager = createSelfieManager({ config: selfieConfig });
+ *
+ * selfieManager.subscribe((state) => {
+ * if (state.status === 'capture') {
+ * console.log('Camera ready:', state.stream);
+ * console.log('Detection status:', state.detectionStatus);
+ * }
+ * });
+ *
+ * selfieManager.load();
+ * ```
+ */
+ declare function createSelfieManager(options: CreateSelfieActorOptions): Manager<SelfieState> & {
+ /**
+ * Starts the selfie flow.
+ * Goes to `tutorial` if showTutorial is true, otherwise to `loading`.
+ * Requires setup() to have been called with a token first.
+ */
+ load(): void;
+ /**
+ * Advances to the next step.
+ * From `tutorial` → permissions or capture (based on permission status).
+ * From `capture` → finished.
+ */
+ nextStep(): void;
+ /**
+ * Requests camera permission via getUserMedia.
+ * Only effective when in `permissions.idle` or `permissions.learnMore` state.
+ */
+ requestPermission(): void;
+ /**
+ * Navigates to the "learn more" permission screen.
+ * Only effective when in `permissions.idle` state.
+ */
+ goToLearnMore(): void;
+ /**
+ * Goes back from "learn more" to the initial permission screen.
+ * Only effective when in `permissions.learnMore` state.
+ */
+ back(): void;
+ /**
+ * Closes the selfie flow and transitions to `closed` state.
+ * Can be called from any state.
+ */
+ close(): void;
+ /**
+ * Resets the selfie manager to its initial `idle` state.
+ * Can be called from `finished` or `error` states.
+ */
+ reset(): void;
+ /**
+ * Retries the capture after an upload error.
+ * Only effective when in `capture.uploadError` state and `attemptsRemaining > 0`.
+ * If no attempts remaining, the transition is blocked.
+ */
+ retryCapture(): void;
+ /**
+ * Captures a selfie in manual capture mode.
+ * Only effective when in `capture.detecting` state and `detectionStatus === 'manualCapture'`.
+ */
+ capture(): void;
+ };
+ type SelfieManager = ReturnType<typeof createSelfieManager>;
+ //#endregion
+ export { type CameraStream, type DetectionStatus, type FaceErrorCode, type PermissionResult, type PermissionStatus, type SelfieConfig, type SelfieMachine, type SelfieManager, type SelfieState, createSelfieManager, selfieMachine };
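
For orientation, a minimal usage sketch of the `createSelfieManager` API declared above. The import specifier (`@incodetech/core/selfie`) and every config value are illustrative assumptions, not taken from this diff (the real export map lives in the updated package.json, which is not shown), and in a real integration the guarded calls inside `subscribe` would be driven by user interaction rather than fired automatically.

```ts
// Hypothetical usage based on the selfie.d.ts declarations above.
// Import path and config values are assumptions for illustration only.
import { createSelfieManager, type SelfieConfig, type SelfieState } from '@incodetech/core/selfie';

const config: SelfieConfig = {
  showTutorial: true,
  showPreview: false,
  assistedOnboarding: false,
  enableFaceRecording: false,
  autoCaptureTimeout: 30_000, // placeholder value
  captureAttempts: 3, // placeholder value
  validateLenses: true,
  validateFaceMask: true,
  validateHeadCover: true,
  validateClosedEyes: true,
  validateBrightness: true,
  deepsightLiveness: 'SINGLE_FRAME',
};

// Per the JSDoc on load(), setup() must already have been called with a token.
const selfieManager = createSelfieManager({ config });

selfieManager.subscribe((state: SelfieState) => {
  switch (state.status) {
    case 'permissions':
      // requestPermission() is documented as effective only in the
      // `permissions.idle` / `permissions.learnMore` sub-states; the state
      // machine ignores it elsewhere. A real UI would call it from a button
      // handler instead of directly in the subscriber.
      selfieManager.requestPermission();
      break;
    case 'capture':
      // state.stream is the active CameraStream; attach it to a <video> element.
      if (state.detectionStatus === 'manualCapture') {
        selfieManager.capture(); // manual capture fallback
      }
      if (state.captureStatus === 'uploadError' && state.attemptsRemaining > 0) {
        selfieManager.retryCapture();
      }
      break;
    case 'finished':
      // Selfie uploaded successfully; hand control back to the flow.
      break;
  }
});

selfieManager.load();
```

The guarded calls mirror the sub-state constraints documented in the declarations; according to those JSDoc notes, `capture()` and `retryCapture()` have no effect outside `capture.detecting` and `capture.uploadError` respectively.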