@incodetech/core 2.0.0-alpha.10 → 2.0.0-alpha.12

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. package/dist/{OpenViduLogger-CQyDxBvM.esm.js → OpenViduLogger-CRbRNZA7.esm.js} +1 -1
  2. package/dist/OpenViduLogger-Dy5P806a.esm.js +3 -0
  3. package/dist/{warmup-CEJTfxQr.d.ts → StateMachine-pi8byl8C.d.ts} +4 -1
  4. package/dist/{addEvent-W0ORK0jT.esm.js → addEvent-BGKc_lHF.esm.js} +1 -1
  5. package/dist/deepsightLoader-B36_XZ7r.esm.js +25 -0
  6. package/dist/deepsightService-BWxcc4OC.esm.js +225 -0
  7. package/dist/email.d.ts +1 -1
  8. package/dist/email.esm.js +3 -3
  9. package/dist/{endpoints-BSTFaHYo.esm.js → endpoints-D9TGnxRK.esm.js} +966 -22
  10. package/dist/flow.d.ts +4 -303
  11. package/dist/flow.esm.js +4 -5
  12. package/dist/id-CJKLe8HS.esm.js +1818 -0
  13. package/dist/id.d.ts +6 -0
  14. package/dist/id.esm.js +8 -0
  15. package/dist/index-CbF_uI-x.d.ts +618 -0
  16. package/dist/index.d.ts +8 -3
  17. package/dist/index.esm.js +7 -3
  18. package/dist/{lib-Bu9XGMBW.esm.js → lib-BJoLTN_W.esm.js} +2 -2
  19. package/dist/phone.d.ts +1 -1
  20. package/dist/phone.esm.js +3 -3
  21. package/dist/recordingsRepository-D5MURoVB.esm.js +40 -0
  22. package/dist/selfie.d.ts +77 -317
  23. package/dist/selfie.esm.js +165 -62
  24. package/dist/{permissionServices-I6vX6DBy.esm.js → streamingEvents-B3hNanPl.esm.js} +34 -9
  25. package/dist/types-BpCrZLU6.d.ts +302 -0
  26. package/dist/types-DZbrbPgj.d.ts +335 -0
  27. package/package.json +6 -2
  28. package/dist/OpenViduLogger-BdPfiZO6.esm.js +0 -3
  29. package/dist/StateMachine-DRE1oH2B.d.ts +0 -2
  30. package/dist/types-iZi2rawo.d.ts +0 -5
  31. /package/dist/{Manager-BGfxEmyv.d.ts → Manager-BZUZTRPx.d.ts} +0 -0
  32. /package/dist/{chunk-C_Yo44FK.esm.js → chunk-FbsBJI8u.esm.js} +0 -0
  33. /package/dist/{xstate.esm-B_rda9yU.esm.js → xstate.esm-2hDiAXvZ.esm.js} +0 -0
@@ -1,5 +1,5 @@
1
- import { i as __toCommonJS, n as __esm, r as __export, t as __commonJS } from "./chunk-C_Yo44FK.esm.js";
2
- import { n as require_jsnlog, t as require_OpenViduLogger } from "./OpenViduLogger-CQyDxBvM.esm.js";
1
+ import { i as __toCommonJS, n as __esm, r as __export, t as __commonJS } from "./chunk-FbsBJI8u.esm.js";
2
+ import { n as require_jsnlog, t as require_OpenViduLogger } from "./OpenViduLogger-CRbRNZA7.esm.js";
3
3
 
4
4
  //#region ../../node_modules/.pnpm/openvidu-browser@2.32.1/node_modules/openvidu-browser/lib/OpenViduInternal/Enums/LocalRecorderState.js
5
5
  var require_LocalRecorderState = /* @__PURE__ */ __commonJS({ "../../node_modules/.pnpm/openvidu-browser@2.32.1/node_modules/openvidu-browser/lib/OpenViduInternal/Enums/LocalRecorderState.js": ((exports) => {
package/dist/phone.d.ts CHANGED
@@ -1,4 +1,4 @@
1
- import { t as Manager } from "./Manager-BGfxEmyv.js";
1
+ import { t as Manager } from "./Manager-BZUZTRPx.js";
2
2
 
3
3
  //#region src/modules/phone/types.d.ts
4
4
 
package/dist/phone.esm.js CHANGED
@@ -1,6 +1,6 @@
1
- import { g as createManager, n as api, t as endpoints } from "./endpoints-BSTFaHYo.esm.js";
2
- import { a as createActor, i as fromPromise, n as assign, r as fromCallback, t as setup } from "./xstate.esm-B_rda9yU.esm.js";
3
- import { t as addEvent } from "./addEvent-W0ORK0jT.esm.js";
1
+ import { E as createManager, n as api, t as endpoints } from "./endpoints-D9TGnxRK.esm.js";
2
+ import { a as createActor, i as fromPromise, n as assign, r as fromCallback, t as setup } from "./xstate.esm-2hDiAXvZ.esm.js";
3
+ import { t as addEvent } from "./addEvent-BGKc_lHF.esm.js";
4
4
 
5
5
  //#region src/modules/phone/phoneServices.ts
6
6
  async function fetchPhone(signal) {
@@ -0,0 +1,40 @@
1
+ import { n as api, t as endpoints } from "./endpoints-D9TGnxRK.esm.js";
2
+
3
+ //#region ../infra/src/device/getBrowser.ts
4
+ function getUserAgent() {
5
+ if (typeof navigator === "undefined") return "";
6
+ return navigator.userAgent;
7
+ }
8
+
9
+ //#endregion
10
+ //#region src/internal/recordings/recordingsRepository.ts
11
+ async function createRecordingSession(type) {
12
+ return (await api.post(endpoints.recordingCreateSessionV2, { type })).data;
13
+ }
14
+ async function startRecording(params) {
15
+ return (await api.post(endpoints.recordingStartV2, {
16
+ videoRecordingId: params.videoRecordingId,
17
+ frameRate: 30,
18
+ outputMode: "COMPOSED",
19
+ resolution: params.resolution,
20
+ type: params.type,
21
+ hasAudio: params.hasAudio ?? false
22
+ })).data;
23
+ }
24
+ async function stopRecording(videoRecordingId) {
25
+ return (await api.post(endpoints.recordingStopV2, { videoRecordingId })).data;
26
+ }
27
+ async function uploadDeepsightVideo(encryptedVideo, token) {
28
+ try {
29
+ return (await api.post(endpoints.deepsightVideoImport, {
30
+ video: encryptedVideo,
31
+ type: "selfie"
32
+ }, { headers: { "X-Incode-Hardware-Id": token } })).data.recordingId ?? "";
33
+ } catch (error) {
34
+ console.error("Error uploading deepsight video:", error);
35
+ return "";
36
+ }
37
+ }
38
+
39
+ //#endregion
40
+ export { getUserAgent as a, uploadDeepsightVideo as i, startRecording as n, stopRecording as r, createRecordingSession as t };
package/dist/selfie.d.ts CHANGED
@@ -1,165 +1,9 @@
1
- import { t as WasmPipeline } from "./warmup-CEJTfxQr.js";
2
- import { t as Manager } from "./Manager-BGfxEmyv.js";
3
- import { n as StateMachine, t as AnyStateMachine } from "./StateMachine-DRE1oH2B.js";
4
- import { n as PermissionStatus, t as PermissionResult } from "./types-iZi2rawo.js";
1
+ import { a as CameraStream, d as IMLProviderCapability, f as MLProviderConfig, i as StreamCanvasCapture, l as MotionPermissionState, n as PermissionStatus, o as IRecordingCapability, p as IncodeCanvas, r as BaseWasmProvider, s as RecordingConnection, t as PermissionResult, u as MotionStatus } from "./types-DZbrbPgj.js";
2
+ import { n as StateMachine, t as AnyStateMachine } from "./StateMachine-pi8byl8C.js";
3
+ import { t as Manager } from "./Manager-BZUZTRPx.js";
5
4
 
6
- //#region ../infra/src/media/canvas.d.ts
7
- /**
8
- * Class representing a canvas element for image capture and manipulation.
9
- */
10
- declare class IncodeCanvas {
11
- canvas: HTMLCanvasElement;
12
- private base64Image;
13
- private blobData;
14
- /**
15
- * Creates an {@link IncodeCanvas} from a raw {@link ImageData} frame.
16
- * @param imageData - Frame pixels in RGBA format
17
- * @returns An {@link IncodeCanvas} containing the provided pixels
18
- */
19
- static fromImageData(imageData: ImageData): IncodeCanvas;
20
- /**
21
- * Create a new canvas element.
22
- * @param canvas_ - The canvas element to clone.
23
- */
24
- constructor(canvas_: HTMLCanvasElement);
25
- /**
26
- * Check if the current canvas is valid.
27
- */
28
- private checkCanvas;
29
- /**
30
- * Release the data stored by IncodeCanvas.
31
- */
32
- release(): void;
33
- /**
34
- * Get the width of the canvas.
35
- */
36
- width(): number | null;
37
- /**
38
- * Get the height of the canvas.
39
- */
40
- height(): number | null;
41
- /**
42
- * Set the width of the canvas.
43
- */
44
- setWidth(width: number): void;
45
- /**
46
- * Set the height of the canvas.
47
- */
48
- setHeight(height: number): void;
49
- /**
50
- * Clone the current canvas.
51
- */
52
- clone(): IncodeCanvas | null;
53
- /**
54
- * Deep clone the current IncodeCanvas including blob data.
55
- */
56
- deepClone(): Promise<IncodeCanvas | null>;
57
- /**
58
- * Returns the drawing context on the canvas.
59
- */
60
- getContext(contextId: '2d', contextAttributes?: CanvasRenderingContext2DSettings): CanvasRenderingContext2D | null;
61
- /**
62
- * Retrieves the image data from the canvas.
63
- */
64
- getImageData(): ImageData | null;
65
- /**
66
- * Updates the base64 representation of the current canvas image.
67
- */
68
- updateBase64Image(jpegQuality?: number): void;
69
- /**
70
- * Converts the current canvas element to a base64 string.
71
- */
72
- getBase64Image(jpegQuality?: number, includeDataURLPrefix?: boolean): string | null;
73
- /**
74
- * Sets the base64 representation of the current canvas image.
75
- */
76
- setBase64Image(base64Image: string | null): void;
77
- /**
78
- * Updates the Blob representation of the current canvas image.
79
- */
80
- updateBlob(jpegQuality?: number, includeDataURLPrefix?: boolean): void;
81
- /**
82
- * Converts a base64 string to a Blob and creates a URL for it.
83
- */
84
- static base64ToBlob(base64: string): {
85
- blob: Blob;
86
- url: string;
87
- } | null;
88
- /**
89
- * Retrieves the Blob data and its URL from the current canvas.
90
- */
91
- getBlobData(jpegQuality?: number, includeDataURLPrefix?: boolean): {
92
- blob: Blob;
93
- url: string;
94
- } | null;
95
- /**
96
- * Sets the Blob data of the current canvas image.
97
- */
98
- setBlobData(blobData: {
99
- blob: Blob;
100
- url: string;
101
- }): Promise<void>;
102
- /**
103
- * Returns a resized canvas according to video element size.
104
- */
105
- getResizedCanvas(videoElementWidth: number, videoElementHeight: number): IncodeCanvas | null;
106
- }
107
- //#endregion
108
- //#region ../infra/src/capabilities/IMLProviderCapability.d.ts
109
- /**
110
- * Base configuration shared by all ML provider capabilities.
111
- */
112
- interface MLProviderConfig {
113
- /** Path to the WASM binary */
114
- wasmPath?: string;
115
- /** Path to the SIMD-optimized WASM binary (optional) */
116
- wasmSimdPath?: string;
117
- /** Path to the WASM glue code */
118
- glueCodePath?: string;
119
- /** Whether to use SIMD optimizations (default: true) */
120
- useSimd?: boolean;
121
- /**
122
- * Base path for ML model files. Models will be loaded from `${modelsBasePath}/${modelFileName}`.
123
- * If not provided, models are expected in a 'models' subdirectory relative to the WASM binary.
124
- */
125
- modelsBasePath?: string;
126
- }
127
- /**
128
- * Base interface for ML provider capabilities.
129
- * Provides common lifecycle and frame processing methods shared by
130
- * FaceDetectionCapability and IdCaptureCapability.
131
- */
132
- interface IMLProviderCapability<TConfig extends MLProviderConfig> {
133
- /**
134
- * Whether the provider has been initialized and is ready to process frames.
135
- */
136
- readonly initialized: boolean;
137
- /**
138
- * Initializes the provider with the given configuration.
139
- * If WASM was already warmed up via `setup()` or `warmupWasm()`, this returns almost instantly.
140
- * @param config - Provider configuration including WASM paths
141
- */
142
- initialize(config: TConfig): Promise<void>;
143
- /**
144
- * Processes a frame through the ML pipeline.
145
- * Callbacks set via `setCallbacks()` will be invoked based on the analysis results.
146
- * @param image - Image data to process
147
- * @throws Error if provider is not initialized
148
- */
149
- processFrame(image: ImageData): Promise<void>;
150
- /**
151
- * Resets the pipeline to its initial state.
152
- * Safe to call even if not initialized (no-op in that case).
153
- */
154
- reset(): void;
155
- /**
156
- * Disposes of resources and resets initialization state.
157
- * Safe to call even if not initialized.
158
- */
159
- dispose(): Promise<void>;
160
- }
161
- //#endregion
162
5
  //#region ../infra/src/capabilities/IFaceDetectionCapability.d.ts
6
+
163
7
  /**
164
8
  * Configuration for face detection provider.
165
9
  * Extends base ML provider config with face-detection specific options.
@@ -299,163 +143,19 @@ interface IFaceDetectionCapability extends IMLProviderCapability<FaceDetectionCo
299
143
  setVideoSelfieMode(enabled: boolean): void;
300
144
  }
301
145
  //#endregion
302
- //#region ../infra/src/capabilities/IRecordingCapability.d.ts
303
- type RecordingPublisher = {
304
- getStreamId: () => string | undefined;
305
- replaceVideoTrack: (track: MediaStreamTrack) => Promise<void>;
306
- destroy: () => void;
307
- };
308
- type RecordingConnection = {
309
- sessionId: string | undefined;
310
- publisher: RecordingPublisher;
311
- disconnect: () => Promise<void>;
312
- };
313
- type RecordingConnectionEvents = {
314
- onSessionConnected?: (sessionId: string | undefined) => void;
315
- onSessionDisconnected?: (sessionId: string | undefined) => void;
316
- onSessionException?: (params: {
317
- name?: string;
318
- message?: string;
319
- sessionId?: string;
320
- }) => void;
321
- onPublisherCreated?: (params: {
322
- streamId?: string;
323
- sessionId?: string;
324
- }) => void;
325
- onPublisherError?: (params: {
326
- message?: string;
327
- sessionId?: string;
328
- streamId?: string;
329
- }) => void;
330
- };
331
- type ConnectRecordingParams = {
332
- sessionToken: string;
333
- stream: MediaStream;
334
- events?: RecordingConnectionEvents;
335
- };
336
- type IRecordingCapability = {
337
- /**
338
- * Connects to a recording session and publishes the provided media stream.
339
- * Returns a connection handle that can be disconnected and used to manage the publisher.
340
- */
341
- connect: (params: ConnectRecordingParams) => Promise<RecordingConnection>;
342
- };
343
- //#endregion
344
- //#region ../infra/src/media/camera.d.ts
345
- type CameraStream = MediaStream;
346
- //#endregion
347
- //#region ../infra/src/media/StreamCanvasCapture.d.ts
348
- type StreamCanvasCaptureOptions = {
349
- fps?: number;
350
- width?: number;
351
- height?: number;
352
- };
353
- type StreamCanvasCaptureEventMap = {
354
- frame: Event;
146
+ //#region ../infra/src/capabilities/IDeepsightRecordingCapability.d.ts
147
+ type DeepsightRecordingResult = {
148
+ trimmedBlob: Blob;
149
+ encryptedVideo: string;
355
150
  };
356
- declare class StreamCanvasCapture {
357
- private video;
358
- private canvas;
359
- private ctx;
360
- private rafId;
361
- private lastFrameTimeSeconds;
362
- private lastTickTimeMs;
363
- private hasFrame;
364
- private disposed;
365
- private eventTarget;
366
- constructor(stream: CameraStream, options?: StreamCanvasCaptureOptions);
367
- addEventListener(type: keyof StreamCanvasCaptureEventMap, listener: EventListenerOrEventListenerObject | null, options?: boolean | AddEventListenerOptions): void;
368
- removeEventListener(type: keyof StreamCanvasCaptureEventMap, listener: EventListenerOrEventListenerObject | null, options?: boolean | EventListenerOptions): void;
369
- /**
370
- * Returns the latest cached frame as an {@link IncodeCanvas}.
371
- */
372
- getLatestCanvas(): IncodeCanvas | null;
373
- /**
374
- * Returns the latest cached frame as raw {@link ImageData}.
375
- */
376
- getLatestFrame(): ImageData | null;
377
- /**
378
- * Disposes internal resources and stops the capture loop.
379
- */
380
- dispose(): void;
381
- private rafLoop;
382
- private tick;
383
- }
384
- //#endregion
385
- //#region ../infra/src/wasm/WasmPipelineType.d.ts
386
- declare enum WasmPipelineType {
387
- IdBlurGlarePipeline = 0,
388
- IdBarcodeAndTextQualityPipeline = 1,
389
- IdVideoSelfiePipeline = 2,
390
- SelfieWithAggregationMetrics = 3,
391
- SelfieWithQualityMetrics = 4,
392
- IdFaceDetectionPipeline = 5,
393
- }
394
- //#endregion
395
- //#region ../infra/src/providers/wasm/BaseWasmProvider.d.ts
396
- /**
397
- * Base configuration for WASM providers
398
- */
399
- interface BaseWasmConfig {
400
- /** Path to the WASM binary */
401
- wasmPath?: string;
402
- /** Path to the SIMD-optimized WASM binary (optional) */
403
- wasmSimdPath?: string;
404
- /** Path to the WASM glue code */
405
- glueCodePath?: string;
406
- /** Whether to use SIMD optimizations (default: true) */
407
- useSimd?: boolean;
408
- /**
409
- * Base path for ML model files. Models will be loaded from `${modelsBasePath}/${modelFileName}`.
410
- * If not provided, models are expected in a 'models' subdirectory relative to the WASM binary.
411
- */
412
- modelsBasePath?: string;
413
- }
414
- /**
415
- * Base provider class that abstracts common WASM functionality.
416
- * This serves as a foundation for specific ML capability providers
417
- * like FaceDetectionProvider and IdCaptureProvider.
418
- */
419
- declare abstract class BaseWasmProvider {
420
- private _isInitialized;
421
- protected pipelineType: WasmPipelineType | undefined;
422
- /**
423
- * Creates a new BaseWasmProvider
424
- * @param pipelineType - The WASM pipeline type this provider uses
425
- */
426
- constructor(pipelineType?: WasmPipelineType);
427
- /**
428
- * Returns whether this provider has been initialized.
429
- */
430
- get initialized(): boolean;
431
- protected getPipelineType(): WasmPipelineType;
432
- /**
433
- * Initializes the provider by ensuring WASM is loaded
434
- * @param config - Provider configuration
435
- * @param pipeline - The pipeline type to warm up ('selfie', 'idCapture', etc.)
436
- */
437
- protected initializeBase(config: BaseWasmConfig, pipeline: WasmPipeline): Promise<void>;
438
- /**
439
- * Ensures the provider is initialized before performing operations.
440
- * @throws Error if not initialized
441
- */
442
- protected ensureInitialized(): void;
443
- /**
444
- * Processes a frame through the WASM pipeline
445
- * @param image - Image data to process
446
- */
447
- processFrame(image: ImageData): Promise<void>;
448
- /**
449
- * Resets the pipeline to its initial state.
450
- * Safe to call even if not initialized (no-op in that case).
451
- */
151
+ type IDeepsightRecordingCapability = {
152
+ readonly isRecording: boolean;
153
+ readonly hasError: boolean;
154
+ readonly error: string | null;
155
+ startRecording(stream: MediaStream): void;
156
+ stopRecording(trimSeconds: number, encrypt: (base64: string) => string, generateChecksum: (buffer: ArrayBuffer) => void): Promise<DeepsightRecordingResult>;
452
157
  reset(): void;
453
- /**
454
- * Disposes of resources and resets initialization state.
455
- * Safe to call even if not initialized.
456
- */
457
- dispose(): Promise<void>;
458
- }
158
+ };
459
159
  //#endregion
460
160
  //#region ../infra/src/providers/wasm/FaceDetectionProvider.d.ts
461
161
  declare class FaceDetectionProvider extends BaseWasmProvider implements IFaceDetectionCapability {
@@ -480,6 +180,49 @@ declare class FaceDetectionProvider extends BaseWasmProvider implements IFaceDet
480
180
  private formatFaceCoordinates;
481
181
  }
482
182
  //#endregion
183
+ //#region src/internal/deepsight/metadataService.d.ts
184
+ type MetadataService = {
185
+ initialize(sdkVersion: string, disableIpify?: boolean): Promise<void>;
186
+ updateCameraInfo(videoTrack: MediaStreamTrack): void;
187
+ checkForVirtualCameraByLabel(videoTrack: MediaStreamTrack | null): Promise<boolean>;
188
+ analyzeFrame(imageData: ImageData): Promise<void>;
189
+ setMotionStatus(status: string): void;
190
+ setBackgroundMode(backgroundMode: boolean): void;
191
+ estimatePerformance(): string;
192
+ getMetadata(): string;
193
+ getCheck(): string;
194
+ };
195
+ //#endregion
196
+ //#region src/internal/deepsight/motionStatusService.d.ts
197
+ type MotionStatusService = {
198
+ requestPermission(): Promise<MotionPermissionState>;
199
+ start(): Promise<void>;
200
+ stop(): void;
201
+ check(): MotionStatus;
202
+ readonly isRunning: boolean;
203
+ readonly hasPermission: boolean;
204
+ };
205
+ //#endregion
206
+ //#region src/internal/deepsight/deepsightService.d.ts
207
+ type DeepsightCaptureResult = {
208
+ metadata: string;
209
+ recordingId: string | null;
210
+ };
211
+ type DeepsightService = {
212
+ readonly metadata: MetadataService;
213
+ readonly motion: MotionStatusService;
214
+ readonly recorder: IDeepsightRecordingCapability;
215
+ initialize(disableIpify?: boolean): Promise<void>;
216
+ requestMotionPermission(): Promise<'granted' | 'denied' | 'not-required'>;
217
+ startMotionSensors(): Promise<void>;
218
+ stopMotionSensors(): void;
219
+ startRecording(stream: MediaStream): void;
220
+ checkVirtualCamera(videoTrack: MediaStreamTrack): Promise<boolean>;
221
+ performVirtualCameraCheck(): Promise<void>;
222
+ performCapture(sessionToken: string, imageData: ImageData): Promise<DeepsightCaptureResult>;
223
+ cleanup(): void;
224
+ };
225
+ //#endregion
483
226
  //#region src/modules/selfie/types.d.ts
484
227
  type SelfieConfig = {
485
228
  showTutorial: boolean;
@@ -551,6 +294,15 @@ type RecordingSession = {
551
294
  hasAudio: boolean;
552
295
  };
553
296
  //#endregion
297
+ //#region src/modules/selfie/selfieUploadService.d.ts
298
+ type ProcessFaceImageType = 'selfie' | 'videoSelfie';
299
+ type ProcessFaceResponse = {
300
+ faceMatch: boolean;
301
+ confidence: number;
302
+ existingUser: boolean;
303
+ };
304
+ declare function processFace(imageType?: ProcessFaceImageType, signal?: AbortSignal): Promise<ProcessFaceResponse>;
305
+ //#endregion
554
306
  //#region src/modules/selfie/selfieStateMachine.d.ts
555
307
  type SelfieContext = {
556
308
  config: SelfieConfig;
@@ -563,12 +315,14 @@ type SelfieContext = {
563
315
  capturedImage: IncodeCanvas | undefined;
564
316
  faceCoordinates: FaceCoordinates | undefined;
565
317
  uploadResponse: SendFaceImageResponse | undefined;
318
+ processResponse: ProcessFaceResponse | undefined;
566
319
  recordingSession: RecordingSession | undefined;
567
320
  recordingStream: MediaStream | undefined;
568
321
  attemptsRemaining: number;
569
322
  uploadError: FaceErrorCode | undefined;
570
323
  permissionResult: PermissionResult | 'refresh' | undefined;
571
324
  resetDetection: (() => void) | undefined;
325
+ deepsightService: DeepsightService | undefined;
572
326
  };
573
327
  type SelfieEvent = {
574
328
  type: 'LOAD';
@@ -659,9 +413,15 @@ type SelfieCaptureState = {
659
413
  /** Error message from failed upload */
660
414
  uploadError: string | undefined;
661
415
  };
416
+ /** Processing the captured selfie */
417
+ type SelfieProcessingState = {
418
+ status: 'processing';
419
+ };
662
420
  /** Selfie capture completed successfully */
663
421
  type SelfieFinishedState = {
664
422
  status: 'finished';
423
+ /** Face processing result (face match, confidence, existing user) */
424
+ processResponse: ProcessFaceResponse | undefined;
665
425
  };
666
426
  /** User closed the selfie flow */
667
427
  type SelfieClosedState = {
@@ -674,7 +434,7 @@ type SelfieErrorState = {
674
434
  error: string;
675
435
  };
676
436
  /** Union of all possible selfie states */
677
- type SelfieState = SelfieIdleState | SelfieLoadingState | SelfieTutorialState | SelfiePermissionsState | SelfieCaptureState | SelfieFinishedState | SelfieClosedState | SelfieErrorState;
437
+ type SelfieState = SelfieIdleState | SelfieLoadingState | SelfieTutorialState | SelfiePermissionsState | SelfieCaptureState | SelfieProcessingState | SelfieFinishedState | SelfieClosedState | SelfieErrorState;
678
438
  /**
679
439
  * Creates a selfie manager instance for handling selfie capture flow.
680
440
  *
@@ -756,4 +516,4 @@ declare function createSelfieManager(options: CreateSelfieActorOptions): Manager
756
516
  };
757
517
  type SelfieManager = ReturnType<typeof createSelfieManager>;
758
518
  //#endregion
759
- export { type CameraStream, type DetectionStatus, type FaceErrorCode, type PermissionResult, type PermissionStatus, type SelfieConfig, type SelfieMachine, type SelfieManager, type SelfieState, createSelfieManager, selfieMachine };
519
+ export { type CameraStream, type DeepsightService, type DetectionStatus, type FaceErrorCode, type PermissionResult, type PermissionStatus, type ProcessFaceResponse, type SelfieConfig, type SelfieMachine, type SelfieManager, type SelfieState, createSelfieManager, processFace, selfieMachine };