@souscheflabs/ml-vision 0.1.0

Files changed (48)
  1. package/README.md +274 -0
  2. package/dist/components/DetectionOverlay.d.ts +57 -0
  3. package/dist/components/DetectionOverlay.js +133 -0
  4. package/dist/components/index.d.ts +4 -0
  5. package/dist/components/index.js +9 -0
  6. package/dist/core/CacheManager.d.ts +168 -0
  7. package/dist/core/CacheManager.js +331 -0
  8. package/dist/core/MLVisionProvider.d.ts +90 -0
  9. package/dist/core/MLVisionProvider.js +188 -0
  10. package/dist/core/ServerClient.d.ts +131 -0
  11. package/dist/core/ServerClient.js +291 -0
  12. package/dist/core/index.d.ts +6 -0
  13. package/dist/core/index.js +18 -0
  14. package/dist/hooks/classLabels.d.ts +35 -0
  15. package/dist/hooks/classLabels.js +439 -0
  16. package/dist/hooks/classLabelsCoco.d.ts +43 -0
  17. package/dist/hooks/classLabelsCoco.js +103 -0
  18. package/dist/hooks/index.d.ts +8 -0
  19. package/dist/hooks/index.js +27 -0
  20. package/dist/hooks/useMultiBarcodeScanner.d.ts +34 -0
  21. package/dist/hooks/useMultiBarcodeScanner.js +290 -0
  22. package/dist/hooks/useProductDetector.d.ts +38 -0
  23. package/dist/hooks/useProductDetector.js +679 -0
  24. package/dist/hooks/useReceiptScanner.d.ts +37 -0
  25. package/dist/hooks/useReceiptScanner.js +405 -0
  26. package/dist/hooks/useVideoScanner.d.ts +118 -0
  27. package/dist/hooks/useVideoScanner.js +383 -0
  28. package/dist/index.d.ts +58 -0
  29. package/dist/index.js +130 -0
  30. package/dist/processors/detectionProcessor.d.ts +86 -0
  31. package/dist/processors/detectionProcessor.js +124 -0
  32. package/dist/processors/index.d.ts +5 -0
  33. package/dist/processors/index.js +16 -0
  34. package/dist/processors/tfliteFrameProcessor.d.ts +90 -0
  35. package/dist/processors/tfliteFrameProcessor.js +213 -0
  36. package/dist/types/barcode.d.ts +91 -0
  37. package/dist/types/barcode.js +19 -0
  38. package/dist/types/detection.d.ts +166 -0
  39. package/dist/types/detection.js +8 -0
  40. package/dist/types/index.d.ts +126 -0
  41. package/dist/types/index.js +25 -0
  42. package/dist/types/ocr.d.ts +202 -0
  43. package/dist/types/ocr.js +8 -0
  44. package/dist/utils/imagePreprocessor.d.ts +85 -0
  45. package/dist/utils/imagePreprocessor.js +304 -0
  46. package/dist/utils/yoloProcessor.d.ts +40 -0
  47. package/dist/utils/yoloProcessor.js +154 -0
  48. package/package.json +78 -0
package/dist/processors/detectionProcessor.js
@@ -0,0 +1,124 @@
+ "use strict";
+ /**
+  * Detection Frame Processor
+  *
+  * VisionCamera frame processor for real-time product detection.
+  * Uses react-native-fast-tflite for on-device inference.
+  *
+  * @example
+  * ```typescript
+  * import { useFrameProcessor } from 'react-native-vision-camera';
+  * import { detectProducts } from '@souscheflabs/ml-vision/processors';
+  *
+  * function CameraScreen() {
+  *   const frameProcessor = useFrameProcessor((frame) => {
+  *     'worklet';
+  *     const detections = detectProducts(frame);
+  *     // Handle detections...
+  *   }, []);
+  *
+  *   return <Camera frameProcessor={frameProcessor} />;
+  * }
+  * ```
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.CLASS_LABELS = exports.NUM_CLASSES = void 0;
+ exports.createDetectionProcessor = createDetectionProcessor;
+ exports.frameDetectionToResult = frameDetectionToResult;
+ exports.getClassLabel = getClassLabel;
+ const classLabels_1 = require("../hooks/classLabels");
+ Object.defineProperty(exports, "CLASS_LABELS", { enumerable: true, get: function () { return classLabels_1.CLASS_LABELS; } });
+ Object.defineProperty(exports, "NUM_CLASSES", { enumerable: true, get: function () { return classLabels_1.NUM_CLASSES; } });
+ // ============================================================================
+ // Frame Counter for Skipping
+ // ============================================================================
+ let frameCounter = 0;
+ // ============================================================================
+ // Frame Processor Plugin
+ // ============================================================================
+ /**
+  * Create a detection frame processor
+  *
+  * This returns a function that can be used with VisionCamera's useFrameProcessor.
+  *
+  * @example
+  * ```typescript
+  * const detectFrame = createDetectionProcessor({
+  *   minConfidence: 0.5,
+  *   maxDetections: 10,
+  * });
+  *
+  * const frameProcessor = useFrameProcessor((frame) => {
+  *   'worklet';
+  *   const detections = detectFrame(frame);
+  *   runOnJS(handleDetections)(detections);
+  * }, []);
+  * ```
+  */
+ function createDetectionProcessor(options = {}) {
+     const { frameSkip = 3 } = options;
+     /**
+      * The actual frame processor function
+      *
+      * Note: This is a placeholder that shows the expected interface.
+      * The actual TFLite inference requires native module integration
+      * via react-native-fast-tflite's useRunOnce or similar.
+      */
+     return function detectProductsInFrame(_frame) {
+         'worklet';
+         // Frame skipping for performance
+         frameCounter++;
+         if (frameCounter % frameSkip !== 0) {
+             return [];
+         }
+         // In a real implementation, this would:
+         // 1. Get frame data from VisionCamera
+         // 2. Preprocess for TFLite input
+         // 3. Run inference via react-native-fast-tflite
+         // 4. Post-process output
+         // Placeholder - actual implementation requires native TFLite integration
+         // with VisionCamera frame processors using Reanimated worklets
+         return [];
+     };
+ }
+ // ============================================================================
+ // Utility Functions
+ // ============================================================================
+ /**
+  * Convert FrameDetection to ProductDetectionResult
+  *
+  * Call this on the JS thread after receiving worklet results.
+  */
+ function frameDetectionToResult(detection, imageWidth, imageHeight) {
+     const classInfo = classLabels_1.CLASS_LABELS[detection.classIndex] ?? {
+         label: `class_${detection.classIndex}`,
+         category: 'unknown',
+     };
+     const boundingBox = {
+         x: detection.box.x * imageWidth,
+         y: detection.box.y * imageHeight,
+         width: detection.box.width * imageWidth,
+         height: detection.box.height * imageHeight,
+     };
+     return {
+         id: `frame_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`,
+         type: 'product',
+         confidence: detection.confidence,
+         boundingBox,
+         data: {
+             category: classInfo.category,
+             classLabel: classInfo.label,
+             classIndex: detection.classIndex,
+             name: classInfo.label.replace(/_/g, ' '),
+         },
+         source: 'on_device',
+         processingTimeMs: 0, // Frame processor timing handled externally
+         timestamp: Date.now(),
+     };
+ }
+ /**
+  * Get label for class index (can be called from worklet via shared value)
+  */
+ function getClassLabel(classIndex) {
+     return classLabels_1.CLASS_LABELS[classIndex]?.label ?? `class_${classIndex}`;
+ }
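
Note: `createDetectionProcessor` above ships as a stub that always returns an empty array, so the intended wiring only appears in its JSDoc. A minimal consumer sketch, assuming the scoped import path from this package and a hypothetical `handleDetections` JS-thread handler:

```typescript
import { Camera, useFrameProcessor } from 'react-native-vision-camera';
import { runOnJS } from 'react-native-reanimated';
import {
  createDetectionProcessor,
  frameDetectionToResult,
} from '@souscheflabs/ml-vision/processors';

// Process every 3rd frame (the package default).
const detectFrame = createDetectionProcessor({ frameSkip: 3 });

function CameraScreen() {
  // Hypothetical JS-thread handler: expand worklet results into full
  // detection results using the frame dimensions.
  const handleDetections = (detections, width, height) => {
    const results = detections.map((d) => frameDetectionToResult(d, width, height));
    // ...update state, draw an overlay, etc.
  };

  const frameProcessor = useFrameProcessor((frame) => {
    'worklet';
    const detections = detectFrame(frame);
    if (detections.length > 0) {
      // frameDetectionToResult is JS-thread-only, so hand results off here.
      runOnJS(handleDetections)(detections, frame.width, frame.height);
    }
  }, []);

  return <Camera frameProcessor={frameProcessor} /* device, isActive, ... */ />;
}
```

`frameDetectionToResult` scales the normalized boxes by the image dimensions, which is why the frame's width and height are forwarded through `runOnJS` along with the detections.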
package/dist/processors/index.d.ts
@@ -0,0 +1,5 @@
+ /**
+  * Processors exports
+  */
+ export { createDetectionProcessor, frameDetectionToResult, getClassLabel, NUM_CLASSES, CLASS_LABELS, type FrameDetection, type DetectionProcessorOptions, } from './detectionProcessor';
+ export { useTFLiteFrameProcessor, createTFLiteFrameProcessor, type FrameDetection as TFLiteFrameDetection, type FrameProcessorResult, type UseTFLiteFrameProcessorOptions, } from './tfliteFrameProcessor';
package/dist/processors/index.js
@@ -0,0 +1,16 @@
+ "use strict";
+ /**
+  * Processors exports
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.createTFLiteFrameProcessor = exports.useTFLiteFrameProcessor = exports.CLASS_LABELS = exports.NUM_CLASSES = exports.getClassLabel = exports.frameDetectionToResult = exports.createDetectionProcessor = void 0;
+ var detectionProcessor_1 = require("./detectionProcessor");
+ Object.defineProperty(exports, "createDetectionProcessor", { enumerable: true, get: function () { return detectionProcessor_1.createDetectionProcessor; } });
+ Object.defineProperty(exports, "frameDetectionToResult", { enumerable: true, get: function () { return detectionProcessor_1.frameDetectionToResult; } });
+ Object.defineProperty(exports, "getClassLabel", { enumerable: true, get: function () { return detectionProcessor_1.getClassLabel; } });
+ Object.defineProperty(exports, "NUM_CLASSES", { enumerable: true, get: function () { return detectionProcessor_1.NUM_CLASSES; } });
+ Object.defineProperty(exports, "CLASS_LABELS", { enumerable: true, get: function () { return detectionProcessor_1.CLASS_LABELS; } });
+ // TFLite Frame Processor for real-time video scanning
+ var tfliteFrameProcessor_1 = require("./tfliteFrameProcessor");
+ Object.defineProperty(exports, "useTFLiteFrameProcessor", { enumerable: true, get: function () { return tfliteFrameProcessor_1.useTFLiteFrameProcessor; } });
+ Object.defineProperty(exports, "createTFLiteFrameProcessor", { enumerable: true, get: function () { return tfliteFrameProcessor_1.createTFLiteFrameProcessor; } });
package/dist/processors/tfliteFrameProcessor.d.ts
@@ -0,0 +1,90 @@
+ /**
+  * TFLite Frame Processor for Real-time Video Detection
+  *
+  * Uses vision-camera-resize-plugin to resize frames and run
+  * TFLite inference directly in the frame processor worklet.
+  *
+  * Requirements:
+  * - npm install vision-camera-resize-plugin
+  * - npm install react-native-fast-tflite
+  * - npm install react-native-worklets-core
+  */
+ import type { TensorflowModel } from 'react-native-fast-tflite';
+ import type { Frame } from 'react-native-vision-camera';
+ import { useResizePlugin } from 'vision-camera-resize-plugin';
+ export interface FrameDetection {
+     x: number;
+     y: number;
+     width: number;
+     height: number;
+     classIndex: number;
+     confidence: number;
+     label: string;
+ }
+ export interface FrameProcessorResult {
+     detections: FrameDetection[];
+     inferenceTimeMs: number;
+     frameWidth: number;
+     frameHeight: number;
+ }
+ export interface UseTFLiteFrameProcessorOptions {
+     /** Loaded TFLite model */
+     model: TensorflowModel | null;
+     /** Model input size (default: 640) */
+     inputSize?: number;
+     /** Number of classes in model (default: 80 for COCO) */
+     numClasses?: number;
+     /** Confidence threshold (default: 0.25) */
+     confThreshold?: number;
+     /** IoU threshold for NMS (default: 0.45) */
+     iouThreshold?: number;
+     /** Class labels array */
+     classLabels?: string[];
+     /** Only return food classes (for COCO) */
+     foodOnly?: boolean;
+     /** Callback when detections are found */
+     onDetections?: (detections: FrameDetection[]) => void;
+ }
+ /**
+  * Hook to create a TFLite-powered frame processor
+  *
+  * @example
+  * ```typescript
+  * const { frameProcessor, detections, isProcessing } = useTFLiteFrameProcessor({
+  *   model: loadedModel,
+  *   numClasses: 80,
+  *   foodOnly: true,
+  * });
+  *
+  * // Use with VisionCamera
+  * <Camera frameProcessor={frameProcessor} />
+  * ```
+  */
+ export declare function useTFLiteFrameProcessor(options: UseTFLiteFrameProcessorOptions): {
+     frameProcessor: ((frame: Frame) => void) | undefined;
+     detections: FrameDetection[];
+     isProcessing: boolean;
+     isReady: boolean;
+ };
+ /** Type for the resize function from useResizePlugin */
+ type ResizeFunction = ReturnType<typeof useResizePlugin>['resize'];
+ /**
+  * Create a simple frame processor function for use with useFrameProcessor
+  *
+  * This is for manual integration when you need more control.
+  * The resize function must be obtained from the useResizePlugin hook.
+  *
+  * @example
+  * ```typescript
+  * const { resize } = useResizePlugin();
+  * const processor = createTFLiteFrameProcessor(model, resize, { foodOnly: true });
+  * ```
+  */
+ export declare function createTFLiteFrameProcessor(model: TensorflowModel, resizeFn: ResizeFunction, options?: {
+     inputSize?: number;
+     numClasses?: number;
+     confThreshold?: number;
+     classLabels?: string[];
+     foodOnly?: boolean;
+ }): (frame: Frame) => FrameDetection[];
+ export {};
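
These declarations leave loading `model` to the caller. With react-native-fast-tflite it would typically come from that library's `useTensorflowModel` hook; a sketch assuming a bundled YOLOv8 asset (the asset path and component are illustrative, not part of this package):

```typescript
import { Camera, useCameraDevice } from 'react-native-vision-camera';
import { useTensorflowModel } from 'react-native-fast-tflite';
import { useTFLiteFrameProcessor } from '@souscheflabs/ml-vision/processors';

function FoodScanner() {
  const device = useCameraDevice('back');
  // Hypothetical asset; any YOLOv8-style model with a 640x640 input fits the defaults.
  const plugin = useTensorflowModel(require('./assets/yolov8n.tflite'));
  const model = plugin.state === 'loaded' ? plugin.model : null;

  const { frameProcessor, detections, isReady } = useTFLiteFrameProcessor({
    model,
    numClasses: 80,      // COCO
    confThreshold: 0.25, // package default
    foodOnly: true,      // keep only the 10 COCO food classes (indices 46-55)
  });

  if (!device || !isReady) return null;
  // `detections` can then drive an overlay such as the package's DetectionOverlay.
  return <Camera device={device} isActive frameProcessor={frameProcessor} />;
}
```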
package/dist/processors/tfliteFrameProcessor.js
@@ -0,0 +1,213 @@
+ "use strict";
+ /**
+  * TFLite Frame Processor for Real-time Video Detection
+  *
+  * Uses vision-camera-resize-plugin to resize frames and run
+  * TFLite inference directly in the frame processor worklet.
+  *
+  * Requirements:
+  * - npm install vision-camera-resize-plugin
+  * - npm install react-native-fast-tflite
+  * - npm install react-native-worklets-core
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.useTFLiteFrameProcessor = useTFLiteFrameProcessor;
+ exports.createTFLiteFrameProcessor = createTFLiteFrameProcessor;
+ const react_1 = require("react");
+ const react_native_reanimated_1 = require("react-native-reanimated");
+ const react_native_worklets_core_1 = require("react-native-worklets-core");
+ const vision_camera_resize_plugin_1 = require("vision-camera-resize-plugin");
+ // COCO food class indices
+ const COCO_FOOD_INDICES = new Set([46, 47, 48, 49, 50, 51, 52, 53, 54, 55]);
+ // COCO class names
+ const COCO_LABELS = [
+     'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
+     'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench',
+     'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra',
+     'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+     'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
+     'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
+     'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
+     'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+     'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse',
+     'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+     'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
+     'toothbrush'
+ ];
+ /**
+  * Process YOLOv8 output in worklet (simplified for worklet compatibility)
+  * This runs synchronously in the frame processor thread
+  */
+ function processYoloOutputWorklet(output, numClasses, confThreshold, _iouThreshold, labels, foodOnly) {
+     'worklet';
+     const data = new Float32Array(output);
+     const numPredictions = 8400;
+     const detections = [];
+     // Extract detections above threshold
+     for (let i = 0; i < numPredictions; i++) {
+         let maxScore = 0;
+         let maxClassIdx = 0;
+         // Find best class
+         for (let c = 0; c < numClasses; c++) {
+             const score = data[(4 + c) * numPredictions + i];
+             if (score > maxScore) {
+                 maxScore = score;
+                 maxClassIdx = c;
+             }
+         }
+         if (maxScore >= confThreshold) {
+             // Filter for food classes if requested
+             if (foodOnly && !COCO_FOOD_INDICES.has(maxClassIdx)) {
+                 continue;
+             }
+             // Extract box (center x, center y, width, height)
+             const cx = data[0 * numPredictions + i];
+             const cy = data[1 * numPredictions + i];
+             const w = data[2 * numPredictions + i];
+             const h = data[3 * numPredictions + i];
+             detections.push({
+                 x: Math.max(0, cx - w / 2),
+                 y: Math.max(0, cy - h / 2),
+                 width: w,
+                 height: h,
+                 classIndex: maxClassIdx,
+                 confidence: maxScore,
+                 label: labels[maxClassIdx] || `class_${maxClassIdx}`,
+             });
+         }
+     }
+     // Simple NMS (keep top detection per class)
+     const kept = [];
+     const seenClasses = new Set();
+     // Sort by confidence
+     detections.sort((a, b) => b.confidence - a.confidence);
+     for (const det of detections) {
+         if (!seenClasses.has(det.classIndex)) {
+             kept.push(det);
+             seenClasses.add(det.classIndex);
+         }
+     }
+     return kept.slice(0, 10); // Max 10 detections
+ }
+ /**
+  * Hook to create a TFLite-powered frame processor
+  *
+  * @example
+  * ```typescript
+  * const { frameProcessor, detections, isProcessing } = useTFLiteFrameProcessor({
+  *   model: loadedModel,
+  *   numClasses: 80,
+  *   foodOnly: true,
+  * });
+  *
+  * // Use with VisionCamera
+  * <Camera frameProcessor={frameProcessor} />
+  * ```
+  */
+ function useTFLiteFrameProcessor(options) {
+     const { model, inputSize = 640, numClasses = 80, confThreshold = 0.25, iouThreshold = 0.45, classLabels = COCO_LABELS, foodOnly = false, onDetections, } = options;
+     const [detections, setDetections] = (0, react_1.useState)([]);
+     const [isProcessing, _setIsProcessing] = (0, react_1.useState)(false);
+     const [frameCount, setFrameCount] = (0, react_1.useState)(0);
+     // Use shared value for throttle timing (accessible from worklet)
+     const lastProcessTime = (0, react_native_reanimated_1.useSharedValue)(0);
+     // Use the resize plugin hook
+     const { resize } = (0, vision_camera_resize_plugin_1.useResizePlugin)();
+     // Log state on mount and when model changes
+     (0, react_1.useEffect)(() => {
+         console.log('[TFLiteFrameProcessor] Hook state:', {
+             model: !!model,
+             resize: !!resize,
+         });
+     }, [model, resize]);
+     // Debug: log frame count changes
+     (0, react_1.useEffect)(() => {
+         if (frameCount > 0) {
+             console.log(`[TFLiteFrameProcessor] Processed ${frameCount} frames`);
+         }
+     }, [frameCount]);
+     // Helper to log from worklet
+     const logFromWorklet = (0, react_1.useCallback)((msg) => {
+         console.log(msg);
+     }, []);
+     const incrementFrameCount = (0, react_1.useCallback)(() => {
+         setFrameCount(prev => prev + 1);
+     }, []);
+     // Create frame processor
+     const frameProcessor = (0, react_1.useCallback)((frame) => {
+         'worklet';
+         if (!model) {
+             react_native_worklets_core_1.Worklets.runOnJS(() => logFromWorklet('[TFLiteFrameProcessor] No model'));
+             return;
+         }
+         // Throttle to ~10fps for processing
+         const now = Date.now();
+         if (now - lastProcessTime.value < 100) {
+             return;
+         }
+         lastProcessTime.value = now;
+         react_native_worklets_core_1.Worklets.runOnJS(() => incrementFrameCount());
+         try {
+             // Resize frame to model input size and get RGB float32 tensor
+             const resized = resize(frame, {
+                 scale: {
+                     width: inputSize,
+                     height: inputSize,
+                 },
+                 pixelFormat: 'rgb',
+                 dataType: 'float32',
+             });
+             // Run TFLite inference synchronously in worklet
+             const outputs = model.runSync([resized]);
+             // Process YOLO output
+             // outputs[0] is a TypedArray, we need its underlying buffer
+             const outputData = outputs[0];
+             const frameDetections = processYoloOutputWorklet(outputData.buffer ?? outputs[0], numClasses, confThreshold, iouThreshold, classLabels, foodOnly);
+             // Update state on JS thread
+             if (frameDetections.length > 0) {
+                 react_native_worklets_core_1.Worklets.runOnJS(() => setDetections(frameDetections));
+                 if (onDetections) {
+                     react_native_worklets_core_1.Worklets.runOnJS(() => onDetections(frameDetections));
+                 }
+             }
+         }
+         catch (error) {
+             react_native_worklets_core_1.Worklets.runOnJS(() => logFromWorklet(`[TFLiteFrameProcessor] Error: ${error}`));
+         }
+     }, [model, resize, inputSize, numClasses, confThreshold, iouThreshold, classLabels, foodOnly, onDetections, lastProcessTime, logFromWorklet, incrementFrameCount]);
+     return {
+         frameProcessor: model ? frameProcessor : undefined,
+         detections,
+         isProcessing,
+         isReady: !!model,
+     };
+ }
+ /**
+  * Create a simple frame processor function for use with useFrameProcessor
+  *
+  * This is for manual integration when you need more control.
+  * The resize function must be obtained from the useResizePlugin hook.
+  *
+  * @example
+  * ```typescript
+  * const { resize } = useResizePlugin();
+  * const processor = createTFLiteFrameProcessor(model, resize, { foodOnly: true });
+  * ```
+  */
+ function createTFLiteFrameProcessor(model, resizeFn, options = {}) {
+     const { inputSize = 640, numClasses = 80, confThreshold = 0.25, classLabels = COCO_LABELS, foodOnly = false, } = options;
+     return (frame) => {
+         'worklet';
+         // Resize to model input
+         const resized = resizeFn(frame, {
+             scale: { width: inputSize, height: inputSize },
+             pixelFormat: 'rgb',
+             dataType: 'float32',
+         });
+         // Run inference
+         const outputs = model.runSync([resized]);
+         // Process output - access the underlying buffer from TypedArray
+         const outputData = outputs[0];
+         return processYoloOutputWorklet(outputData.buffer ?? outputs[0], numClasses, confThreshold, 0.45, classLabels, foodOnly);
+     };
+ }
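
A note on the index arithmetic in `processYoloOutputWorklet`: it assumes the transposed YOLOv8 export layout `[1, 4 + numClasses, 8400]`, in which the flat output buffer stores one 8400-wide row per attribute rather than one record per candidate box. A small sketch of that layout:

```typescript
// Why 8400 candidates? YOLOv8 at 640x640 concatenates three detection grids:
//   (640/8)^2 + (640/16)^2 + (640/32)^2 = 6400 + 1600 + 400 = 8400
const NUM_PREDICTIONS = 8400;

// Rows 0-3 hold cx, cy, w, h; row 4 + c holds the score for class c.
// Flat index of (row, candidate) in the [84 x 8400] COCO output buffer:
function yoloIndex(row: number, candidate: number): number {
  return row * NUM_PREDICTIONS + candidate;
}

// e.g. the score of COCO class 47 ('apple') for candidate 123 is
//   data[yoloIndex(4 + 47, 123)]
// which matches data[(4 + c) * numPredictions + i] in the worklet above.
```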
package/dist/types/barcode.d.ts
@@ -0,0 +1,91 @@
+ /**
+  * Barcode Detection Types
+  *
+  * Types for multi-barcode scanning functionality.
+  * Designed to be compatible with react-native-vision-camera's CodeScanner.
+  */
+ import type { BaseDetectionResult } from './index';
+ /**
+  * Supported barcode formats
+  * Matches VisionCamera's CodeType for compatibility
+  */
+ export type BarcodeFormat = 'ean-13' | 'ean-8' | 'upc-a' | 'upc-e' | 'code-128' | 'code-39' | 'code-93' | 'itf' | 'codabar' | 'qr' | 'data-matrix' | 'pdf-417' | 'aztec';
+ /**
+  * Common grocery barcode formats (optimized subset)
+  */
+ export declare const GROCERY_BARCODE_FORMATS: BarcodeFormat[];
+ /**
+  * Data extracted from a scanned barcode
+  */
+ export interface BarcodeData {
+     /** The barcode format */
+     format: BarcodeFormat;
+     /** The decoded barcode value */
+     value: string;
+     /** Raw bytes if available */
+     rawValue?: string;
+     /** Display value (formatted for UI) */
+     displayValue?: string;
+ }
+ /**
+  * Complete barcode detection result
+  */
+ export interface BarcodeDetectionResult extends BaseDetectionResult {
+     type: 'barcode';
+     data: BarcodeData;
+ }
+ /**
+  * Options for useMultiBarcodeScanner hook
+  */
+ export interface UseMultiBarcodeScannerOptions {
+     /** Barcode formats to scan for (default: GROCERY_BARCODE_FORMATS) */
+     formats?: BarcodeFormat[];
+     /** Maximum number of barcodes to detect per frame (default: 20) */
+     maxBarcodes?: number;
+     /** Minimum interval between scans in ms (default: 100) */
+     scanInterval?: number;
+     /** Callback when barcodes are detected */
+     onDetected?: (barcodes: BarcodeDetectionResult[]) => void;
+     /** Callback on error */
+     onError?: (error: Error) => void;
+     /** Enable haptic feedback on detection (default: true) */
+     hapticFeedback?: boolean;
+     /** Deduplicate barcodes within session (default: true) */
+     deduplicate?: boolean;
+ }
+ /**
+  * Return type for useMultiBarcodeScanner hook
+  */
+ export interface UseMultiBarcodeScannerReturn {
+     /** Whether the scanner is initialized and ready */
+     isReady: boolean;
+     /** Whether actively scanning */
+     isScanning: boolean;
+     /** Array of detected barcodes */
+     results: BarcodeDetectionResult[];
+     /** Current error if any */
+     error: Error | null;
+     /** Start continuous scanning */
+     startScanning: () => void;
+     /** Stop scanning */
+     stopScanning: () => void;
+     /** Scan a single photo for barcodes */
+     scanPhoto: (uri: string) => Promise<BarcodeDetectionResult[]>;
+     /** Clear all results */
+     clearResults: () => void;
+     /** Reset scanner state */
+     reset: () => void;
+     frameProcessor: unknown;
+ }
+ /**
+  * Cached barcode lookup result
+  * Maps barcode value to product data (from your backend)
+  */
+ export interface BarcodeCacheEntry {
+     barcode: string;
+     format: BarcodeFormat;
+     productId?: string;
+     productName?: string;
+     cachedAt: number;
+     ttl: number;
+ }
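
The `cachedAt`/`ttl` pair on `BarcodeCacheEntry` implies an expiry check along these lines (a sketch; `isFresh` is not part of the package, both fields are assumed to be in milliseconds, and the import path from the package root is assumed):

```typescript
import type { BarcodeCacheEntry } from '@souscheflabs/ml-vision';

// Hypothetical helper: an entry is usable while its TTL has not elapsed.
function isFresh(entry: BarcodeCacheEntry, now: number = Date.now()): boolean {
  return now - entry.cachedAt < entry.ttl;
}
```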
package/dist/types/barcode.js
@@ -0,0 +1,19 @@
+ "use strict";
+ /**
+  * Barcode Detection Types
+  *
+  * Types for multi-barcode scanning functionality.
+  * Designed to be compatible with react-native-vision-camera's CodeScanner.
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.GROCERY_BARCODE_FORMATS = void 0;
+ /**
+  * Common grocery barcode formats (optimized subset)
+  */
+ exports.GROCERY_BARCODE_FORMATS = [
+     'ean-13',
+     'ean-8',
+     'upc-a',
+     'upc-e',
+     'qr',
+ ];
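
Because `BarcodeFormat` matches VisionCamera's `CodeType` strings, this subset can be passed straight to VisionCamera's `useCodeScanner`. A sketch, assuming `GROCERY_BARCODE_FORMATS` is re-exported from the package root:

```typescript
import { Camera, useCodeScanner, type CameraDevice } from 'react-native-vision-camera';
import { GROCERY_BARCODE_FORMATS } from '@souscheflabs/ml-vision';

function GroceryScanner({ device }: { device: CameraDevice }) {
  const codeScanner = useCodeScanner({
    codeTypes: GROCERY_BARCODE_FORMATS, // ean-13, ean-8, upc-a, upc-e, qr
    onCodeScanned: (codes) => {
      for (const code of codes) {
        console.log(`${code.type}: ${code.value}`);
      }
    },
  });

  return <Camera device={device} isActive codeScanner={codeScanner} />;
}
```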