@souscheflabs/ml-vision 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. package/README.md +274 -0
  2. package/dist/components/DetectionOverlay.d.ts +57 -0
  3. package/dist/components/DetectionOverlay.js +133 -0
  4. package/dist/components/index.d.ts +4 -0
  5. package/dist/components/index.js +9 -0
  6. package/dist/core/CacheManager.d.ts +168 -0
  7. package/dist/core/CacheManager.js +331 -0
  8. package/dist/core/MLVisionProvider.d.ts +90 -0
  9. package/dist/core/MLVisionProvider.js +188 -0
  10. package/dist/core/ServerClient.d.ts +131 -0
  11. package/dist/core/ServerClient.js +291 -0
  12. package/dist/core/index.d.ts +6 -0
  13. package/dist/core/index.js +18 -0
  14. package/dist/hooks/classLabels.d.ts +35 -0
  15. package/dist/hooks/classLabels.js +439 -0
  16. package/dist/hooks/classLabelsCoco.d.ts +43 -0
  17. package/dist/hooks/classLabelsCoco.js +103 -0
  18. package/dist/hooks/index.d.ts +8 -0
  19. package/dist/hooks/index.js +27 -0
  20. package/dist/hooks/useMultiBarcodeScanner.d.ts +34 -0
  21. package/dist/hooks/useMultiBarcodeScanner.js +290 -0
  22. package/dist/hooks/useProductDetector.d.ts +38 -0
  23. package/dist/hooks/useProductDetector.js +679 -0
  24. package/dist/hooks/useReceiptScanner.d.ts +37 -0
  25. package/dist/hooks/useReceiptScanner.js +405 -0
  26. package/dist/hooks/useVideoScanner.d.ts +118 -0
  27. package/dist/hooks/useVideoScanner.js +383 -0
  28. package/dist/index.d.ts +58 -0
  29. package/dist/index.js +130 -0
  30. package/dist/processors/detectionProcessor.d.ts +86 -0
  31. package/dist/processors/detectionProcessor.js +124 -0
  32. package/dist/processors/index.d.ts +5 -0
  33. package/dist/processors/index.js +16 -0
  34. package/dist/processors/tfliteFrameProcessor.d.ts +90 -0
  35. package/dist/processors/tfliteFrameProcessor.js +213 -0
  36. package/dist/types/barcode.d.ts +91 -0
  37. package/dist/types/barcode.js +19 -0
  38. package/dist/types/detection.d.ts +166 -0
  39. package/dist/types/detection.js +8 -0
  40. package/dist/types/index.d.ts +126 -0
  41. package/dist/types/index.js +25 -0
  42. package/dist/types/ocr.d.ts +202 -0
  43. package/dist/types/ocr.js +8 -0
  44. package/dist/utils/imagePreprocessor.d.ts +85 -0
  45. package/dist/utils/imagePreprocessor.js +304 -0
  46. package/dist/utils/yoloProcessor.d.ts +40 -0
  47. package/dist/utils/yoloProcessor.js +154 -0
  48. package/package.json +78 -0
package/dist/hooks/useProductDetector.js
@@ -0,0 +1,679 @@
+ "use strict";
+ /**
+  * useProductDetector Hook
+  *
+  * React hook for visual product detection using TFLite models.
+  * Detects products in photos or camera frames using YOLOv8.
+  *
+  * @example
+  * ```typescript
+  * import { useProductDetector } from '@souscheflabs/ml-vision';
+  *
+  * function FridgeScanner() {
+  *   const {
+  *     isModelLoaded,
+  *     isDetecting,
+  *     detections,
+  *     detectProducts,
+  *   } = useProductDetector({
+  *     minConfidence: 0.5,
+  *     onDetected: (products) => console.log('Found:', products),
+  *   });
+  *
+  *   const handleCapture = async (photoUri: string) => {
+  *     const results = await detectProducts(photoUri);
+  *     // results contain detected products with bounding boxes
+  *   };
+  *
+  *   return (
+  *     // Your camera UI
+  *   );
+  * }
+  * ```
+  */
+ var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     var desc = Object.getOwnPropertyDescriptor(m, k);
+     if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
+         desc = { enumerable: true, get: function() { return m[k]; } };
+     }
+     Object.defineProperty(o, k2, desc);
+ }) : (function(o, m, k, k2) {
+     if (k2 === undefined) k2 = k;
+     o[k2] = m[k];
+ }));
+ var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
+     Object.defineProperty(o, "default", { enumerable: true, value: v });
+ }) : function(o, v) {
+     o["default"] = v;
+ });
+ var __importStar = (this && this.__importStar) || (function () {
+     var ownKeys = function(o) {
+         ownKeys = Object.getOwnPropertyNames || function (o) {
+             var ar = [];
+             for (var k in o) if (Object.prototype.hasOwnProperty.call(o, k)) ar[ar.length] = k;
+             return ar;
+         };
+         return ownKeys(o);
+     };
+     return function (mod) {
+         if (mod && mod.__esModule) return mod;
+         var result = {};
+         if (mod != null) for (var k = ownKeys(mod), i = 0; i < k.length; i++) if (k[i] !== "default") __createBinding(result, mod, k[i]);
+         __setModuleDefault(result, mod);
+         return result;
+     };
+ })();
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.useProductDetector = useProductDetector;
+ const react_1 = require("react");
+ const react_native_1 = require("react-native");
+ const MLVisionProvider_1 = require("../core/MLVisionProvider");
+ const classLabels_1 = require("./classLabels");
+ const classLabelsCoco_1 = require("./classLabelsCoco");
+ const imagePreprocessor_1 = require("../utils/imagePreprocessor");
+ const yoloProcessor_1 = require("../utils/yoloProcessor");
+ // ============================================================================
+ // TFLite Model Loading
+ // ============================================================================
+ // Dynamic import of react-native-fast-tflite to handle cases where it's not installed
+ let TFLite = null;
+ let loadedModel = null;
+ async function loadTFLiteModule() {
+     if (TFLite)
+         return TFLite;
+     try {
+         TFLite = await Promise.resolve().then(() => __importStar(require('react-native-fast-tflite')));
+         return TFLite;
+     }
+     catch (error) {
+         console.error('[useProductDetector] Failed to load react-native-fast-tflite:', error);
+         return null;
+     }
+ }
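+ // Note: `Promise.resolve().then(() => require(...))` is the CommonJS form tsc emits
+ // for a dynamic `import()`, so the optional dependency is only resolved the first
+ // time the hook runs rather than at bundle load time.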
+ // ============================================================================
+ // Constants
+ // ============================================================================
+ /** Model input size (must match training config) */
+ const MODEL_INPUT_SIZE = 640;
+ /**
+  * Model asset - loaded via Metro bundler
+  * The require() returns an asset ID that react-native-fast-tflite can resolve
+  *
+  * Using SousChef fine-tuned model (233 food classes)
+  */
+ const MODEL_ASSET = require('../../models/souschef_v1_int8.tflite');
+ /** Whether we're using the COCO model (80 classes) or custom food model (233 classes) */
+ const USE_COCO_MODEL = false;
+ /** Get the appropriate number of classes based on model */
+ const getNumClasses = () => USE_COCO_MODEL ? classLabelsCoco_1.NUM_CLASSES_COCO : classLabels_1.NUM_CLASSES;
+ /** Model version identifier */
+ const MODEL_VERSION = '1.0.0';
+ /**
+  * Simple hash function for image URIs
+  * For a real implementation, use a perceptual hash or file content hash
+  */
+ function simpleHash(str) {
+     let hash = 5381;
+     for (let i = 0; i < str.length; i++) {
+         hash = (hash * 33 + str.charCodeAt(i)) % 2147483647;
+     }
+     return `img_${Math.abs(hash).toString(36)}`;
+ }
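+ // Note: this is a djb2-style string hash keyed on the URI alone, not on pixel
+ // content, so the same image saved under two different URIs gets two cache keys.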
+ /**
+  * Process YOLOv8 output tensor to get detections
+  *
+  * YOLOv8 output format: [1, 4 + numClasses, 8400] (e.g. [1, 237, 8400] for the
+  * 233-class food model) where:
+  * - 4 + numClasses = bbox (x, y, w, h) followed by one score per class
+  * - 8400 = number of anchor boxes (80x80 + 40x40 + 20x20)
+  *
+  * @deprecated Use processYoloOutput from utils/yoloProcessor instead
+  */
+ function _processYoloOutput(output, numClasses = classLabels_1.NUM_CLASSES, confThreshold = 0.25, iouThreshold = 0.45) {
+     const numAnchors = 8400;
+     // eslint-disable-next-line @typescript-eslint/no-unused-vars
+     const numOutputs = 4 + numClasses; // x, y, w, h + class scores
+     const detections = [];
+     // Process each anchor
+     for (let i = 0; i < numAnchors; i++) {
+         // Extract bbox: [x_center, y_center, width, height] (normalized 0-1)
+         const x = output[0 * numAnchors + i];
+         const y = output[1 * numAnchors + i];
+         const w = output[2 * numAnchors + i];
+         const h = output[3 * numAnchors + i];
+         // Find max class score and index
+         let maxScore = 0;
+         let maxClassIndex = 0;
+         for (let c = 0; c < numClasses; c++) {
+             const score = output[(4 + c) * numAnchors + i];
+             if (score > maxScore) {
+                 maxScore = score;
+                 maxClassIndex = c;
+             }
+         }
+         // Filter by confidence
+         if (maxScore >= confThreshold) {
+             detections.push({
+                 x: x - w / 2, // Convert from center to top-left
+                 y: y - h / 2,
+                 width: w,
+                 height: h,
+                 classIndex: maxClassIndex,
+                 confidence: maxScore,
+             });
+         }
+     }
+     // Apply Non-Maximum Suppression (NMS)
+     return applyNMS(detections, iouThreshold);
+ }
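+ // Indexing note: the [1, 4 + numClasses, 8400] output is flattened channel-major,
+ // so channel c of anchor i sits at output[c * numAnchors + i]; with the 233-class
+ // food model that means channels 0-3 hold the bbox and 4-236 the class scores.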
+ /**
+  * Non-Maximum Suppression to remove overlapping detections
+  */
+ function applyNMS(detections, iouThreshold) {
+     if (detections.length === 0)
+         return [];
+     // Sort by confidence (descending)
+     const sorted = [...detections].sort((a, b) => b.confidence - a.confidence);
+     const kept = [];
+     while (sorted.length > 0) {
+         const best = sorted.shift();
+         kept.push(best);
+         // Remove overlapping detections of same class
+         for (let i = sorted.length - 1; i >= 0; i--) {
+             if (sorted[i].classIndex === best.classIndex) {
+                 const iou = calculateIoU(best, sorted[i]);
+                 if (iou > iouThreshold) {
+                     sorted.splice(i, 1);
+                 }
+             }
+         }
+     }
+     return kept;
+ }
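+ // Note: this NMS is class-aware (boxes of different classes never suppress each
+ // other) and greedy: the highest-confidence survivor is kept, then same-class
+ // boxes overlapping it beyond iouThreshold are dropped before the next pass.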
+ /**
+  * Calculate Intersection over Union (IoU) between two boxes
+  */
+ function calculateIoU(box1, box2) {
+     const x1 = Math.max(box1.x, box2.x);
+     const y1 = Math.max(box1.y, box2.y);
+     const x2 = Math.min(box1.x + box1.width, box2.x + box2.width);
+     const y2 = Math.min(box1.y + box1.height, box2.y + box2.height);
+     const intersection = Math.max(0, x2 - x1) * Math.max(0, y2 - y1);
+     const area1 = box1.width * box1.height;
+     const area2 = box2.width * box2.height;
+     const union = area1 + area2 - intersection;
+     return union > 0 ? intersection / union : 0;
+ }
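+ // Worked example: two 10x10 boxes offset by 5 along x intersect in a 5x10 strip,
+ // so IoU = 50 / (100 + 100 - 50) = 1/3, which is below the default 0.45 threshold
+ // and would not trigger suppression.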
+ // ============================================================================
+ // Hook Implementation
+ // ============================================================================
+ /**
+  * Hook for visual product detection using TFLite
+  */
+ function useProductDetector(options = {}) {
+     const { model: modelType = 'fast', maxDetections = 20, minConfidence = 0.5, categories, serverFallback = true, serverFallbackThreshold = 0.4, serverUrl, onDetected, onError, hapticFeedback = true, } = options;
+     // Context
+     const context = (0, MLVisionProvider_1.useMLVisionContext)();
+     const { serverClient, serverAvailable, config, cacheManager } = context;
+     // State
+     const [isModelLoaded, setIsModelLoaded] = (0, react_1.useState)(false);
+     const [isDetecting, setIsDetecting] = (0, react_1.useState)(false);
+     const [detections, setDetections] = (0, react_1.useState)([]);
+     const [error, setError] = (0, react_1.useState)(null);
+     // Refs
+     const modelRef = (0, react_1.useRef)(null);
+     const mountedRef = (0, react_1.useRef)(true);
+     // Computed server URL
+     const effectiveServerUrl = serverUrl || config.serverUrl;
+     // ============================================================================
+     // Model Loading
+     // ============================================================================
+     (0, react_1.useEffect)(() => {
+         mountedRef.current = true;
+         async function loadModel() {
+             if (modelType !== 'fast') {
+                 // Server-only mode
+                 setIsModelLoaded(true);
+                 return;
+             }
+             try {
+                 console.log('[useProductDetector] Step 1: Loading TFLite module...');
+                 const tflite = await loadTFLiteModule();
+                 if (!tflite) {
+                     throw new Error('react-native-fast-tflite is not available');
+                 }
+                 console.log('[useProductDetector] Step 2: TFLite module loaded');
+                 // Check if model is already loaded
+                 if (loadedModel) {
+                     console.log('[useProductDetector] Model already loaded, reusing...');
+                     modelRef.current = loadedModel;
+                     if (mountedRef.current) {
+                         setIsModelLoaded(true);
+                     }
+                     return;
+                 }
+                 // Load the model from assets folder
+                 console.log('[useProductDetector] Step 3: Loading TFLite model...');
+                 console.log('[useProductDetector] MODEL_ASSET value:', MODEL_ASSET);
+                 console.log('[useProductDetector] MODEL_ASSET type:', typeof MODEL_ASSET);
+                 // CoreML is iOS-only, use default CPU delegate on Android for stability
+                 const delegate = config.enableGPUDelegate && react_native_1.Platform.OS === 'ios'
+                     ? 'core-ml'
+                     : 'default';
+                 console.log('[useProductDetector] Step 4: Using delegate:', delegate);
+                 // Load model via require() which Metro resolves to an asset ID
+                 console.log('[useProductDetector] Step 5: Calling loadTensorflowModel...');
+                 let model;
+                 try {
+                     // Try loading with the bundled asset first
+                     model = await tflite.loadTensorflowModel(MODEL_ASSET, delegate);
+                 }
+                 catch (loadErr) {
+                     console.error('[useProductDetector] Primary load failed:', loadErr.message);
+                     // Fallback: try file:// URL for Android assets
+                     if (react_native_1.Platform.OS === 'android') {
+                         console.log('[useProductDetector] Trying fallback: file:///android_asset/...');
+                         try {
+                             model = await tflite.loadTensorflowModel({ url: 'file:///android_asset/souschef_v1_int8.tflite' }, delegate);
+                             console.log('[useProductDetector] Fallback succeeded!');
+                         }
+                         catch (fallbackErr) {
+                             console.error('[useProductDetector] Fallback also failed:', fallbackErr.message);
+                             throw loadErr; // Throw original error
+                         }
+                     }
+                     else {
+                         throw loadErr;
+                     }
+                 }
+                 console.log('[useProductDetector] Step 6: Model loaded successfully');
+                 console.log('[useProductDetector] Model inputs:', JSON.stringify(model.inputs));
+                 console.log('[useProductDetector] Model outputs:', JSON.stringify(model.outputs));
+                 loadedModel = model;
+                 modelRef.current = model;
+                 if (mountedRef.current) {
+                     setIsModelLoaded(true);
+                     console.log('[useProductDetector] Model loaded successfully');
+                 }
+             }
+             catch (err) {
+                 const loadError = err instanceof Error ? err : new Error('Failed to load model');
+                 console.error('[useProductDetector] Model loading error:', loadError);
+                 if (mountedRef.current) {
+                     setError(loadError);
+                     onError?.(loadError);
+                 }
+             }
+         }
+         loadModel();
+         return () => {
+             mountedRef.current = false;
+         };
+     }, [modelType, config.enableGPUDelegate, onError]);
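+     // Note: `loadedModel` lives at module scope, so the TFLite model is loaded once
+     // per JS runtime and shared across every component using this hook; only the
+     // first mount pays the load cost.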
+     // ============================================================================
+     // Detection Functions
+     // ============================================================================
+     /**
+      * Detect products in a photo
+      */
+     const detectProducts = (0, react_1.useCallback)(async (uri) => {
+         if (isDetecting) {
+             console.warn('[useProductDetector] Detection already in progress');
+             return [];
+         }
+         setIsDetecting(true);
+         setError(null);
+         try {
+             let results = [];
+             const imageHash = simpleHash(uri);
+             // Check cache first
+             if (cacheManager) {
+                 const cached = cacheManager.getProductRecognition(imageHash);
+                 if (cached && cached.modelVersion === MODEL_VERSION) {
+                     console.log('[useProductDetector] Using cached results');
+                     // Convert cached data to ProductDetectionResult format
+                     results = cached.detections.map((det, idx) => ({
+                         id: `cached_${imageHash}_${idx}`,
+                         type: 'product',
+                         confidence: det.confidence,
+                         boundingBox: det.boundingBox,
+                         data: {
+                             category: (0, classLabels_1.getClassInfo)(parseInt(Object.entries(classLabels_1.CLASS_LABELS).find(([, v]) => v.label === det.classLabel)?.[0] ?? '0', 10)).category,
+                             classLabel: det.classLabel,
+                             classIndex: parseInt(Object.entries(classLabels_1.CLASS_LABELS).find(([, v]) => v.label === det.classLabel)?.[0] ?? '0', 10),
+                             name: det.classLabel.replace(/_/g, ' '),
+                         },
+                         source: 'on_device',
+                         processingTimeMs: 0, // Cached, no processing time
+                         timestamp: Date.now(),
+                     }));
+                     if (mountedRef.current) {
+                         setDetections(results);
+                         setIsDetecting(false);
+                     }
+                     onDetected?.(results);
+                     return results;
+                 }
+             }
+             // Get image dimensions
+             const imageSize = await new Promise((resolve, reject) => {
+                 react_native_1.Image.getSize(uri, (width, height) => resolve({ width, height }), reject);
+             });
+             // Try on-device detection first
+             if (modelType === 'fast' && modelRef.current) {
+                 try {
+                     results = await runOnDeviceDetection(uri, imageSize.width, imageSize.height);
+                 }
+                 catch {
+                     // On-device detection failed (e.g., preprocessing not implemented)
+                     // This is expected for photo-based detection - silently fall back to server
+                     console.log('[useProductDetector] On-device detection unavailable, will use server');
+                 }
+             }
+             // Server fallback for:
+             // - Server-only mode (modelType === 'accurate')
+             // - On-device detection failed or returned no results
+             // - Low confidence results
+             const shouldFallback = modelType === 'accurate' ||
+                 (serverFallback &&
+                     effectiveServerUrl &&
+                     serverAvailable &&
+                     (results.length === 0 ||
+                         results.every((r) => r.confidence < serverFallbackThreshold)));
+             if (shouldFallback && serverClient) {
+                 console.log('[useProductDetector] Using server detection');
+                 try {
+                     const serverResults = await runServerDetection(uri);
+                     // Merge results, preferring server for duplicates
+                     results = mergeDetections(results, serverResults);
+                 }
+                 catch (serverErr) {
+                     console.error('[useProductDetector] Server detection failed:', serverErr);
+                     // If server fails too, we have no results - throw the error
+                     if (results.length === 0) {
+                         throw new Error('Detection failed: Server unavailable and on-device preprocessing not implemented');
+                     }
+                 }
+             }
+             // Filter by confidence and categories
+             results = results
+                 .filter((r) => r.confidence >= minConfidence)
+                 .filter((r) => !categories || categories.includes(r.data.category))
+                 .slice(0, maxDetections);
+             // Update state
+             if (mountedRef.current) {
+                 setDetections(results);
+             }
+             // Cache results
+             if (cacheManager && results.length > 0) {
+                 const cacheData = {
+                     imageHash,
+                     detections: results.map((r) => ({
+                         classLabel: r.data.classLabel,
+                         confidence: r.confidence,
+                         boundingBox: r.boundingBox,
+                     })),
+                     modelVersion: MODEL_VERSION,
+                 };
+                 cacheManager.setProductRecognition(imageHash, cacheData);
+             }
+             // Callbacks
+             if (results.length > 0) {
+                 onDetected?.(results);
+                 if (hapticFeedback) {
+                     // Trigger haptic feedback (implement based on your haptic service)
+                     // HapticService.success();
+                 }
+             }
+             return results;
+         }
+         catch (err) {
+             const detectError = err instanceof Error ? err : new Error('Detection failed');
+             console.error('[useProductDetector] Detection error:', detectError);
+             if (mountedRef.current) {
+                 setError(detectError);
+             }
+             onError?.(detectError);
+             return [];
+         }
+         finally {
+             if (mountedRef.current) {
+                 setIsDetecting(false);
+             }
+         }
+     },
+     // eslint-disable-next-line react-hooks/exhaustive-deps
+     [
+         isDetecting,
+         modelType,
+         serverFallback,
+         serverFallbackThreshold,
+         effectiveServerUrl,
+         serverAvailable,
+         serverClient,
+         cacheManager,
+         minConfidence,
+         categories,
+         maxDetections,
+         onDetected,
+         onError,
+         hapticFeedback,
+         // Note: runOnDeviceDetection and runServerDetection are intentionally excluded
+         // They are stable helper functions defined within this hook
+     ]);
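+     // Note for callers: detectProducts never rejects; any internal failure is
+     // surfaced through the error state and onError callback, and the promise
+     // resolves to an empty array.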
+     /**
+      * Run detection on device using TFLite
+      *
+      * Requires @shopify/react-native-skia for image preprocessing.
+      * Install with: npm install @shopify/react-native-skia
+      *
+      * For real-time detection (VisionCamera frame processor), use the
+      * vision-camera-resize-plugin which provides direct pixel access.
+      */
+     async function runOnDeviceDetection(uri, imageWidth, imageHeight) {
+         const model = modelRef.current;
+         if (!model) {
+             throw new Error('Model not loaded');
+         }
+         const startTime = Date.now();
+         // Try to initialize Skia for preprocessing
+         await (0, imagePreprocessor_1.initSkia)();
+         if (!(0, imagePreprocessor_1.isSkiaAvailable)()) {
+             console.log('[useProductDetector] On-device detection requires @shopify/react-native-skia');
+             console.log('[useProductDetector] Install with: npm install @shopify/react-native-skia');
+             console.log('[useProductDetector] Falling back to server detection...');
+             throw new Error('On-device photo detection requires @shopify/react-native-skia. ' +
+                 'Install it or use server fallback.');
+         }
+         console.log('[useProductDetector] Running on-device detection...');
+         console.log('[useProductDetector] Image:', imageWidth, 'x', imageHeight);
+         try {
+             // Preprocess image: resize to 640x640 and normalize to [0, 1]
+             const preprocessed = await (0, imagePreprocessor_1.preprocessImage)(uri, {
+                 width: MODEL_INPUT_SIZE,
+                 height: MODEL_INPUT_SIZE,
+                 normalize: true,
+                 letterbox: true,
+             });
+             console.log('[useProductDetector] Preprocessed image, running inference...');
+             // Run TFLite inference
+             const output = await model.run([preprocessed.data]);
+             const rawOutput = output[0];
+             const inferenceTime = Date.now() - startTime;
+             console.log('[useProductDetector] Inference complete:', inferenceTime, 'ms');
+             // Process YOLO output
+             const numClasses = getNumClasses();
+             const rawDetections = (0, yoloProcessor_1.processYoloOutput)(rawOutput, numClasses, 0.25, 0.45);
+             // Scale detections back to original image coordinates
+             const scaledDetections = (0, yoloProcessor_1.scaleDetections)(rawDetections, MODEL_INPUT_SIZE, imageWidth, imageHeight);
+             console.log('[useProductDetector] Found', scaledDetections.length, 'detections');
+             // Convert to ProductDetectionResult format
+             const processingTimeMs = Date.now() - startTime;
+             return scaledDetections.map((det) => toProductDetectionResult(det, imageWidth, imageHeight, processingTimeMs));
+         }
+         catch (preprocessError) {
+             console.error('[useProductDetector] On-device detection failed:', preprocessError);
+             throw preprocessError;
+         }
+     }
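+     // Note on the preprocessing above: `letterbox: true` presumably pads the resized
+     // image to preserve aspect ratio, and scaleDetections undoes that mapping when it
+     // projects boxes from 640x640 model space back onto the original photo, so the
+     // two options need to stay in sync.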
+     /**
+      * Convert raw detection to ProductDetectionResult
+      */
+     function toProductDetectionResult(det, imageWidth, imageHeight, processingTimeMs) {
+         // Get class info based on model type
+         const classInfo = USE_COCO_MODEL
+             ? (0, classLabelsCoco_1.getCocoClassInfo)(det.classIndex)
+             : (0, classLabels_1.getClassInfo)(det.classIndex);
+         return {
+             id: `det_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`,
+             type: 'product',
+             confidence: det.confidence,
+             boundingBox: {
+                 x: det.x,
+                 y: det.y,
+                 width: det.width,
+                 height: det.height,
+             },
+             data: {
+                 category: classInfo.category,
+                 classLabel: classInfo.label,
+                 classIndex: det.classIndex,
+                 name: classInfo.label.replace(/_/g, ' '),
+             },
+             source: 'on_device',
+             processingTimeMs,
+             timestamp: Date.now(),
+         };
+     }
+     /**
+      * Run detection on server
+      */
+     async function runServerDetection(uri) {
+         console.log('[useProductDetector] runServerDetection called');
+         console.log('[useProductDetector] Server client available:', !!serverClient);
+         console.log('[useProductDetector] Server URL:', effectiveServerUrl);
+         console.log('[useProductDetector] Image URI:', uri);
+         if (!serverClient) {
+             console.error('[useProductDetector] Server client not available!');
+             throw new Error('Server client not available');
+         }
+         // Convert URI to ImageUploadData format
+         const imageData = {
+             image: uri,
+             isBase64: false,
+         };
+         console.log('[useProductDetector] Calling serverClient.detectProducts...');
+         try {
+             const results = await serverClient.detectProducts(imageData, {
+                 maxDetections,
+                 minConfidence: minConfidence * 0.8, // Slightly lower threshold for server
+             });
+             console.log('[useProductDetector] Server returned', results?.length || 0, 'detections');
+             console.log('[useProductDetector] Server results:', JSON.stringify(results));
+             // Server already returns ProductDetectionResult[] with all required fields
+             return results;
+         }
+         catch (serverErr) {
+             console.error('[useProductDetector] Server request failed:', serverErr);
+             throw serverErr;
+         }
+     }
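+     // Note: the server is queried at minConfidence * 0.8 so borderline boxes still
+     // come back; the stricter >= minConfidence filter inside detectProducts makes
+     // the final cut.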
+     /**
+      * Merge on-device and server detections
+      */
+     function mergeDetections(onDevice, server) {
+         // Simple merge: include all server detections, add on-device only if no overlap
+         const merged = [...server];
+         for (const det of onDevice) {
+             const overlaps = server.some((s) => s.data.classLabel === det.data.classLabel &&
+                 calculateIoU({
+                     x: det.boundingBox.x,
+                     y: det.boundingBox.y,
+                     width: det.boundingBox.width,
+                     height: det.boundingBox.height,
+                     classIndex: det.data.classIndex,
+                     confidence: det.confidence,
+                 }, {
+                     x: s.boundingBox.x,
+                     y: s.boundingBox.y,
+                     width: s.boundingBox.width,
+                     height: s.boundingBox.height,
+                     classIndex: s.data.classIndex,
+                     confidence: s.confidence,
+                 }) > 0.5);
+             if (!overlaps) {
+                 merged.push(det);
+             }
+         }
+         return merged;
+     }
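+     // Note: the merge treats server results as authoritative; an on-device box only
+     // survives when no same-class server box overlaps it with IoU > 0.5.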
+     /**
+      * Detect products in current camera frame (placeholder)
+      */
+     const detectInFrame = (0, react_1.useCallback)(async () => {
+         // This would be called from a frame processor
+         // For now, return current detections
+         return detections;
+     }, [detections]);
+     /**
+      * Update model (check for new version)
+      */
+     const updateModel = (0, react_1.useCallback)(async () => {
+         // TODO: Implement model update checking
+         console.log('[useProductDetector] Model update check not implemented');
+     }, []);
+     /**
+      * Clear all detections
+      */
+     const clearDetections = (0, react_1.useCallback)(() => {
+         setDetections([]);
+         setError(null);
+     }, []);
+     // ============================================================================
+     // Frame Processor (placeholder)
+     // ============================================================================
+     // Frame processor for real-time detection would be implemented here
+     // This requires worklet functions from react-native-vision-camera
+     const frameProcessor = (0, react_1.useMemo)(() => {
+         // Placeholder - actual implementation needs VisionCamera worklets
+         return null;
+     }, []);
+     // ============================================================================
+     // Model Info
+     // ============================================================================
+     const modelInfo = (0, react_1.useMemo)(() => {
+         if (!isModelLoaded)
+             return null;
+         return {
+             name: 'souschef_v1_int8',
+             version: MODEL_VERSION,
+             sizeBytes: 3737994, // ~3.6 MB (233 food classes)
+             lastUpdated: new Date('2026-01-16'),
+             type: 'detection',
+             isLoaded: isModelLoaded,
+         };
+     }, [isModelLoaded]);
+     // ============================================================================
+     // Return Value
+     // ============================================================================
+     return {
+         // State
+         isModelLoaded,
+         isDetecting,
+         detections,
+         error,
+         modelVersion: MODEL_VERSION,
+         // Model (for frame processor use)
+         model: modelRef.current,
+         // Actions
+         detectProducts,
+         detectInFrame,
+         updateModel,
+         clearDetections,
+         // Frame processor
+         frameProcessor,
+         // Model info
+         modelInfo,
+     };
+ }
+ exports.default = useProductDetector;