matrice-analytics 0.1.97__py3-none-any.whl → 0.1.124__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as published.
Files changed (30)
  1. matrice_analytics/post_processing/__init__.py +22 -0
  2. matrice_analytics/post_processing/advanced_tracker/config.py +8 -4
  3. matrice_analytics/post_processing/advanced_tracker/track_class_aggregator.py +128 -0
  4. matrice_analytics/post_processing/advanced_tracker/tracker.py +22 -1
  5. matrice_analytics/post_processing/config.py +17 -2
  6. matrice_analytics/post_processing/core/config.py +107 -1
  7. matrice_analytics/post_processing/face_reg/face_recognition.py +706 -73
  8. matrice_analytics/post_processing/face_reg/people_activity_logging.py +25 -14
  9. matrice_analytics/post_processing/post_processor.py +16 -0
  10. matrice_analytics/post_processing/usecases/__init__.py +9 -0
  11. matrice_analytics/post_processing/usecases/crowdflow.py +1088 -0
  12. matrice_analytics/post_processing/usecases/footfall.py +170 -22
  13. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +57 -38
  14. matrice_analytics/post_processing/usecases/parking_lot_analytics.py +1137 -0
  15. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +30 -4
  16. matrice_analytics/post_processing/usecases/vehicle_monitoring_drone_view.py +246 -3
  17. matrice_analytics/post_processing/usecases/vehicle_monitoring_parking_lot.py +36 -3
  18. matrice_analytics/post_processing/usecases/vehicle_monitoring_wrong_way.py +1021 -0
  19. matrice_analytics/post_processing/utils/__init__.py +5 -0
  20. matrice_analytics/post_processing/utils/agnostic_nms.py +759 -0
  21. matrice_analytics/post_processing/utils/alert_instance_utils.py +55 -7
  22. matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +25 -2
  23. matrice_analytics/post_processing/utils/incident_manager_utils.py +12 -1
  24. matrice_analytics/post_processing/utils/parking_analytics_tracker.py +359 -0
  25. matrice_analytics/post_processing/utils/wrong_way_tracker.py +670 -0
  26. {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.124.dist-info}/METADATA +1 -1
  27. {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.124.dist-info}/RECORD +30 -23
  28. {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.124.dist-info}/WHEEL +0 -0
  29. {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.124.dist-info}/licenses/LICENSE.txt +0 -0
  30. {matrice_analytics-0.1.97.dist-info → matrice_analytics-0.1.124.dist-info}/top_level.txt +0 -0
matrice_analytics/post_processing/utils/agnostic_nms.py (new file)
@@ -0,0 +1,759 @@
+"""
+Production-grade reusable NMS module with YOLO-matching implementation.
+
+This module provides class-specific and class-agnostic NMS implementations
+that match YOLO's built-in behavior while being completely framework-agnostic.
+
+Usage:
+    from nms_module import AgnosticNMS
+
+    nms = AgnosticNMS(iou_threshold=0.45, min_box_size=2.0)
+    filtered_detections = nms.apply(detections, class_agnostic=True)
+"""
+
+from typing import Any, Dict, List, Optional, Tuple
+import numpy as np
+import sys
+
+# Try importing torch/torchvision, but don't auto-install in production
+try:
+    import torch  # noqa: F401
+    from torchvision.ops import nms as torchvision_nms  # noqa: F401
+    TORCHVISION_AVAILABLE = True
+except Exception:
+    TORCHVISION_AVAILABLE = False
+
+
+class AgnosticNMS:
+    """
+    Production-grade NMS implementation with YOLO-matching behavior.
+
+    Features:
+    - Class-specific and class-agnostic modes
+    - Vectorized (PyTorch) and iterative fallback
+    - Numerical stability enhancements
+    - Box validation and filtering
+    - Schema preservation
+    - Zero side effects
+    - Supports both x1/y1/x2/y2 and xmin/ymin/xmax/ymax bbox formats
+
+    Attributes:
+        iou_threshold: IoU threshold for suppression (default: 0.45)
+        min_box_size: Minimum box width/height in pixels (default: 2.0)
+        use_vectorized: Use torchvision.ops.nms if available (default: True)
+        eps: Epsilon for numerical stability (default: 1e-7)
+    """
+
+    def __init__(
+        self,
+        iou_threshold: float = 0.45,
+        min_box_size: float = 2.0,
+        use_vectorized: bool = True,
+        eps: float = 1e-7
+    ):
+        """
+        Initialize NMS module.
+
+        Args:
+            iou_threshold: IoU threshold for suppression (0.0 to 1.0)
+            min_box_size: Minimum box dimension in pixels
+            use_vectorized: Use PyTorch implementation if available
+            eps: Epsilon for numerical stability in IoU computation
+        """
+        if not 0.0 <= iou_threshold <= 1.0:
+            raise ValueError(f"iou_threshold must be in [0, 1], got {iou_threshold}")
+
+        if min_box_size < 0:
+            raise ValueError(f"min_box_size must be >= 0, got {min_box_size}")
+
+        self.iou_threshold = iou_threshold
+        self.min_box_size = min_box_size
+        self.use_vectorized = use_vectorized and TORCHVISION_AVAILABLE
+        self.eps = eps
+
+        self._stats = {
+            "total_calls": 0,
+            "vectorized_calls": 0,
+            "iterative_calls": 0,
+            "total_input": 0,
+            "total_output": 0,
+            "total_suppressed": 0
+        }
+
+    def apply(
+        self,
+        detections: List[Dict[str, Any]],
+        class_agnostic: bool = True,
+        target_categories: Optional[List[str]] = None
+    ) -> List[Dict[str, Any]]:
+        """
+        Apply NMS to detections.
+
+        Args:
+            detections: List of detection dicts with schema:
+                {
+                    "category": str,
+                    "confidence": float,
+                    "bounding_box": {"x1": float, "y1": float, "x2": float, "y2": float}
+                        or {"xmin": float, "ymin": float, "xmax": float, "ymax": float},
+                    ... (other fields preserved)
+                }
+            class_agnostic: If True, suppress across all classes
+            target_categories: Optional list of categories to process (others ignored)
+
+        Returns:
+            Filtered list of detections with identical schema
+        """
+        self._stats["total_calls"] += 1
+        self._stats["total_input"] += len(detections)
+
+        if not detections:
+            return detections
+
+        if len(detections) == 1:
+            self._stats["total_output"] += 1
+            return detections
+
+        # Validate schema with soft-fail on errors
+        invalid_detections = []
+        validation_diagnostics = []
+
+        for idx, d in enumerate(detections):
+            if not self._validate_detection_schema(d):
+                invalid_detections.append((idx, d))
+
+                # Collect detailed diagnostics for first 3 failures
+                if len(validation_diagnostics) < 3:
+                    diag = self._diagnose_detection_schema(d, idx)
+                    validation_diagnostics.append(diag)
+
+        if invalid_detections:
+            # Create detailed error message
+            error_msg_parts = [
+                f"NMS Schema Validation Failed:",
+                f" - Total detections: {len(detections)}",
+                f" - Invalid detections: {len(invalid_detections)}",
+                f" - Validation rate: {100 * (1 - len(invalid_detections)/len(detections)):.1f}%",
+                f"",
+                f"Detailed diagnostics for first {len(validation_diagnostics)} failures:"
+            ]
+
+            for diag in validation_diagnostics:
+                error_msg_parts.append(f"\n{diag}")
+
+            error_msg = "\n".join(error_msg_parts)
+
+            # Log to console for production debugging
+            print(f"\n{'='*80}")
+            print(error_msg)
+            print(f"{'='*80}\n")
+
+            # Soft-fail: return original detections instead of crashing
+            print("WARNING: NMS bypassed due to schema validation failures. Returning original detections.")
+            return detections
+
+        # Filter by target categories if specified
+        if target_categories is not None:
+            detections = [d for d in detections if d.get('category') in target_categories]
+            if not detections:
+                return detections
+
+        # Apply NMS
+        if class_agnostic:
+            result = self._apply_nms_single_pass(detections)
+        else:
+            result = self._apply_nms_per_class(detections)
+
+        self._stats["total_output"] += len(result)
+        self._stats["total_suppressed"] += (len(detections) - len(result))
+
+        return result
+
+    def _apply_nms_single_pass(self, detections: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """Apply NMS across all classes in a single pass."""
+        if self.use_vectorized:
+            self._stats["vectorized_calls"] += 1
+            return self._nms_vectorized(detections)
+        else:
+            self._stats["iterative_calls"] += 1
+            return self._nms_iterative(detections)
+
+    def _apply_nms_per_class(self, detections: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """Apply NMS separately for each class."""
+        # Group by category
+        category_groups = {}
+        for det in detections:
+            cat = det.get('category')
+            if cat not in category_groups:
+                category_groups[cat] = []
+            category_groups[cat].append(det)
+
+        # Apply NMS per category
+        result = []
+        for cat_dets in category_groups.values():
+            if self.use_vectorized:
+                result.extend(self._nms_vectorized(cat_dets))
+            else:
+                result.extend(self._nms_iterative(cat_dets))
+
+        if self.use_vectorized:
+            self._stats["vectorized_calls"] += 1
+        else:
+            self._stats["iterative_calls"] += 1
+
+        return result
+
+    def _nms_vectorized(self, detections: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """
+        Vectorized NMS using torchvision.ops.nms.
+
+        This matches YOLO's built-in agnostic NMS exactly.
+        """
+        if not detections:
+            return []
+
+        # Filter invalid boxes
+        valid_dets = [d for d in detections if self._validate_box(self._get_bbox(d))]
+
+        if not valid_dets:
+            return []
+
+        try:
+            # Convert to tensors - handle both bbox formats
+            boxes_list = []
+            for d in valid_dets:
+                bbox = self._get_bbox(d)
+                if 'x1' in bbox:
+                    boxes_list.append([
+                        float(bbox["x1"]), float(bbox["y1"]),
+                        float(bbox["x2"]), float(bbox["y2"])
+                    ])
+                elif 'xmin' in bbox:
+                    boxes_list.append([
+                        float(bbox["xmin"]), float(bbox["ymin"]),
+                        float(bbox["xmax"]), float(bbox["ymax"])
+                    ])
+
+            if not boxes_list:
+                return []
+
+            boxes = torch.tensor(boxes_list, dtype=torch.float32)
+            scores = torch.tensor([float(d["confidence"]) for d in valid_dets], dtype=torch.float32)
+
+            # Apply torchvision NMS
+            keep_indices = torchvision_nms(boxes, scores, self.iou_threshold)
+            keep_indices = keep_indices.cpu().numpy()
+
+            return [valid_dets[i] for i in keep_indices]
+
+        except Exception as e:
+            # Fallback to iterative on error
+            print(f"Vectorized NMS failed: {e}. Falling back to iterative NMS.")
+            return self._nms_iterative(valid_dets)
+
+    def _nms_iterative(self, detections: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+        """
+        Iterative NMS implementation with YOLO-matching enhancements.
+
+        This provides equivalent results to vectorized NMS without PyTorch.
+        """
+        if not detections:
+            return []
+
+        # Filter invalid boxes
+        valid_dets = [d for d in detections if self._validate_box(self._get_bbox(d))]
+
+        if not valid_dets:
+            return []
+
+        # Sort by confidence with area tie-breaking
+        def sort_key(d):
+            bbox = self._get_bbox(d)
+            # Handle both bbox formats
+            if 'x1' in bbox:
+                area = abs(bbox['x2'] - bbox['x1']) * abs(bbox['y2'] - bbox['y1'])
+            elif 'xmin' in bbox:
+                area = abs(bbox['xmax'] - bbox['xmin']) * abs(bbox['ymax'] - bbox['ymin'])
+            else:
+                area = 0
+            return (d["confidence"], area)
+
+        sorted_dets = sorted(valid_dets, key=sort_key, reverse=True)
+
+        # Apply NMS
+        keep = []
+        suppressed_indices = set()
+
+        for i, det in enumerate(sorted_dets):
+            if i in suppressed_indices:
+                continue
+
+            keep.append(det)
+            best_bbox = self._get_bbox(det)
+
+            # Suppress overlapping boxes
+            for j in range(i + 1, len(sorted_dets)):
+                if j in suppressed_indices:
+                    continue
+
+                other_bbox = self._get_bbox(sorted_dets[j])
+                iou = self._compute_iou(best_bbox, other_bbox)
+
+                # Use >= for consistency with torchvision
+                if iou >= self.iou_threshold:
+                    suppressed_indices.add(j)
+
+        return keep
+
+    def _get_bbox(self, detection: Dict[str, Any]) -> Dict:
+        """Extract bounding_box from detection, handling both field names."""
+        return detection.get('bounding_box', detection.get('bbox', {}))
+
+    def _compute_iou(self, bbox1: Dict, bbox2: Dict) -> float:
+        """
+        Compute IoU with numerical stability.
+        Accepts both {x1, y1, x2, y2} and {xmin, ymin, xmax, ymax} formats.
+
+        Args:
+            bbox1: First box dict
+            bbox2: Second box dict
+
+        Returns:
+            IoU value in [0, 1]
+        """
+        try:
+            # Extract coordinates - handle both formats
+            if 'x1' in bbox1:
+                x1_1 = float(bbox1['x1'])
+                y1_1 = float(bbox1['y1'])
+                x2_1 = float(bbox1['x2'])
+                y2_1 = float(bbox1['y2'])
+            elif 'xmin' in bbox1:
+                x1_1 = float(bbox1['xmin'])
+                y1_1 = float(bbox1['ymin'])
+                x2_1 = float(bbox1['xmax'])
+                y2_1 = float(bbox1['ymax'])
+            else:
+                return 0.0
+
+            if 'x1' in bbox2:
+                x1_2 = float(bbox2['x1'])
+                y1_2 = float(bbox2['y1'])
+                x2_2 = float(bbox2['x2'])
+                y2_2 = float(bbox2['y2'])
+            elif 'xmin' in bbox2:
+                x1_2 = float(bbox2['xmin'])
+                y1_2 = float(bbox2['ymin'])
+                x2_2 = float(bbox2['xmax'])
+                y2_2 = float(bbox2['ymax'])
+            else:
+                return 0.0
+
+            # Ensure coordinates are in correct order
+            x1_1, x2_1 = min(x1_1, x2_1), max(x1_1, x2_1)
+            y1_1, y2_1 = min(y1_1, y2_1), max(y1_1, y2_1)
+            x1_2, x2_2 = min(x1_2, x2_2), max(x1_2, x2_2)
+            y1_2, y2_2 = min(y1_2, y2_2), max(y1_2, y2_2)
+
+            # Compute intersection
+            inter_x1 = max(x1_1, x1_2)
+            inter_y1 = max(y1_1, y1_2)
+            inter_x2 = min(x2_1, x2_2)
+            inter_y2 = min(y2_1, y2_2)
+
+            inter_w = max(0.0, inter_x2 - inter_x1)
+            inter_h = max(0.0, inter_y2 - inter_y1)
+
+            if inter_w == 0.0 or inter_h == 0.0:
+                return 0.0
+
+            inter_area = inter_w * inter_h
+
+            # Compute box areas
+            area1 = (x2_1 - x1_1) * (y2_1 - y1_1)
+            area2 = (x2_2 - x1_2) * (y2_2 - y1_2)
+
+            # Add epsilon for numerical stability (matches torchvision)
+            union_area = area1 + area2 - inter_area + self.eps
+
+            # Compute IoU with safeguards
+            iou = inter_area / union_area
+
+            # Clamp to valid range
+            return max(0.0, min(1.0, iou))
+
+        except Exception:
+            return 0.0
+
+
+    def _validate_box(self, bbox: Dict, max_wh: float = 1e4) -> bool:
+        """
+        Validate box dimensions with robust type handling.
+        Accepts both {x1, y1, x2, y2} and {xmin, ymin, xmax, ymax} formats.
+
+        Args:
+            bbox: Box dict with coordinates
+            max_wh: Maximum box width/height
+
+        Returns:
+            True if box is valid
+        """
+        try:
+            # Extract coordinates - handle both formats
+            if 'x1' in bbox:
+                x1, y1, x2, y2 = map(float, [bbox['x1'], bbox['y1'], bbox['x2'], bbox['y2']])
+            elif 'xmin' in bbox:
+                x1, y1, x2, y2 = map(float, [bbox['xmin'], bbox['ymin'], bbox['xmax'], bbox['ymax']])
+            else:
+                return False
+
+            # Calculate width and height
+            w = abs(x2 - x1)
+            h = abs(y2 - y1)
+
+            # Check minimum size
+            if w < self.min_box_size or h < self.min_box_size:
+                return False
+
+            # Check maximum size
+            if w > max_wh or h > max_wh:
+                return False
+
+            # Check for NaN or Inf
+            for v in [x1, y1, x2, y2]:
+                try:
+                    import numpy as np
+                    if np.isnan(v) or np.isinf(v):
+                        return False
+                except ImportError:
+                    import math
+                    if math.isnan(v) or math.isinf(v):
+                        return False
+
+            return True
+
+        except Exception:
+            return False
+
+    def _validate_detection_schema(self, detection: Dict[str, Any]) -> bool:
+        """
+        Validate detection has required fields for NMS with robust type checking.
+
+        This method is designed to be production-safe and handle various input formats
+        including YOLO outputs with numpy types, string categories, and varying confidence scales.
+
+        Args:
+            detection: Single detection dict
+
+        Returns:
+            True if schema is valid, False otherwise (never raises exceptions)
+        """
+        try:
+            # Basic type check
+            if not isinstance(detection, dict):
+                return False
+
+            # ============================================
+            # CATEGORY VALIDATION
+            # ============================================
+            if 'category' not in detection:
+                return False
+
+            category = detection['category']
+
+            # Accept: str, int, numpy.integer, numpy.str_
+            category_valid = isinstance(category, (str, int))
+
+            if not category_valid:
+                try:
+                    import numpy as np
+                    category_valid = isinstance(category, (np.integer, np.str_))
+                except ImportError:
+                    pass
+
+            if not category_valid:
+                return False
+
+            # ============================================
+            # CONFIDENCE VALIDATION
+            # ============================================
+            if 'confidence' not in detection:
+                return False
+
+            conf = detection['confidence']
+
+            # Accept: int, float, numpy.integer, numpy.floating
+            conf_valid = isinstance(conf, (int, float))
+
+            if not conf_valid:
+                try:
+                    import numpy as np
+                    conf_valid = isinstance(conf, (np.integer, np.floating))
+                except ImportError:
+                    pass
+
+            if not conf_valid:
+                return False
+
+            # Try to convert to float and validate range
+            try:
+                conf_val = float(conf)
+                # Allow wide range to handle different YOLO output formats
+                if conf_val < 0 or conf_val > 1000:
+                    return False
+            except (ValueError, TypeError, OverflowError):
+                return False
+
+            # ============================================
+            # BOUNDING BOX VALIDATION
+            # ============================================
+            # Accept both 'bounding_box' and 'bbox' field names
+            bbox = detection.get('bounding_box', detection.get('bbox'))
+
+            if bbox is None or not isinstance(bbox, dict):
+                return False
+
+            # Accept both x1/y1/x2/y2 and xmin/ymin/xmax/ymax formats
+            required_keys_v1 = {'x1', 'y1', 'x2', 'y2'}
+            required_keys_v2 = {'xmin', 'ymin', 'xmax', 'ymax'}
+
+            has_v1 = required_keys_v1.issubset(bbox.keys())
+            has_v2 = required_keys_v2.issubset(bbox.keys())
+
+            if not (has_v1 or has_v2):
+                return False
+
+            # Determine which keys to validate
+            coord_keys = ['x1', 'y1', 'x2', 'y2'] if has_v1 else ['xmin', 'ymin', 'xmax', 'ymax']
+
+            # Validate each coordinate
+            for key in coord_keys:
+                val = bbox[key]
+
+                # Check if numeric (handle numpy types)
+                is_numeric = isinstance(val, (int, float))
+
+                if not is_numeric:
+                    try:
+                        import numpy as np
+                        is_numeric = isinstance(val, (np.integer, np.floating))
+                    except ImportError:
+                        pass
+
+                if not is_numeric:
+                    return False
+
+                # Try converting to float
+                try:
+                    float_val = float(val)
+                    # Check for reasonable coordinate range
+                    if not (-1e10 < float_val < 1e10):
+                        return False
+                except (ValueError, TypeError, OverflowError):
+                    return False
+
+            return True
+
+        except Exception as e:
+            print(f"Unexpected error in schema validation: {e}", file=sys.stderr)
+            return False
+
+    def _diagnose_detection_schema(self, detection: Dict[str, Any], idx: int) -> str:
+        """
+        Diagnose why a detection failed schema validation.
+
+        Args:
+            detection: Detection dict that failed validation
+            idx: Index of detection in list
+
+        Returns:
+            Detailed diagnostic string
+        """
+        diagnostics = [f"Detection #{idx} Failed Validation:"]
+
+        # Check if dict
+        if not isinstance(detection, dict):
+            diagnostics.append(f" Not a dict (type: {type(detection)})")
+            return "\n".join(diagnostics)
+
+        diagnostics.append(f" Keys present: {list(detection.keys())}")
+
+        # Check category
+        if 'category' not in detection:
+            diagnostics.append(f" Missing 'category' key")
+        else:
+            category = detection['category']
+            cat_type = type(category).__name__
+            diagnostics.append(f" Category: '{category}' (type: {cat_type})")
+
+            is_valid_type = isinstance(category, (str, int))
+            if not is_valid_type:
+                try:
+                    import numpy as np
+                    is_valid_type = isinstance(category, (np.integer, np.str_))
+                    if is_valid_type:
+                        diagnostics.append(f" Valid (numpy type)")
+                    else:
+                        diagnostics.append(f" Invalid type: {cat_type}")
+                except ImportError:
+                    diagnostics.append(f" Invalid type: {cat_type} (numpy not available)")
+            else:
+                diagnostics.append(f" Valid type")
+
+        # Check confidence
+        if 'confidence' not in detection:
+            diagnostics.append(f" Missing 'confidence' key")
+        else:
+            conf = detection['confidence']
+            conf_type = type(conf).__name__
+            diagnostics.append(f" Confidence: {conf} (type: {conf_type})")
+
+            is_numeric = isinstance(conf, (int, float))
+            if not is_numeric:
+                try:
+                    import numpy as np
+                    is_numeric = isinstance(conf, (np.integer, np.floating))
+                    if is_numeric:
+                        diagnostics.append(f" Valid (numpy type)")
+                    else:
+                        diagnostics.append(f" Not numeric: {conf_type}")
+                except ImportError:
+                    diagnostics.append(f" Not numeric: {conf_type} (numpy not available)")
+            else:
+                diagnostics.append(f" Valid type")
+
+            # Check range
+            try:
+                conf_val = float(conf)
+                if conf_val < 0 or conf_val > 1000:
+                    diagnostics.append(f" Out of range: {conf_val}")
+                else:
+                    diagnostics.append(f" Valid range")
+            except:
+                diagnostics.append(f" Cannot convert to float")
+
+        # Check bounding_box (accept both field names)
+        bbox = detection.get('bounding_box', detection.get('bbox'))
+
+        if bbox is None:
+            diagnostics.append(f" Missing both 'bounding_box' and 'bbox' keys")
+        elif not isinstance(bbox, dict):
+            diagnostics.append(f" bounding_box is not dict (type: {type(bbox).__name__})")
+        else:
+            diagnostics.append(f" BBox keys: {list(bbox.keys())}")
+
+            required_v1 = {'x1', 'y1', 'x2', 'y2'}
+            required_v2 = {'xmin', 'ymin', 'xmax', 'ymax'}
+
+            has_v1 = required_v1.issubset(bbox.keys())
+            has_v2 = required_v2.issubset(bbox.keys())
+
+            if has_v1:
+                diagnostics.append(f" Has x1/y1/x2/y2 format")
+                coord_keys = ['x1', 'y1', 'x2', 'y2']
+            elif has_v2:
+                diagnostics.append(f" Has xmin/ymin/xmax/ymax format")
+                coord_keys = ['xmin', 'ymin', 'xmax', 'ymax']
+            else:
+                missing_v1 = required_v1 - set(bbox.keys())
+                missing_v2 = required_v2 - set(bbox.keys())
+                diagnostics.append(f" Missing x1/y1 format keys: {missing_v1}")
+                diagnostics.append(f" Missing xmin/ymin format keys: {missing_v2}")
+                coord_keys = []
+
+            # Check coordinate types (only if we have valid format)
+            if coord_keys:
+                for key in coord_keys:
+                    val = bbox[key]
+                    val_type = type(val).__name__
+
+                    is_numeric = isinstance(val, (int, float))
+                    if not is_numeric:
+                        try:
+                            import numpy as np
+                            is_numeric = isinstance(val, (np.integer, np.floating))
+                            status = " numpy" if is_numeric else " "
+                        except:
+                            status = " "
+                    else:
+                        status = " "
+
+                    diagnostics.append(f" {key}: {val} (type: {val_type}) [{status}]")
+
+        return "\n".join(diagnostics)
+
+    def get_stats(self) -> Dict[str, Any]:
+        """
+        Get NMS usage statistics.
+
+        Returns:
+            Dictionary with statistics:
+                - total_calls: Number of times apply() was called
+                - vectorized_calls: Number of vectorized NMS calls
+                - iterative_calls: Number of iterative NMS calls
+                - total_input: Total input detections
+                - total_output: Total output detections
+                - total_suppressed: Total suppressed detections
+                - suppression_rate: Percentage of detections suppressed
+        """
+        stats = self._stats.copy()
+        if stats["total_input"] > 0:
+            stats["suppression_rate"] = 100 * stats["total_suppressed"] / stats["total_input"]
+        else:
+            stats["suppression_rate"] = 0.0
+        return stats
+
+    def reset_stats(self):
+        """Reset usage statistics."""
+        self._stats = {
+            "total_calls": 0,
+            "vectorized_calls": 0,
+            "iterative_calls": 0,
+            "total_input": 0,
+            "total_output": 0,
+            "total_suppressed": 0
+        }
+
+    @staticmethod
+    def is_vectorized_available() -> bool:
+        """Check if vectorized implementation is available."""
+        return TORCHVISION_AVAILABLE
+
+
+# Convenience function for quick usage
+def apply_nms(
+    detections: List[Dict[str, Any]],
+    iou_threshold: float = 0.45,
+    class_agnostic: bool = True,
+    min_box_size: float = 2.0,
+    use_vectorized: bool = True
+) -> List[Dict[str, Any]]:
+    """
+    Convenience function for one-time NMS application.
+
+    Args:
+        detections: List of detection dicts
+        iou_threshold: IoU threshold for suppression
+        class_agnostic: If True, suppress across all classes
+        min_box_size: Minimum box dimension in pixels
+        use_vectorized: Use PyTorch implementation if available
+
+    Returns:
+        Filtered list of detections
+
+    Example:
+        >>> detections = [
+        ...     {"category": "car", "confidence": 0.9,
+        ...      "bounding_box": {"x1": 100, "y1": 100, "x2": 200, "y2": 200}},
+        ...     {"category": "car", "confidence": 0.85,
+        ...      "bounding_box": {"x1": 105, "y1": 105, "x2": 205, "y2": 205}}
+        ... ]
+        >>> filtered = apply_nms(detections, iou_threshold=0.5, class_agnostic=True)
+        >>> len(filtered)
+        1
+    """
+    nms = AgnosticNMS(
+        iou_threshold=iou_threshold,
+        min_box_size=min_box_size,
+        use_vectorized=use_vectorized
+    )
+    return nms.apply(detections, class_agnostic=class_agnostic)
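
The snippet below is a usage sketch (not part of the diff) for the agnostic NMS module added in this release. It assumes the module is importable from the path listed in the files table (matrice_analytics/post_processing/utils/agnostic_nms.py); whether the package re-exports these names elsewhere, for example via post_processing/utils/__init__.py, is not shown in this hunk.

    # Usage sketch for the new module; import path assumed from the files table above.
    from matrice_analytics.post_processing.utils.agnostic_nms import AgnosticNMS, apply_nms

    detections = [
        {"category": "car", "confidence": 0.92,
         "bounding_box": {"x1": 100.0, "y1": 100.0, "x2": 200.0, "y2": 200.0}},
        {"category": "truck", "confidence": 0.80,
         "bounding_box": {"xmin": 104.0, "ymin": 102.0, "xmax": 204.0, "ymax": 203.0}},
    ]

    # One-shot helper: class-agnostic suppression across categories.
    kept = apply_nms(detections, iou_threshold=0.45, class_agnostic=True)

    # Reusable instance: accumulates statistics across calls and falls back to the
    # iterative implementation when torch/torchvision are not installed.
    nms = AgnosticNMS(iou_threshold=0.45, min_box_size=2.0)
    kept = nms.apply(detections, class_agnostic=True)
    print(AgnosticNMS.is_vectorized_available())
    print(nms.get_stats())  # running totals plus suppression_rate

The two example boxes overlap heavily (IoU around 0.88), so with class_agnostic=True one of them should be suppressed even though their categories differ.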