matrice 1.0.99142__py3-none-any.whl → 1.0.99144__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice/deploy/utils/post_processing/usecases/road_lane_detection.py +93 -65
- {matrice-1.0.99142.dist-info → matrice-1.0.99144.dist-info}/METADATA +1 -1
- {matrice-1.0.99142.dist-info → matrice-1.0.99144.dist-info}/RECORD +6 -6
- {matrice-1.0.99142.dist-info → matrice-1.0.99144.dist-info}/WHEEL +0 -0
- {matrice-1.0.99142.dist-info → matrice-1.0.99144.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice-1.0.99142.dist-info → matrice-1.0.99144.dist-info}/top_level.txt +0 -0
@@ -1,10 +1,3 @@
-"""
-Road Lane Detection Use Case for Post-Processing
-
-This module provides road lane detection functionality with lane type classification,
-zone analysis, and alert generation.
-"""
-
 from typing import Any, Dict, List, Optional
 from dataclasses import asdict, dataclass, field
 import time
@@ -25,9 +18,10 @@ from ..utils import (
 )
 from ..core.config import BaseConfig, AlertConfig, ZoneConfig

+
 @dataclass
 class LaneDetectionConfig(BaseConfig):
-    """Configuration for
+    """Configuration for lane detection use case in road monitoring."""
     enable_smoothing: bool = True
     smoothing_algorithm: str = "observability"
     smoothing_window_size: int = 20
@@ -52,6 +46,7 @@ class LaneDetectionConfig(BaseConfig):
         }
     )

+
 class LaneDetectionUseCase(BaseProcessor):
     CATEGORY_DISPLAY = {
         "Divider-Line": "Divider Line",
@@ -97,7 +92,7 @@ class LaneDetectionUseCase(BaseProcessor):
             self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
         else:
             processed_data = data
-            self.logger.debug("
+            self.logger.debug("Did not apply confidence filtering since no threshold provided")

         if config.index_to_category:
             processed_data = apply_category_mapping(processed_data, config.index_to_category)
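For context on the branch above: it only logs whether confidence filtering ran; the filtering and category mapping themselves happen in shared utilities. A minimal sketch of what that pre-processing step typically does, with illustrative stand-ins for the package's actual helpers:

```python
from typing import Any, Dict, List, Optional

def filter_by_confidence(detections: List[Dict[str, Any]],
                         threshold: Optional[float]) -> List[Dict[str, Any]]:
    """Keep detections at or above the threshold; pass everything through if none is set."""
    if threshold is None:
        return detections
    return [d for d in detections if d.get("confidence", 0.0) >= threshold]

def map_categories(detections: List[Dict[str, Any]],
                   index_to_category: Dict[int, str]) -> List[Dict[str, Any]]:
    """Replace integer class indices with human-readable category names."""
    return [{**d, "category": index_to_category.get(d.get("category"), d.get("category"))}
            for d in detections]

detections = [{"category": 0, "confidence": 0.91}, {"category": 1, "confidence": 0.32}]
mapped = map_categories(filter_by_confidence(detections, threshold=0.5),
                        {0: "Divider-Line", 1: "Solid-Line"})
print(mapped)  # [{'category': 'Divider-Line', 'confidence': 0.91}]
```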
@@ -256,7 +251,6 @@ class LaneDetectionUseCase(BaseProcessor):
                 level = "medium"
                 self._ascending_alert_list.append(1)
             else:
-                level = "low"
                 self._ascending_alert_list.append(0)
         else:
             if total_detections > 30:
@@ -276,7 +270,7 @@ class LaneDetectionUseCase(BaseProcessor):
                 intensity = min(10.0, total_detections / 3.0)
                 self._ascending_alert_list.append(0)

-            human_text_lines = [f"
+            human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
             human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
             human_text = "\n".join(human_text_lines)

@@ -307,11 +301,11 @@ class LaneDetectionUseCase(BaseProcessor):
         else:
             self._ascending_alert_list.append(0)
             incidents.append({})
-
         return incidents

     def _generate_tracking_stats(self, counting_summary: Dict, alerts: List, config: LaneDetectionConfig,
                                  frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+        """Generate structured tracking stats matching expected format."""
         camera_info = self.get_camera_info_from_stream(stream_info)
         tracking_stats = []
         total_detections = counting_summary.get("total_count", 0)
@@ -322,8 +316,13 @@ class LaneDetectionUseCase(BaseProcessor):
         high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
         high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)

-
-
+        # Build total_counts and current_counts arrays
+        total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items()]
+        current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items()]
+
+        # Log counts for debugging
+        self.logger.debug(f"Total counts: {total_counts}")
+        self.logger.debug(f"Current counts: {current_counts}")

         detections = []
         for detection in counting_summary.get("detections", []):
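The added lines above reshape the per-category dicts into lists of category/count records. A small self-contained sketch of that conversion with sample data (the counts here are made up; the category names come from the diff's CATEGORY_DISPLAY):

```python
total_counts_dict = {"Divider-Line": 12, "Solid-Line": 7}
per_category_count = {"Divider-Line": 2, "Solid-Line": 1}

# Same reshaping as the added lines: dict -> list of {"category", "count"} records.
total_counts = [{"category": cat, "count": count} for cat, count in total_counts_dict.items()]
current_counts = [{"category": cat, "count": count} for cat, count in per_category_count.items()]

print(total_counts)
# [{'category': 'Divider-Line', 'count': 12}, {'category': 'Solid-Line', 'count': 7}]
```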
@@ -350,7 +349,7 @@ class LaneDetectionUseCase(BaseProcessor):
                 "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                 "ascending": True,
                 "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
-
+                                                  getattr(config.alert_config, 'alert_value', ['JSON']))}
             })

         human_text_lines = [f"Tracking Statistics:"]
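The `settings` entry zips alert types against alert values. A quick standalone illustration of what that comprehension produces, using the fallback defaults visible in the diff (the second example's values are purely illustrative):

```python
# Fallbacks mirror the getattr defaults in the diff; real configs supply their own lists.
alert_type = ['Default']
alert_value = ['JSON']

settings = {t: v for t, v in zip(alert_type, alert_value)}
print(settings)  # {'Default': 'JSON'}

# With multiple configured alerts (hypothetical values):
settings = {t: v for t, v in zip(['email', 'webhook'], ['ops@example.com', 'https://example.com/hook'])}
print(settings)  # {'email': 'ops@example.com', 'webhook': 'https://example.com/hook'}
```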
@@ -359,9 +358,12 @@ class LaneDetectionUseCase(BaseProcessor):
             human_text_lines.append(f"\t{cat}: {count}")
         human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
         for cat, count in total_counts_dict.items():
-
-
-
+            human_text_lines.append(f"\t{cat}: {count}")
+        if alerts:
+            for alert in alerts:
+                human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
+        else:
+            human_text_lines.append("Alerts: None")
         human_text = "\n".join(human_text_lines)

         reset_settings = [{"interval_type": "daily", "reset_time": {"value": 9, "time_unit": "hour"}}]
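The new alert lines append one entry per alert, or a single "Alerts: None" line. A quick illustration of the resulting `human_text`, using made-up sample values:

```python
current_timestamp = "2024-01-01-12:00:00.000000 UTC"  # illustrative value
alerts = [{"settings": {"Default": "JSON"}}]

human_text_lines = ["Tracking Statistics:", "\tDivider-Line: 2"]
if alerts:
    for alert in alerts:
        human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
else:
    human_text_lines.append("Alerts: None")

print("\n".join(human_text_lines))
# Tracking Statistics:
#     Divider-Line: 2
# Alerts: {'Default': 'JSON'} sent @ 2024-01-01-12:00:00.000000 UTC
```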
@@ -389,18 +391,22 @@ class LaneDetectionUseCase(BaseProcessor):
         lines = {}
         lines["Application Name"] = self.CASE_TYPE
         lines["Application Version"] = self.CASE_VERSION
-        if incidents:
+        if len(incidents) > 0:
             lines["Incidents:"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
-        if tracking_stats:
+        if len(tracking_stats) > 0:
             lines["Tracking Statistics:"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
-        if business_analytics:
+        if len(business_analytics) > 0:
             lines["Business Analytics:"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
-        if
+        if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
             lines["Summary"] = "No Summary Data"
         return [lines]

     def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
-        frame_track_ids =
+        frame_track_ids = set()
+        for det in detections:
+            tid = det.get('track_id')
+            if tid is not None:
+                frame_track_ids.add(tid)
         total_track_ids = set()
         for s in getattr(self, '_per_category_total_track_ids', {}).values():
             total_track_ids.update(s)
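The rewritten loop in `_get_track_ids_info` builds `frame_track_ids` while skipping detections that carry no `track_id`. A self-contained sketch of just that collection step (the per-category bookkeeping around it is omitted, and the sample detections are invented):

```python
from typing import Any, Dict, List

def collect_frame_track_ids(detections: List[Dict[str, Any]]) -> set:
    """Gather track IDs present in the current frame, ignoring untracked detections."""
    frame_track_ids = set()
    for det in detections:
        tid = det.get('track_id')
        if tid is not None:
            frame_track_ids.add(tid)
    return frame_track_ids

detections = [{"track_id": 7}, {"track_id": None}, {"category": "Solid-Line"}, {"track_id": 7}]
print(collect_frame_track_ids(detections))  # {7}
```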
@@ -444,68 +450,90 @@ class LaneDetectionUseCase(BaseProcessor):

     def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
         if not stream_info:
-            return "00:00:00.00"
+            return "00:00:00.00"
         if precision:
             if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
-
+                if frame_id:
+                    start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+                else:
+                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
                 return self._format_timestamp_for_video(start_time)
-
+            else:
+                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
         if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
-
+            if frame_id:
+                start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+            else:
+                start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
             return self._format_timestamp_for_video(start_time)
-
-
-
-
-
-
-
-
+        else:
+            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+            if stream_time_str:
+                try:
+                    timestamp_str = stream_time_str.replace(" UTC", "")
+                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                    return self._format_timestamp_for_stream(timestamp)
+                except:
+                    return self._format_timestamp_for_stream(time.time())
+            else:
                 return self._format_timestamp_for_stream(time.time())
-        return self._format_timestamp_for_stream(time.time())

     def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
         if not stream_info:
-            return "00:00:00"
+            return "00:00:00"
         if precision:
             if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
-                return "00:00:00
-
+                return "00:00:00"
+            else:
+                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
         if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
             return "00:00:00"
-
-
-
-
-
-
-
-
+        else:
+            if self._tracking_start_time is None:
+                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+                if stream_time_str:
+                    try:
+                        timestamp_str = stream_time_str.replace(" UTC", "")
+                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                    except:
+                        self._tracking_start_time = time.time()
+                else:
                     self._tracking_start_time = time.time()
-
-
-
-            dt = dt.replace(minute=0, second=0, microsecond=0)
-            return dt.strftime('%Y:%m:%d %H:%M:%S')
+            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+            dt = dt.replace(minute=0, second=0, microsecond=0)
+            return dt.strftime('%Y:%m:%d %H:%M:%S')

     def _count_categories(self, detections: list, config: LaneDetectionConfig) -> dict:
-
+        """
+        Count the number of detections per category and return a summary dict.
+        """
+        counts = {cat: 0 for cat in self.target_categories}  # Initialize with all target categories
+        valid_detections = []
+
         for det in detections:
             cat = det.get('category', 'unknown')
-
+            # Normalize category to match target_categories
+            normalized_cat = cat.replace('-', ' ').title().replace(' ', '-')  # e.g., "solid-line" -> "Solid-Line"
+            if normalized_cat not in self.target_categories:
+                self.logger.debug(f"Skipping detection with category {normalized_cat}, not in target categories")
+                continue
+            counts[normalized_cat] += 1
+            det['category'] = normalized_cat  # Update detection with normalized category
+            valid_detections.append({
+                "bounding_box": det.get("bounding_box"),
+                "category": normalized_cat,
+                "confidence": det.get("confidence"),
+                "track_id": det.get("track_id"),
+                "frame_id": det.get("frame_id")
+            })
+            self.logger.debug(f"Counted detection for category {normalized_cat}, confidence {det.get('confidence')}")
+
         return {
             "total_count": sum(counts.values()),
             "per_category_count": counts,
-            "detections":
-                {
-                    "bounding_box": det.get("bounding_box"),
-                    "category": det.get("category"),
-                    "confidence": det.get("confidence"),
-                    "track_id": det.get("track_id"),
-                    "frame_id": det.get("frame_id")
-                }
-                for det in detections
-            ]
+            "detections": valid_detections
         }

     def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
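The normalization chain added in `_count_categories` canonicalizes raw labels before counting. Here is how that exact expression behaves on a few sample labels (the inputs are illustrative):

```python
def normalize_category(cat: str) -> str:
    # Same expression as the added line: hyphens to spaces, Title Case, spaces back to hyphens.
    return cat.replace('-', ' ').title().replace(' ', '-')

for raw in ["solid-line", "DIVIDER-LINE", "divider line", "Road-Markings"]:
    print(raw, "->", normalize_category(raw))
# solid-line -> Solid-Line
# DIVIDER-LINE -> Divider-Line
# divider line -> Divider-Line
# Road-Markings -> Road-Markings
```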
@@ -582,7 +610,7 @@ class LaneDetectionUseCase(BaseProcessor):
             self._canonical_tracks[canonical_id] = {
                 "last_bbox": bbox,
                 "last_update": now,
-                "raw_ids": {raw_id}
+                "raw_ids": {raw_id},
             }
             return canonical_id

@@ -191,7 +191,7 @@ matrice/deploy/utils/post_processing/usecases/plaque_segmentation_img.py,sha256=
 matrice/deploy/utils/post_processing/usecases/pothole_segmentation.py,sha256=6Mv8SoEE5CGItY7S0g-SY5Lb3DV-WWVMlpEp04a86a8,43197
 matrice/deploy/utils/post_processing/usecases/ppe_compliance.py,sha256=G9P9j9E9nfNJInHJxmK1Lb4daFBlG5hq0aqotTLvFFE,30146
 matrice/deploy/utils/post_processing/usecases/price_tag_detection.py,sha256=Sn_Dvwf5f_dcfaiPIl-pqckgP8z96CeNIJ4hfeab3FM,39880
-matrice/deploy/utils/post_processing/usecases/road_lane_detection.py,sha256=
+matrice/deploy/utils/post_processing/usecases/road_lane_detection.py,sha256=ljWoDeFJb0IJJPClYaZrNnBZd87JqKI64jE_I79GAYo,31146
 matrice/deploy/utils/post_processing/usecases/shelf_inventory_detection.py,sha256=1juloltHnCj3U499Aps0ggE0nEI37x3iKe4DgfP4RCw,29140
 matrice/deploy/utils/post_processing/usecases/shoplifting_detection.py,sha256=zqeV_ARV5gJqMY2sJGBjlU6UOb0SthGGbC8UNj_mycs,34701
 matrice/deploy/utils/post_processing/usecases/shopping_cart_analysis.py,sha256=9Ej2xiZM7yq5sOBcSXIllou_z0rSZDJ_QHyYz6HxZSY,43957
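Each RECORD line pairs a file path with a SHA-256 digest (urlsafe base64, padding stripped) and the file size in bytes, which is why the entry above changes whenever the module's contents change. A small sketch for computing such an entry for an arbitrary local file (the path in the example is hypothetical):

```python
import base64
import hashlib
from pathlib import Path

def record_entry(path: str) -> str:
    """Build a 'path,sha256=<urlsafe-b64 digest>,<size>' line like the ones in RECORD."""
    data = Path(path).read_bytes()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode("ascii")
    return f"{path},sha256={digest},{len(data)}"

# Example (assumes the file exists locally):
# print(record_entry("matrice/deploy/utils/post_processing/usecases/road_lane_detection.py"))
```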
@@ -225,8 +225,8 @@ matrice/deployment/camera_manager.py,sha256=ReBZqm1CNXRImKcbcZ4uWAT3TUWkof1D28oB
 matrice/deployment/deployment.py,sha256=PLIUD-PxTaC2Zxb3Y12wUddsryV-OJetjCjLoSUh7S4,48103
 matrice/deployment/inference_pipeline.py,sha256=bXLgd29ViA7o0c7YWLFJl1otBUQfTPb61jS6VawQB0Y,37918
 matrice/deployment/streaming_gateway_manager.py,sha256=w5swGsuFVfZIdOm2ZuBHRHlRdYYJMLopLsf2gb91lQ8,20946
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
+matrice-1.0.99144.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
+matrice-1.0.99144.dist-info/METADATA,sha256=xF8gj0tJeJ-JY50BNsnZfd9LLxRif6dtncWMdvo1pE4,14624
+matrice-1.0.99144.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+matrice-1.0.99144.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
+matrice-1.0.99144.dist-info/RECORD,,