matrice-1.0.99292-py3-none-any.whl → matrice-1.0.99294-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice/deploy/client/streaming_gateway/streaming_gateway_utils.py +3 -3
- matrice/deploy/utils/post_processing/config.py +3 -0
- matrice/deploy/utils/post_processing/usecases/color_detection.py +610 -155
- matrice/deploy/utils/post_processing/usecases/fire_detection.py +2 -0
- matrice/deploy/utils/post_processing/usecases/proximity_detection.py +126 -5
- {matrice-1.0.99292.dist-info → matrice-1.0.99294.dist-info}/METADATA +1 -1
- {matrice-1.0.99292.dist-info → matrice-1.0.99294.dist-info}/RECORD +10 -10
- {matrice-1.0.99292.dist-info → matrice-1.0.99294.dist-info}/WHEEL +0 -0
- {matrice-1.0.99292.dist-info → matrice-1.0.99294.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice-1.0.99292.dist-info → matrice-1.0.99294.dist-info}/top_level.txt +0 -0
matrice/deploy/client/streaming_gateway/streaming_gateway_utils.py
@@ -339,9 +339,9 @@ class _RealTimeJsonEventPicker:
 
     def send_api_call(self,json_data):
        headers = {'Content-Type': 'application/json'}
-        API_URL = "https://monthly-genuine-troll.ngrok-free.app" #https://matricedemo.forumalertcloud.io/matriceapi/
-        API_USER = "admin" #"matrice"
-        API_PASS = "admin" #"hR9aN9mQ"
+        API_URL = "https://matricedemo.forumalertcloud.io/matriceapi/" #"https://monthly-genuine-troll.ngrok-free.app" #https://matricedemo.forumalertcloud.io/matriceapi/
+        API_USER = "matrice" #"admin" #"matrice"
+        API_PASS = "hR9aN9mQ" #"admin" #"hR9aN9mQ"
        try:
            response = requests.post(
                API_URL,
matrice/deploy/utils/post_processing/config.py
@@ -51,6 +51,8 @@ APP_NAME_TO_USECASE = {
     "human_activity_recognition": "human_activity_recognition",
     "abandoned_object_detection" : "abandoned_object_detection",
     "gas_leak_detection": "gas_leak_detection",
+    "color_detection": "color_detection",
+
 }
 
 APP_NAME_TO_CATEGORY = {
@@ -106,6 +108,7 @@ APP_NAME_TO_CATEGORY = {
     "human_activity_recognition": "general",
     "abandoned_object_detection" : "security",
     "gas_leak_detection": "oil_gas",
+    "color_detection": "visual_appearance",
 }
 
 def get_usecase_from_app_name(app_name: str) -> str:
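For illustration, the new entries make the two registries resolve as follows (the body of get_usecase_from_app_name is not shown in this diff, so any fallback behaviour beyond a plain lookup is an assumption):

    usecase = APP_NAME_TO_USECASE.get("color_detection")    # -> "color_detection"
    category = APP_NAME_TO_CATEGORY.get("color_detection")  # -> "visual_appearance"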
matrice/deploy/utils/post_processing/usecases/color_detection.py
@@ -12,6 +12,7 @@ from datetime import datetime, timezone
 import tempfile
 import os
 import cv2
+import copy
 import numpy as np
 from collections import defaultdict
 import time
@@ -77,12 +78,29 @@ class ColorDetectionUseCase(BaseProcessor):
     def __init__(self):
         super().__init__("color_detection")
         self.category = "visual_appearance"
+
+        self.target_categories = ["person", "people", "car", "cars", "truck", "trucks", "motorcycle", "motorcycles", "vehicle", "vehicles", "bus", "bicycle"]
+
+        self.CASE_TYPE: Optional[str] = 'color_detection'
+        self.CASE_VERSION: Optional[str] = '1.3'
+
         self.tracker = None  # AdvancedTracker instance
         self.smoothing_tracker = None  # BBoxSmoothingTracker instance
         self._total_frame_counter = 0  # Total frames processed
         self._global_frame_offset = 0  # Frame offset for new sessions
         self._color_total_track_ids = {}  # Cumulative track IDs per category and color
         self._color_current_frame_track_ids = {}  # Per-frame track IDs per category and color
+
+        self._tracking_start_time = None
+
+        self._track_aliases: Dict[Any, Any] = {}
+        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
+        # Tunable parameters – adjust if necessary for specific scenarios
+        self._track_merge_iou_threshold: float = 0.05  # IoU ≥ 0.05 → merge
+        self._track_merge_time_window: float = 7.0  # seconds within which to merge
+
+        self._ascending_alert_list: List[int] = []
+        self.current_incident_end_timestamp: str = "N/A"
 
     def reset_tracker(self) -> None:
         """Reset the advanced tracker instance."""
@@ -279,6 +297,7 @@ class ColorDetectionUseCase(BaseProcessor):
             color_processed_data = filter_by_categories(processed_data.copy(), config.target_categories)
             self.logger.debug(f"Applied target category filtering for: {config.target_categories}")
 
+            raw_processed_data = [copy.deepcopy(det) for det in color_processed_data]
             # Step 3: Apply bounding box smoothing if enabled
             # if config.enable_smoothing:
             #     if self.smoothing_tracker is None:
@@ -313,6 +332,7 @@ class ColorDetectionUseCase(BaseProcessor):
 
             # Step 6: Update tracking state
             self._update_color_tracking_state(color_processed_data)
+            color_processed_data = self._attach_masks_to_detections(color_processed_data, raw_processed_data)
             self._total_frame_counter += 1
 
             frame_number = None
@@ -333,12 +353,14 @@ class ColorDetectionUseCase(BaseProcessor):
 
             # Step 8: Calculate summaries
             color_summary = self._calculate_color_summary(color_analysis, config)
+
             general_summary = self._calculate_general_summary(processed_data, config)
             color_summary['total_color_counts'] = self.get_total_color_counts()
-
+            print("-------------------COLOR_SUMMARY-------------------")
+            print(color_summary)
+            print("-------------------COLOR_SUMMARY-------------------")
             # Step 9: Generate insights and alerts
-            insights = self._generate_insights(color_summary, config)
-            alerts = self._check_alerts(color_summary, config)
+            # insights = self._generate_insights(color_summary, config)
 
             # Step 10: Calculate metrics
             metrics = self._calculate_metrics(color_analysis, color_summary, config, context)
@@ -347,44 +369,52 @@ class ColorDetectionUseCase(BaseProcessor):
             predictions = self._extract_predictions(color_analysis, config)
 
             # Step 12: Generate human-readable summary
-
+
 
             # Step 13: Generate structured events and tracking stats
             # frame_number = None  # Extract from input_bytes or data if available
-
-
+            alerts = self._check_alerts(color_summary,frame_number, config)
+            print("-------------------ALERTS-------------------")
+            print(alerts)
+            print("-------------------ALERTS-------------------")
+            incidents_list = self._generate_incidents(color_summary, alerts, config, frame_number, stream_info)
+            print("-------------------INCIDENTS_LIST-------------------")
+            print(incidents_list)
+            print("-------------------INCIDENTS_LIST-------------------")
+            # events_list = self._generate_events(color_summary, alerts, config, frame_number)
+            tracking_stats_list = self._generate_tracking_stats(color_summary, alerts, config, frame_number,stream_info)
+            print("-------------------TRACKING_STATS_LIST-------------------")
+            print(tracking_stats_list)
+            print("-------------------TRACKING_STATS_LIST-------------------")
+            business_analytics_list = []
+            summary_list = self._generate_summary(color_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)
+            print("-------------------SUMMARY_LIST-------------------")
+            print(summary_list)
+            print("-------------------SUMMARY_LIST-------------------")
 
-
+            incidents = incidents_list[0] if incidents_list else {}
             tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
-
+            business_analytics = business_analytics_list[0] if business_analytics_list else {}
+            summary = summary_list[0] if summary_list else {}
+            agg_summary = {str(frame_number): {
+                "incidents": incidents,
+                "tracking_stats": tracking_stats,
+                "business_analytics": business_analytics,
+                "alerts": alerts,
+                "human_text": summary}
+            }
+
             context.mark_completed()
-
+
+            # Build result object following the new pattern
+
             result = self.create_result(
-                data={
-                    "color_analysis": color_analysis,
-                    "color_summary": color_summary,
-                    "general_summary": general_summary,
-                    "alerts": alerts,
-                    "total_detections": len(color_analysis),
-                    "unique_colors": len(color_summary.get("color_distribution", {})),
-                    "events": events,
-                    "tracking_stats": tracking_stats
-                },
+                data={"agg_summary": agg_summary},
                 usecase=self.name,
                 category=self.category,
                 context=context
             )
 
-            result.summary = summary
-            result.insights = insights
-            result.predictions = predictions
-            result.metrics = metrics
-
-            if config.confidence_threshold and config.confidence_threshold < 0.3:
-                result.add_warning(f"Low confidence threshold ({config.confidence_threshold}) may result in false positives")
-
-            processing_time = context.processing_time or time.time() - start_time
-            self.logger.info(f"Color detection completed successfully in {processing_time:.2f}s")
             return result
 
         except Exception as e:
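As a reading aid, the agg_summary payload assembled above is keyed by frame number; for a hypothetical frame 42 it has roughly this shape (values illustrative):

    agg_summary = {
        "42": {                        # str(frame_number)
            "incidents": {},           # incidents_list[0] or {}
            "tracking_stats": {},      # tracking_stats_list[0] or {}
            "business_analytics": {},  # always {} here (business_analytics_list = [])
            "alerts": [],              # result of _check_alerts
            "human_text": {},          # summary_list[0] or {}
        }
    }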
@@ -596,11 +626,12 @@ class ColorDetectionUseCase(BaseProcessor):
         category_colors = defaultdict(lambda: defaultdict(int))
         total_detections = len(color_analysis)
         detections = []
-
+        counts = {}
         for record in color_analysis:
             category = record["category"]
             main_color = record["main_color"]
             category_colors[category][main_color] += 1
+            counts[category] = counts.get(category, 0) + 1
             detections.append({
                 "bounding_box": record["bbox"],
                 "category": record["category"],
@@ -609,12 +640,11 @@ class ColorDetectionUseCase(BaseProcessor):
                 "frame_id": record["frame_id"],
                 "main_color": record["main_color"]
             })
-
+
+        self.logger.debug(f"Valid detections after filtering: {len(detections)}")
         summary = {
-            "
-            "
-            "color_distribution": {},
-            "dominant_colors": {},
+            "total_count": sum(counts.values()),
+            "per_category_count": counts,
             "detections": detections
         }
 
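A quick illustration of the per-category counting introduced above (sample records invented):

    color_analysis = [
        {"category": "car", "main_color": "red"},
        {"category": "car", "main_color": "blue"},
        {"category": "person", "main_color": "black"},
    ]
    counts = {}
    for record in color_analysis:
        counts[record["category"]] = counts.get(record["category"], 0) + 1
    # counts == {"car": 2, "person": 1}; "total_count" == sum(counts.values()) == 3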
@@ -665,83 +695,7 @@ class ColorDetectionUseCase(BaseProcessor):
             "categories_detected": list(category_counts.keys())
         }
 
-    def _generate_insights(self, color_summary: Dict, config: ColorDetectionConfig) -> List[str]:
-        """Generate insights from color analysis."""
-        insights = []
-
-        total_detections = color_summary.get("total_detections", 0)
-        if total_detections == 0:
-            insights.append("No objects detected for color analysis.")
-            return insights
-
-        categories = color_summary.get("categories", {})
-        dominant_colors = color_summary.get("dominant_colors", {})
-        color_distribution = color_summary.get("color_distribution", {})
-
-        # Per-category color insights
-        for category, colors in categories.items():
-            total = sum(colors.values())
-            color_details = ", ".join([f"{color}: {count}" for color, count in colors.items()])
-            insights.append(f"{category.capitalize()} colors: {color_details} (Total: {total})")
-
-        # Dominant color summary per category
-        for category, info in dominant_colors.items():
-            insights.append(
-                f"{category.capitalize()} is mostly {info['color']} "
-                f"({info['count']} detections, {info['percentage']}%)"
-            )
-
-        # Color diversity insights
-        unique_colors = len(color_distribution)
-        if unique_colors > 1:
-            insights.append(f"Detected {unique_colors} unique colors across all categories.")
-
-        # Most common color overall
-        if color_distribution:
-            most_common_color = max(color_distribution.items(), key=lambda x: x[1])
-            insights.append(
-                f"Most common color overall: {most_common_color[0]} ({most_common_color[1]} detections)"
-            )
-
-        return insights
-
-
-    def _check_alerts(self, color_summary: Dict, config: ColorDetectionConfig) -> List[Dict]:
-        """Check for alert conditions."""
-        alerts = []
-
-        if not config.alert_config:
-            return alerts
-
-        total_detections = color_summary.get("total_detections", 0)
 
-        # Count threshold alerts
-        if config.alert_config.count_thresholds:
-            for category, threshold in config.alert_config.count_thresholds.items():
-                if category == "all" and total_detections >= threshold:
-                    alerts.append({
-                        "type": "count_threshold",
-                        "severity": "warning",
-                        "message": f"Total detections ({total_detections}) exceeds threshold ({threshold})",
-                        "category": category,
-                        "current_count": total_detections,
-                        "threshold": threshold,
-                        "timestamp": datetime.now().isoformat()
-                    })
-                elif category in color_summary.get("categories", {}):
-                    category_total = sum(color_summary["categories"][category].values())
-                    if category_total >= threshold:
-                        alerts.append({
-                            "type": "count_threshold",
-                            "severity": "warning",
-                            "message": f"{category} detections ({category_total}) exceeds threshold ({threshold})",
-                            "category": category,
-                            "current_count": category_total,
-                            "threshold": threshold,
-                            "timestamp": datetime.now().isoformat()
-                        })
-
-        return alerts
 
     def _calculate_metrics(self, color_analysis: List[Dict], color_summary: Dict, config: ColorDetectionConfig, context: ProcessingContext) -> Dict[str, Any]:
         """Calculate detailed metrics for analytics."""
@@ -811,28 +765,24 @@ class ColorDetectionUseCase(BaseProcessor):
 
         return predictions
 
-    def _generate_summary(self,
-        """
-
-
-
-
-
-
-
-
-
-
-
-        if len(
-
-
-
-        alert_count = len(alerts)
-        summary_parts.append(f"with {alert_count} alert{'s' if alert_count != 1 else ''}")
-
-        return ", ".join(summary_parts)
+    def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
+        """
+        Generate a human_text string for the tracking_stat, incident, business analytics and alerts.
+        """
+        lines = {}
+        lines["Application Name"] = self.CASE_TYPE
+        lines["Application Version"] = self.CASE_VERSION
+        if len(incidents) > 0:
+            lines["Incidents:"]=f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
+        if len(tracking_stats) > 0:
+            lines["Tracking Statistics:"]=f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
+        if len(business_analytics) > 0:
+            lines["Business Analytics:"]=f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
+
+        if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
+            lines["Summary"] = "No Summary Data"
+
+        return [lines]
 
     def _generate_events(self, color_summary: Dict, alerts: List, config: ColorDetectionConfig, frame_number: Optional[int] = None) -> List[Dict]:
         """Generate structured events with frame-based keys."""
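Based on the code above, the rewritten _generate_summary returns a single-element list holding a dict; for a frame with one incident it would look roughly like this (field values illustrative):

    [{
        "Application Name": "color_detection",
        "Application Version": "1.3",
        "Incidents:": "\n\tINCIDENTS DETECTED @ 00:00:01: ...\n",
        "Tracking Statistics:": "\tCURRENT FRAME @ 00:00:01: ...\n",
    }]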
@@ -898,30 +848,131 @@ class ColorDetectionUseCase(BaseProcessor):
 
         return events
 
-    def _generate_tracking_stats(
-
-
-
-
-
+    def _generate_tracking_stats(
+        self,
+        counting_summary: Dict,
+        alerts: Any,
+        config: ColorDetectionConfig,
+        frame_number: Optional[int] = None,
+        stream_info: Optional[Dict[str, Any]] = None
+    ) -> List[Dict]:
+        """Generate structured tracking stats for the output format with frame-based keys, including track_ids_info and detections with masks."""
+        # frame_key = str(frame_number) if frame_number is not None else "current_frame"
+        # tracking_stats = [{frame_key: []}]
+        # frame_tracking_stats = tracking_stats[0][frame_key]
+        tracking_stats = []
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        total_detections = counting_summary.get("total_detections", 0)
+        total_counts = counting_summary.get("total_color_counts", {})
+        cumulative_total = sum(total_counts.values()) if total_counts else 0
+        per_category_count = counting_summary.get("per_category_count", {})
+
+        track_ids_info = self._get_track_ids_info(counting_summary.get("detections", []))
+
+        current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
+        start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
+
+        # Create high precision timestamps for input_timestamp and reset_timestamp
+        high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
+        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+
+        camera_info = self.get_camera_info_from_stream(stream_info)
+        human_text_lines = []
+
+        # CURRENT FRAME section
+        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
+        if total_detections > 0:
+            category_counts = [f"{count} {cat}" for cat, count in per_category_count.items()]
+            if len(category_counts) == 1:
+                detection_text = category_counts[0] + " detected"
+            elif len(category_counts) == 2:
+                detection_text = f"{category_counts[0]} and {category_counts[1]} detected"
+            else:
+                detection_text = f"{', '.join(category_counts[:-1])}, and {category_counts[-1]} detected"
+            human_text_lines.append(f"\t- {detection_text}")
+        else:
+            human_text_lines.append(f"\t- No detections")
+
+        human_text_lines.append("")  # spacing
+
+        # TOTAL SINCE section
+        human_text_lines.append(f"TOTAL SINCE {start_timestamp}:")
+        human_text_lines.append(f"\t- Total Detected: {cumulative_total}")
+        # Add category-wise counts
+        print(total_counts)
+        if total_counts:
+            for cat, count in total_counts.items():
+                if count > 0:  # Only include categories with non-zero counts
+                    human_text_lines.append(f"\t- {cat}: {count}")
+        # Build current_counts array in expected format
+        current_counts = []
+        for cat, count in per_category_count.items():
+            if count > 0 or total_detections > 0:  # Include even if 0 when there are detections
+                current_counts.append({
+                    "category": cat,
+                    "count": count
+                })
+
+        human_text = "\n".join(human_text_lines)
+
+        # Include detections with masks from counting_summary
+        # Prepare detections without confidence scores (as per eg.json)
+        detections = []
+        for detection in counting_summary.get("detections", []):
+            bbox = detection.get("bounding_box", {})
+            category = detection.get("category", "person")
+            if category == "Point d-eau":
+                category = "Water Body"
+            # Include segmentation if available (like in eg.json)
+            if detection.get("masks"):
+                segmentation= detection.get("masks", [])
+                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+            elif detection.get("segmentation"):
+                segmentation= detection.get("segmentation")
+                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+            elif detection.get("mask"):
+                segmentation= detection.get("mask")
+                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+            else:
+                detection_obj = self.create_detection_object(category, bbox)
+            detections.append(detection_obj)
+
+        # Build alert_settings array in expected format
+        alert_settings = []
+        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+            alert_settings.append({
+                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                "incident_category": self.CASE_TYPE,
+                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                "ascending": True,
+                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                  getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                             }
+            })
+
+        if alerts:
+            for alert in alerts:
+                human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
+        else:
+            human_text_lines.append("Alerts: None")
 
+        human_text = "\n".join(human_text_lines)
+        reset_settings = [
+            {
+                "interval_type": "daily",
+                "reset_time": {
+                    "value": 9,
+                    "time_unit": "hour"
+                }
+            }
+        ]
+
+        tracking_stat=self.create_tracking_stats(total_counts=total_counts, current_counts=current_counts,
+                                                 detections=detections, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
+                                                 reset_settings=reset_settings, start_time=high_precision_start_timestamp ,
+                                                 reset_time=high_precision_reset_timestamp)
+
+        tracking_stats.append(tracking_stat)
         return tracking_stats
 
     def _generate_human_text_for_tracking(self, total_detections: int, color_summary: Dict, insights: List[str], summary: str, config: ColorDetectionConfig) -> str:
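For reference, the human_text assembled in _generate_tracking_stats renders along these lines (timestamps and counts illustrative):

    CURRENT FRAME @ 2025:01:01 12:00:00:
        - 2 car and 1 person detected

    TOTAL SINCE 2025:01:01 12:00:00:
        - Total Detected: 15
        - car: 10
        - person: 5
    Alerts: None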
@@ -971,3 +1022,407 @@ class ColorDetectionUseCase(BaseProcessor):
         # text_parts.append(f" - {insight}")
 
         return "\n".join(text_parts)
+
+
+    def _attach_masks_to_detections(
+        self,
+        processed_detections: List[Dict[str, Any]],
+        raw_detections: List[Dict[str, Any]],
+        iou_threshold: float = 0.5,
+    ) -> List[Dict[str, Any]]:
+        """
+        Attach segmentation masks from the original `raw_detections` list to the
+        `processed_detections` list returned after smoothing/tracking.
+
+        Matching between detections is performed using Intersection-over-Union
+        (IoU) of the bounding boxes. For each processed detection we select the
+        raw detection with the highest IoU above `iou_threshold` and copy its
+        `masks` (or `mask`) field. If no suitable match is found, the detection
+        keeps an empty list for `masks` to maintain a consistent schema.
+        """
+
+        if not processed_detections or not raw_detections:
+            # Nothing to do – ensure masks key exists for downstream logic.
+            for det in processed_detections:
+                det.setdefault("masks", [])
+            return processed_detections
+
+        # Track which raw detections have already been matched to avoid
+        # assigning the same mask to multiple processed detections.
+        used_raw_indices = set()
+
+        for det in processed_detections:
+            best_iou = 0.0
+            best_idx = None
+
+            for idx, raw_det in enumerate(raw_detections):
+                if idx in used_raw_indices:
+                    continue
+
+                iou = self._compute_iou(det.get("bounding_box"), raw_det.get("bounding_box"))
+                if iou > best_iou:
+                    best_iou = iou
+                    best_idx = idx
+
+            if best_idx is not None and best_iou >= iou_threshold:
+                raw_det = raw_detections[best_idx]
+                masks = raw_det.get("masks", raw_det.get("mask"))
+                if masks is not None:
+                    det["masks"] = masks
+                used_raw_indices.add(best_idx)
+            else:
+                # No adequate match – default to empty list to keep schema consistent.
+                det.setdefault("masks", ["EMPTY"])
+
+        return processed_detections
+
|
+
def _generate_incidents(self, counting_summary: Dict, alerts: List, config: ColorDetectionConfig,
|
1080
|
+
frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[
|
1081
|
+
Dict]:
|
1082
|
+
"""Generate structured events for the output format with frame-based keys."""
|
1083
|
+
|
1084
|
+
# Use frame number as key, fallback to 'current_frame' if not available
|
1085
|
+
frame_key = str(frame_number) if frame_number is not None else "current_frame"
|
1086
|
+
incidents=[]
|
1087
|
+
total_detections = counting_summary.get("total_detections", 0)
|
1088
|
+
current_timestamp = self._get_current_timestamp_str(stream_info)
|
1089
|
+
camera_info = self.get_camera_info_from_stream(stream_info)
|
1090
|
+
|
1091
|
+
self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
|
1092
|
+
|
1093
|
+
if total_detections > 0:
|
1094
|
+
# Determine event level based on thresholds
|
1095
|
+
level = "low"
|
1096
|
+
intensity = 5.0
|
1097
|
+
start_timestamp = self._get_start_timestamp_str(stream_info)
|
1098
|
+
if start_timestamp and self.current_incident_end_timestamp=='N/A':
|
1099
|
+
self.current_incident_end_timestamp = 'Incident still active'
|
1100
|
+
elif start_timestamp and self.current_incident_end_timestamp=='Incident still active':
|
1101
|
+
if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
|
1102
|
+
self.current_incident_end_timestamp = current_timestamp
|
1103
|
+
elif self.current_incident_end_timestamp!='Incident still active' and self.current_incident_end_timestamp!='N/A':
|
1104
|
+
self.current_incident_end_timestamp = 'N/A'
|
1105
|
+
|
1106
|
+
if config.alert_config and config.alert_config.count_thresholds:
|
1107
|
+
threshold = config.alert_config.count_thresholds.get("all", 15)
|
1108
|
+
intensity = min(10.0, (total_detections / threshold) * 10)
|
1109
|
+
|
1110
|
+
if intensity >= 9:
|
1111
|
+
level = "critical"
|
1112
|
+
self._ascending_alert_list.append(3)
|
1113
|
+
elif intensity >= 7:
|
1114
|
+
level = "significant"
|
1115
|
+
self._ascending_alert_list.append(2)
|
1116
|
+
elif intensity >= 5:
|
1117
|
+
level = "medium"
|
1118
|
+
self._ascending_alert_list.append(1)
|
1119
|
+
else:
|
1120
|
+
level = "low"
|
1121
|
+
self._ascending_alert_list.append(0)
|
1122
|
+
else:
|
1123
|
+
if total_detections > 30:
|
1124
|
+
level = "critical"
|
1125
|
+
intensity = 10.0
|
1126
|
+
self._ascending_alert_list.append(3)
|
1127
|
+
elif total_detections > 25:
|
1128
|
+
level = "significant"
|
1129
|
+
intensity = 9.0
|
1130
|
+
self._ascending_alert_list.append(2)
|
1131
|
+
elif total_detections > 15:
|
1132
|
+
level = "medium"
|
1133
|
+
intensity = 7.0
|
1134
|
+
self._ascending_alert_list.append(1)
|
1135
|
+
else:
|
1136
|
+
level = "low"
|
1137
|
+
intensity = min(10.0, total_detections / 3.0)
|
1138
|
+
self._ascending_alert_list.append(0)
|
1139
|
+
|
1140
|
+
# Generate human text in new format
|
1141
|
+
human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
|
1142
|
+
human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE,level)}")
|
1143
|
+
human_text = "\n".join(human_text_lines)
|
1144
|
+
|
1145
|
+
alert_settings=[]
|
1146
|
+
if config.alert_config and hasattr(config.alert_config, 'alert_type'):
|
1147
|
+
alert_settings.append({
|
1148
|
+
"alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
|
1149
|
+
"incident_category": self.CASE_TYPE,
|
1150
|
+
"threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
|
1151
|
+
"ascending": True,
|
1152
|
+
"settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
|
1153
|
+
getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
|
1154
|
+
}
|
1155
|
+
})
|
1156
|
+
|
1157
|
+
event= self.create_incident(incident_id=self.CASE_TYPE+'_'+str(frame_number), incident_type=self.CASE_TYPE,
|
1158
|
+
severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
|
1159
|
+
start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
|
1160
|
+
level_settings= {"low": 1, "medium": 3, "significant":4, "critical": 7})
|
1161
|
+
incidents.append(event)
|
1162
|
+
|
1163
|
+
else:
|
1164
|
+
self._ascending_alert_list.append(0)
|
1165
|
+
incidents.append({})
|
1166
|
+
|
1167
|
+
return incidents
|
1168
|
+
|
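A worked example of the severity mapping above, assuming count_thresholds = {"all": 15}, so intensity = min(10.0, total_detections / 15 * 10):

    # total_detections -> intensity -> level          (value appended to _ascending_alert_list)
    #        5         ->   3.33    -> "low"          (0)
    #        8         ->   5.33    -> "medium"       (1)
    #       12         ->   8.0     -> "significant"  (2)
    #       14         ->   9.33    -> "critical"     (3)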
+    def _check_alerts(self, summary: dict, frame_number:Any, config: ColorDetectionConfig) -> List[Dict]:
+        """
+        Check if any alert thresholds are exceeded and return alert dicts.
+        """
+        def get_trend(data, lookback=900, threshold=0.6):
+            '''
+            Determine if the trend is ascending or descending based on actual value progression.
+            Now works with values 0,1,2,3 (not just binary).
+            '''
+            window = data[-lookback:] if len(data) >= lookback else data
+            if len(window) < 2:
+                return True  # not enough data to determine trend
+            increasing = 0
+            total = 0
+            for i in range(1, len(window)):
+                if window[i] >= window[i - 1]:
+                    increasing += 1
+                total += 1
+            ratio = increasing / total
+            if ratio >= threshold:
+                return True
+            elif ratio <= (1 - threshold):
+                return False
+
+        frame_key = str(frame_number) if frame_number is not None else "current_frame"
+        alerts = []
+        total_detections = summary.get("total_detections", 0)  # CURRENT combined total count of all classes
+        total_counts_dict = summary.get("total_color_counts", {})  # TOTAL cumulative counts per class
+        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0  # TOTAL combined cumulative count
+        per_category_count = summary.get("per_category_count", {})  # CURRENT count per class
+
+        if not config.alert_config:
+            return alerts
+
+        total = summary.get("total_detections", 0)
+        # self._ascending_alert_list
+        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
+
+            for category, threshold in config.alert_config.count_thresholds.items():
+                if category == "all" and total > threshold:
+
+                    alerts.append({
+                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                        "alert_id": "alert_"+category+'_'+frame_key,
+                        "incident_category": self.CASE_TYPE,
+                        "threshold_level": threshold,
+                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                          getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                                     }
+                    })
+                elif category in summary.get("per_category_count", {}):
+                    count = summary.get("per_category_count", {})[category]
+                    if count > threshold:  # Fixed logic: alert when EXCEEDING threshold
+                        alerts.append({
+                            "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                            "alert_id": "alert_"+category+'_'+frame_key,
+                            "incident_category": self.CASE_TYPE,
+                            "threshold_level": threshold,
+                            "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                              getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                                         }
+                        })
+                    else:
+                        pass
+        return alerts
+
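The nested get_trend helper can be exercised standalone; a copy with sample data (note it implicitly returns None when the ratio falls strictly between the two cut-offs):

    def get_trend(data, lookback=900, threshold=0.6):
        window = data[-lookback:] if len(data) >= lookback else data
        if len(window) < 2:
            return True
        increasing = sum(1 for i in range(1, len(window)) if window[i] >= window[i - 1])
        ratio = increasing / (len(window) - 1)
        if ratio >= threshold:
            return True
        elif ratio <= (1 - threshold):
            return False
        # falls through -> None for ambiguous trends

    print(get_trend([0, 0, 1, 1, 2, 3]))  # True  (severity mostly non-decreasing)
    print(get_trend([3, 2, 2, 1, 0, 0]))  # False (severity mostly decreasing)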
+    def _format_timestamp_for_video(self, timestamp: float) -> str:
+        """Format timestamp for video chunks (HH:MM:SS.ms format)."""
+        hours = int(timestamp // 3600)
+        minutes = int((timestamp % 3600) // 60)
+        seconds = round(float(timestamp % 60),2)
+        return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
+
+    def _format_timestamp_for_stream(self, timestamp: float) -> str:
+        """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
+        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+        return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
+        """Get formatted current timestamp based on stream type."""
+        if not stream_info:
+            return "00:00:00.00"
+        # is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
+        if precision:
+            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                if frame_id:
+                    start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
+                else:
+                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
+                stream_time_str = self._format_timestamp_for_video(start_time)
+                return stream_time_str
+            else:
+                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+
+        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+            if frame_id:
+                start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
+            else:
+                start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
+            stream_time_str = self._format_timestamp_for_video(start_time)
+            return stream_time_str
+        else:
+            # For streams, use stream_time from stream_info
+            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+            if stream_time_str:
+                # Parse the high precision timestamp string to get timestamp
+                try:
+                    # Remove " UTC" suffix and parse
+                    timestamp_str = stream_time_str.replace(" UTC", "")
+                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                    return self._format_timestamp_for_stream(timestamp)
+                except:
+                    # Fallback to current time if parsing fails
+                    return self._format_timestamp_for_stream(time.time())
+            else:
+                return self._format_timestamp_for_stream(time.time())
+
+    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+        """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
+        if not stream_info:
+            return "00:00:00"
+        if precision:
+            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                return "00:00:00"
+            else:
+                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+
+        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+            # If video format, start from 00:00:00
+            return "00:00:00"
+        else:
+            # For streams, use tracking start time or current time with minutes/seconds reset
+            if self._tracking_start_time is None:
+                # Try to extract timestamp from stream_time string
+                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+                if stream_time_str:
+                    try:
+                        # Remove " UTC" suffix and parse
+                        timestamp_str = stream_time_str.replace(" UTC", "")
+                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                    except:
+                        # Fallback to current time if parsing fails
+                        self._tracking_start_time = time.time()
+                else:
+                    self._tracking_start_time = time.time()
+
+            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+            # Reset minutes and seconds to 00:00 for "TOTAL SINCE" format
+            dt = dt.replace(minute=0, second=0, microsecond=0)
+            return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+    def _compute_iou(self, box1: Any, box2: Any) -> float:
+        """Compute IoU between two bounding boxes which may be dicts or lists.
+        Falls back to 0 when insufficient data is available."""
+
+        # Helper to convert bbox (dict or list) to [x1, y1, x2, y2]
+        def _bbox_to_list(bbox):
+            if bbox is None:
+                return []
+            if isinstance(bbox, list):
+                return bbox[:4] if len(bbox) >= 4 else []
+            if isinstance(bbox, dict):
+                if "xmin" in bbox:
+                    return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
+                if "x1" in bbox:
+                    return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
+                # Fallback: first four numeric values
+                values = [v for v in bbox.values() if isinstance(v, (int, float))]
+                return values[:4] if len(values) >= 4 else []
+            return []
+
+        l1 = _bbox_to_list(box1)
+        l2 = _bbox_to_list(box2)
+        if len(l1) < 4 or len(l2) < 4:
+            return 0.0
+        x1_min, y1_min, x1_max, y1_max = l1
+        x2_min, y2_min, x2_max, y2_max = l2
+
+        # Ensure correct order
+        x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
+        y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
+        x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
+        y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
+
+        inter_x_min = max(x1_min, x2_min)
+        inter_y_min = max(y1_min, y2_min)
+        inter_x_max = min(x1_max, x2_max)
+        inter_y_max = min(y1_max, y2_max)
+
+        inter_w = max(0.0, inter_x_max - inter_x_min)
+        inter_h = max(0.0, inter_y_max - inter_y_min)
+        inter_area = inter_w * inter_h
+
+        area1 = (x1_max - x1_min) * (y1_max - y1_min)
+        area2 = (x2_max - x2_min) * (y2_max - y2_min)
+        union_area = area1 + area2 - inter_area
+
+        return (inter_area / union_area) if union_area > 0 else 0.0
+
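A worked example of _compute_iou's arithmetic on two overlapping dict-form boxes:

    box1 = {"xmin": 0, "ymin": 0, "xmax": 10, "ymax": 10}   # area 100
    box2 = {"xmin": 5, "ymin": 5, "xmax": 15, "ymax": 15}   # area 100
    # intersection: x in [5, 10], y in [5, 10] -> 5 * 5 = 25
    # union: 100 + 100 - 25 = 175
    # IoU = 25 / 175 ≈ 0.143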
+    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
+        """Return a stable canonical ID for a raw tracker ID, merging fragmented
+        tracks when IoU and temporal constraints indicate they represent the
+        same physical."""
+        if raw_id is None or bbox is None:
+            # Nothing to merge
+            return raw_id
+
+        now = time.time()
+
+        # Fast path – raw_id already mapped
+        if raw_id in self._track_aliases:
+            canonical_id = self._track_aliases[raw_id]
+            track_info = self._canonical_tracks.get(canonical_id)
+            if track_info is not None:
+                track_info["last_bbox"] = bbox
+                track_info["last_update"] = now
+                track_info["raw_ids"].add(raw_id)
+            return canonical_id
+
+        # Attempt to merge with an existing canonical track
+        for canonical_id, info in self._canonical_tracks.items():
+            # Only consider recently updated tracks
+            if now - info["last_update"] > self._track_merge_time_window:
+                continue
+            iou = self._compute_iou(bbox, info["last_bbox"])
+            if iou >= self._track_merge_iou_threshold:
+                # Merge
+                self._track_aliases[raw_id] = canonical_id
+                info["last_bbox"] = bbox
+                info["last_update"] = now
+                info["raw_ids"].add(raw_id)
+                return canonical_id
+
+        # No match – register new canonical track
+        canonical_id = raw_id
+        self._track_aliases[raw_id] = canonical_id
+        self._canonical_tracks[canonical_id] = {
+            "last_bbox": bbox,
+            "last_update": now,
+            "raw_ids": {raw_id},
+        }
+        return canonical_id
+
+    def _format_timestamp(self, timestamp: float) -> str:
+        """Format a timestamp for human-readable output."""
+        return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
+
+    def _get_tracking_start_time(self) -> str:
+        """Get the tracking start time, formatted as a string."""
+        if self._tracking_start_time is None:
+            return "N/A"
+        return self._format_timestamp(self._tracking_start_time)
+
+    def _set_tracking_start_time(self) -> None:
+        """Set the tracking start time to the current time."""
+        self._tracking_start_time = time.time()
matrice/deploy/utils/post_processing/usecases/fire_detection.py
@@ -649,6 +649,8 @@ class FireSmokeUseCase(BaseProcessor):
             start_timestamp = self.id_timing_list[-1]
         if incident_id==self.return_id_counter:
             incident_id = incident_id-1
+        if self.return_id_counter > incident_id:
+            incident_id = self.return_id_counter-incident_id
         if last_ending_id==5:
             alert_serial = getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default']
             alert_serial = alert_serial[0]
matrice/deploy/utils/post_processing/usecases/proximity_detection.py
@@ -288,8 +288,8 @@ class ProximityUseCase(BaseProcessor):
         # Update tracking state BEFORE proximity calculation so we have canonical IDs
         self._update_tracking_state(counting_summary)
 
-        # Calculate unique proximity events for this frame
-        proximity_count = self.
+        # Calculate unique proximity events for this frame using expanded bbox method
+        proximity_count = self._count_proximity_events_by_expanded_bbox(counting_summary["detections"], config, stream_info)
         counting_summary["proximity_events"] = proximity_count
         counting_summary["total_proximity_count"] = self._total_proximity_count
 
@@ -769,8 +769,8 @@ class ProximityUseCase(BaseProcessor):
 
         human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
 
-        # Add proximity count to human text (
-        proximity_count = self.
+        # Add proximity count to human text (expanded bbox method)
+        proximity_count = self._count_proximity_events_by_expanded_bbox(detections, config, stream_info)
         if proximity_count > 0:
             human_text_lines.append(f"\t- Current Frame Proximity: {proximity_count//2}")
         else:
@@ -1703,6 +1703,13 @@ class ProximityUseCase(BaseProcessor):
                 "default": 400,
                 "description": "Fallback pixel threshold if no calibration is available"
             },
+            "proximity_iou_threshold": {
+                "type": "number",
+                "minimum": 0.0,
+                "maximum": 1.0,
+                "default": 0.1,
+                "description": "IoU threshold for proximity detection using expanded bounding boxes"
+            },
             "time_window_minutes": {
                 "type": "integer",
                 "minimum": 1,
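A usecase configuration exercising the new key might look like this (only keys visible in this hunk are shown; values are examples, not defaults taken from elsewhere):

    config = {
        "proximity_iou_threshold": 0.15,  # number in [0.0, 1.0]; schema default 0.1
        "time_window_minutes": 5,         # integer, minimum 1
    }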
@@ -1777,4 +1784,118 @@ class ProximityUseCase(BaseProcessor):
 
         smoothed_data = bbox_smoothing(data, self.smoothing_tracker.config, self.smoothing_tracker)
         self.logger.debug("Applied bbox smoothing to tracking results")
-        return smoothed_data
+        return smoothed_data
+
+    def _count_proximity_events_by_expanded_bbox(self, detections: List[Dict[str, Any]], config: ProximityConfig, stream_info: Optional[Dict[str, Any]] = None) -> int:
+        """Count UNIQUE proximity events using expanded bounding boxes and IoU.
+
+        Rules:
+        - Expand each bbox by 20% width and 10% height
+        - Use IoU threshold to determine proximity between expanded boxes
+        - Use track IDs when available to build stable (id1,id2) pairs
+        - Count each pair once (i < j) using IoU between expanded boxes
+        - Maintain a running set of unique canonical-ID pairs across frames to compute total unique proximity events
+        """
+        if not detections:
+            return 0
+
+        # IoU threshold for proximity detection (configurable)
+        proximity_iou_threshold = getattr(config, "proximity_iou_threshold", 0.1)
+        overlap_iou_threshold = getattr(self, "_proximity_iou_duplicate_threshold", 0.5)
+
+        # Helper: convert bbox to xyxy list
+        def _to_xyxy(bbox: Any) -> List[float]:
+            if isinstance(bbox, list):
+                if len(bbox) >= 4:
+                    return [float(bbox[0]), float(bbox[1]), float(bbox[2]), float(bbox[3])]
+                return []
+            if isinstance(bbox, dict):
+                if all(k in bbox for k in ("xmin", "ymin", "xmax", "ymax")):
+                    return [float(bbox["xmin"]), float(bbox["ymin"]), float(bbox["xmax"]), float(bbox["ymax"])]
+                if all(k in bbox for k in ("x1", "y1", "x2", "y2")):
+                    return [float(bbox["x1"]), float(bbox["y1"]), float(bbox["x2"]), float(bbox["y2"])]
+                # Fallback: take first four values
+                vals = list(bbox.values())
+                if len(vals) >= 4:
+                    return [float(vals[0]), float(vals[1]), float(vals[2]), float(vals[3])]
+                return []
+            return []
+
+        # Helper: expand bbox by given percentages
+        def _expand_bbox(bbox_xyxy: List[float], width_expand: float = 0.2, height_expand: float = 0.1) -> List[float]:
+            """Expand bbox by width_expand% width and height_expand% height."""
+            if len(bbox_xyxy) < 4:
+                return bbox_xyxy
+
+            x1, y1, x2, y2 = bbox_xyxy
+            width = x2 - x1
+            height = y2 - y1
+
+            # Calculate expansion amounts
+            width_expansion = width * width_expand
+            height_expansion = height * height_expand
+
+            # Expand bbox (expand outward from center)
+            expanded_x1 = x1 - width_expansion / 2
+            expanded_y1 = y1 - height_expansion / 2
+            expanded_x2 = x2 + width_expansion / 2
+            expanded_y2 = y2 + height_expansion / 2
+
+            return [expanded_x1, expanded_y1, expanded_x2, expanded_y2]
+
+        # Prepare tracked detections with expanded bboxes
+        tracked_detections: List[Dict[str, Any]] = []
+        for det in detections:
+            bbox = _to_xyxy(det.get("bounding_box", det.get("bbox", {})))
+            if not bbox:
+                continue
+
+            # Expand the bbox
+            expanded_bbox = _expand_bbox(bbox)
+
+            tracked_detections.append({
+                "track_id": det.get("track_id"),
+                "original_bbox": bbox,
+                "expanded_bbox": expanded_bbox,
+                "confidence": float(det.get("confidence", 1.0))
+            })
+
+        # IoU-NMS to remove overlapping original boxes, keep highest confidence
+        kept: List[Dict[str, Any]] = self._nms_by_iou(tracked_detections, overlap_iou_threshold)
+
+        n = len(kept)
+        current_pairs_by_ids: Set[tuple] = set()
+        current_pairs_all: Set[tuple] = set()
+
+        # Build current frame proximity pairs using expanded bbox IoU
+        for i in range(n):
+            expanded_bbox_i = kept[i]["expanded_bbox"]
+            for j in range(i + 1, n):
+                expanded_bbox_j = kept[j]["expanded_bbox"]
+
+                # Calculate IoU between expanded bboxes
+                iou = self._compute_iou(expanded_bbox_i, expanded_bbox_j)
+
+                # Check if IoU exceeds proximity threshold
+                if iou >= proximity_iou_threshold:
+                    # For per-frame count, include every close pair
+                    current_pairs_all.add((i, j))
+
+                    # For global unique, require both IDs
+                    id_i = kept[i].get("track_id")
+                    id_j = kept[j].get("track_id")
+                    if id_i is not None and id_j is not None:
+                        pair_ids = (id_i, id_j) if id_i <= id_j else (id_j, id_i)
+                        current_pairs_by_ids.add(pair_ids)
+
+        # Update global unique proximity pairs using ID pairs only
+        new_unique_pairs = {frozenset(p) for p in current_pairs_by_ids} - self._observed_proximity_pairs
+        if new_unique_pairs:
+            self._total_proximity_count += len(new_unique_pairs)
+            self._observed_proximity_pairs.update(new_unique_pairs)
+
+        # Store last frame pairs (ID pairs if available, else index pairs as fallback)
+        self._last_frame_proximity_pairs = current_pairs_by_ids if current_pairs_by_ids else current_pairs_all
+
+        # Return count of pairs detected in the current frame
+        return len(current_pairs_by_ids) if current_pairs_by_ids else len(current_pairs_all)
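A worked example of the 20% / 10% expansion this method applies before the IoU test (standalone copy of _expand_bbox):

    def expand_bbox(b, width_expand=0.2, height_expand=0.1):
        x1, y1, x2, y2 = b
        dw = (x2 - x1) * width_expand
        dh = (y2 - y1) * height_expand
        return [x1 - dw / 2, y1 - dh / 2, x2 + dw / 2, y2 + dh / 2]

    a = expand_bbox([100, 100, 200, 300])  # -> [90.0, 90.0, 210.0, 310.0]
    b = expand_bbox([190, 100, 290, 300])  # -> [180.0, 90.0, 300.0, 310.0]
    # The expanded boxes overlap by 30 px horizontally, giving IoU ≈ 0.14,
    # which clears the default proximity_iou_threshold of 0.1, so this
    # pair would be counted as a proximity event.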
{matrice-1.0.99292.dist-info → matrice-1.0.99294.dist-info}/RECORD
@@ -109,7 +109,7 @@ matrice/deploy/client/auto_streaming/auto_streaming.py,sha256=VRfy_EBFvhspN-hoN3
 matrice/deploy/client/auto_streaming/auto_streaming_utils.py,sha256=BuRUokLp3t43yzRF8YSX9p_RHQD94RoBwNEoldXFDQo,14995
 matrice/deploy/client/streaming_gateway/__init__.py,sha256=hkYC0qszaXZThquMuuI20Qkt_AHCB3pdy7jlJVeqPN4,1203
 matrice/deploy/client/streaming_gateway/streaming_gateway.py,sha256=r8Z5AXBom89n-8W5RTlB-nOeUaprxK-QBDsAb-E9gl8,19605
-matrice/deploy/client/streaming_gateway/streaming_gateway_utils.py,sha256=
+matrice/deploy/client/streaming_gateway/streaming_gateway_utils.py,sha256=Avp0LKUeBaYvGMLfGJKK0Wygb6fA4de8fT9IILIb9Go,42774
 matrice/deploy/client/streaming_gateway/streaming_results_handler.py,sha256=OpHkdbnuqVN28tQm9CYwrfgAth3Qz40Uaq5Tg4bbxyo,15813
 matrice/deploy/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 matrice/deploy/server/server.py,sha256=duki4KUU1tvW3Y7wkrlMRVvt7bAP2QqSIsrSogLxC4o,36799
@@ -130,7 +130,7 @@ matrice/deploy/utils/boundary_drawing_internal/boundary_drawing_internal.py,sha2
 matrice/deploy/utils/boundary_drawing_internal/boundary_drawing_tool.py,sha256=eY0VQGZ8BfTmR4_ThIAXaumBjh8_c7w69w-d3kta8p0,15421
 matrice/deploy/utils/boundary_drawing_internal/example_usage.py,sha256=cUBhxxsVdTQWIPvIOjCUGrhqon7ZBr5N6qNewjrTIuk,6434
 matrice/deploy/utils/post_processing/__init__.py,sha256=a2gUxjLmmc5CF4ik9vXWCI2LQAbqvA6B8pFsBa1kMic,25727
-matrice/deploy/utils/post_processing/config.py,sha256=
+matrice/deploy/utils/post_processing/config.py,sha256=l2vmibZayfOGZdevDnT8OP32eqxIeXCygWCYZY1FhiM,5442
 matrice/deploy/utils/post_processing/processor.py,sha256=QJehlljMDmlYNpun5dUnkPorijHs17BNQv6aPZZqgag,34907
 matrice/deploy/utils/post_processing/advanced_tracker/__init__.py,sha256=tAPFzI_Yep5TLX60FDwKqBqppc-EbxSr0wNsQ9DGI1o,423
 matrice/deploy/utils/post_processing/advanced_tracker/base.py,sha256=VqWy4dd5th5LK-JfueTt2_GSEoOi5QQfQxjTNhmQoLc,3580
@@ -170,7 +170,7 @@ matrice/deploy/utils/post_processing/usecases/car_part_segmentation.py,sha256=Jb
 matrice/deploy/utils/post_processing/usecases/cardiomegaly_classification.py,sha256=1P6DyOU6R1XKmQ-55BbKMU8CSsm4-wR5wS827UJG2JU,41244
 matrice/deploy/utils/post_processing/usecases/chicken_pose_detection.py,sha256=-e8di7Am-E-FCQFrSY8qJTO1aWtdRAVJoE-VKBgcyyI,29291
 matrice/deploy/utils/post_processing/usecases/child_monitoring.py,sha256=z3oymoqq4hDGwA8MkdEONZW_Vx5CAZmvzZaNLsqmCfw,39380
-matrice/deploy/utils/post_processing/usecases/color_detection.py,sha256=
+matrice/deploy/utils/post_processing/usecases/color_detection.py,sha256=s7Qi4i-44DekS9tWM9NLkx8yVoTUCHfyMqi_qed-wjc,67335
 matrice/deploy/utils/post_processing/usecases/color_map_utils.py,sha256=SP-AEVcjLmL8rxblu-ixqUJC2fqlcr7ab4hWo4Fcr_k,2677
 matrice/deploy/utils/post_processing/usecases/concrete_crack_detection.py,sha256=pxhOH_hG4hq9yytNepbGMdk2W_lTG8D1_2RAagaPBkg,40252
 matrice/deploy/utils/post_processing/usecases/crop_weed_detection.py,sha256=Ao1k5fJDYU_f6yZ8VO-jW8-esECV0-zY5Q570c_fako,35674
@@ -183,7 +183,7 @@ matrice/deploy/utils/post_processing/usecases/face_emotion.py,sha256=eRfqBdryB0u
 matrice/deploy/utils/post_processing/usecases/face_recognition.py,sha256=T5xAuv6b9OrkmTmoXgZs4LZ5XUsbvp9xCpeLBwdu7eI,40231
 matrice/deploy/utils/post_processing/usecases/fashion_detection.py,sha256=f9gpzMDhIW-gyn46k9jgf8nY7YeoqAnTxGOzksabFbE,40457
 matrice/deploy/utils/post_processing/usecases/field_mapping.py,sha256=JDwYX8pd2W-waDvBh98Y_o_uchJu7wEYbFxOliA4Iq4,39822
-matrice/deploy/utils/post_processing/usecases/fire_detection.py,sha256=
+matrice/deploy/utils/post_processing/usecases/fire_detection.py,sha256=V3ZV8FxdYwPqy-6jVYBwWgAp9bCLwW6QIHmg79SXgbs,50700
 matrice/deploy/utils/post_processing/usecases/flare_analysis.py,sha256=-egmS3Hs_iGOLeCMfapbkfQ04EWtZx97QRuUcDa-jMU,45340
 matrice/deploy/utils/post_processing/usecases/flower_segmentation.py,sha256=4I7qMx9Ztxg_hy9KTVX-3qBhAN-QwDt_Yigf9fFjLus,52017
 matrice/deploy/utils/post_processing/usecases/gas_leak_detection.py,sha256=KL2ft7fXvjTas-65-QgcJm3W8KBsrwF44qibSXjfaLc,40557
@@ -205,7 +205,7 @@ matrice/deploy/utils/post_processing/usecases/plaque_segmentation_img.py,sha256=
 matrice/deploy/utils/post_processing/usecases/pothole_segmentation.py,sha256=jXTb8ZqInp5xJ-O3Zp3zQBiryFVD0-WBbhW6Kux_NDo,44905
 matrice/deploy/utils/post_processing/usecases/ppe_compliance.py,sha256=G9P9j9E9nfNJInHJxmK1Lb4daFBlG5hq0aqotTLvFFE,30146
 matrice/deploy/utils/post_processing/usecases/price_tag_detection.py,sha256=09Tp6MGAHh95s-NSAp-4WC9iCc20sajWApuUBAvgXiQ,39880
-matrice/deploy/utils/post_processing/usecases/proximity_detection.py,sha256=
+matrice/deploy/utils/post_processing/usecases/proximity_detection.py,sha256=6zViF9L_nY-qmgEu8isuKAv8-fc7RfX93vUeZs2eojY,92867
 matrice/deploy/utils/post_processing/usecases/road_lane_detection.py,sha256=V_KxwBtAHSNkyoH8sXw-U-P3J8ToXtX3ncc69gn6Tds,31591
 matrice/deploy/utils/post_processing/usecases/road_traffic_density.py,sha256=YiHQ0kKhXglagHPvygywxMqZAw8s0WharrBQqLQj2q4,40311
 matrice/deploy/utils/post_processing/usecases/road_view_segmentation.py,sha256=BcBbOOg5622KuvzKrzs9cJW1wkRoIIcOab0N7BONQKQ,44986
@@ -244,8 +244,8 @@ matrice/deployment/camera_manager.py,sha256=e1Lc81RJP5wUWRdTgHO6tMWF9BkBdHOSVyx3
 matrice/deployment/deployment.py,sha256=HFt151eWq6iqIAMsQvurpV2WNxW6Cx_gIUVfnVy5SWE,48093
 matrice/deployment/inference_pipeline.py,sha256=6b4Mm3-qt-Zy0BeiJfFQdImOn3FzdNCY-7ET7Rp8PMk,37911
 matrice/deployment/streaming_gateway_manager.py,sha256=ifYGl3g25wyU39HwhPQyI2OgF3M6oIqKMWt8RXtMxY8,21401
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
+matrice-1.0.99294.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
+matrice-1.0.99294.dist-info/METADATA,sha256=lXvLsYMYQbH8vQxZlhIDVAdn4rKiem6xy5gtVfRVaig,14624
+matrice-1.0.99294.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+matrice-1.0.99294.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
+matrice-1.0.99294.dist-info/RECORD,,
Files without changes: WHEEL, licenses/LICENSE.txt, top_level.txt