matrice 1.0.99397__py3-none-any.whl → 1.0.99399__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice/deploy/utils/post_processing/config.py +3 -1
- matrice/deploy/utils/post_processing/core/config.py +29 -0
- matrice/deploy/utils/post_processing/processor.py +4 -0
- matrice/deploy/utils/post_processing/usecases/__init__.py +4 -0
- matrice/deploy/utils/post_processing/usecases/cell_microscopy_segmentation.py +897 -0
- matrice/deploy/utils/post_processing/usecases/license_plate_monitoring.py +18 -2
- {matrice-1.0.99397.dist-info → matrice-1.0.99399.dist-info}/METADATA +1 -1
- {matrice-1.0.99397.dist-info → matrice-1.0.99399.dist-info}/RECORD +11 -10
- {matrice-1.0.99397.dist-info → matrice-1.0.99399.dist-info}/WHEEL +0 -0
- {matrice-1.0.99397.dist-info → matrice-1.0.99399.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice-1.0.99397.dist-info → matrice-1.0.99399.dist-info}/top_level.txt +0 -0
@@ -53,6 +53,7 @@ APP_NAME_TO_USECASE = {
|
|
53
53
|
"gas_leak_detection": "gas_leak_detection",
|
54
54
|
"color_detection": "color_detection",
|
55
55
|
"License Plate Recognition" : "license_plate_monitor",
|
56
|
+
"cell_microscopy_segmentation": "cell_microscopy_segmentation"
|
56
57
|
|
57
58
|
}
|
58
59
|
|
@@ -110,7 +111,8 @@ APP_NAME_TO_CATEGORY = {
|
|
110
111
|
"abandoned_object_detection" : "security",
|
111
112
|
"gas_leak_detection": "oil_gas",
|
112
113
|
"color_detection": "visual_appearance",
|
113
|
-
"License Plate Recognition" : "license_plate_monitor"
|
114
|
+
"License Plate Recognition" : "license_plate_monitor",
|
115
|
+
"cell_microscopy_segmentation" : "healthcare"
|
114
116
|
}
|
115
117
|
|
116
118
|
def get_usecase_from_app_name(app_name: str) -> str:
|
@@ -632,6 +632,7 @@ class ConfigManager:
|
|
632
632
|
'plaque_segmentation_img': None,
|
633
633
|
'cardiomegaly_classification': None,
|
634
634
|
'histopathological_cancer_detection' : None,
|
635
|
+
'cell_microscopy_segmentation': None,
|
635
636
|
}
|
636
637
|
|
637
638
|
def register_config_class(self, usecase: str, config_class: type) -> None:
|
@@ -1110,6 +1111,14 @@ class ConfigManager:
|
|
1110
1111
|
return HistopathologicalCancerDetectionConfig
|
1111
1112
|
except ImportError:
|
1112
1113
|
return None
|
1114
|
+
|
1115
|
+
def cell_microscopy_segmentation_config_class(self):
|
1116
|
+
"""Register a configuration class for a use case."""
|
1117
|
+
try:
|
1118
|
+
from ..usecases.cell_microscopy_segmentation import CellMicroscopyConfig
|
1119
|
+
return CellMicroscopyConfig
|
1120
|
+
except ImportError:
|
1121
|
+
return None
|
1113
1122
|
|
1114
1123
|
def create_config(self, usecase: str, category: Optional[str] = None, **kwargs) -> BaseConfig:
|
1115
1124
|
"""
|
@@ -2213,6 +2222,21 @@ class ConfigManager:
|
|
2213
2222
|
alert_config=alert_config,
|
2214
2223
|
**kwargs
|
2215
2224
|
)
|
2225
|
+
elif usecase == "cell_microscopy_segmentation":
|
2226
|
+
# Import here to avoid circular import
|
2227
|
+
from ..usecases.cell_microscopy_segmentation import CellMicroscopyConfig
|
2228
|
+
|
2229
|
+
# Handle nested configurations
|
2230
|
+
alert_config = kwargs.pop("alert_config", None)
|
2231
|
+
if alert_config and isinstance(alert_config, dict):
|
2232
|
+
alert_config = AlertConfig(**alert_config)
|
2233
|
+
|
2234
|
+
config = CellMicroscopyConfig(
|
2235
|
+
category=category or "healthcare",
|
2236
|
+
usecase=usecase,
|
2237
|
+
alert_config=alert_config,
|
2238
|
+
**kwargs
|
2239
|
+
)
|
2216
2240
|
|
2217
2241
|
else:
|
2218
2242
|
raise ConfigValidationError(f"Unknown use case: {usecase}")
|
@@ -2640,6 +2664,11 @@ class ConfigManager:
|
|
2640
2664
|
from ..usecases.Histopathological_Cancer_Detection_img import HistopathologicalCancerDetectionConfig
|
2641
2665
|
default_config = HistopathologicalCancerDetectionConfig()
|
2642
2666
|
return default_config.to_dict()
|
2667
|
+
elif usecase == "cell_microscopy_segmentation":
|
2668
|
+
# Import here to avoid circular import
|
2669
|
+
from ..usecases.cell_microscopy_segmentation import CellMicroscopyConfig
|
2670
|
+
default_config = CellMicroscopyConfig()
|
2671
|
+
return default_config.to_dict()
|
2643
2672
|
|
2644
2673
|
elif usecase not in self._config_classes:
|
2645
2674
|
raise ConfigValidationError(f"Unsupported use case: {usecase}")
|
@@ -105,6 +105,7 @@ from .usecases import (
|
|
105
105
|
PlaqueSegmentationUseCase,
|
106
106
|
CardiomegalyUseCase,
|
107
107
|
HistopathologicalCancerDetectionUseCase,
|
108
|
+
CellMicroscopyUseCase,
|
108
109
|
|
109
110
|
|
110
111
|
)
|
@@ -246,6 +247,7 @@ class PostProcessor:
|
|
246
247
|
registry.register_use_case("healthcare", "plaque_img_segmentation", PlaqueSegmentationUseCase)
|
247
248
|
registry.register_use_case("healthcare", "cardiomegaly_classification", CardiomegalyUseCase)
|
248
249
|
registry.register_use_case("healthcare", "histopathological_cancer_detection", HistopathologicalCancerDetectionUseCase)
|
250
|
+
registry.register_use_case("healthcare", "cell_microscopy_segmentation", CellMicroscopyUseCase)
|
249
251
|
|
250
252
|
|
251
253
|
logger.debug("Registered use cases with registry")
|
@@ -432,6 +434,8 @@ class PostProcessor:
|
|
432
434
|
result = use_case.process(data, parsed_config, context, stream_info)
|
433
435
|
elif isinstance(use_case, HistopathologicalCancerDetectionUseCase):
|
434
436
|
result = use_case.process(data, parsed_config, context, stream_info)
|
437
|
+
elif isinstance(use_case, CellMicroscopyUseCase):
|
438
|
+
result = use_case.process(data, parsed_config, context, stream_info)
|
435
439
|
|
436
440
|
else:
|
437
441
|
result = use_case.process(data, parsed_config, context, stream_info)
|
@@ -84,6 +84,8 @@ from .skin_cancer_classification_img import SkinCancerClassificationConfig, Skin
|
|
84
84
|
from .plaque_segmentation_img import PlaqueSegmentationConfig, PlaqueSegmentationUseCase
|
85
85
|
from .cardiomegaly_classification import CardiomegalyConfig, CardiomegalyUseCase
|
86
86
|
from .Histopathological_Cancer_Detection_img import HistopathologicalCancerDetectionConfig,HistopathologicalCancerDetectionUseCase
|
87
|
+
from .cell_microscopy_segmentation import CellMicroscopyConfig, CellMicroscopyUseCase
|
88
|
+
|
87
89
|
|
88
90
|
__all__ = [
|
89
91
|
'PeopleCountingUseCase',
|
@@ -156,6 +158,7 @@ __all__ = [
|
|
156
158
|
'PlaqueSegmentationUseCase',
|
157
159
|
'CardiomegalyUseCase',
|
158
160
|
'HistopathologicalCancerDetectionUseCase',
|
161
|
+
'CellMicroscopyUseCase',
|
159
162
|
|
160
163
|
|
161
164
|
|
@@ -230,6 +233,7 @@ __all__ = [
|
|
230
233
|
'PlaqueSegmentationConfig',
|
231
234
|
'CardiomegalyConfig',
|
232
235
|
'HistopathologicalCancerDetectionConfig',
|
236
|
+
'CellMicroscopyConfig',
|
233
237
|
|
234
238
|
|
235
239
|
]
|
@@ -0,0 +1,897 @@
|
|
1
|
+
"""
|
2
|
+
Cell Segmentation in Microscopy Images Use Case for Post-Processing
|
3
|
+
|
4
|
+
This module provides cell microscopy segmenatation.
|
5
|
+
"""
|
6
|
+
|
7
|
+
from typing import Any, Dict, List, Optional
|
8
|
+
from dataclasses import asdict
|
9
|
+
import time
|
10
|
+
from datetime import datetime, timezone
|
11
|
+
import copy # Added for deep copying detections to preserve original masks
|
12
|
+
|
13
|
+
from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
|
14
|
+
from ..utils import (
|
15
|
+
filter_by_confidence,
|
16
|
+
filter_by_categories,
|
17
|
+
apply_category_mapping,
|
18
|
+
count_objects_by_category,
|
19
|
+
count_objects_in_zones,
|
20
|
+
calculate_counting_summary,
|
21
|
+
match_results_structure,
|
22
|
+
bbox_smoothing,
|
23
|
+
BBoxSmoothingConfig,
|
24
|
+
BBoxSmoothingTracker
|
25
|
+
)
|
26
|
+
from dataclasses import dataclass, field
|
27
|
+
from ..core.config import BaseConfig, AlertConfig, ZoneConfig
|
28
|
+
|
29
|
+
|
30
|
+
@dataclass
class CellMicroscopyConfig(BaseConfig):
    """Configuration for Cell segmentation in microscopy images use case for post-processing."""
    # --- Bounding-box smoothing (forwarded to BBoxSmoothingConfig in process()) ---
    enable_smoothing: bool = True  # master switch for bbox smoothing
    smoothing_algorithm: str = "observability"  # "window" or "observability"
    smoothing_window_size: int = 20  # forwarded as BBoxSmoothingConfig.window_size
    smoothing_cooldown_frames: int = 5  # forwarded as BBoxSmoothingConfig.cooldown_frames
    smoothing_confidence_range_factor: float = 0.5  # forwarded as BBoxSmoothingConfig.confidence_range_factor

    # Detections with confidence below this threshold are filtered out.
    confidence_threshold: float = 0.90

    # Categories this use case is defined over.
    usecase_categories: List[str] = field(
        default_factory=lambda: ['Culture']
    )

    # Categories kept after category filtering in process().
    target_categories: List[str] = field(
        default_factory=lambda: ['Culture']
    )

    # Optional alert thresholds/settings; when None, no alerts are emitted.
    alert_config: Optional[AlertConfig] = None

    # Maps raw model class indices to category names (applied via apply_category_mapping).
    index_to_category: Optional[Dict[int, str]] = field(
        default_factory=lambda: {
            0: "Culture"
        }
    )
|
58
|
+
|
59
|
+
|
60
|
+
class CellMicroscopyUseCase(BaseProcessor):
|
61
|
+
|
62
|
+
# Human-friendly display names for categories
|
63
|
+
CATEGORY_DISPLAY = {
|
64
|
+
"Culture": "Culture"
|
65
|
+
}
|
66
|
+
def __init__(self):
    """Initialize the cell microscopy segmentation processor with default tracking state."""
    super().__init__("cell_microscopy_segmentation")
    self.category = "healthcare"

    # List of categories to track
    self.target_categories = ["Culture"]

    # Use-case identity embedded in incidents/alerts produced by this processor.
    self.CASE_TYPE: Optional[str] = 'cell_microscopy_segmentation'
    self.CASE_VERSION: Optional[str] = '1.0'

    # Bbox smoothing tracker; created lazily on first process() call when smoothing is enabled.
    self.smoothing_tracker = None

    # Advanced (BYTETracker-like) tracker; created lazily on first use so state persists across frames.
    self.tracker = None

    # Frame counters for tracking state.
    self._total_frame_counter = 0
    self._global_frame_offset = 0

    # Track start time for "TOTAL SINCE" calculation
    self._tracking_start_time = None

    # ------------------------------------------------------------------ #
    # Canonical tracking aliasing to avoid duplicate counts              #
    # ------------------------------------------------------------------ #
    # Maps raw tracker-generated IDs to stable canonical IDs that persist
    # even if the underlying tracker re-assigns a new ID after a short
    # interruption. This mirrors the logic used in people_counting to
    # provide accurate unique counting.
    self._track_aliases: Dict[Any, Any] = {}
    self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
    # Tunable parameters – adjust if necessary for specific scenarios.
    self._track_merge_iou_threshold: float = 0.05  # IoU ≥ 0.05 → candidate for merging into an existing canonical track
    self._track_merge_time_window: float = 7.0  # seconds within which to merge

    # Rolling history of per-frame severity codes (0-3) used for trend detection in alerts.
    self._ascending_alert_list: List[int] = []
    # "N/A" when no incident is open; see _generate_incidents for the state machine.
    self.current_incident_end_timestamp: str = "N/A"
|
104
|
+
|
105
|
+
def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
            stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
    """
    Main entry point for post-processing.
    Applies category mapping, smoothing, counting, alerting, and summary generation.
    Returns a ProcessingResult with all relevant outputs.

    Args:
        data: Raw detection results (list of detection dicts expected downstream).
        config: Must be a CellMicroscopyConfig; otherwise an error result is returned.
        context: Optional processing context; created fresh when None.
        stream_info: Optional stream metadata; "input_settings" start/end frames
            are used to derive a single frame number when they match.
    """
    start_time = time.time()
    # Ensure config is correct type
    if not isinstance(config, CellMicroscopyConfig):
        return self.create_error_result("Invalid config type", usecase=self.name, category=self.category,
                                        context=context)
    if context is None:
        context = ProcessingContext()

    # Detect input format and store in context
    input_format = match_results_structure(data)
    context.input_format = input_format
    context.confidence_threshold = config.confidence_threshold

    # Step 1: Confidence filtering
    if config.confidence_threshold is not None:
        processed_data = filter_by_confidence(data, config.confidence_threshold)
    else:
        processed_data = data
        self.logger.debug(f"Did not apply confidence filtering with threshold since nothing was provided")

    # Step 2: Apply category mapping if provided
    if config.index_to_category:
        processed_data = apply_category_mapping(processed_data, config.index_to_category)

    # Step 3: Category filtering
    # NOTE(review): filters against self.target_categories, not config.target_categories,
    # even though the config value gates the branch — confirm this is intentional.
    if config.target_categories:
        processed_data = [d for d in processed_data if d.get('category') in self.target_categories]

    # Step 4: Apply bbox smoothing if enabled
    # Deep-copy detections so that we preserve the original masks before any
    # smoothing/tracking logic potentially removes them.
    raw_processed_data = [copy.deepcopy(det) for det in processed_data]
    if config.enable_smoothing:
        if self.smoothing_tracker is None:
            smoothing_config = BBoxSmoothingConfig(
                smoothing_algorithm=config.smoothing_algorithm,
                window_size=config.smoothing_window_size,
                cooldown_frames=config.smoothing_cooldown_frames,
                confidence_threshold=config.confidence_threshold,
                confidence_range_factor=config.smoothing_confidence_range_factor,
                enable_smoothing=True
            )
            self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)

        processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
        # Masks are restored after smoothing via _attach_masks_to_detections below.

    # Step 5: Advanced tracking (BYTETracker-like)
    try:
        from ..advanced_tracker import AdvancedTracker
        from ..advanced_tracker.config import TrackerConfig

        # Create tracker instance if it doesn't exist (preserves state across frames)
        if self.tracker is None:
            tracker_config = TrackerConfig()
            self.tracker = AdvancedTracker(tracker_config)
            self.logger.info("Initialized AdvancedTracker for Monitoring and tracking")

        processed_data = self.tracker.update(processed_data)
    except Exception as e:
        # If advanced tracker fails, fallback to unsmoothed detections
        self.logger.warning(f"AdvancedTracker failed: {e}")

    # Update tracking state for total count per label
    self._update_tracking_state(processed_data)

    # ------------------------------------------------------------------ #
    # Re-attach segmentation masks that were present in the original input
    # but may have been stripped during smoothing/tracking. We match each
    # processed detection back to the raw detection with the highest IoU
    # and copy over its "masks" field (if available).
    # ------------------------------------------------------------------ #
    processed_data = self._attach_masks_to_detections(processed_data, raw_processed_data)

    # Update frame counter
    self._total_frame_counter += 1

    # Extract frame information from stream_info
    frame_number = None
    if stream_info:
        input_settings = stream_info.get("input_settings", {})
        start_frame = input_settings.get("start_frame")
        end_frame = input_settings.get("end_frame")
        # If start and end frame are the same, it's a single frame
        if start_frame is not None and end_frame is not None and start_frame == end_frame:
            frame_number = start_frame

    # Compute summaries and alerts
    # NOTE(review): general_counting_summary and predictions are computed but not
    # used in the result below — confirm whether they can be removed.
    general_counting_summary = calculate_counting_summary(data)
    counting_summary = self._count_categories(processed_data, config)
    # Add total unique counts after tracking using only local state
    total_counts = self.get_total_counts()
    counting_summary['total_counts'] = total_counts

    alerts = self._check_alerts(counting_summary, frame_number, config)
    predictions = self._extract_predictions(processed_data)

    # Step: Generate structured events and tracking stats with frame-based keys
    incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
    tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number,stream_info)
    # business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, frame_number, stream_info, is_empty=False)
    business_analytics_list = []
    summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

    # Extract frame-based dictionaries from the lists
    incidents = incidents_list[0] if incidents_list else {}
    tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
    business_analytics = business_analytics_list[0] if business_analytics_list else {}
    summary = summary_list[0] if summary_list else {}
    agg_summary = {str(frame_number): {
        "incidents": incidents,
        "tracking_stats": tracking_stats,
        "business_analytics": business_analytics,
        "alerts": alerts,
        "human_text": summary}
    }

    context.mark_completed()

    # Build result object following the new pattern

    result = self.create_result(
        data={"agg_summary": agg_summary},
        usecase=self.name,
        category=self.category,
        context=context
    )

    return result
|
241
|
+
|
242
|
+
def _check_alerts(self, summary: dict, frame_number: Any, config: CellMicroscopyConfig) -> List[Dict]:
    """
    Check if any alert thresholds are exceeded and return alert dicts.

    Compares the per-frame counts in ``summary`` against
    ``config.alert_config.count_thresholds`` ("all" for the combined total,
    otherwise per-category) and emits one alert dict per exceeded threshold.
    Returns an empty list when no alert_config is set.
    """
    def get_trend(data, lookback=900, threshold=0.6):
        '''
        Determine if the trend is ascending or descending based on actual value progression.
        Now works with values 0,1,2,3 (not just binary).

        NOTE(review): when the increasing/total ratio falls strictly between
        (1 - threshold) and threshold this falls through and implicitly
        returns None — confirm whether an inconclusive trend should map to
        True/False instead.
        '''
        window = data[-lookback:] if len(data) >= lookback else data
        if len(window) < 2:
            return True # not enough data to determine trend
        increasing = 0
        total = 0
        for i in range(1, len(window)):
            if window[i] >= window[i - 1]:
                increasing += 1
            total += 1
        ratio = increasing / total
        if ratio >= threshold:
            return True
        elif ratio <= (1 - threshold):
            return False

    frame_key = str(frame_number) if frame_number is not None else "current_frame"
    alerts = []
    total_detections = summary.get("total_count", 0) #CURRENT combined total count of all classes
    total_counts_dict = summary.get("total_counts", {}) #TOTAL cumulative counts per class
    cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0 #TOTAL combined cumulative count
    per_category_count = summary.get("per_category_count", {}) #CURRENT count per class

    if not config.alert_config:
        return alerts

    total = summary.get("total_count", 0)
    #self._ascending_alert_list
    if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:

        for category, threshold in config.alert_config.count_thresholds.items():
            # "all" threshold compares against the combined current total.
            if category == "all" and total > threshold:

                alerts.append({
                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                    "alert_id": "alert_"+category+'_'+frame_key,
                    "incident_category": self.CASE_TYPE,
                    "threshold_level": threshold,
                    "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                                                      getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                                 }
                })
            # Per-category thresholds compare against that category's current count.
            elif category in summary.get("per_category_count", {}):
                count = summary.get("per_category_count", {})[category]
                if count > threshold: # Fixed logic: alert when EXCEEDING threshold
                    alerts.append({
                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                        "alert_id": "alert_"+category+'_'+frame_key,
                        "incident_category": self.CASE_TYPE,
                        "threshold_level": threshold,
                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                                                          getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                                     }
                    })
            else:
                pass
    return alerts
|
309
|
+
|
310
|
+
def _generate_incidents(self, counting_summary: Dict, alerts: List, config: CellMicroscopyConfig,
                        frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[
    Dict]:
    """Generate structured events for the output format with frame-based keys.

    Derives a severity level (low/medium/significant/critical) from the current
    detection count — either relative to the configured "all" threshold or via
    fixed fallback cut-offs — appends a severity code (0-3) to the rolling
    trend history, maintains the open/closed incident timestamp state machine
    on self.current_incident_end_timestamp, and returns a single-element list
    with either an incident dict or {} when nothing was detected.
    """

    # Use frame number as key, fallback to 'current_frame' if not available
    frame_key = str(frame_number) if frame_number is not None else "current_frame"
    incidents=[]
    total_detections = counting_summary.get("total_count", 0)
    current_timestamp = self._get_current_timestamp_str(stream_info)
    camera_info = self.get_camera_info_from_stream(stream_info)

    # Cap the trend history at the last 900 entries to bound memory.
    self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list

    if total_detections > 0:
        # Determine event level based on thresholds
        level = "low"
        intensity = 5.0
        start_timestamp = self._get_start_timestamp_str(stream_info)
        # Incident lifecycle: N/A -> 'Incident still active' -> concrete end
        # timestamp once the recent severity average drops below 1.5.
        if start_timestamp and self.current_incident_end_timestamp=='N/A':
            self.current_incident_end_timestamp = 'Incident still active'
        elif start_timestamp and self.current_incident_end_timestamp=='Incident still active':
            if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
                self.current_incident_end_timestamp = current_timestamp
        elif self.current_incident_end_timestamp!='Incident still active' and self.current_incident_end_timestamp!='N/A':
            self.current_incident_end_timestamp = 'N/A'

        if config.alert_config and config.alert_config.count_thresholds:
            # Intensity scales with how far the count exceeds the "all" threshold.
            threshold = config.alert_config.count_thresholds.get("all", 15)
            intensity = min(10.0, (total_detections / threshold) * 10)

            if intensity >= 9:
                level = "critical"
                self._ascending_alert_list.append(3)
            elif intensity >= 7:
                level = "significant"
                self._ascending_alert_list.append(2)
            elif intensity >= 5:
                level = "medium"
                self._ascending_alert_list.append(1)
            else:
                level = "low"
                self._ascending_alert_list.append(0)
        else:
            # No configured thresholds: fall back to fixed count cut-offs.
            if total_detections > 30:
                level = "critical"
                intensity = 10.0
                self._ascending_alert_list.append(3)
            elif total_detections > 25:
                level = "significant"
                intensity = 9.0
                self._ascending_alert_list.append(2)
            elif total_detections > 15:
                level = "medium"
                intensity = 7.0
                self._ascending_alert_list.append(1)
            else:
                level = "low"
                intensity = min(10.0, total_detections / 3.0)
                self._ascending_alert_list.append(0)

        # Generate human text in new format
        human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
        human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE,level)}")
        human_text = "\n".join(human_text_lines)

        alert_settings = []
        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
            alert_settings.append({
                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                "incident_category": self.CASE_TYPE,
                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                "ascending": True,
                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                                                  getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                             }
            })

        event= self.create_incident(incident_id=self.CASE_TYPE+'_'+str(frame_number), incident_type=self.CASE_TYPE,
                                    severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
                                    start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
                                    level_settings= {"low": 1, "medium": 3, "significant":4, "critical": 7})
        incidents.append(event)

    else:
        # No detections this frame: record lowest severity and emit an empty incident.
        self._ascending_alert_list.append(0)
        incidents.append({})

    return incidents
|
399
|
+
|
400
|
+
def _generate_tracking_stats(
        self,
        counting_summary: Dict,
        alerts: List,
        config: CellMicroscopyConfig,
        frame_number: Optional[int] = None,
        stream_info: Optional[Dict[str, Any]] = None
) -> List[Dict]:
    """Generate structured tracking stats matching eg.json format.

    Builds total/current count arrays, detection objects (with segmentation
    re-attached from whichever of "masks"/"segmentation"/"mask" is present),
    alert settings, and a human-readable summary, then wraps them via
    ``self.create_tracking_stats``. Returns a single-element list.

    Note: the stray ``print()`` debug calls from the original were replaced
    with lazy ``self.logger.debug`` calls so a library consumer's stdout is
    not polluted.
    """
    camera_info = self.get_camera_info_from_stream(stream_info)

    tracking_stats = []

    total_detections = counting_summary.get("total_count", 0)  # CURRENT total count of all classes
    total_counts_dict = counting_summary.get("total_counts", {})  # TOTAL cumulative counts per class
    cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0  # TOTAL combined cumulative count
    per_category_count = counting_summary.get("per_category_count", {})  # CURRENT count per class

    # NOTE(review): computed but unused locally; kept in case the call has
    # side effects on tracking state — confirm against the base class.
    track_ids_info = self._get_track_ids_info(counting_summary.get("detections", []))

    current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
    start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)

    # Create high precision timestamps for input_timestamp and reset_timestamp
    high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
    high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)

    # Build total_counts array in expected format
    total_counts = []
    for cat, count in total_counts_dict.items():
        if count > 0:
            total_counts.append({
                "category": cat,
                "count": count
            })
    self.logger.debug("total_counts=%s", total_counts)

    # Build current_counts array in expected format
    current_counts = []
    for cat, count in per_category_count.items():
        if count > 0 or total_detections > 0:  # Include even if 0 when there are detections
            current_counts.append({
                "category": cat,
                "count": count
            })
    self.logger.debug("current_counts=%s", current_counts)

    # Prepare detections without confidence scores (as per eg.json)
    detections = []
    for detection in counting_summary.get("detections", []):
        bbox = detection.get("bounding_box", {})
        category = detection.get("category", "person")
        # Include segmentation if available, checking the three field names used upstream.
        if detection.get("masks"):
            segmentation = detection.get("masks", [])
            detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
        elif detection.get("segmentation"):
            segmentation = detection.get("segmentation")
            detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
        elif detection.get("mask"):
            segmentation = detection.get("mask")
            detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
        else:
            detection_obj = self.create_detection_object(category, bbox)
        detections.append(detection_obj)
    self.logger.debug("detections=%s", detections)

    # Build alert_settings array in expected format
    alert_settings = []
    if config.alert_config and hasattr(config.alert_config, 'alert_type'):
        alert_settings.append({
            "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
            "incident_category": self.CASE_TYPE,
            "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
            "ascending": True,
            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                                              getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                         }
        })
    self.logger.debug("alert_settings=%s", alert_settings)

    # Generate human_text in expected format
    human_text_lines = ["Tracking Statistics:"]
    human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")

    for cat, count in per_category_count.items():
        human_text_lines.append(f"\t{cat}: {count}")

    human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
    for cat, count in total_counts_dict.items():
        if count > 0:
            human_text_lines.append(f"\t{cat}: {count}")

    if alerts:
        for alert in alerts:
            human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
    else:
        human_text_lines.append("Alerts: None")

    human_text = "\n".join(human_text_lines)
    reset_settings = [
        {
            "interval_type": "daily",
            "reset_time": {
                "value": 9,
                "time_unit": "hour"
            }
        }
    ]
    self.logger.debug("human_text=%s", human_text)
    tracking_stat = self.create_tracking_stats(total_counts=total_counts, current_counts=current_counts,
                                               detections=detections, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
                                               reset_settings=reset_settings, start_time=high_precision_start_timestamp,
                                               reset_time=high_precision_reset_timestamp)

    tracking_stats.append(tracking_stat)
    self.logger.debug("tracking_stats=%s", tracking_stats)
    return tracking_stats
|
518
|
+
|
519
|
+
def _generate_business_analytics(self, counting_summary: Dict, zone_analysis: Dict, config: CellMicroscopyConfig, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
|
520
|
+
"""Generate standardized business analytics for the agg_summary structure."""
|
521
|
+
if is_empty:
|
522
|
+
return []
|
523
|
+
|
524
|
+
#-----IF YOUR USECASE NEEDS BUSINESS ANALYTICS, YOU CAN USE THIS FUNCTION------#
|
525
|
+
#camera_info = self.get_camera_info_from_stream(stream_info)
|
526
|
+
# business_analytics = self.create_business_analytics(nalysis_name, statistics,
|
527
|
+
# human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
|
528
|
+
# reset_settings)
|
529
|
+
# return business_analytics
|
530
|
+
|
531
|
+
def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
|
532
|
+
"""
|
533
|
+
Generate a human_text string for the tracking_stat, incident, business analytics and alerts.
|
534
|
+
"""
|
535
|
+
lines = {}
|
536
|
+
lines["Application Name"] = self.CASE_TYPE
|
537
|
+
lines["Application Version"] = self.CASE_VERSION
|
538
|
+
if len(incidents) > 0:
|
539
|
+
lines["Incidents:"]=f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
|
540
|
+
if len(tracking_stats) > 0:
|
541
|
+
lines["Tracking Statistics:"]=f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
|
542
|
+
if len(business_analytics) > 0:
|
543
|
+
lines["Business Analytics:"]=f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
|
544
|
+
|
545
|
+
if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
|
546
|
+
lines["Summary"] = "No Summary Data"
|
547
|
+
|
548
|
+
return [lines]
|
549
|
+
|
550
|
+
|
551
|
+
def _count_categories(self, detections: list, config: CellMicroscopyConfig) -> dict:
|
552
|
+
"""
|
553
|
+
Count the number of detections per category and return a summary dict.
|
554
|
+
The detections list is expected to have 'track_id' (from tracker), 'category', 'bounding_box', 'masks', etc.
|
555
|
+
Output structure will include 'track_id' and 'masks' for each detection as per AdvancedTracker output.
|
556
|
+
"""
|
557
|
+
counts = {}
|
558
|
+
valid_detections = []
|
559
|
+
for det in detections:
|
560
|
+
cat = det.get('category', 'unknown')
|
561
|
+
if not all(k in det for k in ['category', 'confidence', 'bounding_box']): # Validate required fields
|
562
|
+
self.logger.warning(f"Skipping invalid detection: {det}")
|
563
|
+
continue
|
564
|
+
counts[cat] = counts.get(cat, 0) + 1
|
565
|
+
valid_detections.append({
|
566
|
+
"bounding_box": det.get("bounding_box"),
|
567
|
+
"category": det.get("category"),
|
568
|
+
"confidence": det.get("confidence"),
|
569
|
+
"track_id": det.get("track_id"),
|
570
|
+
"frame_id": det.get("frame_id"),
|
571
|
+
"masks": det.get("masks", det.get("mask", [])) # Include masks, fallback to empty list
|
572
|
+
})
|
573
|
+
self.logger.debug(f"Valid detections after filtering: {len(valid_detections)}")
|
574
|
+
return {
|
575
|
+
"total_count": sum(counts.values()),
|
576
|
+
"per_category_count": counts,
|
577
|
+
"detections": valid_detections
|
578
|
+
}
|
579
|
+
|
580
|
+
def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
|
581
|
+
"""
|
582
|
+
Get detailed information about track IDs (per frame).
|
583
|
+
"""
|
584
|
+
# Collect all track_ids in this frame
|
585
|
+
frame_track_ids = set()
|
586
|
+
for det in detections:
|
587
|
+
tid = det.get('track_id')
|
588
|
+
if tid is not None:
|
589
|
+
frame_track_ids.add(tid)
|
590
|
+
# Use persistent total set for unique counting
|
591
|
+
total_track_ids = set()
|
592
|
+
for s in getattr(self, '_per_category_total_track_ids', {}).values():
|
593
|
+
total_track_ids.update(s)
|
594
|
+
return {
|
595
|
+
"total_count": len(total_track_ids),
|
596
|
+
"current_frame_count": len(frame_track_ids),
|
597
|
+
"total_unique_track_ids": len(total_track_ids),
|
598
|
+
"current_frame_track_ids": list(frame_track_ids),
|
599
|
+
"last_update_time": time.time(),
|
600
|
+
"total_frames_processed": getattr(self, '_total_frame_counter', 0)
|
601
|
+
}
|
602
|
+
|
603
|
+
def _update_tracking_state(self, detections: list):
|
604
|
+
"""
|
605
|
+
Track unique categories track_ids per category for total count after tracking.
|
606
|
+
Applies canonical ID merging to avoid duplicate counting when the underlying
|
607
|
+
tracker loses an object temporarily and assigns a new ID.
|
608
|
+
"""
|
609
|
+
# Lazily initialise storage dicts
|
610
|
+
if not hasattr(self, "_per_category_total_track_ids"):
|
611
|
+
self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
|
612
|
+
self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
|
613
|
+
|
614
|
+
for det in detections:
|
615
|
+
cat = det.get("category")
|
616
|
+
raw_track_id = det.get("track_id")
|
617
|
+
if cat not in self.target_categories or raw_track_id is None:
|
618
|
+
continue
|
619
|
+
bbox = det.get("bounding_box", det.get("bbox"))
|
620
|
+
canonical_id = self._merge_or_register_track(raw_track_id, bbox)
|
621
|
+
# Propagate canonical ID back to detection so downstream logic uses it
|
622
|
+
det["track_id"] = canonical_id
|
623
|
+
|
624
|
+
self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
|
625
|
+
self._current_frame_track_ids[cat].add(canonical_id)
|
626
|
+
|
627
|
+
def get_total_counts(self):
    """
    Return the number of unique track IDs seen so far, per category.
    """
    per_category = getattr(self, '_per_category_total_track_ids', {})
    return {category: len(track_ids) for category, track_ids in per_category.items()}
def _format_timestamp_for_video(self, timestamp: float) -> str:
|
634
|
+
"""Format timestamp for video chunks (HH:MM:SS.ms format)."""
|
635
|
+
hours = int(timestamp // 3600)
|
636
|
+
minutes = int((timestamp % 3600) // 60)
|
637
|
+
seconds = round(float(timestamp % 60),2)
|
638
|
+
return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
|
639
|
+
|
640
|
+
def _format_timestamp_for_stream(self, timestamp: float) -> str:
|
641
|
+
"""Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
|
642
|
+
dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
|
643
|
+
return dt.strftime('%Y:%m:%d %H:%M:%S')
|
644
|
+
|
645
|
+
def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
|
646
|
+
"""Get formatted current timestamp based on stream type."""
|
647
|
+
if not stream_info:
|
648
|
+
return "00:00:00.00"
|
649
|
+
# is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
|
650
|
+
if precision:
|
651
|
+
if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
|
652
|
+
if frame_id:
|
653
|
+
start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
|
654
|
+
else:
|
655
|
+
start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
|
656
|
+
stream_time_str = self._format_timestamp_for_video(start_time)
|
657
|
+
return stream_time_str
|
658
|
+
else:
|
659
|
+
return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
|
660
|
+
|
661
|
+
if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
|
662
|
+
if frame_id:
|
663
|
+
start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
|
664
|
+
else:
|
665
|
+
start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
|
666
|
+
stream_time_str = self._format_timestamp_for_video(start_time)
|
667
|
+
return stream_time_str
|
668
|
+
else:
|
669
|
+
# For streams, use stream_time from stream_info
|
670
|
+
stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
|
671
|
+
if stream_time_str:
|
672
|
+
# Parse the high precision timestamp string to get timestamp
|
673
|
+
try:
|
674
|
+
# Remove " UTC" suffix and parse
|
675
|
+
timestamp_str = stream_time_str.replace(" UTC", "")
|
676
|
+
dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
|
677
|
+
timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
|
678
|
+
return self._format_timestamp_for_stream(timestamp)
|
679
|
+
except:
|
680
|
+
# Fallback to current time if parsing fails
|
681
|
+
return self._format_timestamp_for_stream(time.time())
|
682
|
+
else:
|
683
|
+
return self._format_timestamp_for_stream(time.time())
|
684
|
+
|
685
|
+
def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
|
686
|
+
"""Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
|
687
|
+
if not stream_info:
|
688
|
+
return "00:00:00"
|
689
|
+
if precision:
|
690
|
+
if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
|
691
|
+
return "00:00:00"
|
692
|
+
else:
|
693
|
+
return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
|
694
|
+
|
695
|
+
if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
|
696
|
+
# If video format, start from 00:00:00
|
697
|
+
return "00:00:00"
|
698
|
+
else:
|
699
|
+
# For streams, use tracking start time or current time with minutes/seconds reset
|
700
|
+
if self._tracking_start_time is None:
|
701
|
+
# Try to extract timestamp from stream_time string
|
702
|
+
stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
|
703
|
+
if stream_time_str:
|
704
|
+
try:
|
705
|
+
# Remove " UTC" suffix and parse
|
706
|
+
timestamp_str = stream_time_str.replace(" UTC", "")
|
707
|
+
dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
|
708
|
+
self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
|
709
|
+
except:
|
710
|
+
# Fallback to current time if parsing fails
|
711
|
+
self._tracking_start_time = time.time()
|
712
|
+
else:
|
713
|
+
self._tracking_start_time = time.time()
|
714
|
+
|
715
|
+
dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
|
716
|
+
# Reset minutes and seconds to 00:00 for "TOTAL SINCE" format
|
717
|
+
dt = dt.replace(minute=0, second=0, microsecond=0)
|
718
|
+
return dt.strftime('%Y:%m:%d %H:%M:%S')
|
719
|
+
|
720
|
+
# ------------------------------------------------------------------ #
|
721
|
+
# Helper to merge masks back into detections #
|
722
|
+
# ------------------------------------------------------------------ #
|
723
|
+
def _attach_masks_to_detections(
|
724
|
+
self,
|
725
|
+
processed_detections: List[Dict[str, Any]],
|
726
|
+
raw_detections: List[Dict[str, Any]],
|
727
|
+
iou_threshold: float = 0.5,
|
728
|
+
) -> List[Dict[str, Any]]:
|
729
|
+
"""
|
730
|
+
Attach segmentation masks from the original `raw_detections` list to the
|
731
|
+
`processed_detections` list returned after smoothing/tracking.
|
732
|
+
|
733
|
+
Matching between detections is performed using Intersection-over-Union
|
734
|
+
(IoU) of the bounding boxes. For each processed detection we select the
|
735
|
+
raw detection with the highest IoU above `iou_threshold` and copy its
|
736
|
+
`masks` (or `mask`) field. If no suitable match is found, the detection
|
737
|
+
keeps an empty list for `masks` to maintain a consistent schema.
|
738
|
+
"""
|
739
|
+
|
740
|
+
if not processed_detections or not raw_detections:
|
741
|
+
# Nothing to do – ensure masks key exists for downstream logic.
|
742
|
+
for det in processed_detections:
|
743
|
+
det.setdefault("masks", [])
|
744
|
+
return processed_detections
|
745
|
+
|
746
|
+
# Track which raw detections have already been matched to avoid
|
747
|
+
# assigning the same mask to multiple processed detections.
|
748
|
+
used_raw_indices = set()
|
749
|
+
|
750
|
+
for det in processed_detections:
|
751
|
+
best_iou = 0.0
|
752
|
+
best_idx = None
|
753
|
+
|
754
|
+
for idx, raw_det in enumerate(raw_detections):
|
755
|
+
if idx in used_raw_indices:
|
756
|
+
continue
|
757
|
+
|
758
|
+
iou = self._compute_iou(det.get("bounding_box"), raw_det.get("bounding_box"))
|
759
|
+
if iou > best_iou:
|
760
|
+
best_iou = iou
|
761
|
+
best_idx = idx
|
762
|
+
|
763
|
+
if best_idx is not None and best_iou >= iou_threshold:
|
764
|
+
raw_det = raw_detections[best_idx]
|
765
|
+
masks = raw_det.get("masks", raw_det.get("mask"))
|
766
|
+
if masks is not None:
|
767
|
+
det["masks"] = masks
|
768
|
+
used_raw_indices.add(best_idx)
|
769
|
+
else:
|
770
|
+
# No adequate match – default to empty list to keep schema consistent.
|
771
|
+
det.setdefault("masks", ["EMPTY"])
|
772
|
+
|
773
|
+
return processed_detections
|
774
|
+
|
775
|
+
def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
|
776
|
+
"""
|
777
|
+
Extract prediction details for output (category, confidence, bounding box).
|
778
|
+
"""
|
779
|
+
return [
|
780
|
+
{
|
781
|
+
"category": det.get("category", "unknown"),
|
782
|
+
"confidence": det.get("confidence", 0.0),
|
783
|
+
"bounding_box": det.get("bounding_box", {}),
|
784
|
+
"mask": det.get("mask", det.get("masks", None)) # Accept either key
|
785
|
+
}
|
786
|
+
for det in detections
|
787
|
+
]
|
788
|
+
|
789
|
+
|
790
|
+
# ------------------------------------------------------------------ #
|
791
|
+
# Canonical ID helpers #
|
792
|
+
# ------------------------------------------------------------------ #
|
793
|
+
def _compute_iou(self, box1: Any, box2: Any) -> float:
|
794
|
+
"""Compute IoU between two bounding boxes which may be dicts or lists.
|
795
|
+
Falls back to 0 when insufficient data is available."""
|
796
|
+
|
797
|
+
# Helper to convert bbox (dict or list) to [x1, y1, x2, y2]
|
798
|
+
def _bbox_to_list(bbox):
|
799
|
+
if bbox is None:
|
800
|
+
return []
|
801
|
+
if isinstance(bbox, list):
|
802
|
+
return bbox[:4] if len(bbox) >= 4 else []
|
803
|
+
if isinstance(bbox, dict):
|
804
|
+
if "xmin" in bbox:
|
805
|
+
return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
|
806
|
+
if "x1" in bbox:
|
807
|
+
return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
|
808
|
+
# Fallback: first four numeric values
|
809
|
+
values = [v for v in bbox.values() if isinstance(v, (int, float))]
|
810
|
+
return values[:4] if len(values) >= 4 else []
|
811
|
+
return []
|
812
|
+
|
813
|
+
l1 = _bbox_to_list(box1)
|
814
|
+
l2 = _bbox_to_list(box2)
|
815
|
+
if len(l1) < 4 or len(l2) < 4:
|
816
|
+
return 0.0
|
817
|
+
x1_min, y1_min, x1_max, y1_max = l1
|
818
|
+
x2_min, y2_min, x2_max, y2_max = l2
|
819
|
+
|
820
|
+
# Ensure correct order
|
821
|
+
x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
|
822
|
+
y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
|
823
|
+
x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
|
824
|
+
y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
|
825
|
+
|
826
|
+
inter_x_min = max(x1_min, x2_min)
|
827
|
+
inter_y_min = max(y1_min, y2_min)
|
828
|
+
inter_x_max = min(x1_max, x2_max)
|
829
|
+
inter_y_max = min(y1_max, y2_max)
|
830
|
+
|
831
|
+
inter_w = max(0.0, inter_x_max - inter_x_min)
|
832
|
+
inter_h = max(0.0, inter_y_max - inter_y_min)
|
833
|
+
inter_area = inter_w * inter_h
|
834
|
+
|
835
|
+
area1 = (x1_max - x1_min) * (y1_max - y1_min)
|
836
|
+
area2 = (x2_max - x2_min) * (y2_max - y2_min)
|
837
|
+
union_area = area1 + area2 - inter_area
|
838
|
+
|
839
|
+
return (inter_area / union_area) if union_area > 0 else 0.0
|
840
|
+
|
841
|
+
def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
|
842
|
+
"""Return a stable canonical ID for a raw tracker ID, merging fragmented
|
843
|
+
tracks when IoU and temporal constraints indicate they represent the
|
844
|
+
same physical."""
|
845
|
+
if raw_id is None or bbox is None:
|
846
|
+
# Nothing to merge
|
847
|
+
return raw_id
|
848
|
+
|
849
|
+
now = time.time()
|
850
|
+
|
851
|
+
# Fast path – raw_id already mapped
|
852
|
+
if raw_id in self._track_aliases:
|
853
|
+
canonical_id = self._track_aliases[raw_id]
|
854
|
+
track_info = self._canonical_tracks.get(canonical_id)
|
855
|
+
if track_info is not None:
|
856
|
+
track_info["last_bbox"] = bbox
|
857
|
+
track_info["last_update"] = now
|
858
|
+
track_info["raw_ids"].add(raw_id)
|
859
|
+
return canonical_id
|
860
|
+
|
861
|
+
# Attempt to merge with an existing canonical track
|
862
|
+
for canonical_id, info in self._canonical_tracks.items():
|
863
|
+
# Only consider recently updated tracks
|
864
|
+
if now - info["last_update"] > self._track_merge_time_window:
|
865
|
+
continue
|
866
|
+
iou = self._compute_iou(bbox, info["last_bbox"])
|
867
|
+
if iou >= self._track_merge_iou_threshold:
|
868
|
+
# Merge
|
869
|
+
self._track_aliases[raw_id] = canonical_id
|
870
|
+
info["last_bbox"] = bbox
|
871
|
+
info["last_update"] = now
|
872
|
+
info["raw_ids"].add(raw_id)
|
873
|
+
return canonical_id
|
874
|
+
|
875
|
+
# No match – register new canonical track
|
876
|
+
canonical_id = raw_id
|
877
|
+
self._track_aliases[raw_id] = canonical_id
|
878
|
+
self._canonical_tracks[canonical_id] = {
|
879
|
+
"last_bbox": bbox,
|
880
|
+
"last_update": now,
|
881
|
+
"raw_ids": {raw_id},
|
882
|
+
}
|
883
|
+
return canonical_id
|
884
|
+
|
885
|
+
def _format_timestamp(self, timestamp: float) -> str:
|
886
|
+
"""Format a timestamp for human-readable output."""
|
887
|
+
return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
|
888
|
+
|
889
|
+
def _get_tracking_start_time(self) -> str:
|
890
|
+
"""Get the tracking start time, formatted as a string."""
|
891
|
+
if self._tracking_start_time is None:
|
892
|
+
return "N/A"
|
893
|
+
return self._format_timestamp(self._tracking_start_time)
|
894
|
+
|
895
|
+
def _set_tracking_start_time(self) -> None:
|
896
|
+
"""Set the tracking start time to the current time."""
|
897
|
+
self._tracking_start_time = time.time()
|
@@ -313,6 +313,7 @@ class LicensePlateMonitorUseCase(BaseProcessor):
|
|
313
313
|
def _analyze_ocr_in_media(self, data: Any, media_bytes: bytes, config: LicensePlateMonitorConfig) -> List[Dict[str, Any]]:
|
314
314
|
"""Analyze OCR of license plates in video frames or images."""
|
315
315
|
is_video = self._is_video_bytes(media_bytes)
|
316
|
+
print('is_video',is_video)
|
316
317
|
if is_video:
|
317
318
|
return self._analyze_ocr_in_video(data, media_bytes, config)
|
318
319
|
else:
|
@@ -397,20 +398,26 @@ class LicensePlateMonitorUseCase(BaseProcessor):
|
|
397
398
|
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
|
398
399
|
ocr_analysis = []
|
399
400
|
detections = self._get_frame_detections(data, "0")
|
401
|
+
|
402
|
+
print("OCR-detections", detections)
|
400
403
|
|
401
404
|
for detection in detections:
|
405
|
+
print("---------OCR DETECTION",detection)
|
402
406
|
if detection.get("confidence", 1.0) < config.confidence_threshold:
|
403
407
|
continue
|
404
408
|
|
405
409
|
bbox = detection.get("bounding_box", detection.get("bbox"))
|
410
|
+
print("---------OCR BBOX",bbox)
|
406
411
|
if not bbox:
|
407
412
|
continue
|
408
413
|
|
409
414
|
crop = self._crop_bbox(rgb_image, bbox, config.bbox_format)
|
415
|
+
print("---------OCR CROP SIZEE",crop.size)
|
410
416
|
if crop.size == 0:
|
411
417
|
continue
|
412
|
-
|
418
|
+
|
413
419
|
plate_text_raw = self._run_ocr(crop)
|
420
|
+
print("---------OCR PLATE TEXT",plate_text_raw)
|
414
421
|
plate_text = plate_text_raw if plate_text_raw else None
|
415
422
|
|
416
423
|
ocr_record = {
|
@@ -465,6 +472,9 @@ class LicensePlateMonitorUseCase(BaseProcessor):
|
|
465
472
|
|
466
473
|
def _run_ocr(self, crop: np.ndarray) -> str:
|
467
474
|
"""Run OCR on a cropped plate image and return cleaned text or empty string."""
|
475
|
+
print("---------OCR CROP22",crop)
|
476
|
+
print("---------OCR CROP SIZE22",crop.size)
|
477
|
+
|
468
478
|
if crop is None or crop.size == 0 or self.ocr_model is None:
|
469
479
|
return ""
|
470
480
|
try:
|
@@ -521,12 +531,15 @@ class LicensePlateMonitorUseCase(BaseProcessor):
|
|
521
531
|
unique_texts: set = set()
|
522
532
|
valid_detections: List[Dict[str, Any]] = []
|
523
533
|
|
534
|
+
print("---------SUMMARY COUNT CATEGORIES",detections)
|
524
535
|
for det in detections:
|
525
536
|
if not all(k in det for k in ['category', 'confidence', 'bounding_box']):
|
526
537
|
continue
|
527
538
|
|
528
|
-
cat = det.get('category', '
|
539
|
+
cat = det.get('category', 'license_plate')
|
529
540
|
plate_text_raw = det.get('plate_text', '')
|
541
|
+
print("---------SUMMARY COUNT CATEGORIES PLATE TEXT RAW",plate_text_raw)
|
542
|
+
print("---------SUMMARY COUNT CATEGORIES PLATE TEXT RAW",det)
|
530
543
|
cleaned_text = self._clean_text(plate_text_raw)
|
531
544
|
|
532
545
|
# Consider as unique only if meets criteria
|
@@ -545,6 +558,9 @@ class LicensePlateMonitorUseCase(BaseProcessor):
|
|
545
558
|
|
546
559
|
counts = {"License_Plate": len(unique_texts)} if unique_texts else {}
|
547
560
|
|
561
|
+
print("---------SUMMARY COUNT CATEGORIES VALID DETECTIONS",valid_detections)
|
562
|
+
print(len(unique_texts),'per_cat_count',counts)
|
563
|
+
|
548
564
|
return {
|
549
565
|
"total_count": len(unique_texts),
|
550
566
|
"per_category_count": counts,
|
@@ -141,8 +141,8 @@ matrice/deploy/utils/boundary_drawing_internal/boundary_drawing_internal.py,sha2
|
|
141
141
|
matrice/deploy/utils/boundary_drawing_internal/boundary_drawing_tool.py,sha256=eY0VQGZ8BfTmR4_ThIAXaumBjh8_c7w69w-d3kta8p0,15421
|
142
142
|
matrice/deploy/utils/boundary_drawing_internal/example_usage.py,sha256=cUBhxxsVdTQWIPvIOjCUGrhqon7ZBr5N6qNewjrTIuk,6434
|
143
143
|
matrice/deploy/utils/post_processing/__init__.py,sha256=8qpwRZI05pAYvKXQZjmJucVllyhJzcb8MFHiKv2hX1c,26097
|
144
|
-
matrice/deploy/utils/post_processing/config.py,sha256=
|
145
|
-
matrice/deploy/utils/post_processing/processor.py,sha256=
|
144
|
+
matrice/deploy/utils/post_processing/config.py,sha256=OwPV4nQXlsuQZhRzP0jlSNMn2z15TKqP9APTJPIhS8c,5677
|
145
|
+
matrice/deploy/utils/post_processing/processor.py,sha256=eHcJ4Z-x7r4ibvNjwyseDamiU-WB5j78vOMF16pvHrg,35497
|
146
146
|
matrice/deploy/utils/post_processing/advanced_tracker/__init__.py,sha256=tAPFzI_Yep5TLX60FDwKqBqppc-EbxSr0wNsQ9DGI1o,423
|
147
147
|
matrice/deploy/utils/post_processing/advanced_tracker/base.py,sha256=VqWy4dd5th5LK-JfueTt2_GSEoOi5QQfQxjTNhmQoLc,3580
|
148
148
|
matrice/deploy/utils/post_processing/advanced_tracker/config.py,sha256=hEVJVbh4uUrbIynmoq4OhuxF2IZA5AMCBLpixScp5FI,2865
|
@@ -152,7 +152,7 @@ matrice/deploy/utils/post_processing/advanced_tracker/strack.py,sha256=rVH2xOysZ
|
|
152
152
|
matrice/deploy/utils/post_processing/advanced_tracker/tracker.py,sha256=D-PKZ2Pxutmlu--icyxuxjvnWBrzrmZcEChYS0nx00M,14328
|
153
153
|
matrice/deploy/utils/post_processing/core/__init__.py,sha256=MPMj_iRv--PfKBpYN12IjReAzSU7aRMVD6VW-LC95-M,1379
|
154
154
|
matrice/deploy/utils/post_processing/core/base.py,sha256=l3UTC5oft9y_BDoGZk0QCC_QS9J6eWdieNijfK_EAF8,28333
|
155
|
-
matrice/deploy/utils/post_processing/core/config.py,sha256=
|
155
|
+
matrice/deploy/utils/post_processing/core/config.py,sha256=SdhOU9gnhWQZ7HhwehTYSoyxA1J9HfvQB8Kaelci3Nc,107614
|
156
156
|
matrice/deploy/utils/post_processing/core/config_utils.py,sha256=Y_Czm9RmtHuxzBZzGUBA57JRyx5r6tzrM5l89Dbdf_w,28871
|
157
157
|
matrice/deploy/utils/post_processing/ocr/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
158
158
|
matrice/deploy/utils/post_processing/ocr/easyocr_extractor.py,sha256=FwVoUATYdiZtfhSAoiyCo_9dgA786pFZfONx6tsQOfE,11403
|
@@ -171,7 +171,7 @@ matrice/deploy/utils/post_processing/test_cases/test_processor.py,sha256=nwF2EIA
|
|
171
171
|
matrice/deploy/utils/post_processing/test_cases/test_utilities.py,sha256=lmT5bp5_T5yYy1HQ4X01myfScAqnMgf4pd7hHBCjr6A,13414
|
172
172
|
matrice/deploy/utils/post_processing/test_cases/test_utils.py,sha256=bfmOT1rr9asv3jpr-p_UrjnnSZ1qEWM2LEqNKkyvJZ8,29370
|
173
173
|
matrice/deploy/utils/post_processing/usecases/Histopathological_Cancer_Detection_img.py,sha256=bHDXxxG3QgWMFZbDuBaJWpkIvxTXsFMTqCPBCFm3SDs,30247
|
174
|
-
matrice/deploy/utils/post_processing/usecases/__init__.py,sha256=
|
174
|
+
matrice/deploy/utils/post_processing/usecases/__init__.py,sha256=KeP4ykMepIWQUiq5Y02z3cAVeGOjZBGE3PruWBPfEIs,9873
|
175
175
|
matrice/deploy/utils/post_processing/usecases/abandoned_object_detection.py,sha256=MnBfZ_DnB3b70cw354-EnmK-ydswJToC4TPiaDrOS6Y,32514
|
176
176
|
matrice/deploy/utils/post_processing/usecases/advanced_customer_service.py,sha256=ELt5euxr6P4X2s8-YGngmj27QscOHefjOsx3774sNFk,75914
|
177
177
|
matrice/deploy/utils/post_processing/usecases/age_detection.py,sha256=yn1LXOgbnOWSMDnsCds6-uN6W-I1Hy4_-AMrjbT5PtY,41318
|
@@ -184,6 +184,7 @@ matrice/deploy/utils/post_processing/usecases/car_damage_detection.py,sha256=2jP
|
|
184
184
|
matrice/deploy/utils/post_processing/usecases/car_part_segmentation.py,sha256=JbLcl1VvsQ2heuJYOn6QN44odQZ5WwLYWJXpM6iXpVk,46240
|
185
185
|
matrice/deploy/utils/post_processing/usecases/car_service.py,sha256=b9gOBjIuoWywksoq_FrAOmwHs_IVa2L8ldZYp7wPyXA,74383
|
186
186
|
matrice/deploy/utils/post_processing/usecases/cardiomegaly_classification.py,sha256=1P6DyOU6R1XKmQ-55BbKMU8CSsm4-wR5wS827UJG2JU,41244
|
187
|
+
matrice/deploy/utils/post_processing/usecases/cell_microscopy_segmentation.py,sha256=dqzdEwvdrbav10Ezr8N4ESlt04pURRCGOjrAYTBWcwA,43715
|
187
188
|
matrice/deploy/utils/post_processing/usecases/chicken_pose_detection.py,sha256=-e8di7Am-E-FCQFrSY8qJTO1aWtdRAVJoE-VKBgcyyI,29291
|
188
189
|
matrice/deploy/utils/post_processing/usecases/child_monitoring.py,sha256=z3oymoqq4hDGwA8MkdEONZW_Vx5CAZmvzZaNLsqmCfw,39380
|
189
190
|
matrice/deploy/utils/post_processing/usecases/color_detection.py,sha256=IY8T6DTSuDFmZgswP1J_7Y15Ka9EK9ClFvnWsVcDSao,78939
|
@@ -210,7 +211,7 @@ matrice/deploy/utils/post_processing/usecases/leaf.py,sha256=cwgB1ZNxkQFtkk-thSJ
|
|
210
211
|
matrice/deploy/utils/post_processing/usecases/leaf_disease.py,sha256=bkiLccTdf4KUq3he4eCpBlKXb5exr-WBhQ_oWQ7os68,36225
|
211
212
|
matrice/deploy/utils/post_processing/usecases/leak_detection.py,sha256=oOCLLVMuXVeXPHyN8FUrD3U9JYJJwIz-5fcEMgvLdls,40531
|
212
213
|
matrice/deploy/utils/post_processing/usecases/license_plate_detection.py,sha256=WmVmtp-GLUSNtBxjZHUtUk_M9lPp_8gA0rdzFXVL1SY,44961
|
213
|
-
matrice/deploy/utils/post_processing/usecases/license_plate_monitoring.py,sha256=
|
214
|
+
matrice/deploy/utils/post_processing/usecases/license_plate_monitoring.py,sha256=Ir0nEFAPxf8SMztfYxAMFekLTrjZ3WhF9tVQsl2X1UA,55556
|
214
215
|
matrice/deploy/utils/post_processing/usecases/litter_monitoring.py,sha256=XaHAUGRBDJg_iVbu8hRMjTR-5TqrLj6ZNCRkInbzZTY,33255
|
215
216
|
matrice/deploy/utils/post_processing/usecases/mask_detection.py,sha256=MNpCcuefOdW7C8g_x_mNuWYA4mbyg8UNwomwBPoKtr0,39684
|
216
217
|
matrice/deploy/utils/post_processing/usecases/parking.py,sha256=lqTGqcjUZZPFw3tu11Ha8BSsZ311K5--wEZnlVsXakU,34534
|
@@ -261,8 +262,8 @@ matrice/deployment/camera_manager.py,sha256=e1Lc81RJP5wUWRdTgHO6tMWF9BkBdHOSVyx3
|
|
261
262
|
matrice/deployment/deployment.py,sha256=HFt151eWq6iqIAMsQvurpV2WNxW6Cx_gIUVfnVy5SWE,48093
|
262
263
|
matrice/deployment/inference_pipeline.py,sha256=6b4Mm3-qt-Zy0BeiJfFQdImOn3FzdNCY-7ET7Rp8PMk,37911
|
263
264
|
matrice/deployment/streaming_gateway_manager.py,sha256=ifYGl3g25wyU39HwhPQyI2OgF3M6oIqKMWt8RXtMxY8,21401
|
264
|
-
matrice-1.0.
|
265
|
-
matrice-1.0.
|
266
|
-
matrice-1.0.
|
267
|
-
matrice-1.0.
|
268
|
-
matrice-1.0.
|
265
|
+
matrice-1.0.99399.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
|
266
|
+
matrice-1.0.99399.dist-info/METADATA,sha256=CVb6HqOjakisnp4f5u5np3CC1ghEqNdqYxicqhszpZA,14624
|
267
|
+
matrice-1.0.99399.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
268
|
+
matrice-1.0.99399.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
|
269
|
+
matrice-1.0.99399.dist-info/RECORD,,
|
File without changes
|
File without changes
|
File without changes
|