matrice-1.0.99217-py3-none-any.whl → matrice-1.0.99218-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/matrice/deploy/utils/post_processing/config.py
+++ b/matrice/deploy/utils/post_processing/config.py
@@ -47,6 +47,7 @@ APP_NAME_TO_USECASE = {
     "human_activity_recognition": "human_activity_recognition",
     "leak_detection": "leak_detection",
     "fire_smoke_detection": "fire_smoke_detection",
+    "human_activity": "human_activity",
 }
 
 APP_NAME_TO_CATEGORY = {
@@ -98,6 +99,7 @@ APP_NAME_TO_CATEGORY = {
     "human_activity_recognition": "general",
     "leak_detection": "oil_gas",
     "fire_smoke_detection": "hazard",
+    "human_activity": "general",
 }
 
 def get_usecase_from_app_name(app_name: str) -> str:
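
The two dicts map an app name to its use-case key and category, so registering the new "human_activity" app only requires the two entries above. A minimal sketch of the lookup (dict contents abbreviated; the function body shown here is an assumption based on the signature, not the package's actual implementation):

APP_NAME_TO_USECASE = {"human_activity": "human_activity"}  # abbreviated
APP_NAME_TO_CATEGORY = {"human_activity": "general"}        # abbreviated

def get_usecase_from_app_name(app_name: str) -> str:
    # Assumed behaviour: resolve an app name to its use-case key.
    return APP_NAME_TO_USECASE[app_name]

assert get_usecase_from_app_name("human_activity") == "human_activity"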
--- a/matrice/deploy/utils/post_processing/core/config.py
+++ b/matrice/deploy/utils/post_processing/core/config.py
@@ -514,6 +514,7 @@ class ConfigManager:
         'waterbody_segmentation': None,
         'litter_detection': None,
         'leak_detection': None,
+        'human_activity': None,
 
         #Put all image based usecases here::
         'blood_cancer_detection_img': None,
@@ -926,6 +927,14 @@ class ConfigManager:
         except ImportError:
             return None
 
+    def human_activity_config_class(self):
+        """Register a configuration class for a use case."""
+        try:
+            from ..usecases.human_activity import HumanActivityRecognitionConfig
+            return HumanActivityRecognitionConfig
+        except ImportError:
+            return None
+
     #put all image based usecases here::
     def blood_cancer_detection_config_class(self):
         """Register a configuration class for a use case."""
@@ -1910,7 +1919,27 @@ class ConfigManager:
                 alert_config=alert_config,
                 **kwargs
             )
+
+        elif usecase == "human_activity":
+            # Import here to avoid circular import
+            from ..usecases.human_activity import HumanActivityRecognitionConfig
+
+            # Handle nested configurations
+            zone_config = kwargs.pop("zone_config", None)
+            if zone_config and isinstance(zone_config, dict):
+                zone_config = ZoneConfig(**zone_config)
+
+            alert_config = kwargs.pop("alert_config", None)
+            if alert_config and isinstance(alert_config, dict):
+                alert_config = AlertConfig(**alert_config)
 
+            config = HumanActivityRecognitionConfig(
+                category=category or "general",
+                usecase=usecase,
+                zone_config=zone_config,
+                alert_config=alert_config,
+                **kwargs
+            )
 
         #Add IMAGE based usecases here::
         elif usecase == "blood_cancer_detection_img":
--- a/matrice/deploy/utils/post_processing/processor.py
+++ b/matrice/deploy/utils/post_processing/processor.py
@@ -62,6 +62,7 @@ from .usecases import (
     LaneDetectionUseCase,
     LitterDetectionUseCase,
     LeakDetectionUseCase,
+    HumanActivityRecognitionUseCase,
 
 
     AgeDetectionUseCase,
@@ -224,6 +225,7 @@ class PostProcessor:
         registry.register_use_case("litter_detection", "litter_detection", LitterDetectionUseCase)
         registry.register_use_case("general", "human_activity_recognition", HumanActivityUseCase)
         registry.register_use_case("oil_gas", "leak_detection", LeakDetectionUseCase)
+        registry.register_use_case("general", "human_activity", HumanActivityRecognitionUseCase)
 
         #Put all IMAGE based usecases here
         registry.register_use_case("healthcare", "bloodcancer_img_detection", BloodCancerDetectionUseCase)
@@ -398,6 +400,8 @@ class PostProcessor:
             result = use_case.process(data, parsed_config, context, stream_info)
         elif isinstance(use_case, LeakDetectionUseCase):
             result = use_case.process(data, parsed_config, context, stream_info)
+        elif isinstance(use_case, HumanActivityRecognitionUseCase):
+            result = use_case.process(data, parsed_config, context, stream_info)
 
         #Put all IMAGE based usecases here
         elif isinstance(use_case, BloodCancerDetectionUseCase):
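
Wiring a use case into `PostProcessor` thus takes two touches: a `registry.register_use_case(category, usecase, cls)` call and an `isinstance` branch forwarding to `use_case.process(...)`. A self-contained sketch of that registry pattern (the registry internals below are assumptions; only the call shapes come from the diff):

from typing import Dict, Tuple, Type

class UseCaseRegistry:
    """Toy stand-in for the SDK's registry, keyed by (category, usecase)."""

    def __init__(self) -> None:
        self._use_cases: Dict[Tuple[str, str], Type] = {}

    def register_use_case(self, category: str, usecase: str, cls: Type) -> None:
        self._use_cases[(category, usecase)] = cls

    def get_use_case(self, category: str, usecase: str) -> Type:
        return self._use_cases[(category, usecase)]

class DummyUseCase:  # placeholder for HumanActivityRecognitionUseCase
    def process(self, data, parsed_config, context, stream_info):
        return {"processed": data}

registry = UseCaseRegistry()
registry.register_use_case("general", "human_activity", DummyUseCase)
use_case = registry.get_use_case("general", "human_activity")()
print(use_case.process([], None, None, None))  # {'processed': []}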
--- a/matrice/deploy/utils/post_processing/usecases/__init__.py
+++ b/matrice/deploy/utils/post_processing/usecases/__init__.py
@@ -70,6 +70,7 @@ from .drowsy_driver_detection import DrowsyDriverUseCase, DrowsyDriverUseCase
 from .waterbody_segmentation import WaterBodyConfig, WaterBodyUseCase
 from .litter_monitoring import LitterDetectionConfig,LitterDetectionUseCase
 from .leak_detection import LeakDetectionConfig, LeakDetectionUseCase
+from .human_activity import HumanActivityRecognitionConfig, HumanActivityRecognitionUseCase
 
 #Put all IMAGE based usecases here
 from .blood_cancer_detection_img import BloodCancerDetectionConfig, BloodCancerDetectionUseCase
@@ -137,6 +138,7 @@ __all__ = [
     'WaterBodyUseCase',
     'LitterDetectionUseCase',
     'LeakDetectionUseCase',
+    'HumanActivityRecognitionUseCase',
 
     #Put all IMAGE based usecases here
     'BloodCancerDetectionUseCase',
@@ -204,6 +206,7 @@ __all__ = [
     'WaterBodyConfig',
     'LitterDetectionConfig',
     'LeakDetectionConfig',
+    'HumanActivityRecognitionConfig',
 
 
     #Put all IMAGE based usecase CONFIGS here
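
With these exports in place, both new symbols are importable from the `usecases` package (import path per the wheel's RECORD):

from matrice.deploy.utils.post_processing.usecases import (
    HumanActivityRecognitionConfig,
    HumanActivityRecognitionUseCase,
)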
--- /dev/null
+++ b/matrice/deploy/utils/post_processing/usecases/human_activity.py
@@ -0,0 +1,841 @@
+from typing import Any, Dict, List, Optional
+from dataclasses import asdict
+import time
+from datetime import datetime, timezone
+
+from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
+from ..utils import (
+    filter_by_confidence,
+    filter_by_categories,
+    apply_category_mapping,
+    count_objects_by_category,
+    count_objects_in_zones,
+    calculate_counting_summary,
+    match_results_structure,
+    bbox_smoothing,
+    BBoxSmoothingConfig,
+    BBoxSmoothingTracker
+)
+from dataclasses import dataclass, field
+from ..core.config import BaseConfig, AlertConfig, ZoneConfig
+
+
+@dataclass
+class HumanActivityRecognitionConfig(BaseConfig):
+    """Configuration for the human activity recognition use case."""
+    # Smoothing configuration
+    enable_smoothing: bool = True
+    smoothing_algorithm: str = "observability"  # "window" or "observability"
+    smoothing_window_size: int = 20
+    smoothing_cooldown_frames: int = 5
+    smoothing_confidence_range_factor: float = 0.5
+
+    # Confidence thresholds
+    confidence_threshold: float = 0.3
+
+    usecase_categories: List[str] = field(
+        default_factory=lambda: ['Drinking', 'Fall-Detected', 'Fall_down', 'Lying_down', 'Nearly_fall', 'Sit Down', 'Sitting', 'Standing', 'Walking', 'Walking_on_Stairs', 'crawling', 'falling', 'sitting', 'standing', 'walking']
+    )
+
+    target_categories: List[str] = field(
+        default_factory=lambda: ['Drinking', 'Fall-Detected', 'Fall_down', 'Lying_down', 'Nearly_fall', 'Sit Down', 'Sitting', 'Standing', 'Walking', 'Walking_on_Stairs', 'crawling', 'falling', 'sitting', 'standing', 'walking']
+    )
+
+    alert_config: Optional[AlertConfig] = None
+
+    index_to_category: Optional[Dict[int, str]] = field(
+        default_factory=lambda: {
+            0: 'Drinking',
+            1: 'Fall-Detected',
+            2: 'Fall_down',
+            3: 'Lying_down',
+            4: 'Nearly_fall',
+            5: 'Sit Down',
+            6: 'Sitting',
+            7: 'Standing',
+            8: 'Walking',
+            9: 'Walking_on_Stairs',
+            10: 'crawling',
+            11: 'falling',
+            12: 'sitting',
+            13: 'standing',
+            14: 'walking'
+        }
+    )
+
+
+class HumanActivityRecognitionUseCase(BaseProcessor):
+    # Human-friendly display names for categories
+    # CATEGORY_DISPLAY = {
+    #     "leak": "leak",
+    # }
+
+    def __init__(self):
+        super().__init__("human_activity")
+        self.category = "general"
+
+        self.CASE_TYPE: Optional[str] = 'human_activity'
+        self.CASE_VERSION: Optional[str] = '1.2'
+        # List of categories to track
+        self.target_categories = ['Drinking', 'Fall-Detected', 'Fall_down', 'Lying_down', 'Nearly_fall', 'Sit Down', 'Sitting', 'Standing', 'Walking', 'Walking_on_Stairs', 'crawling', 'falling', 'sitting', 'standing', 'walking']
+
+
+        # Initialize smoothing tracker
+        self.smoothing_tracker = None
+
+        # Initialize advanced tracker (will be created on first use)
+        self.tracker = None
+        # Initialize tracking state variables
+        self._total_frame_counter = 0
+        self._global_frame_offset = 0
+
+        # Track start time for "TOTAL SINCE" calculation
+        self._tracking_start_time = None
+
+        self._track_aliases: Dict[Any, Any] = {}
+        self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
+        # Tunable parameters – adjust if necessary for specific scenarios
+        self._track_merge_iou_threshold: float = 0.05  # IoU ≥ 0.05 → merge as the same track
+        self._track_merge_time_window: float = 7.0  # seconds within which to merge
+
+        self._ascending_alert_list: List[int] = []
+        self.current_incident_end_timestamp: str = "N/A"
+
+
+    def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
+                stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
+        """
+        Main entry point for post-processing.
+        Applies category mapping, smoothing, counting, alerting, and summary generation.
+        Returns a ProcessingResult with all relevant outputs.
+        """
+        start_time = time.time()
+        # Ensure config is the correct type
+        if not isinstance(config, HumanActivityRecognitionConfig):
+            return self.create_error_result("Invalid config type", usecase=self.name, category=self.category,
+                                            context=context)
+        if context is None:
+            context = ProcessingContext()
+
+        # Detect input format and store in context
+        input_format = match_results_structure(data)
+        context.input_format = input_format
+        context.confidence_threshold = config.confidence_threshold
+
+        if config.confidence_threshold is not None:
+            processed_data = filter_by_confidence(data, config.confidence_threshold)
+            self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
+        else:
+            processed_data = data
+
+            self.logger.debug("Did not apply confidence filtering since no threshold was provided")
+
+        # Step 2: Apply category mapping if provided
+        if config.index_to_category:
+            processed_data = apply_category_mapping(processed_data, config.index_to_category)
+            self.logger.debug("Applied category mapping")
+
+        if config.target_categories:
+            processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
+            self.logger.debug("Applied category filtering")
+
+        # Apply bbox smoothing if enabled
+        if config.enable_smoothing:
+            if self.smoothing_tracker is None:
+                smoothing_config = BBoxSmoothingConfig(
+                    smoothing_algorithm=config.smoothing_algorithm,
+                    window_size=config.smoothing_window_size,
+                    cooldown_frames=config.smoothing_cooldown_frames,
+                    confidence_threshold=config.confidence_threshold,  # Use the confidence threshold as default
+                    confidence_range_factor=config.smoothing_confidence_range_factor,
+                    enable_smoothing=True
+                )
+                self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
+            processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
+
+        # Advanced tracking (BYTETracker-like)
+        try:
+            from ..advanced_tracker import AdvancedTracker
+            from ..advanced_tracker.config import TrackerConfig
+
+            # Create tracker instance if it doesn't exist (preserves state across frames)
+            if self.tracker is None:
+                # Configure tracker thresholds based on the use-case confidence threshold so that
+                # low-confidence detections (e.g. < 0.7) can still be initialised as tracks when
+                # the user passes a lower `confidence_threshold` in the post-processing config.
+                if config.confidence_threshold is not None:
+                    tracker_config = TrackerConfig(
+                        track_high_thresh=float(config.confidence_threshold),
+                        # Allow even lower detections to participate in secondary association
+                        track_low_thresh=max(0.05, float(config.confidence_threshold) / 2),
+                        new_track_thresh=float(config.confidence_threshold)
+                    )
+                else:
+                    tracker_config = TrackerConfig()
+                self.tracker = AdvancedTracker(tracker_config)
+                self.logger.info(
+                    "Initialized AdvancedTracker for monitoring and tracking with thresholds: "
+                    f"high={tracker_config.track_high_thresh}, "
+                    f"low={tracker_config.track_low_thresh}, "
+                    f"new={tracker_config.new_track_thresh}"
+                )
+
+            # The tracker expects the data in the same format as the input
+            # and will add track_id and frame_id to each detection.
+            processed_data = self.tracker.update(processed_data)
+
+        except Exception as e:
+            # If the advanced tracker fails, fall back to unsmoothed detections
+            self.logger.warning(f"AdvancedTracker failed: {e}")
+
+        # Update tracking state for total count per label
+        self._update_tracking_state(processed_data)
+
+        # Update frame counter
+        self._total_frame_counter += 1
+
+        # Extract frame information from stream_info
+        frame_number = None
+        if stream_info:
+            input_settings = stream_info.get("input_settings", {})
+            start_frame = input_settings.get("start_frame")
+            end_frame = input_settings.get("end_frame")
+            # If start and end frame are the same, it's a single frame
+            if start_frame is not None and end_frame is not None and start_frame == end_frame:
+                frame_number = start_frame
+
+        # Compute summaries and alerts
+        general_counting_summary = calculate_counting_summary(data)
+        counting_summary = self._count_categories(processed_data, config)
+        # Add total unique counts after tracking using only local state
+        total_counts = self.get_total_counts()
+        counting_summary['total_counts'] = total_counts
+
+        alerts = self._check_alerts(counting_summary, frame_number, config)
+        predictions = self._extract_predictions(processed_data)
+
+        # Step: Generate structured incidents, tracking stats and business analytics with frame-based keys
+        incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
+        tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
+        business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info, is_empty=True)
+        summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)
+
+        # Extract frame-based dictionaries from the lists
+        incidents = incidents_list[0] if incidents_list else {}
+        tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
+        business_analytics = business_analytics_list[0] if business_analytics_list else {}
+        summary = summary_list[0] if summary_list else {}
+        agg_summary = {str(frame_number): {
+            "incidents": incidents,
+            "tracking_stats": tracking_stats,
+            "business_analytics": business_analytics,
+            "alerts": alerts,
+            "human_text": summary}
+        }
+
+
+        context.mark_completed()
+
+        # Build result object following the new pattern
+
+        result = self.create_result(
+            data={"agg_summary": agg_summary},
+            usecase=self.name,
+            category=self.category,
+            context=context
+        )
+
+        return result
+
+    def _check_alerts(self, summary: dict, frame_number: Any, config: HumanActivityRecognitionConfig) -> List[Dict]:
+        """
+        Check if any alert thresholds are exceeded and return alert dicts.
+        """
+        def get_trend(data, lookback=900, threshold=0.6):
+            '''
+            Determine if the trend is ascending or descending based on actual value progression.
+            Works with values 0,1,2,3 (not just binary).
+            '''
+            window = data[-lookback:] if len(data) >= lookback else data
+            if len(window) < 2:
+                return True  # not enough data to determine trend
+            increasing = 0
+            total = 0
+            for i in range(1, len(window)):
+                if window[i] >= window[i - 1]:
+                    increasing += 1
+                total += 1
+            ratio = increasing / total
+            if ratio >= threshold:
+                return True
+            elif ratio <= (1 - threshold):
+                return False
+
+        frame_key = str(frame_number) if frame_number is not None else "current_frame"
+        alerts = []
+        total_detections = summary.get("total_count", 0)  # CURRENT combined total count of all classes
+        total_counts_dict = summary.get("total_counts", {})  # TOTAL cumulative counts per class
+        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0  # TOTAL combined cumulative count
+        per_category_count = summary.get("per_category_count", {})  # CURRENT count per class
+
+        if not config.alert_config:
+            return alerts
+
+        total = summary.get("total_count", 0)
+        # self._ascending_alert_list
+        if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
+
+            for category, threshold in config.alert_config.count_thresholds.items():
+                if category == "all" and total > threshold:
+
+                    alerts.append({
+                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                        "alert_id": "alert_" + category + '_' + frame_key,
+                        "incident_category": self.CASE_TYPE,
+                        "threshold_level": threshold,
+                        "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                          getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                                     }
+                    })
+                elif category in summary.get("per_category_count", {}):
+                    count = summary.get("per_category_count", {})[category]
+                    if count > threshold:  # Fixed logic: alert when EXCEEDING threshold
+                        alerts.append({
+                            "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                            "alert_id": "alert_" + category + '_' + frame_key,
+                            "incident_category": self.CASE_TYPE,
+                            "threshold_level": threshold,
+                            "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
+                            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                              getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                                         }
+                        })
+                    else:
+                        pass
+        return alerts
+
+    def _generate_incidents(self, counting_summary: Dict, alerts: List, config: HumanActivityRecognitionConfig,
+                            frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
+        """Generate structured incidents for the output format with frame-based keys."""
+
+        incidents = []
+        total_detections = counting_summary.get("total_count", 0)
+        current_timestamp = self._get_current_timestamp_str(stream_info)
+        camera_info = self.get_camera_info_from_stream(stream_info)
+
+        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
+
+        if total_detections > 0:
+            # Determine event level based on thresholds
+            level = "low"
+            intensity = 5.0
+            start_timestamp = self._get_start_timestamp_str(stream_info)
+            if start_timestamp and self.current_incident_end_timestamp == 'N/A':
+                self.current_incident_end_timestamp = 'Incident still active'
+            elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
+                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+                    self.current_incident_end_timestamp = current_timestamp
+            elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
+                self.current_incident_end_timestamp = 'N/A'
+
+            if config.alert_config and config.alert_config.count_thresholds:
+                threshold = config.alert_config.count_thresholds.get("all", 15)
+                intensity = min(10.0, (total_detections / threshold) * 10)
+
+                if intensity >= 9:
+                    level = "critical"
+                    self._ascending_alert_list.append(3)
+                elif intensity >= 7:
+                    level = "significant"
+                    self._ascending_alert_list.append(2)
+                elif intensity >= 5:
+                    level = "medium"
+                    self._ascending_alert_list.append(1)
+                else:
+                    level = "low"
+                    self._ascending_alert_list.append(0)
+            else:
+                if total_detections > 30:
+                    level = "critical"
+                    intensity = 10.0
+                    self._ascending_alert_list.append(3)
+                elif total_detections > 25:
+                    level = "significant"
+                    intensity = 9.0
+                    self._ascending_alert_list.append(2)
+                elif total_detections > 15:
+                    level = "medium"
+                    intensity = 7.0
+                    self._ascending_alert_list.append(1)
+                else:
+                    level = "low"
+                    intensity = min(10.0, total_detections / 3.0)
+                    self._ascending_alert_list.append(0)
+
+            # Generate human text in the new format
+            human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
+            human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
+            human_text = "\n".join(human_text_lines)
+
+            alert_settings = []
+            if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+                alert_settings.append({
+                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                    "incident_category": self.CASE_TYPE,
+                    "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                    "ascending": True,
+                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                      getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                                 }
+                })
+
+            event = self.create_incident(incident_id=self.CASE_TYPE + '_' + str(frame_number), incident_type=self.CASE_TYPE,
+                                         severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
+                                         start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
+                                         level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7})
+            incidents.append(event)
+
+        else:
+            self._ascending_alert_list.append(0)
+            incidents.append({})
+
+        return incidents
+
+    def _generate_tracking_stats(
+        self,
+        counting_summary: Dict,
+        alerts: List,
+        config: HumanActivityRecognitionConfig,
+        frame_number: Optional[int] = None,
+        stream_info: Optional[Dict[str, Any]] = None
+    ) -> List[Dict]:
+        """Generate structured tracking stats matching the eg.json format."""
+        camera_info = self.get_camera_info_from_stream(stream_info)
+
+        # frame_key = str(frame_number) if frame_number is not None else "current_frame"
+        # tracking_stats = [{frame_key: []}]
+        # frame_tracking_stats = tracking_stats[0][frame_key]
+        tracking_stats = []
+
+        total_detections = counting_summary.get("total_count", 0)  # CURRENT total count of all classes
+        total_counts_dict = counting_summary.get("total_counts", {})  # TOTAL cumulative counts per class
+        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0  # TOTAL combined cumulative count
+        per_category_count = counting_summary.get("per_category_count", {})  # CURRENT count per class
+
+        current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
+        start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
+
+        # Create high precision timestamps for input_timestamp and reset_timestamp
+        high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
+        high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
+
+
+        # Build total_counts array in the expected format
+        total_counts = []
+        for cat, count in total_counts_dict.items():
+            if count > 0:
+                total_counts.append({
+                    "category": cat,
+                    "count": count
+                })
+
+        # Build current_counts array in the expected format
+        current_counts = []
+        for cat, count in per_category_count.items():
+            if count > 0 or total_detections > 0:  # Include even if 0 when there are detections
+                current_counts.append({
+                    "category": cat,
+                    "count": count
+                })
+
+        # Prepare detections without confidence scores (as per eg.json)
+        detections = []
+        for detection in counting_summary.get("detections", []):
+            bbox = detection.get("bounding_box", {})
+            category = detection.get("category", "person")
+            # Include segmentation if available (like in eg.json)
+            if detection.get("masks"):
+                segmentation = detection.get("masks", [])
+                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+            elif detection.get("segmentation"):
+                segmentation = detection.get("segmentation")
+                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+            elif detection.get("mask"):
+                segmentation = detection.get("mask")
+                detection_obj = self.create_detection_object(category, bbox, segmentation=segmentation)
+            else:
+                detection_obj = self.create_detection_object(category, bbox)
+            detections.append(detection_obj)
+
+        # Build alert_settings array in the expected format
+        alert_settings = []
+        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
+            alert_settings.append({
+                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                "incident_category": self.CASE_TYPE,
+                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
+                "ascending": True,
+                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+                                                  getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
+                             }
+            })
+
+        # Generate human_text in the expected format
+        human_text_lines = ["Tracking Statistics:"]
+        human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
+
+        for cat, count in per_category_count.items():
+            human_text_lines.append(f"\t{cat}: {count}")
+
+        human_text_lines.append(f"TOTAL SINCE {start_timestamp}")
+        for cat, count in total_counts_dict.items():
+            if count > 0:
+                human_text_lines.append(f"\t{cat}: {count}")
+
+        if alerts:
+            for alert in alerts:
+                human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
+        else:
+            human_text_lines.append("Alerts: None")
+
+        human_text = "\n".join(human_text_lines)
+        reset_settings = [
+            {
+                "interval_type": "daily",
+                "reset_time": {
+                    "value": 9,
+                    "time_unit": "hour"
+                }
+            }
+        ]
+
+        tracking_stat = self.create_tracking_stats(total_counts=total_counts, current_counts=current_counts,
+                                                   detections=detections, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
+                                                   reset_settings=reset_settings, start_time=high_precision_start_timestamp,
+                                                   reset_time=high_precision_reset_timestamp)
+
+        tracking_stats.append(tracking_stat)
+        return tracking_stats
+
+    def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: HumanActivityRecognitionConfig, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
+        """Generate standardized business analytics for the agg_summary structure."""
+        if is_empty:
+            return []
+
+        # -----IF YOUR USECASE NEEDS BUSINESS ANALYTICS, YOU CAN USE THIS FUNCTION------#
+        # camera_info = self.get_camera_info_from_stream(stream_info)
+        # business_analytics = self.create_business_analytics(analysis_name, statistics,
+        #     human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
+        #     reset_settings)
+        # return business_analytics
+
+    def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[Dict]:
+        """
+        Generate a human_text summary for the tracking stats, incidents, business analytics and alerts.
+        """
+        lines = {}
+        lines["Application Name"] = self.CASE_TYPE
+        lines["Application Version"] = self.CASE_VERSION
+        if len(incidents) > 0:
+            lines["Incidents:"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
+        if len(tracking_stats) > 0:
+            lines["Tracking Statistics:"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
+        if len(business_analytics) > 0:
+            lines["Business Analytics:"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
+
+        if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
+            lines["Summary"] = "No Summary Data"
+
+        return [lines]
+
+    def _get_track_ids_info(self, detections: list) -> Dict[str, Any]:
+        """
+        Get detailed information about track IDs (per frame).
+        """
+        # Collect all track_ids in this frame
+        frame_track_ids = set()
+        for det in detections:
+            tid = det.get('track_id')
+            if tid is not None:
+                frame_track_ids.add(tid)
+        # Use the persistent total set for unique counting
+        total_track_ids = set()
+        for s in getattr(self, '_per_category_total_track_ids', {}).values():
+            total_track_ids.update(s)
+        return {
+            "total_count": len(total_track_ids),
+            "current_frame_count": len(frame_track_ids),
+            "total_unique_track_ids": len(total_track_ids),
+            "current_frame_track_ids": list(frame_track_ids),
+            "last_update_time": time.time(),
+            "total_frames_processed": getattr(self, '_total_frame_counter', 0)
+        }
+
+    def _update_tracking_state(self, detections: list):
+        """
+        Track unique track_ids per category for the total count after tracking.
+        Applies canonical ID merging to avoid duplicate counting when the underlying
+        tracker loses an object temporarily and assigns a new ID.
+        """
+        # Lazily initialise storage dicts
+        if not hasattr(self, "_per_category_total_track_ids"):
+            self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
+            self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
+
+        for det in detections:
+            cat = det.get("category")
+            raw_track_id = det.get("track_id")
+            if cat not in self.target_categories or raw_track_id is None:
+                continue
+            bbox = det.get("bounding_box", det.get("bbox"))
+            canonical_id = self._merge_or_register_track(raw_track_id, bbox)
+            # Propagate the canonical ID back to the detection so downstream logic uses it
+            det["track_id"] = canonical_id
+
+            self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
+            self._current_frame_track_ids[cat].add(canonical_id)
+
+    def get_total_counts(self):
+        """
+        Return the total unique track_id count for each category.
+        """
+        return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
+
+
+    def _format_timestamp_for_stream(self, timestamp: float) -> str:
+        """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
+        dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
+        return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+    def _format_timestamp_for_video(self, timestamp: float) -> str:
+        """Format timestamp for video chunks (HH:MM:SS.ms format)."""
+        hours = int(timestamp // 3600)
+        minutes = int((timestamp % 3600) // 60)
+        seconds = round(float(timestamp % 60), 2)
+        return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
+
+    def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str] = None) -> str:
+        """Get the formatted current timestamp based on the stream type."""
+        if not stream_info:
+            return "00:00:00.00"
+        # is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
+        if precision:
+            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                if frame_id:
+                    start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+                else:
+                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
+                stream_time_str = self._format_timestamp_for_video(start_time)
+                return stream_time_str
+            else:
+                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+
+        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+            if frame_id:
+                start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+            else:
+                start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
+            stream_time_str = self._format_timestamp_for_video(start_time)
+            return stream_time_str
+        else:
+            # For streams, use stream_time from stream_info
+            stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+            if stream_time_str:
+                # Parse the high precision timestamp string to get the timestamp
+                try:
+                    # Remove the " UTC" suffix and parse
+                    timestamp_str = stream_time_str.replace(" UTC", "")
+                    dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                    timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
+                    return self._format_timestamp_for_stream(timestamp)
+                except Exception:
+                    # Fall back to the current time if parsing fails
+                    return self._format_timestamp_for_stream(time.time())
+            else:
+                return self._format_timestamp_for_stream(time.time())
+
+    def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
+        """Get the formatted start timestamp for 'TOTAL SINCE' based on the stream type."""
+        if not stream_info:
+            return "00:00:00"
+        if precision:
+            if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+                return "00:00:00"
+            else:
+                return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
+
+        if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
+            # If video format, start from 00:00:00
+            return "00:00:00"
+        else:
+            # For streams, use the tracking start time or the current time with minutes/seconds reset
+            if self._tracking_start_time is None:
+                # Try to extract the timestamp from the stream_time string
+                stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
+                if stream_time_str:
+                    try:
+                        # Remove the " UTC" suffix and parse
+                        timestamp_str = stream_time_str.replace(" UTC", "")
+                        dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
+                        self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
+                    except Exception:
+                        # Fall back to the current time if parsing fails
+                        self._tracking_start_time = time.time()
+                else:
+                    self._tracking_start_time = time.time()
+
+            dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
+            # Reset minutes and seconds to 00:00 for the "TOTAL SINCE" format
+            dt = dt.replace(minute=0, second=0, microsecond=0)
+            return dt.strftime('%Y:%m:%d %H:%M:%S')
+
+
+    def _count_categories(self, detections: list, config: HumanActivityRecognitionConfig) -> dict:
+        """
+        Count the number of detections per category and return a summary dict.
+        The detections list is expected to have 'track_id' (from the tracker), 'category', 'bounding_box', etc.
+        The output structure includes 'track_id' for each detection, as per the AdvancedTracker output.
+        """
+        counts = {}
+        for det in detections:
+            cat = det.get('category', 'unknown')
+            counts[cat] = counts.get(cat, 0) + 1
+        # Each detection dict will now include 'track_id' (and possibly 'frame_id')
+        return {
+            "total_count": sum(counts.values()),
+            "per_category_count": counts,
+            "detections": [
+                {
+                    "bounding_box": det.get("bounding_box"),
+                    "category": det.get("category"),
+                    "confidence": det.get("confidence"),
+                    "track_id": det.get("track_id"),
+                    "frame_id": det.get("frame_id")
+                }
+                for det in detections
+            ]
+        }
+
+    def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
+        """
+        Extract prediction details for output (category, confidence, bounding box).
+        """
+        return [
+            {
+                "category": det.get("category", "unknown"),
+                "confidence": det.get("confidence", 0.0),
+                "bounding_box": det.get("bounding_box", {})
+            }
+            for det in detections
+        ]
+
+    # ------------------------------------------------------------------ #
+    # Canonical ID helpers                                                #
+    # ------------------------------------------------------------------ #
+    def _compute_iou(self, box1: Any, box2: Any) -> float:
+        """Compute IoU between two bounding boxes, which may be dicts or lists.
+        Falls back to 0 when insufficient data is available."""
+
+        # Helper to convert a bbox (dict or list) to [x1, y1, x2, y2]
+        def _bbox_to_list(bbox):
+            if bbox is None:
+                return []
+            if isinstance(bbox, list):
+                return bbox[:4] if len(bbox) >= 4 else []
+            if isinstance(bbox, dict):
+                if "xmin" in bbox:
+                    return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
+                if "x1" in bbox:
+                    return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
+                # Fallback: first four numeric values
+                values = [v for v in bbox.values() if isinstance(v, (int, float))]
+                return values[:4] if len(values) >= 4 else []
+            return []
+
+        l1 = _bbox_to_list(box1)
+        l2 = _bbox_to_list(box2)
+        if len(l1) < 4 or len(l2) < 4:
+            return 0.0
+        x1_min, y1_min, x1_max, y1_max = l1
+        x2_min, y2_min, x2_max, y2_max = l2
+
+        # Ensure correct order
+        x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
+        y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
+        x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
+        y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
+
+        inter_x_min = max(x1_min, x2_min)
+        inter_y_min = max(y1_min, y2_min)
+        inter_x_max = min(x1_max, x2_max)
+        inter_y_max = min(y1_max, y2_max)
+
+        inter_w = max(0.0, inter_x_max - inter_x_min)
+        inter_h = max(0.0, inter_y_max - inter_y_min)
+        inter_area = inter_w * inter_h
+
+        area1 = (x1_max - x1_min) * (y1_max - y1_min)
+        area2 = (x2_max - x2_min) * (y2_max - y2_min)
+        union_area = area1 + area2 - inter_area
+
+        return (inter_area / union_area) if union_area > 0 else 0.0
+
+    def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
+        """Return a stable canonical ID for a raw tracker ID, merging fragmented
+        tracks when IoU and temporal constraints indicate they represent the
+        same physical object."""
+        if raw_id is None or bbox is None:
+            # Nothing to merge
+            return raw_id
+
+        now = time.time()
+
+        # Fast path – raw_id already mapped
+        if raw_id in self._track_aliases:
+            canonical_id = self._track_aliases[raw_id]
+            track_info = self._canonical_tracks.get(canonical_id)
+            if track_info is not None:
+                track_info["last_bbox"] = bbox
+                track_info["last_update"] = now
+                track_info["raw_ids"].add(raw_id)
+            return canonical_id
+
+        # Attempt to merge with an existing canonical track
+        for canonical_id, info in self._canonical_tracks.items():
+            # Only consider recently updated tracks
+            if now - info["last_update"] > self._track_merge_time_window:
+                continue
+            iou = self._compute_iou(bbox, info["last_bbox"])
+            if iou >= self._track_merge_iou_threshold:
+                # Merge
+                self._track_aliases[raw_id] = canonical_id
+                info["last_bbox"] = bbox
+                info["last_update"] = now
+                info["raw_ids"].add(raw_id)
+                return canonical_id
+
+        # No match – register a new canonical track
+        canonical_id = raw_id
+        self._track_aliases[raw_id] = canonical_id
+        self._canonical_tracks[canonical_id] = {
+            "last_bbox": bbox,
+            "last_update": now,
+            "raw_ids": {raw_id},
+        }
+        return canonical_id
+
+    def _format_timestamp(self, timestamp: float) -> str:
+        """Format a timestamp for human-readable output."""
+        return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
+
+    def _get_tracking_start_time(self) -> str:
+        """Get the tracking start time, formatted as a string."""
+        if self._tracking_start_time is None:
+            return "N/A"
+        return self._format_timestamp(self._tracking_start_time)
+
+    def _set_tracking_start_time(self) -> None:
+        """Set the tracking start time to the current time."""
+        self._tracking_start_time = time.time()
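
End to end, `process()` takes a list of detection dicts, applies confidence filtering, `index_to_category` mapping, optional smoothing and tracking, and returns a result whose `data["agg_summary"]` is keyed by frame number. A hedged usage sketch (the detection-dict shape is inferred from the module's own accessors, the `stream_info` keys from `_get_current_timestamp_str`, and the result access from the `create_result` call site; none of this is independently verified):

from matrice.deploy.utils.post_processing.usecases.human_activity import (
    HumanActivityRecognitionConfig,
    HumanActivityRecognitionUseCase,
)

detections = [
    # Raw model output: integer class index, confidence, corner-style bbox
    {"category": 7, "confidence": 0.82,   # 7 -> 'Standing' via index_to_category
     "bounding_box": {"xmin": 10, "ymin": 20, "xmax": 110, "ymax": 220}},
    {"category": 2, "confidence": 0.55,   # 2 -> 'Fall_down'
     "bounding_box": {"xmin": 300, "ymin": 40, "xmax": 380, "ymax": 200}},
]

config = HumanActivityRecognitionConfig(
    category="general", usecase="human_activity", confidence_threshold=0.3
)
stream_info = {"input_settings": {"start_frame": 42, "end_frame": 42, "original_fps": 30}}

result = HumanActivityRecognitionUseCase().process(detections, config, stream_info=stream_info)
print(result.data["agg_summary"]["42"]["tracking_stats"])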
--- a/matrice-1.0.99217.dist-info/METADATA
+++ b/matrice-1.0.99218.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: matrice
-Version: 1.0.99217
+Version: 1.0.99218
 Summary: SDK for connecting to matrice.ai services
 Home-page: https://github.com/matrice-ai/python-sdk
 Author: Matrice.ai
--- a/matrice-1.0.99217.dist-info/RECORD
+++ b/matrice-1.0.99218.dist-info/RECORD
@@ -129,8 +129,8 @@ matrice/deploy/utils/boundary_drawing_internal/boundary_drawing_internal.py,sha2
 matrice/deploy/utils/boundary_drawing_internal/boundary_drawing_tool.py,sha256=eY0VQGZ8BfTmR4_ThIAXaumBjh8_c7w69w-d3kta8p0,15421
 matrice/deploy/utils/boundary_drawing_internal/example_usage.py,sha256=cUBhxxsVdTQWIPvIOjCUGrhqon7ZBr5N6qNewjrTIuk,6434
 matrice/deploy/utils/post_processing/__init__.py,sha256=Z4Hr-GVsk55UEo2J7zmYAXPS-a9lAh1PlVrUdnRkiZE,24591
-matrice/deploy/utils/post_processing/config.py,sha256=FmZb7phtss9Vh-8_b311WcGazpa15wpbRCcX-KCG20M,4972
-matrice/deploy/utils/post_processing/processor.py,sha256=rwV5fMz1B_0qErOfEs1MDsqGeil4uBipBlHzWn85Sto,33842
+matrice/deploy/utils/post_processing/config.py,sha256=s9425ZqHE3N2yBj9EuDS_YiUFE9KDtZM0hpz8ZfI2i4,5045
+matrice/deploy/utils/post_processing/processor.py,sha256=AVC4yQ1BSTLOc2oTe17b9VOTzt7oNPeqENVxmIckNnM,34133
 matrice/deploy/utils/post_processing/advanced_tracker/__init__.py,sha256=tAPFzI_Yep5TLX60FDwKqBqppc-EbxSr0wNsQ9DGI1o,423
 matrice/deploy/utils/post_processing/advanced_tracker/base.py,sha256=VqWy4dd5th5LK-JfueTt2_GSEoOi5QQfQxjTNhmQoLc,3580
 matrice/deploy/utils/post_processing/advanced_tracker/config.py,sha256=hEVJVbh4uUrbIynmoq4OhuxF2IZA5AMCBLpixScp5FI,2865
@@ -140,7 +140,7 @@ matrice/deploy/utils/post_processing/advanced_tracker/strack.py,sha256=rVH2xOysZ
 matrice/deploy/utils/post_processing/advanced_tracker/tracker.py,sha256=D-PKZ2Pxutmlu--icyxuxjvnWBrzrmZcEChYS0nx00M,14328
 matrice/deploy/utils/post_processing/core/__init__.py,sha256=sCdnjfgypTh3TsnyAYJtN0Z8EQne96Nk4j7ICQVXjWE,1312
 matrice/deploy/utils/post_processing/core/base.py,sha256=V_DmaMLtrIunrN8Aq9iLeMIQPlkbCE-9d7n0Yz-nKQg,28228
-matrice/deploy/utils/post_processing/core/config.py,sha256=BPJ2tuHR6g364lMozNAgbAx1mIuLRPZtNe_-L2eWhHY,97758
+matrice/deploy/utils/post_processing/core/config.py,sha256=D_qANYrNnkVu5Ft06h3kDzOyiPmIavGHB-RBL7QPJrY,98943
 matrice/deploy/utils/post_processing/core/config_utils.py,sha256=fVZbYRWJr7dq7mz3FMYBVbYUwWDB-5t7oBuhJix9ghE,23102
 matrice/deploy/utils/post_processing/test_cases/__init__.py,sha256=zUU2kKrIcCl8WeyjjQViwp7PWTZlKPuF8M2pZkxoNNQ,42
 matrice/deploy/utils/post_processing/test_cases/run_tests.py,sha256=RBFGvxFR-gozxnQFzkWLrs90vLlp8Bsn-Z7MLQrNw4o,4731
@@ -155,7 +155,7 @@ matrice/deploy/utils/post_processing/test_cases/test_processor.py,sha256=nwF2EIA
 matrice/deploy/utils/post_processing/test_cases/test_utilities.py,sha256=lmT5bp5_T5yYy1HQ4X01myfScAqnMgf4pd7hHBCjr6A,13414
 matrice/deploy/utils/post_processing/test_cases/test_utils.py,sha256=bfmOT1rr9asv3jpr-p_UrjnnSZ1qEWM2LEqNKkyvJZ8,29370
 matrice/deploy/utils/post_processing/usecases/Histopathological_Cancer_Detection_img.py,sha256=bHDXxxG3QgWMFZbDuBaJWpkIvxTXsFMTqCPBCFm3SDs,30247
-matrice/deploy/utils/post_processing/usecases/__init__.py,sha256=HubENM5im9vAEPggy9zeCFtECTAnMA2jdRhc_gnpP90,8961
+matrice/deploy/utils/post_processing/usecases/__init__.py,sha256=-aYmbjrAx7rvTokC5ZILCWwxsxT4srNwPLOhcXeKuM4,9130
 matrice/deploy/utils/post_processing/usecases/advanced_customer_service.py,sha256=ELt5euxr6P4X2s8-YGngmj27QscOHefjOsx3774sNFk,75914
 matrice/deploy/utils/post_processing/usecases/age_detection.py,sha256=yn1LXOgbnOWSMDnsCds6-uN6W-I1Hy4_-AMrjbT5PtY,41318
 matrice/deploy/utils/post_processing/usecases/anti_spoofing_detection.py,sha256=XdtDdXGzZMLQdWcoOoiE5t4LPYHhgOtJ7tZCNlq1A2E,31329
@@ -185,6 +185,7 @@ matrice/deploy/utils/post_processing/usecases/fire_detection.py,sha256=RBm23nrtV
 matrice/deploy/utils/post_processing/usecases/flare_analysis.py,sha256=-egmS3Hs_iGOLeCMfapbkfQ04EWtZx97QRuUcDa-jMU,45340
 matrice/deploy/utils/post_processing/usecases/flower_segmentation.py,sha256=4I7qMx9Ztxg_hy9KTVX-3qBhAN-QwDt_Yigf9fFjLus,52017
 matrice/deploy/utils/post_processing/usecases/gender_detection.py,sha256=DEnCTRew6B7DtPcBQVCTtpd_IQMvMusBcu6nadUg2oM,40107
+matrice/deploy/utils/post_processing/usecases/human_activity.py,sha256=RMH1_QZHWtVY1TGwjcCHcJ3YKzvMuUnrt22xTbELLJg,41261
 matrice/deploy/utils/post_processing/usecases/human_activity_recognition.py,sha256=5a-akti7tRR1epuagEycGHrRRG2FTs0r8vesPuCqiJI,94827
 matrice/deploy/utils/post_processing/usecases/leaf.py,sha256=cwgB1ZNxkQFtkk-thSJrkXOGou1ghJr1kqtopb3sLD4,37036
 matrice/deploy/utils/post_processing/usecases/leaf_disease.py,sha256=bkiLccTdf4KUq3he4eCpBlKXb5exr-WBhQ_oWQ7os68,36225
@@ -239,8 +240,8 @@ matrice/deployment/camera_manager.py,sha256=MEluadz_I3k10GqwFb_w5w_HhuHcEu2vA7oh
 matrice/deployment/deployment.py,sha256=HFt151eWq6iqIAMsQvurpV2WNxW6Cx_gIUVfnVy5SWE,48093
 matrice/deployment/inference_pipeline.py,sha256=6b4Mm3-qt-Zy0BeiJfFQdImOn3FzdNCY-7ET7Rp8PMk,37911
 matrice/deployment/streaming_gateway_manager.py,sha256=90O_SvR4RtBJxR6dy43uHNRTRdgxQE09KzhzHzVqq3E,20939
-matrice-1.0.99217.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
-matrice-1.0.99217.dist-info/METADATA,sha256=R3KMcop9KCXFcD_RCB8SoOjcf6I3WADmxoenM4l5JPo,14624
-matrice-1.0.99217.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-matrice-1.0.99217.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
-matrice-1.0.99217.dist-info/RECORD,,
+matrice-1.0.99218.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
+matrice-1.0.99218.dist-info/METADATA,sha256=UaEBdQzouoPSpmNO8LSTKPIrUSQEX2fC5LisHMlcZzQ,14624
+matrice-1.0.99218.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+matrice-1.0.99218.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
+matrice-1.0.99218.dist-info/RECORD,,