matrice 1.0.99286__py3-none-any.whl → 1.0.99287__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,9 +1,3 @@
- """
- Crop weed detection usecase
- This module provides functionality for detecting crops in images or video streams.
-
- """
-
  from typing import Any, Dict, List, Optional
  from dataclasses import asdict
  import time
@@ -67,44 +61,63 @@ class CropWeedDetectionUseCase(BaseProcessor):
  "Maize": "Maize",
  "NarWeed": "Nar Weed"
  }
+
  def __init__(self):
  super().__init__("crop_weed_detection")
  self.category = "agriculture"
-
  self.CASE_TYPE: Optional[str] = 'crop_weed_detection'
  self.CASE_VERSION: Optional[str] = '1.2'
-
- # List of categories to track
  self.target_categories = ['plants', 'BroWeed', 'Maize', 'NarWeed']
-
-
- # Initialize smoothing tracker
  self.smoothing_tracker = None
-
- # Initialize advanced tracker (will be created on first use)
  self.tracker = None
-
- # Initialize tracking state variables
  self._total_frame_counter = 0
  self._global_frame_offset = 0
-
- # Track start time for "TOTAL SINCE" calculation
  self._tracking_start_time = None
-
  self._track_aliases: Dict[Any, Any] = {}
  self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
- # Tunable parameters – adjust if necessary for specific scenarios
- self._track_merge_iou_threshold: float = 0.05 # IoU ≥ 0.05 →
- self._track_merge_time_window: float = 7.0 # seconds within which to merge
-
+ self._track_merge_iou_threshold: float = 0.05
+ self._track_merge_time_window: float = 7.0
  self._ascending_alert_list: List[int] = []
  self.current_incident_end_timestamp: str = "N/A"

+ def _normalize_category(self, category: str) -> str:
+ """
+ Dynamically normalize category names by stripping quotes and correcting known misspellings.
+ Returns the normalized category or the original if no match is found.
+ """
+ if not isinstance(category, str):
+ self.logger.warning(f"Invalid category type: {type(category)}, returning as-is")
+ return category
+
+ # Strip quotes and whitespace
+ cleaned_category = category.strip("'").strip('"').strip()
+
+ # Define known misspellings or variations
+ category_corrections = {
+ "plantss": "plants",
+ "Plants": "plants",
+ "broweed": "BroWeed",
+ "maize": "Maize",
+ "narweed": "NarWeed"
+ }
+
+ # Check if the cleaned category is in target_categories
+ if cleaned_category in self.target_categories:
+ return cleaned_category
+
+ # Check for known misspellings
+ normalized_category = category_corrections.get(cleaned_category.lower(), cleaned_category)
+
+ # Log if the category is unrecognized
+ if normalized_category not in self.target_categories:
+ self.logger.warning(f"Unrecognized category '{category}' normalized to '{normalized_category}'")
+
+ return normalized_category

  def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
  stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
  """
- Main entry point for post-processing.
+ Main entry point for post-processing.
  Applies category mapping, smoothing, counting, alerting, and summary generation.
  Returns a ProcessingResult with all relevant outputs.
  """
@@ -112,41 +125,45 @@ class CropWeedDetectionUseCase(BaseProcessor):
  # Ensure config is correct type
  if not isinstance(config, CropWeedDetectionConfig):
  return self.create_error_result("Invalid config type", usecase=self.name, category=self.category,
- context=context)
+ context=context)
  if context is None:
  context = ProcessingContext()
-

  # Detect input format and store in context
  input_format = match_results_structure(data)

- # DEBUG
+ # DEBUG: Log raw input data
  print(f"Detected data: {data}")
-
-
- context.input_format = input_format
- context.confidence_threshold = config.confidence_threshold

+ # Normalize category names dynamically
  processed_data = [
  {**d, 'category': self._normalize_category(d.get('category'))}
  for d in data
  ]
+ print(f"Data after category normalization: {processed_data}")
+
+ context.input_format = input_format
+ context.confidence_threshold = config.confidence_threshold

+ # Apply confidence filtering
  if config.confidence_threshold is not None:
- processed_data = filter_by_confidence(data, config.confidence_threshold)
+ processed_data = filter_by_confidence(processed_data, config.confidence_threshold)
  self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
+ print(f"Detections after confidence filtering: {processed_data}")
  else:
- processed_data = data
- self.logger.debug(f"Did not apply confidence filtering with threshold since nothing was provided")
+ self.logger.debug("Did not apply confidence filtering since no threshold provided")

- # Step 2: Apply category mapping if provided
+ # Apply category mapping if provided
  if config.index_to_category:
  processed_data = apply_category_mapping(processed_data, config.index_to_category)
  self.logger.debug("Applied category mapping")
+ print(f"Detections after category mapping: {processed_data}")

+ # Apply category filtering
  if config.target_categories:
  processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
- self.logger.debug(f"Applied category filtering")
+ self.logger.debug(f"Applied category filtering")
+ print(f"Detections after category filtering: {processed_data}")

  # Apply bbox smoothing if enabled
  if config.enable_smoothing:
@@ -155,28 +172,23 @@ class CropWeedDetectionUseCase(BaseProcessor):
  smoothing_algorithm=config.smoothing_algorithm,
  window_size=config.smoothing_window_size,
  cooldown_frames=config.smoothing_cooldown_frames,
- confidence_threshold=config.confidence_threshold, # Use mask threshold as default
+ confidence_threshold=config.confidence_threshold,
  confidence_range_factor=config.smoothing_confidence_range_factor,
  enable_smoothing=True
  )
  self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
  processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
-
- # DEBUG:
- print(f"Processed data after filtering and smoothing: {processed_data}")
+ print(f"Processed data after smoothing: {processed_data}")

  # Advanced tracking (BYTETracker-like)
  try:
  from ..advanced_tracker import AdvancedTracker
  from ..advanced_tracker.config import TrackerConfig

- # Create tracker instance if it doesn't exist (preserves state across frames)
-
  if self.tracker is None:
  if config.confidence_threshold is not None:
  tracker_config = TrackerConfig(
  track_high_thresh=float(config.confidence_threshold),
- # Allow even lower detections to participate in secondary association
  track_low_thresh=max(0.05, float(config.confidence_threshold) / 2),
  new_track_thresh=float(config.confidence_threshold)
  )
@@ -184,20 +196,16 @@ class CropWeedDetectionUseCase(BaseProcessor):
  tracker_config = TrackerConfig()
  self.tracker = AdvancedTracker(tracker_config)
  self.logger.info(
- "Initialized AdvancedTracker for Monitoring and tracking with thresholds: "
- f"high={tracker_config.track_high_thresh}, "
- f"low={tracker_config.track_low_thresh}, "
- f"new={tracker_config.new_track_thresh}"
+ f"Initialized AdvancedTracker with thresholds: high={tracker_config.track_high_thresh}, "
+ f"low={tracker_config.track_low_thresh}, new={tracker_config.new_track_thresh}"
  )
- # The tracker expects the data in the same format as input
- # It will add track_id and frame_id to each detection
  processed_data = self.tracker.update(processed_data)
+ print(f"Data after tracking: {processed_data}")

  except Exception as e:
- # If advanced tracker fails, fallback to unsmoothed detections
  self.logger.warning(f"AdvancedTracker failed: {e}")

- # Update tracking state for total count per label
+ # Update tracking state
  self._update_tracking_state(processed_data)

  # Update frame counter
@@ -209,45 +217,40 @@ class CropWeedDetectionUseCase(BaseProcessor):
  input_settings = stream_info.get("input_settings", {})
  start_frame = input_settings.get("start_frame")
  end_frame = input_settings.get("end_frame")
- # If start and end frame are the same, it's a single frame
  if start_frame is not None and end_frame is not None and start_frame == end_frame:
  frame_number = start_frame

  # Compute summaries and alerts
- general_counting_summary = calculate_counting_summary(data)
- counting_summary = self._count_categories(processed_data, config)
- # Add total unique counts after tracking using only local state
- total_counts = self.get_total_counts()
- counting_summary['total_counts'] = total_counts
+ general_counting_summary = calculate_counting_summary(data)
+ counting_summary = self._count_categories(processed_data, config)
+ total_counts = self.get_total_counts()
+ counting_summary['total_counts'] = total_counts

  alerts = self._check_alerts(counting_summary, frame_number, config)
  predictions = self._extract_predictions(processed_data)

- # Step: Generate structured incidents, tracking stats and business analytics with frame-based keys
+ # Generate structured outputs
  incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
  tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
  business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info, is_empty=True)
  summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)

- # Extract frame-based dictionaries from the lists
+ # Extract frame-based dictionaries
  incidents = incidents_list[0] if incidents_list else {}
  tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
  business_analytics = business_analytics_list[0] if business_analytics_list else {}
  summary = summary_list[0] if summary_list else {}
  agg_summary = {str(frame_number): {
- "incidents": incidents,
- "tracking_stats": tracking_stats,
- "business_analytics": business_analytics,
- "alerts": alerts,
- "human_text": summary}
- }
-
-
+ "incidents": incidents,
+ "tracking_stats": tracking_stats,
+ "business_analytics": business_analytics,
+ "alerts": alerts,
+ "human_text": summary}
+ }
+
  context.mark_completed()

- # Build result object following the new pattern
-
- # DEBUG:
+ # DEBUG: Log final summary
  print(f"Final aggregated summary: {agg_summary}")

  result = self.create_result(
@@ -259,18 +262,14 @@ class CropWeedDetectionUseCase(BaseProcessor):

  return result

- def _check_alerts(self, summary: dict, frame_number:Any, config: CropWeedDetectionConfig) -> List[Dict]:
+ def _check_alerts(self, summary: dict, frame_number: Any, config: CropWeedDetectionConfig) -> List[Dict]:
  """
  Check if any alert thresholds are exceeded and return alert dicts.
  """
  def get_trend(data, lookback=900, threshold=0.6):
- '''
- Determine if the trend is ascending or descending based on actual value progression.
- Now works with values 0,1,2,3 (not just binary).
- '''
  window = data[-lookback:] if len(data) >= lookback else data
  if len(window) < 2:
- return True # not enough data to determine trend
+ return True
  increasing = 0
  total = 0
  for i in range(1, len(window)):
@@ -285,53 +284,44 @@ class CropWeedDetectionUseCase(BaseProcessor):

  frame_key = str(frame_number) if frame_number is not None else "current_frame"
  alerts = []
- total_detections = summary.get("total_count", 0) #CURRENT combined total count of all classes
- total_counts_dict = summary.get("total_counts", {}) #TOTAL cumulative counts per class
- cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0 #TOTAL combined cumulative count
- per_category_count = summary.get("per_category_count", {}) #CURRENT count per class
+ total_detections = summary.get("total_count", 0)
+ total_counts_dict = summary.get("total_counts", {})
+ cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0
+ per_category_count = summary.get("per_category_count", {})

  if not config.alert_config:
  return alerts

  total = summary.get("total_count", 0)
- #self._ascending_alert_list
  if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
-
  for category, threshold in config.alert_config.count_thresholds.items():
- if category == "all" and total > threshold:
-
+ if category == "all" and total > threshold:
  alerts.append({
- "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
- "alert_id": "alert_"+category+'_'+frame_key,
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+ "alert_id": "alert_" + category + '_' + frame_key,
  "incident_category": self.CASE_TYPE,
  "threshold_level": threshold,
  "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
- "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
- getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
- }
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+ getattr(config.alert_config, 'alert_value', ['JSON']))}
  })
  elif category in summary.get("per_category_count", {}):
  count = summary.get("per_category_count", {})[category]
- if count > threshold: # Fixed logic: alert when EXCEEDING threshold
+ if count > threshold:
  alerts.append({
- "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
- "alert_id": "alert_"+category+'_'+frame_key,
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+ "alert_id": "alert_" + category + '_' + frame_key,
  "incident_category": self.CASE_TYPE,
  "threshold_level": threshold,
  "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
- "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
- getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
- }
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+ getattr(config.alert_config, 'alert_value', ['JSON']))}
  })
- else:
- pass
  return alerts

  def _generate_incidents(self, counting_summary: Dict, alerts: List, config: CropWeedDetectionConfig,
- frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[
- Dict]:
+ frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
  """Generate structured incidents for the output format with frame-based keys."""
-
  incidents = []
  total_detections = counting_summary.get("total_count", 0)
  current_timestamp = self._get_current_timestamp_str(stream_info)
@@ -340,22 +330,20 @@ class CropWeedDetectionUseCase(BaseProcessor):
  self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list

  if total_detections > 0:
- # Determine event level based on thresholds
  level = "low"
  intensity = 5.0
  start_timestamp = self._get_start_timestamp_str(stream_info)
- if start_timestamp and self.current_incident_end_timestamp=='N/A':
+ if start_timestamp and self.current_incident_end_timestamp == 'N/A':
  self.current_incident_end_timestamp = 'Incident still active'
- elif start_timestamp and self.current_incident_end_timestamp=='Incident still active':
- if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+ elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
+ if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
  self.current_incident_end_timestamp = current_timestamp
- elif self.current_incident_end_timestamp!='Incident still active' and self.current_incident_end_timestamp!='N/A':
+ elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
  self.current_incident_end_timestamp = 'N/A'

  if config.alert_config and config.alert_config.count_thresholds:
  threshold = config.alert_config.count_thresholds.get("all", 15)
  intensity = min(10.0, (total_detections / threshold) * 10)
-
  if intensity >= 9:
  level = "critical"
  self._ascending_alert_list.append(3)
@@ -386,29 +374,34 @@ class CropWeedDetectionUseCase(BaseProcessor):
  intensity = min(10.0, total_detections / 3.0)
  self._ascending_alert_list.append(0)

- # Generate human text in new format
  human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
- human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE,level)}")
+ human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
  human_text = "\n".join(human_text_lines)

- alert_settings=[]
+ alert_settings = []
  if config.alert_config and hasattr(config.alert_config, 'alert_type'):
  alert_settings.append({
- "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
  "incident_category": self.CASE_TYPE,
  "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
  "ascending": True,
- "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
- getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
- }
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+ getattr(config.alert_config, 'alert_value', ['JSON']))}
  })

- event= self.create_incident(incident_id=self.CASE_TYPE+'_'+str(frame_number), incident_type=self.CASE_TYPE,
- severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
- start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
- level_settings= {"low": 1, "medium": 3, "significant":4, "critical": 7})
+ event = self.create_incident(
+ incident_id=self.CASE_TYPE + '_' + str(frame_number),
+ incident_type=self.CASE_TYPE,
+ severity_level=level,
+ human_text=human_text,
+ camera_info=camera_info,
+ alerts=alerts,
+ alert_settings=alert_settings,
+ start_time=start_timestamp,
+ end_time=self.current_incident_end_timestamp,
+ level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
+ )
  incidents.append(event)
-
  else:
  self._ascending_alert_list.append(0)
  incidents.append({})
@@ -425,80 +418,50 @@ class CropWeedDetectionUseCase(BaseProcessor):
  ) -> List[Dict]:
  """Generate structured tracking stats matching eg.json format."""
  camera_info = self.get_camera_info_from_stream(stream_info)
-
- # frame_key = str(frame_number) if frame_number is not None else "current_frame"
- # tracking_stats = [{frame_key: []}]
- # frame_tracking_stats = tracking_stats[0][frame_key]
  tracking_stats = []

- total_detections = counting_summary.get("total_count", 0) #CURRENT total count of all classes
- total_counts_dict = counting_summary.get("total_counts", {}) #TOTAL cumulative counts per class
- cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0 #TOTAL combined cumulative count
- per_category_count = counting_summary.get("per_category_count", {}) #CURRENT count per class
+ total_detections = counting_summary.get("total_count", 0)
+ total_counts_dict = counting_summary.get("total_counts", {})
+ cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0
+ per_category_count = counting_summary.get("per_category_count", {})

  current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
  start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
-
- # Create high precision timestamps for input_timestamp and reset_timestamp
  high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
  high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)

-
- # Build total_counts array in expected format
- total_counts = []
- for cat, count in total_counts_dict.items():
- if count > 0:
- total_counts.append({
- "category": cat,
- "count": count
- })
-
- # Build current_counts array in expected format
- current_counts = []
- for cat, count in per_category_count.items():
- if count > 0 or total_detections > 0: # Include even if 0 when there are detections
- current_counts.append({
- "category": cat,
- "count": count
- })
-
- # Prepare detections without confidence scores (as per eg.json)
- detections = []
+ total_counts = [
+ {"category": cat, "count": count}
+ for cat, count in total_counts_dict.items() if count > 0
+ ]

- # DEBUG
- print(f"Model detections: {counting_summary.get('detections', [])}")
+ current_counts = [
+ {"category": cat, "count": count}
+ for cat, count in per_category_count.items() if count > 0 or total_detections > 0
+ ]

- for detection in counting_summary.get("detections", []):
- detection_data = {
+ detections = [
+ {
  "category": detection.get("category"),
- "bounding_box": detection.get("bounding_box", {})
+ "bounding_box": detection.get("bounding_box", {}),
+ "masks": detection.get("masks", []) if detection.get("masks") else None,
+ "segmentation": detection.get("segmentation") if detection.get("segmentation") else None,
+ "mask": detection.get("mask") if detection.get("mask") else None
  }
- # Include segmentation if available (like in eg.json)
- if detection.get("masks"):
- detection_data["masks"] = detection.get("masks", [])
- if detection.get("segmentation"):
- detection_data["segmentation"] = detection.get("segmentation")
- if detection.get("mask"):
- detection_data["mask"] = detection.get("mask")
- detections.append(detection_data)
-
- # Build alert_settings array in expected format
+ for detection in counting_summary.get("detections", [])
+ ]
+
  alert_settings = []
  if config.alert_config and hasattr(config.alert_config, 'alert_type'):
  alert_settings.append({
- "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
+ "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
  "incident_category": self.CASE_TYPE,
  "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
  "ascending": True,
- "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
- getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
- }
+ "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+ getattr(config.alert_config, 'alert_value', ['JSON']))}
  })

- # DEBUG
- print(f"Detections for tracking stats: {detections}")
-
- # Generate human_text in expected format
  human_text_lines = [f"Tracking Statistics:"]
  human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")

@@ -517,40 +480,38 @@ class CropWeedDetectionUseCase(BaseProcessor):
  human_text_lines.append("Alerts: None")

  human_text = "\n".join(human_text_lines)
- reset_settings=[
- {
- "interval_type": "daily",
- "reset_time": {
- "value": 9,
- "time_unit": "hour"
- }
- }
- ]
-
- tracking_stat=self.create_tracking_stats(total_counts=total_counts, current_counts=current_counts,
- detections=detections, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
- reset_settings=reset_settings, start_time=high_precision_start_timestamp ,
- reset_time=high_precision_reset_timestamp)
+ reset_settings = [
+ {
+ "interval_type": "daily",
+ "reset_time": {"value": 9, "time_unit": "hour"}
+ }
+ ]

- # DEBUG
- print(f"Generated tracking stat: {tracking_stat}")
+ tracking_stat = self.create_tracking_stats(
+ total_counts=total_counts,
+ current_counts=current_counts,
+ detections=detections,
+ human_text=human_text,
+ camera_info=camera_info,
+ alerts=alerts,
+ alert_settings=alert_settings,
+ reset_settings=reset_settings,
+ start_time=high_precision_start_timestamp,
+ reset_time=high_precision_reset_timestamp
+ )

  tracking_stats.append(tracking_stat)
  return tracking_stats
-
- def _generate_business_analytics(self, counting_summary: Dict, alerts:Any, config: CropWeedDetectionConfig, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
+
+ def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: CropWeedDetectionConfig,
+ stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
  """Generate standardized business analytics for the agg_summary structure."""
  if is_empty:
  return []
+ # Implement if needed
+ return []

- #-----IF YOUR USECASE NEEDS BUSINESS ANALYTICS, YOU CAN USE THIS FUNCTION------#
- #camera_info = self.get_camera_info_from_stream(stream_info)
- # business_analytics = self.create_business_analytics(nalysis_name, statistics,
- # human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
- # reset_settings)
- # return business_analytics
-
- def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
+ def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[dict]:
  """
  Generate a human_text string for the tracking_stat, incident, business analytics and alerts.
  """
@@ -558,11 +519,11 @@ class CropWeedDetectionUseCase(BaseProcessor):
  lines["Application Name"] = self.CASE_TYPE
  lines["Application Version"] = self.CASE_VERSION
  if len(incidents) > 0:
- lines["Incidents:"]=f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
+ lines["Incidents:"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
  if len(tracking_stats) > 0:
- lines["Tracking Statistics:"]=f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
+ lines["Tracking Statistics:"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
  if len(business_analytics) > 0:
- lines["Business Analytics:"]=f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
+ lines["Business Analytics:"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"

  if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
  lines["Summary"] = "No Summary Data"
@@ -573,13 +534,11 @@ class CropWeedDetectionUseCase(BaseProcessor):
  """
  Get detailed information about track IDs (per frame).
  """
- # Collect all track_ids in this frame
  frame_track_ids = set()
  for det in detections:
  tid = det.get('track_id')
  if tid is not None:
  frame_track_ids.add(tid)
- # Use persistent total set for unique counting
  total_track_ids = set()
  for s in getattr(self, '_per_category_total_track_ids', {}).values():
  total_track_ids.update(s)
@@ -595,10 +554,7 @@ class CropWeedDetectionUseCase(BaseProcessor):
  def _update_tracking_state(self, detections: list):
  """
  Track unique categories track_ids per category for total count after tracking.
- Applies canonical ID merging to avoid duplicate counting when the underlying
- tracker loses an object temporarily and assigns a new ID.
  """
- # Lazily initialise storage dicts
  if not hasattr(self, "_per_category_total_track_ids"):
  self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
  self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
@@ -610,9 +566,7 @@ class CropWeedDetectionUseCase(BaseProcessor):
  continue
  bbox = det.get("bounding_box", det.get("bbox"))
  canonical_id = self._merge_or_register_track(raw_track_id, bbox)
- # Propagate canonical ID back to detection so downstream logic uses it
  det["track_id"] = canonical_id
-
  self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
  self._current_frame_track_ids[cat].add(canonical_id)

@@ -622,7 +576,6 @@ class CropWeedDetectionUseCase(BaseProcessor):
  """
  return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}

-
  def _format_timestamp_for_stream(self, timestamp: float) -> str:
  """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
  dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
@@ -632,45 +585,40 @@ class CropWeedDetectionUseCase(BaseProcessor):
  """Format timestamp for video chunks (HH:MM:SS.ms format)."""
  hours = int(timestamp // 3600)
  minutes = int((timestamp % 3600) // 60)
- seconds = round(float(timestamp % 60),2)
+ seconds = round(float(timestamp % 60), 2)
  return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"

  def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
  """Get formatted current timestamp based on stream type."""
  if not stream_info:
  return "00:00:00.00"
- # is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
  if precision:
  if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
  if frame_id:
- start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
+ start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
  else:
- start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
  stream_time_str = self._format_timestamp_for_video(start_time)
  return stream_time_str
  else:
  return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")

  if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
- if frame_id:
- start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
- else:
- start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
- stream_time_str = self._format_timestamp_for_video(start_time)
- return stream_time_str
+ if frame_id:
+ start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+ else:
+ start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
+ stream_time_str = self._format_timestamp_for_video(start_time)
+ return stream_time_str
  else:
- # For streams, use stream_time from stream_info
  stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
  if stream_time_str:
- # Parse the high precision timestamp string to get timestamp
  try:
- # Remove " UTC" suffix and parse
  timestamp_str = stream_time_str.replace(" UTC", "")
  dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
  timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
  return self._format_timestamp_for_stream(timestamp)
  except:
- # Fallback to current time if parsing fails
  return self._format_timestamp_for_stream(time.time())
  else:
  return self._format_timestamp_for_stream(time.time())
@@ -686,41 +634,32 @@ class CropWeedDetectionUseCase(BaseProcessor):
  return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")

  if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
- # If video format, start from 00:00:00
  return "00:00:00"
  else:
- # For streams, use tracking start time or current time with minutes/seconds reset
  if self._tracking_start_time is None:
- # Try to extract timestamp from stream_time string
  stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
  if stream_time_str:
  try:
- # Remove " UTC" suffix and parse
  timestamp_str = stream_time_str.replace(" UTC", "")
  dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
  self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
  except:
- # Fallback to current time if parsing fails
  self._tracking_start_time = time.time()
  else:
  self._tracking_start_time = time.time()

  dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
- # Reset minutes and seconds to 00:00 for "TOTAL SINCE" format
  dt = dt.replace(minute=0, second=0, microsecond=0)
  return dt.strftime('%Y:%m:%d %H:%M:%S')

  def _count_categories(self, detections: list, config: CropWeedDetectionConfig) -> dict:
  """
  Count the number of detections per category and return a summary dict.
- The detections list is expected to have 'track_id' (from tracker), 'category', 'bounding_box', etc.
- Output structure will include 'track_id' for each detection as per AdvancedTracker output.
  """
  counts = {}
  for det in detections:
  cat = det.get('category', 'unknown')
  counts[cat] = counts.get(cat, 0) + 1
- # Each detection dict will now include 'track_id' (and possibly 'frame_id')
  return {
  "total_count": sum(counts.values()),
  "per_category_count": counts,
@@ -736,7 +675,6 @@ class CropWeedDetectionUseCase(BaseProcessor):
  ]
  }

-
  def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
  """
  Extract prediction details for output (category, confidence, bounding box).
@@ -750,14 +688,8 @@ class CropWeedDetectionUseCase(BaseProcessor):
  for det in detections
  ]

- # ------------------------------------------------------------------ #
- # Canonical ID helpers #
- # ------------------------------------------------------------------ #
  def _compute_iou(self, box1: Any, box2: Any) -> float:
- """Compute IoU between two bounding boxes which may be dicts or lists.
- Falls back to 0 when insufficient data is available."""
-
- # Helper to convert bbox (dict or list) to [x1, y1, x2, y2]
+ """Compute IoU between two bounding boxes which may be dicts or lists."""
  def _bbox_to_list(bbox):
  if bbox is None:
  return []
@@ -768,7 +700,6 @@ class CropWeedDetectionUseCase(BaseProcessor):
  return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
  if "x1" in bbox:
  return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
- # Fallback: first four numeric values
  values = [v for v in bbox.values() if isinstance(v, (int, float))]
  return values[:4] if len(values) >= 4 else []
  return []
@@ -780,7 +711,6 @@ class CropWeedDetectionUseCase(BaseProcessor):
  x1_min, y1_min, x1_max, y1_max = l1
  x2_min, y2_min, x2_max, y2_max = l2

- # Ensure correct order
  x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
  y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
  x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
@@ -802,16 +732,11 @@ class CropWeedDetectionUseCase(BaseProcessor):
  return (inter_area / union_area) if union_area > 0 else 0.0

  def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
- """Return a stable canonical ID for a raw tracker ID, merging fragmented
- tracks when IoU and temporal constraints indicate they represent the
- same physical."""
+ """Return a stable canonical ID for a raw tracker ID."""
  if raw_id is None or bbox is None:
- # Nothing to merge
  return raw_id

  now = time.time()
-
- # Fast path – raw_id already mapped
  if raw_id in self._track_aliases:
  canonical_id = self._track_aliases[raw_id]
  track_info = self._canonical_tracks.get(canonical_id)
@@ -821,21 +746,17 @@ class CropWeedDetectionUseCase(BaseProcessor):
  track_info["raw_ids"].add(raw_id)
  return canonical_id

- # Attempt to merge with an existing canonical track
  for canonical_id, info in self._canonical_tracks.items():
- # Only consider recently updated tracks
  if now - info["last_update"] > self._track_merge_time_window:
  continue
  iou = self._compute_iou(bbox, info["last_bbox"])
  if iou >= self._track_merge_iou_threshold:
- # Merge
  self._track_aliases[raw_id] = canonical_id
  info["last_bbox"] = bbox
  info["last_update"] = now
  info["raw_ids"].add(raw_id)
  return canonical_id

- # No match – register new canonical track
  canonical_id = raw_id
  self._track_aliases[raw_id] = canonical_id
  self._canonical_tracks[canonical_id] = {
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: matrice
- Version: 1.0.99286
+ Version: 1.0.99287
  Summary: SDK for connecting to matrice.ai services
  Home-page: https://github.com/matrice-ai/python-sdk
  Author: Matrice.ai
@@ -173,7 +173,7 @@ matrice/deploy/utils/post_processing/usecases/child_monitoring.py,sha256=z3oymoq
  matrice/deploy/utils/post_processing/usecases/color_detection.py,sha256=Z8-akjy8a7f8YyiOzXu_Zi1Km30v-TRrymDqQOPpJ_8,43277
  matrice/deploy/utils/post_processing/usecases/color_map_utils.py,sha256=SP-AEVcjLmL8rxblu-ixqUJC2fqlcr7ab4hWo4Fcr_k,2677
  matrice/deploy/utils/post_processing/usecases/concrete_crack_detection.py,sha256=pxhOH_hG4hq9yytNepbGMdk2W_lTG8D1_2RAagaPBkg,40252
- matrice/deploy/utils/post_processing/usecases/crop_weed_detection.py,sha256=GUfbdZmaZfBAwceaSBwJaXzIVlIo3KMqry8LHO4diLU,40697
+ matrice/deploy/utils/post_processing/usecases/crop_weed_detection.py,sha256=uATIX4ox7IfIkwhTXr8QGJHq45bJU7X33WKqewK1eJA,35680
  matrice/deploy/utils/post_processing/usecases/customer_service.py,sha256=UWS83qxguyAyhh8a0JF5QH9DtKxO8I-gI2BPOjLPxBw,44642
  matrice/deploy/utils/post_processing/usecases/defect_detection_products.py,sha256=blvo4wmak-wlvPSZOcmRsV1FoZSeGX_dUAX5A1WheBE,45949
  matrice/deploy/utils/post_processing/usecases/distracted_driver_detection.py,sha256=rkyYHbmcYUAfKbmmKyKxHlk47vJ_fogHWKhQjrERsok,40316
@@ -244,8 +244,8 @@ matrice/deployment/camera_manager.py,sha256=e1Lc81RJP5wUWRdTgHO6tMWF9BkBdHOSVyx3
  matrice/deployment/deployment.py,sha256=HFt151eWq6iqIAMsQvurpV2WNxW6Cx_gIUVfnVy5SWE,48093
  matrice/deployment/inference_pipeline.py,sha256=6b4Mm3-qt-Zy0BeiJfFQdImOn3FzdNCY-7ET7Rp8PMk,37911
  matrice/deployment/streaming_gateway_manager.py,sha256=ifYGl3g25wyU39HwhPQyI2OgF3M6oIqKMWt8RXtMxY8,21401
- matrice-1.0.99286.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
- matrice-1.0.99286.dist-info/METADATA,sha256=2cKToW_ZvMOC-aDa_ZyzU05PYxIEWJeAfoZJIx-Khu4,14624
- matrice-1.0.99286.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- matrice-1.0.99286.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
- matrice-1.0.99286.dist-info/RECORD,,
+ matrice-1.0.99287.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
+ matrice-1.0.99287.dist-info/METADATA,sha256=U7NyO0Ia_YH0saRsGMknFF5DmQ6xJzy50kI7d69joFg,14624
+ matrice-1.0.99287.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ matrice-1.0.99287.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
+ matrice-1.0.99287.dist-info/RECORD,,