matrice-1.0.99285-py3-none-any.whl → matrice-1.0.99287-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- matrice/deploy/utils/post_processing/usecases/crop_weed_detection.py +175 -249
- {matrice-1.0.99285.dist-info → matrice-1.0.99287.dist-info}/METADATA +1 -1
- {matrice-1.0.99285.dist-info → matrice-1.0.99287.dist-info}/RECORD +6 -6
- {matrice-1.0.99285.dist-info → matrice-1.0.99287.dist-info}/WHEEL +0 -0
- {matrice-1.0.99285.dist-info → matrice-1.0.99287.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice-1.0.99285.dist-info → matrice-1.0.99287.dist-info}/top_level.txt +0 -0
@@ -1,9 +1,3 @@
-"""
-Crop weed detection usecase
-This module provides functionality for detecting crops in images or video streams.
-
-"""
-
 from typing import Any, Dict, List, Optional
 from dataclasses import asdict
 import time
@@ -67,44 +61,63 @@ class CropWeedDetectionUseCase(BaseProcessor):
         "Maize": "Maize",
         "NarWeed": "Nar Weed"
     }
+
     def __init__(self):
         super().__init__("crop_weed_detection")
         self.category = "agriculture"
-
         self.CASE_TYPE: Optional[str] = 'crop_weed_detection'
         self.CASE_VERSION: Optional[str] = '1.2'
-
-        # List of categories to track
         self.target_categories = ['plants', 'BroWeed', 'Maize', 'NarWeed']
-
-
-        # Initialize smoothing tracker
         self.smoothing_tracker = None
-
-        # Initialize advanced tracker (will be created on first use)
         self.tracker = None
-
-        # Initialize tracking state variables
         self._total_frame_counter = 0
         self._global_frame_offset = 0
-
-        # Track start time for "TOTAL SINCE" calculation
         self._tracking_start_time = None
-
         self._track_aliases: Dict[Any, Any] = {}
         self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
-
-        self.
-        self._track_merge_time_window: float = 7.0  # seconds within which to merge
-
+        self._track_merge_iou_threshold: float = 0.05
+        self._track_merge_time_window: float = 7.0
         self._ascending_alert_list: List[int] = []
         self.current_incident_end_timestamp: str = "N/A"
 
+    def _normalize_category(self, category: str) -> str:
+        """
+        Dynamically normalize category names by stripping quotes and correcting known misspellings.
+        Returns the normalized category or the original if no match is found.
+        """
+        if not isinstance(category, str):
+            self.logger.warning(f"Invalid category type: {type(category)}, returning as-is")
+            return category
+
+        # Strip quotes and whitespace
+        cleaned_category = category.strip("'").strip('"').strip()
+
+        # Define known misspellings or variations
+        category_corrections = {
+            "plantss": "plants",
+            "Plants": "plants",
+            "broweed": "BroWeed",
+            "maize": "Maize",
+            "narweed": "NarWeed"
+        }
+
+        # Check if the cleaned category is in target_categories
+        if cleaned_category in self.target_categories:
+            return cleaned_category
+
+        # Check for known misspellings
+        normalized_category = category_corrections.get(cleaned_category.lower(), cleaned_category)
+
+        # Log if the category is unrecognized
+        if normalized_category not in self.target_categories:
+            self.logger.warning(f"Unrecognized category '{category}' normalized to '{normalized_category}'")
+
+        return normalized_category
 
     def process(self, data: Any, config: ConfigProtocol, context: Optional[ProcessingContext] = None,
                 stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
         """
-        Main entry point for
+        Main entry point for post-processing.
         Applies category mapping, smoothing, counting, alerting, and summary generation.
         Returns a ProcessingResult with all relevant outputs.
         """
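For reference, the normalization logic introduced by _normalize_category can be exercised on its own. The sketch below mirrors the added method under the assumption of a module-level logging logger standing in for self.logger; it is an illustration, not part of the package.

import logging

logger = logging.getLogger("crop_weed_detection")

TARGET_CATEGORIES = ['plants', 'BroWeed', 'Maize', 'NarWeed']
CATEGORY_CORRECTIONS = {
    "plantss": "plants",
    "broweed": "BroWeed",
    "maize": "Maize",
    "narweed": "NarWeed",
}

def normalize_category(category):
    """Strip quotes/whitespace and correct known misspellings, mirroring the new method."""
    if not isinstance(category, str):
        logger.warning("Invalid category type: %s, returning as-is", type(category))
        return category
    cleaned = category.strip("'").strip('"').strip()
    if cleaned in TARGET_CATEGORIES:
        return cleaned
    normalized = CATEGORY_CORRECTIONS.get(cleaned.lower(), cleaned)
    if normalized not in TARGET_CATEGORIES:
        logger.warning("Unrecognized category %r normalized to %r", category, normalized)
    return normalized

print(normalize_category("'plantss'"))   # plants
print(normalize_category("Maize"))       # Maize
print(normalize_category("weed"))        # weed (logged as unrecognized)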
@@ -112,36 +125,45 @@ class CropWeedDetectionUseCase(BaseProcessor):
         # Ensure config is correct type
         if not isinstance(config, CropWeedDetectionConfig):
             return self.create_error_result("Invalid config type", usecase=self.name, category=self.category,
-
+                                             context=context)
         if context is None:
             context = ProcessingContext()
-
 
         # Detect input format and store in context
         input_format = match_results_structure(data)
 
-        # DEBUG
+        # DEBUG: Log raw input data
         print(f"Detected data: {data}")
 
-
+        # Normalize category names dynamically
+        processed_data = [
+            {**d, 'category': self._normalize_category(d.get('category'))}
+            for d in data
+        ]
+        print(f"Data after category normalization: {processed_data}")
+
         context.input_format = input_format
         context.confidence_threshold = config.confidence_threshold
 
+        # Apply confidence filtering
         if config.confidence_threshold is not None:
-            processed_data = filter_by_confidence(
+            processed_data = filter_by_confidence(processed_data, config.confidence_threshold)
             self.logger.debug(f"Applied confidence filtering with threshold {config.confidence_threshold}")
+            print(f"Detections after confidence filtering: {processed_data}")
         else:
-
-            self.logger.debug(f"Did not apply confidence filtering with threshold since nothing was provided")
+            self.logger.debug("Did not apply confidence filtering since no threshold provided")
 
-        #
+        # Apply category mapping if provided
         if config.index_to_category:
             processed_data = apply_category_mapping(processed_data, config.index_to_category)
             self.logger.debug("Applied category mapping")
+            print(f"Detections after category mapping: {processed_data}")
 
+        # Apply category filtering
         if config.target_categories:
             processed_data = [d for d in processed_data if d.get('category') in self.target_categories]
-            self.logger.debug(f"Applied
+            self.logger.debug(f"Applied category filtering")
+            print(f"Detections after category filtering: {processed_data}")
 
         # Apply bbox smoothing if enabled
         if config.enable_smoothing:
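The revised process method now runs detections through category normalization, confidence filtering, category mapping, and category filtering, in that order. A minimal sketch of that ordering follows; the bodies of filter_by_confidence and apply_category_mapping are assumptions for illustration only, standing in for the package's own utilities.

def filter_by_confidence(detections, threshold):
    # Stand-in for the library helper of the same name (assumed behaviour).
    return [d for d in detections if d.get("confidence", 0.0) >= threshold]

def apply_category_mapping(detections, index_to_category):
    # Stand-in: map numeric class indices to category names (assumed behaviour).
    return [{**d, "category": index_to_category.get(d.get("category"), d.get("category"))}
            for d in detections]

detections = [
    {"category": 0, "confidence": 0.91, "bounding_box": {"xmin": 10, "ymin": 12, "xmax": 40, "ymax": 60}},
    {"category": 1, "confidence": 0.30, "bounding_box": {"xmin": 55, "ymin": 20, "xmax": 80, "ymax": 70}},
]
index_to_category = {0: "Maize", 1: "NarWeed"}
target_categories = ["plants", "BroWeed", "Maize", "NarWeed"]

processed = filter_by_confidence(detections, 0.5)              # drop low-confidence boxes
processed = apply_category_mapping(processed, index_to_category)
processed = [d for d in processed if d["category"] in target_categories]
print(processed)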
@@ -150,28 +172,23 @@ class CropWeedDetectionUseCase(BaseProcessor):
                 smoothing_algorithm=config.smoothing_algorithm,
                 window_size=config.smoothing_window_size,
                 cooldown_frames=config.smoothing_cooldown_frames,
-                confidence_threshold=config.confidence_threshold,
+                confidence_threshold=config.confidence_threshold,
                 confidence_range_factor=config.smoothing_confidence_range_factor,
                 enable_smoothing=True
             )
             self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
             processed_data = bbox_smoothing(processed_data, self.smoothing_tracker.config, self.smoothing_tracker)
-
-        # DEBUG:
-        print(f"Processed data after filtering and smoothing: {processed_data}")
+            print(f"Processed data after smoothing: {processed_data}")
 
         # Advanced tracking (BYTETracker-like)
         try:
             from ..advanced_tracker import AdvancedTracker
             from ..advanced_tracker.config import TrackerConfig
 
-            # Create tracker instance if it doesn't exist (preserves state across frames)
-
             if self.tracker is None:
                 if config.confidence_threshold is not None:
                     tracker_config = TrackerConfig(
                         track_high_thresh=float(config.confidence_threshold),
-                        # Allow even lower detections to participate in secondary association
                         track_low_thresh=max(0.05, float(config.confidence_threshold) / 2),
                         new_track_thresh=float(config.confidence_threshold)
                     )
@@ -179,20 +196,16 @@ class CropWeedDetectionUseCase(BaseProcessor):
                     tracker_config = TrackerConfig()
                 self.tracker = AdvancedTracker(tracker_config)
                 self.logger.info(
-                    "Initialized AdvancedTracker
-                    f"
-                    f"low={tracker_config.track_low_thresh}, "
-                    f"new={tracker_config.new_track_thresh}"
+                    f"Initialized AdvancedTracker with thresholds: high={tracker_config.track_high_thresh}, "
+                    f"low={tracker_config.track_low_thresh}, new={tracker_config.new_track_thresh}"
                 )
-            # The tracker expects the data in the same format as input
-            # It will add track_id and frame_id to each detection
             processed_data = self.tracker.update(processed_data)
+            print(f"Data after tracking: {processed_data}")
 
         except Exception as e:
-            # If advanced tracker fails, fallback to unsmoothed detections
             self.logger.warning(f"AdvancedTracker failed: {e}")
 
-        # Update
+        # Update tracking state
         self._update_tracking_state(processed_data)
 
         # Update frame counter
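When a confidence threshold is configured, the TrackerConfig thresholds are derived from it as in the hunks above: the high and new-track thresholds reuse the configured value, and the low threshold is half of it, floored at 0.05. A small standalone sketch of that derivation:

def derive_tracker_thresholds(confidence_threshold):
    """Mirror of the threshold derivation used when building TrackerConfig."""
    high = float(confidence_threshold)
    low = max(0.05, float(confidence_threshold) / 2)   # lets weaker boxes join secondary association
    new = float(confidence_threshold)
    return {"track_high_thresh": high, "track_low_thresh": low, "new_track_thresh": new}

print(derive_tracker_thresholds(0.4))
# {'track_high_thresh': 0.4, 'track_low_thresh': 0.2, 'new_track_thresh': 0.4}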
@@ -204,45 +217,40 @@ class CropWeedDetectionUseCase(BaseProcessor):
             input_settings = stream_info.get("input_settings", {})
             start_frame = input_settings.get("start_frame")
             end_frame = input_settings.get("end_frame")
-            # If start and end frame are the same, it's a single frame
             if start_frame is not None and end_frame is not None and start_frame == end_frame:
                 frame_number = start_frame
 
         # Compute summaries and alerts
-        general_counting_summary = calculate_counting_summary(data)
-        counting_summary = self._count_categories(processed_data, config)
-
-        total_counts =
-        counting_summary['total_counts'] = total_counts
+        general_counting_summary = calculate_counting_summary(data)
+        counting_summary = self._count_categories(processed_data, config)
+        total_counts = self.get_total_counts()
+        counting_summary['total_counts'] = total_counts
 
         alerts = self._check_alerts(counting_summary, frame_number, config)
         predictions = self._extract_predictions(processed_data)
 
-        #
+        # Generate structured outputs
         incidents_list = self._generate_incidents(counting_summary, alerts, config, frame_number, stream_info)
         tracking_stats_list = self._generate_tracking_stats(counting_summary, alerts, config, frame_number, stream_info)
         business_analytics_list = self._generate_business_analytics(counting_summary, alerts, config, stream_info, is_empty=True)
         summary_list = self._generate_summary(counting_summary, incidents_list, tracking_stats_list, business_analytics_list, alerts)
 
-        # Extract frame-based dictionaries
+        # Extract frame-based dictionaries
         incidents = incidents_list[0] if incidents_list else {}
         tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
         business_analytics = business_analytics_list[0] if business_analytics_list else {}
         summary = summary_list[0] if summary_list else {}
         agg_summary = {str(frame_number): {
-
-
-
-
-
-
-
-
+                "incidents": incidents,
+                "tracking_stats": tracking_stats,
+                "business_analytics": business_analytics,
+                "alerts": alerts,
+                "human_text": summary}
+            }
+
         context.mark_completed()
 
-        #
-
-        # DEBUG:
+        # DEBUG: Log final summary
         print(f"Final aggregated summary: {agg_summary}")
 
         result = self.create_result(
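The per-frame outputs are now bundled under the frame number used as a key. A minimal sketch of the resulting agg_summary shape, with placeholder values, is:

frame_number = 12
incidents, tracking_stats, business_analytics, alerts = {}, {}, {}, []
summary = {"Application Name": "crop_weed_detection"}

# Frame-keyed aggregate, mirroring the shape built in process() above.
agg_summary = {
    str(frame_number): {
        "incidents": incidents,
        "tracking_stats": tracking_stats,
        "business_analytics": business_analytics,
        "alerts": alerts,
        "human_text": summary,
    }
}
print(agg_summary)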
@@ -254,18 +262,14 @@ class CropWeedDetectionUseCase(BaseProcessor):
 
         return result
 
-    def _check_alerts(self, summary: dict, frame_number:Any, config: CropWeedDetectionConfig) -> List[Dict]:
+    def _check_alerts(self, summary: dict, frame_number: Any, config: CropWeedDetectionConfig) -> List[Dict]:
         """
         Check if any alert thresholds are exceeded and return alert dicts.
         """
         def get_trend(data, lookback=900, threshold=0.6):
-            '''
-            Determine if the trend is ascending or descending based on actual value progression.
-            Now works with values 0,1,2,3 (not just binary).
-            '''
             window = data[-lookback:] if len(data) >= lookback else data
             if len(window) < 2:
-                return True
+                return True
             increasing = 0
             total = 0
             for i in range(1, len(window)):
@@ -280,53 +284,44 @@ class CropWeedDetectionUseCase(BaseProcessor):
 
         frame_key = str(frame_number) if frame_number is not None else "current_frame"
         alerts = []
-        total_detections = summary.get("total_count", 0)
-        total_counts_dict = summary.get("total_counts", {})
-        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0
-        per_category_count = summary.get("per_category_count", {})
+        total_detections = summary.get("total_count", 0)
+        total_counts_dict = summary.get("total_counts", {})
+        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0
+        per_category_count = summary.get("per_category_count", {})
 
         if not config.alert_config:
             return alerts
 
         total = summary.get("total_count", 0)
-        #self._ascending_alert_list
         if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
-
             for category, threshold in config.alert_config.count_thresholds.items():
-                if category == "all" and total > threshold:
-
+                if category == "all" and total > threshold:
                     alerts.append({
-                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default'])
-                        "alert_id": "alert_"+category+'_'+frame_key,
+                        "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                        "alert_id": "alert_" + category + '_' + frame_key,
                         "incident_category": self.CASE_TYPE,
                         "threshold_level": threshold,
                         "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
-                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default'])
-
-                        }
+                        "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                          getattr(config.alert_config, 'alert_value', ['JSON']))}
                     })
                 elif category in summary.get("per_category_count", {}):
                     count = summary.get("per_category_count", {})[category]
-                    if count > threshold:
+                    if count > threshold:
                         alerts.append({
-                            "alert_type": getattr(config.alert_config, 'alert_type', ['Default'])
-                            "alert_id": "alert_"+category+'_'+frame_key,
+                            "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
+                            "alert_id": "alert_" + category + '_' + frame_key,
                             "incident_category": self.CASE_TYPE,
                             "threshold_level": threshold,
                             "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
-                            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default'])
-
-                            }
+                            "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                              getattr(config.alert_config, 'alert_value', ['JSON']))}
                         })
-                else:
-                    pass
         return alerts
 
     def _generate_incidents(self, counting_summary: Dict, alerts: List, config: CropWeedDetectionConfig,
-
-                            Dict]:
+                            frame_number: Optional[int] = None, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
         """Generate structured incidents for the output format with frame-based keys."""
-
         incidents = []
         total_detections = counting_summary.get("total_count", 0)
         current_timestamp = self._get_current_timestamp_str(stream_info)
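The alert check compares the frame's total and per-category counts against the configured thresholds. A simplified standalone sketch of that decision, omitting the trend computation over _ascending_alert_list and the alert settings (the current_count field is added here only for illustration):

def check_alerts(per_category_count, total, count_thresholds, frame_key="0"):
    """Simplified stand-in for _check_alerts: emit one alert per exceeded threshold."""
    alerts = []
    for category, threshold in count_thresholds.items():
        if category == "all" and total > threshold:
            count = total
        elif per_category_count.get(category, 0) > threshold:
            count = per_category_count[category]
        else:
            continue
        alerts.append({
            "alert_id": f"alert_{category}_{frame_key}",
            "incident_category": "crop_weed_detection",
            "threshold_level": threshold,
            "current_count": count,   # illustration only
        })
    return alerts

print(check_alerts({"Maize": 4, "NarWeed": 9}, total=13, count_thresholds={"all": 10, "NarWeed": 5}))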
@@ -335,22 +330,20 @@ class CropWeedDetectionUseCase(BaseProcessor):
         self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list
 
         if total_detections > 0:
-            # Determine event level based on thresholds
             level = "low"
             intensity = 5.0
             start_timestamp = self._get_start_timestamp_str(stream_info)
-            if start_timestamp and self.current_incident_end_timestamp=='N/A':
+            if start_timestamp and self.current_incident_end_timestamp == 'N/A':
                 self.current_incident_end_timestamp = 'Incident still active'
-            elif start_timestamp and self.current_incident_end_timestamp=='Incident still active':
-                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
+            elif start_timestamp and self.current_incident_end_timestamp == 'Incident still active':
+                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
                     self.current_incident_end_timestamp = current_timestamp
-            elif self.current_incident_end_timestamp!='Incident still active' and self.current_incident_end_timestamp!='N/A':
+            elif self.current_incident_end_timestamp != 'Incident still active' and self.current_incident_end_timestamp != 'N/A':
                 self.current_incident_end_timestamp = 'N/A'
 
             if config.alert_config and config.alert_config.count_thresholds:
                 threshold = config.alert_config.count_thresholds.get("all", 15)
                 intensity = min(10.0, (total_detections / threshold) * 10)
-
                 if intensity >= 9:
                     level = "critical"
                     self._ascending_alert_list.append(3)
@@ -381,29 +374,34 @@ class CropWeedDetectionUseCase(BaseProcessor):
                 intensity = min(10.0, total_detections / 3.0)
                 self._ascending_alert_list.append(0)
 
-            # Generate human text in new format
             human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
-            human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE,level)}")
+            human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE, level)}")
             human_text = "\n".join(human_text_lines)
 
-            alert_settings=[]
+            alert_settings = []
             if config.alert_config and hasattr(config.alert_config, 'alert_type'):
                 alert_settings.append({
-                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default'])
+                    "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                     "incident_category": self.CASE_TYPE,
                     "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                     "ascending": True,
-                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default'])
-
-                    }
+                    "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                      getattr(config.alert_config, 'alert_value', ['JSON']))}
                 })
 
-            event= self.create_incident(
-
-
-
+            event = self.create_incident(
+                incident_id=self.CASE_TYPE + '_' + str(frame_number),
+                incident_type=self.CASE_TYPE,
+                severity_level=level,
+                human_text=human_text,
+                camera_info=camera_info,
+                alerts=alerts,
+                alert_settings=alert_settings,
+                start_time=start_timestamp,
+                end_time=self.current_incident_end_timestamp,
+                level_settings={"low": 1, "medium": 3, "significant": 4, "critical": 7}
+            )
             incidents.append(event)
-
         else:
             self._ascending_alert_list.append(0)
             incidents.append({})
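Incident severity is derived from the detection count relative to the "all" threshold. Only the critical cutoff (intensity >= 9) is visible in this hunk, so the intermediate cutoffs in the sketch below are assumptions for demonstration, not the package's actual values:

def severity_from_counts(total_detections, all_threshold=15):
    """Illustrative sketch: map a detection count to an intensity and severity level.
    Only the 'critical' cutoff is visible in the diff; lower cutoffs are assumed."""
    intensity = min(10.0, (total_detections / all_threshold) * 10)
    if intensity >= 9:
        level = "critical"
    elif intensity >= 7:
        level = "significant"   # assumed cutoff
    elif intensity >= 4:
        level = "medium"        # assumed cutoff
    else:
        level = "low"
    return intensity, level

print(severity_from_counts(14))  # (~9.33, 'critical')
print(severity_from_counts(3))   # (2.0, 'low')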
@@ -420,80 +418,50 @@ class CropWeedDetectionUseCase(BaseProcessor):
     ) -> List[Dict]:
         """Generate structured tracking stats matching eg.json format."""
         camera_info = self.get_camera_info_from_stream(stream_info)
-
-        # frame_key = str(frame_number) if frame_number is not None else "current_frame"
-        # tracking_stats = [{frame_key: []}]
-        # frame_tracking_stats = tracking_stats[0][frame_key]
         tracking_stats = []
 
-        total_detections = counting_summary.get("total_count", 0)
-        total_counts_dict = counting_summary.get("total_counts", {})
-        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0
-        per_category_count = counting_summary.get("per_category_count", {})
+        total_detections = counting_summary.get("total_count", 0)
+        total_counts_dict = counting_summary.get("total_counts", {})
+        cumulative_total = sum(total_counts_dict.values()) if total_counts_dict else 0
+        per_category_count = counting_summary.get("per_category_count", {})
 
         current_timestamp = self._get_current_timestamp_str(stream_info, precision=False)
         start_timestamp = self._get_start_timestamp_str(stream_info, precision=False)
-
-        # Create high precision timestamps for input_timestamp and reset_timestamp
         high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True)
         high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
 
-
-
-
-
-            if count > 0:
-                total_counts.append({
-                    "category": cat,
-                    "count": count
-                })
-
-        # Build current_counts array in expected format
-        current_counts = []
-        for cat, count in per_category_count.items():
-            if count > 0 or total_detections > 0:  # Include even if 0 when there are detections
-                current_counts.append({
-                    "category": cat,
-                    "count": count
-                })
-
-        # Prepare detections without confidence scores (as per eg.json)
-        detections = []
+        total_counts = [
+            {"category": cat, "count": count}
+            for cat, count in total_counts_dict.items() if count > 0
+        ]
 
-
-
+        current_counts = [
+            {"category": cat, "count": count}
+            for cat, count in per_category_count.items() if count > 0 or total_detections > 0
+        ]
 
-
-
+        detections = [
+            {
                 "category": detection.get("category"),
-                "bounding_box": detection.get("bounding_box", {})
+                "bounding_box": detection.get("bounding_box", {}),
+                "masks": detection.get("masks", []) if detection.get("masks") else None,
+                "segmentation": detection.get("segmentation") if detection.get("segmentation") else None,
+                "mask": detection.get("mask") if detection.get("mask") else None
             }
-
-
-
-            if detection.get("segmentation"):
-                detection_data["segmentation"] = detection.get("segmentation")
-            if detection.get("mask"):
-                detection_data["mask"] = detection.get("mask")
-            detections.append(detection_data)
-
-        # Build alert_settings array in expected format
+            for detection in counting_summary.get("detections", [])
+        ]
+
         alert_settings = []
         if config.alert_config and hasattr(config.alert_config, 'alert_type'):
             alert_settings.append({
-                "alert_type": getattr(config.alert_config, 'alert_type', ['Default'])
+                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']),
                 "incident_category": self.CASE_TYPE,
                 "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                 "ascending": True,
-                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default'])
-
-                }
+                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']),
+                                                  getattr(config.alert_config, 'alert_value', ['JSON']))}
             })
 
-        # DEBUG
-        print(f"Detections for tracking stats: {detections}")
-
-        # Generate human_text in expected format
         human_text_lines = [f"Tracking Statistics:"]
         human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}")
 
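This version replaces the loop-and-append construction of total_counts and current_counts with list comprehensions. The two forms are equivalent, as the sketch below shows on sample data:

total_counts_dict = {"Maize": 7, "NarWeed": 0, "plants": 3}

# Old style (loop and append)
total_counts_loop = []
for cat, count in total_counts_dict.items():
    if count > 0:
        total_counts_loop.append({"category": cat, "count": count})

# New style (comprehension), as introduced in this version
total_counts = [
    {"category": cat, "count": count}
    for cat, count in total_counts_dict.items() if count > 0
]

assert total_counts == total_counts_loop
print(total_counts)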
@@ -512,40 +480,38 @@ class CropWeedDetectionUseCase(BaseProcessor):
             human_text_lines.append("Alerts: None")
 
         human_text = "\n".join(human_text_lines)
-        reset_settings=[
-
-
-
-
-
-            }
-        }
-        ]
-
-        tracking_stat=self.create_tracking_stats(total_counts=total_counts, current_counts=current_counts,
-            detections=detections, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
-            reset_settings=reset_settings, start_time=high_precision_start_timestamp ,
-            reset_time=high_precision_reset_timestamp)
+        reset_settings = [
+            {
+                "interval_type": "daily",
+                "reset_time": {"value": 9, "time_unit": "hour"}
+            }
+        ]
 
-
-
+        tracking_stat = self.create_tracking_stats(
+            total_counts=total_counts,
+            current_counts=current_counts,
+            detections=detections,
+            human_text=human_text,
+            camera_info=camera_info,
+            alerts=alerts,
+            alert_settings=alert_settings,
+            reset_settings=reset_settings,
+            start_time=high_precision_start_timestamp,
+            reset_time=high_precision_reset_timestamp
+        )
 
         tracking_stats.append(tracking_stat)
         return tracking_stats
-
-    def _generate_business_analytics(self, counting_summary: Dict, alerts:Any, config: CropWeedDetectionConfig,
+
+    def _generate_business_analytics(self, counting_summary: Dict, alerts: Any, config: CropWeedDetectionConfig,
+                                     stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
         """Generate standardized business analytics for the agg_summary structure."""
         if is_empty:
             return []
+        # Implement if needed
+        return []
 
-
-        #camera_info = self.get_camera_info_from_stream(stream_info)
-        # business_analytics = self.create_business_analytics(nalysis_name, statistics,
-        # human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
-        # reset_settings)
-        # return business_analytics
-
-    def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
+    def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[dict]:
         """
         Generate a human_text string for the tracking_stat, incident, business analytics and alerts.
         """
@@ -553,11 +519,11 @@ class CropWeedDetectionUseCase(BaseProcessor):
         lines["Application Name"] = self.CASE_TYPE
         lines["Application Version"] = self.CASE_VERSION
         if len(incidents) > 0:
-            lines["Incidents:"]=f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
+            lines["Incidents:"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
         if len(tracking_stats) > 0:
-            lines["Tracking Statistics:"]=f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
+            lines["Tracking Statistics:"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
         if len(business_analytics) > 0:
-            lines["Business Analytics:"]=f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
+            lines["Business Analytics:"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
 
         if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
             lines["Summary"] = "No Summary Data"
@@ -568,13 +534,11 @@ class CropWeedDetectionUseCase(BaseProcessor):
         """
         Get detailed information about track IDs (per frame).
         """
-        # Collect all track_ids in this frame
         frame_track_ids = set()
         for det in detections:
             tid = det.get('track_id')
             if tid is not None:
                 frame_track_ids.add(tid)
-        # Use persistent total set for unique counting
         total_track_ids = set()
         for s in getattr(self, '_per_category_total_track_ids', {}).values():
             total_track_ids.update(s)
@@ -590,10 +554,7 @@ class CropWeedDetectionUseCase(BaseProcessor):
     def _update_tracking_state(self, detections: list):
         """
         Track unique categories track_ids per category for total count after tracking.
-        Applies canonical ID merging to avoid duplicate counting when the underlying
-        tracker loses an object temporarily and assigns a new ID.
         """
-        # Lazily initialise storage dicts
         if not hasattr(self, "_per_category_total_track_ids"):
             self._per_category_total_track_ids = {cat: set() for cat in self.target_categories}
             self._current_frame_track_ids = {cat: set() for cat in self.target_categories}
@@ -605,9 +566,7 @@ class CropWeedDetectionUseCase(BaseProcessor):
                 continue
             bbox = det.get("bounding_box", det.get("bbox"))
             canonical_id = self._merge_or_register_track(raw_track_id, bbox)
-            # Propagate canonical ID back to detection so downstream logic uses it
             det["track_id"] = canonical_id
-
             self._per_category_total_track_ids.setdefault(cat, set()).add(canonical_id)
             self._current_frame_track_ids[cat].add(canonical_id)
 
@@ -617,7 +576,6 @@ class CropWeedDetectionUseCase(BaseProcessor):
         """
         return {cat: len(ids) for cat, ids in getattr(self, '_per_category_total_track_ids', {}).items()}
 
-
     def _format_timestamp_for_stream(self, timestamp: float) -> str:
         """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
         dt = datetime.fromtimestamp(timestamp, tz=timezone.utc)
@@ -627,45 +585,40 @@ class CropWeedDetectionUseCase(BaseProcessor):
         """Format timestamp for video chunks (HH:MM:SS.ms format)."""
         hours = int(timestamp // 3600)
         minutes = int((timestamp % 3600) // 60)
-        seconds = round(float(timestamp % 60),2)
+        seconds = round(float(timestamp % 60), 2)
         return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
 
     def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: Optional[str]=None) -> str:
         """Get formatted current timestamp based on stream type."""
         if not stream_info:
             return "00:00:00.00"
-        # is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
         if precision:
             if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
                 if frame_id:
-                    start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
+                    start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
                 else:
-                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
+                    start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
                 stream_time_str = self._format_timestamp_for_video(start_time)
                 return stream_time_str
             else:
                 return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
 
         if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
-
-
-
-
-
-
+            if frame_id:
+                start_time = int(frame_id) / stream_info.get("input_settings", {}).get("original_fps", 30)
+            else:
+                start_time = stream_info.get("input_settings", {}).get("start_frame", 30) / stream_info.get("input_settings", {}).get("original_fps", 30)
+            stream_time_str = self._format_timestamp_for_video(start_time)
+            return stream_time_str
         else:
-            # For streams, use stream_time from stream_info
             stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
             if stream_time_str:
-                # Parse the high precision timestamp string to get timestamp
                 try:
-                    # Remove " UTC" suffix and parse
                     timestamp_str = stream_time_str.replace(" UTC", "")
                     dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                     timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
                     return self._format_timestamp_for_stream(timestamp)
                 except:
-                    # Fallback to current time if parsing fails
                     return self._format_timestamp_for_stream(time.time())
             else:
                 return self._format_timestamp_for_stream(time.time())
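For video inputs, the current timestamp is computed from a frame index divided by the original FPS and rendered as HH:MM:SS with one decimal of seconds. A standalone sketch of that path:

def format_timestamp_for_video(timestamp):
    """Mirror of _format_timestamp_for_video: HH:MM:SS with one decimal of seconds."""
    hours = int(timestamp // 3600)
    minutes = int((timestamp % 3600) // 60)
    seconds = round(float(timestamp % 60), 2)
    return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"

# frame 4500 at 30 fps -> 150 s into the clip
start_time = 4500 / 30
print(format_timestamp_for_video(start_time))  # 00:02:30.0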
@@ -681,41 +634,32 @@ class CropWeedDetectionUseCase(BaseProcessor):
             return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
 
         if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
-            # If video format, start from 00:00:00
             return "00:00:00"
         else:
-            # For streams, use tracking start time or current time with minutes/seconds reset
             if self._tracking_start_time is None:
-                # Try to extract timestamp from stream_time string
                 stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
                 if stream_time_str:
                     try:
-                        # Remove " UTC" suffix and parse
                         timestamp_str = stream_time_str.replace(" UTC", "")
                         dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
                         self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
                     except:
-                        # Fallback to current time if parsing fails
                         self._tracking_start_time = time.time()
                 else:
                     self._tracking_start_time = time.time()
 
         dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
-        # Reset minutes and seconds to 00:00 for "TOTAL SINCE" format
         dt = dt.replace(minute=0, second=0, microsecond=0)
         return dt.strftime('%Y:%m:%d %H:%M:%S')
 
     def _count_categories(self, detections: list, config: CropWeedDetectionConfig) -> dict:
         """
         Count the number of detections per category and return a summary dict.
-        The detections list is expected to have 'track_id' (from tracker), 'category', 'bounding_box', etc.
-        Output structure will include 'track_id' for each detection as per AdvancedTracker output.
         """
         counts = {}
         for det in detections:
             cat = det.get('category', 'unknown')
             counts[cat] = counts.get(cat, 0) + 1
-        # Each detection dict will now include 'track_id' (and possibly 'frame_id')
         return {
             "total_count": sum(counts.values()),
             "per_category_count": counts,
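The category counting is a straightforward tally per category plus a grand total. A simplified mirror of _count_categories, without the config argument and the per-detection output list:

def count_categories(detections):
    """Simplified mirror of _count_categories: per-category counts plus a total."""
    counts = {}
    for det in detections:
        cat = det.get("category", "unknown")
        counts[cat] = counts.get(cat, 0) + 1
    return {"total_count": sum(counts.values()), "per_category_count": counts}

dets = [{"category": "Maize"}, {"category": "NarWeed"}, {"category": "Maize"}]
print(count_categories(dets))
# {'total_count': 3, 'per_category_count': {'Maize': 2, 'NarWeed': 1}}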
@@ -731,7 +675,6 @@ class CropWeedDetectionUseCase(BaseProcessor):
             ]
         }
 
-
     def _extract_predictions(self, detections: list) -> List[Dict[str, Any]]:
         """
         Extract prediction details for output (category, confidence, bounding box).
@@ -745,14 +688,8 @@ class CropWeedDetectionUseCase(BaseProcessor):
             for det in detections
         ]
 
-    # ------------------------------------------------------------------ #
-    # Canonical ID helpers                                               #
-    # ------------------------------------------------------------------ #
     def _compute_iou(self, box1: Any, box2: Any) -> float:
-        """Compute IoU between two bounding boxes which may be dicts or lists.
-        Falls back to 0 when insufficient data is available."""
-
-        # Helper to convert bbox (dict or list) to [x1, y1, x2, y2]
+        """Compute IoU between two bounding boxes which may be dicts or lists."""
         def _bbox_to_list(bbox):
             if bbox is None:
                 return []
@@ -763,7 +700,6 @@ class CropWeedDetectionUseCase(BaseProcessor):
                     return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
                 if "x1" in bbox:
                     return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
-                # Fallback: first four numeric values
                 values = [v for v in bbox.values() if isinstance(v, (int, float))]
                 return values[:4] if len(values) >= 4 else []
             return []
@@ -775,7 +711,6 @@ class CropWeedDetectionUseCase(BaseProcessor):
         x1_min, y1_min, x1_max, y1_max = l1
         x2_min, y2_min, x2_max, y2_max = l2
 
-        # Ensure correct order
         x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
         y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
         x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
@@ -797,16 +732,11 @@ class CropWeedDetectionUseCase(BaseProcessor):
         return (inter_area / union_area) if union_area > 0 else 0.0
 
     def _merge_or_register_track(self, raw_id: Any, bbox: Any) -> Any:
-        """Return a stable canonical ID for a raw tracker ID
-        tracks when IoU and temporal constraints indicate they represent the
-        same physical."""
+        """Return a stable canonical ID for a raw tracker ID."""
         if raw_id is None or bbox is None:
-            # Nothing to merge
             return raw_id
 
         now = time.time()
-
-        # Fast path – raw_id already mapped
         if raw_id in self._track_aliases:
             canonical_id = self._track_aliases[raw_id]
             track_info = self._canonical_tracks.get(canonical_id)
@@ -816,21 +746,17 @@ class CropWeedDetectionUseCase(BaseProcessor):
             track_info["raw_ids"].add(raw_id)
             return canonical_id
 
-        # Attempt to merge with an existing canonical track
         for canonical_id, info in self._canonical_tracks.items():
-            # Only consider recently updated tracks
             if now - info["last_update"] > self._track_merge_time_window:
                 continue
             iou = self._compute_iou(bbox, info["last_bbox"])
             if iou >= self._track_merge_iou_threshold:
-                # Merge
                 self._track_aliases[raw_id] = canonical_id
                 info["last_bbox"] = bbox
                 info["last_update"] = now
                 info["raw_ids"].add(raw_id)
                 return canonical_id
 
-        # No match – register new canonical track
         canonical_id = raw_id
         self._track_aliases[raw_id] = canonical_id
         self._canonical_tracks[canonical_id] = {
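Canonical track merging links a new raw tracker ID to an existing canonical track when that track was updated within the 7-second window and the boxes overlap with IoU at or above 0.05 (both values set in __init__ in this version). A standalone sketch of the IoU computation on [x_min, y_min, x_max, y_max] boxes and the merge decision:

def iou(box1, box2):
    """Intersection-over-union for [x_min, y_min, x_max, y_max] boxes."""
    x1 = max(box1[0], box2[0])
    y1 = max(box1[1], box2[1])
    x2 = min(box1[2], box2[2])
    y2 = min(box1[3], box2[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
    area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
    union = area1 + area2 - inter
    return inter / union if union > 0 else 0.0

MERGE_IOU_THRESHOLD = 0.05   # _track_merge_iou_threshold
MERGE_TIME_WINDOW = 7.0      # _track_merge_time_window, seconds

last_box, new_box = [10, 10, 50, 50], [12, 11, 52, 49]
seconds_since_update = 2.0
should_merge = seconds_since_update <= MERGE_TIME_WINDOW and iou(new_box, last_box) >= MERGE_IOU_THRESHOLD
print(iou(new_box, last_box), should_merge)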
@@ -173,7 +173,7 @@ matrice/deploy/utils/post_processing/usecases/child_monitoring.py,sha256=z3oymoq
 matrice/deploy/utils/post_processing/usecases/color_detection.py,sha256=Z8-akjy8a7f8YyiOzXu_Zi1Km30v-TRrymDqQOPpJ_8,43277
 matrice/deploy/utils/post_processing/usecases/color_map_utils.py,sha256=SP-AEVcjLmL8rxblu-ixqUJC2fqlcr7ab4hWo4Fcr_k,2677
 matrice/deploy/utils/post_processing/usecases/concrete_crack_detection.py,sha256=pxhOH_hG4hq9yytNepbGMdk2W_lTG8D1_2RAagaPBkg,40252
-matrice/deploy/utils/post_processing/usecases/crop_weed_detection.py,sha256=
+matrice/deploy/utils/post_processing/usecases/crop_weed_detection.py,sha256=uATIX4ox7IfIkwhTXr8QGJHq45bJU7X33WKqewK1eJA,35680
 matrice/deploy/utils/post_processing/usecases/customer_service.py,sha256=UWS83qxguyAyhh8a0JF5QH9DtKxO8I-gI2BPOjLPxBw,44642
 matrice/deploy/utils/post_processing/usecases/defect_detection_products.py,sha256=blvo4wmak-wlvPSZOcmRsV1FoZSeGX_dUAX5A1WheBE,45949
 matrice/deploy/utils/post_processing/usecases/distracted_driver_detection.py,sha256=rkyYHbmcYUAfKbmmKyKxHlk47vJ_fogHWKhQjrERsok,40316
@@ -244,8 +244,8 @@ matrice/deployment/camera_manager.py,sha256=e1Lc81RJP5wUWRdTgHO6tMWF9BkBdHOSVyx3
 matrice/deployment/deployment.py,sha256=HFt151eWq6iqIAMsQvurpV2WNxW6Cx_gIUVfnVy5SWE,48093
 matrice/deployment/inference_pipeline.py,sha256=6b4Mm3-qt-Zy0BeiJfFQdImOn3FzdNCY-7ET7Rp8PMk,37911
 matrice/deployment/streaming_gateway_manager.py,sha256=ifYGl3g25wyU39HwhPQyI2OgF3M6oIqKMWt8RXtMxY8,21401
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
-matrice-1.0.
+matrice-1.0.99287.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
+matrice-1.0.99287.dist-info/METADATA,sha256=U7NyO0Ia_YH0saRsGMknFF5DmQ6xJzy50kI7d69joFg,14624
+matrice-1.0.99287.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+matrice-1.0.99287.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
+matrice-1.0.99287.dist-info/RECORD,,
File without changes
File without changes
File without changes