matrice 1.0.99218__py3-none-any.whl → 1.0.99220__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,1984 +0,0 @@
1
- """
2
- Human Activity Recognition use case implementation.
3
-
4
- This module provides a clean implementation of Human Activity functionality
5
- with zone-based analysis, tracking, and alerting capabilities.
6
- """
7
-
8
- from typing import Any, Dict, List, Optional, Set
9
- from dataclasses import asdict, field
10
- import time
11
- from datetime import datetime, timezone
12
-
13
- from ..core.base import BaseProcessor, ProcessingContext, ProcessingResult, ConfigProtocol, ResultFormat
14
- from ..core.config import HumanActivityConfig, ZoneConfig, AlertConfig
15
- from ..utils import (
16
- filter_by_confidence,
17
- filter_by_categories,
18
- apply_category_mapping,
19
- count_objects_by_category,
20
- count_objects_in_zones,
21
- calculate_counting_summary,
22
- match_results_structure,
23
- bbox_smoothing,
24
- BBoxSmoothingConfig,
25
- BBoxSmoothingTracker,
26
- calculate_iou
27
- )
28
- from ..utils.geometry_utils import get_bbox_center, point_in_polygon, get_bbox_bottom25_center
29
-
30
-
31
- class HumanActivityUseCase(BaseProcessor):
32
- """Human Activity Recognition use case with zone analysis and alerting."""
33
-
34
- def __init__(self):
35
- """Initialize Human Activity Recognition use case."""
36
- super().__init__("human_activity_recognition")
37
- self.category = "general"
38
- self.CASE_TYPE: Optional[str] = 'Human_Activity_Recognition'
39
- self.CASE_VERSION: Optional[str] = '1.3'
40
-
41
- # Track ID storage for total count calculation
42
- self._total_track_ids = set() # Store all unique track IDs seen across calls
43
- self._current_frame_track_ids = set() # Store track IDs from current frame
44
- self._total_count = 0 # Cached total count
45
- self._last_update_time = time.time() # Track when last updated
46
-
47
- # Zone-based tracking storage
48
- self._zone_current_track_ids = {} # zone_name -> set of current track IDs in zone
49
- self._zone_total_track_ids = {} # zone_name -> set of all track IDs that have been in zone
50
- self._zone_current_counts = {} # zone_name -> current count in zone
51
- self._zone_total_counts = {} # zone_name -> total count that have been in zone
52
-
53
- # Frame counter for tracking total frames processed
54
- self._total_frame_counter = 0 # Total frames processed across all calls
55
-
56
- # Global frame offset for video chunk processing
57
- self._global_frame_offset = 0 # Offset to add to local frame IDs for global frame numbering
58
- self._frames_in_current_chunk = 0 # Number of frames in current chunk
59
-
60
- # Initialize smoothing tracker
61
- self.smoothing_tracker = None
62
-
63
- # Track start time for "TOTAL SINCE" calculation
64
- self._tracking_start_time = None
65
-
66
- # --------------------------------------------------------------------- #
67
- # Tracking aliasing structures to merge fragmented IDs #
68
- # --------------------------------------------------------------------- #
69
- # Maps raw tracker IDs generated by ByteTrack to a stable canonical ID
70
- # that represents a real-world activity. This helps avoid double counting
71
- # when the tracker loses a target temporarily and assigns a new ID.
72
- self._track_aliases: Dict[Any, Any] = {}
73
-
74
- # Stores metadata about each canonical track such as its last seen
75
- # bounding box, last update timestamp and all raw IDs that have been
76
- # merged into it.
77
- self._canonical_tracks: Dict[Any, Dict[str, Any]] = {}
78
-
79
- # IoU threshold above which two bounding boxes are considered to belong
80
- # to the same activity (empirically chosen; adjust in production if
81
- # needed).
82
- self._track_merge_iou_threshold: float = 0.04
83
-
84
- # Only merge with canonical tracks that were updated within this time
85
- # window (in seconds). This prevents accidentally merging tracks that
86
- # left the scene long ago.
87
- self._track_merge_time_window: float = 10.0
88
-
89
- self._ascending_alert_list: List[int] = []
90
- self.current_incident_end_timestamp: str = "N/A"
91
- self.index_to_category={
92
- 0: "abseiling",
93
- 1: "air drumming",
94
- 2: "answering questions",
95
- 3: "applauding",
96
- 4: "applying cream",
97
- 5: "archery",
98
- 6: "arm wrestling",
99
- 7: "arranging flowers",
100
- 8: "assembling computer",
101
- 9: "auctioning",
102
- 10: "baby waking up",
103
- 11: "baking cookies",
104
- 12: "balloon blowing",
105
- 13: "bandaging",
106
- 14: "barbequing",
107
- 15: "bartending",
108
- 16: "beatboxing",
109
- 17: "bee keeping",
110
- 18: "belly dancing",
111
- 19: "bench pressing",
112
- 20: "bending back",
113
- 21: "bending metal",
114
- 22: "biking through snow",
115
- 23: "blasting sand",
116
- 24: "blowing glass",
117
- 25: "blowing leaves",
118
- 26: "blowing nose",
119
- 27: "blowing out candles",
120
- 28: "bobsledding",
121
- 29: "bookbinding",
122
- 30: "bouncing on trampoline",
123
- 31: "bowling",
124
- 32: "braiding hair",
125
- 33: "breading or breadcrumbing",
126
- 34: "breakdancing",
127
- 35: "brush painting",
128
- 36: "brushing hair",
129
- 37: "brushing teeth",
130
- 38: "building cabinet",
131
- 39: "building shed",
132
- 40: "bungee jumping",
133
- 41: "busking",
134
- 42: "canoeing or kayaking",
135
- 43: "capoeira",
136
- 44: "carrying baby",
137
- 45: "cartwheeling",
138
- 46: "carving pumpkin",
139
- 47: "catching fish",
140
- 48: "catching or throwing baseball",
141
- 49: "catching or throwing frisbee",
142
- 50: "catching or throwing softball",
143
- 51: "celebrating",
144
- 52: "changing oil",
145
- 53: "changing wheel",
146
- 54: "checking tires",
147
- 55: "cheerleading",
148
- 56: "chopping wood",
149
- 57: "clapping",
150
- 58: "clay pottery making",
151
- 59: "clean and jerk",
152
- 60: "cleaning floor",
153
- 61: "cleaning gutters",
154
- 62: "cleaning pool",
155
- 63: "cleaning shoes",
156
- 64: "cleaning toilet",
157
- 65: "cleaning windows",
158
- 66: "climbing a rope",
159
- 67: "climbing ladder",
160
- 68: "climbing tree",
161
- 69: "contact juggling",
162
- 70: "cooking chicken",
163
- 71: "cooking egg",
164
- 72: "cooking on campfire",
165
- 73: "cooking sausages",
166
- 74: "counting money",
167
- 75: "country line dancing",
168
- 76: "cracking neck",
169
- 77: "crawling baby",
170
- 78: "crossing river",
171
- 79: "crying",
172
- 80: "curling hair",
173
- 81: "cutting nails",
174
- 82: "cutting pineapple",
175
- 83: "cutting watermelon",
176
- 84: "dancing ballet",
177
- 85: "dancing charleston",
178
- 86: "dancing gangnam style",
179
- 87: "dancing macarena",
180
- 88: "deadlifting",
181
- 89: "decorating the christmas tree",
182
- 90: "digging",
183
- 91: "dining",
184
- 92: "disc golfing",
185
- 93: "diving cliff",
186
- 94: "dodgeball",
187
- 95: "doing aerobics",
188
- 96: "doing laundry",
189
- 97: "doing nails",
190
- 98: "drawing",
191
- 99: "dribbling basketball",
192
- 100: "drinking",
193
- 101: "drinking beer",
194
- 102: "drinking shots",
195
- 103: "driving car",
196
- 104: "driving tractor",
197
- 105: "drop kicking",
198
- 106: "drumming fingers",
199
- 107: "dunking basketball",
200
- 108: "dying hair",
201
- 109: "eating burger",
202
- 110: "eating cake",
203
- 111: "eating carrots",
204
- 112: "eating chips",
205
- 113: "eating doughnuts",
206
- 114: "eating hotdog",
207
- 115: "eating ice cream",
208
- 116: "eating spaghetti",
209
- 117: "eating watermelon",
210
- 118: "egg hunting",
211
- 119: "exercising arm",
212
- 120: "exercising with an exercise ball",
213
- 121: "extinguishing fire",
214
- 122: "faceplanting",
215
- 123: "feeding birds",
216
- 124: "feeding fish",
217
- 125: "feeding goats",
218
- 126: "filling eyebrows",
219
- 127: "finger snapping",
220
- 128: "fixing hair",
221
- 129: "flipping pancake",
222
- 130: "flying kite",
223
- 131: "folding clothes",
224
- 132: "folding napkins",
225
- 133: "folding paper",
226
- 134: "front raises",
227
- 135: "frying vegetables",
228
- 136: "garbage collecting",
229
- 137: "gargling",
230
- 138: "getting a haircut",
231
- 139: "getting a tattoo",
232
- 140: "giving or receiving award",
233
- 141: "golf chipping",
234
- 142: "golf driving",
235
- 143: "golf putting",
236
- 144: "grinding meat",
237
- 145: "grooming dog",
238
- 146: "grooming horse",
239
- 147: "gymnastics tumbling",
240
- 148: "hammer throw",
241
- 149: "headbanging",
242
- 150: "headbutting",
243
- 151: "high jump",
244
- 152: "high kick",
245
- 153: "hitting baseball",
246
- 154: "hockey stop",
247
- 155: "holding snake",
248
- 156: "hopscotch",
249
- 157: "hoverboarding",
250
- 158: "hugging",
251
- 159: "hula hooping",
252
- 160: "hurdling",
253
- 161: "hurling (sport)",
254
- 162: "ice climbing",
255
- 163: "ice fishing",
256
- 164: "ice skating",
257
- 165: "ironing",
258
- 166: "javelin throw",
259
- 167: "jetskiing",
260
- 168: "jogging",
261
- 169: "juggling balls",
262
- 170: "juggling fire",
263
- 171: "juggling soccer ball",
264
- 172: "jumping into pool",
265
- 173: "jumpstyle dancing",
266
- 174: "kicking field goal",
267
- 175: "kicking soccer ball",
268
- 176: "kissing",
269
- 177: "kitesurfing",
270
- 178: "knitting",
271
- 179: "krumping",
272
- 180: "laughing",
273
- 181: "laying bricks",
274
- 182: "long jump",
275
- 183: "lunge",
276
- 184: "making a cake",
277
- 185: "making a sandwich",
278
- 186: "making bed",
279
- 187: "making jewelry",
280
- 188: "making pizza",
281
- 189: "making snowman",
282
- 190: "making sushi",
283
- 191: "making tea",
284
- 192: "marching",
285
- 193: "massaging back",
286
- 194: "massaging feet",
287
- 195: "massaging legs",
288
- 196: "massaging person's head",
289
- 197: "milking cow",
290
- 198: "mopping floor",
291
- 199: "motorcycling",
292
- 200: "moving furniture",
293
- 201: "mowing lawn",
294
- 202: "news anchoring",
295
- 203: "opening bottle",
296
- 204: "opening present",
297
- 205: "paragliding",
298
- 206: "parasailing",
299
- 207: "parkour",
300
- 208: "passing American football (in game)",
301
- 209: "passing American football (not in game)",
302
- 210: "peeling apples",
303
- 211: "peeling potatoes",
304
- 212: "petting animal (not cat)",
305
- 213: "petting cat",
306
- 214: "picking fruit",
307
- 215: "planting trees",
308
- 216: "plastering",
309
- 217: "playing accordion",
310
- 218: "playing badminton",
311
- 219: "playing bagpipes",
312
- 220: "playing basketball",
313
- 221: "playing bass guitar",
314
- 222: "playing cards",
315
- 223: "playing cello",
316
- 224: "playing chess",
317
- 225: "playing clarinet",
318
- 226: "playing controller",
319
- 227: "playing cricket",
320
- 228: "playing cymbals",
321
- 229: "playing didgeridoo",
322
- 230: "playing drums",
323
- 231: "playing flute",
324
- 232: "playing guitar",
325
- 233: "playing harmonica",
326
- 234: "playing harp",
327
- 235: "playing ice hockey",
328
- 236: "playing keyboard",
329
- 237: "playing kickball",
330
- 238: "playing monopoly",
331
- 239: "playing organ",
332
- 240: "playing paintball",
333
- 241: "playing piano",
334
- 242: "playing poker",
335
- 243: "playing recorder",
336
- 244: "playing saxophone",
337
- 245: "playing squash or racquetball",
338
- 246: "playing tennis",
339
- 247: "playing trombone",
340
- 248: "playing trumpet",
341
- 249: "playing ukulele",
342
- 250: "playing violin",
343
- 251: "playing volleyball",
344
- 252: "playing xylophone",
345
- 253: "pole vault",
346
- 254: "presenting weather forecast",
347
- 255: "pull ups",
348
- 256: "pumping fist",
349
- 257: "pumping gas",
350
- 258: "punching bag",
351
- 259: "punching person (boxing)",
352
- 260: "push up",
353
- 261: "pushing car",
354
- 262: "pushing cart",
355
- 263: "pushing wheelchair",
356
- 264: "reading book",
357
- 265: "reading newspaper",
358
- 266: "recording music",
359
- 267: "riding a bike",
360
- 268: "riding camel",
361
- 269: "riding elephant",
362
- 270: "riding mechanical bull",
363
- 271: "riding mountain bike",
364
- 272: "riding mule",
365
- 273: "riding or walking with horse",
366
- 274: "riding scooter",
367
- 275: "riding unicycle",
368
- 276: "ripping paper",
369
- 277: "robot dancing",
370
- 278: "rock climbing",
371
- 279: "rock scissors paper",
372
- 280: "roller skating",
373
- 281: "running on treadmill",
374
- 282: "sailing",
375
- 283: "salsa dancing",
376
- 284: "sanding floor",
377
- 285: "scrambling eggs",
378
- 286: "scuba diving",
379
- 287: "setting table",
380
- 288: "shaking hands",
381
- 289: "shaking head",
382
- 290: "sharpening knives",
383
- 291: "sharpening pencil",
384
- 292: "shaving head",
385
- 293: "shaving legs",
386
- 294: "shearing sheep",
387
- 295: "shining shoes",
388
- 296: "shooting basketball",
389
- 297: "shooting goal (soccer)",
390
- 298: "shot put",
391
- 299: "shoveling snow",
392
- 300: "shredding paper",
393
- 301: "shuffling cards",
394
- 302: "side kick",
395
- 303: "sign language interpreting",
396
- 304: "singing",
397
- 305: "situp",
398
- 306: "skateboarding",
399
- 307: "ski jumping",
400
- 308: "skiing (not slalom or crosscountry)",
401
- 309: "skiing crosscountry",
402
- 310: "skiing slalom",
403
- 311: "skipping rope",
404
- 312: "skydiving",
405
- 313: "slacklining",
406
- 314: "slapping",
407
- 315: "sled dog racing",
408
- 316: "smoking",
409
- 317: "smoking hookah",
410
- 318: "snatch weight lifting",
411
- 319: "sneezing",
412
- 320: "sniffing",
413
- 321: "snorkeling",
414
- 322: "snowboarding",
415
- 323: "snowkiting",
416
- 324: "snowmobiling",
417
- 325: "somersaulting",
418
- 326: "spinning poi",
419
- 327: "spray painting",
420
- 328: "spraying",
421
- 329: "springboard diving",
422
- 330: "squat",
423
- 331: "sticking tongue out",
424
- 332: "stomping grapes",
425
- 333: "stretching arm",
426
- 334: "stretching leg",
427
- 335: "strumming guitar",
428
- 336: "surfing crowd",
429
- 337: "surfing water",
430
- 338: "sweeping floor",
431
- 339: "swimming backstroke",
432
- 340: "swimming breast stroke",
433
- 341: "swimming butterfly stroke",
434
- 342: "swing dancing",
435
- 343: "swinging legs",
436
- 344: "swinging on something",
437
- 345: "sword fighting",
438
- 346: "tai chi",
439
- 347: "taking a shower",
440
- 348: "tango dancing",
441
- 349: "tap dancing",
442
- 350: "tapping guitar",
443
- 351: "tapping pen",
444
- 352: "tasting beer",
445
- 353: "tasting food",
446
- 354: "testifying",
447
- 355: "texting",
448
- 356: "throwing axe",
449
- 357: "throwing ball",
450
- 358: "throwing discus",
451
- 359: "tickling",
452
- 360: "tobogganing",
453
- 361: "tossing coin",
454
- 362: "tossing salad",
455
- 363: "training dog",
456
- 364: "trapezing",
457
- 365: "trimming or shaving beard",
458
- 366: "trimming trees",
459
- 367: "triple jump",
460
- 368: "tying bow tie",
461
- 369: "tying knot (not on a tie)",
462
- 370: "tying tie",
463
- 371: "unboxing",
464
- 372: "unloading truck",
465
- 373: "using computer",
466
- 374: "using remote controller (not gaming)",
467
- 375: "using segway",
468
- 376: "vault",
469
- 377: "waiting in line",
470
- 378: "walking the dog",
471
- 379: "washing dishes",
472
- 380: "washing feet",
473
- 381: "washing hair",
474
- 382: "washing hands",
475
- 383: "water skiing",
476
- 384: "water sliding",
477
- 385: "watering plants",
478
- 386: "waxing back",
479
- 387: "waxing chest",
480
- 388: "waxing eyebrows",
481
- 389: "waxing legs",
482
- 390: "weaving basket",
483
- 391: "welding",
484
- 392: "whistling",
485
- 393: "windsurfing",
486
- 394: "wrapping present",
487
- 395: "wrestling",
488
- 396: "writing",
489
- 397: "yawning",
490
- 398: "yoga",
491
- 399: "zumba",
492
- }
493
-
494
-
495
- def process(self, data: Any, config: ConfigProtocol,
496
- context: Optional[ProcessingContext] = None, stream_info: Optional[Any] = None) -> ProcessingResult:
497
- """
498
- Process human activity use case - automatically detects single or multi-frame structure.
499
-
500
- Args:
501
- data: Raw model output (detection or tracking format)
502
- config: human activity configuration
503
- context: Processing context
504
- stream_info: Stream information containing frame details (optional)
505
-
506
- Returns:
507
- ProcessingResult: Processing result with standardized agg_summary structure
508
- """
509
- start_time = time.time()
510
-
511
- try:
512
- # Ensure we have the right config type
513
- if not isinstance(config, HumanActivityConfig):
514
- return self.create_error_result(
515
- "Invalid configuration type for human activity recognition",
516
- usecase=self.name,
517
- category=self.category,
518
- context=context
519
- )
520
-
521
- # Initialize processing context if not provided
522
- if context is None:
523
- context = ProcessingContext()
524
-
525
- # Detect input format and frame structure
526
- print("--------------------------------------------------------------------")
527
- print(data)
528
- input_format = match_results_structure(data)
529
- context.input_format = input_format
530
- context.confidence_threshold = config.confidence_threshold
531
-
532
- is_multi_frame = self.detect_frame_structure(data)
533
-
534
- #self.logger.info(f"Processing Human Activity - Format: {input_format.value}, Multi-frame: {is_multi_frame}")
535
-
536
- # Apply smoothing if enabled
537
- if config.enable_smoothing and input_format == ResultFormat.OBJECT_TRACKING:
538
- data = self._apply_smoothing(data, config)
539
-
540
- # Process based on frame structure
541
- if is_multi_frame:
542
-
543
- return self._process_multi_frame(data, config, context, stream_info)
544
- else:
545
- return self._process_single_frame(data, config, context, stream_info)
546
-
547
- except Exception as e:
548
- self.logger.error(f"Human Activity failed: {str(e)}", exc_info=True)
549
-
550
- if context:
551
- context.mark_completed()
552
-
553
- return self.create_error_result(
554
- str(e),
555
- type(e).__name__,
556
- usecase=self.name,
557
- category=self.category,
558
- context=context
559
- )
560
-
561
- def _process_multi_frame(self, data: Dict, config: HumanActivityConfig, context: ProcessingContext, stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
562
- """Process multi-frame data to generate frame-wise agg_summary."""
563
-
564
- frame_incidents = {}
565
- frame_tracking_stats = {}
566
- frame_business_analytics = {}
567
- frame_human_text = {}
568
- frame_alerts = {}
569
-
570
- # Increment total frame counter
571
- frames_in_this_call = len(data)
572
- self._total_frame_counter += frames_in_this_call
573
-
574
- # Process each frame individually
575
- for frame_key, frame_detections in data.items():
576
- # Extract frame ID from tracking data
577
- frame_id = self._extract_frame_id_from_tracking(frame_detections, frame_key)
578
- global_frame_id = self.get_global_frame_id(frame_id)
579
-
580
- # Process this single frame's detections
581
- alerts, incidents_list, tracking_stats_list, business_analytics_list, summary_list = self._process_frame_detections(
582
- frame_detections, config, global_frame_id, stream_info
583
- )
584
- incidents = incidents_list[0] if incidents_list else {}
585
- tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
586
- business_analytics = business_analytics_list[0] if business_analytics_list else {}
587
- summary = summary_list[0] if summary_list else {}
588
-
589
- # Store frame-wise results
590
- if incidents:
591
- frame_incidents[global_frame_id] = incidents
592
- if tracking_stats:
593
- frame_tracking_stats[global_frame_id] = tracking_stats
594
- if business_analytics:
595
- frame_business_analytics[global_frame_id] = business_analytics
596
- if summary:
597
- frame_human_text[global_frame_id] = summary
598
- if alerts:
599
- frame_alerts[global_frame_id] = alerts
600
-
601
- # Update global frame offset after processing this chunk
602
- self.update_global_frame_offset(frames_in_this_call)
603
-
604
- # Create frame-wise agg_summary
605
- agg_summary = self.create_frame_wise_agg_summary(
606
- frame_incidents, frame_tracking_stats, frame_business_analytics, frame_alerts,
607
- frame_human_text=frame_human_text
608
- )
609
-
610
- # Mark processing as completed
611
- context.mark_completed()
612
-
613
- # Create result with standardized agg_summary
614
- return self.create_result(
615
- data={"agg_summary": agg_summary},
616
- usecase=self.name,
617
- category=self.category,
618
- context=context
619
- )
620
-
621
- def _process_single_frame(self, data: Any, config: HumanActivityConfig, context: ProcessingContext, stream_info: Optional[Dict[str, Any]] = None) -> ProcessingResult:
622
- """Process single frame data and return standardized agg_summary."""
623
-
624
- current_frame = stream_info.get("input_settings", {}).get("start_frame", "current_frame")
625
- # Process frame data
626
- alerts, incidents_list, tracking_stats_list, business_analytics_list, summary_list = self._process_frame_detections(
627
- data, config, current_frame, stream_info
628
- )
629
- incidents = incidents_list[0] if incidents_list else {}
630
- tracking_stats = tracking_stats_list[0] if tracking_stats_list else {}
631
- business_analytics = business_analytics_list[0] if business_analytics_list else {}
632
- summary = summary_list[0] if summary_list else {}
633
-
634
- # Create single-frame agg_summary
635
- agg_summary = self.create_agg_summary(
636
- current_frame, incidents, tracking_stats, business_analytics, alerts, human_text=summary
637
- )
638
-
639
- # Mark processing as completed
640
- context.mark_completed()
641
-
642
- # Create result with standardized agg_summary
643
- return self.create_result(
644
- data={"agg_summary": agg_summary},
645
- usecase=self.name,
646
- category=self.category,
647
- context=context
648
- )
649
-
650
-
651
- def _process_frame_detections(self, frame_data: Any, config: HumanActivityConfig, frame_id: str, stream_info: Optional[Dict[str, Any]] = None) -> tuple:
652
- """Process detections from a single frame and return standardized components."""
653
-
654
- # Convert frame_data to list if it's not already
655
- if isinstance(frame_data, list):
656
- frame_detections = frame_data
657
- else:
658
- # Handle other formats as needed
659
- frame_detections = []
660
-
661
- # Step 1: Apply confidence filtering to this frame
662
- if config.confidence_threshold is not None:
663
- frame_detections = [d for d in frame_detections if d.get("confidence", 0) >= config.confidence_threshold]
664
-
665
- # Step 2: Apply category mapping if provided
666
- if self.index_to_category:
667
- frame_detections = apply_category_mapping(frame_detections, self.index_to_category)
668
-
669
- # Step 3: Filter to activity categories
670
- if config.activity_categories:
671
- frame_detections = [d for d in frame_detections if d.get("category") in config.activity_categories]
672
-
673
- # Step 4: Create counting summary for this frame
674
- counting_summary = {
675
- "total_objects": len(frame_detections),
676
- "detections": frame_detections,
677
- "categories": {}
678
- }
679
-
680
- # Count by category
681
- for detection in frame_detections:
682
- category = detection.get("category", "unknown")
683
- counting_summary["categories"][category] = counting_summary["categories"].get(category, 0) + 1
684
-
685
- print(counting_summary)
686
- # Step 5: Zone analysis for this frame
687
- zone_analysis = {}
688
- # if config.zone_config and config.zone_config.zones:
689
- # # Convert single frame to format expected by count_objects_in_zones
690
- # frame_data = frame_detections #[frame_detections]
691
- # zone_analysis = count_objects_in_zones(frame_data, config.zone_config.zones)
692
-
693
- # # Update zone tracking with current frame data
694
- # if zone_analysis and config.enable_tracking:
695
- # enhanced_zone_analysis = self._update_zone_tracking(zone_analysis, frame_detections, config)
696
- # # Merge enhanced zone analysis with original zone analysis
697
- # for zone_name, enhanced_data in enhanced_zone_analysis.items():
698
- # zone_analysis[zone_name] = enhanced_data
699
-
700
- # Step 4.5: Always update tracking state (regardless of enable_unique_counting setting)
701
- self._update_tracking_state(counting_summary)
702
-
703
- # Step 5: Generate insights and alerts for this frame
704
- alerts = self._check_alerts(counting_summary, zone_analysis, config, frame_id)
705
-
706
- # Step 6: Generate summary and standardized agg_summary components for this frame
707
- incidents = self._generate_incidents(counting_summary, zone_analysis, alerts, config, frame_id, stream_info)
708
- tracking_stats = self._generate_tracking_stats(counting_summary, zone_analysis, config, frame_id=frame_id, alerts=alerts, stream_info=stream_info)
709
- business_analytics = self._generate_business_analytics(counting_summary, zone_analysis, config, frame_id, stream_info, is_empty=True)
710
- summary = self._generate_summary(counting_summary, incidents, tracking_stats, business_analytics, alerts)
711
-
712
- print(tracking_stats)
713
- # Return standardized components as tuple
714
- return alerts, incidents, tracking_stats, business_analytics, summary
715
-
716
    def _generate_incidents(self, counting_summary: Dict, zone_analysis: Dict, alerts: List, config: HumanActivityConfig, frame_id: str, stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
        """Generate standardized incidents for the agg_summary structure.

        Derives a severity level and intensity from the per-frame activity
        count (optionally against configured thresholds), maintains the
        incident start/end timestamps, and returns a list of incident dicts
        (an empty dict placeholder when no activity was detected).
        """

        camera_info = self.get_camera_info_from_stream(stream_info)
        incidents = []
        total_activity = counting_summary.get("total_objects", 0)
        current_timestamp = self._get_current_timestamp_str(stream_info, frame_id=frame_id)
        # Bound the rolling severity history to the last 900 entries.
        self._ascending_alert_list = self._ascending_alert_list[-900:] if len(self._ascending_alert_list) > 900 else self._ascending_alert_list

        # Mirror the alert configuration into the incident payload.
        alert_settings=[]
        if config.alert_config and hasattr(config.alert_config, 'alert_type'):
            alert_settings.append({
                "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                "incident_category": self.CASE_TYPE,
                "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
                "ascending": True,
                "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                            getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                }
            })

        if total_activity > 0:
            # Determine event level based on thresholds.
            level = "info"
            intensity = 5.0
            start_timestamp = self._get_start_timestamp_str(stream_info)
            # Incident end-timestamp state machine:
            #   N/A -> "Incident still active" when an incident starts;
            #   "still active" -> concrete timestamp once the recent severity
            #   history (last 15 entries) averages below 1.5; otherwise reset.
            if start_timestamp and self.current_incident_end_timestamp=='N/A':
                self.current_incident_end_timestamp = 'Incident still active'
            elif start_timestamp and self.current_incident_end_timestamp=='Incident still active':
                if len(self._ascending_alert_list) >= 15 and sum(self._ascending_alert_list[-15:]) / 15 < 1.5:
                    self.current_incident_end_timestamp = current_timestamp
            elif self.current_incident_end_timestamp!='Incident still active' and self.current_incident_end_timestamp!='N/A':
                self.current_incident_end_timestamp = 'N/A'

            if config.alert_config and config.alert_config.count_thresholds:
                # Scale intensity against the configured "all" threshold
                # (default 10), capped at 10.0.
                threshold = config.alert_config.count_thresholds.get("all", 10)
                intensity = min(10.0, (total_activity / threshold) * 10)

                if intensity >= 9:
                    level = "critical"
                    self._ascending_alert_list.append(3)
                elif intensity >= 7:
                    level = "significant"
                    self._ascending_alert_list.append(2)
                elif intensity >= 5:
                    level = "medium"
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    self._ascending_alert_list.append(0)
            else:
                # No thresholds configured: fall back to fixed count bands.
                if total_activity > 30:
                    level = "critical"
                    intensity = 10.0
                    self._ascending_alert_list.append(3)
                elif total_activity > 25:
                    level = "significant"
                    intensity = 9.0
                    self._ascending_alert_list.append(2)
                elif total_activity > 15:
                    level = "medium"
                    intensity = 7.0
                    self._ascending_alert_list.append(1)
                else:
                    level = "low"
                    intensity = min(10.0, total_activity / 3.0)
                    self._ascending_alert_list.append(0)

            # Generate human text in new format.
            human_text_lines = [f"INCIDENTS DETECTED @ {current_timestamp}:"]
            human_text_lines.append(f"\tSeverity Level: {(self.CASE_TYPE,level)}")
            human_text = "\n".join(human_text_lines)

            # Main activity counting incident.
            event= self.create_incident(incident_id=self.CASE_TYPE+'_'+str(frame_id), incident_type=self.CASE_TYPE,
                        severity_level=level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
                        start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
                        level_settings= {"low": 1, "medium": 3, "significant":4, "critical": 7})
            incidents.append(event)
        else:
            # No activity: record a zero severity sample and an empty incident.
            self._ascending_alert_list.append(0)
            incidents.append({})

        # Add zone-specific events if applicable.
        # NOTE(review): this branch reads human_text_lines, intensity and
        # start_timestamp, which are only bound in the total_activity > 0
        # branch above -- if zone_analysis is ever non-empty while
        # total_activity == 0 this raises NameError. Currently unreachable
        # because callers always pass zone_analysis={}; confirm before
        # enabling zone analysis.
        if zone_analysis:
            human_text_lines.append(f"\t- ZONE EVENTS:")
            for zone_name, zone_count in zone_analysis.items():
                zone_total = self._robust_zone_total(zone_count)
                if zone_total > 0:
                    zone_intensity = min(10.0, zone_total / 5.0)
                    zone_level = "info"
                    if intensity >= 9:
                        level = "critical"
                        self._ascending_alert_list.append(3)
                    elif intensity >= 7:
                        level = "significant"
                        self._ascending_alert_list.append(2)
                    elif intensity >= 5:
                        level = "medium"
                        self._ascending_alert_list.append(1)
                    else:
                        level = "low"
                        self._ascending_alert_list.append(0)

                    human_text_lines.append(f"\t\t- Zone name: {zone_name}")
                    human_text_lines.append(f"\t\t\t- Total activity in zone: {zone_total}")
                    # Main human activity incident.
                    event= self.create_incident(incident_id=self.CASE_TYPE+'_'+'zone_'+zone_name+str(frame_id), incident_type=self.CASE_TYPE,
                                severity_level=zone_level, human_text=human_text, camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
                                start_time=start_timestamp, end_time=self.current_incident_end_timestamp,
                                level_settings= {"low": 1, "medium": 3, "significant":4, "critical": 7})
                    incidents.append(event)
        return incidents
830
-
831
- def _generate_tracking_stats(self, counting_summary: Dict, zone_analysis: Dict, config: HumanActivityConfig, frame_id: str, alerts: Any=[], stream_info: Optional[Dict[str, Any]] = None) -> List[Dict]:
832
- """Generate tracking stats using standardized methods."""
833
-
834
- total_activity = counting_summary.get("total_objects", 0)
835
-
836
- # Get total count from cached tracking state
837
- total_unique_count = self.get_total_count()
838
- current_frame_count = self.get_current_frame_count()
839
-
840
- # Get camera info using standardized method
841
- camera_info = self.get_camera_info_from_stream(stream_info)
842
-
843
- # Build total_counts using standardized method
844
- total_counts = []
845
- per_category_total = {}
846
-
847
- for category in config.activity_categories:
848
- # Get count for this category from zone analysis or counting summary
849
- category_total_count = 0
850
- if zone_analysis:
851
- for zone_data in zone_analysis.values():
852
- if isinstance(zone_data, dict) and "total_count" in zone_data:
853
- category_total_count += zone_data.get("total_count", 0)
854
- elif isinstance(zone_data, dict):
855
- # Sum up zone counts
856
- for v in zone_data.values():
857
- if isinstance(v, int):
858
- category_total_count += v
859
- elif isinstance(v, list):
860
- category_total_count += len(v)
861
- elif isinstance(zone_data, (int, list)):
862
- category_total_count += len(zone_data) if isinstance(zone_data, list) else zone_data
863
- else:
864
- # Use total unique count from tracking state
865
- category_total_count = total_unique_count
866
-
867
- if category_total_count > 0:
868
- total_counts.append(self.create_count_object(category, category_total_count))
869
- per_category_total[category] = category_total_count
870
-
871
- # Build current_counts using standardized method
872
- current_counts = []
873
- per_category_current = {}
874
-
875
- for category in config.activity_categories:
876
- # Get current count for this category
877
- category_current_count = 0
878
- if zone_analysis:
879
- for zone_data in zone_analysis.values():
880
- if isinstance(zone_data, dict) and "current_count" in zone_data:
881
- category_current_count += zone_data.get("current_count", 0)
882
- elif isinstance(zone_data, dict):
883
- # For current frame, look at detections count
884
- for v in zone_data.values():
885
- if isinstance(v, int):
886
- category_current_count += v
887
- elif isinstance(v, list):
888
- category_current_count += len(v)
889
- elif isinstance(zone_data, (int, list)):
890
- category_current_count += len(zone_data) if isinstance(zone_data, list) else zone_data
891
- else:
892
- # Count detections in current frame for this category
893
- detections = counting_summary.get("detections", [])
894
- category_current_count = sum(1 for d in detections if d.get("category") == category)
895
-
896
- if category_current_count > 0 or total_activity > 0: # Include even if 0 when there are activity
897
- current_counts.append(self.create_count_object(category, category_current_count))
898
- per_category_current[category] = category_current_count
899
-
900
- # Prepare detections using standardized method (without confidence and track_id)
901
- detections = []
902
- for detection in counting_summary.get("detections", []):
903
- bbox = detection.get("bounding_box", {})
904
- category = detection.get("category", "unknown")
905
- # Include segmentation if available (like in eg.json)
906
- if detection.get("masks"):
907
- segmentation = detection.get("masks", [])
908
- detection_obj = self.create_detection_object(category, bbox, segmentation = segmentation)
909
- elif detection.get("segmentation"):
910
- segmentation = detection.get("segmentation")
911
- detection_obj = self.create_detection_object(category, bbox, segmentation = segmentation)
912
- elif detection.get("mask"):
913
- segmentation = detection.get("mask")
914
- detection_obj = self.create_detection_object(category, bbox, segmentation = segmentation)
915
- else:
916
- detection_obj = self.create_detection_object(category, bbox)
917
- detections.append(detection_obj)
918
-
919
- # Build alerts and alert_settings arrays
920
- alert_settings = []
921
- if config.alert_config and hasattr(config.alert_config, 'alert_type'):
922
- alert_settings.append({
923
- "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
924
- "incident_category": self.CASE_TYPE,
925
- "threshold_level": config.alert_config.count_thresholds if hasattr(config.alert_config, 'count_thresholds') else {},
926
- "ascending": True,
927
- "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
928
- getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
929
- }
930
- })
931
- if zone_analysis:
932
- human_text_lines=[]
933
- current_timestamp = self._get_current_timestamp_str(stream_info, frame_id=frame_id)
934
- start_timestamp = self._get_start_timestamp_str(stream_info)
935
- human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
936
- def robust_zone_total(zone_count):
937
- if isinstance(zone_count, dict):
938
- total = 0
939
- for v in zone_count.values():
940
- if isinstance(v, int):
941
- total += v
942
- elif isinstance(v, list) and total == 0:
943
- total += len(v)
944
- return total
945
- elif isinstance(zone_count, list):
946
- return len(zone_count)
947
- elif isinstance(zone_count, int):
948
- return zone_count
949
- else:
950
- return 0
951
- human_text_lines.append(f"\t- Activity Detected: {total_activity}")
952
- human_text_lines.append("")
953
- human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}:")
954
-
955
- for zone_name, zone_count in zone_analysis.items():
956
- zone_total = robust_zone_total(zone_count)
957
- human_text_lines.append(f"\t- Zone name: {zone_name}")
958
- human_text_lines.append(f"\t\t- Total count in zone: {zone_total-1}")
959
-
960
- human_text_lines.append(f"\t- Total unique activity in the scene: {total_unique_count}")
961
- if alerts:
962
- for alert in alerts:
963
- human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
964
- else:
965
- human_text_lines.append("Alerts: None")
966
- human_text = "\n".join(human_text_lines)
967
- else:
968
- human_text = self._generate_human_text_for_tracking(total_activity, total_unique_count, config, frame_id, alerts, stream_info)
969
-
970
- # Create high precision timestamps for input_timestamp and reset_timestamp
971
- high_precision_start_timestamp = self._get_current_timestamp_str(stream_info, precision=True, frame_id=frame_id)
972
- high_precision_reset_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
973
- # Create tracking_stat using standardized method
974
- tracking_stat = self.create_tracking_stats(
975
- total_counts, current_counts, detections, human_text, camera_info, alerts, alert_settings, start_time=high_precision_start_timestamp, reset_time=high_precision_reset_timestamp
976
- )
977
- print(tracking_stat)
978
- return [tracking_stat]
979
-
980
- def _generate_human_text_for_tracking(self, total_activity: int, total_unique_count: int, config: HumanActivityConfig, frame_id: str, alerts:Any=[], stream_info: Optional[Dict[str, Any]] = None) -> str:
981
- """Generate human-readable text for tracking stats in old format."""
982
- from datetime import datetime, timezone
983
-
984
- human_text_lines = []
985
- current_timestamp = self._get_current_timestamp_str(stream_info, precision=True, frame_id=frame_id)
986
- start_timestamp = self._get_start_timestamp_str(stream_info, precision=True)
987
-
988
- human_text_lines.append(f"CURRENT FRAME @ {current_timestamp}:")
989
- human_text_lines.append(f"\t- Activity Detected: {total_activity}")
990
-
991
- human_text_lines.append("")
992
- human_text_lines.append(f"TOTAL SINCE @ {start_timestamp}:")
993
- human_text_lines.append(f"\t- Total unique activity count: {total_unique_count}")
994
-
995
- if alerts:
996
- for alert in alerts:
997
- human_text_lines.append(f"Alerts: {alert.get('settings', {})} sent @ {current_timestamp}")
998
- else:
999
- human_text_lines.append("Alerts: None")
1000
-
1001
- return "\n".join(human_text_lines)
1002
-
1003
- def _check_alerts(self, counting_summary: Dict, zone_analysis: Dict,
1004
- config: HumanActivityConfig, frame_id: str) -> List[Dict]:
1005
- """Check for alert conditions and generate alerts."""
1006
- def get_trend(data, lookback=900, threshold=0.6):
1007
- '''
1008
- Determine if the trend is ascending or descending based on actual value progression.
1009
- Now works with values 0,1,2,3 (not just binary).
1010
- '''
1011
- window = data[-lookback:] if len(data) >= lookback else data
1012
- if len(window) < 2:
1013
- return True # not enough data to determine trend
1014
- increasing = 0
1015
- total = 0
1016
- for i in range(1, len(window)):
1017
- if window[i] >= window[i - 1]:
1018
- increasing += 1
1019
- total += 1
1020
- ratio = increasing / total
1021
- if ratio >= threshold:
1022
- return True
1023
- elif ratio <= (1 - threshold):
1024
- return False
1025
- alerts = []
1026
-
1027
- if not config.alert_config:
1028
- return alerts
1029
-
1030
- total_activity = counting_summary.get("total_objects", 0)
1031
-
1032
- # Count threshold alerts
1033
- if hasattr(config.alert_config, 'count_thresholds') and config.alert_config.count_thresholds:
1034
-
1035
- for category, threshold in config.alert_config.count_thresholds.items():
1036
- if category == "all" and total_activity >= threshold:
1037
-
1038
- alerts.append({
1039
- "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
1040
- "alert_id": "alert_"+category+'_'+frame_id,
1041
- "incident_category": self.CASE_TYPE,
1042
- "threshold_level": threshold,
1043
- "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
1044
- "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
1045
- getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
1046
- }
1047
- })
1048
- elif category in counting_summary.get("by_category", {}):
1049
- count = counting_summary["by_category"][category]
1050
-
1051
- if count >= threshold:
1052
- alerts.append({
1053
- "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
1054
- "alert_id": "alert_"+category+'_'+frame_id,
1055
- "incident_category": self.CASE_TYPE,
1056
- "threshold_level": threshold,
1057
- "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
1058
- "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
1059
- getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
1060
- }
1061
- })
1062
- else:
1063
- pass
1064
-
1065
- # Zone occupancy threshold alerts
1066
- if config.alert_config.occupancy_thresholds:
1067
- for zone_name, threshold in config.alert_config.occupancy_thresholds.items():
1068
- if zone_name in zone_analysis:
1069
- # Calculate zone_count robustly (supports int, list, dict values)
1070
- print('ZONEEE',zone_name, zone_analysis[zone_name])
1071
- zone_count = self._robust_zone_total(zone_analysis[zone_name])
1072
- if zone_count >= threshold:
1073
- alerts.append({
1074
- "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
1075
- "alert_id": f"alert_zone_{zone_name}_{frame_id}",
1076
- "incident_category": f"{self.CASE_TYPE}_{zone_name}",
1077
- "threshold_level": threshold,
1078
- "ascending": get_trend(self._ascending_alert_list, lookback=900, threshold=0.8),
1079
- "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
1080
- getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
1081
- }
1082
- })
1083
-
1084
- return alerts
1085
-
1086
- def _generate_business_analytics(self, counting_summary: Dict, zone_analysis: Dict, config: HumanActivityConfig, frame_id: str, stream_info: Optional[Dict[str, Any]] = None, is_empty=False) -> List[Dict]:
1087
- """Generate standardized business analytics for the agg_summary structure."""
1088
- if is_empty:
1089
- return []
1090
- business_analytics = []
1091
-
1092
- total_activity = counting_summary.get("total_objects", 0)
1093
-
1094
- # Get camera info using standardized method
1095
- camera_info = self.get_camera_info_from_stream(stream_info)
1096
-
1097
- if total_activity > 0 or config.enable_analytics:
1098
- # Calculate analytics statistics
1099
- analytics_stats = {
1100
- "activity_count": total_activity,
1101
- "unique_activity_count": self.get_total_count(),
1102
- "current_frame_count": self.get_current_frame_count()
1103
- }
1104
-
1105
- # Add zone analytics if available
1106
- if zone_analysis:
1107
- zone_stats = {}
1108
- for zone_name, zone_count in zone_analysis.items():
1109
- zone_total = self._robust_zone_total(zone_count)
1110
- zone_stats[f"{zone_name}_occupancy"] = zone_total
1111
- analytics_stats.update(zone_stats)
1112
-
1113
- # Generate human text for analytics
1114
- current_timestamp = self._get_current_timestamp_str(stream_info, frame_id=frame_id)
1115
- start_timestamp = self._get_start_timestamp_str(stream_info)
1116
-
1117
- analytics_human_text = self.generate_analytics_human_text(
1118
- "human_activity_analytics", analytics_stats, current_timestamp, start_timestamp
1119
- )
1120
-
1121
- # Create business analytics using standardized method
1122
- analytics = self.create_business_analytics(
1123
- "human_activity_analytics", analytics_stats, analytics_human_text, camera_info
1124
- )
1125
- business_analytics.append(analytics)
1126
-
1127
- return business_analytics
1128
-
1129
- def _generate_summary(self, summary: dict, incidents: List, tracking_stats: List, business_analytics: List, alerts: List) -> List[str]:
1130
- """
1131
- Generate a human_text string for the tracking_stat, incident, business analytics and alerts.
1132
- """
1133
- lines = {}
1134
- lines["Application Name"] = self.CASE_TYPE
1135
- lines["Application Version"] = self.CASE_VERSION
1136
- if len(incidents) > 0:
1137
- lines["Incidents:"] = f"\n\t{incidents[0].get('human_text', 'No incidents detected')}\n"
1138
- if len(tracking_stats) > 0:
1139
- lines["Tracking Statistics:"] = f"\t{tracking_stats[0].get('human_text', 'No tracking statistics detected')}\n"
1140
- if len(business_analytics) > 0:
1141
- lines["Business Analytics:"] = f"\t{business_analytics[0].get('human_text', 'No business analytics detected')}\n"
1142
-
1143
- if len(incidents) == 0 and len(tracking_stats) == 0 and len(business_analytics) == 0:
1144
- lines["Summary"] = "No Summary Data"
1145
-
1146
- return [lines]
1147
-
1148
- def _calculate_metrics(self, counting_summary: Dict, zone_analysis: Dict,
1149
- config: HumanActivityConfig, context: ProcessingContext) -> Dict[str, Any]:
1150
- """Calculate detailed metrics for analytics."""
1151
- total_activity = counting_summary.get("total_objects", 0)
1152
-
1153
- metrics = {
1154
- "total_activity": total_activity,
1155
- "processing_time": context.processing_time or 0.0,
1156
- "input_format": context.input_format.value,
1157
- "confidence_threshold": config.confidence_threshold,
1158
- "zones_analyzed": len(zone_analysis),
1159
- "detection_rate": 0.0,
1160
- "coverage_percentage": 0.0
1161
- }
1162
-
1163
- # Calculate detection rate
1164
- if config.time_window_minutes and config.time_window_minutes > 0:
1165
- metrics["detection_rate"] = (total_activity / config.time_window_minutes) * 60
1166
-
1167
- # Calculate zone coverage
1168
- if zone_analysis and total_activity > 0:
1169
- activity_in_zones = 0
1170
- for zone_counts in zone_analysis.values():
1171
- if isinstance(zone_counts, dict):
1172
- for v in zone_counts.values():
1173
- if isinstance(v, int):
1174
- activity_in_zones += v
1175
- elif isinstance(v, list):
1176
- activity_in_zones += len(v)
1177
- elif isinstance(zone_counts, list):
1178
- activity_in_zones += len(zone_counts)
1179
- elif isinstance(zone_counts, int):
1180
- activity_in_zones += zone_counts
1181
- metrics["coverage_percentage"] = (activity_in_zones / total_activity) * 100
1182
-
1183
- # Unique tracking metrics
1184
- if config.enable_unique_counting:
1185
- unique_count = self._count_unique_tracks(counting_summary, config)
1186
- if unique_count is not None:
1187
- metrics["unique_activity"] = unique_count
1188
- metrics["tracking_efficiency"] = (unique_count / total_activity) * 100 if total_activity > 0 else 0
1189
-
1190
- # Per-zone metrics
1191
- if zone_analysis:
1192
- zone_metrics = {}
1193
- for zone_name, zone_counts in zone_analysis.items():
1194
- # Robustly sum counts, handling dicts with int or list values
1195
- if isinstance(zone_counts, dict):
1196
- zone_total = 0
1197
- for v in zone_counts.values():
1198
- if isinstance(v, int):
1199
- zone_total += v
1200
- elif isinstance(v, list):
1201
- zone_total += len(v)
1202
- elif isinstance(zone_counts, list):
1203
- zone_total = len(zone_counts)
1204
- elif isinstance(zone_counts, int):
1205
- zone_total = zone_counts
1206
- else:
1207
- zone_total = 0
1208
- zone_metrics[zone_name] = {
1209
- "count": zone_total,
1210
- "percentage": (zone_total / total_activity) * 100 if total_activity > 0 else 0
1211
- }
1212
- metrics["zone_metrics"] = zone_metrics
1213
-
1214
- return metrics
1215
-
1216
- def _extract_predictions(self, data: Any) -> List[Dict[str, Any]]:
1217
- """Extract predictions from processed data for API compatibility."""
1218
- predictions = []
1219
-
1220
- try:
1221
- if isinstance(data, list):
1222
- # Detection format
1223
- for item in data:
1224
- prediction = self._normalize_prediction(item)
1225
- if prediction:
1226
- predictions.append(prediction)
1227
-
1228
- elif isinstance(data, dict):
1229
- # Frame-based or tracking format
1230
- for frame_id, items in data.items():
1231
- if isinstance(items, list):
1232
- for item in items:
1233
- prediction = self._normalize_prediction(item)
1234
- if prediction:
1235
- prediction["frame_id"] = frame_id
1236
- predictions.append(prediction)
1237
-
1238
- except Exception as e:
1239
- self.logger.warning(f"Failed to extract predictions: {str(e)}")
1240
-
1241
- return predictions
1242
-
1243
- def _normalize_prediction(self, item: Dict[str, Any]) -> Dict[str, Any]:
1244
- """Normalize a single prediction item."""
1245
- if not isinstance(item, dict):
1246
- return {}
1247
-
1248
- return {
1249
- "category": item.get("category", item.get("class", "unknown")),
1250
- "confidence": item.get("confidence", item.get("score", 0.0)),
1251
- "bounding_box": item.get("bounding_box", item.get("bbox", {})),
1252
- "track_id": item.get("track_id")
1253
- }
1254
-
1255
- def _get_detections_with_confidence(self, counting_summary: Dict) -> List[Dict]:
1256
- """Extract detection items with confidence scores."""
1257
- return counting_summary.get("detections", [])
1258
-
1259
- def _count_unique_tracks(self, counting_summary: Dict, config: HumanActivityConfig = None) -> Optional[int]:
1260
- """Count unique tracks if tracking is enabled."""
1261
- # Always update tracking state regardless of enable_unique_counting setting
1262
- self._update_tracking_state(counting_summary)
1263
-
1264
- # Only return the count if unique counting is enabled
1265
- if config and config.enable_unique_counting:
1266
- return self._total_count if self._total_count > 0 else None
1267
- else:
1268
- return None
1269
-
1270
- def _update_tracking_state(self, counting_summary: Dict) -> None:
1271
- """Update tracking state with current frame data (always called)."""
1272
- detections = self._get_detections_with_confidence(counting_summary)
1273
-
1274
- if not detections:
1275
- return
1276
-
1277
- # Map raw tracker IDs to canonical IDs to avoid duplicate counting
1278
- current_frame_tracks: Set[Any] = set()
1279
-
1280
- for detection in detections:
1281
- raw_track_id = detection.get("track_id")
1282
- if raw_track_id is None:
1283
- continue
1284
-
1285
- bbox = detection.get("bounding_box", detection.get("bbox"))
1286
- if not bbox:
1287
- continue
1288
-
1289
- canonical_id = self._merge_or_register_track(raw_track_id, bbox)
1290
-
1291
- # Propagate canonical ID so that downstream logic (including zone
1292
- # tracking and event generation) operates on the de-duplicated ID.
1293
- detection["track_id"] = canonical_id
1294
- current_frame_tracks.add(canonical_id)
1295
-
1296
- # Update total track IDs with new canonical IDs from current frame
1297
- old_total_count = len(self._total_track_ids)
1298
- self._total_track_ids.update(current_frame_tracks)
1299
- self._current_frame_track_ids = current_frame_tracks
1300
-
1301
- # Update total count
1302
- self._total_count = len(self._total_track_ids)
1303
- self._last_update_time = time.time()
1304
-
1305
- # Log tracking state updates
1306
- if len(current_frame_tracks) > 0:
1307
- new_tracks = current_frame_tracks - (self._total_track_ids - current_frame_tracks)
1308
- if new_tracks:
1309
- self.logger.debug(
1310
- f"Tracking state updated: {len(new_tracks)} new canonical track IDs added, total unique tracks: {self._total_count}")
1311
- else:
1312
- self.logger.debug(
1313
- f"Tracking state updated: {len(current_frame_tracks)} current frame canonical tracks, total unique tracks: {self._total_count}")
1314
-
1315
- def get_total_count(self) -> int:
1316
- """Get the total count of unique activity tracked across all calls."""
1317
- return self._total_count
1318
-
1319
- def get_current_frame_count(self) -> int:
1320
- """Get the count of activity in the current frame."""
1321
- return len(self._current_frame_track_ids)
1322
-
1323
- def get_total_frames_processed(self) -> int:
1324
- """Get the total number of frames processed across all calls."""
1325
- return self._total_frame_counter
1326
-
1327
- def set_global_frame_offset(self, offset: int) -> None:
1328
- """Set the global frame offset for video chunk processing."""
1329
- self._global_frame_offset = offset
1330
- self.logger.info(f"Global frame offset set to: {offset}")
1331
-
1332
- def get_global_frame_offset(self) -> int:
1333
- """Get the current global frame offset."""
1334
- return self._global_frame_offset
1335
-
1336
- def update_global_frame_offset(self, frames_in_chunk: int) -> None:
1337
- """Update global frame offset after processing a chunk."""
1338
- old_offset = self._global_frame_offset
1339
- self._global_frame_offset += frames_in_chunk
1340
- self.logger.info(f"Global frame offset updated: {old_offset} -> {self._global_frame_offset} (added {frames_in_chunk} frames)")
1341
-
1342
- def get_global_frame_id(self, local_frame_id: str) -> str:
1343
- """Convert local frame ID to global frame ID."""
1344
- try:
1345
- # Try to convert local_frame_id to integer
1346
- local_frame_num = int(local_frame_id)
1347
- global_frame_num = local_frame_num #+ self._global_frame_offset
1348
- return str(global_frame_num)
1349
- except (ValueError, TypeError):
1350
- # If local_frame_id is not a number (e.g., timestamp), return as is
1351
- return local_frame_id
1352
-
1353
- def get_track_ids_info(self) -> Dict[str, Any]:
1354
- """Get detailed information about track IDs."""
1355
- return {
1356
- "total_count": self._total_count,
1357
- "current_frame_count": len(self._current_frame_track_ids),
1358
- "total_unique_track_ids": len(self._total_track_ids),
1359
- "current_frame_track_ids": list(self._current_frame_track_ids),
1360
- "last_update_time": self._last_update_time,
1361
- "total_frames_processed": self._total_frame_counter
1362
- }
1363
-
1364
- def get_tracking_debug_info(self) -> Dict[str, Any]:
1365
- """Get detailed debugging information about tracking state."""
1366
- return {
1367
- "total_track_ids": list(self._total_track_ids),
1368
- "current_frame_track_ids": list(self._current_frame_track_ids),
1369
- "total_count": self._total_count,
1370
- "current_frame_count": len(self._current_frame_track_ids),
1371
- "total_frames_processed": self._total_frame_counter,
1372
- "last_update_time": self._last_update_time,
1373
- "zone_current_track_ids": {zone: list(tracks) for zone, tracks in self._zone_current_track_ids.items()},
1374
- "zone_total_track_ids": {zone: list(tracks) for zone, tracks in self._zone_total_track_ids.items()},
1375
- "zone_current_counts": self._zone_current_counts.copy(),
1376
- "zone_total_counts": self._zone_total_counts.copy(),
1377
- "global_frame_offset": self._global_frame_offset,
1378
- "frames_in_current_chunk": self._frames_in_current_chunk
1379
- }
1380
-
1381
- def get_frame_info(self) -> Dict[str, Any]:
1382
- """Get detailed information about frame processing and global frame offset."""
1383
- return {
1384
- "global_frame_offset": self._global_frame_offset,
1385
- "total_frames_processed": self._total_frame_counter,
1386
- "frames_in_current_chunk": self._frames_in_current_chunk,
1387
- "next_global_frame": self._global_frame_offset + self._frames_in_current_chunk
1388
- }
1389
-
1390
- def reset_tracking_state(self) -> None:
1391
- """
1392
- WARNING: This completely resets ALL tracking data including cumulative totals!
1393
-
1394
- This should ONLY be used when:
1395
- - Starting a completely new tracking session
1396
- - Switching to a different video/stream
1397
- - Manual reset requested by user
1398
-
1399
- For clearing expired/stale tracks, use clear_current_frame_tracking() instead.
1400
- """
1401
- self._total_track_ids.clear()
1402
- self._current_frame_track_ids.clear()
1403
- self._total_count = 0
1404
- self._last_update_time = time.time()
1405
-
1406
- # Clear zone tracking data
1407
- self._zone_current_track_ids.clear()
1408
- self._zone_total_track_ids.clear()
1409
- self._zone_current_counts.clear()
1410
- self._zone_total_counts.clear()
1411
-
1412
- # Reset frame counter and global frame offset
1413
- self._total_frame_counter = 0
1414
- self._global_frame_offset = 0
1415
- self._frames_in_current_chunk = 0
1416
-
1417
- # Clear aliasing information
1418
- self._canonical_tracks.clear()
1419
- self._track_aliases.clear()
1420
- self._tracking_start_time = None
1421
-
1422
- self.logger.warning(" FULL tracking state reset - all track IDs, zone data, frame counter, and global frame offset cleared. Cumulative totals lost!")
1423
-
1424
- def clear_current_frame_tracking(self) -> int:
1425
- """
1426
- MANUAL USE ONLY: Clear only current frame tracking data while preserving cumulative totals.
1427
-
1428
- This method is NOT called automatically anywhere in the code.
1429
-
1430
- This is the SAFE method to use for manual clearing of stale/expired current frame data.
1431
- The cumulative total (self._total_count) is always preserved.
1432
-
1433
- In streaming scenarios, you typically don't need to call this at all.
1434
-
1435
- Returns:
1436
- Number of current frame tracks cleared
1437
- """
1438
- old_current_count = len(self._current_frame_track_ids)
1439
- self._current_frame_track_ids.clear()
1440
-
1441
- # Clear current zone tracking (but keep total zone tracking)
1442
- cleared_zone_tracks = 0
1443
- for zone_name in list(self._zone_current_track_ids.keys()):
1444
- cleared_zone_tracks += len(self._zone_current_track_ids[zone_name])
1445
- self._zone_current_track_ids[zone_name].clear()
1446
- self._zone_current_counts[zone_name] = 0
1447
-
1448
- # Update timestamp
1449
- self._last_update_time = time.time()
1450
-
1451
- self.logger.info(f"Cleared {old_current_count} current frame tracks and {cleared_zone_tracks} zone current tracks. Cumulative total preserved: {self._total_count}")
1452
- return old_current_count
1453
-
1454
- def reset_frame_counter(self) -> None:
1455
- """Reset only the frame counter."""
1456
- old_count = self._total_frame_counter
1457
- self._total_frame_counter = 0
1458
- self.logger.info(f"Frame counter reset from {old_count} to 0")
1459
-
1460
- def clear_expired_tracks(self, max_age_seconds: float = 300.0) -> int:
1461
- """
1462
- MANUAL USE ONLY: Clear current frame tracking data if no updates for a while.
1463
-
1464
- This method is NOT called automatically anywhere in the code.
1465
- It's provided as a utility function for manual cleanup if needed.
1466
-
1467
- In streaming scenarios, you typically don't need to call this at all.
1468
- The cumulative total should keep growing as new unique activity are detected.
1469
-
1470
- This method only clears current frame tracking data while preserving
1471
- the cumulative total count. The cumulative total should never decrease.
1472
-
1473
- Args:
1474
- max_age_seconds: Maximum age in seconds before clearing current frame tracks
1475
-
1476
- Returns:
1477
- Number of current frame tracks cleared
1478
- """
1479
- current_time = time.time()
1480
- if current_time - self._last_update_time > max_age_seconds:
1481
- # Use the safe method that preserves cumulative totals
1482
- cleared_count = self.clear_current_frame_tracking()
1483
- self.logger.info(f"Manual cleanup: cleared {cleared_count} expired current frame tracks (age > {max_age_seconds}s)")
1484
- return cleared_count
1485
- return 0
1486
-
1487
- def _update_zone_tracking(self, zone_analysis: Dict[str, Dict[str, int]], detections: List[Dict], config: HumanActivityConfig) -> Dict[str, Dict[str, Any]]:
1488
- """
1489
- Update zone tracking with current frame data.
1490
-
1491
- Args:
1492
- zone_analysis: Current zone analysis results
1493
- detections: List of detections with track IDs
1494
- config: Human activity configuration with zone polygons
1495
-
1496
- Returns:
1497
- Enhanced zone analysis with tracking information
1498
- """
1499
- if not zone_analysis or not config.zone_config or not config.zone_config.zones:
1500
- return {}
1501
-
1502
- enhanced_zone_analysis = {}
1503
- zones = config.zone_config.zones
1504
-
1505
- # Get current frame track IDs in each zone
1506
- current_frame_zone_tracks = {}
1507
-
1508
- # Initialize zone tracking for all zones
1509
- for zone_name in zones.keys():
1510
- current_frame_zone_tracks[zone_name] = set()
1511
- if zone_name not in self._zone_current_track_ids:
1512
- self._zone_current_track_ids[zone_name] = set()
1513
- if zone_name not in self._zone_total_track_ids:
1514
- self._zone_total_track_ids[zone_name] = set()
1515
-
1516
- # Check each detection against each zone
1517
- for detection in detections:
1518
- track_id = detection.get("track_id")
1519
- if track_id is None:
1520
- continue
1521
-
1522
- # Get detection bbox
1523
- bbox = detection.get("bounding_box", detection.get("bbox"))
1524
- if not bbox:
1525
- continue
1526
-
1527
- # Get detection center point
1528
- center_point = get_bbox_bottom25_center(bbox) #get_bbox_center(bbox)
1529
-
1530
- # Check which zone this detection is in using actual zone polygons
1531
- for zone_name, zone_polygon in zones.items():
1532
- # Convert polygon points to tuples for point_in_polygon function
1533
- # zone_polygon format: [[x1, y1], [x2, y2], [x3, y3], ...]
1534
- polygon_points = [(point[0], point[1]) for point in zone_polygon]
1535
-
1536
- # Check if detection center is inside the zone polygon using ray casting algorithm
1537
- if point_in_polygon(center_point, polygon_points):
1538
- current_frame_zone_tracks[zone_name].add(track_id)
1539
-
1540
- # Update zone tracking for each zone
1541
- for zone_name, zone_counts in zone_analysis.items():
1542
- # Get current frame tracks for this zone
1543
- current_tracks = current_frame_zone_tracks.get(zone_name, set())
1544
-
1545
- # Update current zone tracks
1546
- self._zone_current_track_ids[zone_name] = current_tracks
1547
-
1548
- # Update total zone tracks (accumulate all track IDs that have been in this zone)
1549
- self._zone_total_track_ids[zone_name].update(current_tracks)
1550
-
1551
- # Update counts
1552
- self._zone_current_counts[zone_name] = len(current_tracks)
1553
- self._zone_total_counts[zone_name] = len(self._zone_total_track_ids[zone_name])
1554
-
1555
- # Create enhanced zone analysis
1556
- enhanced_zone_analysis[zone_name] = {
1557
- "current_count": self._zone_current_counts[zone_name],
1558
- "total_count": self._zone_total_counts[zone_name],
1559
- "current_track_ids": list(current_tracks),
1560
- "total_track_ids": list(self._zone_total_track_ids[zone_name]),
1561
- "original_counts": zone_counts # Preserve original zone counts
1562
- }
1563
-
1564
- return enhanced_zone_analysis
1565
-
1566
- def get_zone_tracking_info(self) -> Dict[str, Dict[str, Any]]:
1567
- """Get detailed zone tracking information."""
1568
- return {
1569
- zone_name: {
1570
- "current_count": self._zone_current_counts.get(zone_name, 0),
1571
- "total_count": self._zone_total_counts.get(zone_name, 0),
1572
- "current_track_ids": list(self._zone_current_track_ids.get(zone_name, set())),
1573
- "total_track_ids": list(self._zone_total_track_ids.get(zone_name, set()))
1574
- }
1575
- for zone_name in set(self._zone_current_counts.keys()) | set(self._zone_total_counts.keys())
1576
- }
1577
-
1578
- def get_zone_current_count(self, zone_name: str) -> int:
1579
- """Get current count of activity in a specific zone."""
1580
- return self._zone_current_counts.get(zone_name, 0)
1581
-
1582
- def get_zone_total_count(self, zone_name: str) -> int:
1583
- """Get total count of activity who have been in a specific zone."""
1584
- return self._zone_total_counts.get(zone_name, 0)
1585
-
1586
- def get_all_zone_counts(self) -> Dict[str, Dict[str, int]]:
1587
- """Get current and total counts for all zones."""
1588
- return {
1589
- zone_name: {
1590
- "current": self._zone_current_counts.get(zone_name, 0),
1591
- "total": self._zone_total_counts.get(zone_name, 0)
1592
- }
1593
- for zone_name in set(self._zone_current_counts.keys()) | set(self._zone_total_counts.keys())
1594
- }
1595
-
1596
- def _format_timestamp_for_stream(self, timestamp: float) -> str:
1597
- """Format timestamp for streams (YYYY:MM:DD HH:MM:SS format)."""
1598
- dt = datetime.fromtimestamp(float(timestamp), tz=timezone.utc)
1599
- return dt.strftime('%Y:%m:%d %H:%M:%S')
1600
-
1601
- def _format_timestamp_for_video(self, timestamp: float) -> str:
1602
- """Format timestamp for video chunks (HH:MM:SS.ms format)."""
1603
- hours = int(timestamp // 3600)
1604
- minutes = int((timestamp % 3600) // 60)
1605
- seconds = round(float(timestamp % 60),2)
1606
- return f"{hours:02d}:{minutes:02d}:{seconds:.1f}"
1607
-
1608
- def _get_current_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False, frame_id: str=None) -> str:
1609
- """Get formatted current timestamp based on stream type."""
1610
- if not stream_info:
1611
- return "00:00:00.00"
1612
- # is_video_chunk = stream_info.get("input_settings", {}).get("is_video_chunk", False)
1613
- if precision:
1614
- if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
1615
- if frame_id:
1616
- start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
1617
- else:
1618
- start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
1619
- stream_time_str = self._format_timestamp_for_video(start_time)
1620
- return stream_time_str
1621
- else:
1622
- return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
1623
-
1624
- if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
1625
- if frame_id:
1626
- start_time = int(frame_id)/stream_info.get("input_settings", {}).get("original_fps", 30)
1627
- else:
1628
- start_time = stream_info.get("input_settings", {}).get("start_frame", 30)/stream_info.get("input_settings", {}).get("original_fps", 30)
1629
- stream_time_str = self._format_timestamp_for_video(start_time)
1630
- return stream_time_str
1631
- else:
1632
- # For streams, use stream_time from stream_info
1633
- stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
1634
- if stream_time_str:
1635
- # Parse the high precision timestamp string to get timestamp
1636
- try:
1637
- # Remove " UTC" suffix and parse
1638
- timestamp_str = stream_time_str.replace(" UTC", "")
1639
- dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
1640
- timestamp = dt.replace(tzinfo=timezone.utc).timestamp()
1641
- return self._format_timestamp_for_stream(timestamp)
1642
- except:
1643
- # Fallback to current time if parsing fails
1644
- return self._format_timestamp_for_stream(time.time())
1645
- else:
1646
- return self._format_timestamp_for_stream(time.time())
1647
-
1648
- def _get_start_timestamp_str(self, stream_info: Optional[Dict[str, Any]], precision=False) -> str:
1649
- """Get formatted start timestamp for 'TOTAL SINCE' based on stream type."""
1650
- if not stream_info:
1651
- return "00:00:00"
1652
-
1653
- if precision:
1654
- if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
1655
- return "00:00:00"
1656
- else:
1657
- return datetime.now(timezone.utc).strftime("%Y-%m-%d-%H:%M:%S.%f UTC")
1658
-
1659
-
1660
- if stream_info.get("input_settings", {}).get("start_frame", "na") != "na":
1661
- # If video format, start from 00:00:00
1662
- return "00:00:00"
1663
- else:
1664
- # For streams, use tracking start time or current time with minutes/seconds reset
1665
- if self._tracking_start_time is None:
1666
- # Try to extract timestamp from stream_time string
1667
- stream_time_str = stream_info.get("input_settings", {}).get("stream_info", {}).get("stream_time", "")
1668
- if stream_time_str:
1669
- try:
1670
- # Remove " UTC" suffix and parse
1671
- timestamp_str = stream_time_str.replace(" UTC", "")
1672
- dt = datetime.strptime(timestamp_str, "%Y-%m-%d-%H:%M:%S.%f")
1673
- self._tracking_start_time = dt.replace(tzinfo=timezone.utc).timestamp()
1674
- except:
1675
- # Fallback to current time if parsing fails
1676
- self._tracking_start_time = time.time()
1677
- else:
1678
- self._tracking_start_time = time.time()
1679
-
1680
- dt = datetime.fromtimestamp(self._tracking_start_time, tz=timezone.utc)
1681
- # Reset minutes and seconds to 00:00 for "TOTAL SINCE" format
1682
- dt = dt.replace(minute=0, second=0, microsecond=0)
1683
- return dt.strftime('%Y:%m:%d %H:%M:%S')
1684
-
1685
- def _extract_frame_id_from_tracking(self, frame_detections: List[Dict], frame_key: str) -> str:
1686
- """Extract frame ID from tracking data."""
1687
- # Priority 1: Check if detections have frame information
1688
- if frame_detections and len(frame_detections) > 0:
1689
- first_detection = frame_detections[0]
1690
- if "frame" in first_detection:
1691
- return str(first_detection["frame"])
1692
- elif "frame_id" in first_detection:
1693
- return str(first_detection["frame_id"])
1694
- # Priority 2: Use frame_key from input data
1695
- return str(frame_key)
1696
-
1697
- def _robust_zone_total(self, zone_count):
1698
- """Helper method to robustly calculate zone total."""
1699
- if isinstance(zone_count, dict):
1700
- total = 0
1701
- for v in zone_count.values():
1702
- if isinstance(v, int):
1703
- total += v
1704
- elif isinstance(v, list):
1705
- total += len(v)
1706
- return total
1707
- elif isinstance(zone_count, list):
1708
- return len(zone_count)
1709
- elif isinstance(zone_count, int):
1710
- return zone_count
1711
- else:
1712
- return 0
1713
-
1714
- # --------------------------------------------------------------------- #
1715
- # Private helpers for canonical track aliasing #
1716
- # --------------------------------------------------------------------- #
1717
-
1718
- def _compute_iou(self, box1: Any, box2: Any) -> float:
1719
- """Compute IoU between two bounding boxes that may be either list or dict.
1720
- Falls back to geometry_utils.calculate_iou when both boxes are dicts.
1721
- """
1722
- # Handle dict format directly with calculate_iou (supports many keys)
1723
- if isinstance(box1, dict) and isinstance(box2, dict):
1724
- return calculate_iou(box1, box2)
1725
-
1726
- # Helper to convert bbox (dict or list) to a list [x1,y1,x2,y2]
1727
- def _bbox_to_list(bbox):
1728
- if bbox is None:
1729
- return []
1730
- if isinstance(bbox, list):
1731
- return bbox[:4] if len(bbox) >= 4 else []
1732
- if isinstance(bbox, dict):
1733
- if "xmin" in bbox:
1734
- return [bbox["xmin"], bbox["ymin"], bbox["xmax"], bbox["ymax"]]
1735
- if "x1" in bbox:
1736
- return [bbox["x1"], bbox["y1"], bbox["x2"], bbox["y2"]]
1737
- # Fallback: take first four values in insertion order
1738
- values = list(bbox.values())
1739
- return values[:4] if len(values) >= 4 else []
1740
- # Unsupported type
1741
- return []
1742
-
1743
- list1 = _bbox_to_list(box1)
1744
- list2 = _bbox_to_list(box2)
1745
-
1746
- if len(list1) < 4 or len(list2) < 4:
1747
- return 0.0
1748
-
1749
- x1_min, y1_min, x1_max, y1_max = list1
1750
- x2_min, y2_min, x2_max, y2_max = list2
1751
-
1752
- # Ensure correct ordering of coordinates
1753
- x1_min, x1_max = min(x1_min, x1_max), max(x1_min, x1_max)
1754
- y1_min, y1_max = min(y1_min, y1_max), max(y1_min, y1_max)
1755
- x2_min, x2_max = min(x2_min, x2_max), max(x2_min, x2_max)
1756
- y2_min, y2_max = min(y2_min, y2_max), max(y2_min, y2_max)
1757
-
1758
- inter_x_min = max(x1_min, x2_min)
1759
- inter_y_min = max(y1_min, y2_min)
1760
- inter_x_max = min(x1_max, x2_max)
1761
- inter_y_max = min(y1_max, y2_max)
1762
-
1763
- inter_w = max(0.0, inter_x_max - inter_x_min)
1764
- inter_h = max(0.0, inter_y_max - inter_y_min)
1765
- inter_area = inter_w * inter_h
1766
-
1767
- area1 = (x1_max - x1_min) * (y1_max - y1_min)
1768
- area2 = (x2_max - x2_min) * (y2_max - y2_min)
1769
- union_area = area1 + area2 - inter_area
1770
-
1771
- return (inter_area / union_area) if union_area > 0 else 0.0
1772
-
1773
- def _get_canonical_id(self, raw_id: Any) -> Any:
1774
- """Return the canonical ID for a raw tracker-generated ID."""
1775
- return self._track_aliases.get(raw_id, raw_id)
1776
-
1777
- def _merge_or_register_track(self, raw_id: Any, bbox: List[float]) -> Any:
1778
- """Merge the raw track into an existing canonical track if possible,
1779
- otherwise register it as a new canonical track. Returns the canonical
1780
- ID to use for counting.
1781
- """
1782
- now = time.time()
1783
-
1784
- # Fast path: raw_id already mapped
1785
- if raw_id in self._track_aliases:
1786
- canonical_id = self._track_aliases[raw_id]
1787
- track_info = self._canonical_tracks.get(canonical_id)
1788
- if track_info is not None:
1789
- track_info["last_bbox"] = bbox
1790
- track_info["last_update"] = now
1791
- track_info["raw_ids"].add(raw_id)
1792
- return canonical_id
1793
-
1794
- # Attempt to merge with an existing canonical track
1795
- for canonical_id, info in self._canonical_tracks.items():
1796
- # Only consider recently updated tracks to avoid stale matches
1797
- if now - info["last_update"] > self._track_merge_time_window:
1798
- continue
1799
-
1800
- iou = self._compute_iou(bbox, info["last_bbox"])
1801
- if iou >= self._track_merge_iou_threshold:
1802
- # Merge raw_id into canonical track
1803
- self._track_aliases[raw_id] = canonical_id
1804
- info["last_bbox"] = bbox
1805
- info["last_update"] = now
1806
- info["raw_ids"].add(raw_id)
1807
- self.logger.debug(
1808
- f"Merged raw track {raw_id} into canonical track {canonical_id} (IoU={iou:.2f})")
1809
- return canonical_id
1810
-
1811
- # No match found – create a new canonical track
1812
- canonical_id = raw_id
1813
- self._track_aliases[raw_id] = canonical_id
1814
- self._canonical_tracks[canonical_id] = {
1815
- "last_bbox": bbox,
1816
- "last_update": now,
1817
- "raw_ids": {raw_id},
1818
- }
1819
- self.logger.debug(f"Registered new canonical track {canonical_id}")
1820
- return canonical_id
1821
-
1822
- def _format_timestamp(self, timestamp: float) -> str:
1823
- """Format a timestamp for human-readable output."""
1824
- return datetime.fromtimestamp(timestamp, timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')
1825
-
1826
- def _get_tracking_start_time(self) -> str:
1827
- """Get the tracking start time, formatted as a string."""
1828
- if self._tracking_start_time is None:
1829
- return "N/A"
1830
- return self._format_timestamp(self._tracking_start_time)
1831
-
1832
- def _set_tracking_start_time(self) -> None:
1833
- """Set the tracking start time to the current time."""
1834
- self._tracking_start_time = time.time()
1835
-
1836
    def get_config_schema(self) -> Dict[str, Any]:
        """Get configuration schema for activity counting.

        Returns:
            A JSON-Schema style dict describing the accepted configuration:
            detection thresholds, tracking toggles, zone polygon definitions,
            the recognised activity category names, and alerting options.
            Only ``confidence_threshold`` is required.
        """
        return {
            "type": "object",
            "properties": {
                # Global minimum detection confidence (0.0-1.0).
                "confidence_threshold": {
                    "type": "number",
                    "minimum": 0.0,
                    "maximum": 1.0,
                    "default": 0.5,
                    "description": "Minimum confidence threshold for detections"
                },
                "enable_tracking": {
                    "type": "boolean",
                    "default": False,
                    "description": "Enable tracking for unique counting"
                },
                # Zones are named polygons: each value is a list of >=3 [x, y]
                # points; per-zone confidence thresholds may override the global.
                "zone_config": {
                    "type": "object",
                    "properties": {
                        "zones": {
                            "type": "object",
                            "additionalProperties": {
                                "type": "array",
                                "items": {
                                    "type": "array",
                                    "items": {"type": "number"},
                                    "minItems": 2,
                                    "maxItems": 2
                                },
                                "minItems": 3
                            },
                            "description": "Zone definitions as polygons"
                        },
                        "zone_confidence_thresholds": {
                            "type": "object",
                            "additionalProperties": {"type": "number", "minimum": 0.0, "maximum": 1.0},
                            "description": "Per-zone confidence thresholds"
                        }
                    }
                },
                # Default list of recognised activity class names; must match
                # the labels emitted by the upstream activity model.
                "activity_categories": {
                    "type": "array",
                    "items": {"type": "string"},
                    "default": ["barbequing", "bartending", "breading or flooring", "celebrating",
                                "clapping", "cleaning floor", "cleaning gutters", "cleaning toilet",
                                "cleaning windows", "climbing ladder", "cooking chicken", "cooking egg",
                                "cooking sausages", "counting money", "cutting pineapple", "cutting watermelon",
                                "dining", "drinking", "drinking beer", "drinking shots", "eating burger", "eating cake",
                                "eating carrots", "eating chips", "eating doughnuts", "eating hotdog", "eating ice cream",
                                "eating spaghetti", "eating watermelon", "flipping pancake", "frying vegetables", "garbage collecting",
                                "making a cake", "making a sandwich", "making pizza making sushi", "making tea",
                                "mopping floor", "moving furniture", "peeling apples", "peeling potatos", "picking fruit",
                                "reading book", "reading newspaper", "setting table", "shaking hands", "smoking", "smoking hookah",
                                "sweeping floor", "tasting beer", "tasting food", "tossing salad", "washing hands",
                                "washing dishes"],
                    "description": "Category names that represent activity"
                },
                "enable_unique_counting": {
                    "type": "boolean",
                    "default": True,
                    "description": "Enable unique human activity using tracking"
                },
                "time_window_minutes": {
                    "type": "integer",
                    "minimum": 1,
                    "default": 60,
                    "description": "Time window for counting analysis in minutes"
                },
                # Alerting: thresholds trigger alerts; type/value/category
                # configure how alerts are delivered and labelled.
                "alert_config": {
                    "type": "object",
                    "properties": {
                        "count_thresholds": {
                            "type": "object",
                            "additionalProperties": {"type": "integer", "minimum": 1},
                            "description": "Count thresholds for alerts"
                        },
                        "occupancy_thresholds": {
                            "type": "object",
                            "additionalProperties": {"type": "integer", "minimum": 1},
                            "description": "Zone occupancy thresholds for alerts"
                        },
                        "alert_type": {
                            "type": "array",
                            "items": {"type": "string"},
                            "default": ["Default"],
                            "description": "To pass the type of alert. EG: email, sms, etc."
                        },
                        "alert_value": {
                            "type": "array",
                            "items": {"type": "string"},
                            "default": ["JSON"],
                            "description": "Alert value to pass the value based on type. EG: email id if type is email."
                        },
                        "alert_incident_category": {
                            "type": "array",
                            "items": {"type": "string"},
                            "default": ["Incident Detection Alert"],
                            "description": "Group and name the Alert category Type"
                        },
                    }
                }
            },
            "required": ["confidence_threshold"],
            "additionalProperties": False
        }
1942
-
1943
- def create_default_config(self, **overrides) -> HumanActivityConfig:
1944
- """Create default configuration with optional overrides."""
1945
- defaults = {
1946
- "category": self.category,
1947
- "usecase": self.name,
1948
- "confidence_threshold": 0.5,
1949
- "enable_tracking": False,
1950
- "enable_analytics": True,
1951
- "enable_unique_counting": True,
1952
- "time_window_minutes": 60,
1953
- "activity_categories": ["barbequing", "bartending", "breading or flooring", "celebrating",
1954
- "clapping", "cleaning floor", "cleaning gutters", "cleaning toilet",
1955
- "cleaning windows", "climbing ladder", "cooking chicken", "cooking egg",
1956
- "cooking sausages", "counting money", "cutting pineapple", "cutting watermelon",
1957
- "dining", "drinking", "drinking beer", "drinking shots", "eating burger", "eating cake",
1958
- "eating carrots", "eating chips", "eating doughnuts", "eating hotdog", "eating ice cream",
1959
- "eating spaghetti", "eating watermelon", "flipping pancake", "frying vegetables", "garbage collecting",
1960
- "making a cake", "making a sandwich", "making pizza making sushi", "making tea",
1961
- "mopping floor", "moving furniture", "peeling apples", "peeling potatos", "picking fruit",
1962
- "reading book", "reading newspaper", "setting table", "shaking hands", "smoking", "smoking hookah",
1963
- "sweeping floor", "tasting beer", "tasting food", "tossing salad", "washing hands",
1964
- "washing dishes"],
1965
- }
1966
- defaults.update(overrides)
1967
- return HumanActivityConfig(**defaults)
1968
-
1969
- def _apply_smoothing(self, data: Any, config: HumanActivityConfig) -> Any:
1970
- """Apply smoothing to tracking data if enabled."""
1971
- if self.smoothing_tracker is None:
1972
- smoothing_config = BBoxSmoothingConfig(
1973
- smoothing_algorithm=config.smoothing_algorithm,
1974
- window_size=config.smoothing_window_size,
1975
- cooldown_frames=config.smoothing_cooldown_frames,
1976
- confidence_threshold=config.confidence_threshold or 0.5,
1977
- confidence_range_factor=config.smoothing_confidence_range_factor,
1978
- enable_smoothing=True
1979
- )
1980
- self.smoothing_tracker = BBoxSmoothingTracker(smoothing_config)
1981
-
1982
- smoothed_data = bbox_smoothing(data, self.smoothing_tracker.config, self.smoothing_tracker)
1983
- self.logger.debug(f"Applied bbox smoothing to tracking results")
1984
- return smoothed_data