matrice 1.0.99273-py3-none-any.whl → 1.0.99275-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
matrice/deploy/aggregator/analytics.py (new file)
@@ -0,0 +1,435 @@
+import logging
+import threading
+import time
+import base64
+import json
+from typing import Dict, Any, Optional, List, Tuple
+
+from matrice.session import Session
+from confluent_kafka import Producer
+
+class AnalyticsSummarizer:
+    """
+    Buffers aggregated camera_results and emits 5-minute rollups per camera,
+    focusing on tracking_stats per application.
+
+    Example output structure per camera:
+    {
+        "camera_name": "camera_1",
+        "inferencePipelineId": "pipeline-xyz",
+        "camera_group": "group_a",
+        "location": "Lobby",
+        "agg_apps": [
+            {
+                "application_name": "People Counting",
+                "application_key_name": "People_Counting",
+                "application_version": "1.3",
+                "tracking_stats": {
+                    "input_timestamp": "00:00:09.9",  # last seen
+                    "reset_timestamp": "00:00:00",  # last seen in window
+                    "current_counts": [{"category": "person", "count": 4}],  # last seen
+                    "total_counts": [{"category": "person", "count": 37}]  # max seen in window
+                }
+            }
+        ],
+        "summary_metadata": {
+            "window_seconds": 300,
+            "messages_aggregated": 123,
+            "start_time": 1710000000.0,
+            "end_time": 1710000300.0
+        }
+    }
+    """
+
+    def __init__(
+        self,
+        session: Session,
+        inference_pipeline_id: str,
+        flush_interval_seconds: int = 300,
+    ) -> None:
+        self.session = session
+        self.inference_pipeline_id = inference_pipeline_id
+        self.flush_interval_seconds = flush_interval_seconds
+
+        self.kafka_producer = self._setup_kafka_producer()
+
+        # Threading
+        self._stop = threading.Event()
+        self._thread: Optional[threading.Thread] = None
+        self._is_running = False
+        self._lock = threading.RLock()
+
+        # Ingestion queue
+        self._ingest_queue: List[Dict[str, Any]] = []
+
+        # Aggregation buffers keyed by (camera_group, camera_name).
+        # Each value holds:
+        # {
+        #     "window_start": float,
+        #     "last_seen": float,
+        #     "camera_info": dict,
+        #     "messages": int,
+        #     "apps": {
+        #         application_key_name: {
+        #             "meta": {name, key_name, version},
+        #             "last_input_timestamp": str,
+        #             "reset_timestamp": str | None,  # last seen in window
+        #             "current_counts": {category: last_value},
+        #             "total_counts": {category: max_value}
+        #         }
+        #     }
+        # }
+        #
+        self._buffers: Dict[Tuple[str, str], Dict[str, Any]] = {}
+
+        # Stats
+        self.stats = {
+            "start_time": None,
+            "summaries_published": 0,
+            "messages_ingested": 0,
+            "errors": 0,
+            "last_error": None,
+            "last_error_time": None,
+            "last_flush_time": None,
+        }
+
+    def _setup_kafka_producer(self):
+        path = "/v1/actions/get_kafka_info"
+
+        response = self.session.get(path=path, raise_exception=True)
+
+        if not response or not response.get("success"):
+            message = response.get("message", "Unknown error") if response else "No response"
+            raise ValueError(f"Failed to fetch Kafka config: {message}")
+
+        # Decode base64 fields
+        encoded_ip = response["data"]["ip"]
+        encoded_port = response["data"]["port"]
+        ip = base64.b64decode(encoded_ip).decode("utf-8")
+        port = base64.b64decode(encoded_port).decode("utf-8")
+        bootstrap_servers = f"{ip}:{port}"
+
+        # Kafka producer for summaries (reuses the pipeline server topic).
+        # Note: "linger.ms" is an alias of "queue.buffering.max.ms" in librdkafka,
+        # so the later value (50 ms) is the one that takes effect.
+        kafka_producer = Producer({
+            "bootstrap.servers": bootstrap_servers,
+            "acks": "all",
+            "retries": 3,
+            "retry.backoff.ms": 1000,
+            "request.timeout.ms": 30000,
+            "max.in.flight.requests.per.connection": 1,
+            "linger.ms": 10,
+            "batch.size": 4096,
+            "queue.buffering.max.ms": 50,
+            "log_level": 0,
+        })
+        return kafka_producer
+
+    def start(self) -> bool:
+        if self._is_running:
+            logging.warning("Analytics summarizer already running")
+            return True
+        try:
+            self._stop.clear()
+            self._is_running = True
+            self.stats["start_time"] = time.time()
+            self.stats["last_flush_time"] = time.time()
+            self._thread = threading.Thread(
+                target=self._run, name=f"AnalyticsSummarizer-{self.inference_pipeline_id}", daemon=True
+            )
+            self._thread.start()
+            logging.info("Analytics summarizer started")
+            return True
+        except Exception as exc:
+            self._record_error(f"Failed to start analytics summarizer: {exc}")
+            self.stop()
+            return False
+
+    def stop(self) -> None:
+        if not self._is_running:
+            logging.info("Analytics summarizer not running")
+            return
+        logging.info("Stopping analytics summarizer...")
+        self._is_running = False
+        self._stop.set()
+        try:
+            if self._thread and self._thread.is_alive():
+                self._thread.join(timeout=5.0)
+        except Exception as exc:
+            logging.error(f"Error joining analytics summarizer thread: {exc}")
+        self._thread = None
+        logging.info("Analytics summarizer stopped")
+
+    def ingest_result(self, aggregated_result: Dict[str, Any]) -> None:
+        """
+        Receive a single aggregated camera_results payload for buffering.
+        This is intended to be called by the publisher after a successful publish.
+        """
+        try:
+            with self._lock:
+                self._ingest_queue.append(aggregated_result)
+                self.stats["messages_ingested"] += 1
+        except Exception as exc:
+            self._record_error(f"Failed to ingest result: {exc}")
+
+    def _run(self) -> None:
+        logging.info("Analytics summarizer worker started")
+        while not self._stop.is_set():
+            try:
+                # Drain ingestion queue
+                self._drain_ingest_queue()
+
+                # Time-based flush
+                current_time = time.time()
+                last_flush = self.stats.get("last_flush_time") or current_time
+                if current_time - last_flush >= self.flush_interval_seconds:
+                    self._flush_all(current_time)
+                    self.stats["last_flush_time"] = current_time
+
+                # Prevent busy loop
+                time.sleep(0.5)
+
+            except Exception as exc:
+                if not self._stop.is_set():
+                    self._record_error(f"Error in summarizer loop: {exc}")
+                time.sleep(0.2)
+        # Final flush on stop
+        try:
+            self._flush_all(time.time())
+        except Exception as exc:
+            logging.error(f"Error during final analytics flush: {exc}")
+        logging.info("Analytics summarizer worker stopped")
+
+    def _drain_ingest_queue(self) -> None:
+        local_batch: List[Dict[str, Any]] = []
+        with self._lock:
+            if self._ingest_queue:
+                local_batch = self._ingest_queue
+                self._ingest_queue = []
+
+        for result in local_batch:
+            try:
+                self._add_to_buffers(result)
+            except Exception as exc:
+                self._record_error(f"Failed buffering result: {exc}")
+
+    def _add_to_buffers(self, result: Dict[str, Any]) -> None:
+        camera_info = result.get("camera_info", {}) or {}
+        camera_name = camera_info.get("camera_name") or "unknown"
+        camera_group = camera_info.get("camera_group") or "default_group"
+        location = camera_info.get("location")
+
+        key = (camera_group, camera_name)
+        now = time.time()
+        buffer = self._buffers.get(key)
+        if not buffer:
+            buffer = {
+                "window_start": now,
+                "last_seen": now,
+                "camera_info": {
+                    "camera_name": camera_name,
+                    "camera_group": camera_group,
+                    "location": location,
+                },
+                "messages": 0,
+                "apps": {},
+            }
+            self._buffers[key] = buffer
+        else:
+            buffer["last_seen"] = now
+            # Update location if provided
+            if location:
+                buffer["camera_info"]["location"] = location
+
+        buffer["messages"] += 1
+
+        # Process each app
+        agg_apps = result.get("agg_apps", []) or []
+        for app in agg_apps:
+            app_name = app.get("application_name") or app.get("app_name") or "unknown"
+            app_key = app.get("application_key_name") or app.get("application_key") or app_name
+            app_ver = app.get("application_version") or app.get("version") or ""
+
+            app_buf = buffer["apps"].get(app_key)
+            if not app_buf:
+                app_buf = {
+                    "meta": {
+                        "application_name": app_name,
+                        "application_key_name": app_key,
+                        "application_version": app_ver,
+                    },
+                    "last_input_timestamp": None,
+                    "reset_timestamp": None,
+                    "current_counts": {},
+                    "total_counts": {},
+                }
+                buffer["apps"][app_key] = app_buf
+
+            # Extract tracking_stats from app
+            tracking_stats = self._extract_tracking_stats_from_app(app)
+            if not tracking_stats:
+                continue
+
+            input_ts = tracking_stats.get("input_timestamp")
+            reset_ts = tracking_stats.get("reset_timestamp")
+            current_counts = tracking_stats.get("current_counts") or []
+            total_counts = tracking_stats.get("total_counts") or []
+
+            if input_ts:
+                app_buf["last_input_timestamp"] = input_ts
+            if reset_ts is not None:
+                # Simplify: keep last seen reset timestamp only
+                app_buf["reset_timestamp"] = reset_ts
+
+            # Update current counts (take last observed)
+            for item in current_counts:
+                cat = item.get("category")
+                cnt = item.get("count")
+                if cat is not None and cnt is not None:
+                    app_buf["current_counts"][cat] = cnt
+
+            # Update total counts (take max observed to avoid double-counting cumulative totals)
+            for item in total_counts:
+                cat = item.get("category")
+                cnt = item.get("count")
+                if cat is None or cnt is None:
+                    continue
+                existing = app_buf["total_counts"].get(cat)
+                if existing is None or cnt > existing:
+                    app_buf["total_counts"][cat] = cnt
+
+    def _extract_tracking_stats_from_app(self, app: Dict[str, Any]) -> Optional[Dict[str, Any]]:
+        # Prefer direct 'tracking_stats' if present
+        if isinstance(app.get("tracking_stats"), dict):
+            return app["tracking_stats"]
+
+        # Otherwise, try agg_summary structure: pick latest by key order
+        agg_summary = app.get("agg_summary")
+        if isinstance(agg_summary, dict) and agg_summary:
+            # Keys might be frame numbers as strings -> choose max numerically
+            try:
+                latest_key = max(agg_summary.keys(), key=lambda k: int(str(k)))
+            except Exception:
+                latest_key = sorted(agg_summary.keys())[-1]
+            entry = agg_summary.get(latest_key) or {}
+            ts = entry.get("tracking_stats")
+            if isinstance(ts, dict):
+                return ts
+        return None
+
+    def _flush_all(self, end_time: float) -> None:
+        # Build and publish summaries per camera
+        with self._lock:
+            items = list(self._buffers.items())
+            # Reset buffers after copying references
+            self._buffers = {}
+
+        for (camera_group, camera_name), buf in items:
+            try:
+                camera_info = buf.get("camera_info", {})
+                start_time = buf.get("window_start", end_time)
+                messages = buf.get("messages", 0)
+
+                agg_apps_output: List[Dict[str, Any]] = []
+                for app_key, app_buf in buf.get("apps", {}).items():
+                    # Convert counts dicts to lists
+                    current_list = [
+                        {"category": cat, "count": cnt}
+                        for cat, cnt in app_buf.get("current_counts", {}).items()
+                    ]
+                    total_list = [
+                        {"category": cat, "count": cnt}
+                        for cat, cnt in app_buf.get("total_counts", {}).items()
+                    ]
+
+                    agg_apps_output.append(
+                        {
+                            **app_buf["meta"],
+                            "tracking_stats": {
+                                "input_timestamp": app_buf.get("last_input_timestamp"),
+                                "reset_timestamp": app_buf.get("reset_timestamp"),
+                                "current_counts": current_list,
+                                "total_counts": total_list,
+                            },
+                        }
+                    )
+
+                summary_payload = {
+                    "camera_name": camera_info.get("camera_name", camera_name),
+                    "inferencePipelineId": self.inference_pipeline_id,
+                    "camera_group": camera_info.get("camera_group", camera_group),
+                    "location": camera_info.get("location"),
+                    "agg_apps": agg_apps_output,
+                    "summary_metadata": {
+                        "window_seconds": self.flush_interval_seconds,
+                        "messages_aggregated": messages,
+                        "start_time": start_time,
+                        "end_time": end_time,
+                    },
+                }
+
+                # Publish via Kafka (JSON bytes)
+                self.kafka_producer.produce(
+                    topic="Analytics-Inference-Pipeline",
+                    key=str(camera_name).encode("utf-8"),
+                    value=json.dumps(summary_payload, separators=(",", ":")).encode("utf-8"),
+                )
+                self.stats["summaries_published"] += 1
+                logging.debug(
+                    f"Published 5-min summary for camera {camera_group}/{camera_name} with {len(agg_apps_output)} apps"
+                )
+            except Exception as exc:
+                self._record_error(f"Failed to publish summary for {camera_group}/{camera_name}: {exc}")
+        # Brief flush for delivery
+        try:
+            self.kafka_producer.poll(0)
+            self.kafka_producer.flush(5)
+        except Exception:
+            pass
+
+    def _record_error(self, error_message: str) -> None:
+        with self._lock:
+            self.stats["errors"] += 1
+            self.stats["last_error"] = error_message
+            self.stats["last_error_time"] = time.time()
+        logging.error(f"Analytics summarizer error: {error_message}")
+
+    def get_stats(self) -> Dict[str, Any]:
+        with self._lock:
+            stats = dict(self.stats)
+            if stats.get("start_time"):
+                stats["uptime_seconds"] = time.time() - stats["start_time"]
+            return stats
+
+    def get_health_status(self) -> Dict[str, Any]:
+        health = {
+            "status": "healthy",
+            "is_running": self._is_running,
+            "errors": self.stats["errors"],
+            "summaries_published": self.stats["summaries_published"],
+            "messages_ingested": self.stats["messages_ingested"],
+        }
+        if (
+            self.stats.get("last_error_time")
+            and (time.time() - self.stats["last_error_time"]) < 60
+        ):
+            health["status"] = "degraded"
+            health["reason"] = f"Recent error: {self.stats.get('last_error')}"
+        if not self._is_running:
+            health["status"] = "unhealthy"
+            health["reason"] = "Summarizer is not running"
+        return health
+
+    def cleanup(self) -> None:
+        try:
+            self.stop()
+        except Exception:
+            pass
+        with self._lock:
+            self._ingest_queue = []
+            self._buffers = {}
+        try:
+            if hasattr(self, "kafka_producer") and self.kafka_producer is not None:
+                self.kafka_producer.flush(5)
+        except Exception as exc:
+            logging.error(f"Error flushing analytics kafka producer: {exc}")
+        logging.info("Analytics summarizer cleanup completed")
+
+
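For context, a minimal usage sketch of the new class (hypothetical, not part of the diff): it assumes a `Session` authenticated against the Matrice API, since the constructor fetches Kafka connection details from `/v1/actions/get_kafka_info`, and it uses a placeholder pipeline id. In the released code the aggregation pipeline is meant to wire this up itself; as the hunks below show, that initialization is still commented out behind a TODO.

```python
from matrice.session import Session
from matrice.deploy.aggregator.analytics import AnalyticsSummarizer

# Assumption: a configured/authenticated session; the exact constructor
# arguments depend on the SDK's session setup.
session = Session()

summarizer = AnalyticsSummarizer(
    session=session,
    inference_pipeline_id="pipeline-xyz",  # hypothetical id
    flush_interval_seconds=300,            # 5-minute rollup window
)
summarizer.start()

# The publisher calls ingest_result() after each successful publish; a payload
# shaped like the class docstring example is buffered per (group, camera).
summarizer.ingest_result({
    "camera_info": {"camera_name": "camera_1", "camera_group": "group_a", "location": "Lobby"},
    "agg_apps": [{
        "application_name": "People Counting",
        "application_key_name": "People_Counting",
        "application_version": "1.3",
        "tracking_stats": {
            "input_timestamp": "00:00:09.9",
            "reset_timestamp": "00:00:00",
            "current_counts": [{"category": "person", "count": 4}],
            "total_counts": [{"category": "person", "count": 37}],
        },
    }],
})

# Rollups go to the "Analytics-Inference-Pipeline" topic every 300 seconds;
# stop() triggers a final flush before the worker thread exits.
summarizer.stop()
```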

matrice/deploy/aggregator/pipeline.py
@@ -7,6 +7,7 @@ from matrice.deploy.aggregator.ingestor import ResultsIngestor
 from matrice.deploy.aggregator.synchronizer import ResultsSynchronizer
 from matrice.deploy.aggregator.aggregator import ResultsAggregator
 from matrice.deploy.aggregator.publisher import ResultsPublisher
+from matrice.deploy.aggregator.analytics import AnalyticsSummarizer
 from matrice.deployment.inference_pipeline import InferencePipeline


@@ -71,6 +72,7 @@ class ResultsAggregationPipeline:
         self.results_synchronizer = None
         self.results_aggregator = None
         self.results_publisher = None
+        self.analytics_summarizer = None

         # Initialize the final results queue
         self.final_results_queue = Queue()
@@ -178,12 +180,22 @@
         )
         self.stats["component_status"]["aggregator"] = "initialized"

+        # Initialize analytics summarizer (5-minute window)
+        # logging.info("Initializing analytics summarizer...")
+        # self.analytics_summarizer = AnalyticsSummarizer(
+        #     session=self.session,
+        #     inference_pipeline_id=self.inference_pipeline_id,
+        #     flush_interval_seconds=300,
+        # )
+        # self.stats["component_status"]["analytics_summarizer"] = "initialized"
+
         # Initialize the results publisher
         logging.info("Initializing results publisher...")
         self.results_publisher = ResultsPublisher(
             inference_pipeline_id=self.inference_pipeline_id,
             session=self.session,
-            final_results_queue=self.results_aggregator.aggregated_results_queue
+            final_results_queue=self.results_aggregator.aggregated_results_queue,
+            analytics_summarizer=self.analytics_summarizer,
         )
         self.stats["component_status"]["publisher"] = "initialized"

@@ -238,6 +250,13 @@
             return False
         self.stats["component_status"]["aggregator"] = "running"

+        # Start analytics summarizer
+        logging.info("Starting analytics summarizer...")
+        # if not self.analytics_summarizer.start():  # TODO: Uncomment this when analytics summarizer is ready
+        #     self._record_error("Failed to start analytics summarizer")
+        #     return False
+        self.stats["component_status"]["analytics_summarizer"] = "running"
+
         # Start results publishing
         logging.info("Starting results publishing...")
         if not self.results_publisher.start_streaming():
@@ -387,6 +406,12 @@
             if agg_stats.get("errors", 0) > 0:
                 logging.warning(f" └─ Aggregator Errors: {agg_stats['errors']} (last: {agg_stats.get('last_error', 'N/A')})")

+        if "analytics_summarizer" in components:
+            sum_stats = components["analytics_summarizer"]
+            logging.info(f" 🧮 Summaries Published: {sum_stats.get('summaries_published', 0)}")
+            if sum_stats.get("errors", 0) > 0:
+                logging.warning(f" └─ Summarizer Errors: {sum_stats['errors']} (last: {sum_stats.get('last_error', 'N/A')})")
+
         if "results_publisher" in components:
             pub_stats = components["results_publisher"]
             logging.info(f" 📤 Messages Published: {pub_stats.get('messages_produced', 0)}")
@@ -440,6 +465,14 @@
             except Exception as exc:
                 logging.error(f"Error stopping results publisher: {exc}")

+        if self.analytics_summarizer:
+            try:
+                logging.info("Stopping analytics summarizer...")
+                self.analytics_summarizer.stop()
+                self.stats["component_status"]["analytics_summarizer"] = "stopped"
+            except Exception as exc:
+                logging.error(f"Error stopping analytics summarizer: {exc}")
+
         if self.results_aggregator:
             try:
                 logging.info("Stopping results aggregator...")
@@ -484,6 +517,9 @@
         if self.results_aggregator:
             stats["components"]["results_aggregator"] = self.results_aggregator.get_stats()

+        if self.analytics_summarizer:
+            stats["components"]["analytics_summarizer"] = self.analytics_summarizer.get_stats()
+
         if self.results_publisher:
             stats["components"]["results_publisher"] = self.results_publisher.get_stats()

@@ -598,6 +634,21 @@
             health["issues"].append("Results aggregator not initialized")
             logging.error("Results aggregator not initialized")

+        if self.analytics_summarizer:
+            sum_health = self.analytics_summarizer.get_health_status()
+            health["components"]["analytics_summarizer"] = sum_health
+            if sum_health.get("status") != "healthy":
+                issue_detail = f"Analytics summarizer is {sum_health.get('status', 'unknown')}"
+                if "reason" in sum_health:
+                    issue_detail += f": {sum_health['reason']}"
+                if sum_health.get("errors", 0) > 0:
+                    issue_detail += f" ({sum_health['errors']} errors)"
+                health["issues"].append(issue_detail)
+                logging.warning(f"Summarizer health issue: {issue_detail}")
+        else:
+            health["issues"].append("Analytics summarizer not initialized")
+            logging.error("Analytics summarizer not initialized")
+
         if self.results_publisher:
             pub_health = self.results_publisher.get_health_status()
             health["components"]["results_publisher"] = pub_health
@@ -738,6 +789,12 @@
                 self.results_aggregator.cleanup()
             except Exception as exc:
                 logging.error(f"Error cleaning up aggregator: {exc}")
+
+        if self.analytics_summarizer:
+            try:
+                self.analytics_summarizer.cleanup()
+            except Exception as exc:
+                logging.error(f"Error cleaning up analytics summarizer: {exc}")

         if self.results_synchronizer:
             try:

matrice/deploy/aggregator/publisher.py
@@ -15,7 +15,7 @@ class ResultsPublisher:
     """

     def __init__(
-        self, inference_pipeline_id: str, session: Session, final_results_queue: Queue
+        self, inference_pipeline_id: str, session: Session, final_results_queue: Queue, analytics_summarizer: Optional[Any] = None
     ):
         """
         Initialize the final results streamer.
@@ -31,6 +31,8 @@
         self.kafka_handler = MatriceKafkaDeployment(
             session, inference_pipeline_id, type="server"
         )
+        # Optional analytics summarizer hook
+        self.analytics_summarizer = analytics_summarizer

         # Threading and state management
         self._stop_streaming = threading.Event()
@@ -109,6 +111,12 @@
                 camera_info = aggregated_result.get('camera_info', {})
                 stream_key = camera_info.get('camera_name', 'unknown')
                 logging.debug(f"Successfully published camera_results for stream: {stream_key}")
+                # Forward to analytics summarizer after successful publish
+                try:
+                    if self.analytics_summarizer is not None and hasattr(self.analytics_summarizer, 'ingest_result'):
+                        self.analytics_summarizer.ingest_result(aggregated_result)
+                except Exception as exc_inner:
+                    logging.warning(f"Failed to forward to analytics summarizer: {exc_inner}")
             except Exception as exc:
                 self.stats["kafka_errors"] += 1
                 self._record_error(f"Failed to produce aggregated result to Kafka: {str(exc)}")

matrice/deploy/client/streaming_gateway/streaming_gateway_utils.py
@@ -319,7 +319,7 @@ class OutputConfig:
 class _RealTimeJsonEventPicker:
     """Stateful helper that replicates the original logic but works one frame at a time."""

-    def __init__(self, consecutive_threshold: int = 7, end_threshold: int = 120):
+    def __init__(self, consecutive_threshold: int = 7, end_threshold: int = 130):
         # Required sequence of severities
         self._base_sequence: List[str] = ["low", "medium", "significant", "critical", "low"]
         self._sequence: deque[str] = deque(self._base_sequence)

matrice/deploy/server/stream_worker.py
@@ -52,8 +52,15 @@ class StreamWorker:
         if consumer_group_suffix:
             consumer_group_id += f"-{consumer_group_suffix}"

-        custom_request_service_id = self.inference_pipeline_id if (self.inference_pipeline_id and self.inference_pipeline_id != "000000000000000000000000") else deployment_id
-
+        custom_request_service_id = (
+            self.inference_pipeline_id
+            if (
+                self.inference_pipeline_id
+                and self.inference_pipeline_id != "000000000000000000000000"
+            )
+            else deployment_id
+        )
+
         self.kafka_deployment = MatriceKafkaDeployment(
             session,
             deployment_id,
@@ -231,7 +238,7 @@
                 "original_fps": input_stream.get("original_fps",31),
             }
         }
-
+
         model_result, post_processing_result = await self.inference_interface.inference(
             input_content,
             apply_post_processing=True,
@@ -240,7 +247,7 @@
             camera_info=camera_info,
             input_hash=input_hash
         )
-
+
         # Extract agg_summary from post-processing result
         agg_summary = {}
         if post_processing_result and isinstance(post_processing_result, dict):
@@ -255,7 +262,7 @@
                 "stream_time": self._get_high_precision_timestamp(),
             },
         }
-
+
         app_result = {
             "application_name": self.app_name,
             "application_key_name": self.app_name.replace(" ", "_").replace("-", "_"),

matrice/deploy/utils/post_processing/usecases/fire_detection.py
@@ -313,6 +313,8 @@ class FireSmokeUseCase(BaseProcessor):
         # Safely fetch the last recorded severity level as a **string** (empty if no history yet)
         last_level = self._ascending_alert_list[-1] if self._ascending_alert_list else "low"
         alert_id = self._get_alert_incident_ids(last_level)
+        if alert_id not in [1,2,3,4,5]:
+            alert_id = 4

         count_thresholds = {}
         if config.alert_config and hasattr(config.alert_config, "count_thresholds"):
@@ -443,6 +445,7 @@
         elif intensity_pct >= 13:
             level = "significant"
             self._ascending_alert_list.append(level)
+            self.current_incident_end_timestamp='Incident still active'
         elif intensity_pct >= 3:
             level = "medium"
             self._ascending_alert_list.append(level)
@@ -457,6 +460,7 @@
         elif intensity_pct > 12:
             level = "significant"
             intensity = 9.0
+            self.current_incident_end_timestamp='Incident still active'
             self._ascending_alert_list.append(level)
         elif intensity_pct > 2:
             level = "medium"
@@ -475,6 +479,8 @@
         # Pass the last severity level **value** instead of a single-element list
         last_level = level if level else self._ascending_alert_list[-1]
         incident_id = self._get_alert_incident_ids(last_level)
+        if incident_id not in [1,2,3,4,5]:
+            incident_id = 4

         alert_settings=[]
         if config.alert_config and hasattr(config.alert_config, 'alert_type'):
@@ -632,16 +638,17 @@
         last_ending_id = self._get_alert_incident_ids("")
         print('last_ending_id',last_ending_id)
         if last_ending_id==5:
-            tracking_stats.append({
+            alerts={
                 "alert_type": getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
-                "alert_id": "alert_"+category+'_'+str(last_ending_id),
+                "alert_id": "alert_"+'Event_Ended'+'_'+str(last_ending_id),
                 "incident_category": self.CASE_TYPE,
                 "threshold_level": 0,
                 "ascending": False,
                 "settings": {t: v for t, v in zip(getattr(config.alert_config, 'alert_type', ['Default']) if hasattr(config.alert_config, 'alert_type') else ['Default'],
                                                   getattr(config.alert_config, 'alert_value', ['JSON']) if hasattr(config.alert_config, 'alert_value') else ['JSON'])
                 }
-            })
+            }
+            tracking_stats.append(alerts)
             tracking_stats.append(self.create_incident(incident_id=self.CASE_TYPE+'_'+str(last_ending_id), incident_type=self.CASE_TYPE,
                                                        severity_level='info', human_text='Event Over', camera_info=camera_info, alerts=alerts, alert_settings=alert_settings,
                                                        start_time=start_timestamp, end_time='Incident still active',
@@ -1016,7 +1023,7 @@
         else:
             if len(self.id_hit_list)==1:
                 self.id_hit_counter+=1
-                if self.id_hit_counter>120:
+                if self.id_hit_counter>130:
                     self.id_hit_list = ["low","medium","significant","critical","low"]
                     self.id_hit_counter = 0
                     self.latest_stack = None

matrice-1.0.99275.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: matrice
-Version: 1.0.99273
+Version: 1.0.99275
 Summary: SDK for connecting to matrice.ai services
 Home-page: https://github.com/matrice-ai/python-sdk
 Author: Matrice.ai

matrice-1.0.99275.dist-info/RECORD
@@ -94,9 +94,10 @@ matrice/data_processing/data_formats/yolo_detection.py,sha256=qUeZA7_8Of_QWGZlHh
 matrice/deploy/__init__.py,sha256=1IEknWS1AxZL4aGzCyASusGH_vMEPk1L6u8RnnZjI9w,1256
 matrice/deploy/aggregator/__init__.py,sha256=HFz-ufUMeSpSyRINcrx6NdmrcuVZtPfyIXxYu5UNLTc,508
 matrice/deploy/aggregator/aggregator.py,sha256=ob-I9ZaGmhXqdzbIHZL8DDtYCaT944ipmlhFzIz6peQ,11450
+matrice/deploy/aggregator/analytics.py,sha256=kNfME7CEySJNOhsVH25DXKryj8bTdKnkroordJVauQM,16764
 matrice/deploy/aggregator/ingestor.py,sha256=-ySEg1UD2ahPXw7Ifr_4QC5xRGb6co8jGD6PgtvCK3w,17141
-matrice/deploy/aggregator/pipeline.py,sha256=xOKDqb4Qre5Ek3Fr-03C-jxpZNXdQ2_Dk5EaOn4ssYE,33948
-matrice/deploy/aggregator/publisher.py,sha256=SRPnyindD_R_QNK5MJ_WZAeAaGtj79CZ1VNoltsQtXM,15587
+matrice/deploy/aggregator/pipeline.py,sha256=nqHoc-lGIpiJ4UWq6KfRqykE4QNfu-XxNUPvlmnVDIs,37049
+matrice/deploy/aggregator/publisher.py,sha256=XeZG5XcnSn0Z----yH4vFKd_gohTd4g8Kx9TYwAaPOA,16196
 matrice/deploy/aggregator/synchronizer.py,sha256=nSoFxlt4Ol6zbg4yCHufcNsif_Spq3jjKTvB5WY73Gk,20455
 matrice/deploy/client/__init__.py,sha256=d7yxlWNHYKOoAM9T_AlrSQw1_9cfLEop4zBE0QqrTVw,1330
 matrice/deploy/client/client.py,sha256=5itmvupufK48lPNb2__ZbZ9q3Q5ycfCDTW6ClDC7cM4,28531
@@ -108,11 +109,11 @@ matrice/deploy/client/auto_streaming/auto_streaming.py,sha256=VRfy_EBFvhspN-hoN3
 matrice/deploy/client/auto_streaming/auto_streaming_utils.py,sha256=BuRUokLp3t43yzRF8YSX9p_RHQD94RoBwNEoldXFDQo,14995
 matrice/deploy/client/streaming_gateway/__init__.py,sha256=hkYC0qszaXZThquMuuI20Qkt_AHCB3pdy7jlJVeqPN4,1203
 matrice/deploy/client/streaming_gateway/streaming_gateway.py,sha256=r8Z5AXBom89n-8W5RTlB-nOeUaprxK-QBDsAb-E9gl8,19605
-matrice/deploy/client/streaming_gateway/streaming_gateway_utils.py,sha256=znNZkzG9W7VeZPKUxzgxHiW1GqvD-oA-McZSnMiBm1E,42144
+matrice/deploy/client/streaming_gateway/streaming_gateway_utils.py,sha256=_fJl0ry2O8UkEwHpufNdJ68EAkWAAN7moGbBLJtPQeE,42144
 matrice/deploy/client/streaming_gateway/streaming_results_handler.py,sha256=OpHkdbnuqVN28tQm9CYwrfgAth3Qz40Uaq5Tg4bbxyo,15813
 matrice/deploy/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 matrice/deploy/server/server.py,sha256=duki4KUU1tvW3Y7wkrlMRVvt7bAP2QqSIsrSogLxC4o,36799
-matrice/deploy/server/stream_worker.py,sha256=KDNlYep4Bg1xhHwDrwY9kfxsJfFtATydiYbzVDt58vs,20481
+matrice/deploy/server/stream_worker.py,sha256=reRkWGBl7a1f7EduZs25Uzecs6mc5wr2adNhlXxSgK0,20530
 matrice/deploy/server/inference/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 matrice/deploy/server/inference/batch_manager.py,sha256=7fCHJfSeBqfYgKGZH9_sgUDAllymkGkFm0Oin_-SAYI,8322
 matrice/deploy/server/inference/cache_manager.py,sha256=ebQ1Y3vGNW3--TCPnWijDK3ix5HuZ_zH7wjs8iHKkbU,1610
@@ -182,7 +183,7 @@ matrice/deploy/utils/post_processing/usecases/face_emotion.py,sha256=eRfqBdryB0u
 matrice/deploy/utils/post_processing/usecases/face_recognition.py,sha256=T5xAuv6b9OrkmTmoXgZs4LZ5XUsbvp9xCpeLBwdu7eI,40231
 matrice/deploy/utils/post_processing/usecases/fashion_detection.py,sha256=f9gpzMDhIW-gyn46k9jgf8nY7YeoqAnTxGOzksabFbE,40457
 matrice/deploy/utils/post_processing/usecases/field_mapping.py,sha256=JDwYX8pd2W-waDvBh98Y_o_uchJu7wEYbFxOliA4Iq4,39822
-matrice/deploy/utils/post_processing/usecases/fire_detection.py,sha256=iGsTciE54AnuGf8tDmgIIAXpuCyFTcU_qOTRPsxo2gA,48549
+matrice/deploy/utils/post_processing/usecases/fire_detection.py,sha256=8GMabLpVFWCSHULdjO8V_fCPqz9hf9pyAu2n-vuLg7A,48936
 matrice/deploy/utils/post_processing/usecases/flare_analysis.py,sha256=-egmS3Hs_iGOLeCMfapbkfQ04EWtZx97QRuUcDa-jMU,45340
 matrice/deploy/utils/post_processing/usecases/flower_segmentation.py,sha256=4I7qMx9Ztxg_hy9KTVX-3qBhAN-QwDt_Yigf9fFjLus,52017
 matrice/deploy/utils/post_processing/usecases/gas_leak_detection.py,sha256=KL2ft7fXvjTas-65-QgcJm3W8KBsrwF44qibSXjfaLc,40557
@@ -243,8 +244,8 @@ matrice/deployment/camera_manager.py,sha256=e1Lc81RJP5wUWRdTgHO6tMWF9BkBdHOSVyx3
 matrice/deployment/deployment.py,sha256=HFt151eWq6iqIAMsQvurpV2WNxW6Cx_gIUVfnVy5SWE,48093
 matrice/deployment/inference_pipeline.py,sha256=6b4Mm3-qt-Zy0BeiJfFQdImOn3FzdNCY-7ET7Rp8PMk,37911
 matrice/deployment/streaming_gateway_manager.py,sha256=ifYGl3g25wyU39HwhPQyI2OgF3M6oIqKMWt8RXtMxY8,21401
-matrice-1.0.99273.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
-matrice-1.0.99273.dist-info/METADATA,sha256=Zu1ooyc7gmPiF9mrhwDdcRn2wewobwm1aXdxTN9ZKnk,14624
-matrice-1.0.99273.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-matrice-1.0.99273.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
-matrice-1.0.99273.dist-info/RECORD,,
+matrice-1.0.99275.dist-info/licenses/LICENSE.txt,sha256=2bm9uFabQZ3Ykb_SaSU_uUbAj2-htc6WJQmS_65qD00,1073
+matrice-1.0.99275.dist-info/METADATA,sha256=F8K1g936l74lHwzQ9iLCeOwcTHnSzcFxchP3cbU6qbI,14624
+matrice-1.0.99275.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+matrice-1.0.99275.dist-info/top_level.txt,sha256=P97js8ur6o5ClRqMH3Cjoab_NqbJ6sOQ3rJmVzKBvMc,8
+matrice-1.0.99275.dist-info/RECORD,,