matrice-analytics 0.1.70__py3-none-any.whl → 0.1.96__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26) hide show
  1. matrice_analytics/post_processing/__init__.py +8 -2
  2. matrice_analytics/post_processing/config.py +4 -2
  3. matrice_analytics/post_processing/core/base.py +1 -1
  4. matrice_analytics/post_processing/core/config.py +40 -3
  5. matrice_analytics/post_processing/face_reg/face_recognition.py +1014 -201
  6. matrice_analytics/post_processing/face_reg/face_recognition_client.py +171 -29
  7. matrice_analytics/post_processing/face_reg/people_activity_logging.py +19 -0
  8. matrice_analytics/post_processing/post_processor.py +4 -0
  9. matrice_analytics/post_processing/usecases/__init__.py +4 -1
  10. matrice_analytics/post_processing/usecases/advanced_customer_service.py +913 -500
  11. matrice_analytics/post_processing/usecases/color_detection.py +19 -18
  12. matrice_analytics/post_processing/usecases/customer_service.py +356 -9
  13. matrice_analytics/post_processing/usecases/fire_detection.py +241 -23
  14. matrice_analytics/post_processing/usecases/footfall.py +750 -0
  15. matrice_analytics/post_processing/usecases/license_plate_monitoring.py +638 -40
  16. matrice_analytics/post_processing/usecases/people_counting.py +66 -33
  17. matrice_analytics/post_processing/usecases/vehicle_monitoring.py +35 -34
  18. matrice_analytics/post_processing/usecases/weapon_detection.py +2 -1
  19. matrice_analytics/post_processing/utils/alert_instance_utils.py +1018 -0
  20. matrice_analytics/post_processing/utils/business_metrics_manager_utils.py +1338 -0
  21. matrice_analytics/post_processing/utils/incident_manager_utils.py +1754 -0
  22. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/METADATA +1 -1
  23. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/RECORD +26 -22
  24. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/WHEEL +0 -0
  25. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/licenses/LICENSE.txt +0 -0
  26. {matrice_analytics-0.1.70.dist-info → matrice_analytics-0.1.96.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1338 @@
1
+ """
2
+ business_metrics_manager_utils.py
3
+
4
+ Manages business metrics aggregation and publishing to Redis/Kafka.
5
+ Aggregates metrics for 5 minutes (300 seconds) and pushes to output topic.
6
+ Supports aggregation types: mean (default), min, max, sum.
7
+
8
+ PRODUCTION-READY VERSION
9
+ """
10
+
11
+ import json
12
+ import time
13
+ import threading
14
+ import logging
15
+ import os
16
+ import urllib.request
17
+ import base64
18
+ import re
19
+ from typing import Dict, List, Optional, Any, Union
20
+ from datetime import datetime, timezone
21
+ from dataclasses import dataclass, field
22
+ from pathlib import Path
23
+
24
+
25
# Default aggregation interval in seconds (5 minutes)
DEFAULT_AGGREGATION_INTERVAL = 300

# Supported aggregation types
AGGREGATION_TYPES = ["mean", "min", "max", "sum"]

# Cache for location names to avoid repeated API calls
# NOTE(review): module-level and shared by every manager instance in the
# process; reads/writes are not guarded by a lock — confirm single-writer
# usage or accept the benign race (worst case: duplicate API lookup).
_location_name_cache: Dict[str, str] = {}

# Default metrics configuration with aggregation type
# (metric name -> aggregation type; see MetricAggregator for semantics)
DEFAULT_METRICS_CONFIG = {
    "customer_to_staff_ratio": "mean",
    "service_coverage": "mean",
    "interaction_rate": "mean",
    "staff_utilization": "mean",
    "area_utilization": "mean",
    "service_quality_score": "mean",
    "attention_score": "mean",
    "overall_performance": "mean",
}
45
+
46
+
47
@dataclass
class MetricAggregator:
    """Collects raw samples for a single metric and reduces them on demand.

    Attributes:
        values: Raw numeric samples recorded so far.
        agg_type: Reduction to apply ("mean", "min", "max", "sum");
            anything else falls back to mean.
    """
    values: List[float] = field(default_factory=list)
    agg_type: str = "mean"

    def add_value(self, value: float):
        """Record one sample; silently ignores None and non-numeric input."""
        if value is None or not isinstance(value, (int, float)):
            return
        self.values.append(float(value))

    def get_aggregated_value(self) -> Optional[float]:
        """Reduce the collected samples according to ``agg_type``.

        Returns:
            The reduced value, or None when no samples were recorded.
            Unrecognized aggregation types are treated as "mean".
        """
        if not self.values:
            return None
        if self.agg_type == "min":
            return min(self.values)
        if self.agg_type == "max":
            return max(self.values)
        if self.agg_type == "sum":
            return sum(self.values)
        # "mean" — and the documented fallback for any unknown type.
        return sum(self.values) / len(self.values)

    def reset(self):
        """Drop every collected sample."""
        self.values = []

    def has_values(self) -> bool:
        """Return True when at least one sample has been recorded."""
        return len(self.values) > 0
82
+
83
+
84
@dataclass
class CameraMetricsState:
    """Per-camera aggregation state: identity fields plus live aggregators.

    Attributes:
        camera_id: Unique camera identifier (dict key in the manager).
        camera_name / app_deployment_id / application_id / location_id /
            location_name: Identity metadata carried into published messages.
        metrics: One MetricAggregator per metric name.
        last_push_time: Epoch seconds of the last publish (or creation).
    """
    camera_id: str
    camera_name: str = ""
    app_deployment_id: str = ""
    application_id: str = ""
    location_id: str = ""
    location_name: str = ""
    metrics: Dict[str, MetricAggregator] = field(default_factory=dict)
    last_push_time: float = field(default_factory=time.time)

    def add_metric_value(self, metric_name: str, value: float, agg_type: str = "mean"):
        """Route one sample into the named metric, creating its aggregator on first use."""
        aggregator = self.metrics.get(metric_name)
        if aggregator is None:
            aggregator = MetricAggregator(agg_type=agg_type)
            self.metrics[metric_name] = aggregator
        aggregator.add_value(value)

    def get_aggregated_metrics(self) -> Dict[str, Dict[str, Any]]:
        """Snapshot every non-empty metric as {name: {"data": value, "agg_type": type}}."""
        snapshot: Dict[str, Dict[str, Any]] = {}
        for name, aggregator in self.metrics.items():
            if not aggregator.has_values():
                continue
            reduced = aggregator.get_aggregated_value()
            if reduced is None:
                continue
            snapshot[name] = {
                "data": round(reduced, 4),
                "agg_type": aggregator.agg_type,
            }
        return snapshot

    def reset_metrics(self):
        """Clear every aggregator and restart the publish-interval clock."""
        for aggregator in self.metrics.values():
            aggregator.reset()
        self.last_push_time = time.time()

    def has_metrics(self) -> bool:
        """Return True when any aggregator holds at least one sample."""
        return any(agg.has_values() for agg in self.metrics.values())
124
+
125
+
126
class BUSINESS_METRICS_MANAGER:
    """
    Manages business metrics aggregation and publishing.

    Key behaviors:
    - Aggregates business metrics for configurable interval (default 5 minutes)
    - Publishes aggregated metrics to Redis/Kafka topic
    - Supports multiple aggregation types (mean, min, max, sum)
    - Resets all values after publishing
    - Thread-safe operations

    Usage:
        manager = BUSINESS_METRICS_MANAGER(redis_client=..., kafka_client=...)
        manager.start()  # Start aggregation timer
        manager.process_metrics(camera_id, metrics_data, stream_info)
        manager.stop()  # Stop on shutdown
    """

    # Default output topic name. NOTE(review): the constructor takes an
    # ``output_topic`` argument with the same default and all publishing uses
    # ``self.output_topic``; this class attribute appears unused within this
    # class — confirm whether external code (e.g. the factory) reads it.
    OUTPUT_TOPIC = "business_metrics"
145
+
146
    def __init__(
        self,
        redis_client: Optional[Any] = None,
        kafka_client: Optional[Any] = None,
        output_topic: str = "business_metrics",
        aggregation_interval: int = DEFAULT_AGGREGATION_INTERVAL,
        metrics_config: Optional[Dict[str, str]] = None,
        logger: Optional[logging.Logger] = None
    ):
        """
        Initialize BUSINESS_METRICS_MANAGER.

        Args:
            redis_client: MatriceStream instance configured for Redis
            kafka_client: MatriceStream instance configured for Kafka
            output_topic: Topic/stream name for publishing metrics
            aggregation_interval: Interval in seconds for aggregation (default 300 = 5 minutes)
            metrics_config: Dict of metric_name -> aggregation_type
            logger: Python logger instance
        """
        self.redis_client = redis_client
        self.kafka_client = kafka_client
        self.output_topic = output_topic
        self.aggregation_interval = aggregation_interval
        # Falls back to a copy of the defaults so callers can mutate safely.
        self.metrics_config = metrics_config or DEFAULT_METRICS_CONFIG.copy()
        self.logger = logger or logging.getLogger(__name__)

        # Per-camera metrics state tracking: {camera_id: CameraMetricsState}
        self._camera_states: Dict[str, CameraMetricsState] = {}
        # Non-reentrant lock guarding _camera_states; methods must not call
        # other lock-taking methods while holding it.
        self._states_lock = threading.Lock()

        # Timer thread control
        self._timer_thread: Optional[threading.Thread] = None
        self._stop_event = threading.Event()
        self._running = False

        # Store factory reference for fetching camera info
        self._factory_ref: Optional['BusinessMetricsManagerFactory'] = None

        self.logger.info(
            f"[BUSINESS_METRICS_MANAGER] Initialized with output_topic={output_topic}, "
            f"aggregation_interval={aggregation_interval}s"
        )
189
+
190
    def set_factory_ref(self, factory: 'BusinessMetricsManagerFactory'):
        """Set reference to factory for accessing deployment info.

        The factory supplies ``_app_deployment_id`` / ``_application_id``
        (used in process_metrics) and ``_session`` (used by
        _fetch_location_name for API calls).
        """
        self._factory_ref = factory
193
+
194
+ def start(self):
195
+ """Start the background timer thread for periodic publishing."""
196
+ if self._running:
197
+ self.logger.warning("[BUSINESS_METRICS_MANAGER] Already running")
198
+ return
199
+
200
+ self._running = True
201
+ self._stop_event.clear()
202
+ self._timer_thread = threading.Thread(
203
+ target=self._timer_loop,
204
+ daemon=True,
205
+ name="BusinessMetricsTimer"
206
+ )
207
+ self._timer_thread.start()
208
+ self.logger.info("[BUSINESS_METRICS_MANAGER] ✓ Started timer thread")
209
+
210
+ def stop(self):
211
+ """Stop the background timer thread gracefully."""
212
+ if not self._running:
213
+ return
214
+
215
+ self.logger.info("[BUSINESS_METRICS_MANAGER] Stopping...")
216
+ self._running = False
217
+ self._stop_event.set()
218
+
219
+ if self._timer_thread and self._timer_thread.is_alive():
220
+ self._timer_thread.join(timeout=5)
221
+
222
+ self.logger.info("[BUSINESS_METRICS_MANAGER] ✓ Stopped")
223
+
224
    def _timer_loop(self):
        """Background thread that checks and publishes metrics periodically.

        Runs until ``_stop_event`` is set. Each iteration delegates to
        ``_check_and_publish_all`` (which applies the per-camera interval
        check), then sleeps up to 10 seconds in 1-second steps so stop()
        is honored quickly.
        """
        self.logger.info(
            f"[BUSINESS_METRICS_MANAGER] Timer loop started "
            f"(interval: {self.aggregation_interval}s, check_every: 10s)"
        )

        loop_count = 0  # iteration counter, debug logging only
        while not self._stop_event.is_set():
            loop_count += 1
            try:
                self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Timer loop iteration #{loop_count}")
                self._check_and_publish_all()
            except Exception as e:
                # Broad catch keeps the daemon thread alive through
                # transient publish/serialization errors.
                self.logger.error(
                    f"[BUSINESS_METRICS_MANAGER] Error in timer loop: {e}",
                    exc_info=True
                )

            # Sleep in small increments to allow quick shutdown
            # (for intervals < 10s, the check cadence matches the interval).
            for _ in range(min(10, self.aggregation_interval)):
                if self._stop_event.is_set():
                    break
                time.sleep(1)

        self.logger.info("[BUSINESS_METRICS_MANAGER] Timer loop exited")
250
+
251
    def _check_and_publish_all(self):
        """Check all cameras and publish metrics if interval has passed.

        Called from the timer thread. Candidate cameras are selected while
        holding ``_states_lock``; publishing happens after the lock is
        released because ``_publish_camera_metrics`` re-acquires the
        (non-reentrant) lock itself.
        """
        current_time = time.time()
        cameras_to_publish = []

        with self._states_lock:
            num_cameras = len(self._camera_states)
            if num_cameras > 0:
                self.logger.debug(f"[BUSINESS_METRICS_MANAGER] _check_and_publish_all: checking {num_cameras} camera(s)")

            for camera_id, state in self._camera_states.items():
                elapsed = current_time - state.last_push_time
                has_metrics = state.has_metrics()
                metrics_count = sum(len(agg.values) for agg in state.metrics.values())

                self.logger.debug(
                    f"[BUSINESS_METRICS_MANAGER] Camera {camera_id}: elapsed={elapsed:.1f}s, "
                    f"interval={self.aggregation_interval}s, has_metrics={has_metrics}, count={metrics_count}"
                )

                # Publish only when the interval elapsed AND data exists.
                if elapsed >= self.aggregation_interval and has_metrics:
                    cameras_to_publish.append(camera_id)
                    self.logger.info(
                        f"[BUSINESS_METRICS_MANAGER] ✓ Camera {camera_id} ready for publish "
                        f"(elapsed={elapsed:.1f}s >= {self.aggregation_interval}s)"
                    )

        # Lock released: publish each candidate independently so one camera's
        # failure does not block the others.
        if cameras_to_publish:
            self.logger.info(f"[BUSINESS_METRICS_MANAGER] Publishing metrics for {len(cameras_to_publish)} camera(s)")

            for camera_id in cameras_to_publish:
                try:
                    success = self._publish_camera_metrics(camera_id)
                    if success:
                        self.logger.info(f"[BUSINESS_METRICS_MANAGER] ✓ Successfully published metrics for camera: {camera_id}")
                    else:
                        self.logger.warning(f"[BUSINESS_METRICS_MANAGER] ❌ Failed to publish metrics for camera: {camera_id}")
                except Exception as e:
                    self.logger.error(
                        f"[BUSINESS_METRICS_MANAGER] Error publishing metrics for "
                        f"camera {camera_id}: {e}",
                        exc_info=True
                    )
294
+
295
+ def _extract_camera_info_from_stream(
296
+ self,
297
+ stream_info: Optional[Dict[str, Any]]
298
+ ) -> Dict[str, str]:
299
+ """
300
+ Extract camera info from stream_info.
301
+
302
+ Stream info structure example:
303
+ {
304
+ 'broker': 'localhost:9092',
305
+ 'topic': '692d7bde42582ffde3611908_input_topic', # camera_id is here!
306
+ 'stream_time': '2025-12-02-05:09:53.914224 UTC',
307
+ 'camera_info': {
308
+ 'camera_name': 'cusstomer-cam-1',
309
+ 'camera_group': 'staging-customer-1',
310
+ 'location': '6908756db129880c34f2e09a'
311
+ },
312
+ 'frame_id': '...'
313
+ }
314
+
315
+ Args:
316
+ stream_info: Stream metadata from usecase
317
+
318
+ Returns:
319
+ Dict with camera_id, camera_name, app_deployment_id, application_id, location_id
320
+ """
321
+ result = {
322
+ "camera_id": "",
323
+ "camera_name": "",
324
+ "app_deployment_id": "",
325
+ "application_id": "",
326
+ "location_id": ""
327
+ }
328
+
329
+ if not stream_info:
330
+ self.logger.debug("[BUSINESS_METRICS_MANAGER] _extract_camera_info_from_stream: stream_info is None/empty")
331
+ return result
332
+
333
+ self.logger.debug(f"[BUSINESS_METRICS_MANAGER] _extract_camera_info_from_stream: stream_info keys = {list(stream_info.keys())}")
334
+
335
+ try:
336
+ # Try multiple paths to get camera info
337
+ # Path 1: Direct camera_info in stream_info (most common for streaming)
338
+ camera_info = stream_info.get("camera_info", {}) or {}
339
+ self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Direct camera_info = {camera_info}")
340
+
341
+ # Path 2: From input_settings -> input_stream pattern
342
+ input_settings = stream_info.get("input_settings", {}) or {}
343
+ input_stream = input_settings.get("input_stream", {}) or {}
344
+ input_camera_info = input_stream.get("camera_info", {}) or {}
345
+
346
+ # Path 3: From input_streams array
347
+ input_streams = stream_info.get("input_streams", [])
348
+ if input_streams and len(input_streams) > 0:
349
+ input_data = input_streams[0] if isinstance(input_streams[0], dict) else {}
350
+ input_stream_inner = input_data.get("input_stream", input_data)
351
+ input_camera_info = input_stream_inner.get("camera_info", {}) or input_camera_info
352
+
353
+ # Path 4: Extract camera_id from topic field (e.g., "692d7bde42582ffde3611908_input_topic")
354
+ topic = stream_info.get("topic", "")
355
+ camera_id_from_topic = ""
356
+ if topic and "_input_topic" in topic:
357
+ camera_id_from_topic = topic.replace("_input_topic", "").strip()
358
+ self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Extracted camera_id from topic: {camera_id_from_topic}")
359
+
360
+ # Merge all sources, preferring non-empty values
361
+ # camera_name - prefer camera_info.camera_name
362
+ result["camera_name"] = (
363
+ camera_info.get("camera_name", "") or
364
+ input_camera_info.get("camera_name", "") or
365
+ stream_info.get("camera_name", "") or
366
+ input_settings.get("camera_name", "") or
367
+ ""
368
+ )
369
+
370
+ # camera_id - try topic extraction first, then other sources
371
+ result["camera_id"] = (
372
+ camera_id_from_topic or
373
+ camera_info.get("camera_id", "") or
374
+ input_camera_info.get("camera_id", "") or
375
+ stream_info.get("camera_id", "") or
376
+ input_settings.get("camera_id", "") or
377
+ camera_info.get("cameraId", "") or
378
+ input_camera_info.get("cameraId", "") or
379
+ ""
380
+ )
381
+
382
+ # app_deployment_id
383
+ result["app_deployment_id"] = (
384
+ stream_info.get("app_deployment_id", "") or
385
+ stream_info.get("appDeploymentId", "") or
386
+ input_settings.get("app_deployment_id", "") or
387
+ input_settings.get("appDeploymentId", "") or
388
+ camera_info.get("app_deployment_id", "") or
389
+ ""
390
+ )
391
+
392
+ # application_id
393
+ result["application_id"] = (
394
+ stream_info.get("application_id", "") or
395
+ stream_info.get("applicationId", "") or
396
+ input_settings.get("application_id", "") or
397
+ input_settings.get("applicationId", "") or
398
+ camera_info.get("application_id", "") or
399
+ ""
400
+ )
401
+
402
+ # location_id - from camera_info.location
403
+ result["location_id"] = (
404
+ camera_info.get("location", "") or
405
+ camera_info.get("location_id", "") or
406
+ camera_info.get("locationId", "") or
407
+ input_camera_info.get("location", "") or
408
+ input_camera_info.get("location_id", "") or
409
+ ""
410
+ )
411
+
412
+ self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Extracted camera info: {result}")
413
+
414
+ except Exception as e:
415
+ self.logger.error(f"[BUSINESS_METRICS_MANAGER] Error extracting camera info: {e}", exc_info=True)
416
+
417
+ return result
418
+
419
+ def _fetch_location_name(self, location_id: str) -> str:
420
+ """
421
+ Fetch location name from API using location_id.
422
+
423
+ Args:
424
+ location_id: The location ID to look up
425
+
426
+ Returns:
427
+ Location name string, or 'Entry Reception' as default if API fails
428
+ """
429
+ global _location_name_cache
430
+ default_location = "Entry Reception"
431
+
432
+ if not location_id:
433
+ self.logger.debug(f"[BUSINESS_METRICS_MANAGER] No location_id provided, using default: '{default_location}'")
434
+ return default_location
435
+
436
+ # Check cache first
437
+ if location_id in _location_name_cache:
438
+ cached_name = _location_name_cache[location_id]
439
+ self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Using cached location name for '{location_id}': '{cached_name}'")
440
+ return cached_name
441
+
442
+ # Need factory reference with session to make API call
443
+ if not self._factory_ref or not self._factory_ref._session:
444
+ self.logger.warning(f"[BUSINESS_METRICS_MANAGER] No session available for location API, using default: '{default_location}'")
445
+ return default_location
446
+
447
+ try:
448
+ endpoint = f"/v1/inference/get_location/{location_id}"
449
+ self.logger.info(f"[BUSINESS_METRICS_MANAGER] Fetching location name from API: {endpoint}")
450
+
451
+ response = self._factory_ref._session.rpc.get(endpoint)
452
+
453
+ if response and isinstance(response, dict):
454
+ success = response.get("success", False)
455
+ if success:
456
+ data = response.get("data", {})
457
+ location_name = data.get("locationName", default_location)
458
+ self.logger.info(f"[BUSINESS_METRICS_MANAGER] ✓ Fetched location name: '{location_name}' for location_id: '{location_id}'")
459
+
460
+ # Cache the result
461
+ _location_name_cache[location_id] = location_name
462
+ return location_name
463
+ else:
464
+ self.logger.warning(
465
+ f"[BUSINESS_METRICS_MANAGER] API returned success=false for location_id '{location_id}': "
466
+ f"{response.get('message', 'Unknown error')}"
467
+ )
468
+ else:
469
+ self.logger.warning(f"[BUSINESS_METRICS_MANAGER] Invalid response format from API: {response}")
470
+
471
+ except Exception as e:
472
+ self.logger.error(f"[BUSINESS_METRICS_MANAGER] Error fetching location name for '{location_id}': {e}", exc_info=True)
473
+
474
+ # Use default on any failure
475
+ self.logger.info(f"[BUSINESS_METRICS_MANAGER] Using default location name: '{default_location}'")
476
+ _location_name_cache[location_id] = default_location
477
+ return default_location
478
+
479
    def process_metrics(
        self,
        camera_id: str,
        metrics_data: Dict[str, Any],
        stream_info: Optional[Dict[str, Any]] = None
    ) -> bool:
        """
        Process business metrics and add to aggregation.

        This method:
        1. Extracts camera info from stream_info
        2. Adds each metric value to the appropriate aggregator
        3. Checks if aggregation interval has passed and publishes if so

        Locking: ``_states_lock`` is taken three separate times (state
        creation/update, per-value add, publish check) rather than once,
        because ``_publish_camera_metrics`` re-acquires the non-reentrant
        lock itself and so must be called with the lock released.

        Args:
            camera_id: Unique camera identifier
            metrics_data: Business metrics dictionary from usecase
            stream_info: Stream metadata

        Returns:
            True if metrics were published, False otherwise
        """
        try:
            self.logger.debug(f"[BUSINESS_METRICS_MANAGER] ===== process_metrics START =====")
            self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Input camera_id param: {camera_id}")
            self.logger.debug(f"[BUSINESS_METRICS_MANAGER] metrics_data keys: {list(metrics_data.keys()) if metrics_data else 'None'}")

            if not metrics_data or not isinstance(metrics_data, dict):
                self.logger.debug("[BUSINESS_METRICS_MANAGER] Empty or invalid metrics data, skipping")
                return False

            # Extract camera info from stream_info
            camera_info = self._extract_camera_info_from_stream(stream_info)
            self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Extracted camera_info: {camera_info}")

            # Get factory app_deployment_id and application_id if available (from jobParams)
            factory_app_deployment_id = ""
            factory_application_id = ""
            if self._factory_ref:
                factory_app_deployment_id = self._factory_ref._app_deployment_id or ""
                factory_application_id = self._factory_ref._application_id or ""
                self.logger.debug(
                    f"[BUSINESS_METRICS_MANAGER] Factory values - "
                    f"app_deployment_id: {factory_app_deployment_id}, application_id: {factory_application_id}"
                )

            # Use extracted or fallback values
            # Priority: stream_info > factory (from jobParams)
            # NOTE(review): if every source is empty, final_camera_id is ""
            # and state is keyed under "" — confirm that is acceptable.
            final_camera_id = camera_info.get("camera_id") or camera_id or ""
            final_camera_name = camera_info.get("camera_name") or ""
            final_app_deployment_id = camera_info.get("app_deployment_id") or factory_app_deployment_id or ""
            final_application_id = camera_info.get("application_id") or factory_application_id or ""
            final_location_id = camera_info.get("location_id") or ""

            # Fetch location_name from API using location_id
            final_location_name = self._fetch_location_name(final_location_id)

            self.logger.info(
                f"[BUSINESS_METRICS_MANAGER] Final values - camera_id={final_camera_id}, "
                f"camera_name={final_camera_name}, app_deployment_id={final_app_deployment_id}, "
                f"application_id={final_application_id}, location_id={final_location_id}, "
                f"location_name={final_location_name}"
            )

            with self._states_lock:
                # Get or create state for this camera
                if final_camera_id not in self._camera_states:
                    self._camera_states[final_camera_id] = CameraMetricsState(
                        camera_id=final_camera_id,
                        camera_name=final_camera_name,
                        app_deployment_id=final_app_deployment_id,
                        application_id=final_application_id,
                        location_id=final_location_id,
                        location_name=final_location_name
                    )
                    self.logger.info(
                        f"[BUSINESS_METRICS_MANAGER] ✓ Created new state for camera: {final_camera_id}"
                    )

                state = self._camera_states[final_camera_id]

                # Update camera info if we have better values
                # (fill-only: existing non-empty values are never overwritten)
                if final_camera_name and not state.camera_name:
                    state.camera_name = final_camera_name
                    self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Updated camera_name to: {final_camera_name}")
                if final_app_deployment_id and not state.app_deployment_id:
                    state.app_deployment_id = final_app_deployment_id
                    self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Updated app_deployment_id to: {final_app_deployment_id}")
                if final_application_id and not state.application_id:
                    state.application_id = final_application_id
                if final_location_id and not state.location_id:
                    state.location_id = final_location_id
                    self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Updated location_id to: {final_location_id}")
                if final_location_name and not state.location_name:
                    state.location_name = final_location_name
                    self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Updated location_name to: {final_location_name}")

            # Add each metric value to aggregator
            metrics_added = 0
            for metric_name, value in metrics_data.items():
                # Skip non-numeric fields and complex objects
                if metric_name in ["peak_areas", "optimization_opportunities"]:
                    continue

                # Handle area_utilization which is a dict
                if metric_name == "area_utilization" and isinstance(value, dict):
                    # Average all area utilization values
                    area_values = [v for v in value.values() if isinstance(v, (int, float))]
                    if area_values:
                        value = sum(area_values) / len(area_values)
                    else:
                        continue

                # Only process numeric values
                if isinstance(value, (int, float)):
                    agg_type = self.metrics_config.get(metric_name, "mean")
                    # Lock is re-taken per value; `state` itself is safe to
                    # hold here because states are never removed from the map.
                    with self._states_lock:
                        state.add_metric_value(metric_name, value, agg_type)
                    metrics_added += 1

            self.logger.debug(f"[BUSINESS_METRICS_MANAGER] Added {metrics_added} metric values to aggregator")

            # Check if we should publish (interval elapsed)
            current_time = time.time()
            should_publish = False
            elapsed = 0.0
            metrics_count = 0

            with self._states_lock:
                elapsed = current_time - state.last_push_time
                has_metrics = state.has_metrics()
                metrics_count = sum(len(agg.values) for agg in state.metrics.values())

                self.logger.debug(
                    f"[BUSINESS_METRICS_MANAGER] Publish check - elapsed={elapsed:.1f}s, "
                    f"interval={self.aggregation_interval}s, has_metrics={has_metrics}, "
                    f"total_values_count={metrics_count}"
                )

                if elapsed >= self.aggregation_interval and has_metrics:
                    should_publish = True
                    self.logger.info(
                        f"[BUSINESS_METRICS_MANAGER] ✓ PUBLISH CONDITION MET! "
                        f"elapsed={elapsed:.1f}s >= interval={self.aggregation_interval}s"
                    )
                else:
                    remaining = self.aggregation_interval - elapsed
                    self.logger.debug(
                        f"[BUSINESS_METRICS_MANAGER] Not publishing yet. "
                        f"Remaining time: {remaining:.1f}s, metrics_count={metrics_count}"
                    )

            # Publish outside the lock: _publish_camera_metrics re-acquires it.
            if should_publish:
                self.logger.info(f"[BUSINESS_METRICS_MANAGER] Triggering publish for camera: {final_camera_id}")
                return self._publish_camera_metrics(final_camera_id)

            self.logger.debug(f"[BUSINESS_METRICS_MANAGER] ===== process_metrics END (no publish) =====")
            return False

        except Exception as e:
            self.logger.error(
                f"[BUSINESS_METRICS_MANAGER] Error processing metrics: {e}",
                exc_info=True
            )
            return False
644
+
645
    def _publish_camera_metrics(self, camera_id: str) -> bool:
        """
        Publish aggregated metrics for a specific camera.

        The message is built and the aggregators reset while holding
        ``_states_lock``; the Redis/Kafka sends happen after the lock is
        released. Redis is the primary transport; Kafka is the fallback.

        NOTE(review): aggregators are reset BEFORE transport success is
        known, so if both transports fail this interval's data is lost —
        confirm that is the intended trade-off.

        Args:
            camera_id: Camera identifier

        Returns:
            True if published successfully, False otherwise
        """
        self.logger.info(f"[BUSINESS_METRICS_MANAGER] ========== PUBLISHING METRICS ==========")

        try:
            with self._states_lock:
                if camera_id not in self._camera_states:
                    self.logger.warning(
                        f"[BUSINESS_METRICS_MANAGER] No state found for camera: {camera_id}"
                    )
                    return False

                state = self._camera_states[camera_id]

                if not state.has_metrics():
                    self.logger.debug(
                        f"[BUSINESS_METRICS_MANAGER] No metrics to publish for camera: {camera_id}"
                    )
                    return False

                # Build the message
                aggregated_metrics = state.get_aggregated_metrics()

                message = {
                    "camera_id": state.camera_id,
                    "camera_name": state.camera_name,
                    "app_deployment_id": state.app_deployment_id,
                    "application_id": state.application_id,
                    "location_name": state.location_name,
                    "business_metrics": aggregated_metrics,
                    "timestamp": datetime.now(timezone.utc).isoformat(),
                    "aggregation_interval_seconds": self.aggregation_interval
                }

                # Reset metrics after building message (inside lock)
                state.reset_metrics()

            self.logger.info(
                f"[BUSINESS_METRICS_MANAGER] Built metrics message: "
                f"{json.dumps(message, default=str)[:500]}..."
            )

            success = False

            # Try Redis first (primary)
            if self.redis_client:
                try:
                    self.logger.debug(
                        f"[BUSINESS_METRICS_MANAGER] Publishing to Redis stream: {self.output_topic}"
                    )
                    self._publish_to_redis(self.output_topic, message)
                    self.logger.info(
                        f"[BUSINESS_METRICS_MANAGER] ✓ Metrics published to Redis"
                    )
                    success = True
                except Exception as e:
                    self.logger.error(
                        f"[BUSINESS_METRICS_MANAGER] ❌ Redis publish failed: {e}",
                        exc_info=True
                    )

            # Fallback to Kafka if Redis failed or no Redis client
            if not success and self.kafka_client:
                try:
                    self.logger.debug(
                        f"[BUSINESS_METRICS_MANAGER] Publishing to Kafka topic: {self.output_topic}"
                    )
                    self._publish_to_kafka(self.output_topic, message)
                    self.logger.info(
                        f"[BUSINESS_METRICS_MANAGER] ✓ Metrics published to Kafka"
                    )
                    success = True
                except Exception as e:
                    self.logger.error(
                        f"[BUSINESS_METRICS_MANAGER] ❌ Kafka publish failed: {e}",
                        exc_info=True
                    )

            if success:
                self.logger.info(f"[BUSINESS_METRICS_MANAGER] ========== METRICS PUBLISHED ==========")
            else:
                self.logger.error(
                    f"[BUSINESS_METRICS_MANAGER] ❌ METRICS NOT PUBLISHED (both transports failed)"
                )

            return success

        except Exception as e:
            self.logger.error(
                f"[BUSINESS_METRICS_MANAGER] Error publishing metrics: {e}",
                exc_info=True
            )
            return False
746
+
747
+ def _publish_to_redis(self, topic: str, message: Dict[str, Any]):
748
+ """Publish message to Redis stream."""
749
+ try:
750
+ self.redis_client.add_message(
751
+ topic_or_channel=topic,
752
+ message=json.dumps(message),
753
+ key=message.get("camera_id", "")
754
+ )
755
+ except Exception as e:
756
+ self.logger.error(f"[BUSINESS_METRICS_MANAGER] Redis publish error: {e}")
757
+ raise
758
+
759
+ def _publish_to_kafka(self, topic: str, message: Dict[str, Any]):
760
+ """Publish message to Kafka topic."""
761
+ try:
762
+ self.kafka_client.add_message(
763
+ topic_or_channel=topic,
764
+ message=json.dumps(message),
765
+ key=message.get("camera_id", "")
766
+ )
767
+ except Exception as e:
768
+ self.logger.error(f"[BUSINESS_METRICS_MANAGER] Kafka publish error: {e}")
769
+ raise
770
+
771
+ def reset_camera_state(self, camera_id: str):
772
+ """Reset metrics state for a specific camera."""
773
+ with self._states_lock:
774
+ if camera_id in self._camera_states:
775
+ self._camera_states[camera_id].reset_metrics()
776
+ self.logger.info(f"[BUSINESS_METRICS_MANAGER] Reset state for camera: {camera_id}")
777
+
778
+ def get_camera_state(self, camera_id: str) -> Optional[Dict[str, Any]]:
779
+ """Get current metrics state for a camera (for debugging)."""
780
+ with self._states_lock:
781
+ state = self._camera_states.get(camera_id)
782
+ if state:
783
+ return {
784
+ "camera_id": state.camera_id,
785
+ "camera_name": state.camera_name,
786
+ "app_deployment_id": state.app_deployment_id,
787
+ "application_id": state.application_id,
788
+ "location_id": state.location_id,
789
+ "location_name": state.location_name,
790
+ "metrics_count": {
791
+ name: len(agg.values)
792
+ for name, agg in state.metrics.items()
793
+ },
794
+ "last_push_time": state.last_push_time,
795
+ "seconds_since_push": time.time() - state.last_push_time
796
+ }
797
+ return None
798
+
799
+ def get_all_camera_states(self) -> Dict[str, Dict[str, Any]]:
800
+ """Get all camera states for debugging/monitoring."""
801
+ with self._states_lock:
802
+ return {
803
+ cam_id: {
804
+ "camera_id": state.camera_id,
805
+ "camera_name": state.camera_name,
806
+ "location_name": state.location_name,
807
+ "metrics_count": {
808
+ name: len(agg.values)
809
+ for name, agg in state.metrics.items()
810
+ },
811
+ "last_push_time": state.last_push_time,
812
+ "seconds_since_push": time.time() - state.last_push_time
813
+ }
814
+ for cam_id, state in self._camera_states.items()
815
+ }
816
+
817
+ def force_publish_all(self) -> int:
818
+ """Force publish all cameras with pending metrics. Returns count published."""
819
+ published_count = 0
820
+ # Collect camera IDs with pending metrics without holding the lock during publish
821
+ with self._states_lock:
822
+ camera_ids = [cam_id for cam_id, state in self._camera_states.items() if state.has_metrics()]
823
+ for camera_id in camera_ids:
824
+ if self._publish_camera_metrics(camera_id):
825
+ published_count += 1
826
+ return published_count
827
+
828
+ def set_metrics_config(self, metrics_config: Dict[str, str]):
829
+ """
830
+ Set aggregation type configuration for metrics.
831
+
832
+ Args:
833
+ metrics_config: Dict of metric_name -> aggregation_type
834
+ """
835
+ self.metrics_config = metrics_config
836
+ self.logger.info(f"[BUSINESS_METRICS_MANAGER] Updated metrics config: {metrics_config}")
837
+
838
+ def set_aggregation_interval(self, interval_seconds: int):
839
+ """
840
+ Set the aggregation interval.
841
+
842
+ Args:
843
+ interval_seconds: New interval in seconds
844
+ """
845
+ self.aggregation_interval = interval_seconds
846
+ self.logger.info(
847
+ f"[BUSINESS_METRICS_MANAGER] Updated aggregation interval to {interval_seconds}s"
848
+ )
849
+
850
+
851
class BusinessMetricsManagerFactory:
    """
    Factory class for creating BUSINESS_METRICS_MANAGER instances.

    Handles session initialization and Redis/Kafka client creation
    following the same pattern as IncidentManagerFactory.
    """

    # Action IDs look like lowercase/uppercase hex strings of at least 8 chars
    # (e.g. a MongoDB-style object id used as the working-directory name).
    ACTION_ID_PATTERN = re.compile(r"^[0-9a-f]{8,}$", re.IGNORECASE)

    def __init__(self, logger: Optional[logging.Logger] = None):
        """Create an uninitialized factory; call initialize() to build the manager."""
        self.logger = logger or logging.getLogger(__name__)
        self._initialized = False
        self._business_metrics_manager: Optional[BUSINESS_METRICS_MANAGER] = None

        # Store these for later access
        self._session = None
        self._action_id: Optional[str] = None
        self._instance_id: Optional[str] = None
        self._deployment_id: Optional[str] = None
        self._app_deployment_id: Optional[str] = None
        self._application_id: Optional[str] = None  # Store application_id from jobParams
        self._external_ip: Optional[str] = None

    def initialize(
        self,
        config: Any,
        aggregation_interval: int = DEFAULT_AGGREGATION_INTERVAL,
        metrics_config: Optional[Dict[str, str]] = None
    ) -> Optional[BUSINESS_METRICS_MANAGER]:
        """
        Initialize and return BUSINESS_METRICS_MANAGER with Redis/Kafka clients.

        This follows the same pattern as IncidentManagerFactory for
        session initialization and Redis/Kafka client creation.

        Note: this method is idempotent but one-shot — `_initialized` is set
        to True even on failure paths, so a failed bootstrap is not retried
        on subsequent calls (they return the cached result, possibly None).

        Args:
            config: Configuration object with session, server_id, etc.
            aggregation_interval: Interval in seconds for aggregation (default 300)
            metrics_config: Dict of metric_name -> aggregation_type

        Returns:
            BUSINESS_METRICS_MANAGER instance or None if initialization failed
        """
        # Fast path: reuse an already-built manager.
        if self._initialized and self._business_metrics_manager is not None:
            self.logger.debug(
                "[BUSINESS_METRICS_MANAGER_FACTORY] Already initialized, returning existing instance"
            )
            return self._business_metrics_manager

        try:
            # Import required modules (kept local so the module imports even
            # when matrice_common is absent; failure lands in ImportError below).
            from matrice_common.stream.matrice_stream import MatriceStream, StreamType
            from matrice_common.session import Session

            self.logger.info("[BUSINESS_METRICS_MANAGER_FACTORY] ===== STARTING INITIALIZATION =====")

            # Get or create session: prefer the one on config, else build one
            # from MATRICE_* environment variables.
            self._session = getattr(config, 'session', None)
            if not self._session:
                self.logger.info(
                    "[BUSINESS_METRICS_MANAGER_FACTORY] No session in config, creating from environment..."
                )
                account_number = os.getenv("MATRICE_ACCOUNT_NUMBER", "")
                access_key_id = os.getenv("MATRICE_ACCESS_KEY_ID", "")
                secret_key = os.getenv("MATRICE_SECRET_ACCESS_KEY", "")
                project_id = os.getenv("MATRICE_PROJECT_ID", "")

                # Log presence only — never the credential values themselves.
                self.logger.debug(
                    f"[BUSINESS_METRICS_MANAGER_FACTORY] Env vars - "
                    f"account: {'SET' if account_number else 'NOT SET'}, "
                    f"access_key: {'SET' if access_key_id else 'NOT SET'}, "
                    f"secret: {'SET' if secret_key else 'NOT SET'}"
                )

                self._session = Session(
                    account_number=account_number,
                    access_key=access_key_id,
                    secret_key=secret_key,
                    project_id=project_id,
                )
                self.logger.info("[BUSINESS_METRICS_MANAGER_FACTORY] ✓ Created session from environment")
            else:
                self.logger.info("[BUSINESS_METRICS_MANAGER_FACTORY] ✓ Using session from config")

            rpc = self._session.rpc

            # Discover action_id from the filesystem (working dir / /usr/src).
            self._action_id = self._discover_action_id()
            if not self._action_id:
                self.logger.error("[BUSINESS_METRICS_MANAGER_FACTORY] ❌ Could not discover action_id")
                print("----- BUSINESS METRICS MANAGER ACTION DISCOVERY -----")
                print("action_id: NOT FOUND")
                print("------------------------------------------------------")
                self._initialized = True
                return None

            self.logger.info(f"[BUSINESS_METRICS_MANAGER_FACTORY] ✓ Discovered action_id: {self._action_id}")

            # Fetch action details via RPC; tolerate several key spellings
            # since the backend payload casing has varied across versions.
            action_details = {}
            try:
                action_url = f"/v1/actions/action/{self._action_id}/details"
                action_resp = rpc.get(action_url)
                if not (action_resp and action_resp.get("success", False)):
                    raise RuntimeError(
                        action_resp.get("message", "Unknown error")
                        if isinstance(action_resp, dict) else "Unknown error"
                    )
                action_doc = action_resp.get("data", {}) if isinstance(action_resp, dict) else {}
                action_details = action_doc.get("actionDetails", {}) if isinstance(action_doc, dict) else {}

                # IMPORTANT: jobParams contains application_id
                # Structure: response['data']['jobParams']['application_id']
                job_params = action_doc.get("jobParams", {}) if isinstance(action_doc, dict) else {}

                # Extract server details
                server_id = (
                    action_details.get("serverId")
                    or action_details.get("server_id")
                    or action_details.get("serverID")
                    or action_details.get("redis_server_id")
                    or action_details.get("kafka_server_id")
                )
                server_type = (
                    action_details.get("serverType")
                    or action_details.get("server_type")
                    or action_details.get("type")
                )

                # Store identifiers
                self._deployment_id = action_details.get("_idDeployment") or action_details.get("deployment_id")

                # app_deployment_id: check actionDetails first, then jobParams
                self._app_deployment_id = (
                    action_details.get("app_deployment_id") or
                    action_details.get("appDeploymentId") or
                    action_details.get("app_deploymentId") or
                    job_params.get("app_deployment_id") or
                    job_params.get("appDeploymentId") or
                    job_params.get("app_deploymentId") or
                    ""
                )

                # application_id: PRIMARILY from jobParams (this is where it lives!)
                # response['data']['jobParams'].get('application_id', '')
                self._application_id = (
                    job_params.get("application_id") or
                    job_params.get("applicationId") or
                    job_params.get("app_id") or
                    job_params.get("appId") or
                    action_details.get("application_id") or
                    action_details.get("applicationId") or
                    ""
                )

                self._instance_id = action_details.get("instanceID") or action_details.get("instanceId")
                self._external_ip = action_details.get("externalIP") or action_details.get("externalIp")

                # Human-readable stdout dump (in addition to logger) for
                # container-log debugging.
                print("----- BUSINESS METRICS MANAGER ACTION DETAILS -----")
                print(f"action_id: {self._action_id}")
                print(f"server_type: {server_type}")
                print(f"server_id: {server_id}")
                print(f"deployment_id: {self._deployment_id}")
                print(f"app_deployment_id: {self._app_deployment_id}")
                print(f"application_id: {self._application_id}")
                print(f"instance_id: {self._instance_id}")
                print(f"external_ip: {self._external_ip}")
                print(f"jobParams keys: {list(job_params.keys()) if job_params else []}")
                print("----------------------------------------------------")

                self.logger.info(
                    f"[BUSINESS_METRICS_MANAGER_FACTORY] Action details - server_type={server_type}, "
                    f"instance_id={self._instance_id}, "
                    f"app_deployment_id={self._app_deployment_id}, application_id={self._application_id}"
                )

                # Log all available keys for debugging
                self.logger.debug(f"[BUSINESS_METRICS_MANAGER_FACTORY] actionDetails keys: {list(action_details.keys())}")
                self.logger.debug(f"[BUSINESS_METRICS_MANAGER_FACTORY] jobParams keys: {list(job_params.keys()) if job_params else []}")

            except Exception as e:
                # Without action details we cannot pick a transport — bail out.
                self.logger.error(
                    f"[BUSINESS_METRICS_MANAGER_FACTORY] ❌ Failed to fetch action details: {e}",
                    exc_info=True
                )
                print("----- BUSINESS METRICS MANAGER ACTION DETAILS ERROR -----")
                print(f"action_id: {self._action_id}")
                print(f"error: {e}")
                print("---------------------------------------------------------")
                self._initialized = True
                return None

            # Determine localhost vs cloud using externalIP from action_details
            is_localhost = False
            public_ip = self._get_public_ip()

            # Get server host from action_details (many historical spellings).
            server_host = (
                action_details.get("externalIP")
                or action_details.get("external_IP")
                or action_details.get("externalip")
                or action_details.get("external_ip")
                or action_details.get("externalIp")
                or action_details.get("external_Ip")
            )
            print(f"server_host: {server_host}")
            self.logger.info(f"[BUSINESS_METRICS_MANAGER_FACTORY] DEBUG - server_host: {server_host}")

            # NOTE: the decision key is server_host; public_ip is logged for
            # context only and does not affect the branch taken.
            localhost_indicators = ["localhost", "127.0.0.1", "0.0.0.0"]
            if server_host in localhost_indicators:
                is_localhost = True
                self.logger.info(
                    f"[BUSINESS_METRICS_MANAGER_FACTORY] Detected Localhost environment "
                    f"(Public IP={public_ip}, Server IP={server_host})"
                )
            else:
                is_localhost = False
                self.logger.info(
                    f"[BUSINESS_METRICS_MANAGER_FACTORY] Detected Cloud environment "
                    f"(Public IP={public_ip}, Server IP={server_host})"
                )

            redis_client = None
            kafka_client = None

            # STRICT SWITCH: Only Redis if localhost, Only Kafka if cloud
            if is_localhost:
                # Initialize Redis client (ONLY) using instance_id
                if not self._instance_id:
                    self.logger.error(
                        "[BUSINESS_METRICS_MANAGER_FACTORY] ❌ Localhost mode but instance_id missing"
                    )
                else:
                    try:
                        url = f"/v1/actions/get_redis_server_by_instance_id/{self._instance_id}"
                        self.logger.info(
                            f"[BUSINESS_METRICS_MANAGER_FACTORY] Fetching Redis server info "
                            f"for instance: {self._instance_id}"
                        )
                        response = rpc.get(url)

                        if isinstance(response, dict) and response.get("success", False):
                            data = response.get("data", {})
                            host = data.get("host")
                            port = data.get("port")
                            username = data.get("username")
                            password = data.get("password", "")
                            db_index = data.get("db", 0)
                            conn_timeout = data.get("connection_timeout", 120)

                            # Password is masked in the stdout dump below.
                            print("----- BUSINESS METRICS MANAGER REDIS SERVER PARAMS -----")
                            print(f"instance_id: {self._instance_id}")
                            print(f"host: {host}")
                            print(f"port: {port}")
                            print(f"username: {username}")
                            print(f"password: {'*' * len(password) if password else ''}")
                            print(f"db: {db_index}")
                            print(f"connection_timeout: {conn_timeout}")
                            print("--------------------------------------------------------")

                            self.logger.info(
                                f"[BUSINESS_METRICS_MANAGER_FACTORY] Redis params - "
                                f"host={host}, port={port}, user={username}"
                            )

                            redis_client = MatriceStream(
                                StreamType.REDIS,
                                host=host,
                                port=int(port),
                                password=password,
                                username=username,
                                db=db_index,
                                connection_timeout=conn_timeout
                            )
                            # Setup for metrics publishing
                            redis_client.setup("business_metrics")
                            self.logger.info("[BUSINESS_METRICS_MANAGER_FACTORY] ✓ Redis client initialized")
                        else:
                            self.logger.warning(
                                f"[BUSINESS_METRICS_MANAGER_FACTORY] Failed to fetch Redis server info: "
                                f"{response.get('message', 'Unknown error') if isinstance(response, dict) else 'Unknown error'}"
                            )
                    except Exception as e:
                        # Best-effort: a transport failure downgrades to
                        # "no manager" rather than crashing initialization.
                        self.logger.warning(
                            f"[BUSINESS_METRICS_MANAGER_FACTORY] Redis initialization failed: {e}"
                        )

            else:
                # Initialize Kafka client (ONLY) using global info endpoint
                try:
                    url = f"/v1/actions/get_kafka_info"
                    self.logger.info(
                        "[BUSINESS_METRICS_MANAGER_FACTORY] Fetching Kafka server info for Cloud mode"
                    )
                    response = rpc.get(url)

                    if isinstance(response, dict) and response.get("success", False):
                        data = response.get("data", {})
                        enc_ip = data.get("ip")
                        enc_port = data.get("port")

                        # Decode base64 encoded values; fall back to the raw
                        # value when it is not valid base64.
                        ip_addr = None
                        port = None
                        try:
                            ip_addr = base64.b64decode(str(enc_ip)).decode("utf-8")
                        except Exception:
                            ip_addr = enc_ip
                        try:
                            port = base64.b64decode(str(enc_port)).decode("utf-8")
                        except Exception:
                            port = enc_port

                        print("----- BUSINESS METRICS MANAGER KAFKA SERVER PARAMS -----")
                        print(f"ipAddress: {ip_addr}")
                        print(f"port: {port}")
                        print("--------------------------------------------------------")

                        self.logger.info(
                            f"[BUSINESS_METRICS_MANAGER_FACTORY] Kafka params - ip={ip_addr}, port={port}"
                        )

                        bootstrap_servers = f"{ip_addr}:{port}"
                        kafka_client = MatriceStream(
                            StreamType.KAFKA,
                            bootstrap_servers=bootstrap_servers,
                            sasl_mechanism="SCRAM-SHA-256",
                            sasl_username="matrice-sdk-user",
                            sasl_password="matrice-sdk-password",
                            security_protocol="SASL_PLAINTEXT"
                        )
                        # Setup for metrics publishing (producer-only; no consumer group needed)
                        kafka_client.setup("business_metrics")
                        self.logger.info(
                            f"[BUSINESS_METRICS_MANAGER_FACTORY] ✓ Kafka client initialized "
                            f"(servers={bootstrap_servers})"
                        )
                    else:
                        self.logger.warning(
                            f"[BUSINESS_METRICS_MANAGER_FACTORY] Failed to fetch Kafka server info: "
                            f"{response.get('message', 'Unknown error') if isinstance(response, dict) else 'Unknown error'}"
                        )
                except Exception as e:
                    self.logger.warning(
                        f"[BUSINESS_METRICS_MANAGER_FACTORY] Kafka initialization failed: {e}"
                    )

            # Create business metrics manager if we have at least one transport
            if redis_client or kafka_client:
                self._business_metrics_manager = BUSINESS_METRICS_MANAGER(
                    redis_client=redis_client,
                    kafka_client=kafka_client,
                    output_topic="business_metrics",
                    aggregation_interval=aggregation_interval,
                    metrics_config=metrics_config or DEFAULT_METRICS_CONFIG.copy(),
                    logger=self.logger
                )
                # Set factory reference for accessing deployment info
                self._business_metrics_manager.set_factory_ref(self)
                # Start the timer thread
                self._business_metrics_manager.start()

                transport = "Redis" if redis_client else "Kafka"
                self.logger.info(
                    f"[BUSINESS_METRICS_MANAGER_FACTORY] ✓ Business metrics manager created with {transport}"
                )
                print(f"----- BUSINESS METRICS MANAGER INITIALIZED ({transport}) -----")
            else:
                self.logger.warning(
                    f"[BUSINESS_METRICS_MANAGER_FACTORY] No {'Redis' if is_localhost else 'Kafka'} client available, "
                    f"business metrics manager not created"
                )

            self._initialized = True
            self.logger.info("[BUSINESS_METRICS_MANAGER_FACTORY] ===== INITIALIZATION COMPLETE =====")
            return self._business_metrics_manager

        except ImportError as e:
            # matrice_common not installed — metrics publishing is disabled.
            self.logger.error(f"[BUSINESS_METRICS_MANAGER_FACTORY] Import error: {e}")
            self._initialized = True
            return None
        except Exception as e:
            self.logger.error(
                f"[BUSINESS_METRICS_MANAGER_FACTORY] Initialization failed: {e}",
                exc_info=True
            )
            self._initialized = True
            return None

    def _discover_action_id(self) -> Optional[str]:
        """Discover action_id from current working directory name (and parents).

        Candidates are gathered from cwd, its parents, and /usr/src children;
        the first hex-looking name (see ACTION_ID_PATTERN) wins. Returns None
        when nothing matches.
        """
        try:
            candidates: List[str] = []

            try:
                cwd = Path.cwd()
                candidates.append(cwd.name)
                for parent in cwd.parents:
                    candidates.append(parent.name)
            except Exception:
                pass

            try:
                usr_src = Path("/usr/src")
                if usr_src.exists():
                    for child in usr_src.iterdir():
                        if child.is_dir():
                            candidates.append(child.name)
            except Exception:
                pass

            # NOTE(review): len(candidate) >= 8 is redundant with the {8,}
            # quantifier in ACTION_ID_PATTERN, but kept for clarity/safety.
            for candidate in candidates:
                if candidate and len(candidate) >= 8 and self.ACTION_ID_PATTERN.match(candidate):
                    return candidate
        except Exception:
            pass
        return None

    def _get_public_ip(self) -> str:
        """Get the public IP address of this machine.

        Queries https://v4.ident.me; returns the literal string "localhost"
        when the lookup fails (offline/air-gapped hosts).
        """
        self.logger.info("[BUSINESS_METRICS_MANAGER_FACTORY] Fetching public IP address...")
        try:
            public_ip = urllib.request.urlopen(
                "https://v4.ident.me", timeout=120
            ).read().decode("utf8").strip()
            self.logger.debug(f"[BUSINESS_METRICS_MANAGER_FACTORY] Public IP: {public_ip}")
            return public_ip
        except Exception as e:
            self.logger.warning(f"[BUSINESS_METRICS_MANAGER_FACTORY] Error fetching public IP: {e}")
            return "localhost"

    def _get_backend_base_url(self) -> str:
        """Resolve backend base URL based on ENV variable.

        Unrecognized ENV values fall through to the staging backend.
        """
        env = os.getenv("ENV", "prod").strip().lower()
        if env in ("prod", "production"):
            host = "prod.backend.app.matrice.ai"
        elif env in ("dev", "development"):
            host = "dev.backend.app.matrice.ai"
        else:
            host = "staging.backend.app.matrice.ai"
        return f"https://{host}"

    @property
    def is_initialized(self) -> bool:
        """True once initialize() has run (successfully or not)."""
        return self._initialized

    @property
    def business_metrics_manager(self) -> Optional[BUSINESS_METRICS_MANAGER]:
        """The manager built by initialize(), or None if none was created."""
        return self._business_metrics_manager
1301
+
1302
+
1303
# Module-level singleton factory backing get_business_metrics_manager();
# created lazily on first call and reused for the life of the process.
_default_factory: Optional[BusinessMetricsManagerFactory] = None
1305
+
1306
+
1307
def get_business_metrics_manager(
    config: Any,
    logger: Optional[logging.Logger] = None,
    aggregation_interval: int = DEFAULT_AGGREGATION_INTERVAL,
    metrics_config: Optional[Dict[str, str]] = None
) -> Optional[BUSINESS_METRICS_MANAGER]:
    """
    Get or create BUSINESS_METRICS_MANAGER instance.

    Convenience wrapper around a lazily-created module-level
    BusinessMetricsManagerFactory; use the factory class directly when
    finer control is required. Note that `logger` only takes effect on
    the very first call (when the factory is created).

    Args:
        config: Configuration object with session, server_id, etc.
        logger: Logger instance
        aggregation_interval: Interval in seconds for aggregation (default 300)
        metrics_config: Dict of metric_name -> aggregation_type

    Returns:
        BUSINESS_METRICS_MANAGER instance or None
    """
    global _default_factory

    factory = _default_factory
    if factory is None:
        factory = BusinessMetricsManagerFactory(logger=logger)
        _default_factory = factory

    return factory.initialize(
        config,
        aggregation_interval=aggregation_interval,
        metrics_config=metrics_config,
    )
1338
+