matrice-inference 0.1.2__py3-none-any.whl → 0.1.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of matrice-inference might be problematic; see the package registry's advisory page for more details.

@@ -1,20 +1,60 @@
1
1
  # Import moved to method where it's needed to avoid circular imports
2
- from matrice_inference.server.stream.utils import CameraConfig, StreamMessage
3
2
  import asyncio
4
3
  import json
5
- import time
6
4
  import logging
7
- import threading
8
5
  import queue
6
+ import threading
7
+ import time
8
+ import base64
9
+ import copy
10
+ import cv2
9
11
  from datetime import datetime, timezone
10
- import logging
12
+ from typing import Dict, Any, Optional
13
+ from matrice_inference.server.stream.utils import CameraConfig, StreamMessage
11
14
 
12
15
  class ConsumerWorker:
13
- """Handles message consumption from streams."""
16
+ """Handles message consumption from streams with optimized processing.
17
+
18
+ Frame ID Management:
19
+ -------------------
20
+ This worker ensures frame_id uniqueness and consistency throughout the pipeline:
14
21
 
15
- def __init__(self, camera_id: str, worker_id: int, stream_config: dict, input_topic: str,
16
- inference_queue: queue.PriorityQueue, message_timeout: float,
17
- camera_config: CameraConfig):
22
+ 1. Frame ID Source Priority:
23
+ - Upstream frame_id (from message data) - preferred
24
+ - Message key (if UUID-like)
25
+ - Generated unique ID (camera_id + worker_id + uuid4)
26
+
27
+ 2. Frame Caching:
28
+ - Frames are cached to Redis using: stream:frames:{frame_id}
29
+ - frame_id MUST be unique across all apps and cameras
30
+ - The same frame_id is used throughout the entire pipeline
31
+
32
+ 3. Multi-App Safety:
33
+ - Each app deployment has unique camera_ids
34
+ - Generated IDs include camera_id + worker_id + uuid4 for uniqueness
35
+ - Redis prefix ensures isolation between different frame types
36
+
37
+ 4. Frame ID Flow:
38
+ Consumer → Inference → Post-Processing → Producer
39
+ The frame_id is preserved in task_data["frame_id"] at each stage
40
+ and included in the final output message for client retrieval.
41
+ """
42
+
43
+ DEFAULT_PRIORITY = 1
44
+ DEFAULT_DB = 0
45
+ DEFAULT_CONNECTION_TIMEOUT = 120
46
+
47
+ def __init__(
48
+ self,
49
+ camera_id: str,
50
+ worker_id: int,
51
+ stream_config: Dict[str, Any],
52
+ input_topic: str,
53
+ inference_queue: queue.PriorityQueue,
54
+ message_timeout: float,
55
+ camera_config: CameraConfig,
56
+ frame_cache: Optional[Any] = None
57
+ ):
18
58
  self.camera_id = camera_id
19
59
  self.worker_id = worker_id
20
60
  self.stream_config = stream_config
@@ -23,179 +63,488 @@ class ConsumerWorker:
23
63
  self.message_timeout = message_timeout
24
64
  self.camera_config = camera_config
25
65
  self.running = False
26
- self.stream = None # Will be created in worker thread's event loop
66
+ self.stream: Optional[Any] = None
27
67
  self.logger = logging.getLogger(f"{__name__}.consumer.{camera_id}.{worker_id}")
68
+ # H.265 stream decoder instance (initialized lazily per worker)
69
+ self._h265_stream_decoder = None
70
+ # Optional frame cache for low-latency caching at ingestion
71
+ self.frame_cache = frame_cache
28
72
 
29
- def start(self):
73
+ def start(self) -> threading.Thread:
30
74
  """Start the consumer worker in a separate thread."""
31
75
  self.running = True
32
- thread = threading.Thread(target=self._run, name=f"Consumer-{self.camera_id}-{self.worker_id}", daemon=False)
76
+ thread = threading.Thread(
77
+ target=self._run,
78
+ name=f"Consumer-{self.camera_id}-{self.worker_id}",
79
+ daemon=False
80
+ )
33
81
  thread.start()
34
82
  return thread
35
83
 
36
84
  def stop(self):
37
85
  """Stop the consumer worker."""
38
86
  self.running = False
87
+ try:
88
+ if self._h265_stream_decoder is not None:
89
+ self._h265_stream_decoder.stop()
90
+ except Exception:
91
+ pass
39
92
 
40
- def _run(self):
41
- """Main consumer loop."""
42
- # Create a new event loop for this worker thread
93
+ def _run(self) -> None:
94
+ """Main consumer loop with proper resource management."""
43
95
  loop = asyncio.new_event_loop()
44
96
  asyncio.set_event_loop(loop)
45
-
97
+
46
98
  self.logger.info(f"Started consumer worker for camera {self.camera_id}")
47
-
99
+
48
100
  try:
49
- # Initialize stream in this event loop
50
101
  loop.run_until_complete(self._initialize_stream())
51
-
52
- while self.running and self.camera_config.enabled:
53
- try:
54
- # Get message from stream
55
- message_data = loop.run_until_complete(
56
- self._get_message_safely()
57
- )
58
-
59
- if not message_data:
60
- continue
61
-
62
- # Parse and create task
63
- self._process_message(message_data)
64
-
65
- except Exception as e:
66
- self.logger.error(f"Consumer error: {e}")
67
- time.sleep(1.0)
68
-
102
+ self._consume_messages(loop)
103
+ except Exception as e:
104
+ self.logger.error(f"Fatal error in consumer worker: {e}")
69
105
  finally:
70
- # Clean up stream
71
- if self.stream:
72
- try:
73
- loop.run_until_complete(self.stream.async_close())
74
- except Exception as e:
75
- self.logger.error(f"Error closing stream: {e}")
106
+ self._cleanup_resources(loop)
107
+
108
+ def _consume_messages(self, loop: asyncio.AbstractEventLoop) -> None:
109
+ """Main message consumption loop."""
110
+ while self.running and self.camera_config.enabled:
111
+ try:
112
+ message_data = loop.run_until_complete(self._get_message_safely())
113
+ if message_data:
114
+ self._process_message(message_data)
115
+ except Exception as e:
116
+ self.logger.error(f"Error processing message: {e}")
117
+ time.sleep(1.0)
118
+
119
+ def _cleanup_resources(self, loop: asyncio.AbstractEventLoop) -> None:
120
+ """Clean up stream and event loop resources."""
121
+ if self.stream:
122
+ try:
123
+ loop.run_until_complete(self.stream.async_close())
124
+ except Exception as e:
125
+ self.logger.error(f"Error closing stream: {e}")
126
+
127
+ try:
76
128
  loop.close()
77
- self.logger.info(f"Consumer worker stopped for camera {self.camera_id}")
129
+ except Exception as e:
130
+ self.logger.error(f"Error closing event loop: {e}")
78
131
 
79
- async def _initialize_stream(self):
80
- """Initialize MatriceStream in the current event loop."""
132
+ self.logger.info(f"Consumer worker stopped for camera {self.camera_id}")
133
+
134
+ async def _initialize_stream(self) -> None:
135
+ """Initialize MatriceStream with proper configuration."""
81
136
  try:
82
137
  from matrice_common.stream.matrice_stream import MatriceStream, StreamType
83
-
84
- # Determine stream type
85
- stream_type = StreamType.KAFKA if self.stream_config.get("stream_type", "kafka").lower() == "kafka" else StreamType.REDIS
86
-
87
- # Create stream configuration
88
- if stream_type == StreamType.KAFKA:
89
- stream_params = {
90
- "bootstrap_servers": self.stream_config.get("bootstrap_servers", "localhost:9092"),
91
- "sasl_username": self.stream_config.get("sasl_username", "matrice-sdk-user"),
92
- "sasl_password": self.stream_config.get("sasl_password", "matrice-sdk-password"),
93
- "sasl_mechanism": self.stream_config.get("sasl_mechanism", "SCRAM-SHA-256"),
94
- "security_protocol": self.stream_config.get("security_protocol", "SASL_PLAINTEXT"),
95
- }
96
- else: # Redis
97
- stream_params = {
98
- "host": self.stream_config.get("host", "localhost"),
99
- "port": self.stream_config.get("port", 6379),
100
- "password": self.stream_config.get("password"),
101
- "username": self.stream_config.get("username"),
102
- "db": self.stream_config.get("db", 0),
103
- "connection_timeout": self.stream_config.get("connection_timeout", 120),
104
- }
105
-
106
- # Create and setup stream
138
+
139
+ stream_type = self._get_stream_type()
140
+ stream_params = self._build_stream_params(stream_type)
141
+
107
142
  self.stream = MatriceStream(stream_type, **stream_params)
108
- await self.stream.async_setup(self.input_topic, f"inference_consumer_{self.camera_id}_{self.worker_id}")
109
- # TODO: Add app name to the consumer group id to make sure it processing once only
110
-
143
+ consumer_group = f"inference_consumer_{self.camera_id}_{self.worker_id}"
144
+ await self.stream.async_setup(self.input_topic, consumer_group)
145
+
111
146
  self.logger.info(f"Initialized {stream_type.value} stream for consumer worker {self.worker_id}")
112
-
147
+
113
148
  except Exception as e:
114
- self.logger.error(f"Failed to initialize stream for consumer worker: {e}")
149
+ self.logger.error(f"Failed to initialize stream: {e}")
115
150
  raise
116
151
 
117
- async def _get_message_safely(self):
118
- """Safely get message from stream in the current event loop."""
152
+ def _get_stream_type(self):
153
+ """Determine stream type from configuration."""
154
+ from matrice_common.stream.matrice_stream import StreamType
155
+ stream_type_str = self.stream_config.get("stream_type", "kafka").lower()
156
+ return StreamType.KAFKA if stream_type_str == "kafka" else StreamType.REDIS
157
+
158
+ def _build_stream_params(self, stream_type) -> Dict[str, Any]:
159
+ """Build stream parameters based on type."""
160
+ from matrice_common.stream.matrice_stream import StreamType
161
+
162
+ if stream_type == StreamType.KAFKA:
163
+ return {
164
+ "bootstrap_servers": self.stream_config.get("bootstrap_servers", "localhost:9092"),
165
+ "sasl_username": self.stream_config.get("sasl_username", "matrice-sdk-user"),
166
+ "sasl_password": self.stream_config.get("sasl_password", "matrice-sdk-password"),
167
+ "sasl_mechanism": self.stream_config.get("sasl_mechanism", "SCRAM-SHA-256"),
168
+ "security_protocol": self.stream_config.get("security_protocol", "SASL_PLAINTEXT"),
169
+ }
170
+ else:
171
+ return {
172
+ "host": self.stream_config.get("host", "localhost"),
173
+ "port": self.stream_config.get("port", 6379),
174
+ "password": self.stream_config.get("password"),
175
+ "username": self.stream_config.get("username"),
176
+ "db": self.stream_config.get("db", self.DEFAULT_DB),
177
+ "connection_timeout": self.stream_config.get("connection_timeout", self.DEFAULT_CONNECTION_TIMEOUT),
178
+ }
179
+
180
+ async def _get_message_safely(self) -> Optional[Dict[str, Any]]:
181
+ """Safely get message from stream."""
182
+ if not self.stream:
183
+ self.logger.error("Stream not initialized")
184
+ return None
185
+
119
186
  try:
120
- if not self.stream:
121
- self.logger.error("Stream not initialized")
122
- return None
123
187
  return await self.stream.async_get_message(self.message_timeout)
124
188
  except Exception as e:
125
- # Handle stream issues gracefully
126
- self.logger.debug(f"Error getting message from stream: {e}")
189
+ self.logger.debug(f"Error getting message: {e}")
190
+ return None
191
+
192
+ # -------------------- H.265 helpers --------------------
193
+ def _decode_h265_frame(self, h265_bytes: bytes, width: int, height: int):
194
+ """Decode a single H.265-encoded frame to OpenCV BGR image."""
195
+ try:
196
+ try:
197
+ # Prefer local matrice_common implementation if available
198
+ from matrice_common.video.h265_processor import H265FrameDecoder
199
+ decoder = H265FrameDecoder()
200
+ frame = decoder.decode_frame(h265_bytes, width=width, height=height)
201
+ return frame
202
+ except Exception as e:
203
+ self.logger.error(f"H.265 single-frame decode failed: {e}")
204
+ return None
205
+ except Exception as e:
206
+ self.logger.error(f"Unexpected error in H.265 frame decode: {e}")
127
207
  return None
208
+
209
+ def _ensure_h265_stream_decoder(self, width: int, height: int):
210
+ """Ensure a continuous H.265 stream decoder exists with given dimensions."""
211
+ if self._h265_stream_decoder is not None:
212
+ return True
213
+ try:
214
+ from matrice_common.video.h265_processor import H265StreamDecoder
215
+ decoder = H265StreamDecoder(width=width, height=height)
216
+ if not decoder.start():
217
+ self.logger.error("Failed to start H.265 stream decoder")
218
+ return False
219
+ self._h265_stream_decoder = decoder
220
+ return True
221
+ except Exception as e:
222
+ self.logger.error(f"Failed to initialize H.265 stream decoder: {e}")
223
+ return False
224
+
225
+ def _frame_to_jpeg_bytes(self, frame) -> bytes:
226
+ """Encode an OpenCV BGR frame to JPEG bytes."""
227
+ try:
228
+ ok, buf = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
229
+ if not ok:
230
+ raise RuntimeError("cv2.imencode failed")
231
+ return buf.tobytes()
232
+ except Exception as e:
233
+ self.logger.error(f"Failed to encode frame to JPEG: {e}")
234
+ return b""
128
235
 
129
- def _process_message(self, message_data):
130
- """Process incoming message and add to inference queue."""
236
+ def _process_message(self, message_data: Dict[str, Any]) -> None:
237
+ """Process incoming message and add to inference queue.
238
+
239
+ This method:
240
+ 1. Extracts/generates a unique frame_id
241
+ 2. Handles codec-specific processing (H.264, H.265, JPEG, etc.)
242
+ 3. Caches the frame content to Redis with the frame_id
243
+ 4. Enqueues the task for inference with frame_id preserved
244
+
245
+ Frame ID Consistency:
246
+ - The frame_id is determined once at the start of processing
247
+ - The same frame_id is used for cache writes and task data
248
+ - frame_id is propagated through the entire pipeline
249
+ - Output messages include the frame_id for client retrieval
250
+
251
+ Multi-App Safety:
252
+ - frame_id uniqueness ensures no collisions between apps
253
+ - Redis prefix (stream:frames:) provides namespace isolation
254
+ - Cache writes are non-blocking to prevent pipeline delays
255
+ """
131
256
  try:
132
- # Parse message data - handle camera streamer format
133
- if isinstance(message_data.get("data"), bytes):
134
- data = json.loads(message_data["data"].decode("utf-8"))
135
- else:
136
- data = message_data.get("data", {})
137
-
138
- # Handle camera streamer input format
139
- input_stream = data.get("input_stream", {})
140
- if not input_stream:
141
- # Fallback to direct format
142
- input_stream = data
143
-
144
- # Create stream message
145
- stream_msg = StreamMessage(
146
- camera_id=self.camera_id,
147
- message_key=message_data.get("key", data.get("input_name", f"{self.camera_id}_{int(time.time())}")),
148
- data=data,
149
- timestamp=datetime.now(timezone.utc),
150
- priority=1
151
- )
152
-
153
- # Ensure extra_params is a dictionary
154
- extra_params = data.get("extra_params", {})
155
- if not isinstance(extra_params, dict):
156
- self.logger.warning(f"extra_params is not a dict, converting from {type(extra_params)}: {extra_params}")
157
- if isinstance(extra_params, list):
158
- # Convert list to dict if possible
159
- if len(extra_params) == 0:
160
- extra_params = {}
161
- elif all(isinstance(item, dict) for item in extra_params):
162
- # Merge all dictionaries in the list
163
- merged_params = {}
164
- for item in extra_params:
165
- merged_params.update(item)
166
- extra_params = merged_params
167
- else:
168
- extra_params = {}
169
- else:
170
- extra_params = {}
257
+ message_key = self._extract_message_key(message_data)
258
+ data = self._parse_message_data(message_data)
259
+ input_stream = self._extract_input_stream(data)
260
+ extra_params = self._normalize_extra_params(data)
261
+ frame_id = self._determine_frame_id(data, message_data)
171
262
 
172
- # Determine frame_id (prefer value from upstream gateway; otherwise fallback to message key)
173
- frame_id = data.get("frame_id")
174
- if not frame_id:
175
- frame_id = message_data.get("key", data.get("input_name", f"{self.camera_id}_{int(time.time() * 1000)}"))
263
+ self._enrich_input_stream(input_stream, frame_id)
176
264
 
177
- # Attach frame_id into input_stream for propagation if not present
265
+ # Codec detection
266
+ codec = None
267
+ codec_lower = None
178
268
  try:
179
- if isinstance(input_stream, dict) and "frame_id" not in input_stream:
180
- input_stream["frame_id"] = frame_id
269
+ if isinstance(input_stream, dict):
270
+ codec = input_stream.get("video_codec") or input_stream.get("compression_format")
271
+ if isinstance(codec, str):
272
+ codec_lower = codec.lower()
181
273
  except Exception:
182
- pass
274
+ codec_lower = None
183
275
 
184
- # Create inference task with camera streamer format
185
- task_data = {
186
- "message": stream_msg,
187
- "input_stream": input_stream, # Pass the full input_stream
188
- "stream_key": f"{self.camera_id}_{stream_msg.message_key}",
189
- "extra_params": extra_params,
190
- "camera_config": self.camera_config.__dict__,
191
- "frame_id": frame_id
192
- }
193
-
194
- # Add to inference queue with timestamp as tie-breaker for priority queue comparison
276
+ # H.264 handling (frame-wise) - upstream always sends JPEG-encoded frames
277
+ # Content is base64-encoded JPEG, ready for PIL/inference
278
+ if codec_lower == "h264" and isinstance(input_stream, dict):
279
+ stream_unit = input_stream.get("stream_unit", "frame")
280
+ if isinstance(stream_unit, str) and stream_unit.lower() != "frame":
281
+ self.logger.warning("Received H.264 with non-frame stream_unit; skipping")
282
+ return
283
+ content_b64 = input_stream.get("content")
284
+ if isinstance(content_b64, str) and content_b64:
285
+ # Cache JPEG base64 as-is
286
+ self._cache_frame(frame_id, content_b64)
287
+ stream_msg = self._create_stream_message(message_key, data)
288
+ task_data = self._build_task_data(stream_msg, input_stream, extra_params, frame_id)
289
+ self.inference_queue.put((stream_msg.priority, time.time(), task_data))
290
+ return
291
+ self.logger.warning("H.264 frame missing content; skipping")
292
+ return
293
+
294
+ # H.265 handling: convert to JPEG base64 before enqueuing
295
+ if codec_lower in ["h265", "hevc"] and isinstance(input_stream, dict):
296
+ # Resolve resolution
297
+ width = None
298
+ height = None
299
+ try:
300
+ res = input_stream.get("stream_resolution") or input_stream.get("original_resolution") or {}
301
+ width = int(res.get("width")) if res and res.get("width") else None
302
+ height = int(res.get("height")) if res and res.get("height") else None
303
+ except Exception:
304
+ width, height = None, None
305
+
306
+ payload_b64 = input_stream.get("content")
307
+ payload_bytes = b""
308
+ if isinstance(payload_b64, str) and payload_b64:
309
+ try:
310
+ payload_bytes = base64.b64decode(payload_b64)
311
+ except Exception:
312
+ payload_bytes = b""
313
+
314
+ stream_unit = input_stream.get("stream_unit", "frame")
315
+ is_stream_chunk = bool(input_stream.get("is_video_chunk")) or (isinstance(stream_unit, str) and stream_unit.lower() != "frame")
316
+
317
+ stream_msg = self._create_stream_message(message_key, data)
318
+
319
+ if not is_stream_chunk:
320
+ # Single-frame H.265
321
+ if payload_bytes and width and height:
322
+ frame_img = self._decode_h265_frame(payload_bytes, width, height)
323
+ if frame_img is not None:
324
+ jpeg_bytes = self._frame_to_jpeg_bytes(frame_img)
325
+ if jpeg_bytes:
326
+ input_stream_jpeg = copy.deepcopy(input_stream)
327
+ input_stream_jpeg["content"] = base64.b64encode(jpeg_bytes).decode("utf-8")
328
+ input_stream_jpeg["video_codec"] = "jpeg"
329
+ # Low-latency cache write
330
+ self._cache_frame(frame_id, input_stream_jpeg["content"])
331
+ task_data = self._build_task_data(stream_msg, input_stream_jpeg, extra_params, frame_id)
332
+ self.inference_queue.put((stream_msg.priority, time.time(), task_data))
333
+ return
334
+ # Drop undecodable H.265 frame
335
+ self.logger.warning("Dropping H.265 frame due to missing payload/resolution or decode failure")
336
+ return
337
+ else:
338
+ # Stream-chunk H.265 (emit at most one frame per message using upstream frame_id)
339
+ if width and height and self._ensure_h265_stream_decoder(width, height) and payload_bytes:
340
+ try:
341
+ self._h265_stream_decoder.decode_bytes(payload_bytes)
342
+ latest_frame = None
343
+ while True:
344
+ frame_img = self._h265_stream_decoder.read_frame()
345
+ if frame_img is None:
346
+ break
347
+ latest_frame = frame_img
348
+ if latest_frame is not None:
349
+ jpeg_bytes = self._frame_to_jpeg_bytes(latest_frame)
350
+ if jpeg_bytes:
351
+ input_stream_jpeg = copy.deepcopy(input_stream)
352
+ input_stream_jpeg["content"] = base64.b64encode(jpeg_bytes).decode("utf-8")
353
+ input_stream_jpeg["video_codec"] = "jpeg"
354
+ # Keep upstream frame_id as-is
355
+ try:
356
+ input_stream_jpeg["frame_id"] = frame_id
357
+ except Exception:
358
+ pass
359
+ # Low-latency cache write
360
+ self._cache_frame(frame_id, input_stream_jpeg["content"])
361
+ task_data = self._build_task_data(stream_msg, input_stream_jpeg, extra_params, frame_id)
362
+ self.inference_queue.put((stream_msg.priority, time.time(), task_data))
363
+ return
364
+ except Exception as e:
365
+ self.logger.error(f"H.265 stream decode error: {e}")
366
+ # No complete frame available yet for this chunk; skip forwarding
367
+ self.logger.debug("No decoded frame available from H.265 stream chunk for this message")
368
+ return
369
+
370
+ # Default path (other formats): enqueue as-is
371
+ stream_msg = self._create_stream_message(message_key, data)
372
+ # Cache if there is a base64 content present
373
+ try:
374
+ if isinstance(input_stream, dict) and isinstance(input_stream.get("content"), str) and input_stream.get("content"):
375
+ self._cache_frame(frame_id, input_stream.get("content"))
376
+ except Exception:
377
+ pass
378
+ task_data = self._build_task_data(stream_msg, input_stream, extra_params, frame_id)
195
379
  self.inference_queue.put((stream_msg.priority, time.time(), task_data))
196
-
380
+
197
381
  except json.JSONDecodeError as e:
198
382
  self.logger.error(f"Failed to parse message JSON: {e}")
199
383
  except Exception as e:
200
384
  self.logger.error(f"Error processing message: {e}")
201
385
 
386
+ def _extract_message_key(self, message_data: Dict[str, Any]) -> Optional[str]:
387
+ """Extract message key from Kafka/Redis message."""
388
+ if not isinstance(message_data, dict):
389
+ return None
390
+
391
+ key = message_data.get('key') or message_data.get('message_key')
392
+ if isinstance(key, bytes):
393
+ return key.decode('utf-8')
394
+ return key
395
+
396
+ def _parse_message_data(self, message_data: Dict[str, Any]) -> Dict[str, Any]:
397
+ """Parse message data from different stream formats."""
398
+ for field in ['value', 'data']:
399
+ if field in message_data:
400
+ value = message_data[field]
401
+ if isinstance(value, dict):
402
+ return value
403
+ elif isinstance(value, (str, bytes)):
404
+ if isinstance(value, bytes):
405
+ value = value.decode('utf-8')
406
+ return json.loads(value)
407
+ return message_data
408
+
409
+ def _extract_input_stream(self, data: Dict[str, Any]) -> Dict[str, Any]:
410
+ """Extract input stream from message data."""
411
+ input_stream = data.get("input_stream", {})
412
+ return input_stream if input_stream else data
413
+
414
+ def _normalize_extra_params(self, data: Dict[str, Any]) -> Dict[str, Any]:
415
+ """Normalize extra_params to ensure it's a dictionary."""
416
+ extra_params = data.get("extra_params", {})
417
+
418
+ if isinstance(extra_params, dict):
419
+ return extra_params
420
+ elif isinstance(extra_params, list):
421
+ return self._merge_list_params(extra_params)
422
+ else:
423
+ self.logger.warning(f"Invalid extra_params type {type(extra_params)}, using empty dict")
424
+ return {}
425
+
426
+ def _merge_list_params(self, params_list: list) -> Dict[str, Any]:
427
+ """Merge list of dictionaries into single dictionary."""
428
+ if not params_list:
429
+ return {}
430
+
431
+ if all(isinstance(item, dict) for item in params_list):
432
+ merged = {}
433
+ for item in params_list:
434
+ merged.update(item)
435
+ return merged
436
+
437
+ return {}
438
+
439
+ def _determine_frame_id(self, data: Dict[str, Any], message_data: Dict[str, Any]) -> str:
440
+ """Determine frame ID from message data with guaranteed uniqueness.
441
+
442
+ Priority:
443
+ 1. Existing frame_id from upstream (UUID expected)
444
+ 2. Message key (if it looks like a UUID)
445
+ 3. Generate unique ID with camera context
446
+ """
447
+ # First priority: explicit frame_id from upstream
448
+ frame_id = data.get("frame_id")
449
+ if frame_id and isinstance(frame_id, str) and frame_id.strip():
450
+ self.logger.debug(f"Using upstream frame_id: {frame_id}")
451
+ return str(frame_id).strip()
452
+
453
+ # Second priority: message key (if it's a UUID-like string)
454
+ fallback_key = message_data.get("key") or data.get("input_name")
455
+ if fallback_key:
456
+ key_str = str(fallback_key)
457
+ # Check if it looks like a UUID (contains dashes and right length)
458
+ if "-" in key_str and len(key_str) >= 32:
459
+ self.logger.debug(f"Using message key as frame_id: {key_str}")
460
+ return key_str
461
+
462
+ # Last resort: generate unique ID with camera, worker, and high-precision timestamp
463
+ import uuid
464
+ generated_id = f"{self.camera_id}_{self.worker_id}_{uuid.uuid4()}"
465
+ self.logger.warning(
466
+ f"No upstream frame_id found, generated unique ID: {generated_id} "
467
+ f"(message_key: {fallback_key})"
468
+ )
469
+ return generated_id
470
+
471
+ def _enrich_input_stream(self, input_stream: Dict[str, Any], frame_id: str) -> None:
472
+ """Add frame_id to input_stream if not present."""
473
+ try:
474
+ if isinstance(input_stream, dict) and "frame_id" not in input_stream:
475
+ input_stream["frame_id"] = frame_id
476
+ except Exception:
477
+ pass
478
+
479
+ def _create_stream_message(self, message_key: Optional[str], data: Dict[str, Any]) -> StreamMessage:
480
+ """Create StreamMessage instance."""
481
+ final_key = message_key or data.get("input_name") or f"{self.camera_id}_{int(time.time())}"
482
+
483
+ return StreamMessage(
484
+ camera_id=self.camera_id,
485
+ message_key=final_key,
486
+ data=data,
487
+ timestamp=datetime.now(timezone.utc),
488
+ priority=self.DEFAULT_PRIORITY
489
+ )
490
+
491
+ def _build_task_data(self, stream_msg: StreamMessage, input_stream: Dict[str, Any],
492
+ extra_params: Dict[str, Any], frame_id: str) -> Dict[str, Any]:
493
+ """Build task data for inference queue."""
494
+ return {
495
+ "message": stream_msg,
496
+ "input_stream": input_stream,
497
+ "stream_key": stream_msg.message_key,
498
+ "extra_params": extra_params,
499
+ "camera_config": self.camera_config.__dict__,
500
+ "frame_id": frame_id
501
+ }
502
+
503
+ def _cache_frame(self, frame_id: Optional[str], content_b64: Optional[str]) -> None:
504
+ """Write frame to Redis cache if configured, non-blocking.
505
+
506
+ Args:
507
+ frame_id: Unique frame identifier (uuid expected)
508
+ content_b64: Base64-encoded JPEG string
509
+ """
510
+ if not self.frame_cache:
511
+ self.logger.debug("Frame cache not configured, skipping cache write")
512
+ return
513
+
514
+ # Validate frame_id
515
+ if not frame_id or not isinstance(frame_id, str):
516
+ self.logger.warning(
517
+ f"Invalid frame_id for caching: {frame_id!r} (type: {type(frame_id).__name__})"
518
+ )
519
+ return
520
+
521
+ frame_id = frame_id.strip()
522
+ if not frame_id:
523
+ self.logger.warning("Empty frame_id after stripping, skipping cache")
524
+ return
525
+
526
+ # Validate content
527
+ if not content_b64 or not isinstance(content_b64, str):
528
+ self.logger.warning(
529
+ f"Invalid content for frame_id={frame_id}: "
530
+ f"type={type(content_b64).__name__}, "
531
+ f"len={len(content_b64) if content_b64 else 0}"
532
+ )
533
+ return
534
+
535
+ try:
536
+ content_len = len(content_b64)
537
+ self.logger.debug(
538
+ f"Caching frame: frame_id={frame_id}, camera={self.camera_id}, "
539
+ f"worker={self.worker_id}, content_size={content_len} bytes"
540
+ )
541
+ self.frame_cache.put(frame_id, content_b64)
542
+ self.logger.debug(f"Successfully queued frame {frame_id} for caching")
543
+ except Exception as e:
544
+ # Do not block pipeline on cache errors
545
+ self.logger.error(
546
+ f"Frame cache put failed: frame_id={frame_id}, camera={self.camera_id}, "
547
+ f"worker={self.worker_id}, error={e}",
548
+ exc_info=True
549
+ )
550
+