matrice-inference 0.1.2__py3-none-any.whl → 0.1.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of matrice-inference might be problematic.
- matrice_inference/__init__.py +40 -23
- matrice_inference/server/__init__.py +17 -11
- matrice_inference/server/model/triton_server.py +1 -3
- matrice_inference/server/server.py +3 -4
- matrice_inference/server/stream/consumer_worker.py +398 -141
- matrice_inference/server/stream/frame_cache.py +149 -54
- matrice_inference/server/stream/inference_worker.py +183 -94
- matrice_inference/server/stream/post_processing_worker.py +246 -181
- matrice_inference/server/stream/producer_worker.py +155 -98
- matrice_inference/server/stream/stream_pipeline.py +220 -248
- matrice_inference/tmp/aggregator/analytics.py +1 -1
- matrice_inference/tmp/overall_inference_testing.py +0 -4
- {matrice_inference-0.1.2.dist-info → matrice_inference-0.1.22.dist-info}/METADATA +1 -1
- {matrice_inference-0.1.2.dist-info → matrice_inference-0.1.22.dist-info}/RECORD +17 -17
- {matrice_inference-0.1.2.dist-info → matrice_inference-0.1.22.dist-info}/WHEEL +0 -0
- {matrice_inference-0.1.2.dist-info → matrice_inference-0.1.22.dist-info}/licenses/LICENSE.txt +0 -0
- {matrice_inference-0.1.2.dist-info → matrice_inference-0.1.22.dist-info}/top_level.txt +0 -0
```diff
--- matrice_inference/server/stream/consumer_worker.py (0.1.2)
+++ matrice_inference/server/stream/consumer_worker.py (0.1.22)
@@ -1,20 +1,35 @@
 # Import moved to method where it's needed to avoid circular imports
-from matrice_inference.server.stream.utils import CameraConfig, StreamMessage
 import asyncio
 import json
-import time
 import logging
-import threading
 import queue
+import threading
+import time
+import base64
+import copy
+import cv2
 from datetime import datetime, timezone
-import
+from typing import Dict, Any, Optional
+from matrice_inference.server.stream.utils import CameraConfig, StreamMessage

 class ConsumerWorker:
-    """Handles message consumption from streams."""
-
-
-
-
+    """Handles message consumption from streams with optimized processing."""
+
+    DEFAULT_PRIORITY = 1
+    DEFAULT_DB = 0
+    DEFAULT_CONNECTION_TIMEOUT = 120
+
+    def __init__(
+        self,
+        camera_id: str,
+        worker_id: int,
+        stream_config: Dict[str, Any],
+        input_topic: str,
+        inference_queue: queue.PriorityQueue,
+        message_timeout: float,
+        camera_config: CameraConfig,
+        frame_cache: Optional[Any] = None
+    ):
         self.camera_id = camera_id
         self.worker_id = worker_id
         self.stream_config = stream_config
@@ -23,179 +38,421 @@ class ConsumerWorker:
         self.message_timeout = message_timeout
         self.camera_config = camera_config
         self.running = False
-        self.stream = None
+        self.stream: Optional[Any] = None
         self.logger = logging.getLogger(f"{__name__}.consumer.{camera_id}.{worker_id}")
+        # H.265 stream decoder instance (initialized lazily per worker)
+        self._h265_stream_decoder = None
+        # Optional frame cache for low-latency caching at ingestion
+        self.frame_cache = frame_cache

-    def start(self):
+    def start(self) -> threading.Thread:
         """Start the consumer worker in a separate thread."""
         self.running = True
-        thread = threading.Thread(
+        thread = threading.Thread(
+            target=self._run,
+            name=f"Consumer-{self.camera_id}-{self.worker_id}",
+            daemon=False
+        )
         thread.start()
         return thread

     def stop(self):
         """Stop the consumer worker."""
         self.running = False
+        try:
+            if self._h265_stream_decoder is not None:
+                self._h265_stream_decoder.stop()
+        except Exception:
+            pass

-    def _run(self):
-        """Main consumer loop."""
-        # Create a new event loop for this worker thread
+    def _run(self) -> None:
+        """Main consumer loop with proper resource management."""
         loop = asyncio.new_event_loop()
         asyncio.set_event_loop(loop)
-
+
         self.logger.info(f"Started consumer worker for camera {self.camera_id}")
-
+
         try:
-            # Initialize stream in this event loop
             loop.run_until_complete(self._initialize_stream())
-
-
-
-                    # Get message from stream
-                    message_data = loop.run_until_complete(
-                        self._get_message_safely()
-                    )
-
-                    if not message_data:
-                        continue
-
-                    # Parse and create task
-                    self._process_message(message_data)
-
-                except Exception as e:
-                    self.logger.error(f"Consumer error: {e}")
-                    time.sleep(1.0)
-
+            self._consume_messages(loop)
+        except Exception as e:
+            self.logger.error(f"Fatal error in consumer worker: {e}")
         finally:
-
-
-
-
-
-
+            self._cleanup_resources(loop)
+
+    def _consume_messages(self, loop: asyncio.AbstractEventLoop) -> None:
+        """Main message consumption loop."""
+        while self.running and self.camera_config.enabled:
+            try:
+                message_data = loop.run_until_complete(self._get_message_safely())
+                if message_data:
+                    self._process_message(message_data)
+            except Exception as e:
+                self.logger.error(f"Error processing message: {e}")
+                time.sleep(1.0)
+
+    def _cleanup_resources(self, loop: asyncio.AbstractEventLoop) -> None:
+        """Clean up stream and event loop resources."""
+        if self.stream:
+            try:
+                loop.run_until_complete(self.stream.async_close())
+            except Exception as e:
+                self.logger.error(f"Error closing stream: {e}")
+
+        try:
             loop.close()
-
+        except Exception as e:
+            self.logger.error(f"Error closing event loop: {e}")

-
-
+        self.logger.info(f"Consumer worker stopped for camera {self.camera_id}")
+
+    async def _initialize_stream(self) -> None:
+        """Initialize MatriceStream with proper configuration."""
         try:
             from matrice_common.stream.matrice_stream import MatriceStream, StreamType
-
-
-
-
-            # Create stream configuration
-            if stream_type == StreamType.KAFKA:
-                stream_params = {
-                    "bootstrap_servers": self.stream_config.get("bootstrap_servers", "localhost:9092"),
-                    "sasl_username": self.stream_config.get("sasl_username", "matrice-sdk-user"),
-                    "sasl_password": self.stream_config.get("sasl_password", "matrice-sdk-password"),
-                    "sasl_mechanism": self.stream_config.get("sasl_mechanism", "SCRAM-SHA-256"),
-                    "security_protocol": self.stream_config.get("security_protocol", "SASL_PLAINTEXT"),
-                }
-            else: # Redis
-                stream_params = {
-                    "host": self.stream_config.get("host", "localhost"),
-                    "port": self.stream_config.get("port", 6379),
-                    "password": self.stream_config.get("password"),
-                    "username": self.stream_config.get("username"),
-                    "db": self.stream_config.get("db", 0),
-                    "connection_timeout": self.stream_config.get("connection_timeout", 120),
-                }
-
-            # Create and setup stream
+
+            stream_type = self._get_stream_type()
+            stream_params = self._build_stream_params(stream_type)
+
             self.stream = MatriceStream(stream_type, **stream_params)
-
-
-
+            consumer_group = f"inference_consumer_{self.camera_id}_{self.worker_id}"
+            await self.stream.async_setup(self.input_topic, consumer_group)
+
             self.logger.info(f"Initialized {stream_type.value} stream for consumer worker {self.worker_id}")
-
+
         except Exception as e:
-            self.logger.error(f"Failed to initialize stream
+            self.logger.error(f"Failed to initialize stream: {e}")
             raise

-
-        """
+    def _get_stream_type(self):
+        """Determine stream type from configuration."""
+        from matrice_common.stream.matrice_stream import StreamType
+        stream_type_str = self.stream_config.get("stream_type", "kafka").lower()
+        return StreamType.KAFKA if stream_type_str == "kafka" else StreamType.REDIS
+
+    def _build_stream_params(self, stream_type) -> Dict[str, Any]:
+        """Build stream parameters based on type."""
+        from matrice_common.stream.matrice_stream import StreamType
+
+        if stream_type == StreamType.KAFKA:
+            return {
+                "bootstrap_servers": self.stream_config.get("bootstrap_servers", "localhost:9092"),
+                "sasl_username": self.stream_config.get("sasl_username", "matrice-sdk-user"),
+                "sasl_password": self.stream_config.get("sasl_password", "matrice-sdk-password"),
+                "sasl_mechanism": self.stream_config.get("sasl_mechanism", "SCRAM-SHA-256"),
+                "security_protocol": self.stream_config.get("security_protocol", "SASL_PLAINTEXT"),
+            }
+        else:
+            return {
+                "host": self.stream_config.get("host", "localhost"),
+                "port": self.stream_config.get("port", 6379),
+                "password": self.stream_config.get("password"),
+                "username": self.stream_config.get("username"),
+                "db": self.stream_config.get("db", self.DEFAULT_DB),
+                "connection_timeout": self.stream_config.get("connection_timeout", self.DEFAULT_CONNECTION_TIMEOUT),
+            }
+
+    async def _get_message_safely(self) -> Optional[Dict[str, Any]]:
+        """Safely get message from stream."""
+        if not self.stream:
+            self.logger.error("Stream not initialized")
+            return None
+
         try:
-            if not self.stream:
-                self.logger.error("Stream not initialized")
-                return None
             return await self.stream.async_get_message(self.message_timeout)
         except Exception as e:
-
-
+            self.logger.debug(f"Error getting message: {e}")
+            return None
+
+    # -------------------- H.265 helpers --------------------
+    def _decode_h265_frame(self, h265_bytes: bytes, width: int, height: int):
+        """Decode a single H.265-encoded frame to OpenCV BGR image."""
+        try:
+            try:
+                # Prefer local matrice_common implementation if available
+                from matrice_common.video.h265_processor import H265FrameDecoder
+                decoder = H265FrameDecoder()
+                frame = decoder.decode_frame(h265_bytes, width=width, height=height)
+                return frame
+            except Exception as e:
+                self.logger.error(f"H.265 single-frame decode failed: {e}")
+                return None
+        except Exception as e:
+            self.logger.error(f"Unexpected error in H.265 frame decode: {e}")
             return None
+
+    def _ensure_h265_stream_decoder(self, width: int, height: int):
+        """Ensure a continuous H.265 stream decoder exists with given dimensions."""
+        if self._h265_stream_decoder is not None:
+            return True
+        try:
+            from matrice_common.video.h265_processor import H265StreamDecoder
+            decoder = H265StreamDecoder(width=width, height=height)
+            if not decoder.start():
+                self.logger.error("Failed to start H.265 stream decoder")
+                return False
+            self._h265_stream_decoder = decoder
+            return True
+        except Exception as e:
+            self.logger.error(f"Failed to initialize H.265 stream decoder: {e}")
+            return False
+
+    def _frame_to_jpeg_bytes(self, frame) -> bytes:
+        """Encode an OpenCV BGR frame to JPEG bytes."""
+        try:
+            ok, buf = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 90])
+            if not ok:
+                raise RuntimeError("cv2.imencode failed")
+            return buf.tobytes()
+        except Exception as e:
+            self.logger.error(f"Failed to encode frame to JPEG: {e}")
+            return b""

-    def _process_message(self, message_data):
+    def _process_message(self, message_data: Dict[str, Any]) -> None:
         """Process incoming message and add to inference queue."""
         try:
-
-
-
-
-
-
-            # Handle camera streamer input format
-            input_stream = data.get("input_stream", {})
-            if not input_stream:
-                # Fallback to direct format
-                input_stream = data
-
-            # Create stream message
-            stream_msg = StreamMessage(
-                camera_id=self.camera_id,
-                message_key=message_data.get("key", data.get("input_name", f"{self.camera_id}_{int(time.time())}")),
-                data=data,
-                timestamp=datetime.now(timezone.utc),
-                priority=1
-            )
-
-            # Ensure extra_params is a dictionary
-            extra_params = data.get("extra_params", {})
-            if not isinstance(extra_params, dict):
-                self.logger.warning(f"extra_params is not a dict, converting from {type(extra_params)}: {extra_params}")
-                if isinstance(extra_params, list):
-                    # Convert list to dict if possible
-                    if len(extra_params) == 0:
-                        extra_params = {}
-                    elif all(isinstance(item, dict) for item in extra_params):
-                        # Merge all dictionaries in the list
-                        merged_params = {}
-                        for item in extra_params:
-                            merged_params.update(item)
-                        extra_params = merged_params
-                    else:
-                        extra_params = {}
-                else:
-                    extra_params = {}
+            message_key = self._extract_message_key(message_data)
+            data = self._parse_message_data(message_data)
+            input_stream = self._extract_input_stream(data)
+            extra_params = self._normalize_extra_params(data)
+            frame_id = self._determine_frame_id(data, message_data)

-
-            frame_id = data.get("frame_id")
-            if not frame_id:
-                frame_id = message_data.get("key", data.get("input_name", f"{self.camera_id}_{int(time.time() * 1000)}"))
+            self._enrich_input_stream(input_stream, frame_id)

-            #
+            # Codec detection
+            codec = None
+            codec_lower = None
             try:
-                if isinstance(input_stream, dict)
-                    input_stream
+                if isinstance(input_stream, dict):
+                    codec = input_stream.get("video_codec") or input_stream.get("compression_format")
+                    if isinstance(codec, str):
+                        codec_lower = codec.lower()
             except Exception:
-
+                codec_lower = None

-            #
-
-
-
-            "
-
-
-            "
-
-
-
+            # H.264 handling (frame-wise) - upstream always sends JPEG-encoded frames
+            # Content is base64-encoded JPEG, ready for PIL/inference
+            if codec_lower == "h264" and isinstance(input_stream, dict):
+                stream_unit = input_stream.get("stream_unit", "frame")
+                if isinstance(stream_unit, str) and stream_unit.lower() != "frame":
+                    self.logger.warning("Received H.264 with non-frame stream_unit; skipping")
+                    return
+                content_b64 = input_stream.get("content")
+                if isinstance(content_b64, str) and content_b64:
+                    # Cache JPEG base64 as-is
+                    self._cache_frame(frame_id, content_b64)
+                    stream_msg = self._create_stream_message(message_key, data)
+                    task_data = self._build_task_data(stream_msg, input_stream, extra_params, frame_id)
+                    self.inference_queue.put((stream_msg.priority, time.time(), task_data))
+                    return
+                self.logger.warning("H.264 frame missing content; skipping")
+                return
+
+            # H.265 handling: convert to JPEG base64 before enqueuing
+            if codec_lower in ["h265", "hevc"] and isinstance(input_stream, dict):
+                # Resolve resolution
+                width = None
+                height = None
+                try:
+                    res = input_stream.get("stream_resolution") or input_stream.get("original_resolution") or {}
+                    width = int(res.get("width")) if res and res.get("width") else None
+                    height = int(res.get("height")) if res and res.get("height") else None
+                except Exception:
+                    width, height = None, None
+
+                payload_b64 = input_stream.get("content")
+                payload_bytes = b""
+                if isinstance(payload_b64, str) and payload_b64:
+                    try:
+                        payload_bytes = base64.b64decode(payload_b64)
+                    except Exception:
+                        payload_bytes = b""
+
+                stream_unit = input_stream.get("stream_unit", "frame")
+                is_stream_chunk = bool(input_stream.get("is_video_chunk")) or (isinstance(stream_unit, str) and stream_unit.lower() != "frame")
+
+                stream_msg = self._create_stream_message(message_key, data)
+
+                if not is_stream_chunk:
+                    # Single-frame H.265
+                    if payload_bytes and width and height:
+                        frame_img = self._decode_h265_frame(payload_bytes, width, height)
+                        if frame_img is not None:
+                            jpeg_bytes = self._frame_to_jpeg_bytes(frame_img)
+                            if jpeg_bytes:
+                                input_stream_jpeg = copy.deepcopy(input_stream)
+                                input_stream_jpeg["content"] = base64.b64encode(jpeg_bytes).decode("utf-8")
+                                input_stream_jpeg["video_codec"] = "jpeg"
+                                # Low-latency cache write
+                                self._cache_frame(frame_id, input_stream_jpeg["content"])
+                                task_data = self._build_task_data(stream_msg, input_stream_jpeg, extra_params, frame_id)
+                                self.inference_queue.put((stream_msg.priority, time.time(), task_data))
+                                return
+                    # Drop undecodable H.265 frame
+                    self.logger.warning("Dropping H.265 frame due to missing payload/resolution or decode failure")
+                    return
+                else:
+                    # Stream-chunk H.265 (emit at most one frame per message using upstream frame_id)
+                    if width and height and self._ensure_h265_stream_decoder(width, height) and payload_bytes:
+                        try:
+                            self._h265_stream_decoder.decode_bytes(payload_bytes)
+                            latest_frame = None
+                            while True:
+                                frame_img = self._h265_stream_decoder.read_frame()
+                                if frame_img is None:
+                                    break
+                                latest_frame = frame_img
+                            if latest_frame is not None:
+                                jpeg_bytes = self._frame_to_jpeg_bytes(latest_frame)
+                                if jpeg_bytes:
+                                    input_stream_jpeg = copy.deepcopy(input_stream)
+                                    input_stream_jpeg["content"] = base64.b64encode(jpeg_bytes).decode("utf-8")
+                                    input_stream_jpeg["video_codec"] = "jpeg"
+                                    # Keep upstream frame_id as-is
+                                    try:
+                                        input_stream_jpeg["frame_id"] = frame_id
+                                    except Exception:
+                                        pass
+                                    # Low-latency cache write
+                                    self._cache_frame(frame_id, input_stream_jpeg["content"])
+                                    task_data = self._build_task_data(stream_msg, input_stream_jpeg, extra_params, frame_id)
+                                    self.inference_queue.put((stream_msg.priority, time.time(), task_data))
+                                    return
+                        except Exception as e:
+                            self.logger.error(f"H.265 stream decode error: {e}")
+                    # No complete frame available yet for this chunk; skip forwarding
+                    self.logger.debug("No decoded frame available from H.265 stream chunk for this message")
+                    return
+
+            # Default path (other formats): enqueue as-is
+            stream_msg = self._create_stream_message(message_key, data)
+            # Cache if there is a base64 content present
+            try:
+                if isinstance(input_stream, dict) and isinstance(input_stream.get("content"), str) and input_stream.get("content"):
+                    self._cache_frame(frame_id, input_stream.get("content"))
+            except Exception:
+                pass
+            task_data = self._build_task_data(stream_msg, input_stream, extra_params, frame_id)
             self.inference_queue.put((stream_msg.priority, time.time(), task_data))
-
+
         except json.JSONDecodeError as e:
             self.logger.error(f"Failed to parse message JSON: {e}")
         except Exception as e:
             self.logger.error(f"Error processing message: {e}")

+    def _extract_message_key(self, message_data: Dict[str, Any]) -> Optional[str]:
+        """Extract message key from Kafka/Redis message."""
+        if not isinstance(message_data, dict):
+            return None
+
+        key = message_data.get('key') or message_data.get('message_key')
+        if isinstance(key, bytes):
+            return key.decode('utf-8')
+        return key
+
+    def _parse_message_data(self, message_data: Dict[str, Any]) -> Dict[str, Any]:
+        """Parse message data from different stream formats."""
+        for field in ['value', 'data']:
+            if field in message_data:
+                value = message_data[field]
+                if isinstance(value, dict):
+                    return value
+                elif isinstance(value, (str, bytes)):
+                    if isinstance(value, bytes):
+                        value = value.decode('utf-8')
+                    return json.loads(value)
+        return message_data
+
+    def _extract_input_stream(self, data: Dict[str, Any]) -> Dict[str, Any]:
+        """Extract input stream from message data."""
+        input_stream = data.get("input_stream", {})
+        return input_stream if input_stream else data
+
+    def _normalize_extra_params(self, data: Dict[str, Any]) -> Dict[str, Any]:
+        """Normalize extra_params to ensure it's a dictionary."""
+        extra_params = data.get("extra_params", {})
+
+        if isinstance(extra_params, dict):
+            return extra_params
+        elif isinstance(extra_params, list):
+            return self._merge_list_params(extra_params)
+        else:
+            self.logger.warning(f"Invalid extra_params type {type(extra_params)}, using empty dict")
+            return {}
+
+    def _merge_list_params(self, params_list: list) -> Dict[str, Any]:
+        """Merge list of dictionaries into single dictionary."""
+        if not params_list:
+            return {}
+
+        if all(isinstance(item, dict) for item in params_list):
+            merged = {}
+            for item in params_list:
+                merged.update(item)
+            return merged
+
+        return {}
+
+    def _determine_frame_id(self, data: Dict[str, Any], message_data: Dict[str, Any]) -> str:
+        """Determine frame ID from message data."""
+        frame_id = data.get("frame_id")
+        if frame_id:
+            return frame_id
+
+        fallback_key = message_data.get("key") or data.get("input_name")
+        if fallback_key:
+            return str(fallback_key)
+
+        return f"{self.camera_id}_{int(time.time() * 1000)}"
+
+    def _enrich_input_stream(self, input_stream: Dict[str, Any], frame_id: str) -> None:
+        """Add frame_id to input_stream if not present."""
+        try:
+            if isinstance(input_stream, dict) and "frame_id" not in input_stream:
+                input_stream["frame_id"] = frame_id
+        except Exception:
+            pass
+
+    def _create_stream_message(self, message_key: Optional[str], data: Dict[str, Any]) -> StreamMessage:
+        """Create StreamMessage instance."""
+        final_key = message_key or data.get("input_name") or f"{self.camera_id}_{int(time.time())}"
+
+        return StreamMessage(
+            camera_id=self.camera_id,
+            message_key=final_key,
+            data=data,
+            timestamp=datetime.now(timezone.utc),
+            priority=self.DEFAULT_PRIORITY
+        )
+
+    def _build_task_data(self, stream_msg: StreamMessage, input_stream: Dict[str, Any],
+                         extra_params: Dict[str, Any], frame_id: str) -> Dict[str, Any]:
+        """Build task data for inference queue."""
+        return {
+            "message": stream_msg,
+            "input_stream": input_stream,
+            "stream_key": stream_msg.message_key,
+            "extra_params": extra_params,
+            "camera_config": self.camera_config.__dict__,
+            "frame_id": frame_id
+        }
+
+    def _cache_frame(self, frame_id: Optional[str], content_b64: Optional[str]) -> None:
+        """Write frame to Redis cache if configured, non-blocking.
+
+        Args:
+            frame_id: Unique frame identifier (uuid expected)
+            content_b64: Base64-encoded JPEG string
+        """
+        try:
+            if not self.frame_cache:
+                return
+            if not frame_id or not isinstance(frame_id, str):
+                return
+            if not content_b64 or not isinstance(content_b64, str):
+                return
+            self.frame_cache.put(frame_id, content_b64)
+        except Exception as e:
+            # Do not block pipeline on cache errors
+            self.logger.debug(f"Frame cache put failed for frame_id={frame_id}: {e}")
+
```
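The heart of this change is the codec-aware branch in `_process_message`: H.264 messages are treated as frame-wise base64 JPEG and cached and enqueued as-is, while H.265/HEVC payloads are decoded and re-encoded to JPEG before they reach the inference queue. Below is a minimal sketch of the message shape that parsing expects, inferred only from the keys read in the diff above (`input_stream`, `video_codec`, `stream_unit`, `content`, `frame_id`, `input_name`, `extra_params`); the identifiers and the dummy frame are illustrative, not taken from the package.

```python
import base64
import json

import cv2
import numpy as np

# Build a dummy BGR frame and JPEG-encode it, mimicking what a camera streamer would send.
frame = np.zeros((360, 640, 3), dtype=np.uint8)
ok, jpeg_buf = cv2.imencode(".jpg", frame)
assert ok

message = {
    "frame_id": "cam-01-000123",   # consumer falls back to the message key or input_name if absent
    "input_name": "cam-01",
    "extra_params": {},            # dict expected; a list of dicts gets merged, anything else is dropped
    "input_stream": {
        "video_codec": "h264",     # frame-wise path: content is already a base64 JPEG
        "stream_unit": "frame",
        "content": base64.b64encode(jpeg_buf.tobytes()).decode("utf-8"),
    },
}

payload = json.dumps(message)  # value published to the consumer's input topic
```

For H.265 senders the same envelope applies, except `content` carries base64 HEVC bytes and `stream_resolution` (or `original_resolution`) must supply `width` and `height`; the optional `frame_cache` passed to `ConsumerWorker` only needs to expose a `put(frame_id, content_b64)` method, since that is the single call the worker makes on it.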