rocket-welder-sdk 1.1.31__tar.gz → 1.1.33__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/PKG-INFO +15 -2
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/README.md +10 -1
- rocket_welder_sdk-1.1.33/VERSION +1 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/pyproject.toml +21 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/__init__.py +5 -6
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/controllers.py +138 -103
- rocket_welder_sdk-1.1.33/rocket_welder_sdk/frame_metadata.py +138 -0
- rocket_welder_sdk-1.1.33/rocket_welder_sdk/high_level/__init__.py +66 -0
- rocket_welder_sdk-1.1.33/rocket_welder_sdk/high_level/connection_strings.py +330 -0
- rocket_welder_sdk-1.1.33/rocket_welder_sdk/high_level/data_context.py +163 -0
- rocket_welder_sdk-1.1.33/rocket_welder_sdk/high_level/schema.py +180 -0
- rocket_welder_sdk-1.1.33/rocket_welder_sdk/high_level/transport_protocol.py +166 -0
- rocket_welder_sdk-1.1.33/rocket_welder_sdk/keypoints_protocol.py +642 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/rocket_welder_client.py +17 -3
- rocket_welder_sdk-1.1.33/rocket_welder_sdk/segmentation_result.py +420 -0
- rocket_welder_sdk-1.1.33/rocket_welder_sdk/transport/__init__.py +38 -0
- rocket_welder_sdk-1.1.33/rocket_welder_sdk/transport/frame_sink.py +77 -0
- rocket_welder_sdk-1.1.33/rocket_welder_sdk/transport/frame_source.py +74 -0
- rocket_welder_sdk-1.1.33/rocket_welder_sdk/transport/nng_transport.py +197 -0
- rocket_welder_sdk-1.1.33/rocket_welder_sdk/transport/stream_transport.py +193 -0
- rocket_welder_sdk-1.1.33/rocket_welder_sdk/transport/tcp_transport.py +154 -0
- rocket_welder_sdk-1.1.33/rocket_welder_sdk/transport/unix_socket_transport.py +339 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk.egg-info/PKG-INFO +15 -2
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk.egg-info/SOURCES.txt +23 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk.egg-info/requires.txt +5 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/setup.py +5 -4
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/tests/test_connection_string.py +116 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/tests/test_controllers.py +72 -11
- rocket_welder_sdk-1.1.33/tests/test_frame_metadata.py +183 -0
- rocket_welder_sdk-1.1.33/tests/test_high_level_api.py +417 -0
- rocket_welder_sdk-1.1.33/tests/test_keypoints_cross_platform.py +216 -0
- rocket_welder_sdk-1.1.33/tests/test_keypoints_protocol.py +354 -0
- rocket_welder_sdk-1.1.33/tests/test_rocket_welder_client.py +254 -0
- rocket_welder_sdk-1.1.33/tests/test_segmentation_cross_platform.py +148 -0
- rocket_welder_sdk-1.1.33/tests/test_segmentation_result.py +430 -0
- rocket_welder_sdk-1.1.33/tests/test_transport_cross_platform.py +1207 -0
- rocket_welder_sdk-1.1.31/VERSION +0 -1
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/MANIFEST.in +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/logo.png +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/bytes_size.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/connection_string.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/external_controls/__init__.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/external_controls/contracts.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/external_controls/contracts_old.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/gst_metadata.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/opencv_controller.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/periodic_timer.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/py.typed +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/ui/__init__.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/ui/controls.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/ui/icons.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/ui/ui_events_projection.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/ui/ui_service.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/ui/value_types.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk.egg-info/dependency_links.txt +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk.egg-info/top_level.txt +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/setup.cfg +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/tests/test_bytes_size.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/tests/test_external_controls_serialization.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/tests/test_external_controls_serialization_v2.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/tests/test_gst_metadata.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/tests/test_icons.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/tests/test_ui_controls.py +0 -0
- {rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/tests/test_ui_service_happy_path.py +0 -0
{rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: rocket-welder-sdk
-Version: 1.1.31
+Version: 1.1.33
 Summary: High-performance video streaming SDK for RocketWelder services using ZeroBuffer IPC
 Home-page: https://github.com/modelingevolution/rocket-welder-sdk
 Author: ModelingEvolution

@@ -31,6 +31,9 @@ Requires-Dist: opencv-python>=4.5.0
 Requires-Dist: zerobuffer-ipc>=1.1.17
 Requires-Dist: pydantic>=2.5.0
 Requires-Dist: py-micro-plumberd>=0.1.8
+Requires-Dist: typing-extensions>=4.0.0
+Provides-Extra: nng
+Requires-Dist: pynng>=0.7.2; extra == "nng"
 Provides-Extra: dev
 Requires-Dist: pytest>=7.0; extra == "dev"
 Requires-Dist: pytest-cov>=4.0; extra == "dev"

@@ -39,6 +42,7 @@ Requires-Dist: black>=22.0; extra == "dev"
 Requires-Dist: mypy>=1.0; extra == "dev"
 Requires-Dist: ruff>=0.1.0; extra == "dev"
 Requires-Dist: types-setuptools; extra == "dev"
+Requires-Dist: pynng>=0.7.2; extra == "dev"
 Dynamic: author
 Dynamic: home-page
 Dynamic: requires-python

@@ -169,7 +173,7 @@ Start by testing your container locally before deploying to Neuron:
 
 ```bash
 # Build your container
-docker build -t my-ai-app:v1 -f examples/
+docker build -t my-ai-app:v1 -f python/examples/Dockerfile .
 
 # Test with a video file
 docker run --rm \

@@ -223,6 +227,15 @@ docker run --rm \
   my-ai-app:v1
 ```
 
+You can also see preview in your terminal.
+```bash
+docker run --rm \
+  -e CONNECTION_STRING="mjpeg+tcp://<neuron-ip>:<tcp-server-sink-port>?preview=true" \
+  -e DISPLAY=$DISPLAY \
+  -v /tmp/.X11-unix:/tmp/.X11-unix \
+  --network host my-ai-app:v1
+```
+
 This allows you to:
 - Test your AI processing with real camera feeds
 - Debug frame processing logic
{rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/README.md

@@ -124,7 +124,7 @@ Start by testing your container locally before deploying to Neuron:
 
 ```bash
 # Build your container
-docker build -t my-ai-app:v1 -f examples/
+docker build -t my-ai-app:v1 -f python/examples/Dockerfile .
 
 # Test with a video file
 docker run --rm \

@@ -178,6 +178,15 @@ docker run --rm \
   my-ai-app:v1
 ```
 
+You can also see preview in your terminal.
+```bash
+docker run --rm \
+  -e CONNECTION_STRING="mjpeg+tcp://<neuron-ip>:<tcp-server-sink-port>?preview=true" \
+  -e DISPLAY=$DISPLAY \
+  -v /tmp/.X11-unix:/tmp/.X11-unix \
+  --network host my-ai-app:v1
+```
+
 This allows you to:
 - Test your AI processing with real camera feeds
 - Debug frame processing logic
rocket_welder_sdk-1.1.33/VERSION (new file)

@@ -0,0 +1 @@
+1.1.33
{rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/pyproject.toml

@@ -38,9 +38,13 @@ dependencies = [
     "zerobuffer-ipc>=1.1.17",
     "pydantic>=2.5.0",
     "py-micro-plumberd>=0.1.8",
+    "typing-extensions>=4.0.0",
 ]
 
 [project.optional-dependencies]
+nng = [
+    "pynng>=0.7.2",
+]
 dev = [
     "pytest>=7.0",
     "pytest-cov>=4.0",

@@ -49,6 +53,7 @@ dev = [
     "mypy>=1.0",
     "ruff>=0.1.0",
     "types-setuptools",
+    "pynng>=0.7.2",
 ]
 
 [project.urls]

@@ -76,6 +81,10 @@ namespace_packages = true
 show_error_codes = true
 show_column_numbers = true
 pretty = true
+exclude = [
+    "examples/05-traktorek",
+    "examples/rocket-welder-client-python-yolo",
+]
 
 [[tool.mypy.overrides]]
 module = [

@@ -88,6 +97,8 @@ module = [
     "py_micro_plumberd.*",
     "esdbclient",
     "esdbclient.*",
+    "pynng",
+    "pynng.*",
 ]
 ignore_missing_imports = true
 

@@ -95,10 +106,20 @@ ignore_missing_imports = true
 line-length = 100
 target-version = ['py38', 'py39', 'py310', 'py311', 'py312']
 include = '\.pyi?$'
+exclude = '''
+/(
+    examples/05-traktorek
+  | examples/rocket-welder-client-python-yolo
+)/
+'''
 
 [tool.ruff]
 line-length = 100
 target-version = "py38"
+exclude = [
+    "examples/05-traktorek",
+    "examples/rocket-welder-client-python-yolo",
+]
 
 [tool.ruff.lint]
 select = [
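Note: the new `nng` extra makes `pynng` an opt-in dependency rather than a hard requirement. A minimal sketch of how downstream code might detect it, assuming the extra is installed via `pip install "rocket-welder-sdk[nng]"` (the extra name and version bound come from this diff; the probe itself is illustrative, not SDK API):

```python
# Illustrative only: check whether the optional NNG dependency declared by the
# new "nng" extra (pynng>=0.7.2) is importable in the current environment.
try:
    import pynng  # pulled in by: pip install "rocket-welder-sdk[nng]"
    HAS_NNG = True
except ImportError:
    HAS_NNG = False

print("pynng available:", HAS_NNG)
```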
{rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/__init__.py

@@ -10,6 +10,7 @@ import os
 from .bytes_size import BytesSize
 from .connection_string import ConnectionMode, ConnectionString, Protocol
 from .controllers import DuplexShmController, IController, OneWayShmController
+from .frame_metadata import FRAME_METADATA_SIZE, FrameMetadata, GstVideoFormat
 from .gst_metadata import GstCaps, GstMetadata
 from .opencv_controller import OpenCvController
 from .periodic_timer import PeriodicTimer, PeriodicTimerSync

@@ -40,23 +41,21 @@ if _log_level:
     pass  # Invalid log level, ignore
 
 __all__ = [
-
+    "FRAME_METADATA_SIZE",
     "BytesSize",
-    "Client",
+    "Client",
     "ConnectionMode",
     "ConnectionString",
     "DuplexShmController",
-
+    "FrameMetadata",
     "GstCaps",
     "GstMetadata",
-
+    "GstVideoFormat",
     "IController",
     "OneWayShmController",
     "OpenCvController",
-    # Timers
     "PeriodicTimer",
     "PeriodicTimerSync",
     "Protocol",
-    # Main client
     "RocketWelderClient",
 ]
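For reference, the reworked `__all__` above means the new frame-metadata types are importable directly from the package root. A small sketch using only names that appear in this diff:

```python
# Names taken from the updated imports/__all__ shown above; nothing else is assumed.
from rocket_welder_sdk import (
    FRAME_METADATA_SIZE,
    FrameMetadata,
    GstVideoFormat,
    OneWayShmController,
    DuplexShmController,
)

print("frame metadata prefix size:", FRAME_METADATA_SIZE)
```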
{rocket_welder_sdk-1.1.31 → rocket_welder_sdk-1.1.33}/rocket_welder_sdk/controllers.py

@@ -14,17 +14,20 @@ from typing import TYPE_CHECKING, Callable, Optional
 import numpy as np
 from zerobuffer import BufferConfig, Frame, Reader, Writer
 from zerobuffer.duplex import DuplexChannelFactory
-from zerobuffer.duplex.server import ImmutableDuplexServer
 from zerobuffer.exceptions import WriterDeadException
 
 from .connection_string import ConnectionMode, ConnectionString, Protocol
+from .frame_metadata import FRAME_METADATA_SIZE, FrameMetadata
 from .gst_metadata import GstCaps, GstMetadata
 
 if TYPE_CHECKING:
     import numpy.typing as npt
+    from zerobuffer.duplex import IImmutableDuplexServer
 
     Mat = npt.NDArray[np.uint8]
 else:
+    from zerobuffer.duplex import IImmutableDuplexServer
+
     Mat = np.ndarray  # type: ignore[misc]
 
 # Module logger
@@ -334,6 +337,9 @@ class OneWayShmController(IController):
         Create OpenCV Mat from frame data using GstCaps.
         Matches C# CreateMat behavior - creates Mat wrapping the data.
 
+        Frame data layout from GStreamer zerosink:
+        [FrameMetadata (16 bytes)][Pixel Data (W×H×C bytes)]
+
         Args:
             frame: ZeroBuffer frame
 

@@ -357,31 +363,40 @@ class OneWayShmController(IController):
             else:
                 channels = 3  # Default to RGB
 
-            #
-            #
-
+            # Frame data has 16-byte FrameMetadata prefix that must be stripped
+            # Layout: [FrameMetadata (16 bytes)][Pixel Data]
+            if frame.size < FRAME_METADATA_SIZE:
+                logger.error(
+                    "Frame too small for FrameMetadata: %d bytes (need at least %d)",
+                    frame.size,
+                    FRAME_METADATA_SIZE,
+                )
+                return None
+
+            # Get pixel data (skip 16-byte FrameMetadata prefix)
+            pixel_data = np.frombuffer(frame.data[FRAME_METADATA_SIZE:], dtype=np.uint8)
 
-            # Check data size matches expected
+            # Check pixel data size matches expected
             expected_size = height * width * channels
-            if len(
+            if len(pixel_data) != expected_size:
                 logger.error(
-                    "
+                    "Pixel data size mismatch. Expected %d bytes for %dx%d with %d channels, got %d",
                     expected_size,
                     width,
                     height,
                     channels,
-                    len(
+                    len(pixel_data),
                 )
                 return None
 
             # Reshape to image dimensions - this is zero-copy, just changes the view
             # This matches C#: new Mat(Height, Width, Depth, Channels, ptr, Width * Channels)
             if channels == 3:
-                mat =
+                mat = pixel_data.reshape((height, width, 3))
             elif channels == 1:
-                mat =
+                mat = pixel_data.reshape((height, width))
             elif channels == 4:
-                mat =
+                mat = pixel_data.reshape((height, width, 4))
             else:
                 logger.error("Unsupported channel count: %d", channels)
                 return None

@@ -391,41 +406,51 @@ class OneWayShmController(IController):
             # No caps available - try to infer from frame size
             logger.warning("No GstCaps available, attempting to infer from frame size")
 
-            #
-
+            # Frame data has 16-byte FrameMetadata prefix
+            if frame.size < FRAME_METADATA_SIZE:
+                logger.error(
+                    "Frame too small for FrameMetadata: %d bytes (need at least %d)",
+                    frame.size,
+                    FRAME_METADATA_SIZE,
+                )
+                return None
+
+            # Calculate pixel data size (frame size minus 16-byte metadata prefix)
+            pixel_data_size = frame.size - FRAME_METADATA_SIZE
 
             # First, check if it's a perfect square (square frame)
             import math
 
-            sqrt_size = math.sqrt(
+            sqrt_size = math.sqrt(pixel_data_size)
             if sqrt_size == int(sqrt_size):
                 # Perfect square - assume square grayscale image
                 dimension = int(sqrt_size)
                 logger.info(
-                    f"
+                    f"Pixel data size {pixel_data_size} is a perfect square, "
+                    f"assuming {dimension}x{dimension} grayscale"
                 )
-
-                return
+                pixel_data = np.frombuffer(frame.data[FRAME_METADATA_SIZE:], dtype=np.uint8)
+                return pixel_data.reshape((dimension, dimension))  # type: ignore[no-any-return]
 
             # Also check for square RGB (size = width * height * 3)
-            if
-                pixels =
+            if pixel_data_size % 3 == 0:
+                pixels = pixel_data_size // 3
                 sqrt_pixels = math.sqrt(pixels)
                 if sqrt_pixels == int(sqrt_pixels):
                     dimension = int(sqrt_pixels)
-                    logger.info(f"
-
-                    return
+                    logger.info(f"Pixel data size {pixel_data_size} suggests {dimension}x{dimension} RGB")
+                    pixel_data = np.frombuffer(frame.data[FRAME_METADATA_SIZE:], dtype=np.uint8)
+                    return pixel_data.reshape((dimension, dimension, 3))  # type: ignore[no-any-return]
 
             # Check for square RGBA (size = width * height * 4)
-            if
-                pixels =
+            if pixel_data_size % 4 == 0:
+                pixels = pixel_data_size // 4
                 sqrt_pixels = math.sqrt(pixels)
                 if sqrt_pixels == int(sqrt_pixels):
                     dimension = int(sqrt_pixels)
-                    logger.info(f"
-
-                    return
+                    logger.info(f"Pixel data size {pixel_data_size} suggests {dimension}x{dimension} RGBA")
+                    pixel_data = np.frombuffer(frame.data[FRAME_METADATA_SIZE:], dtype=np.uint8)
+                    return pixel_data.reshape((dimension, dimension, 4))  # type: ignore[no-any-return]
 
             common_resolutions = [
                 (640, 480, 3),  # VGA RGB

@@ -436,7 +461,7 @@ class OneWayShmController(IController):
             ]
 
             for width, height, channels in common_resolutions:
-                if
+                if pixel_data_size == width * height * channels:
                     logger.info(f"Inferred resolution: {width}x{height} with {channels} channels")
 
                     # Create caps for future use

@@ -445,16 +470,16 @@ class OneWayShmController(IController):
                         width=width, height=height, format=format_str
                     )
 
-                    # Create Mat
-
+                    # Create Mat from pixel data (skip 16-byte FrameMetadata prefix)
+                    pixel_data = np.frombuffer(frame.data[FRAME_METADATA_SIZE:], dtype=np.uint8)
                     if channels == 3:
-                        return
+                        return pixel_data.reshape((height, width, 3))  # type: ignore[no-any-return]
                     elif channels == 1:
-                        return
+                        return pixel_data.reshape((height, width))  # type: ignore[no-any-return]
                     elif channels == 4:
-                        return
+                        return pixel_data.reshape((height, width, 4))  # type: ignore[no-any-return]
 
-            logger.error(f"Could not infer resolution for
+            logger.error(f"Could not infer resolution for pixel data size {pixel_data_size}")
             return None
 
         except Exception as e:
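The comments added above describe the one-way frame layout as a fixed-size FrameMetadata prefix followed by raw pixel bytes. A minimal standalone sketch of that stripping-and-reshaping step, assuming the 16-byte prefix stated in the OneWay comments (real code should use `FrameMetadata.from_bytes` and `FRAME_METADATA_SIZE` from the SDK rather than a hard-coded constant):

```python
import numpy as np

# Assumption from the comments above: a 16-byte FrameMetadata prefix precedes the pixels.
FRAME_METADATA_SIZE = 16


def strip_and_reshape(frame_bytes: bytes, width: int, height: int, channels: int = 3) -> np.ndarray:
    """Strip the metadata prefix and view the remaining bytes as an image array."""
    if len(frame_bytes) < FRAME_METADATA_SIZE:
        raise ValueError(f"frame too small: {len(frame_bytes)} < {FRAME_METADATA_SIZE}")
    pixels = np.frombuffer(frame_bytes, dtype=np.uint8, offset=FRAME_METADATA_SIZE)
    expected = height * width * channels
    if pixels.size != expected:
        raise ValueError(f"expected {expected} pixel bytes, got {pixels.size}")
    shape = (height, width) if channels == 1 else (height, width, channels)
    return pixels.reshape(shape)
```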
@@ -547,11 +572,11 @@ class DuplexShmController(IController):
         )
 
         self._connection = connection
-        self._duplex_server: Optional[
+        self._duplex_server: Optional[IImmutableDuplexServer] = None
         self._gst_caps: Optional[GstCaps] = None
         self._metadata: Optional[GstMetadata] = None
         self._is_running = False
-        self._on_frame_callback: Optional[Callable[[Mat, Mat], None]] = None  # type: ignore[valid-type]
+        self._on_frame_callback: Optional[Callable[[FrameMetadata, Mat, Mat], None]] = None  # type: ignore[valid-type]
         self._frame_count = 0
 
     @property

@@ -565,14 +590,18 @@ class DuplexShmController(IController):
 
     def start(
         self,
-        on_frame: Callable[[Mat, Mat], None],  # type: ignore[override,valid-type]
+        on_frame: Callable[[FrameMetadata, Mat, Mat], None],  # type: ignore[override,valid-type]
        cancellation_token: Optional[threading.Event] = None,
     ) -> None:
         """
-        Start duplex frame processing.
+        Start duplex frame processing with FrameMetadata.
+
+        The callback receives FrameMetadata (frame number, timestamp, dimensions),
+        input Mat, and output Mat. The 24-byte metadata prefix is stripped from
+        the frame data before creating the input Mat.
 
         Args:
-            on_frame: Callback that receives
+            on_frame: Callback that receives (FrameMetadata, input_mat, output_mat)
             cancellation_token: Optional cancellation token
         """
         if self._is_running:

@@ -588,7 +617,6 @@ class DuplexShmController(IController):
         )
 
         # Create duplex server using factory
-        # Convert timeout from milliseconds to seconds for Python API
         if not self._connection.buffer_name:
             raise ValueError("Buffer name is required for shared memory connection")
         timeout_seconds = self._connection.timeout_ms / 1000.0

@@ -696,91 +724,98 @@ class DuplexShmController(IController):
 
     def _process_duplex_frame(self, request_frame: Frame, response_writer: Writer) -> None:
         """
-        Process a frame in duplex mode.
+        Process a frame in duplex mode with FrameMetadata.
+
+        The frame data has a 24-byte FrameMetadata prefix that is stripped
+        before creating the input Mat.
 
         Args:
-            request_frame: Input frame from the request
+            request_frame: Input frame from the request (with metadata prefix)
             response_writer: Writer for the response frame
         """
-        logger.debug(
-            "_process_duplex_frame called, frame_count=%d, has_gst_caps=%s",
-            self._frame_count,
-            self._gst_caps is not None,
-        )
         try:
             if not self._on_frame_callback:
                 logger.warning("No frame callback set")
                 return
 
+            # Check frame size is sufficient for metadata
+            if request_frame.size < FRAME_METADATA_SIZE:
+                logger.warning("Frame too small for FrameMetadata: %d bytes", request_frame.size)
+                return
+
             self._frame_count += 1
 
-            #
-            if (
-                self._metadata is None
-                and self._duplex_server
-                and self._duplex_server.request_reader
-            ):
-                try:
-                    metadata_bytes = self._duplex_server.request_reader.get_metadata()
-                    if metadata_bytes:
-                        # Use helper method to parse metadata
-                        metadata = self._parse_metadata_json(metadata_bytes)
-                        if metadata:
-                            self._metadata = metadata
-                            self._gst_caps = metadata.caps
-                            logger.info(
-                                "Successfully read metadata from buffer '%s': %s",
-                                self._connection.buffer_name,
-                                self._gst_caps,
-                            )
-                        else:
-                            logger.debug("Failed to parse metadata in frame processing")
-                except Exception as e:
-                    logger.debug("Failed to read metadata in frame processing: %s", e)
+            # Parse FrameMetadata from the beginning of the frame
+            frame_metadata = FrameMetadata.from_bytes(request_frame.data)
 
-            #
-
-
-
+            # Calculate pixel data offset and size
+            pixel_data_offset = FRAME_METADATA_SIZE
+            pixel_data_size = request_frame.size - FRAME_METADATA_SIZE
+
+            # GstCaps must be available for width/height/format
+            # (FrameMetadata no longer contains these - they're stream-level, not per-frame)
+            if not self._gst_caps:
+                logger.warning(
+                    "GstCaps not available, skipping frame %d", frame_metadata.frame_number
+                )
                 return
 
-
-
-
-
-
-
+            width = self._gst_caps.width
+            height = self._gst_caps.height
+            format_str = self._gst_caps.format
+
+            # Determine channels from format
+            if format_str in ["RGB", "BGR"]:
+                channels = 3
+            elif format_str in ["RGBA", "BGRA", "ARGB", "ABGR"]:
+                channels = 4
+            elif format_str in ["GRAY8", "GRAY16_LE", "GRAY16_BE"]:
+                channels = 1
+            else:
+                channels = 3  # Default to RGB
+
+            # Create input Mat from pixel data (after metadata prefix)
+            pixel_data = np.frombuffer(request_frame.data[pixel_data_offset:], dtype=np.uint8)
+
+            expected_size = height * width * channels
+            if len(pixel_data) != expected_size:
+                logger.error(
+                    "Pixel data size mismatch. Expected %d bytes for %dx%d with %d channels, got %d",
+                    expected_size,
+                    width,
+                    height,
+                    channels,
+                    len(pixel_data),
+                )
+                return
 
-
-
-
-
-
-
-
-
-
-
-
-
-                )
+            # Reshape to image dimensions
+            if channels == 1:
+                input_mat = pixel_data.reshape((height, width))
+            else:
+                input_mat = pixel_data.reshape((height, width, channels))
+
+            # Response doesn't need metadata prefix - just pixel data
+            with response_writer.get_frame_buffer(pixel_data_size) as output_buffer:
+                # Create output Mat from buffer (zero-copy)
+                output_data = np.frombuffer(output_buffer, dtype=np.uint8)
+                if channels == 1:
+                    output_mat = output_data.reshape((height, width))
                 else:
-
-                    output_mat = np.frombuffer(output_buffer, dtype=np.uint8).reshape(
-                        input_mat.shape
-                    )
+                    output_mat = output_data.reshape((height, width, channels))
 
-                # Call user's processing function
-                self._on_frame_callback(input_mat, output_mat)
+                # Call user's processing function with metadata
+                self._on_frame_callback(frame_metadata, input_mat, output_mat)
 
                 # Commit the response frame after buffer is released
                 response_writer.commit_frame()
 
             logger.debug(
-                "Processed duplex frame %d (%dx%d)",
-
-
-
+                "Processed duplex frame %d (%dx%d %s)",
+                frame_metadata.frame_number,
+                width,
+                height,
+                format_str,
             )
 
         except Exception as e: