rocket-welder-sdk 1.1.32__py3-none-any.whl → 1.1.33__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rocket_welder_sdk/__init__.py +5 -6
- rocket_welder_sdk/controllers.py +134 -101
- rocket_welder_sdk/frame_metadata.py +138 -0
- rocket_welder_sdk/high_level/__init__.py +66 -0
- rocket_welder_sdk/high_level/connection_strings.py +330 -0
- rocket_welder_sdk/high_level/data_context.py +163 -0
- rocket_welder_sdk/high_level/schema.py +180 -0
- rocket_welder_sdk/high_level/transport_protocol.py +166 -0
- rocket_welder_sdk/keypoints_protocol.py +642 -0
- rocket_welder_sdk/rocket_welder_client.py +17 -3
- rocket_welder_sdk/segmentation_result.py +420 -0
- rocket_welder_sdk/transport/__init__.py +38 -0
- rocket_welder_sdk/transport/frame_sink.py +77 -0
- rocket_welder_sdk/transport/frame_source.py +74 -0
- rocket_welder_sdk/transport/nng_transport.py +197 -0
- rocket_welder_sdk/transport/stream_transport.py +193 -0
- rocket_welder_sdk/transport/tcp_transport.py +154 -0
- rocket_welder_sdk/transport/unix_socket_transport.py +339 -0
- {rocket_welder_sdk-1.1.32.dist-info → rocket_welder_sdk-1.1.33.dist-info}/METADATA +15 -2
- rocket_welder_sdk-1.1.33.dist-info/RECORD +37 -0
- rocket_welder_sdk-1.1.32.dist-info/RECORD +0 -22
- {rocket_welder_sdk-1.1.32.dist-info → rocket_welder_sdk-1.1.33.dist-info}/WHEEL +0 -0
- {rocket_welder_sdk-1.1.32.dist-info → rocket_welder_sdk-1.1.33.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,420 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Segmentation result serialization protocol.
|
|
3
|
+
|
|
4
|
+
Binary protocol for efficient streaming of instance segmentation results.
|
|
5
|
+
Compatible with C# implementation for cross-platform interoperability.
|
|
6
|
+
|
|
7
|
+
Protocol (per frame):
|
|
8
|
+
[FrameId: 8B little-endian][Width: varint][Height: varint]
|
|
9
|
+
[classId: 1B][instanceId: 1B][pointCount: varint][points: delta+varint...]
|
|
10
|
+
[classId: 1B][instanceId: 1B][pointCount: varint][points: delta+varint...]
|
|
11
|
+
...
|
|
12
|
+
|
|
13
|
+
Features:
|
|
14
|
+
- Delta encoding for adjacent contour points (efficient compression)
|
|
15
|
+
- Varint encoding for variable-length integers
|
|
16
|
+
- ZigZag encoding for signed deltas
|
|
17
|
+
- Explicit little-endian for cross-platform compatibility
|
|
18
|
+
- Frame boundaries handled by transport layer (IFrameSink)
|
|
19
|
+
- NumPy array support for efficient processing
|
|
20
|
+
"""
|
|
21
|
+
|
|
22
|
+
import io
|
|
23
|
+
import struct
|
|
24
|
+
from dataclasses import dataclass
|
|
25
|
+
from typing import BinaryIO, Iterator, List, Optional, Tuple, Union
|
|
26
|
+
|
|
27
|
+
import numpy as np
|
|
28
|
+
import numpy.typing as npt
|
|
29
|
+
from typing_extensions import TypeAlias
|
|
30
|
+
|
|
31
|
+
from .transport import IFrameSink, StreamFrameSink
|
|
32
|
+
|
|
33
|
+
# Type aliases
|
|
34
|
+
Point = Tuple[int, int]
|
|
35
|
+
PointArray: TypeAlias = npt.NDArray[np.int32] # Shape: (N, 2)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
def _write_varint(stream: BinaryIO, value: int) -> None:
|
|
39
|
+
"""Write unsigned integer as varint."""
|
|
40
|
+
if value < 0:
|
|
41
|
+
raise ValueError(f"Varint requires non-negative value, got {value}")
|
|
42
|
+
|
|
43
|
+
while value >= 0x80:
|
|
44
|
+
stream.write(bytes([value & 0x7F | 0x80]))
|
|
45
|
+
value >>= 7
|
|
46
|
+
stream.write(bytes([value & 0x7F]))
|
|
47
|
+
|
|
48
|
+
|
|
49
|
+
def _read_varint(stream: BinaryIO) -> int:
|
|
50
|
+
"""Read varint from stream and decode to unsigned integer."""
|
|
51
|
+
result = 0
|
|
52
|
+
shift = 0
|
|
53
|
+
|
|
54
|
+
while True:
|
|
55
|
+
if shift >= 35: # Max 5 bytes for uint32
|
|
56
|
+
raise ValueError("Varint too long (corrupted stream)")
|
|
57
|
+
|
|
58
|
+
byte_data = stream.read(1)
|
|
59
|
+
if not byte_data:
|
|
60
|
+
raise EOFError("Unexpected end of stream reading varint")
|
|
61
|
+
|
|
62
|
+
byte = byte_data[0]
|
|
63
|
+
result |= (byte & 0x7F) << shift
|
|
64
|
+
shift += 7
|
|
65
|
+
|
|
66
|
+
if not (byte & 0x80):
|
|
67
|
+
break
|
|
68
|
+
|
|
69
|
+
return result
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
def _zigzag_encode(value: int) -> int:
|
|
73
|
+
"""ZigZag encode signed integer to unsigned."""
|
|
74
|
+
return (value << 1) ^ (value >> 31)
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def _zigzag_decode(value: int) -> int:
|
|
78
|
+
"""ZigZag decode unsigned integer to signed."""
|
|
79
|
+
return (value >> 1) ^ -(value & 1)
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
@dataclass(frozen=True)
class SegmentationFrameMetadata:
    """Metadata for a segmentation frame."""

    # Unique frame identifier; serialized as 8 bytes little-endian ("<Q").
    frame_id: int
    # Frame width in pixels; serialized as a varint.
    width: int
    # Frame height in pixels; serialized as a varint.
    height: int
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
@dataclass(frozen=True)
class SegmentationInstance:
    """A single instance in a segmentation result."""

    class_id: int
    instance_id: int
    points: PointArray  # NumPy array of shape (N, 2) with dtype int32

    def to_normalized(self, width: int, height: int) -> npt.NDArray[np.float32]:
        """
        Convert points to normalized coordinates [0-1] range.

        Args:
            width: Frame width in pixels
            height: Frame height in pixels

        Returns:
            NumPy array of shape (N, 2) with dtype float32, normalized to [0-1]
        """
        if width <= 0 or height <= 0:
            raise ValueError("Width and height must be positive")

        # Single vectorized divide: column 0 by width, column 1 by height.
        divisor = np.asarray([width, height], dtype=np.float32)
        return self.points.astype(np.float32) / divisor

    def to_list(self) -> List[Point]:
        """Convert points to list of tuples."""
        return [(int(px), int(py)) for px, py in self.points]
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
class SegmentationResultWriter:
    """
    Writes segmentation results for a single frame via IFrameSink.

    Frames are buffered in memory and written atomically on close.

    Thread-safe: No (caller must synchronize)
    """

    def __init__(
        self,
        frame_id: int,
        width: int,
        height: int,
        stream: Optional[BinaryIO] = None,
        *,
        frame_sink: Optional[IFrameSink] = None,
    ) -> None:
        """
        Initialize writer for a single frame.

        Args:
            frame_id: Unique frame identifier
            width: Frame width in pixels
            height: Frame height in pixels
            stream: Binary stream (convenience - auto-wraps in StreamFrameSink)
            frame_sink: IFrameSink to write frame to (keyword-only, transport-agnostic)

        Raises:
            TypeError: If neither or both of stream / frame_sink are given.

        Note:
            Either stream or frame_sink must be provided (not both).
            For convenience, stream is the primary parameter (auto-wraps in StreamFrameSink).
        """
        # Exactly one of (stream, frame_sink) must be supplied.
        if frame_sink is None and stream is None:
            raise TypeError("Either stream or frame_sink must be provided")

        if frame_sink is not None and stream is not None:
            raise TypeError("Cannot provide both stream and frame_sink")

        # Convenience: auto-wrap stream in StreamFrameSink
        if stream is not None:
            # leave_open=True: the wrapper must never close the caller's stream.
            self._frame_sink: IFrameSink = StreamFrameSink(stream, leave_open=True)
            self._owns_sink = False  # Don't close the stream wrapper
        else:
            assert frame_sink is not None
            self._frame_sink = frame_sink
            self._owns_sink = False
        # NOTE(review): _owns_sink is False in both branches and is never read;
        # close() never closes the sink. Confirm whether sink ownership was
        # intended to be tracked here.

        self._frame_id = frame_id
        self._width = width
        self._height = height
        self._buffer = io.BytesIO()  # Buffer frame for atomic write
        self._header_written = False
        self._disposed = False

    def _ensure_header_written(self) -> None:
        """Write frame header to buffer if not already written."""
        if self._header_written:
            return

        # Write FrameId (8 bytes, little-endian)
        self._buffer.write(struct.pack("<Q", self._frame_id))

        # Write Width and Height as varints
        _write_varint(self._buffer, self._width)
        _write_varint(self._buffer, self._height)

        self._header_written = True

    def append(
        self,
        class_id: int,
        instance_id: int,
        points: Union[List[Point], PointArray],
    ) -> None:
        """
        Append an instance with contour points.

        Args:
            class_id: Object class ID (0-255)
            instance_id: Instance ID within class (0-255)
            points: List of (x, y) tuples or NumPy array of shape (N, 2)

        Raises:
            ValueError: If class_id/instance_id are outside 0-255 or points
                is not of shape (N, 2).
        """
        if class_id < 0 or class_id > 255:
            raise ValueError(f"class_id must be 0-255, got {class_id}")
        if instance_id < 0 or instance_id > 255:
            raise ValueError(f"instance_id must be 0-255, got {instance_id}")

        self._ensure_header_written()

        # Convert to NumPy array if needed
        if not isinstance(points, np.ndarray):
            points_array = np.array(points, dtype=np.int32)
        else:
            points_array = points.astype(np.int32)

        if points_array.ndim != 2 or points_array.shape[1] != 2:
            raise ValueError(f"Points must be shape (N, 2), got {points_array.shape}")

        # Write class_id and instance_id
        self._buffer.write(bytes([class_id, instance_id]))

        # Write point count
        point_count = len(points_array)
        _write_varint(self._buffer, point_count)

        if point_count == 0:
            return

        # Write first point (absolute coordinates)
        first_point = points_array[0]
        _write_varint(self._buffer, _zigzag_encode(int(first_point[0])))
        _write_varint(self._buffer, _zigzag_encode(int(first_point[1])))

        # Write remaining points (delta encoded): each point is stored as a
        # zigzag-varint difference from the previous point.
        for i in range(1, point_count):
            delta_x = int(points_array[i, 0] - points_array[i - 1, 0])
            delta_y = int(points_array[i, 1] - points_array[i - 1, 1])
            _write_varint(self._buffer, _zigzag_encode(delta_x))
            _write_varint(self._buffer, _zigzag_encode(delta_y))

    def flush(self) -> None:
        """Flush buffered frame via frame sink without closing."""
        if self._disposed:
            return

        # Ensure header is written (even if no instances appended)
        self._ensure_header_written()

        # Write buffered frame atomically via sink
        frame_data = self._buffer.getvalue()
        self._frame_sink.write_frame(frame_data)
        self._frame_sink.flush()
        # NOTE(review): the buffer is not cleared here, and close() calls
        # getvalue() again - flush() followed by close() emits the same frame
        # bytes twice. Confirm whether that is the intended contract.

    def close(self) -> None:
        """Close writer and write buffered frame via frame sink."""
        if self._disposed:
            return

        self._disposed = True

        # Ensure header is written (even if no instances appended)
        self._ensure_header_written()

        # Send complete frame atomically via sink
        frame_data = self._buffer.getvalue()
        self._frame_sink.write_frame(frame_data)

        # Clean up buffer
        self._buffer.close()

    def __enter__(self) -> "SegmentationResultWriter":
        """Context manager entry."""
        return self

    def __exit__(self, *args: object) -> None:
        """Context manager exit."""
        self.close()
|
|
281
|
+
|
|
282
|
+
|
|
283
|
+
class SegmentationResultReader:
    """
    Reads segmentation results for a single frame.

    Thread-safe: No (caller must synchronize)
    Stream ownership: Caller must close stream
    """

    def __init__(self, stream: BinaryIO) -> None:
        """
        Initialize reader for a single frame.

        Args:
            stream: Binary stream to read from (must support read()).
                Should contain raw frame data without length prefix.
                Use StreamFrameSource to strip length prefixes from transport streams.

        Raises:
            TypeError: If stream does not expose a read() method.
        """
        if not hasattr(stream, "read"):
            raise TypeError("Stream must be a binary readable stream")

        self._stream = stream
        # Header is parsed lazily, on first access to metadata or read_next().
        self._header_read = False
        self._metadata: Optional[SegmentationFrameMetadata] = None

        # Max points per instance - prevents OOM attacks
        self._max_points_per_instance = 10_000_000  # 10M points

    def _ensure_header_read(self) -> None:
        """Read frame header if not already read."""
        if self._header_read:
            return

        # Read FrameId (8 bytes, little-endian)
        frame_id_bytes = self._stream.read(8)
        if len(frame_id_bytes) != 8:
            raise EOFError("Failed to read FrameId")
        frame_id = struct.unpack("<Q", frame_id_bytes)[0]

        # Read Width and Height as varints
        width = _read_varint(self._stream)
        height = _read_varint(self._stream)

        self._metadata = SegmentationFrameMetadata(frame_id, width, height)
        self._header_read = True

    @property
    def metadata(self) -> SegmentationFrameMetadata:
        """Get frame metadata (frameId, width, height)."""
        self._ensure_header_read()
        assert self._metadata is not None
        return self._metadata

    def read_next(self) -> Optional[SegmentationInstance]:
        """
        Read next instance from stream.

        Returns:
            SegmentationInstance if available, None if end of stream reached

        Raises:
            EOFError: If stream ends unexpectedly
            ValueError: If data is corrupted
        """
        self._ensure_header_read()

        # Read class_id and instance_id (buffered for performance)
        header = self._stream.read(2)

        if len(header) == 0:
            # End of stream - no more instances
            return None

        # A single byte means the stream was truncated mid-instance.
        if len(header) != 2:
            raise EOFError("Unexpected end of stream reading instance header")

        class_id = header[0]
        instance_id = header[1]

        # Read point count with validation
        point_count = _read_varint(self._stream)
        if point_count > self._max_points_per_instance:
            raise ValueError(
                f"Point count {point_count} exceeds maximum " f"{self._max_points_per_instance}"
            )

        if point_count == 0:
            # Empty points array
            points = np.empty((0, 2), dtype=np.int32)
            return SegmentationInstance(class_id, instance_id, points)

        # Allocate NumPy array for points
        points = np.empty((point_count, 2), dtype=np.int32)

        # Read first point (absolute coordinates)
        x = _zigzag_decode(_read_varint(self._stream))
        y = _zigzag_decode(_read_varint(self._stream))
        points[0] = [x, y]

        # Read remaining points (delta encoded): each point is a zigzag-varint
        # difference from the previous one, mirroring the writer's append().
        for i in range(1, point_count):
            delta_x = _zigzag_decode(_read_varint(self._stream))
            delta_y = _zigzag_decode(_read_varint(self._stream))
            x += delta_x
            y += delta_y
            points[i] = [x, y]

        return SegmentationInstance(class_id, instance_id, points)

    def read_all(self) -> List[SegmentationInstance]:
        """
        Read all instances from frame.

        Returns:
            List of all instances in frame
        """
        instances: List[SegmentationInstance] = []
        while True:
            instance = self.read_next()
            if instance is None:
                break
            instances.append(instance)
        return instances

    def __iter__(self) -> Iterator[SegmentationInstance]:
        """Iterate over instances in frame."""
        while True:
            instance = self.read_next()
            if instance is None:
                break
            yield instance

    def __enter__(self) -> "SegmentationResultReader":
        """Context manager entry."""
        return self

    def __exit__(self, *args: object) -> None:
        """Context manager exit."""
        # Intentionally does not close the stream; ownership stays with the caller.
        pass
|
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Transport layer for RocketWelder SDK.
|
|
3
|
+
|
|
4
|
+
Provides transport-agnostic frame sink/source abstractions for protocols.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from .frame_sink import IFrameSink
|
|
8
|
+
from .frame_source import IFrameSource
|
|
9
|
+
from .nng_transport import NngFrameSink, NngFrameSource
|
|
10
|
+
from .stream_transport import StreamFrameSink, StreamFrameSource
|
|
11
|
+
from .tcp_transport import TcpFrameSink, TcpFrameSource
|
|
12
|
+
from .unix_socket_transport import (
|
|
13
|
+
UnixSocketFrameSink,
|
|
14
|
+
UnixSocketFrameSource,
|
|
15
|
+
UnixSocketServer,
|
|
16
|
+
)
|
|
17
|
+
|
|
18
|
+
__all__ = [
|
|
19
|
+
"IFrameSink",
|
|
20
|
+
"IFrameSource",
|
|
21
|
+
"NngFrameSink",
|
|
22
|
+
"NngFrameSource",
|
|
23
|
+
"StreamFrameSink",
|
|
24
|
+
"StreamFrameSource",
|
|
25
|
+
"TcpFrameSink",
|
|
26
|
+
"TcpFrameSource",
|
|
27
|
+
"UnixSocketFrameSink",
|
|
28
|
+
"UnixSocketFrameSource",
|
|
29
|
+
"UnixSocketServer",
|
|
30
|
+
]
|
|
31
|
+
|
|
32
|
+
# NNG transport is optional (requires pynng package)
|
|
33
|
+
try:
|
|
34
|
+
from .nng_transport import NngFrameSink, NngFrameSource
|
|
35
|
+
|
|
36
|
+
__all__.extend(["NngFrameSink", "NngFrameSource"])
|
|
37
|
+
except ImportError:
|
|
38
|
+
pass # pynng not installed
|
|
@@ -0,0 +1,77 @@
|
|
|
1
|
+
"""Frame sink abstraction for writing frames to any transport."""
|
|
2
|
+
|
|
3
|
+
from abc import ABC, abstractmethod
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class IFrameSink(ABC):
    """
    Low-level abstraction for writing discrete frames to any transport.

    Answers the transport-agnostic question "where do frames go?", keeping
    protocol logic (KeyPoints, SegmentationResults) decoupled from transport
    mechanisms (File, TCP, WebSocket, NNG). Each frame is written atomically.
    """

    @abstractmethod
    def write_frame(self, frame_data: bytes) -> None:
        """
        Write a complete frame to the underlying transport synchronously.

        Args:
            frame_data: Complete frame data to write
        """

    @abstractmethod
    async def write_frame_async(self, frame_data: bytes) -> None:
        """
        Write a complete frame to the underlying transport asynchronously.

        Args:
            frame_data: Complete frame data to write
        """

    @abstractmethod
    def flush(self) -> None:
        """
        Flush any buffered data to the transport synchronously.

        For message-based transports (NNG, WebSocket), this may be a no-op.
        """

    @abstractmethod
    async def flush_async(self) -> None:
        """
        Flush any buffered data to the transport asynchronously.

        For message-based transports (NNG, WebSocket), this may be a no-op.
        """

    @abstractmethod
    def close(self) -> None:
        """Close the sink and release resources."""

    @abstractmethod
    async def close_async(self) -> None:
        """Close the sink and release resources asynchronously."""

    def __enter__(self) -> "IFrameSink":
        """Enter the synchronous context; returns the sink itself."""
        return self

    def __exit__(self, *args: object) -> None:
        """Leave the synchronous context, closing the sink."""
        self.close()

    async def __aenter__(self) -> "IFrameSink":
        """Enter the asynchronous context; returns the sink itself."""
        return self

    async def __aexit__(self, *args: object) -> None:
        """Leave the asynchronous context, closing the sink asynchronously."""
        await self.close_async()
|
|
@@ -0,0 +1,74 @@
|
|
|
1
|
+
"""Frame source abstraction for reading frames from any transport."""
|
|
2
|
+
|
|
3
|
+
from abc import ABC, abstractmethod
|
|
4
|
+
from typing import Optional
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class IFrameSource(ABC):
    """
    Low-level abstraction for reading discrete frames from any transport.

    Answers the transport-agnostic question "where do frames come from?",
    keeping protocol logic (KeyPoints, SegmentationResults) decoupled from
    transport mechanisms (File, TCP, WebSocket, NNG). Each frame is read
    atomically.
    """

    @abstractmethod
    def read_frame(self) -> Optional[bytes]:
        """
        Read a complete frame from the underlying transport synchronously.

        Returns:
            Complete frame data, or None if end of stream/no more messages
        """

    @abstractmethod
    async def read_frame_async(self) -> Optional[bytes]:
        """
        Read a complete frame from the underlying transport asynchronously.

        Returns:
            Complete frame data, or None if end of stream/no more messages
        """

    @property
    @abstractmethod
    def has_more_frames(self) -> bool:
        """
        Check if more frames are available.

        For streaming transports (file), this checks for EOF.
        For message-based transports (NNG), this may always return True until disconnection.

        Returns:
            True if more frames are available, False otherwise
        """

    @abstractmethod
    def close(self) -> None:
        """Close the source and release resources."""

    @abstractmethod
    async def close_async(self) -> None:
        """Close the source and release resources asynchronously."""

    def __enter__(self) -> "IFrameSource":
        """Enter the synchronous context; returns the source itself."""
        return self

    def __exit__(self, *args: object) -> None:
        """Leave the synchronous context, closing the source."""
        self.close()

    async def __aenter__(self) -> "IFrameSource":
        """Enter the asynchronous context; returns the source itself."""
        return self

    async def __aexit__(self, *args: object) -> None:
        """Leave the asynchronous context, closing the source asynchronously."""
        await self.close_async()
|