rocket-welder-sdk 1.1.32__py3-none-any.whl → 1.1.34__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -10,10 +10,30 @@ import os
 from .bytes_size import BytesSize
 from .connection_string import ConnectionMode, ConnectionString, Protocol
 from .controllers import DuplexShmController, IController, OneWayShmController
+from .frame_metadata import FRAME_METADATA_SIZE, FrameMetadata, GstVideoFormat
 from .gst_metadata import GstCaps, GstMetadata
 from .opencv_controller import OpenCvController
 from .periodic_timer import PeriodicTimer, PeriodicTimerSync
 from .rocket_welder_client import RocketWelderClient
+from .session_id import (
+    # Explicit URL functions (PREFERRED - set by rocket-welder2)
+    ACTIONS_SINK_URL_ENV,
+    KEYPOINTS_SINK_URL_ENV,
+    SEGMENTATION_SINK_URL_ENV,
+    # SessionId-derived URL functions (fallback for backwards compatibility)
+    get_actions_url,
+    get_actions_url_from_env,
+    get_configured_nng_urls,
+    get_keypoints_url,
+    get_keypoints_url_from_env,
+    get_nng_urls,
+    get_nng_urls_from_env,
+    get_segmentation_url,
+    get_segmentation_url_from_env,
+    get_session_id_from_env,
+    has_explicit_nng_urls,
+    parse_session_id,
+)

 # Alias for backward compatibility and README examples
 Client = RocketWelderClient
@@ -40,23 +60,36 @@ if _log_level:
        pass  # Invalid log level, ignore

 __all__ = [
-    # Core types
+    "ACTIONS_SINK_URL_ENV",
+    "FRAME_METADATA_SIZE",
+    "KEYPOINTS_SINK_URL_ENV",
+    "SEGMENTATION_SINK_URL_ENV",
     "BytesSize",
-    "Client",  # Backward compatibility
+    "Client",
     "ConnectionMode",
     "ConnectionString",
     "DuplexShmController",
-    # GStreamer metadata
+    "FrameMetadata",
     "GstCaps",
     "GstMetadata",
-    # Controllers
+    "GstVideoFormat",
     "IController",
     "OneWayShmController",
     "OpenCvController",
-    # Timers
     "PeriodicTimer",
     "PeriodicTimerSync",
     "Protocol",
-    # Main client
     "RocketWelderClient",
+    "get_actions_url",
+    "get_actions_url_from_env",
+    "get_configured_nng_urls",
+    "get_keypoints_url",
+    "get_keypoints_url_from_env",
+    "get_nng_urls",
+    "get_nng_urls_from_env",
+    "get_segmentation_url",
+    "get_segmentation_url_from_env",
+    "get_session_id_from_env",
+    "has_explicit_nng_urls",
+    "parse_session_id",
 ]
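
The new `session_id` exports form two families: explicit sink URLs that rocket-welder2 injects through the environment variables named by `ACTIONS_SINK_URL_ENV`, `KEYPOINTS_SINK_URL_ENV`, and `SEGMENTATION_SINK_URL_ENV`, and URLs derived from a session id as a backwards-compatible fallback. A minimal sketch of how a consumer might pick between them; the zero-argument `*_from_env` signatures and the `get_nng_urls(session_id)` shape are assumptions, since the diff does not show `session_id.py` itself:

import rocket_welder_sdk as rw

# Hypothetical resolution flow; actual signatures may differ.
if rw.has_explicit_nng_urls():
    # Preferred: rocket-welder2 set the sink URLs directly in the environment.
    urls = rw.get_configured_nng_urls()
else:
    # Fallback: derive the NNG URLs from the session id in the environment.
    session_id = rw.get_session_id_from_env()
    urls = rw.get_nng_urls(session_id)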
@@ -17,6 +17,7 @@ from zerobuffer.duplex import DuplexChannelFactory
 from zerobuffer.exceptions import WriterDeadException

 from .connection_string import ConnectionMode, ConnectionString, Protocol
+from .frame_metadata import FRAME_METADATA_SIZE, FrameMetadata
 from .gst_metadata import GstCaps, GstMetadata

 if TYPE_CHECKING:
@@ -336,6 +337,9 @@ class OneWayShmController(IController):
         Create OpenCV Mat from frame data using GstCaps.
         Matches C# CreateMat behavior - creates Mat wrapping the data.

+        Frame data layout from GStreamer zerosink:
+        [FrameMetadata (16 bytes)][Pixel Data (WxHxC bytes)]
+
         Args:
             frame: ZeroBuffer frame
@@ -359,31 +363,40 @@ class OneWayShmController(IController):
             else:
                 channels = 3  # Default to RGB

-            # Get frame data directly as numpy array (zero-copy view)
-            # Frame.data is already a memoryview/buffer that can be wrapped
-            data = np.frombuffer(frame.data, dtype=np.uint8)
+            # Frame data has 16-byte FrameMetadata prefix that must be stripped
+            # Layout: [FrameMetadata (16 bytes)][Pixel Data]
+            if frame.size < FRAME_METADATA_SIZE:
+                logger.error(
+                    "Frame too small for FrameMetadata: %d bytes (need at least %d)",
+                    frame.size,
+                    FRAME_METADATA_SIZE,
+                )
+                return None
+
+            # Get pixel data (skip 16-byte FrameMetadata prefix)
+            pixel_data = np.frombuffer(frame.data[FRAME_METADATA_SIZE:], dtype=np.uint8)

-            # Check data size matches expected
+            # Check pixel data size matches expected
             expected_size = height * width * channels
-            if len(data) != expected_size:
+            if len(pixel_data) != expected_size:
                 logger.error(
-                    "Data size mismatch. Expected %d bytes for %dx%d with %d channels, got %d",
+                    "Pixel data size mismatch. Expected %d bytes for %dx%d with %d channels, got %d",
                     expected_size,
                     width,
                     height,
                     channels,
-                    len(data),
+                    len(pixel_data),
                 )
                 return None

             # Reshape to image dimensions - this is zero-copy, just changes the view
             # This matches C#: new Mat(Height, Width, Depth, Channels, ptr, Width * Channels)
             if channels == 3:
-                mat = data.reshape((height, width, 3))
+                mat = pixel_data.reshape((height, width, 3))
             elif channels == 1:
-                mat = data.reshape((height, width))
+                mat = pixel_data.reshape((height, width))
             elif channels == 4:
-                mat = data.reshape((height, width, 4))
+                mat = pixel_data.reshape((height, width, 4))
             else:
                 logger.error("Unsupported channel count: %d", channels)
                 return None
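
The strip-and-reshape above stays zero-copy end to end: slicing a memoryview, wrapping it with `np.frombuffer`, and calling `reshape` all produce views over the same shared-memory bytes. A self-contained illustration of the pattern with a synthetic buffer (not the SDK's actual `Frame` type):

import numpy as np

FRAME_METADATA_SIZE = 16
height, width, channels = 2, 3, 3

# Synthetic frame: 16-byte metadata prefix followed by pixel bytes.
frame_bytes = bytes(FRAME_METADATA_SIZE) + bytes(range(height * width * channels))

view = memoryview(frame_bytes)[FRAME_METADATA_SIZE:]  # zero-copy slice
pixel_data = np.frombuffer(view, dtype=np.uint8)      # wraps the buffer, no copy
mat = pixel_data.reshape((height, width, channels))   # view, no copy

assert not mat.flags.owndata  # still backed by the original buffer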
@@ -393,41 +406,55 @@ class OneWayShmController(IController):
             # No caps available - try to infer from frame size
             logger.warning("No GstCaps available, attempting to infer from frame size")

-            # Try common resolutions
-            frame_size = len(frame.data)
+            # Frame data has 16-byte FrameMetadata prefix
+            if frame.size < FRAME_METADATA_SIZE:
+                logger.error(
+                    "Frame too small for FrameMetadata: %d bytes (need at least %d)",
+                    frame.size,
+                    FRAME_METADATA_SIZE,
+                )
+                return None
+
+            # Calculate pixel data size (frame size minus 16-byte metadata prefix)
+            pixel_data_size = frame.size - FRAME_METADATA_SIZE

             # First, check if it's a perfect square (square frame)
             import math

-            sqrt_size = math.sqrt(frame_size)
+            sqrt_size = math.sqrt(pixel_data_size)
             if sqrt_size == int(sqrt_size):
                 # Perfect square - assume square grayscale image
                 dimension = int(sqrt_size)
                 logger.info(
-                    f"Frame size {frame_size} is a perfect square, assuming {dimension}x{dimension} grayscale"
+                    f"Pixel data size {pixel_data_size} is a perfect square, "
+                    f"assuming {dimension}x{dimension} grayscale"
                 )
-                data = np.frombuffer(frame.data, dtype=np.uint8)
-                return data.reshape((dimension, dimension))  # type: ignore[no-any-return]
+                pixel_data = np.frombuffer(frame.data[FRAME_METADATA_SIZE:], dtype=np.uint8)
+                return pixel_data.reshape((dimension, dimension))  # type: ignore[no-any-return]

             # Also check for square RGB (size = width * height * 3)
-            if frame_size % 3 == 0:
-                pixels = frame_size // 3
+            if pixel_data_size % 3 == 0:
+                pixels = pixel_data_size // 3
                 sqrt_pixels = math.sqrt(pixels)
                 if sqrt_pixels == int(sqrt_pixels):
                     dimension = int(sqrt_pixels)
-                    logger.info(f"Frame size {frame_size} suggests {dimension}x{dimension} RGB")
-                    data = np.frombuffer(frame.data, dtype=np.uint8)
-                    return data.reshape((dimension, dimension, 3))  # type: ignore[no-any-return]
+                    logger.info(
+                        f"Pixel data size {pixel_data_size} suggests {dimension}x{dimension} RGB"
+                    )
+                    pixel_data = np.frombuffer(frame.data[FRAME_METADATA_SIZE:], dtype=np.uint8)
+                    return pixel_data.reshape((dimension, dimension, 3))  # type: ignore[no-any-return]

             # Check for square RGBA (size = width * height * 4)
-            if frame_size % 4 == 0:
-                pixels = frame_size // 4
+            if pixel_data_size % 4 == 0:
+                pixels = pixel_data_size // 4
                 sqrt_pixels = math.sqrt(pixels)
                 if sqrt_pixels == int(sqrt_pixels):
                     dimension = int(sqrt_pixels)
-                    logger.info(f"Frame size {frame_size} suggests {dimension}x{dimension} RGBA")
-                    data = np.frombuffer(frame.data, dtype=np.uint8)
-                    return data.reshape((dimension, dimension, 4))  # type: ignore[no-any-return]
+                    logger.info(
+                        f"Pixel data size {pixel_data_size} suggests {dimension}x{dimension} RGBA"
+                    )
+                    pixel_data = np.frombuffer(frame.data[FRAME_METADATA_SIZE:], dtype=np.uint8)
+                    return pixel_data.reshape((dimension, dimension, 4))  # type: ignore[no-any-return]

             common_resolutions = [
                 (640, 480, 3),  # VGA RGB
@@ -438,7 +465,7 @@ class OneWayShmController(IController):
             ]

             for width, height, channels in common_resolutions:
-                if frame_size == width * height * channels:
+                if pixel_data_size == width * height * channels:
                     logger.info(f"Inferred resolution: {width}x{height} with {channels} channels")

                     # Create caps for future use
@@ -447,16 +474,16 @@ class OneWayShmController(IController):
                         width=width, height=height, format=format_str
                     )

-                    # Create Mat
-                    data = np.frombuffer(frame.data, dtype=np.uint8)
+                    # Create Mat from pixel data (skip 16-byte FrameMetadata prefix)
+                    pixel_data = np.frombuffer(frame.data[FRAME_METADATA_SIZE:], dtype=np.uint8)
                     if channels == 3:
-                        return data.reshape((height, width, 3))  # type: ignore[no-any-return]
+                        return pixel_data.reshape((height, width, 3))  # type: ignore[no-any-return]
                     elif channels == 1:
-                        return data.reshape((height, width))  # type: ignore[no-any-return]
+                        return pixel_data.reshape((height, width))  # type: ignore[no-any-return]
                     elif channels == 4:
-                        return data.reshape((height, width, 4))  # type: ignore[no-any-return]
+                        return pixel_data.reshape((height, width, 4))  # type: ignore[no-any-return]

-            logger.error(f"Could not infer resolution for frame size {frame_size}")
+            logger.error(f"Could not infer resolution for pixel data size {pixel_data_size}")
             return None

         except Exception as e:
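
The inference cascade above is order-dependent: grayscale square first, then RGB square, then RGBA square, then the common-resolution table. A compact restatement of the square checks, using `math.isqrt` in place of the float `math.sqrt` comparison (equivalent for realistic frame sizes):

import math

def infer_square(pixel_data_size: int):
    # Mirrors the order above: GRAY8 square, then RGB square, then RGBA square.
    for channels in (1, 3, 4):
        if pixel_data_size % channels == 0:
            side = math.isqrt(pixel_data_size // channels)
            if side * side == pixel_data_size // channels:
                return (side, side, channels)
    return None

print(infer_square(307200))  # (320, 320, 3), since 307200 = 320 * 320 * 3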
@@ -553,7 +580,7 @@ class DuplexShmController(IController):
         self._gst_caps: Optional[GstCaps] = None
         self._metadata: Optional[GstMetadata] = None
         self._is_running = False
-        self._on_frame_callback: Optional[Callable[[Mat, Mat], None]] = None  # type: ignore[valid-type]
+        self._on_frame_callback: Optional[Callable[[FrameMetadata, Mat, Mat], None]] = None  # type: ignore[valid-type]
         self._frame_count = 0

     @property
@@ -567,14 +594,18 @@ class DuplexShmController(IController):

     def start(
         self,
-        on_frame: Callable[[Mat, Mat], None],  # type: ignore[override,valid-type]
+        on_frame: Callable[[FrameMetadata, Mat, Mat], None],  # type: ignore[override,valid-type]
         cancellation_token: Optional[threading.Event] = None,
     ) -> None:
         """
-        Start duplex frame processing.
+        Start duplex frame processing with FrameMetadata.
+
+        The callback receives FrameMetadata (frame number, timestamp), the
+        input Mat, and the output Mat to fill. The 16-byte metadata prefix is
+        stripped from the frame data before creating the input Mat.

         Args:
-            on_frame: Callback that receives input frame and output frame to fill
+            on_frame: Callback that receives (FrameMetadata, input_mat, output_mat)
             cancellation_token: Optional cancellation token
         """
         if self._is_running:
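
A callback matching the new three-argument signature might look like the following sketch. `Mat` is treated as a NumPy array here, and `controller` stands for an already configured `DuplexShmController` (assumed, not shown in this diff):

import numpy as np
from rocket_welder_sdk import FrameMetadata

def on_frame(meta: FrameMetadata, input_mat: np.ndarray, output_mat: np.ndarray) -> None:
    # Fill the caller-provided output buffer in place; output_mat wraps
    # shared memory, so it must be written into, never reassigned.
    np.copyto(output_mat, input_mat)
    # Use the per-frame metadata, e.g. darken every 30th frame as a marker.
    if meta.frame_number % 30 == 0:
        output_mat //= 2

controller.start(on_frame)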
@@ -590,7 +621,6 @@ class DuplexShmController(IController):
            )

        # Create duplex server using factory
-       # Convert timeout from milliseconds to seconds for Python API
        if not self._connection.buffer_name:
            raise ValueError("Buffer name is required for shared memory connection")
        timeout_seconds = self._connection.timeout_ms / 1000.0
@@ -698,91 +728,98 @@ class DuplexShmController(IController):

     def _process_duplex_frame(self, request_frame: Frame, response_writer: Writer) -> None:
         """
-        Process a frame in duplex mode.
+        Process a frame in duplex mode with FrameMetadata.
+
+        The frame data has a 16-byte FrameMetadata prefix that is stripped
+        before creating the input Mat.

         Args:
-            request_frame: Input frame from the request
+            request_frame: Input frame from the request (with metadata prefix)
             response_writer: Writer for the response frame
         """
-        logger.debug(
-            "_process_duplex_frame called, frame_count=%d, has_gst_caps=%s",
-            self._frame_count,
-            self._gst_caps is not None,
-        )
         try:
             if not self._on_frame_callback:
                 logger.warning("No frame callback set")
                 return

+            # Check frame size is sufficient for metadata
+            if request_frame.size < FRAME_METADATA_SIZE:
+                logger.warning("Frame too small for FrameMetadata: %d bytes", request_frame.size)
+                return
+
             self._frame_count += 1

-            # Try to read metadata if we don't have it yet
-            if (
-                self._metadata is None
-                and self._duplex_server
-                and self._duplex_server.request_reader
-            ):
-                try:
-                    metadata_bytes = self._duplex_server.request_reader.get_metadata()
-                    if metadata_bytes:
-                        # Use helper method to parse metadata
-                        metadata = self._parse_metadata_json(metadata_bytes)
-                        if metadata:
-                            self._metadata = metadata
-                            self._gst_caps = metadata.caps
-                            logger.info(
-                                "Successfully read metadata from buffer '%s': %s",
-                                self._connection.buffer_name,
-                                self._gst_caps,
-                            )
-                        else:
-                            logger.debug("Failed to parse metadata in frame processing")
-                except Exception as e:
-                    logger.debug("Failed to read metadata in frame processing: %s", e)
+            # Parse FrameMetadata from the beginning of the frame
+            frame_metadata = FrameMetadata.from_bytes(request_frame.data)
+
+            # Calculate pixel data offset and size
+            pixel_data_offset = FRAME_METADATA_SIZE
+            pixel_data_size = request_frame.size - FRAME_METADATA_SIZE

-            # Convert input frame to Mat
-            input_mat = self._frame_to_mat(request_frame)
-            if input_mat is None:
-                logger.error("Failed to convert frame to Mat, gst_caps=%s", self._gst_caps)
+            # GstCaps must be available for width/height/format
+            # (FrameMetadata no longer contains these - they're stream-level, not per-frame)
+            if not self._gst_caps:
+                logger.warning(
+                    "GstCaps not available, skipping frame %d", frame_metadata.frame_number
+                )
                 return

-            # Get buffer for output frame - use context manager for RAII
-            with response_writer.get_frame_buffer(request_frame.size) as output_buffer:
-                # Create output Mat from buffer (zero-copy)
-                if self._gst_caps:
-                    height = self._gst_caps.height or 480
-                    width = self._gst_caps.width or 640
+            width = self._gst_caps.width
+            height = self._gst_caps.height
+            format_str = self._gst_caps.format
+
+            # Determine channels from format
+            if format_str in ["RGB", "BGR"]:
+                channels = 3
+            elif format_str in ["RGBA", "BGRA", "ARGB", "ABGR"]:
+                channels = 4
+            elif format_str in ["GRAY8", "GRAY16_LE", "GRAY16_BE"]:
+                channels = 1
+            else:
+                channels = 3  # Default to RGB
+
+            # Create input Mat from pixel data (after metadata prefix)
+            pixel_data = np.frombuffer(request_frame.data[pixel_data_offset:], dtype=np.uint8)
+
+            expected_size = height * width * channels
+            if len(pixel_data) != expected_size:
+                logger.error(
+                    "Pixel data size mismatch. Expected %d bytes for %dx%d with %d channels, got %d",
+                    expected_size,
+                    width,
+                    height,
+                    channels,
+                    len(pixel_data),
+                )
+                return

-                    if self._gst_caps.format == "RGB" or self._gst_caps.format == "BGR":
-                        output_mat = np.frombuffer(output_buffer, dtype=np.uint8).reshape(
-                            (height, width, 3)
-                        )
-                    elif self._gst_caps.format == "GRAY8":
-                        output_mat = np.frombuffer(output_buffer, dtype=np.uint8).reshape(
-                            (height, width)
-                        )
-                    else:
-                        # Default to same shape as input
-                        output_mat = np.frombuffer(output_buffer, dtype=np.uint8).reshape(
-                            input_mat.shape
-                        )
-                else:
-                    # Use same shape as input
-                    output_mat = np.frombuffer(output_buffer, dtype=np.uint8).reshape(
-                        input_mat.shape
-                    )
+            # Reshape to image dimensions
+            if channels == 1:
+                input_mat = pixel_data.reshape((height, width))
+            else:
+                input_mat = pixel_data.reshape((height, width, channels))
+
+            # Response doesn't need metadata prefix - just pixel data
+            with response_writer.get_frame_buffer(pixel_data_size) as output_buffer:
+                # Create output Mat from buffer (zero-copy)
+                output_data = np.frombuffer(output_buffer, dtype=np.uint8)
+                if channels == 1:
+                    output_mat = output_data.reshape((height, width))
+                else:
+                    output_mat = output_data.reshape((height, width, channels))

-                # Call user's processing function
-                self._on_frame_callback(input_mat, output_mat)
+                # Call user's processing function with metadata
+                self._on_frame_callback(frame_metadata, input_mat, output_mat)

             # Commit the response frame after buffer is released
             response_writer.commit_frame()

             logger.debug(
-                "Processed duplex frame %d (%dx%d)",
-                self._frame_count,
-                input_mat.shape[1],
-                input_mat.shape[0],
+                "Processed duplex frame %d (%dx%d %s)",
+                frame_metadata.frame_number,
+                width,
+                height,
+                format_str,
             )

         except Exception as e:
@@ -0,0 +1,138 @@
+"""
+Frame metadata structure prepended to each frame in zerobuffer shared memory.
+
+This module provides the FrameMetadata dataclass that matches the C++ struct
+defined in frame_metadata.h.
+
+Protocol Layout (16 bytes, 8-byte aligned):
+    [0-7]   frame_number - Sequential frame index (0-based)
+    [8-15]  timestamp_ns - GStreamer PTS in nanoseconds (UINT64_MAX if unavailable)
+
+Note: Width, height, and format are NOT included here because they are
+stream-level properties that never change per-frame. They are stored once
+in the ZeroBuffer metadata section as GstCaps (via GstMetadata).
+This avoids redundant data and follows single-source-of-truth principle.
+"""
+
+from __future__ import annotations
+
+import struct
+from dataclasses import dataclass
+from typing import ClassVar, Dict, Optional
+
+# Size of the FrameMetadata structure in bytes
+FRAME_METADATA_SIZE = 16
+
+# Value indicating timestamp is unavailable
+TIMESTAMP_UNAVAILABLE = 0xFFFFFFFFFFFFFFFF  # UINT64_MAX
+
+# Struct format: little-endian, 2 uint64
+# Q = unsigned long long (8 bytes)
+_FRAME_METADATA_FORMAT = "<QQ"
+
+
+@dataclass(frozen=True)
+class FrameMetadata:
+    """
+    Frame metadata prepended to each frame in zerobuffer shared memory.
+
+    Attributes:
+        frame_number: Sequential frame index (0-based, increments per frame)
+        timestamp_ns: GStreamer PTS in nanoseconds (TIMESTAMP_UNAVAILABLE if not set)
+
+    Note: Width, height, and format come from GstCaps in ZeroBuffer metadata section,
+    not from per-frame metadata. This avoids redundant data.
+    """
+
+    frame_number: int
+    timestamp_ns: int
+
+    @classmethod
+    def from_bytes(cls, data: bytes | memoryview) -> FrameMetadata:
+        """
+        Parse FrameMetadata from raw bytes.
+
+        Args:
+            data: At least 16 bytes of data
+
+        Returns:
+            FrameMetadata instance
+
+        Raises:
+            ValueError: If data is too short
+        """
+        if len(data) < FRAME_METADATA_SIZE:
+            raise ValueError(f"Data must be at least {FRAME_METADATA_SIZE} bytes, got {len(data)}")
+
+        # Unpack the struct
+        frame_number, timestamp_ns = struct.unpack(
+            _FRAME_METADATA_FORMAT, data[:FRAME_METADATA_SIZE]
+        )
+
+        return cls(
+            frame_number=frame_number,
+            timestamp_ns=timestamp_ns,
+        )
+
+    @property
+    def has_timestamp(self) -> bool:
+        """Check if timestamp is available."""
+        return self.timestamp_ns != TIMESTAMP_UNAVAILABLE
+
+    @property
+    def timestamp_ms(self) -> Optional[float]:
+        """Get timestamp in milliseconds, or None if unavailable."""
+        if self.has_timestamp:
+            return self.timestamp_ns / 1_000_000.0
+        return None
+
+    def __str__(self) -> str:
+        """Return string representation."""
+        timestamp = f"{self.timestamp_ns / 1_000_000.0:.3f}ms" if self.has_timestamp else "N/A"
+        return f"Frame {self.frame_number} @ {timestamp}"
+
+
+# Common GstVideoFormat values - kept for reference when working with GstCaps
+class GstVideoFormat:
+    """Common GStreamer video format values (for use with GstCaps)."""
+
+    UNKNOWN = 0
+    I420 = 2
+    YV12 = 3
+    YUY2 = 4
+    UYVY = 5
+    RGBA = 11
+    BGRA = 12
+    ARGB = 13
+    ABGR = 14
+    RGB = 15
+    BGR = 16
+    NV12 = 23
+    NV21 = 24
+    GRAY8 = 25
+    GRAY16_BE = 26
+    GRAY16_LE = 27
+
+    _FORMAT_NAMES: ClassVar[Dict[int, str]] = {
+        0: "UNKNOWN",
+        2: "I420",
+        3: "YV12",
+        4: "YUY2",
+        5: "UYVY",
+        11: "RGBA",
+        12: "BGRA",
+        13: "ARGB",
+        14: "ABGR",
+        15: "RGB",
+        16: "BGR",
+        23: "NV12",
+        24: "NV21",
+        25: "GRAY8",
+        26: "GRAY16_BE",
+        27: "GRAY16_LE",
+    }
+
+    @classmethod
+    def to_string(cls, format_value: int) -> str:
+        """Convert format value to string name."""
+        return cls._FORMAT_NAMES.get(format_value, f"FORMAT_{format_value}")
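
Because the wire format is just two little-endian uint64 values, the parser can be exercised round-trip with nothing but `struct` and the definitions above:

import struct
from rocket_welder_sdk import FRAME_METADATA_SIZE, FrameMetadata

# Build a 16-byte prefix: frame 42 at PTS 1_500_000_000 ns.
raw = struct.pack("<QQ", 42, 1_500_000_000)
assert len(raw) == FRAME_METADATA_SIZE

meta = FrameMetadata.from_bytes(raw)
assert meta.frame_number == 42
assert meta.has_timestamp
assert meta.timestamp_ms == 1500.0
print(meta)  # Frame 42 @ 1500.000ms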
@@ -0,0 +1,52 @@
+"""
+High-level API for RocketWelder SDK.
+
+Mirrors C# RocketWelder.SDK API for consistent developer experience.
+
+Example:
+    from rocket_welder_sdk.high_level import RocketWelderClient
+
+    with RocketWelderClient.from_environment() as client:
+        nose = client.keypoints.define_point("nose")
+        person = client.segmentation.define_class(1, "person")
+        client.start(process_frame)
+"""
+
+from .client import RocketWelderClient, RocketWelderClientOptions
+from .connection_strings import (
+    KeyPointsConnectionString,
+    SegmentationConnectionString,
+    VideoSourceConnectionString,
+    VideoSourceType,
+)
+from .data_context import (
+    IKeyPointsDataContext,
+    ISegmentationDataContext,
+)
+from .schema import (
+    IKeyPointsSchema,
+    ISegmentationSchema,
+    KeyPointDefinition,
+    SegmentClass,
+)
+from .transport_protocol import (
+    TransportKind,
+    TransportProtocol,
+)
+
+__all__ = [
+    "IKeyPointsDataContext",
+    "IKeyPointsSchema",
+    "ISegmentationDataContext",
+    "ISegmentationSchema",
+    "KeyPointDefinition",
+    "KeyPointsConnectionString",
+    "RocketWelderClient",
+    "RocketWelderClientOptions",
+    "SegmentClass",
+    "SegmentationConnectionString",
+    "TransportKind",
+    "TransportProtocol",
+    "VideoSourceConnectionString",
+    "VideoSourceType",
+]