dexcontrol-0.3.0-py3-none-any.whl → dexcontrol-0.3.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dexcontrol has been flagged as possibly problematic.

Files changed (51)
  1. dexcontrol/__init__.py +16 -7
  2. dexcontrol/apps/dualsense_teleop_base.py +1 -1
  3. dexcontrol/comm/__init__.py +51 -0
  4. dexcontrol/comm/base.py +421 -0
  5. dexcontrol/comm/rtc.py +400 -0
  6. dexcontrol/comm/subscribers.py +329 -0
  7. dexcontrol/config/sensors/cameras/__init__.py +1 -2
  8. dexcontrol/config/sensors/cameras/zed_camera.py +2 -2
  9. dexcontrol/config/sensors/vega_sensors.py +12 -18
  10. dexcontrol/core/arm.py +29 -25
  11. dexcontrol/core/chassis.py +3 -12
  12. dexcontrol/core/component.py +68 -43
  13. dexcontrol/core/hand.py +50 -52
  14. dexcontrol/core/head.py +14 -26
  15. dexcontrol/core/misc.py +188 -166
  16. dexcontrol/core/robot_query_interface.py +137 -114
  17. dexcontrol/core/torso.py +0 -4
  18. dexcontrol/robot.py +15 -37
  19. dexcontrol/sensors/__init__.py +1 -2
  20. dexcontrol/sensors/camera/__init__.py +0 -2
  21. dexcontrol/sensors/camera/base_camera.py +144 -0
  22. dexcontrol/sensors/camera/rgb_camera.py +67 -63
  23. dexcontrol/sensors/camera/zed_camera.py +89 -147
  24. dexcontrol/sensors/imu/chassis_imu.py +76 -56
  25. dexcontrol/sensors/imu/zed_imu.py +54 -43
  26. dexcontrol/sensors/lidar/rplidar.py +16 -20
  27. dexcontrol/sensors/manager.py +4 -11
  28. dexcontrol/sensors/ultrasonic.py +14 -27
  29. dexcontrol/utils/__init__.py +0 -11
  30. dexcontrol/utils/comm_helper.py +111 -0
  31. dexcontrol/utils/constants.py +1 -1
  32. dexcontrol/utils/os_utils.py +8 -22
  33. {dexcontrol-0.3.0.dist-info → dexcontrol-0.3.1.dist-info}/METADATA +2 -1
  34. dexcontrol-0.3.1.dist-info/RECORD +68 -0
  35. dexcontrol/config/sensors/cameras/luxonis_camera.py +0 -51
  36. dexcontrol/sensors/camera/luxonis_camera.py +0 -169
  37. dexcontrol/utils/rate_limiter.py +0 -172
  38. dexcontrol/utils/rtc_utils.py +0 -144
  39. dexcontrol/utils/subscribers/__init__.py +0 -52
  40. dexcontrol/utils/subscribers/base.py +0 -281
  41. dexcontrol/utils/subscribers/camera.py +0 -332
  42. dexcontrol/utils/subscribers/decoders.py +0 -88
  43. dexcontrol/utils/subscribers/generic.py +0 -110
  44. dexcontrol/utils/subscribers/imu.py +0 -175
  45. dexcontrol/utils/subscribers/lidar.py +0 -172
  46. dexcontrol/utils/subscribers/protobuf.py +0 -111
  47. dexcontrol/utils/subscribers/rtc.py +0 -316
  48. dexcontrol/utils/zenoh_utils.py +0 -369
  49. dexcontrol-0.3.0.dist-info/RECORD +0 -76
  50. {dexcontrol-0.3.0.dist-info → dexcontrol-0.3.1.dist-info}/WHEEL +0 -0
  51. {dexcontrol-0.3.0.dist-info → dexcontrol-0.3.1.dist-info}/licenses/LICENSE +0 -0
dexcontrol/utils/subscribers/rtc.py (deleted)
@@ -1,316 +0,0 @@
- # Copyright (C) 2025 Dexmate Inc.
- #
- # This software is dual-licensed:
- #
- # 1. GNU Affero General Public License v3.0 (AGPL-3.0)
- #    See LICENSE-AGPL for details
- #
- # 2. Commercial License
- #    For commercial licensing terms, contact: contact@dexmate.ai
-
- import asyncio
- import json
- import threading
- import time
-
- import numpy as np
- import websockets
- from aiortc import RTCPeerConnection, RTCSessionDescription
- from loguru import logger
-
- # Try to import uvloop for better performance
- try:
-     import uvloop  # type: ignore[import-untyped]
-
-     UVLOOP_AVAILABLE = True
- except ImportError:
-     uvloop = None  # type: ignore[assignment]
-     UVLOOP_AVAILABLE = False
-
-
- class RTCSubscriber:
-     """
-     Subscriber for receiving video data via RTC.
-
-     This class connects to a RTC peer through a signaling server,
-     receives a video stream, and makes the latest frame available.
-     """
-
-     def __init__(
-         self,
-         url: str,
-         name: str = "rtc_subscriber",
-         enable_fps_tracking: bool = True,
-         fps_log_interval: int = 100,
-     ):
-         """
-         Initialize the RTC subscriber.
-
-         Args:
-             url: WebSocket URL of the signaling server.
-             name: Name for logging purposes.
-             enable_fps_tracking: Whether to track and log FPS metrics.
-             fps_log_interval: Number of frames between FPS calculations.
-         """
-         self._url = url
-         self._name = name
-         self._pc = RTCPeerConnection()
-         self._latest_frame: np.ndarray | None = None
-         self._active = False
-         self._data_lock = threading.Lock()
-         self._stop_event = (
-             threading.Event()
-         )  # Use threading.Event for cross-thread communication
-         self._async_stop_event = None  # Will be created in the async context
-         self._websocket = None  # Store websocket reference for clean shutdown
-
-         # FPS tracking
-         self._enable_fps_tracking = enable_fps_tracking
-         self._fps_log_interval = fps_log_interval
-         self._frame_count = 0
-         self._fps = 0.0
-         self._last_fps_time = time.time()
-
-         self._thread = threading.Thread(target=self._run_event_loop, daemon=True)
-         self._thread.start()
-
-     def _run_event_loop(self):
-         """Run the asyncio event loop in a separate thread."""
-         try:
-             # Use uvloop if available for better performance
-             if UVLOOP_AVAILABLE and uvloop is not None:
-                 # Create a new uvloop event loop for this thread
-                 loop = uvloop.new_event_loop()
-                 asyncio.set_event_loop(loop)
-                 logger.debug(f"Using uvloop for {self._name}")
-
-                 try:
-                     loop.run_until_complete(self._run())
-                 finally:
-                     loop.close()
-             else:
-                 # Use default asyncio event loop
-                 asyncio.run(self._run())
-
-         except Exception as e:
-             logger.error(f"Event loop error for {self._name}: {e}")
-         finally:
-             with self._data_lock:
-                 self._active = False
-
-     async def _run(self):
-         """
-         Connects to a RTC peer, receives video, and saves frames to disk.
-         """
-         # Create async stop event in the async context
-         self._async_stop_event = asyncio.Event()
-
-         # Start a task to monitor the threading stop event
-         monitor_task = asyncio.create_task(self._monitor_stop_event())
-
-         @self._pc.on("track")
-         async def on_track(track):
-             if track.kind == "video":
-                 while (
-                     self._async_stop_event is not None
-                     and not self._async_stop_event.is_set()
-                 ):
-                     try:
-                         frame = await asyncio.wait_for(
-                             track.recv(), timeout=1.0
-                         )  # Reduced timeout for faster shutdown response
-                         img = frame.to_ndarray(format="rgb24")
-                         with self._data_lock:
-                             self._latest_frame = img
-                             if not self._active:
-                                 self._active = True
-                         self._update_fps_metrics()
-                     except asyncio.TimeoutError:
-                         # Check if we should stop before logging error
-                         if (
-                             self._async_stop_event is not None
-                             and not self._async_stop_event.is_set()
-                         ):
-                             logger.warning(
-                                 f"Timeout: No frame received in 1 second from {self._url}"
-                             )
-                         continue
-                     except Exception as e:
-                         if (
-                             self._async_stop_event is not None
-                             and not self._async_stop_event.is_set()
-                         ):
-                             logger.error(f"Error receiving frame from {self._url}: {e}")
-                         break
-
-         @self._pc.on("connectionstatechange")
-         async def on_connectionstatechange():
-             if self._pc.connectionState == "failed":
-                 logger.warning(f"RTC connection failed for {self._url}")
-                 await self._pc.close()
-                 if self._async_stop_event is not None:
-                     self._async_stop_event.set()
-
-         try:
-             async with websockets.connect(self._url) as websocket:
-                 self._websocket = websocket
-
-                 # Create an offer. The server's assertive codec control makes
-                 # client-side preferences redundant and potentially conflicting.
-                 self._pc.addTransceiver("video", direction="recvonly")
-                 offer = await self._pc.createOffer()
-                 await self._pc.setLocalDescription(offer)
-
-                 # Send the offer to the server
-                 await websocket.send(
-                     json.dumps(
-                         {
-                             "sdp": self._pc.localDescription.sdp,
-                             "type": self._pc.localDescription.type,
-                         }
-                     )
-                 )
-
-                 # Wait for the answer
-                 response = json.loads(await websocket.recv())
-                 if response["type"] == "answer":
-                     await self._pc.setRemoteDescription(
-                         RTCSessionDescription(
-                             sdp=response["sdp"], type=response["type"]
-                         )
-                     )
-                 else:
-                     logger.error(
-                         f"Received unexpected message type: {response['type']} from {self._url}"
-                     )
-                     if self._async_stop_event is not None:
-                         self._async_stop_event.set()
-
-                 # Wait until the stop event is set
-                 if self._async_stop_event is not None:
-                     await self._async_stop_event.wait()
-
-         except websockets.exceptions.ConnectionClosed:
-             logger.info(f"WebSocket connection closed for {self._url}")
-         except Exception as e:
-             if not self._async_stop_event.is_set():
-                 logger.error(f"Operation failed for {self._url}: {e}")
-         finally:
-             # Cancel the monitor task
-             monitor_task.cancel()
-             try:
-                 await monitor_task
-             except asyncio.CancelledError:
-                 pass
-
-             # Close websocket if still open
-             if self._websocket:
-                 try:
-                     await self._websocket.close()
-                 except Exception as e:
-                     logger.debug(f"Error closing websocket for {self._url}: {e}")
-
-             # Close peer connection if not already closed
-             if self._pc.connectionState != "closed":
-                 try:
-                     await self._pc.close()
-                 except Exception as e:
-                     logger.debug(f"Error closing peer connection for {self._url}: {e}")
-
-             with self._data_lock:
-                 self._active = False
-
-     async def _monitor_stop_event(self):
-         """Monitor the threading stop event and set the async stop event when needed."""
-         while not self._stop_event.is_set():
-             await asyncio.sleep(0.1)  # Check every 100ms
-         if self._async_stop_event is not None:
-             self._async_stop_event.set()
-
-     def _update_fps_metrics(self) -> None:
-         """Update FPS tracking metrics.
-
-         Increments frame counter and recalculates FPS at specified intervals.
-         Only has an effect if fps_tracking was enabled during initialization.
-         """
-         if not self._enable_fps_tracking:
-             return
-
-         self._frame_count += 1
-         if self._frame_count >= self._fps_log_interval:
-             current_time = time.time()
-             elapsed = current_time - self._last_fps_time
-             self._fps = self._frame_count / elapsed
-             logger.info(f"{self._name} frequency: {self._fps:.2f} Hz")
-             self._frame_count = 0
-             self._last_fps_time = current_time
-
-     def get_latest_data(self) -> np.ndarray | None:
-         """
-         Get the latest video frame.
-
-         Returns:
-             Latest video frame as a numpy array (HxWxC RGB) if available, None otherwise.
-         """
-         with self._data_lock:
-             return self._latest_frame.copy() if self._latest_frame is not None else None
-
-     def is_active(self) -> bool:
-         """Check if the subscriber is actively receiving data."""
-         with self._data_lock:
-             return self._active
-
-     def wait_for_active(self, timeout: float = 5.0) -> bool:
-         """
-         Wait for the subscriber to start receiving data.
-
-         Args:
-             timeout: Maximum time to wait in seconds.
-
-         Returns:
-             True if subscriber becomes active, False if timeout is reached.
-         """
-         start_time = time.time()
-         while not self.is_active():
-             if time.time() - start_time > timeout:
-                 logger.error(
-                     f"No data received from {self._name} at {self._url} after {timeout}s"
-                 )
-                 return False
-             time.sleep(0.1)
-         return True
-
-     def shutdown(self):
-         """Stop the subscriber and release resources."""
-
-         # Signal the async loop to stop
-         self._stop_event.set()
-
-         # Wait for the thread to finish with a reasonable timeout
-         if self._thread.is_alive():
-             self._thread.join(
-                 timeout=10.0
-             )  # Increased timeout for more graceful shutdown
-
-             if self._thread.is_alive():
-                 logger.warning(
-                     f"{self._name} thread did not shut down gracefully within timeout."
-                 )
-
-         # Ensure active state is set to False
-         with self._data_lock:
-             self._active = False
-
-     @property
-     def name(self) -> str:
-         """Get the subscriber name."""
-         return self._name
-
-     @property
-     def fps(self) -> float:
-         """Get the current FPS measurement.
-
-         Returns:
-             Current frames per second measurement.
-         """
-         return self._fps
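
For context, the removed `RTCSubscriber` ran its own asyncio event loop on a daemon thread and exposed a simple polling API (`wait_for_active`, `get_latest_data`, `shutdown`). Below is a minimal usage sketch based only on the docstrings and signatures above; the signaling-server URL is a placeholder, and in 0.3.1 the equivalent functionality appears to move under `dexcontrol/comm/`:

```python
# Hypothetical usage of the removed RTCSubscriber (dexcontrol 0.3.0).
# The WebSocket signaling URL below is a placeholder, not a real endpoint.
from dexcontrol.utils.subscribers.rtc import RTCSubscriber

sub = RTCSubscriber(url="ws://robot.local:8765", name="head_camera_rtc")
try:
    # Block until the first decoded frame arrives, or give up after 5 s.
    if sub.wait_for_active(timeout=5.0):
        frame = sub.get_latest_data()  # HxWxC RGB numpy array, or None
        if frame is not None:
            print(f"frame shape: {frame.shape}, receive rate: {sub.fps:.1f} Hz")
finally:
    sub.shutdown()  # signals the background event-loop thread to stop
```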
dexcontrol/utils/zenoh_utils.py (deleted)
@@ -1,369 +0,0 @@
- # Copyright (C) 2025 Dexmate Inc.
- #
- # This software is dual-licensed:
- #
- # 1. GNU Affero General Public License v3.0 (AGPL-3.0)
- #    See LICENSE-AGPL for details
- #
- # 2. Commercial License
- #    For commercial licensing terms, contact: contact@dexmate.ai
-
- """Zenoh utilities for dexcontrol.
-
- This module provides comprehensive utility functions for working with Zenoh
- communication framework, including session management, configuration loading,
- JSON queries, and statistics computation.
- """
-
- import gc
- import json
- import threading
- import time
- from pathlib import Path
- from typing import TYPE_CHECKING
-
- import numpy as np
- import zenoh
- from loguru import logger
- from omegaconf import DictConfig, OmegaConf
-
- import dexcontrol
- from dexcontrol.config.vega import get_vega_config
- from dexcontrol.utils.os_utils import resolve_key_name
-
- if TYPE_CHECKING:
-     from dexcontrol.config.vega import VegaConfig
-
-
- # =============================================================================
- # Session Management Functions
- # =============================================================================
-
-
- def get_default_zenoh_config() -> str | None:
-     """Gets the default zenoh configuration file path.
-
-     Returns:
-         Path to default config file if it exists, None otherwise.
-     """
-     default_path = dexcontrol.COMM_CFG_PATH
-     if not default_path.exists():
-         logger.warning(f"Zenoh config file not found at {default_path}")
-         logger.warning("Please use dextop to set up the zenoh config file")
-         return None
-     return str(default_path)
-
-
- def create_zenoh_session(zenoh_config_file: str | None = None) -> zenoh.Session:
-     """Creates and initializes a Zenoh communication session.
-
-     Args:
-         zenoh_config_file: Path to zenoh configuration file. If None,
-             uses the default configuration path.
-
-     Returns:
-         Initialized zenoh session.
-
-     Raises:
-         RuntimeError: If zenoh session initialization fails.
-     """
-     try:
-         config_path = zenoh_config_file or get_default_zenoh_config()
-         if config_path is None:
-             logger.warning("Using default zenoh config settings")
-             return zenoh.open(zenoh.Config())
-         return zenoh.open(zenoh.Config.from_file(config_path))
-     except Exception as e:
-         raise RuntimeError(f"Failed to initialize zenoh session: {e}") from e
-
-
- def load_robot_config(
-     robot_config_path: str | None = None,
- ) -> "VegaConfig":
-     """Load robot configuration from file or use default variant.
-
-     Args:
-         robot_config_path: Path to robot configuration file. If None,
-             uses default configuration for detected robot model.
-
-     Returns:
-         Robot configuration as OmegaConf object.
-
-     Raises:
-         ValueError: If configuration cannot be loaded or parsed.
-     """
-     try:
-         if robot_config_path is not None:
-             # Load custom configuration from file
-             config_path = Path(robot_config_path)
-             if not config_path.exists():
-                 raise ValueError(f"Configuration file not found: {config_path}")
-
-             # Load YAML configuration and merge with default
-             base_config = DictConfig, get_vega_config()
-             custom_config = OmegaConf.load(config_path)
-             return OmegaConf.merge(base_config, custom_config)
-         else:
-             # Use default configuration for detected robot model
-             try:
-                 return get_vega_config()
-             except ValueError as e:
-                 # If robot model detection fails, use default vega-1 config
-                 if "Robot name is not set" in str(e):
-                     logger.warning(
-                         "Robot model not detected, using default vega-1 configuration"
-                     )
-                     return get_vega_config("vega-1")
-                 raise
-
-     except Exception as e:
-         raise ValueError(f"Failed to load robot configuration: {e}") from e
-
-
- def create_standalone_robot_interface(
-     zenoh_config_file: str | None = None,
-     robot_config_path: str | None = None,
- ) -> tuple[zenoh.Session, "VegaConfig"]:
-     """Create standalone zenoh session and robot configuration.
-
-     This function provides a convenient way to create both a zenoh session
-     and robot configuration for use with RobotQueryInterface without
-     requiring the full Robot class initialization.
-
-     Args:
-         zenoh_config_file: Path to zenoh configuration file. If None,
-             uses the default configuration path.
-         robot_config_path: Path to robot configuration file. If None,
-             uses default configuration for detected robot model.
-
-     Returns:
-         Tuple of (zenoh_session, robot_config) ready for use with
-         RobotQueryInterface.
-
-     Raises:
-         RuntimeError: If zenoh session initialization fails.
-         ValueError: If robot configuration cannot be loaded.
-
-     Example:
-         >>> session, config = create_standalone_robot_interface()
-         >>> query_interface = RobotQueryInterface(session, config)
-         >>> version_info = query_interface.get_version_info()
-         >>> session.close()
-     """
-     # Create zenoh session
-     session = create_zenoh_session(zenoh_config_file)
-
-     # Load robot configuration
-     config = load_robot_config(robot_config_path)
-
-     return session, config
-
-
- # =============================================================================
- # Query and Communication Functions
- # =============================================================================
- def query_zenoh_json(
-     zenoh_session: zenoh.Session,
-     topic: str,
-     timeout: float = 2.0,
-     max_retries: int = 1,
-     retry_delay: float = 0.5,
- ) -> dict | None:
-     """Query Zenoh for JSON information with retry logic.
-
-     Args:
-         zenoh_session: Active Zenoh session for communication.
-         topic: Zenoh topic to query.
-         timeout: Maximum time to wait for a response in seconds.
-         max_retries: Maximum number of retry attempts.
-         retry_delay: Initial delay between retries (doubles each retry).
-
-     Returns:
-         Dictionary containing the parsed JSON response if successful, None otherwise.
-     """
-     resolved_topic = resolve_key_name(topic)
-     logger.debug(f"Querying Zenoh topic: {resolved_topic}")
-
-     for attempt in range(max_retries + 1):
-         try:
-             # Add delay before retry (except first attempt)
-             if attempt > 0:
-                 delay = retry_delay * (2 ** (attempt - 1))  # Exponential backoff
-                 logger.debug(f"Retry {attempt}/{max_retries} after {delay}s delay...")
-                 time.sleep(delay)
-
-             # Try to get the info
-             for reply in zenoh_session.get(resolved_topic, timeout=timeout):
-                 if reply.ok:
-                     response = json.loads(reply.ok.payload.to_bytes())
-                     return response
-             else:
-                 # No valid reply received
-                 if attempt < max_retries:
-                     logger.debug(f"No reply on attempt {attempt + 1}, will retry...")
-                 else:
-                     logger.error(
-                         f"No valid reply received on topic '{resolved_topic}' after {max_retries + 1} attempts."
-                     )
-
-         except StopIteration:
-             if attempt < max_retries:
-                 logger.debug(f"Query timed out on attempt {attempt + 1}, will retry...")
-             else:
-                 logger.error(f"Query timed out after {max_retries + 1} attempts.")
-         except Exception as e:
-             if attempt < max_retries:
-                 logger.debug(
-                     f"Query failed on attempt {attempt + 1}: {e}, will retry..."
-                 )
-             else:
-                 logger.error(f"Query failed after {max_retries + 1} attempts: {e}")
-
-     return None
-
-
- # =============================================================================
- # Cleanup and Exit Handling Functions
- # =============================================================================
- def close_zenoh_session_with_timeout(
-     session: zenoh.Session, timeout: float = 2.0
- ) -> tuple[bool, Exception | None]:
-     """Close a Zenoh session with timeout handling.
-
-     This function attempts to close a Zenoh session gracefully with a timeout.
-     If the close operation takes too long, it returns with a timeout indication.
-
-     Args:
-         session: The Zenoh session to close.
-         timeout: Maximum time to wait for session close (default 2.0 seconds).
-
-     Returns:
-         Tuple of (success, exception):
-             - success: True if session closed successfully, False otherwise
-             - exception: Any exception that occurred during close, or None
-     """
-
-     close_success = False
-     close_exception = None
-
-     def _close_session():
-         """Inner function to close the session."""
-         nonlocal close_success, close_exception
-         try:
-             session.close()
-             close_success = True
-         except Exception as e:  # pylint: disable=broad-except
-             close_exception = e
-             logger.debug(f"Zenoh session close attempt failed: {e}")
-             # Try to trigger garbage collection as fallback
-             try:
-                 gc.collect()
-             except Exception:  # pylint: disable=broad-except
-                 pass
-
-     # Try to close zenoh session with timeout
-     close_thread = threading.Thread(target=_close_session, daemon=True)
-     close_thread.start()
-
-     # Use progressive timeout strategy
-     timeouts = [timeout / 2, timeout / 2]  # Split timeout into two attempts
-     for i, wait_time in enumerate(timeouts):
-         close_thread.join(timeout=wait_time)
-         if not close_thread.is_alive():
-             break
-
-     if close_thread.is_alive():
-         return False, Exception("Close operation timed out")
-     elif close_success:
-         return True, None
-     else:
-         logger.debug(f"Zenoh session closed with error: {close_exception}")
-         return False, close_exception
-
-
- def wait_for_zenoh_cleanup(cleanup_delays: list[float] | None = None) -> list[str]:
-     """Wait for Zenoh internal threads to clean up.
-
-     This function waits for Zenoh's internal pyo3 threads to clean up after
-     session closure, using progressive delays to balance responsiveness and
-     thoroughness.
-
-     Args:
-         cleanup_delays: List of delays in seconds to wait between checks.
-             Defaults to [0.1, 0.2, 0.3] if not provided.
-
-     Returns:
-         List of thread names that are still active after cleanup attempts.
-     """
-     if cleanup_delays is None:
-         cleanup_delays = [0.1, 0.2, 0.3]  # Progressive delays totaling 0.6s
-
-     for delay in cleanup_delays:
-         time.sleep(delay)
-         # Check if threads are still active
-         active_threads = get_active_zenoh_threads()
-         if not active_threads:
-             return []
-
-     # Return any remaining threads
-     lingering_threads = get_active_zenoh_threads()
-     if lingering_threads:
-         logger.debug(
-             f"Note: {len(lingering_threads)} Zenoh internal thread(s) still active. "
-             "These typically clean up after script exit."
-         )
-     return lingering_threads
-
-
- def get_active_zenoh_threads() -> list[str]:
-     """Get list of active Zenoh (pyo3) threads.
-
-     Returns:
-         List of thread names that are pyo3-related and still active.
-     """
-     return [
-         t.name
-         for t in threading.enumerate()
-         if "pyo3" in t.name and t.is_alive() and not t.daemon
-     ]
-
-
- # =============================================================================
- # Statistics and Analysis Functions
- # =============================================================================
- def compute_ntp_stats(offsets: list[float], rtts: list[float]) -> dict[str, float]:
-     """Compute NTP statistics, removing outliers based on RTT median and std.
-
-     Args:
-         offsets: List of offset values (seconds).
-         rtts: List of round-trip time values (seconds).
-
-     Returns:
-         Dictionary with computed statistics (mean, std, min, max, sample_count) for offset and rtt.
-     """
-     offsets_np = np.array(offsets)
-     rtts_np = np.array(rtts)
-     if len(rtts_np) < 3:
-         mask = np.ones_like(rtts_np, dtype=bool)
-     else:
-         median = np.median(rtts_np)
-         std = np.std(rtts_np)
-         mask = np.abs(rtts_np - median) <= 2 * std
-     offsets_filtered = offsets_np[mask]
-     rtts_filtered = rtts_np[mask]
-
-     def safe_stat(arr, func):
-         return float(func(arr)) if len(arr) > 0 else 0.0
-
-     stats = {
-         "offset (mean)": safe_stat(offsets_filtered, np.mean),
-         "offset (std)": safe_stat(offsets_filtered, np.std),
-         "offset (min)": safe_stat(offsets_filtered, np.min),
-         "offset (max)": safe_stat(offsets_filtered, np.max),
-         "round_trip_time (mean)": safe_stat(rtts_filtered, np.mean),
-         "round_trip_time (std)": safe_stat(rtts_filtered, np.std),
-         "round_trip_time (min)": safe_stat(rtts_filtered, np.min),
-         "round_trip_time (max)": safe_stat(rtts_filtered, np.max),
-         "sample_count": int(len(offsets_filtered)),
-     }
-     return stats
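
The removed helpers above split zenoh handling into session setup, retried JSON queries, timed shutdown, and time-sync statistics. The sketch below shows how they composed in 0.3.0, based only on the signatures and docstrings in the deleted module; the query key and the offset/RTT samples are illustrative values, not real robot data:

```python
# Hypothetical usage of the removed dexcontrol.utils.zenoh_utils helpers (0.3.0).
# The topic name and timing samples are made up for illustration.
from dexcontrol.utils.zenoh_utils import (
    close_zenoh_session_with_timeout,
    compute_ntp_stats,
    create_zenoh_session,
    query_zenoh_json,
)

session = create_zenoh_session()  # falls back to default settings if no config file
try:
    # One retried JSON query against a placeholder key.
    info = query_zenoh_json(session, "robot/info/version", timeout=2.0, max_retries=2)
    print(info)

    # Outlier-filtered offset/round-trip statistics from repeated time-sync probes.
    stats = compute_ntp_stats(
        offsets=[0.0021, 0.0019, 0.0023, 0.0150],
        rtts=[0.004, 0.005, 0.004, 0.090],
    )
    print(stats["offset (mean)"], stats["sample_count"])
finally:
    closed_ok, close_err = close_zenoh_session_with_timeout(session, timeout=2.0)
```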