petal-user-journey-coordinator 0.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- petal_user_journey_coordinator/__init__.py +18 -0
- petal_user_journey_coordinator/controllers.py +3406 -0
- petal_user_journey_coordinator/data_model.py +556 -0
- petal_user_journey_coordinator/plugin.py +2167 -0
- petal_user_journey_coordinator-0.1.5.dist-info/METADATA +87 -0
- petal_user_journey_coordinator-0.1.5.dist-info/RECORD +8 -0
- petal_user_journey_coordinator-0.1.5.dist-info/WHEEL +4 -0
- petal_user_journey_coordinator-0.1.5.dist-info/entry_points.txt +7 -0
|
@@ -0,0 +1,2167 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Main plugin module for petal-user-journey-coordinator
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import math
|
|
7
|
+
import numpy as np
|
|
8
|
+
from typing import Dict, Any, List, Union, Optional, Callable
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
import threading
|
|
11
|
+
from enum import Enum
|
|
12
|
+
import time
|
|
13
|
+
|
|
14
|
+
from . import logger
|
|
15
|
+
from petal_app_manager.plugins.base import Petal
|
|
16
|
+
from petal_app_manager.plugins.decorators import http_action, websocket_action
|
|
17
|
+
from petal_app_manager.proxies import (
|
|
18
|
+
MQTTProxy,
|
|
19
|
+
MavLinkExternalProxy,
|
|
20
|
+
LocalDBProxy,
|
|
21
|
+
RedisProxy
|
|
22
|
+
)
|
|
23
|
+
from petal_app_manager import Config
|
|
24
|
+
|
|
25
|
+
import json, math
|
|
26
|
+
from pymavlink import mavutil
|
|
27
|
+
from pymavlink.dialects.v20 import all as mavlink_dialect
|
|
28
|
+
|
|
29
|
+
from pydantic import ValidationError
|
|
30
|
+
from fastapi import HTTPException
|
|
31
|
+
import asyncio
|
|
32
|
+
|
|
33
|
+
from .controllers import (
|
|
34
|
+
# Parameter controllers
|
|
35
|
+
BaseParameterHandler,
|
|
36
|
+
RotorCountHandler,
|
|
37
|
+
GPSModuleHandler,
|
|
38
|
+
DistanceModuleHandler,
|
|
39
|
+
OpticalFlowModuleHandler,
|
|
40
|
+
GPSSpatialOffsetHandler,
|
|
41
|
+
DistanceSpatialOffsetHandler,
|
|
42
|
+
OpticalFlowSpatialOffsetHandler,
|
|
43
|
+
ESCCalibrationLimitsHandler,
|
|
44
|
+
KillSwitchConfigHandler,
|
|
45
|
+
|
|
46
|
+
# Timeout controllers
|
|
47
|
+
BaseTimeoutController,
|
|
48
|
+
ESCCalibrationController,
|
|
49
|
+
ESCForceRunAllController,
|
|
50
|
+
ESCForceRunSingleController,
|
|
51
|
+
BasePubSubController,
|
|
52
|
+
RCChannelsController,
|
|
53
|
+
|
|
54
|
+
# Pubsub controllers
|
|
55
|
+
BasePubSubController,
|
|
56
|
+
RCChannelsController,
|
|
57
|
+
PositionChannelsController,
|
|
58
|
+
KillSwitchController,
|
|
59
|
+
MultiFunctionalSwitchAController,
|
|
60
|
+
MultiFunctionalSwitchBController,
|
|
61
|
+
|
|
62
|
+
# Custom controllers
|
|
63
|
+
TrajectoryVerificationController,
|
|
64
|
+
WifiOptitrackConnectivityController
|
|
65
|
+
)
|
|
66
|
+
|
|
67
|
+
from .data_model import (
|
|
68
|
+
# generic payloads
|
|
69
|
+
MQTTMessage,
|
|
70
|
+
SubscribePayload,
|
|
71
|
+
UnsubscribePayload,
|
|
72
|
+
|
|
73
|
+
# Timeout payloads
|
|
74
|
+
ESCCalibrationPayload,
|
|
75
|
+
ESCCalibrationLimitsPayload,
|
|
76
|
+
ESCForceRunAllPayload,
|
|
77
|
+
ESCForceRunSinglePayload,
|
|
78
|
+
VerifyPosYawDirectionsPayload,
|
|
79
|
+
ConnectToWifiAndVerifyOptitrackPayload,
|
|
80
|
+
WifiOptitrackConnectionResponse,
|
|
81
|
+
SetStaticIpAddressPayload,
|
|
82
|
+
SetStaticIpAddressResponse,
|
|
83
|
+
|
|
84
|
+
# HTTP models
|
|
85
|
+
ParameterRequestModel,
|
|
86
|
+
ParameterBaseModel,
|
|
87
|
+
ParameterResponseModel,
|
|
88
|
+
MavlinkParameterResponseModel,
|
|
89
|
+
MavlinkParametersResponseModel,
|
|
90
|
+
RotorCountParameter,
|
|
91
|
+
DistanceModulePayload,
|
|
92
|
+
OpticalFlowModulePayload
|
|
93
|
+
)
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
class OperationMode(Enum):
    """Enumeration of operation modes"""

    # Long-running ESC operations that are guarded by timeout controllers.
    # The string values match the suffix of the corresponding MQTT command
    # names (e.g. "petal-user-journey-coordinator/esc_calibration").
    ESC_CALIBRATION = "esc_calibration"
    ESC_FORCE_RUN_ALL = "esc_force_run_all"
    ESC_FORCE_RUN_SINGLE = "esc_force_run_single"
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
def _json_safe(o):
|
|
104
|
+
# Recursively replace NaN/±Inf with None and coerce numpy scalars to py types
|
|
105
|
+
try:
|
|
106
|
+
import numpy as np # optional
|
|
107
|
+
np_types = (np.floating, np.integer)
|
|
108
|
+
except Exception:
|
|
109
|
+
np_types = tuple()
|
|
110
|
+
|
|
111
|
+
if isinstance(o, float):
|
|
112
|
+
return o if math.isfinite(o) else None
|
|
113
|
+
if np_types and isinstance(o, np_types):
|
|
114
|
+
py = o.item()
|
|
115
|
+
return py if not (isinstance(py, float) and not math.isfinite(py)) else None
|
|
116
|
+
if isinstance(o, dict):
|
|
117
|
+
return {k: _json_safe(v) for k, v in o.items()}
|
|
118
|
+
if isinstance(o, (list, tuple, set)):
|
|
119
|
+
return [_json_safe(v) for v in o]
|
|
120
|
+
return o
|
|
121
|
+
|
|
122
|
+
|
|
123
|
+
class PetalUserJourneyCoordinator(Petal):
    """
    Main petal class for petal-user-journey-coordinator.

    This petal demonstrates the basic structure and includes a health endpoint
    that reports proxy requirements and status.
    """

    # Petal identity used for registration and logging.
    name = "petal-user-journey-coordinator"
    # NOTE(review): the distributed wheel is versioned 0.1.5 — confirm whether
    # this class attribute is meant to track the package version.
    version = "0.1.0"
    use_mqtt_proxy = True  # Enable MQTT-aware startup
|
|
134
|
+
|
|
135
|
+
def __init__(self):
|
|
136
|
+
super().__init__()
|
|
137
|
+
self._status_message = "Petal initialized successfully"
|
|
138
|
+
self._startup_time = None
|
|
139
|
+
self._mavlink_proxy = None
|
|
140
|
+
self._mqtt_proxy = None
|
|
141
|
+
self._loop = None # Will be set when async context is available
|
|
142
|
+
|
|
143
|
+
# Timeout controller instances
|
|
144
|
+
self._operation_controllers: Dict[OperationMode, BaseTimeoutController] = {}
|
|
145
|
+
self._parameter_handlers: Dict[str, BaseParameterHandler] = {}
|
|
146
|
+
self._pubsub_controllers: Dict[str, BasePubSubController] = {}
|
|
147
|
+
self._active_controllers: Dict[OperationMode, BaseTimeoutController] = {
|
|
148
|
+
mode: None for mode in OperationMode
|
|
149
|
+
}
|
|
150
|
+
self._controller_locks = {mode: threading.Lock() for mode in OperationMode}
|
|
151
|
+
self._trajectory_verification = None # Initialize later in startup()
|
|
152
|
+
|
|
153
|
+
# Active subscription tracking
|
|
154
|
+
self._active_handlers: Dict[str, Dict[str, Any]] = {} # stream_name -> subscription_info
|
|
155
|
+
self._registration_lock = threading.Lock()
|
|
156
|
+
|
|
157
|
+
# Trajectory verification configuration
|
|
158
|
+
self.trajectory_collection_rate_hz: Optional[float] = None # None = match pose controller rate
|
|
159
|
+
|
|
160
|
+
# Trajectory verification parameters
|
|
161
|
+
self.rectangle_a = 3.0 # width in meters
|
|
162
|
+
self.rectangle_b = 3.0 # height in meters
|
|
163
|
+
self.points_per_edge = 10 # Number of interpolated points per edge (including start point, excluding end point)
|
|
164
|
+
self.corner_exclusion_radius = 1 # meters
|
|
165
|
+
|
|
166
|
+
def startup(self) -> None:
|
|
167
|
+
"""Called when the petal is started."""
|
|
168
|
+
super().startup()
|
|
169
|
+
self._startup_time = datetime.now()
|
|
170
|
+
self._status_message = f"Petal started at {self._startup_time.isoformat()}"
|
|
171
|
+
logger.info(f"{self.name} petal started successfully")
|
|
172
|
+
|
|
173
|
+
# Store proxy references (after inject_proxies has been called)
|
|
174
|
+
self._mqtt_proxy: MQTTProxy = self._proxies["mqtt"]
|
|
175
|
+
self._mavlink_proxy: MavLinkExternalProxy = self._proxies["ext_mavlink"]
|
|
176
|
+
|
|
177
|
+
# Initialize trajectory verification controller
|
|
178
|
+
self._trajectory_verification = TrajectoryVerificationController(
|
|
179
|
+
mqtt_proxy=self._mqtt_proxy,
|
|
180
|
+
logger=logger,
|
|
181
|
+
rectangle_a=self.rectangle_a,
|
|
182
|
+
rectangle_b=self.rectangle_b,
|
|
183
|
+
points_per_edge=self.points_per_edge,
|
|
184
|
+
corner_exclusion_radius=self.corner_exclusion_radius
|
|
185
|
+
)
|
|
186
|
+
|
|
187
|
+
# Initialize WiFi OptiTrack connectivity controller
|
|
188
|
+
self._wifi_optitrack_controller = WifiOptitrackConnectivityController(self._mqtt_proxy, logger)
|
|
189
|
+
|
|
190
|
+
# Initialize operation controllers
|
|
191
|
+
self._operation_controllers = {
|
|
192
|
+
OperationMode.ESC_CALIBRATION: ESCCalibrationController(self._mavlink_proxy, logger),
|
|
193
|
+
OperationMode.ESC_FORCE_RUN_ALL: ESCForceRunAllController(self._mavlink_proxy, logger),
|
|
194
|
+
OperationMode.ESC_FORCE_RUN_SINGLE: ESCForceRunSingleController(self._mavlink_proxy, logger)
|
|
195
|
+
}
|
|
196
|
+
|
|
197
|
+
# Initialize parameter configuration handlers
|
|
198
|
+
self._parameter_handlers = {
|
|
199
|
+
"geometry": RotorCountHandler(self._mavlink_proxy, logger),
|
|
200
|
+
"gps_module": GPSModuleHandler(self._mavlink_proxy, logger),
|
|
201
|
+
"dist_module": DistanceModuleHandler(self._mavlink_proxy, logger),
|
|
202
|
+
"oflow_module": OpticalFlowModuleHandler(self._mavlink_proxy, logger),
|
|
203
|
+
"gps_spatial_offset": GPSSpatialOffsetHandler(self._mavlink_proxy, logger),
|
|
204
|
+
"distance_spatial_offset": DistanceSpatialOffsetHandler(self._mavlink_proxy, logger),
|
|
205
|
+
"optical_flow_spatial_offset": OpticalFlowSpatialOffsetHandler(self._mavlink_proxy, logger),
|
|
206
|
+
"esc_update_calibration_limits": ESCCalibrationLimitsHandler(self._mavlink_proxy, logger)
|
|
207
|
+
}
|
|
208
|
+
|
|
209
|
+
# Pub/sub controllers will be initialized in async_startup once topic_base is available
|
|
210
|
+
self._pubsub_controllers = None
|
|
211
|
+
|
|
212
|
+
# Create parameter message handlers dynamically
|
|
213
|
+
parameter_configs = {
|
|
214
|
+
"geometry": "rotor count",
|
|
215
|
+
"gps_module": "GPS module",
|
|
216
|
+
"dist_module": "distance module",
|
|
217
|
+
"oflow_module": "optical flow module",
|
|
218
|
+
"gps_spatial_offset": "GPS spatial offset",
|
|
219
|
+
"distance_spatial_offset": "distance spatial offset",
|
|
220
|
+
"optical_flow_spatial_offset": "optical flow spatial offset",
|
|
221
|
+
"esc_update_calibration_limits": "ESC calibration limits"
|
|
222
|
+
}
|
|
223
|
+
|
|
224
|
+
# Dynamically create parameter message handlers
|
|
225
|
+
for handler_key, config_type in parameter_configs.items():
|
|
226
|
+
handler_method_name = f"_{handler_key}_message_handler"
|
|
227
|
+
handler_method = self._create_parameter_message_handler(handler_key, config_type)
|
|
228
|
+
setattr(self, handler_method_name, handler_method)
|
|
229
|
+
|
|
230
|
+
# Pub/Sub controller configurations
|
|
231
|
+
pubsub_configs = {
|
|
232
|
+
"rc_value_stream": "RC value stream",
|
|
233
|
+
"pose_value_stream": "real-time pose stream",
|
|
234
|
+
"ks_status_stream": "kill switch status stream",
|
|
235
|
+
"mfs_a_status_stream": "multi-functional switch A stream",
|
|
236
|
+
"mfs_b_status_stream": "multi-functional switch B stream"
|
|
237
|
+
}
|
|
238
|
+
|
|
239
|
+
# Dynamically create pub/sub message handlers
|
|
240
|
+
for controller_key, stream_name in pubsub_configs.items():
|
|
241
|
+
subscribe_handler, unsubscribe_handler = self._create_pubsub_message_handlers(controller_key, stream_name)
|
|
242
|
+
setattr(self, f"_subscribe_{controller_key}_handler", subscribe_handler)
|
|
243
|
+
setattr(self, f"_unsubscribe_{controller_key}_handler", unsubscribe_handler)
|
|
244
|
+
|
|
245
|
+
# Topic configuration and command handlers will be set up in async_startup
|
|
246
|
+
# once organization ID is available
|
|
247
|
+
self._command_handlers = None
|
|
248
|
+
self.mqtt_subscription_id = None
|
|
249
|
+
|
|
250
|
+
def set_trajectory_collection_rate(self, rate_hz: Optional[float]) -> None:
|
|
251
|
+
"""
|
|
252
|
+
Set the trajectory data collection rate.
|
|
253
|
+
|
|
254
|
+
Args:
|
|
255
|
+
rate_hz: Collection rate in Hz. If None, will match pose controller rate.
|
|
256
|
+
If pose controller rate cannot be determined, falls back to 10 Hz.
|
|
257
|
+
"""
|
|
258
|
+
if rate_hz is not None and rate_hz <= 0:
|
|
259
|
+
raise ValueError("Collection rate must be positive")
|
|
260
|
+
|
|
261
|
+
self.trajectory_collection_rate_hz = rate_hz
|
|
262
|
+
logger.info(f"Trajectory collection rate set to: {'auto (match pose controller)' if rate_hz is None else f'{rate_hz} Hz'}")
|
|
263
|
+
|
|
264
|
+
def _track_subscription(self, stream_name: str, stream_id: str, rate_hz: float) -> None:
|
|
265
|
+
"""Track an active subscription for management purposes."""
|
|
266
|
+
with self._registration_lock:
|
|
267
|
+
self._active_handlers[stream_name] = {
|
|
268
|
+
"stream_id": stream_id,
|
|
269
|
+
"rate_hz": rate_hz,
|
|
270
|
+
"started_at": datetime.now().isoformat(),
|
|
271
|
+
"controller": self._pubsub_controllers.get(stream_name)
|
|
272
|
+
}
|
|
273
|
+
logger.info(f"Tracking subscription for {stream_name} (ID: {stream_id}, Rate: {rate_hz} Hz)")
|
|
274
|
+
|
|
275
|
+
def _untrack_subscription(self, stream_name: str) -> None:
|
|
276
|
+
"""Stop tracking a subscription."""
|
|
277
|
+
with self._registration_lock:
|
|
278
|
+
if stream_name in self._active_handlers:
|
|
279
|
+
del self._active_handlers[stream_name]
|
|
280
|
+
logger.info(f"Stopped tracking subscription for {stream_name}")
|
|
281
|
+
|
|
282
|
+
def get_active_handlers(self) -> Dict[str, Dict[str, Any]]:
|
|
283
|
+
"""Get a copy of all active handlers."""
|
|
284
|
+
with self._registration_lock:
|
|
285
|
+
return dict(self._active_handlers)
|
|
286
|
+
|
|
287
|
+
async def unsubscribe_all_streams(self) -> Dict[str, Any]:
|
|
288
|
+
"""
|
|
289
|
+
Unsubscribe from all active pubsub streams.
|
|
290
|
+
|
|
291
|
+
Returns:
|
|
292
|
+
Dict containing the results of the unsubscribe operations
|
|
293
|
+
"""
|
|
294
|
+
logger.info("Starting unsubscribe all streams operation...")
|
|
295
|
+
|
|
296
|
+
# Get list of active handlers
|
|
297
|
+
active_handlers = self.get_active_handlers()
|
|
298
|
+
|
|
299
|
+
if not active_handlers:
|
|
300
|
+
logger.info("No active handlers to unregister")
|
|
301
|
+
return {
|
|
302
|
+
"status": "success",
|
|
303
|
+
"message": "No active handlers to unregister",
|
|
304
|
+
"unsubscribed_streams": [],
|
|
305
|
+
"timestamp": datetime.now().isoformat()
|
|
306
|
+
}
|
|
307
|
+
|
|
308
|
+
unsubscribed_streams = []
|
|
309
|
+
failed_streams = []
|
|
310
|
+
|
|
311
|
+
# Stop each active handler
|
|
312
|
+
for stream_name, handler_info in active_handlers.items():
|
|
313
|
+
try:
|
|
314
|
+
logger.info(f"Stopping stream: {stream_name}")
|
|
315
|
+
controller = handler_info.get("controller")
|
|
316
|
+
|
|
317
|
+
if controller and hasattr(controller, 'stop_streaming'):
|
|
318
|
+
await controller.stop_streaming()
|
|
319
|
+
self._untrack_subscription(stream_name)
|
|
320
|
+
unsubscribed_streams.append({
|
|
321
|
+
"stream_name": stream_name,
|
|
322
|
+
"stream_id": handler_info.get("stream_id"),
|
|
323
|
+
"was_rate_hz": handler_info.get("rate_hz")
|
|
324
|
+
})
|
|
325
|
+
logger.info(f"Successfully stopped stream: {stream_name}")
|
|
326
|
+
else:
|
|
327
|
+
logger.warning(f"No valid controller found for stream: {stream_name}")
|
|
328
|
+
failed_streams.append(stream_name)
|
|
329
|
+
|
|
330
|
+
except Exception as e:
|
|
331
|
+
logger.error(f"Failed to stop stream {stream_name}: {e}")
|
|
332
|
+
failed_streams.append(stream_name)
|
|
333
|
+
|
|
334
|
+
# Build response
|
|
335
|
+
result = {
|
|
336
|
+
"status": "success" if not failed_streams else "partial_success",
|
|
337
|
+
"message": f"Unsubscribed from {len(unsubscribed_streams)} streams",
|
|
338
|
+
"unsubscribed_streams": unsubscribed_streams,
|
|
339
|
+
"timestamp": datetime.now().isoformat()
|
|
340
|
+
}
|
|
341
|
+
|
|
342
|
+
if failed_streams:
|
|
343
|
+
result["failed_streams"] = failed_streams
|
|
344
|
+
result["message"] += f", {len(failed_streams)} failed"
|
|
345
|
+
|
|
346
|
+
logger.info(f"Unsubscribe all operation completed: {result['message']}")
|
|
347
|
+
return result
|
|
348
|
+
|
|
349
|
+
async def async_startup(self) -> None:
|
|
350
|
+
"""
|
|
351
|
+
Called after startup to handle async operations like MQTT subscriptions.
|
|
352
|
+
|
|
353
|
+
Note: The MQTT-aware startup logic (organization ID monitoring, event loop setup)
|
|
354
|
+
is handled by the main application's _mqtt_aware_petal_startup function.
|
|
355
|
+
This method will be called by that function after organization ID is available.
|
|
356
|
+
"""
|
|
357
|
+
# This method is intentionally simple - the main app handles:
|
|
358
|
+
# 1. Setting self._loop
|
|
359
|
+
# 2. Waiting for organization ID
|
|
360
|
+
# 3. Calling self._setup_mqtt_topics() when ready
|
|
361
|
+
# 4. Starting organization ID monitoring if needed
|
|
362
|
+
|
|
363
|
+
logger.info("User Journey Coordinator Petal async_startup completed (MQTT setup handled by main app)")
|
|
364
|
+
pass
|
|
365
|
+
|
|
366
|
+
async def _setup_mqtt_topics(self):
|
|
367
|
+
"""Set up MQTT topics and controllers once organization ID is available."""
|
|
368
|
+
|
|
369
|
+
# Initialize pub/sub controllers now that topic_base is available
|
|
370
|
+
self._pubsub_controllers: Dict[str, BasePubSubController] = {
|
|
371
|
+
"rc_value_stream": RCChannelsController(self._mqtt_proxy, self._mavlink_proxy, logger),
|
|
372
|
+
"pose_value_stream": PositionChannelsController(
|
|
373
|
+
mqtt_proxy=self._mqtt_proxy,
|
|
374
|
+
mavlink_proxy=self._mavlink_proxy,
|
|
375
|
+
logger=logger,
|
|
376
|
+
rectangle_a=self.rectangle_a,
|
|
377
|
+
rectangle_b=self.rectangle_b,
|
|
378
|
+
points_per_edge=self.points_per_edge,
|
|
379
|
+
corner_exclusion_radius=self.corner_exclusion_radius,
|
|
380
|
+
max_matching_distance=self._trajectory_verification.max_matching_distance,
|
|
381
|
+
corner_points=self._trajectory_verification.corner_points, # Pass corner points here
|
|
382
|
+
reference_trajectory=self._trajectory_verification.reference_trajectory # Pass reference trajectory here
|
|
383
|
+
),
|
|
384
|
+
"ks_status_stream": KillSwitchController(self._mqtt_proxy, self._mavlink_proxy, logger),
|
|
385
|
+
"mfs_a_status_stream": MultiFunctionalSwitchAController(self._mqtt_proxy, self._mavlink_proxy, logger),
|
|
386
|
+
"mfs_b_status_stream": MultiFunctionalSwitchBController(self._mqtt_proxy, self._mavlink_proxy, logger)
|
|
387
|
+
}
|
|
388
|
+
|
|
389
|
+
# Initialize command handlers registry
|
|
390
|
+
self._command_handlers = self._setup_command_handlers()
|
|
391
|
+
|
|
392
|
+
# Single topic subscription - the master handler will dispatch based on command
|
|
393
|
+
self.mqtt_subscription_id = self._mqtt_proxy.register_handler(self._master_command_handler)
|
|
394
|
+
if self.mqtt_subscription_id is None:
|
|
395
|
+
logger.error("Failed to register MQTT handler for Flight Log Petal")
|
|
396
|
+
return
|
|
397
|
+
|
|
398
|
+
logger.info(f"Subscribed to MQTT topics successfully with subscription ID: {self.mqtt_subscription_id}")
|
|
399
|
+
|
|
400
|
+
def _setup_command_handlers(self) -> Dict[str, Callable]:
|
|
401
|
+
"""Setup the command handlers registry mapping command names to handler methods."""
|
|
402
|
+
return {
|
|
403
|
+
# Test commands
|
|
404
|
+
# "Update": self._test_esc_calibration_message_handler,
|
|
405
|
+
# "Update": self._test_geometry_message_handler,
|
|
406
|
+
# "Update": self._test_subscribe_rc_value_stream_handler,
|
|
407
|
+
# "Update": self._test_subscribe_real_time_pose_handler,
|
|
408
|
+
# "Update": self._test_kill_switch_stream_handler,
|
|
409
|
+
# "Update": self._test_mfs_a_stream_handler,
|
|
410
|
+
# "Update": self._test_mfs_b_stream_handler,
|
|
411
|
+
# "Update": self._test_verify_pos_yaw_directions_handler,
|
|
412
|
+
# "Update": self._test_connect_to_wifi_and_verify_optitrack_handler,
|
|
413
|
+
# "Update": self._test_set_static_ip_address_handler,
|
|
414
|
+
# "Update": self._test_unregister_all_handlers,
|
|
415
|
+
|
|
416
|
+
# Timeout operation commands
|
|
417
|
+
"petal-user-journey-coordinator/esc_calibration": self._esc_calibration_message_handler,
|
|
418
|
+
"petal-user-journey-coordinator/esc_force_run_all": self._esc_force_run_all_message_handler,
|
|
419
|
+
"petal-user-journey-coordinator/esc_force_run_single": self._esc_force_run_single_message_handler,
|
|
420
|
+
|
|
421
|
+
# Parameter configuration commands
|
|
422
|
+
"petal-user-journey-coordinator/geometry": self._geometry_message_handler,
|
|
423
|
+
"petal-user-journey-coordinator/gps_module": self._gps_module_message_handler,
|
|
424
|
+
"petal-user-journey-coordinator/dist_module": self._dist_module_message_handler,
|
|
425
|
+
"petal-user-journey-coordinator/oflow_module": self._oflow_module_message_handler,
|
|
426
|
+
"petal-user-journey-coordinator/gps_spatial_offset": self._gps_spatial_offset_message_handler,
|
|
427
|
+
"petal-user-journey-coordinator/distance_spatial_offset": self._distance_spatial_offset_message_handler,
|
|
428
|
+
"petal-user-journey-coordinator/optical_flow_spatial_offset": self._optical_flow_spatial_offset_message_handler,
|
|
429
|
+
"petal-user-journey-coordinator/esc_update_calibration_limits": self._esc_update_calibration_limits_message_handler,
|
|
430
|
+
|
|
431
|
+
# Pub/Sub stream commands
|
|
432
|
+
"petal-user-journey-coordinator/subscribe_rc_value_stream": self._subscribe_rc_value_stream_handler,
|
|
433
|
+
"petal-user-journey-coordinator/unsubscribe_rc_value_stream": self._unsubscribe_rc_value_stream_handler,
|
|
434
|
+
"petal-user-journey-coordinator/subscribe_pose_value_stream": self._subscribe_pose_value_stream_handler,
|
|
435
|
+
"petal-user-journey-coordinator/unsubscribe_pose_value_stream": self._unsubscribe_pose_value_stream_handler,
|
|
436
|
+
"petal-user-journey-coordinator/subscribe_ks_status_stream": self._subscribe_ks_status_stream_handler,
|
|
437
|
+
"petal-user-journey-coordinator/unsubscribe_ks_status_stream": self._unsubscribe_ks_status_stream_handler,
|
|
438
|
+
"petal-user-journey-coordinator/subscribe_mfs_a_status_stream": self._subscribe_mfs_a_status_stream_handler,
|
|
439
|
+
"petal-user-journey-coordinator/unsubscribe_mfs_a_status_stream": self._unsubscribe_mfs_a_status_stream_handler,
|
|
440
|
+
"petal-user-journey-coordinator/subscribe_mfs_b_status_stream": self._subscribe_mfs_b_status_stream_handler,
|
|
441
|
+
"petal-user-journey-coordinator/unsubscribe_mfs_b_status_stream": self._unsubscribe_mfs_b_status_stream_handler,
|
|
442
|
+
"petal-user-journey-coordinator/unsubscribeall": self._unregister_all_handlers,
|
|
443
|
+
|
|
444
|
+
# Trajectory verification commands
|
|
445
|
+
"petal-user-journey-coordinator/verify_pos_yaw_directions": self._verify_pos_yaw_directions_handler,
|
|
446
|
+
"petal-user-journey-coordinator/verify_pos_yaw_directions_complete": self._verify_pos_yaw_directions_complete_handler,
|
|
447
|
+
|
|
448
|
+
# WiFi OptiTrack connectivity commands
|
|
449
|
+
"petal-user-journey-coordinator/connect_to_wifi_and_verify_optitrack": self._connect_to_wifi_and_verify_optitrack_handler,
|
|
450
|
+
"petal-user-journey-coordinator/set_static_ip_address": self._set_static_ip_address_handler,
|
|
451
|
+
}
|
|
452
|
+
|
|
453
|
+
async def _master_command_handler(self, topic: str, message: Dict[str, Any]):
|
|
454
|
+
"""
|
|
455
|
+
Master command handler that dispatches to specific handlers based on command field.
|
|
456
|
+
|
|
457
|
+
Args:
|
|
458
|
+
topic: MQTT topic (should be command/edge for all commands)
|
|
459
|
+
message: MQTT message containing command and payload
|
|
460
|
+
"""
|
|
461
|
+
try:
|
|
462
|
+
# Check if command handlers are initialized
|
|
463
|
+
if self._command_handlers is None:
|
|
464
|
+
error_msg = "Petal not fully initialized yet, command handlers not available"
|
|
465
|
+
logger.warning(error_msg)
|
|
466
|
+
return
|
|
467
|
+
|
|
468
|
+
# Parse the MQTT message
|
|
469
|
+
mqtt_msg = MQTTMessage(**message)
|
|
470
|
+
command = mqtt_msg.command
|
|
471
|
+
|
|
472
|
+
logger.info(f"Master handler received command: {command}")
|
|
473
|
+
|
|
474
|
+
# Dispatch to appropriate handler
|
|
475
|
+
if command in self._command_handlers:
|
|
476
|
+
handler = self._command_handlers[command]
|
|
477
|
+
await handler(topic, message)
|
|
478
|
+
else:
|
|
479
|
+
error_msg = f"Unknown command: {command}"
|
|
480
|
+
logger.error(error_msg)
|
|
481
|
+
|
|
482
|
+
if mqtt_msg.waitResponse:
|
|
483
|
+
await self._mqtt_proxy.send_command_response(
|
|
484
|
+
message_id=mqtt_msg.messageId,
|
|
485
|
+
response_data={
|
|
486
|
+
"status": "error",
|
|
487
|
+
"message": error_msg,
|
|
488
|
+
"error_code": "UNKNOWN_COMMAND",
|
|
489
|
+
"available_commands": list(self._command_handlers.keys())
|
|
490
|
+
}
|
|
491
|
+
)
|
|
492
|
+
|
|
493
|
+
except ValidationError as ve:
|
|
494
|
+
error_msg = f"Invalid MQTT message format: {ve}"
|
|
495
|
+
logger.error(error_msg)
|
|
496
|
+
try:
|
|
497
|
+
message_id = message.get("messageId", "unknown")
|
|
498
|
+
wait_response = message.get("waitResponse", False)
|
|
499
|
+
if wait_response:
|
|
500
|
+
await self._mqtt_proxy.send_command_response(
|
|
501
|
+
message_id=message_id,
|
|
502
|
+
response_data={"status": "error", "message": error_msg, "error_code": "VALIDATION_ERROR"}
|
|
503
|
+
)
|
|
504
|
+
except Exception as e:
|
|
505
|
+
logger.error(f"Failed to send error response: {e}")
|
|
506
|
+
|
|
507
|
+
except Exception as e:
|
|
508
|
+
error_msg = f"Master command handler error: {str(e)}"
|
|
509
|
+
logger.error(error_msg)
|
|
510
|
+
try:
|
|
511
|
+
message_id = message.get("messageId", "unknown")
|
|
512
|
+
wait_response = message.get("waitResponse", False)
|
|
513
|
+
if wait_response:
|
|
514
|
+
await self._mqtt_proxy.send_command_response(
|
|
515
|
+
message_id=message_id,
|
|
516
|
+
response_data={"status": "error", "message": error_msg, "error_code": "HANDLER_ERROR"}
|
|
517
|
+
)
|
|
518
|
+
except Exception as e:
|
|
519
|
+
logger.error(f"Failed to send error response: {e}")
|
|
520
|
+
|
|
521
|
+
async def _test_esc_calibration_message_handler(self, topic: str, message: Dict[str, Any]):
|
|
522
|
+
"""Test handler for ESC calibration with enhanced workflow."""
|
|
523
|
+
# allow only one call
|
|
524
|
+
if getattr(self, "_test_esc_calibration_called", False):
|
|
525
|
+
logger.warning("ESC calibration test has already been called.")
|
|
526
|
+
return
|
|
527
|
+
self._test_esc_calibration_called = True
|
|
528
|
+
|
|
529
|
+
# Test Step 1: Initialize and configure ESC calibration
|
|
530
|
+
test_payload = {
|
|
531
|
+
"is_calibration_started": False,
|
|
532
|
+
"safety_timeout_s": 3.0,
|
|
533
|
+
"force_cancel_calibration": False,
|
|
534
|
+
"esc_interface_signal_type": "PWM",
|
|
535
|
+
"ca_rotor_count": 4,
|
|
536
|
+
"throttle": None # Just configure first
|
|
537
|
+
}
|
|
538
|
+
message["payload"] = test_payload
|
|
539
|
+
logger.info("🔧 Step 1: Configuring ESC calibration...")
|
|
540
|
+
await self._esc_calibration_message_handler(topic, message)
|
|
541
|
+
|
|
542
|
+
# Wait a moment for configuration
|
|
543
|
+
await asyncio.sleep(2)
|
|
544
|
+
# Wait 5 seconds (simulate user powering up drone)
|
|
545
|
+
logger.info("⏳ Waiting 5 seconds (simulate drone power-up)...")
|
|
546
|
+
|
|
547
|
+
# Test Step 2: Send maximum throttle
|
|
548
|
+
t_start = time.time()
|
|
549
|
+
logger.info("⚡ Step 2: Sending MAXIMUM throttle (ESC calibration high point)")
|
|
550
|
+
while True:
|
|
551
|
+
# Test Step 2: Send maximum throttle
|
|
552
|
+
test_payload["throttle"] = "maximum"
|
|
553
|
+
test_payload["is_calibration_started"] = True
|
|
554
|
+
message["payload"] = test_payload
|
|
555
|
+
await self._esc_calibration_message_handler(topic, message)
|
|
556
|
+
await asyncio.sleep(0.1)
|
|
557
|
+
|
|
558
|
+
if time.time() - t_start > 10:
|
|
559
|
+
break
|
|
560
|
+
|
|
561
|
+
# Wait a moment for configuration to simulate a timeout
|
|
562
|
+
await asyncio.sleep(10)
|
|
563
|
+
|
|
564
|
+
# Test Step 3: Send minimum throttle
|
|
565
|
+
t_start = time.time()
|
|
566
|
+
while True:
|
|
567
|
+
# Test Step 2: Send maximum throttle
|
|
568
|
+
test_payload["throttle"] = "minimum"
|
|
569
|
+
test_payload["is_calibration_started"] = True
|
|
570
|
+
message["payload"] = test_payload
|
|
571
|
+
logger.info("⬇️ Step 3: Sending MINIMUM throttle (ESC calibration low point)")
|
|
572
|
+
await self._esc_calibration_message_handler(topic, message)
|
|
573
|
+
asyncio.sleep(0.1)
|
|
574
|
+
|
|
575
|
+
if time.time() - t_start > 5:
|
|
576
|
+
break
|
|
577
|
+
|
|
578
|
+
# Test Step 4: Stop motors (using force_cancel_calibration)
|
|
579
|
+
test_payload["force_cancel_calibration"] = True
|
|
580
|
+
message["payload"] = test_payload
|
|
581
|
+
logger.info("🛑 Step 4: Stopping all motors safely")
|
|
582
|
+
await self._esc_calibration_message_handler(topic, message)
|
|
583
|
+
|
|
584
|
+
logger.info("✅ ESC calibration test sequence completed!")
|
|
585
|
+
|
|
586
|
+
async def _test_geometry_message_handler(self, topic: str, message: Dict[str, Any]):
|
|
587
|
+
# intercept payload
|
|
588
|
+
test_payload = {
|
|
589
|
+
"rotor_count": 4,
|
|
590
|
+
}
|
|
591
|
+
message["payload"] = test_payload
|
|
592
|
+
# Use the dynamically created handler directly
|
|
593
|
+
await self._rotor_count_message_handler(topic, message)
|
|
594
|
+
|
|
595
|
+
async def _test_dist_module_message_handler(self, topic: str, message: Dict[str, Any]):
|
|
596
|
+
"""Test handler for distance module configuration."""
|
|
597
|
+
# Test with LiDAR Lite v3
|
|
598
|
+
test_payload = {
|
|
599
|
+
"dist_module": "LiDAR Lite v3"
|
|
600
|
+
}
|
|
601
|
+
message["payload"] = test_payload
|
|
602
|
+
await self._dist_module_message_handler(topic, message)
|
|
603
|
+
|
|
604
|
+
async def _test_oflow_module_message_handler(self, topic: str, message: Dict[str, Any]):
|
|
605
|
+
"""Test handler for optical flow module configuration."""
|
|
606
|
+
# Test with ARK Flow
|
|
607
|
+
test_payload = {
|
|
608
|
+
"oflow_module": "ARK Flow"
|
|
609
|
+
}
|
|
610
|
+
message["payload"] = test_payload
|
|
611
|
+
await self._oflow_module_message_handler(topic, message)
|
|
612
|
+
|
|
613
|
+
async def _test_subscribe_rc_value_stream_handler(self, topic: str, message: Dict[str, Any]):
|
|
614
|
+
# intercept payload
|
|
615
|
+
test_payload = {
|
|
616
|
+
"subscribed_stream_id": "px4_rc_raw",
|
|
617
|
+
"data_rate_hz": 50.0
|
|
618
|
+
}
|
|
619
|
+
message["payload"] = test_payload
|
|
620
|
+
# Use the dynamically created handler directly
|
|
621
|
+
await self._subscribe_rc_value_stream_handler(topic, message)
|
|
622
|
+
|
|
623
|
+
# unsubscribe after 10 seconds
|
|
624
|
+
await asyncio.sleep(100)
|
|
625
|
+
|
|
626
|
+
test_payload = {
|
|
627
|
+
"unsubscribed_stream_id": "px4_rc_raw"
|
|
628
|
+
}
|
|
629
|
+
message["payload"] = test_payload
|
|
630
|
+
|
|
631
|
+
await self._unsubscribe_rc_value_stream_handler(topic, message)
|
|
632
|
+
|
|
633
|
+
async def _test_subscribe_real_time_pose_handler(self, topic: str, message: Dict[str, Any]):
|
|
634
|
+
# intercept payload
|
|
635
|
+
test_payload = {
|
|
636
|
+
"subscribed_stream_id": "real_time_pose",
|
|
637
|
+
"data_rate_hz": 20.0
|
|
638
|
+
}
|
|
639
|
+
message["payload"] = test_payload
|
|
640
|
+
# Use the dynamically created handler directly
|
|
641
|
+
await self._subscribe_real_time_pose_handler(topic, message)
|
|
642
|
+
|
|
643
|
+
# unsubscribe after 15 seconds
|
|
644
|
+
await asyncio.sleep(15)
|
|
645
|
+
|
|
646
|
+
test_payload = {
|
|
647
|
+
"unsubscribed_stream_id": "real_time_pose"
|
|
648
|
+
}
|
|
649
|
+
message["payload"] = test_payload
|
|
650
|
+
|
|
651
|
+
await self._unsubscribe_real_time_pose_handler(topic, message)
|
|
652
|
+
|
|
653
|
+
async def _test_kill_switch_stream_handler(self, topic: str, message: Dict[str, Any]):
    """Test handler for kill switch stream.

    Subscribes to the kill switch status stream at 5 Hz, monitors it for
    30 seconds, then unsubscribes.
    """
    logger.info("Running kill switch stream test")

    # Subscribe to kill switch stream
    test_payload = {
        "subscribed_stream_id": "px4_ks_status",
        "data_rate_hz": 5.0  # 5 Hz for kill switch monitoring
    }
    message["payload"] = test_payload
    await self._subscribe_ks_status_stream_handler(topic, message)

    # Let it run for 30 seconds to monitor kill switch changes
    logger.info("Monitoring kill switch for 30 seconds...")
    # BUG FIX: was asyncio.sleep(3000) (50 minutes), contradicting the
    # 30-second window announced in the log line and comment above.
    await asyncio.sleep(30)

    # Unsubscribe from kill switch stream
    test_payload = {
        "unsubscribed_stream_id": "px4_ks_status"
    }
    message["payload"] = test_payload
    await self._unsubscribe_ks_status_stream_handler(topic, message)

    logger.info("Kill switch stream test completed")
|
|
677
|
+
|
|
678
|
+
async def _test_mfs_a_stream_handler(self, topic: str, message: Dict[str, Any]):
    """Test handler for Multi-functional Switch A stream."""
    logger.info("Running Multi-functional Switch A stream test")

    # Bring the MFS A raw stream up at 10 Hz.
    subscribe_payload = {
        "subscribed_stream_id": "px4_mfs_a_raw",
        "data_rate_hz": 10.0,  # 10 Hz for MFS A monitoring
    }
    message["payload"] = subscribe_payload
    await self._subscribe_mfs_a_value_stream_handler(topic, message)

    # Observe switch activity for 20 seconds.
    logger.info("Monitoring Multi-functional Switch A for 20 seconds...")
    await asyncio.sleep(20)

    # Tear the subscription back down.
    message["payload"] = {"unsubscribed_stream_id": "px4_mfs_a_raw"}
    await self._unsubscribe_mfs_a_value_stream_handler(topic, message)

    logger.info("Multi-functional Switch A stream test completed")
|
|
702
|
+
|
|
703
|
+
async def _test_mfs_b_stream_handler(self, topic: str, message: Dict[str, Any]):
    """Test handler for Multi-functional Switch B stream."""
    logger.info("Running Multi-functional Switch B stream test")

    # Bring the MFS B raw stream up at 10 Hz.
    subscribe_payload = {
        "subscribed_stream_id": "px4_mfs_b_raw",
        "data_rate_hz": 10.0,  # 10 Hz for MFS B monitoring
    }
    message["payload"] = subscribe_payload
    await self._subscribe_mfs_b_value_stream_handler(topic, message)

    # Observe switch activity for 20 seconds.
    logger.info("Monitoring Multi-functional Switch B for 20 seconds...")
    await asyncio.sleep(20)

    # Tear the subscription back down.
    message["payload"] = {"unsubscribed_stream_id": "px4_mfs_b_raw"}
    await self._unsubscribe_mfs_b_value_stream_handler(topic, message)

    logger.info("Multi-functional Switch B stream test completed")
|
|
727
|
+
|
|
728
|
+
async def _test_verify_pos_yaw_directions_handler(self, topic: str, message: Dict[str, Any]):
    """Test handler for trajectory verification with the new command structure."""

    # Re-entry guard: this scenario must only run once per plugin instance.
    if getattr(self, "_test_verification_trajectory", False):
        logger.warning("Trajectory verification test has already been called.")
        return
    self._test_verification_trajectory = True

    logger.info("Running trajectory verification test with new command structure")

    # Configure trajectory collection rate (optional - demonstrates the feature)
    self.set_trajectory_collection_rate(15.0)  # 15 Hz collection rate

    device_id = message.get("deviceId", "test-device")

    def build_envelope(msg_prefix: str, command_suffix: str, payload: Dict[str, Any]) -> Dict[str, Any]:
        # Assemble a command envelope in the shape _master_command_handler expects.
        return {
            "waitResponse": True,
            "messageId": f"{msg_prefix}-{datetime.now().timestamp()}",
            "deviceId": device_id,
            "command": f"petal-user-journey-coordinator/{command_suffix}",
            "timestamp": datetime.now().isoformat(),
            "payload": payload,
        }

    # First, simulate the user subscribing to pose data (as on page load).
    await self._master_command_handler(
        topic,
        build_envelope(
            "test-pose-subscribe",
            "subscribe_pose_value_stream",
            {"subscribed_stream_id": "real_time_pose", "data_rate_hz": 10.0},
        ),
    )

    # Give the stream a moment to come up.
    await asyncio.sleep(2)

    # Start verification (which will check that the pose stream is active).
    await self._master_command_handler(
        topic,
        build_envelope("test-verify-start", "verify_pos_yaw_directions", {"start": True}),
    )

    # Let trajectory data accumulate.
    logger.info("Collecting trajectory data for 30 seconds...")
    await asyncio.sleep(30)

    # Complete verification.
    await self._master_command_handler(
        topic,
        build_envelope("test-verify-complete", "verify_pos_yaw_directions_complete", {}),
    )

    # Optionally unsubscribe from pose data (simulating user cleanup).
    await self._master_command_handler(
        topic,
        build_envelope(
            "test-pose-unsubscribe",
            "unsubscribe_pose_value_stream",
            {"unsubscribed_stream_id": "real_time_pose"},
        ),
    )

    logger.info("Trajectory verification test completed")
|
|
801
|
+
|
|
802
|
+
async def _test_connect_to_wifi_and_verify_optitrack_handler(self, topic: str, message: Dict[str, Any]):
    """Test handler for WiFi and OptiTrack connectivity verification.

    Builds a canned connect_to_wifi_and_verify_optitrack command and routes
    it through the master command handler. Guarded so it runs at most once
    per plugin instance.

    SECURITY NOTE(review): the payload below embeds a hard-coded lab WiFi
    SSID and password that ship inside the published package. These
    credentials should be moved to configuration and rotated.
    """
    # allow only one call
    if getattr(self, "_test_wifi", False):
        logger.warning("Wifi test has already been called.")
        return
    self._test_wifi = True

    logger.info("Running WiFi and OptiTrack connectivity verification test")

    # Create test message with proper command structure
    test_message = {
        "waitResponse": True,
        "messageId": f"test-wifi-optitrack-{datetime.now().timestamp()}",
        "deviceId": message.get("deviceId", "test-device"),
        "command": "petal-user-journey-coordinator/connect_to_wifi_and_verify_optitrack",
        "timestamp": datetime.now().isoformat(),
        "payload": {
            "positioning_system_network_wifi_ssid": "Rob-Lab-C00060",
            "positioning_system_network_wifi_pass": "kuri@1234!!",
            "positioning_system_network_wifi_subnet": "255.255.255.0",
            "positioning_system_network_server_ip_address": "10.0.0.27",
            "positioning_system_network_server_multicast_address": "239.255.42.99",
            "positioning_system_network_server_data_port": "1511"
        }
    }

    # Execute the WiFi OptiTrack connection test
    await self._master_command_handler(topic, test_message)
    logger.info("WiFi and OptiTrack connectivity test completed")
|
|
832
|
+
|
|
833
|
+
async def _test_set_static_ip_address_handler(self, topic: str, message: Dict[str, Any]):
    """Test handler for static IP address configuration."""
    # Run-once guard: repeated invocations are ignored with a warning.
    if getattr(self, "_test_static_ip", False):
        logger.warning("Static IP test has already been called.")
        return
    self._test_static_ip = True

    logger.info("Running static IP address configuration test")

    # Build a command envelope carrying the static-IP test parameters.
    test_message = {
        "waitResponse": True,
        "messageId": f"test-static-ip-{datetime.now().timestamp()}",
        "deviceId": message.get("deviceId", "test-device"),
        "command": "petal-user-journey-coordinator/set_static_ip_address",
        "timestamp": datetime.now().isoformat(),
        "payload": {
            "positioning_system_network_wifi_subnet": "255.255.255.0",
            "positioning_system_network_server_ip_address": "10.0.0.27",
        },
    }

    # Route it through the master command dispatcher.
    await self._master_command_handler(topic, test_message)
    logger.info("Static IP address configuration test completed")
|
|
859
|
+
|
|
860
|
+
def _create_test_command_message(self, command: str, payload: Dict[str, Any]) -> Dict[str, Any]:
|
|
861
|
+
"""
|
|
862
|
+
Helper method to create properly formatted command messages for testing.
|
|
863
|
+
|
|
864
|
+
Args:
|
|
865
|
+
command: The command to execute (e.g., "petal-user-journey-coordinator/verify_pos_yaw_directions")
|
|
866
|
+
payload: The payload data for the command
|
|
867
|
+
|
|
868
|
+
Returns:
|
|
869
|
+
Properly formatted MQTT message dict
|
|
870
|
+
"""
|
|
871
|
+
return {
|
|
872
|
+
"waitResponse": True,
|
|
873
|
+
"messageId": f"test-{command.replace('/', '-')}-{datetime.now().timestamp()}",
|
|
874
|
+
"deviceId": "test-device",
|
|
875
|
+
"command": command,
|
|
876
|
+
"timestamp": datetime.now().isoformat(),
|
|
877
|
+
"payload": payload
|
|
878
|
+
}
|
|
879
|
+
|
|
880
|
+
async def _test_unregister_all_handlers(self, topic: str, message: Dict[str, Any]):
    """
    Test handler that subscribes to multiple streams and then tests unsubscribe all functionality.
    This demonstrates the complete workflow of subscription tracking and bulk unsubscribe.

    Flow: subscribe to RC + pose streams via the master command handler,
    wait for them to establish, issue the "unsubscribeall" command, then
    compare the active handler set before and after. The test passes only
    if no handlers remain active afterwards. If the incoming message asks
    for a response, a summary is published back over MQTT.
    """
    # Allow only one call
    if getattr(self, "_test_unsubscribe_all", False):
        logger.warning("Unsubscribe all test has already been called.")
        return
    self._test_unsubscribe_all = True

    logger.info("Running unsubscribe all functionality test")

    # List of streams to subscribe to for testing
    test_subscriptions = [
        {
            "command": "petal-user-journey-coordinator/subscribe_rc_value_stream",
            "stream_id": "px4_rc_raw",
            "data_rate_hz": 20.0
        },
        {
            "command": "petal-user-journey-coordinator/subscribe_pose_value_stream",
            "stream_id": "real_time_pose",
            "data_rate_hz": 10.0
        },
    ]

    successful_subscriptions = []

    # Subscribe to all test streams using master command handler.
    # Failures are logged but do not abort the remaining subscriptions.
    for subscription in test_subscriptions:
        try:
            logger.info(f"Subscribing to {subscription['stream_id']} at {subscription['data_rate_hz']} Hz")

            # Create subscription message with proper command structure
            subscribe_message = {
                "waitResponse": True,
                "messageId": f"test-subscribe-{subscription['stream_id']}-{datetime.now().timestamp()}",
                "deviceId": message.get("deviceId", "test-device"),
                "command": subscription["command"],
                "timestamp": datetime.now().isoformat(),
                "payload": {
                    "subscribed_stream_id": subscription["stream_id"],
                    "data_rate_hz": subscription["data_rate_hz"]
                }
            }

            # Execute the subscription using master command handler
            await self._master_command_handler(topic, subscribe_message)
            successful_subscriptions.append(subscription)
            logger.info(f"Successfully subscribed to {subscription['stream_id']}")

        except Exception as e:
            logger.error(f"Failed to subscribe to {subscription['stream_id']}: {e}")

    # Wait a moment to let subscriptions establish
    logger.info("Waiting 3 seconds for subscriptions to establish...")
    await asyncio.sleep(3)

    # Snapshot active subscriptions before unsubscribe all
    active_before = self.get_active_handlers()
    logger.info(f"Active subscriptions before unsubscribe all: {len(active_before)} streams")
    for stream_name, stream_info in active_before.items():
        logger.info(f"- {stream_name}: {stream_info.get('stream_id', 'unknown')}")

    # Now test the unsubscribe all functionality using master command handler
    logger.info("Testing unsubscribe all functionality...")
    unsubscribe_all_message = {
        "waitResponse": True,
        "messageId": f"test-unsubscribe-all-{datetime.now().timestamp()}",
        "deviceId": message.get("deviceId", "test-device"),
        "command": "petal-user-journey-coordinator/unsubscribeall",
        "timestamp": datetime.now().isoformat(),
        "payload": {}
    }

    # Execute the unsubscribe all using master command handler
    await self._master_command_handler(topic, unsubscribe_all_message)

    # Wait a moment for unsubscribe operations to complete
    await asyncio.sleep(2)

    # Snapshot active subscriptions after unsubscribe all
    active_after = self.get_active_handlers()
    logger.info(f"Active subscriptions after unsubscribe all: {len(active_after)} streams")

    # Prepare and log test results; the test passes only if nothing is left.
    test_results = {
        "attempted_subscriptions": len(test_subscriptions),
        "successful_subscriptions": len(successful_subscriptions),
        "active_before_unsubscribe": len(active_before),
        "active_after_unsubscribe": len(active_after),
        "test_passed": len(active_after) == 0
    }

    logger.info("Unsubscribe all test completed:")
    logger.info(f"- Attempted subscriptions: {test_results['attempted_subscriptions']}")
    logger.info(f"- Successful subscriptions: {test_results['successful_subscriptions']}")
    logger.info(f"- Active before unsubscribe: {test_results['active_before_unsubscribe']}")
    logger.info(f"- Active after unsubscribe: {test_results['active_after_unsubscribe']}")
    logger.info(f"- Test passed: {test_results['test_passed']}")

    # Send response if requested (following the pattern of other test handlers)
    if message.get("waitResponse", False):
        try:
            await self._mqtt_proxy.send_command_response(
                message_id=message.get("messageId", "unknown"),
                response_data={
                    "status": "success" if test_results['test_passed'] else "warning",
                    "message": f"Unsubscribe all test {'passed' if test_results['test_passed'] else 'had issues'} - {test_results['successful_subscriptions']} subscriptions created, {test_results['active_after_unsubscribe']} remaining after unsubscribe all",
                    "test_results": test_results,
                    "active_streams_before": list(active_before.keys()),
                    "active_streams_after": list(active_after.keys()),
                }
            )
        except Exception as e:
            logger.error(f"Failed to send test response: {e}")

    logger.info("Unsubscribe all functionality test completed")
|
|
999
|
+
|
|
1000
|
+
async def _verify_pos_yaw_directions_handler(self, topic: str, message: Dict[str, Any]):
    """Handle trajectory verification start.

    Validates the MQTT envelope and payload, checks that the pose value
    stream controller exists and is actively streaming, then starts the
    trajectory verifier and spawns a background task that samples pose
    data into it.

    Args:
        topic: MQTT topic the message arrived on.
        message: Raw MQTT message dict (validated as MQTTMessage).
    """
    try:
        # Validate and parse payload
        mqtt_msg = MQTTMessage(**message)
        verify_payload = VerifyPosYawDirectionsPayload(**mqtt_msg.payload)

        if verify_payload.start:
            # Check if controllers are initialized and pose controller is available
            if self._pubsub_controllers is None:
                error_msg = "Petal not fully initialized yet, controllers not available"
                logger.warning(error_msg)
                if mqtt_msg.waitResponse:
                    await self._mqtt_proxy.send_command_response(
                        message_id=mqtt_msg.messageId,
                        response_data={"status": "error", "message": error_msg}
                    )
                return

            pose_controller = self._pubsub_controllers.get("pose_value_stream")
            if not pose_controller:
                error_msg = "Pose controller not found"
                logger.error(error_msg)
                if mqtt_msg.waitResponse:
                    await self._mqtt_proxy.send_command_response(
                        message_id=mqtt_msg.messageId,
                        response_data={"status": "error", "message": error_msg, "error_code": "CONTROLLER_NOT_FOUND"}
                    )
                return

            if not pose_controller.is_active:
                error_msg = "Pose controller is not streaming. Please subscribe to pose data first before starting verification."
                logger.error(error_msg)
                if mqtt_msg.waitResponse:
                    await self._mqtt_proxy.send_command_response(
                        message_id=mqtt_msg.messageId,
                        response_data={"status": "error", "message": error_msg, "error_code": "STREAM_NOT_ACTIVE"}
                    )
                return

            # Start verification process
            self._trajectory_verification.start_verification()

            # Create a background task to collect trajectory data from existing stream
            try:
                asyncio.create_task(self._collect_trajectory_data())
                logger.info("Started trajectory verification task successfully")
            except Exception as e:
                logger.error(f"Failed to create trajectory verification task: {e}")
                # Cleanup verification state on failure
                self._trajectory_verification.stop_verification()
                if mqtt_msg.waitResponse:
                    await self._mqtt_proxy.send_command_response(
                        message_id=mqtt_msg.messageId,
                        response_data={"status": "error", "message": f"Failed to start trajectory verification: {e}"}
                    )
                return

            logger.info("Started trajectory verification using existing pose data stream")

            if mqtt_msg.waitResponse:
                await self._mqtt_proxy.send_command_response(
                    message_id=mqtt_msg.messageId,
                    response_data={
                        "status": "success",
                        "message": "Trajectory verification started"
                    }
                )

    except ValidationError as ve:
        error_msg = f"Invalid trajectory verification payload: {ve}"
        logger.error(error_msg)
        # BUG FIX: use the raw message dict here -- `mqtt_msg` is unbound when
        # MQTTMessage(**message) itself raised ValidationError, so referencing
        # it caused an UnboundLocalError that masked the real failure. This
        # matches the pattern used by the WiFi/static-IP handlers.
        if message.get('waitResponse', False):
            await self._mqtt_proxy.send_command_response(
                message_id=message.get('messageId', 'unknown'),
                response_data={"status": "error", "message": error_msg, "error_code": "VALIDATION_ERROR"}
            )
    except Exception as e:
        error_msg = f"Trajectory verification handler error: {str(e)}"
        logger.error(error_msg)
        # BUG FIX: same unbound-`mqtt_msg` hazard as above.
        if message.get('waitResponse', False):
            await self._mqtt_proxy.send_command_response(
                message_id=message.get('messageId', 'unknown'),
                response_data={"status": "error", "message": error_msg, "error_code": "HANDLER_ERROR"}
            )
|
|
1085
|
+
|
|
1086
|
+
async def _verify_pos_yaw_directions_complete_handler(self, topic: str, message: Dict[str, Any]):
    """Handle trajectory verification completion.

    Finalizes the trajectory verifier (optionally producing a debug plot),
    dumps the collected trajectory points to a JSON file for debugging,
    and reports the verification results back over MQTT when requested.
    """
    try:
        # Validate and parse payload
        mqtt_msg = MQTTMessage(**message)

        # Finish verification and get results (the trajectory data collection will stop automatically)
        results = await self._trajectory_verification.finish_verification(
            generate_plot=Config.PetalUserJourneyCoordinatorConfig.DEBUG_SQUARE_TEST,
            plot_filename="trajectory_verification_plot.png"
        )

        # dump trajectory_points to a json file for debugging
        with open("trajectory_points_debug.json", "w") as f:
            json.dump(self._trajectory_verification.trajectory_points, f, indent=4)

        logger.info(f"Trajectory verification completed: {results['was_successful']}")

        if mqtt_msg.waitResponse:
            await self._mqtt_proxy.send_command_response(
                message_id=mqtt_msg.messageId,
                response_data={
                    "status": "success",
                    "message": "Trajectory verification completed",
                    "verification_results": results
                }
            )

    except Exception as e:
        error_msg = f"Trajectory verification completion handler error: {str(e)}"
        logger.error(error_msg)
        # BUG FIX: `mqtt_msg` is unbound when MQTTMessage(**message) raised,
        # so the previous `mqtt_msg.waitResponse` check crashed with
        # UnboundLocalError; fall back to the raw message dict instead.
        if message.get('waitResponse', False):
            await self._mqtt_proxy.send_command_response(
                message_id=message.get('messageId', 'unknown'),
                response_data={"status": "error", "message": error_msg, "error_code": "HANDLER_ERROR"}
            )
|
|
1122
|
+
|
|
1123
|
+
async def _collect_trajectory_data(self):
    """Background task to collect trajectory data from pose stream.

    Samples the pose controller at a configurable rate and feeds
    (x, y, yaw) points into the trajectory verifier until verification
    is stopped. Cancellation is treated as a normal shutdown.
    """
    try:
        # Check if controllers are initialized
        if self._pubsub_controllers is None:
            logger.warning("Controllers not initialized yet, cannot collect trajectory data")
            return

        pose_controller: PositionChannelsController = self._pubsub_controllers.get("pose_value_stream")

        # Determine collection rate
        if self.trajectory_collection_rate_hz is not None:
            # Use configured collection rate
            collection_rate_hz = self.trajectory_collection_rate_hz
            logger.info(f"Using configured trajectory collection rate: {collection_rate_hz} Hz")
        elif pose_controller and hasattr(pose_controller, 'publish_rate_hz'):
            # Match pose controller's publish rate
            collection_rate_hz = pose_controller.publish_rate_hz
            logger.info(f"Matching pose controller rate for trajectory collection: {collection_rate_hz} Hz")
        else:
            # Fallback to 10 Hz if rate cannot be determined
            collection_rate_hz = 10.0
            logger.warning("Could not determine pose controller rate, using fallback 10 Hz for trajectory collection")

    # BUG FIX: reset_reference_position() was previously called
    # unconditionally, raising AttributeError when no pose controller was
    # registered (the surrounding code explicitly tolerates None).
        if pose_controller is None:
            logger.warning("Pose controller not available, cannot collect trajectory data")
            return
        pose_controller.reset_reference_position()

        collection_interval = 1.0 / collection_rate_hz

        while self._trajectory_verification.is_active:
            # Sample the pose controller's internal data if it exposes it.
            if hasattr(pose_controller, '_get_sample_data'):
                sample_data = pose_controller._get_sample_data()

                if sample_data and 'x' in sample_data and 'y' in sample_data and 'yaw' in sample_data:
                    self._trajectory_verification.add_trajectory_point(
                        sample_data['x'],
                        sample_data['y'],
                        sample_data['yaw']
                    )

            await asyncio.sleep(collection_interval)

    except asyncio.CancelledError:
        logger.info("Trajectory data collection cancelled")
    except Exception as e:
        logger.error(f"Error in trajectory data collection: {e}")
|
|
1168
|
+
|
|
1169
|
+
async def _connect_to_wifi_and_verify_optitrack_handler(self, topic: str, message: Dict[str, Any]):
    """
    Handle WiFi connection and OptiTrack verification MQTT messages.

    This handler processes requests to:
    1. Connect to a specific WiFi network
    2. Verify the assigned IP is in the expected subnet
    3. Test connectivity to the OptiTrack server
    4. Send response with status and assigned IP
    """
    async def report_error(error_msg: str, error_code: str) -> None:
        # Shared failure path: log, then answer over MQTT if a reply was requested.
        logger.error(f"[{message.get('messageId', 'unknown')}] {error_msg}")
        if message.get('waitResponse', False):
            await self._mqtt_proxy.send_command_response(
                message_id=message.get('messageId', 'unknown'),
                response_data={"status": "error", "message": error_msg, "error_code": error_code}
            )

    try:
        # Validate and parse message
        mqtt_msg = MQTTMessage(**message)
        logger.info(f"[{mqtt_msg.messageId}] Received WiFi OptiTrack connection request")

        # Delegate the actual connect + verify work to the controller.
        result = await self._wifi_optitrack_controller.connect_and_verify(
            payload=mqtt_msg.payload,
            message_id=mqtt_msg.messageId
        )

        # Report the outcome back if the sender asked for a reply.
        if mqtt_msg.waitResponse:
            assigned_ip = result.get("assigned_ip_address", "")
            await self._mqtt_proxy.send_command_response(
                message_id=mqtt_msg.messageId,
                response_data={
                    "status": "success" if result["was_successful"] else "error",
                    "message": result["status_message"],
                    "assigned_ip_address": assigned_ip,
                    "connection_details": {
                        "was_successful": result["was_successful"],
                        "status_message": result["status_message"],
                        "assigned_ip_address": assigned_ip
                    }
                }
            )

    except ValidationError as ve:
        await report_error(f"WiFi OptiTrack validation error: {str(ve)}", "VALIDATION_ERROR")
    except Exception as e:
        await report_error(f"WiFi OptiTrack handler error: {str(e)}", "HANDLER_ERROR")
|
|
1222
|
+
|
|
1223
|
+
async def _set_static_ip_address_handler(self, topic: str, message: Dict[str, Any]):
    """
    Handle static IP address configuration MQTT messages.

    Processes requests to set a static IP address within the OptiTrack
    network subnet, ensuring the current IP is within the expected
    gateway network.
    """
    async def report_error(error_msg: str, error_code: str) -> None:
        # Shared failure path: log, then answer over MQTT if a reply was requested.
        logger.error(f"[{message.get('messageId', 'unknown')}] {error_msg}")
        if message.get('waitResponse', False):
            await self._mqtt_proxy.send_command_response(
                message_id=message.get('messageId', 'unknown'),
                response_data={"status": "error", "message": error_msg, "error_code": error_code}
            )

    try:
        # Validate and parse message
        mqtt_msg = MQTTMessage(**message)
        logger.info(f"[{mqtt_msg.messageId}] Received static IP address configuration request")

        # Delegate the static IP configuration to the controller.
        result = await self._wifi_optitrack_controller.set_static_ip_address(
            payload=mqtt_msg.payload,
            message_id=mqtt_msg.messageId
        )

        # Report the outcome back if the sender asked for a reply.
        if mqtt_msg.waitResponse:
            static_ip = result.get("assigned_static_ip", "")
            await self._mqtt_proxy.send_command_response(
                message_id=mqtt_msg.messageId,
                response_data={
                    "status": "success" if result["was_successful"] else "error",
                    "message": "Static IP configuration completed",
                    "assigned_static_ip": static_ip,
                    "static_ip_details": {
                        "assigned_static_ip": static_ip,
                        "was_successful": result["was_successful"]
                    }
                }
            )

    except ValidationError as ve:
        await report_error(f"Static IP validation error: {str(ve)}", "VALIDATION_ERROR")
    except Exception as e:
        await report_error(f"Static IP handler error: {str(e)}", "HANDLER_ERROR")
|
|
1273
|
+
|
|
1274
|
+
async def _esc_calibration_message_handler(self, topic: str, message: Dict[str, Any]):
    """
    Handle ESC calibration MQTT messages.

    Workflow:
      1. is_calibration_started=false  -> initialize the ESC calibration system
      2. is_calibration_started=true, throttle="maximum" -> start motor commands
      3. is_calibration_started=true, throttle="minimum" -> switch to minimum throttle
      4. force_cancel_calibration=true -> emergency stop

    Locks are held only long enough to read/write shared state; every await
    happens outside a lock so the handler cannot deadlock against the
    calibration task.
    """
    mqtt_msg = None
    try:
        # Validate and parse the incoming message and its payload.
        mqtt_msg = MQTTMessage(**message)
        payload = ESCCalibrationPayload(**mqtt_msg.payload)

        # Snapshot controller state under a minimal lock scope.
        with self._controller_locks[OperationMode.ESC_CALIBRATION]:
            controller = self._operation_controllers[OperationMode.ESC_CALIBRATION]

            # Drop a stale "active" reference if the controller stopped on its own.
            if self._active_controllers[OperationMode.ESC_CALIBRATION] == controller and not controller.is_active:
                self._active_controllers[OperationMode.ESC_CALIBRATION] = None
                logger.info("Cleared stale active controller reference (controller became inactive)")

            # Capture the state used for decision making below.
            is_controller_active = controller.is_active
            current_active = self._active_controllers[OperationMode.ESC_CALIBRATION]
            current_calibration_state = controller.calibration_state if hasattr(controller, 'calibration_state') else "idle"

            # Refresh the safety timeout on every message.
            controller.refresh_timeout(payload.safety_timeout_s)

        # --- Emergency stop (await outside the lock) ---
        if payload.force_cancel_calibration:
            # Clear the active reference first so nothing else races us.
            with self._controller_locks[OperationMode.ESC_CALIBRATION]:
                self._active_controllers[OperationMode.ESC_CALIBRATION] = None

            try:
                await controller.emergency_stop(controller.get_operation_targets())
                logger.info("Emergency stop completed for ESC calibration")
            except asyncio.TimeoutError:
                logger.warning("ESC calibration emergency stop timed out")
            except Exception as e:
                logger.error(f"Error during ESC calibration emergency stop: {e}")

            if mqtt_msg.waitResponse:
                await self._mqtt_proxy.send_command_response(
                    message_id=mqtt_msg.messageId,
                    response_data={"status": "success", "message": "ESC calibration cancelled"}
                )
            return

        # --- Safe stop while a calibration is running ---
        if payload.safe_stop_calibration and is_controller_active:
            if current_active == controller:
                try:
                    await controller.stop_operation()
                    logger.info("Stopped ESC calibration")
                except asyncio.TimeoutError:
                    logger.warning("ESC calibration stop operation timed out")
                except Exception as e:
                    logger.error(f"Error stopping ESC calibration: {e}")
                finally:
                    with self._controller_locks[OperationMode.ESC_CALIBRATION]:
                        self._active_controllers[OperationMode.ESC_CALIBRATION] = None

        # --- Safe stop requested but nothing is running ---
        elif payload.safe_stop_calibration and not is_controller_active:
            logger.info("ESC calibration is not active, nothing to stop")
            if mqtt_msg.waitResponse:
                await self._mqtt_proxy.send_command_response(
                    message_id=mqtt_msg.messageId,
                    response_data={"status": "success", "message": "ESC calibration is not active"}
                )
            return

        # --- Initialization phase: is_calibration_started=false ---
        if not payload.is_calibration_started and not is_controller_active:
            # A different controller may still be active; stop it first (await outside lock).
            if current_active is not None and current_active != controller:
                logger.warning("Another operation is already active, stopping it first")
                try:
                    await current_active.stop_operation()
                except asyncio.TimeoutError:
                    logger.warning("Active controller stop operation timed out")
                except Exception as e:
                    logger.error(f"Error stopping active controller: {e}")

            # Claim the active slot, then start the calibration task.
            with self._controller_locks[OperationMode.ESC_CALIBRATION]:
                self._active_controllers[OperationMode.ESC_CALIBRATION] = controller

            try:
                await controller.start_operation(payload)
                # calibration state set to "configured"
                logger.info("Started ESC calibration task (initialization phase)")
            except asyncio.TimeoutError:
                logger.error("ESC calibration start operation timed out")
                with self._controller_locks[OperationMode.ESC_CALIBRATION]:
                    self._active_controllers[OperationMode.ESC_CALIBRATION] = None
            except Exception as e:
                logger.error(f"Error starting ESC calibration: {e}")
                with self._controller_locks[OperationMode.ESC_CALIBRATION]:
                    self._active_controllers[OperationMode.ESC_CALIBRATION] = None

        # --- Throttle update while running: the calibration task polls this state ---
        elif payload.is_calibration_started and is_controller_active:
            with self._controller_locks[OperationMode.ESC_CALIBRATION]:
                if payload.throttle:
                    if payload.throttle == "maximum":
                        controller.calibration_state = "maximum"
                        logger.info("Updated calibration state to MAXIMUM throttle")
                    elif payload.throttle == "minimum":
                        controller.calibration_state = "minimum"
                        logger.info("Updated calibration state to MINIMUM throttle")

        # --- Throttle command received without prior initialization ---
        elif payload.is_calibration_started and not is_controller_active:
            logger.warning("Received throttle command but ESC calibration not initialized. Send is_calibration_started=false first to initialize.")

        # --- Report the current state back to the requester ---
        if mqtt_msg.waitResponse:
            with self._controller_locks[OperationMode.ESC_CALIBRATION]:
                current_state = controller.calibration_state if hasattr(controller, 'calibration_state') else "idle"
                is_active = controller.is_active

            status_message = f"ESC calibration state: {current_state if is_active else 'stopped'}"
            await self._mqtt_proxy.send_command_response(
                message_id=mqtt_msg.messageId,
                response_data={
                    "status": "success",
                    "message": status_message,
                    "calibration_state": current_state,
                    "is_active": is_active
                }
            )

    except Exception as e:
        logger.error(f"Error handling ESC calibration message: {e}")
        # mqtt_msg stays None if parsing failed, in which case no response can be addressed.
        if mqtt_msg and mqtt_msg.waitResponse:
            await self._mqtt_proxy.send_command_response(
                message_id=mqtt_msg.messageId,
                response_data={"status": "error", "message": str(e)}
            )
|
|
1423
|
+
|
|
1424
|
+
async def _esc_force_run_all_message_handler(self, topic: str, message: Dict[str, Any]):
    """Handle ESC force run all MQTT messages (stateful, idempotent)."""
    mqtt_msg = None
    try:
        mqtt_msg = MQTTMessage(**message)
        payload = ESCForceRunAllPayload(**mqtt_msg.payload)

        # Snapshot controller state under a minimal lock scope.
        with self._controller_locks[OperationMode.ESC_FORCE_RUN_ALL]:
            controller: ESCForceRunAllController = self._operation_controllers[OperationMode.ESC_FORCE_RUN_ALL]

            # Drop a stale "active" reference if the controller stopped on its own.
            if self._active_controllers[OperationMode.ESC_FORCE_RUN_ALL] == controller and not controller.is_active:
                self._active_controllers[OperationMode.ESC_FORCE_RUN_ALL] = None
                logger.info("Cleared stale active controller reference (ESC force run all became inactive)")

            is_controller_active = controller.is_active
            current_active = self._active_controllers[OperationMode.ESC_FORCE_RUN_ALL]

        # --- Emergency stop (await outside the lock) ---
        if payload.force_cancel:
            with self._controller_locks[OperationMode.ESC_FORCE_RUN_ALL]:
                self._active_controllers[OperationMode.ESC_FORCE_RUN_ALL] = None

            try:
                await controller.emergency_stop(controller.get_operation_targets())
                logger.info("Emergency stop completed for ESC force run all")
            except asyncio.TimeoutError:
                logger.warning("ESC force run all emergency stop timed out")
            except Exception as e:
                logger.error(f"Error during ESC force run all emergency stop: {e}")

            if mqtt_msg.waitResponse:
                await self._mqtt_proxy.send_command_response(
                    message_id=mqtt_msg.messageId,
                    response_data={"status": "success", "message": "ESC force run all cancelled"}
                )
            return

        if not is_controller_active:
            # --- Start the operation (awaits outside the lock) ---
            if current_active is not None:
                logger.warning("Another operation is already active, stopping it first")
                try:
                    await current_active.stop_operation()
                except asyncio.TimeoutError:
                    logger.warning("Active controller stop operation timed out")
                except Exception as e:
                    logger.error(f"Error stopping active controller: {e}")

            with self._controller_locks[OperationMode.ESC_FORCE_RUN_ALL]:
                self._active_controllers[OperationMode.ESC_FORCE_RUN_ALL] = controller

            try:
                await controller.start_operation(payload)
                logger.info("Started ESC force run all task")
            except asyncio.TimeoutError:
                logger.error("ESC force run all start operation timed out")
                with self._controller_locks[OperationMode.ESC_FORCE_RUN_ALL]:
                    self._active_controllers[OperationMode.ESC_FORCE_RUN_ALL] = None
            except Exception as e:
                logger.error(f"Error starting ESC force run all: {e}")
                with self._controller_locks[OperationMode.ESC_FORCE_RUN_ALL]:
                    self._active_controllers[OperationMode.ESC_FORCE_RUN_ALL] = None
        else:
            # --- Already running: update the command the task polls ---
            with self._controller_locks[OperationMode.ESC_FORCE_RUN_ALL]:
                controller.current_command_value = payload.motors_common_command

                controller.refresh_timeout(payload.safety_timeout_s)
                logger.info(f"Updated ESC force run all command to {payload.motors_common_command}")

        # --- Report the current state back to the requester ---
        if mqtt_msg.waitResponse:
            with self._controller_locks[OperationMode.ESC_FORCE_RUN_ALL]:
                is_active = controller.is_active
                current_command = getattr(controller, 'current_command_value', None)

            await self._mqtt_proxy.send_command_response(
                message_id=mqtt_msg.messageId,
                response_data={
                    "status": "success",
                    "message": f"ESC force run all state: {'active' if is_active else 'stopped'}",
                    "is_active": is_active,
                    "current_command_value": current_command
                }
            )

    except Exception as e:
        logger.error(f"Error handling ESC force run all message: {e}")
        # mqtt_msg stays None if parsing failed, in which case no response can be addressed.
        if mqtt_msg and mqtt_msg.waitResponse:
            await self._mqtt_proxy.send_command_response(
                message_id=mqtt_msg.messageId,
                response_data={"status": "error", "message": str(e)}
            )
|
|
1522
|
+
|
|
1523
|
+
async def _esc_force_run_single_message_handler(self, topic: str, message: Dict[str, Any]):
    """Handle ESC force run single MQTT messages (stateful, idempotent)."""
    mqtt_msg = None
    try:
        mqtt_msg = MQTTMessage(**message)
        payload = ESCForceRunSinglePayload(**mqtt_msg.payload)

        # Snapshot controller state under a minimal lock scope.
        with self._controller_locks[OperationMode.ESC_FORCE_RUN_SINGLE]:
            controller: ESCForceRunSingleController = self._operation_controllers[OperationMode.ESC_FORCE_RUN_SINGLE]

            # Drop a stale "active" reference if the controller stopped on its own.
            if self._active_controllers[OperationMode.ESC_FORCE_RUN_SINGLE] == controller and not controller.is_active:
                self._active_controllers[OperationMode.ESC_FORCE_RUN_SINGLE] = None
                logger.info("Cleared stale active controller reference (ESC force run single became inactive)")

            is_controller_active = controller.is_active
            current_active = self._active_controllers[OperationMode.ESC_FORCE_RUN_SINGLE]

        # --- Emergency stop (await outside the lock) ---
        if payload.force_cancel:
            with self._controller_locks[OperationMode.ESC_FORCE_RUN_SINGLE]:
                self._active_controllers[OperationMode.ESC_FORCE_RUN_SINGLE] = None

            try:
                await controller.emergency_stop(controller.get_operation_targets())
                logger.info("Emergency stop completed for ESC force run single")
            except asyncio.TimeoutError:
                logger.warning("ESC force run single emergency stop timed out")
            except Exception as e:
                logger.error(f"Error during ESC force run single emergency stop: {e}")

            if mqtt_msg.waitResponse:
                await self._mqtt_proxy.send_command_response(
                    message_id=mqtt_msg.messageId,
                    response_data={"status": "success", "message": "ESC force run single cancelled"}
                )
            return

        if not is_controller_active:
            # --- Start the operation (awaits outside the lock) ---
            if current_active is not None:
                logger.warning("Another operation is already active, stopping it first")
                try:
                    await current_active.stop_operation()
                except asyncio.TimeoutError:
                    logger.warning("Active controller stop operation timed out")
                except Exception as e:
                    logger.error(f"Error stopping active controller: {e}")

            with self._controller_locks[OperationMode.ESC_FORCE_RUN_SINGLE]:
                self._active_controllers[OperationMode.ESC_FORCE_RUN_SINGLE] = controller

            try:
                await controller.start_operation(payload)
                logger.info("Started ESC force run single task")
            except asyncio.TimeoutError:
                logger.error("ESC force run single start operation timed out")
                with self._controller_locks[OperationMode.ESC_FORCE_RUN_SINGLE]:
                    self._active_controllers[OperationMode.ESC_FORCE_RUN_SINGLE] = None
            except Exception as e:
                logger.error(f"Error starting ESC force run single: {e}")
                with self._controller_locks[OperationMode.ESC_FORCE_RUN_SINGLE]:
                    self._active_controllers[OperationMode.ESC_FORCE_RUN_SINGLE] = None
        else:
            # --- Already running: update command and target motor for the task ---
            with self._controller_locks[OperationMode.ESC_FORCE_RUN_SINGLE]:
                controller.current_command_value = payload.motor_command
                controller.target_motor = payload.motor_idx

                controller.refresh_timeout(payload.safety_timeout_s)
                logger.info(f"Updated ESC force run single command to {payload.motor_command} for motor {payload.motor_idx}")

        # --- Report the current state back to the requester ---
        if mqtt_msg.waitResponse:
            with self._controller_locks[OperationMode.ESC_FORCE_RUN_SINGLE]:
                is_active = controller.is_active
                current_command = getattr(controller, 'current_command_value', None)
                target_motor = getattr(controller, 'target_motor', None)

            await self._mqtt_proxy.send_command_response(
                message_id=mqtt_msg.messageId,
                response_data={
                    "status": "success",
                    "message": f"ESC force run single state: {'active' if is_active else 'stopped'}",
                    "is_active": is_active,
                    "current_command_value": current_command,
                    "target_motor": target_motor
                }
            )

    except Exception as e:
        logger.error(f"Error handling ESC force run single message: {e}")
        # mqtt_msg stays None if parsing failed, in which case no response can be addressed.
        if mqtt_msg and mqtt_msg.waitResponse:
            await self._mqtt_proxy.send_command_response(
                message_id=mqtt_msg.messageId,
                response_data={"status": "error", "message": str(e)}
            )
|
|
1624
|
+
|
|
1625
|
+
def _create_parameter_message_handler(self, handler_key: str, config_type: str):
    """
    Factory method to create parameter message handlers with minimal boilerplate.

    Args:
        handler_key: Key for the parameter handler in self._parameter_handlers
        config_type: Human-readable configuration type for error messages

    Returns:
        Async function that handles the parameter configuration message
    """
    async def parameter_handler(topic: str, message: Dict[str, Any]):
        # Derive the id/response flag from the raw dict first. Previously the
        # except blocks referenced `mqtt_msg`/`message_id`, which are unbound
        # when MQTTMessage(**message) itself fails validation, raising a
        # NameError that masked the real error and dropped the error response.
        message_id = message.get('messageId', 'unknown')
        wait_response = message.get('waitResponse', False)
        try:
            # Parse base MQTT message; prefer its validated fields when available.
            mqtt_msg = MQTTMessage(**message)
            message_id = mqtt_msg.messageId
            wait_response = mqtt_msg.waitResponse

            # Reject parameter writes while any operation is running.
            for controller in self._active_controllers.values():
                if controller is not None and controller.is_active:
                    error_msg = "Parameter configuration blocked - Active operation in progress"
                    logger.warning(f"[{message_id}] {error_msg}")
                    if wait_response:
                        await self._mqtt_proxy.send_command_response(
                            message_id=message_id,
                            response_data={
                                "status": "error",
                                "message": error_msg,
                                "error_code": "OPERATION_ACTIVE"
                            }
                        )
                    return

            # Process payload using the specified handler.
            response_payload = await self._parameter_handlers[handler_key].process_payload(mqtt_msg.payload, message_id)

            logger.info(f"[{message_id}] Successfully processed {config_type} configuration")

            # Send response if requested.
            if wait_response:
                await self._mqtt_proxy.send_command_response(
                    message_id=message_id,
                    response_data=response_payload
                )

        except ValidationError as ve:
            error_msg = f"Invalid {config_type} payload: {ve}"
            logger.error(f"Parameter config validation error: {error_msg}")
            if wait_response:
                await self._mqtt_proxy.send_command_response(
                    message_id=message_id,
                    response_data={
                        "status": "error",
                        "message": error_msg,
                        "error_code": "VALIDATION_ERROR"
                    }
                )
        except Exception as e:
            error_msg = f"{config_type.title()} handler error: {str(e)}"
            logger.error(f"Unexpected {config_type} error: {error_msg}")
            if wait_response:
                await self._mqtt_proxy.send_command_response(
                    message_id=message_id,
                    response_data={
                        "status": "error",
                        "message": error_msg,
                        "error_code": "HANDLER_ERROR"
                    }
                )

    return parameter_handler
|
|
1697
|
+
|
|
1698
|
+
async def _unregister_all_handlers(self, topic: str, message: Dict[str, Any]):
    """
    Handler for unsubscribe all command - stops all active PubSub streaming.

    Args:
        topic: MQTT topic that triggered this handler
        message: MQTT message containing command details
    """
    try:
        message_id = message.get("messageId", "unknown")
        wait_response = message.get("waitResponse", False)
        logger.info(f"[{message_id}] Processing unsubscribe all command")

        # Use the subscription management system to stop all streams.
        result = await self.unsubscribe_all_streams()

        # Send response if requested.
        if wait_response:
            success_count = result.get('stopped_count', 0)
            failed_count = result.get('failed_count', 0)
            total_count = success_count + failed_count

            # Overall status degrades from success -> partial_success -> error.
            status = "success" if failed_count == 0 else "partial_success" if success_count > 0 else "error"

            response_data = {
                "status": status,
                "message": f"Unsubscribe all completed - {success_count}/{total_count} streams stopped successfully",
                "stopped_count": success_count,
                "failed_count": failed_count,
                "total_streams": total_count
            }

            if result.get('errors'):
                response_data["errors"] = result['errors']

            await self._mqtt_proxy.send_command_response(
                message_id=message_id,
                response_data=response_data
            )

        logger.info(f"[{message_id}] Unsubscribe all completed - {result.get('stopped_count', 0)} streams stopped")

    except Exception as e:
        logger.error(f"Error in unsubscribe all handler: {e}")
        # Best-effort error response to the requester.
        try:
            message_id = message.get("messageId", "unknown")
            wait_response = message.get("waitResponse", False)
            if wait_response:
                await self._mqtt_proxy.send_command_response(
                    message_id=message_id,
                    response_data={
                        "status": "error",
                        "message": f"Unsubscribe all handler error: {str(e)}",
                        "error_code": "HANDLER_ERROR"
                    }
                )
        except Exception as response_error:
            # Was a bare `except: pass` — narrowed so SystemExit/KeyboardInterrupt
            # propagate, and the failure is actually logged as the comment promised.
            logger.error(f"Failed to send unsubscribe-all error response: {response_error}")
|
|
1757
|
+
|
|
1758
|
+
def _create_pubsub_message_handlers(self, controller_key: str, stream_name: str):
    """
    Factory method to create subscribe and unsubscribe message handlers with minimal boilerplate.

    Args:
        controller_key: Key for the pub/sub controller in self._pubsub_controllers
        stream_name: Human-readable stream name for error messages

    Returns:
        Tuple of (subscribe_handler, unsubscribe_handler) functions
    """
    async def subscribe_handler(topic: str, message: Dict[str, Any]):
        # Derive the id/response flag from the raw dict first. Previously the
        # except blocks referenced `mqtt_msg`, which is unbound when
        # MQTTMessage(**message) itself fails validation -> NameError masked
        # the real error and no error response was sent.
        message_id = message.get('messageId', 'unknown')
        wait_response = message.get('waitResponse', False)
        try:
            # Check if controllers are initialized.
            if self._pubsub_controllers is None:
                error_msg = "Petal not fully initialized yet, controllers not available"
                logger.warning(error_msg)
                return

            # Parse MQTT message; prefer its validated fields when available.
            mqtt_msg = MQTTMessage(**message)
            message_id = mqtt_msg.messageId
            wait_response = mqtt_msg.waitResponse
            subscribe_payload = SubscribePayload(**mqtt_msg.payload)

            controller = self._pubsub_controllers.get(controller_key)
            if not controller:
                error_msg = f"{stream_name} controller not found"
                logger.error(error_msg)
                if wait_response:
                    await self._mqtt_proxy.send_command_response(
                        message_id=message_id,
                        response_data={"status": "error", "message": error_msg, "error_code": "CONTROLLER_NOT_FOUND"}
                    )
                return

            # Start streaming.
            await controller.start_streaming(
                subscribe_payload.subscribed_stream_id,
                subscribe_payload.data_rate_hz
            )

            # Track the subscription.
            self._track_subscription(controller_key, subscribe_payload.subscribed_stream_id, subscribe_payload.data_rate_hz)

            # Send response if requested.
            if wait_response:
                await self._mqtt_proxy.send_command_response(
                    message_id=message_id,
                    response_data={
                        "status": "success",
                        "message": f"Started streaming {subscribe_payload.subscribed_stream_id} at {subscribe_payload.data_rate_hz} Hz"
                    }
                )

        except ValidationError as ve:
            error_msg = f"Invalid {stream_name} subscribe payload: {ve}"
            logger.error(error_msg)
            if wait_response:
                await self._mqtt_proxy.send_command_response(
                    message_id=message_id,
                    response_data={"status": "error", "message": error_msg, "error_code": "VALIDATION_ERROR"}
                )
        except Exception as e:
            error_msg = f"{stream_name} subscribe handler error: {str(e)}"
            logger.error(error_msg)
            if wait_response:
                await self._mqtt_proxy.send_command_response(
                    message_id=message_id,
                    response_data={"status": "error", "message": error_msg, "error_code": "HANDLER_ERROR"}
                )

    async def unsubscribe_handler(topic: str, message: Dict[str, Any]):
        # Same raw-dict fallback as subscribe_handler (see note there).
        message_id = message.get('messageId', 'unknown')
        wait_response = message.get('waitResponse', False)
        try:
            # Check if controllers are initialized.
            if self._pubsub_controllers is None:
                error_msg = "Petal not fully initialized yet, controllers not available"
                logger.warning(error_msg)
                return

            # Parse MQTT message; prefer its validated fields when available.
            mqtt_msg = MQTTMessage(**message)
            message_id = mqtt_msg.messageId
            wait_response = mqtt_msg.waitResponse
            unsubscribe_payload = UnsubscribePayload(**mqtt_msg.payload)

            controller = self._pubsub_controllers.get(controller_key)
            if not controller:
                error_msg = f"{stream_name} controller not found"
                logger.error(error_msg)
                if wait_response:
                    await self._mqtt_proxy.send_command_response(
                        message_id=message_id,
                        response_data={"status": "error", "message": error_msg, "error_code": "CONTROLLER_NOT_FOUND"}
                    )
                return

            # Stop streaming.
            await controller.stop_streaming()

            # Untrack the subscription.
            self._untrack_subscription(controller_key)

            # Send response if requested.
            if wait_response:
                await self._mqtt_proxy.send_command_response(
                    message_id=message_id,
                    response_data={
                        "status": "success",
                        "message": f"Stopped streaming {unsubscribe_payload.unsubscribed_stream_id}"
                    }
                )

        except ValidationError as ve:
            error_msg = f"Invalid {stream_name} unsubscribe payload: {ve}"
            logger.error(error_msg)
            if wait_response:
                await self._mqtt_proxy.send_command_response(
                    message_id=message_id,
                    response_data={"status": "error", "message": error_msg, "error_code": "VALIDATION_ERROR"}
                )
        except Exception as e:
            error_msg = f"{stream_name} unsubscribe handler error: {str(e)}"
            logger.error(error_msg)
            if wait_response:
                await self._mqtt_proxy.send_command_response(
                    message_id=message_id,
                    response_data={"status": "error", "message": error_msg, "error_code": "HANDLER_ERROR"}
                )

    return subscribe_handler, unsubscribe_handler
|
|
1885
|
+
|
|
1886
|
+
async def async_shutdown(self) -> None:
    """Called when the petal is stopped, in async context."""
    # Stop every controller that is still marked active.
    # NOTE(review): stop_operation is awaited while the threading lock is
    # held, mirroring the established shutdown order — confirm this cannot
    # stall the event loop if a handler contends for the same lock.
    for mode, active in self._active_controllers.items():
        with self._controller_locks[mode]:
            if active is not None:
                logger.info(f"Awaiting shutdown of active controller: {type(active).__name__}")
                try:
                    await active.stop_operation()
                    logger.info(f"Controller {type(active).__name__} shut down successfully")
                except asyncio.TimeoutError:
                    logger.warning(f"Timeout while shutting down controller: {type(active).__name__}")
                except Exception as e:
                    logger.error(f"Error during shutdown of controller {type(active).__name__}: {e}")
                finally:
                    self._active_controllers[mode] = None

    # Best-effort cleanup of every operation controller, active or not.
    for op_mode, op_controller in self._operation_controllers.items():
        if hasattr(op_controller, 'stop_operation'):
            try:
                await op_controller.stop_operation()
                logger.info(f"Cleaned up {op_mode.value} controller")
            except Exception as e:
                logger.error(f"Error cleaning up {op_mode.value} controller: {e}")

    # Pub/sub controllers are created lazily; skip if never initialized.
    if self._pubsub_controllers is not None:
        for stream_key, stream_controller in self._pubsub_controllers.items():
            if hasattr(stream_controller, 'stop_streaming'):
                try:
                    await stream_controller.stop_streaming()
                    logger.info(f"Cleaned up {stream_key} controller")
                except Exception as e:
                    logger.error(f"Error cleaning up {stream_key} controller: {e}")

    logger.info(f"All controllers cleaned up for {self.name} petal")
|
|
1923
|
+
|
|
1924
|
+
def shutdown(self) -> None:
    """Called when the petal is stopped."""
    # Detach every active controller; we cannot await in this sync
    # context, so the actual async teardown is left to async_shutdown.
    for mode, active in self._active_controllers.items():
        with self._controller_locks[mode]:
            if active is None:
                continue
            logger.info(f"Shutting down active controller: {type(active).__name__}")
            self._active_controllers[mode] = None

    # Flag every operation controller; real cleanup happens during
    # async_shutdown if the host calls it.
    for operation_mode in self._operation_controllers:
        logger.info(f"Marking {operation_mode.value} controller for shutdown")

    # Same for the pub/sub controllers, when they were initialized.
    if self._pubsub_controllers is not None:
        for stream_name in self._pubsub_controllers:
            logger.info(f"Marking {stream_name} controller for shutdown")

    super().shutdown()
    self._status_message = "Petal is shutting down"
    logger.info(f"{self.name} petal shut down")
def get_required_proxies(self) -> List[str]:
    """
    Return a list of proxy names that this petal requires.

    Override this method to specify which proxies your petal needs.
    Available proxies: 'redis', 'db', 'ext_mavlink'
    """
    # Modify this list based on your petal's needs.
    required = ["ext_mavlink", "mqtt"]
    return required
def get_optional_proxies(self) -> List[str]:
    """
    Return a list of proxy names that this petal can optionally use.

    Override this method to specify which proxies your petal can use
    but doesn't strictly require.
    """
    # No optional proxies for this petal; extend as needed.
    return []
def get_petal_status(self) -> Dict[str, Any]:
    """
    Return custom status information for this petal.

    Override this method to provide specific status information
    about your petal's internal state.
    """
    started = self._startup_time
    uptime = (datetime.now() - started).total_seconds() if started else 0
    # Extend the returned mapping with custom metrics, e.g.:
    #   "custom_metric": self._some_internal_counter,
    #   "last_operation": self._last_operation_time,
    return {
        "message": self._status_message,
        "startup_time": started.isoformat() if started else None,
        "uptime_seconds": uptime,
    }
@http_action(
    method="GET",
    path="/health",
    description="Health check endpoint for this petal"
)
async def health_check(self):
    """
    Health check endpoint that reports proxy requirements and petal status.

    This endpoint provides information about:
    - Required and optional proxies
    - Custom petal status information
    """
    # Assemble and return the health report in one step.
    return {
        "petal_name": self.name,
        "petal_version": self.version,
        "timestamp": datetime.now().isoformat(),
        "status": "healthy",
        "required_proxies": self.get_required_proxies(),
        "optional_proxies": self.get_optional_proxies(),
        "petal_status": self.get_petal_status(),
    }
@http_action(
    method="GET",
    path="/px4-parameter",
    description="get a specific PX4 parameter"
)
async def get_px4_parameter(self, data: ParameterRequestModel) -> MavlinkParameterResponseModel:
    """
    Get a specific PX4 parameter value.

    Raises:
        HTTPException: 404 if the parameter has no value, 504 on timeout.
    """
    parameter_name = data.parameter_name
    logger.info(f"Getting PX4 parameter '{parameter_name}' for {self.name}")

    try:
        parameter_value = await self._mavlink_proxy.get_param(parameter_name)

        # BUG FIX: `if not parameter_value:` rejected legitimate values
        # of 0 / 0.0. Only a missing (None) result means "not found".
        if parameter_value is None:
            logger.error("No value found for PX4 parameter")
            raise HTTPException(
                status_code=404,
                detail="No value found",
                headers={"source": "px4_parameter"}
            )
        payload = {
            "parameter_name": parameter_name,
            "parameter_value": parameter_value,
            "timestamp": datetime.now().isoformat()
        }
        # Sanitize (NaN/inf) before FastAPI encodes — consistent with the
        # other parameter handlers in this class.
        return _json_safe(payload)
    except TimeoutError as exc:
        logger.error(f"Timeout while waiting for PX4 parameter: {str(exc)}")
        raise HTTPException(status_code=504, detail=str(exc))
@http_action(
    method="GET",
    path="/px4-parameters",
    description="get a specific PX4 parameter"
)
async def get_all_parameters(self) -> MavlinkParametersResponseModel:
    """
    Get all PX4 parameter values.

    Raises:
        HTTPException: 404 if no parameters were received, 504 on timeout.
    """
    logger.info(f"Getting PX4 parameters for {self.name}")

    try:
        parameters = await self._mavlink_proxy.get_all_params()

        # None or an empty mapping both mean nothing was received.
        if not parameters:
            # FIX: log message previously said "No value found for PX4
            # parameter" (copy-paste from the single-parameter handler).
            logger.error("No PX4 parameters received")
            raise HTTPException(
                status_code=404,
                detail="No value found",
                headers={"source": "px4_parameter"}
            )
        payload = {
            "parameters": parameters,
            "timestamp": datetime.now().isoformat()
        }
        return _json_safe(payload)  # sanitize before FastAPI encodes
    except TimeoutError as exc:
        logger.error(f"Timeout while waiting for PX4 parameter: {str(exc)}")
        raise HTTPException(status_code=504, detail=str(exc))
@http_action(
    method="POST",
    path="/px4-parameter",
    description="set a specific PX4 parameter"
)
async def set_px4_parameter(self, data: ParameterBaseModel) -> ParameterResponseModel:
    """
    Set a specific PX4 parameter value.

    Raises:
        HTTPException: 400 on missing fields, 404 if the set was not
        confirmed, 504 on timeout.
    """
    parameter_name = data.parameter_name
    parameter_value = data.parameter_value

    if parameter_name is None or parameter_value is None:
        logger.error("Missing parameter_name or parameter_value")
        raise HTTPException(
            status_code=400,
            detail="Missing parameter_name or parameter_value",
            headers={"source": "px4_parameter"}
        )

    logger.info(f"Setting PX4 parameter '{parameter_name}' to {parameter_value} for {self.name}")

    try:
        # set_param returns the same shape of result as get_param().
        result = await self._mavlink_proxy.set_param(parameter_name, parameter_value)

        # BUG FIX: `if not result:` raised 404 when the confirmed value
        # was 0 / 0.0. Only a missing (None) result means failure.
        if result is None:
            logger.error("No value found for PX4 parameter")
            raise HTTPException(
                status_code=404,
                detail="No value found",
                headers={"source": "px4_parameter"}
            )
        payload = {
            "parameter_name": parameter_name,
            "parameter_value": parameter_value,
            "timestamp": datetime.now().isoformat()
        }
        return _json_safe(payload)  # sanitize before FastAPI encodes
    except TimeoutError as exc:
        logger.error(f"Timeout while waiting for PX4 parameter: {str(exc)}")
        raise HTTPException(status_code=504, detail=str(exc))
@http_action(
    method="POST",
    path="/rotor-count",
    description="set a specific PX4 parameter"
)
async def set_rotor_count(self, data: RotorCountParameter) -> ParameterResponseModel:
    """
    Set the PX4 CA_ROTOR_COUNT parameter.

    Raises:
        HTTPException: 400 on a missing rotor count, 404 if the set was
        not confirmed, 504 on timeout.
    """
    parameter_name = "CA_ROTOR_COUNT"
    parameter_value = data.rotor_count

    if parameter_value is None:
        logger.error("Missing parameter_value")
        raise HTTPException(
            status_code=400,
            detail="Missing parameter_value",
            headers={"source": "px4_parameter"}
        )

    logger.info(f"Setting PX4 parameter '{parameter_name}' to {parameter_value} for {self.name}")

    try:
        # Rotor count is an integer parameter, so force the INT32 type.
        result = await self._mavlink_proxy.set_param(
            name=parameter_name,
            value=int(parameter_value),
            ptype=mavutil.mavlink.MAV_PARAM_TYPE_INT32
        )  # returns same result as get_param()

        # BUG FIX: `if not result:` raised 404 when the confirmed value
        # was falsy (0 / 0.0). Only a missing (None) result means failure.
        if result is None:
            logger.error("No value found for PX4 parameter")
            raise HTTPException(
                status_code=404,
                detail="No value found",
                headers={"source": "px4_parameter"}
            )
        payload = {
            "parameter_name": parameter_name,
            "parameter_value": parameter_value,
            "timestamp": datetime.now().isoformat()
        }
        return _json_safe(payload)  # sanitize before FastAPI encodes
    except TimeoutError as exc:
        logger.error(f"Timeout while waiting for PX4 parameter: {str(exc)}")
        raise HTTPException(status_code=504, detail=str(exc))