osiris-agent 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- osiris_agent/__init__.py +5 -0
- osiris_agent/agent_node.py +967 -0
- osiris_agent-0.1.0.dist-info/METADATA +315 -0
- osiris_agent-0.1.0.dist-info/RECORD +8 -0
- osiris_agent-0.1.0.dist-info/WHEEL +5 -0
- osiris_agent-0.1.0.dist-info/entry_points.txt +2 -0
- osiris_agent-0.1.0.dist-info/licenses/LICENSE +190 -0
- osiris_agent-0.1.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,967 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import asyncio
|
|
3
|
+
import threading
|
|
4
|
+
import time
|
|
5
|
+
import rclpy
|
|
6
|
+
from collections import deque
|
|
7
|
+
from rcl_interfaces.srv import GetParameters, ListParameters
|
|
8
|
+
from rclpy.node import Node
|
|
9
|
+
from rclpy.qos import QoSProfile
|
|
10
|
+
from rclpy.parameter import parameter_value_to_python
|
|
11
|
+
import websockets
|
|
12
|
+
import json
|
|
13
|
+
from rosidl_runtime_py.utilities import get_message
|
|
14
|
+
from rosidl_runtime_py import message_to_ordereddict
|
|
15
|
+
import psutil
|
|
16
|
+
|
|
17
|
+
# Security and configuration constants

# Hard cap on concurrent bridge topic subscriptions (enforced in _subscribe_to_topic).
MAX_SUBSCRIPTIONS = 100
# Topic-name prefixes accepted by the bridge ('/' matches every absolute topic).
# NOTE(review): not referenced in the visible code — confirm it is enforced elsewhere.
ALLOWED_TOPIC_PREFIXES = ['/', ]
# Seconds between background refreshes of every node's parameters.
PARAMETER_REFRESH_INTERVAL = 5.0
# Seconds between polls of the ROS graph for node/topic changes.
GRAPH_CHECK_INTERVAL = 0.1
# Seconds between telemetry samples.
TELEMETRY_INTERVAL = 1.0
# Websocket reconnect backoff bounds, in seconds (exponential with jitter).
RECONNECT_INITIAL_DELAY = 1
RECONNECT_MAX_DELAY = 10
|
|
25
|
+
|
|
26
|
+
class WebBridge(Node):
|
|
27
|
+
# Initialize node, validate token, setup timers and start websocket thread
|
|
28
|
+
def __init__(self):
|
|
29
|
+
super().__init__('bridge_node')
|
|
30
|
+
auth_token = os.environ.get('OSIRIS_AUTH_TOKEN')
|
|
31
|
+
if not auth_token:
|
|
32
|
+
raise ValueError("OSIRIS_AUTH_TOKEN environment variable must be set")
|
|
33
|
+
|
|
34
|
+
self.ws_url = f'wss://osiris-gateway.fly.dev?robot=true&token={auth_token}'
|
|
35
|
+
self.ws = None
|
|
36
|
+
self._topic_subs = {}
|
|
37
|
+
self._topic_subs_lock = threading.Lock()
|
|
38
|
+
self.loop = None
|
|
39
|
+
self._send_queue = None
|
|
40
|
+
self._active_nodes = set(self.get_node_names())
|
|
41
|
+
self._active_topics = set(dict(self.get_topic_names_and_types()).keys())
|
|
42
|
+
self._active_actions = set()
|
|
43
|
+
self._active_services = set()
|
|
44
|
+
self._action_status_subs = {}
|
|
45
|
+
self._active_goals = {}
|
|
46
|
+
self._topic_relations = {}
|
|
47
|
+
self._action_relations = {}
|
|
48
|
+
self._service_relations = {}
|
|
49
|
+
self._telemetry_enabled = True
|
|
50
|
+
self._topic_last_timestamp = {}
|
|
51
|
+
self._topic_rate_history = {}
|
|
52
|
+
self._rate_history_depth = 8
|
|
53
|
+
self._node_parameter_cache = {}
|
|
54
|
+
self._parameter_fetch_inflight = {}
|
|
55
|
+
|
|
56
|
+
self._last_sent_nodes = None
|
|
57
|
+
self._last_sent_topics = None
|
|
58
|
+
self._last_sent_actions = None
|
|
59
|
+
self._last_sent_services = None
|
|
60
|
+
|
|
61
|
+
self._check_graph_changes()
|
|
62
|
+
self.create_timer(0.1, self._check_graph_changes)
|
|
63
|
+
self.create_timer(5.0, self._refresh_all_parameters)
|
|
64
|
+
self.create_timer(1.0, self._collect_telemetry)
|
|
65
|
+
|
|
66
|
+
threading.Thread(target=self._run_ws_client, daemon=True).start()
|
|
67
|
+
|
|
68
|
+
# Create event loop and queue, run websocket client
|
|
69
|
+
def _run_ws_client(self):
|
|
70
|
+
self.loop = asyncio.new_event_loop()
|
|
71
|
+
asyncio.set_event_loop(self.loop)
|
|
72
|
+
self._send_queue = asyncio.Queue()
|
|
73
|
+
self.loop.run_until_complete(self._client_loop_with_reconnect())
|
|
74
|
+
|
|
75
|
+
# Wrapper for client loop with exponential backoff reconnection
|
|
76
|
+
async def _client_loop_with_reconnect(self):
|
|
77
|
+
"""Wrapper that handles reconnection."""
|
|
78
|
+
reconnect_delay = RECONNECT_INITIAL_DELAY
|
|
79
|
+
|
|
80
|
+
while True:
|
|
81
|
+
try:
|
|
82
|
+
self.get_logger().info("Attempting to connect to gateway...")
|
|
83
|
+
await self._client_loop()
|
|
84
|
+
except Exception as e:
|
|
85
|
+
self.get_logger().error(f"Connection failed: {e}")
|
|
86
|
+
|
|
87
|
+
self.get_logger().info(f"Reconnecting in {reconnect_delay} seconds...")
|
|
88
|
+
await asyncio.sleep(reconnect_delay)
|
|
89
|
+
|
|
90
|
+
reconnect_delay = min(reconnect_delay * 2, RECONNECT_MAX_DELAY)
|
|
91
|
+
import random
|
|
92
|
+
reconnect_delay += random.uniform(0, 1) # Jitter prevents thundering herd
|
|
93
|
+
|
|
94
|
+
    # Main client loop for sending and receiving messages
    async def _client_loop(self):
        """Run one websocket session: connect, authenticate, then pump messages.

        Returns normally on a failed auth handshake; re-raises other errors so
        the reconnect wrapper can back off and retry.
        """
        send_task = None
        try:
            async with websockets.connect(self.ws_url) as ws:
                self.get_logger().info("Connected to gateway (socket opened)")
                # Wait for gateway auth response before sending initial state
                try:
                    auth_msg = await ws.recv()
                except Exception as e:
                    self.get_logger().error(f"Failed to receive auth message: {e}")
                    return

                try:
                    auth_data = json.loads(auth_msg)
                except Exception:
                    # A non-JSON auth reply is treated as a refusal below.
                    auth_data = None

                self.get_logger().debug(f"Gateway auth message received: {auth_msg}")

                if not auth_data or auth_data.get('type') != 'auth_success':
                    self.get_logger().error(f"Gateway did not authenticate: parsed={auth_data}")
                    return

                self.get_logger().info("Authenticated with gateway")

                # Publish the socket only after successful auth so other
                # threads never see a half-established connection.
                self.ws = ws

                send_task = asyncio.create_task(self._send_loop(ws))

                await self._send_initial_state()

                # Blocks until the socket closes or errors.
                await self._receive_loop(ws)
        except Exception as e:
            self.get_logger().error(f"Error in client loop: {e}")
            raise
        finally:
            # Always stop the sender and clear self.ws, whether we exited
            # cleanly, failed auth, or the connection dropped.
            if send_task and not send_task.done():
                send_task.cancel()
                try:
                    await send_task
                except asyncio.CancelledError:
                    pass

            self.ws = None
            self.get_logger().info("Connection closed, cleaning up...")
|
|
140
|
+
|
|
141
|
+
# Collect and send complete ROS graph state on connection
|
|
142
|
+
async def _send_initial_state(self):
|
|
143
|
+
"""Send complete initial state as a single message"""
|
|
144
|
+
nodes = self._get_nodes_with_relations()
|
|
145
|
+
actions = self._get_actions_with_relations()
|
|
146
|
+
services = self._get_services_with_relations()
|
|
147
|
+
topics = self._get_topics_with_relations()
|
|
148
|
+
|
|
149
|
+
self._last_sent_nodes = nodes.copy()
|
|
150
|
+
self._last_sent_actions = actions.copy()
|
|
151
|
+
self._last_sent_services = services.copy()
|
|
152
|
+
self._last_sent_topics = topics.copy()
|
|
153
|
+
|
|
154
|
+
message = {
|
|
155
|
+
'type': 'initial_state',
|
|
156
|
+
'timestamp': time.time(),
|
|
157
|
+
'data': {
|
|
158
|
+
'nodes': nodes,
|
|
159
|
+
'topics': topics,
|
|
160
|
+
'actions': actions,
|
|
161
|
+
'services': services,
|
|
162
|
+
'telemetry': self._get_telemetry_snapshot(),
|
|
163
|
+
}
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
await self._send_queue.put(json.dumps(message))
|
|
167
|
+
self.get_logger().info(f"Sent initial state: {len(nodes)} nodes, {len(topics)} topics, {len(actions)} actions, {len(services)} services")
|
|
168
|
+
|
|
169
|
+
await self._send_bridge_subscriptions()
|
|
170
|
+
|
|
171
|
+
# Send list of currently subscribed topics to gateway
|
|
172
|
+
async def _send_bridge_subscriptions(self):
|
|
173
|
+
"""Send current bridge subscriptions as a separate message."""
|
|
174
|
+
with self._topic_subs_lock:
|
|
175
|
+
subscriptions = list(self._topic_subs.keys())
|
|
176
|
+
|
|
177
|
+
message = {
|
|
178
|
+
'type': 'bridge_subscriptions',
|
|
179
|
+
'subscriptions': subscriptions,
|
|
180
|
+
'timestamp': time.time()
|
|
181
|
+
}
|
|
182
|
+
await self._send_queue.put(json.dumps(message))
|
|
183
|
+
self.get_logger().debug(f"Sent bridge subscriptions: {len(subscriptions)} topics")
|
|
184
|
+
|
|
185
|
+
# Receive and handle commands from gateway
|
|
186
|
+
async def _receive_loop(self, ws):
|
|
187
|
+
async for msg in ws:
|
|
188
|
+
try:
|
|
189
|
+
data = json.loads(msg)
|
|
190
|
+
msg_type = data.get('type')
|
|
191
|
+
|
|
192
|
+
if msg_type == 'subscribe':
|
|
193
|
+
topic = data.get('topic')
|
|
194
|
+
if topic:
|
|
195
|
+
self.get_logger().info(f"Subscribing to topic: {topic}")
|
|
196
|
+
self._subscribe_to_topic(topic)
|
|
197
|
+
else:
|
|
198
|
+
self.get_logger().warn("Subscribe message missing topic field")
|
|
199
|
+
|
|
200
|
+
elif msg_type == 'unsubscribe':
|
|
201
|
+
topic = data.get('topic')
|
|
202
|
+
if topic:
|
|
203
|
+
self._unsubscribe_from_topic(topic)
|
|
204
|
+
else:
|
|
205
|
+
self.get_logger().warn("Unsubscribe message missing topic field")
|
|
206
|
+
|
|
207
|
+
elif msg_type == 'start_telemetry':
|
|
208
|
+
self._telemetry_enabled = True
|
|
209
|
+
self.get_logger().info("Telemetry started")
|
|
210
|
+
|
|
211
|
+
elif msg_type == 'stop_telemetry':
|
|
212
|
+
self._telemetry_enabled = False
|
|
213
|
+
self.get_logger().info("Telemetry stopped")
|
|
214
|
+
else:
|
|
215
|
+
self.get_logger().warn(f"Unknown message type: {msg_type}")
|
|
216
|
+
|
|
217
|
+
except json.JSONDecodeError as e:
|
|
218
|
+
self.get_logger().error(f"Invalid JSON received: {e}")
|
|
219
|
+
except Exception as e:
|
|
220
|
+
self.get_logger().error(f"Error processing message: {e}")
|
|
221
|
+
|
|
222
|
+
# Send messages out
|
|
223
|
+
async def _send_loop(self, ws):
|
|
224
|
+
while True:
|
|
225
|
+
msg = await self._send_queue.get()
|
|
226
|
+
try:
|
|
227
|
+
# Log truncated message for debugging
|
|
228
|
+
self.get_logger().debug(f"_send_loop: sending message (len={len(msg)}): {msg[:200]}")
|
|
229
|
+
await ws.send(msg)
|
|
230
|
+
self.get_logger().debug("_send_loop: message sent")
|
|
231
|
+
except Exception as e:
|
|
232
|
+
self.get_logger().error(f"_send_loop: failed to send message: {e}")
|
|
233
|
+
# If sending fails, log and continue (do not drop the loop)
|
|
234
|
+
try:
|
|
235
|
+
# small delay to avoid busy loop on persistent error
|
|
236
|
+
await asyncio.sleep(0.1)
|
|
237
|
+
except Exception:
|
|
238
|
+
pass
|
|
239
|
+
|
|
240
|
+
# Create ROS subscription for topic with validation and limits
|
|
241
|
+
def _subscribe_to_topic(self, topic_name):
|
|
242
|
+
if not topic_name or not isinstance(topic_name, str):
|
|
243
|
+
self.get_logger().warn(f"Invalid topic name: {topic_name}")
|
|
244
|
+
return
|
|
245
|
+
|
|
246
|
+
with self._topic_subs_lock:
|
|
247
|
+
if topic_name in self._topic_subs:
|
|
248
|
+
return
|
|
249
|
+
|
|
250
|
+
if len(self._topic_subs) >= MAX_SUBSCRIPTIONS:
|
|
251
|
+
self.get_logger().error(f"Subscription limit reached ({MAX_SUBSCRIPTIONS}). Cannot subscribe to {topic_name}")
|
|
252
|
+
return
|
|
253
|
+
|
|
254
|
+
topic_types = dict(self.get_topic_names_and_types()).get(topic_name)
|
|
255
|
+
if not topic_types:
|
|
256
|
+
self.get_logger().warn(f"Topic {topic_name} not found in ROS graph")
|
|
257
|
+
return
|
|
258
|
+
|
|
259
|
+
msg_class = get_message(topic_types[0])
|
|
260
|
+
sub = self.create_subscription(
|
|
261
|
+
msg_class,
|
|
262
|
+
topic_name,
|
|
263
|
+
lambda msg, t=topic_name: self._on_topic_msg(msg, t),
|
|
264
|
+
QoSProfile(depth=10)
|
|
265
|
+
)
|
|
266
|
+
|
|
267
|
+
with self._topic_subs_lock:
|
|
268
|
+
self._topic_subs[topic_name] = sub
|
|
269
|
+
|
|
270
|
+
self._update_topic_relations()
|
|
271
|
+
self.get_logger().info(f"Subscribed to {topic_name}")
|
|
272
|
+
|
|
273
|
+
asyncio.create_task(self._send_bridge_subscriptions())
|
|
274
|
+
|
|
275
|
+
# Destroy ROS subscription and update gateway
|
|
276
|
+
def _unsubscribe_from_topic(self, topic_name):
|
|
277
|
+
with self._topic_subs_lock:
|
|
278
|
+
if topic_name in self._topic_subs:
|
|
279
|
+
self.destroy_subscription(self._topic_subs[topic_name])
|
|
280
|
+
del self._topic_subs[topic_name]
|
|
281
|
+
|
|
282
|
+
self._update_topic_relations()
|
|
283
|
+
self.get_logger().info(f"Unsubscribed from {topic_name}")
|
|
284
|
+
|
|
285
|
+
asyncio.run_coroutine_threadsafe(
|
|
286
|
+
self._send_bridge_subscriptions(),
|
|
287
|
+
self.loop
|
|
288
|
+
)
|
|
289
|
+
|
|
290
|
+
# Handle incoming topic message, calculate rate, send to gateway
|
|
291
|
+
def _on_topic_msg(self, msg, topic_name):
|
|
292
|
+
if not self.ws or not self.loop:
|
|
293
|
+
return
|
|
294
|
+
|
|
295
|
+
timestamp = time.time()
|
|
296
|
+
last_timestamp = self._topic_last_timestamp.get(topic_name)
|
|
297
|
+
if last_timestamp is not None:
|
|
298
|
+
delta = timestamp - last_timestamp
|
|
299
|
+
if delta > 0:
|
|
300
|
+
history = self._topic_rate_history.setdefault(topic_name, deque(maxlen=self._rate_history_depth))
|
|
301
|
+
history.append(delta)
|
|
302
|
+
self._topic_last_timestamp[topic_name] = timestamp
|
|
303
|
+
|
|
304
|
+
rate = None
|
|
305
|
+
history = self._topic_rate_history.get(topic_name)
|
|
306
|
+
if history:
|
|
307
|
+
total = sum(history)
|
|
308
|
+
if total > 0:
|
|
309
|
+
rate = len(history) / total
|
|
310
|
+
|
|
311
|
+
event = {
|
|
312
|
+
'type': 'topic_data',
|
|
313
|
+
'topic': topic_name,
|
|
314
|
+
'data': message_to_ordereddict(msg),
|
|
315
|
+
'rate_hz': rate,
|
|
316
|
+
'timestamp': timestamp
|
|
317
|
+
}
|
|
318
|
+
self.get_logger().debug(f"Received message on {topic_name}")
|
|
319
|
+
self._send_event_and_update(event, f"Topic data: {topic_name}")
|
|
320
|
+
|
|
321
|
+
# Update topic publishers and subscribers
|
|
322
|
+
def _update_topic_relations(self):
|
|
323
|
+
"""Update the cached topic relations."""
|
|
324
|
+
current_topics = set(dict(self.get_topic_names_and_types()).keys())
|
|
325
|
+
current_topic_relations = {}
|
|
326
|
+
|
|
327
|
+
for topic_name in current_topics:
|
|
328
|
+
publishers = {pub.node_name for pub in self.get_publishers_info_by_topic(topic_name)}
|
|
329
|
+
subscribers = {sub.node_name for sub in self.get_subscriptions_info_by_topic(topic_name)}
|
|
330
|
+
current_topic_relations[topic_name] = {
|
|
331
|
+
'publishers': publishers,
|
|
332
|
+
'subscribers': subscribers
|
|
333
|
+
}
|
|
334
|
+
|
|
335
|
+
self._topic_relations = current_topic_relations
|
|
336
|
+
|
|
337
|
+
# Get topics with publishers, subscribers, and QoS profiles
|
|
338
|
+
def _get_topics_with_relations(self):
|
|
339
|
+
"""Get topics with their publishers and subscribers with QoS info (uses cached data)."""
|
|
340
|
+
self._update_topic_relations()
|
|
341
|
+
topics_with_relations = {}
|
|
342
|
+
topic_types_dict = dict(self.get_topic_names_and_types())
|
|
343
|
+
|
|
344
|
+
for topic_name, relations in self._topic_relations.items():
|
|
345
|
+
publishers_list = []
|
|
346
|
+
pub_info_list = self.get_publishers_info_by_topic(topic_name)
|
|
347
|
+
for pub_info in pub_info_list:
|
|
348
|
+
publishers_list.append({
|
|
349
|
+
'node': pub_info.node_name,
|
|
350
|
+
'qos': self._qos_profile_to_dict(pub_info.qos_profile)
|
|
351
|
+
})
|
|
352
|
+
|
|
353
|
+
subscribers_list = []
|
|
354
|
+
sub_info_list = self.get_subscriptions_info_by_topic(topic_name)
|
|
355
|
+
for sub_info in sub_info_list:
|
|
356
|
+
subscribers_list.append({
|
|
357
|
+
'node': sub_info.node_name,
|
|
358
|
+
'qos': self._qos_profile_to_dict(sub_info.qos_profile)
|
|
359
|
+
})
|
|
360
|
+
|
|
361
|
+
topics_with_relations[topic_name] = {
|
|
362
|
+
'type': topic_types_dict.get(topic_name, ['unknown'])[0],
|
|
363
|
+
'publishers': publishers_list,
|
|
364
|
+
'subscribers': subscribers_list,
|
|
365
|
+
}
|
|
366
|
+
return topics_with_relations
|
|
367
|
+
|
|
368
|
+
# Convert ROS QoS profile to dictionary
|
|
369
|
+
def _qos_profile_to_dict(self, qos_profile):
|
|
370
|
+
"""Convert a QoS profile to a dictionary."""
|
|
371
|
+
if not qos_profile:
|
|
372
|
+
return None
|
|
373
|
+
|
|
374
|
+
return {
|
|
375
|
+
'reliability': qos_profile.reliability.name if hasattr(qos_profile.reliability, 'name') else str(qos_profile.reliability),
|
|
376
|
+
'durability': qos_profile.durability.name if hasattr(qos_profile.durability, 'name') else str(qos_profile.durability),
|
|
377
|
+
'history': qos_profile.history.name if hasattr(qos_profile.history, 'name') else str(qos_profile.history),
|
|
378
|
+
'depth': qos_profile.depth,
|
|
379
|
+
'liveliness': qos_profile.liveliness.name if hasattr(qos_profile.liveliness, 'name') else str(qos_profile.liveliness),
|
|
380
|
+
}
|
|
381
|
+
|
|
382
|
+
# Get all parameters for a node using ROS services
|
|
383
|
+
def _get_node_parameters(self, node_name):
|
|
384
|
+
"""Get parameters for a specific node using the ROS parameter services."""
|
|
385
|
+
service_prefix = node_name if node_name.startswith('/') else f"/{node_name}"
|
|
386
|
+
param_names = self._list_node_parameters(service_prefix)
|
|
387
|
+
if not param_names:
|
|
388
|
+
return {}
|
|
389
|
+
|
|
390
|
+
param_values = self._get_node_parameter_values(service_prefix, param_names)
|
|
391
|
+
parameters = {}
|
|
392
|
+
for name, value in zip(param_names, param_values):
|
|
393
|
+
try:
|
|
394
|
+
parameters[name] = parameter_value_to_python(value)
|
|
395
|
+
except Exception as e:
|
|
396
|
+
self.get_logger().debug(f"Could not convert parameter {name}: {e}")
|
|
397
|
+
return parameters
|
|
398
|
+
|
|
399
|
+
# Call list_parameters service for a node
|
|
400
|
+
def _list_node_parameters(self, service_prefix, timeout_sec=0.2):
|
|
401
|
+
service_name = f"{service_prefix}/list_parameters"
|
|
402
|
+
client = self.create_client(ListParameters, service_name)
|
|
403
|
+
if not client.wait_for_service(timeout_sec=timeout_sec):
|
|
404
|
+
self.destroy_client(client)
|
|
405
|
+
return []
|
|
406
|
+
|
|
407
|
+
request = ListParameters.Request()
|
|
408
|
+
request.depth = 10
|
|
409
|
+
future = client.call_async(request)
|
|
410
|
+
rclpy.spin_until_future_complete(self, future, timeout_sec=timeout_sec)
|
|
411
|
+
response = future.result()
|
|
412
|
+
self.destroy_client(client)
|
|
413
|
+
|
|
414
|
+
if response is None:
|
|
415
|
+
return []
|
|
416
|
+
return list(response.result.names)
|
|
417
|
+
|
|
418
|
+
# Call get_parameters service for a node
|
|
419
|
+
def _get_node_parameter_values(self, service_prefix, names, timeout_sec=0.2):
|
|
420
|
+
service_name = f"{service_prefix}/get_parameters"
|
|
421
|
+
client = self.create_client(GetParameters, service_name)
|
|
422
|
+
if not client.wait_for_service(timeout_sec=timeout_sec):
|
|
423
|
+
self.destroy_client(client)
|
|
424
|
+
return []
|
|
425
|
+
|
|
426
|
+
request = GetParameters.Request()
|
|
427
|
+
request.names = names
|
|
428
|
+
future = client.call_async(request)
|
|
429
|
+
rclpy.spin_until_future_complete(self, future, timeout_sec=timeout_sec)
|
|
430
|
+
response = future.result()
|
|
431
|
+
self.destroy_client(client)
|
|
432
|
+
|
|
433
|
+
if response is None:
|
|
434
|
+
return []
|
|
435
|
+
return list(response.values)
|
|
436
|
+
|
|
437
|
+
# Trigger async parameter fetch for all nodes
|
|
438
|
+
def _refresh_all_parameters(self):
|
|
439
|
+
for node_name in self.get_node_names():
|
|
440
|
+
if node_name in self._parameter_fetch_inflight:
|
|
441
|
+
continue
|
|
442
|
+
self._start_parameter_fetch(node_name)
|
|
443
|
+
|
|
444
|
+
# Begin async parameter fetch for a node
|
|
445
|
+
def _start_parameter_fetch(self, node_name):
|
|
446
|
+
service_prefix = node_name if node_name.startswith('/') else f"/{node_name}"
|
|
447
|
+
service_name = f"{service_prefix}/list_parameters"
|
|
448
|
+
client = self.create_client(ListParameters, service_name)
|
|
449
|
+
if not client.wait_for_service(timeout_sec=0.2):
|
|
450
|
+
self.destroy_client(client)
|
|
451
|
+
return
|
|
452
|
+
|
|
453
|
+
request = ListParameters.Request()
|
|
454
|
+
request.depth = 10
|
|
455
|
+
future = client.call_async(request)
|
|
456
|
+
self._parameter_fetch_inflight[node_name] = {
|
|
457
|
+
'list_client': client,
|
|
458
|
+
'get_client': None,
|
|
459
|
+
'get_names': None,
|
|
460
|
+
}
|
|
461
|
+
future.add_done_callback(
|
|
462
|
+
lambda fut, node=node_name, client=client: self._on_list_parameters(node, fut, client)
|
|
463
|
+
)
|
|
464
|
+
|
|
465
|
+
    # Handle list_parameters response, start get_parameters request
    def _on_list_parameters(self, node_name, future, client):
        """Stage 2 of the parameter fetch: name list arrived, request the values.

        Runs as a call_async done-callback. On any failure the fetch state is
        torn down via _cleanup_parameter_fetch so the node can be retried on a
        later refresh cycle.
        """
        inflight = self._parameter_fetch_inflight.get(node_name)
        if not inflight:
            # Fetch state was cleaned up while the call was in flight.
            self.destroy_client(client)
            return

        self.destroy_client(client)
        inflight['list_client'] = None

        response = None
        try:
            response = future.result()
        except Exception:
            # A raised result is treated the same as an empty response below.
            pass

        if not response or not response.result.names:
            # No parameters (or the call failed): cache an empty mapping.
            self._node_parameter_cache[node_name] = {}
            self._cleanup_parameter_fetch(node_name)
            return

        names = response.result.names
        inflight['get_names'] = names

        service_prefix = node_name if node_name.startswith('/') else f"/{node_name}"
        service_name = f"{service_prefix}/get_parameters"
        get_client = self.create_client(GetParameters, service_name)
        if not get_client.wait_for_service(timeout_sec=0.2):
            self.destroy_client(get_client)
            self._cleanup_parameter_fetch(node_name)
            return

        request = GetParameters.Request()
        request.names = names
        future = get_client.call_async(request)
        inflight['get_client'] = get_client
        future.add_done_callback(
            lambda fut, node=node_name, client=get_client: self._on_get_parameters(node, fut, client)
        )
|
|
504
|
+
|
|
505
|
+
    # Handle get_parameters response, update cache
    def _on_get_parameters(self, node_name, future, client):
        """Final stage of the parameter fetch: store converted values in the cache.

        Runs as a call_async done-callback; always ends by clearing the
        in-flight bookkeeping for *node_name*.
        """
        inflight = self._parameter_fetch_inflight.get(node_name)
        if not inflight:
            # Fetch state was cleaned up while the call was in flight.
            self.destroy_client(client)
            return

        self.destroy_client(client)
        inflight['get_client'] = None

        response = None
        try:
            response = future.result()
        except Exception:
            # A failed call leaves params empty; the cache is still refreshed.
            pass

        params = {}
        names = inflight.get('get_names') or []
        if response:
            for name, value in zip(names, response.values):
                try:
                    params[name] = parameter_value_to_python(value)
                except Exception as e:
                    self.get_logger().debug(f"Could not convert parameter {name} for {node_name}: {e}")

        self._node_parameter_cache[node_name] = params
        self._cleanup_parameter_fetch(node_name)
|
|
532
|
+
|
|
533
|
+
# Clean up parameter fetch clients and state
|
|
534
|
+
def _cleanup_parameter_fetch(self, node_name):
|
|
535
|
+
inflight = self._parameter_fetch_inflight.pop(node_name, None)
|
|
536
|
+
if not inflight:
|
|
537
|
+
return
|
|
538
|
+
|
|
539
|
+
for key in ('list_client', 'get_client'):
|
|
540
|
+
client = inflight.get(key)
|
|
541
|
+
if client:
|
|
542
|
+
self.destroy_client(client)
|
|
543
|
+
|
|
544
|
+
# Get nodes with their topics, actions, services, and parameters
|
|
545
|
+
def _get_nodes_with_relations(self):
|
|
546
|
+
"""Get nodes with the topics they publish and subscribe to (derived from cached topic relations)."""
|
|
547
|
+
nodes_with_relations = {}
|
|
548
|
+
|
|
549
|
+
self._update_action_relations()
|
|
550
|
+
self._update_service_relations()
|
|
551
|
+
|
|
552
|
+
for node_name in self._active_nodes:
|
|
553
|
+
nodes_with_relations[node_name] = {
|
|
554
|
+
'publishes': [],
|
|
555
|
+
'subscribes': [],
|
|
556
|
+
'actions': [],
|
|
557
|
+
'services': [],
|
|
558
|
+
'parameters': {}
|
|
559
|
+
}
|
|
560
|
+
|
|
561
|
+
for topic_name, relations in self._topic_relations.items():
|
|
562
|
+
for node_name in relations['publishers']:
|
|
563
|
+
if node_name in nodes_with_relations:
|
|
564
|
+
pub_info_list = self.get_publishers_info_by_topic(topic_name)
|
|
565
|
+
qos_profile = None
|
|
566
|
+
for pub_info in pub_info_list:
|
|
567
|
+
if pub_info.node_name == node_name:
|
|
568
|
+
qos_profile = self._qos_profile_to_dict(pub_info.qos_profile)
|
|
569
|
+
break
|
|
570
|
+
|
|
571
|
+
nodes_with_relations[node_name]['publishes'].append({
|
|
572
|
+
'topic': topic_name,
|
|
573
|
+
'qos': qos_profile
|
|
574
|
+
})
|
|
575
|
+
|
|
576
|
+
for node_name in relations['subscribers']:
|
|
577
|
+
if node_name in nodes_with_relations:
|
|
578
|
+
sub_info_list = self.get_subscriptions_info_by_topic(topic_name)
|
|
579
|
+
qos_profile = None
|
|
580
|
+
for sub_info in sub_info_list:
|
|
581
|
+
if sub_info.node_name == node_name:
|
|
582
|
+
qos_profile = self._qos_profile_to_dict(sub_info.qos_profile)
|
|
583
|
+
break
|
|
584
|
+
|
|
585
|
+
nodes_with_relations[node_name]['subscribes'].append({
|
|
586
|
+
'topic': topic_name,
|
|
587
|
+
'qos': qos_profile
|
|
588
|
+
})
|
|
589
|
+
|
|
590
|
+
for action_name, relations in self._action_relations.items():
|
|
591
|
+
for node_name in relations['providers']:
|
|
592
|
+
if node_name in nodes_with_relations:
|
|
593
|
+
nodes_with_relations[node_name]['actions'].append(action_name)
|
|
594
|
+
|
|
595
|
+
for service_name, relations in self._service_relations.items():
|
|
596
|
+
for node_name in relations['providers']:
|
|
597
|
+
if node_name in nodes_with_relations:
|
|
598
|
+
nodes_with_relations[node_name]['services'].append(service_name)
|
|
599
|
+
|
|
600
|
+
cache = self._node_parameter_cache
|
|
601
|
+
for node_name in nodes_with_relations.keys():
|
|
602
|
+
nodes_with_relations[node_name]['parameters'] = cache.get(node_name, {})
|
|
603
|
+
return nodes_with_relations
|
|
604
|
+
|
|
605
|
+
# Update cached action providers by detecting status topics
|
|
606
|
+
def _update_action_relations(self):
|
|
607
|
+
"""Update the cached action relations."""
|
|
608
|
+
action_relations = {}
|
|
609
|
+
|
|
610
|
+
for topic_name in self.get_topic_names_and_types():
|
|
611
|
+
if topic_name[0].endswith('/_action/status'):
|
|
612
|
+
action_name = topic_name[0].replace('/_action/status', '')
|
|
613
|
+
providers = [info.node_name for info in self.get_publishers_info_by_topic(topic_name[0])]
|
|
614
|
+
action_relations[action_name] = {
|
|
615
|
+
'providers': set(providers),
|
|
616
|
+
}
|
|
617
|
+
|
|
618
|
+
self._action_relations = action_relations
|
|
619
|
+
|
|
620
|
+
# Get actions with their provider nodes
|
|
621
|
+
def _get_actions_with_relations(self):
|
|
622
|
+
"""Get actions from status topics and update cached action relations."""
|
|
623
|
+
self._update_action_relations()
|
|
624
|
+
|
|
625
|
+
actions_with_relations = {}
|
|
626
|
+
for action_name, relations in self._action_relations.items():
|
|
627
|
+
actions_with_relations[action_name] = {
|
|
628
|
+
'providers': list(relations['providers']),
|
|
629
|
+
}
|
|
630
|
+
|
|
631
|
+
return actions_with_relations
|
|
632
|
+
|
|
633
|
+
# Update cached service providers by querying nodes
|
|
634
|
+
def _update_service_relations(self):
|
|
635
|
+
"""Update the cached service relations."""
|
|
636
|
+
service_relations = {}
|
|
637
|
+
|
|
638
|
+
all_services = self.get_service_names_and_types()
|
|
639
|
+
|
|
640
|
+
for service_name, service_types in all_services:
|
|
641
|
+
providers = set()
|
|
642
|
+
for node_name in self.get_node_names():
|
|
643
|
+
try:
|
|
644
|
+
# Extract namespace from node name (format: /namespace/node_name or /node_name)
|
|
645
|
+
node_namespace = '/'
|
|
646
|
+
if '/' in node_name[1:]: # Has namespace
|
|
647
|
+
parts = node_name[1:].split('/', 1)
|
|
648
|
+
node_namespace = '/' + parts[0]
|
|
649
|
+
node_only = parts[1]
|
|
650
|
+
else: # No namespace
|
|
651
|
+
node_only = node_name[1:] if node_name.startswith('/') else node_name
|
|
652
|
+
|
|
653
|
+
node_services = self.get_service_names_and_types_by_node(node_only, node_namespace)
|
|
654
|
+
if any(svc_name == service_name for svc_name, _ in node_services):
|
|
655
|
+
providers.add(node_name)
|
|
656
|
+
except Exception as e:
|
|
657
|
+
self.get_logger().debug(f"Error checking services for node {node_name}: {e}")
|
|
658
|
+
|
|
659
|
+
service_relations[service_name] = {
|
|
660
|
+
'providers': providers,
|
|
661
|
+
'type': service_types[0] if service_types else 'unknown'
|
|
662
|
+
}
|
|
663
|
+
|
|
664
|
+
self._service_relations = service_relations
|
|
665
|
+
|
|
666
|
+
# Get services with their provider nodes and types
|
|
667
|
+
def _get_services_with_relations(self):
|
|
668
|
+
"""Get services with their providers and update cached service relations."""
|
|
669
|
+
self._update_service_relations()
|
|
670
|
+
|
|
671
|
+
services_with_relations = {}
|
|
672
|
+
for service_name, relations in self._service_relations.items():
|
|
673
|
+
services_with_relations[service_name] = {
|
|
674
|
+
'providers': list(relations['providers']),
|
|
675
|
+
'type': relations['type']
|
|
676
|
+
}
|
|
677
|
+
|
|
678
|
+
return services_with_relations
|
|
679
|
+
|
|
680
|
+
# Poll ROS graph for changes and send events
|
|
681
|
+
def _check_graph_changes(self) -> None:
    """Check for node, topic, action, and publisher/subscriber changes.

    Diffs the current ROS graph against the state cached during the
    previous poll, emits one gateway event per difference via
    _send_event_and_update, then replaces the cached state with the
    current snapshot.
    """
    current_nodes = set(self.get_node_names())
    current_topics = set(dict(self.get_topic_names_and_types()).keys())

    # Actions are discovered indirectly: every action server publishes a
    # hidden "<action>/_action/status" topic.
    current_actions = {t.replace('/_action/status', '') for t in current_topics if t.endswith('/_action/status')}

    current_topic_relations = {}

    # Lazily create the per-topic subscriber cache on the first poll.
    if not hasattr(self, '_last_topic_subscribers'):
        self._last_topic_subscribers = {}

    for topic_name in current_topics:
        publishers = {pub.node_name for pub in self.get_publishers_info_by_topic(topic_name)}
        subscribers = {sub.node_name for sub in self.get_subscriptions_info_by_topic(topic_name)}
        current_topic_relations[topic_name] = {
            'publishers': publishers,
            'subscribers': subscribers
        }

        # Per-node subscribe/unsubscribe events, derived by set difference
        # against the previous poll's subscribers for this topic.
        prev_subs = self._last_topic_subscribers.get(topic_name, set())
        new_subs = subscribers - prev_subs
        for node_name in new_subs:
            event = {
                'type': 'topic_event',
                'topic': topic_name,
                'node': node_name,
                'event': 'subscribed',
                'timestamp': time.time()
            }
            self._send_event_and_update(event, f"Node {node_name} subscribed to {topic_name}")

        removed_subs = prev_subs - subscribers
        for node_name in removed_subs:
            event = {
                'type': 'topic_event',
                'topic': topic_name,
                'node': node_name,
                'event': 'unsubscribed',
                'timestamp': time.time()
            }
            self._send_event_and_update(event, f"Node {node_name} unsubscribed from {topic_name}")

        # Publisher changes get no dedicated event payload; passing None
        # still triggers a full graph refresh in _send_event_and_update.
        if topic_name in self._topic_relations:
            old_pubs = self._topic_relations[topic_name]['publishers']
            if publishers != old_pubs:
                self._send_event_and_update(None, f"Topic publishers changed: {topic_name}")

    self._last_topic_subscribers = {topic: set(rel['subscribers']) for topic, rel in current_topic_relations.items()}

    # Node lifecycle events.
    started_nodes = current_nodes - self._active_nodes
    for node_name in started_nodes:
        event = {
            'type': 'node_event',
            'node': node_name,
            'event': 'started',
            'timestamp': time.time()
        }
        self._send_event_and_update(event, f"Node started: {node_name}")

    stopped_nodes = self._active_nodes - current_nodes
    for node_name in stopped_nodes:
        event = {
            'type': 'node_event',
            'node': node_name,
            'event': 'stopped',
            'timestamp': time.time()
        }
        self._send_event_and_update(event, f"Node stopped: {node_name}")

    # Topic lifecycle events.
    started_topics = current_topics - self._active_topics
    for topic_name in started_topics:
        event = {
            'type': 'topic_event',
            'topic': topic_name,
            'event': 'created',
            'timestamp': time.time()
        }
        self._send_event_and_update(event, f"Topic created: {topic_name}")

    stopped_topics = self._active_topics - current_topics
    for topic_name in stopped_topics:
        event = {
            'type': 'topic_event',
            'topic': topic_name,
            'event': 'destroyed',
            'timestamp': time.time()
        }
        self._send_event_and_update(event, f"Topic destroyed: {topic_name}")

    # Action lifecycle events.
    started_actions = current_actions - self._active_actions
    if started_actions:
        self.get_logger().info(f"New actions detected: {started_actions}")
    for action_name in started_actions:
        event = {
            'type': 'action_event',
            'action': action_name,
            'event': 'created',
            'timestamp': time.time()
        }
        self._send_event_and_update(event, f"Action created: {action_name}")

    stopped_actions = self._active_actions - current_actions
    if stopped_actions:
        self.get_logger().info(f"Actions stopped: {stopped_actions}")
    for action_name in stopped_actions:
        event = {
            'type': 'action_event',
            'action': action_name,
            'event': 'destroyed',
            'timestamp': time.time()
        }
        self._send_event_and_update(event, f"Action destroyed: {action_name}")
        # Tear down the status subscription for the vanished action.
        if action_name in self._action_status_subs:
            self.destroy_subscription(self._action_status_subs[action_name])
            del self._action_status_subs[action_name]
            # NOTE(review): assumes _active_goals always has an entry when
            # _action_status_subs does; a KeyError otherwise — confirm.
            del self._active_goals[action_name]

    # Service lifecycle events (the ros2cli daemon's own services are
    # filtered out below as noise).
    current_services = {service_name for service_name, _ in self.get_service_names_and_types()}

    started_services = current_services - self._active_services
    for service_name in started_services:
        if service_name.startswith('/ros2cli_daemon'):
            continue
        event = {
            'type': 'service_event',
            'service': service_name,
            'event': 'created',
            'timestamp': time.time()
        }
        self._send_event_and_update(event, f"Service created: {service_name}")

    stopped_services = self._active_services - current_services
    for service_name in stopped_services:
        if service_name.startswith('/ros2cli_daemon'):
            continue
        event = {
            'type': 'service_event',
            'service': service_name,
            'event': 'destroyed',
            'timestamp': time.time()
        }
        self._send_event_and_update(event, f"Service destroyed: {service_name}")

    # Replace the cached graph state wholesale for the next poll.
    self._active_nodes = current_nodes
    self._active_topics = current_topics
    self._active_actions = current_actions
    self._active_services = current_services
    self._topic_relations = current_topic_relations
|
|
830
|
+
|
|
831
|
+
# Send event to gateway and trigger graph updates
|
|
832
|
+
def _send_event_and_update(self, event, log_message):
|
|
833
|
+
"""Send event and trigger update of all graph data."""
|
|
834
|
+
if not self.ws or not self.loop:
|
|
835
|
+
return
|
|
836
|
+
|
|
837
|
+
if event:
|
|
838
|
+
asyncio.run_coroutine_threadsafe(self._send_queue.put(json.dumps(event)), self.loop)
|
|
839
|
+
|
|
840
|
+
asyncio.run_coroutine_threadsafe(self._send_topics(), self.loop)
|
|
841
|
+
asyncio.run_coroutine_threadsafe(self._send_nodes(), self.loop)
|
|
842
|
+
asyncio.run_coroutine_threadsafe(self._send_actions(), self.loop)
|
|
843
|
+
asyncio.run_coroutine_threadsafe(self._send_services(), self.loop)
|
|
844
|
+
|
|
845
|
+
if log_message:
|
|
846
|
+
self.get_logger().debug(log_message)
|
|
847
|
+
|
|
848
|
+
# Send nodes to gateway if changed
|
|
849
|
+
async def _send_nodes(self):
|
|
850
|
+
"""Send current nodes list to gateway (only when changed)."""
|
|
851
|
+
nodes = self._get_nodes_with_relations()
|
|
852
|
+
|
|
853
|
+
if self._last_sent_nodes == nodes:
|
|
854
|
+
return
|
|
855
|
+
|
|
856
|
+
self._last_sent_nodes = nodes.copy()
|
|
857
|
+
|
|
858
|
+
message = {
|
|
859
|
+
'type': 'nodes',
|
|
860
|
+
'data': nodes,
|
|
861
|
+
'timestamp': time.time()
|
|
862
|
+
}
|
|
863
|
+
await self._send_queue.put(json.dumps(message))
|
|
864
|
+
self.get_logger().debug(f"Sent nodes list: {list(nodes.keys())}")
|
|
865
|
+
|
|
866
|
+
# Send topics to gateway if changed
|
|
867
|
+
async def _send_topics(self):
|
|
868
|
+
"""Send current topics list to gateway (only when changed)."""
|
|
869
|
+
topics = self._get_topics_with_relations()
|
|
870
|
+
|
|
871
|
+
if self._last_sent_topics == topics:
|
|
872
|
+
return
|
|
873
|
+
|
|
874
|
+
self._last_sent_topics = topics.copy()
|
|
875
|
+
|
|
876
|
+
message = {
|
|
877
|
+
'type': 'topics',
|
|
878
|
+
'data': topics,
|
|
879
|
+
'timestamp': time.time()
|
|
880
|
+
}
|
|
881
|
+
await self._send_queue.put(json.dumps(message))
|
|
882
|
+
self.get_logger().debug(f"Sent topics list: {list(topics.keys())}")
|
|
883
|
+
|
|
884
|
+
# Send actions to gateway if changed
|
|
885
|
+
async def _send_actions(self):
|
|
886
|
+
"""Send current actions list to gateway (only when changed)."""
|
|
887
|
+
actions = self._get_actions_with_relations()
|
|
888
|
+
|
|
889
|
+
if self._last_sent_actions == actions:
|
|
890
|
+
return
|
|
891
|
+
|
|
892
|
+
self._last_sent_actions = actions.copy()
|
|
893
|
+
|
|
894
|
+
message = {
|
|
895
|
+
'type': 'actions',
|
|
896
|
+
'data': actions,
|
|
897
|
+
'timestamp': time.time()
|
|
898
|
+
}
|
|
899
|
+
await self._send_queue.put(json.dumps(message))
|
|
900
|
+
self.get_logger().debug(f"Sent actions list: {list(actions.keys())}")
|
|
901
|
+
|
|
902
|
+
# Send services to gateway if changed
|
|
903
|
+
async def _send_services(self):
|
|
904
|
+
"""Send current services list to gateway (only when changed)."""
|
|
905
|
+
services = self._get_services_with_relations()
|
|
906
|
+
|
|
907
|
+
if self._last_sent_services == services:
|
|
908
|
+
return
|
|
909
|
+
|
|
910
|
+
self._last_sent_services = services.copy()
|
|
911
|
+
|
|
912
|
+
message = {
|
|
913
|
+
'type': 'services',
|
|
914
|
+
'data': services,
|
|
915
|
+
'timestamp': time.time()
|
|
916
|
+
}
|
|
917
|
+
await self._send_queue.put(json.dumps(message))
|
|
918
|
+
self.get_logger().debug(f"Sent services list: {list(services.keys())}")
|
|
919
|
+
|
|
920
|
+
# Collect and send system telemetry to gateway
|
|
921
|
+
def _collect_telemetry(self):
|
|
922
|
+
"""Collect system telemetry (CPU, RAM) and send to queue."""
|
|
923
|
+
if not self._telemetry_enabled or not self.ws or not self.loop:
|
|
924
|
+
return
|
|
925
|
+
data = self._get_telemetry_snapshot()
|
|
926
|
+
telemetry = {
|
|
927
|
+
'type': 'telemetry',
|
|
928
|
+
'data': data,
|
|
929
|
+
'timestamp': time.time()
|
|
930
|
+
}
|
|
931
|
+
|
|
932
|
+
asyncio.run_coroutine_threadsafe(
|
|
933
|
+
self._send_queue.put(json.dumps(telemetry)),
|
|
934
|
+
self.loop
|
|
935
|
+
)
|
|
936
|
+
|
|
937
|
+
# Return current CPU, RAM, and disk usage
|
|
938
|
+
def _get_telemetry_snapshot(self):
    """Return a snapshot of system telemetry (CPU, RAM, disk).

    Each psutil subsystem is sampled exactly once so the percent/used/total
    fields within 'ram' and 'disk' come from the same measurement, and the
    underlying syscalls are not repeated per field.

    Returns:
        dict: {'cpu': float, 'ram': {...}, 'disk': {...}} with RAM in MiB
        and disk in GiB.
    """
    mem = psutil.virtual_memory()
    disk = psutil.disk_usage('/')
    mib = 1024 * 1024
    gib = 1024 * 1024 * 1024
    return {
        # interval=None returns the CPU usage since the previous call
        # without blocking.
        'cpu': psutil.cpu_percent(interval=None),
        'ram': {
            'percent': mem.percent,
            'used_mb': mem.used / mib,
            'total_mb': mem.total / mib,
        },
        'disk': {
            'percent': disk.percent,
            'used_gb': disk.used / gib,
            'total_gb': disk.total / gib,
        }
    }
|
|
953
|
+
|
|
954
|
+
|
|
955
|
+
# Initialize ROS, create node, and run until shutdown
|
|
956
|
+
def main(args=None):
    """Initialize ROS, run the bridge node until shutdown, then clean up.

    Args:
        args: Optional command-line arguments forwarded to ``rclpy.init``.
    """
    rclpy.init(args=args)
    node = None
    try:
        # WebBridge() can raise (e.g. OSIRIS_AUTH_TOKEN unset); constructing
        # it inside the try guarantees rclpy.shutdown() still runs.
        node = WebBridge()
        rclpy.spin(node)
    finally:
        if node is not None:
            node.destroy_node()
        rclpy.shutdown()
|
|
964
|
+
|
|
965
|
+
|
|
966
|
+
# Run the bridge when this module is executed directly as a script.
if __name__ == '__main__':
    main()
|