fbuild 1.2.8__py3-none-any.whl → 1.2.15__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fbuild/__init__.py +5 -1
- fbuild/build/configurable_compiler.py +49 -6
- fbuild/build/configurable_linker.py +14 -9
- fbuild/build/orchestrator_esp32.py +6 -3
- fbuild/build/orchestrator_rp2040.py +6 -2
- fbuild/cli.py +300 -5
- fbuild/config/ini_parser.py +13 -1
- fbuild/daemon/__init__.py +11 -0
- fbuild/daemon/async_client.py +5 -4
- fbuild/daemon/async_client_lib.py +1543 -0
- fbuild/daemon/async_protocol.py +825 -0
- fbuild/daemon/async_server.py +2100 -0
- fbuild/daemon/client.py +425 -13
- fbuild/daemon/configuration_lock.py +13 -13
- fbuild/daemon/connection.py +508 -0
- fbuild/daemon/connection_registry.py +579 -0
- fbuild/daemon/daemon.py +517 -164
- fbuild/daemon/daemon_context.py +72 -1
- fbuild/daemon/device_discovery.py +477 -0
- fbuild/daemon/device_manager.py +821 -0
- fbuild/daemon/error_collector.py +263 -263
- fbuild/daemon/file_cache.py +332 -332
- fbuild/daemon/firmware_ledger.py +46 -123
- fbuild/daemon/lock_manager.py +508 -508
- fbuild/daemon/messages.py +431 -0
- fbuild/daemon/operation_registry.py +288 -288
- fbuild/daemon/processors/build_processor.py +34 -1
- fbuild/daemon/processors/deploy_processor.py +1 -3
- fbuild/daemon/processors/locking_processor.py +7 -7
- fbuild/daemon/request_processor.py +457 -457
- fbuild/daemon/shared_serial.py +7 -7
- fbuild/daemon/status_manager.py +238 -238
- fbuild/daemon/subprocess_manager.py +316 -316
- fbuild/deploy/docker_utils.py +182 -2
- fbuild/deploy/monitor.py +1 -1
- fbuild/deploy/qemu_runner.py +71 -13
- fbuild/ledger/board_ledger.py +46 -122
- fbuild/output.py +238 -2
- fbuild/packages/library_compiler.py +15 -5
- fbuild/packages/library_manager.py +12 -6
- fbuild-1.2.15.dist-info/METADATA +569 -0
- {fbuild-1.2.8.dist-info → fbuild-1.2.15.dist-info}/RECORD +46 -39
- fbuild-1.2.8.dist-info/METADATA +0 -468
- {fbuild-1.2.8.dist-info → fbuild-1.2.15.dist-info}/WHEEL +0 -0
- {fbuild-1.2.8.dist-info → fbuild-1.2.15.dist-info}/entry_points.txt +0 -0
- {fbuild-1.2.8.dist-info → fbuild-1.2.15.dist-info}/licenses/LICENSE +0 -0
- {fbuild-1.2.8.dist-info → fbuild-1.2.15.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,821 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Device Manager - Manages device inventory and leases for the fbuild daemon.
|
|
3
|
+
|
|
4
|
+
This module provides the DeviceManager class which handles:
|
|
5
|
+
|
|
6
|
+
- Device inventory tracking (discovered and tracked devices)
|
|
7
|
+
- Exclusive leases for deploy/flash/reset operations (single holder)
|
|
8
|
+
- Monitor leases for read-only access (multiple holders allowed)
|
|
9
|
+
- Automatic lease release on client disconnect
|
|
10
|
+
- Device preemption with mandatory reason
|
|
11
|
+
- Thread-safe operations using memory-based locks
|
|
12
|
+
|
|
13
|
+
The DeviceManager follows the same patterns as ConfigurationLockManager,
|
|
14
|
+
using threading.Lock for thread safety as per the project's locking strategy.
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
import _thread
|
|
18
|
+
import logging
|
|
19
|
+
import threading
|
|
20
|
+
import time
|
|
21
|
+
import uuid
|
|
22
|
+
from dataclasses import dataclass, field
|
|
23
|
+
from enum import Enum
|
|
24
|
+
from typing import Any
|
|
25
|
+
|
|
26
|
+
from fbuild.daemon.device_discovery import DeviceInfo, discover_devices
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class LeaseType(Enum):
    """Kinds of device lease supported by the daemon.

    EXCLUSIVE grants a single holder the right to deploy/flash/reset;
    MONITOR grants shared, read-only access to a device's output.
    """

    # Single-holder lease used by deploy/flash/reset operations.
    EXCLUSIVE = "exclusive"
    # Shared lease used by read-only monitoring sessions.
    MONITOR = "monitor"
|
+
|
|
36
|
+
@dataclass
class DeviceLease:
    """An active lease held by one client on one device.

    Attributes:
        device_id: Stable device ID this lease applies to.
        lease_id: Unique UUID string identifying this lease.
        client_id: Client currently holding the lease.
        lease_type: Whether the lease is exclusive or monitor.
        description: Human-readable description of the operation.
        acquired_at: Unix timestamp at which the lease was granted.
        allows_monitors: For exclusive leases, whether concurrent monitor
            leases are permitted (default True).
    """

    device_id: str
    lease_id: str
    client_id: str
    lease_type: LeaseType
    description: str
    acquired_at: float = field(default_factory=time.time)
    allows_monitors: bool = True

    def to_dict(self) -> dict[str, Any]:
        """Serialize this lease into a JSON-compatible dictionary."""
        # hold_duration is computed on the fly so callers always see the
        # current age of the lease.
        payload: dict[str, Any] = {
            "device_id": self.device_id,
            "lease_id": self.lease_id,
            "client_id": self.client_id,
            "lease_type": self.lease_type.value,
            "description": self.description,
            "acquired_at": self.acquired_at,
            "allows_monitors": self.allows_monitors,
            "hold_duration": time.time() - self.acquired_at,
        }
        return payload

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "DeviceLease":
        """Rebuild a DeviceLease from a dictionary produced by to_dict()."""
        # Identity fields are required; the remaining fields fall back to
        # the same defaults the dataclass itself uses.
        return cls(
            data["device_id"],
            data["lease_id"],
            data["client_id"],
            LeaseType(data["lease_type"]),
            data.get("description", ""),
            acquired_at=data.get("acquired_at", time.time()),
            allows_monitors=data.get("allows_monitors", True),
        )
|
|
84
|
+
|
|
85
|
+
@dataclass
class DeviceState:
    """Mutable bookkeeping for a single device and its active leases.

    Attributes:
        device_id: The stable device ID.
        device_info: Full device information.
        exclusive_lease: Current exclusive lease holder (None if none).
        monitor_leases: Mapping of lease_id -> DeviceLease for monitors.
        last_seen_at: Unix timestamp when the device was last seen.
        is_connected: Whether the device is currently connected.
    """

    device_id: str
    device_info: DeviceInfo
    exclusive_lease: DeviceLease | None = None
    monitor_leases: dict[str, DeviceLease] = field(default_factory=dict)
    last_seen_at: float = field(default_factory=time.time)
    is_connected: bool = True

    def is_available_for_exclusive(self) -> bool:
        """Return True when no exclusive lease is currently held."""
        return self.exclusive_lease is None

    def has_any_lease(self) -> bool:
        """Return True when at least one lease of any type is active."""
        if self.exclusive_lease is not None:
            return True
        return len(self.monitor_leases) > 0

    def lease_count(self) -> int:
        """Return the total number of active leases on this device."""
        extra = 1 if self.exclusive_lease else 0
        return len(self.monitor_leases) + extra

    def to_dict(self) -> dict[str, Any]:
        """Serialize device state (including its leases) for JSON transport."""
        exclusive = self.exclusive_lease
        monitors = {lid: lease.to_dict() for lid, lease in self.monitor_leases.items()}
        return {
            "device_id": self.device_id,
            "device_info": self.device_info.to_dict(),
            "exclusive_lease": exclusive.to_dict() if exclusive else None,
            "monitor_leases": monitors,
            "monitor_count": len(self.monitor_leases),
            "last_seen_at": self.last_seen_at,
            "is_connected": self.is_connected,
            "is_available_for_exclusive": self.is_available_for_exclusive(),
            "has_any_lease": self.has_any_lease(),
            "lease_count": self.lease_count(),
        }
135
|
+
|
|
136
|
+
class DeviceManagerError(RuntimeError):
    """Raised when a device manager operation cannot be completed.

    Attributes:
        device_id: The device involved in the failure, when known.
        lease_id: The lease involved in the failure, when known.
    """

    def __init__(
        self,
        message: str,
        device_id: str | None = None,
        lease_id: str | None = None,
    ):
        # Store context before delegating so the attributes are set even
        # if a subclass overrides message handling.
        self.device_id = device_id
        self.lease_id = lease_id
        super().__init__(message)
|
149
|
+
|
|
150
|
+
class DeviceManager:
    """Manages device inventory and leases.

    This class provides thread-safe device lease management following
    the same patterns as ConfigurationLockManager:
    - Memory-based locks only (threading.Lock)
    - No file-based locking
    - Idempotent release operations
    - Auto-release on client disconnect

    Thread Safety:
        All public methods are thread-safe using an internal master lock.
        The master lock is a plain, NON-reentrant threading.Lock, so a
        method holding it must never call another public method that also
        acquires it (see release_all_client_leases for the one place this
        previously caused a deadlock).

    Example:
        >>> manager = DeviceManager()
        >>> manager.refresh_devices()  # Discover connected devices
        >>>
        >>> # Acquire exclusive lease for deploy
        >>> lease = manager.acquire_exclusive(
        ...     device_id="usb-ABC123",
        ...     client_id="client-001",
        ...     description="Deploying firmware"
        ... )
        >>> if lease:
        ...     try:
        ...         do_deploy()
        ...     finally:
        ...         manager.release_lease(lease.lease_id, "client-001")
        >>>
        >>> # Acquire monitor lease
        >>> monitor_lease = manager.acquire_monitor(
        ...     device_id="usb-ABC123",
        ...     client_id="client-002",
        ...     description="Monitoring serial output"
        ... )
    """

    def __init__(self) -> None:
        """Initialize the DeviceManager with an empty inventory."""
        self._master_lock = threading.Lock()
        self._devices: dict[str, DeviceState] = {}  # device_id -> DeviceState
        self._client_leases: dict[str, set[str]] = {}  # client_id -> set of lease_ids
        self._lease_to_device: dict[str, str] = {}  # lease_id -> device_id

        logging.info("DeviceManager initialized")

    def _generate_lease_id(self) -> str:
        """Generate a unique lease ID (random UUID4 string)."""
        return str(uuid.uuid4())

    def _track_client_lease(self, client_id: str, lease_id: str, device_id: str) -> None:
        """Track that a client holds a lease.

        Must be called with _master_lock held.
        """
        if client_id not in self._client_leases:
            self._client_leases[client_id] = set()
        self._client_leases[client_id].add(lease_id)
        self._lease_to_device[lease_id] = device_id

    def _untrack_client_lease(self, client_id: str, lease_id: str) -> None:
        """Stop tracking that a client holds a lease.

        Must be called with _master_lock held. Idempotent: untracking an
        unknown lease is a no-op.
        """
        if client_id in self._client_leases:
            self._client_leases[client_id].discard(lease_id)
            # Drop the client entry entirely once its last lease is gone.
            if not self._client_leases[client_id]:
                del self._client_leases[client_id]
        self._lease_to_device.pop(lease_id, None)

    def refresh_devices(self) -> list[DeviceInfo]:
        """Refresh device inventory from hardware.

        Discovers all connected devices and updates the internal inventory.
        Existing leases are preserved for devices that remain connected.
        Devices that are no longer connected are marked as disconnected.

        Returns:
            List of currently connected DeviceInfo objects.
        """
        try:
            discovered = discover_devices()
        except KeyboardInterrupt:
            # Propagate Ctrl-C to the main thread before re-raising so the
            # daemon shuts down instead of swallowing the interrupt here.
            _thread.interrupt_main()
            raise
        except Exception as e:
            # Best-effort: discovery failure must not take the daemon down.
            logging.error(f"Error discovering devices: {e}")
            return []

        current_time = time.time()
        discovered_ids = set()

        with self._master_lock:
            # Update or add discovered devices
            for device_info in discovered:
                device_id = device_info.device_id
                discovered_ids.add(device_id)

                if device_id in self._devices:
                    # Update existing device (leases are preserved)
                    state = self._devices[device_id]
                    state.device_info = device_info
                    state.last_seen_at = current_time
                    state.is_connected = True
                    logging.debug(f"Updated device: {device_id}")
                else:
                    # Add new device
                    state = DeviceState(
                        device_id=device_id,
                        device_info=device_info,
                        last_seen_at=current_time,
                        is_connected=True,
                    )
                    self._devices[device_id] = state
                    logging.info(f"Discovered new device: {device_id} ({device_info.port})")

            # Mark devices that weren't discovered as disconnected
            for device_id, state in self._devices.items():
                if device_id not in discovered_ids and state.is_connected:
                    state.is_connected = False
                    logging.info(f"Device disconnected: {device_id}")

        logging.info(f"Device refresh complete: {len(discovered)} connected device(s)")
        return discovered

    def get_device(self, device_id: str) -> DeviceState | None:
        """Get device state by ID.

        Args:
            device_id: The stable device ID

        Returns:
            DeviceState if found, None otherwise
        """
        with self._master_lock:
            return self._devices.get(device_id)

    def get_device_by_port(self, port: str) -> DeviceState | None:
        """Get device state by port name.

        Args:
            port: The serial port (e.g., "COM3")

        Returns:
            DeviceState if found, None otherwise
        """
        with self._master_lock:
            for state in self._devices.values():
                if state.device_info.matches_port(port):
                    return state
            return None

    def get_all_devices(self) -> dict[str, DeviceState]:
        """Get all tracked devices.

        Returns:
            Dictionary of device_id -> DeviceState
        """
        with self._master_lock:
            # Return a shallow copy to avoid external modification
            return dict(self._devices)

    def get_connected_devices(self) -> list[DeviceState]:
        """Get all currently connected devices.

        Returns:
            List of DeviceState for connected devices
        """
        with self._master_lock:
            return [s for s in self._devices.values() if s.is_connected]

    def acquire_exclusive(
        self,
        device_id: str,
        client_id: str,
        description: str = "",
        allows_monitors: bool = True,
        timeout: float = 300.0,
    ) -> DeviceLease | None:
        """Acquire exclusive lease for deploy/flash/reset.

        An exclusive lease can only be held by one client at a time.
        Monitor leases may be allowed depending on allows_monitors.
        Re-acquiring by the current holder returns the existing lease.

        Args:
            device_id: The stable device ID to lease
            client_id: UUID string identifying the client
            description: Human-readable description of the operation
            allows_monitors: Whether to allow monitor leases while holding exclusive
            timeout: Reserved for future queuing support (currently unused)

        Returns:
            DeviceLease if acquired, None if not available
        """
        with self._master_lock:
            # Check if device exists
            state = self._devices.get(device_id)
            if state is None:
                logging.debug(f"Device {device_id} not found for exclusive lease")
                return None

            # Check if device is connected
            if not state.is_connected:
                logging.debug(f"Device {device_id} is disconnected, cannot acquire exclusive lease")
                return None

            # Check if already held by this client (reentrant case)
            if state.exclusive_lease and state.exclusive_lease.client_id == client_id:
                logging.debug(f"Client {client_id} already holds exclusive lease for {device_id}")
                return state.exclusive_lease

            # Check if available for exclusive
            if not state.is_available_for_exclusive():
                holder = state.exclusive_lease
                logging.debug(f"Exclusive lease not available for {device_id}, held by {holder.client_id if holder else 'unknown'}")
                return None

            # Acquire exclusive lease
            lease_id = self._generate_lease_id()
            lease = DeviceLease(
                device_id=device_id,
                lease_id=lease_id,
                client_id=client_id,
                lease_type=LeaseType.EXCLUSIVE,
                description=description,
                allows_monitors=allows_monitors,
            )
            state.exclusive_lease = lease
            self._track_client_lease(client_id, lease_id, device_id)

            logging.info(f"Exclusive lease acquired for {device_id} by {client_id} (lease={lease_id})")
            return lease

    def acquire_monitor(
        self,
        device_id: str,
        client_id: str,
        description: str = "",
    ) -> DeviceLease | None:
        """Acquire monitor lease for read-only access.

        Multiple clients can hold monitor leases simultaneously.
        Monitor leases can be acquired when:
        - No exclusive lease is held, OR
        - The exclusive lease holder allows monitors

        Args:
            device_id: The stable device ID to lease
            client_id: UUID string identifying the client
            description: Human-readable description of the operation

        Returns:
            DeviceLease if acquired, None if not available
        """
        with self._master_lock:
            # Check if device exists
            state = self._devices.get(device_id)
            if state is None:
                logging.debug(f"Device {device_id} not found for monitor lease")
                return None

            # Check if device is connected
            if not state.is_connected:
                logging.debug(f"Device {device_id} is disconnected, cannot acquire monitor lease")
                return None

            # Check if client already has a monitor lease (reentrant case)
            for lease in state.monitor_leases.values():
                if lease.client_id == client_id:
                    logging.debug(f"Client {client_id} already has monitor lease for {device_id}")
                    return lease

            # Check if monitors are allowed by the exclusive holder
            if state.exclusive_lease and not state.exclusive_lease.allows_monitors:
                logging.debug(f"Monitor lease not allowed for {device_id}, exclusive holder {state.exclusive_lease.client_id} disallows monitors")
                return None

            # Acquire monitor lease
            lease_id = self._generate_lease_id()
            lease = DeviceLease(
                device_id=device_id,
                lease_id=lease_id,
                client_id=client_id,
                lease_type=LeaseType.MONITOR,
                description=description,
            )
            state.monitor_leases[lease_id] = lease
            self._track_client_lease(client_id, lease_id, device_id)

            logging.info(f"Monitor lease acquired for {device_id} by {client_id} (lease={lease_id}, total monitors={len(state.monitor_leases)})")
            return lease

    def release_lease(self, lease_id: str, client_id: str) -> bool:
        """Release a specific lease.

        This operation is idempotent - releasing a non-existent lease
        returns False but does not raise an error. Only the holding
        client may release its lease.

        Args:
            lease_id: The lease ID to release
            client_id: The client releasing the lease (must match lease holder)

        Returns:
            True if a lease was released, False otherwise
        """
        with self._master_lock:
            # Find the device for this lease
            device_id = self._lease_to_device.get(lease_id)
            if device_id is None:
                logging.debug(f"Lease {lease_id} not found for release")
                return False

            state = self._devices.get(device_id)
            if state is None:
                logging.debug(f"Device {device_id} not found for lease release")
                return False

            # Check if this is the exclusive lease
            if state.exclusive_lease and state.exclusive_lease.lease_id == lease_id:
                if state.exclusive_lease.client_id != client_id:
                    logging.warning(f"Client {client_id} tried to release exclusive lease held by {state.exclusive_lease.client_id}")
                    return False

                state.exclusive_lease = None
                self._untrack_client_lease(client_id, lease_id)
                logging.info(f"Exclusive lease {lease_id} released for {device_id} by {client_id}")
                return True

            # Check if this is a monitor lease
            if lease_id in state.monitor_leases:
                lease = state.monitor_leases[lease_id]
                if lease.client_id != client_id:
                    logging.warning(f"Client {client_id} tried to release monitor lease held by {lease.client_id}")
                    return False

                del state.monitor_leases[lease_id]
                self._untrack_client_lease(client_id, lease_id)
                logging.info(f"Monitor lease {lease_id} released for {device_id} by {client_id} (remaining monitors={len(state.monitor_leases)})")
                return True

            logging.debug(f"Lease {lease_id} not found on device {device_id}")
            return False

    def release_all_client_leases(self, client_id: str) -> int:
        """Release all leases held by a client.

        This should be called when a client disconnects to ensure
        all its leases are properly released.

        Args:
            client_id: UUID string identifying the client

        Returns:
            Number of leases released
        """
        # BUG FIX: snapshot the lease IDs and RELEASE the master lock before
        # calling release_lease(), which acquires the same non-reentrant
        # threading.Lock internally. The previous code called release_lease()
        # while still holding _master_lock, which deadlocked the daemon on
        # every client disconnect that had active leases.
        with self._master_lock:
            lease_ids = list(self._client_leases.get(client_id, set()))

        released_count = 0
        for lease_id in lease_ids:
            # release_lease() is idempotent and validates the holder, so a
            # lease released concurrently simply counts as False here.
            if self.release_lease(lease_id, client_id):
                released_count += 1

        if released_count > 0:
            logging.info(f"Released {released_count} lease(s) for disconnected client {client_id}")

        return released_count

    def preempt_device(
        self,
        device_id: str,
        requesting_client_id: str,
        reason: str,
    ) -> tuple[bool, str | None]:
        """Preempt current exclusive holder.

        This forcibly takes the exclusive lease from the current holder
        and transfers it to the requesting client. The reason is mandatory
        and will be logged.

        Args:
            device_id: The device to preempt
            requesting_client_id: The client requesting preemption
            reason: REQUIRED reason for preemption (must not be empty)

        Returns:
            Tuple of (success, preempted_client_id or None)

        Raises:
            DeviceManagerError: If reason is empty
        """
        if not reason or not reason.strip():
            raise DeviceManagerError(
                "Preemption reason is required and must not be empty",
                device_id=device_id,
            )

        with self._master_lock:
            state = self._devices.get(device_id)
            if state is None:
                logging.debug(f"Device {device_id} not found for preemption")
                return (False, None)

            if state.exclusive_lease is None:
                logging.debug(f"No exclusive lease to preempt on device {device_id}")
                return (False, None)

            preempted_client_id = state.exclusive_lease.client_id
            preempted_lease_id = state.exclusive_lease.lease_id

            # Log the preemption with full details (audit trail)
            logging.warning(f"PREEMPTION: Device {device_id} taken from client {preempted_client_id} by client {requesting_client_id}. Reason: {reason}")

            # Release the current lease
            state.exclusive_lease = None
            self._untrack_client_lease(preempted_client_id, preempted_lease_id)

            # Acquire new exclusive lease for the requesting client
            lease_id = self._generate_lease_id()
            lease = DeviceLease(
                device_id=device_id,
                lease_id=lease_id,
                client_id=requesting_client_id,
                lease_type=LeaseType.EXCLUSIVE,
                description=f"Preempted from {preempted_client_id}: {reason}",
            )
            state.exclusive_lease = lease
            self._track_client_lease(requesting_client_id, lease_id, device_id)

            logging.info(f"Preemption complete: {requesting_client_id} now holds exclusive lease for {device_id} (lease={lease_id})")

            return (True, preempted_client_id)

    def get_device_status(self, device_id: str) -> dict[str, Any]:
        """Get detailed status of a device.

        Args:
            device_id: The device to get status for

        Returns:
            Dictionary with device state, leases, and availability.
            Unknown devices get a stub dict with "exists": False.
        """
        with self._master_lock:
            state = self._devices.get(device_id)

            if state is None:
                return {
                    "device_id": device_id,
                    "exists": False,
                    "is_connected": False,
                    "exclusive_lease": None,
                    "monitor_leases": {},
                    "monitor_count": 0,
                    "is_available_for_exclusive": False,
                    "has_any_lease": False,
                }

            return {
                "exists": True,
                **state.to_dict(),
            }

    def get_all_leases(self) -> dict[str, Any]:
        """Get status of all device leases.

        Returns:
            Dictionary with lease information for all devices
        """
        with self._master_lock:
            result: dict[str, Any] = {
                "devices": {},
                "total_devices": len(self._devices),
                "connected_devices": sum(1 for s in self._devices.values() if s.is_connected),
                "total_leases": sum(s.lease_count() for s in self._devices.values()),
                "total_clients": len(self._client_leases),
                "summary": {
                    "exclusive_leases": sum(1 for s in self._devices.values() if s.exclusive_lease is not None),
                    "monitor_leases": sum(len(s.monitor_leases) for s in self._devices.values()),
                },
            }

            for device_id, state in self._devices.items():
                result["devices"][device_id] = state.to_dict()

            return result

    def get_client_leases(self, client_id: str) -> list[dict[str, Any]]:
        """Get all leases held by a specific client.

        Args:
            client_id: UUID string identifying the client

        Returns:
            List of lease information dictionaries
        """
        with self._master_lock:
            lease_ids = self._client_leases.get(client_id, set())
            result: list[dict[str, Any]] = []

            for lease_id in lease_ids:
                device_id = self._lease_to_device.get(lease_id)
                if device_id is None:
                    continue

                state = self._devices.get(device_id)
                if state is None:
                    continue

                # Check exclusive lease
                if state.exclusive_lease and state.exclusive_lease.lease_id == lease_id:
                    result.append(state.exclusive_lease.to_dict())
                # Check monitor leases
                elif lease_id in state.monitor_leases:
                    result.append(state.monitor_leases[lease_id].to_dict())

            return result

    def is_available_for_exclusive(self, device_id: str) -> bool:
        """Check if exclusive lease can be immediately acquired.

        Args:
            device_id: The device to check

        Returns:
            True if exclusive lease is immediately available
        """
        with self._master_lock:
            state = self._devices.get(device_id)
            if state is None:
                return False
            return state.is_available_for_exclusive() and state.is_connected

    def cleanup_stale_devices(self, older_than: float = 3600.0) -> int:
        """Clean up devices that haven't been seen recently.

        This removes device entries that are:
        - Not currently connected
        - Have no active leases
        - Haven't been seen in the specified time period

        Args:
            older_than: Time in seconds. Devices not seen longer than this are removed.

        Returns:
            Number of devices removed
        """
        current_time = time.time()
        removed_count = 0

        with self._master_lock:
            # Collect first, then delete: mutating the dict while iterating
            # it would raise RuntimeError.
            devices_to_remove = []

            for device_id, state in self._devices.items():
                if state.is_connected:
                    continue  # Don't remove connected devices

                if state.has_any_lease():
                    continue  # Don't remove devices with active leases

                if current_time - state.last_seen_at > older_than:
                    devices_to_remove.append(device_id)

            for device_id in devices_to_remove:
                del self._devices[device_id]
                removed_count += 1
                logging.debug(f"Cleaned up stale device: {device_id}")

        if removed_count > 0:
            logging.info(f"Cleaned up {removed_count} stale device(s)")

        return removed_count

    def clear_all_leases(self) -> int:
        """Clear all leases (use with extreme caution - only for daemon restart).

        This releases all leases and clears all internal tracking state.
        Should only be used during daemon shutdown/restart.

        Returns:
            Number of leases cleared
        """
        with self._master_lock:
            count = 0

            for state in self._devices.values():
                if state.exclusive_lease:
                    count += 1
                    state.exclusive_lease = None
                count += len(state.monitor_leases)
                state.monitor_leases.clear()

            self._client_leases.clear()
            self._lease_to_device.clear()

        if count > 0:
            logging.info(f"Cleared all {count} device lease(s)")

        return count

    def register_qemu_device(
        self,
        instance_id: str,
        description: str = "QEMU Virtual Device",
    ) -> DeviceState:
        """Register a QEMU virtual device.

        QEMU devices don't have physical serial ports but still need
        to be tracked for lease management.

        Args:
            instance_id: Unique identifier for the QEMU instance
            description: Human-readable description

        Returns:
            DeviceState for the registered QEMU device
        """
        # Imported lazily to avoid a circular import at module load time.
        from fbuild.daemon.device_discovery import create_qemu_device

        device_info = create_qemu_device(instance_id, description)

        with self._master_lock:
            if device_info.device_id in self._devices:
                state = self._devices[device_info.device_id]
                state.device_info = device_info
                state.last_seen_at = time.time()
                state.is_connected = True
                logging.debug(f"Updated QEMU device: {device_info.device_id}")
            else:
                state = DeviceState(
                    device_id=device_info.device_id,
                    device_info=device_info,
                    last_seen_at=time.time(),
                    is_connected=True,
                )
                self._devices[device_info.device_id] = state
                logging.info(f"Registered QEMU device: {device_info.device_id}")

            return state

    def unregister_qemu_device(self, instance_id: str) -> bool:
        """Unregister a QEMU virtual device.

        Args:
            instance_id: The QEMU instance ID to unregister

        Returns:
            True if device was unregistered, False if not found
        """
        # NOTE(review): assumes create_qemu_device() produces IDs of the
        # form "qemu-<instance_id>" -- verify against device_discovery.
        device_id = f"qemu-{instance_id}"

        with self._master_lock:
            if device_id not in self._devices:
                return False

            state = self._devices[device_id]

            # Release any leases first so client tracking stays consistent
            if state.exclusive_lease:
                self._untrack_client_lease(
                    state.exclusive_lease.client_id,
                    state.exclusive_lease.lease_id,
                )
            for lease in list(state.monitor_leases.values()):
                self._untrack_client_lease(lease.client_id, lease.lease_id)

            del self._devices[device_id]
            logging.info(f"Unregistered QEMU device: {device_id}")
            return True