fbuild-1.2.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fbuild/__init__.py +390 -0
- fbuild/assets/example.txt +1 -0
- fbuild/build/__init__.py +117 -0
- fbuild/build/archive_creator.py +186 -0
- fbuild/build/binary_generator.py +444 -0
- fbuild/build/build_component_factory.py +131 -0
- fbuild/build/build_info_generator.py +624 -0
- fbuild/build/build_state.py +325 -0
- fbuild/build/build_utils.py +93 -0
- fbuild/build/compilation_executor.py +422 -0
- fbuild/build/compiler.py +165 -0
- fbuild/build/compiler_avr.py +574 -0
- fbuild/build/configurable_compiler.py +664 -0
- fbuild/build/configurable_linker.py +637 -0
- fbuild/build/flag_builder.py +214 -0
- fbuild/build/library_dependency_processor.py +185 -0
- fbuild/build/linker.py +708 -0
- fbuild/build/orchestrator.py +67 -0
- fbuild/build/orchestrator_avr.py +651 -0
- fbuild/build/orchestrator_esp32.py +878 -0
- fbuild/build/orchestrator_rp2040.py +719 -0
- fbuild/build/orchestrator_stm32.py +696 -0
- fbuild/build/orchestrator_teensy.py +580 -0
- fbuild/build/source_compilation_orchestrator.py +218 -0
- fbuild/build/source_scanner.py +516 -0
- fbuild/cli.py +717 -0
- fbuild/cli_utils.py +314 -0
- fbuild/config/__init__.py +16 -0
- fbuild/config/board_config.py +542 -0
- fbuild/config/board_loader.py +92 -0
- fbuild/config/ini_parser.py +369 -0
- fbuild/config/mcu_specs.py +88 -0
- fbuild/daemon/__init__.py +42 -0
- fbuild/daemon/async_client.py +531 -0
- fbuild/daemon/client.py +1505 -0
- fbuild/daemon/compilation_queue.py +293 -0
- fbuild/daemon/configuration_lock.py +865 -0
- fbuild/daemon/daemon.py +585 -0
- fbuild/daemon/daemon_context.py +293 -0
- fbuild/daemon/error_collector.py +263 -0
- fbuild/daemon/file_cache.py +332 -0
- fbuild/daemon/firmware_ledger.py +546 -0
- fbuild/daemon/lock_manager.py +508 -0
- fbuild/daemon/logging_utils.py +149 -0
- fbuild/daemon/messages.py +957 -0
- fbuild/daemon/operation_registry.py +288 -0
- fbuild/daemon/port_state_manager.py +249 -0
- fbuild/daemon/process_tracker.py +366 -0
- fbuild/daemon/processors/__init__.py +18 -0
- fbuild/daemon/processors/build_processor.py +248 -0
- fbuild/daemon/processors/deploy_processor.py +664 -0
- fbuild/daemon/processors/install_deps_processor.py +431 -0
- fbuild/daemon/processors/locking_processor.py +777 -0
- fbuild/daemon/processors/monitor_processor.py +285 -0
- fbuild/daemon/request_processor.py +457 -0
- fbuild/daemon/shared_serial.py +819 -0
- fbuild/daemon/status_manager.py +238 -0
- fbuild/daemon/subprocess_manager.py +316 -0
- fbuild/deploy/__init__.py +21 -0
- fbuild/deploy/deployer.py +67 -0
- fbuild/deploy/deployer_esp32.py +310 -0
- fbuild/deploy/docker_utils.py +315 -0
- fbuild/deploy/monitor.py +519 -0
- fbuild/deploy/qemu_runner.py +603 -0
- fbuild/interrupt_utils.py +34 -0
- fbuild/ledger/__init__.py +52 -0
- fbuild/ledger/board_ledger.py +560 -0
- fbuild/output.py +352 -0
- fbuild/packages/__init__.py +66 -0
- fbuild/packages/archive_utils.py +1098 -0
- fbuild/packages/arduino_core.py +412 -0
- fbuild/packages/cache.py +256 -0
- fbuild/packages/concurrent_manager.py +510 -0
- fbuild/packages/downloader.py +518 -0
- fbuild/packages/fingerprint.py +423 -0
- fbuild/packages/framework_esp32.py +538 -0
- fbuild/packages/framework_rp2040.py +349 -0
- fbuild/packages/framework_stm32.py +459 -0
- fbuild/packages/framework_teensy.py +346 -0
- fbuild/packages/github_utils.py +96 -0
- fbuild/packages/header_trampoline_cache.py +394 -0
- fbuild/packages/library_compiler.py +203 -0
- fbuild/packages/library_manager.py +549 -0
- fbuild/packages/library_manager_esp32.py +725 -0
- fbuild/packages/package.py +163 -0
- fbuild/packages/platform_esp32.py +383 -0
- fbuild/packages/platform_rp2040.py +400 -0
- fbuild/packages/platform_stm32.py +581 -0
- fbuild/packages/platform_teensy.py +312 -0
- fbuild/packages/platform_utils.py +131 -0
- fbuild/packages/platformio_registry.py +369 -0
- fbuild/packages/sdk_utils.py +231 -0
- fbuild/packages/toolchain.py +436 -0
- fbuild/packages/toolchain_binaries.py +196 -0
- fbuild/packages/toolchain_esp32.py +489 -0
- fbuild/packages/toolchain_metadata.py +185 -0
- fbuild/packages/toolchain_rp2040.py +436 -0
- fbuild/packages/toolchain_stm32.py +417 -0
- fbuild/packages/toolchain_teensy.py +404 -0
- fbuild/platform_configs/esp32.json +150 -0
- fbuild/platform_configs/esp32c2.json +144 -0
- fbuild/platform_configs/esp32c3.json +143 -0
- fbuild/platform_configs/esp32c5.json +151 -0
- fbuild/platform_configs/esp32c6.json +151 -0
- fbuild/platform_configs/esp32p4.json +149 -0
- fbuild/platform_configs/esp32s3.json +151 -0
- fbuild/platform_configs/imxrt1062.json +56 -0
- fbuild/platform_configs/rp2040.json +70 -0
- fbuild/platform_configs/rp2350.json +76 -0
- fbuild/platform_configs/stm32f1.json +59 -0
- fbuild/platform_configs/stm32f4.json +63 -0
- fbuild/py.typed +0 -0
- fbuild-1.2.8.dist-info/METADATA +468 -0
- fbuild-1.2.8.dist-info/RECORD +121 -0
- fbuild-1.2.8.dist-info/WHEEL +5 -0
- fbuild-1.2.8.dist-info/entry_points.txt +5 -0
- fbuild-1.2.8.dist-info/licenses/LICENSE +21 -0
- fbuild-1.2.8.dist-info/top_level.txt +2 -0
- fbuild_lint/__init__.py +0 -0
- fbuild_lint/ruff_plugins/__init__.py +0 -0
- fbuild_lint/ruff_plugins/keyboard_interrupt_checker.py +158 -0
fbuild/daemon/configuration_lock.py
@@ -0,0 +1,865 @@
"""
Configuration Lock Manager - Lock management for daemon configuration resources.

This module provides the ConfigurationLockManager class which manages locks
for specific configurations identified by (project_dir, environment, port) tuples.
Key features:
- Configuration-based locking with composite keys
- Exclusive locks for build/deploy operations (single holder)
- Shared read locks for monitoring operations (multiple holders)
- Async client connection tracking with auto-release on disconnect
- Waiting queue for exclusive lock requests
- Lock upgrade/downgrade support (shared <-> exclusive)
"""

import logging
import threading
import time
from collections import deque
from dataclasses import dataclass, field
from enum import Enum
from typing import Any

# Default timeout for exclusive lock acquisition: 5 minutes
DEFAULT_EXCLUSIVE_TIMEOUT = 300.0

# Default lock expiry: 30 minutes (for long builds)
DEFAULT_LOCK_EXPIRY = 1800.0


class LockState(Enum):
    """Lock state enumeration."""

    UNLOCKED = "unlocked"
    LOCKED_EXCLUSIVE = "locked_exclusive"
    LOCKED_SHARED_READ = "locked_shared_read"


@dataclass
class WaitingRequest:
    """Represents a waiting request for an exclusive lock.

    Attributes:
        client_id: UUID string identifying the waiting client
        description: Human-readable description of the operation
        requested_at: Unix timestamp when request was made
        event: Threading event to signal when lock is granted
    """

    client_id: str
    description: str
    requested_at: float = field(default_factory=time.time)
    event: threading.Event = field(default_factory=threading.Event)


@dataclass
class LockHolder:
    """Information about a client holding a lock.

    Attributes:
        client_id: UUID string identifying the client
        description: Human-readable description of the operation
        acquired_at: Unix timestamp when lock was acquired
        lock_type: Type of lock held (exclusive or shared_read)
    """

    client_id: str
    description: str
    acquired_at: float = field(default_factory=time.time)
    lock_type: str = "exclusive"  # "exclusive" or "shared_read"

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        return {
            "client_id": self.client_id,
            "description": self.description,
            "acquired_at": self.acquired_at,
            "lock_type": self.lock_type,
            "hold_duration": time.time() - self.acquired_at,
        }


@dataclass
class ConfigurationLock:
    """Lock state and metadata for a specific configuration.

    A configuration is identified by the tuple (project_dir, environment, port).

    Attributes:
        config_key: The configuration key tuple
        state: Current lock state
        exclusive_holder: Client holding exclusive lock (if any)
        shared_holders: Dict of client_id -> LockHolder for shared read locks
        waiting_queue: Queue of clients waiting for exclusive lock
        created_at: Unix timestamp when lock was created
        last_activity_at: Unix timestamp of last lock activity
    """

    config_key: tuple[str, str, str]
    state: LockState = LockState.UNLOCKED
    exclusive_holder: LockHolder | None = None
    shared_holders: dict[str, LockHolder] = field(default_factory=dict)
    waiting_queue: deque[WaitingRequest] = field(default_factory=deque)
    created_at: float = field(default_factory=time.time)
    last_activity_at: float = field(default_factory=time.time)

    def is_held(self) -> bool:
        """Check if lock is currently held by any client."""
        return self.state != LockState.UNLOCKED

    def holder_count(self) -> int:
        """Get number of clients holding the lock."""
        if self.state == LockState.LOCKED_EXCLUSIVE:
            return 1 if self.exclusive_holder else 0
        elif self.state == LockState.LOCKED_SHARED_READ:
            return len(self.shared_holders)
        return 0

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        waiting_list = [
            {
                "client_id": req.client_id,
                "description": req.description,
                "requested_at": req.requested_at,
                "wait_duration": time.time() - req.requested_at,
            }
            for req in self.waiting_queue
        ]

        return {
            "config_key": {
                "project_dir": self.config_key[0],
                "environment": self.config_key[1],
                "port": self.config_key[2],
            },
            "state": self.state.value,
            "exclusive_holder": self.exclusive_holder.to_dict() if self.exclusive_holder else None,
            "shared_holders": {client_id: holder.to_dict() for client_id, holder in self.shared_holders.items()},
            "waiting_queue": waiting_list,
            "waiting_count": len(self.waiting_queue),
            "holder_count": self.holder_count(),
            "created_at": self.created_at,
            "last_activity_at": self.last_activity_at,
        }


class ConfigurationLockError(RuntimeError):
    """Error raised when a configuration lock operation fails.

    Provides detailed information about the lock state and failure reason.
    """

    def __init__(
        self,
        message: str,
        config_key: tuple[str, str, str],
        lock_info: ConfigurationLock | None = None,
    ):
        self.config_key = config_key
        self.lock_info = lock_info
        super().__init__(message)


class ConfigurationLockManager:
    """Manages configuration locks for daemon operations.

    This class provides locking for specific configurations identified by
    (project_dir, environment, port) tuples. It supports:
    - Exclusive locks for build/deploy operations (only one holder)
    - Shared read locks for monitoring operations (multiple holders)
    - Async client tracking with auto-release on disconnect
    - Waiting queue for exclusive lock requests
    - Lock upgrade/downgrade between shared and exclusive

    Thread Safety:
        All public methods are thread-safe using an internal master lock.

    Example:
        >>> manager = ConfigurationLockManager()
        >>>
        >>> # Acquire exclusive lock for build
        >>> config_key = ("/path/to/project", "esp32c6", "COM3")
        >>> if manager.acquire_exclusive(config_key, "client-123", "Building firmware"):
        ...     try:
        ...         do_build()
        ...     finally:
        ...         manager.release(config_key, "client-123")
        >>>
        >>> # Acquire shared read lock for monitoring
        >>> if manager.acquire_shared_read(config_key, "client-456", "Monitoring output"):
        ...     try:
        ...         read_serial_output()
        ...     finally:
        ...         manager.release(config_key, "client-456")
        >>>
        >>> # Auto-release all locks when client disconnects
        >>> manager.release_all_client_locks("client-123")
    """

    def __init__(self) -> None:
        """Initialize the ConfigurationLockManager."""
        self._master_lock = threading.Lock()  # Protects all internal state
        self._config_locks: dict[tuple[str, str, str], ConfigurationLock] = {}
        self._client_locks: dict[str, set[tuple[str, str, str]]] = {}  # client_id -> config_keys

    def _get_or_create_lock(self, config_key: tuple[str, str, str]) -> ConfigurationLock:
        """Get or create a lock for the given configuration.

        Must be called with _master_lock held.

        Args:
            config_key: Tuple of (project_dir, environment, port)

        Returns:
            ConfigurationLock for the configuration
        """
        if config_key not in self._config_locks:
            self._config_locks[config_key] = ConfigurationLock(config_key=config_key)
        return self._config_locks[config_key]

    def _track_client_lock(self, client_id: str, config_key: tuple[str, str, str]) -> None:
        """Track that a client holds a lock on a configuration.

        Must be called with _master_lock held.

        Args:
            client_id: UUID string identifying the client
            config_key: Configuration key tuple
        """
        if client_id not in self._client_locks:
            self._client_locks[client_id] = set()
        self._client_locks[client_id].add(config_key)

    def _untrack_client_lock(self, client_id: str, config_key: tuple[str, str, str]) -> None:
        """Stop tracking that a client holds a lock on a configuration.

        Must be called with _master_lock held.

        Args:
            client_id: UUID string identifying the client
            config_key: Configuration key tuple
        """
        if client_id in self._client_locks:
            self._client_locks[client_id].discard(config_key)
            if not self._client_locks[client_id]:
                del self._client_locks[client_id]

    def _grant_next_waiting(self, lock: ConfigurationLock) -> None:
        """Grant the exclusive lock to the next waiting client if available.

        Must be called with _master_lock held and lock in UNLOCKED state.

        Args:
            lock: The configuration lock to process
        """
        while lock.waiting_queue:
            waiting = lock.waiting_queue.popleft()
            # Signal the waiting client
            waiting.event.set()
            # Note: The waiting client will acquire the lock when it wakes up
            # We break here to let only one client proceed
            break

    def acquire_exclusive(
        self,
        config_key: tuple[str, str, str],
        client_id: str,
        description: str = "",
        timeout: float = DEFAULT_EXCLUSIVE_TIMEOUT,
    ) -> bool:
        """Acquire an exclusive lock for a configuration.

        An exclusive lock can only be held by one client at a time. It cannot
        be acquired if there are shared read locks or another exclusive lock.

        Args:
            config_key: Tuple of (project_dir, environment, port)
            client_id: UUID string identifying the client
            description: Human-readable description of the operation
            timeout: Maximum time in seconds to wait for lock (0 for non-blocking)

        Returns:
            True if lock was acquired, False if timeout or not available

        Note:
            If timeout > 0 and lock is not immediately available, the client
            will be added to a waiting queue and will be notified when the
            lock becomes available.
        """
        start_time = time.time()
        waiting_request: WaitingRequest | None = None

        with self._master_lock:
            lock = self._get_or_create_lock(config_key)

            # Check if already held by this client (reentrant case)
            if lock.state == LockState.LOCKED_EXCLUSIVE and lock.exclusive_holder and lock.exclusive_holder.client_id == client_id:
                logging.debug(f"Client {client_id} already holds exclusive lock for {config_key}")
                return True

            # Try to acquire immediately if unlocked
            if lock.state == LockState.UNLOCKED:
                lock.state = LockState.LOCKED_EXCLUSIVE
                lock.exclusive_holder = LockHolder(
                    client_id=client_id,
                    description=description,
                    lock_type="exclusive",
                )
                lock.last_activity_at = time.time()
                self._track_client_lock(client_id, config_key)
                logging.debug(f"Exclusive lock acquired for {config_key} by {client_id}")
                return True

            # If non-blocking, return False
            if timeout <= 0:
                logging.debug(f"Exclusive lock not available for {config_key}, " f"current state: {lock.state.value}")
                return False

            # Add to waiting queue
            waiting_request = WaitingRequest(
                client_id=client_id,
                description=description,
            )
            lock.waiting_queue.append(waiting_request)
            logging.debug(f"Client {client_id} added to waiting queue for {config_key}, " f"position: {len(lock.waiting_queue)}")

        # Wait outside the master lock to avoid blocking other operations
        if waiting_request:
            remaining_timeout = timeout - (time.time() - start_time)
            if remaining_timeout > 0:
                signaled = waiting_request.event.wait(timeout=remaining_timeout)
            else:
                signaled = False

            # Try to acquire the lock now
            with self._master_lock:
                lock = self._get_or_create_lock(config_key)

                # Remove from waiting queue if still there
                try:
                    lock.waiting_queue.remove(waiting_request)
                except ValueError:
                    pass  # Already removed

                if not signaled:
                    logging.debug(f"Timeout waiting for exclusive lock on {config_key} " f"for client {client_id}")
                    return False

                # Try to acquire now that we've been signaled
                if lock.state == LockState.UNLOCKED:
                    lock.state = LockState.LOCKED_EXCLUSIVE
                    lock.exclusive_holder = LockHolder(
                        client_id=client_id,
                        description=description,
                        lock_type="exclusive",
                    )
                    lock.last_activity_at = time.time()
                    self._track_client_lock(client_id, config_key)
                    logging.debug(f"Exclusive lock acquired (after wait) for {config_key} " f"by {client_id}")
                    return True
                else:
                    # Lock was taken by someone else
                    logging.debug(f"Lock taken by another client while {client_id} was waiting")
                    return False

        return False

    def acquire_shared_read(
        self,
        config_key: tuple[str, str, str],
        client_id: str,
        description: str = "",
    ) -> bool:
        """Acquire a shared read lock for a configuration.

        Multiple clients can hold shared read locks simultaneously.
        Shared read locks cannot be acquired if there is an exclusive lock
        or if there are clients waiting for an exclusive lock.

        Args:
            config_key: Tuple of (project_dir, environment, port)
            client_id: UUID string identifying the client
            description: Human-readable description of the operation

        Returns:
            True if lock was acquired, False if not available
        """
        with self._master_lock:
            lock = self._get_or_create_lock(config_key)

            # Check if already held by this client
            if client_id in lock.shared_holders:
                logging.debug(f"Client {client_id} already holds shared read lock for {config_key}")
                return True

            # Cannot acquire if exclusive lock is held
            if lock.state == LockState.LOCKED_EXCLUSIVE:
                logging.debug(f"Shared read lock not available for {config_key}, " f"exclusive lock held by {lock.exclusive_holder.client_id if lock.exclusive_holder else 'unknown'}")
                return False

            # Cannot acquire if there are clients waiting for exclusive lock
            # (to prevent starvation of exclusive lock requests)
            if lock.waiting_queue:
                logging.debug(f"Shared read lock not available for {config_key}, " f"{len(lock.waiting_queue)} clients waiting for exclusive lock")
                return False

            # Acquire shared read lock
            lock.state = LockState.LOCKED_SHARED_READ
            lock.shared_holders[client_id] = LockHolder(
                client_id=client_id,
                description=description,
                lock_type="shared_read",
            )
            lock.last_activity_at = time.time()
            self._track_client_lock(client_id, config_key)
            logging.debug(f"Shared read lock acquired for {config_key} by {client_id}, " f"total shared holders: {len(lock.shared_holders)}")
            return True

    def release(self, config_key: tuple[str, str, str], client_id: str) -> bool:
        """Release a lock held by a client.

        This releases either an exclusive lock or a shared read lock,
        depending on what the client holds.

        Args:
            config_key: Tuple of (project_dir, environment, port)
            client_id: UUID string identifying the client

        Returns:
            True if a lock was released, False if client didn't hold a lock
        """
        with self._master_lock:
            if config_key not in self._config_locks:
                logging.debug(f"No lock exists for {config_key} to release")
                return False

            lock = self._config_locks[config_key]

            # Check for exclusive lock
            if lock.state == LockState.LOCKED_EXCLUSIVE and lock.exclusive_holder and lock.exclusive_holder.client_id == client_id:
                lock.state = LockState.UNLOCKED
                lock.exclusive_holder = None
                lock.last_activity_at = time.time()
                self._untrack_client_lock(client_id, config_key)
                logging.debug(f"Exclusive lock released for {config_key} by {client_id}")
                # Grant to next waiting client if any
                self._grant_next_waiting(lock)
                return True

            # Check for shared read lock
            if client_id in lock.shared_holders:
                del lock.shared_holders[client_id]
                lock.last_activity_at = time.time()
                self._untrack_client_lock(client_id, config_key)

                # Update state if no more shared holders
                if not lock.shared_holders:
                    lock.state = LockState.UNLOCKED
                    # Grant to next waiting client if any
                    self._grant_next_waiting(lock)

                logging.debug(f"Shared read lock released for {config_key} by {client_id}, " f"remaining shared holders: {len(lock.shared_holders)}")
                return True

            logging.debug(f"Client {client_id} does not hold a lock for {config_key}")
            return False

    def release_all_client_locks(self, client_id: str) -> int:
        """Release all locks held by a client.

        This should be called when a client disconnects to ensure
        all its locks are properly released.

        Args:
            client_id: UUID string identifying the client

        Returns:
            Number of locks released
        """
        released_count = 0

        with self._master_lock:
            # Get copy of config keys since we'll be modifying the set
            config_keys = list(self._client_locks.get(client_id, set()))

        # Release each lock (release() will acquire _master_lock internally)
        for config_key in config_keys:
            if self.release(config_key, client_id):
                released_count += 1

        if released_count > 0:
            logging.info(f"Released {released_count} locks for disconnected client {client_id}")

        return released_count

    def get_lock_status(self, config_key: tuple[str, str, str]) -> dict[str, Any]:
        """Get detailed status of a specific configuration lock.

        Args:
            config_key: Tuple of (project_dir, environment, port)

        Returns:
            Dictionary with lock state, holders, waiting queue, etc.
        """
        with self._master_lock:
            if config_key not in self._config_locks:
                return {
                    "config_key": {
                        "project_dir": config_key[0],
                        "environment": config_key[1],
                        "port": config_key[2],
                    },
                    "state": LockState.UNLOCKED.value,
                    "exclusive_holder": None,
                    "shared_holders": {},
                    "waiting_queue": [],
                    "waiting_count": 0,
                    "holder_count": 0,
                    "exists": False,
                }

            lock = self._config_locks[config_key]
            result = lock.to_dict()
            result["exists"] = True
            return result

    def get_all_locks(self) -> dict[str, Any]:
        """Get status of all configuration locks.

        Returns:
            Dictionary with all lock information, keyed by string representation
            of config_key.
        """
        with self._master_lock:
            result: dict[str, Any] = {
                "locks": {},
                "total_locks": len(self._config_locks),
                "total_clients": len(self._client_locks),
                "summary": {
                    "unlocked": 0,
                    "exclusive": 0,
                    "shared_read": 0,
                    "waiting_total": 0,
                },
            }

            for config_key, lock in self._config_locks.items():
                key_str = f"{config_key[0]}|{config_key[1]}|{config_key[2]}"
                result["locks"][key_str] = lock.to_dict()

                # Update summary
                if lock.state == LockState.UNLOCKED:
                    result["summary"]["unlocked"] += 1
                elif lock.state == LockState.LOCKED_EXCLUSIVE:
                    result["summary"]["exclusive"] += 1
                elif lock.state == LockState.LOCKED_SHARED_READ:
                    result["summary"]["shared_read"] += 1

                result["summary"]["waiting_total"] += len(lock.waiting_queue)

            return result

    def is_available_for_exclusive(self, config_key: tuple[str, str, str]) -> bool:
        """Check if exclusive lock can be immediately acquired.

        Args:
            config_key: Tuple of (project_dir, environment, port)

        Returns:
            True if exclusive lock is immediately available, False otherwise
        """
        with self._master_lock:
            if config_key not in self._config_locks:
                return True

            lock = self._config_locks[config_key]
            return lock.state == LockState.UNLOCKED

    def upgrade_to_exclusive(
        self,
        config_key: tuple[str, str, str],
        client_id: str,
        timeout: float = DEFAULT_EXCLUSIVE_TIMEOUT,
    ) -> bool:
        """Upgrade a shared read lock to an exclusive lock.

        The client must already hold a shared read lock on the configuration.
        The upgrade will wait for other shared readers to release their locks.

        Args:
            config_key: Tuple of (project_dir, environment, port)
            client_id: UUID string identifying the client
            timeout: Maximum time in seconds to wait for exclusive access

        Returns:
            True if lock was upgraded, False if upgrade failed or timed out
        """
        start_time = time.time()
        waiting_request: WaitingRequest | None = None

        with self._master_lock:
            if config_key not in self._config_locks:
                logging.debug(f"No lock exists for {config_key} to upgrade")
                return False

            lock = self._config_locks[config_key]

            # Client must hold a shared read lock
            if client_id not in lock.shared_holders:
                logging.debug(f"Client {client_id} does not hold shared read lock for {config_key}")
                return False

            # If this is the only shared holder, upgrade immediately
            if len(lock.shared_holders) == 1 and not lock.waiting_queue:
                shared_holder = lock.shared_holders.pop(client_id)  # capture before removal so the description survives
                lock.state = LockState.LOCKED_EXCLUSIVE
                lock.exclusive_holder = LockHolder(
                    client_id=client_id,
                    description=f"Upgraded from shared: {shared_holder.description}",
                    lock_type="exclusive",
                )
                lock.last_activity_at = time.time()
                logging.debug(f"Lock upgraded to exclusive for {config_key} by {client_id}")
                return True

            # Need to wait for other shared holders to release
            if timeout <= 0:
                logging.debug(f"Cannot upgrade lock for {config_key}, " f"{len(lock.shared_holders) - 1} other shared holders")
                return False

            # Release our shared lock and join waiting queue with priority
            # We add to front of queue for upgrade requests
            del lock.shared_holders[client_id]
            if not lock.shared_holders:
                lock.state = LockState.UNLOCKED

            waiting_request = WaitingRequest(
                client_id=client_id,
                description="Upgrading from shared read lock",
            )
            # Add to front of queue for upgrades (priority)
            lock.waiting_queue.appendleft(waiting_request)
            lock.last_activity_at = time.time()
            logging.debug(f"Client {client_id} waiting for upgrade on {config_key}")

        # Wait outside the master lock
        if waiting_request:
            # Grant immediately if lock is now unlocked
            with self._master_lock:
                lock_check = self._config_locks.get(config_key)
                if lock_check and lock_check.state == LockState.UNLOCKED:
                    try:
                        lock_check.waiting_queue.remove(waiting_request)
                    except ValueError:
                        pass
                    lock_check.state = LockState.LOCKED_EXCLUSIVE
                    lock_check.exclusive_holder = LockHolder(
                        client_id=client_id,
                        description="Upgraded from shared read lock",
                        lock_type="exclusive",
                    )
                    lock_check.last_activity_at = time.time()
                    # Don't re-track since we kept the tracking from shared
                    logging.debug(f"Lock upgraded (immediate) to exclusive for {config_key} " f"by {client_id}")
                    return True

            remaining_timeout = timeout - (time.time() - start_time)
            if remaining_timeout > 0:
                signaled = waiting_request.event.wait(timeout=remaining_timeout)
            else:
                signaled = False

            with self._master_lock:
                lock_wait = self._config_locks.get(config_key)
                if not lock_wait:
                    return False

                # Remove from waiting queue if still there
                try:
                    lock_wait.waiting_queue.remove(waiting_request)
                except ValueError:
                    pass

                if not signaled:
                    logging.debug(f"Timeout waiting for upgrade on {config_key} " f"for client {client_id}")
                    # Re-acquire shared lock
                    lock_wait.shared_holders[client_id] = LockHolder(
                        client_id=client_id,
                        description="Upgrade timeout - restored shared",
                        lock_type="shared_read",
                    )
                    if lock_wait.state == LockState.UNLOCKED:
                        lock_wait.state = LockState.LOCKED_SHARED_READ
                    return False

                # Try to acquire exclusive now
                if lock_wait.state == LockState.UNLOCKED:
                    lock_wait.state = LockState.LOCKED_EXCLUSIVE
                    lock_wait.exclusive_holder = LockHolder(
                        client_id=client_id,
                        description="Upgraded from shared read lock",
                        lock_type="exclusive",
                    )
                    lock_wait.last_activity_at = time.time()
                    logging.debug(f"Lock upgraded (after wait) to exclusive for {config_key} " f"by {client_id}")
                    return True
                else:
                    # Someone else got the lock
                    # Re-acquire shared lock
                    if lock_wait.state == LockState.LOCKED_SHARED_READ:
                        lock_wait.shared_holders[client_id] = LockHolder(
                            client_id=client_id,
                            description="Upgrade failed - restored shared",
                            lock_type="shared_read",
                        )
                    return False

        return False

    def downgrade_to_shared(
        self,
        config_key: tuple[str, str, str],
        client_id: str,
    ) -> bool:
        """Downgrade an exclusive lock to a shared read lock.

        The client must hold an exclusive lock on the configuration.
        This allows other readers to acquire shared read locks.

        Args:
            config_key: Tuple of (project_dir, environment, port)
            client_id: UUID string identifying the client

        Returns:
            True if lock was downgraded, False if client didn't hold exclusive lock
        """
        with self._master_lock:
            if config_key not in self._config_locks:
                logging.debug(f"No lock exists for {config_key} to downgrade")
                return False

            lock = self._config_locks[config_key]

            # Client must hold exclusive lock
            if lock.state != LockState.LOCKED_EXCLUSIVE or not lock.exclusive_holder or lock.exclusive_holder.client_id != client_id:
                logging.debug(f"Client {client_id} does not hold exclusive lock for {config_key}")
                return False

            # If there are clients waiting for exclusive, don't downgrade
            # (they should get the exclusive lock next)
            if lock.waiting_queue:
                logging.debug(f"Cannot downgrade lock for {config_key}, " f"{len(lock.waiting_queue)} clients waiting for exclusive")
                return False

            # Downgrade to shared read
            old_description = lock.exclusive_holder.description
            lock.exclusive_holder = None
            lock.state = LockState.LOCKED_SHARED_READ
            lock.shared_holders[client_id] = LockHolder(
                client_id=client_id,
                description=f"Downgraded from exclusive: {old_description}",
                lock_type="shared_read",
            )
            lock.last_activity_at = time.time()
            logging.debug(f"Lock downgraded to shared read for {config_key} by {client_id}")
            return True

    def get_client_locks(self, client_id: str) -> list[dict[str, Any]]:
        """Get all locks held by a specific client.

        Args:
            client_id: UUID string identifying the client

        Returns:
            List of lock information dictionaries
        """
        with self._master_lock:
            config_keys = self._client_locks.get(client_id, set())
            result = []

            for config_key in config_keys:
                if config_key in self._config_locks:
                    lock = self._config_locks[config_key]
                    lock_type = None

                    if lock.state == LockState.LOCKED_EXCLUSIVE and lock.exclusive_holder and lock.exclusive_holder.client_id == client_id:
                        lock_type = "exclusive"
                    elif client_id in lock.shared_holders:
                        lock_type = "shared_read"

                    if lock_type:
                        result.append(
                            {
                                "config_key": {
                                    "project_dir": config_key[0],
                                    "environment": config_key[1],
                                    "port": config_key[2],
                                },
                                "lock_type": lock_type,
                                "state": lock.state.value,
                            }
                        )

            return result

    def cleanup_unused_locks(self, older_than: float = 3600.0) -> int:
        """Clean up locks that haven't been used recently.

        This removes lock entries that are:
        - Not currently held (UNLOCKED state)
        - Have not had activity in the specified time period

        Args:
            older_than: Time in seconds. Locks inactive longer than this are removed.

        Returns:
            Number of locks removed
        """
        current_time = time.time()
        removed_count = 0

        with self._master_lock:
            keys_to_remove = []

            for config_key, lock in self._config_locks.items():
                if lock.is_held():
                    continue  # Don't remove held locks

                if current_time - lock.last_activity_at > older_than:
                    keys_to_remove.append(config_key)

            for config_key in keys_to_remove:
                del self._config_locks[config_key]
                removed_count += 1
                logging.debug(f"Cleaned up unused configuration lock: {config_key}")

        if removed_count > 0:
            logging.info(f"Cleaned up {removed_count} unused configuration locks")

        return removed_count

    def clear_all_locks(self) -> int:
        """Clear all locks (use with extreme caution - only for daemon restart).

        This releases all locks and clears all internal state.
        Should only be used during daemon shutdown/restart.

        Returns:
            Number of locks cleared
        """
        with self._master_lock:
            count = len(self._config_locks)

            # Wake up any waiting clients
            for lock in self._config_locks.values():
                for waiting in lock.waiting_queue:
                    waiting.event.set()

            self._config_locks.clear()
            self._client_locks.clear()

            if count > 0:
                logging.info(f"Cleared all {count} configuration locks")

            return count
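
For orientation, below is a minimal usage sketch of the ConfigurationLockManager API added in this file. It is not part of the package: the project path, environment name, port, and client IDs are invented example values, and the import path simply mirrors the file location shown in the listing above.

import uuid

from fbuild.daemon.configuration_lock import ConfigurationLockManager

manager = ConfigurationLockManager()
# A configuration is keyed by (project_dir, environment, port).
config_key = ("/tmp/example-project", "esp32c6", "COM3")

builder = str(uuid.uuid4())
monitor = str(uuid.uuid4())

# A monitor takes a shared read lock; other readers could join concurrently.
assert manager.acquire_shared_read(config_key, monitor, "Monitoring serial output")

# A non-blocking exclusive request fails while a shared reader holds the lock.
assert not manager.acquire_exclusive(config_key, builder, "Building firmware", timeout=0)

# Once the reader releases, the exclusive lock can be taken immediately.
manager.release(config_key, monitor)
assert manager.acquire_exclusive(config_key, builder, "Building firmware", timeout=0)
print(manager.get_lock_status(config_key)["state"])  # prints "locked_exclusive"

# Simulate the builder disconnecting; every lock it held is released.
manager.release_all_client_locks(builder)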