fbuild-1.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of fbuild has been flagged as possibly problematic.
- fbuild/__init__.py +0 -0
- fbuild/assets/example.txt +1 -0
- fbuild/build/__init__.py +117 -0
- fbuild/build/archive_creator.py +186 -0
- fbuild/build/binary_generator.py +444 -0
- fbuild/build/build_component_factory.py +131 -0
- fbuild/build/build_state.py +325 -0
- fbuild/build/build_utils.py +98 -0
- fbuild/build/compilation_executor.py +422 -0
- fbuild/build/compiler.py +165 -0
- fbuild/build/compiler_avr.py +574 -0
- fbuild/build/configurable_compiler.py +612 -0
- fbuild/build/configurable_linker.py +637 -0
- fbuild/build/flag_builder.py +186 -0
- fbuild/build/library_dependency_processor.py +185 -0
- fbuild/build/linker.py +708 -0
- fbuild/build/orchestrator.py +67 -0
- fbuild/build/orchestrator_avr.py +656 -0
- fbuild/build/orchestrator_esp32.py +797 -0
- fbuild/build/orchestrator_teensy.py +543 -0
- fbuild/build/source_compilation_orchestrator.py +220 -0
- fbuild/build/source_scanner.py +516 -0
- fbuild/cli.py +566 -0
- fbuild/cli_utils.py +312 -0
- fbuild/config/__init__.py +16 -0
- fbuild/config/board_config.py +457 -0
- fbuild/config/board_loader.py +92 -0
- fbuild/config/ini_parser.py +209 -0
- fbuild/config/mcu_specs.py +88 -0
- fbuild/daemon/__init__.py +34 -0
- fbuild/daemon/client.py +929 -0
- fbuild/daemon/compilation_queue.py +293 -0
- fbuild/daemon/daemon.py +474 -0
- fbuild/daemon/daemon_context.py +196 -0
- fbuild/daemon/error_collector.py +263 -0
- fbuild/daemon/file_cache.py +332 -0
- fbuild/daemon/lock_manager.py +270 -0
- fbuild/daemon/logging_utils.py +149 -0
- fbuild/daemon/messages.py +301 -0
- fbuild/daemon/operation_registry.py +288 -0
- fbuild/daemon/process_tracker.py +366 -0
- fbuild/daemon/processors/__init__.py +12 -0
- fbuild/daemon/processors/build_processor.py +157 -0
- fbuild/daemon/processors/deploy_processor.py +327 -0
- fbuild/daemon/processors/monitor_processor.py +146 -0
- fbuild/daemon/request_processor.py +401 -0
- fbuild/daemon/status_manager.py +216 -0
- fbuild/daemon/subprocess_manager.py +316 -0
- fbuild/deploy/__init__.py +17 -0
- fbuild/deploy/deployer.py +67 -0
- fbuild/deploy/deployer_esp32.py +314 -0
- fbuild/deploy/monitor.py +495 -0
- fbuild/interrupt_utils.py +34 -0
- fbuild/packages/__init__.py +53 -0
- fbuild/packages/archive_utils.py +1098 -0
- fbuild/packages/arduino_core.py +412 -0
- fbuild/packages/cache.py +249 -0
- fbuild/packages/downloader.py +366 -0
- fbuild/packages/framework_esp32.py +538 -0
- fbuild/packages/framework_teensy.py +346 -0
- fbuild/packages/github_utils.py +96 -0
- fbuild/packages/header_trampoline_cache.py +394 -0
- fbuild/packages/library_compiler.py +203 -0
- fbuild/packages/library_manager.py +549 -0
- fbuild/packages/library_manager_esp32.py +413 -0
- fbuild/packages/package.py +163 -0
- fbuild/packages/platform_esp32.py +383 -0
- fbuild/packages/platform_teensy.py +312 -0
- fbuild/packages/platform_utils.py +131 -0
- fbuild/packages/platformio_registry.py +325 -0
- fbuild/packages/sdk_utils.py +231 -0
- fbuild/packages/toolchain.py +436 -0
- fbuild/packages/toolchain_binaries.py +196 -0
- fbuild/packages/toolchain_esp32.py +484 -0
- fbuild/packages/toolchain_metadata.py +185 -0
- fbuild/packages/toolchain_teensy.py +404 -0
- fbuild/platform_configs/esp32.json +150 -0
- fbuild/platform_configs/esp32c2.json +144 -0
- fbuild/platform_configs/esp32c3.json +143 -0
- fbuild/platform_configs/esp32c5.json +151 -0
- fbuild/platform_configs/esp32c6.json +151 -0
- fbuild/platform_configs/esp32p4.json +149 -0
- fbuild/platform_configs/esp32s3.json +151 -0
- fbuild/platform_configs/imxrt1062.json +56 -0
- fbuild-1.1.0.dist-info/METADATA +447 -0
- fbuild-1.1.0.dist-info/RECORD +93 -0
- fbuild-1.1.0.dist-info/WHEEL +5 -0
- fbuild-1.1.0.dist-info/entry_points.txt +5 -0
- fbuild-1.1.0.dist-info/licenses/LICENSE +21 -0
- fbuild-1.1.0.dist-info/top_level.txt +2 -0
- fbuild_lint/__init__.py +0 -0
- fbuild_lint/ruff_plugins/__init__.py +0 -0
- fbuild_lint/ruff_plugins/keyboard_interrupt_checker.py +158 -0

fbuild/daemon/messages.py
@@ -0,0 +1,301 @@
"""
Typed message protocol for fbuild daemon operations.

This module defines typed dataclasses for all client-daemon communication,
ensuring type safety and validation.

Supports:
- Build operations (compilation and linking)
- Deploy operations (firmware upload)
- Monitor operations (serial monitoring)
- Status updates and progress tracking
"""

import time
from dataclasses import asdict, dataclass, field
from enum import Enum
from typing import Any


class DaemonState(Enum):
    """Daemon state enumeration."""

    IDLE = "idle"
    DEPLOYING = "deploying"
    MONITORING = "monitoring"
    BUILDING = "building"
    COMPLETED = "completed"
    FAILED = "failed"
    UNKNOWN = "unknown"

    @classmethod
    def from_string(cls, value: str) -> "DaemonState":
        """Convert string to DaemonState, defaulting to UNKNOWN if invalid."""
        try:
            return cls(value)
        except ValueError:
            return cls.UNKNOWN


class OperationType(Enum):
    """Type of operation being performed."""

    BUILD = "build"
    DEPLOY = "deploy"
    MONITOR = "monitor"
    BUILD_AND_DEPLOY = "build_and_deploy"

    @classmethod
    def from_string(cls, value: str) -> "OperationType":
        """Convert string to OperationType."""
        return cls(value)


@dataclass
class DeployRequest:
    """Client → Daemon: Deploy request message.

    Attributes:
        project_dir: Absolute path to project directory
        environment: Build environment name
        port: Serial port for deployment (optional, auto-detect if None)
        clean_build: Whether to perform clean build
        monitor_after: Whether to start monitor after deploy
        monitor_timeout: Timeout for monitor in seconds (if monitor_after=True)
        monitor_halt_on_error: Pattern to halt on error (if monitor_after=True)
        monitor_halt_on_success: Pattern to halt on success (if monitor_after=True)
        monitor_expect: Expected pattern to check at timeout/success (if monitor_after=True)
        caller_pid: Process ID of requesting client
        caller_cwd: Working directory of requesting client
        timestamp: Unix timestamp when request was created
        request_id: Unique identifier for this request
    """

    project_dir: str
    environment: str
    port: str | None
    clean_build: bool
    monitor_after: bool
    monitor_timeout: float | None
    monitor_halt_on_error: str | None
    monitor_halt_on_success: str | None
    monitor_expect: str | None
    caller_pid: int
    caller_cwd: str
    timestamp: float = field(default_factory=time.time)
    request_id: str = field(default_factory=lambda: f"deploy_{int(time.time() * 1000)}")

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "DeployRequest":
        """Create DeployRequest from dictionary."""
        return cls(
            project_dir=data["project_dir"],
            environment=data["environment"],
            port=data.get("port"),
            clean_build=data.get("clean_build", False),
            monitor_after=data.get("monitor_after", False),
            monitor_timeout=data.get("monitor_timeout"),
            monitor_halt_on_error=data.get("monitor_halt_on_error"),
            monitor_halt_on_success=data.get("monitor_halt_on_success"),
            monitor_expect=data.get("monitor_expect"),
            caller_pid=data["caller_pid"],
            caller_cwd=data["caller_cwd"],
            timestamp=data.get("timestamp", time.time()),
            request_id=data.get("request_id", f"deploy_{int(time.time() * 1000)}"),
        )


@dataclass
class MonitorRequest:
    """Client → Daemon: Monitor request message.

    Attributes:
        project_dir: Absolute path to project directory
        environment: Build environment name
        port: Serial port for monitoring (optional, auto-detect if None)
        baud_rate: Serial baud rate (optional, use config default if None)
        halt_on_error: Pattern to halt on (error detection)
        halt_on_success: Pattern to halt on (success detection)
        expect: Expected pattern to check at timeout/success
        timeout: Maximum monitoring time in seconds
        caller_pid: Process ID of requesting client
        caller_cwd: Working directory of requesting client
        timestamp: Unix timestamp when request was created
        request_id: Unique identifier for this request
    """

    project_dir: str
    environment: str
    port: str | None
    baud_rate: int | None
    halt_on_error: str | None
    halt_on_success: str | None
    expect: str | None
    timeout: float | None
    caller_pid: int
    caller_cwd: str
    timestamp: float = field(default_factory=time.time)
    request_id: str = field(default_factory=lambda: f"monitor_{int(time.time() * 1000)}")

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "MonitorRequest":
        """Create MonitorRequest from dictionary."""
        return cls(
            project_dir=data["project_dir"],
            environment=data["environment"],
            port=data.get("port"),
            baud_rate=data.get("baud_rate"),
            halt_on_error=data.get("halt_on_error"),
            halt_on_success=data.get("halt_on_success"),
            expect=data.get("expect"),
            timeout=data.get("timeout"),
            caller_pid=data["caller_pid"],
            caller_cwd=data["caller_cwd"],
            timestamp=data.get("timestamp", time.time()),
            request_id=data.get("request_id", f"monitor_{int(time.time() * 1000)}"),
        )


@dataclass
class BuildRequest:
    """Client → Daemon: Build request message.

    Attributes:
        project_dir: Absolute path to project directory
        environment: Build environment name
        clean_build: Whether to perform clean build
        verbose: Enable verbose build output
        caller_pid: Process ID of requesting client
        caller_cwd: Working directory of requesting client
        timestamp: Unix timestamp when request was created
        request_id: Unique identifier for this request
    """

    project_dir: str
    environment: str
    clean_build: bool
    verbose: bool
    caller_pid: int
    caller_cwd: str
    timestamp: float = field(default_factory=time.time)
    request_id: str = field(default_factory=lambda: f"build_{int(time.time() * 1000)}")

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "BuildRequest":
        """Create BuildRequest from dictionary."""
        return cls(
            project_dir=data["project_dir"],
            environment=data["environment"],
            clean_build=data.get("clean_build", False),
            verbose=data.get("verbose", False),
            caller_pid=data["caller_pid"],
            caller_cwd=data["caller_cwd"],
            timestamp=data.get("timestamp", time.time()),
            request_id=data.get("request_id", f"build_{int(time.time() * 1000)}"),
        )


@dataclass
class DaemonStatus:
    """Daemon → Client: Status update message.

    Attributes:
        state: Current daemon state
        message: Human-readable status message
        updated_at: Unix timestamp of last status update
        operation_in_progress: Whether an operation is actively running
        daemon_pid: Process ID of the daemon
        daemon_started_at: Unix timestamp when daemon started
        caller_pid: Process ID of client whose request is being processed
        caller_cwd: Working directory of client whose request is being processed
        request_id: ID of the request currently being processed
        request_started_at: Unix timestamp when current request started
        environment: Environment being processed
        project_dir: Project directory for current operation
        current_operation: Detailed description of current operation
        operation_type: Type of operation (deploy/monitor)
        output_lines: Recent output lines from the operation
        exit_code: Process exit code (None if still running)
        port: Serial port being used
    """

    state: DaemonState
    message: str
    updated_at: float
    operation_in_progress: bool = False
    daemon_pid: int | None = None
    daemon_started_at: float | None = None
    caller_pid: int | None = None
    caller_cwd: str | None = None
    request_id: str | None = None
    request_started_at: float | None = None
    environment: str | None = None
    project_dir: str | None = None
    current_operation: str | None = None
    operation_type: OperationType | None = None
    output_lines: list[str] = field(default_factory=list)
    exit_code: int | None = None
    port: str | None = None

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for JSON serialization."""
        result = asdict(self)
        # Convert enums to string values
        result["state"] = self.state.value
        if self.operation_type:
            result["operation_type"] = self.operation_type.value
        else:
            result["operation_type"] = None
        return result

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "DaemonStatus":
        """Create DaemonStatus from dictionary."""
        # Convert state string to enum
        state_str = data.get("state", "unknown")
        state = DaemonState.from_string(state_str)

        # Convert operation_type string to enum
        operation_type = None
        if data.get("operation_type"):
            operation_type = OperationType.from_string(data["operation_type"])

        return cls(
            state=state,
            message=data.get("message", ""),
            updated_at=data.get("updated_at", time.time()),
            operation_in_progress=data.get("operation_in_progress", False),
            daemon_pid=data.get("daemon_pid"),
            daemon_started_at=data.get("daemon_started_at"),
            caller_pid=data.get("caller_pid"),
            caller_cwd=data.get("caller_cwd"),
            request_id=data.get("request_id"),
            request_started_at=data.get("request_started_at"),
            environment=data.get("environment"),
            project_dir=data.get("project_dir"),
            current_operation=data.get("current_operation"),
            operation_type=operation_type,
            output_lines=data.get("output_lines", []),
            exit_code=data.get("exit_code"),
            port=data.get("port"),
        )

    def is_stale(self, timeout_seconds: float = 30.0) -> bool:
        """Check if status hasn't been updated recently."""
        return (time.time() - self.updated_at) > timeout_seconds

    def get_age_seconds(self) -> float:
        """Get age of this status update in seconds."""
        return time.time() - self.updated_at
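
The request and status classes above are plain dataclasses, so whatever transport carries them (presumably the socket client in fbuild/daemon/client.py, not shown in this hunk) only ever sees dictionaries. A minimal sketch of the intended round trip, using only the classes defined above; the project path and environment name are hypothetical:

# Illustrative only: project path and environment name are made up.
import json
import os

from fbuild.daemon.messages import BuildRequest, DaemonState, DaemonStatus

request = BuildRequest(
    project_dir="/home/user/blink",   # hypothetical project
    environment="esp32dev",           # hypothetical environment name
    clean_build=False,
    verbose=True,
    caller_pid=os.getpid(),
    caller_cwd=os.getcwd(),
)

# Client side: serialize to JSON. Daemon side: rebuild the typed message.
wire = json.dumps(request.to_dict())
received = BuildRequest.from_dict(json.loads(wire))
assert received.request_id == request.request_id

# Status messages flow the other way; from_dict fills defaults for missing
# keys and maps unknown state strings to DaemonState.UNKNOWN.
status = DaemonStatus.from_dict({"state": "building", "message": "compiling sources"})
assert status.state is DaemonState.BUILDING
assert not status.is_stale()  # updated_at defaulted to "now"

Note that timestamp and request_id are generated at construction time via field defaults, so a round trip preserves them only because to_dict includes them and from_dict reads them back.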

fbuild/daemon/operation_registry.py
@@ -0,0 +1,288 @@
"""
Operation Registry - Structured operation state tracking.

This module provides a registry for tracking all daemon operations (build/deploy/monitor)
with structured state management, replacing the simple boolean _operation_in_progress flag.
"""

import logging
import threading
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Optional

from fbuild.daemon.messages import OperationType


class OperationState(Enum):
    """State of a daemon operation."""

    QUEUED = "queued"
    RUNNING = "running"
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELLED = "cancelled"


@dataclass
class Operation:
    """Tracks a daemon operation (build/deploy/monitor)."""

    operation_id: str
    operation_type: OperationType
    project_dir: str
    environment: str
    state: OperationState
    request_id: str
    caller_pid: int
    created_at: float = field(default_factory=time.time)
    started_at: Optional[float] = None
    completed_at: Optional[float] = None
    error_message: Optional[str] = None
    result: Optional[Any] = None

    # Subprocess tracking
    subprocess_ids: list[str] = field(default_factory=list)
    compilation_job_ids: list[str] = field(default_factory=list)

    def duration(self) -> Optional[float]:
        """Get operation duration in seconds.

        Returns:
            Duration in seconds, or None if not complete
        """
        if self.started_at and self.completed_at:
            return self.completed_at - self.started_at
        return None

    def elapsed_time(self) -> Optional[float]:
        """Get elapsed time since operation started.

        Returns:
            Elapsed time in seconds, or None if not started
        """
        if self.started_at:
            return time.time() - self.started_at
        return None


class OperationRegistry:
    """Registry for tracking all daemon operations."""

    def __init__(self, max_history: int = 100):
        """Initialize operation registry.

        Args:
            max_history: Maximum number of completed operations to retain
        """
        self.operations: dict[str, Operation] = {}
        self.lock = threading.Lock()
        self.max_history = max_history
        logging.info(f"OperationRegistry initialized (max_history={max_history})")

    def register_operation(self, operation: Operation) -> str:
        """Register new operation.

        Args:
            operation: Operation to register

        Returns:
            Operation ID
        """
        logging.debug(f"Operation type: {operation.operation_type.value}, project: {operation.project_dir}, env: {operation.environment}")
        logging.debug(f"Initial state: {operation.state.value}")

        with self.lock:
            existing_count = len(self.operations)
            self.operations[operation.operation_id] = operation
            logging.debug(f"Operation added to registry, total operations: {existing_count} -> {len(self.operations)}")
            self._cleanup_old_operations()

        logging.info(f"Registered operation {operation.operation_id}: {operation.operation_type.value} {operation.project_dir}")
        logging.debug(f"Active operations after registration: {len([op for op in self.operations.values() if op.state in (OperationState.QUEUED, OperationState.RUNNING)])}")
        return operation.operation_id

    def get_operation(self, operation_id: str) -> Optional[Operation]:
        """Get operation by ID.

        Args:
            operation_id: Operation ID to query

        Returns:
            Operation or None if not found
        """
        with self.lock:
            op = self.operations.get(operation_id)
            if op:
                logging.debug(f"Found operation {operation_id}")
            else:
                logging.debug(f"Operation {operation_id} not found")
            return op

    def update_state(self, operation_id: str, state: OperationState, **kwargs: Any) -> None:
        """Update operation state.

        Args:
            operation_id: Operation ID to update
            state: New state
            **kwargs: Additional fields to update
        """
        logging.debug(f"Additional fields to update: {list(kwargs.keys())}")

        with self.lock:
            if operation_id not in self.operations:
                logging.warning(f"Cannot update unknown operation: {operation_id}")
                logging.debug(f"Known operations: {list(self.operations.keys())}")
                return

            op = self.operations[operation_id]
            old_state = op.state
            op.state = state

            # Auto-update timestamps
            if state == OperationState.RUNNING and op.started_at is None:
                op.started_at = time.time()
            elif state in (OperationState.COMPLETED, OperationState.FAILED, OperationState.CANCELLED):
                if op.completed_at is None:
                    op.completed_at = time.time()

            # Update additional fields
            for key, value in kwargs.items():
                if hasattr(op, key):
                    setattr(op, key, value)

            logging.info(f"Operation {operation_id} state: {old_state.value} -> {state.value}")
            if state in (OperationState.COMPLETED, OperationState.FAILED, OperationState.CANCELLED):
                logging.info(
                    f"Operation {operation_id} finished: state={state.value}, type={op.operation_type.value}, duration={op.duration():.2f}s"
                    if op.duration()
                    else f"Operation {operation_id} finished: state={state.value}"
                )

    def get_active_operations(self) -> list[Operation]:
        """Get all active (running/queued) operations.

        Returns:
            List of active operations
        """
        with self.lock:
            active = [op for op in self.operations.values() if op.state in (OperationState.QUEUED, OperationState.RUNNING)]
            logging.debug(f"Found {len(active)} active operations (queued or running)")
            if active:
                logging.info(f"Active operations: {[op.operation_id for op in active]}")
            return active

    def get_operations_by_project(self, project_dir: str) -> list[Operation]:
        """Get all operations for a specific project.

        Args:
            project_dir: Project directory path

        Returns:
            List of operations for the project
        """
        with self.lock:
            ops = [op for op in self.operations.values() if op.project_dir == project_dir]
            logging.debug(f"Found {len(ops)} operations for project {project_dir}")
            if ops:
                logging.debug(f"Operation states: {[(op.operation_id, op.state.value) for op in ops]}")
            return ops

    def is_project_busy(self, project_dir: str) -> bool:
        """Check if a project has any active operations.

        Args:
            project_dir: Project directory path

        Returns:
            True if project has active operations
        """
        with self.lock:
            busy = any(op.project_dir == project_dir and op.state in (OperationState.QUEUED, OperationState.RUNNING) for op in self.operations.values())
            return busy

    def get_statistics(self) -> dict[str, int]:
        """Get operation statistics.

        Returns:
            Dictionary with operation counts by state
        """
        with self.lock:
            stats = {
                "total_operations": len(self.operations),
                "queued": sum(1 for op in self.operations.values() if op.state == OperationState.QUEUED),
                "running": sum(1 for op in self.operations.values() if op.state == OperationState.RUNNING),
                "completed": sum(1 for op in self.operations.values() if op.state == OperationState.COMPLETED),
                "failed": sum(1 for op in self.operations.values() if op.state == OperationState.FAILED),
                "cancelled": sum(1 for op in self.operations.values() if op.state == OperationState.CANCELLED),
            }
            if stats["total_operations"] > 0:
                success_rate = (stats["completed"] / stats["total_operations"]) * 100
                logging.info(f"Operation success rate: {success_rate:.1f}% ({stats['completed']}/{stats['total_operations']})")
            return stats

    def _cleanup_old_operations(self) -> None:
        """Remove old completed operations beyond max_history."""
        completed_ops = sorted(
            [op for op in self.operations.values() if op.state in (OperationState.COMPLETED, OperationState.FAILED, OperationState.CANCELLED)],
            key=lambda x: x.completed_at or 0,
        )

        logging.debug(f"Checking for old operations to cleanup: {len(completed_ops)} completed, max_history={self.max_history}")

        if len(completed_ops) > self.max_history:
            to_remove = completed_ops[: len(completed_ops) - self.max_history]
            logging.debug(f"Removing {len(to_remove)} old operations to maintain max_history limit")
            for op in to_remove:
                del self.operations[op.operation_id]

            logging.info(f"Cleaned up {len(to_remove)} old operations (history size: {len(completed_ops)} -> {len(completed_ops) - len(to_remove)})")
        else:
            logging.debug(f"No cleanup needed: {len(completed_ops)} operations within max_history={self.max_history}")

    def clear_completed_operations(self, older_than_seconds: Optional[float] = None) -> int:
        """Clear completed operations.

        Args:
            older_than_seconds: Only clear operations older than this (None = all)

        Returns:
            Number of operations cleared
        """
        logging.debug(f"Clearing completed operations (older_than: {older_than_seconds}s)" if older_than_seconds else "Clearing all completed operations")

        with self.lock:
            now = time.time()
            to_remove = []
            total_completed = 0

            for op_id, op in self.operations.items():
                if op.state not in (
                    OperationState.COMPLETED,
                    OperationState.FAILED,
                    OperationState.CANCELLED,
                ):
                    continue

                total_completed += 1

                if older_than_seconds is None:
                    to_remove.append(op_id)
                    logging.debug(f"Marking operation for removal: {op_id} (no age filter)")
                elif op.completed_at and (now - op.completed_at) > older_than_seconds:
                    age = now - op.completed_at
                    to_remove.append(op_id)
                    logging.debug(f"Marking operation for removal: {op_id} (age: {age:.1f}s > {older_than_seconds}s)")

            logging.debug(f"Found {len(to_remove)} operations to remove out of {total_completed} completed")

            for op_id in to_remove:
                del self.operations[op_id]

            if to_remove:
                logging.info(f"Cleared {len(to_remove)} completed operations (remaining: {len(self.operations)})")
            else:
                logging.debug("No completed operations to clear")

            return len(to_remove)
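
A rough sketch of the lifecycle a request processor would drive through this registry, using only the classes defined above; the operation ID, project path, and environment name are invented for the example:

# Illustrative only: IDs, paths, and environment name are hypothetical.
import os

from fbuild.daemon.messages import OperationType
from fbuild.daemon.operation_registry import Operation, OperationRegistry, OperationState

registry = OperationRegistry(max_history=50)

op = Operation(
    operation_id="build_1700000000000",   # hypothetical unique ID
    operation_type=OperationType.BUILD,
    project_dir="/home/user/blink",
    environment="esp32dev",
    state=OperationState.QUEUED,
    request_id="build_1700000000000",
    caller_pid=os.getpid(),
)
registry.register_operation(op)
assert registry.is_project_busy("/home/user/blink")

# update_state sets started_at/completed_at automatically and copies any
# extra keyword arguments onto matching Operation fields (e.g. result).
registry.update_state(op.operation_id, OperationState.RUNNING)
registry.update_state(op.operation_id, OperationState.COMPLETED, result={"firmware": "ok"})

assert not registry.is_project_busy("/home/user/blink")
stats = registry.get_statistics()   # e.g. {"total_operations": 1, "completed": 1, ...}
print(op.duration())                # wall-clock seconds between RUNNING and COMPLETED

Because every public method takes the registry lock and _cleanup_old_operations is only called while that lock is already held, callers can share one registry across the daemon's worker threads without extra synchronization.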