fbuild-1.2.8-py3-none-any.whl → fbuild-1.2.15-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- fbuild/__init__.py +5 -1
- fbuild/build/configurable_compiler.py +49 -6
- fbuild/build/configurable_linker.py +14 -9
- fbuild/build/orchestrator_esp32.py +6 -3
- fbuild/build/orchestrator_rp2040.py +6 -2
- fbuild/cli.py +300 -5
- fbuild/config/ini_parser.py +13 -1
- fbuild/daemon/__init__.py +11 -0
- fbuild/daemon/async_client.py +5 -4
- fbuild/daemon/async_client_lib.py +1543 -0
- fbuild/daemon/async_protocol.py +825 -0
- fbuild/daemon/async_server.py +2100 -0
- fbuild/daemon/client.py +425 -13
- fbuild/daemon/configuration_lock.py +13 -13
- fbuild/daemon/connection.py +508 -0
- fbuild/daemon/connection_registry.py +579 -0
- fbuild/daemon/daemon.py +517 -164
- fbuild/daemon/daemon_context.py +72 -1
- fbuild/daemon/device_discovery.py +477 -0
- fbuild/daemon/device_manager.py +821 -0
- fbuild/daemon/error_collector.py +263 -263
- fbuild/daemon/file_cache.py +332 -332
- fbuild/daemon/firmware_ledger.py +46 -123
- fbuild/daemon/lock_manager.py +508 -508
- fbuild/daemon/messages.py +431 -0
- fbuild/daemon/operation_registry.py +288 -288
- fbuild/daemon/processors/build_processor.py +34 -1
- fbuild/daemon/processors/deploy_processor.py +1 -3
- fbuild/daemon/processors/locking_processor.py +7 -7
- fbuild/daemon/request_processor.py +457 -457
- fbuild/daemon/shared_serial.py +7 -7
- fbuild/daemon/status_manager.py +238 -238
- fbuild/daemon/subprocess_manager.py +316 -316
- fbuild/deploy/docker_utils.py +182 -2
- fbuild/deploy/monitor.py +1 -1
- fbuild/deploy/qemu_runner.py +71 -13
- fbuild/ledger/board_ledger.py +46 -122
- fbuild/output.py +238 -2
- fbuild/packages/library_compiler.py +15 -5
- fbuild/packages/library_manager.py +12 -6
- fbuild-1.2.15.dist-info/METADATA +569 -0
- {fbuild-1.2.8.dist-info → fbuild-1.2.15.dist-info}/RECORD +46 -39
- fbuild-1.2.8.dist-info/METADATA +0 -468
- {fbuild-1.2.8.dist-info → fbuild-1.2.15.dist-info}/WHEEL +0 -0
- {fbuild-1.2.8.dist-info → fbuild-1.2.15.dist-info}/entry_points.txt +0 -0
- {fbuild-1.2.8.dist-info → fbuild-1.2.15.dist-info}/licenses/LICENSE +0 -0
- {fbuild-1.2.8.dist-info → fbuild-1.2.15.dist-info}/top_level.txt +0 -0
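For readers who want to go beyond the per-file summary above, a diff like the ones below can be reproduced locally. A minimal sketch using only the standard library, assuming both wheel files have been downloaded into the working directory (the wheel and module names are taken from the listing above):

```python
# Sketch: diff one module between two downloaded wheels.
import difflib
import zipfile

OLD_WHEEL = "fbuild-1.2.8-py3-none-any.whl"
NEW_WHEEL = "fbuild-1.2.15-py3-none-any.whl"
MODULE = "fbuild/daemon/operation_registry.py"

with zipfile.ZipFile(OLD_WHEEL) as old, zipfile.ZipFile(NEW_WHEEL) as new:
    a = old.read(MODULE).decode("utf-8").splitlines(keepends=True)
    b = new.read(MODULE).decode("utf-8").splitlines(keepends=True)

print("".join(difflib.unified_diff(a, b, f"1.2.8/{MODULE}", f"1.2.15/{MODULE}")))
```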
fbuild/daemon/operation_registry.py
@@ -1,288 +1,288 @@
-"""
-Operation Registry - Structured operation state tracking.
-
-This module provides a registry for tracking all daemon operations (build/deploy/monitor)
-with structured state management, replacing the simple boolean _operation_in_progress flag.
-"""
-
-import logging
-import threading
-import time
-from dataclasses import dataclass, field
-from enum import Enum
-from typing import Any, Optional
-
-from fbuild.daemon.messages import OperationType
-
-
-class OperationState(Enum):
-    """State of a daemon operation."""
-
-    QUEUED = "queued"
-    RUNNING = "running"
-    COMPLETED = "completed"
-    FAILED = "failed"
-    CANCELLED = "cancelled"
-
-
-@dataclass
-class Operation:
-    """Tracks a daemon operation (build/deploy/monitor)."""
-
-    operation_id: str
-    operation_type: OperationType
-    project_dir: str
-    environment: str
-    state: OperationState
-    request_id: str
-    caller_pid: int
-    created_at: float = field(default_factory=time.time)
-    started_at: Optional[float] = None
-    completed_at: Optional[float] = None
-    error_message: Optional[str] = None
-    result: Optional[Any] = None
-
-    # Subprocess tracking
-    subprocess_ids: list[str] = field(default_factory=list)
-    compilation_job_ids: list[str] = field(default_factory=list)
-
-    def duration(self) -> Optional[float]:
-        """Get operation duration in seconds.
-
-        Returns:
-            Duration in seconds, or None if not complete
-        """
-        if self.started_at and self.completed_at:
-            return self.completed_at - self.started_at
-        return None
-
-    def elapsed_time(self) -> Optional[float]:
-        """Get elapsed time since operation started.
-
-        Returns:
-            Elapsed time in seconds, or None if not started
-        """
-        if self.started_at:
-            return time.time() - self.started_at
-        return None
-
-
-class OperationRegistry:
-    """Registry for tracking all daemon operations."""
-
-    def __init__(self, max_history: int = 100):
-        """Initialize operation registry.
-
-        Args:
-            max_history: Maximum number of completed operations to retain
-        """
-        self.operations: dict[str, Operation] = {}
-        self.lock = threading.Lock()
-        self.max_history = max_history
-        logging.info(f"OperationRegistry initialized (max_history={max_history})")
-
-    def register_operation(self, operation: Operation) -> str:
-        """Register new operation.
-
-        Args:
-            operation: Operation to register
-
-        Returns:
-            Operation ID
-        """
-        logging.debug(f"Operation type: {operation.operation_type.value}, project: {operation.project_dir}, env: {operation.environment}")
-        logging.debug(f"Initial state: {operation.state.value}")
-
-        with self.lock:
-            existing_count = len(self.operations)
-            self.operations[operation.operation_id] = operation
-            logging.debug(f"Operation added to registry, total operations: {existing_count} -> {len(self.operations)}")
-            self._cleanup_old_operations()
-
-        logging.info(f"Registered operation {operation.operation_id}: {operation.operation_type.value} {operation.project_dir}")
-        logging.debug(f"Active operations after registration: {len([op for op in self.operations.values() if op.state in (OperationState.QUEUED, OperationState.RUNNING)])}")
-        return operation.operation_id
-
-    def get_operation(self, operation_id: str) -> Optional[Operation]:
-        """Get operation by ID.
-
-        Args:
-            operation_id: Operation ID to query
-
-        Returns:
-            Operation or None if not found
-        """
-        with self.lock:
-            op = self.operations.get(operation_id)
-            if op:
-                logging.debug(f"Found operation {operation_id}")
-            else:
-                logging.debug(f"Operation {operation_id} not found")
-            return op
-
-    def update_state(self, operation_id: str, state: OperationState, **kwargs: Any) -> None:
-        """Update operation state.
-
-        Args:
-            operation_id: Operation ID to update
-            state: New state
-            **kwargs: Additional fields to update
-        """
-        logging.debug(f"Additional fields to update: {list(kwargs.keys())}")
-
-        with self.lock:
-            if operation_id not in self.operations:
-                logging.warning(f"Cannot update unknown operation: {operation_id}")
-                logging.debug(f"Known operations: {list(self.operations.keys())}")
-                return
-
-            op = self.operations[operation_id]
-            old_state = op.state
-            op.state = state
-
-            # Auto-update timestamps
-            if state == OperationState.RUNNING and op.started_at is None:
-                op.started_at = time.time()
-            elif state in (OperationState.COMPLETED, OperationState.FAILED, OperationState.CANCELLED):
-                if op.completed_at is None:
-                    op.completed_at = time.time()
-
-            # Update additional fields
-            for key, value in kwargs.items():
-                if hasattr(op, key):
-                    setattr(op, key, value)
-
-            logging.info(f"Operation {operation_id} state: {old_state.value} -> {state.value}")
-            if state in (OperationState.COMPLETED, OperationState.FAILED, OperationState.CANCELLED):
-                logging.info(
-                    f"Operation {operation_id} finished: state={state.value}, type={op.operation_type.value}, duration={op.duration():.2f}s"
-                    if op.duration()
-                    else f"Operation {operation_id} finished: state={state.value}"
-                )
-
-    def get_active_operations(self) -> list[Operation]:
-        """Get all active (running/queued) operations.
-
-        Returns:
-            List of active operations
-        """
-        with self.lock:
-            active = [op for op in self.operations.values() if op.state in (OperationState.QUEUED, OperationState.RUNNING)]
-            logging.debug(f"Found {len(active)} active operations (queued or running)")
-            if active:
-                logging.info(f"Active operations: {[op.operation_id for op in active]}")
-            return active
-
-    def get_operations_by_project(self, project_dir: str) -> list[Operation]:
-        """Get all operations for a specific project.
-
-        Args:
-            project_dir: Project directory path
-
-        Returns:
-            List of operations for the project
-        """
-        with self.lock:
-            ops = [op for op in self.operations.values() if op.project_dir == project_dir]
-            logging.debug(f"Found {len(ops)} operations for project {project_dir}")
-            if ops:
-                logging.debug(f"Operation states: {[(op.operation_id, op.state.value) for op in ops]}")
-            return ops
-
-    def is_project_busy(self, project_dir: str) -> bool:
-        """Check if a project has any active operations.
-
-        Args:
-            project_dir: Project directory path
-
-        Returns:
-            True if project has active operations
-        """
-        with self.lock:
-            busy = any(op.project_dir == project_dir and op.state in (OperationState.QUEUED, OperationState.RUNNING) for op in self.operations.values())
-            return busy
-
-    def get_statistics(self) -> dict[str, int]:
-        """Get operation statistics.
-
-        Returns:
-            Dictionary with operation counts by state
-        """
-        with self.lock:
-            stats = {
-                "total_operations": len(self.operations),
-                "queued": sum(1 for op in self.operations.values() if op.state == OperationState.QUEUED),
-                "running": sum(1 for op in self.operations.values() if op.state == OperationState.RUNNING),
-                "completed": sum(1 for op in self.operations.values() if op.state == OperationState.COMPLETED),
-                "failed": sum(1 for op in self.operations.values() if op.state == OperationState.FAILED),
-                "cancelled": sum(1 for op in self.operations.values() if op.state == OperationState.CANCELLED),
-            }
-            if stats["total_operations"] > 0:
-                success_rate = (stats["completed"] / stats["total_operations"]) * 100 if stats["total_operations"] > 0 else 0
-                logging.info(f"Operation success rate: {success_rate:.1f}% ({stats['completed']}/{stats['total_operations']})")
-            return stats
-
-    def _cleanup_old_operations(self) -> None:
-        """Remove old completed operations beyond max_history."""
-        completed_ops = sorted(
-            [op for op in self.operations.values() if op.state in (OperationState.COMPLETED, OperationState.FAILED, OperationState.CANCELLED)],
-            key=lambda x: x.completed_at or 0,
-        )
-
-        logging.debug(f"Checking for old operations to cleanup: {len(completed_ops)} completed, max_history={self.max_history}")
-
-        if len(completed_ops) > self.max_history:
-            to_remove = completed_ops[: len(completed_ops) - self.max_history]
-            logging.debug(f"Removing {len(to_remove)} old operations to maintain max_history limit")
-            for op in to_remove:
-                del self.operations[op.operation_id]
-
-            logging.info(f"Cleaned up {len(to_remove)} old operations (history size: {len(completed_ops)} -> {len(completed_ops) - len(to_remove)})")
-        else:
-            logging.debug(f"No cleanup needed: {len(completed_ops)} operations within max_history={self.max_history}")
-
-    def clear_completed_operations(self, older_than_seconds: Optional[float] = None) -> int:
-        """Clear completed operations.
-
-        Args:
-            older_than_seconds: Only clear operations older than this (None = all)
-
-        Returns:
-            Number of operations cleared
-        """
-        logging.debug(f"Clearing completed operations (older_than: {older_than_seconds}s)" if older_than_seconds else "Clearing all completed operations")
-
-        with self.lock:
-            now = time.time()
-            to_remove = []
-            total_completed = 0
-
-            for op_id, op in self.operations.items():
-                if op.state not in (
-                    OperationState.COMPLETED,
-                    OperationState.FAILED,
-                    OperationState.CANCELLED,
-                ):
-                    continue
-
-                total_completed += 1
-
-                if older_than_seconds is None:
-                    to_remove.append(op_id)
-                    logging.debug(f"Marking operation for removal: {op_id} (no age filter)")
-                elif op.completed_at and (now - op.completed_at) > older_than_seconds:
-                    age = now - op.completed_at
-                    to_remove.append(op_id)
-                    logging.debug(f"Marking operation for removal: {op_id} (age: {age:.1f}s > {older_than_seconds}s)")
-
-            logging.debug(f"Found {len(to_remove)} operations to remove out of {total_completed} completed")
-
-            for op_id in to_remove:
-                del self.operations[op_id]
-
-            if to_remove:
-                logging.info(f"Cleared {len(to_remove)} completed operations (remaining: {len(self.operations)})")
-            else:
-                logging.debug("No completed operations to clear")
-
-            return len(to_remove)
+"""
+Operation Registry - Structured operation state tracking.
+
+This module provides a registry for tracking all daemon operations (build/deploy/monitor)
+with structured state management, replacing the simple boolean _operation_in_progress flag.
+"""
+
+import logging
+import threading
+import time
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any, Optional
+
+from fbuild.daemon.messages import OperationType
+
+
+class OperationState(Enum):
+    """State of a daemon operation."""
+
+    QUEUED = "queued"
+    RUNNING = "running"
+    COMPLETED = "completed"
+    FAILED = "failed"
+    CANCELLED = "cancelled"
+
+
+@dataclass
+class Operation:
+    """Tracks a daemon operation (build/deploy/monitor)."""
+
+    operation_id: str
+    operation_type: OperationType
+    project_dir: str
+    environment: str
+    state: OperationState
+    request_id: str
+    caller_pid: int
+    created_at: float = field(default_factory=time.time)
+    started_at: Optional[float] = None
+    completed_at: Optional[float] = None
+    error_message: Optional[str] = None
+    result: Optional[Any] = None
+
+    # Subprocess tracking
+    subprocess_ids: list[str] = field(default_factory=list)
+    compilation_job_ids: list[str] = field(default_factory=list)
+
+    def duration(self) -> Optional[float]:
+        """Get operation duration in seconds.
+
+        Returns:
+            Duration in seconds, or None if not complete
+        """
+        if self.started_at and self.completed_at:
+            return self.completed_at - self.started_at
+        return None
+
+    def elapsed_time(self) -> Optional[float]:
+        """Get elapsed time since operation started.
+
+        Returns:
+            Elapsed time in seconds, or None if not started
+        """
+        if self.started_at:
+            return time.time() - self.started_at
+        return None
+
+
+class OperationRegistry:
+    """Registry for tracking all daemon operations."""
+
+    def __init__(self, max_history: int = 100):
+        """Initialize operation registry.
+
+        Args:
+            max_history: Maximum number of completed operations to retain
+        """
+        self.operations: dict[str, Operation] = {}
+        self.lock = threading.Lock()
+        self.max_history = max_history
+        logging.info(f"OperationRegistry initialized (max_history={max_history})")
+
+    def register_operation(self, operation: Operation) -> str:
+        """Register new operation.
+
+        Args:
+            operation: Operation to register
+
+        Returns:
+            Operation ID
+        """
+        logging.debug(f"Operation type: {operation.operation_type.value}, project: {operation.project_dir}, env: {operation.environment}")
+        logging.debug(f"Initial state: {operation.state.value}")
+
+        with self.lock:
+            existing_count = len(self.operations)
+            self.operations[operation.operation_id] = operation
+            logging.debug(f"Operation added to registry, total operations: {existing_count} -> {len(self.operations)}")
+            self._cleanup_old_operations()
+
+        logging.info(f"Registered operation {operation.operation_id}: {operation.operation_type.value} {operation.project_dir}")
+        logging.debug(f"Active operations after registration: {len([op for op in self.operations.values() if op.state in (OperationState.QUEUED, OperationState.RUNNING)])}")
+        return operation.operation_id
+
+    def get_operation(self, operation_id: str) -> Optional[Operation]:
+        """Get operation by ID.
+
+        Args:
+            operation_id: Operation ID to query
+
+        Returns:
+            Operation or None if not found
+        """
+        with self.lock:
+            op = self.operations.get(operation_id)
+            if op:
+                logging.debug(f"Found operation {operation_id}")
+            else:
+                logging.debug(f"Operation {operation_id} not found")
+            return op
+
+    def update_state(self, operation_id: str, state: OperationState, **kwargs: Any) -> None:
+        """Update operation state.
+
+        Args:
+            operation_id: Operation ID to update
+            state: New state
+            **kwargs: Additional fields to update
+        """
+        logging.debug(f"Additional fields to update: {list(kwargs.keys())}")
+
+        with self.lock:
+            if operation_id not in self.operations:
+                logging.warning(f"Cannot update unknown operation: {operation_id}")
+                logging.debug(f"Known operations: {list(self.operations.keys())}")
+                return
+
+            op = self.operations[operation_id]
+            old_state = op.state
+            op.state = state
+
+            # Auto-update timestamps
+            if state == OperationState.RUNNING and op.started_at is None:
+                op.started_at = time.time()
+            elif state in (OperationState.COMPLETED, OperationState.FAILED, OperationState.CANCELLED):
+                if op.completed_at is None:
+                    op.completed_at = time.time()
+
+            # Update additional fields
+            for key, value in kwargs.items():
+                if hasattr(op, key):
+                    setattr(op, key, value)
+
+            logging.info(f"Operation {operation_id} state: {old_state.value} -> {state.value}")
+            if state in (OperationState.COMPLETED, OperationState.FAILED, OperationState.CANCELLED):
+                logging.info(
+                    f"Operation {operation_id} finished: state={state.value}, type={op.operation_type.value}, duration={op.duration():.2f}s"
+                    if op.duration()
+                    else f"Operation {operation_id} finished: state={state.value}"
+                )
+
+    def get_active_operations(self) -> list[Operation]:
+        """Get all active (running/queued) operations.
+
+        Returns:
+            List of active operations
+        """
+        with self.lock:
+            active = [op for op in self.operations.values() if op.state in (OperationState.QUEUED, OperationState.RUNNING)]
+            logging.debug(f"Found {len(active)} active operations (queued or running)")
+            if active:
+                logging.info(f"Active operations: {[op.operation_id for op in active]}")
+            return active
+
+    def get_operations_by_project(self, project_dir: str) -> list[Operation]:
+        """Get all operations for a specific project.
+
+        Args:
+            project_dir: Project directory path
+
+        Returns:
+            List of operations for the project
+        """
+        with self.lock:
+            ops = [op for op in self.operations.values() if op.project_dir == project_dir]
+            logging.debug(f"Found {len(ops)} operations for project {project_dir}")
+            if ops:
+                logging.debug(f"Operation states: {[(op.operation_id, op.state.value) for op in ops]}")
+            return ops
+
+    def is_project_busy(self, project_dir: str) -> bool:
+        """Check if a project has any active operations.
+
+        Args:
+            project_dir: Project directory path
+
+        Returns:
+            True if project has active operations
+        """
+        with self.lock:
+            busy = any(op.project_dir == project_dir and op.state in (OperationState.QUEUED, OperationState.RUNNING) for op in self.operations.values())
+            return busy
+
+    def get_statistics(self) -> dict[str, int]:
+        """Get operation statistics.
+
+        Returns:
+            Dictionary with operation counts by state
+        """
+        with self.lock:
+            stats = {
+                "total_operations": len(self.operations),
+                "queued": sum(1 for op in self.operations.values() if op.state == OperationState.QUEUED),
+                "running": sum(1 for op in self.operations.values() if op.state == OperationState.RUNNING),
+                "completed": sum(1 for op in self.operations.values() if op.state == OperationState.COMPLETED),
+                "failed": sum(1 for op in self.operations.values() if op.state == OperationState.FAILED),
+                "cancelled": sum(1 for op in self.operations.values() if op.state == OperationState.CANCELLED),
+            }
+            if stats["total_operations"] > 0:
+                success_rate = (stats["completed"] / stats["total_operations"]) * 100 if stats["total_operations"] > 0 else 0
+                logging.info(f"Operation success rate: {success_rate:.1f}% ({stats['completed']}/{stats['total_operations']})")
+            return stats
+
+    def _cleanup_old_operations(self) -> None:
+        """Remove old completed operations beyond max_history."""
+        completed_ops = sorted(
+            [op for op in self.operations.values() if op.state in (OperationState.COMPLETED, OperationState.FAILED, OperationState.CANCELLED)],
+            key=lambda x: x.completed_at or 0,
+        )
+
+        logging.debug(f"Checking for old operations to cleanup: {len(completed_ops)} completed, max_history={self.max_history}")
+
+        if len(completed_ops) > self.max_history:
+            to_remove = completed_ops[: len(completed_ops) - self.max_history]
+            logging.debug(f"Removing {len(to_remove)} old operations to maintain max_history limit")
+            for op in to_remove:
+                del self.operations[op.operation_id]
+
+            logging.info(f"Cleaned up {len(to_remove)} old operations (history size: {len(completed_ops)} -> {len(completed_ops) - len(to_remove)})")
+        else:
+            logging.debug(f"No cleanup needed: {len(completed_ops)} operations within max_history={self.max_history}")
+
+    def clear_completed_operations(self, older_than_seconds: Optional[float] = None) -> int:
+        """Clear completed operations.
+
+        Args:
+            older_than_seconds: Only clear operations older than this (None = all)
+
+        Returns:
+            Number of operations cleared
+        """
+        logging.debug(f"Clearing completed operations (older_than: {older_than_seconds}s)" if older_than_seconds else "Clearing all completed operations")
+
+        with self.lock:
+            now = time.time()
+            to_remove = []
+            total_completed = 0
+
+            for op_id, op in self.operations.items():
+                if op.state not in (
+                    OperationState.COMPLETED,
+                    OperationState.FAILED,
+                    OperationState.CANCELLED,
+                ):
+                    continue
+
+                total_completed += 1
+
+                if older_than_seconds is None:
+                    to_remove.append(op_id)
+                    logging.debug(f"Marking operation for removal: {op_id} (no age filter)")
+                elif op.completed_at and (now - op.completed_at) > older_than_seconds:
+                    age = now - op.completed_at
+                    to_remove.append(op_id)
+                    logging.debug(f"Marking operation for removal: {op_id} (age: {age:.1f}s > {older_than_seconds}s)")
+
+            logging.debug(f"Found {len(to_remove)} operations to remove out of {total_completed} completed")
+
+            for op_id in to_remove:
+                del self.operations[op_id]
+
+            if to_remove:
+                logging.info(f"Cleared {len(to_remove)} completed operations (remaining: {len(self.operations)})")
+            else:
+                logging.debug("No completed operations to clear")
+
+            return len(to_remove)
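The hunk above removes and re-adds all 288 lines of operation_registry.py with identical displayed content (a formatting-only rewrite, matching the +288 -288 count in the file list), so the registry API itself is unchanged between versions. For orientation, a minimal usage sketch of that API; the class, method, and field names come straight from the diff, while the ID strings and paths are illustrative, and `OperationType.BUILD` assumes the enum in fbuild.daemon.messages exposes a BUILD member:

```python
from fbuild.daemon.messages import OperationType  # assumed: enum with a BUILD member
from fbuild.daemon.operation_registry import Operation, OperationRegistry, OperationState

registry = OperationRegistry(max_history=100)

# Field values below are illustrative, not taken from fbuild itself.
op = Operation(
    operation_id="op-001",
    operation_type=OperationType.BUILD,
    project_dir="/home/dev/my-project",
    environment="esp32dev",
    state=OperationState.QUEUED,
    request_id="req-001",
    caller_pid=4242,
)
registry.register_operation(op)

# update_state() stamps started_at/completed_at automatically.
registry.update_state("op-001", OperationState.RUNNING)
registry.update_state("op-001", OperationState.COMPLETED)

assert not registry.is_project_busy("/home/dev/my-project")
print(registry.get_statistics())  # counts by state, e.g. {'completed': 1, ...}
```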
fbuild/daemon/processors/build_processor.py
@@ -65,11 +65,44 @@ class BuildRequestProcessor(RequestProcessor):
         """
         logging.info(f"Building project: {request.project_dir}")
 
-        # Reload build modules to pick up code changes
+        # Reload build modules FIRST to pick up code changes
         # This is critical for development on Windows where daemon caching
         # prevents testing code changes
+        # IMPORTANT: Must happen before setting output file because reload resets global state
         self._reload_build_modules()
 
+        # Set up output file for streaming to client (after module reload!)
+        from fbuild.output import reset_timer, set_output_file
+
+        output_file_path = Path(request.project_dir) / ".fbuild" / "build_output.txt"
+        output_file_path.parent.mkdir(parents=True, exist_ok=True)
+
+        # Clear output file at start to prevent stale output from previous builds
+        output_file_path.write_text("", encoding="utf-8")
+
+        output_file = None
+        try:
+            output_file = open(output_file_path, "a", encoding="utf-8")
+            set_output_file(output_file)
+            reset_timer()  # Fresh timestamps for this build
+
+            return self._execute_build(request, context)
+        finally:
+            set_output_file(None)  # Always clean up
+            if output_file is not None:
+                output_file.close()
+
+    def _execute_build(self, request: "BuildRequest", context: "DaemonContext") -> bool:
+        """Internal build execution logic.
+
+        Args:
+            request: The build request containing project_dir, environment, etc.
+            context: The daemon context with all subsystems
+
+        Returns:
+            True if build succeeded, False otherwise
+        """
+
         # Detect platform type from platformio.ini to select appropriate orchestrator
         try:
             from fbuild.config.ini_parser import PlatformIOConfig
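The ordering constraint called out in the new comment is easy to trip over: reloading a module re-executes its body, so any module-level state configured before the reload is silently reset. A reduced illustration of that hazard, assuming `_reload_build_modules` performs `importlib.reload`-style reloading as the comment implies (`fbuild.output` and `set_output_file` are real names from this diff; everything else is illustrative):

```python
import importlib

import fbuild.output as fbuild_output

with open("build_output.txt", "a", encoding="utf-8") as sink:
    # Wrong order: the reload re-executes fbuild/output.py, so the
    # module-level output-file global set here is reset to its default.
    fbuild_output.set_output_file(sink)
    importlib.reload(fbuild_output)  # configuration above is lost

    # Right order (what the patched processor does): reload first,
    # then configure the freshly executed module.
    importlib.reload(fbuild_output)
    fbuild_output.set_output_file(sink)
    try:
        pass  # ... run the build ...
    finally:
        fbuild_output.set_output_file(None)  # detach before the file closes
```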
fbuild/daemon/processors/deploy_processor.py
@@ -251,9 +251,7 @@ class DeployRequestProcessor(RequestProcessor):
             env_config = config.get_env_config(environment)
             build_flags = env_config.get("build_flags", "")
 
-            if
-                return build_flags.split() if build_flags else []
-            return list(build_flags) if build_flags else []
+            return build_flags.split() if build_flags else []
         except KeyboardInterrupt: # noqa: KBI002
             raise
         except Exception as e:
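The removed block was doubly broken: the dangling `if` (as rendered in this diff) is a syntax error, and even the `list()` fallback beneath it would have been wrong for the common case, because `list()` applied to a string yields individual characters rather than flags. A plain-Python illustration of why `split()` is the correct choice here:

```python
build_flags = "-DDEBUG -O2"  # typical build_flags value from platformio.ini

print(build_flags.split())  # ['-DDEBUG', '-O2']        -> one entry per flag
print(list(build_flags))    # ['-', 'D', 'D', 'E', ...] -> one entry per character
```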