fbuild 1.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fbuild/__init__.py +390 -0
- fbuild/assets/example.txt +1 -0
- fbuild/build/__init__.py +117 -0
- fbuild/build/archive_creator.py +186 -0
- fbuild/build/binary_generator.py +444 -0
- fbuild/build/build_component_factory.py +131 -0
- fbuild/build/build_info_generator.py +624 -0
- fbuild/build/build_state.py +325 -0
- fbuild/build/build_utils.py +93 -0
- fbuild/build/compilation_executor.py +422 -0
- fbuild/build/compiler.py +165 -0
- fbuild/build/compiler_avr.py +574 -0
- fbuild/build/configurable_compiler.py +664 -0
- fbuild/build/configurable_linker.py +637 -0
- fbuild/build/flag_builder.py +214 -0
- fbuild/build/library_dependency_processor.py +185 -0
- fbuild/build/linker.py +708 -0
- fbuild/build/orchestrator.py +67 -0
- fbuild/build/orchestrator_avr.py +651 -0
- fbuild/build/orchestrator_esp32.py +878 -0
- fbuild/build/orchestrator_rp2040.py +719 -0
- fbuild/build/orchestrator_stm32.py +696 -0
- fbuild/build/orchestrator_teensy.py +580 -0
- fbuild/build/source_compilation_orchestrator.py +218 -0
- fbuild/build/source_scanner.py +516 -0
- fbuild/cli.py +717 -0
- fbuild/cli_utils.py +314 -0
- fbuild/config/__init__.py +16 -0
- fbuild/config/board_config.py +542 -0
- fbuild/config/board_loader.py +92 -0
- fbuild/config/ini_parser.py +369 -0
- fbuild/config/mcu_specs.py +88 -0
- fbuild/daemon/__init__.py +42 -0
- fbuild/daemon/async_client.py +531 -0
- fbuild/daemon/client.py +1505 -0
- fbuild/daemon/compilation_queue.py +293 -0
- fbuild/daemon/configuration_lock.py +865 -0
- fbuild/daemon/daemon.py +585 -0
- fbuild/daemon/daemon_context.py +293 -0
- fbuild/daemon/error_collector.py +263 -0
- fbuild/daemon/file_cache.py +332 -0
- fbuild/daemon/firmware_ledger.py +546 -0
- fbuild/daemon/lock_manager.py +508 -0
- fbuild/daemon/logging_utils.py +149 -0
- fbuild/daemon/messages.py +957 -0
- fbuild/daemon/operation_registry.py +288 -0
- fbuild/daemon/port_state_manager.py +249 -0
- fbuild/daemon/process_tracker.py +366 -0
- fbuild/daemon/processors/__init__.py +18 -0
- fbuild/daemon/processors/build_processor.py +248 -0
- fbuild/daemon/processors/deploy_processor.py +664 -0
- fbuild/daemon/processors/install_deps_processor.py +431 -0
- fbuild/daemon/processors/locking_processor.py +777 -0
- fbuild/daemon/processors/monitor_processor.py +285 -0
- fbuild/daemon/request_processor.py +457 -0
- fbuild/daemon/shared_serial.py +819 -0
- fbuild/daemon/status_manager.py +238 -0
- fbuild/daemon/subprocess_manager.py +316 -0
- fbuild/deploy/__init__.py +21 -0
- fbuild/deploy/deployer.py +67 -0
- fbuild/deploy/deployer_esp32.py +310 -0
- fbuild/deploy/docker_utils.py +315 -0
- fbuild/deploy/monitor.py +519 -0
- fbuild/deploy/qemu_runner.py +603 -0
- fbuild/interrupt_utils.py +34 -0
- fbuild/ledger/__init__.py +52 -0
- fbuild/ledger/board_ledger.py +560 -0
- fbuild/output.py +352 -0
- fbuild/packages/__init__.py +66 -0
- fbuild/packages/archive_utils.py +1098 -0
- fbuild/packages/arduino_core.py +412 -0
- fbuild/packages/cache.py +256 -0
- fbuild/packages/concurrent_manager.py +510 -0
- fbuild/packages/downloader.py +518 -0
- fbuild/packages/fingerprint.py +423 -0
- fbuild/packages/framework_esp32.py +538 -0
- fbuild/packages/framework_rp2040.py +349 -0
- fbuild/packages/framework_stm32.py +459 -0
- fbuild/packages/framework_teensy.py +346 -0
- fbuild/packages/github_utils.py +96 -0
- fbuild/packages/header_trampoline_cache.py +394 -0
- fbuild/packages/library_compiler.py +203 -0
- fbuild/packages/library_manager.py +549 -0
- fbuild/packages/library_manager_esp32.py +725 -0
- fbuild/packages/package.py +163 -0
- fbuild/packages/platform_esp32.py +383 -0
- fbuild/packages/platform_rp2040.py +400 -0
- fbuild/packages/platform_stm32.py +581 -0
- fbuild/packages/platform_teensy.py +312 -0
- fbuild/packages/platform_utils.py +131 -0
- fbuild/packages/platformio_registry.py +369 -0
- fbuild/packages/sdk_utils.py +231 -0
- fbuild/packages/toolchain.py +436 -0
- fbuild/packages/toolchain_binaries.py +196 -0
- fbuild/packages/toolchain_esp32.py +489 -0
- fbuild/packages/toolchain_metadata.py +185 -0
- fbuild/packages/toolchain_rp2040.py +436 -0
- fbuild/packages/toolchain_stm32.py +417 -0
- fbuild/packages/toolchain_teensy.py +404 -0
- fbuild/platform_configs/esp32.json +150 -0
- fbuild/platform_configs/esp32c2.json +144 -0
- fbuild/platform_configs/esp32c3.json +143 -0
- fbuild/platform_configs/esp32c5.json +151 -0
- fbuild/platform_configs/esp32c6.json +151 -0
- fbuild/platform_configs/esp32p4.json +149 -0
- fbuild/platform_configs/esp32s3.json +151 -0
- fbuild/platform_configs/imxrt1062.json +56 -0
- fbuild/platform_configs/rp2040.json +70 -0
- fbuild/platform_configs/rp2350.json +76 -0
- fbuild/platform_configs/stm32f1.json +59 -0
- fbuild/platform_configs/stm32f4.json +63 -0
- fbuild/py.typed +0 -0
- fbuild-1.2.8.dist-info/METADATA +468 -0
- fbuild-1.2.8.dist-info/RECORD +121 -0
- fbuild-1.2.8.dist-info/WHEEL +5 -0
- fbuild-1.2.8.dist-info/entry_points.txt +5 -0
- fbuild-1.2.8.dist-info/licenses/LICENSE +21 -0
- fbuild-1.2.8.dist-info/top_level.txt +2 -0
- fbuild_lint/__init__.py +0 -0
- fbuild_lint/ruff_plugins/__init__.py +0 -0
- fbuild_lint/ruff_plugins/keyboard_interrupt_checker.py +158 -0
|
@@ -0,0 +1,366 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Process Tracking and Cleanup Module
|
|
3
|
+
|
|
4
|
+
This module manages tracking of build/deploy/monitor processes and their entire
|
|
5
|
+
process trees. When client processes die, orphaned process trees are automatically
|
|
6
|
+
cleaned up to prevent resource leaks and file locking issues.
|
|
7
|
+
|
|
8
|
+
Key features:
|
|
9
|
+
- Track root process + all children (recursive)
|
|
10
|
+
- Detect dead client processes
|
|
11
|
+
- Kill entire process trees recursively
|
|
12
|
+
- Thread-safe operations for daemon use
|
|
13
|
+
"""
|
|
14
|
+
|
|
15
|
+
import _thread
|
|
16
|
+
import json
|
|
17
|
+
import logging
|
|
18
|
+
import threading
|
|
19
|
+
import time
|
|
20
|
+
from dataclasses import asdict, dataclass, field
|
|
21
|
+
from pathlib import Path
|
|
22
|
+
from typing import Any
|
|
23
|
+
|
|
24
|
+
import psutil
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass
|
|
28
|
+
class ProcessTreeInfo:
|
|
29
|
+
"""Information about a tracked process tree.
|
|
30
|
+
|
|
31
|
+
Attributes:
|
|
32
|
+
client_pid: PID of the client that initiated the operation
|
|
33
|
+
root_pid: PID of the root process
|
|
34
|
+
child_pids: List of all child PIDs (updated periodically)
|
|
35
|
+
request_id: Request ID
|
|
36
|
+
project_dir: Project directory
|
|
37
|
+
operation_type: Type of operation (deploy/monitor)
|
|
38
|
+
port: Serial port (if applicable)
|
|
39
|
+
started_at: Unix timestamp when tracking started
|
|
40
|
+
last_updated: Unix timestamp of last child PID refresh
|
|
41
|
+
"""
|
|
42
|
+
|
|
43
|
+
client_pid: int
|
|
44
|
+
root_pid: int
|
|
45
|
+
child_pids: list[int] = field(default_factory=list)
|
|
46
|
+
request_id: str = ""
|
|
47
|
+
project_dir: str = ""
|
|
48
|
+
operation_type: str = ""
|
|
49
|
+
port: str | None = None
|
|
50
|
+
started_at: float = field(default_factory=time.time)
|
|
51
|
+
last_updated: float = field(default_factory=time.time)
|
|
52
|
+
|
|
53
|
+
def to_dict(self) -> dict[str, Any]:
|
|
54
|
+
"""Convert to dictionary for JSON serialization."""
|
|
55
|
+
return asdict(self)
|
|
56
|
+
|
|
57
|
+
@classmethod
|
|
58
|
+
def from_dict(cls, data: dict[str, Any]) -> "ProcessTreeInfo":
|
|
59
|
+
"""Create ProcessTreeInfo from dictionary."""
|
|
60
|
+
return cls(
|
|
61
|
+
client_pid=data["client_pid"],
|
|
62
|
+
root_pid=data["root_pid"],
|
|
63
|
+
child_pids=data.get("child_pids", []),
|
|
64
|
+
request_id=data.get("request_id", ""),
|
|
65
|
+
project_dir=data.get("project_dir", ""),
|
|
66
|
+
operation_type=data.get("operation_type", ""),
|
|
67
|
+
port=data.get("port"),
|
|
68
|
+
started_at=data.get("started_at", time.time()),
|
|
69
|
+
last_updated=data.get("last_updated", time.time()),
|
|
70
|
+
)
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
class ProcessTracker:
|
|
74
|
+
"""Thread-safe tracker for process trees.
|
|
75
|
+
|
|
76
|
+
This class maintains a registry of active processes and provides
|
|
77
|
+
methods to detect and cleanup orphaned process trees.
|
|
78
|
+
"""
|
|
79
|
+
|
|
80
|
+
def __init__(self, registry_file: Path):
|
|
81
|
+
"""Initialize the tracker.
|
|
82
|
+
|
|
83
|
+
Args:
|
|
84
|
+
registry_file: Path to JSON file for persisting process trees
|
|
85
|
+
"""
|
|
86
|
+
self.registry_file = registry_file
|
|
87
|
+
self.lock = threading.Lock()
|
|
88
|
+
self._registry: dict[int, ProcessTreeInfo] = {}
|
|
89
|
+
self._load_registry()
|
|
90
|
+
logging.info(f"ProcessTracker initialized with {len(self._registry)} tracked processes")
|
|
91
|
+
|
|
92
|
+
def _load_registry(self) -> None:
|
|
93
|
+
"""Load registry from disk (if it exists)."""
|
|
94
|
+
if not self.registry_file.exists():
|
|
95
|
+
return
|
|
96
|
+
|
|
97
|
+
try:
|
|
98
|
+
with open(self.registry_file) as f:
|
|
99
|
+
data = json.load(f)
|
|
100
|
+
|
|
101
|
+
with self.lock:
|
|
102
|
+
self._registry = {int(client_pid): ProcessTreeInfo.from_dict(info) for client_pid, info in data.items()}
|
|
103
|
+
|
|
104
|
+
logging.info(f"Loaded {len(self._registry)} process trees from registry")
|
|
105
|
+
except KeyboardInterrupt:
|
|
106
|
+
_thread.interrupt_main()
|
|
107
|
+
raise
|
|
108
|
+
except Exception as e:
|
|
109
|
+
logging.warning(f"Failed to load process registry: {e}")
|
|
110
|
+
self._registry = {}
|
|
111
|
+
|
|
112
|
+
def _save_registry(self) -> None:
|
|
113
|
+
"""Save registry to disk atomically."""
|
|
114
|
+
try:
|
|
115
|
+
# Prepare data for serialization
|
|
116
|
+
data = {str(client_pid): info.to_dict() for client_pid, info in self._registry.items()}
|
|
117
|
+
|
|
118
|
+
# Atomic write
|
|
119
|
+
temp_file = self.registry_file.with_suffix(".tmp")
|
|
120
|
+
with open(temp_file, "w") as f:
|
|
121
|
+
json.dump(data, f, indent=2)
|
|
122
|
+
|
|
123
|
+
temp_file.replace(self.registry_file)
|
|
124
|
+
|
|
125
|
+
except KeyboardInterrupt:
|
|
126
|
+
_thread.interrupt_main()
|
|
127
|
+
raise
|
|
128
|
+
except Exception as e:
|
|
129
|
+
logging.error(f"Failed to save process registry: {e}")
|
|
130
|
+
|
|
131
|
+
def register_process(
|
|
132
|
+
self,
|
|
133
|
+
client_pid: int,
|
|
134
|
+
root_pid: int,
|
|
135
|
+
request_id: str = "",
|
|
136
|
+
project_dir: str = "",
|
|
137
|
+
operation_type: str = "",
|
|
138
|
+
port: str | None = None,
|
|
139
|
+
) -> None:
|
|
140
|
+
"""Register a new process tree.
|
|
141
|
+
|
|
142
|
+
Args:
|
|
143
|
+
client_pid: PID of client that initiated operation
|
|
144
|
+
root_pid: PID of root process
|
|
145
|
+
request_id: Request ID (optional)
|
|
146
|
+
project_dir: Project directory (optional)
|
|
147
|
+
operation_type: Type of operation (optional)
|
|
148
|
+
port: Serial port (optional)
|
|
149
|
+
"""
|
|
150
|
+
with self.lock:
|
|
151
|
+
self._registry[client_pid] = ProcessTreeInfo(
|
|
152
|
+
client_pid=client_pid,
|
|
153
|
+
root_pid=root_pid,
|
|
154
|
+
request_id=request_id,
|
|
155
|
+
project_dir=project_dir,
|
|
156
|
+
operation_type=operation_type,
|
|
157
|
+
port=port,
|
|
158
|
+
)
|
|
159
|
+
|
|
160
|
+
# Immediately refresh child PIDs
|
|
161
|
+
self._update_child_pids(client_pid)
|
|
162
|
+
|
|
163
|
+
self._save_registry()
|
|
164
|
+
logging.info(f"Registered process tree: client={client_pid}, root={root_pid}, children={len(self._registry[client_pid].child_pids)}, operation={operation_type}")
|
|
165
|
+
|
|
166
|
+
def unregister_process(self, client_pid: int) -> None:
|
|
167
|
+
"""Remove a process tree from tracking.
|
|
168
|
+
|
|
169
|
+
Args:
|
|
170
|
+
client_pid: Client PID to remove
|
|
171
|
+
"""
|
|
172
|
+
with self.lock:
|
|
173
|
+
if client_pid in self._registry:
|
|
174
|
+
info = self._registry.pop(client_pid)
|
|
175
|
+
logging.info(f"Unregistered process tree: client={client_pid}, root={info.root_pid}")
|
|
176
|
+
else:
|
|
177
|
+
logging.warning(f"Attempted to unregister unknown client PID: {client_pid}")
|
|
178
|
+
|
|
179
|
+
self._save_registry()
|
|
180
|
+
|
|
181
|
+
def _update_child_pids(self, client_pid: int) -> None:
|
|
182
|
+
"""Update child PID list for a tracked process.
|
|
183
|
+
|
|
184
|
+
This method MUST be called with self.lock held.
|
|
185
|
+
|
|
186
|
+
Args:
|
|
187
|
+
client_pid: Client PID to update
|
|
188
|
+
"""
|
|
189
|
+
if client_pid not in self._registry:
|
|
190
|
+
return
|
|
191
|
+
|
|
192
|
+
info = self._registry[client_pid]
|
|
193
|
+
|
|
194
|
+
try:
|
|
195
|
+
# Get root process
|
|
196
|
+
root_proc = psutil.Process(info.root_pid)
|
|
197
|
+
|
|
198
|
+
# Get ALL descendants recursively
|
|
199
|
+
children = root_proc.children(recursive=True)
|
|
200
|
+
info.child_pids = [child.pid for child in children]
|
|
201
|
+
info.last_updated = time.time()
|
|
202
|
+
|
|
203
|
+
except psutil.NoSuchProcess:
|
|
204
|
+
# Root process died - mark as empty
|
|
205
|
+
info.child_pids = []
|
|
206
|
+
info.last_updated = time.time()
|
|
207
|
+
except KeyboardInterrupt:
|
|
208
|
+
_thread.interrupt_main()
|
|
209
|
+
raise
|
|
210
|
+
except Exception as e:
|
|
211
|
+
logging.warning(f"Failed to update child PIDs for client={client_pid}: {e}")
|
|
212
|
+
|
|
213
|
+
def refresh_all_child_pids(self) -> None:
|
|
214
|
+
"""Refresh child PID lists for all tracked processes."""
|
|
215
|
+
with self.lock:
|
|
216
|
+
for client_pid in list(self._registry.keys()):
|
|
217
|
+
self._update_child_pids(client_pid)
|
|
218
|
+
|
|
219
|
+
self._save_registry()
|
|
220
|
+
|
|
221
|
+
def cleanup_orphaned_processes(self) -> list[int]:
|
|
222
|
+
"""Detect and kill process trees for dead clients.
|
|
223
|
+
|
|
224
|
+
Returns:
|
|
225
|
+
List of client PIDs that were cleaned up
|
|
226
|
+
"""
|
|
227
|
+
orphaned_clients = []
|
|
228
|
+
|
|
229
|
+
with self.lock:
|
|
230
|
+
for client_pid, info in list(self._registry.items()):
|
|
231
|
+
# Check if client is still alive
|
|
232
|
+
if psutil.pid_exists(client_pid):
|
|
233
|
+
continue
|
|
234
|
+
|
|
235
|
+
# Client is dead - kill the entire process tree
|
|
236
|
+
logging.info(f"Client {client_pid} is dead, cleaning up process tree (root={info.root_pid}, children={len(info.child_pids)}, operation={info.operation_type})")
|
|
237
|
+
|
|
238
|
+
killed_count = self._kill_process_tree(info)
|
|
239
|
+
orphaned_clients.append(client_pid)
|
|
240
|
+
|
|
241
|
+
logging.info(f"Cleaned up {killed_count} processes for dead client {client_pid}")
|
|
242
|
+
|
|
243
|
+
# Remove from registry
|
|
244
|
+
del self._registry[client_pid]
|
|
245
|
+
|
|
246
|
+
if orphaned_clients:
|
|
247
|
+
logging.info(f"Orphaned clients cleaned up: {orphaned_clients}")
|
|
248
|
+
self._save_registry()
|
|
249
|
+
|
|
250
|
+
return orphaned_clients
|
|
251
|
+
|
|
252
|
+
def _kill_process_tree(self, info: ProcessTreeInfo) -> int:
|
|
253
|
+
"""Kill an entire process tree (root + all children).
|
|
254
|
+
|
|
255
|
+
This method MUST be called with self.lock held.
|
|
256
|
+
|
|
257
|
+
Args:
|
|
258
|
+
info: ProcessTreeInfo containing root and child PIDs
|
|
259
|
+
|
|
260
|
+
Returns:
|
|
261
|
+
Number of processes killed
|
|
262
|
+
"""
|
|
263
|
+
killed_count = 0
|
|
264
|
+
all_pids = info.child_pids + [info.root_pid]
|
|
265
|
+
|
|
266
|
+
# Refresh child list one last time before killing
|
|
267
|
+
try:
|
|
268
|
+
root_proc = psutil.Process(info.root_pid)
|
|
269
|
+
children = root_proc.children(recursive=True)
|
|
270
|
+
all_pids = [child.pid for child in children] + [info.root_pid]
|
|
271
|
+
except KeyboardInterrupt:
|
|
272
|
+
_thread.interrupt_main()
|
|
273
|
+
raise
|
|
274
|
+
except Exception:
|
|
275
|
+
pass # Use cached PID list
|
|
276
|
+
|
|
277
|
+
# Kill children first (bottom-up to avoid orphans)
|
|
278
|
+
processes_to_kill: list[psutil.Process] = []
|
|
279
|
+
for pid in reversed(all_pids): # Reverse to kill children before parents
|
|
280
|
+
try:
|
|
281
|
+
proc = psutil.Process(pid)
|
|
282
|
+
processes_to_kill.append(proc)
|
|
283
|
+
except psutil.NoSuchProcess:
|
|
284
|
+
pass # Already dead
|
|
285
|
+
except KeyboardInterrupt:
|
|
286
|
+
_thread.interrupt_main()
|
|
287
|
+
raise
|
|
288
|
+
except Exception as e:
|
|
289
|
+
logging.warning(f"Failed to get process {pid}: {e}")
|
|
290
|
+
|
|
291
|
+
logging.info(f"Terminating {len(processes_to_kill)} processes")
|
|
292
|
+
# Terminate all processes
|
|
293
|
+
for proc in processes_to_kill:
|
|
294
|
+
try:
|
|
295
|
+
proc.terminate()
|
|
296
|
+
killed_count += 1
|
|
297
|
+
except psutil.NoSuchProcess:
|
|
298
|
+
pass # Already dead
|
|
299
|
+
except KeyboardInterrupt:
|
|
300
|
+
_thread.interrupt_main()
|
|
301
|
+
raise
|
|
302
|
+
except Exception as e:
|
|
303
|
+
logging.warning(f"Failed to terminate process {proc.pid}: {e}")
|
|
304
|
+
|
|
305
|
+
# Wait for graceful termination
|
|
306
|
+
_gone, alive = psutil.wait_procs(processes_to_kill, timeout=3)
|
|
307
|
+
|
|
308
|
+
# Force kill any stragglers
|
|
309
|
+
if alive:
|
|
310
|
+
logging.warning(f"Force killing {len(alive)} stubborn processes")
|
|
311
|
+
for proc in alive:
|
|
312
|
+
try:
|
|
313
|
+
proc.kill()
|
|
314
|
+
logging.warning(f"Force killed stubborn process {proc.pid}")
|
|
315
|
+
except KeyboardInterrupt:
|
|
316
|
+
_thread.interrupt_main()
|
|
317
|
+
raise
|
|
318
|
+
except Exception as e:
|
|
319
|
+
logging.warning(f"Failed to force kill process {proc.pid}: {e}")
|
|
320
|
+
|
|
321
|
+
return killed_count
|
|
322
|
+
|
|
323
|
+
def get_tracked_clients(self) -> list[int]:
|
|
324
|
+
"""Get list of all tracked client PIDs.
|
|
325
|
+
|
|
326
|
+
Returns:
|
|
327
|
+
List of client PIDs currently being tracked
|
|
328
|
+
"""
|
|
329
|
+
with self.lock:
|
|
330
|
+
return list(self._registry.keys())
|
|
331
|
+
|
|
332
|
+
def get_process_info(self, client_pid: int) -> ProcessTreeInfo | None:
|
|
333
|
+
"""Get process tree info for a client.
|
|
334
|
+
|
|
335
|
+
Args:
|
|
336
|
+
client_pid: Client PID to query
|
|
337
|
+
|
|
338
|
+
Returns:
|
|
339
|
+
ProcessTreeInfo if found, None otherwise
|
|
340
|
+
"""
|
|
341
|
+
with self.lock:
|
|
342
|
+
return self._registry.get(client_pid)
|
|
343
|
+
|
|
344
|
+
def get_processes_by_port(self, port: str) -> list[ProcessTreeInfo]:
|
|
345
|
+
"""Get all processes using a specific serial port.
|
|
346
|
+
|
|
347
|
+
Args:
|
|
348
|
+
port: Serial port to search for
|
|
349
|
+
|
|
350
|
+
Returns:
|
|
351
|
+
List of ProcessTreeInfo for processes using this port
|
|
352
|
+
"""
|
|
353
|
+
with self.lock:
|
|
354
|
+
return [info for info in self._registry.values() if info.port == port]
|
|
355
|
+
|
|
356
|
+
def get_processes_by_project(self, project_dir: str) -> list[ProcessTreeInfo]:
|
|
357
|
+
"""Get all processes for a specific project.
|
|
358
|
+
|
|
359
|
+
Args:
|
|
360
|
+
project_dir: Project directory to search for
|
|
361
|
+
|
|
362
|
+
Returns:
|
|
363
|
+
List of ProcessTreeInfo for processes in this project
|
|
364
|
+
"""
|
|
365
|
+
with self.lock:
|
|
366
|
+
return [info for info in self._registry.values() if info.project_dir == project_dir]
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Daemon Request Processors - Concrete implementations of request handling.
|
|
3
|
+
|
|
4
|
+
This package contains concrete processor implementations for different
|
|
5
|
+
operation types (build, deploy, monitor, install_dependencies).
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from fbuild.daemon.processors.build_processor import BuildRequestProcessor
|
|
9
|
+
from fbuild.daemon.processors.deploy_processor import DeployRequestProcessor
|
|
10
|
+
from fbuild.daemon.processors.install_deps_processor import InstallDependenciesProcessor
|
|
11
|
+
from fbuild.daemon.processors.monitor_processor import MonitorRequestProcessor
|
|
12
|
+
|
|
13
|
+
# Public API of the processors package: one concrete RequestProcessor
# implementation per daemon operation type.
__all__ = [
    "BuildRequestProcessor",
    "DeployRequestProcessor",
    "InstallDependenciesProcessor",
    "MonitorRequestProcessor",
]
|
|
@@ -0,0 +1,248 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Build Request Processor - Handles build operations.
|
|
3
|
+
|
|
4
|
+
This module implements the BuildRequestProcessor which executes build
|
|
5
|
+
operations for Arduino/ESP32 projects using the appropriate orchestrator.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import importlib
|
|
9
|
+
import logging
|
|
10
|
+
import sys
|
|
11
|
+
from pathlib import Path
|
|
12
|
+
from typing import TYPE_CHECKING
|
|
13
|
+
|
|
14
|
+
from fbuild.daemon.messages import OperationType
|
|
15
|
+
from fbuild.daemon.request_processor import RequestProcessor
|
|
16
|
+
|
|
17
|
+
if TYPE_CHECKING:
|
|
18
|
+
from fbuild.daemon.daemon_context import DaemonContext
|
|
19
|
+
from fbuild.daemon.messages import BuildRequest
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class BuildRequestProcessor(RequestProcessor):
    """Processor for build requests.

    This processor handles compilation of Arduino/ESP32 projects. It:
    1. Reloads build modules to pick up code changes (for development)
    2. Creates the appropriate orchestrator (AVR or ESP32)
    3. Executes the build with the specified settings
    4. Returns success/failure based on build result

    Example:
        >>> processor = BuildRequestProcessor()
        >>> success = processor.process_request(build_request, daemon_context)
    """

    def get_operation_type(self) -> OperationType:
        """Return BUILD operation type."""
        return OperationType.BUILD

    def get_required_locks(self, request: "BuildRequest", context: "DaemonContext") -> dict[str, str]:
        """Build operations require only a project lock.

        Args:
            request: The build request
            context: The daemon context

        Returns:
            Dictionary with project lock requirement
        """
        return {"project": request.project_dir}

    def execute_operation(self, request: "BuildRequest", context: "DaemonContext") -> bool:
        """Execute the build operation.

        This is the core build logic extracted from the original
        process_build_request function. All boilerplate (locks, status
        updates, error handling) is handled by the base RequestProcessor.

        Args:
            request: The build request containing project_dir, environment, etc.
            context: The daemon context with all subsystems

        Returns:
            True if build succeeded, False otherwise
        """
        logging.info(f"Building project: {request.project_dir}")

        # Reload build modules to pick up code changes
        # This is critical for development on Windows where daemon caching
        # prevents testing code changes
        self._reload_build_modules()

        # Detect platform type from platformio.ini to select appropriate orchestrator
        try:
            # Imported locally so the reloaded module version is used.
            from fbuild.config.ini_parser import PlatformIOConfig

            project_path = Path(request.project_dir)
            ini_path = project_path / "platformio.ini"

            if not ini_path.exists():
                logging.error(f"platformio.ini not found at {ini_path}")
                return False

            config = PlatformIOConfig(ini_path)
            env_config = config.get_env_config(request.environment)
            platform = env_config.get("platform", "").lower()

            logging.info(f"Detected platform: {platform}")

        except KeyboardInterrupt as ke:
            from fbuild.interrupt_utils import handle_keyboard_interrupt_properly

            handle_keyboard_interrupt_properly(ke)
            raise  # Never reached, but satisfies type checker
        except Exception as e:
            logging.error(f"Failed to parse platformio.ini: {e}")
            return False

        # Normalize platform name (handle various platform specification formats)
        # URL formats: "https://.../platform-espressif32.zip" -> "espressif32"
        # PlatformIO format: "platformio/espressif32" -> "espressif32"
        # Direct names: "atmelavr", "espressif32", "ststm32", etc.
        platform_name = platform
        if "platform-espressif32" in platform or "platformio/espressif32" in platform or platform == "espressif32":
            platform_name = "espressif32"
        elif "platform-atmelavr" in platform or "platformio/atmelavr" in platform or platform == "atmelavr":
            platform_name = "atmelavr"
        elif "platform-raspberrypi" in platform or "platformio/raspberrypi" in platform or platform == "raspberrypi":
            platform_name = "raspberrypi"
        elif "platform-ststm32" in platform or "platformio/ststm32" in platform or platform == "ststm32":
            platform_name = "ststm32"

        logging.info(f"Normalized platform: {platform_name}")

        # Select orchestrator based on platform
        if platform_name == "atmelavr":
            module_name = "fbuild.build.orchestrator_avr"
            class_name = "BuildOrchestratorAVR"
        elif platform_name == "espressif32":
            module_name = "fbuild.build.orchestrator_esp32"
            class_name = "OrchestratorESP32"
        elif platform_name == "raspberrypi":
            module_name = "fbuild.build.orchestrator_rp2040"
            class_name = "OrchestratorRP2040"
        elif platform_name == "ststm32":
            module_name = "fbuild.build.orchestrator_stm32"
            class_name = "OrchestratorSTM32"
        else:
            logging.error(f"Unsupported platform: {platform_name}")
            return False

        # Get fresh orchestrator class after module reload
        # Using direct import would use cached version
        try:
            orchestrator_class = getattr(sys.modules[module_name], class_name)
        except (KeyError, AttributeError) as e:
            # KeyError: module missing from sys.modules (reload/import failed);
            # AttributeError: class name absent from the module.
            logging.error(f"Failed to get {class_name} from {module_name}: {e}")
            return False

        # Create orchestrator and execute build
        # Create a Cache instance for package management
        from fbuild.packages.cache import Cache

        cache = Cache(project_dir=Path(request.project_dir))

        # Initialize orchestrator with cache (ESP32 requires it, AVR accepts it)
        logging.debug(f"[BUILD_PROCESSOR] Initializing {class_name} with cache={cache}, verbose={request.verbose}")
        logging.debug(f"[BUILD_PROCESSOR] orchestrator_class={orchestrator_class}, module={module_name}")
        orchestrator = orchestrator_class(cache=cache, verbose=request.verbose)
        logging.debug(f"[BUILD_PROCESSOR] orchestrator created successfully: {orchestrator}")
        build_result = orchestrator.build(
            project_dir=Path(request.project_dir),
            env_name=request.environment,
            clean=request.clean_build,
            verbose=request.verbose,
        )

        if not build_result.success:
            logging.error(f"Build failed: {build_result.message}")
            return False

        logging.info("Build completed successfully")
        return True

    def _reload_build_modules(self) -> None:
        """Reload build-related modules to pick up code changes.

        This is critical for development on Windows where daemon caching prevents
        testing code changes. Reloads key modules that are frequently modified.

        Order matters: reload dependencies first, then modules that import them.
        """
        modules_to_reload = [
            # Core utilities and packages (reload first - no dependencies)
            "fbuild.packages.cache",
            "fbuild.packages.downloader",
            "fbuild.packages.archive_utils",
            "fbuild.packages.platformio_registry",
            "fbuild.packages.toolchain",
            "fbuild.packages.toolchain_esp32",
            "fbuild.packages.toolchain_teensy",
            "fbuild.packages.toolchain_rp2040",
            "fbuild.packages.toolchain_stm32",
            "fbuild.packages.arduino_core",
            "fbuild.packages.framework_esp32",
            "fbuild.packages.framework_teensy",
            "fbuild.packages.framework_rp2040",
            "fbuild.packages.framework_stm32",
            "fbuild.packages.platform_esp32",
            "fbuild.packages.platform_teensy",
            "fbuild.packages.platform_rp2040",
            "fbuild.packages.platform_stm32",
            "fbuild.packages.library_manager",
            "fbuild.packages.library_manager_esp32",
            # Config system (reload early - needed to detect platform type)
            "fbuild.config.ini_parser",
            "fbuild.config.board_config",
            "fbuild.config.board_loader",
            # Build system (reload second - depends on packages)
            "fbuild.build.archive_creator",
            "fbuild.build.flag_builder",
            "fbuild.build.compiler",
            "fbuild.build.configurable_compiler",
            "fbuild.build.linker",
            "fbuild.build.configurable_linker",
            "fbuild.build.source_scanner",
            "fbuild.build.compilation_executor",
            "fbuild.build.build_state",
            "fbuild.build.build_info_generator",
            "fbuild.build.build_utils",
            # Orchestrators (reload third - depends on build system)
            "fbuild.build.orchestrator",
            "fbuild.build.orchestrator_avr",
            "fbuild.build.orchestrator_esp32",
            "fbuild.build.orchestrator_teensy",
            "fbuild.build.orchestrator_rp2040",
            "fbuild.build.orchestrator_stm32",
            # Daemon processors (reload to pick up processor code changes)
            "fbuild.daemon.processors.build_processor",
            # Deploy and monitor (reload with build system)
            "fbuild.deploy.deployer",
            "fbuild.deploy.deployer_esp32",
            "fbuild.deploy.monitor",
            # Top-level module packages (reload last to update __init__.py imports)
            "fbuild.build",
            "fbuild.deploy",
        ]

        reloaded_count = 0
        for module_name in modules_to_reload:
            try:
                if module_name in sys.modules:
                    # Module already loaded - reload it to pick up changes
                    importlib.reload(sys.modules[module_name])
                    reloaded_count += 1
                else:
                    # Module not loaded yet - import it for the first time
                    __import__(module_name)
                    reloaded_count += 1
            except KeyboardInterrupt as ke:
                from fbuild.interrupt_utils import handle_keyboard_interrupt_properly

                handle_keyboard_interrupt_properly(ke)
                # NOTE(review): unlike the handler in execute_operation, no
                # `raise` follows here - presumably handle_keyboard_interrupt_properly
                # never returns; confirm, otherwise the loop would continue.
            except Exception as e:
                # Best-effort: a module that fails to (re)load is logged and skipped.
                logging.warning(f"Failed to reload/import module {module_name}: {e}")

        if reloaded_count > 0:
            logging.info(f"Loaded/reloaded {reloaded_count} build modules")