fbuild 1.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121)
  1. fbuild/__init__.py +390 -0
  2. fbuild/assets/example.txt +1 -0
  3. fbuild/build/__init__.py +117 -0
  4. fbuild/build/archive_creator.py +186 -0
  5. fbuild/build/binary_generator.py +444 -0
  6. fbuild/build/build_component_factory.py +131 -0
  7. fbuild/build/build_info_generator.py +624 -0
  8. fbuild/build/build_state.py +325 -0
  9. fbuild/build/build_utils.py +93 -0
  10. fbuild/build/compilation_executor.py +422 -0
  11. fbuild/build/compiler.py +165 -0
  12. fbuild/build/compiler_avr.py +574 -0
  13. fbuild/build/configurable_compiler.py +664 -0
  14. fbuild/build/configurable_linker.py +637 -0
  15. fbuild/build/flag_builder.py +214 -0
  16. fbuild/build/library_dependency_processor.py +185 -0
  17. fbuild/build/linker.py +708 -0
  18. fbuild/build/orchestrator.py +67 -0
  19. fbuild/build/orchestrator_avr.py +651 -0
  20. fbuild/build/orchestrator_esp32.py +878 -0
  21. fbuild/build/orchestrator_rp2040.py +719 -0
  22. fbuild/build/orchestrator_stm32.py +696 -0
  23. fbuild/build/orchestrator_teensy.py +580 -0
  24. fbuild/build/source_compilation_orchestrator.py +218 -0
  25. fbuild/build/source_scanner.py +516 -0
  26. fbuild/cli.py +717 -0
  27. fbuild/cli_utils.py +314 -0
  28. fbuild/config/__init__.py +16 -0
  29. fbuild/config/board_config.py +542 -0
  30. fbuild/config/board_loader.py +92 -0
  31. fbuild/config/ini_parser.py +369 -0
  32. fbuild/config/mcu_specs.py +88 -0
  33. fbuild/daemon/__init__.py +42 -0
  34. fbuild/daemon/async_client.py +531 -0
  35. fbuild/daemon/client.py +1505 -0
  36. fbuild/daemon/compilation_queue.py +293 -0
  37. fbuild/daemon/configuration_lock.py +865 -0
  38. fbuild/daemon/daemon.py +585 -0
  39. fbuild/daemon/daemon_context.py +293 -0
  40. fbuild/daemon/error_collector.py +263 -0
  41. fbuild/daemon/file_cache.py +332 -0
  42. fbuild/daemon/firmware_ledger.py +546 -0
  43. fbuild/daemon/lock_manager.py +508 -0
  44. fbuild/daemon/logging_utils.py +149 -0
  45. fbuild/daemon/messages.py +957 -0
  46. fbuild/daemon/operation_registry.py +288 -0
  47. fbuild/daemon/port_state_manager.py +249 -0
  48. fbuild/daemon/process_tracker.py +366 -0
  49. fbuild/daemon/processors/__init__.py +18 -0
  50. fbuild/daemon/processors/build_processor.py +248 -0
  51. fbuild/daemon/processors/deploy_processor.py +664 -0
  52. fbuild/daemon/processors/install_deps_processor.py +431 -0
  53. fbuild/daemon/processors/locking_processor.py +777 -0
  54. fbuild/daemon/processors/monitor_processor.py +285 -0
  55. fbuild/daemon/request_processor.py +457 -0
  56. fbuild/daemon/shared_serial.py +819 -0
  57. fbuild/daemon/status_manager.py +238 -0
  58. fbuild/daemon/subprocess_manager.py +316 -0
  59. fbuild/deploy/__init__.py +21 -0
  60. fbuild/deploy/deployer.py +67 -0
  61. fbuild/deploy/deployer_esp32.py +310 -0
  62. fbuild/deploy/docker_utils.py +315 -0
  63. fbuild/deploy/monitor.py +519 -0
  64. fbuild/deploy/qemu_runner.py +603 -0
  65. fbuild/interrupt_utils.py +34 -0
  66. fbuild/ledger/__init__.py +52 -0
  67. fbuild/ledger/board_ledger.py +560 -0
  68. fbuild/output.py +352 -0
  69. fbuild/packages/__init__.py +66 -0
  70. fbuild/packages/archive_utils.py +1098 -0
  71. fbuild/packages/arduino_core.py +412 -0
  72. fbuild/packages/cache.py +256 -0
  73. fbuild/packages/concurrent_manager.py +510 -0
  74. fbuild/packages/downloader.py +518 -0
  75. fbuild/packages/fingerprint.py +423 -0
  76. fbuild/packages/framework_esp32.py +538 -0
  77. fbuild/packages/framework_rp2040.py +349 -0
  78. fbuild/packages/framework_stm32.py +459 -0
  79. fbuild/packages/framework_teensy.py +346 -0
  80. fbuild/packages/github_utils.py +96 -0
  81. fbuild/packages/header_trampoline_cache.py +394 -0
  82. fbuild/packages/library_compiler.py +203 -0
  83. fbuild/packages/library_manager.py +549 -0
  84. fbuild/packages/library_manager_esp32.py +725 -0
  85. fbuild/packages/package.py +163 -0
  86. fbuild/packages/platform_esp32.py +383 -0
  87. fbuild/packages/platform_rp2040.py +400 -0
  88. fbuild/packages/platform_stm32.py +581 -0
  89. fbuild/packages/platform_teensy.py +312 -0
  90. fbuild/packages/platform_utils.py +131 -0
  91. fbuild/packages/platformio_registry.py +369 -0
  92. fbuild/packages/sdk_utils.py +231 -0
  93. fbuild/packages/toolchain.py +436 -0
  94. fbuild/packages/toolchain_binaries.py +196 -0
  95. fbuild/packages/toolchain_esp32.py +489 -0
  96. fbuild/packages/toolchain_metadata.py +185 -0
  97. fbuild/packages/toolchain_rp2040.py +436 -0
  98. fbuild/packages/toolchain_stm32.py +417 -0
  99. fbuild/packages/toolchain_teensy.py +404 -0
  100. fbuild/platform_configs/esp32.json +150 -0
  101. fbuild/platform_configs/esp32c2.json +144 -0
  102. fbuild/platform_configs/esp32c3.json +143 -0
  103. fbuild/platform_configs/esp32c5.json +151 -0
  104. fbuild/platform_configs/esp32c6.json +151 -0
  105. fbuild/platform_configs/esp32p4.json +149 -0
  106. fbuild/platform_configs/esp32s3.json +151 -0
  107. fbuild/platform_configs/imxrt1062.json +56 -0
  108. fbuild/platform_configs/rp2040.json +70 -0
  109. fbuild/platform_configs/rp2350.json +76 -0
  110. fbuild/platform_configs/stm32f1.json +59 -0
  111. fbuild/platform_configs/stm32f4.json +63 -0
  112. fbuild/py.typed +0 -0
  113. fbuild-1.2.8.dist-info/METADATA +468 -0
  114. fbuild-1.2.8.dist-info/RECORD +121 -0
  115. fbuild-1.2.8.dist-info/WHEEL +5 -0
  116. fbuild-1.2.8.dist-info/entry_points.txt +5 -0
  117. fbuild-1.2.8.dist-info/licenses/LICENSE +21 -0
  118. fbuild-1.2.8.dist-info/top_level.txt +2 -0
  119. fbuild_lint/__init__.py +0 -0
  120. fbuild_lint/ruff_plugins/__init__.py +0 -0
  121. fbuild_lint/ruff_plugins/keyboard_interrupt_checker.py +158 -0
@@ -0,0 +1,238 @@
1
+ """
2
+ Status Manager - Centralized status file management for daemon operations.
3
+
4
+ This module provides the StatusManager class which handles all status file
5
+ I/O operations with proper locking and atomic writes. It eliminates the
6
+ scattered update_status() calls throughout daemon.py and provides a clean
7
+ API for status management.
8
+ """
9
+
10
+ import json
11
+ import logging
12
+ import threading
13
+ import time
14
+ from pathlib import Path
15
+ from typing import TYPE_CHECKING, Any
16
+
17
+ from fbuild.daemon.messages import DaemonState, DaemonStatus
18
+ from fbuild.daemon.port_state_manager import PortStateManager
19
+ from fbuild.interrupt_utils import handle_keyboard_interrupt_properly
20
+
21
+ if TYPE_CHECKING:
22
+ from fbuild.daemon.lock_manager import ResourceLockManager
23
+
24
+
25
class StatusManager:
    """Manages daemon status file operations.

    This class provides centralized management of the daemon status file,
    ensuring:
    - Atomic writes (write to temp file + rename)
    - Thread-safe operations (internal locking)
    - Consistent status structure
    - Request ID validation

    The status file is used for communication between the daemon and client,
    allowing the client to monitor the progress of operations.

    Example:
        >>> manager = StatusManager(status_file_path, daemon_pid=1234)
        >>> manager.update_status(
        ...     DaemonState.BUILDING,
        ...     "Building firmware",
        ...     environment="esp32dev",
        ...     project_dir="/path/to/project"
        ... )
        >>> status = manager.read_status()
        >>> print(status.state)
        DaemonState.BUILDING
    """

    def __init__(
        self,
        status_file: Path,
        daemon_pid: int,
        daemon_started_at: float | None = None,
        port_state_manager: PortStateManager | None = None,
        lock_manager: "ResourceLockManager | None" = None,
    ):
        """Initialize the StatusManager.

        Args:
            status_file: Path to the status file
            daemon_pid: PID of the daemon process
            daemon_started_at: Timestamp when daemon started (defaults to now)
            port_state_manager: Optional PortStateManager for including port state in status
            lock_manager: Optional ResourceLockManager for including lock state in status
        """
        self.status_file = status_file
        self.daemon_pid = daemon_pid
        self.daemon_started_at = daemon_started_at if daemon_started_at is not None else time.time()
        # Guards both the status file I/O and the operation_in_progress flag.
        self._lock = threading.Lock()
        self._operation_in_progress = False
        self._port_state_manager = port_state_manager
        self._lock_manager = lock_manager

        # Ensure parent directory exists
        self.status_file.parent.mkdir(parents=True, exist_ok=True)

    def update_status(
        self,
        state: DaemonState,
        message: str,
        operation_in_progress: bool | None = None,
        **kwargs: Any,
    ) -> None:
        """Update the status file with current daemon state.

        This method is thread-safe and performs atomic writes to prevent
        corruption during concurrent access.

        Args:
            state: DaemonState enum value
            message: Human-readable status message
            operation_in_progress: Whether an operation is in progress (None = use current value)
            **kwargs: Additional fields to include in status (e.g., environment, project_dir)

        Example:
            >>> manager.update_status(
            ...     DaemonState.BUILDING,
            ...     "Building firmware",
            ...     environment="esp32dev",
            ...     project_dir="/path/to/project",
            ...     request_id="build_1234567890",
            ... )
        """
        with self._lock:
            # Update internal operation state if provided
            if operation_in_progress is not None:
                self._operation_in_progress = operation_in_progress

            # Get port state summary if port_state_manager is available
            ports_summary: dict[str, Any] = {}
            if self._port_state_manager is not None:
                ports_summary = self._port_state_manager.get_ports_summary()

            # Get lock state summary if lock_manager is available
            locks_summary: dict[str, Any] = {}
            if self._lock_manager is not None:
                locks_summary = self._lock_manager.get_lock_details()

            # Create typed DaemonStatus object
            status_obj = DaemonStatus(
                state=state,
                message=message,
                updated_at=time.time(),
                daemon_pid=self.daemon_pid,
                daemon_started_at=self.daemon_started_at,
                operation_in_progress=self._operation_in_progress,
                ports=ports_summary,
                locks=locks_summary,
                **kwargs,
            )

            logging.debug(f"Writing status to file (additional fields: {len(kwargs)})")
            self._write_status_atomic(status_obj.to_dict())

    def read_status(self) -> DaemonStatus:
        """Read and parse the status file.

        Returns:
            DaemonStatus object with current daemon state

        If the file doesn't exist or is corrupted, returns a default status
        indicating the daemon is idle.
        """
        with self._lock:
            if not self.status_file.exists():
                return self._get_default_status()

            try:
                with open(self.status_file, encoding="utf-8") as f:
                    data = json.load(f)

                status = DaemonStatus.from_dict(data)
                return status

            except KeyboardInterrupt as ke:
                handle_keyboard_interrupt_properly(ke)
                # FIX: the handler is expected to re-raise, but if it ever
                # returned, this method would previously fall through and
                # implicitly return None, violating the declared return
                # type. Re-raise explicitly so that can never happen.
                raise
            except (json.JSONDecodeError, ValueError) as e:
                logging.warning(f"Corrupted status file detected: {e}")
                logging.warning("Creating fresh status file")

                # Write fresh status file
                default_status = self._get_default_status()
                self._write_status_atomic(default_status.to_dict())
                return default_status

            except Exception as e:
                logging.error(f"Unexpected error reading status file: {e}")
                default_status = self._get_default_status()
                self._write_status_atomic(default_status.to_dict())
                return default_status

    def set_operation_in_progress(self, in_progress: bool) -> None:
        """Set the operation_in_progress flag.

        This is used to track whether the daemon is currently executing
        an operation. It's typically set to True when starting an operation
        and False when completing or failing.

        Args:
            in_progress: Whether an operation is in progress
        """
        with self._lock:
            self._operation_in_progress = in_progress

    def get_operation_in_progress(self) -> bool:
        """Get the current operation_in_progress flag.

        Returns:
            True if an operation is in progress, False otherwise
        """
        with self._lock:
            return self._operation_in_progress

    def _write_status_atomic(self, status: dict[str, Any]) -> None:
        """Write status file atomically to prevent corruption during writes.

        This method writes to a temporary file first, then atomically renames
        it to the actual status file. This ensures the status file is never
        in a partially-written state.

        Note: callers must already hold self._lock; this method does not
        acquire it itself.

        Args:
            status: Status dictionary to write
        """
        temp_file = self.status_file.with_suffix(".tmp")
        logging.debug(f"Using temp file: {temp_file}")

        try:
            logging.debug(f"Writing JSON to temp file ({len(status)} keys)...")
            with open(temp_file, "w", encoding="utf-8") as f:
                json.dump(status, f, indent=2)
            # Atomic rename
            temp_file.replace(self.status_file)

        except KeyboardInterrupt:  # noqa: KBI002
            logging.warning("KeyboardInterrupt during status file write, cleaning up temp file")
            temp_file.unlink(missing_ok=True)
            raise
        except Exception as e:
            # Best-effort: a failed status write is logged but deliberately
            # not propagated, so status reporting never kills the daemon.
            logging.error(f"Failed to write status file: {e}")
            temp_file.unlink(missing_ok=True)

    def _get_default_status(self) -> DaemonStatus:
        """Get default idle status.

        Returns:
            DaemonStatus object indicating daemon is idle
        """
        return DaemonStatus(
            state=DaemonState.IDLE,
            message="Daemon is idle",
            updated_at=time.time(),
            daemon_pid=self.daemon_pid,
            daemon_started_at=self.daemon_started_at,
            operation_in_progress=False,
        )
@@ -0,0 +1,316 @@
1
+ """Centralized subprocess execution manager for daemon operations.
2
+
3
+ This module provides a unified interface for executing subprocesses with tracking,
4
+ logging, and statistics. All subprocess calls should go through this manager for
5
+ consistent error handling and monitoring.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import logging
11
+ import subprocess
12
+ import threading
13
+ import time
14
+ from dataclasses import dataclass
15
+ from pathlib import Path
16
+ from typing import Any, Optional
17
+
18
+ from ..interrupt_utils import handle_keyboard_interrupt_properly
19
+
20
+ logger = logging.getLogger(__name__)
21
+
22
+
23
@dataclass
class SubprocessExecution:
    """Single subprocess execution with full tracking.

    Records the command, environment, timing, and outcome of one
    subprocess call made through SubprocessManager.
    """

    execution_id: str  # Unique ID assigned by SubprocessManager
    command: list[str]  # Command and arguments as passed to subprocess.run
    cwd: Optional[Path]  # Working directory, or None to inherit
    env: Optional[dict[str, str]]  # Environment variables, or None to inherit
    timeout: Optional[float]  # Timeout in seconds, or None for no timeout
    returncode: Optional[int] = None  # Exit code; -1 on timeout/unexpected error
    stdout: Optional[str] = None  # Captured stdout, if capture was enabled
    stderr: Optional[str] = None  # Captured stderr, if capture was enabled
    start_time: Optional[float] = None  # time.time() when execution began
    end_time: Optional[float] = None  # time.time() when execution finished
    error: Optional[str] = None  # Error description if execution failed abnormally

    def duration(self) -> Optional[float]:
        """Calculate execution duration in seconds.

        Returns:
            Elapsed seconds, or None if either timestamp is unset.
        """
        # Compare against None explicitly: a 0.0 timestamp is falsy but is
        # still a legitimate value, so truthiness checks would be wrong.
        if self.start_time is not None and self.end_time is not None:
            return self.end_time - self.start_time
        return None

    def success(self) -> bool:
        """Check if execution was successful (exit code 0 and no recorded error)."""
        return self.returncode == 0 and self.error is None
48
+
49
+
50
class SubprocessManager:
    """Centralized subprocess execution manager.

    Provides tracking, logging, and statistics for all subprocess executions
    in the daemon. Thread-safe for concurrent use.

    All mutations of ``self.executions`` and ``self._execution_counter``
    happen under ``self.lock``; the subprocess itself runs outside the lock
    so concurrent executions do not serialize on each other.
    """

    def __init__(self, max_history: int = 1000):
        """Initialize subprocess manager.

        Args:
            max_history: Maximum number of executions to keep in history
        """
        # execution_id -> SubprocessExecution record (bounded by _cleanup_old_executions)
        self.executions: dict[str, SubprocessExecution] = {}
        # Protects executions and the counter below.
        self.lock = threading.Lock()
        self.max_history = max_history
        # Monotonic counter combined with a microsecond timestamp to build unique IDs.
        self._execution_counter = 0
        logger.info(f"SubprocessManager initialized successfully (max_history={max_history})")

    def execute(
        self,
        command: list[str],
        cwd: Optional[Path] = None,
        env: Optional[dict[str, str]] = None,
        timeout: Optional[float] = 60,
        capture_output: bool = True,
        check: bool = False,
    ) -> SubprocessExecution:
        """Execute subprocess with tracking.

        Args:
            command: Command and arguments to execute
            cwd: Working directory for subprocess
            env: Environment variables
            timeout: Timeout in seconds (None = no timeout)
            capture_output: Whether to capture stdout/stderr
            check: Whether to raise exception on non-zero exit code

        Returns:
            SubprocessExecution with results and timing information
        """
        # Generate unique execution ID
        with self.lock:
            self._execution_counter += 1
            execution_id = f"subprocess_{int(time.time() * 1000000)}_{self._execution_counter}"
            logger.debug(f"Subprocess command: {' '.join(str(c) for c in command)}")
            logger.debug(f"Environment variables: {len(env) if env else 0} vars")
            logger.debug(f"Capture output: {capture_output}")

        execution = SubprocessExecution(
            execution_id=execution_id,
            command=command,
            cwd=cwd,
            env=env,
            timeout=timeout,
            start_time=time.time(),
        )

        # Store execution (recorded before the run so it is visible while in flight)
        with self.lock:
            self.executions[execution_id] = execution
            logger.debug(f"Stored execution {execution_id} in history (total: {len(self.executions)})")
            self._cleanup_old_executions()

        # Log execution start
        cmd_str = " ".join(str(c) for c in command[:3])  # First 3 args
        if len(command) > 3:
            cmd_str += "..."
        logger.info(f"Starting subprocess {execution_id}: {cmd_str}")

        try:
            # Execute subprocess (outside the lock, so concurrent calls overlap)
            logger.debug(f"Executing subprocess.run() for {execution_id}")
            result = subprocess.run(
                command,
                cwd=cwd,
                env=env,
                capture_output=capture_output,
                text=True,
                timeout=timeout,
                check=check,
            )
            execution.returncode = result.returncode
            execution.stdout = result.stdout if capture_output else None
            execution.stderr = result.stderr if capture_output else None
            execution.end_time = time.time()

            # Log output details
            if capture_output:
                logger.debug(f"Subprocess {execution_id} stdout: {len(result.stdout) if result.stdout else 0} bytes")
                logger.debug(f"Subprocess {execution_id} stderr: {len(result.stderr) if result.stderr else 0} bytes")

            # Log result
            duration = execution.duration()
            if result.returncode == 0:
                logger.info(f"Subprocess {execution_id}: SUCCESS in {duration:.2f}s")
            else:
                logger.warning(f"Subprocess {execution_id}: FAILED with code {result.returncode} in {duration:.2f}s")
                if result.stderr and capture_output:
                    logger.debug(f"Subprocess {execution_id} stderr: {result.stderr[:200]}")

        except subprocess.TimeoutExpired as e:
            # -1 is the sentinel return code for abnormal terminations.
            logger.error(f"Subprocess {execution_id}: TIMEOUT after {timeout}s")
            execution.error = f"Timeout after {timeout}s"
            execution.returncode = -1
            execution.stderr = str(e)
            execution.end_time = time.time()

        except subprocess.CalledProcessError as e:
            # Only reachable when check=True; record the real exit code.
            logger.error(f"Subprocess {execution_id}: CalledProcessError with exit code {e.returncode}")
            execution.error = f"Process failed with exit code {e.returncode}"
            execution.returncode = e.returncode
            execution.stdout = e.stdout if capture_output else None
            execution.stderr = e.stderr if capture_output else None
            execution.end_time = time.time()

        except KeyboardInterrupt as ke:
            logger.warning(f"Subprocess {execution_id}: KeyboardInterrupt received")
            # NOTE(review): assumes handle_keyboard_interrupt_properly re-raises;
            # if it ever returned, we would fall through to the final return
            # below with end_time unset — confirm the helper's contract.
            handle_keyboard_interrupt_properly(ke)

        except Exception as e:
            logger.error(f"Subprocess {execution_id}: Unexpected exception: {e}", exc_info=True)
            execution.error = str(e)
            execution.returncode = -1
            execution.end_time = time.time()

        logger.debug(f"Returning execution result for {execution_id} (success={execution.success()})")
        return execution

    def get_execution(self, execution_id: str) -> Optional[SubprocessExecution]:
        """Get execution by ID.

        Args:
            execution_id: Execution ID to retrieve

        Returns:
            SubprocessExecution if found, None otherwise
        """
        with self.lock:
            execution = self.executions.get(execution_id)
            if execution:
                logger.debug(f"Found execution {execution_id} (success={execution.success()})")
            else:
                logger.debug(f"Execution {execution_id} not found")
            return execution

    def get_statistics(self) -> dict[str, Any]:
        """Get subprocess execution statistics.

        Returns:
            Dictionary with execution counts and statistics
        """
        with self.lock:
            total = len(self.executions)
            successful = sum(1 for e in self.executions.values() if e.success())
            failed = sum(1 for e in self.executions.values() if not e.success())

            # Calculate average duration for successful executions
            successful_durations: list[float] = []
            for e in self.executions.values():
                if e.success():
                    duration = e.duration()
                    if duration is not None:
                        successful_durations.append(duration)
            avg_duration = sum(successful_durations) / len(successful_durations) if successful_durations else 0.0

            logger.debug(f"Average execution duration: {avg_duration:.3f}s (from {len(successful_durations)} successful executions)")

            success_rate = (successful / total * 100) if total > 0 else 0.0
            logger.info(f"Subprocess statistics: {successful}/{total} successful ({success_rate:.1f}% success rate)")

            return {
                "total_executions": total,
                "successful": successful,
                "failed": failed,
                "average_duration_seconds": round(avg_duration, 3),
            }

    def get_recent_failures(self, count: int = 10) -> list[SubprocessExecution]:
        """Get most recent failed executions.

        Args:
            count: Maximum number of failures to return

        Returns:
            List of failed SubprocessExecution objects
        """
        with self.lock:
            failures = [e for e in self.executions.values() if not e.success()]
            logger.debug(f"Found {len(failures)} total failures in history")
            # Sort by end_time descending (most recent first); executions that
            # never finished (end_time None) sort as oldest via the `or 0` fallback.
            failures.sort(key=lambda e: e.end_time or 0, reverse=True)
            result = failures[:count]
            logger.debug(f"Returning {len(result)} most recent failures")
            if result:
                logger.info(f"Recent subprocess failures: {len(result)} failures found")
            return result

    def clear_history(self) -> None:
        """Clear all execution history."""
        with self.lock:
            count = len(self.executions)
            self.executions.clear()
            logger.info(f"Subprocess execution history cleared ({count} records removed)")

    def _cleanup_old_executions(self) -> None:
        """Remove old executions beyond max_history limit.

        Keeps successful executions to max_history, but always keeps all recent failures.

        Must be called with self.lock already held (execute() does so).

        NOTE(review): because failures are never evicted here, the history can
        grow beyond max_history indefinitely when failures dominate — confirm
        this unbounded-growth trade-off is intentional.
        """
        current_count = len(self.executions)
        if current_count <= self.max_history:
            return

        # Get all executions sorted by end time (oldest first)
        all_executions = sorted(self.executions.values(), key=lambda e: e.end_time or 0)
        logger.debug(f"Sorted {len(all_executions)} executions by end time")

        # Keep all failures and recent successes
        successes = [e for e in all_executions if e.success()]
        failures = [e for e in all_executions if not e.success()]
        logger.debug(f"Execution breakdown: {len(successes)} successes, {len(failures)} failures")

        # Remove oldest successes if we're over limit
        to_remove = len(self.executions) - self.max_history
        if to_remove > 0 and len(successes) > to_remove:
            for execution in successes[:to_remove]:
                del self.executions[execution.execution_id]

            logger.info(f"Cleaned up {to_remove} old successful subprocess executions (history now: {len(self.executions)})")
        else:
            logger.debug(f"Cannot remove {to_remove} executions (only {len(successes)} successes available)")
282
+
283
+
284
# Global subprocess manager instance (initialized by daemon)
_subprocess_manager: Optional[SubprocessManager] = None


def get_subprocess_manager() -> SubprocessManager:
    """Get global subprocess manager instance.

    Returns:
        Global SubprocessManager instance

    Raises:
        RuntimeError: If subprocess manager not initialized
    """
    # No `global` statement needed: the module global is only read here,
    # never rebound (init_subprocess_manager owns the assignment).
    if _subprocess_manager is None:
        logger.error("SubprocessManager accessed before initialization")
        raise RuntimeError("SubprocessManager not initialized. Call init_subprocess_manager() first.")
    return _subprocess_manager
302
+
303
+
304
def init_subprocess_manager(max_history: int = 1000) -> SubprocessManager:
    """Create the process-wide SubprocessManager and publish it globally.

    Args:
        max_history: Maximum number of executions to keep in history

    Returns:
        Initialized SubprocessManager instance
    """
    global _subprocess_manager
    manager = SubprocessManager(max_history=max_history)
    _subprocess_manager = manager
    logger.info("Global SubprocessManager initialized successfully")
    return manager
@@ -0,0 +1,21 @@
1
+ """
2
+ Firmware deployment functionality for fbuild.
3
+
4
+ This module provides deployment capabilities for uploading firmware to devices.
5
+ """
6
+
7
+ from .deployer import DeploymentError, DeploymentResult, IDeployer
8
+ from .deployer_esp32 import ESP32Deployer
9
+ from .monitor import SerialMonitor
10
+ from .qemu_runner import QEMURunner, check_docker_available, map_board_to_machine
11
+
12
+ __all__ = [
13
+ "IDeployer",
14
+ "ESP32Deployer",
15
+ "DeploymentResult",
16
+ "DeploymentError",
17
+ "SerialMonitor",
18
+ "QEMURunner",
19
+ "check_docker_available",
20
+ "map_board_to_machine",
21
+ ]
@@ -0,0 +1,67 @@
1
+ """Abstract base class for firmware deployers.
2
+
3
+ This module defines the interface for platform-specific deployers
4
+ to ensure consistent behavior across different platforms.
5
+ """
6
+
7
+ from abc import ABC, abstractmethod
8
+ from dataclasses import dataclass
9
+ from pathlib import Path
10
+ from typing import Optional
11
+
12
+
13
@dataclass
class DeploymentResult:
    """Outcome of a single firmware deployment attempt."""

    # True when the firmware was flashed to the device successfully.
    success: bool
    # Human-readable description of the outcome (shown to the user).
    message: str
    # Serial port that was used, when one was selected or auto-detected.
    port: Optional[str] = None
20
+
21
+
22
class DeploymentError(Exception):
    """Base exception for deployment errors.

    Raised by deployers when locating firmware, selecting a serial port,
    or flashing the device fails.
    """
    # `pass` after a docstring is redundant — the docstring is a valid body.
26
+
27
+
28
class IDeployer(ABC):
    """Contract implemented by platform-specific firmware deployers.

    A deployer is responsible for the full upload pipeline:
    1. Locate firmware binaries
    2. Detect or validate serial port
    3. Flash firmware to device
    4. Verify upload success
    """

    @abstractmethod
    def deploy(
        self,
        project_dir: Path,
        env_name: str,
        port: Optional[str] = None,
    ) -> DeploymentResult:
        """Upload firmware for one environment to an attached device.

        Args:
            project_dir: Path to project directory
            env_name: Environment name to deploy
            port: Serial port to use (auto-detect if None)

        Returns:
            DeploymentResult with success status and message

        Raises:
            DeploymentError: If deployment fails
        """
        ...

    @abstractmethod
    def _detect_serial_port(self) -> Optional[str]:
        """Auto-detect the serial port a target device is attached to.

        Returns:
            Serial port name, or None when no device could be found.
        """
        ...