fbuild-1.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of fbuild has been flagged as potentially problematic.

Files changed (93)
  1. fbuild/__init__.py +0 -0
  2. fbuild/assets/example.txt +1 -0
  3. fbuild/build/__init__.py +117 -0
  4. fbuild/build/archive_creator.py +186 -0
  5. fbuild/build/binary_generator.py +444 -0
  6. fbuild/build/build_component_factory.py +131 -0
  7. fbuild/build/build_state.py +325 -0
  8. fbuild/build/build_utils.py +98 -0
  9. fbuild/build/compilation_executor.py +422 -0
  10. fbuild/build/compiler.py +165 -0
  11. fbuild/build/compiler_avr.py +574 -0
  12. fbuild/build/configurable_compiler.py +612 -0
  13. fbuild/build/configurable_linker.py +637 -0
  14. fbuild/build/flag_builder.py +186 -0
  15. fbuild/build/library_dependency_processor.py +185 -0
  16. fbuild/build/linker.py +708 -0
  17. fbuild/build/orchestrator.py +67 -0
  18. fbuild/build/orchestrator_avr.py +656 -0
  19. fbuild/build/orchestrator_esp32.py +797 -0
  20. fbuild/build/orchestrator_teensy.py +543 -0
  21. fbuild/build/source_compilation_orchestrator.py +220 -0
  22. fbuild/build/source_scanner.py +516 -0
  23. fbuild/cli.py +566 -0
  24. fbuild/cli_utils.py +312 -0
  25. fbuild/config/__init__.py +16 -0
  26. fbuild/config/board_config.py +457 -0
  27. fbuild/config/board_loader.py +92 -0
  28. fbuild/config/ini_parser.py +209 -0
  29. fbuild/config/mcu_specs.py +88 -0
  30. fbuild/daemon/__init__.py +34 -0
  31. fbuild/daemon/client.py +929 -0
  32. fbuild/daemon/compilation_queue.py +293 -0
  33. fbuild/daemon/daemon.py +474 -0
  34. fbuild/daemon/daemon_context.py +196 -0
  35. fbuild/daemon/error_collector.py +263 -0
  36. fbuild/daemon/file_cache.py +332 -0
  37. fbuild/daemon/lock_manager.py +270 -0
  38. fbuild/daemon/logging_utils.py +149 -0
  39. fbuild/daemon/messages.py +301 -0
  40. fbuild/daemon/operation_registry.py +288 -0
  41. fbuild/daemon/process_tracker.py +366 -0
  42. fbuild/daemon/processors/__init__.py +12 -0
  43. fbuild/daemon/processors/build_processor.py +157 -0
  44. fbuild/daemon/processors/deploy_processor.py +327 -0
  45. fbuild/daemon/processors/monitor_processor.py +146 -0
  46. fbuild/daemon/request_processor.py +401 -0
  47. fbuild/daemon/status_manager.py +216 -0
  48. fbuild/daemon/subprocess_manager.py +316 -0
  49. fbuild/deploy/__init__.py +17 -0
  50. fbuild/deploy/deployer.py +67 -0
  51. fbuild/deploy/deployer_esp32.py +314 -0
  52. fbuild/deploy/monitor.py +495 -0
  53. fbuild/interrupt_utils.py +34 -0
  54. fbuild/packages/__init__.py +53 -0
  55. fbuild/packages/archive_utils.py +1098 -0
  56. fbuild/packages/arduino_core.py +412 -0
  57. fbuild/packages/cache.py +249 -0
  58. fbuild/packages/downloader.py +366 -0
  59. fbuild/packages/framework_esp32.py +538 -0
  60. fbuild/packages/framework_teensy.py +346 -0
  61. fbuild/packages/github_utils.py +96 -0
  62. fbuild/packages/header_trampoline_cache.py +394 -0
  63. fbuild/packages/library_compiler.py +203 -0
  64. fbuild/packages/library_manager.py +549 -0
  65. fbuild/packages/library_manager_esp32.py +413 -0
  66. fbuild/packages/package.py +163 -0
  67. fbuild/packages/platform_esp32.py +383 -0
  68. fbuild/packages/platform_teensy.py +312 -0
  69. fbuild/packages/platform_utils.py +131 -0
  70. fbuild/packages/platformio_registry.py +325 -0
  71. fbuild/packages/sdk_utils.py +231 -0
  72. fbuild/packages/toolchain.py +436 -0
  73. fbuild/packages/toolchain_binaries.py +196 -0
  74. fbuild/packages/toolchain_esp32.py +484 -0
  75. fbuild/packages/toolchain_metadata.py +185 -0
  76. fbuild/packages/toolchain_teensy.py +404 -0
  77. fbuild/platform_configs/esp32.json +150 -0
  78. fbuild/platform_configs/esp32c2.json +144 -0
  79. fbuild/platform_configs/esp32c3.json +143 -0
  80. fbuild/platform_configs/esp32c5.json +151 -0
  81. fbuild/platform_configs/esp32c6.json +151 -0
  82. fbuild/platform_configs/esp32p4.json +149 -0
  83. fbuild/platform_configs/esp32s3.json +151 -0
  84. fbuild/platform_configs/imxrt1062.json +56 -0
  85. fbuild-1.1.0.dist-info/METADATA +447 -0
  86. fbuild-1.1.0.dist-info/RECORD +93 -0
  87. fbuild-1.1.0.dist-info/WHEEL +5 -0
  88. fbuild-1.1.0.dist-info/entry_points.txt +5 -0
  89. fbuild-1.1.0.dist-info/licenses/LICENSE +21 -0
  90. fbuild-1.1.0.dist-info/top_level.txt +2 -0
  91. fbuild_lint/__init__.py +0 -0
  92. fbuild_lint/ruff_plugins/__init__.py +0 -0
  93. fbuild_lint/ruff_plugins/keyboard_interrupt_checker.py +158 -0
fbuild/daemon/request_processor.py
@@ -0,0 +1,401 @@
+"""
+Request Processor - Template method pattern for daemon request handling.
+
+This module provides the RequestProcessor abstract base class which implements
+the Template Method pattern to eliminate code duplication across build, deploy,
+and monitor request handlers. It handles all common concerns (lock management,
+status updates, error handling) while allowing subclasses to implement only
+the operation-specific business logic.
+"""
+
+import logging
+import time
+from abc import ABC, abstractmethod
+from contextlib import ExitStack
+from typing import TYPE_CHECKING, Any
+
+from fbuild.daemon.messages import DaemonState, OperationType
+
+if TYPE_CHECKING:
+    from fbuild.daemon.daemon_context import DaemonContext
+    from fbuild.daemon.messages import BuildRequest, DeployRequest, MonitorRequest
+
+
+class RequestProcessor(ABC):
+    """Abstract base class for processing daemon requests.
+
+    This class implements the Template Method pattern to handle all common
+    concerns of request processing:
+    - Request validation
+    - Lock acquisition (port and/or project locks)
+    - Status updates (started, in-progress, completed, failed)
+    - Error handling and cleanup
+    - Operation tracking
+
+    Subclasses only need to implement:
+    - get_operation_type(): Return the OperationType
+    - get_required_locks(): Specify which locks are needed
+    - execute_operation(): Implement the actual business logic
+
+    Example:
+        >>> class BuildRequestProcessor(RequestProcessor):
+        ...     def get_operation_type(self) -> OperationType:
+        ...         return OperationType.BUILD
+        ...
+        ...     def get_required_locks(self, request, context):
+        ...         return {"project": request.project_dir}
+        ...
+        ...     def execute_operation(self, request, context):
+        ...         # Actual build logic here
+        ...         result = build_project(request.project_dir)
+        ...         return result.success
+    """
+
+    def process_request(
+        self,
+        request: "BuildRequest | DeployRequest | MonitorRequest",
+        context: "DaemonContext",
+    ) -> bool:
+        """Process a request using the template method pattern.
+
+        This is the main entry point that coordinates the entire request
+        processing lifecycle. It handles all boilerplate while calling
+        abstract methods for operation-specific logic.
+
+        Args:
+            request: The request to process (BuildRequest, DeployRequest, or MonitorRequest)
+            context: The daemon context containing all subsystems
+
+        Returns:
+            True if operation succeeded, False otherwise
+
+        Lifecycle:
+            1. Validate request
+            2. Acquire required locks (project and/or port)
+            3. Mark operation as in progress
+            4. Update status to starting state
+            5. Execute operation (abstract method)
+            6. Update status based on result
+            7. Release locks and cleanup
+
+        Example:
+            >>> processor = BuildRequestProcessor()
+            >>> success = processor.process_request(build_request, daemon_context)
+        """
+        logging.info(f"Processing {self.get_operation_type().value} request {request.request_id}: " + f"env={request.environment}, project={request.project_dir}")
+
+        # Validate request
+        if not self.validate_request(request, context):
+            self._update_status(
+                context,
+                DaemonState.FAILED,
+                "Request validation failed",
+                request=request,
+                exit_code=1,
+            )
+            return False
+
+        # Use ExitStack to manage multiple locks as context managers
+        with ExitStack() as lock_stack:
+            # Acquire required locks
+            if not self._acquire_locks(request, context, lock_stack):
+                return False
+
+            try:
+                # Mark operation in progress
+                with context.operation_lock:
+                    context.operation_in_progress = True
+
+                # Update status to starting state
+                self._update_status(
+                    context,
+                    self.get_starting_state(),
+                    self.get_starting_message(request),
+                    request=request,
+                    request_started_at=time.time(),
+                    operation_type=self.get_operation_type(),
+                )
+
+                # Execute the operation (implemented by subclass)
+                success = self.execute_operation(request, context)
+
+                # Update final status
+                if success:
+                    self._update_status(
+                        context,
+                        DaemonState.COMPLETED,
+                        self.get_success_message(request),
+                        request=request,
+                        exit_code=0,
+                        operation_in_progress=False,
+                    )
+                else:
+                    self._update_status(
+                        context,
+                        DaemonState.FAILED,
+                        self.get_failure_message(request),
+                        request=request,
+                        exit_code=1,
+                        operation_in_progress=False,
+                    )
+
+                return success
+
+            except KeyboardInterrupt:
+                import _thread
+
+                _thread.interrupt_main()
+                raise
+            except Exception as e:
+                logging.error(f"{self.get_operation_type().value} exception: {e}")
+                self._update_status(
+                    context,
+                    DaemonState.FAILED,
+                    f"{self.get_operation_type().value} exception: {e}",
+                    request=request,
+                    exit_code=1,
+                    operation_in_progress=False,
+                )
+                return False
+            finally:
+                # Mark operation complete
+                with context.operation_lock:
+                    context.operation_in_progress = False
+
+    @abstractmethod
+    def get_operation_type(self) -> OperationType:
+        """Get the operation type for this processor.
+
+        Returns:
+            OperationType enum value (BUILD, DEPLOY, MONITOR, etc.)
+        """
+        pass
+
+    @abstractmethod
+    def get_required_locks(
+        self,
+        request: "BuildRequest | DeployRequest | MonitorRequest",
+        context: "DaemonContext",
+    ) -> dict[str, str]:
+        """Specify which locks are required for this operation.
+
+        Returns:
+            Dictionary with lock types as keys and resource identifiers as values.
+            Valid keys: "project" (for project_dir), "port" (for serial port)
+
+        Examples:
+            Build only needs project lock:
+                return {"project": request.project_dir}
+
+            Deploy needs both project and port locks:
+                return {"project": request.project_dir, "port": request.port}
+
+            Monitor only needs port lock:
+                return {"port": request.port}
+        """
+        pass
+
+    @abstractmethod
+    def execute_operation(
+        self,
+        request: "BuildRequest | DeployRequest | MonitorRequest",
+        context: "DaemonContext",
+    ) -> bool:
+        """Execute the actual operation logic.
+
+        This is the core business logic that subclasses must implement.
+        All boilerplate (locks, status updates, error handling) is handled
+        by the base class.
+
+        Args:
+            request: The request being processed
+            context: The daemon context with all subsystems
+
+        Returns:
+            True if operation succeeded, False otherwise
+
+        Example:
+            >>> def execute_operation(self, request, context):
+            ...     # Build the project
+            ...     orchestrator = BuildOrchestratorAVR(verbose=request.verbose)
+            ...     result = orchestrator.build(
+            ...         project_dir=Path(request.project_dir),
+            ...         env_name=request.environment,
+            ...         clean=request.clean_build,
+            ...     )
+            ...     return result.success
+        """
+        pass
+
+    def validate_request(
+        self,
+        request: "BuildRequest | DeployRequest | MonitorRequest",
+        context: "DaemonContext",
+    ) -> bool:
+        """Validate the request before processing.
+
+        Default implementation always returns True. Override to add validation.
+
+        Args:
+            request: The request to validate
+            context: The daemon context
+
+        Returns:
+            True if request is valid, False otherwise
+        """
+        return True
+
+    def get_starting_state(self) -> DaemonState:
+        """Get the daemon state when operation starts.
+
+        Default implementation uses BUILDING. Override for different operations.
+
+        Returns:
+            DaemonState enum value for operation start
+        """
+        operation_type = self.get_operation_type()
+        if operation_type == OperationType.BUILD:
+            return DaemonState.BUILDING
+        elif operation_type == OperationType.DEPLOY or operation_type == OperationType.BUILD_AND_DEPLOY:
+            return DaemonState.DEPLOYING
+        elif operation_type == OperationType.MONITOR:
+            return DaemonState.MONITORING
+        else:
+            return DaemonState.BUILDING
+
+    def get_starting_message(self, request: "BuildRequest | DeployRequest | MonitorRequest") -> str:
+        """Get the status message when operation starts.
+
+        Args:
+            request: The request being processed
+
+        Returns:
+            Human-readable status message
+        """
+        operation_type = self.get_operation_type()
+        if operation_type == OperationType.BUILD:
+            return f"Building {request.environment}"
+        elif operation_type == OperationType.DEPLOY or operation_type == OperationType.BUILD_AND_DEPLOY:
+            return f"Deploying {request.environment}"
+        elif operation_type == OperationType.MONITOR:
+            return f"Monitoring {request.environment}"
+        else:
+            return f"Processing {request.environment}"
+
+    def get_success_message(self, request: "BuildRequest | DeployRequest | MonitorRequest") -> str:
+        """Get the status message on success.
+
+        Args:
+            request: The request that was processed
+
+        Returns:
+            Human-readable success message
+        """
+        operation_type = self.get_operation_type()
+        if operation_type == OperationType.BUILD:
+            return "Build successful"
+        elif operation_type == OperationType.DEPLOY or operation_type == OperationType.BUILD_AND_DEPLOY:
+            return "Deploy successful"
+        elif operation_type == OperationType.MONITOR:
+            return "Monitor completed"
+        else:
+            return "Operation successful"
+
+    def get_failure_message(self, request: "BuildRequest | DeployRequest | MonitorRequest") -> str:
+        """Get the status message on failure.
+
+        Args:
+            request: The request that failed
+
+        Returns:
+            Human-readable failure message
+        """
+        operation_type = self.get_operation_type()
+        if operation_type == OperationType.BUILD:
+            return "Build failed"
+        elif operation_type == OperationType.DEPLOY or operation_type == OperationType.BUILD_AND_DEPLOY:
+            return "Deploy failed"
+        elif operation_type == OperationType.MONITOR:
+            return "Monitor failed"
+        else:
+            return "Operation failed"
+
+    def _acquire_locks(
+        self,
+        request: "BuildRequest | DeployRequest | MonitorRequest",
+        context: "DaemonContext",
+        lock_stack: ExitStack,
+    ) -> bool:
+        """Acquire all required locks for the operation.
+
+        Args:
+            request: The request being processed
+            context: The daemon context
+            lock_stack: ExitStack to manage lock lifetimes
+
+        Returns:
+            True if all locks acquired, False if any lock is unavailable
+        """
+        required_locks = self.get_required_locks(request, context)
+
+        # Acquire project lock if needed
+        if "project" in required_locks:
+            project_dir = required_locks["project"]
+            try:
+                lock_stack.enter_context(context.lock_manager.acquire_project_lock(project_dir, blocking=False))
+            except RuntimeError:
+                logging.warning(f"Project {project_dir} is already being built")
+                self._update_status(
+                    context,
+                    DaemonState.FAILED,
+                    f"Project {project_dir} is already being built by another process",
+                    request=request,
+                )
+                return False
+
+        # Acquire port lock if needed
+        if "port" in required_locks:
+            port = required_locks["port"]
+            if port:  # Only acquire if port is not None/empty
+                try:
+                    lock_stack.enter_context(context.lock_manager.acquire_port_lock(port, blocking=False))
+                except RuntimeError:
+                    logging.warning(f"Port {port} is already in use")
+                    self._update_status(
+                        context,
+                        DaemonState.FAILED,
+                        f"Port {port} is already in use by another operation",
+                        request=request,
+                    )
+                    return False
+
+        return True
+
+    def _update_status(
+        self,
+        context: "DaemonContext",
+        state: DaemonState,
+        message: str,
+        request: "BuildRequest | DeployRequest | MonitorRequest",
+        **kwargs: Any,
+    ) -> None:
+        """Update daemon status file.
+
+        Args:
+            context: The daemon context
+            state: New daemon state
+            message: Status message
+            request: The request being processed
+            **kwargs: Additional fields for status update
+        """
+        # Use the status manager from context
+        context.status_manager.update_status(
+            state=state,
+            message=message,
+            environment=request.environment,
+            project_dir=request.project_dir,
+            request_id=request.request_id,
+            caller_pid=request.caller_pid,
+            caller_cwd=request.caller_cwd,
+            **kwargs,
+        )
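
For orientation, here is a minimal sketch of how a concrete processor plugs into this template, following the docstring examples above. It is not the shipped implementation (the release contains its own fbuild/daemon/processors/build_processor.py, deploy_processor.py, and monitor_processor.py, which may differ); the execute_operation body is a placeholder.

    from fbuild.daemon.messages import OperationType
    from fbuild.daemon.request_processor import RequestProcessor

    class ExampleMonitorProcessor(RequestProcessor):
        """Sketch only: shows the three hooks a subclass must provide."""

        def get_operation_type(self) -> OperationType:
            return OperationType.MONITOR

        def get_required_locks(self, request, context) -> dict[str, str]:
            # Monitoring contends only for the serial port, not the project directory.
            return {"port": request.port}

        def execute_operation(self, request, context) -> bool:
            # Real logic would open the serial port and stream output; returning
            # True here keeps the sketch self-contained.
            return True

    # The daemon then drives it exactly as the base class documents:
    #     success = ExampleMonitorProcessor().process_request(monitor_request, daemon_context)
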
fbuild/daemon/status_manager.py
@@ -0,0 +1,216 @@
+"""
+Status Manager - Centralized status file management for daemon operations.
+
+This module provides the StatusManager class which handles all status file
+I/O operations with proper locking and atomic writes. It eliminates the
+scattered update_status() calls throughout daemon.py and provides a clean
+API for status management.
+"""
+
+import json
+import logging
+import threading
+import time
+from pathlib import Path
+from typing import Any
+
+from fbuild.daemon.messages import DaemonState, DaemonStatus
+from fbuild.interrupt_utils import handle_keyboard_interrupt_properly
+
+
+class StatusManager:
+    """Manages daemon status file operations.
+
+    This class provides centralized management of the daemon status file,
+    ensuring:
+    - Atomic writes (write to temp file + rename)
+    - Thread-safe operations (internal locking)
+    - Consistent status structure
+    - Request ID validation
+
+    The status file is used for communication between the daemon and client,
+    allowing the client to monitor the progress of operations.
+
+    Example:
+        >>> manager = StatusManager(status_file_path, daemon_pid=1234)
+        >>> manager.update_status(
+        ...     DaemonState.BUILDING,
+        ...     "Building firmware",
+        ...     environment="esp32dev",
+        ...     project_dir="/path/to/project"
+        ... )
+        >>> status = manager.read_status()
+        >>> print(status.state)
+        DaemonState.BUILDING
+    """
+
+    def __init__(
+        self,
+        status_file: Path,
+        daemon_pid: int,
+        daemon_started_at: float | None = None,
+    ):
+        """Initialize the StatusManager.
+
+        Args:
+            status_file: Path to the status file
+            daemon_pid: PID of the daemon process
+            daemon_started_at: Timestamp when daemon started (defaults to now)
+        """
+        self.status_file = status_file
+        self.daemon_pid = daemon_pid
+        self.daemon_started_at = daemon_started_at if daemon_started_at is not None else time.time()
+        self._lock = threading.Lock()
+        self._operation_in_progress = False
+
+        # Ensure parent directory exists
+        self.status_file.parent.mkdir(parents=True, exist_ok=True)
+
+    def update_status(
+        self,
+        state: DaemonState,
+        message: str,
+        operation_in_progress: bool | None = None,
+        **kwargs: Any,
+    ) -> None:
+        """Update the status file with current daemon state.
+
+        This method is thread-safe and performs atomic writes to prevent
+        corruption during concurrent access.
+
+        Args:
+            state: DaemonState enum value
+            message: Human-readable status message
+            operation_in_progress: Whether an operation is in progress (None = use current value)
+            **kwargs: Additional fields to include in status (e.g., environment, project_dir)
+
+        Example:
+            >>> manager.update_status(
+            ...     DaemonState.BUILDING,
+            ...     "Building firmware",
+            ...     environment="esp32dev",
+            ...     project_dir="/path/to/project",
+            ...     request_id="build_1234567890",
+            ... )
+        """
+        with self._lock:
+
+            # Update internal operation state if provided
+            if operation_in_progress is not None:
+                self._operation_in_progress = operation_in_progress
+
+            # Create typed DaemonStatus object
+            status_obj = DaemonStatus(
+                state=state,
+                message=message,
+                updated_at=time.time(),
+                daemon_pid=self.daemon_pid,
+                daemon_started_at=self.daemon_started_at,
+                operation_in_progress=self._operation_in_progress,
+                **kwargs,
+            )
+
+            logging.debug(f"Writing status to file (additional fields: {len(kwargs)})")
+            self._write_status_atomic(status_obj.to_dict())
+
+    def read_status(self) -> DaemonStatus:
+        """Read and parse the status file.
+
+        Returns:
+            DaemonStatus object with current daemon state
+
+        If the file doesn't exist or is corrupted, returns a default status
+        indicating the daemon is idle.
+        """
+        with self._lock:
+            if not self.status_file.exists():
+                return self._get_default_status()
+
+            try:
+                with open(self.status_file, encoding="utf-8") as f:
+                    data = json.load(f)
+
+                status = DaemonStatus.from_dict(data)
+                return status
+
+            except KeyboardInterrupt as ke:
+                handle_keyboard_interrupt_properly(ke)
+            except (json.JSONDecodeError, ValueError) as e:
+                logging.warning(f"Corrupted status file detected: {e}")
+                logging.warning("Creating fresh status file")
+
+                # Write fresh status file
+                default_status = self._get_default_status()
+                self._write_status_atomic(default_status.to_dict())
+                return default_status
+
+            except Exception as e:
+                logging.error(f"Unexpected error reading status file: {e}")
+                default_status = self._get_default_status()
+                self._write_status_atomic(default_status.to_dict())
+                return default_status
+
+    def set_operation_in_progress(self, in_progress: bool) -> None:
+        """Set the operation_in_progress flag.
+
+        This is used to track whether the daemon is currently executing
+        an operation. It's typically set to True when starting an operation
+        and False when completing or failing.
+
+        Args:
+            in_progress: Whether an operation is in progress
+        """
+        with self._lock:
+            self._operation_in_progress = in_progress
+
+    def get_operation_in_progress(self) -> bool:
+        """Get the current operation_in_progress flag.
+
+        Returns:
+            True if an operation is in progress, False otherwise
+        """
+        with self._lock:
+            return self._operation_in_progress
+
+    def _write_status_atomic(self, status: dict[str, Any]) -> None:
+        """Write status file atomically to prevent corruption during writes.
+
+        This method writes to a temporary file first, then atomically renames
+        it to the actual status file. This ensures the status file is never
+        in a partially-written state.
+
+        Args:
+            status: Status dictionary to write
+        """
+        temp_file = self.status_file.with_suffix(".tmp")
+        logging.debug(f"Using temp file: {temp_file}")
+
+        try:
+            logging.debug(f"Writing JSON to temp file ({len(status)} keys)...")
+            with open(temp_file, "w", encoding="utf-8") as f:
+                json.dump(status, f, indent=2)
+            # Atomic rename
+            temp_file.replace(self.status_file)
+
+        except KeyboardInterrupt:
+            logging.warning("KeyboardInterrupt during status file write, cleaning up temp file")
+            temp_file.unlink(missing_ok=True)
+            raise
+        except Exception as e:
+            logging.error(f"Failed to write status file: {e}")
+            temp_file.unlink(missing_ok=True)
+
+    def _get_default_status(self) -> DaemonStatus:
+        """Get default idle status.
+
+        Returns:
+            DaemonStatus object indicating daemon is idle
+        """
+        return DaemonStatus(
+            state=DaemonState.IDLE,
+            message="Daemon is idle",
+            updated_at=time.time(),
+            daemon_pid=self.daemon_pid,
+            daemon_started_at=self.daemon_started_at,
+            operation_in_progress=False,
+        )
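
As a rough illustration of the intended flow (a hedged sketch based only on the docstrings above; the status file path and field values are made up), the daemon writes status updates through StatusManager and a client reads the same file back:

    import os
    from pathlib import Path

    from fbuild.daemon.messages import DaemonState
    from fbuild.daemon.status_manager import StatusManager

    # Daemon side: one StatusManager owns the status file and writes it atomically.
    manager = StatusManager(Path("/tmp/fbuild/daemon_status.json"), daemon_pid=os.getpid())
    manager.update_status(
        DaemonState.BUILDING,
        "Building firmware",
        environment="esp32dev",
        project_dir="/path/to/project",
        operation_in_progress=True,
    )

    # Client side: read the file to follow progress; a missing or corrupted file
    # falls back to an idle default instead of raising.
    status = manager.read_status()
    print(status.state, status.message)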