fbuild 1.2.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (121):
  1. fbuild/__init__.py +390 -0
  2. fbuild/assets/example.txt +1 -0
  3. fbuild/build/__init__.py +117 -0
  4. fbuild/build/archive_creator.py +186 -0
  5. fbuild/build/binary_generator.py +444 -0
  6. fbuild/build/build_component_factory.py +131 -0
  7. fbuild/build/build_info_generator.py +624 -0
  8. fbuild/build/build_state.py +325 -0
  9. fbuild/build/build_utils.py +93 -0
  10. fbuild/build/compilation_executor.py +422 -0
  11. fbuild/build/compiler.py +165 -0
  12. fbuild/build/compiler_avr.py +574 -0
  13. fbuild/build/configurable_compiler.py +664 -0
  14. fbuild/build/configurable_linker.py +637 -0
  15. fbuild/build/flag_builder.py +214 -0
  16. fbuild/build/library_dependency_processor.py +185 -0
  17. fbuild/build/linker.py +708 -0
  18. fbuild/build/orchestrator.py +67 -0
  19. fbuild/build/orchestrator_avr.py +651 -0
  20. fbuild/build/orchestrator_esp32.py +878 -0
  21. fbuild/build/orchestrator_rp2040.py +719 -0
  22. fbuild/build/orchestrator_stm32.py +696 -0
  23. fbuild/build/orchestrator_teensy.py +580 -0
  24. fbuild/build/source_compilation_orchestrator.py +218 -0
  25. fbuild/build/source_scanner.py +516 -0
  26. fbuild/cli.py +717 -0
  27. fbuild/cli_utils.py +314 -0
  28. fbuild/config/__init__.py +16 -0
  29. fbuild/config/board_config.py +542 -0
  30. fbuild/config/board_loader.py +92 -0
  31. fbuild/config/ini_parser.py +369 -0
  32. fbuild/config/mcu_specs.py +88 -0
  33. fbuild/daemon/__init__.py +42 -0
  34. fbuild/daemon/async_client.py +531 -0
  35. fbuild/daemon/client.py +1505 -0
  36. fbuild/daemon/compilation_queue.py +293 -0
  37. fbuild/daemon/configuration_lock.py +865 -0
  38. fbuild/daemon/daemon.py +585 -0
  39. fbuild/daemon/daemon_context.py +293 -0
  40. fbuild/daemon/error_collector.py +263 -0
  41. fbuild/daemon/file_cache.py +332 -0
  42. fbuild/daemon/firmware_ledger.py +546 -0
  43. fbuild/daemon/lock_manager.py +508 -0
  44. fbuild/daemon/logging_utils.py +149 -0
  45. fbuild/daemon/messages.py +957 -0
  46. fbuild/daemon/operation_registry.py +288 -0
  47. fbuild/daemon/port_state_manager.py +249 -0
  48. fbuild/daemon/process_tracker.py +366 -0
  49. fbuild/daemon/processors/__init__.py +18 -0
  50. fbuild/daemon/processors/build_processor.py +248 -0
  51. fbuild/daemon/processors/deploy_processor.py +664 -0
  52. fbuild/daemon/processors/install_deps_processor.py +431 -0
  53. fbuild/daemon/processors/locking_processor.py +777 -0
  54. fbuild/daemon/processors/monitor_processor.py +285 -0
  55. fbuild/daemon/request_processor.py +457 -0
  56. fbuild/daemon/shared_serial.py +819 -0
  57. fbuild/daemon/status_manager.py +238 -0
  58. fbuild/daemon/subprocess_manager.py +316 -0
  59. fbuild/deploy/__init__.py +21 -0
  60. fbuild/deploy/deployer.py +67 -0
  61. fbuild/deploy/deployer_esp32.py +310 -0
  62. fbuild/deploy/docker_utils.py +315 -0
  63. fbuild/deploy/monitor.py +519 -0
  64. fbuild/deploy/qemu_runner.py +603 -0
  65. fbuild/interrupt_utils.py +34 -0
  66. fbuild/ledger/__init__.py +52 -0
  67. fbuild/ledger/board_ledger.py +560 -0
  68. fbuild/output.py +352 -0
  69. fbuild/packages/__init__.py +66 -0
  70. fbuild/packages/archive_utils.py +1098 -0
  71. fbuild/packages/arduino_core.py +412 -0
  72. fbuild/packages/cache.py +256 -0
  73. fbuild/packages/concurrent_manager.py +510 -0
  74. fbuild/packages/downloader.py +518 -0
  75. fbuild/packages/fingerprint.py +423 -0
  76. fbuild/packages/framework_esp32.py +538 -0
  77. fbuild/packages/framework_rp2040.py +349 -0
  78. fbuild/packages/framework_stm32.py +459 -0
  79. fbuild/packages/framework_teensy.py +346 -0
  80. fbuild/packages/github_utils.py +96 -0
  81. fbuild/packages/header_trampoline_cache.py +394 -0
  82. fbuild/packages/library_compiler.py +203 -0
  83. fbuild/packages/library_manager.py +549 -0
  84. fbuild/packages/library_manager_esp32.py +725 -0
  85. fbuild/packages/package.py +163 -0
  86. fbuild/packages/platform_esp32.py +383 -0
  87. fbuild/packages/platform_rp2040.py +400 -0
  88. fbuild/packages/platform_stm32.py +581 -0
  89. fbuild/packages/platform_teensy.py +312 -0
  90. fbuild/packages/platform_utils.py +131 -0
  91. fbuild/packages/platformio_registry.py +369 -0
  92. fbuild/packages/sdk_utils.py +231 -0
  93. fbuild/packages/toolchain.py +436 -0
  94. fbuild/packages/toolchain_binaries.py +196 -0
  95. fbuild/packages/toolchain_esp32.py +489 -0
  96. fbuild/packages/toolchain_metadata.py +185 -0
  97. fbuild/packages/toolchain_rp2040.py +436 -0
  98. fbuild/packages/toolchain_stm32.py +417 -0
  99. fbuild/packages/toolchain_teensy.py +404 -0
  100. fbuild/platform_configs/esp32.json +150 -0
  101. fbuild/platform_configs/esp32c2.json +144 -0
  102. fbuild/platform_configs/esp32c3.json +143 -0
  103. fbuild/platform_configs/esp32c5.json +151 -0
  104. fbuild/platform_configs/esp32c6.json +151 -0
  105. fbuild/platform_configs/esp32p4.json +149 -0
  106. fbuild/platform_configs/esp32s3.json +151 -0
  107. fbuild/platform_configs/imxrt1062.json +56 -0
  108. fbuild/platform_configs/rp2040.json +70 -0
  109. fbuild/platform_configs/rp2350.json +76 -0
  110. fbuild/platform_configs/stm32f1.json +59 -0
  111. fbuild/platform_configs/stm32f4.json +63 -0
  112. fbuild/py.typed +0 -0
  113. fbuild-1.2.8.dist-info/METADATA +468 -0
  114. fbuild-1.2.8.dist-info/RECORD +121 -0
  115. fbuild-1.2.8.dist-info/WHEEL +5 -0
  116. fbuild-1.2.8.dist-info/entry_points.txt +5 -0
  117. fbuild-1.2.8.dist-info/licenses/LICENSE +21 -0
  118. fbuild-1.2.8.dist-info/top_level.txt +2 -0
  119. fbuild_lint/__init__.py +0 -0
  120. fbuild_lint/ruff_plugins/__init__.py +0 -0
  121. fbuild_lint/ruff_plugins/keyboard_interrupt_checker.py +158 -0
@@ -0,0 +1,585 @@
1
+ """
2
+ fbuild Daemon - Concurrent Deploy and Monitor Management
3
+
4
+ This daemon manages deploy and monitor operations to prevent resource conflicts
5
+ when multiple operations are running. The daemon:
6
+
7
+ 1. Runs as a singleton process (enforced via PID file)
8
+ 2. Survives client termination
9
+ 3. Processes requests with appropriate locking (per-port, per-project)
10
+ 4. Provides status updates via status file
11
+ 5. Auto-shuts down after idle timeout
12
+ 6. Cleans up orphaned processes
13
+
14
+ Architecture:
15
+ Clients -> Request File -> Daemon -> Deploy/Monitor Process
16
+ | |
17
+ v v
18
+ Status File Progress Updates
19
+ """
20
+
21
+ import _thread
22
+ import logging
23
+ import multiprocessing
24
+ import os
25
+ import signal
26
+ import subprocess
27
+ import sys
28
+ import threading
29
+ import time
30
+ from logging.handlers import TimedRotatingFileHandler
31
+ from pathlib import Path
32
+
33
+ import psutil
34
+
35
+ from fbuild.daemon.daemon_context import (
36
+ DaemonContext,
37
+ cleanup_daemon_context,
38
+ create_daemon_context,
39
+ )
40
+ from fbuild.daemon.messages import (
41
+ BuildRequest,
42
+ DaemonState,
43
+ DeployRequest,
44
+ InstallDependenciesRequest,
45
+ MonitorRequest,
46
+ )
47
+ from fbuild.daemon.process_tracker import ProcessTracker
48
+ from fbuild.daemon.processors.build_processor import BuildRequestProcessor
49
+ from fbuild.daemon.processors.deploy_processor import DeployRequestProcessor
50
+ from fbuild.daemon.processors.install_deps_processor import InstallDependenciesProcessor
51
+ from fbuild.daemon.processors.monitor_processor import MonitorRequestProcessor
52
+
53
# Daemon configuration
DAEMON_NAME = "fbuild_daemon"

# Check for development mode (when running from repo)
if os.environ.get("FBUILD_DEV_MODE") == "1":
    # Use project-local daemon directory for development
    DAEMON_DIR = Path.cwd() / ".fbuild" / "daemon_dev"
else:
    # Use home directory for production
    DAEMON_DIR = Path.home() / ".fbuild" / "daemon"

# Well-known file locations used for client <-> daemon communication.
PID_FILE = DAEMON_DIR / f"{DAEMON_NAME}.pid"  # singleton enforcement
STATUS_FILE = DAEMON_DIR / "daemon_status.json"  # daemon -> client status updates
BUILD_REQUEST_FILE = DAEMON_DIR / "build_request.json"  # client -> daemon build requests
DEPLOY_REQUEST_FILE = DAEMON_DIR / "deploy_request.json"  # client -> daemon deploy requests
MONITOR_REQUEST_FILE = DAEMON_DIR / "monitor_request.json"  # client -> daemon monitor requests
INSTALL_DEPS_REQUEST_FILE = DAEMON_DIR / "install_deps_request.json"  # dependency-install requests
LOG_FILE = DAEMON_DIR / "daemon.log"
PROCESS_REGISTRY_FILE = DAEMON_DIR / "process_registry.json"  # tracked child processes
FILE_CACHE_FILE = DAEMON_DIR / "file_cache.json"

# Housekeeping cadence and lifetime limits (all in seconds).
ORPHAN_CHECK_INTERVAL = 5  # Check for orphaned processes every 5 seconds
STALE_LOCK_CHECK_INTERVAL = 60  # Check for stale locks every 60 seconds
DEAD_CLIENT_CHECK_INTERVAL = 10  # Check for dead clients every 10 seconds
IDLE_TIMEOUT = 43200  # 12 hours
77
+
78
+
79
def setup_logging(foreground: bool = False) -> None:
    """Configure root logging for the daemon.

    Installs a daily-rotating file handler (keeping two backups) and, when
    *foreground* is True, an additional stdout handler for interactive runs.

    Args:
        foreground: Echo log records to stdout in addition to the log file.
    """
    DAEMON_DIR.mkdir(parents=True, exist_ok=True)

    # Include function name and line number for easier debugging.
    fmt = "%(asctime)s - %(name)s - %(levelname)s - [%(funcName)s:%(lineno)d] - %(message)s"
    datefmt = "%Y-%m-%d %H:%M:%S"

    root = logging.getLogger()
    root.setLevel(logging.DEBUG)  # DEBUG enabled for daemon diagnostics

    if foreground:
        # Mirror everything to stdout when running interactively.
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.setLevel(logging.DEBUG)
        stream_handler.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt))
        root.addHandler(stream_handler)

    # Rotate the log file daily at midnight (local time); with backupCount=2
    # that keeps at most three files on disk.
    rotating_handler = TimedRotatingFileHandler(
        str(LOG_FILE),
        when="midnight",
        interval=1,
        backupCount=2,
        utc=False,
        atTime=None,
    )
    rotating_handler.setLevel(logging.DEBUG)
    rotating_handler.setFormatter(logging.Formatter(fmt=fmt, datefmt=datefmt))
    root.addHandler(rotating_handler)
112
+
113
+
114
def read_request_file(
    request_file: Path, request_class: type
) -> "BuildRequest | DeployRequest | MonitorRequest | InstallDependenciesRequest | None":
    """Read and parse a JSON request file into a typed request object.

    Args:
        request_file: Path to the JSON request file.
        request_class: Class to parse into; must expose a ``from_dict``
            classmethod (BuildRequest, DeployRequest, MonitorRequest, or
            InstallDependenciesRequest).

    Returns:
        The parsed request object, or None when the file is missing or
        cannot be parsed.
    """
    # BUG FIX: the annotation previously omitted InstallDependenciesRequest
    # even though the daemon loop parses that type with this function; it is
    # also quoted now so the union is not evaluated at definition time.
    import json

    if not request_file.exists():
        return None

    try:
        with open(request_file) as f:
            data = json.load(f)
        # Delegate validation/typing to the request class itself.
        return request_class.from_dict(data)
    except (json.JSONDecodeError, ValueError, TypeError) as e:
        logging.error(f"Failed to parse request file {request_file}: {e}")
        return None
    except KeyboardInterrupt:
        # Forward the interrupt to the main thread before re-raising.
        _thread.interrupt_main()
        raise
    except Exception as e:
        logging.error(f"Unexpected error reading request file {request_file}: {e}")
        return None
146
+
147
+
148
def clear_request_file(request_file: Path) -> None:
    """Delete a request file once its request has been consumed."""
    try:
        was_present = request_file.exists()
        request_file.unlink(missing_ok=True)
        if was_present:
            logging.debug(f"[ATOMIC_CONSUME] Successfully deleted request file: {request_file.name}")
        else:
            # Someone else removed it first; log but treat as success.
            logging.warning(f"[ATOMIC_CONSUME] Request file already deleted: {request_file.name}")
    except KeyboardInterrupt:
        logging.warning(f"KeyboardInterrupt while clearing request file: {request_file}")
        _thread.interrupt_main()
        raise
    except Exception as e:
        logging.error(f"Failed to clear request file {request_file}: {e}")
163
+
164
+
165
def should_shutdown() -> bool:
    """Check if daemon should shutdown.

    Returns:
        True if shutdown signal detected, False otherwise
    """
    shutdown_file = DAEMON_DIR / "shutdown.signal"
    if not shutdown_file.exists():
        return False

    # Signal file present: consume it (best effort) and report shutdown.
    logging.info("Shutdown signal detected")
    try:
        shutdown_file.unlink()
    except KeyboardInterrupt:
        _thread.interrupt_main()
        raise
    except Exception as e:
        logging.warning(f"Failed to remove shutdown signal file: {e}")
    return True
184
+
185
+
186
def cleanup_stale_cancel_signals() -> None:
    """Clean up stale cancel signal files (older than 5 minutes)."""
    try:
        candidates = list(DAEMON_DIR.glob("cancel_*.signal"))
        logging.debug(f"Found {len(candidates)} cancel signal files")

        removed = 0
        for candidate in candidates:
            try:
                # Stale means untouched for more than five minutes.
                age = time.time() - candidate.stat().st_mtime
                if age > 300:  # 5 minutes
                    logging.info(f"Cleaning up stale cancel signal: {candidate.name} (age: {age:.1f}s)")
                    candidate.unlink()
                    removed += 1
            except KeyboardInterrupt:
                _thread.interrupt_main()
                raise
            except Exception as e:
                logging.warning(f"Failed to clean up {candidate.name}: {e}")

        if removed:
            logging.info(f"Cleaned up {removed} cancel signal files")
    except KeyboardInterrupt:
        _thread.interrupt_main()
        raise
    except Exception as e:
        logging.error(f"Error during cancel signal cleanup: {e}")
214
+
215
+
216
def signal_handler(signum: int, frame: object, context: DaemonContext) -> None:
    """Handle SIGTERM/SIGINT - refuse shutdown during operation."""
    signal_name = "SIGTERM" if signum == signal.SIGTERM else "SIGINT"
    logging.info(f"Signal handler invoked: received {signal_name} (signal number {signum})")

    if not context.status_manager.get_operation_in_progress():
        # Nothing running: safe to shut down now (cleanup_and_exit exits).
        logging.info(f"Received {signal_name}, shutting down gracefully (no operation in progress)")
        cleanup_and_exit(context)
        return

    # An operation is active: refuse and tell the operator how to force it.
    logging.warning(f"Received {signal_name} during active operation. Refusing graceful shutdown.")
    print(
        f"\n⚠️ {signal_name} received during operation\n⚠️ Cannot shutdown gracefully while operation is active\n⚠️ Use 'kill -9 {os.getpid()}' to force termination\n",
        flush=True,
    )
231
+
232
+
233
def cleanup_and_exit(context: DaemonContext) -> None:
    """Clean up daemon state and exit.

    Tears down the daemon context, removes the PID file, writes a final
    status for any polling clients, then terminates the process with
    exit code 0. This function never returns.
    """
    logging.info("Daemon shutting down")

    # Shutdown subsystems owned by the context first.
    cleanup_daemon_context(context)

    # Remove PID file (best effort; a failure is logged, not fatal).
    try:
        PID_FILE.unlink(missing_ok=True)
    except KeyboardInterrupt:
        _thread.interrupt_main()
        raise
    except Exception as e:
        logging.error(f"Failed to remove PID file: {e}")

    # Set final status so clients polling the status file see a clean state.
    # NOTE(review): this uses the status manager after cleanup_daemon_context
    # has run — presumably the status manager survives teardown; confirm.
    context.status_manager.update_status(DaemonState.IDLE, "Daemon shut down")

    logging.info("Cleanup complete, exiting with status 0")
    sys.exit(0)
254
+
255
+
256
def run_daemon_loop() -> None:
    """Main daemon loop: process build, deploy and monitor requests.

    Per iteration the loop:
      * honors the shutdown signal file and the idle timeout,
      * performs periodic housekeeping (orphaned processes, stale cancel
        signals, stale locks, dead clients),
      * atomically consumes at most one pending request per operation type
        and dispatches it to the matching processor.
    """
    daemon_pid = os.getpid()
    daemon_started_at = time.time()

    logging.info("Starting daemon loop...")

    # Determine optimal worker pool size
    try:
        num_workers = multiprocessing.cpu_count()
    except (ImportError, NotImplementedError) as e:
        num_workers = 4  # Fallback for systems without multiprocessing
        logging.warning(f"Could not detect CPU count ({e}), using fallback: {num_workers} workers")

    # Create daemon context (includes status manager)
    context = create_daemon_context(
        daemon_pid=daemon_pid,
        daemon_started_at=daemon_started_at,
        num_workers=num_workers,
        file_cache_path=FILE_CACHE_FILE,
        status_file_path=STATUS_FILE,
    )

    # Write initial IDLE status IMMEDIATELY to prevent clients from reading stale status
    context.status_manager.update_status(DaemonState.IDLE, "Daemon starting...")

    # Initialize process tracker
    process_tracker = ProcessTracker(PROCESS_REGISTRY_FILE)

    # Register signal handlers
    def signal_handler_wrapper(signum: int, frame: object) -> None:
        signal_handler(signum, frame, context)

    signal.signal(signal.SIGTERM, signal_handler_wrapper)
    signal.signal(signal.SIGINT, signal_handler_wrapper)

    def _consume_request(request_file: Path, request_class: type, lock: threading.Lock) -> "object | None":
        """Atomically read-and-delete one pending request file.

        The lock makes read + delete a single unit so a request can never be
        processed twice. Returns the parsed request, or None if none pending.
        """
        with lock:
            request = read_request_file(request_file, request_class)
            if request:
                # Clear request file IMMEDIATELY (atomic consumption)
                clear_request_file(request_file)
        return request

    # One (file, class, consumption lock, processor, log label) entry per
    # operation type; checked in this order every iteration. This replaces
    # four copy-pasted consumption blocks with a single data-driven loop.
    request_channels = [
        (BUILD_REQUEST_FILE, BuildRequest, threading.Lock(), BuildRequestProcessor(), "build"),
        (DEPLOY_REQUEST_FILE, DeployRequest, threading.Lock(), DeployRequestProcessor(), "deploy"),
        (MONITOR_REQUEST_FILE, MonitorRequest, threading.Lock(), MonitorRequestProcessor(), "monitor"),
        (INSTALL_DEPS_REQUEST_FILE, InstallDependenciesRequest, threading.Lock(), InstallDependenciesProcessor(), "install dependencies"),
    ]

    logging.info(f"Daemon started with PID {daemon_pid}")
    context.status_manager.update_status(DaemonState.IDLE, "Daemon ready")

    last_activity = time.time()
    last_orphan_check = time.time()
    last_cancel_cleanup = time.time()
    last_stale_lock_check = time.time()
    last_dead_client_check = time.time()

    logging.info("Entering main daemon loop...")
    iteration_count = 0

    while True:
        try:
            iteration_count += 1
            if iteration_count % 100 == 0:  # Log every 100 iterations to avoid spam
                logging.debug(f"Daemon main loop iteration {iteration_count}")

            # Check for shutdown signal
            if should_shutdown():
                logging.info("Shutdown requested via signal")
                cleanup_and_exit(context)

            # Check idle timeout
            idle_time = time.time() - last_activity
            if idle_time > IDLE_TIMEOUT:
                logging.info(f"Idle timeout reached ({idle_time:.1f}s / {IDLE_TIMEOUT}s), shutting down")
                cleanup_and_exit(context)

            # Periodically check for and cleanup orphaned processes
            if time.time() - last_orphan_check >= ORPHAN_CHECK_INTERVAL:
                try:
                    orphaned_clients = process_tracker.cleanup_orphaned_processes()
                    if orphaned_clients:
                        logging.info(f"Cleaned up orphaned processes for {len(orphaned_clients)} dead clients: {orphaned_clients}")
                    last_orphan_check = time.time()
                except KeyboardInterrupt:
                    _thread.interrupt_main()
                    raise
                except Exception as e:
                    logging.error(f"Error during orphan cleanup: {e}", exc_info=True)

            # Periodically cleanup stale cancel signals (every 60 seconds)
            if time.time() - last_cancel_cleanup >= 60:
                try:
                    cleanup_stale_cancel_signals()
                    last_cancel_cleanup = time.time()
                except KeyboardInterrupt:
                    _thread.interrupt_main()
                    raise
                except Exception as e:
                    logging.error(f"Error during cancel signal cleanup: {e}", exc_info=True)

            # Check for manual stale lock clear signal
            clear_locks_signal = DAEMON_DIR / "clear_stale_locks.signal"
            if clear_locks_signal.exists():
                try:
                    clear_locks_signal.unlink()
                    logging.info("Received manual clear stale locks signal")
                    stale_locks = context.lock_manager.get_stale_locks()
                    stale_count = len(stale_locks["port_locks"]) + len(stale_locks["project_locks"])
                    if stale_count > 0:
                        logging.warning(f"Manually clearing {stale_count} stale locks...")
                        released = context.lock_manager.force_release_stale_locks()
                        logging.info(f"Force-released {released} stale locks")
                    else:
                        logging.info("No stale locks to clear")
                except KeyboardInterrupt:
                    _thread.interrupt_main()
                    raise
                except Exception as e:
                    logging.error(f"Error handling clear locks signal: {e}", exc_info=True)

            # Periodically check for and cleanup dead clients (every 10 seconds)
            if time.time() - last_dead_client_check >= DEAD_CLIENT_CHECK_INTERVAL:
                try:
                    dead_clients = context.client_manager.cleanup_dead_clients()
                    if dead_clients:
                        logging.info(f"Cleaned up {len(dead_clients)} dead clients: {dead_clients}")
                    last_dead_client_check = time.time()
                except KeyboardInterrupt:
                    _thread.interrupt_main()
                    raise
                except Exception as e:
                    logging.error(f"Error during dead client cleanup: {e}", exc_info=True)

            # Periodically check for and cleanup stale locks (every 60 seconds)
            if time.time() - last_stale_lock_check >= STALE_LOCK_CHECK_INTERVAL:
                try:
                    # Check for stale locks (held beyond timeout)
                    stale_locks = context.lock_manager.get_stale_locks()
                    stale_count = len(stale_locks["port_locks"]) + len(stale_locks["project_locks"])
                    if stale_count > 0:
                        logging.warning(f"Found {stale_count} stale locks, force-releasing...")
                        released = context.lock_manager.force_release_stale_locks()
                        logging.info(f"Force-released {released} stale locks")

                    # Also clean up unused lock entries (memory cleanup)
                    context.lock_manager.cleanup_unused_locks()
                    last_stale_lock_check = time.time()
                except KeyboardInterrupt:
                    _thread.interrupt_main()
                    raise
                except Exception as e:
                    logging.error(f"Error during stale lock cleanup: {e}", exc_info=True)

            # Consume and dispatch pending requests, one per operation type.
            for request_file, request_class, lock, processor, label in request_channels:
                request = _consume_request(request_file, request_class, lock)
                if not request:
                    continue

                last_activity = time.time()
                logging.info(f"Received {label} request: {request}")

                # Mark operation in progress so signal handlers refuse shutdown.
                context.status_manager.set_operation_in_progress(True)
                try:
                    processor.process_request(request, context)
                finally:
                    # ROBUSTNESS FIX: always clear the in-progress flag, even
                    # when a processor raises; previously a failed request left
                    # the flag set, making the daemon refuse graceful shutdown
                    # forever.
                    context.status_manager.set_operation_in_progress(False)

            # Sleep briefly to avoid busy-wait
            time.sleep(0.5)

        except KeyboardInterrupt:
            logging.warning("Daemon interrupted by user")
            _thread.interrupt_main()
            cleanup_and_exit(context)
        except Exception as e:
            logging.error(f"Daemon error: {e}", exc_info=True)
            # Continue running despite errors
            time.sleep(1)
501
+
502
+
503
def main() -> int:
    """Main entry point for daemon.

    Handles foreground mode, singleton enforcement via the PID file, and
    cross-platform daemonization (fork on POSIX, detached subprocess where
    fork is unavailable).

    Returns:
        Process exit code (0 on success).
    """
    # Parse command-line arguments
    foreground = "--foreground" in sys.argv

    # Setup logging
    setup_logging(foreground=foreground)

    # Ensure daemon directory exists
    DAEMON_DIR.mkdir(parents=True, exist_ok=True)

    if foreground:
        # Run in foreground (for debugging)
        logging.info("Running in foreground mode")
        # Write PID file
        with open(PID_FILE, "w") as f:
            f.write(str(os.getpid()))
        try:
            run_daemon_loop()
        finally:
            PID_FILE.unlink(missing_ok=True)
        return 0

    # Check if daemon already running
    if PID_FILE.exists():
        try:
            with open(PID_FILE) as f:
                existing_pid = int(f.read().strip())
            if psutil.pid_exists(existing_pid):
                logging.info(f"Daemon already running with PID {existing_pid}")
                print(f"Daemon already running with PID {existing_pid}")
                return 0
            else:
                # Stale PID file
                logging.info(f"Removing stale PID file for PID {existing_pid}")
                PID_FILE.unlink()
        except KeyboardInterrupt:
            _thread.interrupt_main()
            raise
        except Exception as e:
            logging.warning(f"Error checking existing PID: {e}")
            PID_FILE.unlink(missing_ok=True)

    # Simple daemonization for cross-platform compatibility
    try:
        # BUG FIX: the original condition `hasattr(os, "fork") and os.fork() > 0`
        # short-circuited to False on platforms without fork (Windows) WITHOUT
        # raising, so the subprocess fallback below was never reached and the
        # daemon ran in the caller's process. Raise explicitly instead.
        if not hasattr(os, "fork"):
            raise AttributeError("os.fork is not available on this platform")
        if os.fork() > 0:  # type: ignore[attr-defined]
            # Parent process exits
            return 0
    except (OSError, AttributeError):
        # Fork not supported (Windows) - run in background as subprocess
        logging.info("Fork not supported, using subprocess")
        subprocess.Popen(
            [sys.executable, __file__, "--foreground"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
            stdin=subprocess.DEVNULL,
            cwd=str(DAEMON_DIR),
        )
        return 0

    # Child process continues after the fork: record our PID and serve.
    with open(PID_FILE, "w") as f:
        f.write(str(os.getpid()))

    try:
        run_daemon_loop()
    finally:
        PID_FILE.unlink(missing_ok=True)

    return 0
575
+
576
+
577
if __name__ == "__main__":
    try:
        sys.exit(main())
    except KeyboardInterrupt as ke:
        # Imported lazily: only the Ctrl-C path pays for this import.
        from fbuild.interrupt_utils import handle_keyboard_interrupt_properly

        handle_keyboard_interrupt_properly(ke)
        print("\nDaemon interrupted by user")
        sys.exit(130)  # 128 + SIGINT(2): conventional exit code for Ctrl-C