fbuild-1.2.8-py3-none-any.whl → fbuild-1.2.15-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. fbuild/__init__.py +5 -1
  2. fbuild/build/configurable_compiler.py +49 -6
  3. fbuild/build/configurable_linker.py +14 -9
  4. fbuild/build/orchestrator_esp32.py +6 -3
  5. fbuild/build/orchestrator_rp2040.py +6 -2
  6. fbuild/cli.py +300 -5
  7. fbuild/config/ini_parser.py +13 -1
  8. fbuild/daemon/__init__.py +11 -0
  9. fbuild/daemon/async_client.py +5 -4
  10. fbuild/daemon/async_client_lib.py +1543 -0
  11. fbuild/daemon/async_protocol.py +825 -0
  12. fbuild/daemon/async_server.py +2100 -0
  13. fbuild/daemon/client.py +425 -13
  14. fbuild/daemon/configuration_lock.py +13 -13
  15. fbuild/daemon/connection.py +508 -0
  16. fbuild/daemon/connection_registry.py +579 -0
  17. fbuild/daemon/daemon.py +517 -164
  18. fbuild/daemon/daemon_context.py +72 -1
  19. fbuild/daemon/device_discovery.py +477 -0
  20. fbuild/daemon/device_manager.py +821 -0
  21. fbuild/daemon/error_collector.py +263 -263
  22. fbuild/daemon/file_cache.py +332 -332
  23. fbuild/daemon/firmware_ledger.py +46 -123
  24. fbuild/daemon/lock_manager.py +508 -508
  25. fbuild/daemon/messages.py +431 -0
  26. fbuild/daemon/operation_registry.py +288 -288
  27. fbuild/daemon/processors/build_processor.py +34 -1
  28. fbuild/daemon/processors/deploy_processor.py +1 -3
  29. fbuild/daemon/processors/locking_processor.py +7 -7
  30. fbuild/daemon/request_processor.py +457 -457
  31. fbuild/daemon/shared_serial.py +7 -7
  32. fbuild/daemon/status_manager.py +238 -238
  33. fbuild/daemon/subprocess_manager.py +316 -316
  34. fbuild/deploy/docker_utils.py +182 -2
  35. fbuild/deploy/monitor.py +1 -1
  36. fbuild/deploy/qemu_runner.py +71 -13
  37. fbuild/ledger/board_ledger.py +46 -122
  38. fbuild/output.py +238 -2
  39. fbuild/packages/library_compiler.py +15 -5
  40. fbuild/packages/library_manager.py +12 -6
  41. fbuild-1.2.15.dist-info/METADATA +569 -0
  42. {fbuild-1.2.8.dist-info → fbuild-1.2.15.dist-info}/RECORD +46 -39
  43. fbuild-1.2.8.dist-info/METADATA +0 -468
  44. {fbuild-1.2.8.dist-info → fbuild-1.2.15.dist-info}/WHEEL +0 -0
  45. {fbuild-1.2.8.dist-info → fbuild-1.2.15.dist-info}/entry_points.txt +0 -0
  46. {fbuild-1.2.8.dist-info → fbuild-1.2.15.dist-info}/licenses/LICENSE +0 -0
  47. {fbuild-1.2.8.dist-info → fbuild-1.2.15.dist-info}/top_level.txt +0 -0
fbuild/daemon/client.py CHANGED
@@ -85,18 +85,37 @@ def is_daemon_running() -> bool:
 
 
 def start_daemon() -> None:
-    """Start the daemon process."""
+    """Start the daemon process.
+
+    Passes the spawning client's PID as an argument so the daemon can log
+    which client originally started it.
+
+    On Windows, uses proper detachment flags to ensure:
+    - Daemon survives client termination (DETACHED_PROCESS)
+    - Daemon is isolated from client's Ctrl-C signals (CREATE_NEW_PROCESS_GROUP)
+    """
     daemon_script = Path(__file__).parent / "daemon.py"
 
     if not daemon_script.exists():
         raise RuntimeError(f"Daemon script not found: {daemon_script}")
 
-    # Start daemon in background
+    # Pass spawning client PID so daemon can log who started it
+    spawner_pid = os.getpid()
+
+    # On Windows, use proper detachment flags:
+    # - CREATE_NEW_PROCESS_GROUP: Isolates daemon from client's Ctrl-C signals
+    # - DETACHED_PROCESS: Daemon survives client termination, no console inherited
+    creationflags = 0
+    if sys.platform == "win32":
+        creationflags = subprocess.CREATE_NEW_PROCESS_GROUP | subprocess.DETACHED_PROCESS
+
+    # Start daemon in background as a fully detached process
     subprocess.Popen(
-        [sys.executable, str(daemon_script)],
+        [sys.executable, str(daemon_script), f"--spawned-by={spawner_pid}"],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        stdin=subprocess.DEVNULL,
+        creationflags=creationflags,
     )
 
 
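
Editor's note: the change above only sets detachment flags on Windows; on POSIX the daemon is left in the client's session. For readers reusing the pattern, a minimal cross-platform sketch follows — the `start_new_session=True` branch is an assumption of mine, not something this diff adds:

    # Sketch only: fbuild's actual code (above) sets flags on Windows alone.
    import subprocess
    import sys

    def spawn_detached(argv: list[str]) -> subprocess.Popen:
        kwargs: dict = {
            "stdout": subprocess.DEVNULL,
            "stderr": subprocess.DEVNULL,
            "stdin": subprocess.DEVNULL,
        }
        if sys.platform == "win32":
            # Survives client exit; isolated from the client's Ctrl-C group
            kwargs["creationflags"] = (
                subprocess.CREATE_NEW_PROCESS_GROUP | subprocess.DETACHED_PROCESS
            )
        else:
            # Assumed POSIX analog (not in the diff): a new session detaches
            # the child from the controlling terminal and signal group
            kwargs["start_new_session"] = True
        return subprocess.Popen(argv, **kwargs)
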
@@ -372,6 +391,7 @@ class BaseRequestHandler(ABC):
         self.start_time = 0.0
         self.last_message: str | None = None
         self.monitoring_started = False
+        self.build_output_started = False
         self.output_file_position = 0
         self.spinner_idx = 0
         self.last_spinner_update = 0.0
@@ -420,6 +440,14 @@
         """
         return False
 
+    def get_output_file_path(self) -> Path:
+        """Get the output file path to tail. Override in subclasses.
+
+        Returns:
+            Path to the output file
+        """
+        return self.project_dir / ".fbuild" / "monitor_output.txt"
+
     def on_monitoring_started(self) -> None:
         """Hook called when monitoring phase starts."""
         pass
@@ -449,7 +477,7 @@
 
     def tail_output_file(self) -> None:
         """Tail the output file and print new lines."""
-        output_file = self.project_dir / ".fbuild" / "monitor_output.txt"
+        output_file = self.get_output_file_path()
         if output_file.exists():
             try:
                 with open(output_file, "r", encoding="utf-8", errors="replace") as f:
@@ -465,10 +493,10 @@
 
     def read_remaining_output(self) -> None:
         """Read any remaining output from output file."""
-        if not self.monitoring_started:
+        if not self.monitoring_started and not self.build_output_started:
             return
 
-        output_file = self.project_dir / ".fbuild" / "monitor_output.txt"
+        output_file = self.get_output_file_path()
         if output_file.exists():
             try:
                 with open(output_file, "r", encoding="utf-8", errors="replace") as f:
@@ -550,18 +578,32 @@
                 self.last_spinner_update = time.time()
             else:
                 # Show spinner with elapsed time when in building/deploying state
+                # Only show spinner if we're not tailing build output
                 if status.state in (DaemonState.BUILDING, DaemonState.DEPLOYING):
-                    current_time = time.time()
-                    # Update spinner every 100ms
-                    if current_time - self.last_spinner_update >= 0.1:
-                        self.spinner_idx += 1
-                        display_spinner_progress(status, elapsed, self.spinner_idx)
-                        self.last_spinner_update = current_time
+                    if not (self.should_tail_output() and status.state == DaemonState.BUILDING and self.build_output_started):
+                        current_time = time.time()
+                        # Update spinner every 100ms
+                        if current_time - self.last_spinner_update >= 0.1:
+                            self.spinner_idx += 1
+                            display_spinner_progress(status, elapsed, self.spinner_idx)
+                            self.last_spinner_update = current_time
+
+            # Handle build output tailing phase
+            if self.should_tail_output() and status.state == DaemonState.BUILDING:
+                if not self.build_output_started:
+                    self.build_output_started = True
+                    # Clear spinner line before build output
+                    print("\r" + " " * 80 + "\r", end="", flush=True)
+                    print()  # Blank line before build output
+                self.tail_output_file()
 
             # Handle monitoring phase
             if self.should_tail_output() and status.state == DaemonState.MONITORING:
                 if not self.monitoring_started:
                     self.monitoring_started = True
+                    # Reset file position when transitioning from build to monitor
+                    if self.build_output_started:
+                        self.output_file_position = 0
                     # Clear spinner line before monitor output
                     print("\r" + " " * 80 + "\r", end="", flush=True)
                     print()  # Blank line before serial output
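
Editor's note: the handler prints only bytes it has not shown yet by remembering `self.output_file_position`, and resets that offset to 0 when the phase flips from build to monitor because the tailed path changes from build_output.txt to monitor_output.txt. A minimal sketch of the offset-based tailing pattern (simplified from the handler; the class name is mine):

    from pathlib import Path

    class Tailer:
        """Prints only the bytes appended since the last poll."""

        def __init__(self, path: Path) -> None:
            self.path = path
            self.position = 0  # plays the role of output_file_position

        def poll(self) -> None:
            if not self.path.exists():
                return
            with open(self.path, "r", encoding="utf-8", errors="replace") as f:
                f.seek(self.position)   # skip already-printed content
                chunk = f.read()        # everything appended since last poll
                if chunk:
                    print(chunk, end="")
                self.position = f.tell()  # remember where we stopped
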
@@ -646,6 +688,14 @@ class BuildRequestHandler(BaseRequestHandler):
         """Get operation emoji."""
         return "🔨"
 
+    def should_tail_output(self) -> bool:
+        """Build operations should tail output."""
+        return True
+
+    def get_output_file_path(self) -> Path:
+        """Build output goes to build_output.txt."""
+        return self.project_dir / ".fbuild" / "build_output.txt"
+
     def print_submission_info(self) -> None:
         """Print build submission information."""
         super().print_submission_info()
@@ -726,7 +776,15 @@ class DeployRequestHandler(BaseRequestHandler):
 
     def should_tail_output(self) -> bool:
         """Check if output should be tailed."""
-        return self.monitor_after
+        # Always tail during build phase, and during monitor if monitor_after is set
+        return True
+
+    def get_output_file_path(self) -> Path:
+        """During build phase, use build_output.txt; during monitor, use monitor_output.txt."""
+        status = read_status_file()
+        if status.state == DaemonState.BUILDING:
+            return self.project_dir / ".fbuild" / "build_output.txt"
+        return self.project_dir / ".fbuild" / "monitor_output.txt"
 
     def print_submission_info(self) -> None:
         """Print deploy submission information."""
@@ -1440,6 +1498,353 @@ def display_daemon_list() -> None:
     print()
 
 
+# ============================================================================
+# DEVICE MANAGEMENT FUNCTIONS
+# ============================================================================
+
+
+def list_devices(refresh: bool = False) -> list[dict[str, Any]] | None:
+    """List all devices known to the daemon.
+
+    Args:
+        refresh: Whether to refresh device discovery before listing.
+
+    Returns:
+        List of device info dictionaries, or None if daemon not running.
+        Each device dict contains:
+        - device_id: Stable device identifier
+        - port: Current port (may change)
+        - is_connected: Whether device is currently connected
+        - exclusive_holder: Client ID holding exclusive lease (or None)
+        - monitor_count: Number of active monitor leases
+    """
+    if not is_daemon_running():
+        return None
+
+    # For now, we use a signal file to communicate with the daemon
+    # In the future, this should use the async TCP connection
+    request_file = DAEMON_DIR / "device_list_request.json"
+    response_file = DAEMON_DIR / "device_list_response.json"
+
+    # Clean up any old response file
+    response_file.unlink(missing_ok=True)
+
+    # Write request
+    request = {"refresh": refresh, "timestamp": time.time()}
+    with open(request_file, "w") as f:
+        json.dump(request, f)
+
+    # Wait for response (timeout 5 seconds)
+    for _ in range(50):
+        if response_file.exists():
+            try:
+                with open(response_file) as f:
+                    response = json.load(f)
+                response_file.unlink(missing_ok=True)
+                if response.get("success"):
+                    return response.get("devices", [])
+                return []
+            except (json.JSONDecodeError, OSError):
+                pass
+        time.sleep(0.1)
+
+    # Timeout - clean up
+    request_file.unlink(missing_ok=True)
+    return None
+
+
+def get_device_status(device_id: str) -> dict[str, Any] | None:
+    """Get detailed status for a specific device.
+
+    Args:
+        device_id: The device ID to query.
+
+    Returns:
+        Device status dictionary, or None if device not found or daemon not running.
+    """
+    if not is_daemon_running():
+        return None
+
+    request_file = DAEMON_DIR / "device_status_request.json"
+    response_file = DAEMON_DIR / "device_status_response.json"
+
+    # Clean up any old response file
+    response_file.unlink(missing_ok=True)
+
+    # Write request
+    request = {"device_id": device_id, "timestamp": time.time()}
+    with open(request_file, "w") as f:
+        json.dump(request, f)
+
+    # Wait for response
+    for _ in range(50):
+        if response_file.exists():
+            try:
+                with open(response_file) as f:
+                    response = json.load(f)
+                response_file.unlink(missing_ok=True)
+                if response.get("success"):
+                    return response
+                return None
+            except (json.JSONDecodeError, OSError):
+                pass
+        time.sleep(0.1)
+
+    request_file.unlink(missing_ok=True)
+    return None
+
+
+def acquire_device_lease(
+    device_id: str,
+    lease_type: str = "exclusive",
+    description: str = "",
+) -> dict[str, Any] | None:
+    """Acquire a lease on a device.
+
+    Args:
+        device_id: The device ID to lease.
+        lease_type: Type of lease - "exclusive" or "monitor".
+        description: Description of the operation.
+
+    Returns:
+        Response dictionary with success status and lease_id, or None if failed.
+    """
+    if not is_daemon_running():
+        return None
+
+    request_file = DAEMON_DIR / "device_lease_request.json"
+    response_file = DAEMON_DIR / "device_lease_response.json"
+
+    response_file.unlink(missing_ok=True)
+
+    request = {
+        "device_id": device_id,
+        "lease_type": lease_type,
+        "description": description,
+        "timestamp": time.time(),
+    }
+    with open(request_file, "w") as f:
+        json.dump(request, f)
+
+    for _ in range(50):
+        if response_file.exists():
+            try:
+                with open(response_file) as f:
+                    response = json.load(f)
+                response_file.unlink(missing_ok=True)
+                return response
+            except (json.JSONDecodeError, OSError):
+                pass
+        time.sleep(0.1)
+
+    request_file.unlink(missing_ok=True)
+    return None
+
+
+def release_device_lease(device_id: str) -> dict[str, Any] | None:
+    """Release a lease on a device.
+
+    Args:
+        device_id: The device ID or lease ID to release.
+
+    Returns:
+        Response dictionary with success status, or None if failed.
+    """
+    if not is_daemon_running():
+        return None
+
+    request_file = DAEMON_DIR / "device_release_request.json"
+    response_file = DAEMON_DIR / "device_release_response.json"
+
+    response_file.unlink(missing_ok=True)
+
+    request = {"device_id": device_id, "timestamp": time.time()}
+    with open(request_file, "w") as f:
+        json.dump(request, f)
+
+    for _ in range(50):
+        if response_file.exists():
+            try:
+                with open(response_file) as f:
+                    response = json.load(f)
+                response_file.unlink(missing_ok=True)
+                return response
+            except (json.JSONDecodeError, OSError):
+                pass
+        time.sleep(0.1)
+
+    request_file.unlink(missing_ok=True)
+    return None
+
+
+def preempt_device(device_id: str, reason: str) -> dict[str, Any] | None:
+    """Preempt a device from its current holder.
+
+    Args:
+        device_id: The device ID to preempt.
+        reason: Reason for preemption (required).
+
+    Returns:
+        Response dictionary with success status and preempted_client_id, or None if failed.
+    """
+    if not is_daemon_running():
+        return None
+
+    if not reason:
+        return {"success": False, "message": "Reason is required for preemption"}
+
+    request_file = DAEMON_DIR / "device_preempt_request.json"
+    response_file = DAEMON_DIR / "device_preempt_response.json"
+
+    response_file.unlink(missing_ok=True)
+
+    request = {"device_id": device_id, "reason": reason, "timestamp": time.time()}
+    with open(request_file, "w") as f:
+        json.dump(request, f)
+
+    for _ in range(50):
+        if response_file.exists():
+            try:
+                with open(response_file) as f:
+                    response = json.load(f)
+                response_file.unlink(missing_ok=True)
+                return response
+            except (json.JSONDecodeError, OSError):
+                pass
+        time.sleep(0.1)
+
+    request_file.unlink(missing_ok=True)
+    return None
+
+
+def tail_daemon_logs(follow: bool = True, lines: int = 50) -> None:
+    """Tail the daemon log file.
+
+    This function streams the daemon's log output, allowing users to see
+    what the daemon is doing in real-time without affecting its operation.
+
+    Per TASK.md: `fbuild show daemon` should attach to daemon log stream
+    and tail it, with exit NOT stopping the daemon.
+
+    Args:
+        follow: If True, continuously follow the log file (like tail -f).
+            If False, just print the last N lines and exit.
+        lines: Number of lines to show initially (default: 50).
+    """
+    log_file = DAEMON_DIR / "daemon.log"
+
+    if not log_file.exists():
+        print("❌ Daemon log file not found")
+        print(f"   Expected at: {log_file}")
+        print("   Hint: Start the daemon first with 'fbuild build <project>'")
+        return
+
+    print(f"📋 Tailing daemon log: {log_file}")
+    if follow:
+        print("   (Press Ctrl-C to stop viewing - daemon will continue running)\n")
+    print("=" * 60)
+
+    try:
+        with open(log_file, "r", encoding="utf-8", errors="replace") as f:
+            # Read initial lines
+            all_lines = f.readlines()
+
+            # Show last N lines
+            if len(all_lines) > lines:
+                print(f"... (showing last {lines} lines) ...\n")
+                for line in all_lines[-lines:]:
+                    print(line, end="")
+            else:
+                for line in all_lines:
+                    print(line, end="")
+
+            if not follow:
+                return
+
+            # Follow mode - continuously read new content
+            while True:
+                line = f.readline()
+                if line:
+                    print(line, end="", flush=True)
+                else:
+                    # No new content - sleep briefly
+                    time.sleep(0.1)
+
+    except KeyboardInterrupt:
+        import _thread
+
+        _thread.interrupt_main()
+        print("\n\n" + "=" * 60)
+        print("✅ Stopped viewing logs (daemon continues running)")
+        print("   Use 'fbuild daemon status' to check daemon status")
+        print("   Use 'fbuild daemon stop' to stop the daemon")
+
+
+def get_daemon_log_path() -> Path:
+    """Get the path to the daemon log file.
+
+    Returns:
+        Path to daemon.log file
+    """
+    return DAEMON_DIR / "daemon.log"
+
+
+def display_daemon_stats_compact() -> None:
+    """Display daemon stats in a compact single-line format.
+
+    This function is called immediately when the client starts to show
+    the current daemon status. It's designed to be non-intrusive.
+    """
+    if not is_daemon_running():
+        print("🔴 Daemon: not running")
+        return
+
+    status = read_status_file()
+
+    # Calculate uptime if daemon_started_at is available
+    uptime_str = ""
+    if status.daemon_started_at:
+        uptime = time.time() - status.daemon_started_at
+        if uptime < 60:
+            uptime_str = f"{uptime:.0f}s"
+        elif uptime < 3600:
+            uptime_str = f"{uptime / 60:.0f}m"
+        else:
+            uptime_str = f"{uptime / 3600:.1f}h"
+
+    # Build the status line
+    pid_str = f"PID {status.daemon_pid}" if status.daemon_pid else ""
+    state_emoji = {
+        DaemonState.IDLE: "🟢",
+        DaemonState.BUILDING: "🔨",
+        DaemonState.DEPLOYING: "📦",
+        DaemonState.MONITORING: "👁️",
+        DaemonState.COMPLETED: "✅",
+        DaemonState.FAILED: "❌",
+        DaemonState.UNKNOWN: "❓",
+    }.get(status.state, "❓")
+
+    # Count active locks
+    port_locks = status.locks.get("port_locks", {}) if status.locks else {}
+    active_port_locks = sum(1 for info in port_locks.values() if isinstance(info, dict) and info.get("is_held"))
+
+    # Build compact stats line
+    parts = [f"{state_emoji} Daemon: {status.state.value}"]
+    if pid_str:
+        parts.append(pid_str)
+    if uptime_str:
+        parts.append(f"up {uptime_str}")
+    if active_port_locks > 0:
+        parts.append(f"locks: {active_port_locks}")
+    if status.operation_in_progress:
+        op_info = status.current_operation or status.message
+        if op_info and len(op_info) > 30:
+            op_info = op_info[:27] + "..."
+        parts.append(f"[{op_info}]")
+
+    print(" | ".join(parts))
+
+
 def main() -> int:
     """Command-line interface for client."""
     import argparse
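
Editor's note: the device functions added above all use the same file-based handshake — the client drops `<name>_request.json` into DAEMON_DIR, then polls for `<name>_response.json` for up to 5 seconds (50 × 0.1 s). The daemon-side responder is not part of this file; a hypothetical sketch of its shape (the DAEMON_DIR value and `handle_device_list` callback are assumptions):

    import json
    import time
    from pathlib import Path

    DAEMON_DIR = Path.home() / ".fbuild"  # assumed location

    def poll_device_list_requests(handle_device_list) -> None:
        """Daemon-side: answer one pending device_list request, if any."""
        request_file = DAEMON_DIR / "device_list_request.json"
        response_file = DAEMON_DIR / "device_list_response.json"
        if not request_file.exists():
            return
        try:
            request = json.loads(request_file.read_text())
        except (json.JSONDecodeError, OSError):
            return
        finally:
            request_file.unlink(missing_ok=True)  # consume the request
        devices = handle_device_list(refresh=request.get("refresh", False))
        response = {"success": True, "devices": devices, "timestamp": time.time()}
        # Write via rename so the polling client never reads a partial file
        tmp = response_file.with_suffix(".tmp")
        tmp.write_text(json.dumps(response))
        tmp.replace(response_file)

The atomic rename matters because the client polls every 100 ms and could otherwise catch a half-written JSON document.
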
@@ -1453,6 +1858,9 @@ def main() -> int:
     parser.add_argument("--kill", type=int, metavar="PID", help="Kill specific daemon by PID")
     parser.add_argument("--kill-all", action="store_true", help="Kill all daemon instances")
     parser.add_argument("--force", action="store_true", help="Force kill (with --kill or --kill-all)")
+    parser.add_argument("--tail", action="store_true", help="Tail daemon logs")
+    parser.add_argument("--no-follow", action="store_true", help="Don't follow log file (with --tail)")
+    parser.add_argument("--lines", type=int, default=50, help="Number of lines to show initially (with --tail)")
 
     args = parser.parse_args()
 
@@ -1493,6 +1901,10 @@ def main() -> int:
         print(f"Killed {killed} daemon instance(s)")
         return 0
 
+    if args.tail:
+        tail_daemon_logs(follow=not args.no_follow, lines=args.lines)
+        return 0
+
     parser.print_help()
     return 1
 
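
Editor's note: assuming the module can be invoked directly (its `__main__` guard is not shown in this diff), the new flags compose like this:

    python -m fbuild.daemon.client --tail --no-follow --lines 100

That would print the last 100 lines of daemon.log and exit; without `--no-follow` it behaves like `tail -f`, and Ctrl-C stops only the viewer, never the daemon.
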
fbuild/daemon/configuration_lock.py CHANGED
@@ -313,7 +313,7 @@ class ConfigurationLockManager:
 
             # If non-blocking, return False
             if timeout <= 0:
-                logging.debug(f"Exclusive lock not available for {config_key}, " f"current state: {lock.state.value}")
+                logging.debug(f"Exclusive lock not available for {config_key}, current state: {lock.state.value}")
                 return False
 
             # Add to waiting queue
@@ -322,7 +322,7 @@ class ConfigurationLockManager:
                 description=description,
             )
             lock.waiting_queue.append(waiting_request)
-            logging.debug(f"Client {client_id} added to waiting queue for {config_key}, " f"position: {len(lock.waiting_queue)}")
+            logging.debug(f"Client {client_id} added to waiting queue for {config_key}, position: {len(lock.waiting_queue)}")
 
         # Wait outside the master lock to avoid blocking other operations
         if waiting_request:
@@ -343,7 +343,7 @@ class ConfigurationLockManager:
                     pass  # Already removed
 
             if not signaled:
-                logging.debug(f"Timeout waiting for exclusive lock on {config_key} " f"for client {client_id}")
+                logging.debug(f"Timeout waiting for exclusive lock on {config_key} for client {client_id}")
                 return False
 
             # Try to acquire now that we've been signaled
@@ -356,7 +356,7 @@ class ConfigurationLockManager:
                     )
                     lock.last_activity_at = time.time()
                     self._track_client_lock(client_id, config_key)
-                    logging.debug(f"Exclusive lock acquired (after wait) for {config_key} " f"by {client_id}")
+                    logging.debug(f"Exclusive lock acquired (after wait) for {config_key} by {client_id}")
                     return True
                 else:
                     # Lock was taken by someone else
@@ -395,13 +395,13 @@ class ConfigurationLockManager:
 
             # Cannot acquire if exclusive lock is held
             if lock.state == LockState.LOCKED_EXCLUSIVE:
-                logging.debug(f"Shared read lock not available for {config_key}, " f"exclusive lock held by {lock.exclusive_holder.client_id if lock.exclusive_holder else 'unknown'}")
+                logging.debug(f"Shared read lock not available for {config_key}, exclusive lock held by {lock.exclusive_holder.client_id if lock.exclusive_holder else 'unknown'}")
                 return False
 
             # Cannot acquire if there are clients waiting for exclusive lock
             # (to prevent starvation of exclusive lock requests)
             if lock.waiting_queue:
-                logging.debug(f"Shared read lock not available for {config_key}, " f"{len(lock.waiting_queue)} clients waiting for exclusive lock")
+                logging.debug(f"Shared read lock not available for {config_key}, {len(lock.waiting_queue)} clients waiting for exclusive lock")
                 return False
 
             # Acquire shared read lock
@@ -413,7 +413,7 @@ class ConfigurationLockManager:
             )
             lock.last_activity_at = time.time()
             self._track_client_lock(client_id, config_key)
-            logging.debug(f"Shared read lock acquired for {config_key} by {client_id}, " f"total shared holders: {len(lock.shared_holders)}")
+            logging.debug(f"Shared read lock acquired for {config_key} by {client_id}, total shared holders: {len(lock.shared_holders)}")
             return True
 
     def release(self, config_key: tuple[str, str, str], client_id: str) -> bool:
@@ -459,7 +459,7 @@ class ConfigurationLockManager:
                 # Grant to next waiting client if any
                 self._grant_next_waiting(lock)
 
-                logging.debug(f"Shared read lock released for {config_key} by {client_id}, " f"remaining shared holders: {len(lock.shared_holders)}")
+                logging.debug(f"Shared read lock released for {config_key} by {client_id}, remaining shared holders: {len(lock.shared_holders)}")
                 return True
 
             logging.debug(f"Client {client_id} does not hold a lock for {config_key}")
@@ -625,7 +625,7 @@ class ConfigurationLockManager:
 
             # Need to wait for other shared holders to release
             if timeout <= 0:
-                logging.debug(f"Cannot upgrade lock for {config_key}, " f"{len(lock.shared_holders) - 1} other shared holders")
+                logging.debug(f"Cannot upgrade lock for {config_key}, {len(lock.shared_holders) - 1} other shared holders")
                 return False
 
             # Release our shared lock and join waiting queue with priority
@@ -661,7 +661,7 @@ class ConfigurationLockManager:
                     )
                     lock_check.last_activity_at = time.time()
                     # Don't re-track since we kept the tracking from shared
-                    logging.debug(f"Lock upgraded (immediate) to exclusive for {config_key} " f"by {client_id}")
+                    logging.debug(f"Lock upgraded (immediate) to exclusive for {config_key} by {client_id}")
                     return True
 
             remaining_timeout = timeout - (time.time() - start_time)
@@ -682,7 +682,7 @@ class ConfigurationLockManager:
                     pass
 
             if not signaled:
-                logging.debug(f"Timeout waiting for upgrade on {config_key} " f"for client {client_id}")
+                logging.debug(f"Timeout waiting for upgrade on {config_key} for client {client_id}")
                 # Re-acquire shared lock
                 lock_wait.shared_holders[client_id] = LockHolder(
                     client_id=client_id,
@@ -702,7 +702,7 @@ class ConfigurationLockManager:
                         lock_type="exclusive",
                     )
                     lock_wait.last_activity_at = time.time()
-                    logging.debug(f"Lock upgraded (after wait) to exclusive for {config_key} " f"by {client_id}")
+                    logging.debug(f"Lock upgraded (after wait) to exclusive for {config_key} by {client_id}")
                     return True
                 else:
                     # Someone else got the lock
@@ -749,7 +749,7 @@ class ConfigurationLockManager:
             # If there are clients waiting for exclusive, don't downgrade
             # (they should get the exclusive lock next)
            if lock.waiting_queue:
-                logging.debug(f"Cannot downgrade lock for {config_key}, " f"{len(lock.waiting_queue)} clients waiting for exclusive")
+                logging.debug(f"Cannot downgrade lock for {config_key}, {len(lock.waiting_queue)} clients waiting for exclusive")
                return False
 
            # Downgrade to shared read
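
Editor's note: every one of the thirteen changes to configuration_lock.py is the same mechanical fix — two adjacent f-string literals, left over from line wrapping, collapsed into one. Python concatenates adjacent string literals at compile time, f-strings included, so the logged output is identical either way; the single literal is simply easier to read and to grep for:

    config_key = ("esp32dev", "release", "v1")  # illustrative values
    client_id = "client-42"

    # Before: two adjacent f-strings, implicitly concatenated
    before = f"Timeout waiting for exclusive lock on {config_key} " f"for client {client_id}"
    # After: one literal, same result
    after = f"Timeout waiting for exclusive lock on {config_key} for client {client_id}"
    assert before == after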