crackerjack 0.31.10__py3-none-any.whl → 0.31.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic; see the registry's advisory page for more details.

Files changed (155)
  1. crackerjack/CLAUDE.md +288 -705
  2. crackerjack/__main__.py +22 -8
  3. crackerjack/agents/__init__.py +0 -3
  4. crackerjack/agents/architect_agent.py +0 -43
  5. crackerjack/agents/base.py +1 -9
  6. crackerjack/agents/coordinator.py +2 -148
  7. crackerjack/agents/documentation_agent.py +109 -81
  8. crackerjack/agents/dry_agent.py +122 -97
  9. crackerjack/agents/formatting_agent.py +3 -16
  10. crackerjack/agents/import_optimization_agent.py +1174 -130
  11. crackerjack/agents/performance_agent.py +956 -188
  12. crackerjack/agents/performance_helpers.py +229 -0
  13. crackerjack/agents/proactive_agent.py +1 -48
  14. crackerjack/agents/refactoring_agent.py +516 -246
  15. crackerjack/agents/refactoring_helpers.py +282 -0
  16. crackerjack/agents/security_agent.py +393 -90
  17. crackerjack/agents/test_creation_agent.py +1776 -120
  18. crackerjack/agents/test_specialist_agent.py +59 -15
  19. crackerjack/agents/tracker.py +0 -102
  20. crackerjack/api.py +145 -37
  21. crackerjack/cli/handlers.py +48 -30
  22. crackerjack/cli/interactive.py +11 -11
  23. crackerjack/cli/options.py +66 -4
  24. crackerjack/code_cleaner.py +808 -148
  25. crackerjack/config/global_lock_config.py +110 -0
  26. crackerjack/config/hooks.py +43 -64
  27. crackerjack/core/async_workflow_orchestrator.py +247 -97
  28. crackerjack/core/autofix_coordinator.py +192 -109
  29. crackerjack/core/enhanced_container.py +46 -63
  30. crackerjack/core/file_lifecycle.py +549 -0
  31. crackerjack/core/performance.py +9 -8
  32. crackerjack/core/performance_monitor.py +395 -0
  33. crackerjack/core/phase_coordinator.py +281 -94
  34. crackerjack/core/proactive_workflow.py +9 -58
  35. crackerjack/core/resource_manager.py +501 -0
  36. crackerjack/core/service_watchdog.py +490 -0
  37. crackerjack/core/session_coordinator.py +4 -8
  38. crackerjack/core/timeout_manager.py +504 -0
  39. crackerjack/core/websocket_lifecycle.py +475 -0
  40. crackerjack/core/workflow_orchestrator.py +343 -209
  41. crackerjack/dynamic_config.py +47 -6
  42. crackerjack/errors.py +3 -4
  43. crackerjack/executors/async_hook_executor.py +63 -13
  44. crackerjack/executors/cached_hook_executor.py +14 -14
  45. crackerjack/executors/hook_executor.py +100 -37
  46. crackerjack/executors/hook_lock_manager.py +856 -0
  47. crackerjack/executors/individual_hook_executor.py +120 -86
  48. crackerjack/intelligence/__init__.py +0 -7
  49. crackerjack/intelligence/adaptive_learning.py +13 -86
  50. crackerjack/intelligence/agent_orchestrator.py +15 -78
  51. crackerjack/intelligence/agent_registry.py +12 -59
  52. crackerjack/intelligence/agent_selector.py +31 -92
  53. crackerjack/intelligence/integration.py +1 -41
  54. crackerjack/interactive.py +9 -9
  55. crackerjack/managers/async_hook_manager.py +25 -8
  56. crackerjack/managers/hook_manager.py +9 -9
  57. crackerjack/managers/publish_manager.py +57 -59
  58. crackerjack/managers/test_command_builder.py +6 -36
  59. crackerjack/managers/test_executor.py +9 -61
  60. crackerjack/managers/test_manager.py +17 -63
  61. crackerjack/managers/test_manager_backup.py +77 -127
  62. crackerjack/managers/test_progress.py +4 -23
  63. crackerjack/mcp/cache.py +5 -12
  64. crackerjack/mcp/client_runner.py +10 -10
  65. crackerjack/mcp/context.py +64 -6
  66. crackerjack/mcp/dashboard.py +14 -11
  67. crackerjack/mcp/enhanced_progress_monitor.py +55 -55
  68. crackerjack/mcp/file_monitor.py +72 -42
  69. crackerjack/mcp/progress_components.py +103 -84
  70. crackerjack/mcp/progress_monitor.py +122 -49
  71. crackerjack/mcp/rate_limiter.py +12 -12
  72. crackerjack/mcp/server_core.py +16 -22
  73. crackerjack/mcp/service_watchdog.py +26 -26
  74. crackerjack/mcp/state.py +15 -0
  75. crackerjack/mcp/tools/core_tools.py +95 -39
  76. crackerjack/mcp/tools/error_analyzer.py +6 -32
  77. crackerjack/mcp/tools/execution_tools.py +1 -56
  78. crackerjack/mcp/tools/execution_tools_backup.py +35 -131
  79. crackerjack/mcp/tools/intelligence_tool_registry.py +0 -36
  80. crackerjack/mcp/tools/intelligence_tools.py +2 -55
  81. crackerjack/mcp/tools/monitoring_tools.py +308 -145
  82. crackerjack/mcp/tools/proactive_tools.py +12 -42
  83. crackerjack/mcp/tools/progress_tools.py +23 -15
  84. crackerjack/mcp/tools/utility_tools.py +3 -40
  85. crackerjack/mcp/tools/workflow_executor.py +40 -60
  86. crackerjack/mcp/websocket/app.py +0 -3
  87. crackerjack/mcp/websocket/endpoints.py +206 -268
  88. crackerjack/mcp/websocket/jobs.py +213 -66
  89. crackerjack/mcp/websocket/server.py +84 -6
  90. crackerjack/mcp/websocket/websocket_handler.py +137 -29
  91. crackerjack/models/config_adapter.py +3 -16
  92. crackerjack/models/protocols.py +162 -3
  93. crackerjack/models/resource_protocols.py +454 -0
  94. crackerjack/models/task.py +3 -3
  95. crackerjack/monitoring/__init__.py +0 -0
  96. crackerjack/monitoring/ai_agent_watchdog.py +25 -71
  97. crackerjack/monitoring/regression_prevention.py +28 -87
  98. crackerjack/orchestration/advanced_orchestrator.py +44 -78
  99. crackerjack/orchestration/coverage_improvement.py +10 -60
  100. crackerjack/orchestration/execution_strategies.py +16 -16
  101. crackerjack/orchestration/test_progress_streamer.py +61 -53
  102. crackerjack/plugins/base.py +1 -1
  103. crackerjack/plugins/managers.py +22 -20
  104. crackerjack/py313.py +65 -21
  105. crackerjack/services/backup_service.py +467 -0
  106. crackerjack/services/bounded_status_operations.py +627 -0
  107. crackerjack/services/cache.py +7 -9
  108. crackerjack/services/config.py +35 -52
  109. crackerjack/services/config_integrity.py +5 -16
  110. crackerjack/services/config_merge.py +542 -0
  111. crackerjack/services/contextual_ai_assistant.py +17 -19
  112. crackerjack/services/coverage_ratchet.py +44 -73
  113. crackerjack/services/debug.py +25 -39
  114. crackerjack/services/dependency_monitor.py +52 -50
  115. crackerjack/services/enhanced_filesystem.py +14 -11
  116. crackerjack/services/file_hasher.py +1 -1
  117. crackerjack/services/filesystem.py +1 -12
  118. crackerjack/services/git.py +71 -47
  119. crackerjack/services/health_metrics.py +31 -27
  120. crackerjack/services/initialization.py +276 -428
  121. crackerjack/services/input_validator.py +760 -0
  122. crackerjack/services/log_manager.py +16 -16
  123. crackerjack/services/logging.py +7 -6
  124. crackerjack/services/metrics.py +43 -43
  125. crackerjack/services/pattern_cache.py +2 -31
  126. crackerjack/services/pattern_detector.py +26 -63
  127. crackerjack/services/performance_benchmarks.py +20 -45
  128. crackerjack/services/regex_patterns.py +2887 -0
  129. crackerjack/services/regex_utils.py +537 -0
  130. crackerjack/services/secure_path_utils.py +683 -0
  131. crackerjack/services/secure_status_formatter.py +534 -0
  132. crackerjack/services/secure_subprocess.py +605 -0
  133. crackerjack/services/security.py +47 -10
  134. crackerjack/services/security_logger.py +492 -0
  135. crackerjack/services/server_manager.py +109 -50
  136. crackerjack/services/smart_scheduling.py +8 -25
  137. crackerjack/services/status_authentication.py +603 -0
  138. crackerjack/services/status_security_manager.py +442 -0
  139. crackerjack/services/thread_safe_status_collector.py +546 -0
  140. crackerjack/services/tool_version_service.py +1 -23
  141. crackerjack/services/unified_config.py +36 -58
  142. crackerjack/services/validation_rate_limiter.py +269 -0
  143. crackerjack/services/version_checker.py +9 -40
  144. crackerjack/services/websocket_resource_limiter.py +572 -0
  145. crackerjack/slash_commands/__init__.py +52 -2
  146. crackerjack/tools/__init__.py +0 -0
  147. crackerjack/tools/validate_input_validator_patterns.py +262 -0
  148. crackerjack/tools/validate_regex_patterns.py +198 -0
  149. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/METADATA +197 -12
  150. crackerjack-0.31.12.dist-info/RECORD +178 -0
  151. crackerjack/cli/facade.py +0 -104
  152. crackerjack-0.31.10.dist-info/RECORD +0 -149
  153. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/WHEEL +0 -0
  154. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/entry_points.txt +0 -0
  155. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,546 @@
1
+ """
2
+ Thread-safe status collection to prevent race conditions.
3
+
4
+ Provides synchronized status data collection with proper locking,
5
+ atomic operations, and consistency guarantees.
6
+ """
7
+
8
+ import asyncio
9
+ import json
10
+ import threading
11
+ import time
12
+ import typing as t
13
+ from contextlib import asynccontextmanager
14
+ from dataclasses import dataclass, field
15
+
16
+ from .security_logger import SecurityEventLevel, SecurityEventType, get_security_logger
17
+
18
+
19
+ @dataclass
20
+ class StatusSnapshot:
21
+ """Thread-safe snapshot of system status."""
22
+
23
+ services: dict[str, t.Any] = field(default_factory=dict)
24
+ jobs: dict[str, t.Any] = field(default_factory=dict)
25
+ server_stats: dict[str, t.Any] = field(default_factory=dict)
26
+ agent_suggestions: dict[str, t.Any] = field(default_factory=dict)
27
+ timestamp: float = field(default_factory=time.time)
28
+ collection_duration: float = 0.0
29
+ is_complete: bool = False
30
+ errors: list[str] = field(default_factory=list)
31
+
32
+
33
+ class ThreadSafeStatusCollector:
34
+ """
35
+ Thread-safe status collector with race condition prevention.
36
+
37
+ Features:
38
+ - Thread-safe data collection and aggregation
39
+ - Atomic status updates with consistency guarantees
40
+ - Deadlock prevention with ordered locking
41
+ - Timeout protection for all operations
42
+ - Error isolation and recovery
43
+ """
44
+
45
+ def __init__(self, timeout: float = 30.0):
46
+ """
47
+ Initialize thread-safe status collector.
48
+
49
+ Args:
50
+ timeout: Maximum time for status collection operations
51
+ """
52
+ self.timeout = timeout
53
+ self.security_logger = get_security_logger()
54
+
55
+ # Thread safety primitives with ordered locking to prevent deadlocks
56
+ self._collection_lock = threading.RLock() # Main collection lock
57
+ self._data_lock = threading.RLock() # Data structure lock
58
+ self._file_lock = threading.RLock() # File operation lock
59
+
60
+ # Status collection state
61
+ self._current_snapshot: StatusSnapshot | None = None
62
+ self._collection_in_progress = False
63
+ self._collection_start_time = 0.0
64
+
65
+ # Cached data with expiration
66
+ self._cache: dict[str, t.Any] = {}
67
+ self._cache_timestamps: dict[str, float] = {}
68
+ self._cache_ttl = 5.0 # 5 second cache TTL
69
+
70
+ # Thread-local storage for per-thread state
71
+ self._local = threading.local()
72
+
73
+ async def collect_comprehensive_status(
74
+ self,
75
+ client_id: str = "unknown",
76
+ include_jobs: bool = True,
77
+ include_services: bool = True,
78
+ include_stats: bool = True,
79
+ ) -> StatusSnapshot:
80
+ """
81
+ Collect comprehensive system status with thread safety.
82
+
83
+ Args:
84
+ client_id: Client identifier for logging
85
+ include_jobs: Include job information
86
+ include_services: Include service information
87
+ include_stats: Include server statistics
88
+
89
+ Returns:
90
+ StatusSnapshot with collected data
91
+
92
+ Raises:
93
+ TimeoutError: If collection takes too long
94
+ RuntimeError: If collection fails due to concurrency issues
95
+ """
96
+
97
+ # Use async context manager for proper cleanup
98
+ async with self._collection_context(client_id):
99
+ start_time = time.time()
100
+
101
+ try:
102
+ # Create new snapshot
103
+ snapshot = StatusSnapshot(timestamp=start_time)
104
+
105
+ # Collect data components in parallel with timeouts
106
+ collection_tasks = []
107
+
108
+ if include_services:
109
+ collection_tasks.append(
110
+ self._collect_services_data(client_id, snapshot)
111
+ )
112
+
113
+ if include_jobs:
114
+ collection_tasks.append(
115
+ self._collect_jobs_data(client_id, snapshot)
116
+ )
117
+
118
+ if include_stats:
119
+ collection_tasks.append(
120
+ self._collect_server_stats(client_id, snapshot)
121
+ )
122
+
123
+ # Wait for all collections to complete with timeout
124
+ await asyncio.wait_for(
125
+ asyncio.gather(*collection_tasks, return_exceptions=True),
126
+ timeout=self.timeout,
127
+ )
128
+
129
+ # Finalize snapshot
130
+ snapshot.collection_duration = time.time() - start_time
131
+ snapshot.is_complete = True
132
+
133
+ # Log successful collection
134
+ self.security_logger.log_security_event(
135
+ event_type=SecurityEventType.STATUS_COLLECTED,
136
+ level=SecurityEventLevel.INFO,
137
+ message=f"Status collection completed in {snapshot.collection_duration:.2f}s",
138
+ client_id=client_id,
139
+ operation="collect_status",
140
+ additional_data={
141
+ "components": {
142
+ "services": include_services,
143
+ "jobs": include_jobs,
144
+ "stats": include_stats,
145
+ },
146
+ "duration": snapshot.collection_duration,
147
+ },
148
+ )
149
+
150
+ return snapshot
151
+
152
+ except TimeoutError:
153
+ self.security_logger.log_security_event(
154
+ event_type=SecurityEventType.REQUEST_TIMEOUT,
155
+ level=SecurityEventLevel.ERROR,
156
+ message=f"Status collection timeout after {self.timeout}s",
157
+ client_id=client_id,
158
+ operation="collect_status",
159
+ )
160
+ raise TimeoutError(f"Status collection timed out after {self.timeout}s")
161
+
162
+ except Exception as e:
163
+ self.security_logger.log_security_event(
164
+ event_type=SecurityEventType.COLLECTION_ERROR,
165
+ level=SecurityEventLevel.ERROR,
166
+ message=f"Status collection failed: {e}",
167
+ client_id=client_id,
168
+ operation="collect_status",
169
+ additional_data={"error": str(e)},
170
+ )
171
+
172
+ # Return partial snapshot with error information
173
+ snapshot = StatusSnapshot(
174
+ timestamp=start_time,
175
+ collection_duration=time.time() - start_time,
176
+ errors=[str(e)],
177
+ is_complete=False,
178
+ )
179
+
180
+ return snapshot
181
+
182
+ @asynccontextmanager
183
+ async def _collection_context(self, client_id: str):
184
+ """Context manager for status collection with proper cleanup."""
185
+
186
+ # Acquire collection lock with timeout
187
+ collection_acquired = False
188
+ start_wait = time.time()
189
+
190
+ try:
191
+ # Try to acquire collection lock
192
+ while time.time() - start_wait < 5.0: # 5 second wait limit
193
+ with self._collection_lock:
194
+ if not self._collection_in_progress:
195
+ self._collection_in_progress = True
196
+ self._collection_start_time = time.time()
197
+ collection_acquired = True
198
+ break
199
+
200
+ await asyncio.sleep(0.1)
201
+
202
+ if not collection_acquired:
203
+ raise RuntimeError("Unable to acquire collection lock - system busy")
204
+
205
+ self.security_logger.log_security_event(
206
+ event_type=SecurityEventType.COLLECTION_START,
207
+ level=SecurityEventLevel.INFO,
208
+ message="Status collection started",
209
+ client_id=client_id,
210
+ operation="collect_status",
211
+ )
212
+
213
+ yield
214
+
215
+ finally:
216
+ # Always release the collection lock
217
+ if collection_acquired:
218
+ with self._collection_lock:
219
+ self._collection_in_progress = False
220
+ self._collection_start_time = 0.0
221
+
222
+ self.security_logger.log_security_event(
223
+ event_type=SecurityEventType.COLLECTION_END,
224
+ level=SecurityEventLevel.INFO,
225
+ message="Status collection ended",
226
+ client_id=client_id,
227
+ operation="collect_status",
228
+ )
229
+
230
+ async def _collect_services_data(
231
+ self,
232
+ client_id: str,
233
+ snapshot: StatusSnapshot,
234
+ ) -> None:
235
+ """Collect services data with thread safety."""
236
+
237
+ try:
238
+ # Check cache first
239
+ cached_data = self._get_cached_data("services")
240
+ if cached_data is not None:
241
+ with self._data_lock:
242
+ snapshot.services = cached_data
243
+ return
244
+
245
+ # Import here to avoid circular dependencies
246
+ from crackerjack.services.server_manager import (
247
+ find_mcp_server_processes,
248
+ find_websocket_server_processes,
249
+ )
250
+
251
+ # Collect process data with timeout
252
+ mcp_task = asyncio.create_task(asyncio.to_thread(find_mcp_server_processes))
253
+ websocket_task = asyncio.create_task(
254
+ asyncio.to_thread(find_websocket_server_processes)
255
+ )
256
+
257
+ mcp_processes, websocket_processes = await asyncio.wait_for(
258
+ asyncio.gather(mcp_task, websocket_task),
259
+ timeout=10.0,
260
+ )
261
+
262
+ services_data = {
263
+ "mcp_server": {
264
+ "running": len(mcp_processes) > 0,
265
+ "processes": mcp_processes,
266
+ },
267
+ "websocket_server": {
268
+ "running": len(websocket_processes) > 0,
269
+ "processes": websocket_processes,
270
+ },
271
+ }
272
+
273
+ # Atomically update snapshot and cache
274
+ with self._data_lock:
275
+ snapshot.services = services_data
276
+ self._set_cached_data("services", services_data)
277
+
278
+ except Exception as e:
279
+ error_msg = f"Failed to collect services data: {e}"
280
+ with self._data_lock:
281
+ snapshot.errors.append(error_msg)
282
+ snapshot.services = {"error": error_msg}
283
+
284
+ async def _collect_jobs_data(
285
+ self,
286
+ client_id: str,
287
+ snapshot: StatusSnapshot,
288
+ ) -> None:
289
+ """Collect jobs data with thread safety."""
290
+
291
+ try:
292
+ # Check cache first
293
+ cached_data = self._get_cached_data("jobs")
294
+ if cached_data is not None:
295
+ with self._data_lock:
296
+ snapshot.jobs = cached_data
297
+ return
298
+
299
+ # Get active jobs with file locking
300
+ active_jobs = await self._get_active_jobs_safe()
301
+
302
+ jobs_data = {
303
+ "active_count": len(
304
+ [j for j in active_jobs if j["status"] == "running"]
305
+ ),
306
+ "completed_count": len(
307
+ [j for j in active_jobs if j["status"] == "completed"]
308
+ ),
309
+ "failed_count": len(
310
+ [j for j in active_jobs if j["status"] == "failed"]
311
+ ),
312
+ "details": active_jobs,
313
+ }
314
+
315
+ # Atomically update snapshot and cache
316
+ with self._data_lock:
317
+ snapshot.jobs = jobs_data
318
+ self._set_cached_data("jobs", jobs_data)
319
+
320
+ except Exception as e:
321
+ error_msg = f"Failed to collect jobs data: {e}"
322
+ with self._data_lock:
323
+ snapshot.errors.append(error_msg)
324
+ snapshot.jobs = {"error": error_msg}
325
+
326
+ async def _collect_server_stats(
327
+ self,
328
+ client_id: str,
329
+ snapshot: StatusSnapshot,
330
+ ) -> None:
331
+ """Collect server statistics with thread safety."""
332
+
333
+ try:
334
+ # Get context safely
335
+ from crackerjack.mcp.context import get_context
336
+
337
+ try:
338
+ context = get_context()
339
+ except RuntimeError:
340
+ context = None
341
+
342
+ if not context:
343
+ with self._data_lock:
344
+ snapshot.server_stats = {"error": "Server context not available"}
345
+ return
346
+
347
+ # Build stats with timeout protection
348
+ stats_task = asyncio.create_task(
349
+ asyncio.to_thread(self._build_server_stats_safe, context)
350
+ )
351
+
352
+ server_stats = await asyncio.wait_for(stats_task, timeout=5.0)
353
+
354
+ # Atomically update snapshot
355
+ with self._data_lock:
356
+ snapshot.server_stats = server_stats
357
+
358
+ except Exception as e:
359
+ error_msg = f"Failed to collect server stats: {e}"
360
+ with self._data_lock:
361
+ snapshot.errors.append(error_msg)
362
+ snapshot.server_stats = {"error": error_msg}
363
+
364
+ async def _get_active_jobs_safe(self) -> list[dict[str, t.Any]]:
365
+ """Get active jobs with file system synchronization."""
366
+
367
+ jobs = []
368
+
369
+ # Use file lock to prevent race conditions during file reading
370
+ with self._file_lock:
371
+ try:
372
+ from crackerjack.mcp.context import get_context
373
+
374
+ context = get_context()
375
+ if not context or not context.progress_dir.exists():
376
+ return jobs
377
+
378
+ # Read job files with error handling
379
+ for progress_file in context.progress_dir.glob("job-*.json"):
380
+ try:
381
+ # Use atomic read with timeout
382
+ content = progress_file.read_text(encoding="utf-8")
383
+ progress_data = json.loads(content)
384
+
385
+ # Validate required fields
386
+ job_data = {
387
+ "job_id": progress_data.get("job_id", "unknown"),
388
+ "status": progress_data.get("status", "unknown"),
389
+ "iteration": progress_data.get("iteration", 0),
390
+ "max_iterations": progress_data.get("max_iterations", 10),
391
+ "current_stage": progress_data.get(
392
+ "current_stage", "unknown"
393
+ ),
394
+ "overall_progress": progress_data.get(
395
+ "overall_progress", 0
396
+ ),
397
+ "stage_progress": progress_data.get("stage_progress", 0),
398
+ "message": progress_data.get("message", ""),
399
+ "timestamp": progress_data.get("timestamp", ""),
400
+ "error_counts": progress_data.get("error_counts", {}),
401
+ }
402
+
403
+ jobs.append(job_data)
404
+
405
+ except (json.JSONDecodeError, OSError, UnicodeDecodeError) as e:
406
+ # Log file read error but continue processing other files
407
+ self.security_logger.log_security_event(
408
+ event_type=SecurityEventType.FILE_READ_ERROR,
409
+ level=SecurityEventLevel.WARNING,
410
+ message=f"Failed to read job file {progress_file}: {e}",
411
+ operation="get_active_jobs",
412
+ )
413
+ continue
414
+
415
+ except Exception as e:
416
+ self.security_logger.log_security_event(
417
+ event_type=SecurityEventType.COLLECTION_ERROR,
418
+ level=SecurityEventLevel.ERROR,
419
+ message=f"Failed to get active jobs: {e}",
420
+ operation="get_active_jobs",
421
+ )
422
+
423
+ return jobs
424
+
425
+ def _build_server_stats_safe(self, context: t.Any) -> dict[str, t.Any]:
426
+ """Build server stats with thread safety."""
427
+
428
+ try:
429
+ stats = {
430
+ "server_info": {
431
+ "project_path": str(context.config.project_path),
432
+ "websocket_port": getattr(context, "websocket_server_port", None),
433
+ "websocket_active": getattr(
434
+ context, "websocket_server_process", None
435
+ )
436
+ is not None,
437
+ },
438
+ "rate_limiting": {
439
+ "enabled": context.rate_limiter is not None,
440
+ "config": context.rate_limiter.config.__dict__
441
+ if context.rate_limiter
442
+ else None,
443
+ },
444
+ "resource_usage": {
445
+ "temp_files_count": len(list(context.progress_dir.glob("*.json")))
446
+ if context.progress_dir.exists()
447
+ else 0,
448
+ "progress_dir": str(context.progress_dir),
449
+ },
450
+ "timestamp": time.time(),
451
+ }
452
+
453
+ # Add state manager stats if available
454
+ state_manager = getattr(context, "state_manager", None)
455
+ if state_manager:
456
+ stats["state_manager"] = {
457
+ "iteration_count": getattr(state_manager, "iteration_count", 0),
458
+ "session_active": getattr(state_manager, "session_active", False),
459
+ "issues_count": len(getattr(state_manager, "issues", [])),
460
+ }
461
+
462
+ return stats
463
+
464
+ except Exception as e:
465
+ return {"error": f"Failed to build server stats: {e}"}
466
+
467
+ def _get_cached_data(self, key: str) -> dict[str, t.Any] | None:
468
+ """Get cached data if still valid."""
469
+
470
+ current_time = time.time()
471
+
472
+ with self._data_lock:
473
+ if key in self._cache and key in self._cache_timestamps:
474
+ cache_age = current_time - self._cache_timestamps[key]
475
+ if cache_age < self._cache_ttl:
476
+ return self._cache[key]
477
+
478
+ return None
479
+
480
+ def _set_cached_data(self, key: str, data: dict[str, t.Any]) -> None:
481
+ """Set cached data with timestamp."""
482
+
483
+ with self._data_lock:
484
+ self._cache[key] = data.copy() if hasattr(data, "copy") else data
485
+ self._cache_timestamps[key] = time.time()
486
+
487
+ def clear_cache(self) -> None:
488
+ """Clear all cached data."""
489
+
490
+ with self._data_lock:
491
+ self._cache.clear()
492
+ self._cache_timestamps.clear()
493
+
494
+ def get_collection_status(self) -> dict[str, t.Any]:
495
+ """Get current collection status and metrics."""
496
+
497
+ with self._collection_lock:
498
+ return {
499
+ "collection_in_progress": self._collection_in_progress,
500
+ "collection_duration": time.time() - self._collection_start_time
501
+ if self._collection_in_progress
502
+ else 0.0,
503
+ "cache_entries": len(self._cache),
504
+ "timeout": self.timeout,
505
+ }
506
+
507
+
508
# Process-wide singleton, created lazily on first access.
_status_collector: ThreadSafeStatusCollector | None = None


def get_thread_safe_status_collector() -> ThreadSafeStatusCollector:
    """Return the shared ThreadSafeStatusCollector, creating it on first use."""
    global _status_collector

    if _status_collector is None:
        _status_collector = ThreadSafeStatusCollector()

    return _status_collector
519
+
520
+
521
async def collect_secure_status(
    client_id: str = "unknown",
    include_jobs: bool = True,
    include_services: bool = True,
    include_stats: bool = True,
) -> StatusSnapshot:
    """
    Convenience wrapper for secure status collection.

    Delegates to the shared collector's ``collect_comprehensive_status``.

    Args:
        client_id: Client identifier for logging
        include_jobs: Include job information
        include_services: Include service information
        include_stats: Include server statistics

    Returns:
        StatusSnapshot with collected data
    """
    return await get_thread_safe_status_collector().collect_comprehensive_status(
        client_id=client_id,
        include_jobs=include_jobs,
        include_services=include_services,
        include_stats=include_stats,
    )
@@ -1,15 +1,3 @@
1
- """Tool version service - unified facade for version and configuration management.
2
-
3
- This module provides a unified interface to various tool and configuration services.
4
- The implementation has been split into focused modules following single responsibility principle.
5
-
6
- REFACTORING NOTE: This file was reduced from 1353 lines to ~50 lines by splitting into:
7
- - version_checker.py: Core version checking and comparison
8
- - config_integrity.py: Configuration file integrity checking
9
- - smart_scheduling.py: Intelligent scheduling for automated initialization
10
- - (Additional services extracted into separate files)
11
- """
12
-
13
1
  from pathlib import Path
14
2
 
15
3
  from rich.console import Console
@@ -18,7 +6,6 @@ from .config_integrity import ConfigIntegrityService
18
6
  from .smart_scheduling import SmartSchedulingService
19
7
  from .version_checker import VersionChecker, VersionInfo
20
8
 
21
- # Re-export for backward compatibility
22
9
  __all__ = [
23
10
  "VersionInfo",
24
11
  "ToolVersionService",
@@ -28,34 +15,25 @@ __all__ = [
28
15
 
29
16
 
30
17
class ToolVersionService:
    """Facade unifying tool-version, config-integrity and scheduling services.

    Delegates each call to one of three focused component services so callers
    keep a single entry point.
    """

    def __init__(self, console: Console, project_path: Path | None = None) -> None:
        self.console = console
        self.project_path = project_path or Path.cwd()

        # Component services this facade delegates to.
        self._version_checker = VersionChecker(console)
        self._config_integrity = ConfigIntegrityService(console, self.project_path)
        self._scheduling = SmartSchedulingService(console, self.project_path)

    async def check_tool_updates(self) -> dict[str, VersionInfo]:
        """Check for available tool updates via the version checker."""
        return await self._version_checker.check_tool_updates()

    def check_config_integrity(self) -> bool:
        """Verify configuration file integrity."""
        return self._config_integrity.check_config_integrity()

    def should_scheduled_init(self) -> bool:
        """Report whether a scheduled initialization is due."""
        return self._scheduling.should_scheduled_init()

    def record_init_timestamp(self) -> None:
        """Persist the timestamp of the latest initialization run."""
        self._scheduling.record_init_timestamp()
57
37
 
58
38
 
59
- # For backward compatibility, maintain the other services here if needed
60
- # They are primarily accessed through the facade now
61
- ToolManager = ToolVersionService # Alias for compatibility
39
+ ToolManager = ToolVersionService