crackerjack 0.32.0-py3-none-any.whl → 0.33.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic.

Files changed (200)
  1. crackerjack/__main__.py +1350 -34
  2. crackerjack/adapters/__init__.py +17 -0
  3. crackerjack/adapters/lsp_client.py +358 -0
  4. crackerjack/adapters/rust_tool_adapter.py +194 -0
  5. crackerjack/adapters/rust_tool_manager.py +193 -0
  6. crackerjack/adapters/skylos_adapter.py +231 -0
  7. crackerjack/adapters/zuban_adapter.py +560 -0
  8. crackerjack/agents/base.py +7 -3
  9. crackerjack/agents/coordinator.py +271 -33
  10. crackerjack/agents/documentation_agent.py +9 -15
  11. crackerjack/agents/dry_agent.py +3 -15
  12. crackerjack/agents/formatting_agent.py +1 -1
  13. crackerjack/agents/import_optimization_agent.py +36 -180
  14. crackerjack/agents/performance_agent.py +17 -98
  15. crackerjack/agents/performance_helpers.py +7 -31
  16. crackerjack/agents/proactive_agent.py +1 -3
  17. crackerjack/agents/refactoring_agent.py +16 -85
  18. crackerjack/agents/refactoring_helpers.py +7 -42
  19. crackerjack/agents/security_agent.py +9 -48
  20. crackerjack/agents/test_creation_agent.py +356 -513
  21. crackerjack/agents/test_specialist_agent.py +0 -4
  22. crackerjack/api.py +6 -25
  23. crackerjack/cli/cache_handlers.py +204 -0
  24. crackerjack/cli/cache_handlers_enhanced.py +683 -0
  25. crackerjack/cli/facade.py +100 -0
  26. crackerjack/cli/handlers.py +224 -9
  27. crackerjack/cli/interactive.py +6 -4
  28. crackerjack/cli/options.py +642 -55
  29. crackerjack/cli/utils.py +2 -1
  30. crackerjack/code_cleaner.py +58 -117
  31. crackerjack/config/global_lock_config.py +8 -48
  32. crackerjack/config/hooks.py +53 -62
  33. crackerjack/core/async_workflow_orchestrator.py +24 -34
  34. crackerjack/core/autofix_coordinator.py +3 -17
  35. crackerjack/core/enhanced_container.py +64 -6
  36. crackerjack/core/file_lifecycle.py +12 -89
  37. crackerjack/core/performance.py +2 -2
  38. crackerjack/core/performance_monitor.py +15 -55
  39. crackerjack/core/phase_coordinator.py +257 -218
  40. crackerjack/core/resource_manager.py +14 -90
  41. crackerjack/core/service_watchdog.py +62 -95
  42. crackerjack/core/session_coordinator.py +149 -0
  43. crackerjack/core/timeout_manager.py +14 -72
  44. crackerjack/core/websocket_lifecycle.py +13 -78
  45. crackerjack/core/workflow_orchestrator.py +558 -240
  46. crackerjack/docs/INDEX.md +11 -0
  47. crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
  48. crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
  49. crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
  50. crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
  51. crackerjack/docs/generated/api/SERVICES.md +1252 -0
  52. crackerjack/documentation/__init__.py +31 -0
  53. crackerjack/documentation/ai_templates.py +756 -0
  54. crackerjack/documentation/dual_output_generator.py +765 -0
  55. crackerjack/documentation/mkdocs_integration.py +518 -0
  56. crackerjack/documentation/reference_generator.py +977 -0
  57. crackerjack/dynamic_config.py +55 -50
  58. crackerjack/executors/async_hook_executor.py +10 -15
  59. crackerjack/executors/cached_hook_executor.py +117 -43
  60. crackerjack/executors/hook_executor.py +8 -34
  61. crackerjack/executors/hook_lock_manager.py +26 -183
  62. crackerjack/executors/individual_hook_executor.py +13 -11
  63. crackerjack/executors/lsp_aware_hook_executor.py +270 -0
  64. crackerjack/executors/tool_proxy.py +417 -0
  65. crackerjack/hooks/lsp_hook.py +79 -0
  66. crackerjack/intelligence/adaptive_learning.py +25 -10
  67. crackerjack/intelligence/agent_orchestrator.py +2 -5
  68. crackerjack/intelligence/agent_registry.py +34 -24
  69. crackerjack/intelligence/agent_selector.py +5 -7
  70. crackerjack/interactive.py +17 -6
  71. crackerjack/managers/async_hook_manager.py +0 -1
  72. crackerjack/managers/hook_manager.py +79 -1
  73. crackerjack/managers/publish_manager.py +66 -13
  74. crackerjack/managers/test_command_builder.py +5 -17
  75. crackerjack/managers/test_executor.py +1 -3
  76. crackerjack/managers/test_manager.py +109 -7
  77. crackerjack/managers/test_manager_backup.py +10 -9
  78. crackerjack/mcp/cache.py +2 -2
  79. crackerjack/mcp/client_runner.py +1 -1
  80. crackerjack/mcp/context.py +191 -68
  81. crackerjack/mcp/dashboard.py +7 -5
  82. crackerjack/mcp/enhanced_progress_monitor.py +31 -28
  83. crackerjack/mcp/file_monitor.py +30 -23
  84. crackerjack/mcp/progress_components.py +31 -21
  85. crackerjack/mcp/progress_monitor.py +50 -53
  86. crackerjack/mcp/rate_limiter.py +6 -6
  87. crackerjack/mcp/server_core.py +161 -32
  88. crackerjack/mcp/service_watchdog.py +2 -1
  89. crackerjack/mcp/state.py +4 -7
  90. crackerjack/mcp/task_manager.py +11 -9
  91. crackerjack/mcp/tools/core_tools.py +174 -33
  92. crackerjack/mcp/tools/error_analyzer.py +3 -2
  93. crackerjack/mcp/tools/execution_tools.py +15 -12
  94. crackerjack/mcp/tools/execution_tools_backup.py +42 -30
  95. crackerjack/mcp/tools/intelligence_tool_registry.py +7 -5
  96. crackerjack/mcp/tools/intelligence_tools.py +5 -2
  97. crackerjack/mcp/tools/monitoring_tools.py +33 -70
  98. crackerjack/mcp/tools/proactive_tools.py +24 -11
  99. crackerjack/mcp/tools/progress_tools.py +5 -8
  100. crackerjack/mcp/tools/utility_tools.py +20 -14
  101. crackerjack/mcp/tools/workflow_executor.py +62 -40
  102. crackerjack/mcp/websocket/app.py +8 -0
  103. crackerjack/mcp/websocket/endpoints.py +352 -357
  104. crackerjack/mcp/websocket/jobs.py +40 -57
  105. crackerjack/mcp/websocket/monitoring_endpoints.py +2935 -0
  106. crackerjack/mcp/websocket/server.py +7 -25
  107. crackerjack/mcp/websocket/websocket_handler.py +6 -17
  108. crackerjack/mixins/__init__.py +3 -0
  109. crackerjack/mixins/error_handling.py +145 -0
  110. crackerjack/models/config.py +21 -1
  111. crackerjack/models/config_adapter.py +49 -1
  112. crackerjack/models/protocols.py +176 -107
  113. crackerjack/models/resource_protocols.py +55 -210
  114. crackerjack/models/task.py +3 -0
  115. crackerjack/monitoring/ai_agent_watchdog.py +13 -13
  116. crackerjack/monitoring/metrics_collector.py +426 -0
  117. crackerjack/monitoring/regression_prevention.py +8 -8
  118. crackerjack/monitoring/websocket_server.py +643 -0
  119. crackerjack/orchestration/advanced_orchestrator.py +11 -6
  120. crackerjack/orchestration/coverage_improvement.py +3 -3
  121. crackerjack/orchestration/execution_strategies.py +26 -6
  122. crackerjack/orchestration/test_progress_streamer.py +8 -5
  123. crackerjack/plugins/base.py +2 -2
  124. crackerjack/plugins/hooks.py +7 -0
  125. crackerjack/plugins/managers.py +11 -8
  126. crackerjack/security/__init__.py +0 -1
  127. crackerjack/security/audit.py +90 -105
  128. crackerjack/services/anomaly_detector.py +392 -0
  129. crackerjack/services/api_extractor.py +615 -0
  130. crackerjack/services/backup_service.py +2 -2
  131. crackerjack/services/bounded_status_operations.py +15 -152
  132. crackerjack/services/cache.py +127 -1
  133. crackerjack/services/changelog_automation.py +395 -0
  134. crackerjack/services/config.py +18 -11
  135. crackerjack/services/config_merge.py +30 -85
  136. crackerjack/services/config_template.py +506 -0
  137. crackerjack/services/contextual_ai_assistant.py +48 -22
  138. crackerjack/services/coverage_badge_service.py +171 -0
  139. crackerjack/services/coverage_ratchet.py +41 -17
  140. crackerjack/services/debug.py +3 -3
  141. crackerjack/services/dependency_analyzer.py +460 -0
  142. crackerjack/services/dependency_monitor.py +14 -11
  143. crackerjack/services/documentation_generator.py +491 -0
  144. crackerjack/services/documentation_service.py +675 -0
  145. crackerjack/services/enhanced_filesystem.py +6 -5
  146. crackerjack/services/enterprise_optimizer.py +865 -0
  147. crackerjack/services/error_pattern_analyzer.py +676 -0
  148. crackerjack/services/file_hasher.py +1 -1
  149. crackerjack/services/git.py +41 -45
  150. crackerjack/services/health_metrics.py +10 -8
  151. crackerjack/services/heatmap_generator.py +735 -0
  152. crackerjack/services/initialization.py +30 -33
  153. crackerjack/services/input_validator.py +5 -97
  154. crackerjack/services/intelligent_commit.py +327 -0
  155. crackerjack/services/log_manager.py +15 -12
  156. crackerjack/services/logging.py +4 -3
  157. crackerjack/services/lsp_client.py +628 -0
  158. crackerjack/services/memory_optimizer.py +409 -0
  159. crackerjack/services/metrics.py +42 -33
  160. crackerjack/services/parallel_executor.py +416 -0
  161. crackerjack/services/pattern_cache.py +1 -1
  162. crackerjack/services/pattern_detector.py +6 -6
  163. crackerjack/services/performance_benchmarks.py +250 -576
  164. crackerjack/services/performance_cache.py +382 -0
  165. crackerjack/services/performance_monitor.py +565 -0
  166. crackerjack/services/predictive_analytics.py +510 -0
  167. crackerjack/services/quality_baseline.py +234 -0
  168. crackerjack/services/quality_baseline_enhanced.py +646 -0
  169. crackerjack/services/quality_intelligence.py +785 -0
  170. crackerjack/services/regex_patterns.py +605 -524
  171. crackerjack/services/regex_utils.py +43 -123
  172. crackerjack/services/secure_path_utils.py +5 -164
  173. crackerjack/services/secure_status_formatter.py +30 -141
  174. crackerjack/services/secure_subprocess.py +11 -92
  175. crackerjack/services/security.py +61 -30
  176. crackerjack/services/security_logger.py +18 -22
  177. crackerjack/services/server_manager.py +124 -16
  178. crackerjack/services/status_authentication.py +16 -159
  179. crackerjack/services/status_security_manager.py +4 -131
  180. crackerjack/services/terminal_utils.py +0 -0
  181. crackerjack/services/thread_safe_status_collector.py +19 -125
  182. crackerjack/services/unified_config.py +21 -13
  183. crackerjack/services/validation_rate_limiter.py +5 -54
  184. crackerjack/services/version_analyzer.py +459 -0
  185. crackerjack/services/version_checker.py +1 -1
  186. crackerjack/services/websocket_resource_limiter.py +10 -144
  187. crackerjack/services/zuban_lsp_service.py +390 -0
  188. crackerjack/slash_commands/__init__.py +2 -7
  189. crackerjack/slash_commands/run.md +2 -2
  190. crackerjack/tools/validate_input_validator_patterns.py +14 -40
  191. crackerjack/tools/validate_regex_patterns.py +19 -48
  192. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/METADATA +197 -26
  193. crackerjack-0.33.1.dist-info/RECORD +229 -0
  194. crackerjack/CLAUDE.md +0 -207
  195. crackerjack/RULES.md +0 -380
  196. crackerjack/py313.py +0 -234
  197. crackerjack-0.32.0.dist-info/RECORD +0 -180
  198. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/WHEEL +0 -0
  199. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/entry_points.txt +0 -0
  200. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/licenses/LICENSE +0 -0
@@ -1,628 +1,302 @@
+import asyncio
 import json
 import statistics
-import subprocess
 import time
 import typing as t
-from contextlib import suppress
 from dataclasses import dataclass, field
+from datetime import datetime
 from pathlib import Path
-from typing import Any

-from rich.console import Console
-from rich.table import Table
-
-from crackerjack.models.protocols import FileSystemInterface
+from crackerjack.services.logging import get_logger
+from crackerjack.services.memory_optimizer import get_memory_optimizer
+from crackerjack.services.performance_cache import get_performance_cache
+from crackerjack.services.performance_monitor import get_performance_monitor


 @dataclass
 class BenchmarkResult:
-    name: str
-    duration_seconds: float
-    memory_usage_mb: float = 0.0
-    cpu_percent: float = 0.0
-    iterations: int = 1
-    metadata: dict[str, Any] = field(default_factory=dict)
+    test_name: str
+    baseline_time_seconds: float
+    optimized_time_seconds: float
+    memory_baseline_mb: float
+    memory_optimized_mb: float
+    cache_hits: int = 0
+    cache_misses: int = 0
+    parallel_operations: int = 0
+    sequential_operations: int = 0
+
+    @property
+    def time_improvement_percentage(self) -> float:
+        if self.baseline_time_seconds == 0:
+            return 0.0
+        return (
+            (self.baseline_time_seconds - self.optimized_time_seconds)
+            / self.baseline_time_seconds
+            * 100
+        )
+
+    @property
+    def memory_improvement_percentage(self) -> float:
+        if self.memory_baseline_mb == 0:
+            return 0.0
+        return (
+            (self.memory_baseline_mb - self.memory_optimized_mb)
+            / self.memory_baseline_mb
+            * 100
+        )
+
+    @property
+    def cache_hit_ratio(self) -> float:
+        total = self.cache_hits + self.cache_misses
+        return self.cache_hits / total if total > 0 else 0.0
+
+    @property
+    def parallelization_ratio(self) -> float:
+        total = self.parallel_operations + self.sequential_operations
+        return self.parallel_operations / total if total > 0 else 0.0


 @dataclass
-class PerformanceReport:
-    total_duration: float
-    workflow_benchmarks: list[BenchmarkResult] = field(default_factory=list)
-    test_benchmarks: dict[str, Any] = field(default_factory=dict)
-    hook_performance: dict[str, float] = field(default_factory=dict)
-    file_operation_stats: dict[str, float] = field(default_factory=dict)
-    recommendations: list[str] = field(default_factory=list)
-    baseline_comparison: dict[str, float] = field(default_factory=dict)
-
-
-class PerformanceBenchmarkService:
-    def __init__(
-        self,
-        filesystem: FileSystemInterface,
-        console: Console | None = None,
-    ) -> None:
-        self.filesystem = filesystem
-        self.console = console or Console()
-        self.project_root = Path.cwd()
-        self.benchmarks_dir = self.project_root / ".benchmarks"
-        self.history_file = self.benchmarks_dir / "performance_history.json"
-
-        self.benchmarks_dir.mkdir(exist_ok=True)
-
-    def run_comprehensive_benchmark(
-        self,
-        run_tests: bool = True,
-        run_hooks: bool = True,
-        iterations: int = 1,
-    ) -> PerformanceReport:
-        self.console.print(
-            "[cyan]🚀 Starting comprehensive performance benchmark...[/ cyan]",
+class BenchmarkSuite:
+    suite_name: str
+    results: list[BenchmarkResult] = field(default_factory=list)
+    run_timestamp: datetime = field(default_factory=datetime.now)
+
+    @property
+    def average_time_improvement(self) -> float:
+        if not self.results:
+            return 0.0
+        improvements = [r.time_improvement_percentage for r in self.results]
+        return statistics.mean(improvements)
+
+    @property
+    def average_memory_improvement(self) -> float:
+        if not self.results:
+            return 0.0
+        improvements = [r.memory_improvement_percentage for r in self.results]
+        return statistics.mean(improvements)
+
+    @property
+    def overall_cache_hit_ratio(self) -> float:
+        total_hits = sum(r.cache_hits for r in self.results)
+        total_misses = sum(r.cache_misses for r in self.results)
+        total = total_hits + total_misses
+        return total_hits / total if total > 0 else 0.0
+
+    def add_result(self, result: BenchmarkResult) -> None:
+        self.results.append(result)
+
+
+class PerformanceBenchmarker:
+    def __init__(self) -> None:
+        self._logger = get_logger("crackerjack.benchmarker")
+        self._monitor = get_performance_monitor()
+        self._memory_optimizer = get_memory_optimizer()
+        self._cache = get_performance_cache()
+
+        self._test_iterations = 3
+        self._warmup_iterations = 1
+
+    async def run_comprehensive_benchmark(self) -> BenchmarkSuite:
+        self._logger.info("Starting comprehensive performance benchmark")
+
+        suite = BenchmarkSuite("Phase 3 Optimization Benchmark")
+
+        suite.add_result(await self._benchmark_memory_optimization())
+
+        suite.add_result(await self._benchmark_caching_performance())
+
+        suite.add_result(await self._benchmark_async_workflows())
+
+        self._logger.info(
+            f"Benchmark complete. Average improvements: "
+            f"Time: {suite.average_time_improvement: .1f}%, "
+            f"Memory: {suite.average_memory_improvement: .1f}%, "
+            f"Cache ratio: {suite.overall_cache_hit_ratio: .2f}"
         )

-        start_time = time.time()
-        report = self._initialize_performance_report()
+        return suite

-        self._run_requested_benchmarks(report, run_tests, run_hooks, iterations)
-        self._finalize_performance_report(report, start_time)
+    async def _benchmark_memory_optimization(self) -> BenchmarkResult:
+        self._logger.debug("Benchmarking memory optimization")

-        return report
+        baseline_start = time.time()
+        baseline_memory_start = self._memory_optimizer.record_checkpoint(
+            "baseline_start"
+        )

-    def _initialize_performance_report(self) -> PerformanceReport:
-        return PerformanceReport(total_duration=0.0)
+        heavy_objects = []
+        for i in range(50):
+            obj = {
+                "data": f"heavy_data_{i}" * 100,
+                "metadata": {"created": time.time(), "index": i},
+                "payload": list[t.Any](range(100)),
+            }
+            heavy_objects.append(obj)

-    def _run_requested_benchmarks(
-        self,
-        report: PerformanceReport,
-        run_tests: bool,
-        run_hooks: bool,
-        iterations: int,
-    ) -> None:
-        if run_tests:
-            report.test_benchmarks = self._benchmark_test_suite(iterations)
+        baseline_time = time.time() - baseline_start
+        baseline_memory_peak = self._memory_optimizer.record_checkpoint("baseline_peak")

-        if run_hooks:
-            report.hook_performance = self._benchmark_hooks(iterations)
+        del heavy_objects

-        report.workflow_benchmarks = self._benchmark_workflow_components(iterations)
-        report.file_operation_stats = self._benchmark_file_operations()
+        optimized_start = time.time()
+        optimized_memory_start = self._memory_optimizer.record_checkpoint(
+            "optimized_start"
+        )

-    def _finalize_performance_report(
-        self,
-        report: PerformanceReport,
-        start_time: float,
-    ) -> None:
-        report.total_duration = time.time() - start_time
-        report.recommendations = self._generate_performance_recommendations(report)
-        report.baseline_comparison = self._compare_with_baseline(report)
-        self._save_performance_history(report)
-
-    def _benchmark_test_suite(self, iterations: int = 1) -> dict[str, Any]:
-        self.console.print("[dim]📊 Benchmarking test suite...[/ dim]")
-
-        benchmark_results = {}
-
-        try:
-            for i in range(iterations):
-                start_time = time.time()
-
-                result = subprocess.run(
-                    [
-                        "uv",
-                        "run",
-                        "pytest",
-                        "--benchmark-only",
-                        "--benchmark-json=.benchmarks/test_benchmark.json",
-                        "--tb=no",
-                        "-q",
-                    ],
-                    check=False,
-                    capture_output=True,
-                    text=True,
-                    timeout=300,
-                )
-
-                duration = time.time() - start_time
-
-                benchmark_file = self.benchmarks_dir / "test_benchmark.json"
-                if benchmark_file.exists():
-                    with benchmark_file.open() as f:
-                        benchmark_data = json.load(f)
-
-                    benchmark_results[f"iteration_{i + 1}"] = {
-                        "total_duration": duration,
-                        "benchmark_data": benchmark_data,
-                        "success": result.returncode == 0,
-                    }
-                else:
-                    benchmark_results[f"iteration_{i + 1}"] = {
-                        "total_duration": duration,
-                        "success": result.returncode == 0,
-                        "note": "No benchmark tests found",
-                    }
-
-        except subprocess.TimeoutExpired:
-            benchmark_results["error"] = "Test benchmarking timed out"
-        except Exception as e:
-            benchmark_results["error"] = f"Test benchmarking failed: {e}"
-
-        return benchmark_results
-
-    def _benchmark_hooks(self, iterations: int = 1) -> dict[str, float]:
-        self.console.print("[dim]🔧 Benchmarking hooks performance...[/ dim]")
-
-        hook_performance = {}
-
-        hooks_to_test = [
-            "trailing-whitespace",
-            "end-of-file-fixer",
-            "ruff-format",
-            "ruff-check",
-            "gitleaks",
-            "pyright",
-            "bandit",
-            "vulture",
-        ]
-
-        for hook_name in hooks_to_test:
-            durations: list[float] = []
-
-            for _i in range(iterations):
-                try:
-                    start_time = time.time()
-                    subprocess.run(
-                        [
-                            "uv",
-                            "run",
-                            "pre-commit",
-                            "run",
-                            hook_name,
-                            "--all-files",
-                        ],
-                        check=False,
-                        capture_output=True,
-                        text=True,
-                        timeout=300,  # Fixed: Use 300s to match pytest config
-                    )
-                    duration = time.time() - start_time
-                    durations.append(duration)
-                except subprocess.TimeoutExpired:
-                    durations.append(300.0)  # Fixed: Use 300s to match timeout
-                except Exception:
-                    durations.append(float("inf"))
-
-            if durations and all(d != float("inf") for d in durations):
-                hook_performance[hook_name] = {
-                    "mean_duration": statistics.mean(durations),
-                    "min_duration": min(durations),
-                    "max_duration": max(durations),
-                }
+        from crackerjack.services.memory_optimizer import LazyLoader

-        return hook_performance
+        lazy_objects = []
+        for i in range(50):

-    def _benchmark_workflow_components(
-        self,
-        iterations: int = 1,
-    ) -> list[BenchmarkResult]:
-        self.console.print("[dim]⚙️ Benchmarking workflow components...[/ dim]")
+            def create_heavy_object(index: int = i) -> dict[str, t.Any]:
+                return {
+                    "data": f"heavy_data_{index}" * 100,
+                    "metadata": {"created": time.time(), "index": index},
+                    "payload": list[t.Any](range(100)),
+                }
+
+            lazy_obj = LazyLoader(create_heavy_object, f"heavy_object_{i}")
+            lazy_objects.append(lazy_obj)

-        results = []
+        optimized_time = time.time() - optimized_start
+        optimized_memory_peak = self._memory_optimizer.record_checkpoint(
+            "optimized_peak"
+        )

-        start_time = time.time()
-        python_files = list(self.project_root.rglob("*.py"))
-        file_discovery_duration = time.time() - start_time
+        del lazy_objects

-        results.append(
-            BenchmarkResult(
-                name="file_discovery",
-                duration_seconds=file_discovery_duration,
-                metadata={"files_found": len(python_files)},
-            ),
+        return BenchmarkResult(
+            test_name="memory_optimization",
+            baseline_time_seconds=baseline_time,
+            optimized_time_seconds=optimized_time,
+            memory_baseline_mb=max(0, baseline_memory_peak - baseline_memory_start),
+            memory_optimized_mb=max(0, optimized_memory_peak - optimized_memory_start),
         )

-        start_time = time.time()
-        pyproject_path = self.project_root / "pyproject.toml"
-        if pyproject_path.exists():
-            with suppress(Exception):
-                import tomllib
-
-                with pyproject_path.open("rb") as f:
-                    tomllib.load(f)
-        config_load_duration = time.time() - start_time
-
-        results.append(
-            BenchmarkResult(
-                name="config_loading",
-                duration_seconds=config_load_duration,
-            ),
-        )
-
-        return results
-
-    def _benchmark_file_operations(self) -> dict[str, float]:
-        stats = {}
-
-        test_files = list(self.project_root.glob("*.py"))[:10]
-        if test_files:
-            start_time = time.time()
-            for file_path in test_files:
-                with suppress(Exception):
-                    file_path.read_text(encoding="utf-8")
-            read_duration = time.time() - start_time
-            stats["file_read_ops"] = read_duration / len(test_files)
-
-        return stats
-
-    def _generate_performance_recommendations(
-        self,
-        report: PerformanceReport,
-    ) -> list[str]:
-        recommendations = []
-
-        self._add_test_suite_recommendations(report, recommendations)
-        self._add_hook_performance_recommendations(report, recommendations)
-        self._add_component_performance_recommendations(report, recommendations)
-        self._add_overall_performance_recommendations(report, recommendations)
-
-        return recommendations
-
-    def _add_test_suite_recommendations(
-        self,
-        report: PerformanceReport,
-        recommendations: list[str],
-    ) -> None:
-        if not report.test_benchmarks:
-            return
+    async def _benchmark_caching_performance(self) -> BenchmarkResult:
+        self._logger.debug("Benchmarking caching performance")

-        for iteration_data in report.test_benchmarks.values():
-            if self._is_slow_test_iteration(iteration_data):
-                recommendations.append(
-                    "Consider optimizing test suite-execution time exceeds 1 minute",
-                )
-                break
+        self._cache.clear()

-    def _is_slow_test_iteration(self, iteration_data: Any) -> bool:
-        return (
-            isinstance(iteration_data, dict)
-            and iteration_data.get("total_duration", 0) > 60
-        )
+        baseline_start = time.time()

-    def _add_hook_performance_recommendations(
-        self,
-        report: PerformanceReport,
-        recommendations: list[str],
-    ) -> None:
-        slow_hooks = self._identify_slow_hooks(report.hook_performance)
-        if slow_hooks:
-            recommendations.append(self._format_slow_hooks_message(slow_hooks))
-
-    def _identify_slow_hooks(
-        self,
-        hook_performance: dict[str, float],
-    ) -> list[tuple[str, float]]:
-        slow_hooks = []
-        for hook_name, perf_data in hook_performance.items():
-            if isinstance(perf_data, dict):
-                mean_duration = perf_data.get("mean_duration", 0)
-                if mean_duration > 30:
-                    slow_hooks.append((hook_name, mean_duration))
-        return slow_hooks
-
-    def _format_slow_hooks_message(self, slow_hooks: list[tuple[str, float]]) -> str:
-        hooks_info = ", ".join(f"{h}({d: .1f}s)" for h, d in slow_hooks[:3])
-        return (
-            f"Slow hooks detected: {hooks_info}. "
-            "Consider hook optimization or selective execution."
-        )
+        for i in range(10):
+            await self._simulate_expensive_operation(f"operation_{i % 3}")

-    def _add_component_performance_recommendations(
-        self,
-        report: PerformanceReport,
-        recommendations: list[str],
-    ) -> None:
-        slow_components = self._identify_slow_components(report.workflow_benchmarks)
-        if slow_components:
-            components_names = ", ".join(c.name for c in slow_components)
-            recommendations.append(
-                f"Slow workflow components: {components_names}. "
-                "Consider caching or optimization.",
-            )
-
-    def _identify_slow_components(
-        self,
-        workflow_benchmarks: list[BenchmarkResult],
-    ) -> list[BenchmarkResult]:
-        return [b for b in workflow_benchmarks if b.duration_seconds > 5]
-
-    def _add_overall_performance_recommendations(
-        self,
-        report: PerformanceReport,
-        recommendations: list[str],
-    ) -> None:
-        if report.total_duration > 300:
-            recommendations.append(
-                "Overall workflow execution is slow. Consider enabling --skip-hooks "
-                "during development iterations.",
-            )
-
-    def _compare_with_baseline(
-        self,
-        current_report: PerformanceReport,
-    ) -> dict[str, float]:
-        baseline_comparison = {}
-
-        try:
-            history = self._load_performance_history()
-            if not history:
-                return baseline_comparison
-
-            self._add_overall_performance_comparison(
-                current_report,
-                history,
-                baseline_comparison,
-            )
-            self._add_component_performance_comparison(
-                current_report,
-                history,
-                baseline_comparison,
-            )
-
-        except Exception as e:
-            baseline_comparison["error"] = f"Could not load baseline: {e}"
-
-        return baseline_comparison
-
-    def _load_performance_history(self) -> list[dict[str, Any]] | None:
-        if not self.history_file.exists():
-            return None
-
-        with self.history_file.open() as f:
-            history = json.load(f)
-
-        return history if history and len(history) > 1 else None
-
-    def _add_overall_performance_comparison(
-        self,
-        current_report: PerformanceReport,
-        history: list[dict[str, Any]],
-        comparison: dict[str, Any],
-    ) -> None:
-        recent_runs = history[-5:]
-        baseline_duration = statistics.median(
-            [r["total_duration"] for r in recent_runs],
+        baseline_time = time.time() - baseline_start
+
+        optimized_start = time.time()
+        cache_stats_start = self._cache.get_stats()
+
+        for i in range(10):
+            await self._simulate_cached_operation(f"operation_{i % 3}")
+
+        optimized_time = time.time() - optimized_start
+        cache_stats_end = self._cache.get_stats()
+
+        cache_hits = cache_stats_end.hits - cache_stats_start.hits
+        cache_misses = cache_stats_end.misses - cache_stats_start.misses
+
+        return BenchmarkResult(
+            test_name="caching_performance",
+            baseline_time_seconds=baseline_time,
+            optimized_time_seconds=optimized_time,
+            memory_baseline_mb=0.0,
+            memory_optimized_mb=0.0,
+            cache_hits=cache_hits,
+            cache_misses=cache_misses,
         )

-        performance_change = (
-            (current_report.total_duration - baseline_duration) / baseline_duration
-        ) * 100
-        comparison["overall_performance_change_percent"] = performance_change
+    async def _benchmark_async_workflows(self) -> BenchmarkResult:
+        self._logger.debug("Benchmarking async workflows")

-    def _add_component_performance_comparison(
-        self,
-        current_report: PerformanceReport,
-        history: list[dict[str, Any]],
-        comparison: dict[str, Any],
-    ) -> None:
-        recent_runs = history[-5:]
-        if not recent_runs:
-            return
-
-        component_durations = recent_runs[-1].get("component_durations", {})
-
-        for component in current_report.workflow_benchmarks:
-            if component.name in component_durations:
-                old_duration = component_durations[component.name]
-                change = self._calculate_performance_change(
-                    component.duration_seconds,
-                    old_duration,
-                )
-                comparison[f"{component.name}_change_percent"] = change
-
-    def _calculate_performance_change(
-        self,
-        current_duration: float,
-        old_duration: float,
-    ) -> float:
-        return ((current_duration - old_duration) / old_duration) * 100
-
-    def _save_performance_history(self, report: PerformanceReport) -> None:
-        try:
-            history = []
-            if self.history_file.exists():
-                with self.history_file.open() as f:
-                    history = json.load(f)
-
-            record = {
-                "timestamp": time.time(),
-                "total_duration": report.total_duration,
-                "component_durations": {
-                    c.name: c.duration_seconds for c in report.workflow_benchmarks
-                },
-                "hook_durations": {
-                    hook: (perf["mean_duration"] if isinstance(perf, dict) else perf)
-                    for hook, perf in report.hook_performance.items()
-                },
-                "recommendations_count": len(report.recommendations),
-            }
+        baseline_start = time.time()

-            history.append(record)
+        for i in range(5):
+            await self._simulate_io_operation(f"seq_{i}", 0.01)

-            history = history[-50:]
+        baseline_time = time.time() - baseline_start

-            with self.history_file.open("w") as f:
-                json.dump(history, f, indent=2)
+        optimized_start = time.time()

-        except Exception as e:
-            self.console.print(
-                f"[yellow]⚠️[/ yellow] Could not save performance history: {e}",
-            )
+        tasks = [self._simulate_io_operation(f"par_{i}", 0.01) for i in range(5)]
+        await asyncio.gather(*tasks)

-    def display_performance_report(self, report: PerformanceReport) -> None:
-        self.console.print(
-            "\n[bold cyan]🚀 Performance Benchmark Report[/ bold cyan]\n"
+        optimized_time = time.time() - optimized_start
+
+        return BenchmarkResult(
+            test_name="async_workflows",
+            baseline_time_seconds=baseline_time,
+            optimized_time_seconds=optimized_time,
+            memory_baseline_mb=0.0,
+            memory_optimized_mb=0.0,
+            parallel_operations=5,
+            sequential_operations=5,
         )

-        self._display_overall_stats(report)
-        self._display_workflow_components(report)
-        self._display_hook_performance(report)
-        self._display_baseline_comparison(report)
-        self._display_recommendations(report)
+    async def _simulate_expensive_operation(self, operation_id: str) -> str:
+        await asyncio.sleep(0.002)

-        self.console.print(
-            f"\n[dim]📁 Benchmark data saved to: {self.benchmarks_dir}[/ dim]",
-        )
+        result = ""
+        for i in range(100):
+            result += f"{operation_id}_{i}"
+
+        return result[:50]

-    def _display_overall_stats(self, report: PerformanceReport) -> None:
-        self.console.print(
-            f"[green]⏱️ Total Duration: {report.total_duration: .2f}s[/ green]",
+    async def _simulate_cached_operation(self, operation_id: str) -> str:
+        cached_result = await self._cache.get_async(f"expensive_op: {operation_id}")
+        if cached_result is not None:
+            return str(cached_result)
+
+        result = await self._simulate_expensive_operation(operation_id)
+        await self._cache.set_async(
+            f"expensive_op: {operation_id}", result, ttl_seconds=60
         )

-    def _display_workflow_components(self, report: PerformanceReport) -> None:
-        if not report.workflow_benchmarks:
-            return
-
-        table = Table(title="Workflow Component Performance")
-        table.add_column("Component", style="cyan")
-        table.add_column("Duration (s)", style="yellow", justify="right")
-        table.add_column("Metadata", style="dim")
-
-        for benchmark in report.workflow_benchmarks:
-            metadata_str = ", ".join(f"{k}={v}" for k, v in benchmark.metadata.items())
-            table.add_row(
-                benchmark.name,
-                f"{benchmark.duration_seconds: .3f}",
-                metadata_str,
-            )
-
-        self.console.print(table)
-        self.console.print()
-
-    def _display_hook_performance(self, report: PerformanceReport) -> None:
-        if not report.hook_performance:
-            return
-
-        table = Table(title="Hook Performance Analysis")
-        table.add_column("Hook", style="cyan")
-        table.add_column("Mean (s)", style="yellow", justify="right")
-        table.add_column("Min (s)", style="green", justify="right")
-        table.add_column("Max (s)", style="red", justify="right")
-
-        for hook_name, perf_data in report.hook_performance.items():
-            if isinstance(perf_data, dict):
-                table.add_row(
-                    hook_name,
-                    f"{perf_data.get('mean_duration', 0): .2f}",
-                    f"{perf_data.get('min_duration', 0): .2f}",
-                    f"{perf_data.get('max_duration', 0): .2f}",
-                )
-
-        self.console.print(table)
-        self.console.print()
-
-    def _display_baseline_comparison(self, report: PerformanceReport) -> None:
-        if not report.baseline_comparison:
-            return
-
-        self._print_comparison_header()
-        self._print_comparison_metrics(report.baseline_comparison)
-        self.console.print()
-
-    def _print_comparison_header(self) -> None:
-        self.console.print("[bold]📊 Performance Comparison[/ bold]")
-
-    def _print_comparison_metrics(self, baseline_comparison: dict[str, t.Any]) -> None:
-        for metric, value in baseline_comparison.items():
-            if isinstance(value, float | int) and "percent" in metric:
-                color = "green" if value < 0 else "red" if value > 10 else "yellow"
-                direction = "faster" if value < 0 else "slower"
-                self.console.print(
-                    f" {metric}: [{color}]{abs(value): .1f}% {direction}[/{color}]",
-                )
-
-    def _display_recommendations(self, report: PerformanceReport) -> None:
-        if report.recommendations:
-            self.console.print(
-                "[bold yellow]💡 Performance Recommendations[/ bold yellow]",
-            )
-            for i, rec in enumerate(report.recommendations, 1):
-                self.console.print(f" {i}. {rec}")
-        else:
-            self.console.print("[green]✨ No performance issues detected ![/ green]")
-
-    def get_performance_trends(self, days: int = 7) -> dict[str, Any]:
-        try:
-            recent_history = self._get_recent_history(days)
-            if not recent_history:
-                return self._handle_insufficient_trend_data()
-
-            trends = {}
-            self._add_duration_trends(recent_history, trends)
-            self._add_component_trends(recent_history, trends)
-            trends["data_points"] = len(recent_history)
-
-            return trends
-
-        except Exception as e:
-            return {"error": f"Could not analyze trends: {e}"}
-
-    def _get_recent_history(self, days: int) -> list[dict[str, Any]] | None:
-        if not self.history_file.exists():
-            return None
-
-        with self.history_file.open() as f:
-            history = json.load(f)
-
-        cutoff_time = time.time() - (days * 86400)
-        recent_history = [r for r in history if r.get("timestamp", 0) > cutoff_time]
-
-        return recent_history if len(recent_history) >= 2 else None
-
-    def _handle_insufficient_trend_data(self) -> dict[str, str]:
-        if not self.history_file.exists():
-            return {"error": "No performance history available"}
-        return {"error": "Insufficient data for trend analysis"}
-
-    def _add_duration_trends(
-        self, recent_history: list[dict[str, Any]], trends: dict[str, Any]
-    ) -> None:
-        durations = [r["total_duration"] for r in recent_history]
-        trends["duration_trend"] = {
-            "current": durations[-1],
-            "average": statistics.mean(durations),
-            "trend": self._determine_trend_direction(durations),
-        }
+        return result

-    def _add_component_trends(
-        self, recent_history: list[dict[str, Any]], trends: dict[str, Any]
+    async def _simulate_io_operation(self, operation_id: str, duration: float) -> str:
+        await asyncio.sleep(duration)
+        return f"result_{operation_id}"
+
+    def export_benchmark_results(
+        self, suite: BenchmarkSuite, output_path: Path
     ) -> None:
-        component_trends = {}
-        latest_components = recent_history[-1].get("component_durations", {})
-
-        for component in latest_components:
-            component_durations = self._extract_component_durations(
-                recent_history,
-                component,
-            )
-            if len(component_durations) >= 2:
-                component_trends[component] = {
-                    "current": component_durations[-1],
-                    "average": statistics.mean(component_durations),
-                    "trend": self._determine_trend_direction(component_durations),
+        data = {
+            "suite_name": suite.suite_name,
+            "run_timestamp": suite.run_timestamp.isoformat(),
+            "summary": {
+                "average_time_improvement_percentage": suite.average_time_improvement,
+                "average_memory_improvement_percentage": suite.average_memory_improvement,
+                "overall_cache_hit_ratio": suite.overall_cache_hit_ratio,
+                "total_tests": len(suite.results),
+            },
+            "results": [
+                {
+                    "test_name": r.test_name,
+                    "baseline_time_seconds": r.baseline_time_seconds,
+                    "optimized_time_seconds": r.optimized_time_seconds,
+                    "time_improvement_percentage": r.time_improvement_percentage,
+                    "memory_baseline_mb": r.memory_baseline_mb,
+                    "memory_optimized_mb": r.memory_optimized_mb,
+                    "memory_improvement_percentage": r.memory_improvement_percentage,
+                    "cache_hits": r.cache_hits,
+                    "cache_misses": r.cache_misses,
+                    "cache_hit_ratio": r.cache_hit_ratio,
+                    "parallel_operations": r.parallel_operations,
+                    "sequential_operations": r.sequential_operations,
+                    "parallelization_ratio": r.parallelization_ratio,
                 }
+                for r in suite.results
+            ],
+        }
+
+        with output_path.open("w") as f:
+            json.dump(data, f, indent=2)
+
+        self._logger.info(f"Exported benchmark results to {output_path}")
+

-        trends["component_trends"] = component_trends
-
-    def _extract_component_durations(
-        self,
-        recent_history: list[dict[str, Any]],
-        component: str,
-    ) -> list[float]:
-        return [
-            r.get("component_durations", {}).get(component)
-            for r in recent_history
-            if component in r.get("component_durations", {})
-        ]
-
-    def _determine_trend_direction(self, durations: list[float]) -> str:
-        current = durations[-1]
-        historical_average = statistics.mean(durations[:-1])
-        return "improving" if current < historical_average else "degrading"
+def get_benchmarker() -> PerformanceBenchmarker:
+    return PerformanceBenchmarker()
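
For orientation, the hunk above replaces the old subprocess-driven PerformanceBenchmarkService with an async PerformanceBenchmarker; judging by the file list, it most likely belongs to crackerjack/services/performance_benchmarks.py. The sketch below shows one way the new API (the get_benchmarker() factory, run_comprehensive_benchmark(), and export_benchmark_results()) might be driven. The driver script and its module path are assumptions based on this diff, not code shipped in the wheel.

```python
# Hypothetical driver script (not part of the package): exercises the
# PerformanceBenchmarker API shown in the diff above.
import asyncio
from pathlib import Path

# Module path assumed from the "Files changed" list above.
from crackerjack.services.performance_benchmarks import get_benchmarker


async def main() -> None:
    benchmarker = get_benchmarker()

    # Runs the memory, caching, and async-workflow benchmarks and
    # collects the results into a BenchmarkSuite.
    suite = await benchmarker.run_comprehensive_benchmark()

    print(f"avg time improvement: {suite.average_time_improvement:.1f}%")
    print(f"avg memory improvement: {suite.average_memory_improvement:.1f}%")
    print(f"cache hit ratio: {suite.overall_cache_hit_ratio:.2f}")

    # Writes the summary and per-test results to JSON.
    benchmarker.export_benchmark_results(suite, Path("benchmark_results.json"))


if __name__ == "__main__":
    asyncio.run(main())
```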