crackerjack 0.18.2__py3-none-any.whl → 0.45.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (533)
  1. crackerjack/README.md +19 -0
  2. crackerjack/__init__.py +96 -2
  3. crackerjack/__main__.py +637 -138
  4. crackerjack/adapters/README.md +18 -0
  5. crackerjack/adapters/__init__.py +39 -0
  6. crackerjack/adapters/_output_paths.py +167 -0
  7. crackerjack/adapters/_qa_adapter_base.py +309 -0
  8. crackerjack/adapters/_tool_adapter_base.py +706 -0
  9. crackerjack/adapters/ai/README.md +65 -0
  10. crackerjack/adapters/ai/__init__.py +5 -0
  11. crackerjack/adapters/ai/claude.py +853 -0
  12. crackerjack/adapters/complexity/README.md +53 -0
  13. crackerjack/adapters/complexity/__init__.py +10 -0
  14. crackerjack/adapters/complexity/complexipy.py +641 -0
  15. crackerjack/adapters/dependency/__init__.py +22 -0
  16. crackerjack/adapters/dependency/pip_audit.py +418 -0
  17. crackerjack/adapters/format/README.md +72 -0
  18. crackerjack/adapters/format/__init__.py +11 -0
  19. crackerjack/adapters/format/mdformat.py +313 -0
  20. crackerjack/adapters/format/ruff.py +516 -0
  21. crackerjack/adapters/lint/README.md +47 -0
  22. crackerjack/adapters/lint/__init__.py +11 -0
  23. crackerjack/adapters/lint/codespell.py +273 -0
  24. crackerjack/adapters/lsp/README.md +49 -0
  25. crackerjack/adapters/lsp/__init__.py +27 -0
  26. crackerjack/adapters/lsp/_base.py +194 -0
  27. crackerjack/adapters/lsp/_client.py +358 -0
  28. crackerjack/adapters/lsp/_manager.py +193 -0
  29. crackerjack/adapters/lsp/skylos.py +283 -0
  30. crackerjack/adapters/lsp/zuban.py +557 -0
  31. crackerjack/adapters/refactor/README.md +59 -0
  32. crackerjack/adapters/refactor/__init__.py +12 -0
  33. crackerjack/adapters/refactor/creosote.py +318 -0
  34. crackerjack/adapters/refactor/refurb.py +406 -0
  35. crackerjack/adapters/refactor/skylos.py +494 -0
  36. crackerjack/adapters/sast/README.md +132 -0
  37. crackerjack/adapters/sast/__init__.py +32 -0
  38. crackerjack/adapters/sast/_base.py +201 -0
  39. crackerjack/adapters/sast/bandit.py +423 -0
  40. crackerjack/adapters/sast/pyscn.py +405 -0
  41. crackerjack/adapters/sast/semgrep.py +241 -0
  42. crackerjack/adapters/security/README.md +111 -0
  43. crackerjack/adapters/security/__init__.py +17 -0
  44. crackerjack/adapters/security/gitleaks.py +339 -0
  45. crackerjack/adapters/type/README.md +52 -0
  46. crackerjack/adapters/type/__init__.py +12 -0
  47. crackerjack/adapters/type/pyrefly.py +402 -0
  48. crackerjack/adapters/type/ty.py +402 -0
  49. crackerjack/adapters/type/zuban.py +522 -0
  50. crackerjack/adapters/utility/README.md +51 -0
  51. crackerjack/adapters/utility/__init__.py +10 -0
  52. crackerjack/adapters/utility/checks.py +884 -0
  53. crackerjack/agents/README.md +264 -0
  54. crackerjack/agents/__init__.py +66 -0
  55. crackerjack/agents/architect_agent.py +238 -0
  56. crackerjack/agents/base.py +167 -0
  57. crackerjack/agents/claude_code_bridge.py +641 -0
  58. crackerjack/agents/coordinator.py +600 -0
  59. crackerjack/agents/documentation_agent.py +520 -0
  60. crackerjack/agents/dry_agent.py +585 -0
  61. crackerjack/agents/enhanced_coordinator.py +279 -0
  62. crackerjack/agents/enhanced_proactive_agent.py +185 -0
  63. crackerjack/agents/error_middleware.py +53 -0
  64. crackerjack/agents/formatting_agent.py +230 -0
  65. crackerjack/agents/helpers/__init__.py +9 -0
  66. crackerjack/agents/helpers/performance/__init__.py +22 -0
  67. crackerjack/agents/helpers/performance/performance_ast_analyzer.py +357 -0
  68. crackerjack/agents/helpers/performance/performance_pattern_detector.py +909 -0
  69. crackerjack/agents/helpers/performance/performance_recommender.py +572 -0
  70. crackerjack/agents/helpers/refactoring/__init__.py +22 -0
  71. crackerjack/agents/helpers/refactoring/code_transformer.py +536 -0
  72. crackerjack/agents/helpers/refactoring/complexity_analyzer.py +344 -0
  73. crackerjack/agents/helpers/refactoring/dead_code_detector.py +437 -0
  74. crackerjack/agents/helpers/test_creation/__init__.py +19 -0
  75. crackerjack/agents/helpers/test_creation/test_ast_analyzer.py +216 -0
  76. crackerjack/agents/helpers/test_creation/test_coverage_analyzer.py +643 -0
  77. crackerjack/agents/helpers/test_creation/test_template_generator.py +1031 -0
  78. crackerjack/agents/import_optimization_agent.py +1181 -0
  79. crackerjack/agents/performance_agent.py +325 -0
  80. crackerjack/agents/performance_helpers.py +205 -0
  81. crackerjack/agents/proactive_agent.py +55 -0
  82. crackerjack/agents/refactoring_agent.py +511 -0
  83. crackerjack/agents/refactoring_helpers.py +247 -0
  84. crackerjack/agents/security_agent.py +793 -0
  85. crackerjack/agents/semantic_agent.py +479 -0
  86. crackerjack/agents/semantic_helpers.py +356 -0
  87. crackerjack/agents/test_creation_agent.py +570 -0
  88. crackerjack/agents/test_specialist_agent.py +526 -0
  89. crackerjack/agents/tracker.py +110 -0
  90. crackerjack/api.py +647 -0
  91. crackerjack/cli/README.md +394 -0
  92. crackerjack/cli/__init__.py +24 -0
  93. crackerjack/cli/cache_handlers.py +209 -0
  94. crackerjack/cli/cache_handlers_enhanced.py +680 -0
  95. crackerjack/cli/facade.py +162 -0
  96. crackerjack/cli/formatting.py +13 -0
  97. crackerjack/cli/handlers/__init__.py +85 -0
  98. crackerjack/cli/handlers/advanced.py +103 -0
  99. crackerjack/cli/handlers/ai_features.py +62 -0
  100. crackerjack/cli/handlers/analytics.py +479 -0
  101. crackerjack/cli/handlers/changelog.py +271 -0
  102. crackerjack/cli/handlers/config_handlers.py +16 -0
  103. crackerjack/cli/handlers/coverage.py +84 -0
  104. crackerjack/cli/handlers/documentation.py +280 -0
  105. crackerjack/cli/handlers/main_handlers.py +497 -0
  106. crackerjack/cli/handlers/monitoring.py +371 -0
  107. crackerjack/cli/handlers.py +700 -0
  108. crackerjack/cli/interactive.py +488 -0
  109. crackerjack/cli/options.py +1216 -0
  110. crackerjack/cli/semantic_handlers.py +292 -0
  111. crackerjack/cli/utils.py +19 -0
  112. crackerjack/cli/version.py +19 -0
  113. crackerjack/code_cleaner.py +1307 -0
  114. crackerjack/config/README.md +472 -0
  115. crackerjack/config/__init__.py +275 -0
  116. crackerjack/config/global_lock_config.py +207 -0
  117. crackerjack/config/hooks.py +390 -0
  118. crackerjack/config/loader.py +239 -0
  119. crackerjack/config/settings.py +141 -0
  120. crackerjack/config/tool_commands.py +331 -0
  121. crackerjack/core/README.md +393 -0
  122. crackerjack/core/__init__.py +0 -0
  123. crackerjack/core/async_workflow_orchestrator.py +738 -0
  124. crackerjack/core/autofix_coordinator.py +282 -0
  125. crackerjack/core/container.py +105 -0
  126. crackerjack/core/enhanced_container.py +583 -0
  127. crackerjack/core/file_lifecycle.py +472 -0
  128. crackerjack/core/performance.py +244 -0
  129. crackerjack/core/performance_monitor.py +357 -0
  130. crackerjack/core/phase_coordinator.py +1227 -0
  131. crackerjack/core/proactive_workflow.py +267 -0
  132. crackerjack/core/resource_manager.py +425 -0
  133. crackerjack/core/retry.py +275 -0
  134. crackerjack/core/service_watchdog.py +601 -0
  135. crackerjack/core/session_coordinator.py +239 -0
  136. crackerjack/core/timeout_manager.py +563 -0
  137. crackerjack/core/websocket_lifecycle.py +410 -0
  138. crackerjack/core/workflow/__init__.py +21 -0
  139. crackerjack/core/workflow/workflow_ai_coordinator.py +863 -0
  140. crackerjack/core/workflow/workflow_event_orchestrator.py +1107 -0
  141. crackerjack/core/workflow/workflow_issue_parser.py +714 -0
  142. crackerjack/core/workflow/workflow_phase_executor.py +1158 -0
  143. crackerjack/core/workflow/workflow_security_gates.py +400 -0
  144. crackerjack/core/workflow_orchestrator.py +2243 -0
  145. crackerjack/data/README.md +11 -0
  146. crackerjack/data/__init__.py +8 -0
  147. crackerjack/data/models.py +79 -0
  148. crackerjack/data/repository.py +210 -0
  149. crackerjack/decorators/README.md +180 -0
  150. crackerjack/decorators/__init__.py +35 -0
  151. crackerjack/decorators/error_handling.py +649 -0
  152. crackerjack/decorators/error_handling_decorators.py +334 -0
  153. crackerjack/decorators/helpers.py +58 -0
  154. crackerjack/decorators/patterns.py +281 -0
  155. crackerjack/decorators/utils.py +58 -0
  156. crackerjack/docs/INDEX.md +11 -0
  157. crackerjack/docs/README.md +11 -0
  158. crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
  159. crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
  160. crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
  161. crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
  162. crackerjack/docs/generated/api/SERVICES.md +1252 -0
  163. crackerjack/documentation/README.md +11 -0
  164. crackerjack/documentation/__init__.py +31 -0
  165. crackerjack/documentation/ai_templates.py +756 -0
  166. crackerjack/documentation/dual_output_generator.py +767 -0
  167. crackerjack/documentation/mkdocs_integration.py +518 -0
  168. crackerjack/documentation/reference_generator.py +1065 -0
  169. crackerjack/dynamic_config.py +678 -0
  170. crackerjack/errors.py +378 -0
  171. crackerjack/events/README.md +11 -0
  172. crackerjack/events/__init__.py +16 -0
  173. crackerjack/events/telemetry.py +175 -0
  174. crackerjack/events/workflow_bus.py +346 -0
  175. crackerjack/exceptions/README.md +301 -0
  176. crackerjack/exceptions/__init__.py +5 -0
  177. crackerjack/exceptions/config.py +4 -0
  178. crackerjack/exceptions/tool_execution_error.py +245 -0
  179. crackerjack/executors/README.md +591 -0
  180. crackerjack/executors/__init__.py +13 -0
  181. crackerjack/executors/async_hook_executor.py +938 -0
  182. crackerjack/executors/cached_hook_executor.py +316 -0
  183. crackerjack/executors/hook_executor.py +1295 -0
  184. crackerjack/executors/hook_lock_manager.py +708 -0
  185. crackerjack/executors/individual_hook_executor.py +739 -0
  186. crackerjack/executors/lsp_aware_hook_executor.py +349 -0
  187. crackerjack/executors/progress_hook_executor.py +282 -0
  188. crackerjack/executors/tool_proxy.py +433 -0
  189. crackerjack/hooks/README.md +485 -0
  190. crackerjack/hooks/lsp_hook.py +93 -0
  191. crackerjack/intelligence/README.md +557 -0
  192. crackerjack/intelligence/__init__.py +37 -0
  193. crackerjack/intelligence/adaptive_learning.py +693 -0
  194. crackerjack/intelligence/agent_orchestrator.py +485 -0
  195. crackerjack/intelligence/agent_registry.py +377 -0
  196. crackerjack/intelligence/agent_selector.py +439 -0
  197. crackerjack/intelligence/integration.py +250 -0
  198. crackerjack/interactive.py +719 -0
  199. crackerjack/managers/README.md +369 -0
  200. crackerjack/managers/__init__.py +11 -0
  201. crackerjack/managers/async_hook_manager.py +135 -0
  202. crackerjack/managers/hook_manager.py +585 -0
  203. crackerjack/managers/publish_manager.py +631 -0
  204. crackerjack/managers/test_command_builder.py +391 -0
  205. crackerjack/managers/test_executor.py +474 -0
  206. crackerjack/managers/test_manager.py +1357 -0
  207. crackerjack/managers/test_progress.py +187 -0
  208. crackerjack/mcp/README.md +374 -0
  209. crackerjack/mcp/__init__.py +0 -0
  210. crackerjack/mcp/cache.py +352 -0
  211. crackerjack/mcp/client_runner.py +121 -0
  212. crackerjack/mcp/context.py +802 -0
  213. crackerjack/mcp/dashboard.py +657 -0
  214. crackerjack/mcp/enhanced_progress_monitor.py +493 -0
  215. crackerjack/mcp/file_monitor.py +394 -0
  216. crackerjack/mcp/progress_components.py +607 -0
  217. crackerjack/mcp/progress_monitor.py +1016 -0
  218. crackerjack/mcp/rate_limiter.py +336 -0
  219. crackerjack/mcp/server.py +24 -0
  220. crackerjack/mcp/server_core.py +526 -0
  221. crackerjack/mcp/service_watchdog.py +505 -0
  222. crackerjack/mcp/state.py +407 -0
  223. crackerjack/mcp/task_manager.py +259 -0
  224. crackerjack/mcp/tools/README.md +27 -0
  225. crackerjack/mcp/tools/__init__.py +19 -0
  226. crackerjack/mcp/tools/core_tools.py +469 -0
  227. crackerjack/mcp/tools/error_analyzer.py +283 -0
  228. crackerjack/mcp/tools/execution_tools.py +384 -0
  229. crackerjack/mcp/tools/intelligence_tool_registry.py +46 -0
  230. crackerjack/mcp/tools/intelligence_tools.py +264 -0
  231. crackerjack/mcp/tools/monitoring_tools.py +628 -0
  232. crackerjack/mcp/tools/proactive_tools.py +367 -0
  233. crackerjack/mcp/tools/progress_tools.py +222 -0
  234. crackerjack/mcp/tools/semantic_tools.py +584 -0
  235. crackerjack/mcp/tools/utility_tools.py +358 -0
  236. crackerjack/mcp/tools/workflow_executor.py +699 -0
  237. crackerjack/mcp/websocket/README.md +31 -0
  238. crackerjack/mcp/websocket/__init__.py +14 -0
  239. crackerjack/mcp/websocket/app.py +54 -0
  240. crackerjack/mcp/websocket/endpoints.py +492 -0
  241. crackerjack/mcp/websocket/event_bridge.py +188 -0
  242. crackerjack/mcp/websocket/jobs.py +406 -0
  243. crackerjack/mcp/websocket/monitoring/__init__.py +25 -0
  244. crackerjack/mcp/websocket/monitoring/api/__init__.py +19 -0
  245. crackerjack/mcp/websocket/monitoring/api/dependencies.py +141 -0
  246. crackerjack/mcp/websocket/monitoring/api/heatmap.py +154 -0
  247. crackerjack/mcp/websocket/monitoring/api/intelligence.py +199 -0
  248. crackerjack/mcp/websocket/monitoring/api/metrics.py +203 -0
  249. crackerjack/mcp/websocket/monitoring/api/telemetry.py +101 -0
  250. crackerjack/mcp/websocket/monitoring/dashboard.py +18 -0
  251. crackerjack/mcp/websocket/monitoring/factory.py +109 -0
  252. crackerjack/mcp/websocket/monitoring/filters.py +10 -0
  253. crackerjack/mcp/websocket/monitoring/metrics.py +64 -0
  254. crackerjack/mcp/websocket/monitoring/models.py +90 -0
  255. crackerjack/mcp/websocket/monitoring/utils.py +171 -0
  256. crackerjack/mcp/websocket/monitoring/websocket_manager.py +78 -0
  257. crackerjack/mcp/websocket/monitoring/websockets/__init__.py +17 -0
  258. crackerjack/mcp/websocket/monitoring/websockets/dependencies.py +126 -0
  259. crackerjack/mcp/websocket/monitoring/websockets/heatmap.py +176 -0
  260. crackerjack/mcp/websocket/monitoring/websockets/intelligence.py +291 -0
  261. crackerjack/mcp/websocket/monitoring/websockets/metrics.py +291 -0
  262. crackerjack/mcp/websocket/monitoring_endpoints.py +21 -0
  263. crackerjack/mcp/websocket/server.py +174 -0
  264. crackerjack/mcp/websocket/websocket_handler.py +276 -0
  265. crackerjack/mcp/websocket_server.py +10 -0
  266. crackerjack/models/README.md +308 -0
  267. crackerjack/models/__init__.py +40 -0
  268. crackerjack/models/config.py +730 -0
  269. crackerjack/models/config_adapter.py +265 -0
  270. crackerjack/models/protocols.py +1535 -0
  271. crackerjack/models/pydantic_models.py +320 -0
  272. crackerjack/models/qa_config.py +145 -0
  273. crackerjack/models/qa_results.py +134 -0
  274. crackerjack/models/resource_protocols.py +299 -0
  275. crackerjack/models/results.py +35 -0
  276. crackerjack/models/semantic_models.py +258 -0
  277. crackerjack/models/task.py +173 -0
  278. crackerjack/models/test_models.py +60 -0
  279. crackerjack/monitoring/README.md +11 -0
  280. crackerjack/monitoring/__init__.py +0 -0
  281. crackerjack/monitoring/ai_agent_watchdog.py +405 -0
  282. crackerjack/monitoring/metrics_collector.py +427 -0
  283. crackerjack/monitoring/regression_prevention.py +580 -0
  284. crackerjack/monitoring/websocket_server.py +406 -0
  285. crackerjack/orchestration/README.md +340 -0
  286. crackerjack/orchestration/__init__.py +43 -0
  287. crackerjack/orchestration/advanced_orchestrator.py +894 -0
  288. crackerjack/orchestration/cache/README.md +312 -0
  289. crackerjack/orchestration/cache/__init__.py +37 -0
  290. crackerjack/orchestration/cache/memory_cache.py +338 -0
  291. crackerjack/orchestration/cache/tool_proxy_cache.py +340 -0
  292. crackerjack/orchestration/config.py +297 -0
  293. crackerjack/orchestration/coverage_improvement.py +180 -0
  294. crackerjack/orchestration/execution_strategies.py +361 -0
  295. crackerjack/orchestration/hook_orchestrator.py +1398 -0
  296. crackerjack/orchestration/strategies/README.md +401 -0
  297. crackerjack/orchestration/strategies/__init__.py +39 -0
  298. crackerjack/orchestration/strategies/adaptive_strategy.py +630 -0
  299. crackerjack/orchestration/strategies/parallel_strategy.py +237 -0
  300. crackerjack/orchestration/strategies/sequential_strategy.py +299 -0
  301. crackerjack/orchestration/test_progress_streamer.py +647 -0
  302. crackerjack/plugins/README.md +11 -0
  303. crackerjack/plugins/__init__.py +15 -0
  304. crackerjack/plugins/base.py +200 -0
  305. crackerjack/plugins/hooks.py +254 -0
  306. crackerjack/plugins/loader.py +335 -0
  307. crackerjack/plugins/managers.py +264 -0
  308. crackerjack/py313.py +191 -0
  309. crackerjack/security/README.md +11 -0
  310. crackerjack/security/__init__.py +0 -0
  311. crackerjack/security/audit.py +197 -0
  312. crackerjack/services/README.md +374 -0
  313. crackerjack/services/__init__.py +9 -0
  314. crackerjack/services/ai/README.md +295 -0
  315. crackerjack/services/ai/__init__.py +7 -0
  316. crackerjack/services/ai/advanced_optimizer.py +878 -0
  317. crackerjack/services/ai/contextual_ai_assistant.py +542 -0
  318. crackerjack/services/ai/embeddings.py +444 -0
  319. crackerjack/services/ai/intelligent_commit.py +328 -0
  320. crackerjack/services/ai/predictive_analytics.py +510 -0
  321. crackerjack/services/anomaly_detector.py +392 -0
  322. crackerjack/services/api_extractor.py +617 -0
  323. crackerjack/services/backup_service.py +467 -0
  324. crackerjack/services/bounded_status_operations.py +530 -0
  325. crackerjack/services/cache.py +369 -0
  326. crackerjack/services/changelog_automation.py +399 -0
  327. crackerjack/services/command_execution_service.py +305 -0
  328. crackerjack/services/config_integrity.py +132 -0
  329. crackerjack/services/config_merge.py +546 -0
  330. crackerjack/services/config_service.py +198 -0
  331. crackerjack/services/config_template.py +493 -0
  332. crackerjack/services/coverage_badge_service.py +173 -0
  333. crackerjack/services/coverage_ratchet.py +381 -0
  334. crackerjack/services/debug.py +733 -0
  335. crackerjack/services/dependency_analyzer.py +460 -0
  336. crackerjack/services/dependency_monitor.py +622 -0
  337. crackerjack/services/documentation_generator.py +493 -0
  338. crackerjack/services/documentation_service.py +704 -0
  339. crackerjack/services/enhanced_filesystem.py +497 -0
  340. crackerjack/services/enterprise_optimizer.py +865 -0
  341. crackerjack/services/error_pattern_analyzer.py +676 -0
  342. crackerjack/services/file_filter.py +221 -0
  343. crackerjack/services/file_hasher.py +149 -0
  344. crackerjack/services/file_io_service.py +361 -0
  345. crackerjack/services/file_modifier.py +615 -0
  346. crackerjack/services/filesystem.py +381 -0
  347. crackerjack/services/git.py +422 -0
  348. crackerjack/services/health_metrics.py +615 -0
  349. crackerjack/services/heatmap_generator.py +744 -0
  350. crackerjack/services/incremental_executor.py +380 -0
  351. crackerjack/services/initialization.py +823 -0
  352. crackerjack/services/input_validator.py +668 -0
  353. crackerjack/services/intelligent_commit.py +327 -0
  354. crackerjack/services/log_manager.py +289 -0
  355. crackerjack/services/logging.py +228 -0
  356. crackerjack/services/lsp_client.py +628 -0
  357. crackerjack/services/memory_optimizer.py +414 -0
  358. crackerjack/services/metrics.py +587 -0
  359. crackerjack/services/monitoring/README.md +30 -0
  360. crackerjack/services/monitoring/__init__.py +9 -0
  361. crackerjack/services/monitoring/dependency_monitor.py +678 -0
  362. crackerjack/services/monitoring/error_pattern_analyzer.py +676 -0
  363. crackerjack/services/monitoring/health_metrics.py +716 -0
  364. crackerjack/services/monitoring/metrics.py +587 -0
  365. crackerjack/services/monitoring/performance_benchmarks.py +410 -0
  366. crackerjack/services/monitoring/performance_cache.py +388 -0
  367. crackerjack/services/monitoring/performance_monitor.py +569 -0
  368. crackerjack/services/parallel_executor.py +527 -0
  369. crackerjack/services/pattern_cache.py +333 -0
  370. crackerjack/services/pattern_detector.py +478 -0
  371. crackerjack/services/patterns/__init__.py +142 -0
  372. crackerjack/services/patterns/agents.py +107 -0
  373. crackerjack/services/patterns/code/__init__.py +15 -0
  374. crackerjack/services/patterns/code/detection.py +118 -0
  375. crackerjack/services/patterns/code/imports.py +107 -0
  376. crackerjack/services/patterns/code/paths.py +159 -0
  377. crackerjack/services/patterns/code/performance.py +119 -0
  378. crackerjack/services/patterns/code/replacement.py +36 -0
  379. crackerjack/services/patterns/core.py +212 -0
  380. crackerjack/services/patterns/documentation/__init__.py +14 -0
  381. crackerjack/services/patterns/documentation/badges_markdown.py +96 -0
  382. crackerjack/services/patterns/documentation/comments_blocks.py +83 -0
  383. crackerjack/services/patterns/documentation/docstrings.py +89 -0
  384. crackerjack/services/patterns/formatting.py +226 -0
  385. crackerjack/services/patterns/operations.py +339 -0
  386. crackerjack/services/patterns/security/__init__.py +23 -0
  387. crackerjack/services/patterns/security/code_injection.py +122 -0
  388. crackerjack/services/patterns/security/credentials.py +190 -0
  389. crackerjack/services/patterns/security/path_traversal.py +221 -0
  390. crackerjack/services/patterns/security/unsafe_operations.py +216 -0
  391. crackerjack/services/patterns/templates.py +62 -0
  392. crackerjack/services/patterns/testing/__init__.py +18 -0
  393. crackerjack/services/patterns/testing/error_patterns.py +107 -0
  394. crackerjack/services/patterns/testing/pytest_output.py +126 -0
  395. crackerjack/services/patterns/tool_output/__init__.py +16 -0
  396. crackerjack/services/patterns/tool_output/bandit.py +72 -0
  397. crackerjack/services/patterns/tool_output/other.py +97 -0
  398. crackerjack/services/patterns/tool_output/pyright.py +67 -0
  399. crackerjack/services/patterns/tool_output/ruff.py +44 -0
  400. crackerjack/services/patterns/url_sanitization.py +114 -0
  401. crackerjack/services/patterns/utilities.py +42 -0
  402. crackerjack/services/patterns/utils.py +339 -0
  403. crackerjack/services/patterns/validation.py +46 -0
  404. crackerjack/services/patterns/versioning.py +62 -0
  405. crackerjack/services/predictive_analytics.py +523 -0
  406. crackerjack/services/profiler.py +280 -0
  407. crackerjack/services/quality/README.md +415 -0
  408. crackerjack/services/quality/__init__.py +11 -0
  409. crackerjack/services/quality/anomaly_detector.py +392 -0
  410. crackerjack/services/quality/pattern_cache.py +333 -0
  411. crackerjack/services/quality/pattern_detector.py +479 -0
  412. crackerjack/services/quality/qa_orchestrator.py +491 -0
  413. crackerjack/services/quality/quality_baseline.py +395 -0
  414. crackerjack/services/quality/quality_baseline_enhanced.py +649 -0
  415. crackerjack/services/quality/quality_intelligence.py +949 -0
  416. crackerjack/services/regex_patterns.py +58 -0
  417. crackerjack/services/regex_utils.py +483 -0
  418. crackerjack/services/secure_path_utils.py +524 -0
  419. crackerjack/services/secure_status_formatter.py +450 -0
  420. crackerjack/services/secure_subprocess.py +635 -0
  421. crackerjack/services/security.py +239 -0
  422. crackerjack/services/security_logger.py +495 -0
  423. crackerjack/services/server_manager.py +411 -0
  424. crackerjack/services/smart_scheduling.py +167 -0
  425. crackerjack/services/status_authentication.py +460 -0
  426. crackerjack/services/status_security_manager.py +315 -0
  427. crackerjack/services/terminal_utils.py +0 -0
  428. crackerjack/services/thread_safe_status_collector.py +441 -0
  429. crackerjack/services/tool_filter.py +368 -0
  430. crackerjack/services/tool_version_service.py +43 -0
  431. crackerjack/services/unified_config.py +115 -0
  432. crackerjack/services/validation_rate_limiter.py +220 -0
  433. crackerjack/services/vector_store.py +689 -0
  434. crackerjack/services/version_analyzer.py +461 -0
  435. crackerjack/services/version_checker.py +223 -0
  436. crackerjack/services/websocket_resource_limiter.py +438 -0
  437. crackerjack/services/zuban_lsp_service.py +391 -0
  438. crackerjack/slash_commands/README.md +11 -0
  439. crackerjack/slash_commands/__init__.py +59 -0
  440. crackerjack/slash_commands/init.md +112 -0
  441. crackerjack/slash_commands/run.md +197 -0
  442. crackerjack/slash_commands/status.md +127 -0
  443. crackerjack/tools/README.md +11 -0
  444. crackerjack/tools/__init__.py +30 -0
  445. crackerjack/tools/_git_utils.py +105 -0
  446. crackerjack/tools/check_added_large_files.py +139 -0
  447. crackerjack/tools/check_ast.py +105 -0
  448. crackerjack/tools/check_json.py +103 -0
  449. crackerjack/tools/check_jsonschema.py +297 -0
  450. crackerjack/tools/check_toml.py +103 -0
  451. crackerjack/tools/check_yaml.py +110 -0
  452. crackerjack/tools/codespell_wrapper.py +72 -0
  453. crackerjack/tools/end_of_file_fixer.py +202 -0
  454. crackerjack/tools/format_json.py +128 -0
  455. crackerjack/tools/mdformat_wrapper.py +114 -0
  456. crackerjack/tools/trailing_whitespace.py +198 -0
  457. crackerjack/tools/validate_input_validator_patterns.py +236 -0
  458. crackerjack/tools/validate_regex_patterns.py +188 -0
  459. crackerjack/ui/README.md +11 -0
  460. crackerjack/ui/__init__.py +1 -0
  461. crackerjack/ui/dashboard_renderer.py +28 -0
  462. crackerjack/ui/templates/README.md +11 -0
  463. crackerjack/utils/console_utils.py +13 -0
  464. crackerjack/utils/dependency_guard.py +230 -0
  465. crackerjack/utils/retry_utils.py +275 -0
  466. crackerjack/workflows/README.md +590 -0
  467. crackerjack/workflows/__init__.py +46 -0
  468. crackerjack/workflows/actions.py +811 -0
  469. crackerjack/workflows/auto_fix.py +444 -0
  470. crackerjack/workflows/container_builder.py +499 -0
  471. crackerjack/workflows/definitions.py +443 -0
  472. crackerjack/workflows/engine.py +177 -0
  473. crackerjack/workflows/event_bridge.py +242 -0
  474. crackerjack-0.45.2.dist-info/METADATA +1678 -0
  475. crackerjack-0.45.2.dist-info/RECORD +478 -0
  476. {crackerjack-0.18.2.dist-info → crackerjack-0.45.2.dist-info}/WHEEL +1 -1
  477. crackerjack-0.45.2.dist-info/entry_points.txt +2 -0
  478. crackerjack/.gitignore +0 -14
  479. crackerjack/.libcst.codemod.yaml +0 -18
  480. crackerjack/.pdm.toml +0 -1
  481. crackerjack/.pre-commit-config.yaml +0 -91
  482. crackerjack/.pytest_cache/.gitignore +0 -2
  483. crackerjack/.pytest_cache/CACHEDIR.TAG +0 -4
  484. crackerjack/.pytest_cache/README.md +0 -8
  485. crackerjack/.pytest_cache/v/cache/nodeids +0 -1
  486. crackerjack/.pytest_cache/v/cache/stepwise +0 -1
  487. crackerjack/.ruff_cache/.gitignore +0 -1
  488. crackerjack/.ruff_cache/0.1.11/3256171999636029978 +0 -0
  489. crackerjack/.ruff_cache/0.1.14/602324811142551221 +0 -0
  490. crackerjack/.ruff_cache/0.1.4/10355199064880463147 +0 -0
  491. crackerjack/.ruff_cache/0.1.6/15140459877605758699 +0 -0
  492. crackerjack/.ruff_cache/0.1.7/1790508110482614856 +0 -0
  493. crackerjack/.ruff_cache/0.1.9/17041001205004563469 +0 -0
  494. crackerjack/.ruff_cache/0.11.2/4070660268492669020 +0 -0
  495. crackerjack/.ruff_cache/0.11.3/9818742842212983150 +0 -0
  496. crackerjack/.ruff_cache/0.11.4/9818742842212983150 +0 -0
  497. crackerjack/.ruff_cache/0.11.6/3557596832929915217 +0 -0
  498. crackerjack/.ruff_cache/0.11.7/10386934055395314831 +0 -0
  499. crackerjack/.ruff_cache/0.11.7/3557596832929915217 +0 -0
  500. crackerjack/.ruff_cache/0.11.8/530407680854991027 +0 -0
  501. crackerjack/.ruff_cache/0.2.0/10047773857155985907 +0 -0
  502. crackerjack/.ruff_cache/0.2.1/8522267973936635051 +0 -0
  503. crackerjack/.ruff_cache/0.2.2/18053836298936336950 +0 -0
  504. crackerjack/.ruff_cache/0.3.0/12548816621480535786 +0 -0
  505. crackerjack/.ruff_cache/0.3.3/11081883392474770722 +0 -0
  506. crackerjack/.ruff_cache/0.3.4/676973378459347183 +0 -0
  507. crackerjack/.ruff_cache/0.3.5/16311176246009842383 +0 -0
  508. crackerjack/.ruff_cache/0.5.7/1493622539551733492 +0 -0
  509. crackerjack/.ruff_cache/0.5.7/6231957614044513175 +0 -0
  510. crackerjack/.ruff_cache/0.5.7/9932762556785938009 +0 -0
  511. crackerjack/.ruff_cache/0.6.0/11982804814124138945 +0 -0
  512. crackerjack/.ruff_cache/0.6.0/12055761203849489982 +0 -0
  513. crackerjack/.ruff_cache/0.6.2/1206147804896221174 +0 -0
  514. crackerjack/.ruff_cache/0.6.4/1206147804896221174 +0 -0
  515. crackerjack/.ruff_cache/0.6.5/1206147804896221174 +0 -0
  516. crackerjack/.ruff_cache/0.6.7/3657366982708166874 +0 -0
  517. crackerjack/.ruff_cache/0.6.9/285614542852677309 +0 -0
  518. crackerjack/.ruff_cache/0.7.1/1024065805990144819 +0 -0
  519. crackerjack/.ruff_cache/0.7.1/285614542852677309 +0 -0
  520. crackerjack/.ruff_cache/0.7.3/16061516852537040135 +0 -0
  521. crackerjack/.ruff_cache/0.8.4/16354268377385700367 +0 -0
  522. crackerjack/.ruff_cache/0.9.10/12813592349865671909 +0 -0
  523. crackerjack/.ruff_cache/0.9.10/923908772239632759 +0 -0
  524. crackerjack/.ruff_cache/0.9.3/13948373885254993391 +0 -0
  525. crackerjack/.ruff_cache/0.9.9/12813592349865671909 +0 -0
  526. crackerjack/.ruff_cache/0.9.9/8843823720003377982 +0 -0
  527. crackerjack/.ruff_cache/CACHEDIR.TAG +0 -1
  528. crackerjack/crackerjack.py +0 -855
  529. crackerjack/pyproject.toml +0 -214
  530. crackerjack-0.18.2.dist-info/METADATA +0 -420
  531. crackerjack-0.18.2.dist-info/RECORD +0 -59
  532. crackerjack-0.18.2.dist-info/entry_points.txt +0 -4
  533. {crackerjack-0.18.2.dist-info → crackerjack-0.45.2.dist-info}/licenses/LICENSE +0 -0
crackerjack/services/quality/quality_intelligence.py
@@ -0,0 +1,949 @@
+ """Advanced ML-based quality intelligence with anomaly detection and predictive analytics."""
+
+ import json
+ import typing as t
+ from dataclasses import dataclass, field
+ from datetime import datetime
+ from enum import Enum
+ from pathlib import Path
+
+ import numpy as np
+ from scipy import stats
+
+ from crackerjack.models.protocols import QualityIntelligenceProtocol
+
+ from .quality_baseline_enhanced import (
+     AlertSeverity,
+     EnhancedQualityBaselineService,
+     TrendDirection,
+ )
+
+
+ class AnomalyType(str, Enum):
+     """Types of anomalies that can be detected."""
+
+     SPIKE = "spike"  # Sudden increase in metrics
+     DROP = "drop"  # Sudden decrease in metrics
+     DRIFT = "drift"  # Gradual change over time
+     OSCILLATION = "oscillation"  # Unusual fluctuation patterns
+     OUTLIER = "outlier"  # Statistical outlier
+
+
+ class PatternType(str, Enum):
+     """Types of patterns that can be identified."""
+
+     CYCLIC = "cyclic"  # Regular recurring patterns
+     SEASONAL = "seasonal"  # Time-based patterns
+     CORRELATION = "correlation"  # Metric correlation patterns
+     REGRESSION = "regression"  # Quality regression patterns
+     IMPROVEMENT = "improvement"  # Quality improvement patterns
+
+
+ @dataclass
+ class QualityAnomaly:
+     """Detected quality anomaly with ML confidence."""
+
+     anomaly_type: AnomalyType
+     metric_name: str
+     detected_at: datetime
+     confidence: float  # 0.0 to 1.0
+     severity: AlertSeverity
+     description: str
+     actual_value: float
+     expected_value: float
+     deviation_sigma: float  # Standard deviations from normal
+     context: dict[str, t.Any] = field(default_factory=dict[str, t.Any])
+
+     def to_dict(self) -> dict[str, t.Any]:
+         data = {
+             "anomaly_type": self.anomaly_type,
+             "metric_name": self.metric_name,
+             "detected_at": self.detected_at.isoformat(),
+             "confidence": self.confidence,
+             "severity": self.severity,
+             "description": self.description,
+             "actual_value": self.actual_value,
+             "expected_value": self.expected_value,
+             "deviation_sigma": self.deviation_sigma,
+             "context": self.context,
+         }
+         return data
+
+
+ @dataclass
+ class QualityPattern:
+     """Identified quality pattern with statistical analysis."""
+
+     pattern_type: PatternType
+     metric_names: list[str]
+     detected_at: datetime
+     confidence: float
+     description: str
+     period_days: int
+     correlation_strength: float  # For correlation patterns
+     trend_direction: TrendDirection
+     statistical_significance: float  # p-value
+     context: dict[str, t.Any] = field(default_factory=dict[str, t.Any])
+
+     def to_dict(self) -> dict[str, t.Any]:
+         return {
+             "pattern_type": self.pattern_type,
+             "metric_names": self.metric_names,
+             "detected_at": self.detected_at.isoformat(),
+             "confidence": self.confidence,
+             "description": self.description,
+             "period_days": self.period_days,
+             "correlation_strength": self.correlation_strength,
+             "trend_direction": self.trend_direction,
+             "statistical_significance": self.statistical_significance,
+             "context": self.context,
+         }
+
+
+ @dataclass
+ class QualityPrediction:
+     """Advanced quality prediction with confidence intervals."""
+
+     metric_name: str
+     predicted_value: float
+     confidence_lower: float
+     confidence_upper: float
+     confidence_level: float  # e.g., 0.95 for 95% confidence
+     prediction_horizon_days: int
+     prediction_method: str
+     created_at: datetime
+     factors: list[str] = field(default_factory=list)
+     risk_assessment: str = "low"  # low, medium, high
+
+     def to_dict(self) -> dict[str, t.Any]:
+         return {
+             "metric_name": self.metric_name,
+             "predicted_value": self.predicted_value,
+             "confidence_lower": self.confidence_lower,
+             "confidence_upper": self.confidence_upper,
+             "confidence_level": self.confidence_level,
+             "prediction_horizon_days": self.prediction_horizon_days,
+             "prediction_method": self.prediction_method,
+             "created_at": self.created_at.isoformat(),
+             "factors": self.factors,
+             "risk_assessment": self.risk_assessment,
+         }
+
+
+ @dataclass
+ class QualityInsights:
+     """Comprehensive quality insights with ML analysis."""
+
+     anomalies: list[QualityAnomaly]
+     patterns: list[QualityPattern]
+     predictions: list[QualityPrediction]
+     recommendations: list[str]
+     overall_health_score: float  # 0.0 to 1.0
+     risk_level: str  # low, medium, high, critical
+     generated_at: datetime = field(default_factory=datetime.now)
+
+     def to_dict(self) -> dict[str, t.Any]:
+         return {
+             "anomalies": [a.to_dict() for a in self.anomalies],
+             "patterns": [p.to_dict() for p in self.patterns],
+             "predictions": [p.to_dict() for p in self.predictions],
+             "recommendations": self.recommendations,
+             "overall_health_score": self.overall_health_score,
+             "risk_level": self.risk_level,
+             "generated_at": self.generated_at.isoformat(),
+         }
+
+
+ class QualityIntelligenceService(QualityIntelligenceProtocol):
+     """Advanced ML-based quality intelligence service."""
+
+     def __init__(
+         self,
+         quality_service: EnhancedQualityBaselineService,
+         anomaly_sensitivity: float = 2.0,  # Standard deviations for anomaly detection
+         min_data_points: int = 10,
+     ) -> None:
+         self.quality_service = quality_service
+         self.anomaly_sensitivity = anomaly_sensitivity
+         self.min_data_points = min_data_points
+
+     def detect_anomalies(
+         self, days: int = 30, metrics: list[str] | None = None
+     ) -> list[QualityAnomaly]:
+         """Detect anomalies in quality metrics using statistical analysis (sync version)."""
+         metrics = self._get_default_metrics() if metrics is None else metrics
+
+         baselines = self.quality_service.get_recent_baselines(limit=days * 2)
+         if len(baselines) < self.min_data_points:
+             return []
+
+         anomalies = []
+         for metric_name in metrics:
+             metric_anomalies = self._detect_metric_anomalies(metric_name, baselines)
+             anomalies.extend(metric_anomalies)
+
+         return anomalies
+
+     async def detect_anomalies_async(
+         self, days: int = 30, metrics: list[str] | None = None
+     ) -> list[QualityAnomaly]:
+         """Detect anomalies in quality metrics using statistical analysis (async version)."""
+         metrics = self._get_default_metrics() if metrics is None else metrics
+
+         baselines = await self.quality_service.aget_recent_baselines(limit=days * 2)
+         if len(baselines) < self.min_data_points:
+             return []
+
+         anomalies = []
+         for metric_name in metrics:
+             metric_anomalies = self._detect_metric_anomalies(metric_name, baselines)
+             anomalies.extend(metric_anomalies)
+
+         return anomalies
+
+     def _get_default_metrics(self) -> list[str]:
+         """Get default metrics list[t.Any] for anomaly detection."""
+         return [
+             "quality_score",
+             "coverage_percent",
+             "hook_failures",
+             "security_issues",
+             "type_errors",
+             "linting_issues",
+         ]
+
+     def _detect_metric_anomalies(
+         self, metric_name: str, baselines: list[t.Any]
+     ) -> list[QualityAnomaly]:
+         """Detect anomalies for a specific metric."""
+         values, timestamps = self._extract_metric_values(metric_name, baselines)
+
+         if len(values) < self.min_data_points:
+             return []
+
+         stats_data = self._calculate_statistical_metrics(values)
+         if stats_data is None:  # No variation
+             return []
+
+         return self._identify_outlier_anomalies(
+             metric_name, values, timestamps, stats_data
+         )
+
+     def _extract_metric_values(
+         self, metric_name: str, baselines: list[t.Any]
+     ) -> tuple[list[float], list[t.Any]]:
+         """Extract metric values and timestamps from baselines."""
+         values = []
+         timestamps = []
+
+         for baseline in baselines:
+             metric_value = self._get_baseline_metric_value(baseline, metric_name)
+             if metric_value is not None:
+                 values.append(metric_value)
+                 timestamps.append(baseline.timestamp)
+
+         return values, timestamps
+
+     def _get_baseline_metric_value(
+         self, baseline: t.Any, metric_name: str
+     ) -> float | None:
+         """Get metric value from baseline object."""
+         metric_mapping = {
+             "quality_score": baseline.quality_score,
+             "coverage_percent": baseline.coverage_percent,
+             "hook_failures": baseline.hook_failures,
+             "security_issues": baseline.security_issues,
+             "type_errors": baseline.type_errors,
+             "linting_issues": baseline.linting_issues,
+         }
+         return metric_mapping.get(metric_name)
+
+     def _calculate_statistical_metrics(
+         self, values: list[float]
+     ) -> dict[str, float] | None:
+         """Calculate statistical metrics for anomaly detection."""
+         values_array = np.array(values)
+         mean_val = np.mean(values_array)
+         std_val = np.std(values_array)
+
+         if std_val == 0:
+             return None  # No variation to detect anomalies
+
+         z_scores = np.abs((values_array - mean_val) / std_val)
+
+         return {
+             "mean": mean_val,
+             "std": std_val,
+             "z_scores": z_scores,
+             "values_array": values_array,
+         }
+
+     def _identify_outlier_anomalies(
+         self,
+         metric_name: str,
+         values: list[float],
+         timestamps: list[t.Any],
+         stats_data: dict[str, t.Any],
+     ) -> list[QualityAnomaly]:
+         """Identify outlier anomalies based on z-scores."""
+         anomalies = []
+         z_scores = stats_data["z_scores"]
+         mean_val = stats_data["mean"]
+         std_val = stats_data["std"]
+
+         for i, (value, timestamp, z_score) in enumerate(
+             zip(values, timestamps, z_scores)
+         ):
+             if z_score > self.anomaly_sensitivity:
+                 anomaly = self._create_anomaly_object(
+                     metric_name,
+                     value,
+                     timestamp,
+                     z_score,
+                     mean_val,
+                     std_val,
+                     i,
+                     len(values),
+                 )
+                 anomalies.append(anomaly)
+
+         return anomalies
+
+     def _create_anomaly_object(
+         self,
+         metric_name: str,
+         value: float,
+         timestamp: t.Any,
+         z_score: float,
+         mean_val: float,
+         std_val: float,
+         position: int,
+         data_points: int,
+     ) -> QualityAnomaly:
+         """Create QualityAnomaly object from detected outlier."""
+         anomaly_type, severity = self._determine_anomaly_type_and_severity(
+             value, mean_val, z_score
+         )
+         confidence = min(1.0, z_score / 4.0)  # Scale to 0-1
+
+         return QualityAnomaly(
+             anomaly_type=anomaly_type,
+             metric_name=metric_name,
+             detected_at=timestamp,
+             confidence=confidence,
+             severity=severity,
+             description=f"{metric_name} {anomaly_type} detected: {value:.2f} (expected ~{mean_val:.2f})",
+             actual_value=value,
+             expected_value=mean_val,
+             deviation_sigma=z_score,
+             context={
+                 "metric_mean": mean_val,
+                 "metric_std": std_val,
+                 "data_points": data_points,
+                 "position_in_series": position,
+             },
+         )
+
+     def _determine_anomaly_type_and_severity(
+         self, value: float, mean_val: float, z_score: float
+     ) -> tuple[AnomalyType, AlertSeverity]:
+         """Determine anomaly type and severity based on value and z-score."""
+         if value > mean_val:
+             anomaly_type = AnomalyType.SPIKE
+         else:
+             anomaly_type = AnomalyType.DROP
+
+         severity = AlertSeverity.CRITICAL if z_score > 3.0 else AlertSeverity.WARNING
+
+         return anomaly_type, severity
+
+     def identify_patterns(self, days: int = 60) -> list[QualityPattern]:
+         """Identify patterns in quality metrics using correlation and trend analysis (sync version)."""
+         baselines = self.quality_service.get_recent_baselines(limit=days * 2)
+         if len(baselines) < self.min_data_points:
+             return []
+
+         metrics_data = self._extract_metrics_data(baselines)
+         return self._find_correlation_patterns(metrics_data, days)
+
+     async def identify_patterns_async(self, days: int = 60) -> list[QualityPattern]:
+         """Identify patterns in quality metrics using correlation and trend analysis (async version)."""
+         baselines = await self.quality_service.aget_recent_baselines(limit=days * 2)
+         if len(baselines) < self.min_data_points:
+             return []
+
+         metrics_data = self._extract_metrics_data(baselines)
+         return self._find_correlation_patterns(metrics_data, days)
+
+     def _extract_metrics_data(self, baselines: list[t.Any]) -> dict[str, list[float]]:
+         """Extract metric data from baselines for correlation analysis."""
+         metrics_data = {
+             "quality_score": [],
+             "coverage_percent": [],
+             "hook_failures": [],
+             "security_issues": [],
+             "type_errors": [],
+             "linting_issues": [],
+         }
+
+         for baseline in baselines:
+             metrics_data["quality_score"].append(baseline.quality_score)
+             metrics_data["coverage_percent"].append(baseline.coverage_percent)
+             metrics_data["hook_failures"].append(baseline.hook_failures)
+             metrics_data["security_issues"].append(baseline.security_issues)
+             metrics_data["type_errors"].append(baseline.type_errors)
+             metrics_data["linting_issues"].append(baseline.linting_issues)
+
+         return metrics_data
+
+     def _find_correlation_patterns(
+         self, metrics_data: dict[str, list[float]], days: int
+     ) -> list[QualityPattern]:
+         """Find correlation patterns between metrics."""
+         patterns = []
+         metric_names = list[t.Any](metrics_data.keys())
+
+         for i, metric1 in enumerate(metric_names):
+             for metric2 in metric_names[i + 1 :]:
+                 pattern = self._analyze_metric_correlation(
+                     metric1, metric2, metrics_data, days
+                 )
+                 if pattern:
+                     patterns.append(pattern)
+
+         return patterns
+
+     def _analyze_metric_correlation(
+         self,
+         metric1: str,
+         metric2: str,
+         metrics_data: dict[str, list[float]],
+         days: int,
+     ) -> QualityPattern | None:
+         """Analyze correlation between two metrics."""
+         values1 = np.array(metrics_data[metric1])
+         values2 = np.array(metrics_data[metric2])
+
+         if len(values1) < self.min_data_points:
+             return None
+
+         # Handle constant input arrays that would cause correlation warnings
+         try:
+             # Check for constant arrays (all values the same)
+             if 0 in (np.var(values1), np.var(values2)):
+                 # Cannot calculate correlation for constant arrays
+                 return None
+
+             correlation, p_value = stats.pearsonr(values1, values2)
+         except (ValueError, RuntimeWarning):
+             # Handle any other correlation calculation issues
+             return None
+
+         # Strong correlation threshold
+         if abs(correlation) > 0.7 and p_value < 0.05:
+             return self._create_correlation_pattern(
+                 metric1, metric2, correlation, p_value, values1, days
+             )
+
+         return None
+
+     def _create_correlation_pattern(
+         self,
+         metric1: str,
+         metric2: str,
+         correlation: float,
+         p_value: float,
+         values1: np.ndarray,
+         days: int,
+     ) -> QualityPattern:
+         """Create a quality pattern from correlation analysis."""
+         trend_dir, description = self._get_correlation_trend_and_description(
+             metric1, metric2, correlation
+         )
+
+         return QualityPattern(
+             pattern_type=PatternType.CORRELATION,
+             metric_names=[metric1, metric2],
+             detected_at=datetime.now(),
+             confidence=abs(correlation),
+             description=description,
+             period_days=days,
+             correlation_strength=abs(correlation),
+             trend_direction=trend_dir,
+             statistical_significance=p_value,
+             context={
+                 "correlation_coefficient": correlation,
+                 "sample_size": len(values1),
+                 "strength": self._get_correlation_strength_label(correlation),
+             },
+         )
+
+     def _get_correlation_trend_and_description(
+         self, metric1: str, metric2: str, correlation: float
+     ) -> tuple[TrendDirection, str]:
+         """Get trend direction and description for correlation."""
+         if correlation > 0:
+             return (
+                 TrendDirection.IMPROVING,
+                 f"Strong positive correlation between {metric1} and {metric2}",
+             )
+         return (
+             TrendDirection.DECLINING,
+             f"Strong negative correlation between {metric1} and {metric2}",
+         )
+
+     def _get_correlation_strength_label(self, correlation: float) -> str:
+         """Get strength label for correlation coefficient."""
+         abs_corr = abs(correlation)
+         if abs_corr > 0.9:
+             return "very strong"
+         elif abs_corr > 0.7:
+             return "strong"
+         return "moderate"
+
+     def generate_advanced_predictions(
+         self, horizon_days: int = 14, confidence_level: float = 0.95
+     ) -> list[QualityPrediction]:
+         """Generate advanced predictions with confidence intervals."""
+         baselines = self.quality_service.get_recent_baselines(limit=90)
+         if len(baselines) < self.min_data_points:
+             return []
+
+         predictions = []
+         metrics = ["quality_score", "coverage_percent"]
+
+         for metric_name in metrics:
+             values, timestamps = self._extract_time_series(baselines, metric_name)
+
+             if len(values) < self.min_data_points:
+                 continue
+
+             prediction = self._create_metric_prediction(
+                 metric_name, values, horizon_days, confidence_level
+             )
+             predictions.append(prediction)
+
+         return predictions
+
+     def _extract_time_series(
+         self, baselines: list[t.Any], metric_name: str
+     ) -> tuple[list[t.Any], list[t.Any]]:
+         """Extract time series data for specified metric."""
+         values = []
+         timestamps = []
+
+         for baseline in baselines:
+             if metric_name == "quality_score":
+                 values.append(baseline.quality_score)
+             elif metric_name == "coverage_percent":
+                 values.append(baseline.coverage_percent)
+             timestamps.append(baseline.timestamp)
+
+         return values, timestamps
+
+     def _create_metric_prediction(
+         self,
+         metric_name: str,
+         values: list[t.Any],
+         horizon_days: int,
+         confidence_level: float,
+     ) -> QualityPrediction:
+         """Create prediction for a single metric."""
+         regression_results = self._perform_linear_regression(values, horizon_days)
+         confidence_bounds = self._calculate_confidence_interval(
+             values, regression_results, confidence_level
+         )
+         risk_level = self._assess_prediction_risk(
+             metric_name, regression_results["predicted_value"]
+         )
+
+         return QualityPrediction(
+             metric_name=metric_name,
+             predicted_value=float(regression_results["predicted_value"]),
+             confidence_lower=float(confidence_bounds["lower"]),
+             confidence_upper=float(confidence_bounds["upper"]),
+             confidence_level=confidence_level,
+             prediction_horizon_days=horizon_days,
+             prediction_method="linear_regression_with_confidence_intervals",
+             created_at=datetime.now(),
+             factors=["historical_trend", "statistical_analysis"],
+             risk_assessment=risk_level,
+         )
+
+     def _perform_linear_regression(
+         self, values: list[t.Any], horizon_days: int
+     ) -> dict[str, t.Any]:
+         """Perform linear regression and predict future value."""
+         values_array = np.array(values)
+         time_indices = np.arange(len(values))
+
+         slope, intercept, r_value, p_value, std_err = stats.linregress(
+             time_indices, values_array
+         )
+
+         future_index = len(values) + horizon_days
+         predicted_value = slope * future_index + intercept
+
+         return {
+             "slope": slope,
+             "intercept": intercept,
+             "predicted_value": predicted_value,
+             "time_indices": time_indices,
+             "values_array": values_array,
+             "horizon_days": horizon_days,
+         }
+
+     def _calculate_confidence_interval(
+         self,
+         values: list[t.Any],
+         regression_results: dict[str, t.Any],
+         confidence_level: float,
+     ) -> dict[str, t.Any]:
+         """Calculate confidence interval for prediction."""
+         slope = regression_results["slope"]
+         intercept = regression_results["intercept"]
+         time_indices = regression_results["time_indices"]
+         values_array = regression_results["values_array"]
+         predicted_value = regression_results["predicted_value"]
+
+         residuals = values_array - (slope * time_indices + intercept)
+         residual_std = np.std(residuals)
+
+         future_index = len(values) + regression_results["horizon_days"]
+         t_value = stats.t.ppf((1 + confidence_level) / 2, len(values) - 2)
+
+         margin_error = self._calculate_margin_error(
+             t_value, residual_std, len(values), future_index, time_indices
+         )
+
+         return {
+             "lower": predicted_value - margin_error,
+             "upper": predicted_value + margin_error,
+         }
+
+     def _calculate_margin_error(
+         self,
+         t_value: float,
+         residual_std: float,
+         n_values: int,
+         future_index: int,
+         time_indices: np.ndarray,
+     ) -> float:
+         """Calculate margin of error for confidence interval."""
+         mean_time: float = float(np.mean(time_indices))
+         sum_sq_diff: float = float(np.sum((time_indices - mean_time) ** 2))
+         numerator: float = (future_index - mean_time) ** 2
+
+         sqrt_term: float = float(np.sqrt(1 + 1 / n_values + numerator / sum_sq_diff))
+         return t_value * residual_std * sqrt_term
+
+     def _assess_prediction_risk(self, metric_name: str, predicted_value: float) -> str:
+         """Assess risk level based on predicted value."""
+         if metric_name == "quality_score":
+             return self._assess_quality_score_risk(predicted_value)
+         # coverage_percent
+         return self._assess_coverage_risk(predicted_value)
+
+     def _assess_quality_score_risk(self, predicted_value: float) -> str:
+         """Assess risk for quality score predictions."""
+         if predicted_value < 70:
+             return "critical"
+         elif predicted_value < 80:
+             return "high"
+         elif predicted_value < 90:
+             return "medium"
+         return "low"
+
+     def _assess_coverage_risk(self, predicted_value: float) -> str:
+         """Assess risk for coverage predictions."""
+         if predicted_value < 70:
+             return "high"
+         elif predicted_value < 85:
+             return "medium"
+         return "low"
+
+     def _generate_anomaly_recommendations(
+         self, anomalies: list[QualityAnomaly]
+     ) -> list[str]:
+         """Generate recommendations based on anomalies."""
+         recommendations = []
+
+         critical_anomalies = [
+             a for a in anomalies if a.severity == AlertSeverity.CRITICAL
+         ]
+         if critical_anomalies:
+             recommendations.append(
+                 f"🚨 CRITICAL: {len(critical_anomalies)} critical anomalies detected - immediate investigation required"
+             )
+
+         quality_drops = [
+             a
+             for a in anomalies
+             if a.anomaly_type == AnomalyType.DROP and a.metric_name == "quality_score"
+         ]
+         if quality_drops:
+             recommendations.append(
+                 "📉 Quality score drops detected - review recent commits and implement quality gates"
+             )
+
+         return recommendations
+
+     def _generate_pattern_recommendations(
+         self, patterns: list[QualityPattern]
+     ) -> list[str]:
+         """Generate recommendations based on patterns."""
+         recommendations = []
+
+         declining_correlations = [
+             p for p in patterns if p.trend_direction == TrendDirection.DECLINING
+         ]
+         if declining_correlations:
+             recommendations.append(
+                 f"⚠️ Negative quality correlations identified - investigate dependencies between {declining_correlations[0].metric_names}"
+             )
+
+         strong_patterns = [p for p in patterns if p.confidence > 0.8]
+         if strong_patterns:
+             recommendations.append(
+                 "📊 Strong quality patterns detected - leverage insights for predictive quality management"
+             )
+
+         return recommendations
+
+     def _generate_prediction_recommendations(
+         self, predictions: list[QualityPrediction]
+     ) -> list[str]:
+         """Generate recommendations based on predictions."""
+         recommendations = []
+
+         high_risk_predictions = [
+             p for p in predictions if p.risk_assessment in ("high", "critical")
+         ]
+         if high_risk_predictions:
+             metrics = [p.metric_name for p in high_risk_predictions]
+             recommendations.append(
+                 f"🔮 High-risk quality forecast for {', '.join(metrics)} - proactive intervention recommended"
+             )
+
+         low_confidence_predictions = [
+             p for p in predictions if p.confidence_upper - p.confidence_lower > 20
+         ]
+         if low_confidence_predictions:
+             recommendations.append(
+                 "📈 Wide prediction intervals detected - increase data collection frequency for better forecasting"
+             )
+
+         return recommendations
+
+     def _generate_general_ml_insights(
+         self, anomalies: list[QualityAnomaly]
+     ) -> list[str]:
+         """Generate general ML insights."""
+         recommendations = []
+
+         if len(anomalies) > 5:
+             recommendations.append(
+                 f"🤖 High anomaly frequency ({len(anomalies)}) suggests systemic quality issues - consider ML-based automated quality monitoring"
+             )
+
+         return recommendations
+
+     def generate_ml_recommendations(
+         self,
+         anomalies: list[QualityAnomaly],
+         patterns: list[QualityPattern],
+         predictions: list[QualityPrediction],
+     ) -> list[str]:
+         """Generate intelligent recommendations based on ML analysis."""
+         recommendations = []
+
+         recommendations.extend(self._generate_anomaly_recommendations(anomalies))
+         recommendations.extend(self._generate_pattern_recommendations(patterns))
+         recommendations.extend(self._generate_prediction_recommendations(predictions))
+         recommendations.extend(self._generate_general_ml_insights(anomalies))
+
+         if not recommendations:
+             recommendations.append(
+                 "✅ Quality metrics show stable patterns with no significant anomalies detected - maintain current practices"
+             )
+
+         return recommendations
+
+     def generate_comprehensive_insights(
+         self, analysis_days: int = 30, prediction_days: int = 14
+     ) -> QualityInsights:
+         """Generate comprehensive quality insights with ML analysis."""
+         # Collect all analysis results
+         anomalies = self.detect_anomalies(days=analysis_days)
+         patterns = self.identify_patterns(days=analysis_days * 2)
+         predictions = self.generate_advanced_predictions(horizon_days=prediction_days)
+         recommendations = self.generate_ml_recommendations(
+             anomalies, patterns, predictions
+         )
+
+         # Calculate derived metrics
+         health_score, risk_level = self._calculate_health_metrics(
+             anomalies, predictions
+         )
+
+         return QualityInsights(
+             anomalies=anomalies,
+             patterns=patterns,
+             predictions=predictions,
+             recommendations=recommendations,
+             overall_health_score=health_score,
+             risk_level=risk_level,
+         )
+
+     def _calculate_health_metrics(
+         self, anomalies: list[QualityAnomaly], predictions: list[QualityPrediction]
+     ) -> tuple[float, str]:
+         """Calculate overall health score and risk level."""
+         anomaly_counts = self._count_anomalies_by_severity(anomalies)
+         risk_prediction_count = self._count_high_risk_predictions(predictions)
+
+         health_score = self._compute_health_score(anomaly_counts, risk_prediction_count)
+         risk_level = self._determine_risk_level(health_score)
+
+         return health_score, risk_level
+
+     def _count_anomalies_by_severity(
+         self, anomalies: list[QualityAnomaly]
+     ) -> dict[str, int]:
+         """Count anomalies by severity level."""
+         return {
+             "critical": len(
+                 [a for a in anomalies if a.severity == AlertSeverity.CRITICAL]
+             ),
+             "warning": len(
+                 [a for a in anomalies if a.severity == AlertSeverity.WARNING]
+             ),
+         }
+
+     def _count_high_risk_predictions(self, predictions: list[QualityPrediction]) -> int:
+         """Count predictions with high or critical risk assessment."""
+         return len(
+             [p for p in predictions if p.risk_assessment in ("high", "critical")]
+         )
+
+     def _compute_health_score(
+         self, anomaly_counts: dict[str, int], risk_predictions: int
+     ) -> float:
+         """Compute health score based on anomalies and risk predictions."""
+         health_score = 1.0
+         health_score -= (
+             anomaly_counts["critical"] * 0.2
+         )  # Critical anomalies heavily impact health
+         health_score -= (
+             anomaly_counts["warning"] * 0.1
+         )  # Warning anomalies moderately impact health
+         health_score -= risk_predictions * 0.15  # High-risk predictions impact health
+         return max(0.0, min(1.0, health_score))
+
+     def _determine_risk_level(self, health_score: float) -> str:
+         """Determine overall risk level based on health score."""
+         if health_score < 0.5:
+             return "critical"
+         elif health_score < 0.7:
+             return "high"
+         elif health_score < 0.85:
+             return "medium"
+         return "low"
+
+     def export_insights(self, insights: QualityInsights, output_path: Path) -> None:
+         """Export quality insights to JSON file."""
+         with output_path.open("w") as f:
+             json.dump(insights.to_dict(), f, indent=2, default=str)
+
+     # Protocol methods required by QualityIntelligenceProtocol
+     def analyze_quality_trends(self) -> dict[str, t.Any]:
+         """Analyze quality trends."""
+         # Use existing identify_patterns method to analyze trends
+         patterns = self.identify_patterns()
+         trend_analysis = {
+             "total_patterns": len(patterns),
+             "patterns_by_type": {
+                 "cyclic": len(
+                     [p for p in patterns if p.pattern_type == PatternType.CYCLIC]
+                 ),
+                 "seasonal": len(
+                     [p for p in patterns if p.pattern_type == PatternType.SEASONAL]
+                 ),
+                 "correlation": len(
+                     [p for p in patterns if p.pattern_type == PatternType.CORRELATION]
+                 ),
+                 "regression": len(
+                     [p for p in patterns if p.pattern_type == PatternType.REGRESSION]
+                 ),
+                 "improvement": len(
+                     [p for p in patterns if p.pattern_type == PatternType.IMPROVEMENT]
+                 ),
+             },
+             "trend_directions": {
+                 "improving": len(
+                     [
+                         p
+                         for p in patterns
+                         if p.trend_direction == TrendDirection.IMPROVING
+                     ]
+                 ),
+                 "declining": len(
+                     [
+                         p
+                         for p in patterns
+                         if p.trend_direction == TrendDirection.DECLINING
+                     ]
+                 ),
+                 "stable": len(
+                     [p for p in patterns if p.trend_direction == TrendDirection.STABLE]
+                 ),
+                 "volatile": len(
+                     [
+                         p
+                         for p in patterns
+                         if p.trend_direction == TrendDirection.VOLATILE
+                     ]
+                 ),
+             },
+             "generated_at": datetime.now().isoformat(),
+         }
+         return trend_analysis
+
+     def predict_quality_issues(self) -> list[dict[str, t.Any]]:
+         """Predict potential quality issues."""
+         predictions = self.generate_advanced_predictions()
+
+         return [
+             {
+                 "metric": pred.metric_name,
+                 "predicted_value": pred.predicted_value,
+                 "risk_level": pred.risk_assessment,
+                 "confidence_interval": {
+                     "lower": pred.confidence_lower,
+                     "upper": pred.confidence_upper,
+                 },
+                 "prediction_horizon": pred.prediction_horizon_days,
+                 "factors": pred.factors,
+             }
+             for pred in predictions
+             if pred.risk_assessment in ("high", "critical")
+         ]
+
+     def recommend_improvements(self) -> list[dict[str, t.Any]]:
+         """Recommend quality improvements."""
+         # Generate basic analysis to get data for recommendations
+         anomalies = self.detect_anomalies()
+         patterns = self.identify_patterns()
+         predictions = self.generate_advanced_predictions()
+
+         recommendations = self.generate_ml_recommendations(
+             anomalies, patterns, predictions
+         )
+
+         # Convert to required format
+         return [{"message": rec} for rec in recommendations]
+
+     def get_intelligence_report(self) -> dict[str, t.Any]:
+         """Get quality intelligence report."""
+         insights = self.generate_comprehensive_insights()
+         return insights.to_dict()
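
For readers evaluating the new quality-intelligence module, the sketch below shows one way the service added in this diff could be driven. It is illustrative only and not part of the released package: constructing EnhancedQualityBaselineService with no arguments is an assumption (its real signature lives in quality_baseline_enhanced.py, also added in this release), while the QualityIntelligenceService constructor defaults and the detect_anomalies, generate_comprehensive_insights, and export_insights calls mirror the signatures visible in the diff above. Anomalies are flagged when a metric's z-score exceeds anomaly_sensitivity (2.0 standard deviations by default), and _calculate_margin_error follows the textbook linear-regression prediction-interval half-width t * s_residual * sqrt(1 + 1/n + (x0 - x_mean)^2 / sum((x_i - x_mean)^2)).

    # Illustrative usage sketch; not shipped in the wheel.
    from pathlib import Path

    from crackerjack.services.quality.quality_baseline_enhanced import (
        EnhancedQualityBaselineService,
    )
    from crackerjack.services.quality.quality_intelligence import (
        QualityIntelligenceService,
    )

    # Assumption: a no-argument constructor; adjust to the actual baseline-service API.
    baseline_service = EnhancedQualityBaselineService()

    intelligence = QualityIntelligenceService(
        quality_service=baseline_service,
        anomaly_sensitivity=2.0,  # z-score threshold, matching the default above
        min_data_points=10,
    )

    # Z-score outlier detection over the last 30 days of stored baselines.
    for anomaly in intelligence.detect_anomalies(days=30):
        print(anomaly.severity, anomaly.description, f"{anomaly.deviation_sigma:.1f}σ")

    # Full report: anomalies, correlation patterns, regression-based forecasts,
    # recommendations, and an overall health score, exported as JSON.
    insights = intelligence.generate_comprehensive_insights(
        analysis_days=30, prediction_days=14
    )
    intelligence.export_insights(insights, Path("quality_insights.json"))

Since export_insights and get_intelligence_report both serialize the same QualityInsights.to_dict() structure, either entry point can feed a dashboard or a CI quality gate.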