empathy-framework 3.7.0__py3-none-any.whl → 3.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (267) hide show
  1. coach_wizards/code_reviewer_README.md +60 -0
  2. coach_wizards/code_reviewer_wizard.py +180 -0
  3. {empathy_framework-3.7.0.dist-info → empathy_framework-3.7.1.dist-info}/METADATA +20 -2
  4. empathy_framework-3.7.1.dist-info/RECORD +327 -0
  5. {empathy_framework-3.7.0.dist-info → empathy_framework-3.7.1.dist-info}/top_level.txt +5 -1
  6. empathy_healthcare_plugin/monitors/__init__.py +9 -0
  7. empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py +315 -0
  8. empathy_healthcare_plugin/monitors/monitoring/__init__.py +44 -0
  9. empathy_healthcare_plugin/monitors/monitoring/protocol_checker.py +300 -0
  10. empathy_healthcare_plugin/monitors/monitoring/protocol_loader.py +214 -0
  11. empathy_healthcare_plugin/monitors/monitoring/sensor_parsers.py +306 -0
  12. empathy_healthcare_plugin/monitors/monitoring/trajectory_analyzer.py +389 -0
  13. empathy_llm_toolkit/agent_factory/__init__.py +53 -0
  14. empathy_llm_toolkit/agent_factory/adapters/__init__.py +85 -0
  15. empathy_llm_toolkit/agent_factory/adapters/autogen_adapter.py +312 -0
  16. empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +454 -0
  17. empathy_llm_toolkit/agent_factory/adapters/haystack_adapter.py +298 -0
  18. empathy_llm_toolkit/agent_factory/adapters/langchain_adapter.py +362 -0
  19. empathy_llm_toolkit/agent_factory/adapters/langgraph_adapter.py +333 -0
  20. empathy_llm_toolkit/agent_factory/adapters/native.py +228 -0
  21. empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +426 -0
  22. empathy_llm_toolkit/agent_factory/base.py +305 -0
  23. empathy_llm_toolkit/agent_factory/crews/__init__.py +67 -0
  24. empathy_llm_toolkit/agent_factory/crews/code_review.py +1113 -0
  25. empathy_llm_toolkit/agent_factory/crews/health_check.py +1246 -0
  26. empathy_llm_toolkit/agent_factory/crews/refactoring.py +1128 -0
  27. empathy_llm_toolkit/agent_factory/crews/security_audit.py +1018 -0
  28. empathy_llm_toolkit/agent_factory/decorators.py +286 -0
  29. empathy_llm_toolkit/agent_factory/factory.py +558 -0
  30. empathy_llm_toolkit/agent_factory/framework.py +192 -0
  31. empathy_llm_toolkit/agent_factory/memory_integration.py +324 -0
  32. empathy_llm_toolkit/agent_factory/resilient.py +320 -0
  33. empathy_llm_toolkit/cli/__init__.py +8 -0
  34. empathy_llm_toolkit/cli/sync_claude.py +487 -0
  35. empathy_llm_toolkit/code_health.py +150 -3
  36. empathy_llm_toolkit/config/__init__.py +29 -0
  37. empathy_llm_toolkit/config/unified.py +295 -0
  38. empathy_llm_toolkit/routing/__init__.py +32 -0
  39. empathy_llm_toolkit/routing/model_router.py +362 -0
  40. empathy_llm_toolkit/security/IMPLEMENTATION_SUMMARY.md +413 -0
  41. empathy_llm_toolkit/security/PHASE2_COMPLETE.md +384 -0
  42. empathy_llm_toolkit/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
  43. empathy_llm_toolkit/security/QUICK_REFERENCE.md +316 -0
  44. empathy_llm_toolkit/security/README.md +262 -0
  45. empathy_llm_toolkit/security/__init__.py +62 -0
  46. empathy_llm_toolkit/security/audit_logger.py +929 -0
  47. empathy_llm_toolkit/security/audit_logger_example.py +152 -0
  48. empathy_llm_toolkit/security/pii_scrubber.py +640 -0
  49. empathy_llm_toolkit/security/secrets_detector.py +678 -0
  50. empathy_llm_toolkit/security/secrets_detector_example.py +304 -0
  51. empathy_llm_toolkit/security/secure_memdocs.py +1192 -0
  52. empathy_llm_toolkit/security/secure_memdocs_example.py +278 -0
  53. empathy_llm_toolkit/wizards/__init__.py +38 -0
  54. empathy_llm_toolkit/wizards/base_wizard.py +364 -0
  55. empathy_llm_toolkit/wizards/customer_support_wizard.py +190 -0
  56. empathy_llm_toolkit/wizards/healthcare_wizard.py +362 -0
  57. empathy_llm_toolkit/wizards/patient_assessment_README.md +64 -0
  58. empathy_llm_toolkit/wizards/patient_assessment_wizard.py +193 -0
  59. empathy_llm_toolkit/wizards/technology_wizard.py +194 -0
  60. empathy_os/__init__.py +52 -52
  61. empathy_os/adaptive/__init__.py +13 -0
  62. empathy_os/adaptive/task_complexity.py +127 -0
  63. empathy_os/cli.py +118 -8
  64. empathy_os/cli_unified.py +121 -1
  65. empathy_os/config/__init__.py +63 -0
  66. empathy_os/config/xml_config.py +239 -0
  67. empathy_os/dashboard/__init__.py +15 -0
  68. empathy_os/dashboard/server.py +743 -0
  69. empathy_os/memory/__init__.py +195 -0
  70. empathy_os/memory/claude_memory.py +466 -0
  71. empathy_os/memory/config.py +224 -0
  72. empathy_os/memory/control_panel.py +1298 -0
  73. empathy_os/memory/edges.py +179 -0
  74. empathy_os/memory/graph.py +567 -0
  75. empathy_os/memory/long_term.py +1193 -0
  76. empathy_os/memory/nodes.py +179 -0
  77. empathy_os/memory/redis_bootstrap.py +540 -0
  78. empathy_os/memory/security/__init__.py +31 -0
  79. empathy_os/memory/security/audit_logger.py +930 -0
  80. empathy_os/memory/security/pii_scrubber.py +640 -0
  81. empathy_os/memory/security/secrets_detector.py +678 -0
  82. empathy_os/memory/short_term.py +2119 -0
  83. empathy_os/memory/storage/__init__.py +15 -0
  84. empathy_os/memory/summary_index.py +583 -0
  85. empathy_os/memory/unified.py +619 -0
  86. empathy_os/metrics/__init__.py +12 -0
  87. empathy_os/metrics/prompt_metrics.py +190 -0
  88. empathy_os/models/__init__.py +136 -0
  89. empathy_os/models/__main__.py +13 -0
  90. empathy_os/models/cli.py +655 -0
  91. empathy_os/models/empathy_executor.py +354 -0
  92. empathy_os/models/executor.py +252 -0
  93. empathy_os/models/fallback.py +671 -0
  94. empathy_os/models/provider_config.py +563 -0
  95. empathy_os/models/registry.py +382 -0
  96. empathy_os/models/tasks.py +302 -0
  97. empathy_os/models/telemetry.py +548 -0
  98. empathy_os/models/token_estimator.py +378 -0
  99. empathy_os/models/validation.py +274 -0
  100. empathy_os/monitoring/__init__.py +52 -0
  101. empathy_os/monitoring/alerts.py +23 -0
  102. empathy_os/monitoring/alerts_cli.py +268 -0
  103. empathy_os/monitoring/multi_backend.py +271 -0
  104. empathy_os/monitoring/otel_backend.py +363 -0
  105. empathy_os/optimization/__init__.py +19 -0
  106. empathy_os/optimization/context_optimizer.py +272 -0
  107. empathy_os/plugins/__init__.py +28 -0
  108. empathy_os/plugins/base.py +361 -0
  109. empathy_os/plugins/registry.py +268 -0
  110. empathy_os/project_index/__init__.py +30 -0
  111. empathy_os/project_index/cli.py +335 -0
  112. empathy_os/project_index/crew_integration.py +430 -0
  113. empathy_os/project_index/index.py +425 -0
  114. empathy_os/project_index/models.py +501 -0
  115. empathy_os/project_index/reports.py +473 -0
  116. empathy_os/project_index/scanner.py +538 -0
  117. empathy_os/prompts/__init__.py +61 -0
  118. empathy_os/prompts/config.py +77 -0
  119. empathy_os/prompts/context.py +177 -0
  120. empathy_os/prompts/parser.py +285 -0
  121. empathy_os/prompts/registry.py +313 -0
  122. empathy_os/prompts/templates.py +208 -0
  123. empathy_os/resilience/__init__.py +56 -0
  124. empathy_os/resilience/circuit_breaker.py +256 -0
  125. empathy_os/resilience/fallback.py +179 -0
  126. empathy_os/resilience/health.py +300 -0
  127. empathy_os/resilience/retry.py +209 -0
  128. empathy_os/resilience/timeout.py +135 -0
  129. empathy_os/routing/__init__.py +43 -0
  130. empathy_os/routing/chain_executor.py +433 -0
  131. empathy_os/routing/classifier.py +217 -0
  132. empathy_os/routing/smart_router.py +234 -0
  133. empathy_os/routing/wizard_registry.py +307 -0
  134. empathy_os/trust/__init__.py +28 -0
  135. empathy_os/trust/circuit_breaker.py +579 -0
  136. empathy_os/validation/__init__.py +19 -0
  137. empathy_os/validation/xml_validator.py +281 -0
  138. empathy_os/wizard_factory_cli.py +170 -0
  139. empathy_os/workflows/__init__.py +360 -0
  140. empathy_os/workflows/base.py +1530 -0
  141. empathy_os/workflows/bug_predict.py +962 -0
  142. empathy_os/workflows/code_review.py +960 -0
  143. empathy_os/workflows/code_review_adapters.py +310 -0
  144. empathy_os/workflows/code_review_pipeline.py +720 -0
  145. empathy_os/workflows/config.py +600 -0
  146. empathy_os/workflows/dependency_check.py +648 -0
  147. empathy_os/workflows/document_gen.py +1069 -0
  148. empathy_os/workflows/documentation_orchestrator.py +1205 -0
  149. empathy_os/workflows/health_check.py +679 -0
  150. empathy_os/workflows/keyboard_shortcuts/__init__.py +39 -0
  151. empathy_os/workflows/keyboard_shortcuts/generators.py +386 -0
  152. empathy_os/workflows/keyboard_shortcuts/parsers.py +414 -0
  153. empathy_os/workflows/keyboard_shortcuts/prompts.py +295 -0
  154. empathy_os/workflows/keyboard_shortcuts/schema.py +193 -0
  155. empathy_os/workflows/keyboard_shortcuts/workflow.py +505 -0
  156. empathy_os/workflows/manage_documentation.py +804 -0
  157. empathy_os/workflows/new_sample_workflow1.py +146 -0
  158. empathy_os/workflows/new_sample_workflow1_README.md +150 -0
  159. empathy_os/workflows/perf_audit.py +687 -0
  160. empathy_os/workflows/pr_review.py +748 -0
  161. empathy_os/workflows/progress.py +445 -0
  162. empathy_os/workflows/progress_server.py +322 -0
  163. empathy_os/workflows/refactor_plan.py +691 -0
  164. empathy_os/workflows/release_prep.py +808 -0
  165. empathy_os/workflows/research_synthesis.py +404 -0
  166. empathy_os/workflows/secure_release.py +585 -0
  167. empathy_os/workflows/security_adapters.py +297 -0
  168. empathy_os/workflows/security_audit.py +1050 -0
  169. empathy_os/workflows/step_config.py +234 -0
  170. empathy_os/workflows/test5.py +125 -0
  171. empathy_os/workflows/test5_README.md +158 -0
  172. empathy_os/workflows/test_gen.py +1855 -0
  173. empathy_os/workflows/test_lifecycle.py +526 -0
  174. empathy_os/workflows/test_maintenance.py +626 -0
  175. empathy_os/workflows/test_maintenance_cli.py +590 -0
  176. empathy_os/workflows/test_maintenance_crew.py +821 -0
  177. empathy_os/workflows/xml_enhanced_crew.py +285 -0
  178. empathy_software_plugin/cli/__init__.py +120 -0
  179. empathy_software_plugin/cli/inspect.py +362 -0
  180. empathy_software_plugin/cli.py +3 -1
  181. empathy_software_plugin/wizards/__init__.py +42 -0
  182. empathy_software_plugin/wizards/advanced_debugging_wizard.py +392 -0
  183. empathy_software_plugin/wizards/agent_orchestration_wizard.py +511 -0
  184. empathy_software_plugin/wizards/ai_collaboration_wizard.py +503 -0
  185. empathy_software_plugin/wizards/ai_context_wizard.py +441 -0
  186. empathy_software_plugin/wizards/ai_documentation_wizard.py +503 -0
  187. empathy_software_plugin/wizards/base_wizard.py +288 -0
  188. empathy_software_plugin/wizards/book_chapter_wizard.py +519 -0
  189. empathy_software_plugin/wizards/code_review_wizard.py +606 -0
  190. empathy_software_plugin/wizards/debugging/__init__.py +50 -0
  191. empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +414 -0
  192. empathy_software_plugin/wizards/debugging/config_loaders.py +442 -0
  193. empathy_software_plugin/wizards/debugging/fix_applier.py +469 -0
  194. empathy_software_plugin/wizards/debugging/language_patterns.py +383 -0
  195. empathy_software_plugin/wizards/debugging/linter_parsers.py +470 -0
  196. empathy_software_plugin/wizards/debugging/verification.py +369 -0
  197. empathy_software_plugin/wizards/enhanced_testing_wizard.py +537 -0
  198. empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +816 -0
  199. empathy_software_plugin/wizards/multi_model_wizard.py +501 -0
  200. empathy_software_plugin/wizards/pattern_extraction_wizard.py +422 -0
  201. empathy_software_plugin/wizards/pattern_retriever_wizard.py +400 -0
  202. empathy_software_plugin/wizards/performance/__init__.py +9 -0
  203. empathy_software_plugin/wizards/performance/bottleneck_detector.py +221 -0
  204. empathy_software_plugin/wizards/performance/profiler_parsers.py +278 -0
  205. empathy_software_plugin/wizards/performance/trajectory_analyzer.py +429 -0
  206. empathy_software_plugin/wizards/performance_profiling_wizard.py +305 -0
  207. empathy_software_plugin/wizards/prompt_engineering_wizard.py +425 -0
  208. empathy_software_plugin/wizards/rag_pattern_wizard.py +461 -0
  209. empathy_software_plugin/wizards/security/__init__.py +32 -0
  210. empathy_software_plugin/wizards/security/exploit_analyzer.py +290 -0
  211. empathy_software_plugin/wizards/security/owasp_patterns.py +241 -0
  212. empathy_software_plugin/wizards/security/vulnerability_scanner.py +604 -0
  213. empathy_software_plugin/wizards/security_analysis_wizard.py +322 -0
  214. empathy_software_plugin/wizards/security_learning_wizard.py +740 -0
  215. empathy_software_plugin/wizards/tech_debt_wizard.py +726 -0
  216. empathy_software_plugin/wizards/testing/__init__.py +27 -0
  217. empathy_software_plugin/wizards/testing/coverage_analyzer.py +459 -0
  218. empathy_software_plugin/wizards/testing/quality_analyzer.py +531 -0
  219. empathy_software_plugin/wizards/testing/test_suggester.py +533 -0
  220. empathy_software_plugin/wizards/testing_wizard.py +274 -0
  221. hot_reload/README.md +473 -0
  222. hot_reload/__init__.py +62 -0
  223. hot_reload/config.py +84 -0
  224. hot_reload/integration.py +228 -0
  225. hot_reload/reloader.py +298 -0
  226. hot_reload/watcher.py +179 -0
  227. hot_reload/websocket.py +176 -0
  228. scaffolding/README.md +589 -0
  229. scaffolding/__init__.py +35 -0
  230. scaffolding/__main__.py +14 -0
  231. scaffolding/cli.py +240 -0
  232. test_generator/__init__.py +38 -0
  233. test_generator/__main__.py +14 -0
  234. test_generator/cli.py +226 -0
  235. test_generator/generator.py +325 -0
  236. test_generator/risk_analyzer.py +216 -0
  237. workflow_patterns/__init__.py +33 -0
  238. workflow_patterns/behavior.py +249 -0
  239. workflow_patterns/core.py +76 -0
  240. workflow_patterns/output.py +99 -0
  241. workflow_patterns/registry.py +255 -0
  242. workflow_patterns/structural.py +288 -0
  243. workflow_scaffolding/__init__.py +11 -0
  244. workflow_scaffolding/__main__.py +12 -0
  245. workflow_scaffolding/cli.py +206 -0
  246. workflow_scaffolding/generator.py +265 -0
  247. agents/code_inspection/patterns/inspection/recurring_B112.json +0 -18
  248. agents/code_inspection/patterns/inspection/recurring_F541.json +0 -16
  249. agents/code_inspection/patterns/inspection/recurring_FORMAT.json +0 -25
  250. agents/code_inspection/patterns/inspection/recurring_bug_20250822_def456.json +0 -16
  251. agents/code_inspection/patterns/inspection/recurring_bug_20250915_abc123.json +0 -16
  252. agents/code_inspection/patterns/inspection/recurring_bug_20251212_3c5b9951.json +0 -16
  253. agents/code_inspection/patterns/inspection/recurring_bug_20251212_97c0f72f.json +0 -16
  254. agents/code_inspection/patterns/inspection/recurring_bug_20251212_a0871d53.json +0 -16
  255. agents/code_inspection/patterns/inspection/recurring_bug_20251212_a9b6ec41.json +0 -16
  256. agents/code_inspection/patterns/inspection/recurring_bug_null_001.json +0 -16
  257. agents/code_inspection/patterns/inspection/recurring_builtin.json +0 -16
  258. agents/compliance_anticipation_agent.py +0 -1422
  259. agents/compliance_db.py +0 -339
  260. agents/epic_integration_wizard.py +0 -530
  261. agents/notifications.py +0 -291
  262. agents/trust_building_behaviors.py +0 -872
  263. empathy_framework-3.7.0.dist-info/RECORD +0 -105
  264. {empathy_framework-3.7.0.dist-info → empathy_framework-3.7.1.dist-info}/WHEEL +0 -0
  265. {empathy_framework-3.7.0.dist-info → empathy_framework-3.7.1.dist-info}/entry_points.txt +0 -0
  266. {empathy_framework-3.7.0.dist-info → empathy_framework-3.7.1.dist-info}/licenses/LICENSE +0 -0
  267. /empathy_os/{monitoring.py → agent_monitoring.py} +0 -0
@@ -0,0 +1,687 @@
1
+ """Performance Audit Workflow
2
+
3
+ Identifies performance bottlenecks and optimization opportunities
4
+ through static analysis.
5
+
6
+ Stages:
7
+ 1. profile (CHEAP) - Static analysis for common perf anti-patterns
8
+ 2. analyze (CAPABLE) - Deep analysis of algorithmic complexity
9
+ 3. hotspots (CAPABLE) - Identify performance hotspots
10
+ 4. optimize (PREMIUM) - Generate optimization recommendations (conditional)
11
+
12
+ Copyright 2025 Smart-AI-Memory
13
+ Licensed under Fair Source License 0.9
14
+ """
15
+
16
+ import json
17
+ import re
18
+ from pathlib import Path
19
+ from typing import Any
20
+
21
+ from .base import BaseWorkflow, ModelTier
22
+ from .step_config import WorkflowStepConfig
23
+
24
# Step configuration for executor-based execution (Phase 3 pattern).
# Only the "optimize" stage is routed through the executor; the earlier
# stages are pure static analysis and make no model calls.
PERF_AUDIT_STEPS = {
    "optimize": WorkflowStepConfig(
        name="optimize",
        task_type="final_review",  # routed as a premium-tier task
        tier_hint="premium",
        description="Generate performance optimization recommendations",
        max_tokens=3000,
    ),
}
34
+
35
# Regex heuristics for common performance anti-patterns.
#
# Each entry maps an anti-pattern id to:
#   patterns    - raw regexes applied with re.MULTILINE over file contents
#   description - human-readable finding text
#   impact      - severity bucket: "high" | "medium" | "low"
#
# These are heuristics, not a parser: false positives are expected and are
# documented below. Entry order is preserved because findings are reported
# in iteration order.
PERF_PATTERNS = {
    # Query/fetch call inside a loop body (classic N+1 database access).
    "n_plus_one": {
        "patterns": [
            r"for\s+\w+\s+in\s+\w+:.*?\.get\(",
            r"for\s+\w+\s+in\s+\w+:.*?\.query\(",
            r"for\s+\w+\s+in\s+\w+:.*?\.fetch\(",
        ],
        "description": "Potential N+1 query pattern",
        "impact": "high",
    },
    # Blocking call inside an ``async def`` — stalls the event loop.
    "sync_in_async": {
        "patterns": [
            r"async\s+def.*?time\.sleep\(",
            r"async\s+def.*?requests\.get\(",
            r"async\s+def.*?open\([^)]+\)\.read\(",
        ],
        "description": "Synchronous operation in async context",
        "impact": "high",
    },
    # A list comprehension used directly as the iterable of a for-loop.
    "list_comprehension_in_loop": {
        "patterns": [
            r"for\s+\w+\s+in\s+\[.*for.*\]:",
        ],
        "description": "List comprehension recreated in loop",
        "impact": "medium",
    },
    # ``s += "..."`` on the line after a standalone ``for`` statement.
    "string_concat_loop": {
        "patterns": [
            # Match: for x in y: \n str += "..." (actual loop, not generator expression)
            # Exclude: any(... for x in ...) by requiring standalone for statement
            r'^[ \t]*for\s+\w+\s+in\s+[^:]+:\s*\n[ \t]+\w+\s*\+=\s*["\']',
        ],
        "description": "String concatenation in loop (use join)",
        "impact": "medium",
    },
    # ``from x import *`` at line start.
    "global_import": {
        "patterns": [
            r"^from\s+\w+\s+import\s+\*",
        ],
        "description": "Wildcard import may slow startup",
        "impact": "low",
    },
    # ``list(x)`` / ``x[:]`` — full copies; often intentional (see notes).
    "large_list_copy": {
        "patterns": [
            r"list\(\w+\)",
            r"\w+\[:\]",
        ],
        "description": "Full list copy (may be inefficient for large lists)",
        "impact": "low",
    },
    # Inline string literal passed to re.search/match/findall.
    "repeated_regex": {
        "patterns": [
            r're\.(search|match|findall)\s*\(["\'][^"\']+["\']',
        ],
        "description": "Regex pattern not pre-compiled",
        "impact": "medium",
    },
    # Three consecutive nested for-statements.
    "nested_loops": {
        "patterns": [
            r"for\s+\w+\s+in\s+\w+:\s*\n\s+for\s+\w+\s+in\s+\w+:\s*\n\s+for",
        ],
        "description": "Triple nested loop (O(n³) complexity)",
        "impact": "high",
    },
}
101
+
102
+ # Known false positives - patterns that match but aren't performance issues
103
+ # These are documented for transparency; the regex-based detection has limitations.
104
+ #
105
+ # IMPROVED: string_concat_loop
106
+ # - Pattern now requires line to START with 'for' (excludes generator expressions)
107
+ # - Previously matched: any(x for x in y) followed by += on next line
108
+ # - Now correctly excludes: generator expressions inside any(), all(), etc.
109
+ # - Sequential string building (code += "line1"; code += "line2") correctly ignored
110
+ #
111
+ # FALSE POSITIVE: large_list_copy
112
+ # - list(x) or x[:] used for defensive copying or type conversion
113
+ # - Often intentional to avoid mutating original data
114
+ # - Verdict: OK - usually intentional, low impact
115
+ #
116
+ # FALSE POSITIVE: repeated_regex (edge cases)
117
+ # - Single-use regex in rarely-called functions
118
+ # - Verdict: OK - pre-compilation only matters for hot paths
119
+
120
+
121
class PerformanceAuditWorkflow(BaseWorkflow):
    """Identify performance bottlenecks and optimization opportunities.

    Runs regex-based static analysis over source files to surface common
    performance anti-patterns and algorithmic complexity issues, then
    escalates to an LLM only for the final recommendation stage.
    """

    # Workflow identity and ordered stage pipeline.
    name = "perf-audit"
    description = "Identify performance bottlenecks and optimization opportunities"
    stages = ["profile", "analyze", "hotspots", "optimize"]
    # NOTE: class-level default — shared by all instances unless shadowed.
    tier_map = {
        "profile": ModelTier.CHEAP,
        "analyze": ModelTier.CAPABLE,
        "hotspots": ModelTier.CAPABLE,
        "optimize": ModelTier.PREMIUM,
    }
137
+
138
+ def __init__(
139
+ self,
140
+ min_hotspots_for_premium: int = 3,
141
+ **kwargs: Any,
142
+ ):
143
+ """Initialize performance audit workflow.
144
+
145
+ Args:
146
+ min_hotspots_for_premium: Minimum hotspots to trigger premium optimization
147
+ **kwargs: Additional arguments passed to BaseWorkflow
148
+
149
+ """
150
+ super().__init__(**kwargs)
151
+ self.min_hotspots_for_premium = min_hotspots_for_premium
152
+ self._hotspot_count: int = 0
153
+
154
+ def should_skip_stage(self, stage_name: str, input_data: Any) -> tuple[bool, str | None]:
155
+ """Downgrade optimize stage if few hotspots.
156
+
157
+ Args:
158
+ stage_name: Name of the stage to check
159
+ input_data: Current workflow data
160
+
161
+ Returns:
162
+ Tuple of (should_skip, reason)
163
+
164
+ """
165
+ if stage_name == "optimize":
166
+ if self._hotspot_count < self.min_hotspots_for_premium:
167
+ self.tier_map["optimize"] = ModelTier.CAPABLE
168
+ return False, None
169
+ return False, None
170
+
171
+ async def run_stage(
172
+ self,
173
+ stage_name: str,
174
+ tier: ModelTier,
175
+ input_data: Any,
176
+ ) -> tuple[Any, int, int]:
177
+ """Route to specific stage implementation."""
178
+ if stage_name == "profile":
179
+ return await self._profile(input_data, tier)
180
+ if stage_name == "analyze":
181
+ return await self._analyze(input_data, tier)
182
+ if stage_name == "hotspots":
183
+ return await self._hotspots(input_data, tier)
184
+ if stage_name == "optimize":
185
+ return await self._optimize(input_data, tier)
186
+ raise ValueError(f"Unknown stage: {stage_name}")
187
+
188
+ async def _profile(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
189
+ """Static analysis for common performance anti-patterns.
190
+
191
+ Scans code for known performance issues.
192
+ """
193
+ target_path = input_data.get("path", ".")
194
+ file_types = input_data.get("file_types", [".py"])
195
+
196
+ findings: list[dict] = []
197
+ files_scanned = 0
198
+
199
+ target = Path(target_path)
200
+ if target.exists():
201
+ for ext in file_types:
202
+ for file_path in target.rglob(f"*{ext}"):
203
+ if any(
204
+ skip in str(file_path)
205
+ for skip in [".git", "node_modules", "__pycache__", "venv", "test"]
206
+ ):
207
+ continue
208
+
209
+ try:
210
+ content = file_path.read_text(errors="ignore")
211
+ files_scanned += 1
212
+
213
+ for pattern_name, pattern_info in PERF_PATTERNS.items():
214
+ for pattern in pattern_info["patterns"]:
215
+ matches = list(re.finditer(pattern, content, re.MULTILINE))
216
+ for match in matches:
217
+ line_num = content[: match.start()].count("\n") + 1
218
+ findings.append(
219
+ {
220
+ "type": pattern_name,
221
+ "file": str(file_path),
222
+ "line": line_num,
223
+ "description": pattern_info["description"],
224
+ "impact": pattern_info["impact"],
225
+ "match": match.group()[:80],
226
+ },
227
+ )
228
+ except OSError:
229
+ continue
230
+
231
+ # Group by impact
232
+ by_impact: dict[str, list] = {"high": [], "medium": [], "low": []}
233
+ for f in findings:
234
+ impact = f.get("impact", "low")
235
+ by_impact[impact].append(f)
236
+
237
+ input_tokens = len(str(input_data)) // 4
238
+ output_tokens = len(str(findings)) // 4
239
+
240
+ return (
241
+ {
242
+ "findings": findings,
243
+ "finding_count": len(findings),
244
+ "files_scanned": files_scanned,
245
+ "by_impact": {k: len(v) for k, v in by_impact.items()},
246
+ **input_data,
247
+ },
248
+ input_tokens,
249
+ output_tokens,
250
+ )
251
+
252
+ async def _analyze(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
253
+ """Deep analysis of algorithmic complexity.
254
+
255
+ Examines code structure for complexity issues.
256
+ """
257
+ findings = input_data.get("findings", [])
258
+
259
+ # Group findings by file
260
+ by_file: dict[str, list] = {}
261
+ for f in findings:
262
+ file_path = f.get("file", "")
263
+ if file_path not in by_file:
264
+ by_file[file_path] = []
265
+ by_file[file_path].append(f)
266
+
267
+ # Analyze each file
268
+ analysis: list[dict] = []
269
+ for file_path, file_findings in by_file.items():
270
+ # Calculate file complexity score
271
+ high_count = len([f for f in file_findings if f["impact"] == "high"])
272
+ medium_count = len([f for f in file_findings if f["impact"] == "medium"])
273
+ low_count = len([f for f in file_findings if f["impact"] == "low"])
274
+
275
+ complexity_score = high_count * 10 + medium_count * 5 + low_count * 1
276
+
277
+ # Identify primary concerns
278
+ concerns = list({f["type"] for f in file_findings})
279
+
280
+ analysis.append(
281
+ {
282
+ "file": file_path,
283
+ "complexity_score": complexity_score,
284
+ "finding_count": len(file_findings),
285
+ "high_impact": high_count,
286
+ "concerns": concerns[:5],
287
+ },
288
+ )
289
+
290
+ # Sort by complexity score
291
+ analysis.sort(key=lambda x: -x["complexity_score"])
292
+
293
+ input_tokens = len(str(input_data)) // 4
294
+ output_tokens = len(str(analysis)) // 4
295
+
296
+ return (
297
+ {
298
+ "analysis": analysis,
299
+ "analyzed_files": len(analysis),
300
+ **input_data,
301
+ },
302
+ input_tokens,
303
+ output_tokens,
304
+ )
305
+
306
+ async def _hotspots(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
307
+ """Identify performance hotspots.
308
+
309
+ Pinpoints files and areas requiring immediate attention.
310
+ """
311
+ analysis = input_data.get("analysis", [])
312
+
313
+ # Top hotspots (highest complexity scores)
314
+ hotspots = [a for a in analysis if a["complexity_score"] >= 10 or a["high_impact"] >= 2]
315
+
316
+ self._hotspot_count = len(hotspots)
317
+
318
+ # Categorize hotspots
319
+ critical = [h for h in hotspots if h["complexity_score"] >= 20]
320
+ moderate = [h for h in hotspots if 10 <= h["complexity_score"] < 20]
321
+
322
+ # Calculate overall perf score (inverse of problems)
323
+ total_score = sum(a["complexity_score"] for a in analysis)
324
+ max_score = len(analysis) * 30 # Max possible score
325
+ perf_score = max(0, 100 - int((total_score / max(max_score, 1)) * 100))
326
+
327
+ hotspot_result = {
328
+ "hotspots": hotspots[:15], # Top 15
329
+ "hotspot_count": self._hotspot_count,
330
+ "critical_count": len(critical),
331
+ "moderate_count": len(moderate),
332
+ "perf_score": perf_score,
333
+ "perf_level": (
334
+ "critical" if perf_score < 50 else "warning" if perf_score < 75 else "good"
335
+ ),
336
+ }
337
+
338
+ input_tokens = len(str(input_data)) // 4
339
+ output_tokens = len(str(hotspot_result)) // 4
340
+
341
+ return (
342
+ {
343
+ "hotspot_result": hotspot_result,
344
+ **input_data,
345
+ },
346
+ input_tokens,
347
+ output_tokens,
348
+ )
349
+
350
+ async def _optimize(self, input_data: dict, tier: ModelTier) -> tuple[dict, int, int]:
351
+ """Generate optimization recommendations using LLM.
352
+
353
+ Creates actionable recommendations for performance improvements.
354
+
355
+ Supports XML-enhanced prompts when enabled in workflow config.
356
+ """
357
+ hotspot_result = input_data.get("hotspot_result", {})
358
+ hotspots = hotspot_result.get("hotspots", [])
359
+ findings = input_data.get("findings", [])
360
+ target = input_data.get("target", "")
361
+
362
+ # Build hotspots summary for LLM
363
+ hotspots_summary = []
364
+ for h in hotspots[:10]:
365
+ hotspots_summary.append(
366
+ f"- {h.get('file')}: score={h.get('complexity_score', 0)}, "
367
+ f"concerns={', '.join(h.get('concerns', []))}",
368
+ )
369
+
370
+ # Summary of most common issues
371
+ issue_counts: dict[str, int] = {}
372
+ for f in findings:
373
+ t = f.get("type", "unknown")
374
+ issue_counts[t] = issue_counts.get(t, 0) + 1
375
+ top_issues = sorted(issue_counts.items(), key=lambda x: -x[1])[:5]
376
+
377
+ # Build input payload for prompt
378
+ input_payload = f"""Target: {target or "codebase"}
379
+
380
+ Performance Score: {hotspot_result.get("perf_score", 0)}/100
381
+ Performance Level: {hotspot_result.get("perf_level", "unknown")}
382
+
383
+ Hotspots:
384
+ {chr(10).join(hotspots_summary) if hotspots_summary else "No hotspots identified"}
385
+
386
+ Top Issues:
387
+ {json.dumps([{"type": t, "count": c} for t, c in top_issues], indent=2)}"""
388
+
389
+ # Check if XML prompts are enabled
390
+ if self._is_xml_enabled():
391
+ # Use XML-enhanced prompt
392
+ user_message = self._render_xml_prompt(
393
+ role="performance engineer specializing in optimization",
394
+ goal="Generate comprehensive optimization recommendations for performance issues",
395
+ instructions=[
396
+ "Analyze each performance hotspot and its concerns",
397
+ "Provide specific optimization strategies with code examples",
398
+ "Estimate the impact of each optimization (high/medium/low)",
399
+ "Prioritize recommendations by potential performance gain",
400
+ "Include before/after code patterns where helpful",
401
+ ],
402
+ constraints=[
403
+ "Be specific about which files and patterns to optimize",
404
+ "Include actionable code changes",
405
+ "Focus on high-impact optimizations first",
406
+ ],
407
+ input_type="performance_hotspots",
408
+ input_payload=input_payload,
409
+ extra={
410
+ "perf_score": hotspot_result.get("perf_score", 0),
411
+ "hotspot_count": len(hotspots),
412
+ },
413
+ )
414
+ system = None # XML prompt includes all context
415
+ else:
416
+ # Use legacy plain text prompts
417
+ system = """You are a performance engineer specializing in code optimization.
418
+ Analyze the identified performance hotspots and generate actionable recommendations.
419
+
420
+ For each hotspot:
421
+ 1. Explain why the pattern causes performance issues
422
+ 2. Provide specific optimization strategies with code examples
423
+ 3. Estimate the impact of the optimization
424
+
425
+ Prioritize by potential performance gain."""
426
+
427
+ user_message = f"""Generate optimization recommendations for these performance issues:
428
+
429
+ {input_payload}
430
+
431
+ Provide detailed optimization strategies."""
432
+
433
+ # Try executor-based execution first (Phase 3 pattern)
434
+ if self._executor is not None or self._api_key:
435
+ try:
436
+ step = PERF_AUDIT_STEPS["optimize"]
437
+ response, input_tokens, output_tokens, cost = await self.run_step_with_executor(
438
+ step=step,
439
+ prompt=user_message,
440
+ system=system,
441
+ )
442
+ except Exception:
443
+ # Fall back to legacy _call_llm if executor fails
444
+ response, input_tokens, output_tokens = await self._call_llm(
445
+ tier,
446
+ system or "",
447
+ user_message,
448
+ max_tokens=3000,
449
+ )
450
+ else:
451
+ # Legacy path for backward compatibility
452
+ response, input_tokens, output_tokens = await self._call_llm(
453
+ tier,
454
+ system or "",
455
+ user_message,
456
+ max_tokens=3000,
457
+ )
458
+
459
+ # Parse XML response if enforcement is enabled
460
+ parsed_data = self._parse_xml_response(response)
461
+
462
+ result = {
463
+ "optimization_plan": response,
464
+ "recommendation_count": len(hotspots),
465
+ "top_issues": [{"type": t, "count": c} for t, c in top_issues],
466
+ "perf_score": hotspot_result.get("perf_score", 0),
467
+ "perf_level": hotspot_result.get("perf_level", "unknown"),
468
+ "model_tier_used": tier.value,
469
+ }
470
+
471
+ # Merge parsed XML data if available
472
+ if parsed_data.get("xml_parsed"):
473
+ result.update(
474
+ {
475
+ "xml_parsed": True,
476
+ "summary": parsed_data.get("summary"),
477
+ "findings": parsed_data.get("findings", []),
478
+ "checklist": parsed_data.get("checklist", []),
479
+ },
480
+ )
481
+
482
+ # Add formatted report for human readability
483
+ result["formatted_report"] = format_perf_audit_report(result, input_data)
484
+
485
+ return (result, input_tokens, output_tokens)
486
+
487
+ def _get_optimization_action(self, concern: str) -> dict | None:
488
+ """Generate specific optimization action for a concern type."""
489
+ actions = {
490
+ "n_plus_one": {
491
+ "action": "Batch database queries",
492
+ "description": "Use prefetch_related/select_related or batch queries",
493
+ "estimated_impact": "high",
494
+ },
495
+ "sync_in_async": {
496
+ "action": "Use async alternatives",
497
+ "description": "Replace sync operations with async versions",
498
+ "estimated_impact": "high",
499
+ },
500
+ "string_concat_loop": {
501
+ "action": "Use str.join()",
502
+ "description": "Build list of strings and join at the end instead of concatenating",
503
+ "estimated_impact": "medium",
504
+ },
505
+ "repeated_regex": {
506
+ "action": "Pre-compile regex",
507
+ "description": "Use re.compile() and reuse the compiled pattern",
508
+ "estimated_impact": "medium",
509
+ },
510
+ "nested_loops": {
511
+ "action": "Optimize algorithm",
512
+ "description": "Consider using sets, dicts, or itertools to reduce complexity",
513
+ "estimated_impact": "high",
514
+ },
515
+ "list_comprehension_in_loop": {
516
+ "action": "Move comprehension outside loop",
517
+ "description": "Create the list once before the loop",
518
+ "estimated_impact": "medium",
519
+ },
520
+ "large_list_copy": {
521
+ "action": "Use iterators",
522
+ "description": "Consider using iterators instead of copying entire lists",
523
+ "estimated_impact": "low",
524
+ },
525
+ "global_import": {
526
+ "action": "Use specific imports",
527
+ "description": "Import only needed names to reduce memory and startup time",
528
+ "estimated_impact": "low",
529
+ },
530
+ }
531
+ return actions.get(concern)
532
+
533
+
534
def format_perf_audit_report(result: dict, input_data: dict) -> str:
    """Format performance audit output as a human-readable report.

    The report is assembled from independent sections; optional sections
    (top issues, hotspots, high-impact findings, recommendations) are
    omitted when their data is absent.

    Args:
        result: The optimize stage result
        input_data: Input data from previous stages

    Returns:
        Formatted report string

    """
    lines: list[str] = []
    lines.extend(_report_header(result))
    lines.extend(_scan_summary_section(input_data))
    lines.extend(_top_issues_section(result))
    lines.extend(_hotspots_section(input_data))
    lines.extend(_high_impact_section(input_data))
    lines.extend(_recommendations_section(result))
    lines.extend(_report_footer(result))
    return "\n".join(lines)


def _report_header(result: dict) -> list[str]:
    """Build the title banner with the overall score and level."""
    perf_score = result.get("perf_score", 0)
    perf_level = result.get("perf_level", "unknown").upper()

    # Map the numeric score onto a traffic-light icon and label.
    if perf_score >= 85:
        perf_icon, perf_text = "🟢", "EXCELLENT"
    elif perf_score >= 75:
        perf_icon, perf_text = "🟡", "GOOD"
    elif perf_score >= 50:
        perf_icon, perf_text = "🟠", "NEEDS OPTIMIZATION"
    else:
        perf_icon, perf_text = "🔴", "CRITICAL"

    return [
        "=" * 60,
        "PERFORMANCE AUDIT REPORT",
        "=" * 60,
        "",
        f"Performance Score: {perf_icon} {perf_score}/100 ({perf_text})",
        f"Performance Level: {perf_level}",
        "",
    ]


def _scan_summary_section(input_data: dict) -> list[str]:
    """Build the scan summary with file/issue counts broken down by impact."""
    by_impact = input_data.get("by_impact", {})
    return [
        "-" * 60,
        "SCAN SUMMARY",
        "-" * 60,
        f"Files Scanned: {input_data.get('files_scanned', 0)}",
        f"Issues Found: {input_data.get('finding_count', 0)}",
        "",
        "Issues by Impact:",
        f"  🔴 High: {by_impact.get('high', 0)}",
        f"  🟡 Medium: {by_impact.get('medium', 0)}",
        f"  🟢 Low: {by_impact.get('low', 0)}",
        "",
    ]


def _top_issues_section(result: dict) -> list[str]:
    """List the most frequent issue types; empty when none were reported."""
    top_issues = result.get("top_issues", [])
    if not top_issues:
        return []
    out = ["-" * 60, "TOP PERFORMANCE ISSUES", "-" * 60]
    for issue in top_issues:
        issue_type = issue.get("type", "unknown").replace("_", " ").title()
        count = issue.get("count", 0)
        out.append(f"  • {issue_type}: {count} occurrences")
    out.append("")
    return out


def _hotspots_section(input_data: dict) -> list[str]:
    """List up to 8 complexity hotspots; empty when none were found."""
    hotspot_result = input_data.get("hotspot_result", {})
    hotspots = hotspot_result.get("hotspots", [])
    if not hotspots:
        return []
    out = [
        "-" * 60,
        "PERFORMANCE HOTSPOTS",
        "-" * 60,
        f"Critical Hotspots: {hotspot_result.get('critical_count', 0)}",
        f"Moderate Hotspots: {hotspot_result.get('moderate_count', 0)}",
        "",
    ]
    for h in hotspots[:8]:
        file_path = h.get("file", "unknown")
        score = h.get("complexity_score", 0)
        concerns = h.get("concerns", [])
        # Icon thresholds mirror the hotspot severity buckets.
        score_icon = "🔴" if score >= 20 else "🟠" if score >= 10 else "🟡"
        out.append(f"  {score_icon} {file_path}")
        out.append(f"     Score: {score} | Concerns: {', '.join(concerns[:3])}")
    out.append("")
    return out


def _high_impact_section(input_data: dict) -> list[str]:
    """List up to 10 high-impact findings; empty when there are none."""
    findings = input_data.get("findings", [])
    high_impact = [f for f in findings if f.get("impact") == "high"]
    if not high_impact:
        return []
    out = ["-" * 60, "HIGH IMPACT FINDINGS", "-" * 60]
    for f in high_impact[:10]:
        file_path = f.get("file", "unknown")
        line = f.get("line", "?")
        desc = f.get("description", "Unknown issue")
        out.append(f"  🔴 {file_path}:{line}")
        out.append(f"     {desc}")
    out.append("")
    return out


def _recommendations_section(result: dict) -> list[str]:
    """Embed the LLM-generated optimization plan; empty when absent."""
    optimization_plan = result.get("optimization_plan", "")
    if not optimization_plan:
        return []
    return [
        "-" * 60,
        "OPTIMIZATION RECOMMENDATIONS",
        "-" * 60,
        optimization_plan,
        "",
    ]


def _report_footer(result: dict) -> list[str]:
    """Build the closing banner noting hotspot count and model tier used."""
    model_tier = result.get("model_tier_used", "unknown")
    rec_count = result.get("recommendation_count", 0)
    return [
        "=" * 60,
        f"Analyzed {rec_count} hotspots using {model_tier} tier model",
        "=" * 60,
    ]
652
+
653
+
654
def main():
    """CLI entry point for performance audit workflow."""
    import asyncio

    async def _audit() -> None:
        # Audit the current directory's Python files end to end.
        workflow = PerformanceAuditWorkflow()
        result = await workflow.execute(path=".", file_types=[".py"])
        output = result.final_output

        # Summary header followed by the key metrics from the final stage.
        summary = [
            "\nPerformance Audit Results",
            "=" * 50,
            f"Provider: {result.provider}",
            f"Success: {result.success}",
            f"Performance Level: {output.get('perf_level', 'N/A')}",
            f"Performance Score: {output.get('perf_score', 0)}/100",
            f"Recommendations: {output.get('recommendation_count', 0)}",
        ]
        for line in summary:
            print(line)

        top_issues = output.get("top_issues")
        if top_issues:
            print("\nTop Issues:")
            for issue in top_issues:
                print(f"  - {issue['type']}: {issue['count']} occurrences")

        cost_report = result.cost_report
        print("\nCost Report:")
        print(f"  Total Cost: ${cost_report.total_cost:.4f}")
        print(f"  Savings: ${cost_report.savings:.4f} ({cost_report.savings_percent:.1f}%)")

    asyncio.run(_audit())
684
+
685
+
686
# Allow running the performance audit workflow directly as a script.
if __name__ == "__main__":
    main()