empathy-framework 4.6.6__py3-none-any.whl → 4.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/METADATA +7 -6
- empathy_framework-4.7.0.dist-info/RECORD +354 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/top_level.txt +0 -2
- empathy_healthcare_plugin/monitors/monitoring/__init__.py +9 -9
- empathy_llm_toolkit/agent_factory/__init__.py +6 -6
- empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +7 -10
- empathy_llm_toolkit/agents_md/__init__.py +22 -0
- empathy_llm_toolkit/agents_md/loader.py +218 -0
- empathy_llm_toolkit/agents_md/parser.py +271 -0
- empathy_llm_toolkit/agents_md/registry.py +307 -0
- empathy_llm_toolkit/commands/__init__.py +51 -0
- empathy_llm_toolkit/commands/context.py +375 -0
- empathy_llm_toolkit/commands/loader.py +301 -0
- empathy_llm_toolkit/commands/models.py +231 -0
- empathy_llm_toolkit/commands/parser.py +371 -0
- empathy_llm_toolkit/commands/registry.py +429 -0
- empathy_llm_toolkit/config/__init__.py +8 -8
- empathy_llm_toolkit/config/unified.py +3 -7
- empathy_llm_toolkit/context/__init__.py +22 -0
- empathy_llm_toolkit/context/compaction.py +455 -0
- empathy_llm_toolkit/context/manager.py +434 -0
- empathy_llm_toolkit/hooks/__init__.py +24 -0
- empathy_llm_toolkit/hooks/config.py +306 -0
- empathy_llm_toolkit/hooks/executor.py +289 -0
- empathy_llm_toolkit/hooks/registry.py +302 -0
- empathy_llm_toolkit/hooks/scripts/__init__.py +39 -0
- empathy_llm_toolkit/hooks/scripts/evaluate_session.py +201 -0
- empathy_llm_toolkit/hooks/scripts/first_time_init.py +285 -0
- empathy_llm_toolkit/hooks/scripts/pre_compact.py +207 -0
- empathy_llm_toolkit/hooks/scripts/session_end.py +183 -0
- empathy_llm_toolkit/hooks/scripts/session_start.py +163 -0
- empathy_llm_toolkit/hooks/scripts/suggest_compact.py +225 -0
- empathy_llm_toolkit/learning/__init__.py +30 -0
- empathy_llm_toolkit/learning/evaluator.py +438 -0
- empathy_llm_toolkit/learning/extractor.py +514 -0
- empathy_llm_toolkit/learning/storage.py +560 -0
- empathy_llm_toolkit/providers.py +4 -11
- empathy_llm_toolkit/security/__init__.py +17 -17
- empathy_llm_toolkit/utils/tokens.py +2 -5
- empathy_os/__init__.py +202 -70
- empathy_os/cache_monitor.py +5 -3
- empathy_os/cli/__init__.py +11 -55
- empathy_os/cli/__main__.py +29 -15
- empathy_os/cli/commands/inspection.py +21 -12
- empathy_os/cli/commands/memory.py +4 -12
- empathy_os/cli/commands/profiling.py +198 -0
- empathy_os/cli/commands/utilities.py +27 -7
- empathy_os/cli.py +28 -57
- empathy_os/cli_unified.py +525 -1164
- empathy_os/cost_tracker.py +9 -3
- empathy_os/dashboard/server.py +200 -2
- empathy_os/hot_reload/__init__.py +7 -7
- empathy_os/hot_reload/config.py +6 -7
- empathy_os/hot_reload/integration.py +35 -35
- empathy_os/hot_reload/reloader.py +57 -57
- empathy_os/hot_reload/watcher.py +28 -28
- empathy_os/hot_reload/websocket.py +2 -2
- empathy_os/memory/__init__.py +11 -4
- empathy_os/memory/claude_memory.py +1 -1
- empathy_os/memory/cross_session.py +8 -12
- empathy_os/memory/edges.py +6 -6
- empathy_os/memory/file_session.py +770 -0
- empathy_os/memory/graph.py +30 -30
- empathy_os/memory/nodes.py +6 -6
- empathy_os/memory/short_term.py +15 -9
- empathy_os/memory/unified.py +606 -140
- empathy_os/meta_workflows/agent_creator.py +3 -9
- empathy_os/meta_workflows/cli_meta_workflows.py +113 -53
- empathy_os/meta_workflows/form_engine.py +6 -18
- empathy_os/meta_workflows/intent_detector.py +64 -24
- empathy_os/meta_workflows/models.py +3 -1
- empathy_os/meta_workflows/pattern_learner.py +13 -31
- empathy_os/meta_workflows/plan_generator.py +55 -47
- empathy_os/meta_workflows/session_context.py +2 -3
- empathy_os/meta_workflows/workflow.py +20 -51
- empathy_os/models/cli.py +2 -2
- empathy_os/models/tasks.py +1 -2
- empathy_os/models/telemetry.py +4 -1
- empathy_os/models/token_estimator.py +3 -1
- empathy_os/monitoring/alerts.py +938 -9
- empathy_os/monitoring/alerts_cli.py +346 -183
- empathy_os/orchestration/execution_strategies.py +12 -29
- empathy_os/orchestration/pattern_learner.py +20 -26
- empathy_os/orchestration/real_tools.py +6 -15
- empathy_os/platform_utils.py +2 -1
- empathy_os/plugins/__init__.py +2 -2
- empathy_os/plugins/base.py +64 -64
- empathy_os/plugins/registry.py +32 -32
- empathy_os/project_index/index.py +49 -15
- empathy_os/project_index/models.py +1 -2
- empathy_os/project_index/reports.py +1 -1
- empathy_os/project_index/scanner.py +1 -0
- empathy_os/redis_memory.py +10 -7
- empathy_os/resilience/__init__.py +1 -1
- empathy_os/resilience/health.py +10 -10
- empathy_os/routing/__init__.py +7 -7
- empathy_os/routing/chain_executor.py +37 -37
- empathy_os/routing/classifier.py +36 -36
- empathy_os/routing/smart_router.py +40 -40
- empathy_os/routing/{wizard_registry.py → workflow_registry.py} +47 -47
- empathy_os/scaffolding/__init__.py +8 -8
- empathy_os/scaffolding/__main__.py +1 -1
- empathy_os/scaffolding/cli.py +28 -28
- empathy_os/socratic/__init__.py +3 -19
- empathy_os/socratic/ab_testing.py +25 -36
- empathy_os/socratic/blueprint.py +38 -38
- empathy_os/socratic/cli.py +34 -20
- empathy_os/socratic/collaboration.py +30 -28
- empathy_os/socratic/domain_templates.py +9 -1
- empathy_os/socratic/embeddings.py +17 -13
- empathy_os/socratic/engine.py +135 -70
- empathy_os/socratic/explainer.py +70 -60
- empathy_os/socratic/feedback.py +24 -19
- empathy_os/socratic/forms.py +15 -10
- empathy_os/socratic/generator.py +51 -35
- empathy_os/socratic/llm_analyzer.py +25 -23
- empathy_os/socratic/mcp_server.py +99 -159
- empathy_os/socratic/session.py +19 -13
- empathy_os/socratic/storage.py +98 -67
- empathy_os/socratic/success.py +38 -27
- empathy_os/socratic/visual_editor.py +51 -39
- empathy_os/socratic/web_ui.py +99 -66
- empathy_os/telemetry/cli.py +3 -1
- empathy_os/telemetry/usage_tracker.py +1 -3
- empathy_os/test_generator/__init__.py +3 -3
- empathy_os/test_generator/cli.py +28 -28
- empathy_os/test_generator/generator.py +64 -66
- empathy_os/test_generator/risk_analyzer.py +11 -11
- empathy_os/vscode_bridge.py +173 -0
- empathy_os/workflows/__init__.py +212 -120
- empathy_os/workflows/batch_processing.py +8 -24
- empathy_os/workflows/bug_predict.py +1 -1
- empathy_os/workflows/code_review.py +20 -5
- empathy_os/workflows/code_review_pipeline.py +13 -8
- empathy_os/workflows/keyboard_shortcuts/workflow.py +6 -2
- empathy_os/workflows/manage_documentation.py +1 -0
- empathy_os/workflows/orchestrated_health_check.py +6 -11
- empathy_os/workflows/orchestrated_release_prep.py +3 -3
- empathy_os/workflows/pr_review.py +18 -10
- empathy_os/workflows/progressive/__init__.py +2 -12
- empathy_os/workflows/progressive/cli.py +14 -37
- empathy_os/workflows/progressive/core.py +12 -12
- empathy_os/workflows/progressive/orchestrator.py +166 -144
- empathy_os/workflows/progressive/reports.py +22 -31
- empathy_os/workflows/progressive/telemetry.py +8 -14
- empathy_os/workflows/progressive/test_gen.py +29 -48
- empathy_os/workflows/progressive/workflow.py +31 -70
- empathy_os/workflows/release_prep.py +21 -6
- empathy_os/workflows/release_prep_crew.py +1 -0
- empathy_os/workflows/secure_release.py +13 -6
- empathy_os/workflows/security_audit.py +8 -3
- empathy_os/workflows/test_coverage_boost_crew.py +3 -2
- empathy_os/workflows/test_maintenance_crew.py +1 -0
- empathy_os/workflows/test_runner.py +16 -12
- empathy_software_plugin/SOFTWARE_PLUGIN_README.md +25 -703
- empathy_software_plugin/cli.py +0 -122
- coach_wizards/__init__.py +0 -45
- coach_wizards/accessibility_wizard.py +0 -91
- coach_wizards/api_wizard.py +0 -91
- coach_wizards/base_wizard.py +0 -209
- coach_wizards/cicd_wizard.py +0 -91
- coach_wizards/code_reviewer_README.md +0 -60
- coach_wizards/code_reviewer_wizard.py +0 -180
- coach_wizards/compliance_wizard.py +0 -91
- coach_wizards/database_wizard.py +0 -91
- coach_wizards/debugging_wizard.py +0 -91
- coach_wizards/documentation_wizard.py +0 -91
- coach_wizards/generate_wizards.py +0 -347
- coach_wizards/localization_wizard.py +0 -173
- coach_wizards/migration_wizard.py +0 -91
- coach_wizards/monitoring_wizard.py +0 -91
- coach_wizards/observability_wizard.py +0 -91
- coach_wizards/performance_wizard.py +0 -91
- coach_wizards/prompt_engineering_wizard.py +0 -661
- coach_wizards/refactoring_wizard.py +0 -91
- coach_wizards/scaling_wizard.py +0 -90
- coach_wizards/security_wizard.py +0 -92
- coach_wizards/testing_wizard.py +0 -91
- empathy_framework-4.6.6.dist-info/RECORD +0 -410
- empathy_llm_toolkit/wizards/__init__.py +0 -43
- empathy_llm_toolkit/wizards/base_wizard.py +0 -364
- empathy_llm_toolkit/wizards/customer_support_wizard.py +0 -190
- empathy_llm_toolkit/wizards/healthcare_wizard.py +0 -378
- empathy_llm_toolkit/wizards/patient_assessment_README.md +0 -64
- empathy_llm_toolkit/wizards/patient_assessment_wizard.py +0 -193
- empathy_llm_toolkit/wizards/technology_wizard.py +0 -209
- empathy_os/wizard_factory_cli.py +0 -170
- empathy_software_plugin/wizards/__init__.py +0 -42
- empathy_software_plugin/wizards/advanced_debugging_wizard.py +0 -395
- empathy_software_plugin/wizards/agent_orchestration_wizard.py +0 -511
- empathy_software_plugin/wizards/ai_collaboration_wizard.py +0 -503
- empathy_software_plugin/wizards/ai_context_wizard.py +0 -441
- empathy_software_plugin/wizards/ai_documentation_wizard.py +0 -503
- empathy_software_plugin/wizards/base_wizard.py +0 -288
- empathy_software_plugin/wizards/book_chapter_wizard.py +0 -519
- empathy_software_plugin/wizards/code_review_wizard.py +0 -604
- empathy_software_plugin/wizards/debugging/__init__.py +0 -50
- empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +0 -414
- empathy_software_plugin/wizards/debugging/config_loaders.py +0 -446
- empathy_software_plugin/wizards/debugging/fix_applier.py +0 -469
- empathy_software_plugin/wizards/debugging/language_patterns.py +0 -385
- empathy_software_plugin/wizards/debugging/linter_parsers.py +0 -470
- empathy_software_plugin/wizards/debugging/verification.py +0 -369
- empathy_software_plugin/wizards/enhanced_testing_wizard.py +0 -537
- empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +0 -816
- empathy_software_plugin/wizards/multi_model_wizard.py +0 -501
- empathy_software_plugin/wizards/pattern_extraction_wizard.py +0 -422
- empathy_software_plugin/wizards/pattern_retriever_wizard.py +0 -400
- empathy_software_plugin/wizards/performance/__init__.py +0 -9
- empathy_software_plugin/wizards/performance/bottleneck_detector.py +0 -221
- empathy_software_plugin/wizards/performance/profiler_parsers.py +0 -278
- empathy_software_plugin/wizards/performance/trajectory_analyzer.py +0 -429
- empathy_software_plugin/wizards/performance_profiling_wizard.py +0 -305
- empathy_software_plugin/wizards/prompt_engineering_wizard.py +0 -425
- empathy_software_plugin/wizards/rag_pattern_wizard.py +0 -461
- empathy_software_plugin/wizards/security/__init__.py +0 -32
- empathy_software_plugin/wizards/security/exploit_analyzer.py +0 -290
- empathy_software_plugin/wizards/security/owasp_patterns.py +0 -241
- empathy_software_plugin/wizards/security/vulnerability_scanner.py +0 -604
- empathy_software_plugin/wizards/security_analysis_wizard.py +0 -322
- empathy_software_plugin/wizards/security_learning_wizard.py +0 -740
- empathy_software_plugin/wizards/tech_debt_wizard.py +0 -726
- empathy_software_plugin/wizards/testing/__init__.py +0 -27
- empathy_software_plugin/wizards/testing/coverage_analyzer.py +0 -459
- empathy_software_plugin/wizards/testing/quality_analyzer.py +0 -525
- empathy_software_plugin/wizards/testing/test_suggester.py +0 -533
- empathy_software_plugin/wizards/testing_wizard.py +0 -274
- wizards/__init__.py +0 -82
- wizards/admission_assessment_wizard.py +0 -644
- wizards/care_plan.py +0 -321
- wizards/clinical_assessment.py +0 -769
- wizards/discharge_planning.py +0 -77
- wizards/discharge_summary_wizard.py +0 -468
- wizards/dosage_calculation.py +0 -497
- wizards/incident_report_wizard.py +0 -454
- wizards/medication_reconciliation.py +0 -85
- wizards/nursing_assessment.py +0 -171
- wizards/patient_education.py +0 -654
- wizards/quality_improvement.py +0 -705
- wizards/sbar_report.py +0 -324
- wizards/sbar_wizard.py +0 -608
- wizards/shift_handoff_wizard.py +0 -535
- wizards/soap_note_wizard.py +0 -679
- wizards/treatment_plan.py +0 -15
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/WHEEL +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/entry_points.txt +0 -0
- {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,278 +0,0 @@
|
|
|
1
|
-
"""Profiler Output Parsers
|
|
2
|
-
|
|
3
|
-
Parses output from various profilers (cProfile, perf, Chrome DevTools, etc.)
|
|
4
|
-
|
|
5
|
-
Copyright 2025 Smart AI Memory, LLC
|
|
6
|
-
Licensed under Fair Source 0.9
|
|
7
|
-
"""
|
|
8
|
-
|
|
9
|
-
import json
|
|
10
|
-
import re
|
|
11
|
-
from dataclasses import dataclass
|
|
12
|
-
from enum import Enum
|
|
13
|
-
from typing import Any
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
class ProfilerType(Enum):
    """Enumeration of the profiler output formats this module understands."""

    CPROFILE = "cprofile"
    PYINSTRUMENT = "pyinstrument"
    CHROME_DEVTOOLS = "chrome_devtools"
    NODE_PROFILER = "node_profiler"
    GOLANG_PPROF = "golang_pprof"
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
@dataclass
|
|
27
|
-
class FunctionProfile:
|
|
28
|
-
"""Standardized function profile data.
|
|
29
|
-
|
|
30
|
-
Universal format across all profilers.
|
|
31
|
-
"""
|
|
32
|
-
|
|
33
|
-
function_name: str
|
|
34
|
-
file_path: str
|
|
35
|
-
line_number: int
|
|
36
|
-
total_time: float # seconds
|
|
37
|
-
self_time: float # seconds (excluding called functions)
|
|
38
|
-
call_count: int
|
|
39
|
-
cumulative_time: float # seconds
|
|
40
|
-
percent_total: float
|
|
41
|
-
profiler: str
|
|
42
|
-
children: list["FunctionProfile"] | None = None
|
|
43
|
-
metadata: dict[str, Any] | None = None
|
|
44
|
-
|
|
45
|
-
def to_dict(self) -> dict[str, Any]:
|
|
46
|
-
"""Convert to dictionary"""
|
|
47
|
-
return {
|
|
48
|
-
"function_name": self.function_name,
|
|
49
|
-
"file_path": self.file_path,
|
|
50
|
-
"line_number": self.line_number,
|
|
51
|
-
"total_time": self.total_time,
|
|
52
|
-
"self_time": self.self_time,
|
|
53
|
-
"call_count": self.call_count,
|
|
54
|
-
"cumulative_time": self.cumulative_time,
|
|
55
|
-
"percent_total": self.percent_total,
|
|
56
|
-
"profiler": self.profiler,
|
|
57
|
-
"children": [c.to_dict() for c in (self.children or [])],
|
|
58
|
-
"metadata": self.metadata or {},
|
|
59
|
-
}
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
class BaseProfilerParser:
    """Abstract base for profiler-output parsers.

    Subclasses supply a profiler name and must override :meth:`parse`.
    """

    def __init__(self, profiler_name: str):
        # Recorded on every FunctionProfile the subclass produces.
        self.profiler_name = profiler_name

    def parse(self, data: str) -> list[FunctionProfile]:
        """Parse raw profiler output into FunctionProfile records."""
        raise NotImplementedError(
            f"{self.__class__.__name__}.parse() must be implemented. "
            "Create a subclass of BaseProfilerParser and implement the parse() method. "
            f"See CProfileParser, ChromeDevToolsParser, or SimpleJSONProfilerParser for examples."
        )
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
class CProfileParser(BaseProfilerParser):
    """Parse Python cProfile output.

    Handles the text table produced by pstats ``print_stats()``.
    """

    def __init__(self):
        super().__init__("cprofile")

    def parse(self, data: str) -> list[FunctionProfile]:
        """Parse cProfile text output into FunctionProfile records.

        Args:
            data: Text table from cProfile/pstats.

        Returns:
            Profiles sorted by self time (descending) with ``percent_total``
            filled relative to the summed self time. Non-matching lines
            (headers, totals) are ignored.
        """
        profiles = []

        # Row format:
        #   ncalls  tottime  percall  cumtime  percall filename:lineno(function)
        # ncalls may be "total/primitive" (e.g. "120/100") for recursive
        # functions; the original pattern (\d+) silently dropped those rows.
        pattern = r"([\d/]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+(.+?):(\d+)\((.+?)\)"

        for line in data.split("\n"):
            match = re.match(pattern, line.strip())
            if not match:
                continue
            (
                ncalls,
                tottime,
                _percall_tot,
                cumtime,
                _percall_cum,
                filepath,
                lineno,
                funcname,
            ) = match.groups()

            profiles.append(
                FunctionProfile(
                    function_name=funcname,
                    file_path=filepath,
                    line_number=int(lineno),
                    total_time=float(tottime),
                    self_time=float(tottime),  # tottime is self time in cProfile
                    # For "total/primitive" use the total call count.
                    call_count=int(ncalls.split("/")[0]),
                    cumulative_time=float(cumtime),
                    percent_total=0.0,  # filled in below once the sum is known
                    profiler=self.profiler_name,
                ),
            )

        # Fill percentages relative to aggregate self time, then sort
        # slowest-first.
        if profiles:
            total_time = sum(p.total_time for p in profiles)
            for profile in profiles:
                profile.percent_total = (
                    (profile.total_time / total_time * 100) if total_time > 0 else 0
                )

            profiles.sort(key=lambda p: p.total_time, reverse=True)

        return profiles
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
class ChromeDevToolsParser(BaseProfilerParser):
    """Parse Chrome DevTools Performance profile.

    JSON format from Chrome DevTools Performance tab.
    """

    def __init__(self):
        super().__init__("chrome_devtools")

    def parse(self, data: str) -> list[FunctionProfile]:
        """Parse a Chrome trace into aggregated per-function profiles.

        Supports both Trace Event container formats: the object form
        (``{"traceEvents": [...]}``) and the bare JSON array form.
        Malformed JSON yields an empty list rather than raising.
        """
        profiles = []

        try:
            profile_data = json.loads(data)
        except json.JSONDecodeError:
            return profiles  # best-effort: unparseable input -> no profiles

        # Chrome emits traces either as a raw event array or wrapped in an
        # object under "traceEvents"; the original code only handled the
        # object form and crashed on the array form.
        if isinstance(profile_data, list):
            events = profile_data
        else:
            events = profile_data.get("traceEvents", [])

        # Aggregate duration and call counts per function name from
        # complete ("X" phase) events.
        function_times = {}
        for event in events:
            if event.get("ph") == "X":  # Complete events
                name = event.get("name", "unknown")
                dur = event.get("dur", 0) / 1000000  # microseconds -> seconds

                if name not in function_times:
                    function_times[name] = {"total_time": 0, "call_count": 0}

                function_times[name]["total_time"] += dur
                function_times[name]["call_count"] += 1

        # Convert aggregates to FunctionProfile records.
        total_time = sum(stats["total_time"] for stats in function_times.values())

        for func_name, stats in function_times.items():
            profiles.append(
                FunctionProfile(
                    function_name=func_name,
                    file_path="",  # Chrome doesn't always provide
                    line_number=0,
                    total_time=stats["total_time"],
                    self_time=stats["total_time"],
                    call_count=stats["call_count"],
                    cumulative_time=stats["total_time"],
                    percent_total=(
                        (stats["total_time"] / total_time * 100) if total_time > 0 else 0
                    ),
                    profiler=self.profiler_name,
                ),
            )

        profiles.sort(key=lambda p: p.total_time, reverse=True)

        return profiles
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
class SimpleJSONProfilerParser(BaseProfilerParser):
    """Parse simple JSON profiler format.

    For custom or simplified profiling data.
    """

    def __init__(self):
        super().__init__("simple_json")

    def parse(self, data: str) -> list[FunctionProfile]:
        """Parse the simple ``{"functions": [...]}`` JSON format.

        Malformed JSON yields an empty list; missing fields fall back to
        neutral defaults.
        """
        try:
            parsed = json.loads(data)
        except json.JSONDecodeError:
            return []  # best-effort: bad JSON yields no profiles

        records = [
            FunctionProfile(
                function_name=entry.get("name", "unknown"),
                file_path=entry.get("file", ""),
                line_number=entry.get("line", 0),
                total_time=entry.get("total_time", 0.0),
                self_time=entry.get("self_time", 0.0),
                call_count=entry.get("calls", 0),
                cumulative_time=entry.get("cumulative_time", 0.0),
                percent_total=entry.get("percent", 0.0),
                profiler=self.profiler_name,
            )
            for entry in parsed.get("functions", [])
        ]

        # Slowest first.
        records.sort(key=lambda p: p.total_time, reverse=True)
        return records
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
class ProfilerParserFactory:
    """Factory mapping profiler-type names to their parser classes."""

    # Registry of supported profiler types (keys are lowercase).
    _parsers = {
        "cprofile": CProfileParser,
        "chrome_devtools": ChromeDevToolsParser,
        "simple_json": SimpleJSONProfilerParser,
    }

    @classmethod
    def create(cls, profiler_type: str) -> BaseProfilerParser:
        """Instantiate the parser registered for *profiler_type*.

        Raises:
            ValueError: If the profiler type is not registered.
        """
        parser_class = cls._parsers.get(profiler_type.lower())

        if parser_class is None:
            raise ValueError(
                f"Unsupported profiler: {profiler_type}. "
                f"Supported: {', '.join(cls._parsers.keys())}",
            )

        return parser_class()
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
def parse_profiler_output(profiler_type: str, data: str) -> list[FunctionProfile]:
    """Parse raw profiler output via the matching registered parser.

    Args:
        profiler_type: Type of profiler ("cprofile", "chrome_devtools", etc.)
        data: Raw profiler output

    Returns:
        List of FunctionProfile objects

    Example:
        >>> profiles = parse_profiler_output("cprofile", profile_data)
        >>> for profile in profiles[:5]:  # Top 5 slowest
        ...     print(f"{profile.function_name}: {profile.total_time:.3f}s")

    """
    return ProfilerParserFactory.create(profiler_type).parse(data)
|
|
@@ -1,429 +0,0 @@
|
|
|
1
|
-
"""Performance Trajectory Analyzer (Level 4)
|
|
2
|
-
|
|
3
|
-
Analyzes performance trends to predict future bottlenecks.
|
|
4
|
-
|
|
5
|
-
This is Level 4 Anticipatory Empathy - predicting performance degradation
|
|
6
|
-
BEFORE it becomes critical.
|
|
7
|
-
|
|
8
|
-
Copyright 2025 Smart AI Memory, LLC
|
|
9
|
-
Licensed under Fair Source 0.9
|
|
10
|
-
"""
|
|
11
|
-
|
|
12
|
-
from dataclasses import dataclass
|
|
13
|
-
from typing import Any
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
@dataclass
class PerformanceTrend:
    """Observed direction and rate of change for a single metric."""

    metric_name: str
    current_value: float
    previous_value: float
    change: float
    change_percent: float
    direction: str  # "improving", "degrading", "stable"
    rate_of_change: float  # absolute change per observed period
    concerning: bool
    reasoning: str
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
@dataclass
class TrajectoryPrediction:
    """Performance trajectory prediction.

    This is Level 4 - predicting BEFORE hitting limits.
    """

    trajectory_state: str  # "optimal", "degrading", "critical"
    estimated_time_to_critical: str | None
    trends: list[PerformanceTrend]
    overall_assessment: str
    confidence: float
    recommendations: list[str]

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary format for compatibility"""

        def _trend_dict(trend: PerformanceTrend) -> dict[str, Any]:
            # "severity" is derived here, not stored on the trend:
            # concerning trends serialize as HIGH, the rest as LOW.
            return {
                "metric_name": trend.metric_name,
                "current_value": trend.current_value,
                "previous_value": trend.previous_value,
                "change": trend.change,
                "change_percent": trend.change_percent,
                "direction": trend.direction,
                "rate_of_change": trend.rate_of_change,
                "concerning": trend.concerning,
                "reasoning": trend.reasoning,
                "severity": "HIGH" if trend.concerning else "LOW",
            }

        return {
            "trajectory_state": self.trajectory_state,
            "estimated_time_to_critical": self.estimated_time_to_critical,
            "trends": [_trend_dict(t) for t in self.trends],
            "overall_assessment": self.overall_assessment,
            "confidence": self.confidence,
            "recommendations": self.recommendations,
        }

    def __getitem__(self, key):
        """Allow dict-style access for backward compatibility"""
        return self.to_dict()[key]
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
class PerformanceTrajectoryAnalyzer:
|
|
77
|
-
"""Analyzes performance trajectory to predict degradation.
|
|
78
|
-
|
|
79
|
-
Level 4 Anticipatory Empathy implementation.
|
|
80
|
-
"""
|
|
81
|
-
|
|
82
|
-
def __init__(self):
    """Initialize the thresholds used to judge metric health and growth."""
    # Acceptable operating range per metric as (min, max). A current value
    # outside these bounds marks the metric as concerning immediately.
    self.acceptable_ranges = {
        "response_time": (0, 1.0),  # seconds
        "throughput": (100, float("inf")),  # requests/sec
        "error_rate": (0, 0.01),  # 1%
        "cpu_usage": (0, 0.80),  # 80%
        "memory_usage": (0, 0.85),  # 85%
    }

    # Degradation rate (absolute change per observed period) considered
    # concerning even while the metric is still inside its range.
    self.concerning_rates = {
        "response_time": 0.1,  # 100ms increase per day
        "error_rate": 0.005,  # 0.5% increase
        "memory_usage": 0.05,  # 5% increase
    }
|
|
98
|
-
|
|
99
|
-
def analyze_trajectory(
    self,
    current_metrics,
    historical_metrics=None,  # dict or list — order auto-detected below
) -> TrajectoryPrediction:
    """Analyze performance trajectory.

    Accepts parameters in either order for backward compatibility:
    - analyze_trajectory(current_metrics: dict, historical_metrics: list)
    - analyze_trajectory(historical_metrics: list, current_metrics: dict)

    Args:
        current_metrics: Current performance metrics (dict) OR historical data (list)
        historical_metrics: Historical data (list) OR current metrics (dict)

    Returns:
        TrajectoryPrediction with assessment

    Example:
        >>> history = [
        ...     {"time": "day1", "response_time": 0.2, "error_rate": 0.001},
        ...     {"time": "day2", "response_time": 0.45, "error_rate": 0.003},
        ...     {"time": "day3", "response_time": 0.8, "error_rate": 0.007}
        ... ]
        >>> prediction = analyzer.analyze_trajectory(current_metrics, history)
        >>> if prediction.trajectory_state == "degrading":
        ...     print(f"ALERT: {prediction.overall_assessment}")

    """
    # Auto-detect parameter order (backward compatibility): a list in the
    # first slot means the caller used the legacy (historical, current) order.
    if isinstance(current_metrics, list) and isinstance(historical_metrics, dict):
        # Parameters were passed in reverse order
        current_metrics, historical_metrics = historical_metrics, current_metrics
    elif isinstance(current_metrics, list) and historical_metrics is None:
        # Only historical metrics provided; treat the newest sample as current.
        historical_metrics = current_metrics
        current_metrics = historical_metrics[-1] if historical_metrics else {}

    # Without history there is nothing to trend; report a low-confidence
    # "optimal" rather than guessing.
    if not historical_metrics:
        return TrajectoryPrediction(
            trajectory_state="optimal",
            estimated_time_to_critical=None,
            trends=[],
            overall_assessment="Insufficient historical data for trajectory analysis",
            confidence=0.3,
            recommendations=["Collect performance metrics over time"],
        )

    # Analyze trends for each metric present in the current snapshot.
    trends = []

    for metric_name, current_value in current_metrics.items():
        trend = self._analyze_metric_trend(metric_name, current_value, historical_metrics)

        if trend:
            trends.append(trend)

    # Determine overall trajectory state
    trajectory_state = self._determine_trajectory_state(trends)

    # Estimate time to critical (only meaningful when already degrading)
    time_to_critical = None
    if trajectory_state in ["degrading", "critical"]:
        time_to_critical = self._estimate_time_to_critical(trends, current_metrics)

    # Generate a human-readable assessment
    assessment = self._generate_assessment(trajectory_state, trends, time_to_critical)

    # Generate recommendations
    recommendations = self._generate_recommendations(trajectory_state, trends)

    # Calculate confidence from history depth and trend agreement
    confidence = self._calculate_confidence(historical_metrics, trends)

    return TrajectoryPrediction(
        trajectory_state=trajectory_state,
        estimated_time_to_critical=time_to_critical,
        trends=trends,
        overall_assessment=assessment,
        confidence=confidence,
        recommendations=recommendations,
    )
|
|
181
|
-
|
|
182
|
-
def _analyze_metric_trend(
    self,
    metric_name: str,
    current_value: float,
    historical_metrics: list[dict[str, Any]],
) -> PerformanceTrend | None:
    """Build a PerformanceTrend for one metric.

    Args:
        metric_name: Name of the metric being analyzed.
        current_value: Latest observed value (skipped if non-numeric).
        historical_metrics: Chronological list of past metric snapshots.

    Returns:
        PerformanceTrend, or None when the metric cannot be analyzed
        (non-numeric current value, or no numeric history).
    """
    # Validate current_value is numeric
    try:
        current_value = float(current_value)
    except (ValueError, TypeError):
        # Skip non-numeric fields (timestamps, etc.)
        return None

    # Extract numeric historical values for this metric.
    historical_values = []
    for entry in historical_metrics:
        if metric_name in entry and entry[metric_name] is not None:
            try:
                historical_values.append(float(entry[metric_name]))
            except (ValueError, TypeError):
                # Skip this non-numeric entry instead of abandoning the
                # whole metric (the original returned None here, which
                # discarded all remaining valid history).
                continue

    if not historical_values:
        return None

    # Calculate overall trend from first observation to current
    first_value = historical_values[0]
    previous_value = historical_values[-1]

    # Overall change from start to current
    total_change = current_value - first_value
    change_percent = (total_change / first_value * 100) if first_value != 0 else 0

    # Determine direction based on overall trend. For "cost" metrics
    # (latency, errors, utilization) an increase is degradation; for
    # others (e.g. throughput) an increase is improvement.
    if abs(change_percent) < 5:
        direction = "stable"
    elif total_change > 0:
        if metric_name in ["response_time", "error_rate", "cpu_usage", "memory_usage"]:
            direction = "degrading"
        else:
            direction = "improving"
    elif metric_name in ["response_time", "error_rate", "cpu_usage", "memory_usage"]:
        direction = "improving"
    else:
        direction = "degrading"

    # Average absolute change per observed period.
    time_periods = len(historical_values)
    rate_of_change = abs(total_change) / time_periods if time_periods > 0 else 0

    # Determine if concerning
    concerning, reasoning = self._is_trend_concerning(
        metric_name,
        current_value,
        total_change,
        rate_of_change,
        direction,
    )

    return PerformanceTrend(
        metric_name=metric_name,
        current_value=current_value,
        previous_value=previous_value,
        change=total_change,
        change_percent=change_percent,
        direction=direction,
        rate_of_change=rate_of_change,
        concerning=concerning,
        reasoning=reasoning,
    )
|
|
255
|
-
|
|
256
|
-
def _is_trend_concerning(
|
|
257
|
-
self,
|
|
258
|
-
metric_name: str,
|
|
259
|
-
current_value: float,
|
|
260
|
-
change: float,
|
|
261
|
-
rate_of_change: float,
|
|
262
|
-
direction: str,
|
|
263
|
-
) -> tuple[bool, str]:
|
|
264
|
-
"""Determine if trend is concerning"""
|
|
265
|
-
# Check if currently out of acceptable range
|
|
266
|
-
if metric_name in self.acceptable_ranges:
|
|
267
|
-
min_val, max_val = self.acceptable_ranges[metric_name]
|
|
268
|
-
|
|
269
|
-
if current_value < min_val:
|
|
270
|
-
return True, f"{metric_name} below acceptable range"
|
|
271
|
-
if current_value > max_val:
|
|
272
|
-
return True, f"{metric_name} above acceptable range ({max_val})"
|
|
273
|
-
|
|
274
|
-
# Check rate of change
|
|
275
|
-
if metric_name in self.concerning_rates:
|
|
276
|
-
threshold = self.concerning_rates[metric_name]
|
|
277
|
-
|
|
278
|
-
if direction == "degrading" and rate_of_change > threshold:
|
|
279
|
-
return True, f"{metric_name} degrading rapidly ({change:+.3f} per period)"
|
|
280
|
-
|
|
281
|
-
return False, "Within acceptable trajectory"
|
|
282
|
-
|
|
283
|
-
def _determine_trajectory_state(self, trends: list[PerformanceTrend]) -> str:
|
|
284
|
-
"""Determine overall trajectory state"""
|
|
285
|
-
concerning_trends = [t for t in trends if t.concerning]
|
|
286
|
-
|
|
287
|
-
if not concerning_trends:
|
|
288
|
-
return "optimal"
|
|
289
|
-
|
|
290
|
-
# Count critical metrics
|
|
291
|
-
critical_metrics = ["response_time", "error_rate"]
|
|
292
|
-
critical_concerning = sum(1 for t in concerning_trends if t.metric_name in critical_metrics)
|
|
293
|
-
|
|
294
|
-
if critical_concerning >= 1:
|
|
295
|
-
return "critical"
|
|
296
|
-
|
|
297
|
-
if len(concerning_trends) >= 2:
|
|
298
|
-
return "degrading"
|
|
299
|
-
|
|
300
|
-
return "degrading"
|
|
301
|
-
|
|
302
|
-
def _estimate_time_to_critical(
|
|
303
|
-
self,
|
|
304
|
-
trends: list[PerformanceTrend],
|
|
305
|
-
current_metrics: dict[str, float],
|
|
306
|
-
) -> str | None:
|
|
307
|
-
"""Estimate time until metrics hit critical thresholds.
|
|
308
|
-
|
|
309
|
-
Core Level 4 - predicting the future.
|
|
310
|
-
"""
|
|
311
|
-
for trend in trends:
|
|
312
|
-
if not trend.concerning:
|
|
313
|
-
continue
|
|
314
|
-
|
|
315
|
-
# Response time prediction
|
|
316
|
-
if trend.metric_name == "response_time" and trend.direction == "degrading":
|
|
317
|
-
critical_threshold = 1.0 # 1 second
|
|
318
|
-
current = trend.current_value
|
|
319
|
-
rate = trend.rate_of_change
|
|
320
|
-
|
|
321
|
-
if rate > 0 and current < critical_threshold:
|
|
322
|
-
periods_to_critical = (critical_threshold - current) / rate
|
|
323
|
-
if 0 < periods_to_critical < 30: # Within 30 periods
|
|
324
|
-
return f"~{int(periods_to_critical)} time periods"
|
|
325
|
-
|
|
326
|
-
# Memory usage prediction
|
|
327
|
-
if trend.metric_name == "memory_usage" and trend.direction == "degrading":
|
|
328
|
-
critical_threshold = 0.95 # 95%
|
|
329
|
-
current = trend.current_value
|
|
330
|
-
rate = trend.rate_of_change
|
|
331
|
-
|
|
332
|
-
if rate > 0 and current < critical_threshold:
|
|
333
|
-
periods_to_critical = (critical_threshold - current) / rate
|
|
334
|
-
if 0 < periods_to_critical < 30:
|
|
335
|
-
return f"~{int(periods_to_critical)} time periods"
|
|
336
|
-
|
|
337
|
-
return None
|
|
338
|
-
|
|
339
|
-
def _generate_assessment(
|
|
340
|
-
self,
|
|
341
|
-
trajectory_state: str,
|
|
342
|
-
trends: list[PerformanceTrend],
|
|
343
|
-
time_to_critical: str | None,
|
|
344
|
-
) -> str:
|
|
345
|
-
"""Generate overall assessment"""
|
|
346
|
-
if trajectory_state == "optimal":
|
|
347
|
-
return "Performance metrics stable. System operating within acceptable ranges."
|
|
348
|
-
|
|
349
|
-
concerning = [t for t in trends if t.concerning]
|
|
350
|
-
|
|
351
|
-
if trajectory_state == "critical":
|
|
352
|
-
trends_desc = ", ".join(f"{t.metric_name} {t.direction}" for t in concerning[:3])
|
|
353
|
-
return (
|
|
354
|
-
f"CRITICAL performance trajectory: {trends_desc}. Immediate investigation required."
|
|
355
|
-
)
|
|
356
|
-
|
|
357
|
-
if trajectory_state == "degrading":
|
|
358
|
-
trends_desc = ", ".join(f"{t.metric_name} {t.direction}" for t in concerning[:3])
|
|
359
|
-
|
|
360
|
-
if time_to_critical:
|
|
361
|
-
return (
|
|
362
|
-
f"Performance degrading: {trends_desc}. "
|
|
363
|
-
f"In our experience, this pattern leads to service degradation. "
|
|
364
|
-
f"Estimated time to critical: {time_to_critical}. "
|
|
365
|
-
"Early optimization recommended."
|
|
366
|
-
)
|
|
367
|
-
|
|
368
|
-
return (
|
|
369
|
-
f"Performance degrading: {trends_desc}. "
|
|
370
|
-
"In our experience, this warrants investigation."
|
|
371
|
-
)
|
|
372
|
-
|
|
373
|
-
return "Performance trajectory under assessment."
|
|
374
|
-
|
|
375
|
-
def _generate_recommendations(
|
|
376
|
-
self,
|
|
377
|
-
trajectory_state: str,
|
|
378
|
-
trends: list[PerformanceTrend],
|
|
379
|
-
) -> list[str]:
|
|
380
|
-
"""Generate actionable recommendations"""
|
|
381
|
-
if trajectory_state == "optimal":
|
|
382
|
-
return ["Continue monitoring performance metrics"]
|
|
383
|
-
|
|
384
|
-
recommendations = []
|
|
385
|
-
|
|
386
|
-
if trajectory_state in ["degrading", "critical"]:
|
|
387
|
-
recommendations.append("Investigate performance degradation immediately")
|
|
388
|
-
recommendations.append("Review recent code changes")
|
|
389
|
-
|
|
390
|
-
concerning = [t for t in trends if t.concerning]
|
|
391
|
-
|
|
392
|
-
for trend in concerning:
|
|
393
|
-
if trend.metric_name == "response_time":
|
|
394
|
-
recommendations.append("Profile slow endpoints to identify bottlenecks")
|
|
395
|
-
recommendations.append("Consider adding caching or database optimization")
|
|
396
|
-
elif trend.metric_name == "memory_usage":
|
|
397
|
-
recommendations.append("Check for memory leaks")
|
|
398
|
-
recommendations.append("Review object lifecycle and garbage collection")
|
|
399
|
-
elif trend.metric_name == "error_rate":
|
|
400
|
-
recommendations.append("Review error logs for patterns")
|
|
401
|
-
recommendations.append("Add error monitoring and alerting")
|
|
402
|
-
|
|
403
|
-
if trajectory_state == "critical":
|
|
404
|
-
recommendations.append("Consider scaling resources immediately")
|
|
405
|
-
|
|
406
|
-
return list(dict.fromkeys(recommendations)) # Deduplicate (preserves order)
|
|
407
|
-
|
|
408
|
-
def _calculate_confidence(
|
|
409
|
-
self,
|
|
410
|
-
historical_metrics: list[dict[str, Any]],
|
|
411
|
-
trends: list[PerformanceTrend],
|
|
412
|
-
) -> float:
|
|
413
|
-
"""Calculate confidence in prediction"""
|
|
414
|
-
# More data = higher confidence
|
|
415
|
-
data_points = len(historical_metrics)
|
|
416
|
-
data_confidence = min(data_points / 10, 1.0)
|
|
417
|
-
|
|
418
|
-
# More consistent trends = higher confidence
|
|
419
|
-
if trends:
|
|
420
|
-
concerning_count = sum(1 for t in trends if t.concerning)
|
|
421
|
-
trend_confidence = concerning_count / len(trends) if trends else 0.5
|
|
422
|
-
else:
|
|
423
|
-
trend_confidence = 0.5
|
|
424
|
-
|
|
425
|
-
return (data_confidence + trend_confidence) / 2
|
|
426
|
-
|
|
427
|
-
|
|
428
|
-
# Alias for backward compatibility: the old public name for
# PerformanceTrajectoryAnalyzer, kept so existing imports keep working.
TrajectoryAnalyzer = PerformanceTrajectoryAnalyzer
|