fraiseql-confiture 0.3.4__cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- confiture/__init__.py +48 -0
- confiture/_core.cp311-win_amd64.pyd +0 -0
- confiture/cli/__init__.py +0 -0
- confiture/cli/dry_run.py +116 -0
- confiture/cli/lint_formatter.py +193 -0
- confiture/cli/main.py +1656 -0
- confiture/config/__init__.py +0 -0
- confiture/config/environment.py +263 -0
- confiture/core/__init__.py +51 -0
- confiture/core/anonymization/__init__.py +0 -0
- confiture/core/anonymization/audit.py +485 -0
- confiture/core/anonymization/benchmarking.py +372 -0
- confiture/core/anonymization/breach_notification.py +652 -0
- confiture/core/anonymization/compliance.py +617 -0
- confiture/core/anonymization/composer.py +298 -0
- confiture/core/anonymization/data_subject_rights.py +669 -0
- confiture/core/anonymization/factory.py +319 -0
- confiture/core/anonymization/governance.py +737 -0
- confiture/core/anonymization/performance.py +1092 -0
- confiture/core/anonymization/profile.py +284 -0
- confiture/core/anonymization/registry.py +195 -0
- confiture/core/anonymization/security/kms_manager.py +547 -0
- confiture/core/anonymization/security/lineage.py +888 -0
- confiture/core/anonymization/security/token_store.py +686 -0
- confiture/core/anonymization/strategies/__init__.py +41 -0
- confiture/core/anonymization/strategies/address.py +359 -0
- confiture/core/anonymization/strategies/credit_card.py +374 -0
- confiture/core/anonymization/strategies/custom.py +161 -0
- confiture/core/anonymization/strategies/date.py +218 -0
- confiture/core/anonymization/strategies/differential_privacy.py +398 -0
- confiture/core/anonymization/strategies/email.py +141 -0
- confiture/core/anonymization/strategies/format_preserving_encryption.py +310 -0
- confiture/core/anonymization/strategies/hash.py +150 -0
- confiture/core/anonymization/strategies/ip_address.py +235 -0
- confiture/core/anonymization/strategies/masking_retention.py +252 -0
- confiture/core/anonymization/strategies/name.py +298 -0
- confiture/core/anonymization/strategies/phone.py +119 -0
- confiture/core/anonymization/strategies/preserve.py +85 -0
- confiture/core/anonymization/strategies/redact.py +101 -0
- confiture/core/anonymization/strategies/salted_hashing.py +322 -0
- confiture/core/anonymization/strategies/text_redaction.py +183 -0
- confiture/core/anonymization/strategies/tokenization.py +334 -0
- confiture/core/anonymization/strategy.py +241 -0
- confiture/core/anonymization/syncer_audit.py +357 -0
- confiture/core/blue_green.py +683 -0
- confiture/core/builder.py +500 -0
- confiture/core/checksum.py +358 -0
- confiture/core/connection.py +132 -0
- confiture/core/differ.py +522 -0
- confiture/core/drift.py +564 -0
- confiture/core/dry_run.py +182 -0
- confiture/core/health.py +313 -0
- confiture/core/hooks/__init__.py +87 -0
- confiture/core/hooks/base.py +232 -0
- confiture/core/hooks/context.py +146 -0
- confiture/core/hooks/execution_strategies.py +57 -0
- confiture/core/hooks/observability.py +220 -0
- confiture/core/hooks/phases.py +53 -0
- confiture/core/hooks/registry.py +295 -0
- confiture/core/large_tables.py +775 -0
- confiture/core/linting/__init__.py +70 -0
- confiture/core/linting/composer.py +192 -0
- confiture/core/linting/libraries/__init__.py +17 -0
- confiture/core/linting/libraries/gdpr.py +168 -0
- confiture/core/linting/libraries/general.py +184 -0
- confiture/core/linting/libraries/hipaa.py +144 -0
- confiture/core/linting/libraries/pci_dss.py +104 -0
- confiture/core/linting/libraries/sox.py +120 -0
- confiture/core/linting/schema_linter.py +491 -0
- confiture/core/linting/versioning.py +151 -0
- confiture/core/locking.py +389 -0
- confiture/core/migration_generator.py +298 -0
- confiture/core/migrator.py +793 -0
- confiture/core/observability/__init__.py +44 -0
- confiture/core/observability/audit.py +323 -0
- confiture/core/observability/logging.py +187 -0
- confiture/core/observability/metrics.py +174 -0
- confiture/core/observability/tracing.py +192 -0
- confiture/core/pg_version.py +418 -0
- confiture/core/pool.py +406 -0
- confiture/core/risk/__init__.py +39 -0
- confiture/core/risk/predictor.py +188 -0
- confiture/core/risk/scoring.py +248 -0
- confiture/core/rollback_generator.py +388 -0
- confiture/core/schema_analyzer.py +769 -0
- confiture/core/schema_to_schema.py +590 -0
- confiture/core/security/__init__.py +32 -0
- confiture/core/security/logging.py +201 -0
- confiture/core/security/validation.py +416 -0
- confiture/core/signals.py +371 -0
- confiture/core/syncer.py +540 -0
- confiture/exceptions.py +192 -0
- confiture/integrations/__init__.py +0 -0
- confiture/models/__init__.py +0 -0
- confiture/models/lint.py +193 -0
- confiture/models/migration.py +180 -0
- confiture/models/schema.py +203 -0
- confiture/scenarios/__init__.py +36 -0
- confiture/scenarios/compliance.py +586 -0
- confiture/scenarios/ecommerce.py +199 -0
- confiture/scenarios/financial.py +253 -0
- confiture/scenarios/healthcare.py +315 -0
- confiture/scenarios/multi_tenant.py +340 -0
- confiture/scenarios/saas.py +295 -0
- confiture/testing/FRAMEWORK_API.md +722 -0
- confiture/testing/__init__.py +38 -0
- confiture/testing/fixtures/__init__.py +11 -0
- confiture/testing/fixtures/data_validator.py +229 -0
- confiture/testing/fixtures/migration_runner.py +167 -0
- confiture/testing/fixtures/schema_snapshotter.py +352 -0
- confiture/testing/frameworks/__init__.py +10 -0
- confiture/testing/frameworks/mutation.py +587 -0
- confiture/testing/frameworks/performance.py +479 -0
- confiture/testing/utils/__init__.py +0 -0
- fraiseql_confiture-0.3.4.dist-info/METADATA +438 -0
- fraiseql_confiture-0.3.4.dist-info/RECORD +119 -0
- fraiseql_confiture-0.3.4.dist-info/WHEEL +4 -0
- fraiseql_confiture-0.3.4.dist-info/entry_points.txt +2 -0
- fraiseql_confiture-0.3.4.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,479 @@
|
|
|
1
|
+
"""Performance profiling system for database migrations.
|
|
2
|
+
|
|
3
|
+
Provides detailed performance metrics and regression detection for migrations.
|
|
4
|
+
|
|
5
|
+
Architecture:
|
|
6
|
+
- MigrationPerformanceProfiler: Profiles migration execution with detailed metrics
|
|
7
|
+
- PerformanceProfile: Detailed metrics for a single migration
|
|
8
|
+
- PerformanceBaseline: Reference metrics for regression detection
|
|
9
|
+
- PerformanceOptimizationReport: Bottleneck identification and recommendations
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
import json
|
|
13
|
+
import time
|
|
14
|
+
from dataclasses import asdict, dataclass, field
|
|
15
|
+
from pathlib import Path
|
|
16
|
+
from typing import Any
|
|
17
|
+
|
|
18
|
+
import psycopg
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
@dataclass
|
|
22
|
+
class OperationMetrics:
|
|
23
|
+
"""Metrics for a single operation."""
|
|
24
|
+
|
|
25
|
+
name: str # Operation name (e.g., "ALTER TABLE")
|
|
26
|
+
start_time: float # Timestamp when operation started
|
|
27
|
+
end_time: float # Timestamp when operation ended
|
|
28
|
+
duration_seconds: float # Total duration in seconds
|
|
29
|
+
percent_of_total: float # Percentage of migration time
|
|
30
|
+
memory_before_mb: float | None # Memory before operation (if tracked)
|
|
31
|
+
memory_after_mb: float | None # Memory after operation (if tracked)
|
|
32
|
+
io_operations: int | None # Number of I/O operations (if tracked)
|
|
33
|
+
|
|
34
|
+
@property
|
|
35
|
+
def memory_delta_mb(self) -> float | None:
|
|
36
|
+
"""Calculate memory change during operation."""
|
|
37
|
+
if self.memory_before_mb is not None and self.memory_after_mb is not None:
|
|
38
|
+
return self.memory_after_mb - self.memory_before_mb
|
|
39
|
+
return None
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
@dataclass
class PerformanceProfile:
    """Aggregated performance metrics for a single migration execution."""

    migration_name: str
    start_timestamp: float
    end_timestamp: float
    total_duration_seconds: float

    # Per-operation metrics keyed by operation name.
    operations: dict[str, OperationMetrics] = field(default_factory=dict)
    memory_peak_mb: float | None = None
    cpu_avg_percent: float | None = None
    total_io_operations: int | None = None

    def get_bottlenecks(self, threshold: float = 0.05) -> list[OperationMetrics]:
        """Return operations whose share of total time meets *threshold*.

        Args:
            threshold: Fraction of total time (e.g. 0.05 for 5%)

        Returns:
            Matching operations, slowest first
        """
        cutoff = threshold * 100  # percent_of_total is stored on a 0-100 scale
        slow_ops = [op for op in self.operations.values() if op.percent_of_total >= cutoff]
        slow_ops.sort(key=lambda op: op.duration_seconds, reverse=True)
        return slow_ops

    def to_dict(self) -> dict[str, Any]:
        """Serialize the profile to a JSON-friendly dictionary."""
        payload: dict[str, Any] = {
            "migration_name": self.migration_name,
            "total_duration_seconds": self.total_duration_seconds,
            "memory_peak_mb": self.memory_peak_mb,
            "cpu_avg_percent": self.cpu_avg_percent,
            "total_io_operations": self.total_io_operations,
            "operations": [asdict(op) for op in self.operations.values()],
        }
        return payload
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
@dataclass
class RegressionReport:
    """Collects performance regressions detected for one migration."""

    migration_name: str
    # Each entry carries "type", "operation", "baseline", "current",
    # and "regression_pct" keys.
    regressions: list[dict[str, Any]] = field(default_factory=list)

    @property
    def has_regressions(self) -> bool:
        """True if at least one regression was recorded."""
        return bool(self.regressions)

    @property
    def worst_regression_pct(self) -> float:
        """Largest recorded regression percentage, or 0.0 when none."""
        if self.regressions:
            return max(entry["regression_pct"] for entry in self.regressions)
        return 0.0
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
@dataclass
class PerformanceOptimizationRecommendation:
    """One actionable suggestion for speeding up a slow operation."""

    operation: str  # name of the slow operation
    current_duration_seconds: float  # measured duration of that operation
    percent_of_total: float  # share of the whole migration, 0-100
    severity: str  # one of "CRITICAL", "IMPORTANT", "MINOR"
    recommendation: str  # human-readable tuning advice
    potential_speedup: str  # estimated improvement, e.g. "2-3x"
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
@dataclass
class PerformanceOptimizationReport:
    """Bottleneck summary plus optimization advice for one migration."""

    migration_name: str
    bottlenecks: list[OperationMetrics]
    recommendations: list[PerformanceOptimizationRecommendation] = field(default_factory=list)

    def to_dict(self) -> dict[str, Any]:
        """Serialize the report to a JSON-friendly dictionary."""
        serialized_recs = [asdict(rec) for rec in self.recommendations]
        return {
            "migration_name": self.migration_name,
            "bottleneck_count": len(self.bottlenecks),
            "recommendations": serialized_recs,
        }
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
class MigrationPerformanceProfiler:
    """Profile database migration performance.

    Times a whole migration run plus any named sections the migration
    tracks via :meth:`track_section`, and produces a ``PerformanceProfile``
    with per-operation time percentages.
    """

    def __init__(self, db_connection: psycopg.Connection):
        self.connection = db_connection
        # Profile of the migration currently (or most recently) executed.
        self.current_profile: PerformanceProfile | None = None
        # Reserved for nested-section support; not used by current logic.
        self.section_stack: list[tuple[str, float]] = []

    def profile_migration(self, migration_name: str, execute_fn) -> PerformanceProfile:
        """Profile migration execution.

        Args:
            migration_name: Name of the migration
            execute_fn: Function to execute (receives profiler as argument)

        Returns:
            PerformanceProfile with detailed metrics
        """
        start_time = time.time()

        self.current_profile = PerformanceProfile(
            migration_name=migration_name,
            start_timestamp=start_time,
            end_timestamp=0.0,  # filled in below
            total_duration_seconds=0.0,  # filled in below
        )

        try:
            # Execute migration with profiling.
            execute_fn(self)
        finally:
            # Always close out timing and percentages — even when the
            # migration raises — so the partial profile stays usable.
            end_time = time.time()
            self.current_profile.end_timestamp = end_time
            self.current_profile.total_duration_seconds = end_time - start_time
            self._finalize_operations()

        return self.current_profile

    def track_section(self, section_name: str):
        """Context manager for tracking operation duration.

        Usage:
            with profiler.track_section("operation_name"):
                # Do work
                pass
        """
        return _SectionTracker(self, section_name)

    def record_operation(
        self,
        name: str,
        duration_seconds: float,
        memory_before_mb: float | None = None,
        memory_after_mb: float | None = None,
        io_operations: int | None = None,
    ):
        """Record an operation's metrics.

        Re-recording the same name overwrites the previous entry.

        Args:
            name: Operation name
            duration_seconds: Operation duration
            memory_before_mb: Memory before (optional)
            memory_after_mb: Memory after (optional)
            io_operations: Number of I/O ops (optional)
        """
        if self.current_profile is None:
            return  # no active profile; ignore stray recordings

        # BUGFIX: the operation has already finished when this is called
        # (e.g. from _SectionTracker.__exit__), so its window ends *now*
        # and began duration_seconds ago.  The previous code stamped
        # start=now and end=now+duration, placing the window in the future.
        end_time = time.time()
        self.current_profile.operations[name] = OperationMetrics(
            name=name,
            start_time=end_time - duration_seconds,
            end_time=end_time,
            duration_seconds=duration_seconds,
            percent_of_total=0.0,  # computed by _finalize_operations()
            memory_before_mb=memory_before_mb,
            memory_after_mb=memory_after_mb,
            io_operations=io_operations,
        )

    def _finalize_operations(self):
        """Compute each operation's share of total migration time."""
        if self.current_profile is None:
            return

        total = self.current_profile.total_duration_seconds
        if total <= 0:
            return  # avoid division by zero for instantaneous runs

        for operation in self.current_profile.operations.values():
            operation.percent_of_total = (operation.duration_seconds / total) * 100

    def get_profile(self) -> PerformanceProfile | None:
        """Return the current (or most recent) profile, if any."""
        return self.current_profile
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
class _SectionTracker:
    """Context manager that times a named section and reports it back."""

    def __init__(self, profiler: MigrationPerformanceProfiler, section_name: str):
        self.profiler = profiler
        self.section_name = section_name
        self.start_time = 0.0
        self.memory_before_mb: float | None = None
        self.memory_after_mb: float | None = None

    def __enter__(self):
        self.start_time = time.time()
        self.memory_before_mb = self._get_memory_usage_mb()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        stopped_at = time.time()
        elapsed = stopped_at - self.start_time
        self.memory_after_mb = self._get_memory_usage_mb()

        # Hand the measurements to the owning profiler.
        self.profiler.record_operation(
            name=self.section_name,
            duration_seconds=elapsed,
            memory_before_mb=self.memory_before_mb,
            memory_after_mb=self.memory_after_mb,
        )

    def _get_memory_usage_mb(self) -> float | None:
        """Current process RSS in MB, or None when psutil is unavailable."""
        try:
            import psutil  # type: ignore[import-untyped]
        except ImportError:
            return None
        return psutil.Process().memory_info().rss / 1024 / 1024
|
|
267
|
+
|
|
268
|
+
|
|
269
|
+
class PerformanceBaseline:
    """Baseline performance metrics for regression detection.

    Persists per-migration timing baselines to a JSON file and compares
    new profiles against them to flag slowdowns and suggest optimizations.
    """

    def __init__(self, baselines_file: Path):
        self.baselines_file = baselines_file
        # migration_name -> {"total_duration_seconds", "memory_peak_mb",
        #                    "operations": {op_name: duration_seconds}}
        self.baselines: dict[str, dict[str, Any]] = {}
        self._load_baselines()

    def _load_baselines(self):
        """Load baseline metrics from file (no-op when the file is absent)."""
        if self.baselines_file.exists():
            with open(self.baselines_file) as f:
                data = json.load(f)
            self.baselines = data.get("baselines", {})

    def _write_baselines(self, path: Path):
        """Write the in-memory baselines as JSON, creating parent dirs."""
        path.parent.mkdir(parents=True, exist_ok=True)
        with open(path, "w") as f:
            json.dump({"baselines": self.baselines}, f, indent=2)

    def save_baselines(self):
        """Save baseline metrics to the configured baselines file."""
        self._write_baselines(self.baselines_file)

    def set_baseline(self, migration_name: str, profile: PerformanceProfile):
        """Record *profile* as the reference metrics for a migration."""
        self.baselines[migration_name] = {
            "total_duration_seconds": profile.total_duration_seconds,
            "memory_peak_mb": profile.memory_peak_mb or 0.0,
            "operations": {name: op.duration_seconds for name, op in profile.operations.items()},
        }

    def detect_regression(
        self,
        current_profile: PerformanceProfile,
        threshold_pct: float = 20.0,
    ) -> RegressionReport:
        """Detect performance regressions.

        Args:
            current_profile: Current performance profile
            threshold_pct: Regression threshold percentage (default 20%)

        Returns:
            RegressionReport with detected regressions (empty when no
            baseline exists for this migration)
        """
        report = RegressionReport(migration_name=current_profile.migration_name)

        baseline = self.baselines.get(current_profile.migration_name)
        if not baseline:
            # No baseline to compare against.
            return report

        allowed_factor = 1.0 + threshold_pct / 100.0

        # Overall duration regression.  A zero/negative baseline is skipped:
        # it would otherwise divide by zero computing the percentage.
        baseline_total = baseline["total_duration_seconds"]
        current_total = current_profile.total_duration_seconds
        if baseline_total > 0 and current_total > baseline_total * allowed_factor:
            report.regressions.append(
                {
                    "type": "total_duration",
                    "operation": "Overall migration",
                    "baseline": baseline_total,
                    "current": current_total,
                    "regression_pct": ((current_total / baseline_total) - 1.0) * 100,
                }
            )

        # Per-operation regressions (only operations present in baseline).
        baseline_ops = baseline.get("operations", {})
        for op_name, op_metrics in current_profile.operations.items():
            baseline_duration = baseline_ops.get(op_name)
            if baseline_duration is None or baseline_duration <= 0:
                continue

            if op_metrics.duration_seconds > baseline_duration * allowed_factor:
                report.regressions.append(
                    {
                        "type": "operation_duration",
                        "operation": op_name,
                        "baseline": baseline_duration,
                        "current": op_metrics.duration_seconds,
                        "regression_pct": (
                            (op_metrics.duration_seconds / baseline_duration) - 1.0
                        )
                        * 100,
                    }
                )

        return report

    def generate_optimization_report(
        self,
        profile: PerformanceProfile,
    ) -> PerformanceOptimizationReport:
        """Generate optimization recommendations based on profile.

        Args:
            profile: Performance profile to analyze

        Returns:
            PerformanceOptimizationReport with recommendations
        """
        bottlenecks = profile.get_bottlenecks(threshold=0.05)
        report = PerformanceOptimizationReport(
            migration_name=profile.migration_name,
            bottlenecks=bottlenecks,
        )

        # One recommendation per bottleneck, when advice exists for its type.
        for bottleneck in bottlenecks:
            recommendation = self._generate_recommendation(bottleneck, profile)
            if recommendation:
                report.recommendations.append(recommendation)

        return report

    def _generate_recommendation(
        self,
        bottleneck: OperationMetrics,
        _profile: PerformanceProfile,
    ) -> PerformanceOptimizationRecommendation | None:
        """Generate an optimization recommendation for a bottleneck.

        Returns None for operation types without canned advice, or for
        operations too fast (<= 10ms) to be worth tuning.
        """
        if bottleneck.duration_seconds <= 0.01:
            return None

        operation_type = self._extract_operation_type(bottleneck.name)

        # (advice, estimated speedup) per recognized operation type.
        advice_by_type: dict[str, tuple[str, str]] = {
            "UPDATE": (
                "UPDATE operation is slow. Consider:\n"
                " - Use bulk update with WHERE clause\n"
                " - Add index on filter columns\n"
                " - Batch processing with LIMIT\n"
                " - Analyze query plan with EXPLAIN",
                "2-5x",
            ),
            "INSERT": (
                "INSERT operation is slow. Consider:\n"
                " - Use COPY command for bulk insert\n"
                " - Disable triggers during insert\n"
                " - Increase work_mem for sort operations\n"
                " - Batch insert in smaller chunks",
                "3-10x",
            ),
            "INDEX": (
                "Index creation is slow. Consider:\n"
                " - Create index CONCURRENTLY\n"
                " - Use FILLFACTOR for indexes on volatile tables\n"
                " - Create in parallel on replicas first\n"
                " - Consider partial index if possible",
                "1.5-3x",
            ),
        }
        entry = advice_by_type.get(operation_type)
        if entry is None:
            return None
        advice, speedup = entry

        # Only UPDATE escalates to CRITICAL when it dominates the migration.
        if operation_type == "UPDATE" and bottleneck.percent_of_total > 50:
            severity = "CRITICAL"
        else:
            severity = "IMPORTANT"

        return PerformanceOptimizationRecommendation(
            operation=bottleneck.name,
            current_duration_seconds=bottleneck.duration_seconds,
            percent_of_total=bottleneck.percent_of_total,
            severity=severity,
            recommendation=advice,
            potential_speedup=speedup,
        )

    def _extract_operation_type(self, operation_name: str) -> str:
        """Extract operation type from operation name.

        BUGFIX: "INDEX" is checked before "ALTER"/"CREATE" so that names
        like "CREATE INDEX ..." classify as index work; the previous order
        matched "CREATE" first, so index-specific advice never fired.
        """
        name_upper = operation_name.upper()

        for op_type in ("UPDATE", "INSERT", "DELETE", "INDEX", "ALTER", "CREATE"):
            if op_type in name_upper:
                return op_type

        return "UNKNOWN"

    def export_baseline(self, path: Path):
        """Export baselines to *path* (same JSON format as save_baselines)."""
        self._write_baselines(path)

    def export_comparison(self, profile: PerformanceProfile, path: Path):
        """Export a JSON comparison of *profile* against its baseline."""
        regression = self.detect_regression(profile)
        optimization = self.generate_optimization_report(profile)

        comparison = {
            "migration": profile.migration_name,
            "profile": profile.to_dict(),
            "regression": {
                "has_regressions": regression.has_regressions,
                "regressions": regression.regressions,
            },
            "optimization": optimization.to_dict(),
        }

        path.parent.mkdir(parents=True, exist_ok=True)
        with open(path, "w") as f:
            json.dump(comparison, f, indent=2)
|
|
File without changes
|