fraiseql-confiture 0.3.7__cp311-cp311-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- confiture/__init__.py +48 -0
- confiture/_core.cpython-311-darwin.so +0 -0
- confiture/cli/__init__.py +0 -0
- confiture/cli/dry_run.py +116 -0
- confiture/cli/lint_formatter.py +193 -0
- confiture/cli/main.py +1893 -0
- confiture/config/__init__.py +0 -0
- confiture/config/environment.py +263 -0
- confiture/core/__init__.py +51 -0
- confiture/core/anonymization/__init__.py +0 -0
- confiture/core/anonymization/audit.py +485 -0
- confiture/core/anonymization/benchmarking.py +372 -0
- confiture/core/anonymization/breach_notification.py +652 -0
- confiture/core/anonymization/compliance.py +617 -0
- confiture/core/anonymization/composer.py +298 -0
- confiture/core/anonymization/data_subject_rights.py +669 -0
- confiture/core/anonymization/factory.py +319 -0
- confiture/core/anonymization/governance.py +737 -0
- confiture/core/anonymization/performance.py +1092 -0
- confiture/core/anonymization/profile.py +284 -0
- confiture/core/anonymization/registry.py +195 -0
- confiture/core/anonymization/security/kms_manager.py +547 -0
- confiture/core/anonymization/security/lineage.py +888 -0
- confiture/core/anonymization/security/token_store.py +686 -0
- confiture/core/anonymization/strategies/__init__.py +41 -0
- confiture/core/anonymization/strategies/address.py +359 -0
- confiture/core/anonymization/strategies/credit_card.py +374 -0
- confiture/core/anonymization/strategies/custom.py +161 -0
- confiture/core/anonymization/strategies/date.py +218 -0
- confiture/core/anonymization/strategies/differential_privacy.py +398 -0
- confiture/core/anonymization/strategies/email.py +141 -0
- confiture/core/anonymization/strategies/format_preserving_encryption.py +310 -0
- confiture/core/anonymization/strategies/hash.py +150 -0
- confiture/core/anonymization/strategies/ip_address.py +235 -0
- confiture/core/anonymization/strategies/masking_retention.py +252 -0
- confiture/core/anonymization/strategies/name.py +298 -0
- confiture/core/anonymization/strategies/phone.py +119 -0
- confiture/core/anonymization/strategies/preserve.py +85 -0
- confiture/core/anonymization/strategies/redact.py +101 -0
- confiture/core/anonymization/strategies/salted_hashing.py +322 -0
- confiture/core/anonymization/strategies/text_redaction.py +183 -0
- confiture/core/anonymization/strategies/tokenization.py +334 -0
- confiture/core/anonymization/strategy.py +241 -0
- confiture/core/anonymization/syncer_audit.py +357 -0
- confiture/core/blue_green.py +683 -0
- confiture/core/builder.py +500 -0
- confiture/core/checksum.py +358 -0
- confiture/core/connection.py +184 -0
- confiture/core/differ.py +522 -0
- confiture/core/drift.py +564 -0
- confiture/core/dry_run.py +182 -0
- confiture/core/health.py +313 -0
- confiture/core/hooks/__init__.py +87 -0
- confiture/core/hooks/base.py +232 -0
- confiture/core/hooks/context.py +146 -0
- confiture/core/hooks/execution_strategies.py +57 -0
- confiture/core/hooks/observability.py +220 -0
- confiture/core/hooks/phases.py +53 -0
- confiture/core/hooks/registry.py +295 -0
- confiture/core/large_tables.py +775 -0
- confiture/core/linting/__init__.py +70 -0
- confiture/core/linting/composer.py +192 -0
- confiture/core/linting/libraries/__init__.py +17 -0
- confiture/core/linting/libraries/gdpr.py +168 -0
- confiture/core/linting/libraries/general.py +184 -0
- confiture/core/linting/libraries/hipaa.py +144 -0
- confiture/core/linting/libraries/pci_dss.py +104 -0
- confiture/core/linting/libraries/sox.py +120 -0
- confiture/core/linting/schema_linter.py +491 -0
- confiture/core/linting/versioning.py +151 -0
- confiture/core/locking.py +389 -0
- confiture/core/migration_generator.py +298 -0
- confiture/core/migrator.py +882 -0
- confiture/core/observability/__init__.py +44 -0
- confiture/core/observability/audit.py +323 -0
- confiture/core/observability/logging.py +187 -0
- confiture/core/observability/metrics.py +174 -0
- confiture/core/observability/tracing.py +192 -0
- confiture/core/pg_version.py +418 -0
- confiture/core/pool.py +406 -0
- confiture/core/risk/__init__.py +39 -0
- confiture/core/risk/predictor.py +188 -0
- confiture/core/risk/scoring.py +248 -0
- confiture/core/rollback_generator.py +388 -0
- confiture/core/schema_analyzer.py +769 -0
- confiture/core/schema_to_schema.py +590 -0
- confiture/core/security/__init__.py +32 -0
- confiture/core/security/logging.py +201 -0
- confiture/core/security/validation.py +416 -0
- confiture/core/signals.py +371 -0
- confiture/core/syncer.py +540 -0
- confiture/exceptions.py +192 -0
- confiture/integrations/__init__.py +0 -0
- confiture/models/__init__.py +24 -0
- confiture/models/lint.py +193 -0
- confiture/models/migration.py +265 -0
- confiture/models/schema.py +203 -0
- confiture/models/sql_file_migration.py +225 -0
- confiture/scenarios/__init__.py +36 -0
- confiture/scenarios/compliance.py +586 -0
- confiture/scenarios/ecommerce.py +199 -0
- confiture/scenarios/financial.py +253 -0
- confiture/scenarios/healthcare.py +315 -0
- confiture/scenarios/multi_tenant.py +340 -0
- confiture/scenarios/saas.py +295 -0
- confiture/testing/FRAMEWORK_API.md +722 -0
- confiture/testing/__init__.py +100 -0
- confiture/testing/fixtures/__init__.py +11 -0
- confiture/testing/fixtures/data_validator.py +229 -0
- confiture/testing/fixtures/migration_runner.py +167 -0
- confiture/testing/fixtures/schema_snapshotter.py +352 -0
- confiture/testing/frameworks/__init__.py +10 -0
- confiture/testing/frameworks/mutation.py +587 -0
- confiture/testing/frameworks/performance.py +479 -0
- confiture/testing/loader.py +225 -0
- confiture/testing/pytest/__init__.py +38 -0
- confiture/testing/pytest_plugin.py +190 -0
- confiture/testing/sandbox.py +304 -0
- confiture/testing/utils/__init__.py +0 -0
- fraiseql_confiture-0.3.7.dist-info/METADATA +438 -0
- fraiseql_confiture-0.3.7.dist-info/RECORD +124 -0
- fraiseql_confiture-0.3.7.dist-info/WHEEL +4 -0
- fraiseql_confiture-0.3.7.dist-info/entry_points.txt +4 -0
- fraiseql_confiture-0.3.7.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,372 @@
|
|
|
1
|
+
"""Performance benchmarking and profiling for anonymization strategies.
|
|
2
|
+
|
|
3
|
+
Provides:
|
|
4
|
+
- Timing and memory profiling for strategies
|
|
5
|
+
- Batch operation benchmarking
|
|
6
|
+
- Comparative performance analysis
|
|
7
|
+
- Performance regression detection
|
|
8
|
+
"""
|
|
9
|
+
|
|
10
|
+
import sys
|
|
11
|
+
import time
|
|
12
|
+
from collections.abc import Callable
|
|
13
|
+
from dataclasses import dataclass
|
|
14
|
+
from typing import Any
|
|
15
|
+
|
|
16
|
+
from confiture.core.anonymization.strategy import AnonymizationStrategy
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@dataclass
class BenchmarkResult:
    """Timing and memory figures collected from one benchmark run."""

    operation: str
    iterations: int
    total_time_ms: float
    avg_time_ms: float
    min_time_ms: float
    max_time_ms: float
    ops_per_second: float
    memory_estimate_kb: float

    def __str__(self) -> str:
        """Render the result as a single aligned, pipe-separated line."""
        columns = [
            f"{self.operation:30}",
            f"Iterations: {self.iterations:5}",
            f"Avg: {self.avg_time_ms:8.4f}ms",
            f"Min: {self.min_time_ms:8.4f}ms",
            f"Max: {self.max_time_ms:8.4f}ms",
            f"Ops/sec: {self.ops_per_second:8.1f}",
        ]
        return " | ".join(columns)
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
@dataclass
class ComparativeResult:
    """Pairwise comparison between a baseline and a candidate benchmark."""

    operation: str
    baseline: BenchmarkResult
    candidate: BenchmarkResult
    speedup: float
    regression: bool

    def __str__(self) -> str:
        """Render a one-line verdict with status flag and speedup factor."""
        if self.regression:
            status = "🔴 REGRESSION"
        else:
            status = "🟢 IMPROVEMENT"
        return f"{self.operation:30} | {status:20} | Speedup: {self.speedup:6.2f}x"
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
class Benchmarker:
    """Performance benchmarking for anonymization operations.

    Accumulates BenchmarkResult entries across runs; call get_summary()
    for a combined report.
    """

    def __init__(self, verbose: bool = False):
        """Initialize benchmarker.

        Args:
            verbose: Print detailed timing information after each run.
        """
        self.verbose = verbose
        self.results: list[BenchmarkResult] = []

    def benchmark_strategy(
        self, strategy: AnonymizationStrategy, test_values: list[Any], iterations: int = 1000
    ) -> BenchmarkResult:
        """Benchmark a single strategy.

        Args:
            strategy: Strategy to benchmark.
            test_values: Sample values to anonymize. Must be non-empty.
            iterations: Number of passes over ``test_values``.

        Returns:
            BenchmarkResult with timing information.

        Raises:
            ValueError: If ``test_values`` is empty (previously this
                surfaced as an opaque ZeroDivisionError).
        """
        if not test_values:
            raise ValueError("test_values must not be empty")

        times: list[float] = []

        # Warmup: absorb caches / lazy initialization so measured
        # iterations reflect steady-state cost.
        for value in test_values[: min(10, len(test_values))]:
            _ = strategy.anonymize(value)

        # Time each anonymize() call individually so min/max reflect
        # per-call variance rather than per-pass totals.
        for _ in range(iterations):
            for value in test_values:
                start = time.perf_counter()
                _ = strategy.anonymize(value)
                elapsed = (time.perf_counter() - start) * 1000  # Convert to ms
                times.append(elapsed)

        total_time_ms = sum(times)
        avg_time_ms = total_time_ms / len(times)
        # Guard against a zero average (trivial ops on a coarse timer).
        ops_per_second = 1000.0 / avg_time_ms if avg_time_ms > 0 else float("inf")

        result = BenchmarkResult(
            operation=f"{strategy.__class__.__name__}",
            iterations=len(times),
            total_time_ms=total_time_ms,
            avg_time_ms=avg_time_ms,
            min_time_ms=min(times),
            max_time_ms=max(times),
            ops_per_second=ops_per_second,
            # NOTE: sys.getsizeof is shallow (does not follow references),
            # so this is only a rough lower bound on memory use.
            memory_estimate_kb=sys.getsizeof(strategy) / 1024.0,
        )

        self.results.append(result)

        if self.verbose:
            print(result)

        return result

    def benchmark_batch_anonymization(
        self, anonymize_func: Callable[[list[dict]], list[dict]], batch_sizes: list[int]
    ) -> dict[int, BenchmarkResult]:
        """Benchmark batch anonymization at different sizes.

        Args:
            anonymize_func: Function that takes a list of dicts and returns
                the anonymized list.
            batch_sizes: List of batch sizes to test.

        Returns:
            Dictionary mapping batch size to BenchmarkResult.
        """
        results: dict[int, BenchmarkResult] = {}

        for batch_size in batch_sizes:
            # Synthetic records with a mix of field types.
            test_data = [
                {
                    "id": i,
                    "name": f"Person {i}",
                    "email": f"user{i}@example.com",
                    "age": 25 + (i % 50),
                }
                for i in range(batch_size)
            ]

            start = time.perf_counter()
            _ = anonymize_func(test_data)
            elapsed_ms = (time.perf_counter() - start) * 1000

            # Guard against zero elapsed time (tiny batches, coarse timers)
            # which previously raised ZeroDivisionError.
            ops_per_second = (
                1000.0 / elapsed_ms * batch_size if elapsed_ms > 0 else float("inf")
            )

            result = BenchmarkResult(
                operation=f"Batch Anonymization (size={batch_size})",
                iterations=1,
                total_time_ms=elapsed_ms,
                avg_time_ms=elapsed_ms,
                min_time_ms=elapsed_ms,
                max_time_ms=elapsed_ms,
                ops_per_second=ops_per_second,
                # Shallow size of the outer list only; contained dicts
                # are not counted.
                memory_estimate_kb=sys.getsizeof(test_data) / 1024.0,
            )

            results[batch_size] = result

            if self.verbose:
                print(result)

        return results

    def compare_performance(
        self,
        baseline: BenchmarkResult,
        candidate: BenchmarkResult,
        regression_threshold: float = 0.95,
    ) -> ComparativeResult:
        """Compare performance between two benchmark results.

        Args:
            baseline: Baseline benchmark result.
            candidate: Candidate benchmark result to compare.
            regression_threshold: Speedup threshold below which the
                candidate is considered a regression.

        Returns:
            ComparativeResult with comparison analysis.
        """
        # A zero candidate time means "immeasurably fast": report infinite
        # speedup rather than raising ZeroDivisionError.
        if candidate.avg_time_ms > 0:
            speedup = baseline.avg_time_ms / candidate.avg_time_ms
        else:
            speedup = float("inf")

        return ComparativeResult(
            operation=baseline.operation,
            baseline=baseline,
            candidate=candidate,
            speedup=speedup,
            regression=speedup < regression_threshold,
        )

    def get_summary(self) -> str:
        """Get summary of all benchmark results.

        Returns:
            Formatted summary string, slowest operation first.
        """
        if not self.results:
            return "No results to summarize"

        summary = "BENCHMARK SUMMARY\n"
        summary += "=" * 120 + "\n"

        for result in sorted(self.results, key=lambda r: r.avg_time_ms, reverse=True):
            summary += str(result) + "\n"

        summary += "=" * 120 + "\n"

        fastest = min(self.results, key=lambda r: r.avg_time_ms)
        slowest = max(self.results, key=lambda r: r.avg_time_ms)

        summary += f"\nFastest: {fastest.operation:40} ({fastest.avg_time_ms:.4f}ms)\n"
        summary += f"Slowest: {slowest.operation:40} ({slowest.avg_time_ms:.4f}ms)\n"
        # Avoid ZeroDivisionError when the fastest operation measured 0ms.
        if fastest.avg_time_ms > 0:
            summary += f"Ratio: {slowest.avg_time_ms / fastest.avg_time_ms:.2f}x\n"
        else:
            summary += "Ratio: n/a (fastest operation measured 0ms)\n"

        return summary
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
class PerformanceTracker:
    """Track and report performance metrics across multiple operations."""

    def __init__(self):
        """Initialize the tracker with an empty timing store."""
        self.timings: dict[str, list[float]] = {}

    def record(self, operation: str, elapsed_ms: float) -> None:
        """Record a timing measurement.

        Args:
            operation: Name of operation.
            elapsed_ms: Time elapsed in milliseconds.
        """
        self.timings.setdefault(operation, []).append(elapsed_ms)

    def get_stats(self, operation: str) -> dict[str, float]:
        """Get statistics for an operation.

        Args:
            operation: Name of operation.

        Returns:
            Dictionary with timing statistics; empty dict if the
            operation was never recorded.
        """
        samples = self.timings.get(operation)
        if samples is None:
            return {}

        count = len(samples)
        total = sum(samples)
        return {
            "count": count,
            "total_ms": total,
            "avg_ms": total / count,
            "min_ms": min(samples),
            "max_ms": max(samples),
        }

    def get_report(self) -> str:
        """Get performance report.

        Returns:
            Formatted performance report covering every recorded operation.
        """
        lines = [
            "PERFORMANCE REPORT\n",
            "=" * 100 + "\n",
            f"{'Operation':<40} {'Count':<10} {'Total (ms)':<15} {'Avg (ms)':<15} {'Min (ms)':<15}\n",
            "-" * 100 + "\n",
        ]

        for name in sorted(self.timings):
            stats = self.get_stats(name)
            lines.append(
                f"{name:<40} "
                f"{stats['count']:<10} "
                f"{stats['total_ms']:<15.2f} "
                f"{stats['avg_ms']:<15.4f} "
                f"{stats['min_ms']:<15.4f}\n"
            )

        lines.append("=" * 100 + "\n")
        return "".join(lines)
|
|
288
|
+
|
|
289
|
+
|
|
290
|
+
class ScalabilityTester:
    """Test scalability and performance with varying data sizes."""

    def __init__(self, anonymize_func: Callable[[dict], dict]):
        """Initialize tester.

        Args:
            anonymize_func: Function that anonymizes a single record.
        """
        self.anonymize_func = anonymize_func

    def test_scaling(self, field_count_range: tuple[int, int], step: int = 10) -> dict[int, float]:
        """Test anonymization performance as number of fields increases.

        Args:
            field_count_range: (min_fields, max_fields) tuple, inclusive
                on both ends.
            step: Step size for field count.

        Returns:
            Dictionary mapping field count to average time per call in ms.
        """
        results: dict[int, float] = {}

        for field_count in range(field_count_range[0], field_count_range[1] + 1, step):
            # Create a test record with the requested number of string fields.
            test_record = {f"field_{i}": f"value_{i}" for i in range(field_count)}

            # 100 repetitions; seconds * 1000 ms / 100 calls == * 10
            # yields the average per-call time in milliseconds.
            start = time.perf_counter()
            for _ in range(100):
                _ = self.anonymize_func(test_record)
            results[field_count] = (time.perf_counter() - start) * 10

        return results

    def analyze_complexity(self, scaling_results: dict[int, float]) -> str:
        """Analyze computational complexity from scaling results.

        Args:
            scaling_results: Results from test_scaling().

        Returns:
            Analysis string describing complexity pattern.
        """
        if len(scaling_results) < 2:
            return "Insufficient data for complexity analysis"

        items = sorted(scaling_results.items())
        ratios = []

        for i in range(1, len(items)):
            prev_fields, prev_time = items[i - 1]
            curr_fields, curr_time = items[i]
            # Guard BOTH denominators: the original guarded field_ratio
            # but divided by prev_time unguarded, so a zero timing (trivial
            # function, coarse timer) raised ZeroDivisionError.
            if prev_fields > 0 and prev_time > 0:
                field_ratio = curr_fields / prev_fields
                time_ratio = curr_time / prev_time
                ratios.append(time_ratio / field_ratio)
            else:
                ratios.append(0)

        avg_ratio = sum(ratios) / len(ratios) if ratios else 0

        # Classify complexity by how fast time grows relative to field count.
        if avg_ratio < 1.05:
            complexity = "O(1) - Constant"
        elif avg_ratio < 1.15:
            complexity = "O(log n) - Logarithmic"
        elif avg_ratio < 1.3:
            complexity = "O(n) - Linear"
        elif avg_ratio < 1.5:
            complexity = "O(n log n) - Linearithmic"
        else:
            complexity = "O(n²) or higher - Quadratic or worse"

        analysis = "Complexity Analysis\n"
        analysis += f"{'Field Count':<15} {'Time (ms)':<15} {'Trend':<20}\n"
        analysis += "-" * 50 + "\n"

        for field_count, time_ms in items:
            analysis += f"{field_count:<15} {time_ms:<15.4f}\n"

        analysis += "-" * 50 + "\n"
        analysis += f"Estimated Complexity: {complexity}\n"
        analysis += f"Average Growth Ratio: {avg_ratio:.3f}x per field\n"

        return analysis
|