truthound-dashboard 1.3.1__py3-none-any.whl → 1.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- truthound_dashboard/api/alerts.py +258 -0
- truthound_dashboard/api/anomaly.py +1302 -0
- truthound_dashboard/api/cross_alerts.py +352 -0
- truthound_dashboard/api/deps.py +143 -0
- truthound_dashboard/api/drift_monitor.py +540 -0
- truthound_dashboard/api/lineage.py +1151 -0
- truthound_dashboard/api/maintenance.py +363 -0
- truthound_dashboard/api/middleware.py +373 -1
- truthound_dashboard/api/model_monitoring.py +805 -0
- truthound_dashboard/api/notifications_advanced.py +2452 -0
- truthound_dashboard/api/plugins.py +2096 -0
- truthound_dashboard/api/profile.py +211 -14
- truthound_dashboard/api/reports.py +853 -0
- truthound_dashboard/api/router.py +147 -0
- truthound_dashboard/api/rule_suggestions.py +310 -0
- truthound_dashboard/api/schema_evolution.py +231 -0
- truthound_dashboard/api/sources.py +47 -3
- truthound_dashboard/api/triggers.py +190 -0
- truthound_dashboard/api/validations.py +13 -0
- truthound_dashboard/api/validators.py +333 -4
- truthound_dashboard/api/versioning.py +309 -0
- truthound_dashboard/api/websocket.py +301 -0
- truthound_dashboard/core/__init__.py +27 -0
- truthound_dashboard/core/anomaly.py +1395 -0
- truthound_dashboard/core/anomaly_explainer.py +633 -0
- truthound_dashboard/core/cache.py +206 -0
- truthound_dashboard/core/cached_services.py +422 -0
- truthound_dashboard/core/charts.py +352 -0
- truthound_dashboard/core/connections.py +1069 -42
- truthound_dashboard/core/cross_alerts.py +837 -0
- truthound_dashboard/core/drift_monitor.py +1477 -0
- truthound_dashboard/core/drift_sampling.py +669 -0
- truthound_dashboard/core/i18n/__init__.py +42 -0
- truthound_dashboard/core/i18n/detector.py +173 -0
- truthound_dashboard/core/i18n/messages.py +564 -0
- truthound_dashboard/core/lineage.py +971 -0
- truthound_dashboard/core/maintenance.py +443 -5
- truthound_dashboard/core/model_monitoring.py +1043 -0
- truthound_dashboard/core/notifications/channels.py +1020 -1
- truthound_dashboard/core/notifications/deduplication/__init__.py +143 -0
- truthound_dashboard/core/notifications/deduplication/policies.py +274 -0
- truthound_dashboard/core/notifications/deduplication/service.py +400 -0
- truthound_dashboard/core/notifications/deduplication/stores.py +2365 -0
- truthound_dashboard/core/notifications/deduplication/strategies.py +422 -0
- truthound_dashboard/core/notifications/dispatcher.py +43 -0
- truthound_dashboard/core/notifications/escalation/__init__.py +149 -0
- truthound_dashboard/core/notifications/escalation/backends.py +1384 -0
- truthound_dashboard/core/notifications/escalation/engine.py +429 -0
- truthound_dashboard/core/notifications/escalation/models.py +336 -0
- truthound_dashboard/core/notifications/escalation/scheduler.py +1187 -0
- truthound_dashboard/core/notifications/escalation/state_machine.py +330 -0
- truthound_dashboard/core/notifications/escalation/stores.py +2896 -0
- truthound_dashboard/core/notifications/events.py +49 -0
- truthound_dashboard/core/notifications/metrics/__init__.py +115 -0
- truthound_dashboard/core/notifications/metrics/base.py +528 -0
- truthound_dashboard/core/notifications/metrics/collectors.py +583 -0
- truthound_dashboard/core/notifications/routing/__init__.py +169 -0
- truthound_dashboard/core/notifications/routing/combinators.py +184 -0
- truthound_dashboard/core/notifications/routing/config.py +375 -0
- truthound_dashboard/core/notifications/routing/config_parser.py +867 -0
- truthound_dashboard/core/notifications/routing/engine.py +382 -0
- truthound_dashboard/core/notifications/routing/expression_engine.py +1269 -0
- truthound_dashboard/core/notifications/routing/jinja2_engine.py +774 -0
- truthound_dashboard/core/notifications/routing/rules.py +625 -0
- truthound_dashboard/core/notifications/routing/validator.py +678 -0
- truthound_dashboard/core/notifications/service.py +2 -0
- truthound_dashboard/core/notifications/stats_aggregator.py +850 -0
- truthound_dashboard/core/notifications/throttling/__init__.py +83 -0
- truthound_dashboard/core/notifications/throttling/builder.py +311 -0
- truthound_dashboard/core/notifications/throttling/stores.py +1859 -0
- truthound_dashboard/core/notifications/throttling/throttlers.py +633 -0
- truthound_dashboard/core/openlineage.py +1028 -0
- truthound_dashboard/core/plugins/__init__.py +39 -0
- truthound_dashboard/core/plugins/docs/__init__.py +39 -0
- truthound_dashboard/core/plugins/docs/extractor.py +703 -0
- truthound_dashboard/core/plugins/docs/renderers.py +804 -0
- truthound_dashboard/core/plugins/hooks/__init__.py +63 -0
- truthound_dashboard/core/plugins/hooks/decorators.py +367 -0
- truthound_dashboard/core/plugins/hooks/manager.py +403 -0
- truthound_dashboard/core/plugins/hooks/protocols.py +265 -0
- truthound_dashboard/core/plugins/lifecycle/__init__.py +41 -0
- truthound_dashboard/core/plugins/lifecycle/hot_reload.py +584 -0
- truthound_dashboard/core/plugins/lifecycle/machine.py +419 -0
- truthound_dashboard/core/plugins/lifecycle/states.py +266 -0
- truthound_dashboard/core/plugins/loader.py +504 -0
- truthound_dashboard/core/plugins/registry.py +810 -0
- truthound_dashboard/core/plugins/reporter_executor.py +588 -0
- truthound_dashboard/core/plugins/sandbox/__init__.py +59 -0
- truthound_dashboard/core/plugins/sandbox/code_validator.py +243 -0
- truthound_dashboard/core/plugins/sandbox/engines.py +770 -0
- truthound_dashboard/core/plugins/sandbox/protocols.py +194 -0
- truthound_dashboard/core/plugins/sandbox.py +617 -0
- truthound_dashboard/core/plugins/security/__init__.py +68 -0
- truthound_dashboard/core/plugins/security/analyzer.py +535 -0
- truthound_dashboard/core/plugins/security/policies.py +311 -0
- truthound_dashboard/core/plugins/security/protocols.py +296 -0
- truthound_dashboard/core/plugins/security/signing.py +842 -0
- truthound_dashboard/core/plugins/security.py +446 -0
- truthound_dashboard/core/plugins/validator_executor.py +401 -0
- truthound_dashboard/core/plugins/versioning/__init__.py +51 -0
- truthound_dashboard/core/plugins/versioning/constraints.py +377 -0
- truthound_dashboard/core/plugins/versioning/dependencies.py +541 -0
- truthound_dashboard/core/plugins/versioning/semver.py +266 -0
- truthound_dashboard/core/profile_comparison.py +601 -0
- truthound_dashboard/core/report_history.py +570 -0
- truthound_dashboard/core/reporters/__init__.py +57 -0
- truthound_dashboard/core/reporters/base.py +296 -0
- truthound_dashboard/core/reporters/csv_reporter.py +155 -0
- truthound_dashboard/core/reporters/html_reporter.py +598 -0
- truthound_dashboard/core/reporters/i18n/__init__.py +65 -0
- truthound_dashboard/core/reporters/i18n/base.py +494 -0
- truthound_dashboard/core/reporters/i18n/catalogs.py +930 -0
- truthound_dashboard/core/reporters/json_reporter.py +160 -0
- truthound_dashboard/core/reporters/junit_reporter.py +233 -0
- truthound_dashboard/core/reporters/markdown_reporter.py +207 -0
- truthound_dashboard/core/reporters/pdf_reporter.py +209 -0
- truthound_dashboard/core/reporters/registry.py +272 -0
- truthound_dashboard/core/rule_generator.py +2088 -0
- truthound_dashboard/core/scheduler.py +822 -12
- truthound_dashboard/core/schema_evolution.py +858 -0
- truthound_dashboard/core/services.py +152 -9
- truthound_dashboard/core/statistics.py +718 -0
- truthound_dashboard/core/streaming_anomaly.py +883 -0
- truthound_dashboard/core/triggers/__init__.py +45 -0
- truthound_dashboard/core/triggers/base.py +226 -0
- truthound_dashboard/core/triggers/evaluators.py +609 -0
- truthound_dashboard/core/triggers/factory.py +363 -0
- truthound_dashboard/core/unified_alerts.py +870 -0
- truthound_dashboard/core/validation_limits.py +509 -0
- truthound_dashboard/core/versioning.py +709 -0
- truthound_dashboard/core/websocket/__init__.py +59 -0
- truthound_dashboard/core/websocket/manager.py +512 -0
- truthound_dashboard/core/websocket/messages.py +130 -0
- truthound_dashboard/db/__init__.py +30 -0
- truthound_dashboard/db/models.py +3375 -3
- truthound_dashboard/main.py +22 -0
- truthound_dashboard/schemas/__init__.py +396 -1
- truthound_dashboard/schemas/anomaly.py +1258 -0
- truthound_dashboard/schemas/base.py +4 -0
- truthound_dashboard/schemas/cross_alerts.py +334 -0
- truthound_dashboard/schemas/drift_monitor.py +890 -0
- truthound_dashboard/schemas/lineage.py +428 -0
- truthound_dashboard/schemas/maintenance.py +154 -0
- truthound_dashboard/schemas/model_monitoring.py +374 -0
- truthound_dashboard/schemas/notifications_advanced.py +1363 -0
- truthound_dashboard/schemas/openlineage.py +704 -0
- truthound_dashboard/schemas/plugins.py +1293 -0
- truthound_dashboard/schemas/profile.py +420 -34
- truthound_dashboard/schemas/profile_comparison.py +242 -0
- truthound_dashboard/schemas/reports.py +285 -0
- truthound_dashboard/schemas/rule_suggestion.py +434 -0
- truthound_dashboard/schemas/schema_evolution.py +164 -0
- truthound_dashboard/schemas/source.py +117 -2
- truthound_dashboard/schemas/triggers.py +511 -0
- truthound_dashboard/schemas/unified_alerts.py +223 -0
- truthound_dashboard/schemas/validation.py +25 -1
- truthound_dashboard/schemas/validators/__init__.py +11 -0
- truthound_dashboard/schemas/validators/base.py +151 -0
- truthound_dashboard/schemas/versioning.py +152 -0
- truthound_dashboard/static/index.html +2 -2
- {truthound_dashboard-1.3.1.dist-info → truthound_dashboard-1.4.0.dist-info}/METADATA +142 -22
- truthound_dashboard-1.4.0.dist-info/RECORD +239 -0
- truthound_dashboard/static/assets/index-BZG20KuF.js +0 -586
- truthound_dashboard/static/assets/index-D_HyZ3pb.css +0 -1
- truthound_dashboard/static/assets/unmerged_dictionaries-CtpqQBm0.js +0 -1
- truthound_dashboard-1.3.1.dist-info/RECORD +0 -110
- {truthound_dashboard-1.3.1.dist-info → truthound_dashboard-1.4.0.dist-info}/WHEEL +0 -0
- {truthound_dashboard-1.3.1.dist-info → truthound_dashboard-1.4.0.dist-info}/entry_points.txt +0 -0
- {truthound_dashboard-1.3.1.dist-info → truthound_dashboard-1.4.0.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,850 @@
|
|
|
1
|
+
"""Stats aggregation service for notification subsystems.
|
|
2
|
+
|
|
3
|
+
This module provides efficient stats aggregation using SQLAlchemy
|
|
4
|
+
aggregate queries instead of fetching all records. Includes caching
|
|
5
|
+
layer for frequently accessed statistics.
|
|
6
|
+
|
|
7
|
+
The StatsAggregator follows the Repository pattern with caching
|
|
8
|
+
to optimize database queries for stats endpoints.
|
|
9
|
+
|
|
10
|
+
Example:
|
|
11
|
+
aggregator = StatsAggregator(session)
|
|
12
|
+
|
|
13
|
+
# Get escalation stats with time range filter
|
|
14
|
+
stats = await aggregator.get_escalation_stats(
|
|
15
|
+
start_time=datetime(2024, 1, 1),
|
|
16
|
+
end_time=datetime(2024, 12, 31),
|
|
17
|
+
)
|
|
18
|
+
|
|
19
|
+
# Get deduplication stats with caching (default 30s TTL)
|
|
20
|
+
stats = await aggregator.get_deduplication_stats(use_cache=True)
|
|
21
|
+
"""
|
|
22
|
+
|
|
23
|
+
from __future__ import annotations
|
|
24
|
+
|
|
25
|
+
import asyncio
|
|
26
|
+
import hashlib
|
|
27
|
+
import logging
|
|
28
|
+
from dataclasses import dataclass, field
|
|
29
|
+
from datetime import datetime, timedelta
|
|
30
|
+
from enum import Enum
|
|
31
|
+
from typing import Any, Generic, TypeVar
|
|
32
|
+
|
|
33
|
+
from sqlalchemy import func, select
|
|
34
|
+
from sqlalchemy.ext.asyncio import AsyncSession
|
|
35
|
+
|
|
36
|
+
from ...db.models import (
|
|
37
|
+
DeduplicationConfig,
|
|
38
|
+
EscalationIncidentModel,
|
|
39
|
+
EscalationPolicyModel,
|
|
40
|
+
EscalationStateEnum,
|
|
41
|
+
ThrottlingConfig,
|
|
42
|
+
)
|
|
43
|
+
|
|
44
|
+
logger = logging.getLogger(__name__)

# Generic payload type parameter used by CacheEntry.
T = TypeVar("T")


class CacheStrategy(str, Enum):
    """Cache strategy options.

    Subclasses ``str`` so members compare equal to their raw string
    values and serialize naturally.
    """

    NONE = "none"      # caching disabled
    MEMORY = "memory"  # simple in-memory TTL cache
    LFU = "lfu"        # least-frequently-used (no LFU eviction visible here — TODO confirm)


@dataclass
class CacheEntry(Generic[T]):
    """Cache entry with TTL.

    Attributes:
        value: Cached value.
        expires_at: Expiration timestamp (naive UTC, via ``datetime.utcnow``).
        created_at: Creation timestamp (naive UTC).
        hit_count: Number of cache hits recorded for this entry.
    """

    value: T
    expires_at: datetime
    created_at: datetime = field(default_factory=datetime.utcnow)
    hit_count: int = 0

    @property
    def is_expired(self) -> bool:
        """Check if entry is expired (now >= expires_at)."""
        return datetime.utcnow() >= self.expires_at

    @property
    def remaining_ttl_seconds(self) -> float:
        """Get remaining TTL in seconds (floored at zero)."""
        delta = self.expires_at - datetime.utcnow()
        return max(0, delta.total_seconds())


class StatsCache:
    """Async-safe in-memory cache for stats with TTL.

    Provides configurable caching with support for:
    - TTL-based expiration (lazy removal on read)
    - Capacity bound with expired-first eviction
    - Pattern-based invalidation
    - Cache statistics

    All access is serialized through a single ``asyncio.Lock``, so the
    cache is safe for concurrent coroutines within one event loop.
    """

    def __init__(
        self,
        default_ttl_seconds: int = 30,
        max_entries: int = 100,
    ) -> None:
        """Initialize stats cache.

        Args:
            default_ttl_seconds: Default TTL for cache entries.
            max_entries: Maximum number of cache entries.
        """
        self._cache: dict[str, CacheEntry[Any]] = {}
        self._lock = asyncio.Lock()
        self._default_ttl = default_ttl_seconds
        self._max_entries = max_entries
        self._total_hits = 0
        self._total_misses = 0

    async def get(self, key: str) -> Any | None:
        """Get value from cache.

        Expired entries are removed lazily on access and counted as
        misses.

        Args:
            key: Cache key.

        Returns:
            Cached value or None if not found/expired.
        """
        async with self._lock:
            entry = self._cache.get(key)
            if entry is None:
                self._total_misses += 1
                return None

            if entry.is_expired:
                del self._cache[key]
                self._total_misses += 1
                return None

            entry.hit_count += 1
            self._total_hits += 1
            return entry.value

    async def set(
        self,
        key: str,
        value: Any,
        ttl_seconds: int | None = None,
    ) -> None:
        """Set value in cache.

        Args:
            key: Cache key.
            value: Value to cache.
            ttl_seconds: TTL in seconds. Uses default if None.
        """
        ttl = ttl_seconds if ttl_seconds is not None else self._default_ttl
        expires_at = datetime.utcnow() + timedelta(seconds=ttl)

        async with self._lock:
            # Only a brand-new key can grow the dict, so only then evict.
            if len(self._cache) >= self._max_entries and key not in self._cache:
                await self._evict_oldest_unlocked()

            self._cache[key] = CacheEntry(
                value=value,
                expires_at=expires_at,
            )

    async def _evict_oldest_unlocked(self) -> None:
        """Free cache space (must be called with lock held).

        Fix over the previous implementation: expired entries are purged
        first, so a live entry is never evicted while stale ones remain.
        Only when no entry has expired is the oldest entry (by creation
        time) dropped.
        """
        if not self._cache:
            return

        expired_keys = [k for k, e in self._cache.items() if e.is_expired]
        if expired_keys:
            for k in expired_keys:
                del self._cache[k]
            return

        oldest_key = min(
            self._cache.keys(),
            key=lambda k: self._cache[k].created_at,
        )
        del self._cache[oldest_key]

    async def invalidate(self, key: str) -> bool:
        """Invalidate a specific key.

        Args:
            key: Cache key to invalidate.

        Returns:
            True if key was invalidated.
        """
        async with self._lock:
            if key in self._cache:
                del self._cache[key]
                return True
            return False

    async def invalidate_pattern(self, prefix: str) -> int:
        """Invalidate all keys with given prefix.

        Args:
            prefix: Key prefix to match.

        Returns:
            Number of keys invalidated.
        """
        async with self._lock:
            keys_to_remove = [k for k in self._cache if k.startswith(prefix)]
            for key in keys_to_remove:
                del self._cache[key]
            return len(keys_to_remove)

    async def clear(self) -> None:
        """Clear all cache entries and reset hit/miss counters."""
        async with self._lock:
            self._cache.clear()
            self._total_hits = 0
            self._total_misses = 0

    async def get_stats(self) -> dict[str, Any]:
        """Get cache statistics.

        Returns:
            Dictionary with entry counts, capacity, TTL, and hit rate.
        """
        async with self._lock:
            valid_entries = sum(1 for e in self._cache.values() if not e.is_expired)
            total = self._total_hits + self._total_misses
            hit_rate = self._total_hits / total if total > 0 else 0.0

            return {
                "total_entries": len(self._cache),
                "valid_entries": valid_entries,
                "expired_entries": len(self._cache) - valid_entries,
                "max_entries": self._max_entries,
                "default_ttl_seconds": self._default_ttl,
                "total_hits": self._total_hits,
                "total_misses": self._total_misses,
                "hit_rate": hit_rate,
            }
|
|
231
|
+
|
|
232
|
+
|
|
233
|
+
# Global stats cache singleton
_stats_cache: StatsCache | None = None


def get_stats_cache(
    default_ttl_seconds: int = 30,
    max_entries: int = 100,
) -> StatsCache:
    """Return the process-wide stats cache, creating it on first call.

    Note:
        The arguments are honored only when the singleton is first
        created; subsequent calls return the existing instance unchanged.

    Args:
        default_ttl_seconds: Default TTL for new cache.
        max_entries: Maximum entries for new cache.

    Returns:
        StatsCache instance.
    """
    global _stats_cache
    if _stats_cache is not None:
        return _stats_cache
    _stats_cache = StatsCache(
        default_ttl_seconds=default_ttl_seconds,
        max_entries=max_entries,
    )
    return _stats_cache
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
def reset_stats_cache() -> None:
    """Drop the module-level cache singleton (test helper).

    The next ``get_stats_cache()`` call builds a fresh cache.
    """
    global _stats_cache
    _stats_cache = None
|
|
263
|
+
|
|
264
|
+
|
|
265
|
+
@dataclass
class TimeRange:
    """Half-open time window [start_time, end_time) for stats queries.

    Attributes:
        start_time: Inclusive lower bound, or None for unbounded.
        end_time: Exclusive upper bound, or None for unbounded.
    """

    start_time: datetime | None = None
    end_time: datetime | None = None

    def to_cache_key_part(self) -> str:
        """Render the range as a deterministic cache-key fragment."""
        bounds = (
            self.start_time.isoformat() if self.start_time else "none",
            self.end_time.isoformat() if self.end_time else "none",
        )
        return "_".join(bounds)
|
|
282
|
+
|
|
283
|
+
|
|
284
|
+
@dataclass
class EscalationStatsResult:
    """Aggregated escalation statistics for one stats query.

    Attributes:
        total_incidents: Total number of incidents matching the filter.
        by_state: Incident count keyed by state value.
        active_count: Count of incidents in any non-resolved state.
        total_policies: Total escalation policies (not time-filtered).
        avg_resolution_time_seconds: Mean resolution time in seconds for
            resolved incidents, or None when unavailable.
        time_range: Time range filter applied, if any.
        cached: Whether result was served from cache.
        cached_at: When result was cached (None if never cached).
    """

    total_incidents: int
    by_state: dict[str, int]
    active_count: int
    total_policies: int
    avg_resolution_time_seconds: float | None
    time_range: TimeRange | None = None
    cached: bool = False
    cached_at: datetime | None = None
|
|
307
|
+
|
|
308
|
+
|
|
309
|
+
@dataclass
class DeduplicationStatsResult:
    """Aggregated deduplication-configuration statistics.

    Attributes:
        total_configs: Total deduplication configs matching the filter.
        active_configs: Count of configs with ``is_active`` set.
        by_strategy: Config count keyed by strategy value.
        by_policy: Config count keyed by policy value.
        avg_window_seconds: Average window duration (presumably seconds,
            per the field name — TODO confirm against the config model).
        time_range: Time range filter applied, if any.
        cached: Whether result was served from cache.
        cached_at: When result was cached (None if never cached).
    """

    total_configs: int
    active_configs: int
    by_strategy: dict[str, int]
    by_policy: dict[str, int]
    avg_window_seconds: float
    time_range: TimeRange | None = None
    cached: bool = False
    cached_at: datetime | None = None
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
@dataclass
class ThrottlingStatsResult:
    """Aggregated throttling-configuration statistics.

    Attributes:
        total_configs: Total throttling configs matching the filter.
        active_configs: Active configs count.
        configs_with_per_minute: Configs that define a per-minute limit.
        configs_with_per_hour: Configs that define a per-hour limit.
        configs_with_per_day: Configs that define a per-day limit.
        avg_burst_allowance: Average burst allowance across configs.
        time_range: Time range filter applied, if any.
        cached: Whether result was served from cache.
        cached_at: When result was cached (None if never cached).
    """

    total_configs: int
    active_configs: int
    configs_with_per_minute: int
    configs_with_per_hour: int
    configs_with_per_day: int
    avg_burst_allowance: float
    time_range: TimeRange | None = None
    cached: bool = False
    cached_at: datetime | None = None
|
|
359
|
+
|
|
360
|
+
|
|
361
|
+
class StatsAggregator:
|
|
362
|
+
"""Efficient stats aggregation service with caching.
|
|
363
|
+
|
|
364
|
+
Uses SQLAlchemy aggregate queries (COUNT, AVG, GROUP BY) instead
|
|
365
|
+
of fetching all records. Includes optional caching layer.
|
|
366
|
+
|
|
367
|
+
Example:
|
|
368
|
+
aggregator = StatsAggregator(session, cache_ttl_seconds=60)
|
|
369
|
+
|
|
370
|
+
# Get stats with caching
|
|
371
|
+
stats = await aggregator.get_escalation_stats(use_cache=True)
|
|
372
|
+
|
|
373
|
+
# Get stats with time range filter
|
|
374
|
+
stats = await aggregator.get_escalation_stats(
|
|
375
|
+
time_range=TimeRange(
|
|
376
|
+
start_time=datetime(2024, 1, 1),
|
|
377
|
+
end_time=datetime(2024, 6, 30),
|
|
378
|
+
)
|
|
379
|
+
)
|
|
380
|
+
"""
|
|
381
|
+
|
|
382
|
+
def __init__(
|
|
383
|
+
self,
|
|
384
|
+
session: AsyncSession,
|
|
385
|
+
cache: StatsCache | None = None,
|
|
386
|
+
cache_ttl_seconds: int = 30,
|
|
387
|
+
) -> None:
|
|
388
|
+
"""Initialize stats aggregator.
|
|
389
|
+
|
|
390
|
+
Args:
|
|
391
|
+
session: Database session.
|
|
392
|
+
cache: Stats cache instance. Uses global singleton if None.
|
|
393
|
+
cache_ttl_seconds: Default cache TTL in seconds.
|
|
394
|
+
"""
|
|
395
|
+
self._session = session
|
|
396
|
+
self._cache = cache or get_stats_cache()
|
|
397
|
+
self._cache_ttl = cache_ttl_seconds
|
|
398
|
+
|
|
399
|
+
def _generate_cache_key(
|
|
400
|
+
self,
|
|
401
|
+
prefix: str,
|
|
402
|
+
time_range: TimeRange | None = None,
|
|
403
|
+
**kwargs: Any,
|
|
404
|
+
) -> str:
|
|
405
|
+
"""Generate cache key for stats query.
|
|
406
|
+
|
|
407
|
+
Args:
|
|
408
|
+
prefix: Key prefix (e.g., "escalation_stats").
|
|
409
|
+
time_range: Optional time range filter.
|
|
410
|
+
**kwargs: Additional key components.
|
|
411
|
+
|
|
412
|
+
Returns:
|
|
413
|
+
Cache key string.
|
|
414
|
+
"""
|
|
415
|
+
parts = [prefix]
|
|
416
|
+
|
|
417
|
+
if time_range:
|
|
418
|
+
parts.append(time_range.to_cache_key_part())
|
|
419
|
+
else:
|
|
420
|
+
parts.append("all_time")
|
|
421
|
+
|
|
422
|
+
for key, value in sorted(kwargs.items()):
|
|
423
|
+
parts.append(f"{key}={value}")
|
|
424
|
+
|
|
425
|
+
key_string = ":".join(parts)
|
|
426
|
+
# Use hash for long keys
|
|
427
|
+
if len(key_string) > 100:
|
|
428
|
+
key_hash = hashlib.sha256(key_string.encode()).hexdigest()[:16]
|
|
429
|
+
return f"{prefix}:{key_hash}"
|
|
430
|
+
return key_string
|
|
431
|
+
|
|
432
|
+
# =========================================================================
|
|
433
|
+
# Escalation Stats
|
|
434
|
+
# =========================================================================
|
|
435
|
+
|
|
436
|
+
async def get_escalation_stats(
|
|
437
|
+
self,
|
|
438
|
+
time_range: TimeRange | None = None,
|
|
439
|
+
use_cache: bool = True,
|
|
440
|
+
cache_ttl_seconds: int | None = None,
|
|
441
|
+
) -> EscalationStatsResult:
|
|
442
|
+
"""Get escalation statistics using efficient aggregate queries.
|
|
443
|
+
|
|
444
|
+
Args:
|
|
445
|
+
time_range: Optional time range filter.
|
|
446
|
+
use_cache: Whether to use caching.
|
|
447
|
+
cache_ttl_seconds: Cache TTL override.
|
|
448
|
+
|
|
449
|
+
Returns:
|
|
450
|
+
EscalationStatsResult with aggregated statistics.
|
|
451
|
+
"""
|
|
452
|
+
cache_key = self._generate_cache_key("escalation_stats", time_range)
|
|
453
|
+
ttl = cache_ttl_seconds if cache_ttl_seconds is not None else self._cache_ttl
|
|
454
|
+
|
|
455
|
+
# Try cache first
|
|
456
|
+
if use_cache:
|
|
457
|
+
cached = await self._cache.get(cache_key)
|
|
458
|
+
if cached is not None:
|
|
459
|
+
cached.cached = True
|
|
460
|
+
return cached
|
|
461
|
+
|
|
462
|
+
# Build base query with time range filter
|
|
463
|
+
base_query = select(EscalationIncidentModel)
|
|
464
|
+
if time_range:
|
|
465
|
+
if time_range.start_time:
|
|
466
|
+
base_query = base_query.where(
|
|
467
|
+
EscalationIncidentModel.created_at >= time_range.start_time
|
|
468
|
+
)
|
|
469
|
+
if time_range.end_time:
|
|
470
|
+
base_query = base_query.where(
|
|
471
|
+
EscalationIncidentModel.created_at < time_range.end_time
|
|
472
|
+
)
|
|
473
|
+
|
|
474
|
+
# Query 1: Total count
|
|
475
|
+
count_query = select(func.count(EscalationIncidentModel.id))
|
|
476
|
+
if time_range:
|
|
477
|
+
if time_range.start_time:
|
|
478
|
+
count_query = count_query.where(
|
|
479
|
+
EscalationIncidentModel.created_at >= time_range.start_time
|
|
480
|
+
)
|
|
481
|
+
if time_range.end_time:
|
|
482
|
+
count_query = count_query.where(
|
|
483
|
+
EscalationIncidentModel.created_at < time_range.end_time
|
|
484
|
+
)
|
|
485
|
+
result = await self._session.execute(count_query)
|
|
486
|
+
total_incidents = result.scalar() or 0
|
|
487
|
+
|
|
488
|
+
# Query 2: Count by state (GROUP BY)
|
|
489
|
+
state_count_query = select(
|
|
490
|
+
EscalationIncidentModel.state,
|
|
491
|
+
func.count(EscalationIncidentModel.id).label("count"),
|
|
492
|
+
).group_by(EscalationIncidentModel.state)
|
|
493
|
+
if time_range:
|
|
494
|
+
if time_range.start_time:
|
|
495
|
+
state_count_query = state_count_query.where(
|
|
496
|
+
EscalationIncidentModel.created_at >= time_range.start_time
|
|
497
|
+
)
|
|
498
|
+
if time_range.end_time:
|
|
499
|
+
state_count_query = state_count_query.where(
|
|
500
|
+
EscalationIncidentModel.created_at < time_range.end_time
|
|
501
|
+
)
|
|
502
|
+
result = await self._session.execute(state_count_query)
|
|
503
|
+
by_state: dict[str, int] = {}
|
|
504
|
+
active_count = 0
|
|
505
|
+
for row in result:
|
|
506
|
+
state = row.state
|
|
507
|
+
count = row.count
|
|
508
|
+
by_state[state] = count
|
|
509
|
+
if state != EscalationStateEnum.RESOLVED.value:
|
|
510
|
+
active_count += count
|
|
511
|
+
|
|
512
|
+
# Query 3: Average resolution time for resolved incidents
|
|
513
|
+
avg_resolution_query = select(
|
|
514
|
+
func.avg(
|
|
515
|
+
func.julianday(EscalationIncidentModel.resolved_at)
|
|
516
|
+
- func.julianday(EscalationIncidentModel.created_at)
|
|
517
|
+
).label("avg_days")
|
|
518
|
+
).where(
|
|
519
|
+
EscalationIncidentModel.state == EscalationStateEnum.RESOLVED.value,
|
|
520
|
+
EscalationIncidentModel.resolved_at.isnot(None),
|
|
521
|
+
)
|
|
522
|
+
if time_range:
|
|
523
|
+
if time_range.start_time:
|
|
524
|
+
avg_resolution_query = avg_resolution_query.where(
|
|
525
|
+
EscalationIncidentModel.created_at >= time_range.start_time
|
|
526
|
+
)
|
|
527
|
+
if time_range.end_time:
|
|
528
|
+
avg_resolution_query = avg_resolution_query.where(
|
|
529
|
+
EscalationIncidentModel.created_at < time_range.end_time
|
|
530
|
+
)
|
|
531
|
+
result = await self._session.execute(avg_resolution_query)
|
|
532
|
+
avg_days = result.scalar()
|
|
533
|
+
# Convert days to seconds
|
|
534
|
+
avg_resolution_seconds = avg_days * 86400 if avg_days else None
|
|
535
|
+
|
|
536
|
+
# Query 4: Total policies count
|
|
537
|
+
policies_count_query = select(func.count(EscalationPolicyModel.id))
|
|
538
|
+
result = await self._session.execute(policies_count_query)
|
|
539
|
+
total_policies = result.scalar() or 0
|
|
540
|
+
|
|
541
|
+
# Build result
|
|
542
|
+
stats_result = EscalationStatsResult(
|
|
543
|
+
total_incidents=total_incidents,
|
|
544
|
+
by_state=by_state,
|
|
545
|
+
active_count=active_count,
|
|
546
|
+
total_policies=total_policies,
|
|
547
|
+
avg_resolution_time_seconds=avg_resolution_seconds,
|
|
548
|
+
time_range=time_range,
|
|
549
|
+
cached=False,
|
|
550
|
+
cached_at=None,
|
|
551
|
+
)
|
|
552
|
+
|
|
553
|
+
# Cache result
|
|
554
|
+
if use_cache:
|
|
555
|
+
stats_result.cached_at = datetime.utcnow()
|
|
556
|
+
await self._cache.set(cache_key, stats_result, ttl)
|
|
557
|
+
|
|
558
|
+
return stats_result
|
|
559
|
+
|
|
560
|
+
# =========================================================================
|
|
561
|
+
# Deduplication Stats
|
|
562
|
+
# =========================================================================
|
|
563
|
+
|
|
564
|
+
async def get_deduplication_stats(
    self,
    time_range: TimeRange | None = None,
    use_cache: bool = True,
    cache_ttl_seconds: int | None = None,
) -> DeduplicationStatsResult:
    """Get deduplication configuration statistics.

    Args:
        time_range: Optional time range filter applied to ``created_at``
            (start inclusive, end exclusive).
        use_cache: Whether to use caching.
        cache_ttl_seconds: Cache TTL override.

    Returns:
        DeduplicationStatsResult with aggregated statistics.
    """
    # Local import keeps the fix self-contained; ``case`` is needed for the
    # portable conditional count below.
    from sqlalchemy import case

    cache_key = self._generate_cache_key("deduplication_stats", time_range)
    ttl = cache_ttl_seconds if cache_ttl_seconds is not None else self._cache_ttl

    # Try cache first
    if use_cache:
        cached = await self._cache.get(cache_key)
        if cached is not None:
            cached.cached = True
            return cached

    def _apply_time_range(query):
        # Apply the optional created_at window (start inclusive, end
        # exclusive) -- previously duplicated verbatim for every query.
        if time_range:
            if time_range.start_time:
                query = query.where(
                    DeduplicationConfig.created_at >= time_range.start_time
                )
            if time_range.end_time:
                query = query.where(
                    DeduplicationConfig.created_at < time_range.end_time
                )
        return query

    # Query 1: Total and active count.
    # BUGFIX: the previous version used ``func.cast(expr, func.Integer)``.
    # ``func`` generates *generic* SQL functions, so that rendered invalid
    # SQL (``cast(x, Integer())``). A CASE expression counts active rows
    # portably instead.
    count_query = _apply_time_range(
        select(
            func.count(DeduplicationConfig.id).label("total"),
            func.sum(
                case((DeduplicationConfig.is_active.is_(True), 1), else_=0)
            ).label("active"),
        )
    )
    result = await self._session.execute(count_query)
    row = result.first()
    total_configs = row.total if row else 0
    active_configs = int(row.active or 0) if row else 0

    # Query 2: Count by strategy (GROUP BY)
    strategy_query = _apply_time_range(
        select(
            DeduplicationConfig.strategy,
            func.count(DeduplicationConfig.id).label("count"),
        ).group_by(DeduplicationConfig.strategy)
    )
    result = await self._session.execute(strategy_query)
    by_strategy = {row.strategy: row.count for row in result}

    # Query 3: Count by policy (GROUP BY)
    policy_query = _apply_time_range(
        select(
            DeduplicationConfig.policy,
            func.count(DeduplicationConfig.id).label("count"),
        ).group_by(DeduplicationConfig.policy)
    )
    result = await self._session.execute(policy_query)
    by_policy = {row.policy: row.count for row in result}

    # Query 4: Average window seconds
    avg_window_query = _apply_time_range(
        select(func.avg(DeduplicationConfig.window_seconds).label("avg_window"))
    )
    result = await self._session.execute(avg_window_query)
    avg_window = result.scalar() or 0.0

    # Build result
    stats_result = DeduplicationStatsResult(
        total_configs=total_configs,
        active_configs=active_configs,
        by_strategy=by_strategy,
        by_policy=by_policy,
        avg_window_seconds=float(avg_window),
        time_range=time_range,
        cached=False,
        cached_at=None,
    )

    # Cache result
    if use_cache:
        stats_result.cached_at = datetime.utcnow()
        await self._cache.set(cache_key, stats_result, ttl)

    return stats_result
|
|
679
|
+
|
|
680
|
+
# =========================================================================
|
|
681
|
+
# Throttling Stats
|
|
682
|
+
# =========================================================================
|
|
683
|
+
|
|
684
|
+
async def get_throttling_stats(
    self,
    time_range: TimeRange | None = None,
    use_cache: bool = True,
    cache_ttl_seconds: int | None = None,
) -> ThrottlingStatsResult:
    """Get throttling configuration statistics.

    Args:
        time_range: Optional time range filter applied to ``created_at``
            (start inclusive, end exclusive).
        use_cache: Whether to use caching.
        cache_ttl_seconds: Cache TTL override.

    Returns:
        ThrottlingStatsResult with aggregated statistics.
    """
    # Local import keeps the fix self-contained; ``case`` builds the
    # portable conditional counts below.
    from sqlalchemy import case

    cache_key = self._generate_cache_key("throttling_stats", time_range)
    ttl = cache_ttl_seconds if cache_ttl_seconds is not None else self._cache_ttl

    # Try cache first
    if use_cache:
        cached = await self._cache.get(cache_key)
        if cached is not None:
            cached.cached = True
            return cached

    def _flag_count(condition):
        # SUM(CASE WHEN condition THEN 1 ELSE 0 END): a portable
        # conditional count.
        return func.sum(case((condition, 1), else_=0))

    # Query 1: Total, active, and limit counts.
    # BUGFIX: the previous version used ``func.cast(expr, func.Integer)``
    # for each flag count. ``func`` generates *generic* SQL functions, so
    # that rendered invalid SQL (``cast(x, Integer())``); CASE expressions
    # are used instead.
    count_query = select(
        func.count(ThrottlingConfig.id).label("total"),
        _flag_count(ThrottlingConfig.is_active.is_(True)).label("active"),
        _flag_count(ThrottlingConfig.per_minute.isnot(None)).label(
            "with_per_minute"
        ),
        _flag_count(ThrottlingConfig.per_hour.isnot(None)).label("with_per_hour"),
        _flag_count(ThrottlingConfig.per_day.isnot(None)).label("with_per_day"),
        func.avg(ThrottlingConfig.burst_allowance).label("avg_burst"),
    )
    if time_range:
        if time_range.start_time:
            count_query = count_query.where(
                ThrottlingConfig.created_at >= time_range.start_time
            )
        if time_range.end_time:
            count_query = count_query.where(
                ThrottlingConfig.created_at < time_range.end_time
            )

    result = await self._session.execute(count_query)
    row = result.first()

    total_configs = row.total if row else 0
    active_configs = int(row.active or 0) if row else 0
    configs_with_per_minute = int(row.with_per_minute or 0) if row else 0
    configs_with_per_hour = int(row.with_per_hour or 0) if row else 0
    configs_with_per_day = int(row.with_per_day or 0) if row else 0
    avg_burst = float(row.avg_burst or 0.0) if row else 0.0

    # Build result
    stats_result = ThrottlingStatsResult(
        total_configs=total_configs,
        active_configs=active_configs,
        configs_with_per_minute=configs_with_per_minute,
        configs_with_per_hour=configs_with_per_hour,
        configs_with_per_day=configs_with_per_day,
        avg_burst_allowance=avg_burst,
        time_range=time_range,
        cached=False,
        cached_at=None,
    )

    # Cache result
    if use_cache:
        stats_result.cached_at = datetime.utcnow()
        await self._cache.set(cache_key, stats_result, ttl)

    return stats_result
|
|
766
|
+
|
|
767
|
+
# =========================================================================
|
|
768
|
+
# Batch Aggregation
|
|
769
|
+
# =========================================================================
|
|
770
|
+
|
|
771
|
+
async def get_all_stats(
    self,
    time_range: TimeRange | None = None,
    use_cache: bool = True,
) -> dict[str, Any]:
    """Get all notification stats in a single call.

    Args:
        time_range: Optional time range filter.
        use_cache: Whether to use caching.

    Returns:
        Dictionary with all stats results.
    """
    # BUGFIX: the previous version ran the three stat coroutines with
    # ``asyncio.gather``, but they all issue queries on the shared
    # ``self._session`` -- SQLAlchemy's AsyncSession is not safe for
    # concurrent use and raises when operations interleave. The stats are
    # therefore awaited sequentially (cache hits skip the session anyway).
    escalation_stats = await self.get_escalation_stats(time_range, use_cache)
    deduplication_stats = await self.get_deduplication_stats(time_range, use_cache)
    throttling_stats = await self.get_throttling_stats(time_range, use_cache)

    return {
        "escalation": escalation_stats,
        "deduplication": deduplication_stats,
        "throttling": throttling_stats,
        "time_range": {
            "start_time": (
                time_range.start_time.isoformat()
                if time_range and time_range.start_time
                else None
            ),
            "end_time": (
                time_range.end_time.isoformat()
                if time_range and time_range.end_time
                else None
            ),
        },
    }
|
|
811
|
+
|
|
812
|
+
# =========================================================================
|
|
813
|
+
# Cache Management
|
|
814
|
+
# =========================================================================
|
|
815
|
+
|
|
816
|
+
async def invalidate_escalation_cache(self) -> int:
    """Drop every cached escalation-stats entry.

    Returns:
        Number of entries invalidated.
    """
    removed = await self._cache.invalidate_pattern("escalation_stats")
    return removed
|
|
823
|
+
|
|
824
|
+
async def invalidate_deduplication_cache(self) -> int:
    """Drop every cached deduplication-stats entry.

    Returns:
        Number of entries invalidated.
    """
    removed = await self._cache.invalidate_pattern("deduplication_stats")
    return removed
|
|
831
|
+
|
|
832
|
+
async def invalidate_throttling_cache(self) -> int:
    """Drop every cached throttling-stats entry.

    Returns:
        Number of entries invalidated.
    """
    removed = await self._cache.invalidate_pattern("throttling_stats")
    return removed
|
|
839
|
+
|
|
840
|
+
async def invalidate_all_cache(self) -> None:
    """Flush the entire stats cache in one sweep."""
    cache = self._cache
    await cache.clear()
|
|
843
|
+
|
|
844
|
+
async def get_cache_stats(self) -> dict[str, Any]:
    """Report statistics about the stats cache itself.

    Returns:
        Dictionary with cache statistics.
    """
    stats = await self._cache.get_stats()
    return stats
|