truthound-dashboard 1.3.1__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- truthound_dashboard/api/alerts.py +258 -0
- truthound_dashboard/api/anomaly.py +1302 -0
- truthound_dashboard/api/cross_alerts.py +352 -0
- truthound_dashboard/api/deps.py +143 -0
- truthound_dashboard/api/drift_monitor.py +540 -0
- truthound_dashboard/api/lineage.py +1151 -0
- truthound_dashboard/api/maintenance.py +363 -0
- truthound_dashboard/api/middleware.py +373 -1
- truthound_dashboard/api/model_monitoring.py +805 -0
- truthound_dashboard/api/notifications_advanced.py +2452 -0
- truthound_dashboard/api/plugins.py +2096 -0
- truthound_dashboard/api/profile.py +211 -14
- truthound_dashboard/api/reports.py +853 -0
- truthound_dashboard/api/router.py +147 -0
- truthound_dashboard/api/rule_suggestions.py +310 -0
- truthound_dashboard/api/schema_evolution.py +231 -0
- truthound_dashboard/api/sources.py +47 -3
- truthound_dashboard/api/triggers.py +190 -0
- truthound_dashboard/api/validations.py +13 -0
- truthound_dashboard/api/validators.py +333 -4
- truthound_dashboard/api/versioning.py +309 -0
- truthound_dashboard/api/websocket.py +301 -0
- truthound_dashboard/core/__init__.py +27 -0
- truthound_dashboard/core/anomaly.py +1395 -0
- truthound_dashboard/core/anomaly_explainer.py +633 -0
- truthound_dashboard/core/cache.py +206 -0
- truthound_dashboard/core/cached_services.py +422 -0
- truthound_dashboard/core/charts.py +352 -0
- truthound_dashboard/core/connections.py +1069 -42
- truthound_dashboard/core/cross_alerts.py +837 -0
- truthound_dashboard/core/drift_monitor.py +1477 -0
- truthound_dashboard/core/drift_sampling.py +669 -0
- truthound_dashboard/core/i18n/__init__.py +42 -0
- truthound_dashboard/core/i18n/detector.py +173 -0
- truthound_dashboard/core/i18n/messages.py +564 -0
- truthound_dashboard/core/lineage.py +971 -0
- truthound_dashboard/core/maintenance.py +443 -5
- truthound_dashboard/core/model_monitoring.py +1043 -0
- truthound_dashboard/core/notifications/channels.py +1020 -1
- truthound_dashboard/core/notifications/deduplication/__init__.py +143 -0
- truthound_dashboard/core/notifications/deduplication/policies.py +274 -0
- truthound_dashboard/core/notifications/deduplication/service.py +400 -0
- truthound_dashboard/core/notifications/deduplication/stores.py +2365 -0
- truthound_dashboard/core/notifications/deduplication/strategies.py +422 -0
- truthound_dashboard/core/notifications/dispatcher.py +43 -0
- truthound_dashboard/core/notifications/escalation/__init__.py +149 -0
- truthound_dashboard/core/notifications/escalation/backends.py +1384 -0
- truthound_dashboard/core/notifications/escalation/engine.py +429 -0
- truthound_dashboard/core/notifications/escalation/models.py +336 -0
- truthound_dashboard/core/notifications/escalation/scheduler.py +1187 -0
- truthound_dashboard/core/notifications/escalation/state_machine.py +330 -0
- truthound_dashboard/core/notifications/escalation/stores.py +2896 -0
- truthound_dashboard/core/notifications/events.py +49 -0
- truthound_dashboard/core/notifications/metrics/__init__.py +115 -0
- truthound_dashboard/core/notifications/metrics/base.py +528 -0
- truthound_dashboard/core/notifications/metrics/collectors.py +583 -0
- truthound_dashboard/core/notifications/routing/__init__.py +169 -0
- truthound_dashboard/core/notifications/routing/combinators.py +184 -0
- truthound_dashboard/core/notifications/routing/config.py +375 -0
- truthound_dashboard/core/notifications/routing/config_parser.py +867 -0
- truthound_dashboard/core/notifications/routing/engine.py +382 -0
- truthound_dashboard/core/notifications/routing/expression_engine.py +1269 -0
- truthound_dashboard/core/notifications/routing/jinja2_engine.py +774 -0
- truthound_dashboard/core/notifications/routing/rules.py +625 -0
- truthound_dashboard/core/notifications/routing/validator.py +678 -0
- truthound_dashboard/core/notifications/service.py +2 -0
- truthound_dashboard/core/notifications/stats_aggregator.py +850 -0
- truthound_dashboard/core/notifications/throttling/__init__.py +83 -0
- truthound_dashboard/core/notifications/throttling/builder.py +311 -0
- truthound_dashboard/core/notifications/throttling/stores.py +1859 -0
- truthound_dashboard/core/notifications/throttling/throttlers.py +633 -0
- truthound_dashboard/core/openlineage.py +1028 -0
- truthound_dashboard/core/plugins/__init__.py +39 -0
- truthound_dashboard/core/plugins/docs/__init__.py +39 -0
- truthound_dashboard/core/plugins/docs/extractor.py +703 -0
- truthound_dashboard/core/plugins/docs/renderers.py +804 -0
- truthound_dashboard/core/plugins/hooks/__init__.py +63 -0
- truthound_dashboard/core/plugins/hooks/decorators.py +367 -0
- truthound_dashboard/core/plugins/hooks/manager.py +403 -0
- truthound_dashboard/core/plugins/hooks/protocols.py +265 -0
- truthound_dashboard/core/plugins/lifecycle/__init__.py +41 -0
- truthound_dashboard/core/plugins/lifecycle/hot_reload.py +584 -0
- truthound_dashboard/core/plugins/lifecycle/machine.py +419 -0
- truthound_dashboard/core/plugins/lifecycle/states.py +266 -0
- truthound_dashboard/core/plugins/loader.py +504 -0
- truthound_dashboard/core/plugins/registry.py +810 -0
- truthound_dashboard/core/plugins/reporter_executor.py +588 -0
- truthound_dashboard/core/plugins/sandbox/__init__.py +59 -0
- truthound_dashboard/core/plugins/sandbox/code_validator.py +243 -0
- truthound_dashboard/core/plugins/sandbox/engines.py +770 -0
- truthound_dashboard/core/plugins/sandbox/protocols.py +194 -0
- truthound_dashboard/core/plugins/sandbox.py +617 -0
- truthound_dashboard/core/plugins/security/__init__.py +68 -0
- truthound_dashboard/core/plugins/security/analyzer.py +535 -0
- truthound_dashboard/core/plugins/security/policies.py +311 -0
- truthound_dashboard/core/plugins/security/protocols.py +296 -0
- truthound_dashboard/core/plugins/security/signing.py +842 -0
- truthound_dashboard/core/plugins/security.py +446 -0
- truthound_dashboard/core/plugins/validator_executor.py +401 -0
- truthound_dashboard/core/plugins/versioning/__init__.py +51 -0
- truthound_dashboard/core/plugins/versioning/constraints.py +377 -0
- truthound_dashboard/core/plugins/versioning/dependencies.py +541 -0
- truthound_dashboard/core/plugins/versioning/semver.py +266 -0
- truthound_dashboard/core/profile_comparison.py +601 -0
- truthound_dashboard/core/report_history.py +570 -0
- truthound_dashboard/core/reporters/__init__.py +57 -0
- truthound_dashboard/core/reporters/base.py +296 -0
- truthound_dashboard/core/reporters/csv_reporter.py +155 -0
- truthound_dashboard/core/reporters/html_reporter.py +598 -0
- truthound_dashboard/core/reporters/i18n/__init__.py +65 -0
- truthound_dashboard/core/reporters/i18n/base.py +494 -0
- truthound_dashboard/core/reporters/i18n/catalogs.py +930 -0
- truthound_dashboard/core/reporters/json_reporter.py +160 -0
- truthound_dashboard/core/reporters/junit_reporter.py +233 -0
- truthound_dashboard/core/reporters/markdown_reporter.py +207 -0
- truthound_dashboard/core/reporters/pdf_reporter.py +209 -0
- truthound_dashboard/core/reporters/registry.py +272 -0
- truthound_dashboard/core/rule_generator.py +2088 -0
- truthound_dashboard/core/scheduler.py +822 -12
- truthound_dashboard/core/schema_evolution.py +858 -0
- truthound_dashboard/core/services.py +152 -9
- truthound_dashboard/core/statistics.py +718 -0
- truthound_dashboard/core/streaming_anomaly.py +883 -0
- truthound_dashboard/core/triggers/__init__.py +45 -0
- truthound_dashboard/core/triggers/base.py +226 -0
- truthound_dashboard/core/triggers/evaluators.py +609 -0
- truthound_dashboard/core/triggers/factory.py +363 -0
- truthound_dashboard/core/unified_alerts.py +870 -0
- truthound_dashboard/core/validation_limits.py +509 -0
- truthound_dashboard/core/versioning.py +709 -0
- truthound_dashboard/core/websocket/__init__.py +59 -0
- truthound_dashboard/core/websocket/manager.py +512 -0
- truthound_dashboard/core/websocket/messages.py +130 -0
- truthound_dashboard/db/__init__.py +30 -0
- truthound_dashboard/db/models.py +3375 -3
- truthound_dashboard/main.py +22 -0
- truthound_dashboard/schemas/__init__.py +396 -1
- truthound_dashboard/schemas/anomaly.py +1258 -0
- truthound_dashboard/schemas/base.py +4 -0
- truthound_dashboard/schemas/cross_alerts.py +334 -0
- truthound_dashboard/schemas/drift_monitor.py +890 -0
- truthound_dashboard/schemas/lineage.py +428 -0
- truthound_dashboard/schemas/maintenance.py +154 -0
- truthound_dashboard/schemas/model_monitoring.py +374 -0
- truthound_dashboard/schemas/notifications_advanced.py +1363 -0
- truthound_dashboard/schemas/openlineage.py +704 -0
- truthound_dashboard/schemas/plugins.py +1293 -0
- truthound_dashboard/schemas/profile.py +420 -34
- truthound_dashboard/schemas/profile_comparison.py +242 -0
- truthound_dashboard/schemas/reports.py +285 -0
- truthound_dashboard/schemas/rule_suggestion.py +434 -0
- truthound_dashboard/schemas/schema_evolution.py +164 -0
- truthound_dashboard/schemas/source.py +117 -2
- truthound_dashboard/schemas/triggers.py +511 -0
- truthound_dashboard/schemas/unified_alerts.py +223 -0
- truthound_dashboard/schemas/validation.py +25 -1
- truthound_dashboard/schemas/validators/__init__.py +11 -0
- truthound_dashboard/schemas/validators/base.py +151 -0
- truthound_dashboard/schemas/versioning.py +152 -0
- truthound_dashboard/static/index.html +2 -2
- {truthound_dashboard-1.3.1.dist-info → truthound_dashboard-1.4.1.dist-info}/METADATA +147 -23
- truthound_dashboard-1.4.1.dist-info/RECORD +239 -0
- truthound_dashboard/static/assets/index-BZG20KuF.js +0 -586
- truthound_dashboard/static/assets/index-D_HyZ3pb.css +0 -1
- truthound_dashboard/static/assets/unmerged_dictionaries-CtpqQBm0.js +0 -1
- truthound_dashboard-1.3.1.dist-info/RECORD +0 -110
- {truthound_dashboard-1.3.1.dist-info → truthound_dashboard-1.4.1.dist-info}/WHEEL +0 -0
- {truthound_dashboard-1.3.1.dist-info → truthound_dashboard-1.4.1.dist-info}/entry_points.txt +0 -0
- {truthound_dashboard-1.3.1.dist-info → truthound_dashboard-1.4.1.dist-info}/licenses/LICENSE +0 -0
truthound_dashboard/api/notifications_advanced.py
@@ -0,0 +1,2452 @@
"""Advanced notification management API endpoints.

This module provides REST API endpoints for managing advanced notification
features: routing rules, deduplication, throttling, and escalation.

Endpoints:
    Routing Rules:
        GET /notifications/routing/rules - List routing rules
        POST /notifications/routing/rules - Create routing rule
        GET /notifications/routing/rules/{id} - Get routing rule
        PUT /notifications/routing/rules/{id} - Update routing rule
        DELETE /notifications/routing/rules/{id} - Delete routing rule
        GET /notifications/routing/rules/types - Get available rule types (incl. combinators)
        POST /notifications/routing/rules/validate - Validate rule configuration

    Deduplication:
        GET /notifications/deduplication/configs - List configs
        POST /notifications/deduplication/configs - Create config
        GET /notifications/deduplication/configs/{id} - Get config
        PUT /notifications/deduplication/configs/{id} - Update config
        DELETE /notifications/deduplication/configs/{id} - Delete config
        GET /notifications/deduplication/stats - Get statistics

    Throttling:
        GET /notifications/throttling/configs - List configs
        POST /notifications/throttling/configs - Create config
        GET /notifications/throttling/configs/{id} - Get config
        PUT /notifications/throttling/configs/{id} - Update config
        DELETE /notifications/throttling/configs/{id} - Delete config
        GET /notifications/throttling/stats - Get statistics

    Escalation Policies:
        GET /notifications/escalation/policies - List policies
        POST /notifications/escalation/policies - Create policy
        GET /notifications/escalation/policies/{id} - Get policy
        PUT /notifications/escalation/policies/{id} - Update policy
        DELETE /notifications/escalation/policies/{id} - Delete policy

    Escalation Incidents:
        GET /notifications/escalation/incidents - List incidents
        GET /notifications/escalation/incidents/active - List active only
        GET /notifications/escalation/incidents/{id} - Get incident
        POST /notifications/escalation/incidents/{id}/acknowledge - Acknowledge
        POST /notifications/escalation/incidents/{id}/resolve - Resolve
        GET /notifications/escalation/stats - Get statistics
"""

from __future__ import annotations

import asyncio
import logging
from datetime import datetime
from typing import Any

from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, Query
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession

from ..api.deps import get_session
from .websocket import notify_incident_state_changed
from ..core.notifications.metrics.collectors import (
    DeduplicationMetrics,
    EscalationMetrics,
    ThrottlingMetrics,
)
from ..core.notifications.routing.rules import RuleRegistry
from ..core.notifications.routing.validator import (
    RuleValidationConfig,
    RuleValidator,
    ValidationErrorType,
)
from ..core.notifications.stats_aggregator import (
    StatsAggregator,
    TimeRange,
    get_stats_cache,
)
from ..db.models import (
    DeduplicationConfig,
    EscalationIncidentModel,
    EscalationPolicyModel,
    EscalationStateEnum,
    RoutingRuleModel,
    ThrottlingConfig,
)
from ..schemas.notifications_advanced import (
    AcknowledgeRequest,
    CacheInfo,
    CombinatorType,
    ConfigExportRequest,
    ConfigImportConflict,
    ConfigImportPreview,
    ConfigImportRequest,
    ConfigImportResult,
    DeduplicationConfigCreate,
    DeduplicationConfigListResponse,
    DeduplicationConfigResponse,
    DeduplicationConfigUpdate,
    DeduplicationStats,
    DeduplicationStatsEnhanced,
    EscalationEventBase,
    EscalationIncidentListResponse,
    EscalationIncidentResponse,
    EscalationPolicyCreate,
    EscalationPolicyListResponse,
    EscalationPolicyResponse,
    EscalationPolicyUpdate,
    EscalationSchedulerAction,
    EscalationSchedulerConfigRequest,
    EscalationSchedulerStatus,
    EscalationStats,
    EscalationStatsEnhanced,
    ExpressionValidateRequest,
    ExpressionValidateResponse,
    NestedRuleConfig,
    NotificationConfigBundle,
    ResolveRequest,
    RoutingRuleCreate,
    RoutingRuleListResponse,
    RoutingRuleResponse,
    RoutingRuleUpdate,
    RuleTypeInfo,
    RuleTypesResponse,
    RuleValidationResult,
    StatsCacheStatus,
    ThrottlingConfigCreate,
    ThrottlingConfigListResponse,
    ThrottlingConfigResponse,
    ThrottlingConfigUpdate,
    ThrottlingStats,
    ThrottlingStatsEnhanced,
    TimeRangeFilter,
    TriggerCheckResponse,
)

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/notifications")

# Global metrics instances
_dedup_metrics = DeduplicationMetrics()
_throttle_metrics = ThrottlingMetrics()
_escalation_metrics = EscalationMetrics()


def get_dedup_metrics() -> DeduplicationMetrics:
    """Get the global deduplication metrics instance."""
    return _dedup_metrics


def get_throttle_metrics() -> ThrottlingMetrics:
    """Get the global throttling metrics instance."""
    return _throttle_metrics


def get_escalation_metrics() -> EscalationMetrics:
    """Get the global escalation metrics instance."""
    return _escalation_metrics
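The file list above shows truthound_dashboard/api/router.py also growing by 147 lines in this release, which presumably wires these routers into the application. A minimal sketch of mounting the router manually — the /api prefix is an assumption, not taken from this diff:

from fastapi import FastAPI

from truthound_dashboard.api.notifications_advanced import router as notifications_router

app = FastAPI()
# Assumption: the real application mounts its API routers under /api via api/router.py.
app.include_router(notifications_router, prefix="/api")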
# =============================================================================
# Helper Functions
# =============================================================================


def _routing_rule_to_response(rule: RoutingRuleModel) -> RoutingRuleResponse:
    """Convert database model to response schema."""
    return RoutingRuleResponse(
        id=rule.id,
        name=rule.name,
        rule_config=rule.rule_config,
        actions=rule.actions,
        priority=rule.priority,
        is_active=rule.is_active,
        stop_on_match=rule.stop_on_match,
        metadata=rule.routing_metadata or {},
        created_at=rule.created_at,
        updated_at=rule.updated_at,
    )


def _dedup_config_to_response(config: DeduplicationConfig) -> DeduplicationConfigResponse:
    """Convert database model to response schema."""
    return DeduplicationConfigResponse(
        id=config.id,
        name=config.name,
        strategy=config.strategy,
        policy=config.policy,
        window_seconds=config.window_seconds,
        is_active=config.is_active,
        created_at=config.created_at,
        updated_at=config.updated_at,
    )


def _throttle_config_to_response(config: ThrottlingConfig) -> ThrottlingConfigResponse:
    """Convert database model to response schema."""
    return ThrottlingConfigResponse(
        id=config.id,
        name=config.name,
        per_minute=config.per_minute,
        per_hour=config.per_hour,
        per_day=config.per_day,
        burst_allowance=config.burst_allowance,
        channel_id=config.channel_id,
        is_active=config.is_active,
        created_at=config.created_at,
        updated_at=config.updated_at,
    )


def _escalation_policy_to_response(
    policy: EscalationPolicyModel,
) -> EscalationPolicyResponse:
    """Convert database model to response schema."""
    return EscalationPolicyResponse(
        id=policy.id,
        name=policy.name,
        description=policy.description or "",
        levels=policy.levels,
        auto_resolve_on_success=policy.auto_resolve_on_success,
        max_escalations=policy.max_escalations,
        is_active=policy.is_active,
        created_at=policy.created_at,
        updated_at=policy.updated_at,
    )


def _escalation_incident_to_response(
    incident: EscalationIncidentModel,
) -> EscalationIncidentResponse:
    """Convert database model to response schema."""
    events = []
    for event in incident.events or []:
        events.append(
            EscalationEventBase(
                from_state=event.get("from_state"),
                to_state=event.get("to_state", ""),
                actor=event.get("actor"),
                message=event.get("message", ""),
                timestamp=datetime.fromisoformat(
                    event.get("timestamp", datetime.utcnow().isoformat())
                ),
            )
        )

    return EscalationIncidentResponse(
        id=incident.id,
        policy_id=incident.policy_id,
        incident_ref=incident.incident_ref,
        state=incident.state,
        current_level=incident.current_level,
        escalation_count=incident.escalation_count,
        context=incident.context or {},
        acknowledged_by=incident.acknowledged_by,
        acknowledged_at=incident.acknowledged_at,
        resolved_by=incident.resolved_by,
        resolved_at=incident.resolved_at,
        next_escalation_at=incident.next_escalation_at,
        created_at=incident.created_at,
        updated_at=incident.updated_at,
        events=events,
    )


async def _broadcast_incident_state_change(
    incident_id: str,
    incident_ref: str,
    policy_id: str,
    from_state: str,
    to_state: str,
    current_level: int,
    actor: str | None = None,
    message: str | None = None,
) -> None:
    """Broadcast incident state change via WebSocket.

    This is called as a background task to avoid blocking the HTTP response.

    Args:
        incident_id: ID of the incident.
        incident_ref: External reference.
        policy_id: Associated policy ID.
        from_state: Previous state.
        to_state: New state.
        current_level: Current escalation level.
        actor: Who triggered the change.
        message: Optional message.
    """
    try:
        count = await notify_incident_state_changed(
            incident_id=incident_id,
            incident_ref=incident_ref,
            policy_id=policy_id,
            from_state=from_state,
            to_state=to_state,
            current_level=current_level,
            actor=actor,
            message=message,
        )
        if count > 0:
            logger.debug(
                f"Broadcast incident state change to {count} clients: "
                f"{incident_id} {from_state} -> {to_state}"
            )
    except Exception as e:
        logger.error(f"Failed to broadcast incident state change: {e}")
# =============================================================================
# Routing Rule Endpoints
# =============================================================================


@router.get("/routing/rules/types", response_model=RuleTypesResponse)
async def get_rule_types() -> RuleTypesResponse:
    """Get available routing rule types with their parameter schemas.

    Returns both simple rule types and combinator types (all_of, any_of, not).
    """
    rule_types = []

    # Add simple rule types from RuleRegistry
    for rule_type in RuleRegistry.list_types():
        rule_class = RuleRegistry.get(rule_type)
        if rule_class:
            rule_types.append(
                RuleTypeInfo(
                    type=rule_type,
                    name=rule_class.__name__.replace("Rule", "").replace("_", " "),
                    description=rule_class.__doc__ or "",
                    param_schema=rule_class.get_param_schema(),
                )
            )

    # Add combinator types
    combinator_types = [
        RuleTypeInfo(
            type=CombinatorType.ALL_OF.value,
            name="All Of",
            description="Matches when ALL nested rules match. Use for AND logic.",
            param_schema={
                "rules": {
                    "type": "array",
                    "required": True,
                    "description": "List of nested rules that must all match",
                    "items": {"type": "object", "description": "Nested rule configuration"},
                }
            },
        ),
        RuleTypeInfo(
            type=CombinatorType.ANY_OF.value,
            name="Any Of",
            description="Matches when ANY nested rule matches. Use for OR logic.",
            param_schema={
                "rules": {
                    "type": "array",
                    "required": True,
                    "description": "List of nested rules where at least one must match",
                    "items": {"type": "object", "description": "Nested rule configuration"},
                }
            },
        ),
        RuleTypeInfo(
            type=CombinatorType.NOT.value,
            name="Not",
            description="Negates the nested rule. Matches when the nested rule does NOT match.",
            param_schema={
                "rule": {
                    "type": "object",
                    "required": True,
                    "description": "The rule to negate",
                }
            },
        ),
    ]
    rule_types.extend(combinator_types)

    return RuleTypesResponse(rule_types=rule_types)
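For reference, a nested rule_config of the shape these combinator schemas describe could look like the sketch below. The "severity" and "tag" rule types and their params are illustrative assumptions; the concrete type names come from RuleRegistry.list_types() at runtime.

# Illustrative only: "severity" and "tag" are assumed rule types, not confirmed by this diff.
example_rule_config = {
    "type": "all_of",
    "rules": [
        {"type": "severity", "params": {"min_severity": "high"}},
        {"type": "not", "rule": {"type": "tag", "params": {"tag": "test"}}},
    ],
}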
@router.get("/routing/rules", response_model=RoutingRuleListResponse)
async def list_routing_rules(
    offset: int = Query(default=0, ge=0),
    limit: int = Query(default=50, ge=1, le=100),
    active_only: bool = Query(default=False),
    session: AsyncSession = Depends(get_session),
) -> RoutingRuleListResponse:
    """List routing rules ordered by priority."""
    query = select(RoutingRuleModel).order_by(
        RoutingRuleModel.priority.desc(),
        RoutingRuleModel.created_at.desc(),
    )

    if active_only:
        query = query.where(RoutingRuleModel.is_active == True)

    query = query.offset(offset).limit(limit)
    result = await session.execute(query)
    rules = result.scalars().all()

    return RoutingRuleListResponse(
        items=[_routing_rule_to_response(r) for r in rules],
        total=len(rules),
        offset=offset,
        limit=limit,
    )


def _validate_rule_config_on_save(
    rule_config: dict[str, Any],
    max_depth: int = 10,
    max_rules_per_combinator: int = 50,
) -> None:
    """Validate rule configuration before saving.

    Raises HTTPException if validation fails.

    Args:
        rule_config: The rule configuration dictionary.
        max_depth: Maximum nesting depth.
        max_rules_per_combinator: Maximum rules per combinator.

    Raises:
        HTTPException: If validation fails with 400 status code.
    """
    validation_config = RuleValidationConfig(
        max_depth=max_depth,
        max_rules_per_combinator=max_rules_per_combinator,
        check_circular_refs=True,
    )
    validator = RuleValidator(validation_config)
    result = validator.validate(rule_config)

    if not result.valid:
        # Format error messages with paths for clarity
        error_details = []
        for error in result.errors:
            if error.path:
                error_details.append(f"[{error.path}] {error.message}")
            else:
                error_details.append(error.message)

        raise HTTPException(
            status_code=400,
            detail={
                "message": "Invalid rule configuration",
                "errors": error_details,
                "validation_result": {
                    "rule_count": result.rule_count,
                    "max_depth": result.max_depth,
                    "circular_paths": result.circular_paths,
                },
            },
        )


@router.post("/routing/rules", response_model=RoutingRuleResponse)
async def create_routing_rule(
    request: RoutingRuleCreate,
    session: AsyncSession = Depends(get_session),
) -> RoutingRuleResponse:
    """Create a new routing rule.

    Validates the rule configuration for:
    - Valid rule types
    - Required parameters
    - Circular references
    - Maximum nesting depth
    - Reserved field names
    """
    # Validate rule configuration before saving
    _validate_rule_config_on_save(request.rule_config)

    rule = RoutingRuleModel(
        name=request.name,
        rule_config=request.rule_config,
        actions=request.actions,
        priority=request.priority,
        is_active=request.is_active,
        stop_on_match=request.stop_on_match,
        routing_metadata=request.metadata,
    )
    session.add(rule)
    await session.commit()
    await session.refresh(rule)

    return _routing_rule_to_response(rule)
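A minimal client-side sketch of creating a rule with httpx; the base URL, the "severity" rule type, and the action shape are assumptions rather than values taken from this diff:

import httpx

# Assumptions: server at localhost:8000 with routers under /api;
# the rule type and the action payload shape are illustrative only.
resp = httpx.post(
    "http://localhost:8000/api/notifications/routing/rules",
    json={
        "name": "critical-to-oncall",
        "rule_config": {"type": "severity", "params": {"min_severity": "critical"}},
        "actions": [{"channel_id": "oncall-slack"}],
        "priority": 100,
        "is_active": True,
        "stop_on_match": True,
        "metadata": {},
    },
)
resp.raise_for_status()
print(resp.json()["id"])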
@router.get("/routing/rules/{rule_id}", response_model=RoutingRuleResponse)
async def get_routing_rule(
    rule_id: str,
    session: AsyncSession = Depends(get_session),
) -> RoutingRuleResponse:
    """Get a routing rule by ID."""
    result = await session.execute(
        select(RoutingRuleModel).where(RoutingRuleModel.id == rule_id)
    )
    rule = result.scalar_one_or_none()

    if rule is None:
        raise HTTPException(status_code=404, detail="Routing rule not found")

    return _routing_rule_to_response(rule)


@router.put("/routing/rules/{rule_id}", response_model=RoutingRuleResponse)
async def update_routing_rule(
    rule_id: str,
    request: RoutingRuleUpdate,
    session: AsyncSession = Depends(get_session),
) -> RoutingRuleResponse:
    """Update a routing rule.

    If rule_config is provided, validates it for:
    - Valid rule types
    - Required parameters
    - Circular references
    - Maximum nesting depth
    - Reserved field names
    """
    result = await session.execute(
        select(RoutingRuleModel).where(RoutingRuleModel.id == rule_id)
    )
    rule = result.scalar_one_or_none()

    if rule is None:
        raise HTTPException(status_code=404, detail="Routing rule not found")

    # Validate rule_config if it's being updated
    if request.rule_config is not None:
        _validate_rule_config_on_save(request.rule_config)

    if request.name is not None:
        rule.name = request.name
    if request.rule_config is not None:
        rule.rule_config = request.rule_config
    if request.actions is not None:
        rule.actions = request.actions
    if request.priority is not None:
        rule.priority = request.priority
    if request.is_active is not None:
        rule.is_active = request.is_active
    if request.stop_on_match is not None:
        rule.stop_on_match = request.stop_on_match
    if request.metadata is not None:
        rule.routing_metadata = request.metadata

    await session.commit()
    await session.refresh(rule)

    return _routing_rule_to_response(rule)


@router.delete("/routing/rules/{rule_id}")
async def delete_routing_rule(
    rule_id: str,
    session: AsyncSession = Depends(get_session),
) -> dict[str, Any]:
    """Delete a routing rule."""
    result = await session.execute(
        select(RoutingRuleModel).where(RoutingRuleModel.id == rule_id)
    )
    rule = result.scalar_one_or_none()

    if rule is None:
        raise HTTPException(status_code=404, detail="Routing rule not found")

    await session.delete(rule)
    await session.commit()

    return {"success": True, "message": "Routing rule deleted"}


def _convert_nested_rule_config_to_dict(config: NestedRuleConfig) -> dict[str, Any]:
    """Convert NestedRuleConfig Pydantic model to plain dict for validation.

    Args:
        config: The NestedRuleConfig to convert.

    Returns:
        Dictionary representation of the rule configuration.
    """
    result: dict[str, Any] = {"type": config.type}

    if config.params:
        result["params"] = config.params

    if config.rules:
        result["rules"] = [_convert_nested_rule_config_to_dict(r) for r in config.rules]

    if config.rule:
        result["rule"] = _convert_nested_rule_config_to_dict(config.rule)

    return result


@router.post("/routing/rules/validate", response_model=RuleValidationResult)
async def validate_rule_config(
    config: NestedRuleConfig,
    max_depth: int = Query(default=10, ge=1, le=50, description="Maximum nesting depth"),
    max_rules_per_combinator: int = Query(
        default=50, ge=1, le=200, description="Maximum rules per combinator"
    ),
    check_circular_refs: bool = Query(
        default=True, description="Check for circular references"
    ),
) -> RuleValidationResult:
    """Validate a rule configuration without creating it.

    Performs comprehensive validation including:
    - Rule type existence (via RuleRegistry)
    - Required parameter validation
    - Circular reference detection (direct and indirect)
    - Maximum nesting depth enforcement
    - Maximum rules per combinator enforcement
    - Reserved field name checking

    Args:
        config: The rule configuration to validate.
        max_depth: Maximum allowed nesting depth (default: 10).
        max_rules_per_combinator: Maximum rules in a single combinator (default: 50).
        check_circular_refs: Whether to check for circular references (default: True).

    Returns:
        Validation result with errors, warnings, and statistics.
    """
    # Convert Pydantic model to dict for the validator
    rule_dict = _convert_nested_rule_config_to_dict(config)

    # Create validator with configuration
    validation_config = RuleValidationConfig(
        max_depth=max_depth,
        max_rules_per_combinator=max_rules_per_combinator,
        check_circular_refs=check_circular_refs,
    )
    validator = RuleValidator(validation_config)

    # Perform validation
    result = validator.validate(rule_dict)

    # Convert to response model
    return RuleValidationResult(
        valid=result.valid,
        errors=result.error_messages(),
        warnings=result.warning_messages(),
        rule_count=result.rule_count,
        max_depth=result.max_depth,
        circular_paths=result.circular_paths,
    )


@router.post("/routing/rules/validate-expression", response_model=ExpressionValidateResponse)
async def validate_expression(
    request: ExpressionValidateRequest,
) -> ExpressionValidateResponse:
    """Validate a Python-like expression for use in routing rules.

    Performs syntax validation and optionally evaluates the expression
    with sample data to check for runtime errors.

    The expression is evaluated in a safe, sandboxed environment with:
    - AST-based parsing (no exec/eval)
    - Timeout protection
    - Blocked access to dangerous attributes

    Args:
        request: Expression validation request.

    Returns:
        Validation result with any errors and optional preview result.

    Example expressions:
        - severity == 'critical'
        - pass_rate < 0.8 and issue_count > 5
        - 'production' in tags
        - metadata.get('environment') == 'production'
    """
    from ..core.notifications.routing.expression_engine import (
        ExpressionContext,
        ExpressionError,
        ExpressionSecurityError,
        ExpressionTimeout,
        SafeExpressionEvaluator,
    )

    expression = request.expression.strip()
    warnings: list[str] = []
    error: str | None = None
    error_line: int | None = None
    preview_result: bool | None = None
    preview_error: str | None = None

    # Check for empty expression
    if not expression:
        return ExpressionValidateResponse(
            valid=False,
            error="Expression cannot be empty",
        )

    # Create evaluator with configured timeout
    evaluator = SafeExpressionEvaluator(
        timeout_seconds=request.timeout_seconds,
    )

    # Create sample context for preview evaluation
    sample_context = ExpressionContext(
        checkpoint_name="sample_validation",
        action_type="check",
        severity="high",
        issues=["null_values", "duplicates"],
        pass_rate=0.85,
        timestamp=datetime.utcnow(),
        metadata={
            "environment": "production",
            "table": "orders",
            "row_count": 10000,
        },
    )

    # Validate and evaluate
    try:
        preview_result = evaluator.evaluate(expression, sample_context)
    except ExpressionSecurityError as e:
        error = f"Security error: {e.reason}"
        return ExpressionValidateResponse(
            valid=False,
            error=error,
        )
    except ExpressionTimeout as e:
        error = f"Timeout: Expression took too long to evaluate"
        return ExpressionValidateResponse(
            valid=False,
            error=error,
            warnings=["Consider simplifying the expression"],
        )
    except ExpressionError as e:
        error = e.reason

        # Try to extract line number from syntax errors
        if "Syntax error:" in error:
            import re
            line_match = re.search(r"line (\d+)", error)
            if line_match:
                error_line = int(line_match.group(1))

        return ExpressionValidateResponse(
            valid=False,
            error=error,
            error_line=error_line,
        )
    except Exception as e:
        error = f"Unexpected error: {str(e)}"
        return ExpressionValidateResponse(
            valid=False,
            error=error,
        )

    # Add warnings for potential issues
    if "metadata[" in expression and ".get(" not in expression:
        warnings.append(
            "Consider using metadata.get('key') instead of metadata['key'] "
            "to handle missing keys gracefully"
        )

    if len(expression) > 500:
        warnings.append(
            "Expression is quite long. Consider breaking it into multiple rules."
        )

    return ExpressionValidateResponse(
        valid=True,
        error=None,
        preview_result=preview_result,
        preview_error=preview_error,
        warnings=warnings,
    )
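A sketch of exercising the expression validator from a client, assuming the same base URL as above; timeout_seconds mirrors the ExpressionValidateRequest field used in the handler:

import httpx

resp = httpx.post(
    "http://localhost:8000/api/notifications/routing/rules/validate-expression",
    json={"expression": "severity == 'critical' and pass_rate < 0.8", "timeout_seconds": 1},
)
# Against the sample context above (severity="high", pass_rate=0.85) the preview
# evaluates to False, so the response should look roughly like:
# {"valid": true, "preview_result": false, "warnings": [], ...}
print(resp.json())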
# =============================================================================
# Deduplication Endpoints
# =============================================================================


@router.get("/deduplication/configs", response_model=DeduplicationConfigListResponse)
async def list_deduplication_configs(
    offset: int = Query(default=0, ge=0),
    limit: int = Query(default=50, ge=1, le=100),
    active_only: bool = Query(default=False),
    session: AsyncSession = Depends(get_session),
) -> DeduplicationConfigListResponse:
    """List deduplication configurations."""
    query = select(DeduplicationConfig).order_by(DeduplicationConfig.created_at.desc())

    if active_only:
        query = query.where(DeduplicationConfig.is_active == True)

    query = query.offset(offset).limit(limit)
    result = await session.execute(query)
    configs = result.scalars().all()

    return DeduplicationConfigListResponse(
        items=[_dedup_config_to_response(c) for c in configs],
        total=len(configs),
        offset=offset,
        limit=limit,
    )


@router.post("/deduplication/configs", response_model=DeduplicationConfigResponse)
async def create_deduplication_config(
    request: DeduplicationConfigCreate,
    session: AsyncSession = Depends(get_session),
) -> DeduplicationConfigResponse:
    """Create a new deduplication configuration."""
    config = DeduplicationConfig(
        name=request.name,
        strategy=request.strategy.value,
        policy=request.policy.value,
        window_seconds=request.window_seconds,
        is_active=request.is_active,
    )
    session.add(config)
    await session.commit()
    await session.refresh(config)

    return _dedup_config_to_response(config)


@router.get("/deduplication/configs/{config_id}", response_model=DeduplicationConfigResponse)
async def get_deduplication_config(
    config_id: str,
    session: AsyncSession = Depends(get_session),
) -> DeduplicationConfigResponse:
    """Get a deduplication config by ID."""
    result = await session.execute(
        select(DeduplicationConfig).where(DeduplicationConfig.id == config_id)
    )
    config = result.scalar_one_or_none()

    if config is None:
        raise HTTPException(status_code=404, detail="Deduplication config not found")

    return _dedup_config_to_response(config)


@router.put("/deduplication/configs/{config_id}", response_model=DeduplicationConfigResponse)
async def update_deduplication_config(
    config_id: str,
    request: DeduplicationConfigUpdate,
    session: AsyncSession = Depends(get_session),
) -> DeduplicationConfigResponse:
    """Update a deduplication config."""
    result = await session.execute(
        select(DeduplicationConfig).where(DeduplicationConfig.id == config_id)
    )
    config = result.scalar_one_or_none()

    if config is None:
        raise HTTPException(status_code=404, detail="Deduplication config not found")

    if request.name is not None:
        config.name = request.name
    if request.strategy is not None:
        config.strategy = request.strategy.value
    if request.policy is not None:
        config.policy = request.policy.value
    if request.window_seconds is not None:
        config.window_seconds = request.window_seconds
    if request.is_active is not None:
        config.is_active = request.is_active

    await session.commit()
    await session.refresh(config)

    return _dedup_config_to_response(config)


@router.delete("/deduplication/configs/{config_id}")
async def delete_deduplication_config(
    config_id: str,
    session: AsyncSession = Depends(get_session),
) -> dict[str, Any]:
    """Delete a deduplication config."""
    result = await session.execute(
        select(DeduplicationConfig).where(DeduplicationConfig.id == config_id)
    )
    config = result.scalar_one_or_none()

    if config is None:
        raise HTTPException(status_code=404, detail="Deduplication config not found")

    await session.delete(config)
    await session.commit()

    return {"success": True, "message": "Deduplication config deleted"}


@router.get("/deduplication/stats", response_model=DeduplicationStats)
async def get_deduplication_stats(
    session: AsyncSession = Depends(get_session),
) -> DeduplicationStats:
    """Get deduplication statistics (runtime metrics)."""
    stats = await _dedup_metrics.get_stats()
    return DeduplicationStats(
        total_received=stats.total_received,
        total_deduplicated=stats.total_deduplicated,
        total_passed=stats.total_passed,
        dedup_rate=stats.dedup_rate,
        active_fingerprints=stats.active_fingerprints,
    )


@router.get("/deduplication/stats/enhanced", response_model=DeduplicationStatsEnhanced)
async def get_deduplication_stats_enhanced(
    session: AsyncSession = Depends(get_session),
    start_time: datetime | None = Query(
        default=None,
        description="Start of time range filter (inclusive)",
    ),
    end_time: datetime | None = Query(
        default=None,
        description="End of time range filter (exclusive)",
    ),
    use_cache: bool = Query(
        default=True,
        description="Whether to use cached results",
    ),
    cache_ttl_seconds: int = Query(
        default=30,
        ge=1,
        le=3600,
        description="Cache TTL in seconds",
    ),
) -> DeduplicationStatsEnhanced:
    """Get enhanced deduplication statistics with config aggregates and caching info.

    This endpoint combines:
    - Runtime metrics (from in-memory collector)
    - Config aggregates (from database with efficient GROUP BY queries)
    - Cache information and time range filter
    """
    # Build time range filter
    time_range = None
    if start_time or end_time:
        time_range = TimeRange(start_time=start_time, end_time=end_time)

    # Get runtime metrics
    runtime_stats = await _dedup_metrics.get_stats()

    # Get config aggregates from database
    aggregator = StatsAggregator(session, cache_ttl_seconds=cache_ttl_seconds)
    db_stats = await aggregator.get_deduplication_stats(
        time_range=time_range,
        use_cache=use_cache,
        cache_ttl_seconds=cache_ttl_seconds,
    )

    # Build response
    time_range_filter = None
    if time_range:
        time_range_filter = TimeRangeFilter(
            start_time=time_range.start_time,
            end_time=time_range.end_time,
        )

    cache_info = CacheInfo(
        cached=db_stats.cached,
        cached_at=db_stats.cached_at,
        ttl_seconds=cache_ttl_seconds if use_cache else None,
    )

    return DeduplicationStatsEnhanced(
        # Runtime metrics
        total_received=runtime_stats.total_received,
        total_deduplicated=runtime_stats.total_deduplicated,
        total_passed=runtime_stats.total_passed,
        dedup_rate=runtime_stats.dedup_rate,
        active_fingerprints=runtime_stats.active_fingerprints,
        # Config aggregates
        total_configs=db_stats.total_configs,
        active_configs=db_stats.active_configs,
        by_strategy=db_stats.by_strategy,
        by_policy=db_stats.by_policy,
        avg_window_seconds=db_stats.avg_window_seconds,
        time_range=time_range_filter,
        cache_info=cache_info,
    )
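A time-windowed query of the enhanced stats endpoint might look like the following sketch; the base URL is an assumption, and the datetimes are ISO 8601 strings as FastAPI parses datetime query parameters:

import httpx

resp = httpx.get(
    "http://localhost:8000/api/notifications/deduplication/stats/enhanced",
    params={
        "start_time": "2025-01-01T00:00:00",
        "end_time": "2025-01-02T00:00:00",
        "use_cache": True,
        "cache_ttl_seconds": 60,
    },
)
# Runtime counters plus by_strategy/by_policy aggregates, time_range, and cache_info.
stats = resp.json()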
# =============================================================================
# Throttling Endpoints
# =============================================================================


@router.get("/throttling/configs", response_model=ThrottlingConfigListResponse)
async def list_throttling_configs(
    offset: int = Query(default=0, ge=0),
    limit: int = Query(default=50, ge=1, le=100),
    active_only: bool = Query(default=False),
    channel_id: str | None = Query(default=None),
    session: AsyncSession = Depends(get_session),
) -> ThrottlingConfigListResponse:
    """List throttling configurations."""
    query = select(ThrottlingConfig).order_by(ThrottlingConfig.created_at.desc())

    if active_only:
        query = query.where(ThrottlingConfig.is_active == True)
    if channel_id is not None:
        query = query.where(ThrottlingConfig.channel_id == channel_id)

    query = query.offset(offset).limit(limit)
    result = await session.execute(query)
    configs = result.scalars().all()

    return ThrottlingConfigListResponse(
        items=[_throttle_config_to_response(c) for c in configs],
        total=len(configs),
        offset=offset,
        limit=limit,
    )


@router.post("/throttling/configs", response_model=ThrottlingConfigResponse)
async def create_throttling_config(
    request: ThrottlingConfigCreate,
    session: AsyncSession = Depends(get_session),
) -> ThrottlingConfigResponse:
    """Create a new throttling configuration."""
    config = ThrottlingConfig(
        name=request.name,
        per_minute=request.per_minute,
        per_hour=request.per_hour,
        per_day=request.per_day,
        burst_allowance=request.burst_allowance,
        channel_id=request.channel_id,
        is_active=request.is_active,
    )
    session.add(config)
    await session.commit()
    await session.refresh(config)

    return _throttle_config_to_response(config)
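A create payload for a per-channel limit might look like this sketch; the field names mirror ThrottlingConfigCreate as used above, while the numeric limits and channel_id are illustrative assumptions:

import httpx

resp = httpx.post(
    "http://localhost:8000/api/notifications/throttling/configs",  # assumed base URL
    json={
        "name": "slack-default-limits",
        "per_minute": 10,
        "per_hour": 100,
        "per_day": 500,
        "burst_allowance": 5,
        "channel_id": "slack-alerts",
        "is_active": True,
    },
)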
@router.get("/throttling/configs/{config_id}", response_model=ThrottlingConfigResponse)
async def get_throttling_config(
    config_id: str,
    session: AsyncSession = Depends(get_session),
) -> ThrottlingConfigResponse:
    """Get a throttling config by ID."""
    result = await session.execute(
        select(ThrottlingConfig).where(ThrottlingConfig.id == config_id)
    )
    config = result.scalar_one_or_none()

    if config is None:
        raise HTTPException(status_code=404, detail="Throttling config not found")

    return _throttle_config_to_response(config)


@router.put("/throttling/configs/{config_id}", response_model=ThrottlingConfigResponse)
async def update_throttling_config(
    config_id: str,
    request: ThrottlingConfigUpdate,
    session: AsyncSession = Depends(get_session),
) -> ThrottlingConfigResponse:
    """Update a throttling config."""
    result = await session.execute(
        select(ThrottlingConfig).where(ThrottlingConfig.id == config_id)
    )
    config = result.scalar_one_or_none()

    if config is None:
        raise HTTPException(status_code=404, detail="Throttling config not found")

    if request.name is not None:
        config.name = request.name
    if request.per_minute is not None:
        config.per_minute = request.per_minute
    if request.per_hour is not None:
        config.per_hour = request.per_hour
    if request.per_day is not None:
        config.per_day = request.per_day
    if request.burst_allowance is not None:
        config.burst_allowance = request.burst_allowance
    if request.channel_id is not None:
        config.channel_id = request.channel_id
    if request.is_active is not None:
        config.is_active = request.is_active

    await session.commit()
    await session.refresh(config)

    return _throttle_config_to_response(config)


@router.delete("/throttling/configs/{config_id}")
async def delete_throttling_config(
    config_id: str,
    session: AsyncSession = Depends(get_session),
) -> dict[str, Any]:
    """Delete a throttling config."""
    result = await session.execute(
        select(ThrottlingConfig).where(ThrottlingConfig.id == config_id)
    )
    config = result.scalar_one_or_none()

    if config is None:
        raise HTTPException(status_code=404, detail="Throttling config not found")

    await session.delete(config)
    await session.commit()

    return {"success": True, "message": "Throttling config deleted"}


@router.get("/throttling/stats", response_model=ThrottlingStats)
async def get_throttling_stats(
    session: AsyncSession = Depends(get_session),
) -> ThrottlingStats:
    """Get throttling statistics (runtime metrics)."""
    stats = await _throttle_metrics.get_stats()
    return ThrottlingStats(
        total_received=stats.total_received,
        total_throttled=stats.total_throttled,
        total_passed=stats.total_passed,
        throttle_rate=stats.throttle_rate,
        current_window_count=stats.current_window_count,
    )


@router.get("/throttling/stats/enhanced", response_model=ThrottlingStatsEnhanced)
async def get_throttling_stats_enhanced(
    session: AsyncSession = Depends(get_session),
    start_time: datetime | None = Query(
        default=None,
        description="Start of time range filter (inclusive)",
    ),
    end_time: datetime | None = Query(
        default=None,
        description="End of time range filter (exclusive)",
    ),
    use_cache: bool = Query(
        default=True,
        description="Whether to use cached results",
    ),
    cache_ttl_seconds: int = Query(
        default=30,
        ge=1,
        le=3600,
        description="Cache TTL in seconds",
    ),
) -> ThrottlingStatsEnhanced:
    """Get enhanced throttling statistics with config aggregates and caching info.

    This endpoint combines:
    - Runtime metrics (from in-memory collector)
    - Config aggregates (from database with efficient GROUP BY queries)
    - Cache information and time range filter
    """
    # Build time range filter
    time_range = None
    if start_time or end_time:
        time_range = TimeRange(start_time=start_time, end_time=end_time)

    # Get runtime metrics
    runtime_stats = await _throttle_metrics.get_stats()

    # Get config aggregates from database
    aggregator = StatsAggregator(session, cache_ttl_seconds=cache_ttl_seconds)
    db_stats = await aggregator.get_throttling_stats(
        time_range=time_range,
        use_cache=use_cache,
        cache_ttl_seconds=cache_ttl_seconds,
    )

    # Build response
    time_range_filter = None
    if time_range:
        time_range_filter = TimeRangeFilter(
            start_time=time_range.start_time,
            end_time=time_range.end_time,
        )

    cache_info = CacheInfo(
        cached=db_stats.cached,
        cached_at=db_stats.cached_at,
        ttl_seconds=cache_ttl_seconds if use_cache else None,
    )

    return ThrottlingStatsEnhanced(
        # Runtime metrics
        total_received=runtime_stats.total_received,
        total_throttled=runtime_stats.total_throttled,
        total_passed=runtime_stats.total_passed,
        throttle_rate=runtime_stats.throttle_rate,
        current_window_count=runtime_stats.current_window_count,
        # Config aggregates
        total_configs=db_stats.total_configs,
        active_configs=db_stats.active_configs,
        configs_with_per_minute=db_stats.configs_with_per_minute,
        configs_with_per_hour=db_stats.configs_with_per_hour,
        configs_with_per_day=db_stats.configs_with_per_day,
        avg_burst_allowance=db_stats.avg_burst_allowance,
        time_range=time_range_filter,
        cache_info=cache_info,
    )
|
|
1208
|
+
|
|
1209
|
+
|
|
1210
|
+
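A sketch of querying the enhanced stats endpoint with a time window and caching disabled, so every call hits the GROUP BY aggregates directly. The query parameter names come from the handler above; the base URL and /api/notifications prefix are assumptions:

import asyncio
import httpx

async def fetch_throttling_stats_for_day() -> dict:
    params = {
        "start_time": "2025-01-01T00:00:00",  # inclusive lower bound
        "end_time": "2025-01-02T00:00:00",    # exclusive upper bound
        "use_cache": "false",                  # force a fresh DB aggregate
    }
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        resp = await client.get(
            "/api/notifications/throttling/stats/enhanced", params=params
        )
        resp.raise_for_status()
        return resp.json()

# asyncio.run(fetch_throttling_stats_for_day())

Note that with use_cache=false the handler sets cache_info.ttl_seconds to null in the response, which a client can use to tell cached reads from fresh ones.
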
# =============================================================================
# Escalation Policy Endpoints
# =============================================================================


@router.get("/escalation/policies", response_model=EscalationPolicyListResponse)
async def list_escalation_policies(
    offset: int = Query(default=0, ge=0),
    limit: int = Query(default=50, ge=1, le=100),
    active_only: bool = Query(default=False),
    session: AsyncSession = Depends(get_session),
) -> EscalationPolicyListResponse:
    """List escalation policies."""
    query = select(EscalationPolicyModel).order_by(
        EscalationPolicyModel.created_at.desc()
    )

    if active_only:
        query = query.where(EscalationPolicyModel.is_active.is_(True))

    query = query.offset(offset).limit(limit)
    result = await session.execute(query)
    policies = result.scalars().all()

    return EscalationPolicyListResponse(
        items=[_escalation_policy_to_response(p) for p in policies],
        total=len(policies),
        offset=offset,
        limit=limit,
    )

@router.post("/escalation/policies", response_model=EscalationPolicyResponse)
|
|
1243
|
+
async def create_escalation_policy(
|
|
1244
|
+
request: EscalationPolicyCreate,
|
|
1245
|
+
session: AsyncSession = Depends(get_session),
|
|
1246
|
+
) -> EscalationPolicyResponse:
|
|
1247
|
+
"""Create a new escalation policy."""
|
|
1248
|
+
# Convert levels to dict format
|
|
1249
|
+
levels = [level.model_dump() for level in request.levels]
|
|
1250
|
+
|
|
1251
|
+
policy = EscalationPolicyModel(
|
|
1252
|
+
name=request.name,
|
|
1253
|
+
description=request.description,
|
|
1254
|
+
levels=levels,
|
|
1255
|
+
auto_resolve_on_success=request.auto_resolve_on_success,
|
|
1256
|
+
max_escalations=request.max_escalations,
|
|
1257
|
+
is_active=request.is_active,
|
|
1258
|
+
)
|
|
1259
|
+
session.add(policy)
|
|
1260
|
+
await session.commit()
|
|
1261
|
+
await session.refresh(policy)
|
|
1262
|
+
|
|
1263
|
+
return _escalation_policy_to_response(policy)
|
|
1264
|
+
|
|
1265
|
+
|
|
1266
|
+
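A hypothetical create request for illustration. The top-level fields match EscalationPolicyCreate as used above; the keys inside each level entry are assumptions, since the level schema lives elsewhere in the package (core/notifications/escalation/models.py), as are the base URL and prefix:

import asyncio
import httpx

async def create_oncall_policy() -> dict:
    payload = {
        "name": "critical-validations",
        "description": "Escalate unresolved validation failures",
        # Level fields below are illustrative only; consult the real
        # escalation level schema for the actual field names.
        "levels": [
            {"delay_minutes": 5, "channel_ids": ["slack-oncall"]},
            {"delay_minutes": 30, "channel_ids": ["pagerduty-primary"]},
        ],
        "auto_resolve_on_success": True,
        "max_escalations": 3,
        "is_active": True,
    }
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        resp = await client.post("/api/notifications/escalation/policies", json=payload)
        resp.raise_for_status()
        return resp.json()

# asyncio.run(create_oncall_policy())
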
@router.get("/escalation/policies/{policy_id}", response_model=EscalationPolicyResponse)
|
|
1267
|
+
async def get_escalation_policy(
|
|
1268
|
+
policy_id: str,
|
|
1269
|
+
session: AsyncSession = Depends(get_session),
|
|
1270
|
+
) -> EscalationPolicyResponse:
|
|
1271
|
+
"""Get an escalation policy by ID."""
|
|
1272
|
+
result = await session.execute(
|
|
1273
|
+
select(EscalationPolicyModel).where(EscalationPolicyModel.id == policy_id)
|
|
1274
|
+
)
|
|
1275
|
+
policy = result.scalar_one_or_none()
|
|
1276
|
+
|
|
1277
|
+
if policy is None:
|
|
1278
|
+
raise HTTPException(status_code=404, detail="Escalation policy not found")
|
|
1279
|
+
|
|
1280
|
+
return _escalation_policy_to_response(policy)
|
|
1281
|
+
|
|
1282
|
+
|
|
1283
|
+
@router.put("/escalation/policies/{policy_id}", response_model=EscalationPolicyResponse)
|
|
1284
|
+
async def update_escalation_policy(
|
|
1285
|
+
policy_id: str,
|
|
1286
|
+
request: EscalationPolicyUpdate,
|
|
1287
|
+
session: AsyncSession = Depends(get_session),
|
|
1288
|
+
) -> EscalationPolicyResponse:
|
|
1289
|
+
"""Update an escalation policy."""
|
|
1290
|
+
result = await session.execute(
|
|
1291
|
+
select(EscalationPolicyModel).where(EscalationPolicyModel.id == policy_id)
|
|
1292
|
+
)
|
|
1293
|
+
policy = result.scalar_one_or_none()
|
|
1294
|
+
|
|
1295
|
+
if policy is None:
|
|
1296
|
+
raise HTTPException(status_code=404, detail="Escalation policy not found")
|
|
1297
|
+
|
|
1298
|
+
if request.name is not None:
|
|
1299
|
+
policy.name = request.name
|
|
1300
|
+
if request.description is not None:
|
|
1301
|
+
policy.description = request.description
|
|
1302
|
+
if request.levels is not None:
|
|
1303
|
+
policy.levels = [level.model_dump() for level in request.levels]
|
|
1304
|
+
if request.auto_resolve_on_success is not None:
|
|
1305
|
+
policy.auto_resolve_on_success = request.auto_resolve_on_success
|
|
1306
|
+
if request.max_escalations is not None:
|
|
1307
|
+
policy.max_escalations = request.max_escalations
|
|
1308
|
+
if request.is_active is not None:
|
|
1309
|
+
policy.is_active = request.is_active
|
|
1310
|
+
|
|
1311
|
+
await session.commit()
|
|
1312
|
+
await session.refresh(policy)
|
|
1313
|
+
|
|
1314
|
+
return _escalation_policy_to_response(policy)
|
|
1315
|
+
|
|
1316
|
+
|
|
1317
|
+
@router.delete("/escalation/policies/{policy_id}")
|
|
1318
|
+
async def delete_escalation_policy(
|
|
1319
|
+
policy_id: str,
|
|
1320
|
+
session: AsyncSession = Depends(get_session),
|
|
1321
|
+
) -> dict[str, Any]:
|
|
1322
|
+
"""Delete an escalation policy."""
|
|
1323
|
+
result = await session.execute(
|
|
1324
|
+
select(EscalationPolicyModel).where(EscalationPolicyModel.id == policy_id)
|
|
1325
|
+
)
|
|
1326
|
+
policy = result.scalar_one_or_none()
|
|
1327
|
+
|
|
1328
|
+
if policy is None:
|
|
1329
|
+
raise HTTPException(status_code=404, detail="Escalation policy not found")
|
|
1330
|
+
|
|
1331
|
+
await session.delete(policy)
|
|
1332
|
+
await session.commit()
|
|
1333
|
+
|
|
1334
|
+
return {"success": True, "message": "Escalation policy deleted"}
|
|
1335
|
+
|
|
1336
|
+
|
|
1337
|
+
# =============================================================================
|
|
1338
|
+
# Escalation Incident Endpoints
|
|
1339
|
+
# =============================================================================
|
|
1340
|
+
|
|
1341
|
+
|
|
1342
|
+
@router.get("/escalation/incidents", response_model=EscalationIncidentListResponse)
|
|
1343
|
+
async def list_escalation_incidents(
|
|
1344
|
+
offset: int = Query(default=0, ge=0),
|
|
1345
|
+
limit: int = Query(default=50, ge=1, le=100),
|
|
1346
|
+
policy_id: str | None = Query(default=None),
|
|
1347
|
+
state: str | None = Query(default=None),
|
|
1348
|
+
session: AsyncSession = Depends(get_session),
|
|
1349
|
+
) -> EscalationIncidentListResponse:
|
|
1350
|
+
"""List escalation incidents."""
|
|
1351
|
+
query = select(EscalationIncidentModel).order_by(
|
|
1352
|
+
EscalationIncidentModel.created_at.desc()
|
|
1353
|
+
)
|
|
1354
|
+
|
|
1355
|
+
if policy_id is not None:
|
|
1356
|
+
query = query.where(EscalationIncidentModel.policy_id == policy_id)
|
|
1357
|
+
if state is not None:
|
|
1358
|
+
query = query.where(EscalationIncidentModel.state == state)
|
|
1359
|
+
|
|
1360
|
+
query = query.offset(offset).limit(limit)
|
|
1361
|
+
result = await session.execute(query)
|
|
1362
|
+
incidents = result.scalars().all()
|
|
1363
|
+
|
|
1364
|
+
return EscalationIncidentListResponse(
|
|
1365
|
+
items=[_escalation_incident_to_response(i) for i in incidents],
|
|
1366
|
+
total=len(incidents),
|
|
1367
|
+
offset=offset,
|
|
1368
|
+
limit=limit,
|
|
1369
|
+
)
|
|
1370
|
+
|
|
1371
|
+
|
|
1372
|
+
@router.get("/escalation/incidents/active", response_model=EscalationIncidentListResponse)
|
|
1373
|
+
async def list_active_incidents(
|
|
1374
|
+
offset: int = Query(default=0, ge=0),
|
|
1375
|
+
limit: int = Query(default=50, ge=1, le=100),
|
|
1376
|
+
session: AsyncSession = Depends(get_session),
|
|
1377
|
+
) -> EscalationIncidentListResponse:
|
|
1378
|
+
"""List active (non-resolved) escalation incidents."""
|
|
1379
|
+
query = (
|
|
1380
|
+
select(EscalationIncidentModel)
|
|
1381
|
+
.where(EscalationIncidentModel.state != EscalationStateEnum.RESOLVED.value)
|
|
1382
|
+
.order_by(EscalationIncidentModel.created_at.desc())
|
|
1383
|
+
.offset(offset)
|
|
1384
|
+
.limit(limit)
|
|
1385
|
+
)
|
|
1386
|
+
|
|
1387
|
+
result = await session.execute(query)
|
|
1388
|
+
incidents = result.scalars().all()
|
|
1389
|
+
|
|
1390
|
+
return EscalationIncidentListResponse(
|
|
1391
|
+
items=[_escalation_incident_to_response(i) for i in incidents],
|
|
1392
|
+
total=len(incidents),
|
|
1393
|
+
offset=offset,
|
|
1394
|
+
limit=limit,
|
|
1395
|
+
)
|
|
1396
|
+
|
|
1397
|
+
|
|
1398
|
+
@router.get("/escalation/incidents/{incident_id}", response_model=EscalationIncidentResponse)
|
|
1399
|
+
async def get_escalation_incident(
|
|
1400
|
+
incident_id: str,
|
|
1401
|
+
session: AsyncSession = Depends(get_session),
|
|
1402
|
+
) -> EscalationIncidentResponse:
|
|
1403
|
+
"""Get an escalation incident by ID."""
|
|
1404
|
+
result = await session.execute(
|
|
1405
|
+
select(EscalationIncidentModel).where(EscalationIncidentModel.id == incident_id)
|
|
1406
|
+
)
|
|
1407
|
+
incident = result.scalar_one_or_none()
|
|
1408
|
+
|
|
1409
|
+
if incident is None:
|
|
1410
|
+
raise HTTPException(status_code=404, detail="Escalation incident not found")
|
|
1411
|
+
|
|
1412
|
+
return _escalation_incident_to_response(incident)
|
|
1413
|
+
|
|
1414
|
+
|
|
1415
|
+
@router.post("/escalation/incidents/{incident_id}/acknowledge", response_model=EscalationIncidentResponse)
|
|
1416
|
+
async def acknowledge_incident(
|
|
1417
|
+
incident_id: str,
|
|
1418
|
+
request: AcknowledgeRequest,
|
|
1419
|
+
background_tasks: BackgroundTasks,
|
|
1420
|
+
session: AsyncSession = Depends(get_session),
|
|
1421
|
+
) -> EscalationIncidentResponse:
|
|
1422
|
+
"""Acknowledge an escalation incident."""
|
|
1423
|
+
result = await session.execute(
|
|
1424
|
+
select(EscalationIncidentModel).where(EscalationIncidentModel.id == incident_id)
|
|
1425
|
+
)
|
|
1426
|
+
incident = result.scalar_one_or_none()
|
|
1427
|
+
|
|
1428
|
+
if incident is None:
|
|
1429
|
+
raise HTTPException(status_code=404, detail="Escalation incident not found")
|
|
1430
|
+
|
|
1431
|
+
if incident.state == EscalationStateEnum.RESOLVED.value:
|
|
1432
|
+
raise HTTPException(status_code=400, detail="Cannot acknowledge resolved incident")
|
|
1433
|
+
|
|
1434
|
+
if incident.state == EscalationStateEnum.ACKNOWLEDGED.value:
|
|
1435
|
+
raise HTTPException(status_code=400, detail="Incident already acknowledged")
|
|
1436
|
+
|
|
1437
|
+
# Record state transition
|
|
1438
|
+
old_state = incident.state
|
|
1439
|
+
incident.state = EscalationStateEnum.ACKNOWLEDGED.value
|
|
1440
|
+
incident.acknowledged_by = request.actor
|
|
1441
|
+
incident.acknowledged_at = datetime.utcnow()
|
|
1442
|
+
incident.add_event(
|
|
1443
|
+
from_state=old_state,
|
|
1444
|
+
to_state=EscalationStateEnum.ACKNOWLEDGED.value,
|
|
1445
|
+
actor=request.actor,
|
|
1446
|
+
message=request.message or f"Acknowledged by {request.actor}",
|
|
1447
|
+
)
|
|
1448
|
+
|
|
1449
|
+
await session.commit()
|
|
1450
|
+
await session.refresh(incident)
|
|
1451
|
+
|
|
1452
|
+
# Broadcast WebSocket event for real-time updates
|
|
1453
|
+
background_tasks.add_task(
|
|
1454
|
+
_broadcast_incident_state_change,
|
|
1455
|
+
incident_id=incident.id,
|
|
1456
|
+
incident_ref=incident.incident_ref,
|
|
1457
|
+
policy_id=incident.policy_id,
|
|
1458
|
+
from_state=old_state,
|
|
1459
|
+
to_state=EscalationStateEnum.ACKNOWLEDGED.value,
|
|
1460
|
+
current_level=incident.current_level,
|
|
1461
|
+
actor=request.actor,
|
|
1462
|
+
message=request.message or f"Acknowledged by {request.actor}",
|
|
1463
|
+
)
|
|
1464
|
+
|
|
1465
|
+
return _escalation_incident_to_response(incident)
|
|
1466
|
+
|
|
1467
|
+
|
|
1468
|
+
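The acknowledge handler enforces two transition guards before recording the state change and broadcasting it: acknowledging a resolved or already-acknowledged incident returns 400. A minimal client sketch; the actor and message fields follow the handler's use of AcknowledgeRequest, while the base URL, prefix, and incident ID are placeholders:

import asyncio
import httpx

async def ack_incident(incident_id: str) -> dict:
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        resp = await client.post(
            f"/api/notifications/escalation/incidents/{incident_id}/acknowledge",
            json={"actor": "alice@example.com", "message": "Looking into it"},
        )
        # 400 here means the incident was already acknowledged or resolved.
        resp.raise_for_status()
        return resp.json()

# asyncio.run(ack_incident("inc-123"))
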
@router.post("/escalation/incidents/{incident_id}/resolve", response_model=EscalationIncidentResponse)
|
|
1469
|
+
async def resolve_incident(
|
|
1470
|
+
incident_id: str,
|
|
1471
|
+
request: ResolveRequest,
|
|
1472
|
+
background_tasks: BackgroundTasks,
|
|
1473
|
+
session: AsyncSession = Depends(get_session),
|
|
1474
|
+
) -> EscalationIncidentResponse:
|
|
1475
|
+
"""Resolve an escalation incident."""
|
|
1476
|
+
result = await session.execute(
|
|
1477
|
+
select(EscalationIncidentModel).where(EscalationIncidentModel.id == incident_id)
|
|
1478
|
+
)
|
|
1479
|
+
incident = result.scalar_one_or_none()
|
|
1480
|
+
|
|
1481
|
+
if incident is None:
|
|
1482
|
+
raise HTTPException(status_code=404, detail="Escalation incident not found")
|
|
1483
|
+
|
|
1484
|
+
if incident.state == EscalationStateEnum.RESOLVED.value:
|
|
1485
|
+
raise HTTPException(status_code=400, detail="Incident already resolved")
|
|
1486
|
+
|
|
1487
|
+
# Record state transition
|
|
1488
|
+
old_state = incident.state
|
|
1489
|
+
incident.state = EscalationStateEnum.RESOLVED.value
|
|
1490
|
+
incident.resolved_by = request.actor
|
|
1491
|
+
incident.resolved_at = datetime.utcnow()
|
|
1492
|
+
incident.next_escalation_at = None
|
|
1493
|
+
|
|
1494
|
+
actor_msg = request.actor or "system"
|
|
1495
|
+
incident.add_event(
|
|
1496
|
+
from_state=old_state,
|
|
1497
|
+
to_state=EscalationStateEnum.RESOLVED.value,
|
|
1498
|
+
actor=request.actor,
|
|
1499
|
+
message=request.message or f"Resolved by {actor_msg}",
|
|
1500
|
+
)
|
|
1501
|
+
|
|
1502
|
+
await session.commit()
|
|
1503
|
+
await session.refresh(incident)
|
|
1504
|
+
|
|
1505
|
+
# Broadcast WebSocket event for real-time updates
|
|
1506
|
+
background_tasks.add_task(
|
|
1507
|
+
_broadcast_incident_state_change,
|
|
1508
|
+
incident_id=incident.id,
|
|
1509
|
+
incident_ref=incident.incident_ref,
|
|
1510
|
+
policy_id=incident.policy_id,
|
|
1511
|
+
from_state=old_state,
|
|
1512
|
+
to_state=EscalationStateEnum.RESOLVED.value,
|
|
1513
|
+
current_level=incident.current_level,
|
|
1514
|
+
actor=request.actor,
|
|
1515
|
+
message=request.message or f"Resolved by {actor_msg}",
|
|
1516
|
+
)
|
|
1517
|
+
|
|
1518
|
+
return _escalation_incident_to_response(incident)
|
|
1519
|
+
|
|
1520
|
+
|
|
1521
|
+
@router.get("/escalation/stats", response_model=EscalationStats)
|
|
1522
|
+
async def get_escalation_stats(
|
|
1523
|
+
session: AsyncSession = Depends(get_session),
|
|
1524
|
+
start_time: datetime | None = Query(
|
|
1525
|
+
default=None,
|
|
1526
|
+
description="Start of time range filter (inclusive)",
|
|
1527
|
+
),
|
|
1528
|
+
end_time: datetime | None = Query(
|
|
1529
|
+
default=None,
|
|
1530
|
+
description="End of time range filter (exclusive)",
|
|
1531
|
+
),
|
|
1532
|
+
use_cache: bool = Query(
|
|
1533
|
+
default=True,
|
|
1534
|
+
description="Whether to use cached results",
|
|
1535
|
+
),
|
|
1536
|
+
cache_ttl_seconds: int | None = Query(
|
|
1537
|
+
default=None,
|
|
1538
|
+
ge=1,
|
|
1539
|
+
le=3600,
|
|
1540
|
+
description="Cache TTL override in seconds",
|
|
1541
|
+
),
|
|
1542
|
+
) -> EscalationStats:
|
|
1543
|
+
"""Get escalation statistics.
|
|
1544
|
+
|
|
1545
|
+
Uses efficient aggregate queries with optional caching and time-range filtering.
|
|
1546
|
+
"""
|
|
1547
|
+
# Build time range filter
|
|
1548
|
+
time_range = None
|
|
1549
|
+
if start_time or end_time:
|
|
1550
|
+
time_range = TimeRange(start_time=start_time, end_time=end_time)
|
|
1551
|
+
|
|
1552
|
+
# Use StatsAggregator for efficient queries
|
|
1553
|
+
aggregator = StatsAggregator(session, cache_ttl_seconds=cache_ttl_seconds or 30)
|
|
1554
|
+
db_stats = await aggregator.get_escalation_stats(
|
|
1555
|
+
time_range=time_range,
|
|
1556
|
+
use_cache=use_cache,
|
|
1557
|
+
cache_ttl_seconds=cache_ttl_seconds,
|
|
1558
|
+
)
|
|
1559
|
+
|
|
1560
|
+
# Also get avg_resolution_time from metrics collector for real-time data
|
|
1561
|
+
metrics_stats = await _escalation_metrics.get_stats()
|
|
1562
|
+
avg_resolution_time_minutes = None
|
|
1563
|
+
|
|
1564
|
+
# Prefer database resolution time if available, otherwise use metrics
|
|
1565
|
+
if db_stats.avg_resolution_time_seconds is not None:
|
|
1566
|
+
avg_resolution_time_minutes = db_stats.avg_resolution_time_seconds / 60.0
|
|
1567
|
+
elif metrics_stats.avg_resolution_time > 0:
|
|
1568
|
+
avg_resolution_time_minutes = metrics_stats.avg_resolution_time / 60.0
|
|
1569
|
+
|
|
1570
|
+
return EscalationStats(
|
|
1571
|
+
total_incidents=db_stats.total_incidents,
|
|
1572
|
+
by_state=db_stats.by_state,
|
|
1573
|
+
active_count=db_stats.active_count,
|
|
1574
|
+
total_policies=db_stats.total_policies,
|
|
1575
|
+
avg_resolution_time_minutes=avg_resolution_time_minutes,
|
|
1576
|
+
)
|
|
1577
|
+
|
|
1578
|
+
|
|
1579
|
+
@router.get("/escalation/stats/enhanced", response_model=EscalationStatsEnhanced)
|
|
1580
|
+
async def get_escalation_stats_enhanced(
|
|
1581
|
+
session: AsyncSession = Depends(get_session),
|
|
1582
|
+
start_time: datetime | None = Query(
|
|
1583
|
+
default=None,
|
|
1584
|
+
description="Start of time range filter (inclusive)",
|
|
1585
|
+
),
|
|
1586
|
+
end_time: datetime | None = Query(
|
|
1587
|
+
default=None,
|
|
1588
|
+
description="End of time range filter (exclusive)",
|
|
1589
|
+
),
|
|
1590
|
+
use_cache: bool = Query(
|
|
1591
|
+
default=True,
|
|
1592
|
+
description="Whether to use cached results",
|
|
1593
|
+
),
|
|
1594
|
+
cache_ttl_seconds: int = Query(
|
|
1595
|
+
default=30,
|
|
1596
|
+
ge=1,
|
|
1597
|
+
le=3600,
|
|
1598
|
+
description="Cache TTL in seconds",
|
|
1599
|
+
),
|
|
1600
|
+
) -> EscalationStatsEnhanced:
|
|
1601
|
+
"""Get enhanced escalation statistics with time range and caching info.
|
|
1602
|
+
|
|
1603
|
+
This endpoint provides additional metadata including:
|
|
1604
|
+
- Time range filter applied
|
|
1605
|
+
- Cache information (hit/miss, cached_at, ttl)
|
|
1606
|
+
"""
|
|
1607
|
+
# Build time range filter
|
|
1608
|
+
time_range = None
|
|
1609
|
+
if start_time or end_time:
|
|
1610
|
+
time_range = TimeRange(start_time=start_time, end_time=end_time)
|
|
1611
|
+
|
|
1612
|
+
# Use StatsAggregator for efficient queries
|
|
1613
|
+
aggregator = StatsAggregator(session, cache_ttl_seconds=cache_ttl_seconds)
|
|
1614
|
+
db_stats = await aggregator.get_escalation_stats(
|
|
1615
|
+
time_range=time_range,
|
|
1616
|
+
use_cache=use_cache,
|
|
1617
|
+
cache_ttl_seconds=cache_ttl_seconds,
|
|
1618
|
+
)
|
|
1619
|
+
|
|
1620
|
+
# Get avg_resolution_time
|
|
1621
|
+
avg_resolution_time_minutes = None
|
|
1622
|
+
if db_stats.avg_resolution_time_seconds is not None:
|
|
1623
|
+
avg_resolution_time_minutes = db_stats.avg_resolution_time_seconds / 60.0
|
|
1624
|
+
else:
|
|
1625
|
+
# Fallback to metrics collector
|
|
1626
|
+
metrics_stats = await _escalation_metrics.get_stats()
|
|
1627
|
+
if metrics_stats.avg_resolution_time > 0:
|
|
1628
|
+
avg_resolution_time_minutes = metrics_stats.avg_resolution_time / 60.0
|
|
1629
|
+
|
|
1630
|
+
# Build response
|
|
1631
|
+
time_range_filter = None
|
|
1632
|
+
if time_range:
|
|
1633
|
+
time_range_filter = TimeRangeFilter(
|
|
1634
|
+
start_time=time_range.start_time,
|
|
1635
|
+
end_time=time_range.end_time,
|
|
1636
|
+
)
|
|
1637
|
+
|
|
1638
|
+
cache_info = CacheInfo(
|
|
1639
|
+
cached=db_stats.cached,
|
|
1640
|
+
cached_at=db_stats.cached_at,
|
|
1641
|
+
ttl_seconds=cache_ttl_seconds if use_cache else None,
|
|
1642
|
+
)
|
|
1643
|
+
|
|
1644
|
+
return EscalationStatsEnhanced(
|
|
1645
|
+
total_incidents=db_stats.total_incidents,
|
|
1646
|
+
by_state=db_stats.by_state,
|
|
1647
|
+
active_count=db_stats.active_count,
|
|
1648
|
+
total_policies=db_stats.total_policies,
|
|
1649
|
+
avg_resolution_time_minutes=avg_resolution_time_minutes,
|
|
1650
|
+
time_range=time_range_filter,
|
|
1651
|
+
cache_info=cache_info,
|
|
1652
|
+
)
|
|
1653
|
+
|
|
1654
|
+
|
|
1655
|
+
# =============================================================================
|
|
1656
|
+
# Escalation Scheduler Endpoints
|
|
1657
|
+
# =============================================================================
|
|
1658
|
+
|
|
1659
|
+
|
|
1660
|
+
def _get_scheduler_service() -> Any:
    """Get the escalation scheduler service.

    Returns:
        EscalationSchedulerService instance.
    """
    from ..core.notifications.escalation.scheduler import get_escalation_scheduler

    return get_escalation_scheduler()


@router.get("/escalation/scheduler/status", response_model=EscalationSchedulerStatus)
async def get_scheduler_status() -> EscalationSchedulerStatus:
    """Get the current status of the escalation scheduler.

    Returns scheduler running state, configuration, and metrics.
    """
    scheduler = _get_scheduler_service()
    status = scheduler.get_status()

    return EscalationSchedulerStatus(
        running=status["running"],
        enabled=status["enabled"],
        check_interval_seconds=status["check_interval_seconds"],
        last_check_at=status["last_check_at"],
        next_check_at=status["next_check_at"],
        check_count=status["check_count"],
        escalation_count=status["escalation_count"],
        error_count=status["error_count"],
        handlers=status["handlers"],
        strategy=status["strategy"],
    )

@router.post("/escalation/scheduler/start", response_model=EscalationSchedulerAction)
|
|
1694
|
+
async def start_scheduler() -> EscalationSchedulerAction:
|
|
1695
|
+
"""Start the escalation scheduler.
|
|
1696
|
+
|
|
1697
|
+
Begins periodic checking for pending escalations.
|
|
1698
|
+
"""
|
|
1699
|
+
scheduler = _get_scheduler_service()
|
|
1700
|
+
|
|
1701
|
+
if scheduler.is_running:
|
|
1702
|
+
return EscalationSchedulerAction(
|
|
1703
|
+
success=False,
|
|
1704
|
+
message="Scheduler is already running",
|
|
1705
|
+
action="start",
|
|
1706
|
+
timestamp=datetime.utcnow().isoformat(),
|
|
1707
|
+
)
|
|
1708
|
+
|
|
1709
|
+
await scheduler.start()
|
|
1710
|
+
|
|
1711
|
+
return EscalationSchedulerAction(
|
|
1712
|
+
success=True,
|
|
1713
|
+
message="Escalation scheduler started",
|
|
1714
|
+
action="start",
|
|
1715
|
+
timestamp=datetime.utcnow().isoformat(),
|
|
1716
|
+
)
|
|
1717
|
+
|
|
1718
|
+
|
|
1719
|
+
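Starting is idempotent at the API level: a second call returns success=False with an explanatory message rather than an HTTP error. A sketch of starting the scheduler and confirming via the status endpoint (base URL and /api/notifications prefix assumed):

import asyncio
import httpx

async def ensure_scheduler_running() -> None:
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        action = await client.post("/api/notifications/escalation/scheduler/start")
        print(action.json()["message"])  # started, or already running

        status = await client.get("/api/notifications/escalation/scheduler/status")
        body = status.json()
        assert body["running"], "scheduler should be running after start"
        print(f"next check at {body['next_check_at']}")

# asyncio.run(ensure_scheduler_running())
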
@router.post("/escalation/scheduler/stop", response_model=EscalationSchedulerAction)
|
|
1720
|
+
async def stop_scheduler() -> EscalationSchedulerAction:
|
|
1721
|
+
"""Stop the escalation scheduler.
|
|
1722
|
+
|
|
1723
|
+
Stops periodic checking for pending escalations.
|
|
1724
|
+
"""
|
|
1725
|
+
scheduler = _get_scheduler_service()
|
|
1726
|
+
|
|
1727
|
+
if not scheduler.is_running:
|
|
1728
|
+
return EscalationSchedulerAction(
|
|
1729
|
+
success=False,
|
|
1730
|
+
message="Scheduler is not running",
|
|
1731
|
+
action="stop",
|
|
1732
|
+
timestamp=datetime.utcnow().isoformat(),
|
|
1733
|
+
)
|
|
1734
|
+
|
|
1735
|
+
await scheduler.stop()
|
|
1736
|
+
|
|
1737
|
+
return EscalationSchedulerAction(
|
|
1738
|
+
success=True,
|
|
1739
|
+
message="Escalation scheduler stopped",
|
|
1740
|
+
action="stop",
|
|
1741
|
+
timestamp=datetime.utcnow().isoformat(),
|
|
1742
|
+
)
|
|
1743
|
+
|
|
1744
|
+
|
|
1745
|
+
@router.post("/escalation/scheduler/trigger", response_model=TriggerCheckResponse)
|
|
1746
|
+
async def trigger_check() -> TriggerCheckResponse:
|
|
1747
|
+
"""Trigger an immediate escalation check.
|
|
1748
|
+
|
|
1749
|
+
Manually triggers the escalation checker without waiting for
|
|
1750
|
+
the scheduled interval.
|
|
1751
|
+
"""
|
|
1752
|
+
scheduler = _get_scheduler_service()
|
|
1753
|
+
|
|
1754
|
+
if not scheduler.is_running:
|
|
1755
|
+
return TriggerCheckResponse(
|
|
1756
|
+
success=False,
|
|
1757
|
+
message="Scheduler is not running",
|
|
1758
|
+
escalations_processed=0,
|
|
1759
|
+
timestamp=datetime.utcnow().isoformat(),
|
|
1760
|
+
)
|
|
1761
|
+
|
|
1762
|
+
result = await scheduler.trigger_immediate_check()
|
|
1763
|
+
|
|
1764
|
+
return TriggerCheckResponse(
|
|
1765
|
+
success=result["success"],
|
|
1766
|
+
message=result["message"],
|
|
1767
|
+
escalations_processed=result.get("escalations_processed", 0),
|
|
1768
|
+
timestamp=result.get("timestamp", datetime.utcnow().isoformat()),
|
|
1769
|
+
)
|
|
1770
|
+
|
|
1771
|
+
|
|
1772
|
+
@router.put("/escalation/scheduler/config", response_model=EscalationSchedulerStatus)
|
|
1773
|
+
async def update_scheduler_config(
|
|
1774
|
+
request: EscalationSchedulerConfigRequest,
|
|
1775
|
+
) -> EscalationSchedulerStatus:
|
|
1776
|
+
"""Update escalation scheduler configuration.
|
|
1777
|
+
|
|
1778
|
+
Note: Some changes may require a scheduler restart to take effect.
|
|
1779
|
+
"""
|
|
1780
|
+
scheduler = _get_scheduler_service()
|
|
1781
|
+
|
|
1782
|
+
# Update config
|
|
1783
|
+
if request.check_interval_seconds is not None:
|
|
1784
|
+
scheduler.config.check_interval_seconds = request.check_interval_seconds
|
|
1785
|
+
|
|
1786
|
+
if request.max_escalations_per_check is not None:
|
|
1787
|
+
scheduler.config.max_escalations_per_check = request.max_escalations_per_check
|
|
1788
|
+
|
|
1789
|
+
if request.enabled is not None:
|
|
1790
|
+
scheduler.config.enabled = request.enabled
|
|
1791
|
+
# If disabling, stop the scheduler
|
|
1792
|
+
if not request.enabled and scheduler.is_running:
|
|
1793
|
+
await scheduler.stop()
|
|
1794
|
+
|
|
1795
|
+
# Return updated status
|
|
1796
|
+
status = scheduler.get_status()
|
|
1797
|
+
|
|
1798
|
+
return EscalationSchedulerStatus(
|
|
1799
|
+
running=status["running"],
|
|
1800
|
+
enabled=status["enabled"],
|
|
1801
|
+
check_interval_seconds=status["check_interval_seconds"],
|
|
1802
|
+
last_check_at=status["last_check_at"],
|
|
1803
|
+
next_check_at=status["next_check_at"],
|
|
1804
|
+
check_count=status["check_count"],
|
|
1805
|
+
escalation_count=status["escalation_count"],
|
|
1806
|
+
error_count=status["error_count"],
|
|
1807
|
+
handlers=status["handlers"],
|
|
1808
|
+
strategy=status["strategy"],
|
|
1809
|
+
)
|
|
1810
|
+
|
|
1811
|
+
|
|
1812
|
+
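Per the handler above, setting enabled=false also stops a running scheduler as a side effect, while, per the docstring, other changes such as a new check_interval_seconds may only apply after a restart. A hedged sketch of pausing escalations and then retuning the interval (URL prefix and the need for an explicit restart are assumptions):

import asyncio
import httpx

async def pause_then_retune_scheduler() -> None:
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        # Disabling stops the scheduler via the handler's side effect.
        await client.put(
            "/api/notifications/escalation/scheduler/config",
            json={"enabled": False},
        )
        # Re-enable with a shorter interval, then start explicitly, since
        # interval changes may not apply to an already-running loop.
        await client.put(
            "/api/notifications/escalation/scheduler/config",
            json={"enabled": True, "check_interval_seconds": 15},
        )
        await client.post("/api/notifications/escalation/scheduler/start")

# asyncio.run(pause_then_retune_scheduler())
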
@router.post("/escalation/scheduler/reset-metrics", response_model=EscalationSchedulerAction)
|
|
1813
|
+
async def reset_scheduler_metrics() -> EscalationSchedulerAction:
|
|
1814
|
+
"""Reset escalation scheduler metrics.
|
|
1815
|
+
|
|
1816
|
+
Resets counters for checks, escalations, and errors.
|
|
1817
|
+
"""
|
|
1818
|
+
scheduler = _get_scheduler_service()
|
|
1819
|
+
scheduler.reset_metrics()
|
|
1820
|
+
|
|
1821
|
+
return EscalationSchedulerAction(
|
|
1822
|
+
success=True,
|
|
1823
|
+
message="Scheduler metrics reset",
|
|
1824
|
+
action="reset-metrics",
|
|
1825
|
+
timestamp=datetime.utcnow().isoformat(),
|
|
1826
|
+
)
|
|
1827
|
+
|
|
1828
|
+
|
|
1829
|
+
# =============================================================================
|
|
1830
|
+
# Config Import/Export Endpoints
|
|
1831
|
+
# =============================================================================
|
|
1832
|
+
|
|
1833
|
+
|
|
1834
|
+
@router.get("/config/export", response_model=NotificationConfigBundle)
|
|
1835
|
+
async def export_notification_config(
|
|
1836
|
+
include_routing_rules: bool = Query(default=True, description="Include routing rules"),
|
|
1837
|
+
include_deduplication: bool = Query(default=True, description="Include deduplication configs"),
|
|
1838
|
+
include_throttling: bool = Query(default=True, description="Include throttling configs"),
|
|
1839
|
+
include_escalation: bool = Query(default=True, description="Include escalation policies"),
|
|
1840
|
+
session: AsyncSession = Depends(get_session),
|
|
1841
|
+
) -> NotificationConfigBundle:
|
|
1842
|
+
"""Export all notification configurations as a portable bundle.
|
|
1843
|
+
|
|
1844
|
+
Returns a JSON bundle containing all notification configurations that can
|
|
1845
|
+
be saved to a file and later imported to restore settings.
|
|
1846
|
+
|
|
1847
|
+
Args:
|
|
1848
|
+
include_routing_rules: Include routing rules in export.
|
|
1849
|
+
include_deduplication: Include deduplication configs in export.
|
|
1850
|
+
include_throttling: Include throttling configs in export.
|
|
1851
|
+
include_escalation: Include escalation policies in export.
|
|
1852
|
+
|
|
1853
|
+
Returns:
|
|
1854
|
+
NotificationConfigBundle with all requested configurations.
|
|
1855
|
+
"""
|
|
1856
|
+
bundle = NotificationConfigBundle(
|
|
1857
|
+
version="1.0",
|
|
1858
|
+
exported_at=datetime.utcnow(),
|
|
1859
|
+
routing_rules=[],
|
|
1860
|
+
deduplication_configs=[],
|
|
1861
|
+
throttling_configs=[],
|
|
1862
|
+
escalation_policies=[],
|
|
1863
|
+
)
|
|
1864
|
+
|
|
1865
|
+
# Export routing rules
|
|
1866
|
+
if include_routing_rules:
|
|
1867
|
+
result = await session.execute(
|
|
1868
|
+
select(RoutingRuleModel).order_by(RoutingRuleModel.priority.desc())
|
|
1869
|
+
)
|
|
1870
|
+
rules = result.scalars().all()
|
|
1871
|
+
bundle.routing_rules = [_routing_rule_to_response(r) for r in rules]
|
|
1872
|
+
|
|
1873
|
+
# Export deduplication configs
|
|
1874
|
+
if include_deduplication:
|
|
1875
|
+
result = await session.execute(
|
|
1876
|
+
select(DeduplicationConfig).order_by(DeduplicationConfig.created_at.desc())
|
|
1877
|
+
)
|
|
1878
|
+
configs = result.scalars().all()
|
|
1879
|
+
bundle.deduplication_configs = [_dedup_config_to_response(c) for c in configs]
|
|
1880
|
+
|
|
1881
|
+
# Export throttling configs
|
|
1882
|
+
if include_throttling:
|
|
1883
|
+
result = await session.execute(
|
|
1884
|
+
select(ThrottlingConfig).order_by(ThrottlingConfig.created_at.desc())
|
|
1885
|
+
)
|
|
1886
|
+
configs = result.scalars().all()
|
|
1887
|
+
bundle.throttling_configs = [_throttle_config_to_response(c) for c in configs]
|
|
1888
|
+
|
|
1889
|
+
# Export escalation policies
|
|
1890
|
+
if include_escalation:
|
|
1891
|
+
result = await session.execute(
|
|
1892
|
+
select(EscalationPolicyModel).order_by(EscalationPolicyModel.created_at.desc())
|
|
1893
|
+
)
|
|
1894
|
+
policies = result.scalars().all()
|
|
1895
|
+
bundle.escalation_policies = [_escalation_policy_to_response(p) for p in policies]
|
|
1896
|
+
|
|
1897
|
+
logger.info(
|
|
1898
|
+
f"Exported notification config: {len(bundle.routing_rules)} rules, "
|
|
1899
|
+
f"{len(bundle.deduplication_configs)} dedup configs, "
|
|
1900
|
+
f"{len(bundle.throttling_configs)} throttle configs, "
|
|
1901
|
+
f"{len(bundle.escalation_policies)} escalation policies"
|
|
1902
|
+
)
|
|
1903
|
+
|
|
1904
|
+
return bundle
|
|
1905
|
+
|
|
1906
|
+
|
|
1907
|
+
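A sketch of snapshotting the bundle to disk for backup or migration. The include_* toggles come from the endpoint above; the base URL, prefix, and file name are placeholders:

import asyncio
import json
import httpx

async def backup_notification_config(path: str = "notification-config.json") -> None:
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        # Escalation policies excluded here just to show the toggles.
        resp = await client.get(
            "/api/notifications/config/export",
            params={"include_escalation": "false"},
        )
        resp.raise_for_status()
    with open(path, "w") as f:
        json.dump(resp.json(), f, indent=2)

# asyncio.run(backup_notification_config())
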
@router.post("/config/import/preview", response_model=ConfigImportPreview)
|
|
1908
|
+
async def preview_notification_config_import(
|
|
1909
|
+
bundle: NotificationConfigBundle,
|
|
1910
|
+
session: AsyncSession = Depends(get_session),
|
|
1911
|
+
) -> ConfigImportPreview:
|
|
1912
|
+
"""Preview import operation to detect conflicts before execution.
|
|
1913
|
+
|
|
1914
|
+
Analyzes the configuration bundle and detects any conflicts with
|
|
1915
|
+
existing configurations based on ID matching.
|
|
1916
|
+
|
|
1917
|
+
Args:
|
|
1918
|
+
bundle: The configuration bundle to analyze.
|
|
1919
|
+
|
|
1920
|
+
Returns:
|
|
1921
|
+
ConfigImportPreview with conflict information and counts.
|
|
1922
|
+
"""
|
|
1923
|
+
conflicts: list[ConfigImportConflict] = []
|
|
1924
|
+
total_configs = (
|
|
1925
|
+
len(bundle.routing_rules)
|
|
1926
|
+
+ len(bundle.deduplication_configs)
|
|
1927
|
+
+ len(bundle.throttling_configs)
|
|
1928
|
+
+ len(bundle.escalation_policies)
|
|
1929
|
+
)
|
|
1930
|
+
|
|
1931
|
+
# Check routing rules for conflicts
|
|
1932
|
+
for rule in bundle.routing_rules:
|
|
1933
|
+
result = await session.execute(
|
|
1934
|
+
select(RoutingRuleModel).where(RoutingRuleModel.id == rule.id)
|
|
1935
|
+
)
|
|
1936
|
+
existing = result.scalar_one_or_none()
|
|
1937
|
+
if existing:
|
|
1938
|
+
conflicts.append(
|
|
1939
|
+
ConfigImportConflict(
|
|
1940
|
+
config_type="routing_rule",
|
|
1941
|
+
config_id=rule.id,
|
|
1942
|
+
config_name=rule.name,
|
|
1943
|
+
existing_name=existing.name,
|
|
1944
|
+
suggested_action="skip",
|
|
1945
|
+
)
|
|
1946
|
+
)
|
|
1947
|
+
|
|
1948
|
+
# Check deduplication configs for conflicts
|
|
1949
|
+
for config in bundle.deduplication_configs:
|
|
1950
|
+
result = await session.execute(
|
|
1951
|
+
select(DeduplicationConfig).where(DeduplicationConfig.id == config.id)
|
|
1952
|
+
)
|
|
1953
|
+
existing = result.scalar_one_or_none()
|
|
1954
|
+
if existing:
|
|
1955
|
+
conflicts.append(
|
|
1956
|
+
ConfigImportConflict(
|
|
1957
|
+
config_type="deduplication",
|
|
1958
|
+
config_id=config.id,
|
|
1959
|
+
config_name=config.name,
|
|
1960
|
+
existing_name=existing.name,
|
|
1961
|
+
suggested_action="skip",
|
|
1962
|
+
)
|
|
1963
|
+
)
|
|
1964
|
+
|
|
1965
|
+
# Check throttling configs for conflicts
|
|
1966
|
+
for config in bundle.throttling_configs:
|
|
1967
|
+
result = await session.execute(
|
|
1968
|
+
select(ThrottlingConfig).where(ThrottlingConfig.id == config.id)
|
|
1969
|
+
)
|
|
1970
|
+
existing = result.scalar_one_or_none()
|
|
1971
|
+
if existing:
|
|
1972
|
+
conflicts.append(
|
|
1973
|
+
ConfigImportConflict(
|
|
1974
|
+
config_type="throttling",
|
|
1975
|
+
config_id=config.id,
|
|
1976
|
+
config_name=config.name,
|
|
1977
|
+
existing_name=existing.name,
|
|
1978
|
+
suggested_action="skip",
|
|
1979
|
+
)
|
|
1980
|
+
)
|
|
1981
|
+
|
|
1982
|
+
# Check escalation policies for conflicts
|
|
1983
|
+
for policy in bundle.escalation_policies:
|
|
1984
|
+
result = await session.execute(
|
|
1985
|
+
select(EscalationPolicyModel).where(EscalationPolicyModel.id == policy.id)
|
|
1986
|
+
)
|
|
1987
|
+
existing = result.scalar_one_or_none()
|
|
1988
|
+
if existing:
|
|
1989
|
+
conflicts.append(
|
|
1990
|
+
ConfigImportConflict(
|
|
1991
|
+
config_type="escalation",
|
|
1992
|
+
config_id=policy.id,
|
|
1993
|
+
config_name=policy.name,
|
|
1994
|
+
existing_name=existing.name,
|
|
1995
|
+
suggested_action="skip",
|
|
1996
|
+
)
|
|
1997
|
+
)
|
|
1998
|
+
|
|
1999
|
+
new_configs = total_configs - len(conflicts)
|
|
2000
|
+
|
|
2001
|
+
return ConfigImportPreview(
|
|
2002
|
+
total_configs=total_configs,
|
|
2003
|
+
new_configs=new_configs,
|
|
2004
|
+
conflicts=conflicts,
|
|
2005
|
+
routing_rules_count=len(bundle.routing_rules),
|
|
2006
|
+
deduplication_configs_count=len(bundle.deduplication_configs),
|
|
2007
|
+
throttling_configs_count=len(bundle.throttling_configs),
|
|
2008
|
+
escalation_policies_count=len(bundle.escalation_policies),
|
|
2009
|
+
)
|
|
2010
|
+
|
|
2011
|
+
|
|
2012
|
+
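Since conflicts are detected purely by ID match, previewing a bundle against the same instance that produced it will flag every config; that is the expected signal for choosing skip versus overwrite below. A sketch of driving the preview from a saved bundle (file path, base URL, and prefix are placeholders):

import asyncio
import json
import httpx

async def preview_import(path: str = "notification-config.json") -> None:
    with open(path) as f:
        bundle = json.load(f)
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        resp = await client.post("/api/notifications/config/import/preview", json=bundle)
        resp.raise_for_status()
        preview = resp.json()
    print(f"{preview['new_configs']} new / {preview['total_configs']} total")
    for c in preview["conflicts"]:
        print(f"conflict: {c['config_type']} {c['config_id']} -> {c['suggested_action']}")

# asyncio.run(preview_import())
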
@router.post("/config/import", response_model=ConfigImportResult)
|
|
2013
|
+
async def import_notification_config(
|
|
2014
|
+
request: ConfigImportRequest,
|
|
2015
|
+
session: AsyncSession = Depends(get_session),
|
|
2016
|
+
) -> ConfigImportResult:
|
|
2017
|
+
"""Import notification configurations from a bundle.
|
|
2018
|
+
|
|
2019
|
+
Imports configurations with conflict resolution based on the specified strategy:
|
|
2020
|
+
- skip: Skip configs that already exist (default)
|
|
2021
|
+
- overwrite: Replace existing configs with imported ones
|
|
2022
|
+
- rename: Create new configs with modified IDs
|
|
2023
|
+
|
|
2024
|
+
Args:
|
|
2025
|
+
request: Import request containing the bundle and options.
|
|
2026
|
+
|
|
2027
|
+
Returns:
|
|
2028
|
+
ConfigImportResult with summary of the import operation.
|
|
2029
|
+
"""
|
|
2030
|
+
bundle = request.bundle
|
|
2031
|
+
conflict_resolution = request.conflict_resolution
|
|
2032
|
+
errors: list[str] = []
|
|
2033
|
+
created_ids: dict[str, list[str]] = {
|
|
2034
|
+
"routing_rules": [],
|
|
2035
|
+
"deduplication_configs": [],
|
|
2036
|
+
"throttling_configs": [],
|
|
2037
|
+
"escalation_policies": [],
|
|
2038
|
+
}
|
|
2039
|
+
created_count = 0
|
|
2040
|
+
skipped_count = 0
|
|
2041
|
+
overwritten_count = 0
|
|
2042
|
+
|
|
2043
|
+
try:
|
|
2044
|
+
# Import routing rules
|
|
2045
|
+
for rule in bundle.routing_rules:
|
|
2046
|
+
try:
|
|
2047
|
+
result = await session.execute(
|
|
2048
|
+
select(RoutingRuleModel).where(RoutingRuleModel.id == rule.id)
|
|
2049
|
+
)
|
|
2050
|
+
existing = result.scalar_one_or_none()
|
|
2051
|
+
|
|
2052
|
+
if existing:
|
|
2053
|
+
if conflict_resolution == "skip":
|
|
2054
|
+
skipped_count += 1
|
|
2055
|
+
continue
|
|
2056
|
+
elif conflict_resolution == "overwrite":
|
|
2057
|
+
existing.name = rule.name
|
|
2058
|
+
existing.rule_config = rule.rule_config
|
|
2059
|
+
existing.actions = rule.actions
|
|
2060
|
+
existing.priority = rule.priority
|
|
2061
|
+
existing.is_active = rule.is_active
|
|
2062
|
+
existing.stop_on_match = rule.stop_on_match
|
|
2063
|
+
existing.routing_metadata = rule.metadata
|
|
2064
|
+
overwritten_count += 1
|
|
2065
|
+
created_ids["routing_rules"].append(rule.id)
|
|
2066
|
+
else: # rename
|
|
2067
|
+
import uuid
|
|
2068
|
+
new_id = str(uuid.uuid4())
|
|
2069
|
+
new_rule = RoutingRuleModel(
|
|
2070
|
+
id=new_id,
|
|
2071
|
+
name=f"{rule.name} (imported)",
|
|
2072
|
+
rule_config=rule.rule_config,
|
|
2073
|
+
actions=rule.actions,
|
|
2074
|
+
priority=rule.priority,
|
|
2075
|
+
is_active=rule.is_active,
|
|
2076
|
+
stop_on_match=rule.stop_on_match,
|
|
2077
|
+
routing_metadata=rule.metadata,
|
|
2078
|
+
)
|
|
2079
|
+
session.add(new_rule)
|
|
2080
|
+
created_count += 1
|
|
2081
|
+
created_ids["routing_rules"].append(new_id)
|
|
2082
|
+
else:
|
|
2083
|
+
new_rule = RoutingRuleModel(
|
|
2084
|
+
id=rule.id,
|
|
2085
|
+
name=rule.name,
|
|
2086
|
+
rule_config=rule.rule_config,
|
|
2087
|
+
actions=rule.actions,
|
|
2088
|
+
priority=rule.priority,
|
|
2089
|
+
is_active=rule.is_active,
|
|
2090
|
+
stop_on_match=rule.stop_on_match,
|
|
2091
|
+
routing_metadata=rule.metadata,
|
|
2092
|
+
)
|
|
2093
|
+
session.add(new_rule)
|
|
2094
|
+
created_count += 1
|
|
2095
|
+
created_ids["routing_rules"].append(rule.id)
|
|
2096
|
+
except Exception as e:
|
|
2097
|
+
errors.append(f"Failed to import routing rule '{rule.name}': {str(e)}")
|
|
2098
|
+
|
|
2099
|
+
# Import deduplication configs
|
|
2100
|
+
for config in bundle.deduplication_configs:
|
|
2101
|
+
try:
|
|
2102
|
+
result = await session.execute(
|
|
2103
|
+
select(DeduplicationConfig).where(DeduplicationConfig.id == config.id)
|
|
2104
|
+
)
|
|
2105
|
+
existing = result.scalar_one_or_none()
|
|
2106
|
+
|
|
2107
|
+
if existing:
|
|
2108
|
+
if conflict_resolution == "skip":
|
|
2109
|
+
skipped_count += 1
|
|
2110
|
+
continue
|
|
2111
|
+
elif conflict_resolution == "overwrite":
|
|
2112
|
+
existing.name = config.name
|
|
2113
|
+
existing.strategy = config.strategy
|
|
2114
|
+
existing.policy = config.policy
|
|
2115
|
+
existing.window_seconds = config.window_seconds
|
|
2116
|
+
existing.is_active = config.is_active
|
|
2117
|
+
overwritten_count += 1
|
|
2118
|
+
created_ids["deduplication_configs"].append(config.id)
|
|
2119
|
+
else: # rename
|
|
2120
|
+
import uuid
|
|
2121
|
+
new_id = str(uuid.uuid4())
|
|
2122
|
+
new_config = DeduplicationConfig(
|
|
2123
|
+
id=new_id,
|
|
2124
|
+
name=f"{config.name} (imported)",
|
|
2125
|
+
strategy=config.strategy,
|
|
2126
|
+
policy=config.policy,
|
|
2127
|
+
window_seconds=config.window_seconds,
|
|
2128
|
+
is_active=config.is_active,
|
|
2129
|
+
)
|
|
2130
|
+
session.add(new_config)
|
|
2131
|
+
created_count += 1
|
|
2132
|
+
created_ids["deduplication_configs"].append(new_id)
|
|
2133
|
+
else:
|
|
2134
|
+
new_config = DeduplicationConfig(
|
|
2135
|
+
id=config.id,
|
|
2136
|
+
name=config.name,
|
|
2137
|
+
strategy=config.strategy,
|
|
2138
|
+
policy=config.policy,
|
|
2139
|
+
window_seconds=config.window_seconds,
|
|
2140
|
+
is_active=config.is_active,
|
|
2141
|
+
)
|
|
2142
|
+
session.add(new_config)
|
|
2143
|
+
created_count += 1
|
|
2144
|
+
created_ids["deduplication_configs"].append(config.id)
|
|
2145
|
+
except Exception as e:
|
|
2146
|
+
errors.append(f"Failed to import deduplication config '{config.name}': {str(e)}")
|
|
2147
|
+
|
|
2148
|
+
# Import throttling configs
|
|
2149
|
+
for config in bundle.throttling_configs:
|
|
2150
|
+
try:
|
|
2151
|
+
result = await session.execute(
|
|
2152
|
+
select(ThrottlingConfig).where(ThrottlingConfig.id == config.id)
|
|
2153
|
+
)
|
|
2154
|
+
existing = result.scalar_one_or_none()
|
|
2155
|
+
|
|
2156
|
+
if existing:
|
|
2157
|
+
if conflict_resolution == "skip":
|
|
2158
|
+
skipped_count += 1
|
|
2159
|
+
continue
|
|
2160
|
+
elif conflict_resolution == "overwrite":
|
|
2161
|
+
existing.name = config.name
|
|
2162
|
+
existing.per_minute = config.per_minute
|
|
2163
|
+
existing.per_hour = config.per_hour
|
|
2164
|
+
existing.per_day = config.per_day
|
|
2165
|
+
existing.burst_allowance = config.burst_allowance
|
|
2166
|
+
existing.channel_id = config.channel_id
|
|
2167
|
+
existing.is_active = config.is_active
|
|
2168
|
+
overwritten_count += 1
|
|
2169
|
+
created_ids["throttling_configs"].append(config.id)
|
|
2170
|
+
else: # rename
|
|
2171
|
+
import uuid
|
|
2172
|
+
new_id = str(uuid.uuid4())
|
|
2173
|
+
new_config = ThrottlingConfig(
|
|
2174
|
+
id=new_id,
|
|
2175
|
+
name=f"{config.name} (imported)",
|
|
2176
|
+
per_minute=config.per_minute,
|
|
2177
|
+
per_hour=config.per_hour,
|
|
2178
|
+
per_day=config.per_day,
|
|
2179
|
+
burst_allowance=config.burst_allowance,
|
|
2180
|
+
channel_id=config.channel_id,
|
|
2181
|
+
is_active=config.is_active,
|
|
2182
|
+
)
|
|
2183
|
+
session.add(new_config)
|
|
2184
|
+
created_count += 1
|
|
2185
|
+
created_ids["throttling_configs"].append(new_id)
|
|
2186
|
+
else:
|
|
2187
|
+
new_config = ThrottlingConfig(
|
|
2188
|
+
id=config.id,
|
|
2189
|
+
name=config.name,
|
|
2190
|
+
per_minute=config.per_minute,
|
|
2191
|
+
per_hour=config.per_hour,
|
|
2192
|
+
per_day=config.per_day,
|
|
2193
|
+
burst_allowance=config.burst_allowance,
|
|
2194
|
+
channel_id=config.channel_id,
|
|
2195
|
+
is_active=config.is_active,
|
|
2196
|
+
)
|
|
2197
|
+
session.add(new_config)
|
|
2198
|
+
created_count += 1
|
|
2199
|
+
created_ids["throttling_configs"].append(config.id)
|
|
2200
|
+
except Exception as e:
|
|
2201
|
+
errors.append(f"Failed to import throttling config '{config.name}': {str(e)}")
|
|
2202
|
+
|
|
2203
|
+
# Import escalation policies
|
|
2204
|
+
for policy in bundle.escalation_policies:
|
|
2205
|
+
try:
|
|
2206
|
+
result = await session.execute(
|
|
2207
|
+
select(EscalationPolicyModel).where(EscalationPolicyModel.id == policy.id)
|
|
2208
|
+
)
|
|
2209
|
+
existing = result.scalar_one_or_none()
|
|
2210
|
+
|
|
2211
|
+
if existing:
|
|
2212
|
+
if conflict_resolution == "skip":
|
|
2213
|
+
skipped_count += 1
|
|
2214
|
+
continue
|
|
2215
|
+
elif conflict_resolution == "overwrite":
|
|
2216
|
+
existing.name = policy.name
|
|
2217
|
+
existing.description = policy.description
|
|
2218
|
+
existing.levels = policy.levels
|
|
2219
|
+
existing.auto_resolve_on_success = policy.auto_resolve_on_success
|
|
2220
|
+
existing.max_escalations = policy.max_escalations
|
|
2221
|
+
existing.is_active = policy.is_active
|
|
2222
|
+
overwritten_count += 1
|
|
2223
|
+
created_ids["escalation_policies"].append(policy.id)
|
|
2224
|
+
else: # rename
|
|
2225
|
+
import uuid
|
|
2226
|
+
new_id = str(uuid.uuid4())
|
|
2227
|
+
new_policy = EscalationPolicyModel(
|
|
2228
|
+
id=new_id,
|
|
2229
|
+
name=f"{policy.name} (imported)",
|
|
2230
|
+
description=policy.description,
|
|
2231
|
+
levels=policy.levels,
|
|
2232
|
+
auto_resolve_on_success=policy.auto_resolve_on_success,
|
|
2233
|
+
max_escalations=policy.max_escalations,
|
|
2234
|
+
is_active=policy.is_active,
|
|
2235
|
+
)
|
|
2236
|
+
session.add(new_policy)
|
|
2237
|
+
created_count += 1
|
|
2238
|
+
created_ids["escalation_policies"].append(new_id)
|
|
2239
|
+
else:
|
|
2240
|
+
new_policy = EscalationPolicyModel(
|
|
2241
|
+
id=policy.id,
|
|
2242
|
+
name=policy.name,
|
|
2243
|
+
description=policy.description,
|
|
2244
|
+
levels=policy.levels,
|
|
2245
|
+
auto_resolve_on_success=policy.auto_resolve_on_success,
|
|
2246
|
+
max_escalations=policy.max_escalations,
|
|
2247
|
+
is_active=policy.is_active,
|
|
2248
|
+
)
|
|
2249
|
+
session.add(new_policy)
|
|
2250
|
+
created_count += 1
|
|
2251
|
+
created_ids["escalation_policies"].append(policy.id)
|
|
2252
|
+
except Exception as e:
|
|
2253
|
+
errors.append(f"Failed to import escalation policy '{policy.name}': {str(e)}")
|
|
2254
|
+
|
|
2255
|
+
await session.commit()
|
|
2256
|
+
|
|
2257
|
+
logger.info(
|
|
2258
|
+
f"Imported notification config: {created_count} created, "
|
|
2259
|
+
f"{skipped_count} skipped, {overwritten_count} overwritten, "
|
|
2260
|
+
f"{len(errors)} errors"
|
|
2261
|
+
)
|
|
2262
|
+
|
|
2263
|
+
return ConfigImportResult(
|
|
2264
|
+
success=len(errors) == 0,
|
|
2265
|
+
message=f"Import completed: {created_count} created, {skipped_count} skipped, {overwritten_count} overwritten",
|
|
2266
|
+
created_count=created_count,
|
|
2267
|
+
skipped_count=skipped_count,
|
|
2268
|
+
overwritten_count=overwritten_count,
|
|
2269
|
+
errors=errors,
|
|
2270
|
+
created_ids=created_ids,
|
|
2271
|
+
)
|
|
2272
|
+
|
|
2273
|
+
except Exception as e:
|
|
2274
|
+
await session.rollback()
|
|
2275
|
+
logger.error(f"Failed to import notification config: {e}")
|
|
2276
|
+
return ConfigImportResult(
|
|
2277
|
+
success=False,
|
|
2278
|
+
message=f"Import failed: {str(e)}",
|
|
2279
|
+
created_count=0,
|
|
2280
|
+
skipped_count=0,
|
|
2281
|
+
overwritten_count=0,
|
|
2282
|
+
errors=[str(e)],
|
|
2283
|
+
created_ids={},
|
|
2284
|
+
)
|
|
2285
|
+
|
|
2286
|
+
|
|
2287
|
+
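Restoring a saved bundle with explicit conflict handling. The request body wraps the bundle together with the strategy; the bundle and conflict_resolution field names follow ConfigImportRequest as used above, while the file path, base URL, and prefix are placeholders:

import asyncio
import json
import httpx

async def restore_config(path: str = "notification-config.json") -> None:
    with open(path) as f:
        bundle = json.load(f)
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        resp = await client.post(
            "/api/notifications/config/import",
            # "rename" would keep both copies, appending " (imported)" to names.
            json={"bundle": bundle, "conflict_resolution": "overwrite"},
        )
        resp.raise_for_status()
        result = resp.json()
    print(result["message"])
    for err in result["errors"]:
        print(f"error: {err}")

# asyncio.run(restore_config())
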
# =============================================================================
# Stats Cache Management Endpoints
# =============================================================================


@router.get("/stats/cache", response_model=StatsCacheStatus)
async def get_stats_cache_status() -> StatsCacheStatus:
    """Get stats cache status.

    Returns cache hit rate, entry count, and configuration.
    """
    cache = get_stats_cache()
    stats = await cache.get_stats()

    return StatsCacheStatus(
        total_entries=stats["total_entries"],
        valid_entries=stats["valid_entries"],
        expired_entries=stats["expired_entries"],
        max_entries=stats["max_entries"],
        default_ttl_seconds=stats["default_ttl_seconds"],
        total_hits=stats["total_hits"],
        total_misses=stats["total_misses"],
        hit_rate=stats["hit_rate"],
    )

@router.post("/stats/cache/invalidate")
|
|
2314
|
+
async def invalidate_stats_cache(
|
|
2315
|
+
target: str = Query(
|
|
2316
|
+
default="all",
|
|
2317
|
+
description="Cache target to invalidate: all, escalation, deduplication, throttling",
|
|
2318
|
+
),
|
|
2319
|
+
session: AsyncSession = Depends(get_session),
|
|
2320
|
+
) -> dict[str, Any]:
|
|
2321
|
+
"""Invalidate stats cache entries.
|
|
2322
|
+
|
|
2323
|
+
Use this endpoint to force fresh database queries on the next stats request.
|
|
2324
|
+
|
|
2325
|
+
Args:
|
|
2326
|
+
target: Which cache entries to invalidate
|
|
2327
|
+
- all: Invalidate all stats cache entries
|
|
2328
|
+
- escalation: Invalidate only escalation stats
|
|
2329
|
+
- deduplication: Invalidate only deduplication stats
|
|
2330
|
+
- throttling: Invalidate only throttling stats
|
|
2331
|
+
"""
|
|
2332
|
+
aggregator = StatsAggregator(session)
|
|
2333
|
+
|
|
2334
|
+
if target == "all":
|
|
2335
|
+
await aggregator.invalidate_all_cache()
|
|
2336
|
+
message = "All stats cache entries invalidated"
|
|
2337
|
+
elif target == "escalation":
|
|
2338
|
+
count = await aggregator.invalidate_escalation_cache()
|
|
2339
|
+
message = f"{count} escalation cache entries invalidated"
|
|
2340
|
+
elif target == "deduplication":
|
|
2341
|
+
count = await aggregator.invalidate_deduplication_cache()
|
|
2342
|
+
message = f"{count} deduplication cache entries invalidated"
|
|
2343
|
+
elif target == "throttling":
|
|
2344
|
+
count = await aggregator.invalidate_throttling_cache()
|
|
2345
|
+
message = f"{count} throttling cache entries invalidated"
|
|
2346
|
+
else:
|
|
2347
|
+
raise HTTPException(
|
|
2348
|
+
status_code=400,
|
|
2349
|
+
detail=f"Invalid target: {target}. Must be one of: all, escalation, deduplication, throttling",
|
|
2350
|
+
)
|
|
2351
|
+
|
|
2352
|
+
return {
|
|
2353
|
+
"success": True,
|
|
2354
|
+
"message": message,
|
|
2355
|
+
"target": target,
|
|
2356
|
+
"timestamp": datetime.utcnow().isoformat(),
|
|
2357
|
+
}
|
|
2358
|
+
|
|
2359
|
+
|
|
2360
|
+
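A typical sequence after bulk config edits: invalidate the throttling entries, then re-query the enhanced stats so the next read rebuilds the cache. A sketch with the usual assumed base URL and prefix:

import asyncio
import httpx

async def refresh_throttling_stats() -> dict:
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        inv = await client.post(
            "/api/notifications/stats/cache/invalidate",
            params={"target": "throttling"},
        )
        print(inv.json()["message"])

        # First read after invalidation misses the cache and hits the DB;
        # cache_info.cached should be False in this response.
        stats = await client.get("/api/notifications/throttling/stats/enhanced")
        return stats.json()

# asyncio.run(refresh_throttling_stats())
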
# =============================================================================
# Jinja2 Template Validation Endpoints
# =============================================================================

@router.post("/routing/rules/validate-jinja2")
|
|
2366
|
+
async def validate_jinja2_template(
|
|
2367
|
+
request: dict[str, Any],
|
|
2368
|
+
) -> dict[str, Any]:
|
|
2369
|
+
"""Validate a Jinja2 template for use in routing rules.
|
|
2370
|
+
|
|
2371
|
+
This endpoint validates Jinja2 template syntax and optionally
|
|
2372
|
+
renders the template with provided sample data.
|
|
2373
|
+
|
|
2374
|
+
Args:
|
|
2375
|
+
request: Dictionary containing:
|
|
2376
|
+
- template: The Jinja2 template string to validate
|
|
2377
|
+
- sample_data: Optional sample event data for rendering preview
|
|
2378
|
+
- expected_result: Optional expected result ("true" or "false")
|
|
2379
|
+
|
|
2380
|
+
Returns:
|
|
2381
|
+
Dictionary containing:
|
|
2382
|
+
- valid: Whether the template is syntactically valid
|
|
2383
|
+
- rendered_output: The rendered output if sample_data provided
|
|
2384
|
+
- error: Error message if validation failed
|
|
2385
|
+
- error_line: Line number where error occurred (if applicable)
|
|
2386
|
+
"""
|
|
2387
|
+
from ..core.notifications.routing.jinja2_engine import Jinja2Engine
|
|
2388
|
+
|
|
2389
|
+
template = request.get("template", "")
|
|
2390
|
+
sample_data = request.get("sample_data", {})
|
|
2391
|
+
expected_result = request.get("expected_result", "true")
|
|
2392
|
+
|
|
2393
|
+
if not template:
|
|
2394
|
+
return {
|
|
2395
|
+
"valid": False,
|
|
2396
|
+
"error": "Template cannot be empty",
|
|
2397
|
+
"rendered_output": None,
|
|
2398
|
+
"error_line": None,
|
|
2399
|
+
}
|
|
2400
|
+
|
|
2401
|
+
try:
|
|
2402
|
+
# Create Jinja2 engine instance
|
|
2403
|
+
engine = Jinja2Engine()
|
|
2404
|
+
|
|
2405
|
+
# Validate template syntax by compiling it
|
|
2406
|
+
compiled = engine.compile_template(template)
|
|
2407
|
+
|
|
2408
|
+
result: dict[str, Any] = {
|
|
2409
|
+
"valid": True,
|
|
2410
|
+
"error": None,
|
|
2411
|
+
"error_line": None,
|
|
2412
|
+
"rendered_output": None,
|
|
2413
|
+
}
|
|
2414
|
+
|
|
2415
|
+
# If sample data provided, render the template
|
|
2416
|
+
if sample_data:
|
|
2417
|
+
try:
|
|
2418
|
+
rendered = engine.render(template, sample_data)
|
|
2419
|
+
result["rendered_output"] = str(rendered)
|
|
2420
|
+
|
|
2421
|
+
# Check if output matches expected result
|
|
2422
|
+
if expected_result:
|
|
2423
|
+
rendered_lower = str(rendered).lower().strip()
|
|
2424
|
+
expected_lower = expected_result.lower().strip()
|
|
2425
|
+
result["matches_expected"] = rendered_lower == expected_lower
|
|
2426
|
+
|
|
2427
|
+
except Exception as render_error:
|
|
2428
|
+
# Template is valid but rendering failed with given data
|
|
2429
|
+
result["rendered_output"] = None
|
|
2430
|
+
result["render_error"] = str(render_error)
|
|
2431
|
+
|
|
2432
|
+
return result
|
|
2433
|
+
|
|
2434
|
+
except Exception as e:
|
|
2435
|
+
error_msg = str(e)
|
|
2436
|
+
error_line = None
|
|
2437
|
+
|
|
2438
|
+
# Try to extract line number from Jinja2 error
|
|
2439
|
+
if "line" in error_msg.lower():
|
|
2440
|
+
import re
|
|
2441
|
+
|
|
2442
|
+
line_match = re.search(r"line\s+(\d+)", error_msg, re.IGNORECASE)
|
|
2443
|
+
if line_match:
|
|
2444
|
+
error_line = int(line_match.group(1))
|
|
2445
|
+
|
|
2446
|
+
return {
|
|
2447
|
+
"valid": False,
|
|
2448
|
+
"error": error_msg,
|
|
2449
|
+
"error_line": error_line,
|
|
2450
|
+
"rendered_output": None,
|
|
2451
|
+
}
|
|
2452
|
+
|
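
Validation is a two-step check per the handler: syntax (compile) always runs, and rendering runs only when sample_data is supplied, with matches_expected comparing the lowercased, stripped output against expected_result. A request sketch; the base URL and prefix are assumptions and the template condition is illustrative:

import asyncio
import httpx

async def check_routing_template() -> dict:
    payload = {
        "template": "{{ 'true' if event.severity == 'critical' else 'false' }}",
        "sample_data": {"event": {"severity": "critical"}},
        "expected_result": "true",
    }
    async with httpx.AsyncClient(base_url="http://localhost:8000") as client:
        resp = await client.post(
            "/api/notifications/routing/rules/validate-jinja2", json=payload
        )
        body = resp.json()
    # Expect: valid=True, rendered_output="true", matches_expected=True
    return body

# asyncio.run(check_routing_template())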