mcp-hangar 0.2.0 (mcp_hangar-0.2.0-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcp_hangar/__init__.py +139 -0
- mcp_hangar/application/__init__.py +1 -0
- mcp_hangar/application/commands/__init__.py +67 -0
- mcp_hangar/application/commands/auth_commands.py +118 -0
- mcp_hangar/application/commands/auth_handlers.py +296 -0
- mcp_hangar/application/commands/commands.py +59 -0
- mcp_hangar/application/commands/handlers.py +189 -0
- mcp_hangar/application/discovery/__init__.py +21 -0
- mcp_hangar/application/discovery/discovery_metrics.py +283 -0
- mcp_hangar/application/discovery/discovery_orchestrator.py +497 -0
- mcp_hangar/application/discovery/lifecycle_manager.py +315 -0
- mcp_hangar/application/discovery/security_validator.py +414 -0
- mcp_hangar/application/event_handlers/__init__.py +50 -0
- mcp_hangar/application/event_handlers/alert_handler.py +191 -0
- mcp_hangar/application/event_handlers/audit_handler.py +203 -0
- mcp_hangar/application/event_handlers/knowledge_base_handler.py +120 -0
- mcp_hangar/application/event_handlers/logging_handler.py +69 -0
- mcp_hangar/application/event_handlers/metrics_handler.py +152 -0
- mcp_hangar/application/event_handlers/persistent_audit_store.py +217 -0
- mcp_hangar/application/event_handlers/security_handler.py +604 -0
- mcp_hangar/application/mcp/tooling.py +158 -0
- mcp_hangar/application/ports/__init__.py +9 -0
- mcp_hangar/application/ports/observability.py +237 -0
- mcp_hangar/application/queries/__init__.py +52 -0
- mcp_hangar/application/queries/auth_handlers.py +237 -0
- mcp_hangar/application/queries/auth_queries.py +118 -0
- mcp_hangar/application/queries/handlers.py +227 -0
- mcp_hangar/application/read_models/__init__.py +11 -0
- mcp_hangar/application/read_models/provider_views.py +139 -0
- mcp_hangar/application/sagas/__init__.py +11 -0
- mcp_hangar/application/sagas/group_rebalance_saga.py +137 -0
- mcp_hangar/application/sagas/provider_failover_saga.py +266 -0
- mcp_hangar/application/sagas/provider_recovery_saga.py +172 -0
- mcp_hangar/application/services/__init__.py +9 -0
- mcp_hangar/application/services/provider_service.py +208 -0
- mcp_hangar/application/services/traced_provider_service.py +211 -0
- mcp_hangar/bootstrap/runtime.py +328 -0
- mcp_hangar/context.py +178 -0
- mcp_hangar/domain/__init__.py +117 -0
- mcp_hangar/domain/contracts/__init__.py +57 -0
- mcp_hangar/domain/contracts/authentication.py +225 -0
- mcp_hangar/domain/contracts/authorization.py +229 -0
- mcp_hangar/domain/contracts/event_store.py +178 -0
- mcp_hangar/domain/contracts/metrics_publisher.py +59 -0
- mcp_hangar/domain/contracts/persistence.py +383 -0
- mcp_hangar/domain/contracts/provider_runtime.py +146 -0
- mcp_hangar/domain/discovery/__init__.py +20 -0
- mcp_hangar/domain/discovery/conflict_resolver.py +267 -0
- mcp_hangar/domain/discovery/discovered_provider.py +185 -0
- mcp_hangar/domain/discovery/discovery_service.py +412 -0
- mcp_hangar/domain/discovery/discovery_source.py +192 -0
- mcp_hangar/domain/events.py +433 -0
- mcp_hangar/domain/exceptions.py +525 -0
- mcp_hangar/domain/model/__init__.py +70 -0
- mcp_hangar/domain/model/aggregate.py +58 -0
- mcp_hangar/domain/model/circuit_breaker.py +152 -0
- mcp_hangar/domain/model/event_sourced_api_key.py +413 -0
- mcp_hangar/domain/model/event_sourced_provider.py +423 -0
- mcp_hangar/domain/model/event_sourced_role_assignment.py +268 -0
- mcp_hangar/domain/model/health_tracker.py +183 -0
- mcp_hangar/domain/model/load_balancer.py +185 -0
- mcp_hangar/domain/model/provider.py +810 -0
- mcp_hangar/domain/model/provider_group.py +656 -0
- mcp_hangar/domain/model/tool_catalog.py +105 -0
- mcp_hangar/domain/policies/__init__.py +19 -0
- mcp_hangar/domain/policies/provider_health.py +187 -0
- mcp_hangar/domain/repository.py +249 -0
- mcp_hangar/domain/security/__init__.py +85 -0
- mcp_hangar/domain/security/input_validator.py +710 -0
- mcp_hangar/domain/security/rate_limiter.py +387 -0
- mcp_hangar/domain/security/roles.py +237 -0
- mcp_hangar/domain/security/sanitizer.py +387 -0
- mcp_hangar/domain/security/secrets.py +501 -0
- mcp_hangar/domain/services/__init__.py +20 -0
- mcp_hangar/domain/services/audit_service.py +376 -0
- mcp_hangar/domain/services/image_builder.py +328 -0
- mcp_hangar/domain/services/provider_launcher.py +1046 -0
- mcp_hangar/domain/value_objects.py +1138 -0
- mcp_hangar/errors.py +818 -0
- mcp_hangar/fastmcp_server.py +1105 -0
- mcp_hangar/gc.py +134 -0
- mcp_hangar/infrastructure/__init__.py +79 -0
- mcp_hangar/infrastructure/async_executor.py +133 -0
- mcp_hangar/infrastructure/auth/__init__.py +37 -0
- mcp_hangar/infrastructure/auth/api_key_authenticator.py +388 -0
- mcp_hangar/infrastructure/auth/event_sourced_store.py +567 -0
- mcp_hangar/infrastructure/auth/jwt_authenticator.py +360 -0
- mcp_hangar/infrastructure/auth/middleware.py +340 -0
- mcp_hangar/infrastructure/auth/opa_authorizer.py +243 -0
- mcp_hangar/infrastructure/auth/postgres_store.py +659 -0
- mcp_hangar/infrastructure/auth/projections.py +366 -0
- mcp_hangar/infrastructure/auth/rate_limiter.py +311 -0
- mcp_hangar/infrastructure/auth/rbac_authorizer.py +323 -0
- mcp_hangar/infrastructure/auth/sqlite_store.py +624 -0
- mcp_hangar/infrastructure/command_bus.py +112 -0
- mcp_hangar/infrastructure/discovery/__init__.py +110 -0
- mcp_hangar/infrastructure/discovery/docker_source.py +289 -0
- mcp_hangar/infrastructure/discovery/entrypoint_source.py +249 -0
- mcp_hangar/infrastructure/discovery/filesystem_source.py +383 -0
- mcp_hangar/infrastructure/discovery/kubernetes_source.py +247 -0
- mcp_hangar/infrastructure/event_bus.py +260 -0
- mcp_hangar/infrastructure/event_sourced_repository.py +443 -0
- mcp_hangar/infrastructure/event_store.py +396 -0
- mcp_hangar/infrastructure/knowledge_base/__init__.py +259 -0
- mcp_hangar/infrastructure/knowledge_base/contracts.py +202 -0
- mcp_hangar/infrastructure/knowledge_base/memory.py +177 -0
- mcp_hangar/infrastructure/knowledge_base/postgres.py +545 -0
- mcp_hangar/infrastructure/knowledge_base/sqlite.py +513 -0
- mcp_hangar/infrastructure/metrics_publisher.py +36 -0
- mcp_hangar/infrastructure/observability/__init__.py +10 -0
- mcp_hangar/infrastructure/observability/langfuse_adapter.py +534 -0
- mcp_hangar/infrastructure/persistence/__init__.py +33 -0
- mcp_hangar/infrastructure/persistence/audit_repository.py +371 -0
- mcp_hangar/infrastructure/persistence/config_repository.py +398 -0
- mcp_hangar/infrastructure/persistence/database.py +333 -0
- mcp_hangar/infrastructure/persistence/database_common.py +330 -0
- mcp_hangar/infrastructure/persistence/event_serializer.py +280 -0
- mcp_hangar/infrastructure/persistence/event_upcaster.py +166 -0
- mcp_hangar/infrastructure/persistence/in_memory_event_store.py +150 -0
- mcp_hangar/infrastructure/persistence/recovery_service.py +312 -0
- mcp_hangar/infrastructure/persistence/sqlite_event_store.py +386 -0
- mcp_hangar/infrastructure/persistence/unit_of_work.py +409 -0
- mcp_hangar/infrastructure/persistence/upcasters/README.md +13 -0
- mcp_hangar/infrastructure/persistence/upcasters/__init__.py +7 -0
- mcp_hangar/infrastructure/query_bus.py +153 -0
- mcp_hangar/infrastructure/saga_manager.py +401 -0
- mcp_hangar/logging_config.py +209 -0
- mcp_hangar/metrics.py +1007 -0
- mcp_hangar/models.py +31 -0
- mcp_hangar/observability/__init__.py +54 -0
- mcp_hangar/observability/health.py +487 -0
- mcp_hangar/observability/metrics.py +319 -0
- mcp_hangar/observability/tracing.py +433 -0
- mcp_hangar/progress.py +542 -0
- mcp_hangar/retry.py +613 -0
- mcp_hangar/server/__init__.py +120 -0
- mcp_hangar/server/__main__.py +6 -0
- mcp_hangar/server/auth_bootstrap.py +340 -0
- mcp_hangar/server/auth_cli.py +335 -0
- mcp_hangar/server/auth_config.py +305 -0
- mcp_hangar/server/bootstrap.py +735 -0
- mcp_hangar/server/cli.py +161 -0
- mcp_hangar/server/config.py +224 -0
- mcp_hangar/server/context.py +215 -0
- mcp_hangar/server/http_auth_middleware.py +165 -0
- mcp_hangar/server/lifecycle.py +467 -0
- mcp_hangar/server/state.py +117 -0
- mcp_hangar/server/tools/__init__.py +16 -0
- mcp_hangar/server/tools/discovery.py +186 -0
- mcp_hangar/server/tools/groups.py +75 -0
- mcp_hangar/server/tools/health.py +301 -0
- mcp_hangar/server/tools/provider.py +939 -0
- mcp_hangar/server/tools/registry.py +320 -0
- mcp_hangar/server/validation.py +113 -0
- mcp_hangar/stdio_client.py +229 -0
- mcp_hangar-0.2.0.dist-info/METADATA +347 -0
- mcp_hangar-0.2.0.dist-info/RECORD +160 -0
- mcp_hangar-0.2.0.dist-info/WHEEL +4 -0
- mcp_hangar-0.2.0.dist-info/entry_points.txt +2 -0
- mcp_hangar-0.2.0.dist-info/licenses/LICENSE +21 -0
mcp_hangar/domain/model/event_sourced_provider.py

@@ -0,0 +1,423 @@
"""Event Sourced Provider aggregate - provider that rebuilds state from events."""

from dataclasses import dataclass
import threading
from typing import Any, Dict, List, Optional

from ...logging_config import get_logger
from ..events import (
    DomainEvent,
    HealthCheckFailed,
    HealthCheckPassed,
    ProviderDegraded,
    ProviderIdleDetected,
    ProviderStarted,
    ProviderStateChanged,
    ProviderStopped,
    ToolInvocationCompleted,
    ToolInvocationFailed,
    ToolInvocationRequested,
)
from ..value_objects import ProviderId
from .health_tracker import HealthTracker
from .provider import Provider, ProviderState
from .tool_catalog import ToolCatalog

logger = get_logger(__name__)


@dataclass
class ProviderSnapshot:
    """Snapshot of provider state for faster loading."""

    provider_id: str
    mode: str
    state: str
    version: int
    command: Optional[List[str]]
    image: Optional[str]
    endpoint: Optional[str]
    env: Dict[str, str]
    idle_ttl_s: int
    health_check_interval_s: int
    max_consecutive_failures: int
    consecutive_failures: int
    total_failures: int
    total_invocations: int
    last_success_at: Optional[float]
    last_failure_at: Optional[float]
    tool_names: List[str]
    last_used: float
    meta: Dict[str, Any]

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {
            "provider_id": self.provider_id,
            "mode": self.mode,
            "state": self.state,
            "version": self.version,
            "command": self.command,
            "image": self.image,
            "endpoint": self.endpoint,
            "env": self.env,
            "idle_ttl_s": self.idle_ttl_s,
            "health_check_interval_s": self.health_check_interval_s,
            "max_consecutive_failures": self.max_consecutive_failures,
            "consecutive_failures": self.consecutive_failures,
            "total_failures": self.total_failures,
            "total_invocations": self.total_invocations,
            "last_success_at": self.last_success_at,
            "last_failure_at": self.last_failure_at,
            "tool_names": self.tool_names,
            "last_used": self.last_used,
            "meta": self.meta,
        }

    @classmethod
    def from_dict(cls, d: Dict[str, Any]) -> "ProviderSnapshot":
        """Create from dictionary."""
        return cls(
            provider_id=d["provider_id"],
            mode=d["mode"],
            state=d["state"],
            version=d["version"],
            command=d.get("command"),
            image=d.get("image"),
            endpoint=d.get("endpoint"),
            env=d.get("env", {}),
            idle_ttl_s=d.get("idle_ttl_s", 300),
            health_check_interval_s=d.get("health_check_interval_s", 60),
            max_consecutive_failures=d.get("max_consecutive_failures", 3),
            consecutive_failures=d.get("consecutive_failures", 0),
            total_failures=d.get("total_failures", 0),
            total_invocations=d.get("total_invocations", 0),
            last_success_at=d.get("last_success_at"),
            last_failure_at=d.get("last_failure_at"),
            tool_names=d.get("tool_names", []),
            last_used=d.get("last_used", 0.0),
            meta=d.get("meta", {}),
        )


class EventSourcedProvider(Provider):
    """
    Provider that rebuilds its state from domain events.

    Supports:
    - Loading from event stream
    - Creating snapshots for performance
    - Loading from snapshot + subsequent events
    - Time-travel debugging
    """

    def __init__(
        self,
        provider_id: str,
        mode: str,
        command: Optional[List[str]] = None,
        image: Optional[str] = None,
        endpoint: Optional[str] = None,
        env: Optional[Dict[str, str]] = None,
        idle_ttl_s: int = 300,
        health_check_interval_s: int = 60,
        max_consecutive_failures: int = 3,
    ):
        # Don't call super().__init__ to avoid recording ProviderStateChanged
        # Instead, manually initialize fields
        from .aggregate import AggregateRoot

        AggregateRoot.__init__(self)

        # Identity
        self._id = ProviderId(provider_id)
        self._mode = mode

        # Configuration
        self._command = command
        self._image = image
        self._endpoint = endpoint
        self._env = env or {}
        self._idle_ttl_s = idle_ttl_s
        self._health_check_interval_s = health_check_interval_s

        # State - start in COLD
        self._state = ProviderState.COLD
        self._health = HealthTracker(max_consecutive_failures=max_consecutive_failures)
        self._tools = ToolCatalog()
        self._client: Optional[Any] = None
        self._meta: Dict[str, Any] = {}
        self._last_used: float = 0.0

        # Thread safety
        self._lock = threading.RLock()

        # Event sourcing specific
        self._events_applied: int = 0

    @classmethod
    def from_events(
        cls,
        provider_id: str,
        mode: str,
        events: List[DomainEvent],
        command: Optional[List[str]] = None,
        image: Optional[str] = None,
        endpoint: Optional[str] = None,
        env: Optional[Dict[str, str]] = None,
        idle_ttl_s: int = 300,
        health_check_interval_s: int = 60,
        max_consecutive_failures: int = 3,
    ) -> "EventSourcedProvider":
        """
        Create a provider by replaying events.

        Args:
            provider_id: Provider identifier
            mode: Provider mode
            events: List of domain events to replay
            command: Command for subprocess mode
            image: Docker image for docker mode
            endpoint: Endpoint for remote mode
            env: Environment variables
            idle_ttl_s: Idle TTL in seconds
            health_check_interval_s: Health check interval
            max_consecutive_failures: Max failures before degradation

        Returns:
            Provider with state rebuilt from events
        """
        provider = cls(
            provider_id=provider_id,
            mode=mode,
            command=command,
            image=image,
            endpoint=endpoint,
            env=env,
            idle_ttl_s=idle_ttl_s,
            health_check_interval_s=health_check_interval_s,
            max_consecutive_failures=max_consecutive_failures,
        )

        for event in events:
            provider._apply_event(event)

        return provider

    @classmethod
    def from_snapshot(
        cls, snapshot: ProviderSnapshot, events: Optional[List[DomainEvent]] = None
    ) -> "EventSourcedProvider":
        """
        Create a provider from snapshot and subsequent events.

        Args:
            snapshot: Provider state snapshot
            events: Events that occurred after the snapshot

        Returns:
            Provider with state rebuilt from snapshot + events
        """
        provider = cls(
            provider_id=snapshot.provider_id,
            mode=snapshot.mode,
            command=snapshot.command,
            image=snapshot.image,
            endpoint=snapshot.endpoint,
            env=snapshot.env,
            idle_ttl_s=snapshot.idle_ttl_s,
            health_check_interval_s=snapshot.health_check_interval_s,
            max_consecutive_failures=snapshot.max_consecutive_failures,
        )

        # Restore state from snapshot
        provider._state = ProviderState(snapshot.state)
        provider._version = snapshot.version

        # Restore health tracker state
        provider._health._consecutive_failures = snapshot.consecutive_failures
        provider._health._total_failures = snapshot.total_failures
        provider._health._total_invocations = snapshot.total_invocations
        provider._health._last_success_at = snapshot.last_success_at
        provider._health._last_failure_at = snapshot.last_failure_at

        # Restore tools (just names, no full schemas)
        for tool_name in snapshot.tool_names:
            provider._tools._tools[tool_name] = {"name": tool_name}

        # Restore other state
        provider._last_used = snapshot.last_used
        provider._meta = dict(snapshot.meta)
        provider._events_applied = snapshot.version

        # Apply subsequent events
        if events:
            for event in events:
                provider._apply_event(event)

        return provider

    def _apply_event(self, event: DomainEvent) -> None:
        """
        Apply a single event to update state.

        This is the core of event sourcing - each event type
        has specific handlers that update the aggregate state.
        """
        self._events_applied += 1
        self._increment_version()

        if isinstance(event, ProviderStarted):
            self._apply_provider_started(event)
        elif isinstance(event, ProviderStopped):
            self._apply_provider_stopped(event)
        elif isinstance(event, ProviderDegraded):
            self._apply_provider_degraded(event)
        elif isinstance(event, ProviderStateChanged):
            self._apply_state_changed(event)
        elif isinstance(event, ToolInvocationRequested):
            self._apply_tool_requested(event)
        elif isinstance(event, ToolInvocationCompleted):
            self._apply_tool_completed(event)
        elif isinstance(event, ToolInvocationFailed):
            self._apply_tool_failed(event)
        elif isinstance(event, HealthCheckPassed):
            self._apply_health_passed(event)
        elif isinstance(event, HealthCheckFailed):
            self._apply_health_failed(event)
        elif isinstance(event, ProviderIdleDetected):
            self._apply_idle_detected(event)

    def _apply_provider_started(self, event: ProviderStarted) -> None:
        """Apply ProviderStarted event."""
        self._state = ProviderState.READY
        self._mode = event.mode
        self._health._consecutive_failures = 0
        self._last_used = event.occurred_at
        self._meta["started_at"] = event.occurred_at
        self._meta["tools_count"] = event.tools_count

    def _apply_provider_stopped(self, event: ProviderStopped) -> None:
        """Apply ProviderStopped event."""
        self._state = ProviderState.COLD
        self._client = None
        self._tools.clear()

    def _apply_provider_degraded(self, event: ProviderDegraded) -> None:
        """Apply ProviderDegraded event."""
        self._state = ProviderState.DEGRADED
        self._health._consecutive_failures = event.consecutive_failures
        self._health._total_failures = event.total_failures

    def _apply_state_changed(self, event: ProviderStateChanged) -> None:
        """Apply ProviderStateChanged event."""
        self._state = ProviderState(event.new_state)

    def _apply_tool_requested(self, event: ToolInvocationRequested) -> None:
        """Apply ToolInvocationRequested event."""
        self._health._total_invocations += 1

    def _apply_tool_completed(self, event: ToolInvocationCompleted) -> None:
        """Apply ToolInvocationCompleted event."""
        self._health._consecutive_failures = 0
        self._health._last_success_at = event.occurred_at
        self._last_used = event.occurred_at

    def _apply_tool_failed(self, event: ToolInvocationFailed) -> None:
        """Apply ToolInvocationFailed event."""
        self._health._consecutive_failures += 1
        self._health._total_failures += 1
        self._health._last_failure_at = event.occurred_at

    def _apply_health_passed(self, event: HealthCheckPassed) -> None:
        """Apply HealthCheckPassed event."""
        self._health._consecutive_failures = 0
        self._health._last_success_at = event.occurred_at

    def _apply_health_failed(self, event: HealthCheckFailed) -> None:
        """Apply HealthCheckFailed event."""
        self._health._consecutive_failures = event.consecutive_failures
        self._health._last_failure_at = event.occurred_at

    def _apply_idle_detected(self, event: ProviderIdleDetected) -> None:
        """Apply ProviderIdleDetected event."""
        # Just a marker event, no state change
        pass

    def create_snapshot(self) -> ProviderSnapshot:
        """
        Create a snapshot of current state.

        Returns:
            ProviderSnapshot that can be serialized
        """
        with self._lock:
            return ProviderSnapshot(
                provider_id=self.provider_id,
                mode=self._mode,
                state=self._state.value,
                version=self._version,
                command=self._command,
                image=self._image,
                endpoint=self._endpoint,
                env=dict(self._env),
                idle_ttl_s=self._idle_ttl_s,
                health_check_interval_s=self._health_check_interval_s,
                max_consecutive_failures=self._health.max_consecutive_failures,
                consecutive_failures=self._health._consecutive_failures,
                total_failures=self._health._total_failures,
                total_invocations=self._health._total_invocations,
                last_success_at=self._health._last_success_at,
                last_failure_at=self._health._last_failure_at,
                tool_names=self._tools.list_names(),
                last_used=self._last_used,
                meta=dict(self._meta),
            )

    @property
    def events_applied(self) -> int:
        """Number of events applied to this aggregate."""
        return self._events_applied

    def replay_to_version(self, target_version: int, events: List[DomainEvent]) -> "EventSourcedProvider":
        """
        Create a new provider at a specific version (time travel).

        Args:
            target_version: Target version to replay to
            events: All events for this provider

        Returns:
            New provider instance at the target version
        """
        provider = EventSourcedProvider(
            provider_id=self.provider_id,
            mode=self._mode,
            command=self._command,
            image=self._image,
            endpoint=self._endpoint,
            env=self._env,
            idle_ttl_s=self._idle_ttl_s,
            health_check_interval_s=self._health_check_interval_s,
            max_consecutive_failures=self._health.max_consecutive_failures,
        )

        for i, event in enumerate(events):
            if i >= target_version:
                break
            provider._apply_event(event)

        return provider

    def get_uncommitted_events(self) -> List[DomainEvent]:
        """
        Get events recorded but not yet persisted.

        Returns:
            List of uncommitted domain events
        """
        return list(self._uncommitted_events)

    def mark_events_committed(self) -> None:
        """Clear uncommitted events after persistence."""
        self._uncommitted_events.clear()
mcp_hangar/domain/model/event_sourced_role_assignment.py

@@ -0,0 +1,268 @@
"""Event Sourced Role Assignment aggregate.

Implements Event Sourcing pattern for role assignments where:
- State is derived from events, not stored directly
- All changes are captured as immutable events
- State can be rebuilt by replaying events
"""

from dataclasses import dataclass
from typing import Any

from ..events import DomainEvent, RoleAssigned, RoleRevoked
from .aggregate import AggregateRoot


@dataclass
class RoleAssignmentSnapshot:
    """Snapshot of principal's role assignments.

    Attributes:
        principal_id: Principal ID.
        assignments: Dict of scope -> set of role names.
        version: Aggregate version.
    """

    principal_id: str
    assignments: dict[str, list[str]]
    version: int

    def to_dict(self) -> dict[str, Any]:
        """Convert to dictionary for serialization."""
        return {
            "principal_id": self.principal_id,
            "assignments": self.assignments,
            "version": self.version,
        }

    @classmethod
    def from_dict(cls, d: dict[str, Any]) -> "RoleAssignmentSnapshot":
        """Create from dictionary."""
        return cls(
            principal_id=d["principal_id"],
            assignments=d.get("assignments", {}),
            version=d.get("version", 0),
        )


class EventSourcedRoleAssignment(AggregateRoot):
    """Event Sourced Role Assignment aggregate.

    Tracks all role assignments for a single principal.
    All changes are recorded as events.
    """

    def __init__(self, principal_id: str):
        """Initialize role assignment aggregate.

        Args:
            principal_id: Principal whose roles are tracked.
        """
        super().__init__()

        self._principal_id = principal_id
        # scope -> set of role names
        self._assignments: dict[str, set[str]] = {}

    @property
    def principal_id(self) -> str:
        return self._principal_id

    # =========================================================================
    # Factory Methods
    # =========================================================================

    @classmethod
    def from_events(
        cls,
        principal_id: str,
        events: list[DomainEvent],
    ) -> "EventSourcedRoleAssignment":
        """Rebuild role assignment state from events.

        Args:
            principal_id: Principal ID.
            events: Events to replay.

        Returns:
            EventSourcedRoleAssignment with state rebuilt from events.
        """
        assignment = cls(principal_id)

        for event in events:
            assignment._apply_event(event)

        return assignment

    @classmethod
    def from_snapshot(
        cls,
        snapshot: RoleAssignmentSnapshot,
        events: list[DomainEvent] | None = None,
    ) -> "EventSourcedRoleAssignment":
        """Load from snapshot and optional subsequent events.

        Args:
            snapshot: Snapshot to load from.
            events: Optional events after snapshot.

        Returns:
            EventSourcedRoleAssignment with state from snapshot + events.
        """
        assignment = cls(snapshot.principal_id)

        # Restore state from snapshot
        assignment._assignments = {scope: set(roles) for scope, roles in snapshot.assignments.items()}
        assignment._version = snapshot.version

        # Apply any events after snapshot
        if events:
            for event in events:
                assignment._apply_event(event)

        return assignment

    # =========================================================================
    # Commands (mutate state via events)
    # =========================================================================

    def assign_role(
        self,
        role_name: str,
        scope: str = "global",
        assigned_by: str = "system",
    ) -> bool:
        """Assign a role to this principal.

        Args:
            role_name: Name of the role to assign.
            scope: Scope of the assignment.
            assigned_by: Who is assigning the role.

        Returns:
            True if role was assigned, False if already assigned.
        """
        # Check if already assigned
        if scope in self._assignments and role_name in self._assignments[scope]:
            return False

        self._record_event(
            RoleAssigned(
                principal_id=self._principal_id,
                role_name=role_name,
                scope=scope,
                assigned_by=assigned_by,
            )
        )

        # Apply immediately
        if scope not in self._assignments:
            self._assignments[scope] = set()
        self._assignments[scope].add(role_name)

        return True

    def revoke_role(
        self,
        role_name: str,
        scope: str = "global",
        revoked_by: str = "system",
    ) -> bool:
        """Revoke a role from this principal.

        Args:
            role_name: Name of the role to revoke.
            scope: Scope from which to revoke.
            revoked_by: Who is revoking the role.

        Returns:
            True if role was revoked, False if not assigned.
        """
        # Check if assigned
        if scope not in self._assignments or role_name not in self._assignments[scope]:
            return False

        self._record_event(
            RoleRevoked(
                principal_id=self._principal_id,
                role_name=role_name,
                scope=scope,
                revoked_by=revoked_by,
            )
        )

        # Apply immediately
        self._assignments[scope].discard(role_name)
        if not self._assignments[scope]:
            del self._assignments[scope]

        return True

    # =========================================================================
    # Event Application
    # =========================================================================

    def _apply_event(self, event: DomainEvent) -> None:
        """Apply an event to update state.

        This is called when replaying events to rebuild state.
        """
        if isinstance(event, RoleAssigned):
            scope = event.scope
            if scope not in self._assignments:
                self._assignments[scope] = set()
            self._assignments[scope].add(event.role_name)

        elif isinstance(event, RoleRevoked):
            scope = event.scope
            if scope in self._assignments:
                self._assignments[scope].discard(event.role_name)
                if not self._assignments[scope]:
                    del self._assignments[scope]

        self._version += 1

    # =========================================================================
    # Queries
    # =========================================================================

    def get_role_names(self, scope: str = "*") -> set[str]:
        """Get all role names for this principal.

        Args:
            scope: Scope to filter by, or "*" for all scopes.

        Returns:
            Set of role names.
        """
        if scope == "*":
            # All roles across all scopes
            result = set()
            for roles in self._assignments.values():
                result.update(roles)
            return result
        else:
            # Specific scope + global
            result = set(self._assignments.get(scope, set()))
            result.update(self._assignments.get("global", set()))
            return result

    def has_role(self, role_name: str, scope: str = "*") -> bool:
        """Check if principal has a specific role.

        Args:
            role_name: Role to check.
            scope: Scope to check in.

        Returns:
            True if principal has the role.
        """
        return role_name in self.get_role_names(scope)

    def create_snapshot(self) -> RoleAssignmentSnapshot:
        """Create a snapshot of current state."""
        return RoleAssignmentSnapshot(
            principal_id=self._principal_id,
            assignments={scope: list(roles) for scope, roles in self._assignments.items()},
            version=self._version,
        )