mcp_hangar-0.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (160)
  1. mcp_hangar/__init__.py +139 -0
  2. mcp_hangar/application/__init__.py +1 -0
  3. mcp_hangar/application/commands/__init__.py +67 -0
  4. mcp_hangar/application/commands/auth_commands.py +118 -0
  5. mcp_hangar/application/commands/auth_handlers.py +296 -0
  6. mcp_hangar/application/commands/commands.py +59 -0
  7. mcp_hangar/application/commands/handlers.py +189 -0
  8. mcp_hangar/application/discovery/__init__.py +21 -0
  9. mcp_hangar/application/discovery/discovery_metrics.py +283 -0
  10. mcp_hangar/application/discovery/discovery_orchestrator.py +497 -0
  11. mcp_hangar/application/discovery/lifecycle_manager.py +315 -0
  12. mcp_hangar/application/discovery/security_validator.py +414 -0
  13. mcp_hangar/application/event_handlers/__init__.py +50 -0
  14. mcp_hangar/application/event_handlers/alert_handler.py +191 -0
  15. mcp_hangar/application/event_handlers/audit_handler.py +203 -0
  16. mcp_hangar/application/event_handlers/knowledge_base_handler.py +120 -0
  17. mcp_hangar/application/event_handlers/logging_handler.py +69 -0
  18. mcp_hangar/application/event_handlers/metrics_handler.py +152 -0
  19. mcp_hangar/application/event_handlers/persistent_audit_store.py +217 -0
  20. mcp_hangar/application/event_handlers/security_handler.py +604 -0
  21. mcp_hangar/application/mcp/tooling.py +158 -0
  22. mcp_hangar/application/ports/__init__.py +9 -0
  23. mcp_hangar/application/ports/observability.py +237 -0
  24. mcp_hangar/application/queries/__init__.py +52 -0
  25. mcp_hangar/application/queries/auth_handlers.py +237 -0
  26. mcp_hangar/application/queries/auth_queries.py +118 -0
  27. mcp_hangar/application/queries/handlers.py +227 -0
  28. mcp_hangar/application/read_models/__init__.py +11 -0
  29. mcp_hangar/application/read_models/provider_views.py +139 -0
  30. mcp_hangar/application/sagas/__init__.py +11 -0
  31. mcp_hangar/application/sagas/group_rebalance_saga.py +137 -0
  32. mcp_hangar/application/sagas/provider_failover_saga.py +266 -0
  33. mcp_hangar/application/sagas/provider_recovery_saga.py +172 -0
  34. mcp_hangar/application/services/__init__.py +9 -0
  35. mcp_hangar/application/services/provider_service.py +208 -0
  36. mcp_hangar/application/services/traced_provider_service.py +211 -0
  37. mcp_hangar/bootstrap/runtime.py +328 -0
  38. mcp_hangar/context.py +178 -0
  39. mcp_hangar/domain/__init__.py +117 -0
  40. mcp_hangar/domain/contracts/__init__.py +57 -0
  41. mcp_hangar/domain/contracts/authentication.py +225 -0
  42. mcp_hangar/domain/contracts/authorization.py +229 -0
  43. mcp_hangar/domain/contracts/event_store.py +178 -0
  44. mcp_hangar/domain/contracts/metrics_publisher.py +59 -0
  45. mcp_hangar/domain/contracts/persistence.py +383 -0
  46. mcp_hangar/domain/contracts/provider_runtime.py +146 -0
  47. mcp_hangar/domain/discovery/__init__.py +20 -0
  48. mcp_hangar/domain/discovery/conflict_resolver.py +267 -0
  49. mcp_hangar/domain/discovery/discovered_provider.py +185 -0
  50. mcp_hangar/domain/discovery/discovery_service.py +412 -0
  51. mcp_hangar/domain/discovery/discovery_source.py +192 -0
  52. mcp_hangar/domain/events.py +433 -0
  53. mcp_hangar/domain/exceptions.py +525 -0
  54. mcp_hangar/domain/model/__init__.py +70 -0
  55. mcp_hangar/domain/model/aggregate.py +58 -0
  56. mcp_hangar/domain/model/circuit_breaker.py +152 -0
  57. mcp_hangar/domain/model/event_sourced_api_key.py +413 -0
  58. mcp_hangar/domain/model/event_sourced_provider.py +423 -0
  59. mcp_hangar/domain/model/event_sourced_role_assignment.py +268 -0
  60. mcp_hangar/domain/model/health_tracker.py +183 -0
  61. mcp_hangar/domain/model/load_balancer.py +185 -0
  62. mcp_hangar/domain/model/provider.py +810 -0
  63. mcp_hangar/domain/model/provider_group.py +656 -0
  64. mcp_hangar/domain/model/tool_catalog.py +105 -0
  65. mcp_hangar/domain/policies/__init__.py +19 -0
  66. mcp_hangar/domain/policies/provider_health.py +187 -0
  67. mcp_hangar/domain/repository.py +249 -0
  68. mcp_hangar/domain/security/__init__.py +85 -0
  69. mcp_hangar/domain/security/input_validator.py +710 -0
  70. mcp_hangar/domain/security/rate_limiter.py +387 -0
  71. mcp_hangar/domain/security/roles.py +237 -0
  72. mcp_hangar/domain/security/sanitizer.py +387 -0
  73. mcp_hangar/domain/security/secrets.py +501 -0
  74. mcp_hangar/domain/services/__init__.py +20 -0
  75. mcp_hangar/domain/services/audit_service.py +376 -0
  76. mcp_hangar/domain/services/image_builder.py +328 -0
  77. mcp_hangar/domain/services/provider_launcher.py +1046 -0
  78. mcp_hangar/domain/value_objects.py +1138 -0
  79. mcp_hangar/errors.py +818 -0
  80. mcp_hangar/fastmcp_server.py +1105 -0
  81. mcp_hangar/gc.py +134 -0
  82. mcp_hangar/infrastructure/__init__.py +79 -0
  83. mcp_hangar/infrastructure/async_executor.py +133 -0
  84. mcp_hangar/infrastructure/auth/__init__.py +37 -0
  85. mcp_hangar/infrastructure/auth/api_key_authenticator.py +388 -0
  86. mcp_hangar/infrastructure/auth/event_sourced_store.py +567 -0
  87. mcp_hangar/infrastructure/auth/jwt_authenticator.py +360 -0
  88. mcp_hangar/infrastructure/auth/middleware.py +340 -0
  89. mcp_hangar/infrastructure/auth/opa_authorizer.py +243 -0
  90. mcp_hangar/infrastructure/auth/postgres_store.py +659 -0
  91. mcp_hangar/infrastructure/auth/projections.py +366 -0
  92. mcp_hangar/infrastructure/auth/rate_limiter.py +311 -0
  93. mcp_hangar/infrastructure/auth/rbac_authorizer.py +323 -0
  94. mcp_hangar/infrastructure/auth/sqlite_store.py +624 -0
  95. mcp_hangar/infrastructure/command_bus.py +112 -0
  96. mcp_hangar/infrastructure/discovery/__init__.py +110 -0
  97. mcp_hangar/infrastructure/discovery/docker_source.py +289 -0
  98. mcp_hangar/infrastructure/discovery/entrypoint_source.py +249 -0
  99. mcp_hangar/infrastructure/discovery/filesystem_source.py +383 -0
  100. mcp_hangar/infrastructure/discovery/kubernetes_source.py +247 -0
  101. mcp_hangar/infrastructure/event_bus.py +260 -0
  102. mcp_hangar/infrastructure/event_sourced_repository.py +443 -0
  103. mcp_hangar/infrastructure/event_store.py +396 -0
  104. mcp_hangar/infrastructure/knowledge_base/__init__.py +259 -0
  105. mcp_hangar/infrastructure/knowledge_base/contracts.py +202 -0
  106. mcp_hangar/infrastructure/knowledge_base/memory.py +177 -0
  107. mcp_hangar/infrastructure/knowledge_base/postgres.py +545 -0
  108. mcp_hangar/infrastructure/knowledge_base/sqlite.py +513 -0
  109. mcp_hangar/infrastructure/metrics_publisher.py +36 -0
  110. mcp_hangar/infrastructure/observability/__init__.py +10 -0
  111. mcp_hangar/infrastructure/observability/langfuse_adapter.py +534 -0
  112. mcp_hangar/infrastructure/persistence/__init__.py +33 -0
  113. mcp_hangar/infrastructure/persistence/audit_repository.py +371 -0
  114. mcp_hangar/infrastructure/persistence/config_repository.py +398 -0
  115. mcp_hangar/infrastructure/persistence/database.py +333 -0
  116. mcp_hangar/infrastructure/persistence/database_common.py +330 -0
  117. mcp_hangar/infrastructure/persistence/event_serializer.py +280 -0
  118. mcp_hangar/infrastructure/persistence/event_upcaster.py +166 -0
  119. mcp_hangar/infrastructure/persistence/in_memory_event_store.py +150 -0
  120. mcp_hangar/infrastructure/persistence/recovery_service.py +312 -0
  121. mcp_hangar/infrastructure/persistence/sqlite_event_store.py +386 -0
  122. mcp_hangar/infrastructure/persistence/unit_of_work.py +409 -0
  123. mcp_hangar/infrastructure/persistence/upcasters/README.md +13 -0
  124. mcp_hangar/infrastructure/persistence/upcasters/__init__.py +7 -0
  125. mcp_hangar/infrastructure/query_bus.py +153 -0
  126. mcp_hangar/infrastructure/saga_manager.py +401 -0
  127. mcp_hangar/logging_config.py +209 -0
  128. mcp_hangar/metrics.py +1007 -0
  129. mcp_hangar/models.py +31 -0
  130. mcp_hangar/observability/__init__.py +54 -0
  131. mcp_hangar/observability/health.py +487 -0
  132. mcp_hangar/observability/metrics.py +319 -0
  133. mcp_hangar/observability/tracing.py +433 -0
  134. mcp_hangar/progress.py +542 -0
  135. mcp_hangar/retry.py +613 -0
  136. mcp_hangar/server/__init__.py +120 -0
  137. mcp_hangar/server/__main__.py +6 -0
  138. mcp_hangar/server/auth_bootstrap.py +340 -0
  139. mcp_hangar/server/auth_cli.py +335 -0
  140. mcp_hangar/server/auth_config.py +305 -0
  141. mcp_hangar/server/bootstrap.py +735 -0
  142. mcp_hangar/server/cli.py +161 -0
  143. mcp_hangar/server/config.py +224 -0
  144. mcp_hangar/server/context.py +215 -0
  145. mcp_hangar/server/http_auth_middleware.py +165 -0
  146. mcp_hangar/server/lifecycle.py +467 -0
  147. mcp_hangar/server/state.py +117 -0
  148. mcp_hangar/server/tools/__init__.py +16 -0
  149. mcp_hangar/server/tools/discovery.py +186 -0
  150. mcp_hangar/server/tools/groups.py +75 -0
  151. mcp_hangar/server/tools/health.py +301 -0
  152. mcp_hangar/server/tools/provider.py +939 -0
  153. mcp_hangar/server/tools/registry.py +320 -0
  154. mcp_hangar/server/validation.py +113 -0
  155. mcp_hangar/stdio_client.py +229 -0
  156. mcp_hangar-0.2.0.dist-info/METADATA +347 -0
  157. mcp_hangar-0.2.0.dist-info/RECORD +160 -0
  158. mcp_hangar-0.2.0.dist-info/WHEEL +4 -0
  159. mcp_hangar-0.2.0.dist-info/entry_points.txt +2 -0
  160. mcp_hangar-0.2.0.dist-info/licenses/LICENSE +21 -0
mcp_hangar/infrastructure/persistence/recovery_service.py
@@ -0,0 +1,312 @@
+"""Recovery service for system startup.
+
+Responsible for loading persisted provider configurations and
+restoring system state after restart.
+"""
+
+from dataclasses import dataclass, field
+from datetime import datetime, timezone
+from typing import Any, Dict, List, Optional
+
+from ...domain.contracts.persistence import AuditAction, AuditEntry, ProviderConfigSnapshot
+from ...domain.model import Provider
+from ...domain.repository import IProviderRepository
+from ...logging_config import get_logger
+from .audit_repository import SQLiteAuditRepository
+from .config_repository import SQLiteProviderConfigRepository
+from .database import Database
+
+logger = get_logger(__name__)
+
+
+@dataclass
+class RecoveryResult:
+    """Result of a recovery operation."""
+
+    recovered_count: int = 0
+    failed_count: int = 0
+    skipped_count: int = 0
+    recovered_ids: List[str] = field(default_factory=list)
+    failed_ids: List[str] = field(default_factory=list)
+    errors: Dict[str, str] = field(default_factory=dict)
+    duration_ms: float = 0.0
+    started_at: Optional[datetime] = None
+    completed_at: Optional[datetime] = None
+
+
+class RecoveryService:
+    """Service for recovering system state on startup.
+
+    Loads persisted provider configurations from the database
+    and registers them with the provider repository.
+    """
+
+    def __init__(
+        self,
+        database: Database,
+        provider_repository: IProviderRepository,
+        config_repository: Optional[SQLiteProviderConfigRepository] = None,
+        audit_repository: Optional[SQLiteAuditRepository] = None,
+        auto_start: bool = False,
+    ):
+        """Initialize recovery service.
+
+        Args:
+            database: Database instance
+            provider_repository: Repository for registering recovered providers
+            config_repository: Optional config repository (created if not provided)
+            audit_repository: Optional audit repository for logging recovery
+            auto_start: Whether to auto-start recovered providers
+        """
+        self._db = database
+        self._provider_repo = provider_repository
+        self._config_repo = config_repository or SQLiteProviderConfigRepository(database)
+        self._audit_repo = audit_repository or SQLiteAuditRepository(database)
+        self._auto_start = auto_start
+        self._last_recovery: Optional[RecoveryResult] = None
+
+    async def recover_providers(self) -> List[str]:
+        """Recover all provider configurations from storage.
+
+        Loads saved configurations and registers Provider aggregates
+        with the provider repository.
+
+        Returns:
+            List of recovered provider IDs
+        """
+        result = RecoveryResult(started_at=datetime.now(timezone.utc))
+        start_time = datetime.now(timezone.utc)
+
+        try:
+            # Ensure database is initialized
+            await self._db.initialize()
+
+            # Load all enabled configurations
+            configs = await self._config_repo.get_all()
+
+            logger.info(f"Recovery: Found {len(configs)} provider configurations")
+
+            for config in configs:
+                try:
+                    # Create Provider aggregate from config
+                    provider = self._create_provider_from_config(config)
+
+                    # Register with repository
+                    self._provider_repo.add(config.provider_id, provider)
+
+                    result.recovered_count += 1
+                    result.recovered_ids.append(config.provider_id)
+
+                    logger.debug(f"Recovery: Restored provider {config.provider_id}")
+
+                except Exception as e:
+                    result.failed_count += 1
+                    result.failed_ids.append(config.provider_id)
+                    result.errors[config.provider_id] = str(e)
+                    logger.error(f"Recovery: Failed to restore provider {config.provider_id}: {e}")
+
+            # Record recovery in audit log
+            await self._record_recovery_audit(result)
+
+        except Exception as e:
+            logger.error(f"Recovery: Critical failure: {e}")
+            result.errors["_critical"] = str(e)
+
+        finally:
+            result.completed_at = datetime.now(timezone.utc)
+            result.duration_ms = (result.completed_at - start_time).total_seconds() * 1000
+            self._last_recovery = result
+
+            logger.info(
+                f"Recovery completed: {result.recovered_count} recovered, "
+                f"{result.failed_count} failed, {result.duration_ms:.2f}ms"
+            )
+
+        return result.recovered_ids
+
+    def _create_provider_from_config(self, config: ProviderConfigSnapshot) -> Provider:
+        """Create Provider aggregate from configuration snapshot.
+
+        Args:
+            config: Provider configuration snapshot
+
+        Returns:
+            Provider aggregate instance
+        """
+        return Provider(
+            provider_id=config.provider_id,
+            mode=config.mode,
+            command=config.command,
+            image=config.image,
+            endpoint=config.endpoint,
+            env=config.env,
+            idle_ttl_s=config.idle_ttl_s,
+            health_check_interval_s=config.health_check_interval_s,
+            max_consecutive_failures=config.max_consecutive_failures,
+            description=config.description,
+            volumes=config.volumes,
+            build=config.build,
+            resources=config.resources,
+            network=config.network,
+            read_only=config.read_only,
+            user=config.user,
+            tools=config.tools,
+        )
+
+    async def _record_recovery_audit(self, result: RecoveryResult) -> None:
+        """Record recovery operation in audit log.
+
+        Args:
+            result: Recovery result to record
+        """
+        try:
+            await self._audit_repo.append(
+                AuditEntry(
+                    entity_id="_system",
+                    entity_type="recovery",
+                    action=AuditAction.RECOVERED,
+                    timestamp=result.completed_at or datetime.now(timezone.utc),
+                    actor="system",
+                    metadata={
+                        "recovered_count": result.recovered_count,
+                        "failed_count": result.failed_count,
+                        "duration_ms": result.duration_ms,
+                        "recovered_ids": result.recovered_ids,
+                        "failed_ids": result.failed_ids,
+                        "errors": result.errors,
+                    },
+                )
+            )
+        except Exception as e:
+            logger.warning(f"Failed to record recovery audit: {e}")
+
+    async def get_recovery_status(self) -> Dict[str, Any]:
+        """Get status of last recovery operation.
+
+        Returns:
+            Dictionary with recovery metrics and status
+        """
+        if self._last_recovery is None:
+            return {
+                "status": "not_run",
+                "message": "No recovery has been performed",
+            }
+
+        result = self._last_recovery
+
+        return {
+            "status": "completed" if not result.errors else "completed_with_errors",
+            "recovered_count": result.recovered_count,
+            "failed_count": result.failed_count,
+            "skipped_count": result.skipped_count,
+            "duration_ms": result.duration_ms,
+            "started_at": result.started_at.isoformat() if result.started_at else None,
+            "completed_at": (result.completed_at.isoformat() if result.completed_at else None),
+            "recovered_ids": result.recovered_ids,
+            "failed_ids": result.failed_ids,
+            "errors": result.errors,
+        }
+
+    async def recover_single_provider(self, provider_id: str) -> bool:
+        """Recover a single provider from storage.
+
+        Useful for re-loading a specific provider without full recovery.
+
+        Args:
+            provider_id: Provider identifier to recover
+
+        Returns:
+            True if recovered successfully, False otherwise
+        """
+        try:
+            config = await self._config_repo.get(provider_id)
+
+            if config is None:
+                logger.warning(f"Recovery: No config found for {provider_id}")
+                return False
+
+            provider = self._create_provider_from_config(config)
+            self._provider_repo.add(provider_id, provider)
+
+            logger.info(f"Recovery: Single provider {provider_id} restored")
+            return True
+
+        except Exception as e:
+            logger.error(f"Recovery: Failed to restore {provider_id}: {e}")
+            return False
+
+    async def save_provider_config(self, provider: Provider) -> None:
+        """Save a provider's configuration to persistent storage.
+
+        Creates a snapshot of the current provider configuration
+        and persists it for future recovery.
+
+        Args:
+            provider: Provider to save configuration for
+        """
+        config = ProviderConfigSnapshot(
+            provider_id=provider.provider_id,
+            mode=provider.mode_str,
+            command=provider._command,
+            image=provider._image,
+            endpoint=provider._endpoint,
+            env=provider._env,
+            idle_ttl_s=provider._idle_ttl.seconds,
+            health_check_interval_s=provider._health_check_interval.seconds,
+            max_consecutive_failures=provider._health.max_consecutive_failures,
+            description=provider.description,
+            volumes=provider._volumes,
+            build=provider._build,
+            resources=provider._resources,
+            network=provider._network,
+            read_only=provider._read_only,
+            user=provider._user,
+            tools=([t.to_dict() for t in provider.tools] if provider._tools_predefined else None),
+            enabled=True,
+        )
+
+        await self._config_repo.save(config)
+
+        # Record in audit log
+        await self._audit_repo.append(
+            AuditEntry(
+                entity_id=provider.provider_id,
+                entity_type="provider",
+                action=AuditAction.UPDATED,
+                timestamp=datetime.now(timezone.utc),
+                actor="system",
+                new_state=config.to_dict(),
+            )
+        )
+
+        logger.debug(f"Saved config for provider: {provider.provider_id}")
+
+    async def delete_provider_config(self, provider_id: str) -> bool:
+        """Delete a provider's configuration from storage.
+
+        Soft-deletes the configuration (marks as disabled).
+
+        Args:
+            provider_id: Provider identifier
+
+        Returns:
+            True if deleted, False if not found
+        """
+        # Get current config for audit
+        old_config = await self._config_repo.get(provider_id)
+
+        deleted = await self._config_repo.delete(provider_id)
+
+        if deleted and old_config:
+            await self._audit_repo.append(
+                AuditEntry(
+                    entity_id=provider_id,
+                    entity_type="provider",
+                    action=AuditAction.DELETED,
+                    timestamp=datetime.now(timezone.utc),
+                    actor="system",
+                    old_state=old_config.to_dict(),
+                )
+            )
+
+        return deleted
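
A minimal sketch of how RecoveryService might be wired at startup. Only the RecoveryService API shown in the diff above is taken from the package; the surrounding wiring (where the Database and the IProviderRepository implementation come from) is an assumption for illustration.

    from mcp_hangar.infrastructure.persistence.database import Database
    from mcp_hangar.infrastructure.persistence.recovery_service import RecoveryService


    async def restore_state(database: Database, provider_repo) -> None:
        # provider_repo: any IProviderRepository implementation; obtaining one
        # is bootstrap-specific and not shown in this diff (assumption).
        service = RecoveryService(database=database, provider_repository=provider_repo)

        # Loads every persisted ProviderConfigSnapshot and registers a Provider
        # aggregate for each; per-provider failures are collected, not raised.
        recovered_ids = await service.recover_providers()

        # Failures surface in the status payload, keyed by provider_id.
        status = await service.get_recovery_status()
        if status["failed_count"]:
            print(status["errors"])

Note that recover_providers() records failures in RecoveryResult instead of aborting, so one bad configuration does not block the rest of startup.
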
mcp_hangar/infrastructure/persistence/sqlite_event_store.py
@@ -0,0 +1,386 @@
+"""SQLite-based Event Store implementation.
+
+Provides durable event persistence suitable for single-node deployments.
+For distributed systems, consider PostgreSQL or EventStoreDB.
+"""
+
+from datetime import datetime, timezone
+from pathlib import Path
+import sqlite3
+import threading
+from typing import Iterator
+
+from mcp_hangar.domain.contracts.event_store import ConcurrencyError, IEventStore
+from mcp_hangar.domain.events import DomainEvent
+from mcp_hangar.logging_config import get_logger
+
+from .event_serializer import EventSerializer
+
+logger = get_logger(__name__)
+
+
+class SQLiteEventStore(IEventStore):
+    """SQLite-based event store with optimistic concurrency.
+
+    Thread-safe implementation suitable for single-node deployments.
+
+    Features:
+        - Append-only event storage
+        - Optimistic concurrency control via version checks
+        - Global ordering across all streams
+        - Efficient stream reads with indexing
+
+    Schema:
+        - events: Main event table with global ordering
+        - streams: Track stream versions for concurrency control
+    """
+
+    def __init__(self, db_path: str | Path = ":memory:", *, serializer: EventSerializer | None = None):
+        """Initialize SQLite event store.
+
+        Args:
+            db_path: Path to SQLite database file.
+                Use ":memory:" for in-memory store (testing).
+            serializer: Optional EventSerializer instance. Allows injecting an upcaster-aware serializer.
+        """
+        self._db_path = str(db_path)
+        self._serializer = serializer or EventSerializer()
+        self._lock = threading.Lock()
+        self._is_memory = self._db_path == ":memory:"
+
+        # For in-memory database, keep a persistent connection
+        # (each new connection to :memory: creates a NEW database)
+        self._persistent_conn: sqlite3.Connection | None = None
+        if self._is_memory:
+            self._persistent_conn = self._create_connection()
+
+        self._init_schema()
+
+        logger.info(
+            "sqlite_event_store_initialized",
+            db_path=self._db_path,
+            in_memory=self._is_memory,
+        )
+
+    def _create_connection(self) -> sqlite3.Connection:
+        """Create a new database connection."""
+        conn = sqlite3.connect(self._db_path, check_same_thread=False)
+        conn.row_factory = sqlite3.Row
+        conn.execute("PRAGMA foreign_keys = ON")
+        if not self._is_memory:
+            conn.execute("PRAGMA journal_mode = WAL")
+        return conn
+
+    def _init_schema(self) -> None:
+        """Initialize database schema."""
+        conn = self._connect()
+        try:
+            conn.executescript(
+                """
+                -- Main events table
+                CREATE TABLE IF NOT EXISTS events (
+                    global_position INTEGER PRIMARY KEY AUTOINCREMENT,
+                    stream_id TEXT NOT NULL,
+                    stream_version INTEGER NOT NULL,
+                    event_type TEXT NOT NULL,
+                    data TEXT NOT NULL,
+                    metadata TEXT,
+                    created_at TEXT NOT NULL,
+                    UNIQUE(stream_id, stream_version)
+                );
+
+                -- Index for efficient stream reads
+                CREATE INDEX IF NOT EXISTS idx_events_stream
+                    ON events(stream_id, stream_version);
+
+                -- Index for global reads (projections)
+                CREATE INDEX IF NOT EXISTS idx_events_global
+                    ON events(global_position);
+
+                -- Stream version tracking for optimistic concurrency
+                CREATE TABLE IF NOT EXISTS streams (
+                    stream_id TEXT PRIMARY KEY,
+                    version INTEGER NOT NULL DEFAULT -1,
+                    created_at TEXT NOT NULL,
+                    updated_at TEXT NOT NULL
+                );
+                """
+            )
+        finally:
+            if not self._is_memory:
+                conn.close()
+
+    def _connect(self) -> sqlite3.Connection:
+        """Get database connection.
+
+        For in-memory databases, returns the persistent connection.
+        For file-based databases, creates a new connection.
+        """
+        if self._is_memory and self._persistent_conn:
+            return self._persistent_conn
+        return self._create_connection()
+
+    def append(
+        self,
+        stream_id: str,
+        events: list[DomainEvent],
+        expected_version: int,
+    ) -> int:
+        """Append events to a stream with optimistic concurrency.
+
+        Args:
+            stream_id: Stream identifier (e.g., "provider:math").
+            events: Events to append.
+            expected_version: Expected current version (-1 for new stream).
+
+        Returns:
+            New stream version after append.
+
+        Raises:
+            ConcurrencyError: If version mismatch.
+        """
+        if not events:
+            return expected_version
+
+        with self._lock:
+            conn = self._connect()
+            try:
+                cursor = conn.cursor()
+                timestamp = datetime.now(timezone.utc).isoformat()
+
+                # Check current version
+                cursor.execute(
+                    "SELECT version FROM streams WHERE stream_id = ?",
+                    (stream_id,),
+                )
+                row = cursor.fetchone()
+                current_version = row["version"] if row else -1
+
+                if current_version != expected_version:
+                    raise ConcurrencyError(stream_id, expected_version, current_version)
+
+                # Append events
+                new_version = current_version
+                for event in events:
+                    new_version += 1
+                    event_type, data = self._serializer.serialize(event)
+
+                    cursor.execute(
+                        """
+                        INSERT INTO events
+                        (stream_id, stream_version, event_type, data, created_at)
+                        VALUES (?, ?, ?, ?, ?)
+                        """,
+                        (stream_id, new_version, event_type, data, timestamp),
+                    )
+
+                # Update or insert stream version
+                if current_version == -1:
+                    cursor.execute(
+                        """
+                        INSERT INTO streams (stream_id, version, created_at, updated_at)
+                        VALUES (?, ?, ?, ?)
+                        """,
+                        (stream_id, new_version, timestamp, timestamp),
+                    )
+                else:
+                    cursor.execute(
+                        """
+                        UPDATE streams SET version = ?, updated_at = ?
+                        WHERE stream_id = ?
+                        """,
+                        (new_version, timestamp, stream_id),
+                    )
+
+                conn.commit()
+
+                logger.debug(
+                    "events_appended",
+                    stream_id=stream_id,
+                    events_count=len(events),
+                    new_version=new_version,
+                )
+
+                return new_version
+
+            except ConcurrencyError:
+                conn.rollback()
+                raise
+            except Exception as e:
+                conn.rollback()
+                logger.error(
+                    "event_append_failed",
+                    stream_id=stream_id,
+                    error=str(e),
+                )
+                raise
+            finally:
+                if not self._is_memory:
+                    conn.close()
+
+    def read_stream(
+        self,
+        stream_id: str,
+        from_version: int = 0,
+    ) -> list[DomainEvent]:
+        """Read events from a stream.
+
+        Args:
+            stream_id: Stream identifier.
+            from_version: Start version (inclusive).
+
+        Returns:
+            List of events in order. Empty if stream doesn't exist.
+        """
+        conn = self._connect()
+        try:
+            cursor = conn.execute(
+                """
+                SELECT event_type, data FROM events
+                WHERE stream_id = ? AND stream_version >= ?
+                ORDER BY stream_version ASC
+                """,
+                (stream_id, from_version),
+            )
+
+            events = []
+            for row in cursor.fetchall():
+                event = self._serializer.deserialize(row["event_type"], row["data"])
+                events.append(event)
+
+            logger.debug(
+                "stream_read",
+                stream_id=stream_id,
+                from_version=from_version,
+                events_count=len(events),
+            )
+
+            return events
+        finally:
+            if not self._is_memory:
+                conn.close()
+
+    def read_all(
+        self,
+        from_position: int = 0,
+        limit: int = 1000,
+    ) -> Iterator[tuple[int, str, DomainEvent]]:
+        """Read all events across streams (for projections).
+
+        Args:
+            from_position: Start position (exclusive).
+            limit: Maximum events to return.
+
+        Yields:
+            Tuples of (global_position, stream_id, event).
+        """
+        conn = self._connect()
+        try:
+            cursor = conn.execute(
+                """
+                SELECT global_position, stream_id, event_type, data
+                FROM events
+                WHERE global_position > ?
+                ORDER BY global_position ASC
+                LIMIT ?
+                """,
+                (from_position, limit),
+            )
+
+            # Fetch all rows first to allow closing connection
+            rows = cursor.fetchall()
+        finally:
+            if not self._is_memory:
+                conn.close()
+
+        for row in rows:
+            event = self._serializer.deserialize(row["event_type"], row["data"])
+            yield row["global_position"], row["stream_id"], event
+
+    def get_stream_version(self, stream_id: str) -> int:
+        """Get current version of a stream.
+
+        Args:
+            stream_id: Stream identifier.
+
+        Returns:
+            Current version, or -1 if stream doesn't exist.
+        """
+        conn = self._connect()
+        try:
+            cursor = conn.execute(
+                "SELECT version FROM streams WHERE stream_id = ?",
+                (stream_id,),
+            )
+            row = cursor.fetchone()
+            return row["version"] if row else -1
+        finally:
+            if not self._is_memory:
+                conn.close()
+
+    def get_all_stream_ids(self) -> list[str]:
+        """Get all stream IDs in the store.
+
+        Returns:
+            List of stream identifiers.
+        """
+        conn = self._connect()
+        try:
+            cursor = conn.execute("SELECT stream_id FROM streams ORDER BY stream_id")
+            return [row["stream_id"] for row in cursor.fetchall()]
+        finally:
+            if not self._is_memory:
+                conn.close()
+
+    def get_event_count(self) -> int:
+        """Get total number of events in the store.
+
+        Returns:
+            Total event count.
+        """
+        conn = self._connect()
+        try:
+            cursor = conn.execute("SELECT COUNT(*) as count FROM events")
+            row = cursor.fetchone()
+            return row["count"] if row else 0
+        finally:
+            if not self._is_memory:
+                conn.close()
+
+    def get_stream_count(self) -> int:
+        """Get total number of streams.
+
+        Returns:
+            Total stream count.
+        """
+        conn = self._connect()
+        try:
+            cursor = conn.execute("SELECT COUNT(*) as count FROM streams")
+            row = cursor.fetchone()
+            return row["count"] if row else 0
+        finally:
+            if not self._is_memory:
+                conn.close()
+
+    def list_streams(self, prefix: str = "") -> list[str]:
+        """List all stream IDs, optionally filtered by prefix.
+
+        Args:
+            prefix: Optional prefix to filter streams.
+
+        Returns:
+            List of stream IDs matching the prefix.
+        """
+        conn = self._connect()
+        try:
+            if prefix:
+                cursor = conn.execute(
+                    "SELECT stream_id FROM streams WHERE stream_id LIKE ? ORDER BY stream_id",
+                    (f"{prefix}%",),
+                )
+            else:
+                cursor = conn.execute("SELECT stream_id FROM streams ORDER BY stream_id")
+            return [row["stream_id"] for row in cursor.fetchall()]
+        finally:
+            if not self._is_memory:
+                conn.close()
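
A usage sketch for the event store above, focused on the optimistic-concurrency contract. append(), read_stream(), read_all(), get_stream_version() and ConcurrencyError are taken from the code in this diff; the make_events() factory and the projection handler are illustrative assumptions.

    from mcp_hangar.domain.contracts.event_store import ConcurrencyError
    from mcp_hangar.infrastructure.persistence.sqlite_event_store import SQLiteEventStore

    store = SQLiteEventStore("events.db")  # ":memory:" keeps one shared connection

    stream_id = "provider:math"
    version = store.get_stream_version(stream_id)  # -1 for a new stream
    try:
        # make_events() is a hypothetical factory returning DomainEvent instances.
        version = store.append(stream_id, make_events(), expected_version=version)
    except ConcurrencyError:
        # Another writer advanced the stream first: re-read and retry.
        events = store.read_stream(stream_id)

    # Projections page through the global feed, remembering their position.
    last_position = 0
    for last_position, sid, event in store.read_all(from_position=last_position, limit=500):
        ...  # apply event to a read model (hypothetical)

The threading.Lock serializes appends within a single process; across processes, the UNIQUE(stream_id, stream_version) constraint in the events table is the backstop that rejects a conflicting insert, so the version check is enforced by the schema as well.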