stabilize-0.9.2-py3-none-any.whl

Files changed (61)
  1. stabilize/__init__.py +29 -0
  2. stabilize/cli.py +1193 -0
  3. stabilize/context/__init__.py +7 -0
  4. stabilize/context/stage_context.py +170 -0
  5. stabilize/dag/__init__.py +15 -0
  6. stabilize/dag/graph.py +215 -0
  7. stabilize/dag/topological.py +199 -0
  8. stabilize/examples/__init__.py +1 -0
  9. stabilize/examples/docker-example.py +759 -0
  10. stabilize/examples/golden-standard-expected-result.txt +1 -0
  11. stabilize/examples/golden-standard.py +488 -0
  12. stabilize/examples/http-example.py +606 -0
  13. stabilize/examples/llama-example.py +662 -0
  14. stabilize/examples/python-example.py +731 -0
  15. stabilize/examples/shell-example.py +399 -0
  16. stabilize/examples/ssh-example.py +603 -0
  17. stabilize/handlers/__init__.py +53 -0
  18. stabilize/handlers/base.py +226 -0
  19. stabilize/handlers/complete_stage.py +209 -0
  20. stabilize/handlers/complete_task.py +75 -0
  21. stabilize/handlers/complete_workflow.py +150 -0
  22. stabilize/handlers/run_task.py +369 -0
  23. stabilize/handlers/start_stage.py +262 -0
  24. stabilize/handlers/start_task.py +74 -0
  25. stabilize/handlers/start_workflow.py +136 -0
  26. stabilize/launcher.py +307 -0
  27. stabilize/migrations/01KDQ4N9QPJ6Q4MCV3V9GHWPV4_initial_schema.sql +97 -0
  28. stabilize/migrations/01KDRK3TXW4R2GERC1WBCQYJGG_rag_embeddings.sql +25 -0
  29. stabilize/migrations/__init__.py +1 -0
  30. stabilize/models/__init__.py +15 -0
  31. stabilize/models/stage.py +389 -0
  32. stabilize/models/status.py +146 -0
  33. stabilize/models/task.py +125 -0
  34. stabilize/models/workflow.py +317 -0
  35. stabilize/orchestrator.py +113 -0
  36. stabilize/persistence/__init__.py +28 -0
  37. stabilize/persistence/connection.py +185 -0
  38. stabilize/persistence/factory.py +136 -0
  39. stabilize/persistence/memory.py +214 -0
  40. stabilize/persistence/postgres.py +655 -0
  41. stabilize/persistence/sqlite.py +674 -0
  42. stabilize/persistence/store.py +235 -0
  43. stabilize/queue/__init__.py +59 -0
  44. stabilize/queue/messages.py +377 -0
  45. stabilize/queue/processor.py +312 -0
  46. stabilize/queue/queue.py +526 -0
  47. stabilize/queue/sqlite_queue.py +354 -0
  48. stabilize/rag/__init__.py +19 -0
  49. stabilize/rag/assistant.py +459 -0
  50. stabilize/rag/cache.py +294 -0
  51. stabilize/stages/__init__.py +11 -0
  52. stabilize/stages/builder.py +253 -0
  53. stabilize/tasks/__init__.py +19 -0
  54. stabilize/tasks/interface.py +335 -0
  55. stabilize/tasks/registry.py +255 -0
  56. stabilize/tasks/result.py +283 -0
  57. stabilize-0.9.2.dist-info/METADATA +301 -0
  58. stabilize-0.9.2.dist-info/RECORD +61 -0
  59. stabilize-0.9.2.dist-info/WHEEL +4 -0
  60. stabilize-0.9.2.dist-info/entry_points.txt +2 -0
  61. stabilize-0.9.2.dist-info/licenses/LICENSE +201 -0
stabilize/models/workflow.py
@@ -0,0 +1,317 @@
+"""
+Workflow model.
+
+A pipeline execution represents a running instance of a pipeline, containing
+all stages and their runtime state. The execution tracks:
+- Overall status
+- All stages (including synthetic stages)
+- Trigger information
+- Timing data
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from enum import Enum
+from typing import Any
+
+from stabilize.models.stage import StageExecution
+from stabilize.models.status import WorkflowStatus
+
+
+def _generate_execution_id() -> str:
+    """Generate a unique execution ID using ULID."""
+    import ulid
+
+    return str(ulid.new())
+
+
+class WorkflowType(Enum):
+    """
+    Type of execution.
+
+    PIPELINE: A full pipeline execution
+    ORCHESTRATION: An ad-hoc orchestration (single stage)
+    """
+
+    PIPELINE = "PIPELINE"
+    ORCHESTRATION = "ORCHESTRATION"
+
+
+@dataclass
+class Trigger:
+    """
+    Trigger information for a pipeline execution.
+
+    Contains details about what triggered the pipeline (manual, webhook, cron, etc.)
+    and any parameters passed to the execution.
+    """
+
+    type: str = "manual"
+    user: str = "anonymous"
+    parameters: dict[str, Any] = field(default_factory=dict)
+    artifacts: list[dict[str, Any]] = field(default_factory=list)
+    payload: dict[str, Any] = field(default_factory=dict)
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert trigger to dictionary for storage."""
+        return {
+            "type": self.type,
+            "user": self.user,
+            "parameters": self.parameters,
+            "artifacts": self.artifacts,
+            "payload": self.payload,
+        }
+
+    @classmethod
+    def from_dict(cls, data: dict[str, Any]) -> Trigger:
+        """Create trigger from dictionary."""
+        return cls(
+            type=data.get("type", "manual"),
+            user=data.get("user", "anonymous"),
+            parameters=data.get("parameters", {}),
+            artifacts=data.get("artifacts", []),
+            payload=data.get("payload", {}),
+        )
+
+
+@dataclass
+class PausedDetails:
+    """
+    Details about a paused execution.
+    """
+
+    paused_by: str = ""
+    pause_time: int | None = None
+    resume_time: int | None = None
+    paused_ms: int = 0
+
+    @property
+    def is_paused(self) -> bool:
+        """Check if currently paused."""
+        return self.pause_time is not None and self.resume_time is None
+
+
+@dataclass
+class Workflow:
+    """
+    Represents a pipeline execution.
+
+    This is the top-level container for all execution state. It holds all stages
+    and tracks the overall execution status.
+
+    Attributes:
+        id: Unique identifier (ULID)
+        type: PIPELINE or ORCHESTRATION
+        application: Application name this pipeline belongs to
+        name: Pipeline name
+        status: Current execution status
+        stages: All stages in this execution (including synthetic)
+        trigger: Trigger information
+        start_time: Epoch milliseconds when execution started
+        end_time: Epoch milliseconds when execution completed
+        start_time_expiry: If not started by this time, cancel
+        is_canceled: Whether execution has been canceled
+        canceled_by: User who canceled the execution
+        cancellation_reason: Reason for cancellation
+        paused: Pause details if execution is paused
+        pipeline_config_id: ID of the pipeline configuration
+        is_limit_concurrent: Whether to limit concurrent executions
+        max_concurrent_executions: Max concurrent executions allowed
+        keep_waiting_pipelines: Keep queued pipelines on cancel
+        origin: Origin of the execution (e.g., "api", "deck")
+    """
+
+    id: str = field(default_factory=_generate_execution_id)
+    type: WorkflowType = WorkflowType.PIPELINE
+    application: str = ""
+    name: str = ""
+    status: WorkflowStatus = WorkflowStatus.NOT_STARTED
+    stages: list[StageExecution] = field(default_factory=list)
+    trigger: Trigger = field(default_factory=Trigger)
+    start_time: int | None = None
+    end_time: int | None = None
+    start_time_expiry: int | None = None
+    is_canceled: bool = False
+    canceled_by: str | None = None
+    cancellation_reason: str | None = None
+    paused: PausedDetails | None = None
+    pipeline_config_id: str | None = None
+    is_limit_concurrent: bool = False
+    max_concurrent_executions: int = 0
+    keep_waiting_pipelines: bool = False
+    origin: str = "unknown"
+
+    def __post_init__(self) -> None:
+        """Set execution reference on all stages after construction."""
+        for stage in self.stages:
+            stage._execution = self
+
+    def add_stage(self, stage: StageExecution) -> None:
+        """Add a stage to this execution."""
+        stage._execution = self
+        self.stages.append(stage)
+
+    def remove_stage(self, stage_id: str) -> None:
+        """Remove a stage from this execution."""
+        self.stages = [s for s in self.stages if s.id != stage_id]
+
+    def stage_by_id(self, stage_id: str) -> StageExecution:
+        """
+        Get a stage by its ID.
+
+        Raises:
+            ValueError: If stage not found
+        """
+        for stage in self.stages:
+            if stage.id == stage_id:
+                return stage
+        raise ValueError(f"Stage {stage_id} not found")
+
+    def stage_by_ref_id(self, ref_id: str) -> StageExecution | None:
+        """Get a stage by its reference ID."""
+        for stage in self.stages:
+            if stage.ref_id == ref_id:
+                return stage
+        return None
+
+    # ========== Stage Queries ==========
+
+    def initial_stages(self) -> list[StageExecution]:
+        """
+        Get all initial stages (no dependencies, not synthetic).
+
+        These are the stages that can start immediately when execution begins.
+        """
+        return [stage for stage in self.stages if stage.is_initial() and not stage.is_synthetic()]
+
+    def top_level_stages(self) -> list[StageExecution]:
+        """Get all top-level stages (not synthetic)."""
+        return [stage for stage in self.stages if not stage.is_synthetic()]
+
+    # ========== Context Aggregation ==========
+
+    def get_context(self) -> dict[str, Any]:
+        """
+        Get aggregated context from all stages.
+
+        Returns merged outputs from all stages in topological order.
+        Collections are concatenated; for non-collections, the latest value wins.
+        """
+        from stabilize.dag.topological import topological_sort
+
+        result: dict[str, Any] = {}
+
+        for stage in topological_sort(self.stages):
+            for key, value in stage.outputs.items():
+                if key in result and isinstance(result[key], list) and isinstance(value, list):
+                    # Concatenate lists, avoiding duplicates
+                    existing = result[key]
+                    for item in value:
+                        if item not in existing:
+                            existing.append(item)
+                else:
+                    result[key] = value
+
+        return result
+
+    # ========== Status Methods ==========
+
+    def update_status(self, status: WorkflowStatus) -> None:
+        """Update the execution status."""
+        self.status = status
+
+    def cancel(self, user: str, reason: str) -> None:
+        """Mark this execution as canceled."""
+        self.is_canceled = True
+        self.canceled_by = user
+        self.cancellation_reason = reason
+
+    def pause(self, user: str) -> None:
+        """Pause this execution."""
+        import time
+
+        self.paused = PausedDetails(
+            paused_by=user,
+            pause_time=int(time.time() * 1000),
+        )
+        self.status = WorkflowStatus.PAUSED
+
+    def resume(self) -> None:
+        """Resume this execution."""
+        import time
+
+        if self.paused and self.paused.pause_time:
+            self.paused.resume_time = int(time.time() * 1000)
+            self.paused.paused_ms = self.paused.resume_time - self.paused.pause_time
+        self.status = WorkflowStatus.RUNNING
+
+    def paused_duration_relative_to(self, instant_ms: int) -> int:
+        """
+        Get paused duration relative to a given instant.
+
+        Returns 0 if not paused, or if the pause began at or before the instant.
+        """
+        if self.paused and self.paused.pause_time:
+            if self.paused.pause_time > instant_ms:
+                return self.paused.paused_ms
+        return 0
+
+    # ========== Factory Methods ==========
+
+    @classmethod
+    def create(
+        cls,
+        application: str,
+        name: str,
+        stages: list[StageExecution],
+        trigger: Trigger | None = None,
+        pipeline_config_id: str | None = None,
+    ) -> Workflow:
+        """
+        Factory method to create a new pipeline execution.
+
+        Args:
+            application: Application name
+            name: Pipeline name
+            stages: List of stages
+            trigger: Optional trigger info
+            pipeline_config_id: Optional config ID
+
+        Returns:
+            A new Workflow instance
+        """
+        execution = cls(
+            application=application,
+            name=name,
+            stages=stages,
+            trigger=trigger or Trigger(),
+            pipeline_config_id=pipeline_config_id,
+        )
+        return execution
+
+    @classmethod
+    def create_orchestration(
+        cls,
+        application: str,
+        name: str,
+        stages: list[StageExecution],
+    ) -> Workflow:
+        """
+        Factory method to create an orchestration (ad-hoc execution).
+
+        Args:
+            application: Application name
+            name: Orchestration name
+            stages: List of stages
+
+        Returns:
+            A new Workflow with type ORCHESTRATION
+        """
+        execution = cls(
+            type=WorkflowType.ORCHESTRATION,
+            application=application,
+            name=name,
+            stages=stages,
+        )
+        return execution
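
Taken together, the model above supports a complete in-memory lifecycle. A minimal sketch using only the API visible in this hunk (the Trigger dataclass, the create() factory, and pause/resume/cancel); it passes an empty stage list because StageExecution's constructor is not part of this diff:

import time

from stabilize.models.workflow import Trigger, Workflow

trigger = Trigger(type="webhook", user="ci-bot", parameters={"branch": "main"})
wf = Workflow.create(application="demo", name="build", stages=[], trigger=trigger)

wf.pause(user="admin")   # records pause_time, sets status to PAUSED
time.sleep(0.01)
wf.resume()              # records resume_time, computes paused_ms, sets RUNNING
print(wf.status, wf.paused.paused_ms)

wf.cancel(user="admin", reason="superseded")
print(wf.is_canceled, wf.cancellation_reason)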
stabilize/orchestrator.py
@@ -0,0 +1,113 @@
+"""
+Orchestrator - starts and manages pipeline executions.
+
+This module provides the main entry point for running pipelines.
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from stabilize.queue.messages import (
+    CancelWorkflow,
+    RestartStage,
+    ResumeStage,
+    StartWorkflow,
+)
+
+if TYPE_CHECKING:
+    from stabilize.models.workflow import Workflow
+    from stabilize.queue.queue import Queue
+
+
+class Orchestrator:
+    """
+    Runner for pipeline executions.
+
+    Provides methods to start, cancel, restart, and resume executions
+    by pushing appropriate messages to the queue.
+    """
+
+    def __init__(self, queue: Queue) -> None:
+        """
+        Initialize the runner.
+
+        Args:
+            queue: The message queue
+        """
+        self.queue = queue
+
+    def start(self, execution: Workflow) -> None:
+        """
+        Start a pipeline execution.
+
+        Args:
+            execution: The execution to start
+        """
+        self.queue.push(
+            StartWorkflow(
+                execution_type=execution.type.value,
+                execution_id=execution.id,
+            )
+        )
+
+    def cancel(
+        self,
+        execution: Workflow,
+        user: str,
+        reason: str,
+    ) -> None:
+        """
+        Cancel a running execution.
+
+        Args:
+            execution: The execution to cancel
+            user: Who is canceling
+            reason: Why it's being canceled
+        """
+        self.queue.push(
+            CancelWorkflow(
+                execution_type=execution.type.value,
+                execution_id=execution.id,
+                user=user,
+                reason=reason,
+            )
+        )
+
+    def restart(
+        self,
+        execution: Workflow,
+        stage_id: str,
+    ) -> None:
+        """
+        Restart a stage in an execution.
+
+        Args:
+            execution: The execution
+            stage_id: The stage to restart
+        """
+        self.queue.push(
+            RestartStage(
+                execution_type=execution.type.value,
+                execution_id=execution.id,
+                stage_id=stage_id,
+            )
+        )
+
+    def unpause(self, execution: Workflow) -> None:
+        """
+        Resume a paused execution.
+
+        Args:
+            execution: The execution to resume
+        """
+        # Resume all paused stages
+        for stage in execution.stages:
+            if stage.status.name == "PAUSED":
+                self.queue.push(
+                    ResumeStage(
+                        execution_type=execution.type.value,
+                        execution_id=execution.id,
+                        stage_id=stage.id,
+                    )
+                )
stabilize/persistence/__init__.py
@@ -0,0 +1,28 @@
+"""Persistence layer for pipeline execution."""
+
+from stabilize.persistence.factory import (
+    create_queue,
+    create_repository,
+    detect_backend,
+)
+from stabilize.persistence.memory import InMemoryWorkflowStore
+from stabilize.persistence.postgres import PostgresWorkflowStore
+from stabilize.persistence.sqlite import SqliteWorkflowStore
+from stabilize.persistence.store import (
+    WorkflowNotFoundError,
+    WorkflowStore,
+)
+
+__all__ = [
+    # Abstract interface
+    "WorkflowStore",
+    "WorkflowNotFoundError",
+    # Implementations
+    "PostgresWorkflowStore",
+    "SqliteWorkflowStore",
+    "InMemoryWorkflowStore",
+    # Factory functions
+    "create_repository",
+    "create_queue",
+    "detect_backend",
+]
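
This __init__ flattens the persistence API into a single import surface. A short sketch of that surface; the constructor signatures are not part of this diff, so the no-argument InMemoryWorkflowStore() call below is an assumption:

from stabilize.persistence import (
    InMemoryWorkflowStore,
    WorkflowNotFoundError,
    WorkflowStore,
)

# Assumption: the in-memory implementation needs no constructor arguments.
store = InMemoryWorkflowStore()
print(isinstance(store, WorkflowStore))              # implementations share the abstract interface
print(issubclass(WorkflowNotFoundError, Exception))  # the error type callers are expected to catch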
stabilize/persistence/connection.py
@@ -0,0 +1,185 @@
+"""
+Singleton connection manager for database connections.
+
+Provides centralized connection pooling for PostgreSQL and thread-local
+connections for SQLite, ensuring efficient resource usage across all
+repository and queue instances.
+"""
+
+from __future__ import annotations
+
+import sqlite3
+import threading
+from typing import TYPE_CHECKING, Any
+
+if TYPE_CHECKING:
+    from psycopg_pool import ConnectionPool
+
+
+class SingletonMeta(type):
+    """
+    Thread-safe metaclass for the singleton pattern.
+
+    Ensures only one instance of a class exists, even when accessed
+    from multiple threads simultaneously.
+    """
+
+    _instances: dict[type, Any] = {}
+    _lock: threading.Lock = threading.Lock()
+
+    def __call__(cls, *args: Any, **kwargs: Any) -> Any:
+        if cls not in cls._instances:
+            with cls._lock:
+                # Double-checked locking pattern
+                if cls not in cls._instances:
+                    instance = super().__call__(*args, **kwargs)
+                    cls._instances[cls] = instance
+        return cls._instances[cls]
+
+    @classmethod
+    def reset(mcs, cls: type) -> None:
+        """Reset singleton instance (for testing)."""
+        with mcs._lock:
+            if cls in mcs._instances:
+                instance = mcs._instances.pop(cls)
+                if hasattr(instance, "close_all"):
+                    instance.close_all()
+
+
+class ConnectionManager(metaclass=SingletonMeta):
+    """
+    Singleton connection manager for all database connections.
+
+    Manages:
+    - PostgreSQL connection pools (one pool per connection string)
+    - SQLite thread-local connections (one connection per thread per db path)
+
+    Usage:
+        manager = ConnectionManager()
+        pool = manager.get_postgres_pool("postgresql://...")
+        conn = manager.get_sqlite_connection("sqlite:///./db.sqlite")
+    """
+
+    def __init__(self) -> None:
+        self._postgres_pools: dict[str, ConnectionPool] = {}
+        self._postgres_lock = threading.Lock()
+
+        self._sqlite_local = threading.local()
+        self._sqlite_lock = threading.Lock()
+        self._sqlite_paths: set[str] = set()
+
+    def get_postgres_pool(
+        self,
+        connection_string: str,
+        min_size: int = 5,
+        max_size: int = 15,
+    ) -> ConnectionPool:
+        """
+        Get or create a PostgreSQL connection pool.
+
+        Args:
+            connection_string: PostgreSQL connection string
+            min_size: Minimum pool size
+            max_size: Maximum pool size
+
+        Returns:
+            Shared ConnectionPool instance for this connection string
+        """
+        if connection_string not in self._postgres_pools:
+            with self._postgres_lock:
+                if connection_string not in self._postgres_pools:
+                    from psycopg.rows import dict_row
+                    from psycopg_pool import ConnectionPool
+
+                    pool = ConnectionPool(
+                        connection_string,
+                        min_size=min_size,
+                        max_size=max_size,
+                        open=True,
+                        kwargs={"row_factory": dict_row},
+                    )
+                    self._postgres_pools[connection_string] = pool
+        return self._postgres_pools[connection_string]
+
+    def get_sqlite_connection(self, connection_string: str) -> sqlite3.Connection:
+        """
+        Get or create a thread-local SQLite connection.
+
+        Args:
+            connection_string: SQLite connection string
+
+        Returns:
+            Thread-local Connection instance for this database
+        """
+        db_path = self._parse_sqlite_path(connection_string)
+
+        # Track paths for cleanup
+        with self._sqlite_lock:
+            self._sqlite_paths.add(db_path)
+
+        # Get thread-local storage
+        if not hasattr(self._sqlite_local, "connections"):
+            self._sqlite_local.connections = {}
+
+        connections: dict[str, sqlite3.Connection] = self._sqlite_local.connections
+
+        if db_path not in connections or connections[db_path] is None:
+            conn = sqlite3.connect(
+                db_path,
+                timeout=30,
+                check_same_thread=False,
+            )
+            conn.row_factory = sqlite3.Row
+            conn.execute("PRAGMA foreign_keys = ON")
+            if db_path != ":memory:":
+                conn.execute("PRAGMA journal_mode = WAL")
+                conn.execute("PRAGMA busy_timeout = 30000")
+            connections[db_path] = conn
+
+        return connections[db_path]
+
+    def _parse_sqlite_path(self, connection_string: str) -> str:
+        """Parse SQLite connection string to extract database path."""
+        if connection_string.startswith("sqlite:///"):
+            return connection_string[10:]
+        elif connection_string.startswith("sqlite://"):
+            return connection_string[9:]
+        return connection_string
+
+    def close_postgres_pool(self, connection_string: str) -> None:
+        """Close a specific PostgreSQL pool."""
+        with self._postgres_lock:
+            if connection_string in self._postgres_pools:
+                pool = self._postgres_pools.pop(connection_string)
+                pool.close()
+
+    def close_sqlite_connection(self, connection_string: str) -> None:
+        """Close SQLite connection for current thread."""
+        db_path = self._parse_sqlite_path(connection_string)
+        if hasattr(self._sqlite_local, "connections"):
+            connections: dict[str, sqlite3.Connection | None] = self._sqlite_local.connections
+            conn = connections.get(db_path)
+            if conn is not None:
+                conn.close()
+                connections[db_path] = None
+
+    def close_all(self) -> None:
+        """Close all connections (for shutdown/testing)."""
+        # Close all PostgreSQL pools
+        with self._postgres_lock:
+            for pool in self._postgres_pools.values():
+                pool.close()
+            self._postgres_pools.clear()
+
+        # Close SQLite connections for current thread
+        if hasattr(self._sqlite_local, "connections"):
+            connections: dict[str, sqlite3.Connection] = self._sqlite_local.connections
+            for conn in connections.values():
+                if conn is not None:
+                    conn.close()
+            connections.clear()
+
+
+def get_connection_manager() -> ConnectionManager:
+    """Get the singleton ConnectionManager instance."""
+    return ConnectionManager()
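
The usage snippet in the class docstring can be exercised directly. Because of SingletonMeta, every construction returns the same manager, and SQLite connections are memoized per thread and per database path; a short sketch of that behavior:

from stabilize.persistence.connection import ConnectionManager, get_connection_manager

a = ConnectionManager()
b = get_connection_manager()
print(a is b)  # True: SingletonMeta hands back the one shared instance

conn1 = a.get_sqlite_connection("sqlite:///./demo.sqlite")
conn2 = a.get_sqlite_connection("sqlite:///./demo.sqlite")
print(conn1 is conn2)  # True on the same thread: the connection is reused

a.close_all()  # close PostgreSQL pools and this thread's SQLite connections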