boti-data 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. boti_data/__init__.py +92 -0
  2. boti_data/arrow_schema.py +326 -0
  3. boti_data/connection_catalog.py +121 -0
  4. boti_data/db/__init__.py +42 -0
  5. boti_data/db/arrow_schema_mapper.py +331 -0
  6. boti_data/db/engine_registry.py +113 -0
  7. boti_data/db/partitioned_execution.py +333 -0
  8. boti_data/db/partitioned_loader.py +169 -0
  9. boti_data/db/partitioned_planner.py +429 -0
  10. boti_data/db/partitioned_types.py +123 -0
  11. boti_data/db/sql_config.py +154 -0
  12. boti_data/db/sql_engine.py +330 -0
  13. boti_data/db/sql_manager.py +43 -0
  14. boti_data/db/sql_model_builder.py +79 -0
  15. boti_data/db/sql_model_registry.py +384 -0
  16. boti_data/db/sql_readonly.py +76 -0
  17. boti_data/db/sql_resource.py +180 -0
  18. boti_data/db/sqlalchemy_async.py +19 -0
  19. boti_data/distributed.py +182 -0
  20. boti_data/field_map.py +177 -0
  21. boti_data/filters/__init__.py +25 -0
  22. boti_data/filters/arrow_kernels.py +351 -0
  23. boti_data/filters/expressions.py +149 -0
  24. boti_data/filters/handler.py +268 -0
  25. boti_data/filters/utils.py +449 -0
  26. boti_data/gateway/__init__.py +13 -0
  27. boti_data/gateway/arrow_adapters.py +337 -0
  28. boti_data/gateway/core.py +2068 -0
  29. boti_data/gateway/frame_strategies.py +385 -0
  30. boti_data/gateway/loaders.py +284 -0
  31. boti_data/gateway/normalization.py +182 -0
  32. boti_data/gateway/requests.py +150 -0
  33. boti_data/helper.py +199 -0
  34. boti_data/joins.py +147 -0
  35. boti_data/parquet/__init__.py +8 -0
  36. boti_data/parquet/reader.py +190 -0
  37. boti_data/parquet/resource.py +572 -0
  38. boti_data/schema.py +245 -0
  39. boti_data-0.1.0.dist-info/METADATA +189 -0
  40. boti_data-0.1.0.dist-info/RECORD +42 -0
  41. boti_data-0.1.0.dist-info/WHEEL +5 -0
  42. boti_data-0.1.0.dist-info/top_level.txt +1 -0
boti_data/__init__.py ADDED
@@ -0,0 +1,92 @@
1
"""
Data modules and interfaces for the Boti pipeline context.

This package module re-exports the public API of the ``boti_data``
subpackages so callers can import everything from ``boti_data`` directly.
"""

# Database resources, partitioned SQL loading, and model-registry tooling.
from boti_data.db import (
    AsyncSqlDatabaseResource,
    BuilderConfig,
    DefaultBase,
    EngineRegistry,
    RegistryConfig,
    SqlAlchemyModelBuilder,
    SqlDatabaseConfig,
    SqlDatabaseResource,
    SqlPartitionPlan,
    SqlPartitionSpec,
    SqlPartitionedLoadRequest,
    SqlPartitionedLoader,
    SqlModelRegistry,
    ensure_greenlet_available,
    get_global_registry,
)
# Named registry of SQL and filesystem connection profiles.
from boti_data.connection_catalog import ConnectionCatalog
# Parquet-backed data resources.
from boti_data.parquet import ParquetDataConfig, ParquetDataResource, ParquetReader
# Filter expression tree and the handler that applies it.
from boti_data.filters import (
    FilterHandler,
    Expr,
    TrueExpr,
    And,
    Or,
    Not,
)
# Unified data gateway plus its request types.
from boti_data.gateway import DataGateway, ParquetLoadRequest, SqlLoadRequest
from boti_data.helper import DataHelper
from boti_data.field_map import FieldMap
# Dask-based distributed session helpers.
from boti_data.distributed import DaskSession, dask_session
from boti_data.gateway import DataFrameOptions, DataFrameParams
# Join helpers for DataFrames.
from boti_data.joins import indexed_left_join, left_join_frames
# Pandas-dtype schema-map utilities.
from boti_data.schema import (
    SchemaValidationError,
    align_frames_for_join,
    apply_schema_map,
    infer_schema_map,
    normalize_dtype_alias,
    normalize_schema_map,
    validate_schema,
)

# Public API of the package; mirrors the imports above.
__all__ = [
    "And",
    "AsyncSqlDatabaseResource",
    "BuilderConfig",
    "ConnectionCatalog",
    "DataFrameOptions",
    "DataFrameParams",
    "DataGateway",
    "DataHelper",
    "DaskSession",
    "DefaultBase",
    "EngineRegistry",
    "Expr",
    "FieldMap",
    "FilterHandler",
    "indexed_left_join",
    "Not",
    "Or",
    "ParquetDataConfig",
    "ParquetLoadRequest",
    "ParquetDataResource",
    "ParquetReader",
    "RegistryConfig",
    "SchemaValidationError",
    "SqlLoadRequest",
    "SqlAlchemyModelBuilder",
    "SqlDatabaseConfig",
    "SqlDatabaseResource",
    "SqlPartitionPlan",
    "SqlPartitionSpec",
    "SqlPartitionedLoadRequest",
    "SqlPartitionedLoader",
    "SqlModelRegistry",
    "TrueExpr",
    "align_frames_for_join",
    "apply_schema_map",
    "ensure_greenlet_available",
    "get_global_registry",
    "infer_schema_map",
    "dask_session",
    "left_join_frames",
    "normalize_dtype_alias",
    "normalize_schema_map",
    "validate_schema",
]
@@ -0,0 +1,326 @@
1
+ """
2
+ Arrow Schema Contract — PyArrow-backed schema definition and coercion.
3
+
4
+ Provides a canonical ``ArrowSchema`` class that wraps ``pa.Schema`` with:
5
+ - Single-pass ``cast_table()`` coercion (replaces per-column pandas loops)
6
+ - Built-in schema equality comparison
7
+ - Dict-compatible constructor for migration from existing schema maps
8
+ - DataFrame ↔ Table conversion helpers
9
+ """
10
+ from __future__ import annotations
11
+
12
+ from collections.abc import Mapping
13
+ from typing import Any
14
+
15
+ import pandas as pd
16
+ import pyarrow as pa
17
+
18
+
19
class ArrowSchema:
    """Canonical PyArrow schema contract with single-pass coercion.

    Wraps a ``pa.Schema`` and provides dict-based construction, equality,
    DataFrame <-> Table conversion, and batch coercion via ``Table.cast``.

    Usage::

        schema = ArrowSchema.from_dict({
            "col_a": "Int64",
            "col_b": "boolean",
            "col_c": "datetime64[ns, UTC]",
        })

        # Coerce any DataFrame to this schema in one pass
        table = schema.to_arrow_table(df)
        coerced_table = schema.cast_table(table)
        result_df = schema.to_dataframe(coerced_table)
    """

    def __init__(self, schema: pa.Schema) -> None:
        # The wrapped PyArrow schema; treated as immutable after construction.
        self._schema = schema

    @property
    def pa_schema(self) -> pa.Schema:
        """Return the underlying PyArrow Schema."""
        return self._schema

    @property
    def column_names(self) -> list[str]:
        """Column names, in schema order."""
        return list(self._schema.names)

    @property
    def column_types(self) -> dict[str, pa.DataType]:
        """Mapping of column name -> Arrow type, in schema order."""
        return {field.name: field.type for field in self._schema}

    # ------------------------------------------------------------------
    # Constructors
    # ------------------------------------------------------------------

    @classmethod
    def from_dict(cls, dtype_map: Mapping[str, str]) -> "ArrowSchema":
        """Build from a pandas-style dtype mapping (same format as existing schema_map)."""
        # Imported lazily to avoid a circular import at module load time.
        from boti_data.db.arrow_schema_mapper import pandas_dtype_to_arrow
        from boti_data.schema import normalize_schema_map

        normalized = normalize_schema_map(dtype_map)
        fields = [
            (col, pandas_dtype_to_arrow(dtype))
            for col, dtype in normalized.items()
        ]
        return cls(pa.schema(fields))

    @classmethod
    def from_fields(cls, fields: list[tuple[str, pa.DataType]]) -> "ArrowSchema":
        """Build from explicit (name, arrow_type) pairs."""
        return cls(pa.schema(fields))

    @classmethod
    def from_dataframe(cls, df: Any) -> "ArrowSchema":
        """Infer schema from a pandas or Dask DataFrame.

        Note: For Dask DataFrames, this computes the schema from metadata
        without triggering execution.
        """
        import dask.dataframe as dd

        if isinstance(df, dd.DataFrame):
            # Use Dask metadata only — no compute
            arrow_schema = _pandas_meta_to_arrow_schema(df._meta)
        else:
            arrow_schema = _pandas_meta_to_arrow_schema(df)
        return cls(arrow_schema)

    @classmethod
    def empty(cls) -> "ArrowSchema":
        """Build an empty schema (zero columns)."""
        return cls(pa.schema([]))

    # ------------------------------------------------------------------
    # Schema operations
    # ------------------------------------------------------------------

    def equals(self, other: "ArrowSchema") -> bool:
        """Check if two schemas are equal (column names and types)."""
        return self._schema.equals(other._schema)

    def contains_column(self, name: str) -> bool:
        """Return True if *name* is one of this schema's columns."""
        return name in self._schema.names

    def validate_columns(self, df: Any, *, require_all: bool = True) -> list[str]:
        """Compare the DataFrame's columns against this schema.

        With ``require_all=True`` (default), returns the schema columns that
        are missing from *df*. With ``require_all=False``, the comparison is
        inverted and the *extra* df columns not present in the schema are
        returned instead. Returns an empty list when the checked direction
        shows no discrepancy. Note this method never warns or raises.
        """
        df_columns = set(df.columns)
        schema_columns = set(self._schema.names)

        if require_all:
            missing = sorted(schema_columns - df_columns)
        else:
            # Inverted direction: df columns that the schema does not define.
            missing = sorted(df_columns - schema_columns)

        return missing

    # ------------------------------------------------------------------
    # Coercion
    # ------------------------------------------------------------------

    def cast_table(self, table: pa.Table) -> pa.Table:
        """Cast an Arrow Table to this schema in a single pass.

        Uses ``table.cast()`` for batch coercion instead of per-column
        pandas operations. Handles type mismatches via safe/unsafe casting.
        """
        # Lazy import avoids a circular dependency with the mapper module.
        from boti_data.db.arrow_schema_mapper import coerce_arrow_table
        return coerce_arrow_table(table, self._schema)

    def to_arrow_table(self, df: Any) -> pa.Table:
        """Convert a pandas/Dask DataFrame to an Arrow Table with this schema.

        Uses PyArrow's native conversion with type preservation.
        Best-effort: if the cast to the target schema fails, the
        uncast table is returned unchanged.
        """
        import dask.dataframe as dd

        if isinstance(df, dd.DataFrame):
            # Materialize: Arrow conversion needs concrete data.
            df = df.compute()

        # Convert to Arrow first
        table = pa.Table.from_pandas(df, preserve_index=False)

        # Then cast to target schema (handles type mismatches)
        try:
            return self.cast_table(table)
        except (KeyError, TypeError, ValueError, pa.ArrowInvalid, pa.ArrowTypeError):
            # If cast fails, return as-is — caller can handle errors
            return table

    def to_dataframe(self, table: pa.Table, *, as_pandas: bool = True) -> Any:
        """Convert an Arrow Table to pandas with proper type mapping.

        NOTE(review): ``as_pandas`` is currently unused — the result is
        always a pandas DataFrame.
        """
        from boti_data.db.arrow_schema_mapper import arrow_table_to_pandas

        df = arrow_table_to_pandas(table)

        # Ensure column order matches schema (columns absent from the
        # table are silently skipped).
        df = df[[field.name for field in self._schema if field.name in df.columns]]

        return df

    def to_pandas(self, table: pa.Table) -> pd.DataFrame:
        """Compatibility alias for callers that expect a pandas-specific helper."""
        return self.to_dataframe(table)

    def coerce_dataframe(self, df: Any) -> Any:
        """Coerce a pandas/Dask DataFrame to this schema.

        Single-pass operation that replaces the per-column ``apply_schema_map()`` loop.
        """
        import dask.dataframe as dd

        if isinstance(df, dd.DataFrame):
            # For Dask, we need to compute — Arrow coercion is not lazy
            df = df.compute()

        table = self.to_arrow_table(df)
        return self.to_dataframe(table)

    # ------------------------------------------------------------------
    # Validation
    # ------------------------------------------------------------------

    def validate_table(self, table: pa.Table, *, require_columns: bool = True) -> list[str]:
        """Validate an Arrow Table against this schema.

        Returns list of errors (empty if valid).
        """
        errors: list[str] = []

        if require_columns:
            # validate_columns expects something with a ``.columns``
            # attribute, so wrap the table's column names in a throwaway shim.
            missing = self.validate_columns(
                type("MockDF", (), {"columns": list(table.column_names)})(),
                require_all=True,
            )
            if missing:
                errors.append(f"Missing columns: {missing}")

        for field in self._schema:
            if field.name not in table.column_names:
                continue

            actual_type = table.schema.field(field.name).type
            if actual_type != field.type:
                # Check if compatible (safe cast possible)
                try:
                    table.column(field.name).cast(field.type, safe=True)
                except (pa.ArrowInvalid, pa.ArrowTypeError):
                    errors.append(
                        f"Column '{field.name}': expected {field.type}, found {actual_type} "
                        f"(incompatible types)"
                    )

        return errors

    def validate_dataframe(self, df: Any, *, require_columns: bool = True) -> list[str]:
        """Validate a pandas/Dask DataFrame against this schema.

        Returns list of errors (empty if valid).
        """
        import dask.dataframe as dd

        if isinstance(df, dd.DataFrame):
            df = df._meta  # Use metadata

        table = self.to_arrow_table(df)
        return self.validate_table(table, require_columns=require_columns)

    # ------------------------------------------------------------------
    # Serialization
    # ------------------------------------------------------------------

    def to_dict(self) -> dict[str, str]:
        """Convert to a pandas-style dtype mapping for compatibility.

        NOTE(review): if several pandas dtypes map to the same Arrow type,
        the reversed mapping keeps only the last one — verify the round
        trip is acceptable for the dtype table in use.
        """
        from boti_data.db.arrow_schema_mapper import _PANDAS_DTYPE_TO_ARROW

        # Reverse mapping: Arrow → pandas dtype string
        arrow_to_pandas = {v: k for k, v in _PANDAS_DTYPE_TO_ARROW.items()}

        result = {}
        for field in self._schema:
            # Fall back to the Arrow type's string form for unmapped types.
            pandas_dtype = arrow_to_pandas.get(field.type, str(field.type))
            result[field.name] = pandas_dtype
        return result

    def __repr__(self) -> str:
        return f"ArrowSchema({len(self._schema)} columns: {self.column_names})"

    def __eq__(self, other: object) -> bool:
        if isinstance(other, ArrowSchema):
            return self.equals(other)
        return NotImplemented

    def __hash__(self) -> int:
        # Hash over (name, type) pairs — coarser than equals(), which is
        # acceptable (hash collisions are allowed).
        return hash(tuple((f.name, f.type) for f in self._schema))
259
+
260
+
261
+ # ---------------------------------------------------------------------------
262
+ # Helper functions
263
+ # ---------------------------------------------------------------------------
264
+
265
def _pandas_dtype_to_arrow(dtype_str: str) -> pa.DataType:
    """Map a pandas dtype string onto the corresponding PyArrow type.

    Thin wrapper over the mapper module; the import is deferred to avoid
    a circular import at module load time.
    """
    from boti_data.db.arrow_schema_mapper import pandas_dtype_to_arrow as _convert

    return _convert(dtype_str)
269
+
270
+
271
def _pandas_meta_to_arrow_schema(df: Any) -> pa.Schema:
    """Derive a ``pa.Schema`` from a DataFrame's column metadata.

    Only ``columns`` and ``dtypes`` are read, so the frame's data is never
    touched — this works on a Dask ``_meta`` frame as well as on a regular
    pandas DataFrame.
    """
    dtypes = df.dtypes
    return pa.schema(
        [
            pa.field(name, _pandas_dtype_to_arrow(str(dtypes[name])))
            for name in df.columns
        ]
    )
279
+
280
+
281
+ # ---------------------------------------------------------------------------
282
+ # Convenience functions (drop-in replacements for schema.py functions)
283
+ # ---------------------------------------------------------------------------
284
+
285
def apply_arrow_schema_map(
    dataframe: Any,
    schema_map: Mapping[str, str],
    *,
    require_columns: bool = False,
) -> Any:
    """Cast a DataFrame to a schema using PyArrow single-pass coercion.

    Drop-in replacement for ``schema.apply_schema_map()`` that performs one
    Arrow-level cast instead of per-column pandas operations.

    Raises ``SchemaValidationError`` when ``require_columns`` is True and the
    frame lacks any column named in *schema_map*.
    """
    contract = ArrowSchema.from_dict(schema_map)

    # Optional up-front presence check before coercing.
    if require_columns:
        absent = contract.validate_columns(dataframe, require_all=True)
        if absent:
            from boti_data.schema import SchemaValidationError

            raise SchemaValidationError(f"Missing required column(s): {absent}.")

    return contract.coerce_dataframe(dataframe)
308
+
309
+
310
def validate_arrow_schema(
    dataframe: Any,
    expected_schema_map: Mapping[str, str],
    *,
    require_columns: bool = True,
) -> None:
    """Validate a DataFrame against an expected schema using Arrow.

    Drop-in replacement for ``schema.validate_schema()``. Returns ``None``
    when the frame conforms; otherwise raises ``SchemaValidationError``
    listing every problem found.
    """
    contract = ArrowSchema.from_dict(expected_schema_map)
    problems = contract.validate_dataframe(dataframe, require_columns=require_columns)
    if not problems:
        return

    from boti_data.schema import SchemaValidationError

    raise SchemaValidationError("Schema validation failed:\n" + "\n".join(problems))
@@ -0,0 +1,121 @@
1
+ """
2
+ Typed catalog for named SQL and filesystem connection profiles.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ import functools
8
+ import re
9
+ import threading
10
+ from pathlib import Path
11
+ from typing import Optional
12
+
13
+ import fsspec
14
+ import pyarrow.fs as pafs
15
+
16
+ from boti.core.filesystem import FilesystemAdapter, FilesystemConfig, create_filesystem
17
+ from boti_data.db.sql_config import SqlDatabaseConfig
18
+ from boti_data.db.sql_resource import AsyncSqlDatabaseResource, SqlDatabaseResource
19
+
20
+ _PROFILE_NAME_PATTERN = re.compile(r"^[A-Za-z0-9_.-]+$")
21
+
22
+
23
class ConnectionCatalog:
    """Named registry for typed connection profiles and runtime adapters.

    All registry reads and writes happen under one re-entrant lock. SQL
    profiles are stored as plain configs; filesystem profiles additionally
    cache a lazily built ``FilesystemAdapter`` that is discarded whenever
    the profile is re-registered.
    """

    def __init__(self) -> None:
        self._lock = threading.RLock()
        self._sql_configs: dict[str, SqlDatabaseConfig] = {}
        self._filesystem_configs: dict[str, FilesystemConfig] = {}
        self._filesystem_adapters: dict[str, FilesystemAdapter] = {}

    @staticmethod
    def _validate_name(name: str) -> str:
        """Trim surrounding whitespace and reject names outside the allowed charset."""
        candidate = name.strip()
        if candidate and _PROFILE_NAME_PATTERN.fullmatch(candidate):
            return candidate
        raise ValueError(
            "Connection profile names must contain only letters, digits, dots, underscores, or hyphens."
        )

    # -- SQL profiles ----------------------------------------------------

    def register_sql(self, name: str, config: SqlDatabaseConfig) -> SqlDatabaseConfig:
        """Store *config* under *name*, replacing any existing SQL profile."""
        key = self._validate_name(name)
        with self._lock:
            self._sql_configs[key] = config
        return config

    def load_sql(
        self,
        name: str,
        prefix: str,
        *,
        env_file: Optional[str | Path] = None,
        **overrides: object,
    ) -> SqlDatabaseConfig:
        """Build a SQL profile from environment variables and register it."""
        return self.register_sql(
            name,
            SqlDatabaseConfig.from_env_prefix(prefix, env_file=env_file, **overrides),
        )

    def sql_config(self, name: str) -> SqlDatabaseConfig:
        """Look up a registered SQL profile; raise KeyError listing the known ones."""
        key = self._validate_name(name)
        with self._lock:
            try:
                return self._sql_configs[key]
            except KeyError as exc:
                raise KeyError(
                    f"Unknown SQL profile '{key}'. Available: {sorted(self._sql_configs)}"
                ) from exc

    def create_sql_resource(self, name: str) -> SqlDatabaseResource:
        """Instantiate a synchronous SQL resource for the named profile."""
        return SqlDatabaseResource(self.sql_config(name))

    def create_async_sql_resource(self, name: str) -> AsyncSqlDatabaseResource:
        """Instantiate an asynchronous SQL resource for the named profile."""
        return AsyncSqlDatabaseResource(self.sql_config(name))

    # -- Filesystem profiles ----------------------------------------------

    def register_filesystem(self, name: str, config: FilesystemConfig) -> FilesystemConfig:
        """Store *config* under *name*, dropping any cached adapter for it."""
        key = self._validate_name(name)
        with self._lock:
            self._filesystem_configs[key] = config
            # A stale adapter must not outlive the config it was built from.
            self._filesystem_adapters.pop(key, None)
        return config

    def load_filesystem(
        self,
        name: str,
        prefix: str,
        *,
        env_file: Optional[str | Path] = None,
        **overrides: object,
    ) -> FilesystemConfig:
        """Build a filesystem profile from environment variables and register it."""
        return self.register_filesystem(
            name,
            FilesystemConfig.from_env_prefix(prefix, env_file=env_file, **overrides),
        )

    def filesystem_config(self, name: str) -> FilesystemConfig:
        """Look up a registered filesystem profile; raise KeyError listing the known ones."""
        key = self._validate_name(name)
        with self._lock:
            try:
                return self._filesystem_configs[key]
            except KeyError as exc:
                raise KeyError(
                    f"Unknown filesystem profile '{key}'. Available: {sorted(self._filesystem_configs)}"
                ) from exc

    def filesystem_adapter(self, name: str) -> FilesystemAdapter:
        """Return the profile's adapter, building and caching it on first use."""
        key = self._validate_name(name)
        with self._lock:
            if key not in self._filesystem_adapters:
                # RLock is re-entrant, so the nested config lookup is safe.
                self._filesystem_adapters[key] = FilesystemAdapter(
                    self.filesystem_config(key)
                )
            return self._filesystem_adapters[key]

    def filesystem(self, name: str) -> fsspec.AbstractFileSystem:
        """Return the fsspec filesystem for the named profile."""
        return self.filesystem_adapter(name).get_filesystem()

    def pyarrow_filesystem(self, name: str) -> tuple[pafs.FileSystem, str]:
        """Return the PyArrow filesystem pair for the named profile."""
        return self.filesystem_adapter(name).get_pyarrow_filesystem()

    def invalidate_filesystem(self, name: str) -> None:
        """Invalidate the named profile's adapter (building it if absent)."""
        self.filesystem_adapter(name).invalidate()

    def make_filesystem_factory(self, name: str):
        """Return a callable that creates fresh filesystems from the profile's config."""
        return functools.partial(create_filesystem, self.filesystem_config(name))
@@ -0,0 +1,42 @@
1
"""
Database-backed data resources and helpers.

Re-exports the public API of the ``boti_data.db`` subpackage.
"""

# Connection configs, resources, and the shared engine registry.
from boti_data.db.sql_manager import (
    AsyncSqlDatabaseResource,
    EngineRegistry,
    SqlDatabaseConfig,
    SqlDatabaseResource,
)
# Partition planning and loading for large SQL reads.
from boti_data.db.partitioned_loader import (
    SqlPartitionPlan,
    SqlPartitionSpec,
    SqlPartitionedLoadRequest,
    SqlPartitionedLoader,
)
# Declarative model construction and the shared model registry.
from boti_data.db.sql_model_builder import BuilderConfig, SqlAlchemyModelBuilder
from boti_data.db.sql_model_registry import (
    DefaultBase,
    RegistryConfig,
    SqlModelRegistry,
    get_global_registry,
)
# Async-engine prerequisite check.
from boti_data.db.sqlalchemy_async import ensure_greenlet_available

# Public API of this subpackage; mirrors the imports above.
__all__ = [
    "AsyncSqlDatabaseResource",
    "BuilderConfig",
    "DefaultBase",
    "EngineRegistry",
    "RegistryConfig",
    "SqlAlchemyModelBuilder",
    "SqlDatabaseConfig",
    "SqlDatabaseResource",
    "SqlPartitionPlan",
    "SqlPartitionSpec",
    "SqlPartitionedLoadRequest",
    "SqlPartitionedLoader",
    "SqlModelRegistry",
    "ensure_greenlet_available",
    "get_global_registry",
]