sqlspec 0.26.0__py3-none-any.whl → 0.28.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- sqlspec/__init__.py +7 -15
- sqlspec/_serialization.py +55 -25
- sqlspec/_typing.py +155 -52
- sqlspec/adapters/adbc/_types.py +1 -1
- sqlspec/adapters/adbc/adk/__init__.py +5 -0
- sqlspec/adapters/adbc/adk/store.py +880 -0
- sqlspec/adapters/adbc/config.py +62 -12
- sqlspec/adapters/adbc/data_dictionary.py +74 -2
- sqlspec/adapters/adbc/driver.py +226 -58
- sqlspec/adapters/adbc/litestar/__init__.py +5 -0
- sqlspec/adapters/adbc/litestar/store.py +504 -0
- sqlspec/adapters/adbc/type_converter.py +44 -50
- sqlspec/adapters/aiosqlite/_types.py +1 -1
- sqlspec/adapters/aiosqlite/adk/__init__.py +5 -0
- sqlspec/adapters/aiosqlite/adk/store.py +536 -0
- sqlspec/adapters/aiosqlite/config.py +86 -16
- sqlspec/adapters/aiosqlite/data_dictionary.py +34 -2
- sqlspec/adapters/aiosqlite/driver.py +127 -38
- sqlspec/adapters/aiosqlite/litestar/__init__.py +5 -0
- sqlspec/adapters/aiosqlite/litestar/store.py +281 -0
- sqlspec/adapters/aiosqlite/pool.py +7 -7
- sqlspec/adapters/asyncmy/__init__.py +7 -1
- sqlspec/adapters/asyncmy/_types.py +1 -1
- sqlspec/adapters/asyncmy/adk/__init__.py +5 -0
- sqlspec/adapters/asyncmy/adk/store.py +503 -0
- sqlspec/adapters/asyncmy/config.py +59 -17
- sqlspec/adapters/asyncmy/data_dictionary.py +41 -2
- sqlspec/adapters/asyncmy/driver.py +293 -62
- sqlspec/adapters/asyncmy/litestar/__init__.py +5 -0
- sqlspec/adapters/asyncmy/litestar/store.py +296 -0
- sqlspec/adapters/asyncpg/__init__.py +2 -1
- sqlspec/adapters/asyncpg/_type_handlers.py +71 -0
- sqlspec/adapters/asyncpg/_types.py +11 -7
- sqlspec/adapters/asyncpg/adk/__init__.py +5 -0
- sqlspec/adapters/asyncpg/adk/store.py +460 -0
- sqlspec/adapters/asyncpg/config.py +57 -36
- sqlspec/adapters/asyncpg/data_dictionary.py +48 -2
- sqlspec/adapters/asyncpg/driver.py +153 -23
- sqlspec/adapters/asyncpg/litestar/__init__.py +5 -0
- sqlspec/adapters/asyncpg/litestar/store.py +253 -0
- sqlspec/adapters/bigquery/_types.py +1 -1
- sqlspec/adapters/bigquery/adk/__init__.py +5 -0
- sqlspec/adapters/bigquery/adk/store.py +585 -0
- sqlspec/adapters/bigquery/config.py +36 -11
- sqlspec/adapters/bigquery/data_dictionary.py +42 -2
- sqlspec/adapters/bigquery/driver.py +489 -144
- sqlspec/adapters/bigquery/litestar/__init__.py +5 -0
- sqlspec/adapters/bigquery/litestar/store.py +327 -0
- sqlspec/adapters/bigquery/type_converter.py +55 -23
- sqlspec/adapters/duckdb/_types.py +2 -2
- sqlspec/adapters/duckdb/adk/__init__.py +14 -0
- sqlspec/adapters/duckdb/adk/store.py +563 -0
- sqlspec/adapters/duckdb/config.py +79 -21
- sqlspec/adapters/duckdb/data_dictionary.py +41 -2
- sqlspec/adapters/duckdb/driver.py +225 -44
- sqlspec/adapters/duckdb/litestar/__init__.py +5 -0
- sqlspec/adapters/duckdb/litestar/store.py +332 -0
- sqlspec/adapters/duckdb/pool.py +5 -5
- sqlspec/adapters/duckdb/type_converter.py +51 -21
- sqlspec/adapters/oracledb/_numpy_handlers.py +133 -0
- sqlspec/adapters/oracledb/_types.py +20 -2
- sqlspec/adapters/oracledb/adk/__init__.py +5 -0
- sqlspec/adapters/oracledb/adk/store.py +1628 -0
- sqlspec/adapters/oracledb/config.py +120 -36
- sqlspec/adapters/oracledb/data_dictionary.py +87 -20
- sqlspec/adapters/oracledb/driver.py +475 -86
- sqlspec/adapters/oracledb/litestar/__init__.py +5 -0
- sqlspec/adapters/oracledb/litestar/store.py +765 -0
- sqlspec/adapters/oracledb/migrations.py +316 -25
- sqlspec/adapters/oracledb/type_converter.py +91 -16
- sqlspec/adapters/psqlpy/_type_handlers.py +44 -0
- sqlspec/adapters/psqlpy/_types.py +2 -1
- sqlspec/adapters/psqlpy/adk/__init__.py +5 -0
- sqlspec/adapters/psqlpy/adk/store.py +483 -0
- sqlspec/adapters/psqlpy/config.py +45 -19
- sqlspec/adapters/psqlpy/data_dictionary.py +48 -2
- sqlspec/adapters/psqlpy/driver.py +108 -41
- sqlspec/adapters/psqlpy/litestar/__init__.py +5 -0
- sqlspec/adapters/psqlpy/litestar/store.py +272 -0
- sqlspec/adapters/psqlpy/type_converter.py +40 -11
- sqlspec/adapters/psycopg/_type_handlers.py +80 -0
- sqlspec/adapters/psycopg/_types.py +2 -1
- sqlspec/adapters/psycopg/adk/__init__.py +5 -0
- sqlspec/adapters/psycopg/adk/store.py +962 -0
- sqlspec/adapters/psycopg/config.py +65 -37
- sqlspec/adapters/psycopg/data_dictionary.py +91 -3
- sqlspec/adapters/psycopg/driver.py +200 -78
- sqlspec/adapters/psycopg/litestar/__init__.py +5 -0
- sqlspec/adapters/psycopg/litestar/store.py +554 -0
- sqlspec/adapters/sqlite/__init__.py +2 -1
- sqlspec/adapters/sqlite/_type_handlers.py +86 -0
- sqlspec/adapters/sqlite/_types.py +1 -1
- sqlspec/adapters/sqlite/adk/__init__.py +5 -0
- sqlspec/adapters/sqlite/adk/store.py +582 -0
- sqlspec/adapters/sqlite/config.py +85 -16
- sqlspec/adapters/sqlite/data_dictionary.py +34 -2
- sqlspec/adapters/sqlite/driver.py +120 -52
- sqlspec/adapters/sqlite/litestar/__init__.py +5 -0
- sqlspec/adapters/sqlite/litestar/store.py +318 -0
- sqlspec/adapters/sqlite/pool.py +5 -5
- sqlspec/base.py +45 -26
- sqlspec/builder/__init__.py +73 -4
- sqlspec/builder/_base.py +91 -58
- sqlspec/builder/_column.py +5 -5
- sqlspec/builder/_ddl.py +98 -89
- sqlspec/builder/_delete.py +5 -4
- sqlspec/builder/_dml.py +388 -0
- sqlspec/{_sql.py → builder/_factory.py} +41 -44
- sqlspec/builder/_insert.py +5 -82
- sqlspec/builder/{mixins/_join_operations.py → _join.py} +145 -143
- sqlspec/builder/_merge.py +446 -11
- sqlspec/builder/_parsing_utils.py +9 -11
- sqlspec/builder/_select.py +1313 -25
- sqlspec/builder/_update.py +11 -42
- sqlspec/cli.py +76 -69
- sqlspec/config.py +331 -62
- sqlspec/core/__init__.py +5 -4
- sqlspec/core/cache.py +18 -18
- sqlspec/core/compiler.py +6 -8
- sqlspec/core/filters.py +55 -47
- sqlspec/core/hashing.py +9 -9
- sqlspec/core/parameters.py +76 -45
- sqlspec/core/result.py +234 -47
- sqlspec/core/splitter.py +16 -17
- sqlspec/core/statement.py +32 -31
- sqlspec/core/type_conversion.py +3 -2
- sqlspec/driver/__init__.py +1 -3
- sqlspec/driver/_async.py +183 -160
- sqlspec/driver/_common.py +197 -109
- sqlspec/driver/_sync.py +189 -161
- sqlspec/driver/mixins/_result_tools.py +20 -236
- sqlspec/driver/mixins/_sql_translator.py +4 -4
- sqlspec/exceptions.py +70 -7
- sqlspec/extensions/adk/__init__.py +53 -0
- sqlspec/extensions/adk/_types.py +51 -0
- sqlspec/extensions/adk/converters.py +172 -0
- sqlspec/extensions/adk/migrations/0001_create_adk_tables.py +144 -0
- sqlspec/extensions/adk/migrations/__init__.py +0 -0
- sqlspec/extensions/adk/service.py +181 -0
- sqlspec/extensions/adk/store.py +536 -0
- sqlspec/extensions/aiosql/adapter.py +69 -61
- sqlspec/extensions/fastapi/__init__.py +21 -0
- sqlspec/extensions/fastapi/extension.py +331 -0
- sqlspec/extensions/fastapi/providers.py +543 -0
- sqlspec/extensions/flask/__init__.py +36 -0
- sqlspec/extensions/flask/_state.py +71 -0
- sqlspec/extensions/flask/_utils.py +40 -0
- sqlspec/extensions/flask/extension.py +389 -0
- sqlspec/extensions/litestar/__init__.py +21 -4
- sqlspec/extensions/litestar/cli.py +54 -10
- sqlspec/extensions/litestar/config.py +56 -266
- sqlspec/extensions/litestar/handlers.py +46 -17
- sqlspec/extensions/litestar/migrations/0001_create_session_table.py +137 -0
- sqlspec/extensions/litestar/migrations/__init__.py +3 -0
- sqlspec/extensions/litestar/plugin.py +349 -224
- sqlspec/extensions/litestar/providers.py +25 -25
- sqlspec/extensions/litestar/store.py +265 -0
- sqlspec/extensions/starlette/__init__.py +10 -0
- sqlspec/extensions/starlette/_state.py +25 -0
- sqlspec/extensions/starlette/_utils.py +52 -0
- sqlspec/extensions/starlette/extension.py +254 -0
- sqlspec/extensions/starlette/middleware.py +154 -0
- sqlspec/loader.py +30 -49
- sqlspec/migrations/base.py +200 -76
- sqlspec/migrations/commands.py +591 -62
- sqlspec/migrations/context.py +6 -9
- sqlspec/migrations/fix.py +199 -0
- sqlspec/migrations/loaders.py +47 -19
- sqlspec/migrations/runner.py +241 -75
- sqlspec/migrations/tracker.py +237 -21
- sqlspec/migrations/utils.py +51 -3
- sqlspec/migrations/validation.py +177 -0
- sqlspec/protocols.py +106 -36
- sqlspec/storage/_utils.py +85 -0
- sqlspec/storage/backends/fsspec.py +133 -107
- sqlspec/storage/backends/local.py +78 -51
- sqlspec/storage/backends/obstore.py +276 -168
- sqlspec/storage/registry.py +75 -39
- sqlspec/typing.py +30 -84
- sqlspec/utils/__init__.py +25 -4
- sqlspec/utils/arrow_helpers.py +81 -0
- sqlspec/utils/config_resolver.py +6 -6
- sqlspec/utils/correlation.py +4 -5
- sqlspec/utils/data_transformation.py +3 -2
- sqlspec/utils/deprecation.py +9 -8
- sqlspec/utils/fixtures.py +4 -4
- sqlspec/utils/logging.py +46 -6
- sqlspec/utils/module_loader.py +205 -5
- sqlspec/utils/portal.py +311 -0
- sqlspec/utils/schema.py +288 -0
- sqlspec/utils/serializers.py +113 -4
- sqlspec/utils/sync_tools.py +36 -22
- sqlspec/utils/text.py +1 -2
- sqlspec/utils/type_guards.py +136 -20
- sqlspec/utils/version.py +433 -0
- {sqlspec-0.26.0.dist-info → sqlspec-0.28.0.dist-info}/METADATA +41 -22
- sqlspec-0.28.0.dist-info/RECORD +221 -0
- sqlspec/builder/mixins/__init__.py +0 -55
- sqlspec/builder/mixins/_cte_and_set_ops.py +0 -253
- sqlspec/builder/mixins/_delete_operations.py +0 -50
- sqlspec/builder/mixins/_insert_operations.py +0 -282
- sqlspec/builder/mixins/_merge_operations.py +0 -698
- sqlspec/builder/mixins/_order_limit_operations.py +0 -145
- sqlspec/builder/mixins/_pivot_operations.py +0 -157
- sqlspec/builder/mixins/_select_operations.py +0 -930
- sqlspec/builder/mixins/_update_operations.py +0 -199
- sqlspec/builder/mixins/_where_clause.py +0 -1298
- sqlspec-0.26.0.dist-info/RECORD +0 -157
- sqlspec-0.26.0.dist-info/licenses/NOTICE +0 -29
- {sqlspec-0.26.0.dist-info → sqlspec-0.28.0.dist-info}/WHEEL +0 -0
- {sqlspec-0.26.0.dist-info → sqlspec-0.28.0.dist-info}/entry_points.txt +0 -0
- {sqlspec-0.26.0.dist-info → sqlspec-0.28.0.dist-info}/licenses/LICENSE +0 -0
sqlspec/adapters/bigquery/litestar/store.py

@@ -0,0 +1,327 @@
+"""BigQuery session store for Litestar integration."""
+
+from datetime import datetime, timedelta, timezone
+from typing import TYPE_CHECKING
+
+from sqlspec.extensions.litestar.store import BaseSQLSpecStore
+from sqlspec.utils.logging import get_logger
+from sqlspec.utils.sync_tools import async_
+
+if TYPE_CHECKING:
+    from sqlspec.adapters.bigquery.config import BigQueryConfig
+
+logger = get_logger("adapters.bigquery.litestar.store")
+
+__all__ = ("BigQueryStore",)
+
+
+class BigQueryStore(BaseSQLSpecStore["BigQueryConfig"]):
+    """BigQuery session store using synchronous BigQuery driver.
+
+    Implements server-side session storage for Litestar using Google BigQuery.
+    Uses Litestar's sync_to_thread utility to provide an async interface
+    compatible with the Store protocol.
+
+    Provides efficient session management with:
+    - Sync operations wrapped for async compatibility
+    - MERGE for UPSERT functionality
+    - Native TIMESTAMP type support
+    - Automatic expiration handling
+    - Efficient cleanup of expired sessions
+    - Table clustering on session_id for optimized lookups
+
+    Note:
+        BigQuery is designed for analytical (OLAP) workloads and scales to petabytes.
+        For typical session store workloads, clustering by session_id provides good
+        performance. Consider partitioning by created_at if session volume exceeds
+        millions of rows per day.
+
+    Args:
+        config: BigQueryConfig instance.
+
+    Example:
+        from sqlspec.adapters.bigquery import BigQueryConfig
+        from sqlspec.adapters.bigquery.litestar.store import BigQueryStore
+
+        config = BigQueryConfig(connection_config={"project": "my-project"})
+        store = BigQueryStore(config)
+        await store.create_table()
+    """
+
+    __slots__ = ()
+
+    def __init__(self, config: "BigQueryConfig") -> None:
+        """Initialize BigQuery session store.
+
+        Args:
+            config: BigQueryConfig instance.
+
+        Notes:
+            Table name is read from config.extension_config["litestar"]["session_table"].
+        """
+        super().__init__(config)
+
+    def _get_create_table_sql(self) -> str:
+        """Get BigQuery CREATE TABLE SQL with optimized schema.
+
+        Returns:
+            SQL statement to create the sessions table with clustering.
+
+        Notes:
+            - Uses TIMESTAMP for timezone-aware expiration timestamps
+            - BYTES for binary session data storage
+            - Clustered by session_id for efficient lookups
+            - No indexes needed - BigQuery uses columnar storage
+            - Table name is internally controlled, not user input
+        """
+        return f"""
+        CREATE TABLE IF NOT EXISTS {self._table_name} (
+            session_id STRING NOT NULL,
+            data BYTES NOT NULL,
+            expires_at TIMESTAMP,
+            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP()
+        )
+        CLUSTER BY session_id
+        """
+
+    def _get_drop_table_sql(self) -> "list[str]":
+        """Get BigQuery DROP TABLE SQL statements.
+
+        Returns:
+            List containing DROP TABLE statement.
+
+        Notes:
+            BigQuery doesn't have separate indexes to drop.
+        """
+        return [f"DROP TABLE IF EXISTS {self._table_name}"]
+
+    def _datetime_to_timestamp(self, dt: "datetime | None") -> "datetime | None":
+        """Convert datetime to BigQuery TIMESTAMP.
+
+        Args:
+            dt: Datetime to convert (must be UTC-aware).
+
+        Returns:
+            UTC datetime object, or None if dt is None.
+
+        Notes:
+            BigQuery TIMESTAMP type expects UTC datetime objects.
+            The BigQuery client library handles the conversion.
+        """
+        if dt is None:
+            return None
+        if dt.tzinfo is None:
+            return dt.replace(tzinfo=timezone.utc)
+        return dt
+
+    def _timestamp_to_datetime(self, ts: "datetime | None") -> "datetime | None":
+        """Convert TIMESTAMP back to datetime.
+
+        Args:
+            ts: Datetime object from BigQuery.
+
+        Returns:
+            UTC-aware datetime, or None if ts is None.
+        """
+        if ts is None:
+            return None
+        if ts.tzinfo is None:
+            return ts.replace(tzinfo=timezone.utc)
+        return ts
+
+    def _create_table(self) -> None:
+        """Synchronous implementation of create_table."""
+        sql = self._get_create_table_sql()
+        with self._config.provide_session() as driver:
+            driver.execute_script(sql)
+        logger.debug("Created session table: %s", self._table_name)
+
+    async def create_table(self) -> None:
+        """Create the session table if it doesn't exist."""
+        await async_(self._create_table)()
+
+    def _get(self, key: str, renew_for: "int | timedelta | None" = None) -> "bytes | None":
+        """Synchronous implementation of get."""
+        sql = f"""
+        SELECT data, expires_at FROM {self._table_name}
+        WHERE session_id = @session_id
+        AND (expires_at IS NULL OR expires_at > CURRENT_TIMESTAMP())
+        """
+
+        with self._config.provide_session() as driver:
+            result = driver.select_one(sql, {"session_id": key})
+
+            if result is None:
+                return None
+
+            data = result.get("data")
+            expires_at = result.get("expires_at")
+
+            if renew_for is not None and expires_at is not None:
+                new_expires_at = self._calculate_expires_at(renew_for)
+                new_expires_at_ts = self._datetime_to_timestamp(new_expires_at)
+                if new_expires_at_ts is not None:
+                    update_sql = f"""
+                    UPDATE {self._table_name}
+                    SET expires_at = @expires_at
+                    WHERE session_id = @session_id
+                    """
+                    driver.execute(update_sql, {"expires_at": new_expires_at_ts, "session_id": key})
+
+            return bytes(data) if data is not None else None
+
+    async def get(self, key: str, renew_for: "int | timedelta | None" = None) -> "bytes | None":
+        """Get a session value by key.
+
+        Args:
+            key: Session ID to retrieve.
+            renew_for: If given, renew the expiry time for this duration.
+
+        Returns:
+            Session data as bytes if found and not expired, None otherwise.
+        """
+        return await async_(self._get)(key, renew_for)
+
+    def _set(self, key: str, value: "str | bytes", expires_in: "int | timedelta | None" = None) -> None:
+        """Synchronous implementation of set.
+
+        Notes:
+            Uses MERGE for UPSERT functionality in BigQuery.
+            BigQuery requires source data to come from a table or inline VALUES.
+        """
+        data = self._value_to_bytes(value)
+        expires_at = self._calculate_expires_at(expires_in)
+        expires_at_ts = self._datetime_to_timestamp(expires_at)
+
+        sql = f"""
+        MERGE {self._table_name} AS target
+        USING (SELECT @session_id AS session_id, @data AS data, @expires_at AS expires_at) AS source
+        ON target.session_id = source.session_id
+        WHEN MATCHED THEN
+            UPDATE SET data = source.data, expires_at = source.expires_at
+        WHEN NOT MATCHED THEN
+            INSERT (session_id, data, expires_at, created_at)
+            VALUES (source.session_id, source.data, source.expires_at, CURRENT_TIMESTAMP())
+        """
+
+        with self._config.provide_session() as driver:
+            driver.execute(sql, {"session_id": key, "data": data, "expires_at": expires_at_ts})
+
+    async def set(self, key: str, value: "str | bytes", expires_in: "int | timedelta | None" = None) -> None:
+        """Store a session value.
+
+        Args:
+            key: Session ID.
+            value: Session data.
+            expires_in: Time until expiration.
+        """
+        await async_(self._set)(key, value, expires_in)
+
+    def _delete(self, key: str) -> None:
+        """Synchronous implementation of delete."""
+        sql = f"DELETE FROM {self._table_name} WHERE session_id = @session_id"
+
+        with self._config.provide_session() as driver:
+            driver.execute(sql, {"session_id": key})
+
+    async def delete(self, key: str) -> None:
+        """Delete a session by key.
+
+        Args:
+            key: Session ID to delete.
+        """
+        await async_(self._delete)(key)
+
+    def _delete_all(self) -> None:
+        """Synchronous implementation of delete_all."""
+        sql = f"DELETE FROM {self._table_name} WHERE TRUE"
+
+        with self._config.provide_session() as driver:
+            driver.execute(sql)
+        logger.debug("Deleted all sessions from table: %s", self._table_name)
+
+    async def delete_all(self) -> None:
+        """Delete all sessions from the store."""
+        await async_(self._delete_all)()
+
+    def _exists(self, key: str) -> bool:
+        """Synchronous implementation of exists."""
+        sql = f"""
+        SELECT 1 FROM {self._table_name}
+        WHERE session_id = @session_id
+        AND (expires_at IS NULL OR expires_at > CURRENT_TIMESTAMP())
+        LIMIT 1
+        """
+
+        with self._config.provide_session() as driver:
+            result = driver.select_one(sql, {"session_id": key})
+            return result is not None
+
+    async def exists(self, key: str) -> bool:
+        """Check if a session key exists and is not expired.
+
+        Args:
+            key: Session ID to check.
+
+        Returns:
+            True if the session exists and is not expired.
+        """
+        return await async_(self._exists)(key)
+
+    def _expires_in(self, key: str) -> "int | None":
+        """Synchronous implementation of expires_in."""
+        sql = f"""
+        SELECT expires_at FROM {self._table_name}
+        WHERE session_id = @session_id
+        """
+
+        with self._config.provide_session() as driver:
+            result = driver.select_one(sql, {"session_id": key})
+
+            if result is None:
+                return None
+
+            expires_at = result.get("expires_at")
+            if expires_at is None:
+                return None
+
+            expires_at_dt = self._timestamp_to_datetime(expires_at)
+            if expires_at_dt is None:
+                return None
+
+            now = datetime.now(timezone.utc)
+            if expires_at_dt <= now:
+                return 0
+
+            delta = expires_at_dt - now
+            return int(delta.total_seconds())
+
+    async def expires_in(self, key: str) -> "int | None":
+        """Get the time in seconds until the session expires.
+
+        Args:
+            key: Session ID to check.
+
+        Returns:
+            Seconds until expiration, or None if no expiry or key doesn't exist.
+        """
+        return await async_(self._expires_in)(key)
+
+    def _delete_expired(self) -> int:
+        """Synchronous implementation of delete_expired."""
+        sql = f"DELETE FROM {self._table_name} WHERE expires_at <= CURRENT_TIMESTAMP()"
+
+        with self._config.provide_session() as driver:
+            result = driver.execute(sql)
+            count = result.get_affected_count()
+            if count > 0:
+                logger.debug("Cleaned up %d expired sessions", count)
+            return count
+
+    async def delete_expired(self) -> int:
+        """Delete all expired sessions.
+
+        Returns:
+            Number of sessions deleted.
+        """
+        return await async_(self._delete_expired)()
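For orientation, here is a minimal sketch of how this new store could be wired into Litestar's server-side sessions. The BigQueryConfig and BigQueryStore lines mirror the docstring example above; the ServerSideSessionConfig wiring and the "sessions" store name are assumptions based on Litestar's public store API, not something this diff configures, and the table would still need to be created once at startup (for example via await store.create_table()).

from litestar import Litestar
from litestar.middleware.session.server_side import ServerSideSessionConfig

from sqlspec.adapters.bigquery import BigQueryConfig
from sqlspec.adapters.bigquery.litestar.store import BigQueryStore

config = BigQueryConfig(connection_config={"project": "my-project"})
store = BigQueryStore(config)

# Register the store under a name and point the session middleware at it.
app = Litestar(
    route_handlers=[],
    stores={"sessions": store},
    middleware=[ServerSideSessionConfig(store="sessions").middleware],
)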
sqlspec/adapters/bigquery/type_converter.py

@@ -4,7 +4,8 @@ Provides specialized type handling for BigQuery, including UUID support
 for the native BigQuery driver.
 """

-from
+from functools import lru_cache
+from typing import Any, Final
 from uuid import UUID

 from sqlspec.core.type_conversion import BaseTypeConverter, convert_uuid
@@ -14,7 +15,6 @@ try:
 except ImportError:
     ScalarQueryParameter = None  # type: ignore[assignment,misc]

-# Enhanced BigQuery type mapping with UUID support
 BQ_TYPE_MAP: Final[dict[str, str]] = {
     "str": "STRING",
     "int": "INT64",
@@ -23,7 +23,7 @@ BQ_TYPE_MAP: Final[dict[str, str]] = {
     "datetime": "DATETIME",
     "date": "DATE",
     "time": "TIME",
-    "UUID": "STRING",
+    "UUID": "STRING",
     "uuid": "STRING",
     "Decimal": "NUMERIC",
     "bytes": "BYTES",
@@ -31,17 +31,57 @@ BQ_TYPE_MAP: Final[dict[str, str]] = {
     "dict": "STRUCT",
 }

+BIGQUERY_SPECIAL_CHARS: Final[frozenset[str]] = frozenset({"{", "[", "-", ":", "T", "."})
+

 class BigQueryTypeConverter(BaseTypeConverter):
     """BigQuery-specific type conversion with UUID support.

     Extends the base TypeDetector with BigQuery-specific functionality
     including UUID parameter handling for the native BigQuery driver.
+    Includes per-instance LRU cache for improved performance.
     """

-    __slots__ = ()
+    __slots__ = ("_convert_cache", "_enable_uuid_conversion")
+
+    def __init__(self, cache_size: int = 5000, *, enable_uuid_conversion: bool = True) -> None:
+        """Initialize converter with per-instance conversion cache.
+
+        Args:
+            cache_size: Maximum number of string values to cache (default: 5000)
+            enable_uuid_conversion: Whether to enable automatic UUID conversion (default: True)
+        """
+        super().__init__()
+        self._enable_uuid_conversion = enable_uuid_conversion
+
+        @lru_cache(maxsize=cache_size)
+        def _cached_convert(value: str) -> Any:
+            if not value or not any(c in value for c in BIGQUERY_SPECIAL_CHARS):
+                return value
+            detected_type = self.detect_type(value)
+            if detected_type:
+                try:
+                    return self.convert_value(value, detected_type)
+                except Exception:
+                    return value
+            return value
+
+        self._convert_cache = _cached_convert
+
+    def convert_if_detected(self, value: Any) -> Any:
+        """Convert string if special type detected (cached).

-
+        Args:
+            value: Value to potentially convert
+
+        Returns:
+            Converted value or original value
+        """
+        if not isinstance(value, str):
+            return value
+        return self._convert_cache(value)
+
+    def create_parameter(self, name: str, value: Any) -> Any | None:
         """Create BigQuery parameter with proper type mapping.

         Args:
@@ -54,16 +94,16 @@ class BigQueryTypeConverter(BaseTypeConverter):
         if ScalarQueryParameter is None:
             return None

-        if
-
+        if self._enable_uuid_conversion:
+            if isinstance(value, UUID):
+                return ScalarQueryParameter(name, "STRING", str(value))

-
-
-
-
-
+            if isinstance(value, str):
+                detected_type = self.detect_type(value)
+                if detected_type == "uuid":
+                    uuid_obj = convert_uuid(value)
+                    return ScalarQueryParameter(name, "STRING", str(uuid_obj))

-        # Handle other types
         param_type = BQ_TYPE_MAP.get(type(value).__name__, "STRING")
         return ScalarQueryParameter(name, param_type, value)

@@ -78,16 +118,8 @@ class BigQueryTypeConverter(BaseTypeConverter):
             Converted value appropriate for the column type.
         """
         if column_type == "STRING" and isinstance(value, str):
-
-            detected_type = self.detect_type(value)
-            if detected_type:
-                try:
-                    return self.convert_value(value, detected_type)
-                except Exception:
-                    # If conversion fails, return original value
-                    return value
-
+            return self.convert_if_detected(value)
         return value


-__all__ = ("BQ_TYPE_MAP", "BigQueryTypeConverter")
+__all__ = ("BIGQUERY_SPECIAL_CHARS", "BQ_TYPE_MAP", "BigQueryTypeConverter")
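The __init__ added above wraps the detect/convert step in an lru_cache-decorated closure and binds it to the instance, so each converter gets its own bounded cache that is garbage-collected with the instance (avoiding the usual pitfall of lru_cache on methods, where the cache keys on self and lives for the life of the process). Below is a stripped-down, standalone sketch of that pattern; the conversion step is a placeholder rather than sqlspec's real detect_type/convert_value calls.

from functools import lru_cache
from typing import Any


class CachedDetector:
    """Toy illustration of the per-instance lru_cache-closure pattern."""

    __slots__ = ("_convert_cache",)

    def __init__(self, cache_size: int = 128) -> None:
        # Decorating a closure (not a method) keeps the cache per instance
        # and lets each instance choose its own maxsize.
        @lru_cache(maxsize=cache_size)
        def _cached(value: str) -> Any:
            return value.upper()  # stand-in for the real detect/convert step

        self._convert_cache = _cached

    def convert(self, value: Any) -> Any:
        if not isinstance(value, str):
            return value
        return self._convert_cache(value)


detector = CachedDetector()
assert detector.convert("abc") == "ABC"
print(detector._convert_cache.cache_info())  # hits/misses for this instance only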
sqlspec/adapters/duckdb/_types.py

@@ -1,9 +1,9 @@
 from typing import TYPE_CHECKING

-from duckdb import DuckDBPyConnection
+from duckdb import DuckDBPyConnection

 if TYPE_CHECKING:
-    from
+    from typing import TypeAlias

     DuckDBConnection: TypeAlias = DuckDBPyConnection
 else:
sqlspec/adapters/duckdb/adk/__init__.py

@@ -0,0 +1,14 @@
+"""DuckDB ADK store for Google Agent Development Kit.
+
+DuckDB is an OLAP database optimized for analytical queries. This adapter provides
+embedded session storage with zero-configuration setup, excellent for development,
+testing, and analytical workloads.
+
+Notes:
+    For highly concurrent DML operations, consider PostgreSQL or other
+    OLTP-optimized databases.
+"""
+
+from sqlspec.adapters.duckdb.adk.store import DuckdbADKStore
+
+__all__ = ("DuckdbADKStore",)