sqlspec 0.27.0__py3-none-any.whl → 0.28.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sqlspec/_typing.py +93 -0
- sqlspec/adapters/adbc/adk/store.py +21 -11
- sqlspec/adapters/adbc/data_dictionary.py +27 -5
- sqlspec/adapters/adbc/driver.py +83 -14
- sqlspec/adapters/aiosqlite/adk/store.py +27 -18
- sqlspec/adapters/asyncmy/adk/store.py +26 -16
- sqlspec/adapters/asyncpg/adk/store.py +26 -16
- sqlspec/adapters/asyncpg/data_dictionary.py +24 -17
- sqlspec/adapters/bigquery/adk/store.py +30 -21
- sqlspec/adapters/bigquery/config.py +11 -0
- sqlspec/adapters/bigquery/driver.py +138 -1
- sqlspec/adapters/duckdb/adk/store.py +21 -11
- sqlspec/adapters/duckdb/driver.py +87 -1
- sqlspec/adapters/oracledb/adk/store.py +89 -206
- sqlspec/adapters/oracledb/driver.py +183 -2
- sqlspec/adapters/oracledb/litestar/store.py +22 -24
- sqlspec/adapters/psqlpy/adk/store.py +28 -27
- sqlspec/adapters/psqlpy/data_dictionary.py +24 -17
- sqlspec/adapters/psqlpy/driver.py +7 -10
- sqlspec/adapters/psycopg/adk/store.py +51 -33
- sqlspec/adapters/psycopg/data_dictionary.py +48 -34
- sqlspec/adapters/sqlite/adk/store.py +29 -19
- sqlspec/config.py +100 -2
- sqlspec/core/filters.py +18 -10
- sqlspec/core/result.py +133 -2
- sqlspec/driver/_async.py +89 -0
- sqlspec/driver/_common.py +64 -29
- sqlspec/driver/_sync.py +95 -0
- sqlspec/extensions/adk/migrations/0001_create_adk_tables.py +2 -2
- sqlspec/extensions/adk/service.py +3 -3
- sqlspec/extensions/adk/store.py +8 -8
- sqlspec/extensions/aiosql/adapter.py +3 -15
- sqlspec/extensions/fastapi/__init__.py +21 -0
- sqlspec/extensions/fastapi/extension.py +331 -0
- sqlspec/extensions/fastapi/providers.py +543 -0
- sqlspec/extensions/flask/__init__.py +36 -0
- sqlspec/extensions/flask/_state.py +71 -0
- sqlspec/extensions/flask/_utils.py +40 -0
- sqlspec/extensions/flask/extension.py +389 -0
- sqlspec/extensions/litestar/config.py +3 -6
- sqlspec/extensions/litestar/plugin.py +26 -2
- sqlspec/extensions/starlette/__init__.py +10 -0
- sqlspec/extensions/starlette/_state.py +25 -0
- sqlspec/extensions/starlette/_utils.py +52 -0
- sqlspec/extensions/starlette/extension.py +254 -0
- sqlspec/extensions/starlette/middleware.py +154 -0
- sqlspec/protocols.py +40 -0
- sqlspec/storage/_utils.py +1 -14
- sqlspec/storage/backends/fsspec.py +3 -5
- sqlspec/storage/backends/local.py +1 -1
- sqlspec/storage/backends/obstore.py +10 -18
- sqlspec/typing.py +16 -0
- sqlspec/utils/__init__.py +25 -4
- sqlspec/utils/arrow_helpers.py +81 -0
- sqlspec/utils/module_loader.py +203 -3
- sqlspec/utils/portal.py +311 -0
- sqlspec/utils/serializers.py +110 -1
- sqlspec/utils/sync_tools.py +15 -5
- sqlspec/utils/type_guards.py +25 -0
- {sqlspec-0.27.0.dist-info → sqlspec-0.28.0.dist-info}/METADATA +2 -2
- {sqlspec-0.27.0.dist-info → sqlspec-0.28.0.dist-info}/RECORD +64 -50
- {sqlspec-0.27.0.dist-info → sqlspec-0.28.0.dist-info}/WHEEL +0 -0
- {sqlspec-0.27.0.dist-info → sqlspec-0.28.0.dist-info}/entry_points.txt +0 -0
- {sqlspec-0.27.0.dist-info → sqlspec-0.28.0.dist-info}/licenses/LICENSE +0 -0

sqlspec/adapters/oracledb/litestar/store.py

@@ -50,12 +50,13 @@ class OracleAsyncStore(BaseSQLSpecStore["OracleAsyncConfig"]):
     Notes:
         Configuration is read from config.extension_config["litestar"]:
         - session_table: Session table name (default: "litestar_session")
-        - in_memory: Enable INMEMORY clause (default: False, Oracle-specific)
+        - in_memory: Enable INMEMORY PRIORITY HIGH clause (default: False, Oracle-specific)
 
-        When in_memory=True, the table is created with INMEMORY clause for
-        faster read operations.
-
-
+        When in_memory=True, the table is created with INMEMORY PRIORITY HIGH clause for
+        faster read operations. PRIORITY HIGH ensures the table is populated into the
+        In-Memory column store at database startup for immediate performance benefits.
+        This requires Oracle Database 12.1.0.2+ with the Database In-Memory option licensed.
+        If In-Memory is not available, the table creation will fail with ORA-00439 or ORA-62142.
     """
 
     __slots__ = ("_in_memory",)
@@ -73,11 +74,8 @@ class OracleAsyncStore(BaseSQLSpecStore["OracleAsyncConfig"]):
         """
         super().__init__(config)
 
-
-
-            self._in_memory: bool = bool(litestar_config.get("in_memory", False))
-        else:
-            self._in_memory = False
+        litestar_config = config.extension_config.get("litestar", {})
+        self._in_memory = bool(litestar_config.get("in_memory", False))
 
     def _get_create_table_sql(self) -> str:
         """Get Oracle CREATE TABLE SQL with optimized schema.
@@ -91,9 +89,10 @@ class OracleAsyncStore(BaseSQLSpecStore["OracleAsyncConfig"]):
             - BLOB type for data storage (Oracle native binary type)
             - Audit columns (created_at, updated_at) help with debugging
             - Table name is internally controlled, not user input (S608 suppressed)
-            - INMEMORY clause added when in_memory=True for faster reads
+            - INMEMORY PRIORITY HIGH clause added when in_memory=True for faster reads
+            - HIGH priority ensures table population at database startup
         """
-        inmemory_clause = "INMEMORY" if self._in_memory else ""
+        inmemory_clause = "INMEMORY PRIORITY HIGH" if self._in_memory else ""
         return f"""
         BEGIN
             EXECUTE IMMEDIATE 'CREATE TABLE {self._table_name} (
@@ -419,12 +418,13 @@ class OracleSyncStore(BaseSQLSpecStore["OracleSyncConfig"]):
     Notes:
         Configuration is read from config.extension_config["litestar"]:
         - session_table: Session table name (default: "litestar_session")
-        - in_memory: Enable INMEMORY clause (default: False, Oracle-specific)
+        - in_memory: Enable INMEMORY PRIORITY HIGH clause (default: False, Oracle-specific)
 
-        When in_memory=True, the table is created with INMEMORY clause for
-        faster read operations.
-
-
+        When in_memory=True, the table is created with INMEMORY PRIORITY HIGH clause for
+        faster read operations. PRIORITY HIGH ensures the table is populated into the
+        In-Memory column store at database startup for immediate performance benefits.
+        This requires Oracle Database 12.1.0.2+ with the Database In-Memory option licensed.
+        If In-Memory is not available, the table creation will fail with ORA-00439 or ORA-62142.
     """
 
     __slots__ = ("_in_memory",)
@@ -442,11 +442,8 @@ class OracleSyncStore(BaseSQLSpecStore["OracleSyncConfig"]):
         """
         super().__init__(config)
 
-
-
-            self._in_memory: bool = bool(litestar_config.get("in_memory", False))
-        else:
-            self._in_memory = False
+        litestar_config = config.extension_config.get("litestar", {})
+        self._in_memory = bool(litestar_config.get("in_memory", False))
 
     def _get_create_table_sql(self) -> str:
         """Get Oracle CREATE TABLE SQL with optimized schema.
@@ -460,9 +457,10 @@ class OracleSyncStore(BaseSQLSpecStore["OracleSyncConfig"]):
             - BLOB type for data storage (Oracle native binary type)
             - Audit columns (created_at, updated_at) help with debugging
             - Table name is internally controlled, not user input (S608 suppressed)
-            - INMEMORY clause added when in_memory=True for faster reads
+            - INMEMORY PRIORITY HIGH clause added when in_memory=True for faster reads
+            - HIGH priority ensures table population at database startup
         """
-        inmemory_clause = "INMEMORY" if self._in_memory else ""
+        inmemory_clause = "INMEMORY PRIORITY HIGH" if self._in_memory else ""
         return f"""
         BEGIN
             EXECUTE IMMEDIATE 'CREATE TABLE {self._table_name} (
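
For context, a quick sketch of how the new in_memory option would be switched on from application code. The extension_config["litestar"] keys come from the hunks above; the OracleAsyncConfig import path and its pool_config/extension_config constructor arguments are assumptions for illustration only.

# Hedged configuration sketch: only the extension_config["litestar"] keys are
# taken from the diff above; the connection settings are placeholders.
from sqlspec.adapters.oracledb import OracleAsyncConfig

config = OracleAsyncConfig(
    pool_config={"user": "app", "password": "secret", "dsn": "localhost/FREEPDB1"},
    extension_config={
        "litestar": {
            "session_table": "litestar_session",
            "in_memory": True,  # emits INMEMORY PRIORITY HIGH; needs Oracle 12.1.0.2+ with the In-Memory option
        }
    },
)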

sqlspec/adapters/psqlpy/adk/store.py

@@ -81,7 +81,7 @@ class PsqlpyADKStore(BaseAsyncADKStore["PsqlpyConfig"]):
         """
         super().__init__(config)
 
-    def _get_create_sessions_table_sql(self) -> str:
+    async def _get_create_sessions_table_sql(self) -> str:
         """Get PostgreSQL CREATE TABLE SQL for sessions.
 
         Returns:
@@ -122,7 +122,7 @@ class PsqlpyADKStore(BaseAsyncADKStore["PsqlpyConfig"]):
             WHERE state != '{{}}'::jsonb;
         """
 
-    def _get_create_events_table_sql(self) -> str:
+    async def _get_create_events_table_sql(self) -> str:
         """Get PostgreSQL CREATE TABLE SQL for events.
 
         Returns:
@@ -180,22 +180,13 @@ class PsqlpyADKStore(BaseAsyncADKStore["PsqlpyConfig"]):
         """Create both sessions and events tables if they don't exist.
 
         Notes:
-
-            Splits SQL statements and executes them separately.
+            Uses driver.execute_script() which handles multiple statements.
             Creates sessions table first, then events table (FK dependency).
         """
-        async with self._config.
-
-
-
-                if statement:
-                    await conn.execute(statement, [])
-
-            events_sql = self._get_create_events_table_sql()
-            for statement in events_sql.split(";"):
-                statement = statement.strip()
-                if statement:
-                    await conn.execute(statement, [])
+        async with self._config.provide_session() as driver:
+            await driver.execute_script(await self._get_create_sessions_table_sql())
+            await driver.execute_script(await self._get_create_events_table_sql())
+
         logger.debug("Created ADK tables: %s, %s", self._session_table, self._events_table)
 
     async def create_session(
@@ -313,30 +304,40 @@ class PsqlpyADKStore(BaseAsyncADKStore["PsqlpyConfig"]):
         async with self._config.provide_connection() as conn:  # pyright: ignore[reportAttributeAccessIssue]
             await conn.execute(sql, [session_id])
 
-    async def list_sessions(self, app_name: str, user_id: str) -> "list[SessionRecord]":
-        """List
+    async def list_sessions(self, app_name: str, user_id: str | None = None) -> "list[SessionRecord]":
+        """List sessions for an app, optionally filtered by user.
 
         Args:
             app_name: Application name.
-            user_id: User identifier.
+            user_id: User identifier. If None, lists all sessions for the app.
 
         Returns:
             List of session records ordered by update_time DESC.
 
         Notes:
-            Uses composite index on (app_name, user_id).
+            Uses composite index on (app_name, user_id) when user_id is provided.
             Returns empty list if table doesn't exist.
         """
-
-
-
-
-
-
+        if user_id is None:
+            sql = f"""
+                SELECT id, app_name, user_id, state, create_time, update_time
+                FROM {self._session_table}
+                WHERE app_name = $1
+                ORDER BY update_time DESC
+            """
+            params = [app_name]
+        else:
+            sql = f"""
+                SELECT id, app_name, user_id, state, create_time, update_time
+                FROM {self._session_table}
+                WHERE app_name = $1 AND user_id = $2
+                ORDER BY update_time DESC
+            """
+            params = [app_name, user_id]
 
         try:
             async with self._config.provide_connection() as conn:  # pyright: ignore[reportAttributeAccessIssue]
-                result = await conn.fetch(sql,
+                result = await conn.fetch(sql, params)
                 rows: list[dict[str, Any]] = result.result() if result else []
 
                 return [
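
A hedged usage sketch of the widened list_sessions() signature and the new create_tables() flow. Only create_tables(), the optional user_id parameter, and the store's module path come from the hunks above; the PsqlpyConfig constructor arguments and the DSN are illustrative assumptions.

from sqlspec.adapters.psqlpy import PsqlpyConfig
from sqlspec.adapters.psqlpy.adk.store import PsqlpyADKStore

async def show_sessions() -> None:
    # Assumed setup: pool_config and the DSN are placeholders, not documented values.
    config = PsqlpyConfig(pool_config={"dsn": "postgres://app:secret@localhost/appdb"})
    store = PsqlpyADKStore(config)

    await store.create_tables()  # runs both CREATE TABLE scripts via execute_script()

    everything = await store.list_sessions("my_app")            # no user_id: all sessions for the app
    one_user = await store.list_sessions("my_app", "user-123")  # filtered via the (app_name, user_id) index
    print(len(everything), len(one_user))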

sqlspec/adapters/psqlpy/data_dictionary.py

@@ -116,7 +116,7 @@ class PsqlpyAsyncDataDictionary(AsyncDataDictionaryBase):
     async def get_columns(
         self, driver: AsyncDriverAdapterBase, table: str, schema: "str | None" = None
     ) -> "list[dict[str, Any]]":
-        """Get column information for a table using
+        """Get column information for a table using pg_catalog.
 
         Args:
             driver: Psqlpy async driver instance
@@ -129,25 +129,32 @@ class PsqlpyAsyncDataDictionary(AsyncDataDictionaryBase):
             - data_type: PostgreSQL data type
             - is_nullable: Whether column allows NULL (YES/NO)
             - column_default: Default value if any
+
+        Notes:
+            Uses pg_catalog instead of information_schema to avoid psqlpy's
+            inability to handle the PostgreSQL 'name' type returned by information_schema.
         """
         psqlpy_driver = cast("PsqlpyDriver", driver)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        schema_name = schema or "public"
+        sql = """
+            SELECT
+                a.attname::text AS column_name,
+                pg_catalog.format_type(a.atttypid, a.atttypmod) AS data_type,
+                CASE WHEN a.attnotnull THEN 'NO' ELSE 'YES' END AS is_nullable,
+                pg_catalog.pg_get_expr(d.adbin, d.adrelid)::text AS column_default
+            FROM pg_catalog.pg_attribute a
+            JOIN pg_catalog.pg_class c ON a.attrelid = c.oid
+            JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
+            LEFT JOIN pg_catalog.pg_attrdef d ON a.attrelid = d.adrelid AND a.attnum = d.adnum
+            WHERE c.relname = $1
+              AND n.nspname = $2
+              AND a.attnum > 0
+              AND NOT a.attisdropped
+            ORDER BY a.attnum
+        """
+
+        result = await psqlpy_driver.execute(sql, (table, schema_name))
         return result.data or []
 
     def list_available_features(self) -> "list[str]":
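
To show how the rewritten column lookup is meant to be called, a small sketch using only pieces visible in the hunks above (provide_session(), the PsqlpyAsyncDataDictionary import, and the get_columns() signature); the table name and the config parameter are illustrative.

from sqlspec.adapters.psqlpy.data_dictionary import PsqlpyAsyncDataDictionary

async def describe_table(config: "PsqlpyConfig") -> None:
    # "adk_sessions" is a placeholder table name for illustration only.
    async with config.provide_session() as driver:
        columns = await PsqlpyAsyncDataDictionary().get_columns(driver, "adk_sessions", schema="public")
        for column in columns:
            print(column["column_name"], column["data_type"], column["is_nullable"], column["column_default"])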

sqlspec/adapters/psqlpy/driver.py

@@ -8,9 +8,9 @@ import decimal
 import re
 from typing import TYPE_CHECKING, Any, Final
 
-import psqlpy
 import psqlpy.exceptions
 
+from sqlspec.adapters.psqlpy.data_dictionary import PsqlpyAsyncDataDictionary
 from sqlspec.adapters.psqlpy.type_converter import PostgreSQLTypeConverter
 from sqlspec.core.cache import get_cache_config
 from sqlspec.core.parameters import ParameterStyle, ParameterStyleConfig
@@ -269,17 +269,16 @@ class PsqlpyDriver(AsyncDriverAdapterBase):
 
         Returns:
             ExecutionResult with script execution metadata
+
+        Notes:
+            Uses execute() with empty parameters for each statement instead of execute_batch().
+            execute_batch() uses simple query protocol which can break subsequent queries
+            that rely on extended protocol (e.g., information_schema queries with name type).
         """
         sql, prepared_parameters = self._get_compiled_sql(statement, self.statement_config)
         statement_config = statement.statement_config
-
-        if not prepared_parameters:
-            await cursor.execute_batch(sql)
-            statements = self.split_script_statements(sql, statement_config, strip_trailing_semicolon=True)
-            return self.create_execution_result(
-                cursor, statement_count=len(statements), successful_statements=len(statements), is_script_result=True
-            )
         statements = self.split_script_statements(sql, statement_config, strip_trailing_semicolon=True)
+
         successful_count = 0
         last_result = None
 
@@ -421,7 +420,5 @@ class PsqlpyDriver(AsyncDriverAdapterBase):
             Data dictionary instance for metadata queries
         """
         if self._data_dictionary is None:
-            from sqlspec.adapters.psqlpy.data_dictionary import PsqlpyAsyncDataDictionary
-
             self._data_dictionary = PsqlpyAsyncDataDictionary()
         return self._data_dictionary
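
To make the protocol note in the execute_script hunk above concrete, here is a standalone sketch of the execute-per-statement pattern it describes. The helper name and the naive semicolon split are hypothetical illustrations (the driver itself uses split_script_statements()); only the conn.execute(statement, []) call mirrors code visible elsewhere in this diff.

async def run_script(conn, script: str) -> int:
    # Hypothetical helper: run each statement through execute() with an empty
    # parameter list so everything stays on the extended query protocol,
    # rather than sending the whole script through execute_batch().
    executed = 0
    for raw_statement in script.split(";"):
        statement = raw_statement.strip()
        if statement:
            await conn.execute(statement, [])
            executed += 1
    return executed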

sqlspec/adapters/psycopg/adk/store.py

@@ -1,6 +1,6 @@
 """Psycopg ADK store for Google Agent Development Kit session/event storage."""
 
-from typing import TYPE_CHECKING, Any
+from typing import TYPE_CHECKING, Any
 
 from psycopg import errors
 from psycopg import sql as pg_sql
@@ -12,8 +12,6 @@ from sqlspec.utils.logging import get_logger
 if TYPE_CHECKING:
     from datetime import datetime
 
-    from psycopg.abc import Query
-
     from sqlspec.adapters.psycopg.config import PsycopgAsyncConfig, PsycopgSyncConfig
 
 logger = get_logger("adapters.psycopg.adk.store")
@@ -84,7 +82,7 @@ class PsycopgAsyncADKStore(BaseAsyncADKStore["PsycopgAsyncConfig"]):
         """
         super().__init__(config)
 
-    def _get_create_sessions_table_sql(self) -> str:
+    async def _get_create_sessions_table_sql(self) -> str:
         """Get PostgreSQL CREATE TABLE SQL for sessions.
 
         Returns:
@@ -125,7 +123,7 @@ class PsycopgAsyncADKStore(BaseAsyncADKStore["PsycopgAsyncConfig"]):
             WHERE state != '{{}}'::jsonb;
         """
 
-    def _get_create_events_table_sql(self) -> str:
+    async def _get_create_events_table_sql(self) -> str:
         """Get PostgreSQL CREATE TABLE SQL for events.
 
         Returns:
@@ -181,9 +179,9 @@ class PsycopgAsyncADKStore(BaseAsyncADKStore["PsycopgAsyncConfig"]):
 
     async def create_tables(self) -> None:
         """Create both sessions and events tables if they don't exist."""
-        async with self._config.
-            await
-            await
+        async with self._config.provide_session() as driver:
+            await driver.execute_script(await self._get_create_sessions_table_sql())
+            await driver.execute_script(await self._get_create_events_table_sql())
         logger.debug("Created ADK tables: %s, %s", self._session_table, self._events_table)
 
     async def create_session(
@@ -300,29 +298,39 @@ class PsycopgAsyncADKStore(BaseAsyncADKStore["PsycopgAsyncConfig"]):
         async with self._config.provide_connection() as conn, conn.cursor() as cur:
             await cur.execute(query, (session_id,))
 
-    async def list_sessions(self, app_name: str, user_id: str) -> "list[SessionRecord]":
-        """List
+    async def list_sessions(self, app_name: str, user_id: str | None = None) -> "list[SessionRecord]":
+        """List sessions for an app, optionally filtered by user.
 
         Args:
             app_name: Application name.
-            user_id: User identifier.
+            user_id: User identifier. If None, lists all sessions for the app.
 
         Returns:
             List of session records ordered by update_time DESC.
 
         Notes:
-            Uses composite index on (app_name, user_id).
+            Uses composite index on (app_name, user_id) when user_id is provided.
         """
-
-
-
-
-
-
+        if user_id is None:
+            query = pg_sql.SQL("""
+                SELECT id, app_name, user_id, state, create_time, update_time
+                FROM {table}
+                WHERE app_name = %s
+                ORDER BY update_time DESC
+            """).format(table=pg_sql.Identifier(self._session_table))
+            params: tuple[str, ...] = (app_name,)
+        else:
+            query = pg_sql.SQL("""
+                SELECT id, app_name, user_id, state, create_time, update_time
+                FROM {table}
+                WHERE app_name = %s AND user_id = %s
+                ORDER BY update_time DESC
+            """).format(table=pg_sql.Identifier(self._session_table))
+            params = (app_name, user_id)
 
         try:
             async with self._config.provide_connection() as conn, conn.cursor() as cur:
-                await cur.execute(query,
+                await cur.execute(query, params)
                 rows = await cur.fetchall()
 
                 return [
@@ -626,9 +634,9 @@ class PsycopgSyncADKStore(BaseSyncADKStore["PsycopgSyncConfig"]):
 
     def create_tables(self) -> None:
         """Create both sessions and events tables if they don't exist."""
-        with self._config.
-
-
+        with self._config.provide_session() as driver:
+            driver.execute_script(self._get_create_sessions_table_sql())
+            driver.execute_script(self._get_create_events_table_sql())
         logger.debug("Created ADK tables: %s, %s", self._session_table, self._events_table)
 
     def create_session(
@@ -745,29 +753,39 @@ class PsycopgSyncADKStore(BaseSyncADKStore["PsycopgSyncConfig"]):
         with self._config.provide_connection() as conn, conn.cursor() as cur:
             cur.execute(query, (session_id,))
 
-    def list_sessions(self, app_name: str, user_id: str) -> "list[SessionRecord]":
-        """List
+    def list_sessions(self, app_name: str, user_id: str | None = None) -> "list[SessionRecord]":
+        """List sessions for an app, optionally filtered by user.
 
         Args:
             app_name: Application name.
-            user_id: User identifier.
+            user_id: User identifier. If None, lists all sessions for the app.
 
         Returns:
             List of session records ordered by update_time DESC.
 
         Notes:
-            Uses composite index on (app_name, user_id).
+            Uses composite index on (app_name, user_id) when user_id is provided.
         """
-
-
-
-
-
-
+        if user_id is None:
+            query = pg_sql.SQL("""
+                SELECT id, app_name, user_id, state, create_time, update_time
+                FROM {table}
+                WHERE app_name = %s
+                ORDER BY update_time DESC
+            """).format(table=pg_sql.Identifier(self._session_table))
+            params: tuple[str, ...] = (app_name,)
+        else:
+            query = pg_sql.SQL("""
+                SELECT id, app_name, user_id, state, create_time, update_time
+                FROM {table}
+                WHERE app_name = %s AND user_id = %s
+                ORDER BY update_time DESC
+            """).format(table=pg_sql.Identifier(self._session_table))
+            params = (app_name, user_id)
 
         try:
             with self._config.provide_connection() as conn, conn.cursor() as cur:
-                cur.execute(query,
+                cur.execute(query, params)
                 rows = cur.fetchall()
 
                 return [
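
The branches above lean on psycopg's SQL composition to inject the table name safely. As a small standalone illustration of that technique (the table name and parameter values here are examples, not values from the package), the same pattern looks like this outside the store:

from psycopg import sql as pg_sql

query = pg_sql.SQL(
    "SELECT id, app_name, user_id, state, create_time, update_time "
    "FROM {table} WHERE app_name = %s ORDER BY update_time DESC"
).format(table=pg_sql.Identifier("adk_sessions"))

# With an open psycopg connection `conn` (not shown), the composed query runs as:
#     with conn.cursor() as cur:
#         cur.execute(query, ("my_app",))
#         rows = cur.fetchall()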

sqlspec/adapters/psycopg/data_dictionary.py

@@ -122,7 +122,7 @@ class PostgresSyncDataDictionary(SyncDataDictionaryBase):
     def get_columns(
         self, driver: SyncDriverAdapterBase, table: str, schema: "str | None" = None
     ) -> "list[dict[str, Any]]":
-        """Get column information for a table using
+        """Get column information for a table using pg_catalog.
 
         Args:
             driver: Psycopg sync driver instance
@@ -135,25 +135,32 @@ class PostgresSyncDataDictionary(SyncDataDictionaryBase):
             - data_type: PostgreSQL data type
             - is_nullable: Whether column allows NULL (YES/NO)
             - column_default: Default value if any
+
+        Notes:
+            Uses pg_catalog instead of information_schema to avoid potential
+            issues with PostgreSQL 'name' type in some drivers.
         """
         psycopg_driver = cast("PsycopgSyncDriver", driver)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        schema_name = schema or "public"
+        sql = """
+            SELECT
+                a.attname::text AS column_name,
+                pg_catalog.format_type(a.atttypid, a.atttypmod) AS data_type,
+                CASE WHEN a.attnotnull THEN 'NO' ELSE 'YES' END AS is_nullable,
+                pg_catalog.pg_get_expr(d.adbin, d.adrelid)::text AS column_default
+            FROM pg_catalog.pg_attribute a
+            JOIN pg_catalog.pg_class c ON a.attrelid = c.oid
+            JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
+            LEFT JOIN pg_catalog.pg_attrdef d ON a.attrelid = d.adrelid AND a.attnum = d.adnum
+            WHERE c.relname = %s
+              AND n.nspname = %s
+              AND a.attnum > 0
+              AND NOT a.attisdropped
+            ORDER BY a.attnum
+        """
+
+        result = psycopg_driver.execute(sql, (table, schema_name))
         return result.data or []
 
     def list_available_features(self) -> "list[str]":
@@ -275,7 +282,7 @@ class PostgresAsyncDataDictionary(AsyncDataDictionaryBase):
     async def get_columns(
         self, driver: AsyncDriverAdapterBase, table: str, schema: "str | None" = None
     ) -> "list[dict[str, Any]]":
-        """Get column information for a table using
+        """Get column information for a table using pg_catalog.
 
         Args:
             driver: Psycopg async driver instance
@@ -288,25 +295,32 @@ class PostgresAsyncDataDictionary(AsyncDataDictionaryBase):
             - data_type: PostgreSQL data type
             - is_nullable: Whether column allows NULL (YES/NO)
             - column_default: Default value if any
+
+        Notes:
+            Uses pg_catalog instead of information_schema to avoid potential
+            issues with PostgreSQL 'name' type in some drivers.
         """
         psycopg_driver = cast("PsycopgAsyncDriver", driver)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        schema_name = schema or "public"
+        sql = """
+            SELECT
+                a.attname::text AS column_name,
+                pg_catalog.format_type(a.atttypid, a.atttypmod) AS data_type,
+                CASE WHEN a.attnotnull THEN 'NO' ELSE 'YES' END AS is_nullable,
+                pg_catalog.pg_get_expr(d.adbin, d.adrelid)::text AS column_default
+            FROM pg_catalog.pg_attribute a
+            JOIN pg_catalog.pg_class c ON a.attrelid = c.oid
+            JOIN pg_catalog.pg_namespace n ON c.relnamespace = n.oid
+            LEFT JOIN pg_catalog.pg_attrdef d ON a.attrelid = d.adrelid AND a.attnum = d.adnum
+            WHERE c.relname = %s
+              AND n.nspname = %s
+              AND a.attnum > 0
+              AND NOT a.attisdropped
+            ORDER BY a.attnum
+        """
+
+        result = await psycopg_driver.execute(sql, (table, schema_name))
         return result.data or []
 
     def list_available_features(self) -> "list[str]":