sqlspec 0.25.0__py3-none-any.whl → 0.27.0__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of sqlspec has been flagged for review.
- sqlspec/__init__.py +7 -15
- sqlspec/_serialization.py +256 -24
- sqlspec/_typing.py +71 -52
- sqlspec/adapters/adbc/_types.py +1 -1
- sqlspec/adapters/adbc/adk/__init__.py +5 -0
- sqlspec/adapters/adbc/adk/store.py +870 -0
- sqlspec/adapters/adbc/config.py +69 -12
- sqlspec/adapters/adbc/data_dictionary.py +340 -0
- sqlspec/adapters/adbc/driver.py +266 -58
- sqlspec/adapters/adbc/litestar/__init__.py +5 -0
- sqlspec/adapters/adbc/litestar/store.py +504 -0
- sqlspec/adapters/adbc/type_converter.py +153 -0
- sqlspec/adapters/aiosqlite/_types.py +1 -1
- sqlspec/adapters/aiosqlite/adk/__init__.py +5 -0
- sqlspec/adapters/aiosqlite/adk/store.py +527 -0
- sqlspec/adapters/aiosqlite/config.py +88 -15
- sqlspec/adapters/aiosqlite/data_dictionary.py +149 -0
- sqlspec/adapters/aiosqlite/driver.py +143 -40
- sqlspec/adapters/aiosqlite/litestar/__init__.py +5 -0
- sqlspec/adapters/aiosqlite/litestar/store.py +281 -0
- sqlspec/adapters/aiosqlite/pool.py +7 -7
- sqlspec/adapters/asyncmy/__init__.py +7 -1
- sqlspec/adapters/asyncmy/_types.py +2 -2
- sqlspec/adapters/asyncmy/adk/__init__.py +5 -0
- sqlspec/adapters/asyncmy/adk/store.py +493 -0
- sqlspec/adapters/asyncmy/config.py +68 -23
- sqlspec/adapters/asyncmy/data_dictionary.py +161 -0
- sqlspec/adapters/asyncmy/driver.py +313 -58
- sqlspec/adapters/asyncmy/litestar/__init__.py +5 -0
- sqlspec/adapters/asyncmy/litestar/store.py +296 -0
- sqlspec/adapters/asyncpg/__init__.py +2 -1
- sqlspec/adapters/asyncpg/_type_handlers.py +71 -0
- sqlspec/adapters/asyncpg/_types.py +11 -7
- sqlspec/adapters/asyncpg/adk/__init__.py +5 -0
- sqlspec/adapters/asyncpg/adk/store.py +450 -0
- sqlspec/adapters/asyncpg/config.py +59 -35
- sqlspec/adapters/asyncpg/data_dictionary.py +173 -0
- sqlspec/adapters/asyncpg/driver.py +170 -25
- sqlspec/adapters/asyncpg/litestar/__init__.py +5 -0
- sqlspec/adapters/asyncpg/litestar/store.py +253 -0
- sqlspec/adapters/bigquery/_types.py +1 -1
- sqlspec/adapters/bigquery/adk/__init__.py +5 -0
- sqlspec/adapters/bigquery/adk/store.py +576 -0
- sqlspec/adapters/bigquery/config.py +27 -10
- sqlspec/adapters/bigquery/data_dictionary.py +149 -0
- sqlspec/adapters/bigquery/driver.py +368 -142
- sqlspec/adapters/bigquery/litestar/__init__.py +5 -0
- sqlspec/adapters/bigquery/litestar/store.py +327 -0
- sqlspec/adapters/bigquery/type_converter.py +125 -0
- sqlspec/adapters/duckdb/_types.py +1 -1
- sqlspec/adapters/duckdb/adk/__init__.py +14 -0
- sqlspec/adapters/duckdb/adk/store.py +553 -0
- sqlspec/adapters/duckdb/config.py +80 -20
- sqlspec/adapters/duckdb/data_dictionary.py +163 -0
- sqlspec/adapters/duckdb/driver.py +167 -45
- sqlspec/adapters/duckdb/litestar/__init__.py +5 -0
- sqlspec/adapters/duckdb/litestar/store.py +332 -0
- sqlspec/adapters/duckdb/pool.py +4 -4
- sqlspec/adapters/duckdb/type_converter.py +133 -0
- sqlspec/adapters/oracledb/_numpy_handlers.py +133 -0
- sqlspec/adapters/oracledb/_types.py +20 -2
- sqlspec/adapters/oracledb/adk/__init__.py +5 -0
- sqlspec/adapters/oracledb/adk/store.py +1745 -0
- sqlspec/adapters/oracledb/config.py +122 -32
- sqlspec/adapters/oracledb/data_dictionary.py +509 -0
- sqlspec/adapters/oracledb/driver.py +353 -91
- sqlspec/adapters/oracledb/litestar/__init__.py +5 -0
- sqlspec/adapters/oracledb/litestar/store.py +767 -0
- sqlspec/adapters/oracledb/migrations.py +348 -73
- sqlspec/adapters/oracledb/type_converter.py +207 -0
- sqlspec/adapters/psqlpy/_type_handlers.py +44 -0
- sqlspec/adapters/psqlpy/_types.py +2 -1
- sqlspec/adapters/psqlpy/adk/__init__.py +5 -0
- sqlspec/adapters/psqlpy/adk/store.py +482 -0
- sqlspec/adapters/psqlpy/config.py +46 -17
- sqlspec/adapters/psqlpy/data_dictionary.py +172 -0
- sqlspec/adapters/psqlpy/driver.py +123 -209
- sqlspec/adapters/psqlpy/litestar/__init__.py +5 -0
- sqlspec/adapters/psqlpy/litestar/store.py +272 -0
- sqlspec/adapters/psqlpy/type_converter.py +102 -0
- sqlspec/adapters/psycopg/_type_handlers.py +80 -0
- sqlspec/adapters/psycopg/_types.py +2 -1
- sqlspec/adapters/psycopg/adk/__init__.py +5 -0
- sqlspec/adapters/psycopg/adk/store.py +944 -0
- sqlspec/adapters/psycopg/config.py +69 -35
- sqlspec/adapters/psycopg/data_dictionary.py +331 -0
- sqlspec/adapters/psycopg/driver.py +238 -81
- sqlspec/adapters/psycopg/litestar/__init__.py +5 -0
- sqlspec/adapters/psycopg/litestar/store.py +554 -0
- sqlspec/adapters/sqlite/__init__.py +2 -1
- sqlspec/adapters/sqlite/_type_handlers.py +86 -0
- sqlspec/adapters/sqlite/_types.py +1 -1
- sqlspec/adapters/sqlite/adk/__init__.py +5 -0
- sqlspec/adapters/sqlite/adk/store.py +572 -0
- sqlspec/adapters/sqlite/config.py +87 -15
- sqlspec/adapters/sqlite/data_dictionary.py +149 -0
- sqlspec/adapters/sqlite/driver.py +137 -54
- sqlspec/adapters/sqlite/litestar/__init__.py +5 -0
- sqlspec/adapters/sqlite/litestar/store.py +318 -0
- sqlspec/adapters/sqlite/pool.py +18 -9
- sqlspec/base.py +45 -26
- sqlspec/builder/__init__.py +73 -4
- sqlspec/builder/_base.py +162 -89
- sqlspec/builder/_column.py +62 -29
- sqlspec/builder/_ddl.py +180 -121
- sqlspec/builder/_delete.py +5 -4
- sqlspec/builder/_dml.py +388 -0
- sqlspec/{_sql.py → builder/_factory.py} +53 -94
- sqlspec/builder/_insert.py +32 -131
- sqlspec/builder/_join.py +375 -0
- sqlspec/builder/_merge.py +446 -11
- sqlspec/builder/_parsing_utils.py +111 -17
- sqlspec/builder/_select.py +1457 -24
- sqlspec/builder/_update.py +11 -42
- sqlspec/cli.py +307 -194
- sqlspec/config.py +252 -67
- sqlspec/core/__init__.py +5 -4
- sqlspec/core/cache.py +17 -17
- sqlspec/core/compiler.py +62 -9
- sqlspec/core/filters.py +37 -37
- sqlspec/core/hashing.py +9 -9
- sqlspec/core/parameters.py +83 -48
- sqlspec/core/result.py +102 -46
- sqlspec/core/splitter.py +16 -17
- sqlspec/core/statement.py +36 -30
- sqlspec/core/type_conversion.py +235 -0
- sqlspec/driver/__init__.py +7 -6
- sqlspec/driver/_async.py +188 -151
- sqlspec/driver/_common.py +285 -80
- sqlspec/driver/_sync.py +188 -152
- sqlspec/driver/mixins/_result_tools.py +20 -236
- sqlspec/driver/mixins/_sql_translator.py +4 -4
- sqlspec/exceptions.py +75 -7
- sqlspec/extensions/adk/__init__.py +53 -0
- sqlspec/extensions/adk/_types.py +51 -0
- sqlspec/extensions/adk/converters.py +172 -0
- sqlspec/extensions/adk/migrations/0001_create_adk_tables.py +144 -0
- sqlspec/extensions/adk/migrations/__init__.py +0 -0
- sqlspec/extensions/adk/service.py +181 -0
- sqlspec/extensions/adk/store.py +536 -0
- sqlspec/extensions/aiosql/adapter.py +73 -53
- sqlspec/extensions/litestar/__init__.py +21 -4
- sqlspec/extensions/litestar/cli.py +54 -10
- sqlspec/extensions/litestar/config.py +59 -266
- sqlspec/extensions/litestar/handlers.py +46 -17
- sqlspec/extensions/litestar/migrations/0001_create_session_table.py +137 -0
- sqlspec/extensions/litestar/migrations/__init__.py +3 -0
- sqlspec/extensions/litestar/plugin.py +324 -223
- sqlspec/extensions/litestar/providers.py +25 -25
- sqlspec/extensions/litestar/store.py +265 -0
- sqlspec/loader.py +30 -49
- sqlspec/migrations/__init__.py +4 -3
- sqlspec/migrations/base.py +302 -39
- sqlspec/migrations/commands.py +611 -144
- sqlspec/migrations/context.py +142 -0
- sqlspec/migrations/fix.py +199 -0
- sqlspec/migrations/loaders.py +68 -23
- sqlspec/migrations/runner.py +543 -107
- sqlspec/migrations/tracker.py +237 -21
- sqlspec/migrations/utils.py +51 -3
- sqlspec/migrations/validation.py +177 -0
- sqlspec/protocols.py +66 -36
- sqlspec/storage/_utils.py +98 -0
- sqlspec/storage/backends/fsspec.py +134 -106
- sqlspec/storage/backends/local.py +78 -51
- sqlspec/storage/backends/obstore.py +278 -162
- sqlspec/storage/registry.py +75 -39
- sqlspec/typing.py +16 -84
- sqlspec/utils/config_resolver.py +153 -0
- sqlspec/utils/correlation.py +4 -5
- sqlspec/utils/data_transformation.py +3 -2
- sqlspec/utils/deprecation.py +9 -8
- sqlspec/utils/fixtures.py +4 -4
- sqlspec/utils/logging.py +46 -6
- sqlspec/utils/module_loader.py +2 -2
- sqlspec/utils/schema.py +288 -0
- sqlspec/utils/serializers.py +50 -2
- sqlspec/utils/sync_tools.py +21 -17
- sqlspec/utils/text.py +1 -2
- sqlspec/utils/type_guards.py +111 -20
- sqlspec/utils/version.py +433 -0
- {sqlspec-0.25.0.dist-info → sqlspec-0.27.0.dist-info}/METADATA +40 -21
- sqlspec-0.27.0.dist-info/RECORD +207 -0
- sqlspec/builder/mixins/__init__.py +0 -55
- sqlspec/builder/mixins/_cte_and_set_ops.py +0 -254
- sqlspec/builder/mixins/_delete_operations.py +0 -50
- sqlspec/builder/mixins/_insert_operations.py +0 -282
- sqlspec/builder/mixins/_join_operations.py +0 -389
- sqlspec/builder/mixins/_merge_operations.py +0 -592
- sqlspec/builder/mixins/_order_limit_operations.py +0 -152
- sqlspec/builder/mixins/_pivot_operations.py +0 -157
- sqlspec/builder/mixins/_select_operations.py +0 -936
- sqlspec/builder/mixins/_update_operations.py +0 -218
- sqlspec/builder/mixins/_where_clause.py +0 -1304
- sqlspec-0.25.0.dist-info/RECORD +0 -139
- sqlspec-0.25.0.dist-info/licenses/NOTICE +0 -29
- {sqlspec-0.25.0.dist-info → sqlspec-0.27.0.dist-info}/WHEEL +0 -0
- {sqlspec-0.25.0.dist-info → sqlspec-0.27.0.dist-info}/entry_points.txt +0 -0
- {sqlspec-0.25.0.dist-info → sqlspec-0.27.0.dist-info}/licenses/LICENSE +0 -0
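The headline additions in this release are the per-adapter ADK session/event stores (sqlspec/adapters/*/adk/store.py), the per-adapter Litestar stores, and the shared sqlspec/extensions/adk package. As orientation, here is a minimal usage sketch assembled from the docstring example and method signatures in the expanded Oracle store listing below; the DSN, credentials, and session payload are illustrative placeholders, not values taken from this diff:

    import asyncio

    from sqlspec.adapters.oracledb import OracleAsyncConfig
    from sqlspec.adapters.oracledb.adk import OracleAsyncADKStore


    async def main() -> None:
        # DSN and state payload are hypothetical; table names are the documented defaults.
        config = OracleAsyncConfig(
            pool_config={"dsn": "oracle://app_user:app_pass@localhost:1521/FREEPDB1"},
            extension_config={
                "adk": {
                    "session_table": "adk_sessions",
                    "events_table": "adk_events",
                }
            },
        )
        store = OracleAsyncADKStore(config)
        await store.create_tables()

        # create_session() returns the stored SessionRecord (see listing below).
        session = await store.create_session(
            session_id="session-1", app_name="demo", user_id="user-1", state={"step": 1}
        )
        print(session)


    asyncio.run(main())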
sqlspec/adapters/oracledb/adk/store.py (new file)
@@ -0,0 +1,1745 @@
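The module keys its schema off the Oracle server version. Before the full listing, a condensed, illustrative restatement of the _detect_json_storage_type rule implemented below (the version strings in the asserts are examples, not values from this diff):

    from sqlspec.adapters.oracledb.adk.store import (
        ORACLE_MIN_JSON_BLOB_VERSION,
        ORACLE_MIN_JSON_NATIVE_COMPATIBLE,
        ORACLE_MIN_JSON_NATIVE_VERSION,
        JSONStorageType,
    )


    def pick_storage_type(version: str, compatible: str) -> JSONStorageType:
        # Condensed sketch of the rule in the listing: 21c+ with compatible >= 20
        # uses the native JSON type, 12c+ uses BLOB with an IS JSON check,
        # and anything older falls back to a plain BLOB column.
        major = int(version.split(".")[0])
        compatible_major = int(compatible.split(".")[0])
        if major >= ORACLE_MIN_JSON_NATIVE_VERSION and compatible_major >= ORACLE_MIN_JSON_NATIVE_COMPATIBLE:
            return JSONStorageType.JSON_NATIVE
        if major >= ORACLE_MIN_JSON_BLOB_VERSION:
            return JSONStorageType.BLOB_JSON
        return JSONStorageType.BLOB_PLAIN


    assert pick_storage_type("23.4.0.24.05", "23.0.0") is JSONStorageType.JSON_NATIVE
    assert pick_storage_type("19.0.0.0.0", "19.0.0") is JSONStorageType.BLOB_JSON
    assert pick_storage_type("11.2.0.4.0", "11.2.0") is JSONStorageType.BLOB_PLAIN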
|
|
|
1
|
+
"""Oracle ADK store for Google Agent Development Kit session/event storage."""
|
|
2
|
+
|
|
3
|
+
from enum import Enum
|
|
4
|
+
from typing import TYPE_CHECKING, Any, Final
|
|
5
|
+
|
|
6
|
+
import oracledb
|
|
7
|
+
|
|
8
|
+
from sqlspec import SQL
|
|
9
|
+
from sqlspec.extensions.adk import BaseAsyncADKStore, BaseSyncADKStore, EventRecord, SessionRecord
|
|
10
|
+
from sqlspec.utils.logging import get_logger
|
|
11
|
+
from sqlspec.utils.serializers import from_json, to_json
|
|
12
|
+
|
|
13
|
+
if TYPE_CHECKING:
|
|
14
|
+
from datetime import datetime
|
|
15
|
+
|
|
16
|
+
from sqlspec.adapters.oracledb.config import OracleAsyncConfig, OracleSyncConfig
|
|
17
|
+
|
|
18
|
+
logger = get_logger("adapters.oracledb.adk.store")
|
|
19
|
+
|
|
20
|
+
__all__ = ("OracleAsyncADKStore", "OracleSyncADKStore")
|
|
21
|
+
|
|
22
|
+
ORACLE_TABLE_NOT_FOUND_ERROR: Final = 942
|
|
23
|
+
ORACLE_MIN_JSON_NATIVE_VERSION: Final = 21
|
|
24
|
+
ORACLE_MIN_JSON_NATIVE_COMPATIBLE: Final = 20
|
|
25
|
+
ORACLE_MIN_JSON_BLOB_VERSION: Final = 12
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
class JSONStorageType(str, Enum):
|
|
29
|
+
"""JSON storage type based on Oracle version."""
|
|
30
|
+
|
|
31
|
+
JSON_NATIVE = "json"
|
|
32
|
+
BLOB_JSON = "blob_json"
|
|
33
|
+
BLOB_PLAIN = "blob_plain"
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def _to_oracle_bool(value: "bool | None") -> "int | None":
|
|
37
|
+
"""Convert Python boolean to Oracle NUMBER(1).
|
|
38
|
+
|
|
39
|
+
Args:
|
|
40
|
+
value: Python boolean value or None.
|
|
41
|
+
|
|
42
|
+
Returns:
|
|
43
|
+
1 for True, 0 for False, None for None.
|
|
44
|
+
"""
|
|
45
|
+
if value is None:
|
|
46
|
+
return None
|
|
47
|
+
return 1 if value else 0
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def _from_oracle_bool(value: "int | None") -> "bool | None":
|
|
51
|
+
"""Convert Oracle NUMBER(1) to Python boolean.
|
|
52
|
+
|
|
53
|
+
Args:
|
|
54
|
+
value: Oracle NUMBER value (0, 1, or None).
|
|
55
|
+
|
|
56
|
+
Returns:
|
|
57
|
+
Python boolean or None.
|
|
58
|
+
"""
|
|
59
|
+
if value is None:
|
|
60
|
+
return None
|
|
61
|
+
return bool(value)
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
class OracleAsyncADKStore(BaseAsyncADKStore["OracleAsyncConfig"]):
|
|
65
|
+
"""Oracle async ADK store using oracledb async driver.
|
|
66
|
+
|
|
67
|
+
Implements session and event storage for Google Agent Development Kit
|
|
68
|
+
using Oracle Database via the python-oracledb async driver. Provides:
|
|
69
|
+
- Session state management with version-specific JSON storage
|
|
70
|
+
- Event history tracking with BLOB-serialized actions
|
|
71
|
+
- TIMESTAMP WITH TIME ZONE for timezone-aware timestamps
|
|
72
|
+
- Foreign key constraints with cascade delete
|
|
73
|
+
- Efficient upserts using MERGE statement
|
|
74
|
+
|
|
75
|
+
Args:
|
|
76
|
+
config: OracleAsyncConfig with extension_config["adk"] settings.
|
|
77
|
+
|
|
78
|
+
Example:
|
|
79
|
+
from sqlspec.adapters.oracledb import OracleAsyncConfig
|
|
80
|
+
from sqlspec.adapters.oracledb.adk import OracleAsyncADKStore
|
|
81
|
+
|
|
82
|
+
config = OracleAsyncConfig(
|
|
83
|
+
pool_config={"dsn": "oracle://..."},
|
|
84
|
+
extension_config={
|
|
85
|
+
"adk": {
|
|
86
|
+
"session_table": "my_sessions",
|
|
87
|
+
"events_table": "my_events",
|
|
88
|
+
"owner_id_column": "tenant_id NUMBER(10) REFERENCES tenants(id)"
|
|
89
|
+
}
|
|
90
|
+
}
|
|
91
|
+
)
|
|
92
|
+
store = OracleAsyncADKStore(config)
|
|
93
|
+
await store.create_tables()
|
|
94
|
+
|
|
95
|
+
Notes:
|
|
96
|
+
- JSON storage type detected based on Oracle version (21c+, 12c+, legacy)
|
|
97
|
+
- BLOB for pre-serialized actions from Google ADK
|
|
98
|
+
- TIMESTAMP WITH TIME ZONE for timezone-aware timestamps
|
|
99
|
+
- NUMBER(1) for booleans (0/1/NULL)
|
|
100
|
+
- Named parameters using :param_name
|
|
101
|
+
- State merging handled at application level
|
|
102
|
+
- owner_id_column supports NUMBER, VARCHAR2, RAW for Oracle FK types
|
|
103
|
+
- Configuration is read from config.extension_config["adk"]
|
|
104
|
+
"""
|
|
105
|
+
|
|
106
|
+
__slots__ = ("_in_memory", "_json_storage_type")
|
|
107
|
+
|
|
108
|
+
def __init__(self, config: "OracleAsyncConfig") -> None:
|
|
109
|
+
"""Initialize Oracle ADK store.
|
|
110
|
+
|
|
111
|
+
Args:
|
|
112
|
+
config: OracleAsyncConfig instance.
|
|
113
|
+
|
|
114
|
+
Notes:
|
|
115
|
+
Configuration is read from config.extension_config["adk"]:
|
|
116
|
+
- session_table: Sessions table name (default: "adk_sessions")
|
|
117
|
+
- events_table: Events table name (default: "adk_events")
|
|
118
|
+
- owner_id_column: Optional owner FK column DDL (default: None)
|
|
119
|
+
- in_memory: Enable INMEMORY clause (default: False)
|
|
120
|
+
"""
|
|
121
|
+
super().__init__(config)
|
|
122
|
+
self._json_storage_type: JSONStorageType | None = None
|
|
123
|
+
|
|
124
|
+
if hasattr(config, "extension_config") and config.extension_config:
|
|
125
|
+
adk_config = config.extension_config.get("adk", {})
|
|
126
|
+
self._in_memory: bool = bool(adk_config.get("in_memory", False))
|
|
127
|
+
else:
|
|
128
|
+
self._in_memory = False
|
|
129
|
+
|
|
130
|
+
async def _detect_json_storage_type(self) -> JSONStorageType:
|
|
131
|
+
"""Detect the appropriate JSON storage type based on Oracle version.
|
|
132
|
+
|
|
133
|
+
Returns:
|
|
134
|
+
Appropriate JSONStorageType for this Oracle version.
|
|
135
|
+
|
|
136
|
+
Notes:
|
|
137
|
+
Queries product_component_version to determine Oracle version.
|
|
138
|
+
- Oracle 21c+ with compatible >= 20: Native JSON type
|
|
139
|
+
- Oracle 12c+: BLOB with IS JSON constraint (preferred)
|
|
140
|
+
- Oracle 11g and earlier: BLOB without constraint
|
|
141
|
+
|
|
142
|
+
BLOB is preferred over CLOB for 12c+ as per Oracle recommendations.
|
|
143
|
+
Result is cached in self._json_storage_type.
|
|
144
|
+
"""
|
|
145
|
+
if self._json_storage_type is not None:
|
|
146
|
+
return self._json_storage_type
|
|
147
|
+
|
|
148
|
+
async with self._config.provide_connection() as conn:
|
|
149
|
+
cursor = conn.cursor()
|
|
150
|
+
await cursor.execute(
|
|
151
|
+
"""
|
|
152
|
+
SELECT version FROM product_component_version
|
|
153
|
+
WHERE product LIKE 'Oracle%Database%'
|
|
154
|
+
"""
|
|
155
|
+
)
|
|
156
|
+
row = await cursor.fetchone()
|
|
157
|
+
|
|
158
|
+
if row is None:
|
|
159
|
+
logger.warning("Could not detect Oracle version, defaulting to BLOB_JSON")
|
|
160
|
+
self._json_storage_type = JSONStorageType.BLOB_JSON
|
|
161
|
+
return self._json_storage_type
|
|
162
|
+
|
|
163
|
+
version_str = str(row[0])
|
|
164
|
+
version_parts = version_str.split(".")
|
|
165
|
+
major_version = int(version_parts[0]) if version_parts else 0
|
|
166
|
+
|
|
167
|
+
if major_version >= ORACLE_MIN_JSON_NATIVE_VERSION:
|
|
168
|
+
await cursor.execute("SELECT value FROM v$parameter WHERE name = 'compatible'")
|
|
169
|
+
compatible_row = await cursor.fetchone()
|
|
170
|
+
if compatible_row:
|
|
171
|
+
compatible_parts = str(compatible_row[0]).split(".")
|
|
172
|
+
compatible_major = int(compatible_parts[0]) if compatible_parts else 0
|
|
173
|
+
if compatible_major >= ORACLE_MIN_JSON_NATIVE_COMPATIBLE:
|
|
174
|
+
logger.info("Detected Oracle %s with compatible >= 20, using JSON_NATIVE", version_str)
|
|
175
|
+
self._json_storage_type = JSONStorageType.JSON_NATIVE
|
|
176
|
+
return self._json_storage_type
|
|
177
|
+
|
|
178
|
+
if major_version >= ORACLE_MIN_JSON_BLOB_VERSION:
|
|
179
|
+
logger.info("Detected Oracle %s, using BLOB_JSON (recommended)", version_str)
|
|
180
|
+
self._json_storage_type = JSONStorageType.BLOB_JSON
|
|
181
|
+
return self._json_storage_type
|
|
182
|
+
|
|
183
|
+
logger.info("Detected Oracle %s (pre-12c), using BLOB_PLAIN", version_str)
|
|
184
|
+
self._json_storage_type = JSONStorageType.BLOB_PLAIN
|
|
185
|
+
return self._json_storage_type
|
|
186
|
+
|
|
187
|
+
async def _serialize_state(self, state: "dict[str, Any]") -> "str | bytes":
|
|
188
|
+
"""Serialize state dictionary to appropriate format based on storage type.
|
|
189
|
+
|
|
190
|
+
Args:
|
|
191
|
+
state: State dictionary to serialize.
|
|
192
|
+
|
|
193
|
+
Returns:
|
|
194
|
+
JSON string for JSON_NATIVE, bytes for BLOB types.
|
|
195
|
+
"""
|
|
196
|
+
storage_type = await self._detect_json_storage_type()
|
|
197
|
+
|
|
198
|
+
if storage_type == JSONStorageType.JSON_NATIVE:
|
|
199
|
+
return to_json(state)
|
|
200
|
+
|
|
201
|
+
return to_json(state, as_bytes=True)
|
|
202
|
+
|
|
203
|
+
async def _deserialize_state(self, data: Any) -> "dict[str, Any]":
|
|
204
|
+
"""Deserialize state data from database format.
|
|
205
|
+
|
|
206
|
+
Args:
|
|
207
|
+
data: Data from database (may be LOB, str, bytes, or dict).
|
|
208
|
+
|
|
209
|
+
Returns:
|
|
210
|
+
Deserialized state dictionary.
|
|
211
|
+
|
|
212
|
+
Notes:
|
|
213
|
+
Handles LOB reading if data has read() method.
|
|
214
|
+
Oracle JSON type may return dict directly.
|
|
215
|
+
"""
|
|
216
|
+
if hasattr(data, "read"):
|
|
217
|
+
data = await data.read()
|
|
218
|
+
|
|
219
|
+
if isinstance(data, dict):
|
|
220
|
+
return data
|
|
221
|
+
|
|
222
|
+
if isinstance(data, bytes):
|
|
223
|
+
return from_json(data) # type: ignore[no-any-return]
|
|
224
|
+
|
|
225
|
+
if isinstance(data, str):
|
|
226
|
+
return from_json(data) # type: ignore[no-any-return]
|
|
227
|
+
|
|
228
|
+
return from_json(str(data)) # type: ignore[no-any-return]
|
|
229
|
+
|
|
230
|
+
async def _serialize_json_field(self, value: Any) -> "str | bytes | None":
|
|
231
|
+
"""Serialize optional JSON field for event storage.
|
|
232
|
+
|
|
233
|
+
Args:
|
|
234
|
+
value: Value to serialize (dict or None).
|
|
235
|
+
|
|
236
|
+
Returns:
|
|
237
|
+
Serialized JSON or None.
|
|
238
|
+
"""
|
|
239
|
+
if value is None:
|
|
240
|
+
return None
|
|
241
|
+
|
|
242
|
+
storage_type = await self._detect_json_storage_type()
|
|
243
|
+
|
|
244
|
+
if storage_type == JSONStorageType.JSON_NATIVE:
|
|
245
|
+
return to_json(value)
|
|
246
|
+
|
|
247
|
+
return to_json(value, as_bytes=True)
|
|
248
|
+
|
|
249
|
+
async def _deserialize_json_field(self, data: Any) -> "dict[str, Any] | None":
|
|
250
|
+
"""Deserialize optional JSON field from database.
|
|
251
|
+
|
|
252
|
+
Args:
|
|
253
|
+
data: Data from database (may be LOB, str, bytes, dict, or None).
|
|
254
|
+
|
|
255
|
+
Returns:
|
|
256
|
+
Deserialized dictionary or None.
|
|
257
|
+
|
|
258
|
+
Notes:
|
|
259
|
+
Oracle JSON type may return dict directly.
|
|
260
|
+
"""
|
|
261
|
+
if data is None:
|
|
262
|
+
return None
|
|
263
|
+
|
|
264
|
+
if hasattr(data, "read"):
|
|
265
|
+
data = await data.read()
|
|
266
|
+
|
|
267
|
+
if isinstance(data, dict):
|
|
268
|
+
return data
|
|
269
|
+
|
|
270
|
+
if isinstance(data, bytes):
|
|
271
|
+
return from_json(data) # type: ignore[no-any-return]
|
|
272
|
+
|
|
273
|
+
if isinstance(data, str):
|
|
274
|
+
return from_json(data) # type: ignore[no-any-return]
|
|
275
|
+
|
|
276
|
+
return from_json(str(data)) # type: ignore[no-any-return]
|
|
277
|
+
|
|
278
|
+
def _get_create_sessions_table_sql_for_type(self, storage_type: JSONStorageType) -> str:
|
|
279
|
+
"""Get Oracle CREATE TABLE SQL for sessions with specified storage type.
|
|
280
|
+
|
|
281
|
+
Args:
|
|
282
|
+
storage_type: JSON storage type to use.
|
|
283
|
+
|
|
284
|
+
Returns:
|
|
285
|
+
SQL statement to create adk_sessions table.
|
|
286
|
+
"""
|
|
287
|
+
if storage_type == JSONStorageType.JSON_NATIVE:
|
|
288
|
+
state_column = "state JSON NOT NULL"
|
|
289
|
+
elif storage_type == JSONStorageType.BLOB_JSON:
|
|
290
|
+
state_column = "state BLOB CHECK (state IS JSON) NOT NULL"
|
|
291
|
+
else:
|
|
292
|
+
state_column = "state BLOB NOT NULL"
|
|
293
|
+
|
|
294
|
+
owner_id_column_sql = f", {self._owner_id_column_ddl}" if self._owner_id_column_ddl else ""
|
|
295
|
+
inmemory_clause = " INMEMORY" if self._in_memory else ""
|
|
296
|
+
|
|
297
|
+
return f"""
|
|
298
|
+
BEGIN
|
|
299
|
+
EXECUTE IMMEDIATE 'CREATE TABLE {self._session_table} (
|
|
300
|
+
id VARCHAR2(128) PRIMARY KEY,
|
|
301
|
+
app_name VARCHAR2(128) NOT NULL,
|
|
302
|
+
user_id VARCHAR2(128) NOT NULL,
|
|
303
|
+
{state_column},
|
|
304
|
+
create_time TIMESTAMP WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL,
|
|
305
|
+
update_time TIMESTAMP WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL{owner_id_column_sql}
|
|
306
|
+
){inmemory_clause}';
|
|
307
|
+
EXCEPTION
|
|
308
|
+
WHEN OTHERS THEN
|
|
309
|
+
IF SQLCODE != -955 THEN
|
|
310
|
+
RAISE;
|
|
311
|
+
END IF;
|
|
312
|
+
END;
|
|
313
|
+
|
|
314
|
+
BEGIN
|
|
315
|
+
EXECUTE IMMEDIATE 'CREATE INDEX idx_{self._session_table}_app_user
|
|
316
|
+
ON {self._session_table}(app_name, user_id)';
|
|
317
|
+
EXCEPTION
|
|
318
|
+
WHEN OTHERS THEN
|
|
319
|
+
IF SQLCODE != -955 THEN
|
|
320
|
+
RAISE;
|
|
321
|
+
END IF;
|
|
322
|
+
END;
|
|
323
|
+
|
|
324
|
+
BEGIN
|
|
325
|
+
EXECUTE IMMEDIATE 'CREATE INDEX idx_{self._session_table}_update_time
|
|
326
|
+
ON {self._session_table}(update_time DESC)';
|
|
327
|
+
EXCEPTION
|
|
328
|
+
WHEN OTHERS THEN
|
|
329
|
+
IF SQLCODE != -955 THEN
|
|
330
|
+
RAISE;
|
|
331
|
+
END IF;
|
|
332
|
+
END;
|
|
333
|
+
"""
|
|
334
|
+
|
|
335
|
+
def _get_create_events_table_sql_for_type(self, storage_type: JSONStorageType) -> str:
|
|
336
|
+
"""Get Oracle CREATE TABLE SQL for events with specified storage type.
|
|
337
|
+
|
|
338
|
+
Args:
|
|
339
|
+
storage_type: JSON storage type to use.
|
|
340
|
+
|
|
341
|
+
Returns:
|
|
342
|
+
SQL statement to create adk_events table.
|
|
343
|
+
"""
|
|
344
|
+
if storage_type == JSONStorageType.JSON_NATIVE:
|
|
345
|
+
json_columns = """
|
|
346
|
+
content JSON,
|
|
347
|
+
grounding_metadata JSON,
|
|
348
|
+
custom_metadata JSON,
|
|
349
|
+
long_running_tool_ids_json JSON
|
|
350
|
+
"""
|
|
351
|
+
elif storage_type == JSONStorageType.BLOB_JSON:
|
|
352
|
+
json_columns = """
|
|
353
|
+
content BLOB CHECK (content IS JSON),
|
|
354
|
+
grounding_metadata BLOB CHECK (grounding_metadata IS JSON),
|
|
355
|
+
custom_metadata BLOB CHECK (custom_metadata IS JSON),
|
|
356
|
+
long_running_tool_ids_json BLOB CHECK (long_running_tool_ids_json IS JSON)
|
|
357
|
+
"""
|
|
358
|
+
else:
|
|
359
|
+
json_columns = """
|
|
360
|
+
content BLOB,
|
|
361
|
+
grounding_metadata BLOB,
|
|
362
|
+
custom_metadata BLOB,
|
|
363
|
+
long_running_tool_ids_json BLOB
|
|
364
|
+
"""
|
|
365
|
+
|
|
366
|
+
inmemory_clause = " INMEMORY" if self._in_memory else ""
|
|
367
|
+
|
|
368
|
+
return f"""
|
|
369
|
+
BEGIN
|
|
370
|
+
EXECUTE IMMEDIATE 'CREATE TABLE {self._events_table} (
|
|
371
|
+
id VARCHAR2(128) PRIMARY KEY,
|
|
372
|
+
session_id VARCHAR2(128) NOT NULL,
|
|
373
|
+
app_name VARCHAR2(128) NOT NULL,
|
|
374
|
+
user_id VARCHAR2(128) NOT NULL,
|
|
375
|
+
invocation_id VARCHAR2(256),
|
|
376
|
+
author VARCHAR2(256),
|
|
377
|
+
actions BLOB,
|
|
378
|
+
branch VARCHAR2(256),
|
|
379
|
+
timestamp TIMESTAMP WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL,
|
|
380
|
+
{json_columns},
|
|
381
|
+
partial NUMBER(1),
|
|
382
|
+
turn_complete NUMBER(1),
|
|
383
|
+
interrupted NUMBER(1),
|
|
384
|
+
error_code VARCHAR2(256),
|
|
385
|
+
error_message VARCHAR2(1024),
|
|
386
|
+
CONSTRAINT fk_{self._events_table}_session FOREIGN KEY (session_id)
|
|
387
|
+
REFERENCES {self._session_table}(id) ON DELETE CASCADE
|
|
388
|
+
){inmemory_clause}';
|
|
389
|
+
EXCEPTION
|
|
390
|
+
WHEN OTHERS THEN
|
|
391
|
+
IF SQLCODE != -955 THEN
|
|
392
|
+
RAISE;
|
|
393
|
+
END IF;
|
|
394
|
+
END;
|
|
395
|
+
|
|
396
|
+
BEGIN
|
|
397
|
+
EXECUTE IMMEDIATE 'CREATE INDEX idx_{self._events_table}_session
|
|
398
|
+
ON {self._events_table}(session_id, timestamp ASC)';
|
|
399
|
+
EXCEPTION
|
|
400
|
+
WHEN OTHERS THEN
|
|
401
|
+
IF SQLCODE != -955 THEN
|
|
402
|
+
RAISE;
|
|
403
|
+
END IF;
|
|
404
|
+
END;
|
|
405
|
+
"""
|
|
406
|
+
|
|
407
|
+
def _get_create_sessions_table_sql(self) -> str:
|
|
408
|
+
"""Get Oracle CREATE TABLE SQL for sessions.
|
|
409
|
+
|
|
410
|
+
Returns:
|
|
411
|
+
SQL statement to create adk_sessions table with indexes.
|
|
412
|
+
|
|
413
|
+
Notes:
|
|
414
|
+
- VARCHAR2(128) for IDs and names
|
|
415
|
+
- CLOB with IS JSON constraint for state storage
|
|
416
|
+
- TIMESTAMP WITH TIME ZONE for timezone-aware timestamps
|
|
417
|
+
- SYSTIMESTAMP for default current timestamp
|
|
418
|
+
- Composite index on (app_name, user_id) for listing
|
|
419
|
+
- Index on update_time DESC for recent session queries
|
|
420
|
+
"""
|
|
421
|
+
return f"""
|
|
422
|
+
BEGIN
|
|
423
|
+
EXECUTE IMMEDIATE 'CREATE TABLE {self._session_table} (
|
|
424
|
+
id VARCHAR2(128) PRIMARY KEY,
|
|
425
|
+
app_name VARCHAR2(128) NOT NULL,
|
|
426
|
+
user_id VARCHAR2(128) NOT NULL,
|
|
427
|
+
state CLOB CHECK (state IS JSON),
|
|
428
|
+
create_time TIMESTAMP WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL,
|
|
429
|
+
update_time TIMESTAMP WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL
|
|
430
|
+
)';
|
|
431
|
+
EXCEPTION
|
|
432
|
+
WHEN OTHERS THEN
|
|
433
|
+
IF SQLCODE != -955 THEN
|
|
434
|
+
RAISE;
|
|
435
|
+
END IF;
|
|
436
|
+
END;
|
|
437
|
+
"""
|
|
438
|
+
|
|
439
|
+
def _get_create_events_table_sql(self) -> str:
|
|
440
|
+
"""Get Oracle CREATE TABLE SQL for events (legacy method).
|
|
441
|
+
|
|
442
|
+
Returns:
|
|
443
|
+
SQL statement to create adk_events table with indexes.
|
|
444
|
+
|
|
445
|
+
Notes:
|
|
446
|
+
DEPRECATED: Use _get_create_events_table_sql_for_type() instead.
|
|
447
|
+
This method uses BLOB with IS JSON constraints (12c+ compatible).
|
|
448
|
+
|
|
449
|
+
- VARCHAR2 sizes: id(128), session_id(128), invocation_id(256), author(256),
|
|
450
|
+
branch(256), error_code(256), error_message(1024)
|
|
451
|
+
- BLOB for pickled actions
|
|
452
|
+
- BLOB with IS JSON for all JSON fields (content, grounding_metadata,
|
|
453
|
+
custom_metadata, long_running_tool_ids_json)
|
|
454
|
+
- NUMBER(1) for partial, turn_complete, interrupted
|
|
455
|
+
- Foreign key to sessions with CASCADE delete
|
|
456
|
+
- Index on (session_id, timestamp ASC) for ordered event retrieval
|
|
457
|
+
"""
|
|
458
|
+
return f"""
|
|
459
|
+
BEGIN
|
|
460
|
+
EXECUTE IMMEDIATE 'CREATE TABLE {self._events_table} (
|
|
461
|
+
id VARCHAR2(128) PRIMARY KEY,
|
|
462
|
+
session_id VARCHAR2(128) NOT NULL,
|
|
463
|
+
app_name VARCHAR2(128) NOT NULL,
|
|
464
|
+
user_id VARCHAR2(128) NOT NULL,
|
|
465
|
+
invocation_id VARCHAR2(256),
|
|
466
|
+
author VARCHAR2(256),
|
|
467
|
+
actions BLOB,
|
|
468
|
+
branch VARCHAR2(256),
|
|
469
|
+
timestamp TIMESTAMP WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL,
|
|
470
|
+
content BLOB CHECK (content IS JSON),
|
|
471
|
+
grounding_metadata BLOB CHECK (grounding_metadata IS JSON),
|
|
472
|
+
custom_metadata BLOB CHECK (custom_metadata IS JSON),
|
|
473
|
+
long_running_tool_ids_json BLOB CHECK (long_running_tool_ids_json IS JSON),
|
|
474
|
+
partial NUMBER(1),
|
|
475
|
+
turn_complete NUMBER(1),
|
|
476
|
+
interrupted NUMBER(1),
|
|
477
|
+
error_code VARCHAR2(256),
|
|
478
|
+
error_message VARCHAR2(1024),
|
|
479
|
+
CONSTRAINT fk_{self._events_table}_session FOREIGN KEY (session_id)
|
|
480
|
+
REFERENCES {self._session_table}(id) ON DELETE CASCADE
|
|
481
|
+
)';
|
|
482
|
+
EXCEPTION
|
|
483
|
+
WHEN OTHERS THEN
|
|
484
|
+
IF SQLCODE != -955 THEN
|
|
485
|
+
RAISE;
|
|
486
|
+
END IF;
|
|
487
|
+
END;
|
|
488
|
+
"""
|
|
489
|
+
|
|
490
|
+
def _get_drop_tables_sql(self) -> "list[str]":
|
|
491
|
+
"""Get Oracle DROP TABLE SQL statements.
|
|
492
|
+
|
|
493
|
+
Returns:
|
|
494
|
+
List of SQL statements to drop tables and indexes.
|
|
495
|
+
|
|
496
|
+
Notes:
|
|
497
|
+
Order matters: drop events table (child) before sessions (parent).
|
|
498
|
+
Oracle automatically drops indexes when dropping tables.
|
|
499
|
+
"""
|
|
500
|
+
return [
|
|
501
|
+
f"""
|
|
502
|
+
BEGIN
|
|
503
|
+
EXECUTE IMMEDIATE 'DROP INDEX idx_{self._events_table}_session';
|
|
504
|
+
EXCEPTION
|
|
505
|
+
WHEN OTHERS THEN
|
|
506
|
+
IF SQLCODE != -1418 THEN
|
|
507
|
+
RAISE;
|
|
508
|
+
END IF;
|
|
509
|
+
END;
|
|
510
|
+
""",
|
|
511
|
+
f"""
|
|
512
|
+
BEGIN
|
|
513
|
+
EXECUTE IMMEDIATE 'DROP INDEX idx_{self._session_table}_update_time';
|
|
514
|
+
EXCEPTION
|
|
515
|
+
WHEN OTHERS THEN
|
|
516
|
+
IF SQLCODE != -1418 THEN
|
|
517
|
+
RAISE;
|
|
518
|
+
END IF;
|
|
519
|
+
END;
|
|
520
|
+
""",
|
|
521
|
+
f"""
|
|
522
|
+
BEGIN
|
|
523
|
+
EXECUTE IMMEDIATE 'DROP INDEX idx_{self._session_table}_app_user';
|
|
524
|
+
EXCEPTION
|
|
525
|
+
WHEN OTHERS THEN
|
|
526
|
+
IF SQLCODE != -1418 THEN
|
|
527
|
+
RAISE;
|
|
528
|
+
END IF;
|
|
529
|
+
END;
|
|
530
|
+
""",
|
|
531
|
+
f"""
|
|
532
|
+
BEGIN
|
|
533
|
+
EXECUTE IMMEDIATE 'DROP TABLE {self._events_table}';
|
|
534
|
+
EXCEPTION
|
|
535
|
+
WHEN OTHERS THEN
|
|
536
|
+
IF SQLCODE != -942 THEN
|
|
537
|
+
RAISE;
|
|
538
|
+
END IF;
|
|
539
|
+
END;
|
|
540
|
+
""",
|
|
541
|
+
f"""
|
|
542
|
+
BEGIN
|
|
543
|
+
EXECUTE IMMEDIATE 'DROP TABLE {self._session_table}';
|
|
544
|
+
EXCEPTION
|
|
545
|
+
WHEN OTHERS THEN
|
|
546
|
+
IF SQLCODE != -942 THEN
|
|
547
|
+
RAISE;
|
|
548
|
+
END IF;
|
|
549
|
+
END;
|
|
550
|
+
""",
|
|
551
|
+
]
|
|
552
|
+
|
|
553
|
+
async def create_tables(self) -> None:
|
|
554
|
+
"""Create both sessions and events tables if they don't exist.
|
|
555
|
+
|
|
556
|
+
Notes:
|
|
557
|
+
Detects Oracle version to determine optimal JSON storage type.
|
|
558
|
+
Uses version-appropriate table schema.
|
|
559
|
+
"""
|
|
560
|
+
storage_type = await self._detect_json_storage_type()
|
|
561
|
+
logger.info("Creating ADK tables with storage type: %s", storage_type)
|
|
562
|
+
|
|
563
|
+
async with self._config.provide_session() as driver:
|
|
564
|
+
sessions_sql = SQL(self._get_create_sessions_table_sql_for_type(storage_type))
|
|
565
|
+
await driver.execute_script(sessions_sql)
|
|
566
|
+
|
|
567
|
+
await driver.execute_script(self._get_create_events_table_sql_for_type(storage_type))
|
|
568
|
+
|
|
569
|
+
logger.debug("Created ADK tables: %s, %s", self._session_table, self._events_table)
|
|
570
|
+
|
|
571
|
+
async def create_session(
|
|
572
|
+
self, session_id: str, app_name: str, user_id: str, state: "dict[str, Any]", owner_id: "Any | None" = None
|
|
573
|
+
) -> SessionRecord:
|
|
574
|
+
"""Create a new session.
|
|
575
|
+
|
|
576
|
+
Args:
|
|
577
|
+
session_id: Unique session identifier.
|
|
578
|
+
app_name: Application name.
|
|
579
|
+
user_id: User identifier.
|
|
580
|
+
state: Initial session state.
|
|
581
|
+
owner_id: Optional owner ID value for owner_id_column (if configured).
|
|
582
|
+
|
|
583
|
+
Returns:
|
|
584
|
+
Created session record.
|
|
585
|
+
|
|
586
|
+
Notes:
|
|
587
|
+
Uses SYSTIMESTAMP for create_time and update_time.
|
|
588
|
+
State is serialized using version-appropriate format.
|
|
589
|
+
owner_id is ignored if owner_id_column not configured.
|
|
590
|
+
"""
|
|
591
|
+
state_data = await self._serialize_state(state)
|
|
592
|
+
|
|
593
|
+
if self._owner_id_column_name:
|
|
594
|
+
sql = f"""
|
|
595
|
+
INSERT INTO {self._session_table} (id, app_name, user_id, state, create_time, update_time, {self._owner_id_column_name})
|
|
596
|
+
VALUES (:id, :app_name, :user_id, :state, SYSTIMESTAMP, SYSTIMESTAMP, :owner_id)
|
|
597
|
+
"""
|
|
598
|
+
params = {
|
|
599
|
+
"id": session_id,
|
|
600
|
+
"app_name": app_name,
|
|
601
|
+
"user_id": user_id,
|
|
602
|
+
"state": state_data,
|
|
603
|
+
"owner_id": owner_id,
|
|
604
|
+
}
|
|
605
|
+
else:
|
|
606
|
+
sql = f"""
|
|
607
|
+
INSERT INTO {self._session_table} (id, app_name, user_id, state, create_time, update_time)
|
|
608
|
+
VALUES (:id, :app_name, :user_id, :state, SYSTIMESTAMP, SYSTIMESTAMP)
|
|
609
|
+
"""
|
|
610
|
+
params = {"id": session_id, "app_name": app_name, "user_id": user_id, "state": state_data}
|
|
611
|
+
|
|
612
|
+
async with self._config.provide_connection() as conn:
|
|
613
|
+
cursor = conn.cursor()
|
|
614
|
+
await cursor.execute(sql, params)
|
|
615
|
+
await conn.commit()
|
|
616
|
+
|
|
617
|
+
return await self.get_session(session_id) # type: ignore[return-value]
|
|
618
|
+
|
|
619
|
+
async def get_session(self, session_id: str) -> "SessionRecord | None":
|
|
620
|
+
"""Get session by ID.
|
|
621
|
+
|
|
622
|
+
Args:
|
|
623
|
+
session_id: Session identifier.
|
|
624
|
+
|
|
625
|
+
Returns:
|
|
626
|
+
Session record or None if not found.
|
|
627
|
+
|
|
628
|
+
Notes:
|
|
629
|
+
Oracle returns datetime objects for TIMESTAMP columns.
|
|
630
|
+
State is deserialized using version-appropriate format.
|
|
631
|
+
"""
|
|
632
|
+
|
|
633
|
+
sql = f"""
|
|
634
|
+
SELECT id, app_name, user_id, state, create_time, update_time
|
|
635
|
+
FROM {self._session_table}
|
|
636
|
+
WHERE id = :id
|
|
637
|
+
"""
|
|
638
|
+
|
|
639
|
+
try:
|
|
640
|
+
async with self._config.provide_connection() as conn:
|
|
641
|
+
cursor = conn.cursor()
|
|
642
|
+
await cursor.execute(sql, {"id": session_id})
|
|
643
|
+
row = await cursor.fetchone()
|
|
644
|
+
|
|
645
|
+
if row is None:
|
|
646
|
+
return None
|
|
647
|
+
|
|
648
|
+
session_id_val, app_name, user_id, state_data, create_time, update_time = row
|
|
649
|
+
|
|
650
|
+
state = await self._deserialize_state(state_data)
|
|
651
|
+
|
|
652
|
+
return SessionRecord(
|
|
653
|
+
id=session_id_val,
|
|
654
|
+
app_name=app_name,
|
|
655
|
+
user_id=user_id,
|
|
656
|
+
state=state,
|
|
657
|
+
create_time=create_time,
|
|
658
|
+
update_time=update_time,
|
|
659
|
+
)
|
|
660
|
+
except oracledb.DatabaseError as e:
|
|
661
|
+
error_obj = e.args[0] if e.args else None
|
|
662
|
+
if error_obj and error_obj.code == ORACLE_TABLE_NOT_FOUND_ERROR:
|
|
663
|
+
return None
|
|
664
|
+
raise
|
|
665
|
+
|
|
666
|
+
async def update_session_state(self, session_id: str, state: "dict[str, Any]") -> None:
|
|
667
|
+
"""Update session state.
|
|
668
|
+
|
|
669
|
+
Args:
|
|
670
|
+
session_id: Session identifier.
|
|
671
|
+
state: New state dictionary (replaces existing state).
|
|
672
|
+
|
|
673
|
+
Notes:
|
|
674
|
+
This replaces the entire state dictionary.
|
|
675
|
+
Updates update_time to current timestamp.
|
|
676
|
+
State is serialized using version-appropriate format.
|
|
677
|
+
"""
|
|
678
|
+
state_data = await self._serialize_state(state)
|
|
679
|
+
|
|
680
|
+
sql = f"""
|
|
681
|
+
UPDATE {self._session_table}
|
|
682
|
+
SET state = :state, update_time = SYSTIMESTAMP
|
|
683
|
+
WHERE id = :id
|
|
684
|
+
"""
|
|
685
|
+
|
|
686
|
+
async with self._config.provide_connection() as conn:
|
|
687
|
+
cursor = conn.cursor()
|
|
688
|
+
await cursor.execute(sql, {"state": state_data, "id": session_id})
|
|
689
|
+
await conn.commit()
|
|
690
|
+
|
|
691
|
+
async def delete_session(self, session_id: str) -> None:
|
|
692
|
+
"""Delete session and all associated events (cascade).
|
|
693
|
+
|
|
694
|
+
Args:
|
|
695
|
+
session_id: Session identifier.
|
|
696
|
+
|
|
697
|
+
Notes:
|
|
698
|
+
Foreign key constraint ensures events are cascade-deleted.
|
|
699
|
+
"""
|
|
700
|
+
sql = f"DELETE FROM {self._session_table} WHERE id = :id"
|
|
701
|
+
|
|
702
|
+
async with self._config.provide_connection() as conn:
|
|
703
|
+
cursor = conn.cursor()
|
|
704
|
+
await cursor.execute(sql, {"id": session_id})
|
|
705
|
+
await conn.commit()
|
|
706
|
+
|
|
707
|
+
async def list_sessions(self, app_name: str, user_id: str) -> "list[SessionRecord]":
|
|
708
|
+
"""List all sessions for a user in an app.
|
|
709
|
+
|
|
710
|
+
Args:
|
|
711
|
+
app_name: Application name.
|
|
712
|
+
user_id: User identifier.
|
|
713
|
+
|
|
714
|
+
Returns:
|
|
715
|
+
List of session records ordered by update_time DESC.
|
|
716
|
+
|
|
717
|
+
Notes:
|
|
718
|
+
Uses composite index on (app_name, user_id).
|
|
719
|
+
State is deserialized using version-appropriate format.
|
|
720
|
+
"""
|
|
721
|
+
|
|
722
|
+
sql = f"""
|
|
723
|
+
SELECT id, app_name, user_id, state, create_time, update_time
|
|
724
|
+
FROM {self._session_table}
|
|
725
|
+
WHERE app_name = :app_name AND user_id = :user_id
|
|
726
|
+
ORDER BY update_time DESC
|
|
727
|
+
"""
|
|
728
|
+
|
|
729
|
+
try:
|
|
730
|
+
async with self._config.provide_connection() as conn:
|
|
731
|
+
cursor = conn.cursor()
|
|
732
|
+
await cursor.execute(sql, {"app_name": app_name, "user_id": user_id})
|
|
733
|
+
rows = await cursor.fetchall()
|
|
734
|
+
|
|
735
|
+
results = []
|
|
736
|
+
for row in rows:
|
|
737
|
+
state = await self._deserialize_state(row[3])
|
|
738
|
+
|
|
739
|
+
results.append(
|
|
740
|
+
SessionRecord(
|
|
741
|
+
id=row[0],
|
|
742
|
+
app_name=row[1],
|
|
743
|
+
user_id=row[2],
|
|
744
|
+
state=state,
|
|
745
|
+
create_time=row[4],
|
|
746
|
+
update_time=row[5],
|
|
747
|
+
)
|
|
748
|
+
)
|
|
749
|
+
return results
|
|
750
|
+
except oracledb.DatabaseError as e:
|
|
751
|
+
error_obj = e.args[0] if e.args else None
|
|
752
|
+
if error_obj and error_obj.code == ORACLE_TABLE_NOT_FOUND_ERROR:
|
|
753
|
+
return []
|
|
754
|
+
raise
|
|
755
|
+
|
|
756
|
+
async def append_event(self, event_record: EventRecord) -> None:
|
|
757
|
+
"""Append an event to a session.
|
|
758
|
+
|
|
759
|
+
Args:
|
|
760
|
+
event_record: Event record to store.
|
|
761
|
+
|
|
762
|
+
Notes:
|
|
763
|
+
Uses SYSTIMESTAMP for timestamp if not provided.
|
|
764
|
+
JSON fields are serialized using version-appropriate format.
|
|
765
|
+
Boolean fields are converted to NUMBER(1).
|
|
766
|
+
"""
|
|
767
|
+
content_data = await self._serialize_json_field(event_record.get("content"))
|
|
768
|
+
grounding_metadata_data = await self._serialize_json_field(event_record.get("grounding_metadata"))
|
|
769
|
+
custom_metadata_data = await self._serialize_json_field(event_record.get("custom_metadata"))
|
|
770
|
+
|
|
771
|
+
sql = f"""
|
|
772
|
+
INSERT INTO {self._events_table} (
|
|
773
|
+
id, session_id, app_name, user_id, invocation_id, author, actions,
|
|
774
|
+
long_running_tool_ids_json, branch, timestamp, content,
|
|
775
|
+
grounding_metadata, custom_metadata, partial, turn_complete,
|
|
776
|
+
interrupted, error_code, error_message
|
|
777
|
+
) VALUES (
|
|
778
|
+
:id, :session_id, :app_name, :user_id, :invocation_id, :author, :actions,
|
|
779
|
+
:long_running_tool_ids_json, :branch, :timestamp, :content,
|
|
780
|
+
:grounding_metadata, :custom_metadata, :partial, :turn_complete,
|
|
781
|
+
:interrupted, :error_code, :error_message
|
|
782
|
+
)
|
|
783
|
+
"""
|
|
784
|
+
|
|
785
|
+
async with self._config.provide_connection() as conn:
|
|
786
|
+
cursor = conn.cursor()
|
|
787
|
+
await cursor.execute(
|
|
788
|
+
sql,
|
|
789
|
+
{
|
|
790
|
+
"id": event_record["id"],
|
|
791
|
+
"session_id": event_record["session_id"],
|
|
792
|
+
"app_name": event_record["app_name"],
|
|
793
|
+
"user_id": event_record["user_id"],
|
|
794
|
+
"invocation_id": event_record["invocation_id"],
|
|
795
|
+
"author": event_record["author"],
|
|
796
|
+
"actions": event_record["actions"],
|
|
797
|
+
"long_running_tool_ids_json": event_record.get("long_running_tool_ids_json"),
|
|
798
|
+
"branch": event_record.get("branch"),
|
|
799
|
+
"timestamp": event_record["timestamp"],
|
|
800
|
+
"content": content_data,
|
|
801
|
+
"grounding_metadata": grounding_metadata_data,
|
|
802
|
+
"custom_metadata": custom_metadata_data,
|
|
803
|
+
"partial": _to_oracle_bool(event_record.get("partial")),
|
|
804
|
+
"turn_complete": _to_oracle_bool(event_record.get("turn_complete")),
|
|
805
|
+
"interrupted": _to_oracle_bool(event_record.get("interrupted")),
|
|
806
|
+
"error_code": event_record.get("error_code"),
|
|
807
|
+
"error_message": event_record.get("error_message"),
|
|
808
|
+
},
|
|
809
|
+
)
|
|
810
|
+
await conn.commit()
|
|
811
|
+
|
|
812
|
+
async def get_events(
|
|
813
|
+
self, session_id: str, after_timestamp: "datetime | None" = None, limit: "int | None" = None
|
|
814
|
+
) -> "list[EventRecord]":
|
|
815
|
+
"""Get events for a session.
|
|
816
|
+
|
|
817
|
+
Args:
|
|
818
|
+
session_id: Session identifier.
|
|
819
|
+
after_timestamp: Only return events after this time.
|
|
820
|
+
limit: Maximum number of events to return.
|
|
821
|
+
|
|
822
|
+
Returns:
|
|
823
|
+
List of event records ordered by timestamp ASC.
|
|
824
|
+
|
|
825
|
+
Notes:
|
|
826
|
+
Uses index on (session_id, timestamp ASC).
|
|
827
|
+
JSON fields deserialized using version-appropriate format.
|
|
828
|
+
Converts BLOB actions to bytes and NUMBER(1) booleans to Python bool.
|
|
829
|
+
"""
|
|
830
|
+
|
|
831
|
+
where_clauses = ["session_id = :session_id"]
|
|
832
|
+
params: dict[str, Any] = {"session_id": session_id}
|
|
833
|
+
|
|
834
|
+
if after_timestamp is not None:
|
|
835
|
+
where_clauses.append("timestamp > :after_timestamp")
|
|
836
|
+
params["after_timestamp"] = after_timestamp
|
|
837
|
+
|
|
838
|
+
where_clause = " AND ".join(where_clauses)
|
|
839
|
+
limit_clause = ""
|
|
840
|
+
if limit:
|
|
841
|
+
limit_clause = f" FETCH FIRST {limit} ROWS ONLY"
|
|
842
|
+
|
|
843
|
+
sql = f"""
|
|
844
|
+
SELECT id, session_id, app_name, user_id, invocation_id, author, actions,
|
|
845
|
+
long_running_tool_ids_json, branch, timestamp, content,
|
|
846
|
+
grounding_metadata, custom_metadata, partial, turn_complete,
|
|
847
|
+
interrupted, error_code, error_message
|
|
848
|
+
FROM {self._events_table}
|
|
849
|
+
WHERE {where_clause}
|
|
850
|
+
ORDER BY timestamp ASC{limit_clause}
|
|
851
|
+
"""
|
|
852
|
+
|
|
853
|
+
try:
|
|
854
|
+
async with self._config.provide_connection() as conn:
|
|
855
|
+
cursor = conn.cursor()
|
|
856
|
+
await cursor.execute(sql, params)
|
|
857
|
+
rows = await cursor.fetchall()
|
|
858
|
+
|
|
859
|
+
results = []
|
|
860
|
+
for row in rows:
|
|
861
|
+
actions_blob = row[6]
|
|
862
|
+
if hasattr(actions_blob, "read"):
|
|
863
|
+
actions_data = await actions_blob.read()
|
|
864
|
+
else:
|
|
865
|
+
actions_data = actions_blob
|
|
866
|
+
|
|
867
|
+
content = await self._deserialize_json_field(row[10])
|
|
868
|
+
grounding_metadata = await self._deserialize_json_field(row[11])
|
|
869
|
+
custom_metadata = await self._deserialize_json_field(row[12])
|
|
870
|
+
|
|
871
|
+
results.append(
|
|
872
|
+
EventRecord(
|
|
873
|
+
id=row[0],
|
|
874
|
+
session_id=row[1],
|
|
875
|
+
app_name=row[2],
|
|
876
|
+
user_id=row[3],
|
|
877
|
+
invocation_id=row[4],
|
|
878
|
+
author=row[5],
|
|
879
|
+
actions=bytes(actions_data) if actions_data is not None else b"",
|
|
880
|
+
long_running_tool_ids_json=row[7],
|
|
881
|
+
branch=row[8],
|
|
882
|
+
timestamp=row[9],
|
|
883
|
+
content=content,
|
|
884
|
+
grounding_metadata=grounding_metadata,
|
|
885
|
+
custom_metadata=custom_metadata,
|
|
886
|
+
partial=_from_oracle_bool(row[13]),
|
|
887
|
+
turn_complete=_from_oracle_bool(row[14]),
|
|
888
|
+
interrupted=_from_oracle_bool(row[15]),
|
|
889
|
+
error_code=row[16],
|
|
890
|
+
error_message=row[17],
|
|
891
|
+
)
|
|
892
|
+
)
|
|
893
|
+
return results
|
|
894
|
+
except oracledb.DatabaseError as e:
|
|
895
|
+
error_obj = e.args[0] if e.args else None
|
|
896
|
+
if error_obj and error_obj.code == ORACLE_TABLE_NOT_FOUND_ERROR:
|
|
897
|
+
return []
|
|
898
|
+
raise
|
|
899
|
+
|
|
900
|
+
|
|
901
|
+
class OracleSyncADKStore(BaseSyncADKStore["OracleSyncConfig"]):
|
|
902
|
+
"""Oracle synchronous ADK store using oracledb sync driver.
|
|
903
|
+
|
|
904
|
+
Implements session and event storage for Google Agent Development Kit
|
|
905
|
+
using Oracle Database via the python-oracledb synchronous driver. Provides:
|
|
906
|
+
- Session state management with version-specific JSON storage
|
|
907
|
+
- Event history tracking with BLOB-serialized actions
|
|
908
|
+
- TIMESTAMP WITH TIME ZONE for timezone-aware timestamps
|
|
909
|
+
- Foreign key constraints with cascade delete
|
|
910
|
+
- Efficient upserts using MERGE statement
|
|
911
|
+
|
|
912
|
+
Args:
|
|
913
|
+
config: OracleSyncConfig with extension_config["adk"] settings.
|
|
914
|
+
|
|
915
|
+
Example:
|
|
916
|
+
from sqlspec.adapters.oracledb import OracleSyncConfig
|
|
917
|
+
from sqlspec.adapters.oracledb.adk import OracleSyncADKStore
|
|
918
|
+
|
|
919
|
+
config = OracleSyncConfig(
|
|
920
|
+
pool_config={"dsn": "oracle://..."},
|
|
921
|
+
extension_config={
|
|
922
|
+
"adk": {
|
|
923
|
+
"session_table": "my_sessions",
|
|
924
|
+
"events_table": "my_events",
|
|
925
|
+
"owner_id_column": "account_id NUMBER(19) REFERENCES accounts(id)"
|
|
926
|
+
}
|
|
927
|
+
}
|
|
928
|
+
)
|
|
929
|
+
store = OracleSyncADKStore(config)
|
|
930
|
+
store.create_tables()
|
|
931
|
+
|
|
932
|
+
Notes:
|
|
933
|
+
- JSON storage type detected based on Oracle version (21c+, 12c+, legacy)
|
|
934
|
+
- BLOB for pre-serialized actions from Google ADK
|
|
935
|
+
- TIMESTAMP WITH TIME ZONE for timezone-aware timestamps
|
|
936
|
+
- NUMBER(1) for booleans (0/1/NULL)
|
|
937
|
+
- Named parameters using :param_name
|
|
938
|
+
- State merging handled at application level
|
|
939
|
+
- owner_id_column supports NUMBER, VARCHAR2, RAW for Oracle FK types
|
|
940
|
+
- Configuration is read from config.extension_config["adk"]
|
|
941
|
+
"""
|
|
942
|
+
|
|
943
|
+
__slots__ = ("_in_memory", "_json_storage_type")
|
|
944
|
+
|
|
945
|
+
def __init__(self, config: "OracleSyncConfig") -> None:
|
|
946
|
+
"""Initialize Oracle synchronous ADK store.
|
|
947
|
+
|
|
948
|
+
Args:
|
|
949
|
+
config: OracleSyncConfig instance.
|
|
950
|
+
|
|
951
|
+
Notes:
|
|
952
|
+
Configuration is read from config.extension_config["adk"]:
|
|
953
|
+
- session_table: Sessions table name (default: "adk_sessions")
|
|
954
|
+
- events_table: Events table name (default: "adk_events")
|
|
955
|
+
- owner_id_column: Optional owner FK column DDL (default: None)
|
|
956
|
+
- in_memory: Enable INMEMORY clause (default: False)
|
|
957
|
+
"""
|
|
958
|
+
super().__init__(config)
|
|
959
|
+
self._json_storage_type: JSONStorageType | None = None
|
|
960
|
+
|
|
961
|
+
if hasattr(config, "extension_config") and config.extension_config:
|
|
962
|
+
adk_config = config.extension_config.get("adk", {})
|
|
963
|
+
self._in_memory: bool = bool(adk_config.get("in_memory", False))
|
|
964
|
+
else:
|
|
965
|
+
self._in_memory = False
|
|
966
|
+
|
|
967
|
+
def _detect_json_storage_type(self) -> JSONStorageType:
|
|
968
|
+
"""Detect the appropriate JSON storage type based on Oracle version.
|
|
969
|
+
|
|
970
|
+
Returns:
|
|
971
|
+
Appropriate JSONStorageType for this Oracle version.
|
|
972
|
+
|
|
973
|
+
Notes:
|
|
974
|
+
Queries product_component_version to determine Oracle version.
|
|
975
|
+
- Oracle 21c+ with compatible >= 20: Native JSON type
|
|
976
|
+
- Oracle 12c+: BLOB with IS JSON constraint (preferred)
|
|
977
|
+
- Oracle 11g and earlier: BLOB without constraint
|
|
978
|
+
|
|
979
|
+
BLOB is preferred over CLOB for 12c+ as per Oracle recommendations.
|
|
980
|
+
Result is cached in self._json_storage_type.
|
|
981
|
+
"""
|
|
982
|
+
if self._json_storage_type is not None:
|
|
983
|
+
return self._json_storage_type
|
|
984
|
+
|
|
985
|
+
with self._config.provide_connection() as conn:
|
|
986
|
+
cursor = conn.cursor()
|
|
987
|
+
cursor.execute(
|
|
988
|
+
"""
|
|
989
|
+
SELECT version FROM product_component_version
|
|
990
|
+
WHERE product LIKE 'Oracle%Database%'
|
|
991
|
+
"""
|
|
992
|
+
)
|
|
993
|
+
row = cursor.fetchone()
|
|
994
|
+
|
|
995
|
+
if row is None:
|
|
996
|
+
logger.warning("Could not detect Oracle version, defaulting to BLOB_JSON")
|
|
997
|
+
self._json_storage_type = JSONStorageType.BLOB_JSON
|
|
998
|
+
return self._json_storage_type
|
|
999
|
+
|
|
1000
|
+
version_str = str(row[0])
|
|
1001
|
+
version_parts = version_str.split(".")
|
|
1002
|
+
major_version = int(version_parts[0]) if version_parts else 0
|
|
1003
|
+
|
|
1004
|
+
if major_version >= ORACLE_MIN_JSON_NATIVE_VERSION:
|
|
1005
|
+
cursor.execute("SELECT value FROM v$parameter WHERE name = 'compatible'")
|
|
1006
|
+
compatible_row = cursor.fetchone()
|
|
1007
|
+
if compatible_row:
|
|
1008
|
+
compatible_parts = str(compatible_row[0]).split(".")
|
|
1009
|
+
compatible_major = int(compatible_parts[0]) if compatible_parts else 0
|
|
1010
|
+
if compatible_major >= ORACLE_MIN_JSON_NATIVE_COMPATIBLE:
|
|
1011
|
+
logger.info("Detected Oracle %s with compatible >= 20, using JSON_NATIVE", version_str)
|
|
1012
|
+
self._json_storage_type = JSONStorageType.JSON_NATIVE
|
|
1013
|
+
return self._json_storage_type
|
|
1014
|
+
|
|
1015
|
+
if major_version >= ORACLE_MIN_JSON_BLOB_VERSION:
|
|
1016
|
+
logger.info("Detected Oracle %s, using BLOB_JSON (recommended)", version_str)
|
|
1017
|
+
self._json_storage_type = JSONStorageType.BLOB_JSON
|
|
1018
|
+
return self._json_storage_type
|
|
1019
|
+
|
|
1020
|
+
logger.info("Detected Oracle %s (pre-12c), using BLOB_PLAIN", version_str)
|
|
1021
|
+
self._json_storage_type = JSONStorageType.BLOB_PLAIN
|
|
1022
|
+
return self._json_storage_type
|
|
1023
|
+
|
|
1024
|
+
def _serialize_state(self, state: "dict[str, Any]") -> "str | bytes":
|
|
1025
|
+
"""Serialize state dictionary to appropriate format based on storage type.
|
|
1026
|
+
|
|
1027
|
+
Args:
|
|
1028
|
+
state: State dictionary to serialize.
|
|
1029
|
+
|
|
1030
|
+
Returns:
|
|
1031
|
+
JSON string for JSON_NATIVE, bytes for BLOB types.
|
|
1032
|
+
"""
|
|
1033
|
+
storage_type = self._detect_json_storage_type()
|
|
1034
|
+
|
|
1035
|
+
if storage_type == JSONStorageType.JSON_NATIVE:
|
|
1036
|
+
return to_json(state)
|
|
1037
|
+
|
|
1038
|
+
return to_json(state, as_bytes=True)
|
|
1039
|
+
|
|
1040
|
+
def _deserialize_state(self, data: Any) -> "dict[str, Any]":
|
|
1041
|
+
"""Deserialize state data from database format.
|
|
1042
|
+
|
|
1043
|
+
Args:
|
|
1044
|
+
data: Data from database (may be LOB, str, bytes, or dict).
|
|
1045
|
+
|
|
1046
|
+
Returns:
|
|
1047
|
+
Deserialized state dictionary.
|
|
1048
|
+
|
|
1049
|
+
Notes:
|
|
1050
|
+
Handles LOB reading if data has read() method.
|
|
1051
|
+
Oracle JSON type may return dict directly.
|
|
1052
|
+
"""
|
|
1053
|
+
if hasattr(data, "read"):
|
|
1054
|
+
data = data.read()
|
|
1055
|
+
|
|
1056
|
+
if isinstance(data, dict):
|
|
1057
|
+
return data
|
|
1058
|
+
|
|
1059
|
+
if isinstance(data, bytes):
|
|
1060
|
+
return from_json(data) # type: ignore[no-any-return]
|
|
1061
|
+
|
|
1062
|
+
if isinstance(data, str):
|
|
1063
|
+
return from_json(data) # type: ignore[no-any-return]
|
|
1064
|
+
|
|
1065
|
+
return from_json(str(data)) # type: ignore[no-any-return]
|
|
1066
|
+
|
|
1067
|
+
def _serialize_json_field(self, value: Any) -> "str | bytes | None":
|
|
1068
|
+
"""Serialize optional JSON field for event storage.
|
|
1069
|
+
|
|
1070
|
+
Args:
|
|
1071
|
+
value: Value to serialize (dict or None).
|
|
1072
|
+
|
|
1073
|
+
Returns:
|
|
1074
|
+
Serialized JSON or None.
|
|
1075
|
+
"""
|
|
1076
|
+
if value is None:
|
|
1077
|
+
return None
|
|
1078
|
+
|
|
1079
|
+
storage_type = self._detect_json_storage_type()
|
|
1080
|
+
|
|
1081
|
+
if storage_type == JSONStorageType.JSON_NATIVE:
|
|
1082
|
+
return to_json(value)
|
|
1083
|
+
|
|
1084
|
+
return to_json(value, as_bytes=True)
|
|
1085
|
+
|
|
1086
|
+
def _deserialize_json_field(self, data: Any) -> "dict[str, Any] | None":
|
|
1087
|
+
"""Deserialize optional JSON field from database.
|
|
1088
|
+
|
|
1089
|
+
Args:
|
|
1090
|
+
data: Data from database (may be LOB, str, bytes, dict, or None).
|
|
1091
|
+
|
|
1092
|
+
Returns:
|
|
1093
|
+
Deserialized dictionary or None.
|
|
1094
|
+
|
|
1095
|
+
Notes:
|
|
1096
|
+
Oracle JSON type may return dict directly.
|
|
1097
|
+
"""
|
|
1098
|
+
if data is None:
|
|
1099
|
+
return None
|
|
1100
|
+
|
|
1101
|
+
if hasattr(data, "read"):
|
|
1102
|
+
data = data.read()
|
|
1103
|
+
|
|
1104
|
+
if isinstance(data, dict):
|
|
1105
|
+
return data
|
|
1106
|
+
|
|
1107
|
+
if isinstance(data, bytes):
|
|
1108
|
+
return from_json(data) # type: ignore[no-any-return]
|
|
1109
|
+
|
|
1110
|
+
if isinstance(data, str):
|
|
1111
|
+
return from_json(data) # type: ignore[no-any-return]
|
|
1112
|
+
|
|
1113
|
+
return from_json(str(data)) # type: ignore[no-any-return]
|
|
1114
|
+
|
|
1115
|
+
    def _get_create_sessions_table_sql_for_type(self, storage_type: JSONStorageType) -> str:
        """Get Oracle CREATE TABLE SQL for sessions with specified storage type.

        Args:
            storage_type: JSON storage type to use.

        Returns:
            SQL statement to create adk_sessions table.
        """
        if storage_type == JSONStorageType.JSON_NATIVE:
            state_column = "state JSON NOT NULL"
        elif storage_type == JSONStorageType.BLOB_JSON:
            state_column = "state BLOB CHECK (state IS JSON) NOT NULL"
        else:
            state_column = "state BLOB NOT NULL"

        owner_id_column_sql = f", {self._owner_id_column_ddl}" if self._owner_id_column_ddl else ""
        inmemory_clause = " INMEMORY" if self._in_memory else ""

        return f"""
        BEGIN
            EXECUTE IMMEDIATE 'CREATE TABLE {self._session_table} (
                id VARCHAR2(128) PRIMARY KEY,
                app_name VARCHAR2(128) NOT NULL,
                user_id VARCHAR2(128) NOT NULL,
                {state_column},
                create_time TIMESTAMP WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL,
                update_time TIMESTAMP WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL{owner_id_column_sql}
            ){inmemory_clause}';
        EXCEPTION
            WHEN OTHERS THEN
                IF SQLCODE != -955 THEN
                    RAISE;
                END IF;
        END;

        BEGIN
            EXECUTE IMMEDIATE 'CREATE INDEX idx_{self._session_table}_app_user
                ON {self._session_table}(app_name, user_id)';
        EXCEPTION
            WHEN OTHERS THEN
                IF SQLCODE != -955 THEN
                    RAISE;
                END IF;
        END;

        BEGIN
            EXECUTE IMMEDIATE 'CREATE INDEX idx_{self._session_table}_update_time
                ON {self._session_table}(update_time DESC)';
        EXCEPTION
            WHEN OTHERS THEN
                IF SQLCODE != -955 THEN
                    RAISE;
                END IF;
        END;
        """

    def _get_create_events_table_sql_for_type(self, storage_type: JSONStorageType) -> str:
        """Get Oracle CREATE TABLE SQL for events with specified storage type.

        Args:
            storage_type: JSON storage type to use.

        Returns:
            SQL statement to create adk_events table.
        """
        if storage_type == JSONStorageType.JSON_NATIVE:
            json_columns = """
                content JSON,
                grounding_metadata JSON,
                custom_metadata JSON,
                long_running_tool_ids_json JSON
            """
        elif storage_type == JSONStorageType.BLOB_JSON:
            json_columns = """
                content BLOB CHECK (content IS JSON),
                grounding_metadata BLOB CHECK (grounding_metadata IS JSON),
                custom_metadata BLOB CHECK (custom_metadata IS JSON),
                long_running_tool_ids_json BLOB CHECK (long_running_tool_ids_json IS JSON)
            """
        else:
            json_columns = """
                content BLOB,
                grounding_metadata BLOB,
                custom_metadata BLOB,
                long_running_tool_ids_json BLOB
            """

        inmemory_clause = " INMEMORY" if self._in_memory else ""

        return f"""
        BEGIN
            EXECUTE IMMEDIATE 'CREATE TABLE {self._events_table} (
                id VARCHAR2(128) PRIMARY KEY,
                session_id VARCHAR2(128) NOT NULL,
                app_name VARCHAR2(128) NOT NULL,
                user_id VARCHAR2(128) NOT NULL,
                invocation_id VARCHAR2(256),
                author VARCHAR2(256),
                actions BLOB,
                branch VARCHAR2(256),
                timestamp TIMESTAMP WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL,
                {json_columns},
                partial NUMBER(1),
                turn_complete NUMBER(1),
                interrupted NUMBER(1),
                error_code VARCHAR2(256),
                error_message VARCHAR2(1024),
                CONSTRAINT fk_{self._events_table}_session FOREIGN KEY (session_id)
                    REFERENCES {self._session_table}(id) ON DELETE CASCADE
            ){inmemory_clause}';
        EXCEPTION
            WHEN OTHERS THEN
                IF SQLCODE != -955 THEN
                    RAISE;
                END IF;
        END;

        BEGIN
            EXECUTE IMMEDIATE 'CREATE INDEX idx_{self._events_table}_session
                ON {self._events_table}(session_id, timestamp ASC)';
        EXCEPTION
            WHEN OTHERS THEN
                IF SQLCODE != -955 THEN
                    RAISE;
                END IF;
        END;
        """
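Both *_for_type builders branch on the three JSONStorageType variants visible above: a native JSON column, BLOB with an IS JSON check constraint, or a plain BLOB fallback. The native JSON type exists from Oracle 21c onward and IS JSON constraints from 12c, so the store's _detect_json_storage_type() presumably keys off the server version; the sketch below only illustrates that idea and is not the library's actual detection logic:

# Illustrative only; the member name for the plain-BLOB fallback is an assumption.
def choose_storage_type_sketch(major_version: int) -> str:
    if major_version >= 21:
        return "JSON_NATIVE"  # state JSON NOT NULL
    if major_version >= 12:
        return "BLOB_JSON"    # state BLOB CHECK (state IS JSON) NOT NULL
    return "BLOB_PLAIN"       # state BLOB NOT NULL (assumed enum name)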
    def _get_create_sessions_table_sql(self) -> str:
        """Get Oracle CREATE TABLE SQL for sessions.

        Returns:
            SQL statement to create adk_sessions table with indexes.

        Notes:
            - VARCHAR2(128) for IDs and names
            - CLOB with IS JSON constraint for state storage
            - TIMESTAMP WITH TIME ZONE for timezone-aware timestamps
            - SYSTIMESTAMP for default current timestamp
            - Composite index on (app_name, user_id) for listing
            - Index on update_time DESC for recent session queries
        """
        return f"""
        BEGIN
            EXECUTE IMMEDIATE 'CREATE TABLE {self._session_table} (
                id VARCHAR2(128) PRIMARY KEY,
                app_name VARCHAR2(128) NOT NULL,
                user_id VARCHAR2(128) NOT NULL,
                state CLOB CHECK (state IS JSON),
                create_time TIMESTAMP WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL,
                update_time TIMESTAMP WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL
            )';
        EXCEPTION
            WHEN OTHERS THEN
                IF SQLCODE != -955 THEN
                    RAISE;
                END IF;
        END;
        """

    def _get_create_events_table_sql(self) -> str:
        """Get Oracle CREATE TABLE SQL for events (legacy method).

        Returns:
            SQL statement to create adk_events table with indexes.

        Notes:
            DEPRECATED: Use _get_create_events_table_sql_for_type() instead.
            This method uses BLOB with IS JSON constraints (12c+ compatible).

            - VARCHAR2 sizes: id(128), session_id(128), invocation_id(256), author(256),
              branch(256), error_code(256), error_message(1024)
            - BLOB for pickled actions
            - BLOB with IS JSON for all JSON fields (content, grounding_metadata,
              custom_metadata, long_running_tool_ids_json)
            - NUMBER(1) for partial, turn_complete, interrupted
            - Foreign key to sessions with CASCADE delete
            - Index on (session_id, timestamp ASC) for ordered event retrieval
        """
        return f"""
        BEGIN
            EXECUTE IMMEDIATE 'CREATE TABLE {self._events_table} (
                id VARCHAR2(128) PRIMARY KEY,
                session_id VARCHAR2(128) NOT NULL,
                app_name VARCHAR2(128) NOT NULL,
                user_id VARCHAR2(128) NOT NULL,
                invocation_id VARCHAR2(256),
                author VARCHAR2(256),
                actions BLOB,
                branch VARCHAR2(256),
                timestamp TIMESTAMP WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL,
                content BLOB CHECK (content IS JSON),
                grounding_metadata BLOB CHECK (grounding_metadata IS JSON),
                custom_metadata BLOB CHECK (custom_metadata IS JSON),
                long_running_tool_ids_json BLOB CHECK (long_running_tool_ids_json IS JSON),
                partial NUMBER(1),
                turn_complete NUMBER(1),
                interrupted NUMBER(1),
                error_code VARCHAR2(256),
                error_message VARCHAR2(1024),
                CONSTRAINT fk_{self._events_table}_session FOREIGN KEY (session_id)
                    REFERENCES {self._session_table}(id) ON DELETE CASCADE
            )';
        EXCEPTION
            WHEN OTHERS THEN
                IF SQLCODE != -955 THEN
                    RAISE;
                END IF;
        END;
        """

    def _get_drop_tables_sql(self) -> "list[str]":
        """Get Oracle DROP TABLE SQL statements.

        Returns:
            List of SQL statements to drop tables and indexes.

        Notes:
            Order matters: drop events table (child) before sessions (parent).
            Oracle automatically drops indexes when dropping tables.
        """
        return [
            f"""
            BEGIN
                EXECUTE IMMEDIATE 'DROP INDEX idx_{self._events_table}_session';
            EXCEPTION
                WHEN OTHERS THEN
                    IF SQLCODE != -1418 THEN
                        RAISE;
                    END IF;
            END;
            """,
            f"""
            BEGIN
                EXECUTE IMMEDIATE 'DROP INDEX idx_{self._session_table}_update_time';
            EXCEPTION
                WHEN OTHERS THEN
                    IF SQLCODE != -1418 THEN
                        RAISE;
                    END IF;
            END;
            """,
            f"""
            BEGIN
                EXECUTE IMMEDIATE 'DROP INDEX idx_{self._session_table}_app_user';
            EXCEPTION
                WHEN OTHERS THEN
                    IF SQLCODE != -1418 THEN
                        RAISE;
                    END IF;
            END;
            """,
            f"""
            BEGIN
                EXECUTE IMMEDIATE 'DROP TABLE {self._events_table}';
            EXCEPTION
                WHEN OTHERS THEN
                    IF SQLCODE != -942 THEN
                        RAISE;
                    END IF;
            END;
            """,
            f"""
            BEGIN
                EXECUTE IMMEDIATE 'DROP TABLE {self._session_table}';
            EXCEPTION
                WHEN OTHERS THEN
                    IF SQLCODE != -942 THEN
                        RAISE;
                    END IF;
            END;
            """,
        ]
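The drop statements reuse the idempotency pattern from the create statements, only with different Oracle error codes: ORA-00955 (name already used) is swallowed when creating, ORA-00942 (table or view does not exist) when dropping tables, and ORA-01418 (specified index does not exist) when dropping indexes. A small generic sketch of that "ignore selected SQLCODEs" wrapper, illustrative rather than part of sqlspec:

def plsql_ignore_sketch(statement: str, ignored_sqlcodes: "tuple[int, ...]") -> str:
    # Wraps a DDL statement in an anonymous PL/SQL block that re-raises
    # every error except the listed SQLCODEs (Oracle SQLCODEs are negative).
    condition = " AND ".join(f"SQLCODE != {code}" for code in ignored_sqlcodes)
    return (
        "BEGIN\n"
        f"    EXECUTE IMMEDIATE '{statement}';\n"
        "EXCEPTION\n"
        "    WHEN OTHERS THEN\n"
        f"        IF {condition} THEN\n"
        "            RAISE;\n"
        "        END IF;\n"
        "END;"
    )


# e.g. plsql_ignore_sketch("DROP TABLE adk_events", (-942,))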
    def create_tables(self) -> None:
        """Create both sessions and events tables if they don't exist.

        Notes:
            Detects Oracle version to determine optimal JSON storage type.
            Uses version-appropriate table schema.
        """
        storage_type = self._detect_json_storage_type()
        logger.info("Creating ADK tables with storage type: %s", storage_type)

        with self._config.provide_session() as driver:
            sessions_sql = SQL(self._get_create_sessions_table_sql_for_type(storage_type))
            driver.execute_script(sessions_sql)

            events_sql = SQL(self._get_create_events_table_sql_for_type(storage_type))
            driver.execute_script(events_sql)

            logger.debug("Created ADK tables: %s, %s", self._session_table, self._events_table)
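create_tables() renders the two version-appropriate scripts and runs each through the driver's execute_script(). A hedged bootstrap sketch; how the store itself is constructed is outside this hunk, so the store argument is simply assumed to be an already-configured instance of the class defined in this file:

def bootstrap_adk_tables_sketch(store) -> None:
    # Idempotent: the generated PL/SQL swallows ORA-00955 if the objects already exist.
    store.create_tables()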
    def create_session(
        self, session_id: str, app_name: str, user_id: str, state: "dict[str, Any]", owner_id: "Any | None" = None
    ) -> SessionRecord:
        """Create a new session.

        Args:
            session_id: Unique session identifier.
            app_name: Application name.
            user_id: User identifier.
            state: Initial session state.
            owner_id: Optional owner ID value for owner_id_column (if configured).

        Returns:
            Created session record.

        Notes:
            Uses SYSTIMESTAMP for create_time and update_time.
            State is serialized using version-appropriate format.
            owner_id is ignored if owner_id_column not configured.
        """
        state_data = self._serialize_state(state)

        if self._owner_id_column_name:
            sql = f"""
            INSERT INTO {self._session_table} (id, app_name, user_id, state, create_time, update_time, {self._owner_id_column_name})
            VALUES (:id, :app_name, :user_id, :state, SYSTIMESTAMP, SYSTIMESTAMP, :owner_id)
            """
            params = {
                "id": session_id,
                "app_name": app_name,
                "user_id": user_id,
                "state": state_data,
                "owner_id": owner_id,
            }
        else:
            sql = f"""
            INSERT INTO {self._session_table} (id, app_name, user_id, state, create_time, update_time)
            VALUES (:id, :app_name, :user_id, :state, SYSTIMESTAMP, SYSTIMESTAMP)
            """
            params = {"id": session_id, "app_name": app_name, "user_id": user_id, "state": state_data}

        with self._config.provide_connection() as conn:
            cursor = conn.cursor()
            cursor.execute(sql, params)
            conn.commit()

        return self.get_session(session_id)  # type: ignore[return-value]

    def get_session(self, session_id: str) -> "SessionRecord | None":
        """Get session by ID.

        Args:
            session_id: Session identifier.

        Returns:
            Session record or None if not found.

        Notes:
            Oracle returns datetime objects for TIMESTAMP columns.
            State is deserialized using version-appropriate format.
        """
        sql = f"""
        SELECT id, app_name, user_id, state, create_time, update_time
        FROM {self._session_table}
        WHERE id = :id
        """

        try:
            with self._config.provide_connection() as conn:
                cursor = conn.cursor()
                cursor.execute(sql, {"id": session_id})
                row = cursor.fetchone()

                if row is None:
                    return None

                session_id_val, app_name, user_id, state_data, create_time, update_time = row

                state = self._deserialize_state(state_data)

                return SessionRecord(
                    id=session_id_val,
                    app_name=app_name,
                    user_id=user_id,
                    state=state,
                    create_time=create_time,
                    update_time=update_time,
                )
        except oracledb.DatabaseError as e:
            error_obj = e.args[0] if e.args else None
            if error_obj and error_obj.code == ORACLE_TABLE_NOT_FOUND_ERROR:
                return None
            raise
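Together, create_session() and get_session() (plus update_session_state() and delete_session() below) give the session side of the store a small CRUD surface. A usage sketch against an assumed, already-constructed store instance; identifiers are made up for illustration:

def session_roundtrip_sketch(store) -> None:
    store.create_session("sess-1", "my-app", "user-42", state={"step": 1})

    record = store.get_session("sess-1")
    assert record is not None  # SessionRecord carries id, state and both timestamps

    store.update_session_state("sess-1", {"step": 2})  # replaces the whole state dict
    store.delete_session("sess-1")                     # events cascade via the FK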
    def update_session_state(self, session_id: str, state: "dict[str, Any]") -> None:
        """Update session state.

        Args:
            session_id: Session identifier.
            state: New state dictionary (replaces existing state).

        Notes:
            This replaces the entire state dictionary.
            Updates update_time to current timestamp.
            State is serialized using version-appropriate format.
        """
        state_data = self._serialize_state(state)

        sql = f"""
        UPDATE {self._session_table}
        SET state = :state, update_time = SYSTIMESTAMP
        WHERE id = :id
        """

        with self._config.provide_connection() as conn:
            cursor = conn.cursor()
            cursor.execute(sql, {"state": state_data, "id": session_id})
            conn.commit()

    def delete_session(self, session_id: str) -> None:
        """Delete session and all associated events (cascade).

        Args:
            session_id: Session identifier.

        Notes:
            Foreign key constraint ensures events are cascade-deleted.
        """
        sql = f"DELETE FROM {self._session_table} WHERE id = :id"

        with self._config.provide_connection() as conn:
            cursor = conn.cursor()
            cursor.execute(sql, {"id": session_id})
            conn.commit()

    def list_sessions(self, app_name: str, user_id: str) -> "list[SessionRecord]":
        """List all sessions for a user in an app.

        Args:
            app_name: Application name.
            user_id: User identifier.

        Returns:
            List of session records ordered by update_time DESC.

        Notes:
            Uses composite index on (app_name, user_id).
            State is deserialized using version-appropriate format.
        """
        sql = f"""
        SELECT id, app_name, user_id, state, create_time, update_time
        FROM {self._session_table}
        WHERE app_name = :app_name AND user_id = :user_id
        ORDER BY update_time DESC
        """

        try:
            with self._config.provide_connection() as conn:
                cursor = conn.cursor()
                cursor.execute(sql, {"app_name": app_name, "user_id": user_id})
                rows = cursor.fetchall()

                results = []
                for row in rows:
                    state = self._deserialize_state(row[3])

                    results.append(
                        SessionRecord(
                            id=row[0],
                            app_name=row[1],
                            user_id=row[2],
                            state=state,
                            create_time=row[4],
                            update_time=row[5],
                        )
                    )
                return results
        except oracledb.DatabaseError as e:
            error_obj = e.args[0] if e.args else None
            if error_obj and error_obj.code == ORACLE_TABLE_NOT_FOUND_ERROR:
                return []
            raise

    def create_event(
        self,
        event_id: str,
        session_id: str,
        app_name: str,
        user_id: str,
        author: "str | None" = None,
        actions: "bytes | None" = None,
        content: "dict[str, Any] | None" = None,
        **kwargs: Any,
    ) -> "EventRecord":
        """Create a new event.

        Args:
            event_id: Unique event identifier.
            session_id: Session identifier.
            app_name: Application name.
            user_id: User identifier.
            author: Event author (user/assistant/system).
            actions: Pickled actions object.
            content: Event content (JSONB/JSON).
            **kwargs: Additional optional fields.

        Returns:
            Created event record.

        Notes:
            Uses SYSTIMESTAMP for timestamp if not provided.
            JSON fields are serialized using version-appropriate format.
            Boolean fields are converted to NUMBER(1).
        """
        content_data = self._serialize_json_field(content)
        grounding_metadata_data = self._serialize_json_field(kwargs.get("grounding_metadata"))
        custom_metadata_data = self._serialize_json_field(kwargs.get("custom_metadata"))

        sql = f"""
        INSERT INTO {self._events_table} (
            id, session_id, app_name, user_id, invocation_id, author, actions,
            long_running_tool_ids_json, branch, timestamp, content,
            grounding_metadata, custom_metadata, partial, turn_complete,
            interrupted, error_code, error_message
        ) VALUES (
            :id, :session_id, :app_name, :user_id, :invocation_id, :author, :actions,
            :long_running_tool_ids_json, :branch, :timestamp, :content,
            :grounding_metadata, :custom_metadata, :partial, :turn_complete,
            :interrupted, :error_code, :error_message
        )
        """

        with self._config.provide_connection() as conn:
            cursor = conn.cursor()
            cursor.execute(
                sql,
                {
                    "id": event_id,
                    "session_id": session_id,
                    "app_name": app_name,
                    "user_id": user_id,
                    "invocation_id": kwargs.get("invocation_id"),
                    "author": author,
                    "actions": actions,
                    "long_running_tool_ids_json": kwargs.get("long_running_tool_ids_json"),
                    "branch": kwargs.get("branch"),
                    "timestamp": kwargs.get("timestamp"),
                    "content": content_data,
                    "grounding_metadata": grounding_metadata_data,
                    "custom_metadata": custom_metadata_data,
                    "partial": _to_oracle_bool(kwargs.get("partial")),
                    "turn_complete": _to_oracle_bool(kwargs.get("turn_complete")),
                    "interrupted": _to_oracle_bool(kwargs.get("interrupted")),
                    "error_code": kwargs.get("error_code"),
                    "error_message": kwargs.get("error_message"),
                },
            )
            conn.commit()

        events = self.list_events(session_id)
        for event in events:
            if event["id"] == event_id:
                return event

        msg = f"Failed to retrieve created event {event_id}"
        raise RuntimeError(msg)
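partial, turn_complete and interrupted land in NUMBER(1) columns, so writes go through _to_oracle_bool and reads through _from_oracle_bool. Those helpers are defined elsewhere in this file; the sketch below only captures the behaviour their call sites imply (None passes through so a missing flag stays NULL), and is not the library's own code:

def to_oracle_bool_sketch(value: "bool | None") -> "int | None":
    return None if value is None else int(value)


def from_oracle_bool_sketch(value: "int | None") -> "bool | None":
    return None if value is None else bool(value)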
    def list_events(self, session_id: str) -> "list[EventRecord]":
        """List events for a session ordered by timestamp.

        Args:
            session_id: Session identifier.

        Returns:
            List of event records ordered by timestamp ASC.

        Notes:
            Uses index on (session_id, timestamp ASC).
            JSON fields deserialized using version-appropriate format.
            Converts BLOB actions to bytes and NUMBER(1) booleans to Python bool.
        """
        sql = f"""
        SELECT id, session_id, app_name, user_id, invocation_id, author, actions,
               long_running_tool_ids_json, branch, timestamp, content,
               grounding_metadata, custom_metadata, partial, turn_complete,
               interrupted, error_code, error_message
        FROM {self._events_table}
        WHERE session_id = :session_id
        ORDER BY timestamp ASC
        """

        try:
            with self._config.provide_connection() as conn:
                cursor = conn.cursor()
                cursor.execute(sql, {"session_id": session_id})
                rows = cursor.fetchall()

                results = []
                for row in rows:
                    actions_blob = row[6]
                    actions_data = actions_blob.read() if hasattr(actions_blob, "read") else actions_blob

                    content = self._deserialize_json_field(row[10])
                    grounding_metadata = self._deserialize_json_field(row[11])
                    custom_metadata = self._deserialize_json_field(row[12])

                    results.append(
                        EventRecord(
                            id=row[0],
                            session_id=row[1],
                            app_name=row[2],
                            user_id=row[3],
                            invocation_id=row[4],
                            author=row[5],
                            actions=bytes(actions_data) if actions_data is not None else b"",
                            long_running_tool_ids_json=row[7],
                            branch=row[8],
                            timestamp=row[9],
                            content=content,
                            grounding_metadata=grounding_metadata,
                            custom_metadata=custom_metadata,
                            partial=_from_oracle_bool(row[13]),
                            turn_complete=_from_oracle_bool(row[14]),
                            interrupted=_from_oracle_bool(row[15]),
                            error_code=row[16],
                            error_message=row[17],
                        )
                    )
                return results
        except oracledb.DatabaseError as e:
            error_obj = e.args[0] if e.args else None
            if error_obj and error_obj.code == ORACLE_TABLE_NOT_FOUND_ERROR:
                return []
            raise