sqlspec 0.32.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sqlspec/__init__.py +104 -0
- sqlspec/__main__.py +12 -0
- sqlspec/__metadata__.py +14 -0
- sqlspec/_serialization.py +312 -0
- sqlspec/_typing.py +784 -0
- sqlspec/adapters/__init__.py +0 -0
- sqlspec/adapters/adbc/__init__.py +5 -0
- sqlspec/adapters/adbc/_types.py +12 -0
- sqlspec/adapters/adbc/adk/__init__.py +5 -0
- sqlspec/adapters/adbc/adk/store.py +880 -0
- sqlspec/adapters/adbc/config.py +436 -0
- sqlspec/adapters/adbc/data_dictionary.py +537 -0
- sqlspec/adapters/adbc/driver.py +841 -0
- sqlspec/adapters/adbc/litestar/__init__.py +5 -0
- sqlspec/adapters/adbc/litestar/store.py +504 -0
- sqlspec/adapters/adbc/type_converter.py +153 -0
- sqlspec/adapters/aiosqlite/__init__.py +29 -0
- sqlspec/adapters/aiosqlite/_types.py +13 -0
- sqlspec/adapters/aiosqlite/adk/__init__.py +5 -0
- sqlspec/adapters/aiosqlite/adk/store.py +536 -0
- sqlspec/adapters/aiosqlite/config.py +310 -0
- sqlspec/adapters/aiosqlite/data_dictionary.py +260 -0
- sqlspec/adapters/aiosqlite/driver.py +463 -0
- sqlspec/adapters/aiosqlite/litestar/__init__.py +5 -0
- sqlspec/adapters/aiosqlite/litestar/store.py +281 -0
- sqlspec/adapters/aiosqlite/pool.py +500 -0
- sqlspec/adapters/asyncmy/__init__.py +25 -0
- sqlspec/adapters/asyncmy/_types.py +12 -0
- sqlspec/adapters/asyncmy/adk/__init__.py +5 -0
- sqlspec/adapters/asyncmy/adk/store.py +503 -0
- sqlspec/adapters/asyncmy/config.py +246 -0
- sqlspec/adapters/asyncmy/data_dictionary.py +241 -0
- sqlspec/adapters/asyncmy/driver.py +632 -0
- sqlspec/adapters/asyncmy/litestar/__init__.py +5 -0
- sqlspec/adapters/asyncmy/litestar/store.py +296 -0
- sqlspec/adapters/asyncpg/__init__.py +23 -0
- sqlspec/adapters/asyncpg/_type_handlers.py +76 -0
- sqlspec/adapters/asyncpg/_types.py +23 -0
- sqlspec/adapters/asyncpg/adk/__init__.py +5 -0
- sqlspec/adapters/asyncpg/adk/store.py +460 -0
- sqlspec/adapters/asyncpg/config.py +464 -0
- sqlspec/adapters/asyncpg/data_dictionary.py +321 -0
- sqlspec/adapters/asyncpg/driver.py +720 -0
- sqlspec/adapters/asyncpg/litestar/__init__.py +5 -0
- sqlspec/adapters/asyncpg/litestar/store.py +253 -0
- sqlspec/adapters/bigquery/__init__.py +18 -0
- sqlspec/adapters/bigquery/_types.py +12 -0
- sqlspec/adapters/bigquery/adk/__init__.py +5 -0
- sqlspec/adapters/bigquery/adk/store.py +585 -0
- sqlspec/adapters/bigquery/config.py +298 -0
- sqlspec/adapters/bigquery/data_dictionary.py +256 -0
- sqlspec/adapters/bigquery/driver.py +1073 -0
- sqlspec/adapters/bigquery/litestar/__init__.py +5 -0
- sqlspec/adapters/bigquery/litestar/store.py +327 -0
- sqlspec/adapters/bigquery/type_converter.py +125 -0
- sqlspec/adapters/duckdb/__init__.py +24 -0
- sqlspec/adapters/duckdb/_types.py +12 -0
- sqlspec/adapters/duckdb/adk/__init__.py +14 -0
- sqlspec/adapters/duckdb/adk/store.py +563 -0
- sqlspec/adapters/duckdb/config.py +396 -0
- sqlspec/adapters/duckdb/data_dictionary.py +264 -0
- sqlspec/adapters/duckdb/driver.py +604 -0
- sqlspec/adapters/duckdb/litestar/__init__.py +5 -0
- sqlspec/adapters/duckdb/litestar/store.py +332 -0
- sqlspec/adapters/duckdb/pool.py +273 -0
- sqlspec/adapters/duckdb/type_converter.py +133 -0
- sqlspec/adapters/oracledb/__init__.py +32 -0
- sqlspec/adapters/oracledb/_numpy_handlers.py +133 -0
- sqlspec/adapters/oracledb/_types.py +39 -0
- sqlspec/adapters/oracledb/_uuid_handlers.py +130 -0
- sqlspec/adapters/oracledb/adk/__init__.py +5 -0
- sqlspec/adapters/oracledb/adk/store.py +1632 -0
- sqlspec/adapters/oracledb/config.py +469 -0
- sqlspec/adapters/oracledb/data_dictionary.py +717 -0
- sqlspec/adapters/oracledb/driver.py +1493 -0
- sqlspec/adapters/oracledb/litestar/__init__.py +5 -0
- sqlspec/adapters/oracledb/litestar/store.py +765 -0
- sqlspec/adapters/oracledb/migrations.py +532 -0
- sqlspec/adapters/oracledb/type_converter.py +207 -0
- sqlspec/adapters/psqlpy/__init__.py +16 -0
- sqlspec/adapters/psqlpy/_type_handlers.py +44 -0
- sqlspec/adapters/psqlpy/_types.py +12 -0
- sqlspec/adapters/psqlpy/adk/__init__.py +5 -0
- sqlspec/adapters/psqlpy/adk/store.py +483 -0
- sqlspec/adapters/psqlpy/config.py +271 -0
- sqlspec/adapters/psqlpy/data_dictionary.py +179 -0
- sqlspec/adapters/psqlpy/driver.py +892 -0
- sqlspec/adapters/psqlpy/litestar/__init__.py +5 -0
- sqlspec/adapters/psqlpy/litestar/store.py +272 -0
- sqlspec/adapters/psqlpy/type_converter.py +102 -0
- sqlspec/adapters/psycopg/__init__.py +32 -0
- sqlspec/adapters/psycopg/_type_handlers.py +90 -0
- sqlspec/adapters/psycopg/_types.py +18 -0
- sqlspec/adapters/psycopg/adk/__init__.py +5 -0
- sqlspec/adapters/psycopg/adk/store.py +962 -0
- sqlspec/adapters/psycopg/config.py +487 -0
- sqlspec/adapters/psycopg/data_dictionary.py +630 -0
- sqlspec/adapters/psycopg/driver.py +1336 -0
- sqlspec/adapters/psycopg/litestar/__init__.py +5 -0
- sqlspec/adapters/psycopg/litestar/store.py +554 -0
- sqlspec/adapters/spanner/__init__.py +38 -0
- sqlspec/adapters/spanner/_type_handlers.py +186 -0
- sqlspec/adapters/spanner/_types.py +12 -0
- sqlspec/adapters/spanner/adk/__init__.py +5 -0
- sqlspec/adapters/spanner/adk/store.py +435 -0
- sqlspec/adapters/spanner/config.py +241 -0
- sqlspec/adapters/spanner/data_dictionary.py +95 -0
- sqlspec/adapters/spanner/dialect/__init__.py +6 -0
- sqlspec/adapters/spanner/dialect/_spangres.py +52 -0
- sqlspec/adapters/spanner/dialect/_spanner.py +123 -0
- sqlspec/adapters/spanner/driver.py +366 -0
- sqlspec/adapters/spanner/litestar/__init__.py +5 -0
- sqlspec/adapters/spanner/litestar/store.py +266 -0
- sqlspec/adapters/spanner/type_converter.py +46 -0
- sqlspec/adapters/sqlite/__init__.py +18 -0
- sqlspec/adapters/sqlite/_type_handlers.py +86 -0
- sqlspec/adapters/sqlite/_types.py +11 -0
- sqlspec/adapters/sqlite/adk/__init__.py +5 -0
- sqlspec/adapters/sqlite/adk/store.py +582 -0
- sqlspec/adapters/sqlite/config.py +221 -0
- sqlspec/adapters/sqlite/data_dictionary.py +256 -0
- sqlspec/adapters/sqlite/driver.py +527 -0
- sqlspec/adapters/sqlite/litestar/__init__.py +5 -0
- sqlspec/adapters/sqlite/litestar/store.py +318 -0
- sqlspec/adapters/sqlite/pool.py +140 -0
- sqlspec/base.py +811 -0
- sqlspec/builder/__init__.py +146 -0
- sqlspec/builder/_base.py +900 -0
- sqlspec/builder/_column.py +517 -0
- sqlspec/builder/_ddl.py +1642 -0
- sqlspec/builder/_delete.py +84 -0
- sqlspec/builder/_dml.py +381 -0
- sqlspec/builder/_expression_wrappers.py +46 -0
- sqlspec/builder/_factory.py +1537 -0
- sqlspec/builder/_insert.py +315 -0
- sqlspec/builder/_join.py +375 -0
- sqlspec/builder/_merge.py +848 -0
- sqlspec/builder/_parsing_utils.py +297 -0
- sqlspec/builder/_select.py +1615 -0
- sqlspec/builder/_update.py +161 -0
- sqlspec/builder/_vector_expressions.py +259 -0
- sqlspec/cli.py +764 -0
- sqlspec/config.py +1540 -0
- sqlspec/core/__init__.py +305 -0
- sqlspec/core/cache.py +785 -0
- sqlspec/core/compiler.py +603 -0
- sqlspec/core/filters.py +872 -0
- sqlspec/core/hashing.py +274 -0
- sqlspec/core/metrics.py +83 -0
- sqlspec/core/parameters/__init__.py +64 -0
- sqlspec/core/parameters/_alignment.py +266 -0
- sqlspec/core/parameters/_converter.py +413 -0
- sqlspec/core/parameters/_processor.py +341 -0
- sqlspec/core/parameters/_registry.py +201 -0
- sqlspec/core/parameters/_transformers.py +226 -0
- sqlspec/core/parameters/_types.py +430 -0
- sqlspec/core/parameters/_validator.py +123 -0
- sqlspec/core/pipeline.py +187 -0
- sqlspec/core/result.py +1124 -0
- sqlspec/core/splitter.py +940 -0
- sqlspec/core/stack.py +163 -0
- sqlspec/core/statement.py +835 -0
- sqlspec/core/type_conversion.py +235 -0
- sqlspec/driver/__init__.py +36 -0
- sqlspec/driver/_async.py +1027 -0
- sqlspec/driver/_common.py +1236 -0
- sqlspec/driver/_sync.py +1025 -0
- sqlspec/driver/mixins/__init__.py +7 -0
- sqlspec/driver/mixins/_result_tools.py +61 -0
- sqlspec/driver/mixins/_sql_translator.py +122 -0
- sqlspec/driver/mixins/_storage.py +311 -0
- sqlspec/exceptions.py +321 -0
- sqlspec/extensions/__init__.py +0 -0
- sqlspec/extensions/adk/__init__.py +53 -0
- sqlspec/extensions/adk/_types.py +51 -0
- sqlspec/extensions/adk/converters.py +172 -0
- sqlspec/extensions/adk/migrations/0001_create_adk_tables.py +144 -0
- sqlspec/extensions/adk/migrations/__init__.py +0 -0
- sqlspec/extensions/adk/service.py +181 -0
- sqlspec/extensions/adk/store.py +536 -0
- sqlspec/extensions/aiosql/__init__.py +10 -0
- sqlspec/extensions/aiosql/adapter.py +471 -0
- sqlspec/extensions/fastapi/__init__.py +19 -0
- sqlspec/extensions/fastapi/extension.py +341 -0
- sqlspec/extensions/fastapi/providers.py +543 -0
- sqlspec/extensions/flask/__init__.py +36 -0
- sqlspec/extensions/flask/_state.py +72 -0
- sqlspec/extensions/flask/_utils.py +40 -0
- sqlspec/extensions/flask/extension.py +402 -0
- sqlspec/extensions/litestar/__init__.py +23 -0
- sqlspec/extensions/litestar/_utils.py +52 -0
- sqlspec/extensions/litestar/cli.py +92 -0
- sqlspec/extensions/litestar/config.py +90 -0
- sqlspec/extensions/litestar/handlers.py +316 -0
- sqlspec/extensions/litestar/migrations/0001_create_session_table.py +137 -0
- sqlspec/extensions/litestar/migrations/__init__.py +3 -0
- sqlspec/extensions/litestar/plugin.py +638 -0
- sqlspec/extensions/litestar/providers.py +454 -0
- sqlspec/extensions/litestar/store.py +265 -0
- sqlspec/extensions/otel/__init__.py +58 -0
- sqlspec/extensions/prometheus/__init__.py +107 -0
- sqlspec/extensions/starlette/__init__.py +10 -0
- sqlspec/extensions/starlette/_state.py +26 -0
- sqlspec/extensions/starlette/_utils.py +52 -0
- sqlspec/extensions/starlette/extension.py +257 -0
- sqlspec/extensions/starlette/middleware.py +154 -0
- sqlspec/loader.py +716 -0
- sqlspec/migrations/__init__.py +36 -0
- sqlspec/migrations/base.py +728 -0
- sqlspec/migrations/commands.py +1140 -0
- sqlspec/migrations/context.py +142 -0
- sqlspec/migrations/fix.py +203 -0
- sqlspec/migrations/loaders.py +450 -0
- sqlspec/migrations/runner.py +1024 -0
- sqlspec/migrations/templates.py +234 -0
- sqlspec/migrations/tracker.py +403 -0
- sqlspec/migrations/utils.py +256 -0
- sqlspec/migrations/validation.py +203 -0
- sqlspec/observability/__init__.py +22 -0
- sqlspec/observability/_config.py +228 -0
- sqlspec/observability/_diagnostics.py +67 -0
- sqlspec/observability/_dispatcher.py +151 -0
- sqlspec/observability/_observer.py +180 -0
- sqlspec/observability/_runtime.py +381 -0
- sqlspec/observability/_spans.py +158 -0
- sqlspec/protocols.py +530 -0
- sqlspec/py.typed +0 -0
- sqlspec/storage/__init__.py +46 -0
- sqlspec/storage/_utils.py +104 -0
- sqlspec/storage/backends/__init__.py +1 -0
- sqlspec/storage/backends/base.py +163 -0
- sqlspec/storage/backends/fsspec.py +398 -0
- sqlspec/storage/backends/local.py +377 -0
- sqlspec/storage/backends/obstore.py +580 -0
- sqlspec/storage/errors.py +104 -0
- sqlspec/storage/pipeline.py +604 -0
- sqlspec/storage/registry.py +289 -0
- sqlspec/typing.py +219 -0
- sqlspec/utils/__init__.py +31 -0
- sqlspec/utils/arrow_helpers.py +95 -0
- sqlspec/utils/config_resolver.py +153 -0
- sqlspec/utils/correlation.py +132 -0
- sqlspec/utils/data_transformation.py +114 -0
- sqlspec/utils/dependencies.py +79 -0
- sqlspec/utils/deprecation.py +113 -0
- sqlspec/utils/fixtures.py +250 -0
- sqlspec/utils/logging.py +172 -0
- sqlspec/utils/module_loader.py +273 -0
- sqlspec/utils/portal.py +325 -0
- sqlspec/utils/schema.py +288 -0
- sqlspec/utils/serializers.py +396 -0
- sqlspec/utils/singleton.py +41 -0
- sqlspec/utils/sync_tools.py +277 -0
- sqlspec/utils/text.py +108 -0
- sqlspec/utils/type_converters.py +99 -0
- sqlspec/utils/type_guards.py +1324 -0
- sqlspec/utils/version.py +444 -0
- sqlspec-0.32.0.dist-info/METADATA +202 -0
- sqlspec-0.32.0.dist-info/RECORD +262 -0
- sqlspec-0.32.0.dist-info/WHEEL +4 -0
- sqlspec-0.32.0.dist-info/entry_points.txt +2 -0
- sqlspec-0.32.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,1493 @@
|
|
|
1
|
+
"""Oracle Driver"""
|
|
2
|
+
|
|
3
|
+
import contextlib
|
|
4
|
+
import logging
|
|
5
|
+
import re
|
|
6
|
+
from typing import TYPE_CHECKING, Any, Final, NamedTuple, cast
|
|
7
|
+
|
|
8
|
+
import oracledb
|
|
9
|
+
from oracledb import AsyncCursor, Cursor
|
|
10
|
+
|
|
11
|
+
from sqlspec.adapters.oracledb._types import OracleAsyncConnection, OracleSyncConnection
|
|
12
|
+
from sqlspec.adapters.oracledb.data_dictionary import OracleAsyncDataDictionary, OracleSyncDataDictionary
|
|
13
|
+
from sqlspec.adapters.oracledb.type_converter import OracleTypeConverter
|
|
14
|
+
from sqlspec.core import (
|
|
15
|
+
SQL,
|
|
16
|
+
DriverParameterProfile,
|
|
17
|
+
ParameterStyle,
|
|
18
|
+
StackResult,
|
|
19
|
+
StatementConfig,
|
|
20
|
+
StatementStack,
|
|
21
|
+
build_statement_config_from_profile,
|
|
22
|
+
create_arrow_result,
|
|
23
|
+
create_sql_result,
|
|
24
|
+
get_cache_config,
|
|
25
|
+
register_driver_profile,
|
|
26
|
+
)
|
|
27
|
+
from sqlspec.driver import (
|
|
28
|
+
AsyncDataDictionaryBase,
|
|
29
|
+
AsyncDriverAdapterBase,
|
|
30
|
+
SyncDataDictionaryBase,
|
|
31
|
+
SyncDriverAdapterBase,
|
|
32
|
+
)
|
|
33
|
+
from sqlspec.driver._common import StackExecutionObserver, VersionInfo, describe_stack_statement, hash_stack_operations
|
|
34
|
+
from sqlspec.exceptions import (
|
|
35
|
+
CheckViolationError,
|
|
36
|
+
DatabaseConnectionError,
|
|
37
|
+
DataError,
|
|
38
|
+
ForeignKeyViolationError,
|
|
39
|
+
IntegrityError,
|
|
40
|
+
NotNullViolationError,
|
|
41
|
+
OperationalError,
|
|
42
|
+
SQLParsingError,
|
|
43
|
+
SQLSpecError,
|
|
44
|
+
StackExecutionError,
|
|
45
|
+
TransactionError,
|
|
46
|
+
UniqueViolationError,
|
|
47
|
+
)
|
|
48
|
+
from sqlspec.utils.logging import log_with_context
|
|
49
|
+
from sqlspec.utils.module_loader import ensure_pyarrow
|
|
50
|
+
from sqlspec.utils.serializers import to_json
|
|
51
|
+
|
|
52
|
+
if TYPE_CHECKING:
|
|
53
|
+
from collections.abc import Sequence
|
|
54
|
+
from contextlib import AbstractAsyncContextManager, AbstractContextManager
|
|
55
|
+
from typing import Protocol
|
|
56
|
+
|
|
57
|
+
from sqlspec.builder import QueryBuilder
|
|
58
|
+
from sqlspec.core import ArrowResult, SQLResult, Statement, StatementConfig, StatementFilter
|
|
59
|
+
from sqlspec.core.stack import StackOperation
|
|
60
|
+
from sqlspec.driver import ExecutionResult
|
|
61
|
+
from sqlspec.storage import (
|
|
62
|
+
AsyncStoragePipeline,
|
|
63
|
+
StorageBridgeJob,
|
|
64
|
+
StorageDestination,
|
|
65
|
+
StorageFormat,
|
|
66
|
+
StorageTelemetry,
|
|
67
|
+
SyncStoragePipeline,
|
|
68
|
+
)
|
|
69
|
+
from sqlspec.typing import ArrowReturnFormat, StatementParameters
|
|
70
|
+
|
|
71
|
+
class _PipelineDriver(Protocol):
    """Structural type for the host driver that OraclePipelineMixin is mixed into.

    Only declares the attributes/methods the mixin actually touches; both the
    sync and async Oracle drivers satisfy it.
    """

    # Default statement configuration used when an operation supplies none.
    statement_config: StatementConfig
    # Feature flags (e.g. "enable_lowercase_column_names") consulted by helpers.
    driver_features: "dict[str, Any]"

    def prepare_statement(
        self,
        statement: "str | Statement | QueryBuilder",
        parameters: "tuple[Any, ...] | dict[str, Any] | None",
        *,
        statement_config: StatementConfig,
        kwargs: "dict[str, Any]",
    ) -> SQL: ...

    def _get_compiled_sql(self, statement: SQL, statement_config: StatementConfig) -> "tuple[str, Any]": ...
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
logger = logging.getLogger(__name__)

# Oracle-specific constants
LARGE_STRING_THRESHOLD = 4000  # Threshold for large string parameters to avoid ORA-01704

# Shared converter used by the LOB coercion helpers below for JSON detection.
_type_converter = OracleTypeConverter()

# Matches names Oracle implicitly upper-cased (A-Z/digits/underscore, not
# starting with a digit); used to decide when lowercasing a column is safe.
IMPLICIT_UPPER_COLUMN_PATTERN: Final[re.Pattern[str]] = re.compile(r"^(?!\d)(?:[A-Z0-9_]+)$")


__all__ = (
    "OracleAsyncDriver",
    "OracleAsyncExceptionHandler",
    "OracleSyncDriver",
    "OracleSyncExceptionHandler",
    "oracledb_statement_config",
)

# Native pipeline execution requires python-oracledb >= 2.4 against a 23c+
# database — presumably the first versions exposing the pipeline API; confirm.
PIPELINE_MIN_DRIVER_VERSION: Final[tuple[int, int, int]] = (2, 4, 0)
PIPELINE_MIN_DATABASE_MAJOR: Final[int] = 23
_VERSION_COMPONENTS: Final[int] = 3  # version strings are normalized to (major, minor, patch)
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def _parse_version_tuple(version: str) -> "tuple[int, int, int]":
|
|
111
|
+
parts = [int(part) for part in version.split(".") if part.isdigit()]
|
|
112
|
+
while len(parts) < _VERSION_COMPONENTS:
|
|
113
|
+
parts.append(0)
|
|
114
|
+
return parts[0], parts[1], parts[2]
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
# Installed python-oracledb version; falls back to (0, 0, 0) when unknown.
_ORACLEDB_VERSION: Final[tuple[int, int, int]] = _parse_version_tuple(getattr(oracledb, "__version__", "0.0.0"))
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
class _CompiledStackOperation(NamedTuple):
    """A stack operation compiled to executable SQL plus execution metadata."""

    # Prepared SQL statement object (later used to build the SQLResult).
    statement: SQL
    # Compiled SQL text handed to the oracledb pipeline.
    sql: str
    # Driver-ready bound parameters (single set, or sets for execute_many).
    parameters: Any
    # Originating stack method: "execute" or "execute_many".
    method: str
    # Whether the statement is expected to produce a result set.
    returns_rows: bool
    # Human-readable statement summary used in error messages.
    summary: str
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
class OraclePipelineMixin:
    """Shared helpers for Oracle pipeline execution.

    Mixed into the sync and async Oracle drivers; assumes the host class
    satisfies the ``_PipelineDriver`` protocol (``statement_config``,
    ``driver_features``, ``prepare_statement``, ``_get_compiled_sql``).
    """

    __slots__ = ()

    def _pipeline_driver(self) -> "_PipelineDriver":
        # Narrow ``self`` to the protocol the host driver implements.
        return cast("_PipelineDriver", self)

    def _stack_native_blocker(self, stack: "StatementStack") -> "str | None":
        """Return a reason string when *stack* cannot use the native pipeline, else None."""
        for operation in stack.operations:
            if operation.method == "execute_arrow":
                return "arrow_operation"
            if operation.method == "execute_script":
                return "script_operation"
        return None

    def _log_pipeline_skip(self, reason: str, stack: "StatementStack") -> None:
        """Log that native pipeline execution was skipped for *stack*."""
        # Explicit env overrides are surfaced at INFO; other skips are DEBUG noise.
        log_level = logging.INFO if reason == "env_override" else logging.DEBUG
        log_with_context(
            logger,
            log_level,
            "stack.native_pipeline.skip",
            driver=type(self).__name__,
            reason=reason,
            hashed_operations=hash_stack_operations(stack),
        )

    def _prepare_pipeline_operation(self, operation: "StackOperation") -> _CompiledStackOperation:
        """Compile a single stack operation into SQL + parameters for the pipeline.

        Raises:
            ValueError: When the operation method is unsupported or an
                ``execute_many`` carries no parameter sets.
        """
        driver = self._pipeline_driver()
        kwargs = dict(operation.keyword_arguments) if operation.keyword_arguments else {}
        # A per-operation statement_config overrides the driver default.
        statement_config = kwargs.pop("statement_config", None)
        config = statement_config or driver.statement_config

        if operation.method == "execute":
            sql_statement = driver.prepare_statement(
                operation.statement, operation.arguments, statement_config=config, kwargs=kwargs
            )
        elif operation.method == "execute_many":
            if not operation.arguments:
                msg = "execute_many stack operation requires parameter sets"
                raise ValueError(msg)
            # Convention: first positional argument holds the parameter sets,
            # the remainder are statement filters.
            parameter_sets = operation.arguments[0]
            filters = operation.arguments[1:]
            sql_statement = self._build_execute_many_statement(
                operation.statement, parameter_sets, filters, config, kwargs
            )
        else:
            msg = f"Unsupported stack operation method: {operation.method}"
            raise ValueError(msg)

        compiled_sql, prepared_parameters = driver._get_compiled_sql(  # pyright: ignore[reportPrivateUsage]
            sql_statement, config
        )
        summary = describe_stack_statement(operation.statement)
        return _CompiledStackOperation(
            statement=sql_statement,
            sql=compiled_sql,
            parameters=prepared_parameters,
            method=operation.method,
            returns_rows=sql_statement.returns_rows(),
            summary=summary,
        )

    def _build_execute_many_statement(
        self,
        statement: "str | Statement | QueryBuilder",
        parameter_sets: "Sequence[StatementParameters]",
        filters: "tuple[StatementParameters | StatementFilter, ...]",
        statement_config: "StatementConfig",
        kwargs: "dict[str, Any]",
    ) -> SQL:
        """Build an ``is_many`` SQL object binding *parameter_sets* to *statement*."""
        driver = self._pipeline_driver()
        if isinstance(statement, SQL):
            # Re-wrap from raw SQL so the new parameter sets replace any bound ones.
            return SQL(statement.raw_sql, parameter_sets, statement_config=statement_config, is_many=True, **kwargs)

        base_statement = driver.prepare_statement(statement, filters, statement_config=statement_config, kwargs=kwargs)
        return SQL(base_statement.raw_sql, parameter_sets, statement_config=statement_config, is_many=True, **kwargs)

    def _add_pipeline_operation(self, pipeline: Any, operation: _CompiledStackOperation) -> None:
        """Append *operation* to an oracledb pipeline object.

        Raises:
            ValueError: If the operation method is not a pipeline-capable one.
        """
        # Normalize None to an empty list for the oracledb pipeline API.
        parameters = operation.parameters or []
        if operation.method == "execute":
            if operation.returns_rows:
                pipeline.add_fetchall(operation.sql, parameters)
            else:
                pipeline.add_execute(operation.sql, parameters)
            return

        if operation.method == "execute_many":
            pipeline.add_executemany(operation.sql, parameters)
            return

        msg = f"Unsupported pipeline operation: {operation.method}"
        raise ValueError(msg)

    def _build_stack_results_from_pipeline(
        self,
        compiled_operations: "Sequence[_CompiledStackOperation]",
        pipeline_results: "Sequence[Any]",
        continue_on_error: bool,
        observer: StackExecutionObserver,
    ) -> "list[StackResult]":
        """Convert raw oracledb pipeline results into StackResult objects.

        In fail-fast mode the first per-operation error raises immediately;
        in continue-on-error mode it is recorded and collection continues.
        """
        stack_results: list[StackResult] = []
        for index, (compiled, result) in enumerate(zip(compiled_operations, pipeline_results, strict=False)):
            error = getattr(result, "error", None)
            if error is not None:
                stack_error = StackExecutionError(
                    index,
                    compiled.summary,
                    error,
                    adapter=type(self).__name__,
                    mode="continue-on-error" if continue_on_error else "fail-fast",
                )
                if continue_on_error:
                    observer.record_operation_error(stack_error)
                    stack_results.append(StackResult.from_error(stack_error))
                    continue
                raise stack_error

            stack_results.append(self._pipeline_result_to_stack_result(compiled, result))
        return stack_results

    def _pipeline_result_to_stack_result(self, operation: _CompiledStackOperation, pipeline_result: Any) -> StackResult:
        """Build a StackResult from one successful pipeline operation result."""
        rows = getattr(pipeline_result, "rows", None)
        columns = getattr(pipeline_result, "columns", None)
        data = self._rows_from_pipeline_result(columns, rows) if operation.returns_rows else None
        metadata: dict[str, Any] = {"pipeline_operation": operation.method}

        # Surface optional oracledb result attributes when present.
        warning = getattr(pipeline_result, "warning", None)
        if warning is not None:
            metadata["warning"] = warning

        return_value = getattr(pipeline_result, "return_value", None)
        if return_value is not None:
            metadata["return_value"] = return_value

        rowcount = self._rows_affected_from_pipeline(operation, pipeline_result, data)
        sql_result = create_sql_result(operation.statement, data=data, rows_affected=rowcount, metadata=metadata)
        return StackResult.from_sql_result(sql_result)

    def _rows_affected_from_pipeline(
        self, operation: _CompiledStackOperation, pipeline_result: Any, data: "list[dict[str, Any]] | None"
    ) -> int:
        """Derive an affected-row count, falling back to heuristics when absent."""
        rowcount = getattr(pipeline_result, "rowcount", None)
        if isinstance(rowcount, int) and rowcount >= 0:
            return rowcount
        if operation.method == "execute_many":
            # Best effort: assume one affected row per parameter set.
            parameter_sets = operation.parameters or ()
            try:
                return len(parameter_sets)
            except TypeError:
                return 0
        if operation.method == "execute" and not operation.returns_rows:
            # Heuristic: a non-returning execute is assumed to touch one row.
            return 1
        if operation.returns_rows:
            return len(data or [])
        return 0

    def _rows_from_pipeline_result(self, columns: Any, rows: Any) -> "list[dict[str, Any]]":
        """Zip pipeline row tuples with column names into a list of dicts."""
        if not rows:
            return []

        driver = self._pipeline_driver()
        if columns:
            names = [getattr(column, "name", f"column_{index}") for index, column in enumerate(columns)]
        else:
            # No column metadata: synthesize positional names from the first row.
            first = rows[0]
            names = [f"column_{index}" for index in range(len(first) if hasattr(first, "__len__") else 0)]
        names = _normalize_column_names(names, driver.driver_features)

        normalized_rows: list[dict[str, Any]] = []
        for row in rows:
            if isinstance(row, dict):
                normalized_rows.append(row)
                continue
            normalized_rows.append(dict(zip(names, row, strict=False)))
        return normalized_rows

    def _wrap_pipeline_error(
        self, error: Exception, stack: "StatementStack", continue_on_error: bool
    ) -> StackExecutionError:
        """Wrap a whole-pipeline failure (index -1: no single operation at fault)."""
        mode = "continue-on-error" if continue_on_error else "fail-fast"
        return StackExecutionError(
            -1, "Oracle pipeline execution failed", error, adapter=type(self).__name__, mode=mode
        )
|
|
313
|
+
|
|
314
|
+
|
|
315
|
+
def _normalize_column_names(column_names: "list[str]", driver_features: "dict[str, Any]") -> "list[str]":
|
|
316
|
+
should_lowercase = driver_features.get("enable_lowercase_column_names", False)
|
|
317
|
+
if not should_lowercase:
|
|
318
|
+
return column_names
|
|
319
|
+
normalized: list[str] = []
|
|
320
|
+
for name in column_names:
|
|
321
|
+
if name and IMPLICIT_UPPER_COLUMN_PATTERN.fullmatch(name):
|
|
322
|
+
normalized.append(name.lower())
|
|
323
|
+
else:
|
|
324
|
+
normalized.append(name)
|
|
325
|
+
return normalized
|
|
326
|
+
|
|
327
|
+
|
|
328
|
+
def _oracle_insert_statement(table: str, columns: "list[str]") -> str:
|
|
329
|
+
column_list = ", ".join(columns)
|
|
330
|
+
placeholders = ", ".join(f":{idx + 1}" for idx in range(len(columns)))
|
|
331
|
+
return f"INSERT INTO {table} ({column_list}) VALUES ({placeholders})"
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
def _oracle_truncate_statement(table: str) -> str:
|
|
335
|
+
return f"TRUNCATE TABLE {table}"
|
|
336
|
+
|
|
337
|
+
|
|
338
|
+
def _coerce_sync_row_values(row: "tuple[Any, ...]") -> "list[Any]":
|
|
339
|
+
"""Coerce LOB handles to concrete values for synchronous execution.
|
|
340
|
+
|
|
341
|
+
Processes each value in the row, reading LOB objects and applying
|
|
342
|
+
type detection for JSON values stored in CLOBs.
|
|
343
|
+
|
|
344
|
+
Args:
|
|
345
|
+
row: Tuple of column values from database fetch.
|
|
346
|
+
|
|
347
|
+
Returns:
|
|
348
|
+
List of coerced values with LOBs read to strings/bytes.
|
|
349
|
+
"""
|
|
350
|
+
coerced_values: list[Any] = []
|
|
351
|
+
for value in row:
|
|
352
|
+
if hasattr(value, "read"):
|
|
353
|
+
try:
|
|
354
|
+
processed_value = value.read()
|
|
355
|
+
except Exception:
|
|
356
|
+
coerced_values.append(value)
|
|
357
|
+
continue
|
|
358
|
+
if isinstance(processed_value, str):
|
|
359
|
+
processed_value = _type_converter.convert_if_detected(processed_value)
|
|
360
|
+
coerced_values.append(processed_value)
|
|
361
|
+
else:
|
|
362
|
+
coerced_values.append(value)
|
|
363
|
+
return coerced_values
|
|
364
|
+
|
|
365
|
+
|
|
366
|
+
async def _coerce_async_row_values(row: "tuple[Any, ...]") -> "list[Any]":
|
|
367
|
+
"""Coerce LOB handles to concrete values for asynchronous execution.
|
|
368
|
+
|
|
369
|
+
Processes each value in the row, reading LOB objects asynchronously
|
|
370
|
+
and applying type detection for JSON values stored in CLOBs.
|
|
371
|
+
|
|
372
|
+
Args:
|
|
373
|
+
row: Tuple of column values from database fetch.
|
|
374
|
+
|
|
375
|
+
Returns:
|
|
376
|
+
List of coerced values with LOBs read to strings/bytes.
|
|
377
|
+
"""
|
|
378
|
+
coerced_values: list[Any] = []
|
|
379
|
+
for value in row:
|
|
380
|
+
if hasattr(value, "read"):
|
|
381
|
+
try:
|
|
382
|
+
processed_value = await _type_converter.process_lob(value)
|
|
383
|
+
except Exception:
|
|
384
|
+
coerced_values.append(value)
|
|
385
|
+
continue
|
|
386
|
+
if isinstance(processed_value, str):
|
|
387
|
+
processed_value = _type_converter.convert_if_detected(processed_value)
|
|
388
|
+
coerced_values.append(processed_value)
|
|
389
|
+
else:
|
|
390
|
+
coerced_values.append(value)
|
|
391
|
+
return coerced_values
|
|
392
|
+
|
|
393
|
+
|
|
394
|
+
# Oracle error codes consulted by the exception mappers below.
ORA_CHECK_CONSTRAINT = 2290  # ORA-02290: check constraint violated
ORA_INTEGRITY_RANGE_START = 2200  # ORA-022xx range: general integrity-constraint errors
ORA_INTEGRITY_RANGE_END = 2300
ORA_PARSING_RANGE_START = 900  # ORA-009xx range: SQL parse/syntax errors
ORA_PARSING_RANGE_END = 1000
ORA_TABLESPACE_FULL = 1652  # ORA-01652: tablespace/temp space exhausted — mapped to OperationalError
|
|
400
|
+
|
|
401
|
+
|
|
402
|
+
class OracleSyncCursor:
    """Context manager that opens an Oracle cursor on enter and closes it on exit."""

    __slots__ = ("connection", "cursor")

    def __init__(self, connection: OracleSyncConnection) -> None:
        self.connection = connection
        self.cursor: Cursor | None = None

    def __enter__(self) -> Cursor:
        self.cursor = self.connection.cursor()
        return self.cursor

    def __exit__(self, *_: Any) -> None:
        # Guard against exiting without a successful __enter__.
        open_cursor = self.cursor
        if open_cursor is None:
            return
        open_cursor.close()
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
class OracleAsyncCursor:
    """Async context manager that opens an Oracle cursor on enter and best-effort
    closes it on exit."""

    __slots__ = ("connection", "cursor")

    def __init__(self, connection: OracleAsyncConnection) -> None:
        self.connection = connection
        self.cursor: AsyncCursor | None = None

    async def __aenter__(self) -> AsyncCursor:
        self.cursor = self.connection.cursor()
        return self.cursor

    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        del exc_type, exc_val, exc_tb  # unused; signature mandated by the protocol
        if self.cursor is None:
            return
        # Oracle async cursors expose a synchronous close(); suppress any
        # close-time error so cleanup never masks an in-flight exception.
        with contextlib.suppress(Exception):
            self.cursor.close()
|
|
440
|
+
|
|
441
|
+
|
|
442
|
+
class OracleExceptionHandler:
|
|
443
|
+
"""Context manager for handling Oracle database exceptions.
|
|
444
|
+
|
|
445
|
+
Maps Oracle ORA-XXXXX error codes to specific SQLSpec exceptions
|
|
446
|
+
for better error handling in application code.
|
|
447
|
+
"""
|
|
448
|
+
|
|
449
|
+
__slots__ = ()
|
|
450
|
+
|
|
451
|
+
    def _map_oracle_exception(self, e: Any) -> None:
        """Map an Oracle exception to the matching SQLSpec exception.

        Dispatches on the numeric ORA- code carried by the first element of
        ``e.args``. Each ``_raise_*`` helper raises, so this method never
        returns normally.

        Args:
            e: oracledb.DatabaseError instance
        """
        error_obj = e.args[0] if e.args else None
        if not error_obj:
            # NOTE(review): relies on _raise_generic_error always raising;
            # otherwise control would fall through to the getattr below — confirm.
            self._raise_generic_error(e, None)

        error_code = getattr(error_obj, "code", None)

        if not error_code:
            self._raise_generic_error(e, None)

        if error_code == 1:
            # ORA-00001: unique constraint violated
            self._raise_unique_violation(e, error_code)
        elif error_code in {2291, 2292}:
            # ORA-02291/02292: parent/child foreign-key violations
            self._raise_foreign_key_violation(e, error_code)
        elif error_code == ORA_CHECK_CONSTRAINT:
            self._raise_check_violation(e, error_code)
        elif error_code in {1400, 1407}:
            # ORA-01400/01407: cannot insert/update NULL
            self._raise_not_null_violation(e, error_code)
        elif error_code and ORA_INTEGRITY_RANGE_START <= error_code < ORA_INTEGRITY_RANGE_END:
            # Remaining ORA-022xx codes: generic integrity violations
            self._raise_integrity_error(e, error_code)
        elif error_code in {1017, 12154, 12541, 12545, 12514, 12505}:
            # Logon denied plus TNS resolution / listener failures
            self._raise_connection_error(e, error_code)
        elif error_code in {60, 8176}:
            # ORA-00060 deadlock and related transaction conflicts
            self._raise_transaction_error(e, error_code)
        elif error_code in {1722, 1858, 1840}:
            # Invalid number / date conversion errors
            self._raise_data_error(e, error_code)
        elif error_code and ORA_PARSING_RANGE_START <= error_code < ORA_PARSING_RANGE_END:
            self._raise_parsing_error(e, error_code)
        elif error_code == ORA_TABLESPACE_FULL:
            self._raise_operational_error(e, error_code)
        else:
            self._raise_generic_error(e, error_code)
|
|
488
|
+
|
|
489
|
+
def _raise_unique_violation(self, e: Any, code: int) -> None:
    """Raise UniqueViolationError for a duplicate-key failure (ORA-00001)."""
    raise UniqueViolationError(f"Oracle unique constraint violation [ORA-{code:05d}]: {e}") from e
|
|
492
|
+
|
|
493
|
+
def _raise_foreign_key_violation(self, e: Any, code: int) -> None:
    """Raise ForeignKeyViolationError for referential-integrity failures."""
    raise ForeignKeyViolationError(f"Oracle foreign key constraint violation [ORA-{code:05d}]: {e}") from e
|
|
496
|
+
|
|
497
|
+
def _raise_check_violation(self, e: Any, code: int) -> None:
    """Raise CheckViolationError for a violated CHECK constraint."""
    raise CheckViolationError(f"Oracle check constraint violation [ORA-{code:05d}]: {e}") from e
|
|
500
|
+
|
|
501
|
+
def _raise_not_null_violation(self, e: Any, code: int) -> None:
    """Raise NotNullViolationError for NULL written to a NOT NULL column."""
    raise NotNullViolationError(f"Oracle not-null constraint violation [ORA-{code:05d}]: {e}") from e
|
|
504
|
+
|
|
505
|
+
def _raise_integrity_error(self, e: Any, code: int) -> None:
    """Raise IntegrityError for other integrity-constraint failures."""
    raise IntegrityError(f"Oracle integrity constraint violation [ORA-{code:05d}]: {e}") from e
|
|
508
|
+
|
|
509
|
+
def _raise_parsing_error(self, e: Any, code: int) -> None:
    """Raise SQLParsingError for SQL syntax errors."""
    raise SQLParsingError(f"Oracle SQL syntax error [ORA-{code:05d}]: {e}") from e
|
|
512
|
+
|
|
513
|
+
def _raise_connection_error(self, e: Any, code: int) -> None:
    """Raise DatabaseConnectionError for login/TNS/listener failures."""
    raise DatabaseConnectionError(f"Oracle connection error [ORA-{code:05d}]: {e}") from e
|
|
516
|
+
|
|
517
|
+
def _raise_transaction_error(self, e: Any, code: int) -> None:
    """Raise TransactionError for deadlocks and read-consistency failures."""
    raise TransactionError(f"Oracle transaction error [ORA-{code:05d}]: {e}") from e
|
|
520
|
+
|
|
521
|
+
def _raise_data_error(self, e: Any, code: int) -> None:
    """Raise DataError for value-conversion failures (bad number/date)."""
    raise DataError(f"Oracle data error [ORA-{code:05d}]: {e}") from e
|
|
524
|
+
|
|
525
|
+
def _raise_operational_error(self, e: Any, code: int) -> None:
    """Raise OperationalError for server-side resource problems."""
    raise OperationalError(f"Oracle operational error [ORA-{code:05d}]: {e}") from e
|
|
528
|
+
|
|
529
|
+
def _raise_generic_error(self, e: Any, code: "int | None") -> None:
    """Raise the catch-all SQLSpecError, tagging the ORA code when known."""
    if code:
        raise SQLSpecError(f"Oracle database error [ORA-{code:05d}]: {e}") from e
    raise SQLSpecError(f"Oracle database error: {e}") from e
|
|
532
|
+
|
|
533
|
+
|
|
534
|
+
class OracleSyncExceptionHandler(OracleExceptionHandler):
    """Synchronous context manager translating Oracle errors to SQLSpec errors.

    On exit, any ``oracledb.DatabaseError`` raised inside the ``with`` body is
    remapped (via ``_map_oracle_exception``) to the most specific SQLSpec
    exception; other exceptions propagate unchanged.
    """

    def __enter__(self) -> None:
        return None

    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        _ = exc_tb
        if exc_type is not None and issubclass(exc_type, oracledb.DatabaseError):
            self._map_oracle_exception(exc_val)
|
|
550
|
+
|
|
551
|
+
|
|
552
|
+
class OracleAsyncExceptionHandler(OracleExceptionHandler):
    """Asynchronous context manager translating Oracle errors to SQLSpec errors.

    On exit, any ``oracledb.DatabaseError`` raised inside the ``async with``
    body is remapped (via ``_map_oracle_exception``) to the most specific
    SQLSpec exception; other exceptions propagate unchanged.
    """

    async def __aenter__(self) -> None:
        return None

    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        _ = exc_tb
        if exc_type is not None and issubclass(exc_type, oracledb.DatabaseError):
            self._map_oracle_exception(exc_val)
|
|
568
|
+
|
|
569
|
+
|
|
570
|
+
class OracleSyncDriver(OraclePipelineMixin, SyncDriverAdapterBase):
|
|
571
|
+
"""Synchronous Oracle Database driver.
|
|
572
|
+
|
|
573
|
+
Provides Oracle Database connectivity with parameter style conversion,
|
|
574
|
+
error handling, and transaction management.
|
|
575
|
+
"""
|
|
576
|
+
|
|
577
|
+
__slots__ = ("_data_dictionary", "_oracle_version", "_pipeline_support", "_pipeline_support_reason")
|
|
578
|
+
dialect = "oracle"
|
|
579
|
+
|
|
580
|
+
def __init__(
    self,
    connection: OracleSyncConnection,
    statement_config: "StatementConfig | None" = None,
    driver_features: "dict[str, Any] | None" = None,
) -> None:
    """Initialize the synchronous Oracle driver.

    Args:
        connection: Open synchronous oracledb connection.
        statement_config: Optional statement configuration; when omitted, the
            module default is used with caching derived from the global cache
            config and parsing/validation enabled for the ``oracle`` dialect.
        driver_features: Optional adapter feature flags.
    """
    if statement_config is None:
        statement_config = oracledb_statement_config.replace(
            enable_caching=get_cache_config().compiled_cache_enabled,
            enable_parsing=True,
            enable_validation=True,
            dialect="oracle",
        )

    super().__init__(connection=connection, statement_config=statement_config, driver_features=driver_features)
    # Lazily populated caches (see data_dictionary / _pipeline_native_supported
    # / _detect_oracle_version).
    self._data_dictionary: SyncDataDictionaryBase | None = None
    self._pipeline_support: bool | None = None
    self._pipeline_support_reason: str | None = None
    self._oracle_version: VersionInfo | None = None
|
|
600
|
+
|
|
601
|
+
def with_cursor(self, connection: OracleSyncConnection) -> OracleSyncCursor:
    """Wrap *connection* in a cursor context manager.

    Args:
        connection: Oracle database connection

    Returns:
        Cursor context manager bound to the given connection.
    """
    return OracleSyncCursor(connection)
|
|
611
|
+
|
|
612
|
+
def handle_database_exceptions(self) -> "AbstractContextManager[None]":
    """Return a context manager that remaps oracledb errors to SQLSpec errors."""
    return OracleSyncExceptionHandler()
|
|
615
|
+
|
|
616
|
+
def _try_special_handling(self, cursor: Any, statement: "SQL") -> "SQLResult | None":
    """Hook for driver-specific fast paths; Oracle has none.

    Unlike e.g. PostgreSQL COPY, Oracle needs no special-cased operations, so
    this always defers to the standard execution path.

    Args:
        cursor: Oracle cursor object (unused)
        statement: SQL statement to analyze (unused)

    Returns:
        Always None, meaning "use standard execution".
    """
    _ = cursor
    _ = statement
    return None
|
|
631
|
+
|
|
632
|
+
def _execute_script(self, cursor: Any, statement: "SQL") -> "ExecutionResult":
    """Execute a SQL script by splitting it into individual statements.

    The script is compiled once, split on statement boundaries, and each
    piece is executed with the same compiled parameter set bound to it
    (an empty dict when there are no parameters).

    NOTE(review): each split statement receives the full ``prepared_parameters``
    mapping; statements that do not reference every bind may depend on
    oracledb tolerating unused binds — confirm against the compiler's
    script-mode output.

    Args:
        cursor: Oracle cursor object
        statement: SQL script statement to execute

    Returns:
        Execution result containing statement count and success information
    """
    sql, prepared_parameters = self._get_compiled_sql(statement, self.statement_config)
    statements = self.split_script_statements(sql, statement.statement_config, strip_trailing_semicolon=True)

    successful_count = 0
    last_cursor = cursor

    # Any failure raises out of the loop, so successful_count reflects the
    # number of statements that completed before the error.
    for stmt in statements:
        cursor.execute(stmt, prepared_parameters or {})
        successful_count += 1

    return self.create_execution_result(
        last_cursor, statement_count=len(statements), successful_statements=successful_count, is_script_result=True
    )
|
|
657
|
+
|
|
658
|
+
def execute_stack(self, stack: "StatementStack", *, continue_on_error: bool = False) -> "tuple[StackResult, ...]":
    """Execute a StatementStack, preferring Oracle's native pipeline.

    Falls back to the sequential base implementation when the stack is empty
    or not a StatementStack, when a stack-level blocker forbids the native
    path, or when the driver/database does not support pipelining.
    """
    if not isinstance(stack, StatementStack) or not stack:
        return super().execute_stack(stack, continue_on_error=continue_on_error)

    skip_reason = self._stack_native_blocker(stack)
    if skip_reason is None and not self._pipeline_native_supported():
        skip_reason = self._pipeline_support_reason or "database_version"
    if skip_reason is not None:
        self._log_pipeline_skip(skip_reason, stack)
        return super().execute_stack(stack, continue_on_error=continue_on_error)

    return self._execute_stack_native(stack, continue_on_error=continue_on_error)
|
|
674
|
+
|
|
675
|
+
def _execute_many(self, cursor: Any, statement: "SQL") -> "ExecutionResult":
    """Execute one statement against many parameter sets via executemany.

    Args:
        cursor: Oracle cursor object
        statement: SQL statement with multiple parameter sets

    Returns:
        Execution result whose row count equals the number of parameter sets.

    Raises:
        ValueError: If no parameters are provided
    """
    sql, prepared_parameters = self._get_compiled_sql(statement, self.statement_config)

    if not prepared_parameters:
        msg = "execute_many requires parameters"
        raise ValueError(msg)

    # oracledb's executemany wants a list of parameter sequences; normalize a
    # tuple-of-sequences into a list before the call.
    if isinstance(prepared_parameters, tuple):
        prepared_parameters = list(prepared_parameters)

    cursor.executemany(sql, prepared_parameters)

    # Report one affected row per parameter set.
    affected_rows = len(prepared_parameters) if prepared_parameters else 0

    return self.create_execution_result(cursor, rowcount_override=affected_rows, is_many_result=True)
|
|
706
|
+
|
|
707
|
+
def _execute_stack_native(self, stack: "StatementStack", *, continue_on_error: bool) -> "ExecutionResult":
    """Run a StatementStack through Oracle's native pipeline API.

    When ``continue_on_error`` is False and no transaction is already open,
    a transaction is started and committed on success / rolled back on
    failure. Any failure is re-raised wrapped via ``_wrap_pipeline_error``.
    """
    compiled_operations = [self._prepare_pipeline_operation(op) for op in stack.operations]
    pipeline = oracledb.create_pipeline()
    for compiled in compiled_operations:
        self._add_pipeline_operation(pipeline, compiled)

    results: list[StackResult] = []
    started_transaction = False

    with StackExecutionObserver(self, stack, continue_on_error, native_pipeline=True) as observer:
        try:
            # Only own the transaction when we want all-or-nothing semantics
            # and the caller has not already opened one.
            if not continue_on_error and not self._connection_in_transaction():
                self.begin()
                started_transaction = True

            pipeline_results = self.connection.run_pipeline(pipeline, continue_on_error=continue_on_error)
            results = self._build_stack_results_from_pipeline(
                compiled_operations, pipeline_results, continue_on_error, observer
            )

            if started_transaction:
                self.commit()
        except Exception as exc:
            # Best-effort rollback of a transaction we started; rollback
            # failures are logged but must not mask the original error.
            if started_transaction:
                try:
                    self.rollback()
                except Exception as rollback_error:  # pragma: no cover - diagnostics only
                    logger.debug("Rollback after pipeline failure failed: %s", rollback_error)
            raise self._wrap_pipeline_error(exc, stack, continue_on_error) from exc

    return tuple(results)
|
|
738
|
+
|
|
739
|
+
def _execute_statement(self, cursor: Any, statement: "SQL") -> "ExecutionResult":
    """Execute a single SQL statement with Oracle data handling.

    Oversized string binds are promoted to temporary CLOBs before execution
    (avoids ORA-01704). SELECT rows are fetched eagerly, LOB values are
    hydrated, and rows are returned as dicts keyed by normalized column names.

    Args:
        cursor: Oracle cursor object
        statement: SQL statement to execute

    Returns:
        Execution result containing data for SELECT statements or row count for others
    """
    sql, prepared_parameters = self._get_compiled_sql(statement, self.statement_config)

    # Oracle-specific: promote large string binds to CLOBs to avoid ORA-01704
    # ("string literal too long"). Mutates the bind mapping in place.
    if prepared_parameters and isinstance(prepared_parameters, dict):
        for param_name, param_value in prepared_parameters.items():
            if isinstance(param_value, str) and len(param_value) > LARGE_STRING_THRESHOLD:
                clob = self.connection.createlob(oracledb.DB_TYPE_CLOB)
                clob.write(param_value)
                prepared_parameters[param_name] = clob

    cursor.execute(sql, prepared_parameters or {})

    # SELECT result processing for Oracle
    if statement.returns_rows():
        fetched_data = cursor.fetchall()
        column_names = [col[0] for col in cursor.description or []]
        # Apply the driver's column-name casing policy.
        column_names = _normalize_column_names(column_names, self.driver_features)

        # Oracle returns tuples - convert to dicts after LOB hydration so
        # callers never see live LOB handles.
        data = [dict(zip(column_names, _coerce_sync_row_values(row), strict=False)) for row in fetched_data]

        return self.create_execution_result(
            cursor, selected_data=data, column_names=column_names, data_row_count=len(data), is_select_result=True
        )

    # Non-SELECT result processing
    affected_rows = cursor.rowcount if cursor.rowcount is not None else 0
    return self.create_execution_result(cursor, rowcount_override=affected_rows)
|
|
777
|
+
|
|
778
|
+
def select_to_storage(
    self,
    statement: "Statement | QueryBuilder | SQL | str",
    destination: "StorageDestination",
    /,
    *parameters: "StatementParameters | StatementFilter",
    statement_config: "StatementConfig | None" = None,
    partitioner: "dict[str, Any] | None" = None,
    format_hint: "StorageFormat | None" = None,
    telemetry: "StorageTelemetry | None" = None,
    **kwargs: Any,
) -> "StorageBridgeJob":
    """Execute a query and stream its Arrow-formatted output to storage (sync).

    Requires the ``arrow_export_enabled`` capability; returns a storage job
    describing the completed write.
    """
    self._require_capability("arrow_export_enabled")
    result = self.select_to_arrow(statement, *parameters, statement_config=statement_config, **kwargs)
    pipeline: SyncStoragePipeline = cast("SyncStoragePipeline", self._storage_pipeline())
    payload = self._write_result_to_storage_sync(result, destination, format_hint=format_hint, pipeline=pipeline)
    self._attach_partition_telemetry(payload, partitioner)
    return self._create_storage_job(payload, telemetry)
|
|
800
|
+
|
|
801
|
+
def _detect_oracle_version(self) -> "VersionInfo | None":
    """Return the Oracle server version, caching a successful lookup."""
    if self._oracle_version is None:
        self._oracle_version = self.data_dictionary.get_version(self)
    return self._oracle_version
|
|
807
|
+
|
|
808
|
+
def _detect_oracledb_version(self) -> "tuple[int, int, int]":
    """Return the installed python-oracledb version as (major, minor, patch)."""
    return _ORACLEDB_VERSION
|
|
810
|
+
|
|
811
|
+
def _pipeline_native_supported(self) -> bool:
    """Decide (and cache) whether native pipelining can be used.

    Requires: the env override to be unset, a new-enough python-oracledb,
    a connection exposing ``run_pipeline``, and an Oracle server of at least
    ``PIPELINE_MIN_DATABASE_MAJOR``. The reason for an unsupported verdict is
    stored in ``_pipeline_support_reason``.
    """
    if self._pipeline_support is not None:
        return self._pipeline_support

    supported, reason = True, None
    if self.stack_native_disabled:
        supported, reason = False, "env_override"
    elif self._detect_oracledb_version() < PIPELINE_MIN_DRIVER_VERSION:
        supported, reason = False, "driver_version"
    elif not hasattr(self.connection, "run_pipeline"):
        supported, reason = False, "driver_api_missing"
    else:
        version_info = self._detect_oracle_version()
        if not (version_info and version_info.major >= PIPELINE_MIN_DATABASE_MAJOR):
            supported, reason = False, "database_version"

    self._pipeline_support = supported
    self._pipeline_support_reason = reason
    return supported
|
|
839
|
+
|
|
840
|
+
def load_from_arrow(
    self,
    table: str,
    source: "ArrowResult | Any",
    *,
    partitioner: "dict[str, Any] | None" = None,
    overwrite: bool = False,
    telemetry: "StorageTelemetry | None" = None,
) -> "StorageBridgeJob":
    """Load Arrow data into an Oracle table via batched executemany.

    Requires the ``arrow_import_enabled`` capability. When ``overwrite`` is
    True the target table is truncated first. Empty sources skip the INSERT
    entirely but still produce a telemetry-bearing job.

    Args:
        table: Destination table name.
        source: ArrowResult or anything coercible to a pyarrow table.
        partitioner: Optional partitioning metadata for telemetry.
        overwrite: Truncate the table before loading.
        telemetry: Optional inbound telemetry to merge into the job.

    Returns:
        StorageBridgeJob describing the completed load.
    """
    self._require_capability("arrow_import_enabled")
    arrow_table = self._coerce_arrow_table(source)
    if overwrite:
        self._truncate_table_sync(table)
    columns, records = self._arrow_table_to_rows(arrow_table)
    if records:
        statement = _oracle_insert_statement(table, columns)
        # Exception handler remaps oracledb errors raised by executemany.
        with self.with_cursor(self.connection) as cursor, self.handle_database_exceptions():
            cursor.executemany(statement, records)
    telemetry_payload = self._build_ingest_telemetry(arrow_table)
    telemetry_payload["destination"] = table
    self._attach_partition_telemetry(telemetry_payload, partitioner)
    return self._create_storage_job(telemetry_payload, telemetry)
|
|
864
|
+
|
|
865
|
+
def load_from_storage(
    self,
    table: str,
    source: "StorageDestination",
    *,
    file_format: "StorageFormat",
    partitioner: "dict[str, Any] | None" = None,
    overwrite: bool = False,
) -> "StorageBridgeJob":
    """Read a staged artifact from storage and load it into Oracle."""
    table_data, inbound_telemetry = self._read_arrow_from_storage_sync(source, file_format=file_format)
    return self.load_from_arrow(
        table, table_data, partitioner=partitioner, overwrite=overwrite, telemetry=inbound_telemetry
    )
|
|
878
|
+
|
|
879
|
+
# Oracle transaction management
|
|
880
|
+
def begin(self) -> None:
    """Begin a database transaction.

    oracledb starts transactions implicitly on the first statement, so this
    is intentionally a no-op.
    """
    return None
|
|
886
|
+
|
|
887
|
+
def rollback(self) -> None:
    """Rollback the current transaction.

    Raises:
        SQLSpecError: If rollback fails
    """
    try:
        self.connection.rollback()
    except oracledb.Error as exc:
        raise SQLSpecError(f"Failed to rollback Oracle transaction: {exc}") from exc
|
|
898
|
+
|
|
899
|
+
def commit(self) -> None:
    """Commit the current transaction.

    Raises:
        SQLSpecError: If commit fails
    """
    try:
        self.connection.commit()
    except oracledb.Error as exc:
        raise SQLSpecError(f"Failed to commit Oracle transaction: {exc}") from exc
|
|
910
|
+
|
|
911
|
+
def select_to_arrow(
    self,
    statement: "Statement | QueryBuilder",
    /,
    *parameters: "StatementParameters | StatementFilter",
    statement_config: "StatementConfig | None" = None,
    return_format: "ArrowReturnFormat" = "table",
    native_only: bool = False,
    batch_size: int | None = None,
    arrow_schema: Any = None,
    **kwargs: Any,
) -> "Any":
    """Execute a query and return results in Apache Arrow format.

    Two paths exist:

    * ``native_only`` exactly ``False`` (the default): delegate to the base
      class's generic conversion path.
    * any other value: use Oracle's native ``fetch_df_all()``, which returns
      a dataframe exposing the Arrow PyCapsule interface for zero-copy
      transfer into a ``pyarrow.Table``.

    NOTE(review): the gate is an identity check (``native_only is False``),
    so falsy non-``False`` values like ``0`` or ``None`` take the NATIVE
    path — confirm this asymmetry is intended.

    Args:
        statement: SQL query string, Statement, or QueryBuilder
        *parameters: Query parameters (same format as execute()/select())
        statement_config: Optional statement configuration override
        return_format: "table" for pyarrow.Table (default), "batches" for RecordBatch
        native_only: False (default) uses the base conversion path; truthy
            values use Oracle's native fetch_df_all()
        batch_size: Rows per fetch batch / arraysize hint (defaults to 1000)
        arrow_schema: Optional pyarrow.Schema the result is cast to
        **kwargs: Additional keyword arguments

    Returns:
        ArrowResult containing pyarrow.Table or RecordBatch

    Raises:
        TypeError: If ``arrow_schema`` is not a ``pyarrow.Schema``.

    Examples:
        >>> result = driver.select_to_arrow(
        ...     "SELECT * FROM users WHERE age > :1", (18,)
        ... )
        >>> df = result.to_pandas()
        >>> print(df.head())
    """
    # Check pyarrow is available
    ensure_pyarrow()

    # Exactly-False means the caller wants the generic base conversion path.
    if native_only is False:
        return super().select_to_arrow(
            statement,
            *parameters,
            statement_config=statement_config,
            return_format=return_format,
            native_only=native_only,
            batch_size=batch_size,
            arrow_schema=arrow_schema,
            **kwargs,
        )

    import pyarrow as pa

    # Prepare statement with parameters
    config = statement_config or self.statement_config
    prepared_statement = self.prepare_statement(statement, parameters, statement_config=config, kwargs=kwargs)
    sql, prepared_parameters = self._get_compiled_sql(prepared_statement, config)

    # Use Oracle's native fetch_df_all() for zero-copy Arrow transfer
    oracle_df = self.connection.fetch_df_all(
        statement=sql, parameters=prepared_parameters or [], arraysize=batch_size or 1000
    )

    # Convert OracleDataFrame to PyArrow Table using PyCapsule interface
    arrow_table = pa.table(oracle_df)

    # Apply schema casting if provided
    if arrow_schema is not None:
        if not isinstance(arrow_schema, pa.Schema):
            msg = f"arrow_schema must be a pyarrow.Schema, got {type(arrow_schema).__name__}"
            raise TypeError(msg)
        arrow_table = arrow_table.cast(arrow_schema)

    # "batches" returns only the FIRST batch (or an empty RecordBatch).
    if return_format == "batches":
        batches = arrow_table.to_batches()
        arrow_data: Any = batches[0] if batches else pa.RecordBatch.from_pydict({})
    else:
        arrow_data = arrow_table

    # Get row count
    rows_affected = len(arrow_table)

    return create_arrow_result(statement=prepared_statement, data=arrow_data, rows_affected=rows_affected)
|
|
998
|
+
|
|
999
|
+
@property
def data_dictionary(self) -> "SyncDataDictionaryBase":
    """Lazily created data dictionary used for metadata/version queries.

    Returns:
        Data dictionary instance for metadata queries
    """
    if self._data_dictionary is None:
        self._data_dictionary = OracleSyncDataDictionary()
    return self._data_dictionary
|
|
1009
|
+
|
|
1010
|
+
def _truncate_table_sync(self, table: str) -> None:
    """Issue a TRUNCATE against *table*, remapping oracledb errors."""
    truncate_sql = _oracle_truncate_statement(table)
    with self.handle_database_exceptions():
        self.connection.execute(truncate_sql)
|
|
1014
|
+
|
|
1015
|
+
|
|
1016
|
+
class OracleAsyncDriver(OraclePipelineMixin, AsyncDriverAdapterBase):
|
|
1017
|
+
"""Asynchronous Oracle Database driver.
|
|
1018
|
+
|
|
1019
|
+
Provides Oracle Database connectivity with parameter style conversion,
|
|
1020
|
+
error handling, and transaction management for async operations.
|
|
1021
|
+
"""
|
|
1022
|
+
|
|
1023
|
+
__slots__ = ("_data_dictionary", "_oracle_version", "_pipeline_support", "_pipeline_support_reason")
|
|
1024
|
+
dialect = "oracle"
|
|
1025
|
+
|
|
1026
|
+
def __init__(
    self,
    connection: OracleAsyncConnection,
    statement_config: "StatementConfig | None" = None,
    driver_features: "dict[str, Any] | None" = None,
) -> None:
    """Initialize the asynchronous Oracle driver.

    Args:
        connection: Open asynchronous oracledb connection.
        statement_config: Optional statement configuration; when omitted, the
            module default is used with caching derived from the global cache
            config and parsing/validation enabled for the ``oracle`` dialect.
        driver_features: Optional adapter feature flags.
    """
    if statement_config is None:
        statement_config = oracledb_statement_config.replace(
            enable_caching=get_cache_config().compiled_cache_enabled,
            enable_parsing=True,
            enable_validation=True,
            dialect="oracle",
        )

    super().__init__(connection=connection, statement_config=statement_config, driver_features=driver_features)
    # Lazily populated caches (see data_dictionary / _pipeline_native_supported
    # / _detect_oracle_version).
    self._data_dictionary: AsyncDataDictionaryBase | None = None
    self._pipeline_support: bool | None = None
    self._pipeline_support_reason: str | None = None
    self._oracle_version: VersionInfo | None = None
|
|
1046
|
+
|
|
1047
|
+
def with_cursor(self, connection: OracleAsyncConnection) -> OracleAsyncCursor:
    """Wrap *connection* in an async cursor context manager.

    Args:
        connection: Oracle database connection

    Returns:
        Async cursor context manager bound to the given connection.
    """
    return OracleAsyncCursor(connection)
|
|
1057
|
+
|
|
1058
|
+
def handle_database_exceptions(self) -> "AbstractAsyncContextManager[None]":
    """Return an async context manager that remaps oracledb errors to SQLSpec errors."""
    return OracleAsyncExceptionHandler()
|
|
1061
|
+
|
|
1062
|
+
async def _try_special_handling(self, cursor: Any, statement: "SQL") -> "SQLResult | None":
    """Hook for driver-specific fast paths; Oracle has none.

    Unlike e.g. PostgreSQL COPY, Oracle needs no special-cased operations, so
    this always defers to the standard execution path.

    Args:
        cursor: Oracle cursor object (unused)
        statement: SQL statement to analyze (unused)

    Returns:
        Always None, meaning "use standard execution".
    """
    _ = cursor
    _ = statement
    return None
|
|
1077
|
+
|
|
1078
|
+
async def _execute_script(self, cursor: Any, statement: "SQL") -> "ExecutionResult":
    """Execute a SQL script by splitting it into individual statements.

    The script is compiled once, split on statement boundaries, and each
    piece is executed with the same compiled parameter set bound to it
    (an empty dict when there are no parameters).

    NOTE(review): each split statement receives the full ``prepared_parameters``
    mapping; statements that do not reference every bind may depend on
    oracledb tolerating unused binds — confirm against the compiler's
    script-mode output.

    Args:
        cursor: Oracle cursor object
        statement: SQL script statement to execute

    Returns:
        Execution result containing statement count and success information
    """
    sql, prepared_parameters = self._get_compiled_sql(statement, self.statement_config)
    statements = self.split_script_statements(sql, statement.statement_config, strip_trailing_semicolon=True)

    successful_count = 0
    last_cursor = cursor

    # Any failure raises out of the loop, so successful_count reflects the
    # number of statements that completed before the error.
    for stmt in statements:
        await cursor.execute(stmt, prepared_parameters or {})
        successful_count += 1

    return self.create_execution_result(
        last_cursor, statement_count=len(statements), successful_statements=successful_count, is_script_result=True
    )
|
|
1103
|
+
|
|
1104
|
+
async def execute_stack(
    self, stack: "StatementStack", *, continue_on_error: bool = False
) -> "tuple[StackResult, ...]":
    """Execute a StatementStack, preferring Oracle's native pipeline.

    Falls back to the sequential base implementation when the stack is empty
    or not a StatementStack, when a stack-level blocker forbids the native
    path, or when the driver/database does not support pipelining.
    """
    if not isinstance(stack, StatementStack) or not stack:
        return await super().execute_stack(stack, continue_on_error=continue_on_error)

    skip_reason = self._stack_native_blocker(stack)
    if skip_reason is None and not await self._pipeline_native_supported():
        skip_reason = self._pipeline_support_reason or "database_version"
    if skip_reason is not None:
        self._log_pipeline_skip(skip_reason, stack)
        return await super().execute_stack(stack, continue_on_error=continue_on_error)

    return await self._execute_stack_native(stack, continue_on_error=continue_on_error)
|
|
1122
|
+
|
|
1123
|
+
async def _execute_many(self, cursor: Any, statement: "SQL") -> "ExecutionResult":
    """Execute one statement against many parameter sets via executemany.

    Args:
        cursor: Oracle cursor object
        statement: SQL statement with multiple parameter sets

    Returns:
        Execution result whose row count equals the number of parameter sets.

    Raises:
        ValueError: If no parameters are provided
    """
    sql, prepared_parameters = self._get_compiled_sql(statement, self.statement_config)

    # Parameter validation for executemany
    if not prepared_parameters:
        msg = "execute_many requires parameters"
        raise ValueError(msg)

    # Consistency fix (matches the sync driver): oracledb's executemany
    # expects a list of parameter sequences, not a tuple of sequences.
    if isinstance(prepared_parameters, tuple):
        prepared_parameters = list(prepared_parameters)

    await cursor.executemany(sql, prepared_parameters)

    # Calculate affected rows based on parameter count
    affected_rows = len(prepared_parameters) if prepared_parameters else 0

    return self.create_execution_result(cursor, rowcount_override=affected_rows, is_many_result=True)
|
|
1149
|
+
|
|
1150
|
+
async def _execute_stack_native(
    self, stack: "StatementStack", *, continue_on_error: bool
) -> "tuple[StackResult, ...]":
    """Run a StatementStack through Oracle's native pipeline API (async).

    When ``continue_on_error`` is False and no transaction is already open,
    a transaction is started and committed on success / rolled back on
    failure. Any failure is re-raised wrapped via ``_wrap_pipeline_error``.
    """
    compiled_operations = [self._prepare_pipeline_operation(op) for op in stack.operations]
    pipeline = oracledb.create_pipeline()
    for compiled in compiled_operations:
        self._add_pipeline_operation(pipeline, compiled)

    results: list[StackResult] = []
    started_transaction = False

    # NOTE: the observer is a synchronous context manager even on the async path.
    with StackExecutionObserver(self, stack, continue_on_error, native_pipeline=True) as observer:
        try:
            # Only own the transaction when we want all-or-nothing semantics
            # and the caller has not already opened one.
            if not continue_on_error and not self._connection_in_transaction():
                await self.begin()
                started_transaction = True

            pipeline_results = await self.connection.run_pipeline(pipeline, continue_on_error=continue_on_error)
            results = self._build_stack_results_from_pipeline(
                compiled_operations, pipeline_results, continue_on_error, observer
            )

            if started_transaction:
                await self.commit()
        except Exception as exc:
            # Best-effort rollback of a transaction we started; rollback
            # failures are logged but must not mask the original error.
            if started_transaction:
                try:
                    await self.rollback()
                except Exception as rollback_error:  # pragma: no cover - diagnostics only
                    logger.debug("Rollback after pipeline failure failed: %s", rollback_error)
            raise self._wrap_pipeline_error(exc, stack, continue_on_error) from exc

    return tuple(results)
|
|
1183
|
+
|
|
1184
|
+
async def _pipeline_native_supported(self) -> bool:
    """Report whether native oracledb pipelining is usable, caching the verdict.

    The result (and, on failure, a reason string) is stored on the instance
    so the checks run at most once per driver.
    """
    if self._pipeline_support is not None:
        return self._pipeline_support

    supported = False
    reason = None
    if self.stack_native_disabled:
        reason = "env_override"
    elif self._detect_oracledb_version() < PIPELINE_MIN_DRIVER_VERSION:
        reason = "driver_version"
    elif not hasattr(self.connection, "run_pipeline"):
        reason = "driver_api_missing"
    else:
        version_info = await self._detect_oracle_version()
        if version_info and version_info.major >= PIPELINE_MIN_DATABASE_MAJOR:
            supported = True
        else:
            reason = "database_version"

    self._pipeline_support = supported
    self._pipeline_support_reason = reason
    return supported
|
|
1212
|
+
|
|
1213
|
+
async def _detect_oracle_version(self) -> "VersionInfo | None":
    """Return the Oracle server version, querying the data dictionary once.

    A None result is not treated as a cache hit, so a failed lookup is
    retried on the next call.
    """
    if self._oracle_version is None:
        self._oracle_version = await self.data_dictionary.get_version(self)
    return self._oracle_version
|
|
1219
|
+
|
|
1220
|
+
def _detect_oracledb_version(self) -> "tuple[int, int, int]":
    """Return the installed python-oracledb version as a (major, minor, patch) tuple."""
    return _ORACLEDB_VERSION
|
|
1222
|
+
|
|
1223
|
+
async def _execute_statement(self, cursor: Any, statement: "SQL") -> "ExecutionResult":
    """Execute a single SQL statement with Oracle-specific data handling.

    Args:
        cursor: Oracle cursor object
        statement: SQL statement to execute

    Returns:
        Execution result carrying row data for SELECT-like statements, or
        the affected row count otherwise.
    """
    sql, prepared_parameters = self._get_compiled_sql(statement, self.statement_config)

    # Promote oversized string binds to CLOBs so Oracle does not reject
    # them with ORA-01704.
    if prepared_parameters and isinstance(prepared_parameters, dict):
        for name, value in prepared_parameters.items():
            if isinstance(value, str) and len(value) > LARGE_STRING_THRESHOLD:
                lob = await self.connection.createlob(oracledb.DB_TYPE_CLOB)
                await lob.write(value)
                prepared_parameters[name] = lob

    await cursor.execute(sql, prepared_parameters or {})

    if statement.returns_rows() or self._should_force_select(statement, cursor):
        fetched_rows = await cursor.fetchall()
        column_names = _normalize_column_names(
            [col[0] for col in cursor.description or []], self.driver_features
        )
        # Oracle yields tuples: hydrate LOB values first, then key each row
        # by its normalized column name for a consistent dict shape.
        data = [
            dict(zip(column_names, await _coerce_async_row_values(row), strict=False)) for row in fetched_rows
        ]
        return self.create_execution_result(
            cursor, selected_data=data, column_names=column_names, data_row_count=len(data), is_select_result=True
        )

    affected = cursor.rowcount if cursor.rowcount is not None else 0
    return self.create_execution_result(cursor, rowcount_override=affected)
|
|
1266
|
+
|
|
1267
|
+
async def select_to_storage(
    self,
    statement: "Statement | QueryBuilder | SQL | str",
    destination: "StorageDestination",
    /,
    *parameters: "StatementParameters | StatementFilter",
    statement_config: "StatementConfig | None" = None,
    partitioner: "dict[str, Any] | None" = None,
    format_hint: "StorageFormat | None" = None,
    telemetry: "StorageTelemetry | None" = None,
    **kwargs: Any,
) -> "StorageBridgeJob":
    """Run a query and persist its Arrow output to storage (async).

    Requires the ``arrow_export_enabled`` capability; partitioner details
    and caller-supplied telemetry are folded into the returned job.
    """
    self._require_capability("arrow_export_enabled")

    query_result = await self.select_to_arrow(statement, *parameters, statement_config=statement_config, **kwargs)
    pipeline = cast("AsyncStoragePipeline", self._storage_pipeline())
    payload = await self._write_result_to_storage_async(
        query_result, destination, format_hint=format_hint, pipeline=pipeline
    )
    self._attach_partition_telemetry(payload, partitioner)
    return self._create_storage_job(payload, telemetry)
|
|
1289
|
+
|
|
1290
|
+
async def load_from_arrow(
    self,
    table: str,
    source: "ArrowResult | Any",
    *,
    partitioner: "dict[str, Any] | None" = None,
    overwrite: bool = False,
    telemetry: "StorageTelemetry | None" = None,
) -> "StorageBridgeJob":
    """Bulk-load Arrow data into an Oracle table (async).

    Requires the ``arrow_import_enabled`` capability. When ``overwrite`` is
    True the target table is truncated before rows are inserted.
    """
    self._require_capability("arrow_import_enabled")

    arrow_table = self._coerce_arrow_table(source)
    if overwrite:
        await self._truncate_table_async(table)

    columns, rows = self._arrow_table_to_rows(arrow_table)
    if rows:
        insert_sql = _oracle_insert_statement(table, columns)
        async with self.with_cursor(self.connection) as cursor, self.handle_database_exceptions():
            await cursor.executemany(insert_sql, rows)

    payload = self._build_ingest_telemetry(arrow_table)
    payload["destination"] = table
    self._attach_partition_telemetry(payload, partitioner)
    return self._create_storage_job(payload, telemetry)
|
|
1314
|
+
|
|
1315
|
+
async def load_from_storage(
    self,
    table: str,
    source: "StorageDestination",
    *,
    file_format: "StorageFormat",
    partitioner: "dict[str, Any] | None" = None,
    overwrite: bool = False,
) -> "StorageBridgeJob":
    """Read a staged artifact from storage and load it into Oracle (async).

    The storage read produces inbound telemetry that is forwarded to
    :meth:`load_from_arrow`, which performs the actual insert.
    """
    arrow_table, inbound_telemetry = await self._read_arrow_from_storage_async(source, file_format=file_format)
    return await self.load_from_arrow(
        table, arrow_table, partitioner=partitioner, overwrite=overwrite, telemetry=inbound_telemetry
    )
|
|
1330
|
+
|
|
1331
|
+
# Oracle transaction management
|
|
1332
|
+
async def begin(self) -> None:
    """Begin a database transaction.

    Intentionally a no-op: Oracle starts a transaction implicitly with the
    first statement, so there is no explicit BEGIN to issue here.
    """
|
|
1338
|
+
|
|
1339
|
+
async def rollback(self) -> None:
    """Roll back the current transaction.

    Raises:
        SQLSpecError: If the underlying driver rollback fails.
    """
    try:
        await self.connection.rollback()
    except oracledb.Error as error:
        msg = f"Failed to rollback Oracle transaction: {error}"
        raise SQLSpecError(msg) from error
|
|
1350
|
+
|
|
1351
|
+
async def commit(self) -> None:
    """Commit the current transaction.

    Raises:
        SQLSpecError: If the underlying driver commit fails.
    """
    try:
        await self.connection.commit()
    except oracledb.Error as error:
        msg = f"Failed to commit Oracle transaction: {error}"
        raise SQLSpecError(msg) from error
|
|
1362
|
+
|
|
1363
|
+
async def select_to_arrow(
    self,
    statement: "Statement | QueryBuilder",
    /,
    *parameters: "StatementParameters | StatementFilter",
    statement_config: "StatementConfig | None" = None,
    return_format: "ArrowReturnFormat" = "table",
    native_only: bool = False,
    batch_size: int | None = None,
    arrow_schema: Any = None,
    **kwargs: Any,
) -> "Any":
    """Execute a query and return results in Apache Arrow format.

    With the default ``native_only=False`` the call is delegated to the base
    class conversion path. Any other value of ``native_only`` takes Oracle's
    native ``fetch_df_all()`` path, which returns an OracleDataFrame exposing
    the Arrow PyCapsule interface for zero-copy transfer.

    Args:
        statement: SQL query string, Statement, or QueryBuilder
        *parameters: Query parameters (same format as execute()/select())
        statement_config: Optional statement configuration override
        return_format: "table" for pyarrow.Table (default), "batches" for RecordBatch
        native_only: False (the default) delegates to the base conversion
            path; any other value uses the native fetch_df_all() path
        batch_size: Used as the fetch arraysize on the native path
            (defaults to 1000 when omitted)
        arrow_schema: Optional pyarrow.Schema the result table is cast to
        **kwargs: Additional keyword arguments forwarded to statement preparation

    Returns:
        ArrowResult containing pyarrow.Table or RecordBatch

    Raises:
        TypeError: If ``arrow_schema`` is not a pyarrow.Schema

    Examples:
        >>> result = await driver.select_to_arrow(
        ...     "SELECT * FROM users WHERE age > :1", (18,)
        ... )
        >>> df = result.to_pandas()
        >>> print(df.head())
    """
    # Fail fast if pyarrow is not installed.
    ensure_pyarrow()

    # Default path: delegate to the generic conversion implementation.
    if native_only is False:
        return await super().select_to_arrow(
            statement,
            *parameters,
            statement_config=statement_config,
            return_format=return_format,
            native_only=native_only,
            batch_size=batch_size,
            arrow_schema=arrow_schema,
            **kwargs,
        )

    import pyarrow as pa

    # Compile the statement and its bind parameters.
    config = statement_config or self.statement_config
    prepared_statement = self.prepare_statement(statement, parameters, statement_config=config, kwargs=kwargs)
    sql, prepared_parameters = self._get_compiled_sql(prepared_statement, config)

    # Native zero-copy fetch via oracledb's data-frame API.
    oracle_df = await self.connection.fetch_df_all(
        statement=sql, parameters=prepared_parameters or [], arraysize=batch_size or 1000
    )

    # OracleDataFrame implements the Arrow PyCapsule protocol, so
    # pa.table() can consume it directly without copying.
    arrow_table = pa.table(oracle_df)

    # Optionally cast to a caller-supplied schema (validated first).
    if arrow_schema is not None:
        if not isinstance(arrow_schema, pa.Schema):
            msg = f"arrow_schema must be a pyarrow.Schema, got {type(arrow_schema).__name__}"
            raise TypeError(msg)
        arrow_table = arrow_table.cast(arrow_schema)

    if return_format == "batches":
        # NOTE(review): only the first RecordBatch is returned here; any
        # remaining batches are dropped — confirm this matches the base
        # driver's "batches" contract.
        batches = arrow_table.to_batches()
        arrow_data: Any = batches[0] if batches else pa.RecordBatch.from_pydict({})
    else:
        arrow_data = arrow_table

    # Row count of the full table, regardless of return format.
    rows_affected = len(arrow_table)

    return create_arrow_result(statement=prepared_statement, data=arrow_data, rows_affected=rows_affected)
|
|
1450
|
+
|
|
1451
|
+
@property
def data_dictionary(self) -> "AsyncDataDictionaryBase":
    """Get the data dictionary for this driver.

    The instance is created lazily on first access and cached on the driver.

    Returns:
        Data dictionary instance for metadata queries
    """
    if self._data_dictionary is None:
        self._data_dictionary = OracleAsyncDataDictionary()
    return self._data_dictionary
|
|
1461
|
+
|
|
1462
|
+
async def _truncate_table_async(self, table: str) -> None:
    """Issue a TRUNCATE against ``table``, translating driver errors.

    The SQL is built before entering the exception-translating context so
    statement-construction errors are not masked as database errors.
    """
    truncate_sql = _oracle_truncate_statement(table)
    async with self.handle_database_exceptions():
        await self.connection.execute(truncate_sql)
|
|
1466
|
+
|
|
1467
|
+
|
|
1468
|
+
def _build_oracledb_profile() -> DriverParameterProfile:
    """Create the OracleDB driver parameter profile.

    Declares :name and :1 binds as executable styles; QMARK is listed as a
    supported input style but not an execution style, so it is presumably
    rewritten to a colon style before execution — confirm in the compiler.
    """
    return DriverParameterProfile(
        name="OracleDB",
        default_style=ParameterStyle.POSITIONAL_COLON,
        supported_styles={ParameterStyle.NAMED_COLON, ParameterStyle.POSITIONAL_COLON, ParameterStyle.QMARK},
        default_execution_style=ParameterStyle.NAMED_COLON,
        supported_execution_styles={ParameterStyle.NAMED_COLON, ParameterStyle.POSITIONAL_COLON},
        has_native_list_expansion=False,
        preserve_parameter_format=True,
        needs_static_script_compilation=False,
        allow_mixed_parameter_styles=False,
        preserve_original_params_for_many=False,
        json_serializer_strategy="helper",
        default_dialect="oracle",
    )
|
|
1485
|
+
|
|
1486
|
+
|
|
1487
|
+
# Shared profile instance used both for registry lookup and for building
# the module-level statement config below.
_ORACLE_PROFILE = _build_oracledb_profile()

# Expose the profile under the "oracledb" driver key.
register_driver_profile("oracledb", _ORACLE_PROFILE)

# Default statement configuration for oracledb drivers; forces the "oracle"
# SQL dialect and the package's JSON serializer.
oracledb_statement_config = build_statement_config_from_profile(
    _ORACLE_PROFILE, statement_overrides={"dialect": "oracle"}, json_serializer=to_json
)
|