databricks-sqlalchemy 2.0.3__py3-none-any.whl → 2.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- CHANGELOG.md +6 -3
- databricks/__init__.py +0 -0
- databricks/sqlalchemy/_parse.py +1 -1
- {databricks_sqlalchemy-2.0.3.dist-info → databricks_sqlalchemy-2.0.5.dist-info}/METADATA +2 -2
- databricks_sqlalchemy-2.0.5.dist-info/RECORD +16 -0
- databricks/sqlalchemy/dependency_test/test_dependency.py +0 -22
- databricks/sqlalchemy/test/_extra.py +0 -70
- databricks/sqlalchemy/test/_future.py +0 -331
- databricks/sqlalchemy/test/_regression.py +0 -311
- databricks/sqlalchemy/test/_unsupported.py +0 -450
- databricks/sqlalchemy/test/conftest.py +0 -13
- databricks/sqlalchemy/test/overrides/_componentreflectiontest.py +0 -189
- databricks/sqlalchemy/test/overrides/_ctetest.py +0 -33
- databricks/sqlalchemy/test/test_suite.py +0 -13
- databricks/sqlalchemy/test_local/__init__.py +0 -5
- databricks/sqlalchemy/test_local/conftest.py +0 -44
- databricks/sqlalchemy/test_local/e2e/MOCK_DATA.xlsx +0 -0
- databricks/sqlalchemy/test_local/e2e/test_basic.py +0 -543
- databricks/sqlalchemy/test_local/test_ddl.py +0 -96
- databricks/sqlalchemy/test_local/test_parsing.py +0 -160
- databricks/sqlalchemy/test_local/test_types.py +0 -161
- databricks_sqlalchemy-2.0.3.dist-info/RECORD +0 -31
- {databricks_sqlalchemy-2.0.3.dist-info → databricks_sqlalchemy-2.0.5.dist-info}/LICENSE +0 -0
- {databricks_sqlalchemy-2.0.3.dist-info → databricks_sqlalchemy-2.0.5.dist-info}/WHEEL +0 -0
- {databricks_sqlalchemy-2.0.3.dist-info → databricks_sqlalchemy-2.0.5.dist-info}/entry_points.txt +0 -0
CHANGELOG.md
CHANGED
@@ -1,7 +1,10 @@
 # Release History
 
-# 2.0.3
+# 2.0.5 (2025-02-22)
+
+- Added support for double column types (databricks/databricks-sqlalchemy#19 by @up-stevesloan)
+
+# 2.0.4 (2025-01-27)
 
 - All the SQLAlchemy features from `databricks-sql-connector>=4.0.0` have been moved to this `databricks-sqlalchemy` library
-- Support for SQLAlchemy v2 dialect is provided
-
+- Support for SQLAlchemy v2 dialect is provided
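
For orientation, a minimal sketch of connecting through the SQLAlchemy v2 dialect that the 2.0.4 entry above refers to, using the documented `databricks://` URL format. The token, hostname, http_path, catalog, and schema values are placeholders to be supplied from your own workspace, not real values.

```python
from sqlalchemy import create_engine, text

# Placeholder credentials: replace every <...> value with your own.
engine = create_engine(
    "databricks://token:<access_token>@<server_hostname>"
    "?http_path=<http_path>&catalog=<catalog>&schema=<schema>"
)

with engine.connect() as conn:
    # Round-trip a trivial query to confirm the dialect is wired up.
    print(conn.execute(text("SELECT 1")).scalar())
```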
databricks/__init__.py
ADDED
File without changes
databricks/sqlalchemy/_parse.py
CHANGED
@@ -309,7 +309,7 @@ GET_COLUMNS_TYPE_MAP = {
     "int": sqlalchemy.types.Integer,
     "bigint": sqlalchemy.types.BigInteger,
     "float": sqlalchemy.types.Float,
-    "double": sqlalchemy.types.Float,
+    "double": sqlalchemy.types.Double,
     "string": sqlalchemy.types.String,
     "varchar": sqlalchemy.types.String,
     "char": sqlalchemy.types.String,
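
This one-line change is the "double column types" fix from the changelog: a Databricks DOUBLE column now reflects to SQLAlchemy 2.0's dedicated `Double` type instead of being collapsed into `Float`. A small illustration of why this is backward-compatible (plain SQLAlchemy, nothing Databricks-specific):

```python
import sqlalchemy

# SQLAlchemy 2.0 introduced Double as a subclass of Float, so existing
# isinstance/issubclass checks written against Float continue to pass.
assert issubclass(sqlalchemy.types.Double, sqlalchemy.types.Float)

# A column declared (or reflected) as DOUBLE now carries the precise type.
column = sqlalchemy.Column("reading", sqlalchemy.types.Double)
print(type(column.type).__name__)  # -> Double
```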
{databricks_sqlalchemy-2.0.3.dist-info → databricks_sqlalchemy-2.0.5.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: databricks-sqlalchemy
-Version: 2.0.3
+Version: 2.0.5
 Summary: Databricks SQLAlchemy plugin for Python
 License: Apache-2.0
 Author: Databricks
@@ -13,7 +13,7 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: databricks_sql_connector (
+Requires-Dist: databricks_sql_connector (>=4.0.0)
 Requires-Dist: pyarrow (>=14.0.1,<17)
 Requires-Dist: sqlalchemy (>=2.0.21)
 Project-URL: Bug Tracker, https://github.com/databricks/databricks-sqlalchemy/issues
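
To confirm the dependency pin from a running environment, the standard-library `importlib.metadata` can read these Requires-Dist lines straight from the installed wheel. A sketch, assuming databricks-sqlalchemy 2.0.5 is installed:

```python
from importlib.metadata import requires, version

print(version("databricks-sqlalchemy"))  # expected: 2.0.5

# requires() returns the raw Requires-Dist strings from METADATA.
for requirement in requires("databricks-sqlalchemy") or []:
    if requirement.startswith("databricks_sql_connector"):
        print(requirement)  # expected to show the (>=4.0.0) constraint above
```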
databricks_sqlalchemy-2.0.5.dist-info/RECORD
ADDED
@@ -0,0 +1,16 @@
+CHANGELOG.md,sha256=Wd1sCpqs2A9pMaQ1Eg07EablTZkyQFgZUzY1VldbI3g,332
+databricks/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+databricks/sqlalchemy/__init__.py,sha256=Gk3XC5OCzq7LuxMVpxK3t4q0rkflXJ8uJRJh9uusMqc,185
+databricks/sqlalchemy/_ddl.py,sha256=c0_GwfmnrFVr4-Ls14fmdGUUFyUok_GW4Uo45hLABFc,3983
+databricks/sqlalchemy/_parse.py,sha256=aFpwcLowSDP1R7BY3G-yuEXiPFL-_VaIGvqKNDMehcQ,13049
+databricks/sqlalchemy/_types.py,sha256=EqC_TWWY7mDw9EM2AVZnPrw5DD6G-vBV7wiwX4tcBcM,11753
+databricks/sqlalchemy/base.py,sha256=KcjfHMH0NsceYE2NRxrePtf5T1uw9u8JHofRdbnAKS4,15619
+databricks/sqlalchemy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+databricks/sqlalchemy/pytest.ini,sha256=ImutflUjkhByVNWCQ18Todj6XTvgJAQX_v7fD-gWHhU,106
+databricks/sqlalchemy/requirements.py,sha256=OobunAEwZ9y2dvSQLOmdgJciVn9xGlY9NAFfszPCTU0,9018
+databricks/sqlalchemy/setup.cfg,sha256=ImutflUjkhByVNWCQ18Todj6XTvgJAQX_v7fD-gWHhU,106
+databricks_sqlalchemy-2.0.5.dist-info/LICENSE,sha256=WgVm2VpfZ3CsUfPndD2NeCrEIcFA4UB-YnnW4ejxcbE,11346
+databricks_sqlalchemy-2.0.5.dist-info/METADATA,sha256=Gx3ImxEf55dFm4irqSSFPg5eIaJF1RnbeEs7e5rmGww,12717
+databricks_sqlalchemy-2.0.5.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+databricks_sqlalchemy-2.0.5.dist-info/entry_points.txt,sha256=AAjpsvZbVcoMAcWLIesoAT5FNZhBEcIhxdKknVua3jw,74
+databricks_sqlalchemy-2.0.5.dist-info/RECORD,,
databricks/sqlalchemy/dependency_test/test_dependency.py
DELETED
@@ -1,22 +0,0 @@
-import pytest
-
-class DatabricksImportError(Exception):
-    pass
-
-class TestLibraryDependencySuite:
-
-    @pytest.mark.skipif(pytest.importorskip("databricks_sql_connector"), reason="databricks_sql_connector is present")
-    def test_sql_core(self):
-        with pytest.raises(DatabricksImportError, match="databricks_sql_connector module is not available"):
-            try:
-                import databricks
-            except ImportError:
-                raise DatabricksImportError("databricks_sql_connector_core module is not available")
-
-    @pytest.mark.skipif(pytest.importorskip("sqlalchemy"), reason="SQLAlchemy is present")
-    def test_sqlalchemy(self):
-        with pytest.raises(DatabricksImportError, match="sqlalchemy module is not available"):
-            try:
-                import sqlalchemy
-            except ImportError:
-                raise DatabricksImportError("sqlalchemy module is not available")
databricks/sqlalchemy/test/_extra.py
DELETED
@@ -1,70 +0,0 @@
-"""Additional tests authored by Databricks that use SQLAlchemy's test fixtures
-"""
-
-import datetime
-
-from sqlalchemy.testing.suite.test_types import (
-    _LiteralRoundTripFixture,
-    fixtures,
-    testing,
-    eq_,
-    select,
-    Table,
-    Column,
-    config,
-    _DateFixture,
-    literal,
-)
-from databricks.sqlalchemy import TINYINT, TIMESTAMP
-
-
-class TinyIntegerTest(_LiteralRoundTripFixture, fixtures.TestBase):
-    __backend__ = True
-
-    def test_literal(self, literal_round_trip):
-        literal_round_trip(TINYINT, [5], [5])
-
-    @testing.fixture
-    def integer_round_trip(self, metadata, connection):
-        def run(datatype, data):
-            int_table = Table(
-                "tiny_integer_table",
-                metadata,
-                Column(
-                    "id",
-                    TINYINT,
-                    primary_key=True,
-                    test_needs_autoincrement=False,
-                ),
-                Column("tiny_integer_data", datatype),
-            )
-
-            metadata.create_all(config.db)
-
-            connection.execute(int_table.insert(), {"id": 1, "integer_data": data})
-
-            row = connection.execute(select(int_table.c.integer_data)).first()
-
-            eq_(row, (data,))
-
-            assert isinstance(row[0], int)
-
-        return run
-
-
-class DateTimeTZTestCustom(_DateFixture, fixtures.TablesTest):
-    """This test confirms that when a user uses the TIMESTAMP
-    type to store a datetime object, it retains its timezone
-    """
-
-    __backend__ = True
-    datatype = TIMESTAMP
-    data = datetime.datetime(2012, 10, 15, 12, 57, 18, tzinfo=datetime.timezone.utc)
-
-    @testing.requires.datetime_implicit_bound
-    def test_select_direct(self, connection):
-
-        # We need to pass the TIMESTAMP type to the literal function
-        # so that the value is processed correctly.
-        result = connection.scalar(select(literal(self.data, TIMESTAMP)))
-        eq_(result, self.data)
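
The removed DateTimeTZTestCustom above exercised the dialect-specific TIMESTAMP type, which round-trips timezone-aware datetimes, alongside TINYINT. Based only on the imports shown in that file, a brief sketch of declaring those types in a table; the table and column names are illustrative:

```python
import datetime

from sqlalchemy import Column, MetaData, Table

from databricks.sqlalchemy import TIMESTAMP, TINYINT

# TIMESTAMP preserves timezone-aware datetimes; TINYINT maps to the
# 1-byte Databricks integer type.
metadata = MetaData()
readings = Table(
    "readings",
    metadata,
    Column("sensor_id", TINYINT),
    Column("taken_at", TIMESTAMP),
)

# A timezone-aware value like the one the removed test stored:
taken_at = datetime.datetime(2012, 10, 15, 12, 57, 18, tzinfo=datetime.timezone.utc)
```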
databricks/sqlalchemy/test/_future.py
DELETED
@@ -1,331 +0,0 @@
-# type: ignore
-
-from enum import Enum
-
-import pytest
-from databricks.sqlalchemy.test._regression import (
-    ExpandingBoundInTest,
-    IdentityAutoincrementTest,
-    LikeFunctionsTest,
-    NormalizedNameTest,
-)
-from databricks.sqlalchemy.test._unsupported import (
-    ComponentReflectionTest,
-    ComponentReflectionTestExtra,
-    CTETest,
-    InsertBehaviorTest,
-)
-from sqlalchemy.testing.suite import (
-    ArrayTest,
-    BinaryTest,
-    BizarroCharacterFKResolutionTest,
-    CollateTest,
-    ComputedColumnTest,
-    ComputedReflectionTest,
-    DifficultParametersTest,
-    FutureWeCanSetDefaultSchemaWEventsTest,
-    IdentityColumnTest,
-    IdentityReflectionTest,
-    JSONLegacyStringCastIndexTest,
-    JSONTest,
-    NativeUUIDTest,
-    QuotedNameArgumentTest,
-    RowCountTest,
-    SimpleUpdateDeleteTest,
-    WeCanSetDefaultSchemaWEventsTest,
-)
-
-
-class FutureFeature(Enum):
-    ARRAY = "ARRAY column type handling"
-    BINARY = "BINARY column type handling"
-    CHECK = "CHECK constraint handling"
-    COLLATE = "COLLATE DDL generation"
-    CTE_FEAT = "required CTE features"
-    EMPTY_INSERT = "empty INSERT support"
-    FK_OPTS = "foreign key option checking"
-    GENERATED_COLUMNS = "Delta computed / generated columns support"
-    IDENTITY = "identity reflection"
-    JSON = "JSON column type handling"
-    MULTI_PK = "get_multi_pk_constraint method"
-    PROVISION = "event-driven engine configuration"
-    REGEXP = "_visit_regexp"
-    SANE_ROWCOUNT = "sane_rowcount support"
-    TBL_OPTS = "get_table_options method"
-    TEST_DESIGN = "required test-fixture overrides"
-    TUPLE_LITERAL = "tuple-like IN markers completely"
-    UUID = "native Uuid() type"
-    VIEW_DEF = "get_view_definition method"
-
-
-def render_future_feature(rsn: FutureFeature, extra=False) -> str:
-    postfix = " More detail in _future.py" if extra else ""
-    return f"[FUTURE][{rsn.name}]: This dialect doesn't implement {rsn.value}.{postfix}"
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.BINARY))
-class BinaryTest(BinaryTest):
-    """Databricks doesn't support binding of BINARY type values. When DBR supports this, we can implement
-    in this dialect.
-    """
-
-    pass
-
-
-class ExpandingBoundInTest(ExpandingBoundInTest):
-    @pytest.mark.skip(render_future_feature(FutureFeature.TUPLE_LITERAL))
-    def test_empty_heterogeneous_tuples_bindparam(self):
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.TUPLE_LITERAL))
-    def test_empty_heterogeneous_tuples_direct(self):
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.TUPLE_LITERAL))
-    def test_empty_homogeneous_tuples_bindparam(self):
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.TUPLE_LITERAL))
-    def test_empty_homogeneous_tuples_direct(self):
-        pass
-
-
-class NormalizedNameTest(NormalizedNameTest):
-    @pytest.mark.skip(render_future_feature(FutureFeature.TEST_DESIGN, True))
-    def test_get_table_names(self):
-        """I'm not clear how this test can ever pass given that it's assertion looks like this:
-
-        ```python
-        eq_(tablenames[0].upper(), tablenames[0].lower())
-        eq_(tablenames[1].upper(), tablenames[1].lower())
-        ```
-
-        It's forcibly calling .upper() and .lower() on the same string and expecting them to be equal.
-        """
-        pass
-
-
-class CTETest(CTETest):
-    @pytest.mark.skip(render_future_feature(FutureFeature.CTE_FEAT, True))
-    def test_delete_from_round_trip(self):
-        """Databricks dialect doesn't implement multiple-table criteria within DELETE"""
-        pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.TEST_DESIGN, True))
-class IdentityColumnTest(IdentityColumnTest):
-    """Identity works. Test needs rewrite for Databricks. See comments in test_suite.py
-
-    The setup for these tests tries to create a table with a DELTA IDENTITY column but has two problems:
-    1. It uses an Integer() type for the column. Whereas DELTA IDENTITY columns must be BIGINT.
-    2. It tries to set the start == 42, which Databricks doesn't support
-
-    I can get the tests to _run_ by patching the table fixture to use BigInteger(). But it asserts that the
-    identity of two rows are 42 and 43, which is not possible since they will be rows 1 and 2 instead.
-
-    I'm satisified through manual testing that our implementation of visit_identity_column works but a better test is needed.
-    """
-
-    pass
-
-
-class IdentityAutoincrementTest(IdentityAutoincrementTest):
-    @pytest.mark.skip(render_future_feature(FutureFeature.TEST_DESIGN, True))
-    def test_autoincrement_with_identity(self):
-        """This test has the same issue as IdentityColumnTest.test_select_all in that it creates a table with identity
-        using an Integer() rather than a BigInteger(). If I override this behaviour to use a BigInteger() instead, the
-        test passes.
-        """
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.TEST_DESIGN))
-class BizarroCharacterFKResolutionTest(BizarroCharacterFKResolutionTest):
-    """Some of the combinations in this test pass. Others fail. Given the esoteric nature of these failures,
-    we have opted to defer implementing fixes to a later time, guided by customer feedback. Passage of
-    these tests is not an acceptance criteria for our dialect.
-    """
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.TEST_DESIGN))
-class DifficultParametersTest(DifficultParametersTest):
-    """Some of the combinations in this test pass. Others fail. Given the esoteric nature of these failures,
-    we have opted to defer implementing fixes to a later time, guided by customer feedback. Passage of
-    these tests is not an acceptance criteria for our dialect.
-    """
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.IDENTITY, True))
-class IdentityReflectionTest(IdentityReflectionTest):
-    """It's not clear _how_ to implement this for SQLAlchemy. Columns created with GENERATED ALWAYS AS IDENTITY
-    are not specially demarked in the output of TGetColumnsResponse or DESCRIBE TABLE EXTENDED.
-
-    We could theoretically parse this from the contents of `SHOW CREATE TABLE` but that feels like a hack.
-    """
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.JSON))
-class JSONTest(JSONTest):
-    """Databricks supports JSON path expressions in queries it's just not implemented in this dialect."""
-
-    pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.JSON))
-class JSONLegacyStringCastIndexTest(JSONLegacyStringCastIndexTest):
-    """Same comment applies as JSONTest"""
-
-    pass
-
-
-class LikeFunctionsTest(LikeFunctionsTest):
-    @pytest.mark.skip(render_future_feature(FutureFeature.REGEXP))
-    def test_not_regexp_match(self):
-        """The defaul dialect doesn't implement _visit_regexp methods so we don't get them automatically."""
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.REGEXP))
-    def test_regexp_match(self):
-        """The defaul dialect doesn't implement _visit_regexp methods so we don't get them automatically."""
-        pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.COLLATE))
-class CollateTest(CollateTest):
-    """This is supported in Databricks. Not implemented here."""
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.UUID, True))
-class NativeUUIDTest(NativeUUIDTest):
-    """Type implementation will be straightforward. Since Databricks doesn't have a native UUID type we can use
-    a STRING field, create a custom TypeDecorator for sqlalchemy.types.Uuid and add it to the dialect's colspecs.
-
-    Then mark requirements.uuid_data_type as open() so this test can run.
-    """
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.SANE_ROWCOUNT))
-class RowCountTest(RowCountTest):
-    pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.SANE_ROWCOUNT))
-class SimpleUpdateDeleteTest(SimpleUpdateDeleteTest):
-    pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.PROVISION, True))
-class WeCanSetDefaultSchemaWEventsTest(WeCanSetDefaultSchemaWEventsTest):
-    """provision.py allows us to define event listeners that emit DDL for things like setting up a test schema
-    or, in this case, changing the default schema for the connection after it's been built. This would override
-    the schema defined in the sqlalchemy connection string. This support is possible but is not implemented
-    in the dialect. Deferred for now.
-    """
-
-    pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.PROVISION, True))
-class FutureWeCanSetDefaultSchemaWEventsTest(FutureWeCanSetDefaultSchemaWEventsTest):
-    """provision.py allows us to define event listeners that emit DDL for things like setting up a test schema
-    or, in this case, changing the default schema for the connection after it's been built. This would override
-    the schema defined in the sqlalchemy connection string. This support is possible but is not implemented
-    in the dialect. Deferred for now.
-    """
-
-    pass
-
-
-class ComponentReflectionTest(ComponentReflectionTest):
-    @pytest.mark.skip(reason=render_future_feature(FutureFeature.TBL_OPTS, True))
-    def test_multi_get_table_options_tables(self):
-        """It's not clear what the expected ouput from this method would even _be_. Requires research."""
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.VIEW_DEF))
-    def test_get_view_definition(self):
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.VIEW_DEF))
-    def test_get_view_definition_does_not_exist(self):
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.MULTI_PK))
-    def test_get_multi_pk_constraint(self):
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.CHECK))
-    def test_get_multi_check_constraints(self):
-        pass
-
-
-class ComponentReflectionTestExtra(ComponentReflectionTestExtra):
-    @pytest.mark.skip(render_future_feature(FutureFeature.CHECK))
-    def test_get_check_constraints(self):
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.FK_OPTS))
-    def test_get_foreign_key_options(self):
-        """It's not clear from the test code what the expected output is here. Further research required."""
-        pass
-
-
-class InsertBehaviorTest(InsertBehaviorTest):
-    @pytest.mark.skip(render_future_feature(FutureFeature.EMPTY_INSERT, True))
-    def test_empty_insert(self):
-        """Empty inserts are possible using DEFAULT VALUES on Databricks. To implement it, we need
-        to hook into the SQLCompiler to render a no-op column list. With SQLAlchemy's default implementation
-        the request fails with a syntax error
-        """
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.EMPTY_INSERT, True))
-    def test_empty_insert_multiple(self):
-        """Empty inserts are possible using DEFAULT VALUES on Databricks. To implement it, we need
-        to hook into the SQLCompiler to render a no-op column list. With SQLAlchemy's default implementation
-        the request fails with a syntax error
-        """
-        pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.ARRAY))
-class ArrayTest(ArrayTest):
-    """While Databricks supports ARRAY types, DBR cannot handle bound parameters of this type.
-    This makes them unusable to SQLAlchemy without some workaround. Potentially we could inline
-    the values of these parameters (which risks sql injection).
-    """
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.TEST_DESIGN, True))
-class QuotedNameArgumentTest(QuotedNameArgumentTest):
-    """These tests are challenging. The whole test setup depends on a table with a name like `quote ' one`
-    which will never work on Databricks because table names can't contains spaces. But QuotedNamedArgumentTest
-    also checks the behaviour of DDL identifier preparation process. We need to override some of IdentifierPreparer
-    methods because these are the ultimate control for whether or not CHECK and UNIQUE constraints are emitted.
-    """
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(reason=render_future_feature(FutureFeature.GENERATED_COLUMNS))
-class ComputedColumnTest(ComputedColumnTest):
-    pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(reason=render_future_feature(FutureFeature.GENERATED_COLUMNS))
-class ComputedReflectionTest(ComputedReflectionTest):
-    pass
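
The removed NativeUUIDTest docstring above outlines how UUID support could eventually land: store values in a STRING column behind a TypeDecorator registered in the dialect's colspecs. A minimal sketch of that approach; `DatabricksUuid` is a hypothetical name used for illustration and is not part of the dialect:

```python
import uuid

import sqlalchemy.types as types


class DatabricksUuid(types.TypeDecorator):
    """Hypothetical decorator storing uuid.UUID values in a STRING column."""

    impl = types.String
    cache_ok = True

    def process_bind_param(self, value, dialect):
        # Serialize UUIDs to their canonical 36-character string form.
        return None if value is None else str(value)

    def process_result_value(self, value, dialect):
        # Parse the stored string back into a uuid.UUID instance.
        return None if value is None else uuid.UUID(value)
```

As the docstring notes, the dialect would also need to mark `requirements.uuid_data_type` as open() before the skipped suite could run.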