databricks-sqlalchemy 1.0.0__py3-none-any.whl → 1.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- CHANGELOG.md +2 -271
- databricks/sqlalchemy/__init__.py +1 -4
- databricks/sqlalchemy/dialect/__init__.py +340 -0
- databricks/sqlalchemy/dialect/base.py +17 -0
- databricks/sqlalchemy/dialect/compiler.py +38 -0
- {databricks_sqlalchemy-1.0.0.dist-info → databricks_sqlalchemy-1.0.2.dist-info}/METADATA +39 -61
- databricks_sqlalchemy-1.0.2.dist-info/RECORD +10 -0
- databricks/sqlalchemy/_ddl.py +0 -100
- databricks/sqlalchemy/_parse.py +0 -385
- databricks/sqlalchemy/_types.py +0 -323
- databricks/sqlalchemy/base.py +0 -436
- databricks/sqlalchemy/dependency_test/test_dependency.py +0 -22
- databricks/sqlalchemy/py.typed +0 -0
- databricks/sqlalchemy/pytest.ini +0 -4
- databricks/sqlalchemy/requirements.py +0 -249
- databricks/sqlalchemy/setup.cfg +0 -4
- databricks/sqlalchemy/test/_extra.py +0 -70
- databricks/sqlalchemy/test/_future.py +0 -331
- databricks/sqlalchemy/test/_regression.py +0 -311
- databricks/sqlalchemy/test/_unsupported.py +0 -450
- databricks/sqlalchemy/test/conftest.py +0 -13
- databricks/sqlalchemy/test/overrides/_componentreflectiontest.py +0 -189
- databricks/sqlalchemy/test/overrides/_ctetest.py +0 -33
- databricks/sqlalchemy/test/test_suite.py +0 -13
- databricks/sqlalchemy/test_local/__init__.py +0 -5
- databricks/sqlalchemy/test_local/conftest.py +0 -44
- databricks/sqlalchemy/test_local/e2e/MOCK_DATA.xlsx +0 -0
- databricks/sqlalchemy/test_local/e2e/test_basic.py +0 -543
- databricks/sqlalchemy/test_local/test_ddl.py +0 -96
- databricks/sqlalchemy/test_local/test_parsing.py +0 -160
- databricks/sqlalchemy/test_local/test_types.py +0 -161
- databricks_sqlalchemy-1.0.0.dist-info/RECORD +0 -31
- {databricks_sqlalchemy-1.0.0.dist-info → databricks_sqlalchemy-1.0.2.dist-info}/LICENSE +0 -0
- {databricks_sqlalchemy-1.0.0.dist-info → databricks_sqlalchemy-1.0.2.dist-info}/WHEEL +0 -0
- {databricks_sqlalchemy-1.0.0.dist-info → databricks_sqlalchemy-1.0.2.dist-info}/entry_points.txt +0 -0
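Both wheels register the `databricks` dialect with SQLAlchemy through the unchanged entry_points.txt above, so engine construction is identical across 1.0.0 and 1.0.2. A minimal connection sketch, assuming a Databricks SQL warehouse; the hostname, token, and http_path below are placeholders, not real values:

```python
# Hedged sketch: the dialect is resolved via the `databricks://` URL scheme
# declared in entry_points.txt. All connection details below are placeholders.
from sqlalchemy import create_engine

engine = create_engine(
    "databricks://token:dapi-EXAMPLE@example.cloud.databricks.com"
    "?http_path=/sql/1.0/warehouses/abc123&catalog=main&schema=default"
)
```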
databricks/sqlalchemy/requirements.py DELETED
@@ -1,249 +0,0 @@
-"""
-The complete list of requirements is provided by SQLAlchemy here:
-
-https://github.com/sqlalchemy/sqlalchemy/blob/main/lib/sqlalchemy/testing/requirements.py
-
-When SQLAlchemy skips a test because a requirement is closed() it gives a generic skip message.
-To make these failures more actionable, we only define requirements in this file that we wish to
-force to be open(). If a test should be skipped on Databricks, it will be specifically marked skip
-in test_suite.py with a Databricks-specific reason.
-
-See the special note about the array_type exclusion below.
-See special note about has_temp_table exclusion below.
-"""
-
-import sqlalchemy.testing.requirements
-import sqlalchemy.testing.exclusions
-
-
-class Requirements(sqlalchemy.testing.requirements.SuiteRequirements):
-    @property
-    def date_historic(self):
-        """target dialect supports representation of Python
-        datetime.datetime() objects with historic (pre 1970) values."""
-
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def datetime_historic(self):
-        """target dialect supports representation of Python
-        datetime.datetime() objects with historic (pre 1970) values."""
-
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def datetime_literals(self):
-        """target dialect supports rendering of a date, time, or datetime as a
-        literal string, e.g. via the TypeEngine.literal_processor() method.
-
-        """
-
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def timestamp_microseconds(self):
-        """target dialect supports representation of Python
-        datetime.datetime() with microsecond objects but only
-        if TIMESTAMP is used."""
-
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def time_microseconds(self):
-        """target dialect supports representation of Python
-        datetime.time() with microsecond objects.
-
-        This requirement declaration isn't needed but I've included it here for completeness.
-        Since Databricks doesn't have a TIME type, SQLAlchemy will compile Time() columns
-        as STRING Databricks data types. And we use a custom time type to render those strings
-        between str() and time.time() representations. Therefore we can store _any_ precision
-        that SQLAlchemy needs. The time_microseconds requirement defaults to ON for all dialects
-        except mssql, mysql, mariadb, and oracle.
-        """
-
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def infinity_floats(self):
-        """The Float type can persist and load float('inf'), float('-inf')."""
-
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def precision_numerics_retains_significant_digits(self):
-        """A precision numeric type will return empty significant digits,
-        i.e. a value such as 10.000 will come back in Decimal form with
-        the .000 maintained."""
-
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def precision_numerics_many_significant_digits(self):
-        """target backend supports values with many digits on both sides,
-        such as 319438950232418390.273596, 87673.594069654243
-
-        """
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def array_type(self):
-        """While Databricks does support ARRAY types, pysql cannot bind them. So
-        we cannot use them with SQLAlchemy
-
-        Due to a bug in SQLAlchemy, we _must_ define this exclusion as closed() here or else the
-        test runner will crash the pytest process due to an AttributeError
-        """
-
-        # TODO: Implement array type using inline?
-        return sqlalchemy.testing.exclusions.closed()
-
-    @property
-    def table_ddl_if_exists(self):
-        """target platform supports IF NOT EXISTS / IF EXISTS for tables."""
-
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def identity_columns(self):
-        """If a backend supports GENERATED { ALWAYS | BY DEFAULT }
-        AS IDENTITY"""
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def identity_columns_standard(self):
-        """If a backend supports GENERATED { ALWAYS | BY DEFAULT }
-        AS IDENTITY with a standard syntax.
-        This is mainly to exclude MSSql.
-        """
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def has_temp_table(self):
-        """target dialect supports checking a single temp table name
-
-        unfortunately this is not the same as temp_table_names
-
-        SQLAlchemy's HasTableTest is not normalised in such a way that temp table tests
-        are separate from temp view and normal table tests. If those tests were split out,
-        we would just add detailed skip markers in test_suite.py. But since we'd like to
-        run the HasTableTest group for the features we support, we must set this exclusion
-        to closed().
-
-        It would be ideal if there were a separate requirement for has_temp_view. Without it,
-        we're in a bind.
-        """
-        return sqlalchemy.testing.exclusions.closed()
-
-    @property
-    def temporary_views(self):
-        """target database supports temporary views"""
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def views(self):
-        """Target database must support VIEWs."""
-
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def temporary_tables(self):
-        """target database supports temporary tables
-
-        ComponentReflection test is intricate and simply cannot function without this exclusion being defined here.
-        This happens because we cannot skip individual combinations used in ComponentReflection test.
-        """
-        return sqlalchemy.testing.exclusions.closed()
-
-    @property
-    def table_reflection(self):
-        """target database has general support for table reflection"""
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def comment_reflection(self):
-        """Indicates if the database support table comment reflection"""
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def comment_reflection_full_unicode(self):
-        """Indicates if the database support table comment reflection in the
-        full unicode range, including emoji etc.
-        """
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def temp_table_reflection(self):
-        """ComponentReflection test is intricate and simply cannot function without this exclusion being defined here.
-        This happens because we cannot skip individual combinations used in ComponentReflection test.
-        """
-        return sqlalchemy.testing.exclusions.closed()
-
-    @property
-    def index_reflection(self):
-        """ComponentReflection test is intricate and simply cannot function without this exclusion being defined here.
-        This happens because we cannot skip individual combinations used in ComponentReflection test.
-        """
-        return sqlalchemy.testing.exclusions.closed()
-
-    @property
-    def unique_constraint_reflection(self):
-        """ComponentReflection test is intricate and simply cannot function without this exclusion being defined here.
-        This happens because we cannot skip individual combinations used in ComponentReflection test.
-
-        Databricks doesn't support UNIQUE constraints.
-        """
-        return sqlalchemy.testing.exclusions.closed()
-
-    @property
-    def reflects_pk_names(self):
-        """Target driver reflects the name of primary key constraints."""
-
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def datetime_implicit_bound(self):
-        """target dialect when given a datetime object will bind it such
-        that the database server knows the object is a date, and not
-        a plain string.
-        """
-
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def tuple_in(self):
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def ctes(self):
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def ctes_with_update_delete(self):
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def delete_from(self):
-        """Target must support DELETE FROM..FROM or DELETE..USING syntax"""
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def table_value_constructor(self):
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def reflect_tables_no_columns(self):
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def denormalized_names(self):
-        """Target database must have 'denormalized', i.e.
-        UPPERCASE as case insensitive names."""
-
-        return sqlalchemy.testing.exclusions.open()
-
-    @property
-    def time_timezone(self):
-        """target dialect supports representation of Python
-        datetime.time() with tzinfo with Time(timezone=True)."""
-
-        return sqlalchemy.testing.exclusions.open()
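For context on how the deleted requirements file was consumed: SQLAlchemy's test suite looks up these properties by name and skips any test guarded by a requirement whose exclusion is closed(). A minimal sketch of that mechanism, using a hypothetical property name rather than one from the file above:

```python
# Minimal sketch of SQLAlchemy's requirements mechanism. The property name
# `example_feature` is hypothetical; real suites query names like array_type
# or has_temp_table from the deleted file above.
import sqlalchemy.testing.exclusions
import sqlalchemy.testing.requirements


class ExampleRequirements(sqlalchemy.testing.requirements.SuiteRequirements):
    @property
    def example_feature(self):
        # open() lets guarded tests run; closed() makes the runner emit a
        # generic skip message, which is why the file above prefers explicit
        # skip markers in test_suite.py for Databricks-specific gaps.
        return sqlalchemy.testing.exclusions.closed()
```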
databricks/sqlalchemy/setup.cfg DELETED

databricks/sqlalchemy/test/_extra.py DELETED
@@ -1,70 +0,0 @@
-"""Additional tests authored by Databricks that use SQLAlchemy's test fixtures
-"""
-
-import datetime
-
-from sqlalchemy.testing.suite.test_types import (
-    _LiteralRoundTripFixture,
-    fixtures,
-    testing,
-    eq_,
-    select,
-    Table,
-    Column,
-    config,
-    _DateFixture,
-    literal,
-)
-from databricks.sqlalchemy import TINYINT, TIMESTAMP
-
-
-class TinyIntegerTest(_LiteralRoundTripFixture, fixtures.TestBase):
-    __backend__ = True
-
-    def test_literal(self, literal_round_trip):
-        literal_round_trip(TINYINT, [5], [5])
-
-    @testing.fixture
-    def integer_round_trip(self, metadata, connection):
-        def run(datatype, data):
-            int_table = Table(
-                "tiny_integer_table",
-                metadata,
-                Column(
-                    "id",
-                    TINYINT,
-                    primary_key=True,
-                    test_needs_autoincrement=False,
-                ),
-                Column("tiny_integer_data", datatype),
-            )
-
-            metadata.create_all(config.db)
-
-            connection.execute(int_table.insert(), {"id": 1, "tiny_integer_data": data})
-
-            row = connection.execute(select(int_table.c.tiny_integer_data)).first()
-
-            eq_(row, (data,))
-
-            assert isinstance(row[0], int)
-
-        return run
-
-
-class DateTimeTZTestCustom(_DateFixture, fixtures.TablesTest):
-    """This test confirms that when a user uses the TIMESTAMP
-    type to store a datetime object, it retains its timezone
-    """
-
-    __backend__ = True
-    datatype = TIMESTAMP
-    data = datetime.datetime(2012, 10, 15, 12, 57, 18, tzinfo=datetime.timezone.utc)
-
-    @testing.requires.datetime_implicit_bound
-    def test_select_direct(self, connection):
-
-        # We need to pass the TIMESTAMP type to the literal function
-        # so that the value is processed correctly.
-        result = connection.scalar(select(literal(self.data, TIMESTAMP)))
-        eq_(result, self.data)
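The `literal(self.data, TIMESTAMP)` call above is the key detail: the literal must carry the Databricks TIMESTAMP type so the dialect's bind processing runs and the timezone survives the round trip. A standalone sketch of the same pattern, assuming the 1.0.0 module layout shown here and an already-configured `engine`:

```python
# Standalone version of the round-trip check above, assuming the 1.0.0 layout
# (databricks.sqlalchemy exporting TIMESTAMP) and a pre-built `engine`.
import datetime

from sqlalchemy import literal, select
from databricks.sqlalchemy import TIMESTAMP

tz_aware = datetime.datetime(2012, 10, 15, 12, 57, 18, tzinfo=datetime.timezone.utc)

# Passing the type to literal() ensures the value is bound as a timestamp
# rather than a bare string.
stmt = select(literal(tz_aware, TIMESTAMP))
# with engine.connect() as conn:
#     assert conn.scalar(stmt) == tz_aware
```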
databricks/sqlalchemy/test/_future.py DELETED
@@ -1,331 +0,0 @@
-# type: ignore
-
-from enum import Enum
-
-import pytest
-from databricks.sqlalchemy.test._regression import (
-    ExpandingBoundInTest,
-    IdentityAutoincrementTest,
-    LikeFunctionsTest,
-    NormalizedNameTest,
-)
-from databricks.sqlalchemy.test._unsupported import (
-    ComponentReflectionTest,
-    ComponentReflectionTestExtra,
-    CTETest,
-    InsertBehaviorTest,
-)
-from sqlalchemy.testing.suite import (
-    ArrayTest,
-    BinaryTest,
-    BizarroCharacterFKResolutionTest,
-    CollateTest,
-    ComputedColumnTest,
-    ComputedReflectionTest,
-    DifficultParametersTest,
-    FutureWeCanSetDefaultSchemaWEventsTest,
-    IdentityColumnTest,
-    IdentityReflectionTest,
-    JSONLegacyStringCastIndexTest,
-    JSONTest,
-    NativeUUIDTest,
-    QuotedNameArgumentTest,
-    RowCountTest,
-    SimpleUpdateDeleteTest,
-    WeCanSetDefaultSchemaWEventsTest,
-)
-
-
-class FutureFeature(Enum):
-    ARRAY = "ARRAY column type handling"
-    BINARY = "BINARY column type handling"
-    CHECK = "CHECK constraint handling"
-    COLLATE = "COLLATE DDL generation"
-    CTE_FEAT = "required CTE features"
-    EMPTY_INSERT = "empty INSERT support"
-    FK_OPTS = "foreign key option checking"
-    GENERATED_COLUMNS = "Delta computed / generated columns support"
-    IDENTITY = "identity reflection"
-    JSON = "JSON column type handling"
-    MULTI_PK = "get_multi_pk_constraint method"
-    PROVISION = "event-driven engine configuration"
-    REGEXP = "_visit_regexp"
-    SANE_ROWCOUNT = "sane_rowcount support"
-    TBL_OPTS = "get_table_options method"
-    TEST_DESIGN = "required test-fixture overrides"
-    TUPLE_LITERAL = "tuple-like IN markers completely"
-    UUID = "native Uuid() type"
-    VIEW_DEF = "get_view_definition method"
-
-
-def render_future_feature(rsn: FutureFeature, extra=False) -> str:
-    postfix = " More detail in _future.py" if extra else ""
-    return f"[FUTURE][{rsn.name}]: This dialect doesn't implement {rsn.value}.{postfix}"
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.BINARY))
-class BinaryTest(BinaryTest):
-    """Databricks doesn't support binding of BINARY type values. When DBR supports this, we can implement
-    it in this dialect.
-    """
-
-    pass
-
-
-class ExpandingBoundInTest(ExpandingBoundInTest):
-    @pytest.mark.skip(render_future_feature(FutureFeature.TUPLE_LITERAL))
-    def test_empty_heterogeneous_tuples_bindparam(self):
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.TUPLE_LITERAL))
-    def test_empty_heterogeneous_tuples_direct(self):
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.TUPLE_LITERAL))
-    def test_empty_homogeneous_tuples_bindparam(self):
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.TUPLE_LITERAL))
-    def test_empty_homogeneous_tuples_direct(self):
-        pass
-
-
-class NormalizedNameTest(NormalizedNameTest):
-    @pytest.mark.skip(render_future_feature(FutureFeature.TEST_DESIGN, True))
-    def test_get_table_names(self):
-        """I'm not clear how this test can ever pass given that its assertion looks like this:
-
-        ```python
-        eq_(tablenames[0].upper(), tablenames[0].lower())
-        eq_(tablenames[1].upper(), tablenames[1].lower())
-        ```
-
-        It's forcibly calling .upper() and .lower() on the same string and expecting them to be equal.
-        """
-        pass
-
-
-class CTETest(CTETest):
-    @pytest.mark.skip(render_future_feature(FutureFeature.CTE_FEAT, True))
-    def test_delete_from_round_trip(self):
-        """Databricks dialect doesn't implement multiple-table criteria within DELETE"""
-        pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.TEST_DESIGN, True))
-class IdentityColumnTest(IdentityColumnTest):
-    """Identity works. Test needs rewrite for Databricks. See comments in test_suite.py
-
-    The setup for these tests tries to create a table with a DELTA IDENTITY column but has two problems:
-    1. It uses an Integer() type for the column, whereas DELTA IDENTITY columns must be BIGINT.
-    2. It tries to set the start == 42, which Databricks doesn't support
-
-    I can get the tests to _run_ by patching the table fixture to use BigInteger(). But it asserts that the
-    identities of two rows are 42 and 43, which is not possible since they will be rows 1 and 2 instead.
-
-    I'm satisfied through manual testing that our implementation of visit_identity_column works but a better test is needed.
-    """
-
-    pass
-
-
-class IdentityAutoincrementTest(IdentityAutoincrementTest):
-    @pytest.mark.skip(render_future_feature(FutureFeature.TEST_DESIGN, True))
-    def test_autoincrement_with_identity(self):
-        """This test has the same issue as IdentityColumnTest.test_select_all in that it creates a table with identity
-        using an Integer() rather than a BigInteger(). If I override this behaviour to use a BigInteger() instead, the
-        test passes.
-        """
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.TEST_DESIGN))
-class BizarroCharacterFKResolutionTest(BizarroCharacterFKResolutionTest):
-    """Some of the combinations in this test pass. Others fail. Given the esoteric nature of these failures,
-    we have opted to defer implementing fixes to a later time, guided by customer feedback. Passage of
-    these tests is not an acceptance criterion for our dialect.
-    """
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.TEST_DESIGN))
-class DifficultParametersTest(DifficultParametersTest):
-    """Some of the combinations in this test pass. Others fail. Given the esoteric nature of these failures,
-    we have opted to defer implementing fixes to a later time, guided by customer feedback. Passage of
-    these tests is not an acceptance criterion for our dialect.
-    """
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.IDENTITY, True))
-class IdentityReflectionTest(IdentityReflectionTest):
-    """It's not clear _how_ to implement this for SQLAlchemy. Columns created with GENERATED ALWAYS AS IDENTITY
-    are not specially demarked in the output of TGetColumnsResponse or DESCRIBE TABLE EXTENDED.
-
-    We could theoretically parse this from the contents of `SHOW CREATE TABLE` but that feels like a hack.
-    """
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.JSON))
-class JSONTest(JSONTest):
-    """Databricks supports JSON path expressions in queries; it's just not implemented in this dialect."""
-
-    pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.JSON))
-class JSONLegacyStringCastIndexTest(JSONLegacyStringCastIndexTest):
-    """Same comment applies as JSONTest"""
-
-    pass
-
-
-class LikeFunctionsTest(LikeFunctionsTest):
-    @pytest.mark.skip(render_future_feature(FutureFeature.REGEXP))
-    def test_not_regexp_match(self):
-        """The default dialect doesn't implement _visit_regexp methods so we don't get them automatically."""
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.REGEXP))
-    def test_regexp_match(self):
-        """The default dialect doesn't implement _visit_regexp methods so we don't get them automatically."""
-        pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.COLLATE))
-class CollateTest(CollateTest):
-    """This is supported in Databricks. Not implemented here."""
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.UUID, True))
-class NativeUUIDTest(NativeUUIDTest):
-    """Type implementation will be straightforward. Since Databricks doesn't have a native UUID type we can use
-    a STRING field, create a custom TypeDecorator for sqlalchemy.types.Uuid and add it to the dialect's colspecs.
-
-    Then mark requirements.uuid_data_type as open() so this test can run.
-    """
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.SANE_ROWCOUNT))
-class RowCountTest(RowCountTest):
-    pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.SANE_ROWCOUNT))
-class SimpleUpdateDeleteTest(SimpleUpdateDeleteTest):
-    pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.PROVISION, True))
-class WeCanSetDefaultSchemaWEventsTest(WeCanSetDefaultSchemaWEventsTest):
-    """provision.py allows us to define event listeners that emit DDL for things like setting up a test schema
-    or, in this case, changing the default schema for the connection after it's been built. This would override
-    the schema defined in the sqlalchemy connection string. This support is possible but is not implemented
-    in the dialect. Deferred for now.
-    """
-
-    pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.PROVISION, True))
-class FutureWeCanSetDefaultSchemaWEventsTest(FutureWeCanSetDefaultSchemaWEventsTest):
-    """provision.py allows us to define event listeners that emit DDL for things like setting up a test schema
-    or, in this case, changing the default schema for the connection after it's been built. This would override
-    the schema defined in the sqlalchemy connection string. This support is possible but is not implemented
-    in the dialect. Deferred for now.
-    """
-
-    pass
-
-
-class ComponentReflectionTest(ComponentReflectionTest):
-    @pytest.mark.skip(reason=render_future_feature(FutureFeature.TBL_OPTS, True))
-    def test_multi_get_table_options_tables(self):
-        """It's not clear what the expected output from this method would even _be_. Requires research."""
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.VIEW_DEF))
-    def test_get_view_definition(self):
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.VIEW_DEF))
-    def test_get_view_definition_does_not_exist(self):
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.MULTI_PK))
-    def test_get_multi_pk_constraint(self):
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.CHECK))
-    def test_get_multi_check_constraints(self):
-        pass
-
-
-class ComponentReflectionTestExtra(ComponentReflectionTestExtra):
-    @pytest.mark.skip(render_future_feature(FutureFeature.CHECK))
-    def test_get_check_constraints(self):
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.FK_OPTS))
-    def test_get_foreign_key_options(self):
-        """It's not clear from the test code what the expected output is here. Further research required."""
-        pass
-
-
-class InsertBehaviorTest(InsertBehaviorTest):
-    @pytest.mark.skip(render_future_feature(FutureFeature.EMPTY_INSERT, True))
-    def test_empty_insert(self):
-        """Empty inserts are possible using DEFAULT VALUES on Databricks. To implement it, we need
-        to hook into the SQLCompiler to render a no-op column list. With SQLAlchemy's default implementation
-        the request fails with a syntax error
-        """
-        pass
-
-    @pytest.mark.skip(render_future_feature(FutureFeature.EMPTY_INSERT, True))
-    def test_empty_insert_multiple(self):
-        """Empty inserts are possible using DEFAULT VALUES on Databricks. To implement it, we need
-        to hook into the SQLCompiler to render a no-op column list. With SQLAlchemy's default implementation
-        the request fails with a syntax error
-        """
-        pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.ARRAY))
-class ArrayTest(ArrayTest):
-    """While Databricks supports ARRAY types, DBR cannot handle bound parameters of this type.
-    This makes them unusable to SQLAlchemy without some workaround. Potentially we could inline
-    the values of these parameters (which risks sql injection).
-    """
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(render_future_feature(FutureFeature.TEST_DESIGN, True))
-class QuotedNameArgumentTest(QuotedNameArgumentTest):
-    """These tests are challenging. The whole test setup depends on a table with a name like `quote ' one`
-    which will never work on Databricks because table names can't contain spaces. But QuotedNamedArgumentTest
-    also checks the behaviour of the DDL identifier preparation process. We need to override some of the
-    IdentifierPreparer methods because these are the ultimate control for whether or not CHECK and UNIQUE
-    constraints are emitted.
-    """
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(reason=render_future_feature(FutureFeature.GENERATED_COLUMNS))
-class ComputedColumnTest(ComputedColumnTest):
-    pass
-
-
-@pytest.mark.reviewed
-@pytest.mark.skip(reason=render_future_feature(FutureFeature.GENERATED_COLUMNS))
-class ComputedReflectionTest(ComputedReflectionTest):
-    pass