databricks-sqlalchemy 1.0.2__tar.gz → 2.0.1__tar.gz
This diff shows the content of publicly released package versions as published to their respective public registries, and is provided for informational purposes only.
- databricks_sqlalchemy-2.0.1/CHANGELOG.md +274 -0
- {databricks_sqlalchemy-1.0.2 → databricks_sqlalchemy-2.0.1}/PKG-INFO +60 -39
- {databricks_sqlalchemy-1.0.2 → databricks_sqlalchemy-2.0.1}/README.md +58 -37
- {databricks_sqlalchemy-1.0.2 → databricks_sqlalchemy-2.0.1}/pyproject.toml +10 -3
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/__init__.py +4 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/_ddl.py +100 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/_parse.py +385 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/_types.py +323 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/base.py +436 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/dependency_test/test_dependency.py +22 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/py.typed +0 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/pytest.ini +4 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/requirements.py +249 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/setup.cfg +4 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/test/_extra.py +70 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/test/_future.py +331 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/test/_regression.py +311 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/test/_unsupported.py +450 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/test/conftest.py +13 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/test/overrides/_componentreflectiontest.py +189 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/test/overrides/_ctetest.py +33 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/test/test_suite.py +13 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/test_local/__init__.py +5 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/test_local/conftest.py +44 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/test_local/e2e/MOCK_DATA.xlsx +0 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/test_local/e2e/test_basic.py +543 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/test_local/test_ddl.py +96 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/test_local/test_parsing.py +160 -0
- databricks_sqlalchemy-2.0.1/src/databricks/sqlalchemy/test_local/test_types.py +161 -0
- databricks_sqlalchemy-1.0.2/CHANGELOG.md +0 -5
- databricks_sqlalchemy-1.0.2/src/databricks/sqlalchemy/__init__.py +0 -1
- databricks_sqlalchemy-1.0.2/src/databricks/sqlalchemy/dialect/__init__.py +0 -340
- databricks_sqlalchemy-1.0.2/src/databricks/sqlalchemy/dialect/base.py +0 -17
- databricks_sqlalchemy-1.0.2/src/databricks/sqlalchemy/dialect/compiler.py +0 -38
- {databricks_sqlalchemy-1.0.2 → databricks_sqlalchemy-2.0.1}/LICENSE +0 -0
databricks_sqlalchemy-2.0.1/CHANGELOG.md

````diff
@@ -0,0 +1,274 @@
+# Release History
+
+# 3.3.0 (2024-07-18)
+
+- Don't retry requests that fail with HTTP code 401 (databricks/databricks-sql-python#408 by @Hodnebo)
+- Remove username/password (aka "basic") auth option (databricks/databricks-sql-python#409 by @jackyhu-db)
+- Refactor CloudFetch handler to fix numerous issues with it (databricks/databricks-sql-python#405 by @kravets-levko)
+- Add option to disable SSL verification for CloudFetch links (databricks/databricks-sql-python#414 by @kravets-levko)
+
+Databricks-managed passwords reached end of life on July 10, 2024. Therefore, Basic auth support was removed from
+the library. See https://docs.databricks.com/en/security/auth-authz/password-deprecation.html
+
+The existing option `_tls_no_verify=True` of `sql.connect(...)` will now also disable SSL cert verification
+(but not the SSL itself) for CloudFetch links. This option should be used as a workaround only, when other ways
+to fix SSL certificate errors didn't work.
+
+# 3.2.0 (2024-06-06)
+
+- Update proxy authentication (databricks/databricks-sql-python#354 by @amir-haroun)
+- Relax `pyarrow` pin (databricks/databricks-sql-python#389 by @dhirschfeld)
+- Fix error logging in OAuth manager (databricks/databricks-sql-python#269 by @susodapop)
+- SQLAlchemy: enable delta.feature.allowColumnDefaults for all tables (databricks/databricks-sql-python#343 by @dhirschfeld)
+- Update `thrift` dependency (databricks/databricks-sql-python#397 by @m1n0)
+
+# 3.1.2 (2024-04-18)
+
+- Remove broken cookie code (#379)
+- Small typing fixes (#382, #384 thanks @wyattscarpenter)
+
+# 3.1.1 (2024-03-19)
+
+- Don't retry requests that fail with code 403 (#373)
+- Assume a default retry-after for 429/503 (#371)
+- Fix boolean literals (#357)
+
+# 3.1.0 (2024-02-16)
+
+- Revert retry-after behavior to be exponential backoff (#349)
+- Support Databricks OAuth on Azure (#351)
+- Support Databricks OAuth on GCP (#338)
+
+# 3.0.3 (2024-02-02)
+
+- Revised docstrings and examples for OAuth (#339)
+- Redact the URL query parameters from the urllib3.connectionpool logs (#341)
+
+# 3.0.2 (2024-01-25)
+
+- SQLAlchemy dialect now supports table and column comments (thanks @cbornet!)
+- Fix: SQLAlchemy dialect now correctly reflects TINYINT types (thanks @TimTheinAtTabs!)
+- Fix: `server_hostname` URIs that included `https://` would raise an exception
+- Other: pinned to `pandas<=2.1` and `urllib3>=1.26` to avoid runtime errors in dbt-databricks (#330)
+
+## 3.0.1 (2023-12-01)
+
+- Other: updated docstring comment about default parameterization approach (#287)
+- Other: added tests for reading complex types and revised docstrings and type hints (#293)
+- Fix: SQLAlchemy dialect raised DeprecationWarning due to `dbapi` classmethod (#294)
+- Fix: SQLAlchemy dialect could not reflect TIMESTAMP_NTZ columns (#296)
+
+## 3.0.0 (2023-11-17)
+
+- Remove support for Python 3.7
+- Add support for native parameterized SQL queries. Requires DBR 14.2 and above. See docs/parameters.md for more info.
+- Completely rewritten SQLAlchemy dialect
+  - Adds support for SQLAlchemy >= 2.0 and drops support for SQLAlchemy 1.x
+  - Full e2e test coverage of all supported features
+  - Detailed usage notes in `README.sqlalchemy.md`
+  - Adds support for:
+    - New types: `TIME`, `TIMESTAMP`, `TIMESTAMP_NTZ`, `TINYINT`
+    - `Numeric` type scale and precision, like `Numeric(10,2)`
+    - Reading and writing `PrimaryKeyConstraint` and `ForeignKeyConstraint`
+    - Reading and writing composite keys
+    - Reading and writing from views
+    - Writing `Identity` to tables (i.e. autoincrementing primary keys)
+    - `LIMIT` and `OFFSET` for paging through results
+    - Caching metadata calls
+- Enable cloud fetch by default. To disable, set `use_cloud_fetch=False` when building `databricks.sql.client`.
+- Add integration tests for Databricks UC Volumes ingestion queries
+- Retries:
+  - Add `_retry_max_redirects` config
+  - Set `_enable_v3_retries=True` and warn if users override it
+- Security: bump minimum pyarrow version to 14.0.1 (CVE-2023-47248)
+
+## 2.9.3 (2023-08-24)
+
+- Fix: Connections failed when urllib3~=1.0.0 is installed (#206)
+
+## 2.9.2 (2023-08-17)
+
+**Note: this release was yanked from Pypi on 13 September 2023 due to compatibility issues with environments where `urllib3<=2.0.0` were installed. The log changes are incorporated into version 2.9.3 and greater.**
+
+- Other: Add `examples/v3_retries_query_execute.py` (#199)
+- Other: suppress log message when `_enable_v3_retries` is not `True` (#199)
+- Other: make this connector backwards compatible with `urllib3>=1.0.0` (#197)
+
+## 2.9.1 (2023-08-11)
+
+**Note: this release was yanked from Pypi on 13 September 2023 due to compatibility issues with environments where `urllib3<=2.0.0` were installed.**
+
+- Other: Explicitly pin urllib3 to ^2.0.0 (#191)
+
+## 2.9.0 (2023-08-10)
+
+- Replace retry handling with DatabricksRetryPolicy. This is disabled by default. To enable, set `_enable_v3_retries=True` when creating `databricks.sql.client` (#182)
+- Other: Fix typo in README quick start example (#186)
+- Other: Add autospec to Client mocks and tidy up `make_request` (#188)
+
+## 2.8.0 (2023-07-21)
+
+- Add support for Cloud Fetch. Disabled by default. Set `use_cloud_fetch=True` when building `databricks.sql.client` to enable it (#146, #151, #154)
+- SQLAlchemy has_table function now honours schema= argument and adds catalog= argument (#174)
+- SQLAlchemy set non_native_boolean_check_constraint False as it's not supported by Databricks (#120)
+- Fix: Revised SQLAlchemy dialect and examples for compatibility with SQLAlchemy==1.3.x (#173)
+- Fix: oauth would fail if expired credentials appeared in ~/.netrc (#122)
+- Fix: Python HTTP proxies were broken after switch to urllib3 (#158)
+- Other: remove unused import in SQLAlchemy dialect
+- Other: Relax pandas dependency constraint to allow ^2.0.0 (#164)
+- Other: Connector now logs operation handle guids as hexadecimal instead of bytes (#170)
+- Other: test_socket_timeout_user_defined e2e test was broken (#144)
+
+## 2.7.0 (2023-06-26)
+
+- Fix: connector raised exception when calling close() on a closed Thrift session
+- Improve e2e test development ergonomics
+- Redact logged thrift responses by default
+- Add support for OAuth on Databricks Azure
+
+## 2.6.2 (2023-06-14)
+
+- Fix: Retry GetOperationStatus requests for http errors
+
+## 2.6.1 (2023-06-08)
+
+- Fix: http.client would raise a BadStatusLine exception in some cases
+
+## 2.6.0 (2023-06-07)
+
+- Add support for HTTP 1.1 connections (connection pools)
+- Add a default socket timeout for thrift RPCs
+
+## 2.5.2 (2023-05-08)
+
+- Fix: SQLAlchemy adapter could not reflect TIMESTAMP or DATETIME columns
+- Other: Relax pandas and alembic dependency specifications
+
+## 2.5.1 (2023-04-28)
+
+- Other: Relax sqlalchemy required version as it was unecessarily strict.
+
+## 2.5.0 (2023-04-14)
+
+- Add support for External Auth providers
+- Fix: Python HTTP proxies were broken
+- Other: All Thrift requests that timeout during connection will be automatically retried
+
+## 2.4.1 (2023-03-21)
+
+- Less strict numpy and pyarrow dependencies
+- Update examples in README to use security best practices
+- Update docstring for client.execute() for clarity
+
+## 2.4.0 (2023-02-21)
+
+- Improve compatibility when installed alongside other Databricks namespace Python packages
+- Add SQLAlchemy dialect
+
+## 2.3.0 (2023-01-10)
+
+- Support staging ingestion commands for DBR 12+
+
+## 2.2.2 (2023-01-03)
+
+- Support custom oauth client id and redirect port
+- Fix: Add none check on \_oauth_persistence in DatabricksOAuthProvider
+
+## 2.2.1 (2022-11-29)
+
+- Add support for Python 3.11
+
+## 2.2.0 (2022-11-15)
+
+- Bump thrift version to address https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-13949
+- Add support for lz4 compression
+
+## 2.1.0 (2022-09-30)
+
+- Introduce experimental OAuth support while Bring Your Own IDP is in Public Preview on AWS
+- Add functional examples
+
+## 2.0.5 (2022-08-23)
+
+- Fix: closing a connection now closes any open cursors from that connection at the server
+- Other: Add project links to pyproject.toml (helpful for visitors from PyPi)
+
+## 2.0.4 (2022-08-17)
+
+- Add support for Python 3.10
+- Add unit test matrix for supported Python versions
+
+Huge thanks to @dbaxa for contributing this change!
+
+## 2.0.3 (2022-08-05)
+
+- Add retry logic for `GetOperationStatus` requests that fail with an `OSError`
+- Reorganised code to use Poetry for dependency management.
+
+## 2.0.2 (2022-05-04)
+
+- Better exception handling in automatic connection close
+
+## 2.0.1 (2022-04-21)
+
+- Fixed Pandas dependency in setup.cfg to be >= 1.2.0
+
+## 2.0.0 (2022-04-19)
+
+- Initial stable release of V2
+- Added better support for complex types, so that in Databricks runtime 10.3+, Arrays, Maps and Structs will get
+  deserialized as lists, lists of tuples and dicts, respectively.
+- Changed the name of the metadata arg to http_headers
+
+## 2.0.b2 (2022-04-04)
+
+- Change import of collections.Iterable to collections.abc.Iterable to make the library compatible with Python 3.10
+- Fixed bug with .tables method so that .tables works as expected with Unity-Catalog enabled endpoints
+
+## 2.0.0b1 (2022-03-04)
+
+- Fix packaging issue (dependencies were not being installed properly)
+- Fetching timestamp results will now return aware instead of naive timestamps
+- The client will now default to using simplified error messages
+
+## 2.0.0b (2022-02-08)
+
+- Initial beta release of V2. V2 is an internal re-write of large parts of the connector to use Databricks edge features. All public APIs from V1 remain.
+- Added Unity Catalog support (pass catalog and / or schema key word args to the .connect method to select initial schema and catalog)
+
+---
+
+**Note**: The code for versions prior to `v2.0.0b` is not contained in this repository. The below entries are included for reference only.
+
+---
+
+## 1.0.0 (2022-01-20)
+
+- Add operations for retrieving metadata
+- Add the ability to access columns by name on result rows
+- Add the ability to provide configuration settings on connect
+
+## 0.9.4 (2022-01-10)
+
+- Improved logging and error messages.
+
+## 0.9.3 (2021-12-08)
+
+- Add retries for 429 and 503 HTTP responses.
+
+## 0.9.2 (2021-12-02)
+
+- (Bug fix) Increased Thrift requirement from 0.10.0 to 0.13.0 as 0.10.0 was in fact incompatible
+- (Bug fix) Fixed error message after query execution failed -SQLSTATE and Error message were misplaced
+
+## 0.9.1 (2021-09-01)
+
+- Public Preview release, Experimental tag removed
+- minor updates in internal build/packaging
+- no functional changes
+
+## 0.9.0 (2021-08-04)
+
+- initial (Experimental) release of pyhive-forked connector
+- Python DBAPI 2.0 (PEP-0249), thrift based
+- see docs for more info: https://docs.databricks.com/dev-tools/python-sql-connector.html
````
{databricks_sqlalchemy-1.0.2 → databricks_sqlalchemy-2.0.1}/PKG-INFO

````diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: databricks-sqlalchemy
-Version: 1.0.2
+Version: 2.0.1
 Summary: Databricks SQLAlchemy plugin for Python
 License: Apache-2.0
 Author: Databricks
@@ -13,14 +13,17 @@ Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
-Requires-Dist: sqlalchemy (>=
+Requires-Dist: sqlalchemy (>=2.0.21)
 Project-URL: Bug Tracker, https://github.com/databricks/databricks-sqlalchemy/issues
 Project-URL: Homepage, https://github.com/databricks/databricks-sqlalchemy
 Description-Content-Type: text/markdown
 
-## Databricks dialect for SQLALchemy
+## Databricks dialect for SQLALchemy 2.0
 
-The Databricks dialect for SQLAlchemy serves as bridge between [SQLAlchemy](https://www.sqlalchemy.org/) and the Databricks SQL Python driver. A working example demonstrating usage can be found in `
+The Databricks dialect for SQLAlchemy serves as bridge between [SQLAlchemy](https://www.sqlalchemy.org/) and the Databricks SQL Python driver. A working example demonstrating usage can be found in `examples/sqlalchemy.py`.
+
+## Usage with SQLAlchemy <= 2.0
+A SQLAlchemy 1.4 compatible dialect was first released in connector [version 2.4](https://github.com/databricks/databricks-sql-python/releases/tag/v2.4.0). Support for SQLAlchemy 1.4 was dropped from the dialect as part of `databricks-sql-connector==3.0.0`. To continue using the dialect with SQLAlchemy 1.x, you can use `databricks-sql-connector^2.4.0`.
 
 
 ## Installation
@@ -28,7 +31,7 @@ The Databricks dialect for SQLAlchemy serves as bridge between [SQLAlchemy](http
 To install the dialect and its dependencies:
 
 ```shell
-pip install databricks-sqlalchemy
+pip install databricks-sqlalchemy
 ```
 
 If you also plan to use `alembic` you can alternatively run:
@@ -61,45 +64,41 @@ access_token = os.getenv("DATABRICKS_TOKEN")
 catalog = os.getenv("DATABRICKS_CATALOG")
 schema = os.getenv("DATABRICKS_SCHEMA")
 
-
-
-
-
-    conn_string = f"databricks://token:{access_token}@{host}"
-    connect_args = dict(catalog=catalog, schema=schema, http_path=http_path)
-    all_connect_args = {**extra_connect_args, **connect_args}
-    engine = create_engine(conn_string, connect_args=all_connect_args)
-else:
-    engine = create_engine(
-        f"databricks://token:{access_token}@{host}?http_path={http_path}&catalog={catalog}&schema={schema}",
-        connect_args=extra_connect_args,
-    )
-
+engine = create_engine(
+    f"databricks://token:{access_token}@{host}?http_path={http_path}&catalog={catalog}&schema={schema}"
+)
 ```
 
 ## Types
 
-The [SQLAlchemy type hierarchy](https://docs.sqlalchemy.org/en/
+The [SQLAlchemy type hierarchy](https://docs.sqlalchemy.org/en/20/core/type_basics.html) contains backend-agnostic type implementations (represented in CamelCase) and backend-specific types (represented in UPPERCASE). The majority of SQLAlchemy's [CamelCase](https://docs.sqlalchemy.org/en/20/core/type_basics.html#the-camelcase-datatypes) types are supported. This means that a SQLAlchemy application using these types should "just work" with Databricks.
 
 |SQLAlchemy Type|Databricks SQL Type|
 |-|-|
-[`BigInteger`](https://docs.sqlalchemy.org/en/
-[`LargeBinary`](https://docs.sqlalchemy.org/en/
-[`Boolean`](https://docs.sqlalchemy.org/en/
-[`Date`](https://docs.sqlalchemy.org/en/
-[`DateTime`](https://docs.sqlalchemy.org/en/
-[`
-[`
-[`
-[`
-[`
-[`
-[`
-[`
-[`
-[`
-[`
-[`
+[`BigInteger`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.BigInteger)| [`BIGINT`](https://docs.databricks.com/en/sql/language-manual/data-types/bigint-type.html)
+[`LargeBinary`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.LargeBinary)| (not supported)|
+[`Boolean`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.Boolean)| [`BOOLEAN`](https://docs.databricks.com/en/sql/language-manual/data-types/boolean-type.html)
+[`Date`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.Date)| [`DATE`](https://docs.databricks.com/en/sql/language-manual/data-types/date-type.html)
+[`DateTime`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.DateTime)| [`TIMESTAMP_NTZ`](https://docs.databricks.com/en/sql/language-manual/data-types/timestamp-ntz-type.html)|
+[`Double`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.Double)| [`DOUBLE`](https://docs.databricks.com/en/sql/language-manual/data-types/double-type.html)
+[`Enum`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.Enum)| (not supported)|
+[`Float`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.Float)| [`FLOAT`](https://docs.databricks.com/en/sql/language-manual/data-types/float-type.html)
+[`Integer`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.Integer)| [`INT`](https://docs.databricks.com/en/sql/language-manual/data-types/int-type.html)
+[`Numeric`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.Numeric)| [`DECIMAL`](https://docs.databricks.com/en/sql/language-manual/data-types/decimal-type.html)|
+[`PickleType`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.PickleType)| (not supported)|
+[`SmallInteger`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.SmallInteger)| [`SMALLINT`](https://docs.databricks.com/en/sql/language-manual/data-types/smallint-type.html)
+[`String`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.String)| [`STRING`](https://docs.databricks.com/en/sql/language-manual/data-types/string-type.html)|
+[`Text`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.Text)| [`STRING`](https://docs.databricks.com/en/sql/language-manual/data-types/string-type.html)|
+[`Time`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.Time)| [`STRING`](https://docs.databricks.com/en/sql/language-manual/data-types/string-type.html)|
+[`Unicode`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.Unicode)| [`STRING`](https://docs.databricks.com/en/sql/language-manual/data-types/string-type.html)|
+[`UnicodeText`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.UnicodeText)| [`STRING`](https://docs.databricks.com/en/sql/language-manual/data-types/string-type.html)|
+[`Uuid`](https://docs.sqlalchemy.org/en/20/core/type_basics.html#sqlalchemy.types.Uuid)| [`STRING`](https://docs.databricks.com/en/sql/language-manual/data-types/string-type.html)
+
+In addition, the dialect exposes three UPPERCASE SQLAlchemy types which are specific to Databricks:
+
+- [`databricks.sqlalchemy.TINYINT`](https://docs.databricks.com/en/sql/language-manual/data-types/tinyint-type.html)
+- [`databricks.sqlalchemy.TIMESTAMP`](https://docs.databricks.com/en/sql/language-manual/data-types/timestamp-type.html)
+- [`databricks.sqlalchemy.TIMESTAMP_NTZ`](https://docs.databricks.com/en/sql/language-manual/data-types/timestamp-ntz-type.html)
 
 
 ### `LargeBinary()` and `PickleType()`
@@ -112,6 +111,24 @@ Support for `CHECK` constraints is not implemented in this dialect. Support is p
 
 SQLAlchemy's `Enum()` type depends on `CHECK` constraints and is therefore not yet supported.
 
+### `DateTime()`, `TIMESTAMP_NTZ()`, and `TIMESTAMP()`
+
+Databricks Runtime provides two datetime-like types: `TIMESTAMP` which is always timezone-aware and `TIMESTAMP_NTZ` which is timezone agnostic. Both types can be imported from `databricks.sqlalchemy` and used in your models.
+
+The SQLAlchemy documentation indicates that `DateTime()` is not timezone-aware by default. So our dialect maps this type to `TIMESTAMP_NTZ()`. In practice, you should never need to use `TIMESTAMP_NTZ()` directly. Just use `DateTime()`.
+
+If you need your field to be timezone-aware, you can import `TIMESTAMP()` and use it instead.
+
+_Note that SQLAlchemy documentation suggests that you can declare a `DateTime()` with `timezone=True` on supported backends. However, if you do this with the Databricks dialect, the `timezone` argument will be ignored._
+
+```python
+from sqlalchemy import DateTime
+from databricks.sqlalchemy import TIMESTAMP
+
+class SomeModel(Base):
+    some_date_without_timezone = DateTime()
+    some_date_with_timezone = TIMESTAMP()
+```
 
 ### `String()`, `Text()`, `Unicode()`, and `UnicodeText()`
 
@@ -136,7 +153,7 @@ class SomeModel(Base):
 
 Identity and generated value support is currently limited in this dialect.
 
-When defining models, SQLAlchemy types can accept an [`autoincrement`](https://docs.sqlalchemy.org/en/
+When defining models, SQLAlchemy types can accept an [`autoincrement`](https://docs.sqlalchemy.org/en/20/core/metadata.html#sqlalchemy.schema.Column.params.autoincrement) argument. In our dialect, this argument is currently ignored. To create an auto-incrementing field in your model you can pass in an explicit [`Identity()`](https://docs.sqlalchemy.org/en/20/core/defaults.html#identity-ddl) instead.
 
 Furthermore, in Databricks Runtime, only `BIGINT` fields can be configured to auto-increment. So in SQLAlchemy, you must use the `BigInteger()` type.
 
@@ -150,6 +167,10 @@ class SomeModel(Base):
 
 When calling `Base.metadata.create_all()`, the executed DDL will include `GENERATED ALWAYS AS IDENTITY` for the `id` column. This is useful when using SQLAlchemy to generate tables. However, as of this writing, `Identity()` constructs are not captured when SQLAlchemy reflects a table's metadata (support for this is planned).
 
+## Parameters
+
+`databricks-sql-connector` supports two approaches to parameterizing SQL queries: native and inline. Our SQLAlchemy 2.0 dialect always uses the native approach and is therefore limited to DBR 14.2 and above. If you are writing parameterized queries to be executed by SQLAlchemy, you must use the "named" paramstyle (`:param`). Read more about parameterization in `docs/parameters.md`.
+
 ## Usage with pandas
 
 Use [`pandas.DataFrame.to_sql`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_sql.html) and [`pandas.read_sql`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_sql.html#pandas.read_sql) to write and read from Databricks SQL. These methods both accept a SQLAlchemy connection to interact with Databricks.
@@ -180,7 +201,7 @@ with engine.connect() as conn:
     df.to_sql('squares',conn)
 ```
 
-## [`PrimaryKey()`](https://docs.sqlalchemy.org/en/
+## [`PrimaryKey()`](https://docs.sqlalchemy.org/en/20/core/constraints.html#sqlalchemy.schema.PrimaryKeyConstraint) and [`ForeignKey()`](https://docs.sqlalchemy.org/en/20/core/constraints.html#defining-foreign-keys)
 
 Unity Catalog workspaces in Databricks support PRIMARY KEY and FOREIGN KEY constraints. _Note that Databricks Runtime does not enforce the integrity of FOREIGN KEY constraints_. You can establish a primary key by setting `primary_key=True` when defining a column.
 
````
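Taken together, the description changes above replace the old version-conditional connection code with a single `create_engine` call. Below is a minimal, runnable sketch of the new 2.0-style connection, assuming `databricks-sqlalchemy` 2.x is installed. The `DATABRICKS_SERVER_HOSTNAME` and `DATABRICKS_HTTP_PATH` environment variable names are assumed here; only the token, catalog, and schema variables appear verbatim in the hunks above.

```python
# Minimal sketch of the simplified 2.0-style connection shown in the diff above.
# Assumed env vars: DATABRICKS_SERVER_HOSTNAME and DATABRICKS_HTTP_PATH (not shown
# verbatim in the hunks); DATABRICKS_TOKEN/_CATALOG/_SCHEMA appear in the README.
import os

from sqlalchemy import create_engine, text

host = os.getenv("DATABRICKS_SERVER_HOSTNAME")
http_path = os.getenv("DATABRICKS_HTTP_PATH")
access_token = os.getenv("DATABRICKS_TOKEN")
catalog = os.getenv("DATABRICKS_CATALOG")
schema = os.getenv("DATABRICKS_SCHEMA")

# All connection options ride on the URL query string; no connect_args
# branching between SQLAlchemy 1.x and 2.x is needed any more.
engine = create_engine(
    f"databricks://token:{access_token}@{host}?http_path={http_path}&catalog={catalog}&schema={schema}"
)

with engine.connect() as conn:
    print(conn.execute(text("SELECT 1")).scalar())  # smoke test
```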
{databricks_sqlalchemy-1.0.2 → databricks_sqlalchemy-2.0.1}/README.md

The README.md diff (hunks `@@ -1,6 +1,9 @@`, `@@ -8,7 +11,7 @@`, `@@ -41,45 +44,41 @@`, `@@ -92,6 +91,24 @@`, `@@ -116,7 +133,7 @@`, `@@ -130,6 +147,10 @@`, and `@@ -160,7 +181,7 @@`) repeats, line for line, the description portion of the PKG-INFO diff above (PKG-INFO embeds the README as the package's long description); only the hunk offsets differ.
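The new `## Parameters` section in both files requires the "named" paramstyle when handing parameterized SQL to the dialect. A short sketch of what that looks like through SQLAlchemy's standard `text()` construct, reusing the `engine` from the connection sketch above; the table comes from the README's pandas example, and the `threshold` parameter name is illustrative.

```python
from sqlalchemy import text

# The 2.0 dialect binds parameters natively on the server (DBR 14.2+), so use
# the "named" paramstyle (:threshold) rather than qmark ("?") or format ("%s").
stmt = text("SELECT * FROM squares WHERE x > :threshold")

with engine.connect() as conn:
    for row in conn.execute(stmt, {"threshold": 5}):
        print(row)
```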
{databricks_sqlalchemy-1.0.2 → databricks_sqlalchemy-2.0.1}/pyproject.toml

````diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "databricks-sqlalchemy"
-version = "1.0.2"
+version = "2.0.1"
 description = "Databricks SQLAlchemy plugin for Python"
 authors = ["Databricks <databricks-sql-connector-maintainers@databricks.com>"]
 license = "Apache-2.0"
@@ -10,7 +10,7 @@ include = ["CHANGELOG.md"]
 
 [tool.poetry.dependencies]
 python = "^3.8.0"
-sqlalchemy = { version = "
+sqlalchemy = { version = ">=2.0.21" }
 
 [tool.poetry.dev-dependencies]
 pytest = "^7.1.2"
@@ -32,4 +32,11 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.black]
 exclude = '/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|\.svn|_build|buck-out|build|dist|thrift_api)/'
-
+#
+[tool.pytest.ini_options]
+markers = {"reviewed" = "Test case has been reviewed by Databricks"}
+minversion = "6.0"
+log_cli = "false"
+log_cli_level = "INFO"
+testpaths = ["tests", "src/databricks/sqlalchemy/test_local"]
+env_files = ["test.env"]
````
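The `[tool.pytest.ini_options]` table added above registers a `reviewed` marker and points pytest at the bundled `test_local` suite. As an illustrative sketch of exercising that configuration (standard pytest invocation, nothing package-specific beyond the marker and path defined in the diff):

```python
# Select only tests carrying the "reviewed" marker registered in pyproject.toml.
# Equivalent shell invocation: pytest -m reviewed src/databricks/sqlalchemy/test_local
import sys

import pytest

if __name__ == "__main__":
    sys.exit(pytest.main(["-m", "reviewed", "src/databricks/sqlalchemy/test_local"]))
```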