fakesnow 0.9.12__tar.gz → 0.9.14__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {fakesnow-0.9.12 → fakesnow-0.9.14}/PKG-INFO +3 -3
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow/__init__.py +6 -1
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow/fakes.py +9 -3
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow/transforms.py +39 -25
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow.egg-info/PKG-INFO +3 -3
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow.egg-info/SOURCES.txt +2 -1
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow.egg-info/requires.txt +2 -2
- {fakesnow-0.9.12 → fakesnow-0.9.14}/pyproject.toml +3 -3
- {fakesnow-0.9.12 → fakesnow-0.9.14}/tests/test_fakes.py +36 -182
- {fakesnow-0.9.12 → fakesnow-0.9.14}/tests/test_info_schema.py +3 -2
- {fakesnow-0.9.12 → fakesnow-0.9.14}/tests/test_transforms.py +37 -7
- fakesnow-0.9.14/tests/test_write_pandas.py +164 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/LICENSE +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/README.md +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow/__main__.py +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow/checks.py +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow/cli.py +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow/expr.py +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow/fixtures.py +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow/global_database.py +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow/info_schema.py +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow/macros.py +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow/py.typed +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow.egg-info/dependency_links.txt +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow.egg-info/entry_points.txt +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/fakesnow.egg-info/top_level.txt +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/setup.cfg +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/tests/test_checks.py +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/tests/test_cli.py +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/tests/test_expr.py +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/tests/test_patch.py +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/tests/test_sqlalchemy.py +0 -0
- {fakesnow-0.9.12 → fakesnow-0.9.14}/tests/test_users.py +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: fakesnow
|
3
|
-
Version: 0.9.
|
3
|
+
Version: 0.9.14
|
4
4
|
Summary: Fake Snowflake Connector for Python. Run, mock and test Snowflake DB locally.
|
5
5
|
License: Apache License
|
6
6
|
Version 2.0, January 2004
|
@@ -210,10 +210,10 @@ Classifier: License :: OSI Approved :: MIT License
|
|
210
210
|
Requires-Python: >=3.9
|
211
211
|
Description-Content-Type: text/markdown
|
212
212
|
License-File: LICENSE
|
213
|
-
Requires-Dist: duckdb~=0.10.
|
213
|
+
Requires-Dist: duckdb~=0.10.3
|
214
214
|
Requires-Dist: pyarrow
|
215
215
|
Requires-Dist: snowflake-connector-python
|
216
|
-
Requires-Dist: sqlglot~=
|
216
|
+
Requires-Dist: sqlglot~=24.1.0
|
217
217
|
Provides-Extra: dev
|
218
218
|
Requires-Dist: build~=1.0; extra == "dev"
|
219
219
|
Requires-Dist: pandas-stubs; extra == "dev"
|
@@ -21,6 +21,7 @@ def patch(
|
|
21
21
|
create_database_on_connect: bool = True,
|
22
22
|
create_schema_on_connect: bool = True,
|
23
23
|
db_path: str | os.PathLike | None = None,
|
24
|
+
nop_regexes: list[str] | None = None,
|
24
25
|
) -> Iterator[None]:
|
25
26
|
"""Patch snowflake targets with fakes.
|
26
27
|
|
@@ -36,8 +37,11 @@ def patch(
|
|
36
37
|
|
37
38
|
create_database_on_connect (bool, optional): Create database if provided in connection. Defaults to True.
|
38
39
|
create_schema_on_connect (bool, optional): Create schema if provided in connection. Defaults to True.
|
39
|
-
db_path (str | os.PathLike | None, optional):
|
40
|
+
db_path (str | os.PathLike | None, optional): Use existing database files from this path
|
40
41
|
or create them here if they don't already exist. If None databases are in-memory. Defaults to None.
|
42
|
+
nop_regexes (list[str] | None, optional): SQL statements matching these regexes (case-insensitive) will return
|
43
|
+
the success response without being run. Useful to skip over SQL commands that aren't implemented yet.
|
44
|
+
Defaults to None.
|
41
45
|
|
42
46
|
Yields:
|
43
47
|
Iterator[None]: None.
|
@@ -57,6 +61,7 @@ def patch(
|
|
57
61
|
create_database=create_database_on_connect,
|
58
62
|
create_schema=create_schema_on_connect,
|
59
63
|
db_path=db_path,
|
64
|
+
nop_regexes=nop_regexes,
|
60
65
|
**kwargs,
|
61
66
|
),
|
62
67
|
snowflake.connector.pandas_tools.write_pandas: fakes.write_pandas,
|
@@ -135,8 +135,11 @@ class FakeSnowflakeCursor:
|
|
135
135
|
print(f"{command};{params=}" if params else f"{command};", file=sys.stderr)
|
136
136
|
|
137
137
|
command, params = self._rewrite_with_params(command, params)
|
138
|
-
|
139
|
-
|
138
|
+
if self._conn.nop_regexes and any(re.match(p, command, re.IGNORECASE) for p in self._conn.nop_regexes):
|
139
|
+
transformed = transforms.SUCCESS_NOP
|
140
|
+
else:
|
141
|
+
expression = parse_one(command, read="snowflake")
|
142
|
+
transformed = self._transform(expression)
|
140
143
|
return self._execute(transformed, params)
|
141
144
|
except snowflake.connector.errors.ProgrammingError as e:
|
142
145
|
self._sqlstate = e.sqlstate
|
@@ -172,7 +175,7 @@ class FakeSnowflakeCursor:
|
|
172
175
|
.transform(transforms.to_timestamp_ntz)
|
173
176
|
.transform(transforms.to_timestamp)
|
174
177
|
.transform(transforms.object_construct)
|
175
|
-
.transform(transforms.
|
178
|
+
.transform(transforms.timestamp_ntz)
|
176
179
|
.transform(transforms.float_to_double)
|
177
180
|
.transform(transforms.integer_precision)
|
178
181
|
.transform(transforms.extract_text_length)
|
@@ -195,6 +198,7 @@ class FakeSnowflakeCursor:
|
|
195
198
|
.transform(transforms.create_user)
|
196
199
|
.transform(transforms.sha256)
|
197
200
|
.transform(transforms.create_clone)
|
201
|
+
.transform(transforms.alias_in_join)
|
198
202
|
)
|
199
203
|
|
200
204
|
def _execute(
|
@@ -502,6 +506,7 @@ class FakeSnowflakeConnection:
|
|
502
506
|
create_database: bool = True,
|
503
507
|
create_schema: bool = True,
|
504
508
|
db_path: str | os.PathLike | None = None,
|
509
|
+
nop_regexes: list[str] | None = None,
|
505
510
|
*args: Any,
|
506
511
|
**kwargs: Any,
|
507
512
|
):
|
@@ -513,6 +518,7 @@ class FakeSnowflakeConnection:
|
|
513
518
|
self.database_set = False
|
514
519
|
self.schema_set = False
|
515
520
|
self.db_path = db_path
|
521
|
+
self.nop_regexes = nop_regexes
|
516
522
|
self._paramstyle = snowflake.connector.paramstyle
|
517
523
|
|
518
524
|
create_global_database(duck_conn)
|
@@ -13,6 +13,25 @@ MISSING_DATABASE = "missing_database"
|
|
13
13
|
SUCCESS_NOP = sqlglot.parse_one("SELECT 'Statement executed successfully.'")
|
14
14
|
|
15
15
|
|
16
|
+
def alias_in_join(expression: exp.Expression) -> exp.Expression:
|
17
|
+
if (
|
18
|
+
isinstance(expression, exp.Select)
|
19
|
+
and (aliases := {e.args.get("alias"): e for e in expression.expressions if isinstance(e, exp.Alias)})
|
20
|
+
and (joins := expression.args.get("joins"))
|
21
|
+
):
|
22
|
+
j: exp.Join
|
23
|
+
for j in joins:
|
24
|
+
if (
|
25
|
+
(on := j.args.get("on"))
|
26
|
+
and (col := on.this)
|
27
|
+
and (isinstance(col, exp.Column))
|
28
|
+
and (alias := aliases.get(col.this))
|
29
|
+
):
|
30
|
+
col.args["this"] = alias.this
|
31
|
+
|
32
|
+
return expression
|
33
|
+
|
34
|
+
|
16
35
|
def array_size(expression: exp.Expression) -> exp.Expression:
|
17
36
|
if isinstance(expression, exp.ArraySize):
|
18
37
|
# case is used to convert 0 to null, because null is returned by duckdb when no case matches
|
@@ -350,17 +369,13 @@ def extract_comment_on_table(expression: exp.Expression) -> exp.Expression:
|
|
350
369
|
return new
|
351
370
|
elif (
|
352
371
|
isinstance(expression, exp.AlterTable)
|
353
|
-
and (sexp := expression.find(exp.
|
354
|
-
and
|
355
|
-
and (
|
356
|
-
and (eid := eq.find(exp.Identifier))
|
357
|
-
and isinstance(eid.this, str)
|
358
|
-
and eid.this.upper() == "COMMENT"
|
359
|
-
and (lit := eq.find(exp.Literal))
|
372
|
+
and (sexp := expression.find(exp.AlterSet))
|
373
|
+
and (scp := sexp.find(exp.SchemaCommentProperty))
|
374
|
+
and isinstance(scp.this, exp.Literal)
|
360
375
|
and (table := expression.find(exp.Table))
|
361
376
|
):
|
362
377
|
new = SUCCESS_NOP.copy()
|
363
|
-
new.args["table_comment"] = (table,
|
378
|
+
new.args["table_comment"] = (table, scp.this.this)
|
364
379
|
return new
|
365
380
|
|
366
381
|
return expression
|
@@ -596,15 +611,12 @@ def json_extract_cast_as_varchar(expression: exp.Expression) -> exp.Expression:
|
|
596
611
|
"""
|
597
612
|
if (
|
598
613
|
isinstance(expression, exp.Cast)
|
599
|
-
and (to := expression.to)
|
600
|
-
and isinstance(to, exp.DataType)
|
601
|
-
and to.this in {exp.DataType.Type.VARCHAR, exp.DataType.Type.TEXT}
|
602
614
|
and (je := expression.this)
|
603
615
|
and isinstance(je, exp.JSONExtract)
|
604
616
|
and (path := je.expression)
|
605
617
|
and isinstance(path, exp.JSONPath)
|
606
618
|
):
|
607
|
-
|
619
|
+
je.replace(exp.JSONExtractScalar(this=je.this, expression=path))
|
608
620
|
return expression
|
609
621
|
|
610
622
|
|
@@ -937,7 +949,7 @@ def tag(expression: exp.Expression) -> exp.Expression:
|
|
937
949
|
|
938
950
|
if isinstance(expression, exp.AlterTable) and (actions := expression.args.get("actions")):
|
939
951
|
for a in actions:
|
940
|
-
if isinstance(a, exp.
|
952
|
+
if isinstance(a, exp.AlterSet) and a.args.get("tag"):
|
941
953
|
return SUCCESS_NOP
|
942
954
|
elif (
|
943
955
|
isinstance(expression, exp.Command)
|
@@ -947,6 +959,13 @@ def tag(expression: exp.Expression) -> exp.Expression:
|
|
947
959
|
):
|
948
960
|
# alter table modify column set tag
|
949
961
|
return SUCCESS_NOP
|
962
|
+
elif (
|
963
|
+
isinstance(expression, exp.Create)
|
964
|
+
and (kind := expression.args.get("kind"))
|
965
|
+
and isinstance(kind, str)
|
966
|
+
and kind.upper() == "TAG"
|
967
|
+
):
|
968
|
+
return SUCCESS_NOP
|
950
969
|
|
951
970
|
return expression
|
952
971
|
|
@@ -1114,20 +1133,16 @@ def to_timestamp_ntz(expression: exp.Expression) -> exp.Expression:
|
|
1114
1133
|
return expression
|
1115
1134
|
|
1116
1135
|
|
1117
|
-
def
|
1118
|
-
"""Convert timestamp_ntz(
|
1136
|
+
def timestamp_ntz(expression: exp.Expression) -> exp.Expression:
|
1137
|
+
"""Convert timestamp_ntz (snowflake) to timestamp (duckdb).
|
1119
1138
|
|
1120
|
-
|
1139
|
+
NB: timestamp_ntz defaults to nanosecond precision (ie: NTZ(9)). The duckdb equivalent is TIMESTAMP_NS.
|
1140
|
+
However we use TIMESTAMP (ie: microsecond precision) here rather than TIMESTAMP_NS to avoid
|
1141
|
+
https://github.com/duckdb/duckdb/issues/7980 in test_write_pandas_timestamp_ntz.
|
1121
1142
|
"""
|
1122
1143
|
|
1123
|
-
if (
|
1124
|
-
|
1125
|
-
and expression.this == exp.DataType.Type.TIMESTAMP
|
1126
|
-
and exp.DataTypeParam(this=exp.Literal(this="9", is_string=False)) in expression.expressions
|
1127
|
-
):
|
1128
|
-
new = expression.copy()
|
1129
|
-
del new.args["expressions"]
|
1130
|
-
return new
|
1144
|
+
if isinstance(expression, exp.DataType) and expression.this == exp.DataType.Type.TIMESTAMPNTZ:
|
1145
|
+
return exp.DataType(this=exp.DataType.Type.TIMESTAMP)
|
1131
1146
|
|
1132
1147
|
return expression
|
1133
1148
|
|
@@ -1175,7 +1190,6 @@ def try_parse_json(expression: exp.Expression) -> exp.Expression:
|
|
1175
1190
|
return expression
|
1176
1191
|
|
1177
1192
|
|
1178
|
-
# sqlglot.parse_one("create table example(date TIMESTAMP_NTZ(9));", read="snowflake")
|
1179
1193
|
def semi_structured_types(expression: exp.Expression) -> exp.Expression:
|
1180
1194
|
"""Convert OBJECT, ARRAY, and VARIANT types to duckdb compatible types.
|
1181
1195
|
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: fakesnow
|
3
|
-
Version: 0.9.
|
3
|
+
Version: 0.9.14
|
4
4
|
Summary: Fake Snowflake Connector for Python. Run, mock and test Snowflake DB locally.
|
5
5
|
License: Apache License
|
6
6
|
Version 2.0, January 2004
|
@@ -210,10 +210,10 @@ Classifier: License :: OSI Approved :: MIT License
|
|
210
210
|
Requires-Python: >=3.9
|
211
211
|
Description-Content-Type: text/markdown
|
212
212
|
License-File: LICENSE
|
213
|
-
Requires-Dist: duckdb~=0.10.
|
213
|
+
Requires-Dist: duckdb~=0.10.3
|
214
214
|
Requires-Dist: pyarrow
|
215
215
|
Requires-Dist: snowflake-connector-python
|
216
|
-
Requires-Dist: sqlglot~=
|
216
|
+
Requires-Dist: sqlglot~=24.1.0
|
217
217
|
Provides-Extra: dev
|
218
218
|
Requires-Dist: build~=1.0; extra == "dev"
|
219
219
|
Requires-Dist: pandas-stubs; extra == "dev"
|
@@ -1,17 +1,17 @@
|
|
1
1
|
[project]
|
2
2
|
name = "fakesnow"
|
3
3
|
description = "Fake Snowflake Connector for Python. Run, mock and test Snowflake DB locally."
|
4
|
-
version = "0.9.
|
4
|
+
version = "0.9.14"
|
5
5
|
readme = "README.md"
|
6
6
|
license = { file = "LICENSE" }
|
7
7
|
classifiers = ["License :: OSI Approved :: MIT License"]
|
8
8
|
keywords = ["snowflake", "snowflakedb", "fake", "local", "mock", "testing"]
|
9
9
|
requires-python = ">=3.9"
|
10
10
|
dependencies = [
|
11
|
-
"duckdb~=0.10.
|
11
|
+
"duckdb~=0.10.3",
|
12
12
|
"pyarrow",
|
13
13
|
"snowflake-connector-python",
|
14
|
-
"sqlglot~=
|
14
|
+
"sqlglot~=24.1.0",
|
15
15
|
]
|
16
16
|
|
17
17
|
[project.urls]
|
@@ -5,9 +5,7 @@ from __future__ import annotations
|
|
5
5
|
import datetime
|
6
6
|
import json
|
7
7
|
import tempfile
|
8
|
-
from collections.abc import Sequence
|
9
8
|
from decimal import Decimal
|
10
|
-
from typing import cast
|
11
9
|
|
12
10
|
import pandas as pd
|
13
11
|
import pytest
|
@@ -19,6 +17,26 @@ from pandas.testing import assert_frame_equal
|
|
19
17
|
from snowflake.connector.cursor import ResultMetadata
|
20
18
|
|
21
19
|
import fakesnow
|
20
|
+
from tests.utils import dindent, indent
|
21
|
+
|
22
|
+
|
23
|
+
def test_alias_on_join(conn: snowflake.connector.SnowflakeConnection):
|
24
|
+
*_, cur = conn.execute_string(
|
25
|
+
"""
|
26
|
+
CREATE OR REPLACE TEMPORARY TABLE TEST (COL VARCHAR);
|
27
|
+
INSERT INTO TEST (COL) VALUES ('VARCHAR1'), ('VARCHAR2');
|
28
|
+
CREATE OR REPLACE TEMPORARY TABLE JOINED (COL VARCHAR, ANOTHER VARCHAR);
|
29
|
+
INSERT INTO JOINED (COL, ANOTHER) VALUES ('CHAR1', 'JOIN');
|
30
|
+
SELECT
|
31
|
+
T.COL
|
32
|
+
, SUBSTR(T.COL, 4) AS ALIAS
|
33
|
+
, J.ANOTHER
|
34
|
+
FROM TEST AS T
|
35
|
+
LEFT JOIN JOINED AS J
|
36
|
+
ON ALIAS = J.COL;
|
37
|
+
"""
|
38
|
+
)
|
39
|
+
assert cur.fetchall() == [("VARCHAR1", "CHAR1", "JOIN"), ("VARCHAR2", "CHAR2", None)]
|
22
40
|
|
23
41
|
|
24
42
|
def test_alter_table(cur: snowflake.connector.cursor.SnowflakeCursor):
|
@@ -402,7 +420,7 @@ def test_describe(cur: snowflake.connector.cursor.SnowflakeCursor):
|
|
402
420
|
XNUMBER82 NUMBER(8,2), XNUMBER NUMBER, XDECIMAL DECIMAL, XNUMERIC NUMERIC,
|
403
421
|
XINT INT, XINTEGER INTEGER, XBIGINT BIGINT, XSMALLINT SMALLINT, XTINYINT TINYINT, XBYTEINT BYTEINT,
|
404
422
|
XVARCHAR20 VARCHAR(20), XVARCHAR VARCHAR, XTEXT TEXT,
|
405
|
-
XTIMESTAMP TIMESTAMP, XTIMESTAMP_NTZ9 TIMESTAMP_NTZ(9), XTIMESTAMP_TZ TIMESTAMP_TZ, XDATE DATE, XTIME TIME,
|
423
|
+
XTIMESTAMP TIMESTAMP, XTIMESTAMP_NTZ TIMESTAMP_NTZ, XTIMESTAMP_NTZ9 TIMESTAMP_NTZ(9), XTIMESTAMP_TZ TIMESTAMP_TZ, XDATE DATE, XTIME TIME,
|
406
424
|
XBINARY BINARY, /* XARRAY ARRAY, XOBJECT OBJECT */ XVARIANT VARIANT
|
407
425
|
)
|
408
426
|
"""
|
@@ -427,6 +445,7 @@ def test_describe(cur: snowflake.connector.cursor.SnowflakeCursor):
|
|
427
445
|
ResultMetadata(name='XVARCHAR', type_code=2, display_size=None, internal_size=16777216, precision=None, scale=None, is_nullable=True),
|
428
446
|
ResultMetadata(name='XTEXT', type_code=2, display_size=None, internal_size=16777216, precision=None, scale=None, is_nullable=True),
|
429
447
|
ResultMetadata(name='XTIMESTAMP', type_code=8, display_size=None, internal_size=None, precision=0, scale=9, is_nullable=True),
|
448
|
+
ResultMetadata(name='XTIMESTAMP_NTZ', type_code=8, display_size=None, internal_size=None, precision=0, scale=9, is_nullable=True),
|
430
449
|
ResultMetadata(name='XTIMESTAMP_NTZ9', type_code=8, display_size=None, internal_size=None, precision=0, scale=9, is_nullable=True),
|
431
450
|
ResultMetadata(name='XTIMESTAMP_TZ', type_code=7, display_size=None, internal_size=None, precision=0, scale=9, is_nullable=True),
|
432
451
|
ResultMetadata(name='XDATE', type_code=3, display_size=None, internal_size=None, precision=None, scale=None, is_nullable=True),
|
@@ -470,7 +489,7 @@ def test_describe_table(dcur: snowflake.connector.cursor.DictCursor):
|
|
470
489
|
XNUMBER82 NUMBER(8,2), XNUMBER NUMBER, XDECIMAL DECIMAL, XNUMERIC NUMERIC,
|
471
490
|
XINT INT, XINTEGER INTEGER, XBIGINT BIGINT, XSMALLINT SMALLINT, XTINYINT TINYINT, XBYTEINT BYTEINT,
|
472
491
|
XVARCHAR20 VARCHAR(20), XVARCHAR VARCHAR, XTEXT TEXT,
|
473
|
-
XTIMESTAMP TIMESTAMP, XTIMESTAMP_NTZ9 TIMESTAMP_NTZ(9), XTIMESTAMP_TZ TIMESTAMP_TZ, XDATE DATE, XTIME TIME,
|
492
|
+
XTIMESTAMP TIMESTAMP, XTIMESTAMP_NTZ TIMESTAMP_NTZ, XTIMESTAMP_NTZ9 TIMESTAMP_NTZ(9), XTIMESTAMP_TZ TIMESTAMP_TZ, XDATE DATE, XTIME TIME,
|
474
493
|
XBINARY BINARY, /* XARRAY ARRAY, XOBJECT OBJECT */ XVARIANT VARIANT
|
475
494
|
)
|
476
495
|
"""
|
@@ -508,6 +527,7 @@ def test_describe_table(dcur: snowflake.connector.cursor.DictCursor):
|
|
508
527
|
{"name": "XVARCHAR", "type": "VARCHAR(16777216)", **common},
|
509
528
|
{"name": "XTEXT", "type": "VARCHAR(16777216)", **common},
|
510
529
|
{"name": "XTIMESTAMP", "type": "TIMESTAMP_NTZ(9)", **common},
|
530
|
+
{"name": "XTIMESTAMP_NTZ", "type": "TIMESTAMP_NTZ(9)", **common},
|
511
531
|
{"name": "XTIMESTAMP_NTZ9", "type": "TIMESTAMP_NTZ(9)", **common},
|
512
532
|
{"name": "XTIMESTAMP_TZ", "type": "TIMESTAMP_TZ(9)", **common},
|
513
533
|
{"name": "XDATE", "type": "DATE", **common},
|
@@ -871,6 +891,12 @@ def test_identifier(cur: snowflake.connector.cursor.SnowflakeCursor):
|
|
871
891
|
assert cur.fetchall() == [(1,)]
|
872
892
|
|
873
893
|
|
894
|
+
def test_nop_regexes():
|
895
|
+
with fakesnow.patch(nop_regexes=["^CALL.*"]), snowflake.connector.connect() as conn, conn.cursor() as cur:
|
896
|
+
cur.execute("call this_procedure_does_not_exist('foo', 'bar);")
|
897
|
+
assert cur.fetchall() == [("Statement executed successfully.",)]
|
898
|
+
|
899
|
+
|
874
900
|
def test_non_existent_table_throws_snowflake_exception(cur: snowflake.connector.cursor.SnowflakeCursor):
|
875
901
|
with pytest.raises(snowflake.connector.errors.ProgrammingError) as _:
|
876
902
|
cur.execute("select * from this_table_does_not_exist")
|
@@ -1320,6 +1346,7 @@ def test_tags_noop(cur: snowflake.connector.cursor.SnowflakeCursor):
|
|
1320
1346
|
cur.execute("CREATE TABLE table1 (id int)")
|
1321
1347
|
cur.execute("ALTER TABLE table1 SET TAG foo='bar'")
|
1322
1348
|
cur.execute("ALTER TABLE table1 MODIFY COLUMN name1 SET TAG foo='bar'")
|
1349
|
+
cur.execute("CREATE TAG cost_center COMMENT = 'cost_center tag'")
|
1323
1350
|
|
1324
1351
|
|
1325
1352
|
def test_to_timestamp(cur: snowflake.connector.cursor.SnowflakeCursor):
|
@@ -1493,183 +1520,10 @@ def test_values(conn: snowflake.connector.SnowflakeConnection):
|
|
1493
1520
|
|
1494
1521
|
def test_json_extract_cast_as_varchar(dcur: snowflake.connector.cursor.DictCursor):
|
1495
1522
|
dcur.execute("CREATE TABLE example (j VARIANT)")
|
1496
|
-
dcur.execute("""INSERT INTO example SELECT PARSE_JSON('{"str": "100", "
|
1497
|
-
|
1498
|
-
dcur.execute("SELECT j:str::varchar as c_str_varchar, j:number::varchar as c_num_varchar FROM example")
|
1499
|
-
assert dcur.fetchall() == [{"C_STR_VARCHAR": "100", "C_NUM_VARCHAR": "100"}]
|
1500
|
-
|
1501
|
-
dcur.execute("SELECT j:str::number as c_str_number, j:number::number as c_num_number FROM example")
|
1502
|
-
assert dcur.fetchall() == [{"C_STR_NUMBER": 100, "C_NUM_NUMBER": 100}]
|
1503
|
-
|
1504
|
-
|
1505
|
-
def test_write_pandas_auto_create(conn: snowflake.connector.SnowflakeConnection):
|
1506
|
-
with conn.cursor() as cur:
|
1507
|
-
df = pd.DataFrame.from_records(
|
1508
|
-
[
|
1509
|
-
{"ID": 1, "FIRST_NAME": "Jenny"},
|
1510
|
-
{"ID": 2, "FIRST_NAME": "Jasper"},
|
1511
|
-
]
|
1512
|
-
)
|
1513
|
-
snowflake.connector.pandas_tools.write_pandas(conn, df, "CUSTOMERS", auto_create_table=True)
|
1514
|
-
|
1515
|
-
cur.execute("select id, first_name from customers")
|
1516
|
-
|
1517
|
-
assert cur.fetchall() == [(1, "Jenny"), (2, "Jasper")]
|
1518
|
-
|
1519
|
-
|
1520
|
-
def test_write_pandas_quoted_column_names(conn: snowflake.connector.SnowflakeConnection):
|
1521
|
-
with conn.cursor(snowflake.connector.cursor.DictCursor) as dcur:
|
1522
|
-
# colunmn names with spaces
|
1523
|
-
dcur.execute('create table customers (id int, "first name" varchar)')
|
1524
|
-
df = pd.DataFrame.from_records(
|
1525
|
-
[
|
1526
|
-
{"ID": 1, "first name": "Jenny"},
|
1527
|
-
{"ID": 2, "first name": "Jasper"},
|
1528
|
-
]
|
1529
|
-
)
|
1530
|
-
snowflake.connector.pandas_tools.write_pandas(conn, df, "CUSTOMERS")
|
1531
|
-
|
1532
|
-
dcur.execute("select * from customers")
|
1533
|
-
|
1534
|
-
assert dcur.fetchall() == [
|
1535
|
-
{"ID": 1, "first name": "Jenny"},
|
1536
|
-
{"ID": 2, "first name": "Jasper"},
|
1537
|
-
]
|
1538
|
-
|
1539
|
-
|
1540
|
-
def test_write_pandas_array(conn: snowflake.connector.SnowflakeConnection):
|
1541
|
-
with conn.cursor() as cur:
|
1542
|
-
cur.execute("create table customers (ID int, FIRST_NAME varchar, LAST_NAME varchar, ORDERS array)")
|
1543
|
-
|
1544
|
-
df = pd.DataFrame.from_records(
|
1545
|
-
[
|
1546
|
-
{"ID": 1, "FIRST_NAME": "Jenny", "LAST_NAME": "P", "ORDERS": ["A", "B"]},
|
1547
|
-
{"ID": 2, "FIRST_NAME": "Jasper", "LAST_NAME": "M", "ORDERS": ["C", "D"]},
|
1548
|
-
]
|
1549
|
-
)
|
1550
|
-
snowflake.connector.pandas_tools.write_pandas(conn, df, "CUSTOMERS")
|
1551
|
-
|
1552
|
-
cur.execute("select * from customers")
|
1553
|
-
|
1554
|
-
assert indent(cur.fetchall()) == [
|
1555
|
-
(1, "Jenny", "P", '[\n "A",\n "B"\n]'),
|
1556
|
-
(2, "Jasper", "M", '[\n "C",\n "D"\n]'),
|
1557
|
-
]
|
1558
|
-
|
1559
|
-
|
1560
|
-
def test_write_pandas_timestamp_ntz(conn: snowflake.connector.SnowflakeConnection):
|
1561
|
-
# compensate for https://github.com/duckdb/duckdb/issues/7980
|
1562
|
-
with conn.cursor() as cur:
|
1563
|
-
cur.execute("create table example (UPDATE_AT_NTZ timestamp_ntz(9))")
|
1564
|
-
# cur.execute("create table example (UPDATE_AT_NTZ timestamp)")
|
1565
|
-
|
1566
|
-
now_utc = datetime.datetime.now(pytz.utc)
|
1567
|
-
df = pd.DataFrame([(now_utc,)], columns=["UPDATE_AT_NTZ"])
|
1568
|
-
snowflake.connector.pandas_tools.write_pandas(conn, df, "EXAMPLE")
|
1569
|
-
|
1570
|
-
cur.execute("select * from example")
|
1571
|
-
|
1572
|
-
assert cur.fetchall() == [(now_utc.replace(tzinfo=None),)]
|
1573
|
-
|
1523
|
+
dcur.execute("""INSERT INTO example SELECT PARSE_JSON('{"str": "100", "num" : 200}')""")
|
1574
1524
|
|
1575
|
-
|
1576
|
-
|
1577
|
-
cur.execute("create table customers (ID int, FIRST_NAME varchar, LAST_NAME varchar)")
|
1578
|
-
|
1579
|
-
df = pd.DataFrame.from_records(
|
1580
|
-
[
|
1581
|
-
{"ID": 1, "FIRST_NAME": "Jenny"},
|
1582
|
-
{"ID": 2, "FIRST_NAME": "Jasper"},
|
1583
|
-
]
|
1584
|
-
)
|
1585
|
-
snowflake.connector.pandas_tools.write_pandas(conn, df, "CUSTOMERS")
|
1586
|
-
|
1587
|
-
cur.execute("select id, first_name, last_name from customers")
|
1588
|
-
|
1589
|
-
# columns not in dataframe will receive their default value
|
1590
|
-
assert cur.fetchall() == [(1, "Jenny", None), (2, "Jasper", None)]
|
1591
|
-
|
1592
|
-
|
1593
|
-
def test_write_pandas_dict_as_varchar(conn: snowflake.connector.SnowflakeConnection):
|
1594
|
-
with conn.cursor() as cur:
|
1595
|
-
cur.execute("create or replace table example (vc varchar, o object)")
|
1596
|
-
|
1597
|
-
df = pd.DataFrame([({"kind": "vc", "count": 1}, {"kind": "obj", "amount": 2})], columns=["VC", "O"])
|
1598
|
-
snowflake.connector.pandas_tools.write_pandas(conn, df, "EXAMPLE")
|
1599
|
-
|
1600
|
-
cur.execute("select * from example")
|
1601
|
-
|
1602
|
-
# returned values are valid json strings
|
1603
|
-
# NB: snowflake orders object keys alphabetically, we don't
|
1604
|
-
r = cur.fetchall()
|
1605
|
-
assert [(sort_keys(r[0][0], indent=None), sort_keys(r[0][1], indent=2))] == [
|
1606
|
-
('{"count":1,"kind":"vc"}', '{\n "amount": 2,\n "kind": "obj"\n}')
|
1607
|
-
]
|
1608
|
-
|
1609
|
-
|
1610
|
-
def test_write_pandas_dict_different_keys(conn: snowflake.connector.SnowflakeConnection):
|
1611
|
-
with conn.cursor() as cur:
|
1612
|
-
cur.execute("create or replace table customers (notes variant)")
|
1613
|
-
|
1614
|
-
df = pd.DataFrame.from_records(
|
1615
|
-
[
|
1616
|
-
# rows have dicts with unique keys and values
|
1617
|
-
{"NOTES": {"k": "v1"}},
|
1618
|
-
# test single and double quoting
|
1619
|
-
{"NOTES": {"k2": ["v'2", 'v"3']}},
|
1620
|
-
]
|
1621
|
-
)
|
1622
|
-
snowflake.connector.pandas_tools.write_pandas(conn, df, "CUSTOMERS")
|
1525
|
+
dcur.execute("SELECT j:str::varchar as j_str_varchar, j:num::varchar as j_num_varchar FROM example")
|
1526
|
+
assert dcur.fetchall() == [{"J_STR_VARCHAR": "100", "J_NUM_VARCHAR": "200"}]
|
1623
1527
|
|
1624
|
-
|
1625
|
-
|
1626
|
-
assert indent(cur.fetchall()) == [('{\n "k": "v1"\n}',), ('{\n "k2": [\n "v\'2",\n "v\\"3"\n ]\n}',)]
|
1627
|
-
|
1628
|
-
|
1629
|
-
def test_write_pandas_db_schema(conn: snowflake.connector.SnowflakeConnection):
|
1630
|
-
with conn.cursor() as cur:
|
1631
|
-
cur.execute("create database db2")
|
1632
|
-
cur.execute("create schema db2.schema2")
|
1633
|
-
cur.execute("create or replace table db2.schema2.customers (ID int, FIRST_NAME varchar, LAST_NAME varchar)")
|
1634
|
-
|
1635
|
-
df = pd.DataFrame.from_records(
|
1636
|
-
[
|
1637
|
-
{"ID": 1, "FIRST_NAME": "Jenny"},
|
1638
|
-
{"ID": 2, "FIRST_NAME": "Jasper"},
|
1639
|
-
]
|
1640
|
-
)
|
1641
|
-
snowflake.connector.pandas_tools.write_pandas(conn, df, "CUSTOMERS", "DB2", "SCHEMA2")
|
1642
|
-
|
1643
|
-
cur.execute("select id, first_name, last_name from db2.schema2.customers")
|
1644
|
-
|
1645
|
-
# columns not in dataframe will receive their default value
|
1646
|
-
assert cur.fetchall() == [(1, "Jenny", None), (2, "Jasper", None)]
|
1647
|
-
|
1648
|
-
|
1649
|
-
def indent(rows: Sequence[tuple] | Sequence[dict]) -> list[tuple]:
|
1650
|
-
# indent duckdb json strings tuple values to match snowflake json strings
|
1651
|
-
assert isinstance(rows[0], tuple)
|
1652
|
-
return [
|
1653
|
-
(*[json.dumps(json.loads(c), indent=2) if (isinstance(c, str) and c.startswith(("[", "{"))) else c for c in r],)
|
1654
|
-
for r in rows
|
1655
|
-
]
|
1656
|
-
|
1657
|
-
|
1658
|
-
def dindent(rows: Sequence[tuple] | Sequence[dict]) -> list[dict]:
|
1659
|
-
# indent duckdb json strings dict values to match snowflake json strings
|
1660
|
-
assert isinstance(rows[0], dict)
|
1661
|
-
return [
|
1662
|
-
{
|
1663
|
-
k: json.dumps(json.loads(v), indent=2) if (isinstance(v, str) and v.startswith(("[", "{"))) else v
|
1664
|
-
for k, v in cast(dict, r).items()
|
1665
|
-
}
|
1666
|
-
for r in rows
|
1667
|
-
]
|
1668
|
-
|
1669
|
-
|
1670
|
-
def sort_keys(sdict: str, indent: int | None = 2) -> str:
|
1671
|
-
return json.dumps(
|
1672
|
-
json.loads(sdict, object_pairs_hook=lambda x: dict(sorted(x))),
|
1673
|
-
indent=indent,
|
1674
|
-
separators=None if indent else (",", ":"),
|
1675
|
-
)
|
1528
|
+
dcur.execute("SELECT j:str::number as j_str_number, j:num::number as j_num_number FROM example")
|
1529
|
+
assert dcur.fetchall() == [{"J_STR_NUMBER": 100, "J_NUM_NUMBER": 200}]
|
@@ -79,7 +79,7 @@ def test_info_schema_columns_other(cur: snowflake.connector.cursor.SnowflakeCurs
|
|
79
79
|
cur.execute(
|
80
80
|
"""
|
81
81
|
create or replace table example (
|
82
|
-
XTIMESTAMP TIMESTAMP, XTIMESTAMP_NTZ9 TIMESTAMP_NTZ(9), XTIMESTAMP_TZ TIMESTAMP_TZ, XDATE DATE, XTIME TIME,
|
82
|
+
XTIMESTAMP TIMESTAMP, XTIMESTAMP_NTZ TIMESTAMP_NTZ, XTIMESTAMP_NTZ9 TIMESTAMP_NTZ(9), XTIMESTAMP_TZ TIMESTAMP_TZ, XDATE DATE, XTIME TIME,
|
83
83
|
XBINARY BINARY, /* XARRAY ARRAY, XOBJECT OBJECT */ XVARIANT VARIANT
|
84
84
|
)
|
85
85
|
"""
|
@@ -94,6 +94,7 @@ def test_info_schema_columns_other(cur: snowflake.connector.cursor.SnowflakeCurs
|
|
94
94
|
|
95
95
|
assert cur.fetchall() == [
|
96
96
|
("XTIMESTAMP", "TIMESTAMP_NTZ"),
|
97
|
+
("XTIMESTAMP_NTZ", "TIMESTAMP_NTZ"),
|
97
98
|
("XTIMESTAMP_NTZ9", "TIMESTAMP_NTZ"),
|
98
99
|
("XTIMESTAMP_TZ", "TIMESTAMP_TZ"),
|
99
100
|
("XDATE", "DATE"),
|
@@ -180,7 +181,7 @@ def test_info_schema_views_with_views(conn: snowflake.connector.SnowflakeConnect
|
|
180
181
|
"table_schema": "SCHEMA1",
|
181
182
|
"table_name": "BAR",
|
182
183
|
"table_owner": "SYSADMIN",
|
183
|
-
"view_definition": "CREATE VIEW SCHEMA1.BAR AS SELECT * FROM FOO WHERE (ID > 5)
|
184
|
+
"view_definition": "CREATE VIEW SCHEMA1.BAR AS SELECT * FROM FOO WHERE (ID > 5);",
|
184
185
|
"check_option": "NONE",
|
185
186
|
"is_updatable": "NO",
|
186
187
|
"insertable_into": "NO",
|
@@ -7,6 +7,7 @@ from sqlglot import exp
|
|
7
7
|
from fakesnow.transforms import (
|
8
8
|
SUCCESS_NOP,
|
9
9
|
_get_to_number_args,
|
10
|
+
alias_in_join,
|
10
11
|
array_agg_within_group,
|
11
12
|
array_size,
|
12
13
|
create_clone,
|
@@ -40,7 +41,7 @@ from fakesnow.transforms import (
|
|
40
41
|
show_objects_tables,
|
41
42
|
show_schemas,
|
42
43
|
tag,
|
43
|
-
|
44
|
+
timestamp_ntz,
|
44
45
|
to_date,
|
45
46
|
to_decimal,
|
46
47
|
to_timestamp,
|
@@ -53,6 +54,23 @@ from fakesnow.transforms import (
|
|
53
54
|
)
|
54
55
|
|
55
56
|
|
57
|
+
def test_alias_in_join() -> None:
|
58
|
+
assert (
|
59
|
+
sqlglot.parse_one("""
|
60
|
+
SELECT
|
61
|
+
T.COL
|
62
|
+
, SUBSTR(T.COL, 4) AS ALIAS
|
63
|
+
, J.ANOTHER
|
64
|
+
FROM TEST AS T
|
65
|
+
LEFT JOIN JOINED AS J
|
66
|
+
ON ALIAS = J.COL
|
67
|
+
""")
|
68
|
+
.transform(alias_in_join)
|
69
|
+
.sql()
|
70
|
+
== "SELECT T.COL, SUBSTR(T.COL, 4) AS ALIAS, J.ANOTHER FROM TEST AS T LEFT JOIN JOINED AS J ON SUBSTR(T.COL, 4) = J.COL" # noqa: E501
|
71
|
+
)
|
72
|
+
|
73
|
+
|
56
74
|
def test_array_size() -> None:
|
57
75
|
assert (
|
58
76
|
sqlglot.parse_one("""select array_size(parse_json('["a","b"]'))""").transform(array_size).sql(dialect="duckdb")
|
@@ -114,7 +132,7 @@ def test_describe_table() -> None:
|
|
114
132
|
|
115
133
|
def test_drop_schema_cascade() -> None:
|
116
134
|
assert (
|
117
|
-
sqlglot.parse_one("drop schema schema1").transform(drop_schema_cascade).sql() == "DROP
|
135
|
+
sqlglot.parse_one("drop schema schema1").transform(drop_schema_cascade).sql() == "DROP SCHEMA schema1 CASCADE"
|
118
136
|
)
|
119
137
|
|
120
138
|
|
@@ -422,7 +440,7 @@ def test_integer_precision() -> None:
|
|
422
440
|
)
|
423
441
|
.transform(integer_precision)
|
424
442
|
.sql(dialect="duckdb")
|
425
|
-
== "CREATE TABLE example (XNUMBER82 DECIMAL(8, 2), XNUMBER
|
443
|
+
== "CREATE TABLE example (XNUMBER82 DECIMAL(8, 2), XNUMBER DECIMAL(38, 0), XDECIMAL DECIMAL(38, 0), XNUMERIC DECIMAL(38, 0), XINT BIGINT, XINTEGER BIGINT, XBIGINT BIGINT, XSMALLINT BIGINT, XTINYINT BIGINT, XBYTEINT BIGINT)" # noqa: E501
|
426
444
|
)
|
427
445
|
|
428
446
|
|
@@ -471,17 +489,17 @@ def test_json_extract_cast_as_varchar() -> None:
|
|
471
489
|
)
|
472
490
|
.transform(json_extract_cast_as_varchar)
|
473
491
|
.sql(dialect="duckdb")
|
474
|
-
== """SELECT JSON('{"fruit":"banana"}') ->> '$.fruit'"""
|
492
|
+
== """SELECT CAST(JSON('{"fruit":"banana"}') ->> '$.fruit' AS TEXT)"""
|
475
493
|
)
|
476
494
|
|
477
495
|
assert (
|
478
496
|
sqlglot.parse_one(
|
479
|
-
"""select parse_json('{"
|
497
|
+
"""select parse_json('{"count":"9000"}'):count::number""",
|
480
498
|
read="snowflake",
|
481
499
|
)
|
482
500
|
.transform(json_extract_cast_as_varchar)
|
483
501
|
.sql(dialect="duckdb")
|
484
|
-
== """SELECT CAST(JSON('{"
|
502
|
+
== """SELECT CAST(JSON('{"count":"9000"}') ->> '$.count' AS DECIMAL(38, 0))"""
|
485
503
|
)
|
486
504
|
|
487
505
|
|
@@ -594,12 +612,24 @@ def test_show_schemas() -> None:
|
|
594
612
|
|
595
613
|
def test_tag() -> None:
|
596
614
|
assert sqlglot.parse_one("ALTER TABLE table1 SET TAG foo='bar'", read="snowflake").transform(tag) == SUCCESS_NOP
|
615
|
+
assert (
|
616
|
+
sqlglot.parse_one("ALTER TABLE db1.schema1.table1 SET TAG foo.bar='baz'", read="snowflake").transform(tag)
|
617
|
+
== SUCCESS_NOP
|
618
|
+
)
|
619
|
+
assert (
|
620
|
+
sqlglot.parse_one("ALTER TABLE table1 MODIFY COLUMN name1 SET TAG foo='bar'", read="snowflake").transform(tag)
|
621
|
+
== SUCCESS_NOP
|
622
|
+
)
|
623
|
+
assert (
|
624
|
+
sqlglot.parse_one("CREATE TAG cost_center COMMENT = 'cost_center tag'", read="snowflake").transform(tag)
|
625
|
+
== SUCCESS_NOP
|
626
|
+
)
|
597
627
|
|
598
628
|
|
599
629
|
def test_timestamp_ntz_ns() -> None:
|
600
630
|
assert (
|
601
631
|
sqlglot.parse_one("CREATE TABLE table1(ts TIMESTAMP_NTZ(9))", read="snowflake")
|
602
|
-
.transform(
|
632
|
+
.transform(timestamp_ntz)
|
603
633
|
.sql(dialect="duckdb")
|
604
634
|
== "CREATE TABLE table1 (ts TIMESTAMP)"
|
605
635
|
)
|
@@ -0,0 +1,164 @@
|
|
1
|
+
from __future__ import annotations
|
2
|
+
|
3
|
+
import datetime
|
4
|
+
import json
|
5
|
+
|
6
|
+
import pandas as pd
|
7
|
+
import pytz
|
8
|
+
import snowflake.connector
|
9
|
+
import snowflake.connector.cursor
|
10
|
+
import snowflake.connector.pandas_tools
|
11
|
+
|
12
|
+
from tests.utils import indent
|
13
|
+
|
14
|
+
|
15
|
+
def test_write_pandas_auto_create(conn: snowflake.connector.SnowflakeConnection):
    """auto_create_table=True lets write_pandas create the table from the frame itself."""
    with conn.cursor() as cursor:
        frame = pd.DataFrame(
            [[1, "Jenny"], [2, "Jasper"]],
            columns=["ID", "FIRST_NAME"],
        )
        snowflake.connector.pandas_tools.write_pandas(conn, frame, "CUSTOMERS", auto_create_table=True)

        cursor.execute("select id, first_name from customers")

        assert cursor.fetchall() == [(1, "Jenny"), (2, "Jasper")]
|
28
|
+
|
29
|
+
|
30
|
+
def test_write_pandas_quoted_column_names(conn: snowflake.connector.SnowflakeConnection):
    """Column names that need quoting (eg: contain spaces) round-trip intact."""
    with conn.cursor(snowflake.connector.cursor.DictCursor) as dcur:
        # column names with spaces must be quoted in the DDL
        dcur.execute('create table customers (id int, "first name" varchar)')

        rows = [
            {"ID": 1, "first name": "Jenny"},
            {"ID": 2, "first name": "Jasper"},
        ]
        snowflake.connector.pandas_tools.write_pandas(conn, pd.DataFrame.from_records(rows), "CUSTOMERS")

        dcur.execute("select * from customers")

        assert dcur.fetchall() == rows
|
48
|
+
|
49
|
+
|
50
|
+
def test_write_pandas_array(conn: snowflake.connector.SnowflakeConnection):
    """Python lists written to an ARRAY column come back as JSON array strings."""
    with conn.cursor() as cursor:
        cursor.execute("create table customers (ID int, FIRST_NAME varchar, LAST_NAME varchar, ORDERS array)")

        records = [
            {"ID": 1, "FIRST_NAME": "Jenny", "LAST_NAME": "P", "ORDERS": ["A", "B"]},
            {"ID": 2, "FIRST_NAME": "Jasper", "LAST_NAME": "M", "ORDERS": ["C", "D"]},
        ]
        snowflake.connector.pandas_tools.write_pandas(conn, pd.DataFrame.from_records(records), "CUSTOMERS")

        cursor.execute("select * from customers")

        assert indent(cursor.fetchall()) == [
            (1, "Jenny", "P", '[\n  "A",\n  "B"\n]'),
            (2, "Jasper", "M", '[\n  "C",\n  "D"\n]'),
        ]
|
68
|
+
|
69
|
+
|
70
|
+
def test_write_pandas_timestamp_ntz(conn: snowflake.connector.SnowflakeConnection):
    """A tz-aware timestamp written into a TIMESTAMP_NTZ column reads back naive."""
    # compensate for https://github.com/duckdb/duckdb/issues/7980
    with conn.cursor() as cursor:
        cursor.execute("create table example (UPDATE_AT_NTZ timestamp_ntz(9))")

        moment = datetime.datetime.now(pytz.utc)
        frame = pd.DataFrame([(moment,)], columns=["UPDATE_AT_NTZ"])
        snowflake.connector.pandas_tools.write_pandas(conn, frame, "EXAMPLE")

        cursor.execute("select * from example")

        # the timezone is stripped on the way back out
        assert cursor.fetchall() == [(moment.replace(tzinfo=None),)]
|
83
|
+
|
84
|
+
|
85
|
+
def test_write_pandas_partial_columns(conn: snowflake.connector.SnowflakeConnection):
    """Writing a frame with a subset of the table's columns leaves the rest defaulted."""
    with conn.cursor() as cursor:
        cursor.execute("create table customers (ID int, FIRST_NAME varchar, LAST_NAME varchar)")

        # the frame omits LAST_NAME entirely
        frame = pd.DataFrame(
            [[1, "Jenny"], [2, "Jasper"]],
            columns=["ID", "FIRST_NAME"],
        )
        snowflake.connector.pandas_tools.write_pandas(conn, frame, "CUSTOMERS")

        cursor.execute("select id, first_name, last_name from customers")

        # columns not in dataframe will receive their default value
        assert cursor.fetchall() == [(1, "Jenny", None), (2, "Jasper", None)]
|
101
|
+
|
102
|
+
|
103
|
+
def test_write_pandas_dict_as_varchar(conn: snowflake.connector.SnowflakeConnection):
    """Dicts written into varchar/object columns are stored as valid JSON strings."""
    with conn.cursor() as cursor:
        cursor.execute("create or replace table example (vc varchar, o object)")

        frame = pd.DataFrame([({"kind": "vc", "count": 1}, {"kind": "obj", "amount": 2})], columns=["VC", "O"])
        snowflake.connector.pandas_tools.write_pandas(conn, frame, "EXAMPLE")

        cursor.execute("select * from example")

        # returned values are valid json strings
        # NB: snowflake orders object keys alphabetically, we don't
        row = cursor.fetchall()[0]
        assert sort_keys(row[0], indent=None) == '{"count":1,"kind":"vc"}'
        assert sort_keys(row[1], indent=2) == '{\n  "amount": 2,\n  "kind": "obj"\n}'
|
118
|
+
|
119
|
+
|
120
|
+
def test_write_pandas_dict_different_keys(conn: snowflake.connector.SnowflakeConnection):
    """Each row's dict may carry entirely different keys; quoting survives the round trip."""
    with conn.cursor() as cursor:
        cursor.execute("create or replace table customers (notes variant)")

        frame = pd.DataFrame.from_records(
            [
                # rows have dicts with unique keys and values
                {"NOTES": {"k": "v1"}},
                # test single and double quoting
                {"NOTES": {"k2": ["v'2", 'v"3']}},
            ]
        )
        snowflake.connector.pandas_tools.write_pandas(conn, frame, "CUSTOMERS")

        cursor.execute("select * from customers")

        assert indent(cursor.fetchall()) == [
            ('{\n  "k": "v1"\n}',),
            ('{\n  "k2": [\n    "v\'2",\n    "v\\"3"\n  ]\n}',),
        ]
|
137
|
+
|
138
|
+
|
139
|
+
def test_write_pandas_db_schema(conn: snowflake.connector.SnowflakeConnection):
    """write_pandas targets an explicitly named database and schema."""
    with conn.cursor() as cursor:
        cursor.execute("create database db2")
        cursor.execute("create schema db2.schema2")
        cursor.execute("create or replace table db2.schema2.customers (ID int, FIRST_NAME varchar, LAST_NAME varchar)")

        frame = pd.DataFrame(
            [[1, "Jenny"], [2, "Jasper"]],
            columns=["ID", "FIRST_NAME"],
        )
        snowflake.connector.pandas_tools.write_pandas(conn, frame, "CUSTOMERS", "DB2", "SCHEMA2")

        cursor.execute("select id, first_name, last_name from db2.schema2.customers")

        # columns not in dataframe will receive their default value
        assert cursor.fetchall() == [(1, "Jenny", None), (2, "Jasper", None)]
|
157
|
+
|
158
|
+
|
159
|
+
def sort_keys(sdict: str, indent: int | None = 2) -> str:
    """Re-serialise a JSON object string with its keys sorted alphabetically.

    A falsy ``indent`` produces the compact form (no whitespace between
    tokens); otherwise the output is pretty-printed with that many spaces.
    """
    # the pairs hook runs on every object as it is decoded, so nested
    # objects are sorted too
    obj = json.loads(sdict, object_pairs_hook=lambda pairs: dict(sorted(pairs)))
    compact = (",", ":") if not indent else None
    return json.dumps(obj, indent=indent, separators=compact)
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|