sqlframe 1.9.0__py3-none-any.whl → 1.11.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sqlframe/testing/utils.py (new file)
@@ -0,0 +1,320 @@
+# This code is based on code from Apache Spark under the license found in the LICENSE file located in the 'sqlframe' folder.
+from __future__ import annotations
+
+import difflib
+import os
+import typing as t
+from itertools import zip_longest
+
+from sqlframe.base import types
+from sqlframe.base.dataframe import _BaseDataFrame
+from sqlframe.base.exceptions import (
+    DataFrameDiffError,
+    SchemaDiffError,
+    SQLFrameException,
+)
+from sqlframe.base.util import verify_pandas_installed
+
+if t.TYPE_CHECKING:
+    import pandas as pd
+
+
+def _terminal_color_support():
+    try:
+        # determine if environment supports color
+        script = "$(test $(tput colors)) && $(test $(tput colors) -ge 8) && echo true || echo false"
+        return os.popen(script).read()
+    except Exception:
+        return False
+
+
+def _context_diff(actual: t.List[str], expected: t.List[str], n: int = 3):
+    """
+    Modified from difflib context_diff API,
+    see original code here: https://github.com/python/cpython/blob/main/Lib/difflib.py#L1180
+    """
+
+    def red(s: str) -> str:
+        red_color = "\033[31m"
+        no_color = "\033[0m"
+        return red_color + str(s) + no_color
+
+    prefix = dict(insert="+ ", delete="- ", replace="! ", equal="  ")
+    for group in difflib.SequenceMatcher(None, actual, expected).get_grouped_opcodes(n):
+        yield "*** actual ***"
+        if any(tag in {"replace", "delete"} for tag, _, _, _, _ in group):
+            for tag, i1, i2, _, _ in group:
+                for line in actual[i1:i2]:
+                    if tag != "equal" and _terminal_color_support():
+                        yield red(prefix[tag] + str(line))
+                    else:
+                        yield prefix[tag] + str(line)
+
+        yield "\n"
+
+        yield "*** expected ***"
+        if any(tag in {"replace", "insert"} for tag, _, _, _, _ in group):
+            for tag, _, _, j1, j2 in group:
+                for line in expected[j1:j2]:
+                    if tag != "equal" and _terminal_color_support():
+                        yield red(prefix[tag] + str(line))
+                    else:
+                        yield prefix[tag] + str(line)
+
+
+# Source: https://github.com/apache/spark/blob/master/python/pyspark/testing/utils.py#L519
+def assertDataFrameEqual(
+    actual: t.Union[_BaseDataFrame, pd.DataFrame, t.List[types.Row]],
+    expected: t.Union[_BaseDataFrame, pd.DataFrame, t.List[types.Row]],
+    checkRowOrder: bool = False,
+    rtol: float = 1e-5,
+    atol: float = 1e-8,
+):
+    r"""
+    A util function to assert equality between `actual` and `expected`
+    (DataFrames or lists of Rows), with optional parameters `checkRowOrder`, `rtol`, and `atol`.
+
+    Supports SQLFrame DataFrames, pandas DataFrames, and lists of Rows.
+
+    .. versionadded:: 3.5.0
+
+    Parameters
+    ----------
+    actual : DataFrame (SQLFrame or pandas) or list of Rows
+        The DataFrame that is being compared or tested.
+    expected : DataFrame (SQLFrame or pandas) or list of Rows
+        The expected result of the operation, for comparison with the actual result.
+    checkRowOrder : bool, optional
+        A flag indicating whether the order of rows should be considered in the comparison.
+        If set to `False` (default), the row order is not taken into account.
+        If set to `True`, the order of rows is important and will be checked during comparison.
+        (See Notes)
+    rtol : float, optional
+        The relative tolerance, used in asserting approximate equality for float values in actual
+        and expected. Set to 1e-5 by default. (See Notes)
+    atol : float, optional
+        The absolute tolerance, used in asserting approximate equality for float values in actual
+        and expected. Set to 1e-8 by default. (See Notes)
+
+    Notes
+    -----
+    When `assertDataFrameEqual` fails, the error message uses the Python `difflib` library to
+    display a diff log of each row that differs in `actual` and `expected`.
+
+    For `checkRowOrder`, note that DataFrame ordering is non-deterministic, unless
+    explicitly sorted.
+
+    Note that schema equality is checked only when `expected` is a DataFrame (not a list of Rows).
+
+    For DataFrames with float values, assertDataFrameEqual asserts approximate equality.
+    Two float values a and b are approximately equal if the following equation is True:
+
+    ``absolute(a - b) <= (atol + rtol * absolute(b))``.
+
+    Examples
+    --------
+    >>> df1 = spark.createDataFrame(data=[("1", 1000), ("2", 3000)], schema=["id", "amount"])
+    >>> df2 = spark.createDataFrame(data=[("1", 1000), ("2", 3000)], schema=["id", "amount"])
+    >>> assertDataFrameEqual(df1, df2)  # pass, DataFrames are identical
+
+    >>> df1 = spark.createDataFrame(data=[("1", 0.1), ("2", 3.23)], schema=["id", "amount"])
+    >>> df2 = spark.createDataFrame(data=[("1", 0.109), ("2", 3.23)], schema=["id", "amount"])
+    >>> assertDataFrameEqual(df1, df2, rtol=1e-1)  # pass, DataFrames are approx equal by rtol
+
+    >>> df1 = spark.createDataFrame(data=[(1, 1000), (2, 3000)], schema=["id", "amount"])
+    >>> list_of_rows = [Row(1, 1000), Row(2, 3000)]
+    >>> assertDataFrameEqual(df1, list_of_rows)  # pass, actual and expected data are equal
+
+    >>> import pandas as pd
+    >>> df1 = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
+    >>> df2 = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
+    >>> assertDataFrameEqual(df1, df2)  # pass, pandas DataFrames are equal
+
+    >>> df1 = spark.createDataFrame(
+    ...     data=[("1", 1000.00), ("2", 3000.00), ("3", 2000.00)], schema=["id", "amount"])
+    >>> df2 = spark.createDataFrame(
+    ...     data=[("1", 1001.00), ("2", 3000.00), ("3", 2003.00)], schema=["id", "amount"])
+    >>> assertDataFrameEqual(df1, df2)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    DataFrameDiffError: Rows are different:
+    Results do not match: ( 66.66667 % )
+    *** actual ***
+    ! Row(id='1', amount=1000.0)
+      Row(id='2', amount=3000.0)
+    ! Row(id='3', amount=2000.0)
+    *** expected ***
+    ! Row(id='1', amount=1001.0)
+      Row(id='2', amount=3000.0)
+    ! Row(id='3', amount=2003.0)
+    """
+    import pandas as pd
+
+    if actual is None and expected is None:
+        return True
+    elif actual is None or expected is None:
+        raise SQLFrameException("Missing required arguments: actual and expected")
+
+    def compare_rows(r1: types.Row, r2: types.Row):
+        def compare_vals(val1, val2):
+            if isinstance(val1, list) and isinstance(val2, list):
+                return len(val1) == len(val2) and all(
+                    compare_vals(x, y) for x, y in zip(val1, val2)
+                )
+            elif isinstance(val1, types.Row) and isinstance(val2, types.Row):
+                return all(compare_vals(x, y) for x, y in zip(val1, val2))
+            elif isinstance(val1, dict) and isinstance(val2, dict):
+                return (
+                    len(val1.keys()) == len(val2.keys())
+                    and val1.keys() == val2.keys()
+                    and all(compare_vals(val1[k], val2[k]) for k in val1.keys())
+                )
+            elif isinstance(val1, float) and isinstance(val2, float):
+                if abs(val1 - val2) > (atol + rtol * abs(val2)):
+                    return False
+            else:
+                if val1 != val2:
+                    return False
+            return True
+
+        if r1 is None and r2 is None:
+            return True
+        elif r1 is None or r2 is None:
+            return False
+
+        return compare_vals(r1, r2)
+
+    def assert_rows_equal(rows1: t.List[types.Row], rows2: t.List[types.Row]):
+        zipped = list(zip_longest(rows1, rows2))
+        diff_rows_cnt = 0
+        diff_rows = False
+
+        rows_str1 = ""
+        rows_str2 = ""
+
+        # count different rows
+        for r1, r2 in zipped:
+            rows_str1 += str(r1) + "\n"
+            rows_str2 += str(r2) + "\n"
+            if not compare_rows(r1, r2):
+                diff_rows_cnt += 1
+                diff_rows = True
+
+        generated_diff = _context_diff(
+            actual=rows_str1.splitlines(), expected=rows_str2.splitlines(), n=len(zipped)
+        )
+
+        if diff_rows:
+            error_msg = "Results do not match: "
+            percent_diff = (diff_rows_cnt / len(zipped)) * 100
+            error_msg += "( %.5f %% )" % percent_diff
+            error_msg += "\n" + "\n".join(generated_diff)
+            raise DataFrameDiffError("Rows are different:\n%s" % error_msg)
+
+    # convert actual and expected to list
+    if not isinstance(actual, list) and not isinstance(expected, list):
+        # only compare schema if expected is not a List
+        assertSchemaEqual(actual.schema, expected.schema)  # type: ignore
+
+    if not isinstance(actual, list):
+        actual_list = actual.collect()  # type: ignore
+    else:
+        actual_list = actual
+
+    if not isinstance(expected, list):
+        expected_list = expected.collect()  # type: ignore
+    else:
+        expected_list = expected
+
+    if not checkRowOrder:
+        # sort rows by their string representation since order is not checked
+        actual_list = sorted(actual_list, key=lambda x: str(x))
+        expected_list = sorted(expected_list, key=lambda x: str(x))
+
+    assert_rows_equal(actual_list, expected_list)
+
+
+def assertSchemaEqual(actual: types.StructType, expected: types.StructType):
+    r"""
+    A util function to assert equality between DataFrame schemas `actual` and `expected`.
+
+    .. versionadded:: 3.5.0
+
+    Parameters
+    ----------
+    actual : StructType
+        The DataFrame schema that is being compared or tested.
+    expected : StructType
+        The expected schema, for comparison with the actual schema.
+
+    Notes
+    -----
+    When assertSchemaEqual fails, the error message uses the Python `difflib` library to display
+    a diff log of the `actual` and `expected` schemas.
+
+    Examples
+    --------
+    >>> from pyspark.sql.types import StructType, StructField, ArrayType, IntegerType, DoubleType
+    >>> s1 = StructType([StructField("names", ArrayType(DoubleType(), True), True)])
+    >>> s2 = StructType([StructField("names", ArrayType(DoubleType(), True), True)])
+    >>> assertSchemaEqual(s1, s2)  # pass, schemas are identical
+
+    >>> df1 = spark.createDataFrame(data=[(1, 1000), (2, 3000)], schema=["id", "number"])
+    >>> df2 = spark.createDataFrame(data=[("1", 1000), ("2", 5000)], schema=["id", "amount"])
+    >>> assertSchemaEqual(df1.schema, df2.schema)  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+    ...
+    SchemaDiffError: Schemas do not match.
+    --- actual
+    +++ expected
+    - StructType([StructField('id', LongType(), True), StructField('number', LongType(), True)])
+    ?                                ^^                              ^^^^^
+    + StructType([StructField('id', StringType(), True), StructField('amount', LongType(), True)])
+    ?                                ^^^^                             ++++ ^
+    """
+    if not isinstance(actual, types.StructType):
+        raise RuntimeError("actual must be a StructType")
+    if not isinstance(expected, types.StructType):
+        raise RuntimeError("expected must be a StructType")
+
+    def compare_schemas_ignore_nullable(s1: types.StructType, s2: types.StructType):
+        if len(s1) != len(s2):
+            return False
+        zipped = zip_longest(s1, s2)
+        for sf1, sf2 in zipped:
+            if not compare_structfields_ignore_nullable(sf1, sf2):
+                return False
+        return True
+
+    def compare_structfields_ignore_nullable(
+        actualSF: types.StructField, expectedSF: types.StructField
+    ):
+        if actualSF is None and expectedSF is None:
+            return True
+        elif actualSF is None or expectedSF is None:
+            return False
+        if actualSF.name != expectedSF.name:
+            return False
+        else:
+            return compare_datatypes_ignore_nullable(actualSF.dataType, expectedSF.dataType)
+
+    def compare_datatypes_ignore_nullable(dt1: t.Any, dt2: t.Any):
+        # checks datatype equality, using recursion to ignore nullable
+        if dt1.typeName() == dt2.typeName():
+            if dt1.typeName() == "array":
+                return compare_datatypes_ignore_nullable(dt1.elementType, dt2.elementType)
+            elif dt1.typeName() == "struct":
+                return compare_schemas_ignore_nullable(dt1, dt2)
+            else:
+                return True
+        else:
+            return False
+
+    # ignore nullable flag by default
+    if not compare_schemas_ignore_nullable(actual, expected):
+        generated_diff = difflib.ndiff(str(actual).splitlines(), str(expected).splitlines())
+
+        error_msg = "\n".join(generated_diff)
+
+        raise SchemaDiffError(error_msg)
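
The new module adds two public helpers, assertDataFrameEqual and assertSchemaEqual, adapted from pyspark.testing. A minimal usage sketch follows; it is not part of the diff and assumes sqlframe.base.types exposes the PySpark-style Row, StructType, StructField, and DoubleType names used in the doctests above:

    from sqlframe.base.types import DoubleType, Row, StructField, StructType
    from sqlframe.testing.utils import assertDataFrameEqual, assertSchemaEqual

    # Row order is ignored by default (checkRowOrder=False), and floats compare
    # within the default tolerances (rtol=1e-5, atol=1e-8).
    actual = [Row(id="2", amount=3000.0), Row(id="1", amount=1000.001)]
    expected = [Row(id="1", amount=1000.0), Row(id="2", amount=3000.0)]
    assertDataFrameEqual(actual, expected)  # passes: same rows, floats approximately equal

    # Nullability is ignored when comparing schemas; a name or type mismatch
    # raises SchemaDiffError with an ndiff of the two schema strings.
    s1 = StructType([StructField("amount", DoubleType(), nullable=True)])
    s2 = StructType([StructField("amount", DoubleType(), nullable=False)])
    assertSchemaEqual(s1, s2)  # passes: only the nullable flags differ

Because both arguments above are lists of Rows, the schema check is skipped entirely, matching the note in the docstring that schema equality is only checked when `expected` is a DataFrame.
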
sqlframe-1.11.0.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sqlframe
-Version: 1.9.0
+Version: 1.11.0
 Summary: Turning PySpark Into a Universal DataFrame API
 Home-page: https://github.com/eakmanrq/sqlframe
 Author: Ryan Eakman
sqlframe-1.11.0.dist-info/RECORD
@@ -1,33 +1,33 @@
 sqlframe/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sqlframe/_version.py,sha256=Wi1Vgg8ccNVK7oIZNO8kmGhwjztIUyuzlku2tkT7820,411
+sqlframe/_version.py,sha256=rZqhcUFwPMyj_mTWUN2A6qcFr8Ptv08CSbXbruC3jR4,413
 sqlframe/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sqlframe/base/_typing.py,sha256=DuTay8-o9W-pw3RPZCgLunKNJLS9PkaV11G_pxXp9NY,1256
 sqlframe/base/catalog.py,sha256=ATDGirouUjal05P4ymL-wIi8rgjg_8w4PoACamiO64A,37245
 sqlframe/base/column.py,sha256=5bfJWj9dnStHUxLSrWMD-gwiC4-aHKC8lhoC62nhM1k,16153
-sqlframe/base/dataframe.py,sha256=uL4neDTMy1a9XJH46YLQryzdDci4iDxNXBtiJOzfHfs,67718
+sqlframe/base/dataframe.py,sha256=Tf5euWTGxFmYirgHK5ZXUI41so5ruo-asVmUwj9DFdo,70015
 sqlframe/base/decorators.py,sha256=I5osMgx9BuCgbtp4jVM2DNwYJVLzCv-OtTedhQEik0g,1882
-sqlframe/base/exceptions.py,sha256=pCB9hXX4jxZWzNg3JN1i38cv3BmpUlee5NoLYx3YXIQ,208
-sqlframe/base/function_alternatives.py,sha256=NDXs2igY7PBsStzTSRZvJcCshBOJkPQl2GbhpVFU6To,42931
-sqlframe/base/functions.py,sha256=FZczLQzADcXQWuKUbv67LHnK1yQU4nVzJGnNJQEHkrY,58438
+sqlframe/base/exceptions.py,sha256=9Uwvqn2eAkDpqm4BrRgbL61qM-GMCbJEMAW8otxO46s,370
+sqlframe/base/function_alternatives.py,sha256=QESqZy7Osp9-CV5Yoi6XFat5SE8PzCVZ3o7gOFmIY7g,45888
+sqlframe/base/functions.py,sha256=hJDpE7GYQpQ1iHjdr1hG_hu0mAIb60vNoghjEcgMREI,187550
 sqlframe/base/group.py,sha256=TES9CleVmH3x-0X-tqmuUKfCKSWjH5vg1aU3R6dDmFc,4059
 sqlframe/base/normalize.py,sha256=nXAJ5CwxVf4DV0GsH-q1w0p8gmjSMlv96k_ez1eVul8,3880
 sqlframe/base/operations.py,sha256=-AhNuEzcV7ZExoP1oY3blaKip-joQyJeQVvfBTs_2g4,3456
 sqlframe/base/readerwriter.py,sha256=5NPQMiOrw6I54U243R_6-ynnWYsNksgqwRpPp4IFjIw,25288
-sqlframe/base/session.py,sha256=oQsOwlwAhbqtD8epR44kGXP29S31fIxM29NxfwCbcl0,21993
+sqlframe/base/session.py,sha256=2C0OsPoP49AuqVNtPiazTdVpwQA1668g5WOydrYP6SA,22001
 sqlframe/base/transforms.py,sha256=y0j3SGDz3XCmNGrvassk1S-owllUWfkHyMgZlY6SFO4,467
-sqlframe/base/types.py,sha256=aJT5YXr-M_LAfUM0uK4asfbrQFab_xmsp1CP2zkG8p0,11924
-sqlframe/base/util.py,sha256=hgmTVzdTvHhfc9d5I96wjk9LGr-vhSZlaB-MejENzcA,9110
+sqlframe/base/types.py,sha256=K6mjafbX7oIk65CapwamcO2I8nf-poRIpKKt9XDNEaQ,11987
+sqlframe/base/util.py,sha256=tWccrZne-Acn4N2RxYr87mfI_GDMf_K9hRD7BnhGBq0,11756
 sqlframe/base/window.py,sha256=8hOv-ignPPIsZA9FzvYzcLE9J_glalVaYjIAUdRUX3o,4943
 sqlframe/base/mixins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sqlframe/base/mixins/catalog_mixins.py,sha256=NhuPGxIqPjyuC_V_NALN1sn9v9h0-xwFOlJyJgsvyek,14212
-sqlframe/base/mixins/dataframe_mixins.py,sha256=U2tKIY5pCLnoPy1boAQ1YWLgK1E-ZT4x47oRWtGoYLQ,2360
+sqlframe/base/mixins/dataframe_mixins.py,sha256=FoR3bymPk-vT7NhJsDwZe6ONHheMd5UJhzID2lM1joQ,1411
 sqlframe/base/mixins/readwriter_mixins.py,sha256=QnxGVL8ftZfYlBNG0Bl24N_bnA2YioSxUsTSgKIbuvQ,4723
 sqlframe/bigquery/__init__.py,sha256=i2NsMbiXOj2xphCtPuNk6cVw4iYeq5_B1I9dVI9aGAk,712
 sqlframe/bigquery/catalog.py,sha256=h3aQAQAJg6MMvFpP8Ku0S4pcx30n5qYrqHhWSomxb6A,9319
 sqlframe/bigquery/column.py,sha256=E1tUa62Y5HajkhgFuebU9zohrGyieudcHzTT8gfalio,40
-sqlframe/bigquery/dataframe.py,sha256=fPQ6043aSS_ds30WsvrYOgNZJPH0jq7BeNHGLQ2MEW4,1372
-sqlframe/bigquery/functions.py,sha256=RF8yG_4MS3at_60V0NNTE5ADERJZa7kZGYFWI4ST3jM,11149
-sqlframe/bigquery/functions.pyi,sha256=pCVCzS1yN1Xahn0UAJhNwPUHX0XattzfHtfexrrH2Rs,11982
+sqlframe/bigquery/dataframe.py,sha256=Y2uy4FEYw0KxIHgnaA9uMwdIzxJzTlD_NSzIe7P7kxA,2405
+sqlframe/bigquery/functions.py,sha256=2YqJmBG0F0o10cztFZoP-G4px1QMKuHST6jlj1snUfY,11331
+sqlframe/bigquery/functions.pyi,sha256=JiyLFLiO0jyJec6j1o4ujPVQ7Tma-c9YHlm-3UQYD9M,13642
 sqlframe/bigquery/group.py,sha256=UVBNBRTo8OqS-_cS5YwvTeJYgYxeG-d6R3kfyHmlFqw,391
 sqlframe/bigquery/readwriter.py,sha256=WAD3ZMwkkjOpvPPoZXfaLLNM6tRTeUvdEj-hQZAzXeo,870
 sqlframe/bigquery/session.py,sha256=1-hE1Wr2b6SqfD4M_-OGMqjaSbhD6wSQd74v71xHZv8,2709
@@ -36,20 +36,20 @@ sqlframe/bigquery/window.py,sha256=6GKPzuxeSapJakBaKBeT9VpED1ACdjggDv9JRILDyV0,3
 sqlframe/duckdb/__init__.py,sha256=t85TA3ufZtL1weQNFmEs8itCSwbJFtw03-p0GT4XGf8,669
 sqlframe/duckdb/catalog.py,sha256=rt3XuP3m4DbhuibOFyvx_95F2zZa6uDwCI_TmcvKy1A,3895
 sqlframe/duckdb/column.py,sha256=wkEPcp3xVsH5nC3kpacXqNkRv9htPtBgt-0uFRxIRNs,56
-sqlframe/duckdb/dataframe.py,sha256=RPdXSOv_wCB0R5E5fzRMYOGFHilb4egqRk3UgiT6WEU,1530
-sqlframe/duckdb/functions.py,sha256=srvzbk_Wg-wQPFGYp624dRDyYJghi47M8E-Tu7pBdY0,1507
-sqlframe/duckdb/functions.pyi,sha256=URTH3IY_Wckvpcm3JsVyCvLIFCqFrmanIREtgsIXOzk,5055
+sqlframe/duckdb/dataframe.py,sha256=WmBrrmrfxDpYuikSMFqacgV2Jawkx4sSYE-_mwnL4Jw,1225
+sqlframe/duckdb/functions.py,sha256=pz40eqR4U_s42p7UeaefJD5yU1vo6mqNoVz0iKN5eRk,1671
+sqlframe/duckdb/functions.pyi,sha256=nU-6a2cfLDkuMCdYrNRLfa6-i8Aa0CxQQ1nLT6roIdI,5813
 sqlframe/duckdb/group.py,sha256=IkhbW42Ng1U5YT3FkIdiB4zBqRkW4QyTb-1detY1e_4,383
 sqlframe/duckdb/readwriter.py,sha256=6xiyE3JKzY9ieKqvbAOBlifiHE6NpYISHul3Idlmoa0,4542
 sqlframe/duckdb/session.py,sha256=j75iIsmaxl5x7oqyhN_VolvEclKj7QmaFfIis-SmoKM,2147
 sqlframe/duckdb/types.py,sha256=KwNyuXIo-2xVVd4bZED3YrQOobKCtemlxGrJL7DrTC8,34
 sqlframe/duckdb/window.py,sha256=6GKPzuxeSapJakBaKBeT9VpED1ACdjggDv9JRILDyV0,35
 sqlframe/postgres/__init__.py,sha256=Sz_MtgV_oh_QhfZTC7iKM07ICUmNcJEDV0kEkSW9ZKU,712
-sqlframe/postgres/catalog.py,sha256=L5heEav8PTtKJDofJTf-51_cCLpZud5lDvZC-RFZIaw,3722
+sqlframe/postgres/catalog.py,sha256=uGMKo4RXOU6fA4IjcfebukEI18QswVk3cnB_G7S6_Fw,8130
 sqlframe/postgres/column.py,sha256=E1tUa62Y5HajkhgFuebU9zohrGyieudcHzTT8gfalio,40
-sqlframe/postgres/dataframe.py,sha256=feGvQo7GD-YGmWWGc5h94CMVZm0gcgUQsdlAktXS4Ac,1492
-sqlframe/postgres/functions.py,sha256=UNL7dE6LmzekvolwqWB-aFt8ITamxeSfuG50_NP_G8c,2133
-sqlframe/postgres/functions.pyi,sha256=lg2Bf0D_LJwC7DbmyXokNaw3v1pvttDj5OxXLqIgAYE,4789
+sqlframe/postgres/dataframe.py,sha256=f-w6UHxZtmeZ5oMbaqJaZ8FrYeOhzyveNlZOK57ke0k,1289
+sqlframe/postgres/functions.py,sha256=b9ccP5vY8EDZXkJbhE_LjAlH50_6wcUF9VbzPrariec,2374
+sqlframe/postgres/functions.pyi,sha256=um-qE2g9iPs0-53vJ46lArbfvDqAbFIwrxLJgcrPM_8,5536
 sqlframe/postgres/group.py,sha256=KUXeSFKWTSH9yCRJAhW85OvjZaG6Zr4In9LR_ie3yGU,391
 sqlframe/postgres/readwriter.py,sha256=L1e3yKXzFVNR_W5s1DHaWol7G8x7l4jcZ5sLGualyMk,870
 sqlframe/postgres/session.py,sha256=oKh8-j9MN6msVheQNCYoGmej9ktFLTTHmlMP58uZ3nw,1936
@@ -58,7 +58,7 @@ sqlframe/postgres/window.py,sha256=6GKPzuxeSapJakBaKBeT9VpED1ACdjggDv9JRILDyV0,3
 sqlframe/redshift/__init__.py,sha256=jamKYQtQaKjjXnQ01QGPHvatbrZSw9sWno_VOUGSz6I,712
 sqlframe/redshift/catalog.py,sha256=JBDWIu4FQhi4_POB9pxW0T5A-6qdSK7BCq_Cp-V6tIM,4717
 sqlframe/redshift/column.py,sha256=E1tUa62Y5HajkhgFuebU9zohrGyieudcHzTT8gfalio,40
-sqlframe/redshift/dataframe.py,sha256=mtxmKVnvuYNQnirEvuXICY53WRiN8L1QCtSsvPJ-4jE,1372
+sqlframe/redshift/dataframe.py,sha256=aTC0DOPDFwWH1_b9T0Pif80cYSGudIp0D-cmkR7Ci_M,1104
 sqlframe/redshift/functions.py,sha256=DR5kodYAcKatUqopwrEQtxryI4ZSqaH47_y3WLht4Wg,455
 sqlframe/redshift/group.py,sha256=5MGZYJfHpzoRSQ0N_pn4KUk4Mk2gocQwU3K1-jAbvGg,391
 sqlframe/redshift/readwriter.py,sha256=g3FYKSsJKqcSnElprzzz29ZctoXq9tRB0Mj9Bm1HycI,870
@@ -68,9 +68,9 @@ sqlframe/redshift/window.py,sha256=6GKPzuxeSapJakBaKBeT9VpED1ACdjggDv9JRILDyV0,3
 sqlframe/snowflake/__init__.py,sha256=nuQ3cuHjDpW4ELZfbd2qOYmtXmcYl7MtsrdOrRdozo0,746
 sqlframe/snowflake/catalog.py,sha256=uDjBgDdCyxaDkGNX_8tb-lol7MwwazcClUBAZsOSj70,5014
 sqlframe/snowflake/column.py,sha256=E1tUa62Y5HajkhgFuebU9zohrGyieudcHzTT8gfalio,40
-sqlframe/snowflake/dataframe.py,sha256=OJ27NudBUE3XX9mc8ywooGhYV4ijF9nX2K_nkHRcTx4,1393
-sqlframe/snowflake/functions.py,sha256=HXxt-wM05vcbgmu06uGApGd-Z9bWOwWwjqPfg38fF0M,2330
-sqlframe/snowflake/functions.pyi,sha256=yteEge4EVC1V1gzZmlJJDNZqIey8QaYE7ktwjuxE-vM,5543
+sqlframe/snowflake/dataframe.py,sha256=jUyQNCe3K6SH4PtmrR67YN0SLqkHakMxLiB261fDgkc,1862
+sqlframe/snowflake/functions.py,sha256=cIO56ZsOpjg6ICLjTh-osG1h1UjjEtM39_ieMiWkmyI,2466
+sqlframe/snowflake/functions.pyi,sha256=MkNif_sIceHMNhl-qvLir2DJ1jPqwyaahltdpgY4Jq0,6213
 sqlframe/snowflake/group.py,sha256=pPP1l2RRo_LgkXrji8a87n2PKo-63ZRPT-WUtvVcBME,395
 sqlframe/snowflake/readwriter.py,sha256=yhRc2HcMq6PwV3ghZWC-q-qaE7LE4aEjZEXCip4OOlQ,884
 sqlframe/snowflake/session.py,sha256=bDOlnuIiQ9j_zfF7F5H1gTLmpHUjruIxr2CfXcS_7YU,3284
@@ -79,9 +79,9 @@ sqlframe/snowflake/window.py,sha256=6GKPzuxeSapJakBaKBeT9VpED1ACdjggDv9JRILDyV0,
 sqlframe/spark/__init__.py,sha256=WhYQAZMJN1EMNAVGUH7BEinxNdYtXOrrr-6HUniJOyI,649
 sqlframe/spark/catalog.py,sha256=rIX5DtPnINbcPZRUe4Z1bOpkJoNRlrO9qWkUeTQClNc,32612
 sqlframe/spark/column.py,sha256=E1tUa62Y5HajkhgFuebU9zohrGyieudcHzTT8gfalio,40
-sqlframe/spark/dataframe.py,sha256=V3z5Bx9snLgYh4bDwJfJb5mj1P7UsZF8DMlLwZXopBg,1309
+sqlframe/spark/dataframe.py,sha256=_TD-h7oz0-i80r90v17UoLDoIzcGNchU2SL13ujOOic,1779
 sqlframe/spark/functions.py,sha256=PkK4MBpVADhnDbrgFDii5zFaNrhi4y-OYX3Lcu-SW0k,530
-sqlframe/spark/functions.pyi,sha256=0gyD8H7qYqiG-u_dpZB9wBuPni6GB5wa8YXUICoZMwU,6849
+sqlframe/spark/functions.pyi,sha256=bjz6s8E6OB0c4KfTTsls7rhb_R9mIYvkaeaXefMziqM,11617
 sqlframe/spark/group.py,sha256=MrvV_v-YkBc6T1zz882WrEqtWjlooWIyHBCmTQg3fCA,379
 sqlframe/spark/readwriter.py,sha256=w68EImTcGJv64X7pc1tk5tDjDxb1nAnn-MiIaaN9Dc8,812
 sqlframe/spark/session.py,sha256=ztIS7VCFxjR3B7i4JXaXo0evTUhUjOsIAZb7Ssqt2cU,4254
@@ -97,8 +97,10 @@ sqlframe/standalone/readwriter.py,sha256=EZNyDJ4ID6sGNog3uP4-e9RvchX4biJJDNtc5hk
 sqlframe/standalone/session.py,sha256=wQmdu2sv6KMTAv0LRFk7TY7yzlh3xvmsyqilEtRecbY,1191
 sqlframe/standalone/types.py,sha256=KwNyuXIo-2xVVd4bZED3YrQOobKCtemlxGrJL7DrTC8,34
 sqlframe/standalone/window.py,sha256=6GKPzuxeSapJakBaKBeT9VpED1ACdjggDv9JRILDyV0,35
-sqlframe-1.9.0.dist-info/LICENSE,sha256=VZu79YgW780qxaFJMr0t5ZgbOYEh04xWoxaWOaqIGWk,1068
-sqlframe-1.9.0.dist-info/METADATA,sha256=ZqMpC8SchKLMSu0-H36KkmMLQcv13IKaK-AKumtcIYA,7496
-sqlframe-1.9.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
-sqlframe-1.9.0.dist-info/top_level.txt,sha256=T0_RpoygaZSF6heeWwIDQgaP0varUdSK1pzjeJZRjM8,9
-sqlframe-1.9.0.dist-info/RECORD,,
+sqlframe/testing/__init__.py,sha256=VVCosQhitU74A3NnE52O4mNtGZONapuEXcc20QmSlnQ,132
+sqlframe/testing/utils.py,sha256=9DDYVuocO7tygee3RaajuJNZ24sJwf_LY556kKg7kTw,13011
+sqlframe-1.11.0.dist-info/LICENSE,sha256=VZu79YgW780qxaFJMr0t5ZgbOYEh04xWoxaWOaqIGWk,1068
+sqlframe-1.11.0.dist-info/METADATA,sha256=JTMUu99Ygcz_fK15KTHUb9OqQcPiQoUjQ1-7RQ09COE,7497
+sqlframe-1.11.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+sqlframe-1.11.0.dist-info/top_level.txt,sha256=T0_RpoygaZSF6heeWwIDQgaP0varUdSK1pzjeJZRjM8,9
+sqlframe-1.11.0.dist-info/RECORD,,