sqlframe 3.25.0__py3-none-any.whl → 3.27.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only.
sqlframe/_version.py CHANGED
@@ -17,5 +17,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE

-__version__ = version = '3.25.0'
-__version_tuple__ = version_tuple = (3, 25, 0)
+__version__ = version = '3.27.0'
+__version_tuple__ = version_tuple = (3, 27, 0)
sqlframe/base/function_alternatives.py CHANGED
@@ -551,6 +551,17 @@ def sha1_force_sha1_and_to_hex(col: ColumnOrName) -> Column:
     )


+def sha2_sha265(col: ColumnOrName) -> Column:
+    col_func = get_func_from_session("col")
+
+    return Column(
+        expression.Anonymous(
+            this="SHA256",
+            expressions=[col_func(col).column_expression],
+        )
+    )
+
+
 def hash_from_farm_fingerprint(*cols: ColumnOrName) -> Column:
     if len(cols) > 1:
         raise ValueError("This dialect only supports a single column for calculating hash")
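For context, sha2_sha265 builds a raw SHA256(...) call through sqlglot's Anonymous node rather than a typed expression. A minimal sketch of the SQL that shape renders to, using sqlglot directly (outside sqlframe's session plumbing, which supplies col_func and column_expression):

    from sqlglot import exp

    # Roughly the node sha2_sha265 constructs for a column named "word".
    node = exp.Anonymous(this="SHA256", expressions=[exp.column("word")])
    print(node.sql(dialect="duckdb"))  # SHA256(word)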
sqlframe/base/functions.py CHANGED
@@ -1492,7 +1492,7 @@ def md5(col: ColumnOrName) -> Column:
     return Column.invoke_expression_over_column(col, expression.MD5)


-@meta(unsupported_engines=["duckdb", "postgres"])
+@meta(unsupported_engines=["postgres"])
 def sha1(col: ColumnOrName) -> Column:
     from sqlframe.base.function_alternatives import sha1_force_sha1_and_to_hex

@@ -1504,8 +1504,18 @@ def sha1(col: ColumnOrName) -> Column:
     return Column.invoke_expression_over_column(col, expression.SHA)


-@meta(unsupported_engines=["bigquery", "duckdb", "postgres"])
+@meta(unsupported_engines=["bigquery", "postgres"])
 def sha2(col: ColumnOrName, numBits: int) -> Column:
+    from sqlframe.base.function_alternatives import sha2_sha265
+
+    session = _get_session()
+
+    if session._is_duckdb:
+        if numBits in [256, 0]:
+            return sha2_sha265(col)
+        else:
+            raise ValueError("This dialect only supports SHA-265 (numBits=256 or numBits=0)")
+
     return Column.invoke_expression_over_column(col, expression.SHA2, length=lit(numBits))


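Together with the sha1 change above, this routes DuckDB hashing through native functions: numBits=256 (or 0) maps to the SHA256 alternative, and anything else raises. A hedged usage sketch, assuming a local DuckDBSession:

    from sqlframe.duckdb import DuckDBSession
    from sqlframe.duckdb import functions as F

    session = DuckDBSession()
    df = session.createDataFrame([("hello",)], schema=["word"])

    # numBits=256 or 0 is accepted on DuckDB...
    df.select(F.sha2("word", 256).alias("digest")).show()
    # ...any other width raises ValueError per the guard above.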
@@ -2355,7 +2365,7 @@ def from_json(
         schema = schema.simpleString()
     schema = schema if isinstance(schema, Column) else lit(schema)
     if options is not None:
-        options_col = create_map([lit(x) for x in _flatten(options.items())])
+        options_col = create_map([lit(str(x)) for x in _flatten(options.items())])
         return Column.invoke_anonymous_function(col, "FROM_JSON", schema, options_col)
     return Column.invoke_anonymous_function(col, "FROM_JSON", schema)

@@ -2389,7 +2399,7 @@ def schema_of_json(col: ColumnOrName, options: t.Optional[t.Dict[str, str]] = No
     if isinstance(col, str):
         col = lit(col)
     if options is not None:
-        options_col = create_map([lit(x) for x in _flatten(options.items())])
+        options_col = create_map([lit(str(x)) for x in _flatten(options.items())])
         return Column.invoke_anonymous_function(col, "SCHEMA_OF_JSON", options_col)
     return Column.invoke_anonymous_function(col, "SCHEMA_OF_JSON")

@@ -2399,7 +2409,7 @@ def schema_of_csv(col: ColumnOrName, options: t.Optional[t.Dict[str, str]] = Non
     if isinstance(col, str):
         col = lit(col)
     if options is not None:
-        options_col = create_map([lit(x) for x in _flatten(options.items())])
+        options_col = create_map([lit(str(x)) for x in _flatten(options.items())])
         return Column.invoke_anonymous_function(col, "SCHEMA_OF_CSV", options_col)
     return Column.invoke_anonymous_function(col, "SCHEMA_OF_CSV")

@@ -2407,7 +2417,7 @@ def schema_of_csv(col: ColumnOrName, options: t.Optional[t.Dict[str, str]] = Non
 @meta(unsupported_engines=["bigquery", "duckdb", "postgres", "snowflake"])
 def to_csv(col: ColumnOrName, options: t.Optional[t.Dict[str, str]] = None) -> Column:
     if options is not None:
-        options_col = create_map([lit(x) for x in _flatten(options.items())])
+        options_col = create_map([lit(str(x)) for x in _flatten(options.items())])
         return Column.invoke_anonymous_function(col, "TO_CSV", options_col)
     return Column.invoke_anonymous_function(col, "TO_CSV")

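The same one-line change repeats across from_json, schema_of_json, schema_of_csv, and to_csv: every flattened option key and value now passes through str() before lit(), so boolean or integer option values no longer reach create_map unconverted. A standalone sketch of the coercion (here _flatten is assumed to behave like chain.from_iterable):

    from itertools import chain

    def _flatten(pairs):
        # Assumed behavior of sqlframe's helper: flatten (key, value) tuples.
        return chain.from_iterable(pairs)

    options = {"allowComments": True, "maxDepth": 10}
    print([str(x) for x in _flatten(options.items())])
    # ['allowComments', 'True', 'maxDepth', '10']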
@@ -4934,7 +4944,7 @@ def printf(format: ColumnOrName, *cols: ColumnOrName) -> Column:
     return Column.invoke_anonymous_function(format, "printf", *cols)


-@meta(unsupported_engines=["*", "spark", "databricks"])
+@meta(unsupported_engines=["bigquery", "postgres", "redshift", "snowflake", "spark", "databricks"])
 def product(col: ColumnOrName) -> Column:
     """
     Aggregate function: returns the product of the values in a group.
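Replacing the wildcard "*" with an explicit engine list enables product on every engine not named, DuckDB included (see the functions.pyi hunk further down). A hedged sketch on DuckDB:

    from sqlframe.duckdb import DuckDBSession
    from sqlframe.duckdb import functions as F

    session = DuckDBSession()
    df = session.createDataFrame([(2.0,), (3.0,), (4.0,)], schema=["n"])
    df.agg(F.product("n").alias("prod")).show()  # expect 2 * 3 * 4 = 24.0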
sqlframe/base/readerwriter.py CHANGED
@@ -38,6 +38,7 @@ class _BaseDataFrameReader(t.Generic[SESSION, DF, TABLE]):
     def __init__(self, spark: SESSION):
         self._session = spark
         self.state_format_to_read: t.Optional[str] = None
+        self.state_options: t.Dict[str, OptionalPrimitiveType] = {}

     @property
     def session(self) -> SESSION:
@@ -107,6 +108,88 @@ class _BaseDataFrameReader(t.Generic[SESSION, DF, TABLE]):
         self.state_format_to_read = source
         return self

+    def options(self, **options: OptionalPrimitiveType) -> "Self":
+        """Adds input options for the underlying data source.
+
+        .. versionadded:: 1.4.0
+
+        .. versionchanged:: 3.4.0
+            Supports Spark Connect.
+
+        Parameters
+        ----------
+        **options : dict
+            The dictionary of string keys and primitive-type values.
+
+        Examples
+        --------
+        >>> spark.read.options(inferSchema=True, header=True)
+        <...readwriter.DataFrameReader object ...>
+
+        Specify the option 'nullValue' and 'header' with reading a CSV file.
+
+        >>> import tempfile
+        >>> with tempfile.TemporaryDirectory() as d:
+        ...     # Write a DataFrame into a CSV file with a header.
+        ...     df = spark.createDataFrame([{"age": 100, "name": "Hyukjin Kwon"}])
+        ...     df.write.option("header", True).mode("overwrite").format("csv").save(d)
+        ...
+        ...     # Read the CSV file as a DataFrame with 'nullValue' option set to 'Hyukjin Kwon',
+        ...     # and 'header' option set to `True`.
+        ...     spark.read.options(
+        ...         nullValue="Hyukjin Kwon",
+        ...         header=True
+        ...     ).format('csv').load(d).show()
+        +---+----+
+        |age|name|
+        +---+----+
+        |100|NULL|
+        +---+----+
+        """
+
+        self.state_options = {**self.state_options, **options}
+        return self
+
+    def option(self, key: str, value: OptionalPrimitiveType) -> "Self":
+        """Adds an input option for the underlying data source.
+
+        .. versionadded:: 1.4.0
+
+        .. versionchanged:: 3.4.0
+            Supports Spark Connect.
+
+        Parameters
+        ----------
+        key : str
+            The key of the option.
+        value :
+            The value of the option.
+
+        Examples
+        --------
+        >>> spark.read.option("inferSchema", True)
+        <...readwriter.DataFrameReader object ...>
+
+        Specify the option 'nullValue' and 'header' with reading a CSV file.
+
+        >>> import tempfile
+        >>> with tempfile.TemporaryDirectory() as d:
+        ...     # Write a DataFrame into a CSV file with a header.
+        ...     df = spark.createDataFrame([{"age": 100, "name": "Hyukjin Kwon"}])
+        ...     df.write.option("header", True).mode("overwrite").format("csv").save(d)
+        ...
+        ...     # Read the CSV file as a DataFrame with 'nullValue' option set to 'Hyukjin Kwon',
+        ...     # and 'header' option set to `True`.
+        ...     spark.read.option("nullValue", "Hyukjin Kwon").option("header", True).format('csv').load(d).show()
+        +---+----+
+        |age|name|
+        +---+----+
+        |100|NULL|
+        +---+----+
+        """
+        self.state_options[key] = value
+        return self
+
     def load(
         self,
         path: t.Optional[PathOrPaths] = None,
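Both methods only accumulate state; the merge into an actual read happens in load()/json()/csv()/parquet() below. A minimal usage sketch, assuming a DuckDBSession and a local data.csv:

    from sqlframe.duckdb import DuckDBSession

    session = DuckDBSession()

    # Options set here persist on the reader in state_options...
    reader = session.read.option("header", True).options(nullValue="NA")

    # ...and are merged into the concrete read call.
    df = reader.format("csv").load("data.csv")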
@@ -220,7 +303,9 @@ class _BaseDataFrameReader(t.Generic[SESSION, DF, TABLE]):
             modifiedAfter=modifiedAfter,
             allowNonNumericNumbers=allowNonNumericNumbers,
         )
-        return self.load(path=path, format="json", schema=schema, **options)
+        # Apply previously set options, with method-specific options taking precedence
+        all_options = {**self.state_options, **{k: v for k, v in options.items() if v is not None}}
+        return self.load(path=path, format="json", schema=schema, **all_options)

     def parquet(self, *paths: str, **options: OptionalPrimitiveType) -> DF:
         """
@@ -263,7 +348,8 @@ class _BaseDataFrameReader(t.Generic[SESSION, DF, TABLE]):
         |100|Hyukjin Kwon|
         +---+------------+
         """
-        dfs = [self.load(path=path, format="parquet", **options) for path in paths]  # type: ignore
+        all_options = {**self.state_options, **{k: v for k, v in options.items() if v is not None}}
+        dfs = [self.load(path=path, format="parquet", **all_options) for path in paths]  # type: ignore
         return reduce(lambda a, b: a.union(b), dfs)

     def csv(
@@ -384,7 +470,9 @@ class _BaseDataFrameReader(t.Generic[SESSION, DF, TABLE]):
             modifiedAfter=modifiedAfter,
             unescapedQuoteHandling=unescapedQuoteHandling,
         )
-        return self.load(path=path, format="csv", schema=schema, **options)
+        # Apply previously set options, with method-specific options taking precedence
+        all_options = {**self.state_options, **{k: v for k, v in options.items() if v is not None}}
+        return self.load(path=path, format="csv", schema=schema, **all_options)


 class _BaseDataFrameWriter(t.Generic[SESSION, DF]):
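The merge order matters in all three readers: state_options are spread first, so any non-None keyword passed to the method itself wins, and explicit None values are dropped rather than overriding. The dict arithmetic in isolation:

    state_options = {"header": True, "sep": ","}
    options = {"sep": "|", "inferSchema": None}  # None values are filtered out

    all_options = {**state_options, **{k: v for k, v in options.items() if v is not None}}
    print(all_options)  # {'header': True, 'sep': '|'}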
sqlframe/base/session.py CHANGED
@@ -193,22 +193,21 @@ class _BaseSession(t.Generic[CATALOG, READER, WRITER, DF, TABLE, CONN, UDF_REGIS
     def getActiveSession(self) -> Self:
         return self

-    def range(self, *args):
-        start = 0
-        step = 1
-        numPartitions = None
-        if len(args) == 1:
-            end = args[0]
-        elif len(args) == 2:
-            start, end = args
-        elif len(args) == 3:
-            start, end, step = args
-        elif len(args) == 4:
-            start, end, step, numPartitions = args
-        else:
-            raise ValueError(
-                "range() takes 1 to 4 positional arguments but {} were given".format(len(args))
-            )
+    def range(
+        self,
+        start: int,
+        end: t.Optional[int] = None,
+        step: int = 1,
+        numPartitions: t.Optional[int] = None,
+    ):
+        # Ensure end is provided by either args or kwargs
+        if end is None:
+            if start:
+                end = start
+                start = 0
+            else:
+                raise ValueError("range() requires an 'end' value")
+
         if numPartitions is not None:
             logger.warning("numPartitions is not supported")
         return self.createDataFrame([[x] for x in range(start, end, step)], schema={"id": "long"})
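The rewrite trades *args unpacking for an explicit signature while keeping PySpark's convention that a single argument means end, with start defaulting to 0. A behavior sketch, assuming any sqlframe session (DuckDBSession shown):

    from sqlframe.duckdb import DuckDBSession

    session = DuckDBSession()
    session.range(5).show()         # ids 0..4: a lone argument is treated as `end`
    session.range(2, 10, 2).show()  # ids 2, 4, 6, 8
    # Note: session.range(0) raises ValueError, since a falsy start with no end hits the guard.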
sqlframe/databricks/readwriter.py CHANGED
@@ -94,11 +94,15 @@ class DatabricksDataFrameReader(
         """
         assert path is not None, "path is required"
         assert isinstance(path, str), "path must be a string"
+
+        # Merge state_options with provided options, with provided options taking precedence
+        merged_options = {**self.state_options, **options}
+
         format = format or self.state_format_to_read or _infer_format(path)
         fs_prefix, filepath = split_filepath(path)

         if fs_prefix == "":
-            return super().load(path, format, schema, **options)
+            return super().load(path, format, schema, **merged_options)

         if schema:
             column_mapping = ensure_column_mapping(schema)
@@ -116,7 +120,7 @@ class DatabricksDataFrameReader(
             paths = ",".join([f"{path}" for path in ensure_list(path)])

             format_options: dict[str, OptionalPrimitiveType] = {
-                k: v for k, v in options.items() if v is not None
+                k: v for k, v in merged_options.items() if v is not None
             }
             format_options["format"] = format
             format_options["schemaEvolutionMode"] = "none"
@@ -137,7 +141,7 @@ class DatabricksDataFrameReader(
             qualify=False,
         )
         if select_columns == [exp.Star()] and df.schema:
-            return self.load(path=path, format=format, schema=df.schema, **options)
+            return self.load(path=path, format=format, schema=df.schema, **merged_options)
         self.session._last_loaded_file = path  # type: ignore
         return df

sqlframe/duckdb/functions.pyi CHANGED
@@ -148,6 +148,7 @@ from sqlframe.base.functions import percentile_approx as percentile_approx
 from sqlframe.base.functions import position as position
 from sqlframe.base.functions import pow as pow
 from sqlframe.base.functions import power as power
+from sqlframe.base.functions import product as product
 from sqlframe.base.functions import quarter as quarter
 from sqlframe.base.functions import radians as radians
 from sqlframe.base.functions import rand as rand
@@ -166,6 +167,7 @@ from sqlframe.base.functions import rpad as rpad
 from sqlframe.base.functions import rtrim as rtrim
 from sqlframe.base.functions import second as second
 from sqlframe.base.functions import sequence as sequence
+from sqlframe.base.functions import sha1 as sha1
 from sqlframe.base.functions import shiftLeft as shiftLeft
 from sqlframe.base.functions import shiftleft as shiftleft
 from sqlframe.base.functions import shiftRight as shiftRight
sqlframe/duckdb/readwriter.py CHANGED
@@ -75,31 +75,34 @@ class DuckDBDataFrameReader(
         |100|NULL|
         +---+----+
         """
+        # Merge state_options with provided options, with provided options taking precedence
+        merged_options = {**self.state_options, **options}
+
         format = format or self.state_format_to_read
         if schema:
             column_mapping = ensure_column_mapping(schema)
             select_column_mapping = column_mapping.copy()
-            if options.get("filename"):
+            if merged_options.get("filename"):
                 select_column_mapping["filename"] = "VARCHAR"
             select_columns = [x.expression for x in self._to_casted_columns(select_column_mapping)]
             if format == "csv":
                 duckdb_columns = ", ".join(
                     [f"'{column}': '{dtype}'" for column, dtype in column_mapping.items()]
                 )
-                options["columns"] = "{" + duckdb_columns + "}"
+                merged_options["columns"] = "{" + duckdb_columns + "}"
         else:
             select_columns = [exp.Star()]
         if format == "delta":
             from_clause = f"delta_scan('{path}')"
         elif format:
-            options.pop("inferSchema", None)
+            merged_options.pop("inferSchema", None)
             paths = ",".join([f"'{path}'" for path in ensure_list(path)])
-            from_clause = f"read_{format}([{paths}], {to_csv(options)})"
+            from_clause = f"read_{format}([{paths}], {to_csv(merged_options)})"
         else:
             from_clause = f"'{path}'"
         df = self.session.sql(exp.select(*select_columns).from_(from_clause), qualify=False)
         if select_columns == [exp.Star()]:
-            return self.load(path=path, format=format, schema=df.schema, **options)
+            return self.load(path=path, format=format, schema=df.schema, **merged_options)
         self.session._last_loaded_file = path  # type: ignore
         return df

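With merged_options threaded through, reader-level options now reach DuckDB's table functions. A rough sketch of the effect (the exact argument rendering comes from to_csv over the merged dict, so the SQL below is indicative only):

    from sqlframe.duckdb import DuckDBSession

    session = DuckDBSession()
    df = session.read.option("header", True).load("data.csv", format="csv")
    # Builds a FROM clause along the lines of: read_csv(['data.csv'], header=True)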
sqlframe/spark/readwriter.py CHANGED
@@ -78,6 +78,10 @@ class SparkDataFrameReader(
         """
         assert path is not None, "path is required"
        assert isinstance(path, str), "path must be a string"
+
+        # Merge state_options with provided options, with provided options taking precedence
+        merged_options = {**self.state_options, **options}
+
         format = format or self.state_format_to_read or _infer_format(path)
         if schema:
             column_mapping = ensure_column_mapping(schema)
@@ -93,11 +97,13 @@ class SparkDataFrameReader(
             from_clause = f"delta.`{path}`"
         elif format:
             paths = ",".join([f"{path}" for path in ensure_list(path)])
-            tmp_view_key = options.get("_tmp_view_key_", f"{generate_random_identifier()}_vw")
-            options["_tmp_view_key_"] = tmp_view_key
+            tmp_view_key = merged_options.get(
+                "_tmp_view_key_", f"{generate_random_identifier()}_vw"
+            )
+            merged_options["_tmp_view_key_"] = tmp_view_key

             format_options: dict[str, OptionalPrimitiveType] = {
-                k: v for k, v in options.items() if v is not None
+                k: v for k, v in merged_options.items() if v is not None
             }
             format_options.pop("_tmp_view_key_")
             format_options["path"] = paths
@@ -121,7 +127,7 @@ class SparkDataFrameReader(
             qualify=False,
         )
         if select_columns == [exp.Star()] and df.schema:
-            return self.load(path=path, format=format, schema=df.schema, **options)
+            return self.load(path=path, format=format, schema=df.schema, **merged_options)
         self.session._last_loaded_file = path  # type: ignore
         return df

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sqlframe
-Version: 3.25.0
+Version: 3.27.0
 Summary: Turning PySpark Into a Universal DataFrame API
 Home-page: https://github.com/eakmanrq/sqlframe
 Author: Ryan Eakman
@@ -1,5 +1,5 @@
 sqlframe/__init__.py,sha256=SB80yLTITBXHI2GCDS6n6bN5ObHqgPjfpRPAUwxaots,3403
-sqlframe/_version.py,sha256=lX2iL8aKS-SZi2TbIojy_xpiTXHlvGXPfxFC3OMcj2o,513
+sqlframe/_version.py,sha256=Ax3LD7DzZA_hEyXtX5hoIc_sZjRSealrt3DnfvJEnEg,513
 sqlframe/py.typed,sha256=Nqnn8clbgv-5l0PgxcTOldg8mkMKrFn4TvPL-rYUUGg,1
 sqlframe/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sqlframe/base/_typing.py,sha256=b2clI5HI1zEZKB_3Msx3FeAJQyft44ubUifJwQRVXyQ,1298
@@ -8,13 +8,13 @@ sqlframe/base/column.py,sha256=AG9Z_6RNhVxLhLU29kRCgzMgDNSm-_GFg96xLqk1-bs,19838
 sqlframe/base/dataframe.py,sha256=OgEUlDI5Y4rWrVngW5LttCUMC40WR-Pyr5af6aqbMNU,84028
 sqlframe/base/decorators.py,sha256=ms-CvDOIW3T8IVB9VqDmLwAiaEsqXLYRXEqVQaxktiM,1890
 sqlframe/base/exceptions.py,sha256=9Uwvqn2eAkDpqm4BrRgbL61qM-GMCbJEMAW8otxO46s,370
-sqlframe/base/function_alternatives.py,sha256=KFkEm0aIHzajvQmiPZnzTLh-Ud9wjeg4lJ4Rk0vk-YU,53674
-sqlframe/base/functions.py,sha256=W96Fg6hCXYgFfJOEt4HTam61ZUNkBp7cutv1ZEAX3H8,224000
+sqlframe/base/function_alternatives.py,sha256=Bs1bwl25fN3Yy9rb4GnUWBGunQ1C_yelkb2yV9DSZIY,53918
+sqlframe/base/functions.py,sha256=mazXkUuyvs_8YWD_ssesDXPO0TZ2sIjvUsaeUpIeNE0,224343
 sqlframe/base/group.py,sha256=4R9sOZm4ZRlTfShq2j3_HQOiL_Tj1bYkouenYsgnlII,4115
 sqlframe/base/normalize.py,sha256=nXAJ5CwxVf4DV0GsH-q1w0p8gmjSMlv96k_ez1eVul8,3880
 sqlframe/base/operations.py,sha256=g-YNcbvNKTOBbYm23GKfB3fmydlR7ZZDAuZUtXIHtzw,4438
-sqlframe/base/readerwriter.py,sha256=xjNRmHndfUAtqeVbNdcmTcjOc14LUpgxs23q7RUw1Ls,27893
-sqlframe/base/session.py,sha256=G5_bI_z1iJtAGm2SgEdjkKiyJmS0yOUopx9P5TEGdR4,27273
+sqlframe/base/readerwriter.py,sha256=Nb2VJ_HBmLQp5mK8JhnFooZh2ydAaboCAFVPb-4MNX4,31241
+sqlframe/base/session.py,sha256=vR0ESFGH6US0F-AxUw1TPDDpYbpCFGgOC049iKhq_yQ,27178
 sqlframe/base/table.py,sha256=rCeh1W5SWbtEVfkLAUiexzrZwNgmZeptLEmLcM1ABkE,6961
 sqlframe/base/transforms.py,sha256=y0j3SGDz3XCmNGrvassk1S-owllUWfkHyMgZlY6SFO4,467
 sqlframe/base/types.py,sha256=iBNk9bpFtb2NBIogYS8i7OlQZMRvpR6XxqzBebsjQDU,12280
@@ -46,7 +46,7 @@ sqlframe/databricks/dataframe.py,sha256=uBJ8GxjfiODto2PvhgmsslIwKArW7tcLBA70tvtu
 sqlframe/databricks/functions.py,sha256=La8rjAwO0hD4FBO0QxW5CtZtFAPvOrVc6lG4OtPGgbc,336
 sqlframe/databricks/functions.pyi,sha256=FzVBpzXCJzxIp73sIAo_R8Wx8uOJrix-W12HsgyeTcQ,23799
 sqlframe/databricks/group.py,sha256=dU3g0DVLRlfOSCamKchQFXRd1WTFbdxoXkpEX8tPD6Y,399
-sqlframe/databricks/readwriter.py,sha256=zLnRztTnS0onRFoC4ElSypxnVW1Q_vs75bd8FRB2GKg,14602
+sqlframe/databricks/readwriter.py,sha256=cuGRI1G627JEZgGNtirrT8LAwT6xQCdgkSAETmLKNXU,14777
 sqlframe/databricks/session.py,sha256=iw4uczkJHkpVO8vusEEmfCrhxHWyAHpCFmOZ-0qlkms,2343
 sqlframe/databricks/table.py,sha256=Q0Vnrl5aUqnqFTQpTwfWMRyQ9AQnagtpnSnXmP6IKRs,678
 sqlframe/databricks/types.py,sha256=KwNyuXIo-2xVVd4bZED3YrQOobKCtemlxGrJL7DrTC8,34
@@ -57,9 +57,9 @@ sqlframe/duckdb/catalog.py,sha256=89FCSJglMbOxonk3IXmlkMcdXCfMdePpGfqlbkkB_d0,51
 sqlframe/duckdb/column.py,sha256=E1tUa62Y5HajkhgFuebU9zohrGyieudcHzTT8gfalio,40
 sqlframe/duckdb/dataframe.py,sha256=Z8_K69UQGZVeBfVGXVwIJP8OMuIvNBB3DPKTP3Lfu4w,1908
 sqlframe/duckdb/functions.py,sha256=ix2efGGD4HLaY1rtCtEd3IrsicGEVGiBAeKOo5OD8rA,424
-sqlframe/duckdb/functions.pyi,sha256=Ih8XqqTV1VcsdBUPV4V9jx1d13QPggTb-BkQZcA4iCQ,12438
+sqlframe/duckdb/functions.pyi,sha256=hDjpT-tGDO8LyElcno5YYRUnJg1dXXbGcRjJ69Zqk_U,12542
 sqlframe/duckdb/group.py,sha256=IkhbW42Ng1U5YT3FkIdiB4zBqRkW4QyTb-1detY1e_4,383
-sqlframe/duckdb/readwriter.py,sha256=-_Ama7evadIa3PYvynKDK6RcTMTDBHpHJzfANTine7g,4983
+sqlframe/duckdb/readwriter.py,sha256=WEfUSKI68BFwAt4xwQX-GO8ZSGuUQYgYKkmWE55DmJo,5171
 sqlframe/duckdb/session.py,sha256=H1qjMYmhpwUHmf6jOPA6IhPIEIeX8rlvOl3MTIEijG0,2719
 sqlframe/duckdb/table.py,sha256=AmEKoH2TZo98loS5NbNaTuqv0eg76SY_OckVBMmQ6Co,410
 sqlframe/duckdb/types.py,sha256=KwNyuXIo-2xVVd4bZED3YrQOobKCtemlxGrJL7DrTC8,34
@@ -110,7 +110,7 @@ sqlframe/spark/dataframe.py,sha256=WyXHWsH8Ldu2cWTNmsLy5hEFrjJvQh_Aqv3JJcbDy6k,1
 sqlframe/spark/functions.py,sha256=MYCgHsjRQWylT-rezWRBuLV6BivcaVarbaQtP4T0toQ,331
 sqlframe/spark/functions.pyi,sha256=GyOdUzv2Z7Qt99JAKEPKgV2t2Rn274OuqwAfcoAXlN0,24259
 sqlframe/spark/group.py,sha256=MrvV_v-YkBc6T1zz882WrEqtWjlooWIyHBCmTQg3fCA,379
-sqlframe/spark/readwriter.py,sha256=YVGgkYpmuQj8wGHxJx6ivAAKSqyGes-0GhCezvd7kYU,6565
+sqlframe/spark/readwriter.py,sha256=VZHFWnhuFjlI-PMHclp1RmoIiISeT6IIp5O8lDn5TBY,6777
 sqlframe/spark/session.py,sha256=irlsTky06pKRKAyPLwVzUtLGe4O8mALSgxIqLvqJNF8,5675
 sqlframe/spark/table.py,sha256=puWV8h_CqA64zwpzq0ydY9LoygMAvprkODyxyzZeF9M,186
 sqlframe/spark/types.py,sha256=KwNyuXIo-2xVVd4bZED3YrQOobKCtemlxGrJL7DrTC8,34
@@ -130,8 +130,8 @@ sqlframe/standalone/udf.py,sha256=azmgtUjHNIPs0WMVNId05SHwiYn41MKVBhKXsQJ5dmY,27
 sqlframe/standalone/window.py,sha256=6GKPzuxeSapJakBaKBeT9VpED1ACdjggDv9JRILDyV0,35
 sqlframe/testing/__init__.py,sha256=VVCosQhitU74A3NnE52O4mNtGZONapuEXcc20QmSlnQ,132
 sqlframe/testing/utils.py,sha256=PFsGZpwNUE_4-g_f43_vstTqsK0AQ2lBneb5Eb6NkFo,13008
-sqlframe-3.25.0.dist-info/LICENSE,sha256=VZu79YgW780qxaFJMr0t5ZgbOYEh04xWoxaWOaqIGWk,1068
-sqlframe-3.25.0.dist-info/METADATA,sha256=qXwyR7B-HVkPnO4ZGah338Mu-mK7F-9bliiB_5mRAGE,8971
-sqlframe-3.25.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-sqlframe-3.25.0.dist-info/top_level.txt,sha256=T0_RpoygaZSF6heeWwIDQgaP0varUdSK1pzjeJZRjM8,9
-sqlframe-3.25.0.dist-info/RECORD,,
+sqlframe-3.27.0.dist-info/LICENSE,sha256=VZu79YgW780qxaFJMr0t5ZgbOYEh04xWoxaWOaqIGWk,1068
+sqlframe-3.27.0.dist-info/METADATA,sha256=OxTHyxBe656grcjYrGVqBA6VpyGAk36IvFaKnt1CIjk,8971
+sqlframe-3.27.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+sqlframe-3.27.0.dist-info/top_level.txt,sha256=T0_RpoygaZSF6heeWwIDQgaP0varUdSK1pzjeJZRjM8,9
+sqlframe-3.27.0.dist-info/RECORD,,