sqlframe 3.21.1__py3-none-any.whl → 3.22.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sqlframe/_version.py CHANGED
@@ -1,8 +1,13 @@
-# file generated by setuptools_scm
+# file generated by setuptools-scm
 # don't change, don't track in version control
+
+__all__ = ["__version__", "__version_tuple__", "version", "version_tuple"]
+
 TYPE_CHECKING = False
 if TYPE_CHECKING:
-    from typing import Tuple, Union
+    from typing import Tuple
+    from typing import Union
+
     VERSION_TUPLE = Tuple[Union[int, str], ...]
 else:
     VERSION_TUPLE = object
@@ -12,5 +17,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '3.21.1'
-__version_tuple__ = version_tuple = (3, 21, 1)
+__version__ = version = '3.22.1'
+__version_tuple__ = version_tuple = (3, 22, 1)
sqlframe/base/catalog.py CHANGED
@@ -70,13 +70,16 @@ class _BaseCatalog(t.Generic[SESSION, DF]):
         }
 
     def add_table(
-        self, table: exp.Table | str, column_mapping: t.Optional[ColumnMapping] = None
+        self,
+        table: exp.Table | str,
+        column_mapping: t.Optional[ColumnMapping] = None,
+        **kwargs: t.Any,
     ) -> None:
         # TODO: Making this an update or add
         table = self.ensure_table(table)
         if self._schema.find(table):
             return
-        if not column_mapping:
+        if column_mapping is None:
             try:
                 column_mapping = {
                     normalize_string(
@@ -100,7 +103,7 @@ class _BaseCatalog(t.Generic[SESSION, DF]):
             if column.this.quoted:
                 self._quoted_columns[table].append(column.this.name)
 
-        self._schema.add_table(table, column_mapping, dialect=self.session.input_dialect)
+        self._schema.add_table(table, column_mapping, dialect=self.session.input_dialect, **kwargs)
 
     def getDatabase(self, dbName: str) -> Database:
         """Get the database with the specified name.
sqlframe/base/dataframe.py CHANGED
@@ -342,7 +342,7 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         return types.StructType(
             [
                 types.StructField(
-                    c.name,
+                    self.display_name_mapping.get(c.name, c.name),
                     sqlglot_to_spark(
                         exp.DataType.build(c.dataType, dialect=self.session.output_dialect)
                     ),
@@ -1898,7 +1898,7 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         print("root")
         for column in self._typed_columns:
             print_schema(
-                column.name,
+                self.display_name_mapping.get(column.name, column.name),
                 exp.DataType.build(column.dataType, dialect=self.session.output_dialect),
                 column.nullable,
                 0,
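Both `schema` and `printSchema()` now look each column up in `display_name_mapping`, so identifiers the engine normalized (e.g. lower-cased) are reported under their original display names. A behavior sketch, assuming DuckDB's identifier normalization populates the mapping; the exact printed types are illustrative:

```python
from sqlframe.duckdb import DuckDBSession

session = DuckDBSession()
df = session.createDataFrame([(1, "a")], ["UserId", "UserName"])
df.printSchema()
# root
#  |-- UserId: long (nullable = true)      <- display name, not normalized "userid"
#  |-- UserName: string (nullable = true)
```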
sqlframe/base/functions.py CHANGED
@@ -4504,7 +4504,7 @@ def median(col: ColumnOrName) -> Column:
     return Column.invoke_expression_over_column(col, expression.Median)
 
 
-@meta(unsupported_engines="*")
+@meta(unsupported_engines=["bigquery", "postgres"])
 def mode(col: ColumnOrName) -> Column:
     """
    Returns the most frequent value in a group.
@@ -4540,6 +4540,7 @@ def mode(col: ColumnOrName) -> Column:
    |dotNET|      2012|
    +------+----------+
    """
+
    return Column.invoke_anonymous_function(col, "mode")
 
 
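`mode` was previously flagged unsupported on every engine (`"*"`); it is now only flagged for BigQuery and Postgres, where a direct `mode(...)` aggregate call is not available. A quick illustrative check on DuckDB, which does ship a `mode` aggregate (output shape assumed):

```python
from sqlframe.duckdb import DuckDBSession
from sqlframe.duckdb import functions as F

session = DuckDBSession()
df = session.createDataFrame([("Java",), ("dotNET",), ("Java",)], ["course"])
df.select(F.mode("course").alias("mode")).show()
# +------+
# | mode |
# +------+
# | Java |
# +------+
```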
sqlframe/base/mixins/readwriter_mixins.py CHANGED
@@ -82,6 +82,10 @@ class PandasLoaderMixin(_BaseDataFrameReader, t.Generic[SESSION, DF]):
         elif format == "parquet":
             df = pd.read_parquet(path, **kwargs)  # type: ignore
         elif format == "csv":
+            kwargs.pop("inferSchema", None)
+            if "header" in kwargs:
+                if isinstance(kwargs["header"], bool) and kwargs["header"]:
+                    kwargs["header"] = "infer"
             df = pd.read_csv(path, **kwargs)  # type: ignore
         else:
             raise UnsupportedOperationError(f"Unsupported format: {format}")
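The CSV branch now translates Spark-style reader options before handing them to pandas: `inferSchema` is dropped (pandas always infers dtypes), and a boolean `header=True` becomes the `"infer"` value `pd.read_csv` expects, since pandas rejects booleans for `header`. A standalone sketch of the same translation (file name illustrative):

```python
import pandas as pd

kwargs = {"header": True, "inferSchema": True}  # Spark-style reader options
kwargs.pop("inferSchema", None)                 # no pandas equivalent
if isinstance(kwargs.get("header"), bool) and kwargs["header"]:
    kwargs["header"] = "infer"                  # pandas wants "infer", not True
df = pd.read_csv("people.csv", **kwargs)
```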
sqlframe/base/readerwriter.py CHANGED
@@ -393,10 +393,12 @@ class _BaseDataFrameWriter(t.Generic[SESSION, DF]):
         df: DF,
         mode: t.Optional[str] = None,
         by_name: bool = False,
+        state_format_to_write: t.Optional[str] = None,
     ):
         self._df = df
         self._mode = mode
         self._by_name = by_name
+        self._state_format_to_write = state_format_to_write
 
     @property
     def _session(self) -> SESSION:
@@ -484,6 +486,44 @@ class _BaseDataFrameWriter(t.Generic[SESSION, DF]):
     def _write(self, path: str, mode: t.Optional[str], format: str, **options) -> None:
         raise NotImplementedError
 
+    def format(self, source: str) -> "Self":
+        """Specifies the input data source format.
+
+        .. versionadded:: 1.4.0
+
+        .. versionchanged:: 3.4.0
+            Supports Spark Connect.
+
+        Parameters
+        ----------
+        source : str
+            string, name of the data source, e.g. 'json', 'parquet'.
+
+        Examples
+        --------
+        >>> spark.read.format('json')
+        <...readwriter.DataFrameReader object ...>
+
+        Write a DataFrame into a JSON file and read it back.
+
+        >>> import tempfile
+        >>> with tempfile.TemporaryDirectory() as d:
+        ...     # Write a DataFrame into a JSON file
+        ...     spark.createDataFrame(
+        ...         [{"age": 100, "name": "Hyukjin Kwon"}]
+        ...     ).write.mode("overwrite").format("json").save(d)
+        ...
+        ...     # Read the JSON file as a DataFrame.
+        ...     spark.read.format('json').load(d).show()
+        +---+------------+
+        |age|        name|
+        +---+------------+
+        |100|Hyukjin Kwon|
+        +---+------------+
+        """
+        self._state_format_to_write = source
+        return self
+
     def json(
         self,
         path: str,
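The writer now records the chosen format in `_state_format_to_write`, so engines that implement `save()` (such as the new Spark writer below) can resolve the format from a chained call. Usage sketch, assuming `df` is a sqlframe DataFrame whose writer supports `save`:

```python
# format() only stores state and returns self, enabling the familiar chain:
df.write.mode("overwrite").format("json").save("/tmp/out")
```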
sqlframe/base/util.py CHANGED
@@ -1,6 +1,8 @@
 from __future__ import annotations
 
 import importlib
+import random
+import string
 import typing as t
 import unicodedata
 
@@ -277,15 +279,6 @@ def verify_openai_installed():
     )
 
 
-def verify_numpy_installed():
-    try:
-        import numpy  # noqa
-    except ImportError:
-        raise ImportError(
-            """Numpy is required for this functionality. `pip install "sqlframe[pandas]"` (also include your engine if needed) to install pandas/numpy."""
-        )
-
-
 def quote_preserving_alias_or_name(col: t.Union[exp.Column, exp.Alias]) -> str:
     from sqlframe.base.session import _BaseSession
 
@@ -427,3 +420,20 @@ def normalize_string(
     for pos in star_positions:
         normalized_value = normalized_value[:pos] + "*" + normalized_value[pos:]
     return normalized_value
+
+
+def generate_random_identifier(size=6, chars=string.ascii_uppercase + string.digits):
+    return "_" + "".join(random.choice(chars) for _ in range(size))
+
+
+def split_filepath(filepath: str) -> tuple[str, str]:
+    if filepath.startswith("dbfs:") or filepath.startswith("/dbfs"):
+        prefix = "dbfs:"
+        return prefix, filepath[len(prefix) :]
+    if filepath.startswith("file://"):
+        prefix = "file://"
+        return "", filepath[len(prefix) :]
+    split_ = str(filepath).split("://", 1)
+    if len(split_) == 2:  # noqa: PLR2004
+        return split_[0] + "://", split_[1]
+    return "", split_[0]
sqlframe/base/window.py CHANGED
@@ -82,37 +82,26 @@ class WindowSpec:
     def _calc_start_end(
         self, start: int, end: int
     ) -> t.Dict[str, t.Optional[t.Union[str, exp.Expression]]]:
-        kwargs: t.Dict[str, t.Optional[t.Union[str, exp.Expression]]] = {
-            "start_side": None,
-            "end_side": None,
+        def get_value_and_side(x: int) -> t.Tuple[t.Union[str, exp.Expression], t.Optional[str]]:
+            if x == Window.currentRow:
+                return "CURRENT ROW", None
+            if x < 0:
+                side = "PRECEDING"
+                value = "UNBOUNDED" if x <= Window.unboundedPreceding else F.lit(abs(x)).expression
+                return value, side
+            else:
+                side = "FOLLOWING"
+                value = "UNBOUNDED" if x >= Window.unboundedFollowing else F.lit(x).expression
+                return value, side
+
+        start, start_side = get_value_and_side(start)  # type: ignore
+        end, end_side = get_value_and_side(end)  # type: ignore
+        return {
+            "start": start,  # type: ignore
+            "start_side": start_side,
+            "end": end,  # type: ignore
+            "end_side": end_side,
         }
-        if start == Window.currentRow:
-            kwargs["start"] = "CURRENT ROW"
-        else:
-            kwargs = {
-                **kwargs,
-                **{
-                    "start_side": "PRECEDING",
-                    "start": (
-                        "UNBOUNDED"
-                        if start <= Window.unboundedPreceding
-                        else F.lit(start).expression
-                    ),
-                },
-            }
-        if end == Window.currentRow:
-            kwargs["end"] = "CURRENT ROW"
-        else:
-            kwargs = {
-                **kwargs,
-                **{
-                    "end_side": "FOLLOWING",
-                    "end": (
-                        "UNBOUNDED" if end >= Window.unboundedFollowing else F.lit(end).expression
-                    ),
-                },
-            }
-        return kwargs
 
     def rowsBetween(self, start: int, end: int) -> WindowSpec:
         window_spec = self.copy()
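The refactor collapses the two copy-pasted dict-merging branches into one `get_value_and_side` helper, and it also applies `abs()` to negative offsets, so a bounded preceding frame renders its literal as a positive value. Illustrative trace:

```python
from sqlframe.base.window import Window

# rowsBetween(-3, Window.currentRow) calls _calc_start_end(-3, 0), which now returns
# {"start": lit(3), "start_side": "PRECEDING", "end": "CURRENT ROW", "end_side": None},
# i.e. ROWS BETWEEN 3 PRECEDING AND CURRENT ROW.
spec = Window.orderBy("ts").rowsBetween(-3, Window.currentRow)
```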
sqlframe/duckdb/readwriter.py CHANGED
@@ -92,6 +92,7 @@ class DuckDBDataFrameReader(
         if format == "delta":
             from_clause = f"delta_scan('{path}')"
         elif format:
+            options.pop("inferSchema", None)
             paths = ",".join([f"'{path}'" for path in ensure_list(path)])
             from_clause = f"read_{format}([{paths}], {to_csv(options)})"
         else:
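DuckDB's `read_csv`/`read_json` table functions have no `inferSchema` argument, so the Spark-style option is now stripped rather than serialized into the generated SQL. Sketch of the effect (path and rendered clause illustrative):

```python
df = session.read.load("people.csv", format="csv", header=True, inferSchema=True)
# The generated FROM clause no longer carries an inferSchema entry, roughly:
#   read_csv(['people.csv'], header = True)
```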
sqlframe/duckdb/session.py CHANGED
@@ -4,7 +4,7 @@ import typing as t
 from functools import cached_property
 
 from sqlframe.base.session import _BaseSession
-from sqlframe.base.util import soundex, verify_numpy_installed
+from sqlframe.base.util import soundex
 from sqlframe.duckdb.catalog import DuckDBCatalog
 from sqlframe.duckdb.dataframe import DuckDBDataFrame
 from sqlframe.duckdb.readwriter import (
@@ -46,8 +46,6 @@ class DuckDBSession(
         if not hasattr(self, "_conn"):
             conn = conn or duckdb.connect()
             try:
-                # Creating a function requires numpy to be installed so if they don't have it, we'll just skip it
-                verify_numpy_installed()
                 conn.create_function("SOUNDEX", lambda x: soundex(x), return_type=VARCHAR)
             except ImportError:
                 pass
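With `verify_numpy_installed` deleted (see the util.py hunk above), the session relies on DuckDB's own `create_function` raising `ImportError` when numpy is absent; the existing `except ImportError: pass` already skips `SOUNDEX` registration in that case. Minimal illustration of the unchanged outward behavior:

```python
from sqlframe.duckdb import DuckDBSession

# Without numpy installed, SOUNDEX registration is silently skipped instead of
# failing session construction -- same behavior as before, one fewer helper.
session = DuckDBSession()
```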
sqlframe/spark/readwriter.py CHANGED
@@ -4,27 +4,160 @@ from __future__ import annotations
 
 import typing as t
 
-from sqlframe.base.mixins.readwriter_mixins import PandasLoaderMixin, PandasWriterMixin
+from sqlglot import exp
+from sqlglot.helper import ensure_list
+
 from sqlframe.base.readerwriter import (
     _BaseDataFrameReader,
     _BaseDataFrameWriter,
+    _infer_format,
 )
+from sqlframe.base.util import ensure_column_mapping, generate_random_identifier, to_csv
 
 if t.TYPE_CHECKING:
+    from sqlframe.base._typing import OptionalPrimitiveType, PathOrPaths
+    from sqlframe.base.types import StructType
     from sqlframe.spark.dataframe import SparkDataFrame
     from sqlframe.spark.session import SparkSession
     from sqlframe.spark.table import SparkTable
 
 
 class SparkDataFrameReader(
-    PandasLoaderMixin["SparkSession", "SparkDataFrame"],
     _BaseDataFrameReader["SparkSession", "SparkDataFrame", "SparkTable"],
 ):
-    pass
+    def load(
+        self,
+        path: t.Optional[PathOrPaths] = None,
+        format: t.Optional[str] = None,
+        schema: t.Optional[t.Union[StructType, str]] = None,
+        **options: OptionalPrimitiveType,
+    ) -> SparkDataFrame:
+        """Loads data from a data source and returns it as a :class:`DataFrame`.
+
+        .. versionadded:: 1.4.0
+
+        .. versionchanged:: 3.4.0
+            Supports Spark Connect.
+
+        Parameters
+        ----------
+        path : str or list, t.Optional
+            t.Optional string or a list of string for file-system backed data sources.
+        format : str, t.Optional
+            t.Optional string for format of the data source. Default to 'parquet'.
+        schema : :class:`pyspark.sql.types.StructType` or str, t.Optional
+            t.Optional :class:`pyspark.sql.types.StructType` for the input schema
+            or a DDL-formatted string (For example ``col0 INT, col1 DOUBLE``).
+        **options : dict
+            all other string options
+
+        Examples
+        --------
+        Load a CSV file with format, schema and options specified.
+
+        >>> import tempfile
+        >>> with tempfile.TemporaryDirectory() as d:
+        ...     # Write a DataFrame into a CSV file with a header
+        ...     df = spark.createDataFrame([{"age": 100, "name": "Hyukjin Kwon"}])
+        ...     df.write.option("header", True).mode("overwrite").format("csv").save(d)
+        ...
+        ...     # Read the CSV file as a DataFrame with 'nullValue' option set to 'Hyukjin Kwon',
+        ...     # and 'header' option set to `True`.
+        ...     df = spark.read.load(
+        ...         d, schema=df.schema, format="csv", nullValue="Hyukjin Kwon", header=True)
+        ...     df.printSchema()
+        ...     df.show()
+        root
+         |-- age: long (nullable = true)
+         |-- name: string (nullable = true)
+        +---+----+
+        |age|name|
+        +---+----+
+        |100|NULL|
+        +---+----+
+        """
+        assert path is not None, "path is required"
+        assert isinstance(path, str), "path must be a string"
+        format = format or self.state_format_to_read or _infer_format(path)
+        if schema:
+            column_mapping = ensure_column_mapping(schema)
+            select_column_mapping = column_mapping.copy()
+            select_columns = [x.expression for x in self._to_casted_columns(select_column_mapping)]
+
+            if hasattr(schema, "simpleString"):
+                schema = schema.simpleString()
+        else:
+            select_columns = [exp.Star()]
+
+        if format == "delta":
+            from_clause = f"delta.`{path}`"
+        elif format:
+            paths = ",".join([f"{path}" for path in ensure_list(path)])
+            tmp_view_key = options.get("_tmp_view_key_", f"{generate_random_identifier()}_vw")
+            options["_tmp_view_key_"] = tmp_view_key
+
+            format_options: dict[str, OptionalPrimitiveType] = {
+                k: v for k, v in options.items() if v is not None
+            }
+            format_options.pop("_tmp_view_key_")
+            format_options["path"] = paths
+            if schema:
+                format_options["schema"] = f"{schema}"
+                format_options.pop("inferSchema", None)
+            format_options = {key: f"'{val}'" for key, val in format_options.items()}
+            format_options_str = to_csv(format_options, " ")
+
+            tmp_view = f"CREATE OR REPLACE TEMPORARY VIEW {tmp_view_key} USING {format}" + (
+                f" OPTIONS ({format_options_str})" if format_options_str else ""
+            )
+            self.session.spark_session.sql(tmp_view).collect()
+
+            from_clause = f"{tmp_view_key}"
+        else:
+            from_clause = f"'{path}'"
+
+        df = self.session.sql(
+            exp.select(*select_columns).from_(from_clause, dialect=self.session.input_dialect),
+            qualify=False,
+        )
+        if select_columns == [exp.Star()] and df.schema:
+            return self.load(path=path, format=format, schema=df.schema, **options)
+        self.session._last_loaded_file = path  # type: ignore
+        return df
 
 
 class SparkDataFrameWriter(
-    PandasWriterMixin["SparkSession", "SparkDataFrame"],
    _BaseDataFrameWriter["SparkSession", "SparkDataFrame"],
 ):
-    pass
+    def save(
+        self,
+        path: str,
+        mode: t.Optional[str] = None,
+        format: t.Optional[str] = None,
+        partitionBy: t.Optional[t.Union[str, t.List[str]]] = None,
+        **options,
+    ):
+        format = str(format or self._state_format_to_write)
+        self._write(path, mode, format, partitionBy=partitionBy, **options)
+
+    def _write(self, path: str, mode: t.Optional[str], format: str, **options):
+        spark_df = None
+        expressions = self._df._get_expressions()
+        for i, expression in enumerate(expressions):
+            if i < len(expressions) - 1:
+                self._df.session._collect(expressions)
+            else:
+                sql = self._df.session._to_sql(expression)
+                spark_df = self._session.spark_session.sql(sql)
+        if spark_df is not None:
+            options = {k: v for k, v in options.items() if v is not None}
+            mode = str(mode or self._mode or "default")
+            spark_writer = spark_df.write.format(format).mode(mode)
+            partition_columns = options.pop("partitionBy", None)
+            compression = options.pop("compression", None)
+            if partition_columns:
+                partition_columns = options.pop("partitionBy")
+                spark_writer = spark_writer.partitionBy(*partition_columns)
+            if compression:
+                spark_writer = spark_writer.option("compression", compression)
+            spark_writer.save(path=path, **options)
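The Spark engine now reads and writes through PySpark natively instead of the pandas mixins: `load()` materializes a `CREATE OR REPLACE TEMPORARY VIEW ... USING <format>` and selects from it (reloading once with the inferred schema to get casted columns), while `_write()` executes the DataFrame's SQL and hands the result to `spark_df.write`. End-to-end sketch, assuming a local PySpark installation and that `SparkSession()` wraps a default PySpark session:

```python
from sqlframe.spark import SparkSession

session = SparkSession()
df = session.createDataFrame([{"age": 100, "name": "Hyukjin Kwon"}])
df.write.mode("overwrite").format("json").save("/tmp/people")

loaded = session.read.load("/tmp/people", format="json")
loaded.show()
```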
sqlframe-3.21.1.dist-info/METADATA → sqlframe-3.22.1.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sqlframe
-Version: 3.21.1
+Version: 3.22.1
 Summary: Turning PySpark Into a Universal DataFrame API
 Home-page: https://github.com/eakmanrq/sqlframe
 Author: Ryan Eakman
@@ -36,7 +36,7 @@ Requires-Dist: psycopg <4,>=3.1 ; extra == 'dev'
 Requires-Dist: pyarrow <20,>=10 ; extra == 'dev'
 Requires-Dist: pyspark <3.6,>=2 ; extra == 'dev'
 Requires-Dist: pytest-forked ; extra == 'dev'
-Requires-Dist: pytest-postgresql <7,>=6 ; extra == 'dev'
+Requires-Dist: pytest-postgresql <8,>=6 ; extra == 'dev'
 Requires-Dist: pytest-xdist <3.7,>=3.6 ; extra == 'dev'
 Requires-Dist: pytest <8.4,>=8.2.0 ; extra == 'dev'
 Requires-Dist: ruff <0.10,>=0.4.4 ; extra == 'dev'
sqlframe-3.21.1.dist-info/RECORD → sqlframe-3.22.1.dist-info/RECORD RENAMED
@@ -1,29 +1,29 @@
 sqlframe/__init__.py,sha256=wfqm98eLoLid9oV_FzzpG5loKC6LxOhj2lXpfN7SARo,3138
-sqlframe/_version.py,sha256=fmhKf9XPZdwZdKpQ-ESJ_LGssm7Q8K_NJEGVKwXLGQM,413
+sqlframe/_version.py,sha256=8eLhLry2PxnXZDWV0RoPCF5YsBcUB10-ky6GKcZbQZU,513
 sqlframe/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sqlframe/base/_typing.py,sha256=b2clI5HI1zEZKB_3Msx3FeAJQyft44ubUifJwQRVXyQ,1298
-sqlframe/base/catalog.py,sha256=SzFQalTWdhWzxUY-4ut1f9TfOECp_JmJEgNPfrRKCe0,38457
+sqlframe/base/catalog.py,sha256=D39Mn4AIkUyaVyLJiI5gcX0Bv_pChl89GAyYHrtOw5g,38513
 sqlframe/base/column.py,sha256=oHVwkSWABO3ZlAbgBShsxSSlgbI06BOup5XJrRhgqJI,18097
-sqlframe/base/dataframe.py,sha256=FOgLdCpscLsBntkRvutcgSVqXqMgXo9DYa892mXu00E,83907
+sqlframe/base/dataframe.py,sha256=9wcN5I5bSiGKs6m-mSCwlDUSRfrZ_ymfRUtJa8hggd4,83990
 sqlframe/base/decorators.py,sha256=ms-CvDOIW3T8IVB9VqDmLwAiaEsqXLYRXEqVQaxktiM,1890
 sqlframe/base/exceptions.py,sha256=9Uwvqn2eAkDpqm4BrRgbL61qM-GMCbJEMAW8otxO46s,370
 sqlframe/base/function_alternatives.py,sha256=KFkEm0aIHzajvQmiPZnzTLh-Ud9wjeg4lJ4Rk0vk-YU,53674
-sqlframe/base/functions.py,sha256=jfLgboldiTB9CPkoZMtKUAwx6XSvFnEOIpCZQfoEJJU,223060
+sqlframe/base/functions.py,sha256=0lLefa959J1z7Ea6QBnQrru06lKCLurEX1xFYxGJbTc,223082
 sqlframe/base/group.py,sha256=fsyG5990_Pd7gFPjTFrH9IEoAquL_wEkVpIlBAIkZJU,4091
 sqlframe/base/normalize.py,sha256=nXAJ5CwxVf4DV0GsH-q1w0p8gmjSMlv96k_ez1eVul8,3880
 sqlframe/base/operations.py,sha256=xSPw74e59wYvNd6U1AlwziNCTG6Aftrbl4SybN9u9VE,3450
-sqlframe/base/readerwriter.py,sha256=w8926cqIrXF7NGHiINw5UHzP_3xpjsqbijTBTzycBRM,26605
+sqlframe/base/readerwriter.py,sha256=g6h9ldC4ImYdoUwZJxwfd5Iu2p9aqZ39bEFhwrHVlvI,27868
 sqlframe/base/session.py,sha256=G5_bI_z1iJtAGm2SgEdjkKiyJmS0yOUopx9P5TEGdR4,27273
 sqlframe/base/table.py,sha256=rCeh1W5SWbtEVfkLAUiexzrZwNgmZeptLEmLcM1ABkE,6961
 sqlframe/base/transforms.py,sha256=y0j3SGDz3XCmNGrvassk1S-owllUWfkHyMgZlY6SFO4,467
 sqlframe/base/types.py,sha256=iBNk9bpFtb2NBIogYS8i7OlQZMRvpR6XxqzBebsjQDU,12280
 sqlframe/base/udf.py,sha256=O6hMhBUy9NVv-mhJRtfFhXTIa_-Z8Y_FkmmuOHu0l90,1117
-sqlframe/base/util.py,sha256=_s2M-qHzTLgyGu1v8laRHJorUpUO6-fr3kk7CsvcuXw,15161
-sqlframe/base/window.py,sha256=8hOv-ignPPIsZA9FzvYzcLE9J_glalVaYjIAUdRUX3o,4943
+sqlframe/base/util.py,sha256=QktpGRlkYsapQRs_uhuc3T736qRry2PNE7kcPxjYefo,15519
+sqlframe/base/window.py,sha256=vmQEUa3FnaDmdce2f3xzskOJe7XsZ7JhMyOIR4RdJuY,4806
 sqlframe/base/mixins/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sqlframe/base/mixins/catalog_mixins.py,sha256=9tn0mK8oPoqIIjNItystD5tdBMdK9YpkxTG7G9KQl8k,18619
 sqlframe/base/mixins/dataframe_mixins.py,sha256=9U556vWjhmAwnbqkQ4mDkKHxQRs-FkncwEEY9RWmm6U,1408
-sqlframe/base/mixins/readwriter_mixins.py,sha256=ap8j_g7PoUGHaHKCPMnRPbXofOsUhUzlaF7Loxy2m-I,4752
+sqlframe/base/mixins/readwriter_mixins.py,sha256=ItQ_0jZ5RljgmLjGDIzLMRP_NQdy3wAyKwJ6K5NjaqA,4954
 sqlframe/base/mixins/table_mixins.py,sha256=2TnGFpbDSGw_NswpZwLACqvdD4zCA7hXekQ9IEkoTOk,13784
 sqlframe/bigquery/__init__.py,sha256=kbaomhYAANPdxeDQhajv8IHfMg_ENKivtYK-rPwaV08,939
 sqlframe/bigquery/catalog.py,sha256=w6cxAV6OWa2icVt5vwgNXy08EP9WJh3DkQJzQkn2kIo,11661
@@ -58,8 +58,8 @@ sqlframe/duckdb/dataframe.py,sha256=Z8_K69UQGZVeBfVGXVwIJP8OMuIvNBB3DPKTP3Lfu4w,
 sqlframe/duckdb/functions.py,sha256=ix2efGGD4HLaY1rtCtEd3IrsicGEVGiBAeKOo5OD8rA,424
 sqlframe/duckdb/functions.pyi,sha256=P0ky6k-J7LdCDrQ0OjfRC3ARIYNHPmAmmaB_jBEO5L0,12383
 sqlframe/duckdb/group.py,sha256=IkhbW42Ng1U5YT3FkIdiB4zBqRkW4QyTb-1detY1e_4,383
-sqlframe/duckdb/readwriter.py,sha256=5EP8DEoX3N_xYavWpetsZdzvtYF-oCrAz3n-siNE8yY,4938
-sqlframe/duckdb/session.py,sha256=Uf7im6eBbBYRvIhVGV0VCTCF76FQ00A5FbKPCdNllzw,2898
+sqlframe/duckdb/readwriter.py,sha256=-_Ama7evadIa3PYvynKDK6RcTMTDBHpHJzfANTine7g,4983
+sqlframe/duckdb/session.py,sha256=H1qjMYmhpwUHmf6jOPA6IhPIEIeX8rlvOl3MTIEijG0,2719
 sqlframe/duckdb/table.py,sha256=AmEKoH2TZo98loS5NbNaTuqv0eg76SY_OckVBMmQ6Co,410
 sqlframe/duckdb/types.py,sha256=KwNyuXIo-2xVVd4bZED3YrQOobKCtemlxGrJL7DrTC8,34
 sqlframe/duckdb/udf.py,sha256=Du9LnOtT1lJvB90D4HSR2tB7MXy179jZngDR-EjVjQk,656
@@ -109,7 +109,7 @@ sqlframe/spark/dataframe.py,sha256=WyXHWsH8Ldu2cWTNmsLy5hEFrjJvQh_Aqv3JJcbDy6k,1
 sqlframe/spark/functions.py,sha256=MYCgHsjRQWylT-rezWRBuLV6BivcaVarbaQtP4T0toQ,331
 sqlframe/spark/functions.pyi,sha256=GyOdUzv2Z7Qt99JAKEPKgV2t2Rn274OuqwAfcoAXlN0,24259
 sqlframe/spark/group.py,sha256=MrvV_v-YkBc6T1zz882WrEqtWjlooWIyHBCmTQg3fCA,379
-sqlframe/spark/readwriter.py,sha256=zXZcCPWpQMMN90wdIx8AD4Y5tWBcpRSL4-yKX2aZyik,874
+sqlframe/spark/readwriter.py,sha256=YVGgkYpmuQj8wGHxJx6ivAAKSqyGes-0GhCezvd7kYU,6565
 sqlframe/spark/session.py,sha256=irlsTky06pKRKAyPLwVzUtLGe4O8mALSgxIqLvqJNF8,5675
 sqlframe/spark/table.py,sha256=puWV8h_CqA64zwpzq0ydY9LoygMAvprkODyxyzZeF9M,186
 sqlframe/spark/types.py,sha256=KwNyuXIo-2xVVd4bZED3YrQOobKCtemlxGrJL7DrTC8,34
@@ -129,8 +129,8 @@ sqlframe/standalone/udf.py,sha256=azmgtUjHNIPs0WMVNId05SHwiYn41MKVBhKXsQJ5dmY,27
 sqlframe/standalone/window.py,sha256=6GKPzuxeSapJakBaKBeT9VpED1ACdjggDv9JRILDyV0,35
 sqlframe/testing/__init__.py,sha256=VVCosQhitU74A3NnE52O4mNtGZONapuEXcc20QmSlnQ,132
 sqlframe/testing/utils.py,sha256=PFsGZpwNUE_4-g_f43_vstTqsK0AQ2lBneb5Eb6NkFo,13008
-sqlframe-3.21.1.dist-info/LICENSE,sha256=VZu79YgW780qxaFJMr0t5ZgbOYEh04xWoxaWOaqIGWk,1068
-sqlframe-3.21.1.dist-info/METADATA,sha256=AauznGD-zSbh2cqT63w2MIrg_-0SlewyyRMNElL5O2I,8970
-sqlframe-3.21.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-sqlframe-3.21.1.dist-info/top_level.txt,sha256=T0_RpoygaZSF6heeWwIDQgaP0varUdSK1pzjeJZRjM8,9
-sqlframe-3.21.1.dist-info/RECORD,,
+sqlframe-3.22.1.dist-info/LICENSE,sha256=VZu79YgW780qxaFJMr0t5ZgbOYEh04xWoxaWOaqIGWk,1068
+sqlframe-3.22.1.dist-info/METADATA,sha256=T51BFHOahh25l4U0bR07BvwoXMIyO_FvhzwJYA54-sQ,8970
+sqlframe-3.22.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+sqlframe-3.22.1.dist-info/top_level.txt,sha256=T0_RpoygaZSF6heeWwIDQgaP0varUdSK1pzjeJZRjM8,9
+sqlframe-3.22.1.dist-info/RECORD,,