sqlframe 3.15.1__py3-none-any.whl → 3.17.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
sqlframe/_version.py CHANGED
@@ -12,5 +12,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '3.15.1'
-__version_tuple__ = version_tuple = (3, 15, 1)
+__version__ = version = '3.17.0'
+__version_tuple__ = version_tuple = (3, 17, 0)
sqlframe/base/column.py CHANGED
@@ -291,6 +291,7 @@ class Column:
             this=self.column_expression,
             alias=alias.this if isinstance(alias, exp.Column) else alias,
         )
+        new_expression._meta = {"display_name": name, **(new_expression._meta or {})}
         return Column(new_expression)
 
     def asc(self) -> Column:
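Note: the hunk above threads the user's original alias through sqlglot's expression metadata. A minimal standalone sketch of the mechanism using sqlglot directly (the column and alias names are illustrative):

    import sqlglot.expressions as exp

    # Stash a case-sensitive display name on an aliased expression, as the
    # Column.alias change above does; _meta survives expression copies.
    aliased = exp.alias_(exp.column("employeeid"), "employeeid")
    aliased._meta = {"display_name": "EmployeeId", **(aliased._meta or {})}
    print(aliased.meta["display_name"])  # EmployeeId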
sqlframe/base/dataframe.py CHANGED
@@ -233,6 +233,7 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         last_op: Operation = Operation.INIT,
         pending_hints: t.Optional[t.List[exp.Expression]] = None,
         output_expression_container: t.Optional[OutputExpressionContainer] = None,
+        display_name_mapping: t.Optional[t.Dict[str, str]] = None,
         **kwargs,
     ):
         self.session = session
@@ -246,6 +247,7 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         self.pending_hints = pending_hints or []
         self.output_expression_container = output_expression_container or exp.Select()
         self.temp_views: t.List[exp.Select] = []
+        self.display_name_mapping = display_name_mapping or {}
 
     def __getattr__(self, column_name: str) -> Column:
         return self[column_name]
@@ -385,13 +387,16 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         return Column.ensure_cols(ensure_list(cols))  # type: ignore
 
     def _ensure_and_normalize_cols(
-        self, cols, expression: t.Optional[exp.Select] = None
+        self, cols, expression: t.Optional[exp.Select] = None, skip_star_expansion: bool = False
     ) -> t.List[Column]:
         from sqlframe.base.normalize import normalize
 
         cols = self._ensure_list_of_columns(cols)
         normalize(self.session, expression or self.expression, cols)
-        return list(flatten([self._expand_star(col) for col in cols]))
+        if not skip_star_expansion:
+            cols = list(flatten([self._expand_star(col) for col in cols]))
+        self._resolve_ambiguous_columns(cols)
+        return cols
 
     def _ensure_and_normalize_col(self, col):
         from sqlframe.base.column import Column
@@ -399,6 +404,7 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
 
         col = Column.ensure_col(col)
         normalize(self.session, self.expression, col)
+        self._resolve_ambiguous_columns(col)
         return col
 
     def _convert_leaf_to_cte(
@@ -589,6 +595,23 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         )
         return [col]
 
+    def _update_display_name_mapping(
+        self, normalized_columns: t.List[Column], user_input: t.Iterable[ColumnOrName]
+    ) -> None:
+        from sqlframe.base.column import Column
+
+        normalized_aliases = [x.alias_or_name for x in normalized_columns]
+        user_display_names = [
+            x.expression.meta.get("display_name") if isinstance(x, Column) else x
+            for x in user_input
+        ]
+        zipped = {
+            k: v
+            for k, v in dict(zip(normalized_aliases, user_display_names)).items()
+            if v is not None
+        }
+        self.display_name_mapping.update(zipped)
+
     def _get_expressions(
         self,
         optimize: bool = True,
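Note: the bookkeeping above pairs each normalized output name with whatever casing the user originally typed, dropping entries where no display name was captured. A toy illustration with made-up values:

    normalized_aliases = ["employeeid", "dept"]
    user_display_names = ["EmployeeId", None]  # None: no display name was recorded
    mapping = {
        k: v
        for k, v in dict(zip(normalized_aliases, user_display_names)).items()
        if v is not None
    }
    print(mapping)  # {'employeeid': 'EmployeeId'}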
@@ -608,6 +631,16 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
             select_expression = select_expression.transform(
                 replace_id_value, replacement_mapping
             ).assert_is(exp.Select)
+            for index, column in enumerate(select_expression.expressions):
+                column_name = quote_preserving_alias_or_name(column)
+                if column_name in self.display_name_mapping:
+                    display_name_identifier = exp.to_identifier(
+                        self.display_name_mapping[column_name], quoted=True
+                    )
+                    display_name_identifier._meta = {"case_sensitive": True, **(column._meta or {})}
+                    select_expression.expressions[index] = exp.alias_(
+                        column.unalias(), display_name_identifier, quoted=True
+                    )
             if optimize:
                 select_expression = t.cast(
                     exp.Select,
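Note: the stored display name is only re-applied at SQL-generation time, by re-aliasing the output column with a quoted identifier. A hedged sqlglot-only sketch of that step (the real code above also consults quote_preserving_alias_or_name and carries over column metadata):

    import sqlglot
    from sqlglot import exp

    display_name_mapping = {"employeeid": "EmployeeId"}  # illustrative contents

    select_expression = sqlglot.parse_one("SELECT employeeid FROM t")
    for index, column in enumerate(select_expression.expressions):
        if column.alias_or_name in display_name_mapping:
            ident = exp.to_identifier(display_name_mapping[column.alias_or_name], quoted=True)
            select_expression.expressions[index] = exp.alias_(column.unalias(), ident)
    print(select_expression.sql())  # SELECT employeeid AS "EmployeeId" FROM t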
@@ -745,59 +778,73 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
             kwargs["join_on_uuid"] = str(uuid4())
         return self.__class__(**object_to_dict(self, **kwargs))
 
+    def _resolve_ambiguous_columns(self, columns: t.Union[Column, t.List[Column]]) -> None:
+        if "joins" not in self.expression.args:
+            return
+
+        columns = ensure_list(columns)
+        ambiguous_cols: t.List[exp.Column] = list(
+            flatten(
+                [
+                    sub_col
+                    for col in columns
+                    for sub_col in col.expression.find_all(exp.Column)
+                    if not sub_col.table
+                ]
+            )
+        )
+        if ambiguous_cols:
+            join_table_identifiers = [
+                x.this for x in get_tables_from_expression_with_join(self.expression)
+            ]
+            cte_names_in_join = [x.this for x in join_table_identifiers]
+            # If we have columns that resolve to multiple CTE expressions then we want to use each CTE left-to-right
+            # (or right to left if a right join) and therefore we allow multiple columns with the same
+            # name in the result. This matches the behavior of Spark.
+            resolved_column_position: t.Dict[exp.Column, int] = {
+                col.copy(): -1 for col in ambiguous_cols
+            }
+            for ambiguous_col in ambiguous_cols:
+                ctes = (
+                    list(reversed(self.expression.ctes))
+                    if self.expression.args["joins"][0].args.get("side", "") == "right"
+                    else self.expression.ctes
+                )
+                ctes_with_column = [
+                    cte
+                    for cte in ctes
+                    if cte.alias_or_name in cte_names_in_join
+                    and ambiguous_col.alias_or_name in cte.this.named_selects
+                ]
+                # Check if there is a CTE with this column that we haven't used before. If so, use it. Otherwise,
+                # use the same CTE we used before
+                cte = seq_get(ctes_with_column, resolved_column_position[ambiguous_col] + 1)
+                if cte:
+                    resolved_column_position[ambiguous_col] += 1
+                else:
+                    cte = ctes_with_column[resolved_column_position[ambiguous_col]]
+                ambiguous_col.set("table", exp.to_identifier(cte.alias_or_name))
+
     @operation(Operation.SELECT)
     def select(self, *cols, **kwargs) -> Self:
-        from sqlframe.base.column import Column
-
         if not cols:
             return self
 
         if isinstance(cols[0], list):
             cols = cols[0]  # type: ignore
         columns = self._ensure_and_normalize_cols(cols)
+        if "skip_update_display_name_mapping" not in kwargs:
+            unexpanded_columns = self._ensure_and_normalize_cols(cols, skip_star_expansion=True)
+            user_cols = list(cols)
+            star_columns = []
+            for index, user_col in enumerate(cols):
+                if "*" in (user_col if isinstance(user_col, str) else user_col.alias_or_name):
+                    star_columns.append(index)
+            for index in star_columns:
+                unexpanded_columns.pop(index)
+                user_cols.pop(index)
+            self._update_display_name_mapping(unexpanded_columns, user_cols)
         kwargs["append"] = kwargs.get("append", False)
-        if self.expression.args.get("joins"):
-            ambiguous_cols: t.List[exp.Column] = list(
-                flatten(
-                    [
-                        sub_col
-                        for col in columns
-                        for sub_col in col.expression.find_all(exp.Column)
-                        if not sub_col.table
-                    ]
-                )
-            )
-            if ambiguous_cols:
-                join_table_identifiers = [
-                    x.this for x in get_tables_from_expression_with_join(self.expression)
-                ]
-                cte_names_in_join = [x.this for x in join_table_identifiers]
-                # If we have columns that resolve to multiple CTE expressions then we want to use each CTE left-to-right
-                # (or right to left if a right join) and therefore we allow multiple columns with the same
-                # name in the result. This matches the behavior of Spark.
-                resolved_column_position: t.Dict[exp.Column, int] = {
-                    col.copy(): -1 for col in ambiguous_cols
-                }
-                for ambiguous_col in ambiguous_cols:
-                    ctes = (
-                        list(reversed(self.expression.ctes))
-                        if self.expression.args["joins"][0].args.get("side", "") == "right"
-                        else self.expression.ctes
-                    )
-                    ctes_with_column = [
-                        cte
-                        for cte in ctes
-                        if cte.alias_or_name in cte_names_in_join
-                        and ambiguous_col.alias_or_name in cte.this.named_selects
-                    ]
-                    # Check if there is a CTE with this column that we haven't used before. If so, use it. Otherwise,
-                    # use the same CTE we used before
-                    cte = seq_get(ctes_with_column, resolved_column_position[ambiguous_col] + 1)
-                    if cte:
-                        resolved_column_position[ambiguous_col] += 1
-                    else:
-                        cte = ctes_with_column[resolved_column_position[ambiguous_col]]
-                    ambiguous_col.set("table", exp.to_identifier(cte.alias_or_name))
         # If an expression is `CAST(x AS DATETYPE)` then we want to alias so that `x` is the result column name
         columns = [
             col.alias(col.expression.alias_or_name)
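Note: the comment in the extracted _resolve_ambiguous_columns method describes the resolution rule; the toy model below uses plain lists (no sqlframe objects) to show how repeated unqualified references consume the joined CTEs left to right and then stick to the last one, mirroring Spark's duplicate-column behavior:

    # CTEs from the join, in order, that expose a column named "name"
    ctes_with_column = ["cte_left", "cte_right"]
    resolved_position = -1  # -1 means "no CTE used yet"
    for reference in range(3):  # three unqualified references to "name"
        if resolved_position + 1 < len(ctes_with_column):
            resolved_position += 1  # an unused CTE remains: take it
        print(f"name -> {ctes_with_column[resolved_position]}.name")
    # name -> cte_left.name
    # name -> cte_right.name
    # name -> cte_right.name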
@@ -846,6 +893,7 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
     @operation(Operation.SELECT)
     def agg(self, *exprs, **kwargs) -> Self:
         cols = self._ensure_and_normalize_cols(exprs)
+        self._update_display_name_mapping(cols, exprs)
         return self.groupBy().agg(*cols)
 
     @operation(Operation.FROM)
@@ -1045,7 +1093,9 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         new_df = self.copy(expression=join_expression)
         new_df.pending_join_hints.extend(self.pending_join_hints)
         new_df.pending_hints.extend(other_df.pending_hints)
-        new_df = new_df.select.__wrapped__(new_df, *select_column_names)  # type: ignore
+        new_df = new_df.select.__wrapped__(  # type: ignore
+            new_df, *select_column_names, skip_update_display_name_mapping=True
+        )
         return new_df
 
     @operation(Operation.ORDER_BY)
@@ -1435,20 +1485,18 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
     def withColumnRenamed(self, existing: str, new: str) -> Self:
         expression = self.expression.copy()
         existing = self.session._normalize_string(existing)
-        new = self.session._normalize_string(new)
-        existing_columns = [
-            expression
-            for expression in expression.expressions
-            if expression.alias_or_name == existing
-        ]
-        if not existing_columns:
+        columns = self._get_outer_select_columns(expression)
+        results = []
+        found_match = False
+        for column in columns:
+            if column.alias_or_name == existing:
+                column = column.alias(new)
+                self._update_display_name_mapping([column], [new])
+                found_match = True
+            results.append(column)
+        if not found_match:
             raise ValueError("Tried to rename a column that doesn't exist")
-        for existing_column in existing_columns:
-            if isinstance(existing_column, exp.Column):
-                existing_column.replace(exp.alias_(existing_column, new))
-            else:
-                existing_column.set("alias", exp.to_identifier(new))
-        return self.copy(expression=expression)
+        return self.select.__wrapped__(self, *results, skip_update_display_name_mapping=True)  # type: ignore
 
     @operation(Operation.SELECT)
     def withColumns(self, *colsMap: t.Dict[str, Column]) -> Self:
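Note: a hedged usage sketch of the net effect of the rename change (assuming the standalone session and that createDataFrame accepts a list of dicts, as in the project README). Because the rename is now emitted as a SELECT with an alias, the new name's casing should survive normalization:

    from sqlframe.standalone import StandaloneSession

    session = StandaloneSession()
    df = session.createDataFrame([{"employee_id": 1}])
    renamed = df.withColumnRenamed("employee_id", "EmployeeID")
    print(renamed.columns)  # expected: ['EmployeeID']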
@@ -1489,23 +1537,27 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         if len(colsMap) != 1:
             raise ValueError("Only a single map is supported")
         col_map = {
-            self._ensure_and_normalize_col(k).alias_or_name: self._ensure_and_normalize_col(v)
+            self._ensure_and_normalize_col(k): (self._ensure_and_normalize_col(v), k)
             for k, v in colsMap[0].items()
         }
         existing_cols = self._get_outer_select_columns(self.expression)
         existing_col_names = [x.alias_or_name for x in existing_cols]
         select_columns = existing_cols
-        for column_name, col_value in col_map.items():
+        for col, (col_value, display_name) in col_map.items():
+            column_name = col.alias_or_name
             existing_col_index = (
                 existing_col_names.index(column_name) if column_name in existing_col_names else None
             )
             if existing_col_index is not None:
                 select_columns[existing_col_index] = col_value.alias(  # type: ignore
-                    column_name
-                ).expression
+                    display_name
+                )
             else:
-                select_columns.append(col_value.alias(column_name))
-        return self.select.__wrapped__(self, *select_columns)  # type: ignore
+                select_columns.append(col_value.alias(display_name))
+        self._update_display_name_mapping(
+            [col for col in col_map], [name for _, name in col_map.values()]
+        )
+        return self.select.__wrapped__(self, *select_columns, skip_update_display_name_mapping=True)  # type: ignore
 
     @operation(Operation.SELECT)
     def drop(self, *cols: t.Union[str, Column]) -> Self:
sqlframe/base/decorators.py CHANGED
@@ -43,7 +43,7 @@ def func_metadata(unsupported_engines: t.Optional[t.Union[str, t.List[str]]] = N
                     col_name = col_name.this
                 alias_name = f"{func.__name__}__{col_name or ''}__"
                 # BigQuery has restrictions on alias names so we constrain it to alphanumeric characters and underscores
-                return result.alias(re.sub("\W", "_", alias_name))  # type: ignore
+                return result.alias(re.sub(r"\W", "_", alias_name))  # type: ignore
             return result
 
         wrapper.unsupported_engines = (  # type: ignore
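Note: the one-character fix above silences Python's invalid-escape warning: in a plain string literal "\W" is an unrecognized escape sequence (a DeprecationWarning that newer CPython promotes to SyntaxWarning), while the raw string hands the regex engine the same two characters without complaint:

    import re

    alias_name = "btrim__my col!__"
    # r"\W" matches any non-word character; the space and "!" become "_"
    print(re.sub(r"\W", "_", alias_name))  # btrim__my_col___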
sqlframe/base/functions.py CHANGED
@@ -39,11 +39,19 @@ def col(column_name: t.Union[ColumnOrName, t.Any]) -> Column:
 
     dialect = _BaseSession().input_dialect
     if isinstance(column_name, str):
-        return Column(
-            expression.to_column(column_name, dialect=dialect).transform(
-                dialect.normalize_identifier
-            )
+        col_expression = expression.to_column(column_name, dialect=dialect).transform(
+            dialect.normalize_identifier
         )
+        case_sensitive_expression = expression.to_column(column_name, dialect=dialect)
+        if not isinstance(
+            case_sensitive_expression, (expression.Star, expression.Literal, expression.Null)
+        ):
+            col_expression._meta = {
+                "display_name": case_sensitive_expression.this.this,
+                **(col_expression._meta or {}),
+            }
+
+        return Column(col_expression)
     return Column(column_name)
 
 
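Note: the reworked col() parses the name twice, once normalized for the engine and once untouched so the original casing can be recorded. A hedged sqlglot sketch of that split (assuming a recent sqlglot where Dialect.get_or_raise returns an instance; Snowflake is used only because it visibly uppercases unquoted identifiers):

    from sqlglot import expressions as expression
    from sqlglot.dialects.dialect import Dialect

    dialect = Dialect.get_or_raise("snowflake")
    col_expression = expression.to_column("EmployeeId").transform(dialect.normalize_identifier)
    case_sensitive_expression = expression.to_column("EmployeeId")
    print(col_expression.sql())                 # EMPLOYEEID (normalized for the engine)
    print(case_sensitive_expression.this.this)  # EmployeeId (preserved display name)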
@@ -2851,12 +2859,14 @@ def bool_or(col: ColumnOrName) -> Column:
     return Column.invoke_expression_over_column(col, expression.LogicalOr)
 
 
-@meta(unsupported_engines="*")
+@meta()
 def btrim(str: ColumnOrName, trim: t.Optional[ColumnOrName] = None) -> Column:
     if trim is not None:
-        return Column.invoke_anonymous_function(str, "btrim", trim)
+        return Column.invoke_expression_over_column(
+            str, expression.Trim, expression=Column.ensure_col(trim).column_expression
+        )
     else:
-        return Column.invoke_anonymous_function(str, "btrim")
+        return Column.invoke_expression_over_column(str, expression.Trim)
 
 
 @meta(unsupported_engines="*")
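Note: replacing the anonymous btrim() call with sqlglot's semantic Trim node is what lets every engine render its own trim syntax (hence the lifted unsupported_engines="*" restriction). A hedged sqlglot-only sketch; the two-argument rendering varies by dialect, so it is not asserted here:

    from sqlglot import exp

    print(exp.Trim(this=exp.column("s")).sql())  # TRIM(s)
    two_arg = exp.Trim(this=exp.column("s"), expression=exp.Literal.string("x"))
    print(two_arg.sql("postgres"))  # engine-specific syntax for trimming 'x' from s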
sqlframe/base/session.py CHANGED
@@ -507,9 +507,14 @@ class _BaseSession(t.Generic[CATALOG, READER, WRITER, DF, TABLE, CONN, UDF_REGIS
         result = self._cur.fetchall()
         if not self._cur.description:
             return []
+        case_sensitive_cols = []
+        for col in self._cur.description:
+            col_id = exp.parse_identifier(col[0], dialect=self.execution_dialect)
+            col_id._meta = {"case_sensitive": True, **(col_id._meta or {})}
+            case_sensitive_cols.append(col_id)
         columns = [
-            normalize_string(x[0], from_dialect="execution", to_dialect="output", is_column=True)
-            for x in self._cur.description
+            normalize_string(x, from_dialect="execution", to_dialect="output")
+            for x in case_sensitive_cols
        ]
         return [self._to_row(columns, row) for row in result]
 
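Note: this change parses each cursor column label into an identifier and tags it case-sensitive so output normalization preserves its casing; the Spark session below applies the same trick to collected rows. A minimal sqlglot sketch (the dialect name is illustrative):

    from sqlglot import exp

    col_id = exp.parse_identifier("EmployeeId", dialect="duckdb")
    col_id._meta = {"case_sensitive": True, **(col_id._meta or {})}
    print(col_id.this, col_id.meta["case_sensitive"])  # EmployeeId True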
sqlframe/spark/session.py CHANGED
@@ -79,17 +79,18 @@ class SparkSession(
         if skip_rows:
             return []
         assert self._last_df is not None
-        return [
-            Row(
-                **{
-                    normalize_string(
-                        k, from_dialect="execution", to_dialect="output", is_column=True
-                    ): v
-                    for k, v in row.asDict().items()
-                }
-            )
-            for row in self._last_df.collect()
-        ]
+        results = []
+        for row in self._last_df.collect():
+            rows_normalized = {}
+            for k, v in row.asDict().items():
+                col_id = exp.parse_identifier(k, dialect=self.execution_dialect)
+                col_id._meta = {"case_sensitive": True, **(col_id._meta or {})}
+                col_name = normalize_string(
+                    col_id, from_dialect="execution", to_dialect="output", is_column=True
+                )
+                rows_normalized[col_name] = v
+            results.append(Row(**rows_normalized))
+        return results
 
     def _execute(self, sql: str) -> None:
         self._last_df = self.spark_session.sql(sql)
sqlframe-3.15.1.dist-info/METADATA → sqlframe-3.17.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sqlframe
-Version: 3.15.1
+Version: 3.17.0
 Summary: Turning PySpark Into a Universal DataFrame API
 Home-page: https://github.com/eakmanrq/sqlframe
 Author: Ryan Eakman
sqlframe-3.15.1.dist-info/RECORD → sqlframe-3.17.0.dist-info/RECORD RENAMED
@@ -1,19 +1,19 @@
 sqlframe/__init__.py,sha256=wfqm98eLoLid9oV_FzzpG5loKC6LxOhj2lXpfN7SARo,3138
-sqlframe/_version.py,sha256=rNfI2qI8EULJid-fGjytQ8KiqfMi0Ktaq6sNSFSM_1s,413
+sqlframe/_version.py,sha256=KdbrTz1mygb-tPODYZu2E4Sk2KYmeTUCHVpQLRpXAXo,413
 sqlframe/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sqlframe/base/_typing.py,sha256=b2clI5HI1zEZKB_3Msx3FeAJQyft44ubUifJwQRVXyQ,1298
 sqlframe/base/catalog.py,sha256=SzFQalTWdhWzxUY-4ut1f9TfOECp_JmJEgNPfrRKCe0,38457
-sqlframe/base/column.py,sha256=wRghgieYAA51aw4WuFQWOvl0TFOToZbBhBuIamEzxx4,18011
-sqlframe/base/dataframe.py,sha256=E1zWlB_a2FNOxjTcQ68MtL_A4c8fnLiHY3MeZttK4Xk,76570
-sqlframe/base/decorators.py,sha256=P56cgs8DANxGRIwVs5uOMnDy-BlXZZYMbf4fdnkpWPI,1889
+sqlframe/base/column.py,sha256=oHVwkSWABO3ZlAbgBShsxSSlgbI06BOup5XJrRhgqJI,18097
+sqlframe/base/dataframe.py,sha256=SQtwoQKpq-12WXuplOPN21fXQPvjF_D9WLcPPFA12Zs,78973
+sqlframe/base/decorators.py,sha256=ms-CvDOIW3T8IVB9VqDmLwAiaEsqXLYRXEqVQaxktiM,1890
 sqlframe/base/exceptions.py,sha256=9Uwvqn2eAkDpqm4BrRgbL61qM-GMCbJEMAW8otxO46s,370
 sqlframe/base/function_alternatives.py,sha256=NV31IaEhVYmfUSWetAEFISAvLzs2DxQ7bp-iMNgj0hQ,53786
-sqlframe/base/functions.py,sha256=9mN54Nx6yqos1njfyW2-WRzfFUsA96P9z1ldJVtovSs,220543
+sqlframe/base/functions.py,sha256=1LHxazgC9tZ_GzyWNsjU945SRnAsQjUH2easMJLU3h4,221012
 sqlframe/base/group.py,sha256=fsyG5990_Pd7gFPjTFrH9IEoAquL_wEkVpIlBAIkZJU,4091
 sqlframe/base/normalize.py,sha256=nXAJ5CwxVf4DV0GsH-q1w0p8gmjSMlv96k_ez1eVul8,3880
 sqlframe/base/operations.py,sha256=xSPw74e59wYvNd6U1AlwziNCTG6Aftrbl4SybN9u9VE,3450
 sqlframe/base/readerwriter.py,sha256=w8926cqIrXF7NGHiINw5UHzP_3xpjsqbijTBTzycBRM,26605
-sqlframe/base/session.py,sha256=s9M9_nbtOQQgLyEBZs-ijkMeHkYkILHfBc8JsU2SLmU,26369
+sqlframe/base/session.py,sha256=0eBE_HYEb3npyyOGM7zS_VR8WgzvfgVI-PFLCK9Hy0M,26628
 sqlframe/base/table.py,sha256=rCeh1W5SWbtEVfkLAUiexzrZwNgmZeptLEmLcM1ABkE,6961
 sqlframe/base/transforms.py,sha256=y0j3SGDz3XCmNGrvassk1S-owllUWfkHyMgZlY6SFO4,467
 sqlframe/base/types.py,sha256=iBNk9bpFtb2NBIogYS8i7OlQZMRvpR6XxqzBebsjQDU,12280
@@ -110,7 +110,7 @@ sqlframe/spark/functions.py,sha256=MYCgHsjRQWylT-rezWRBuLV6BivcaVarbaQtP4T0toQ,3
 sqlframe/spark/functions.pyi,sha256=GyOdUzv2Z7Qt99JAKEPKgV2t2Rn274OuqwAfcoAXlN0,24259
 sqlframe/spark/group.py,sha256=MrvV_v-YkBc6T1zz882WrEqtWjlooWIyHBCmTQg3fCA,379
 sqlframe/spark/readwriter.py,sha256=zXZcCPWpQMMN90wdIx8AD4Y5tWBcpRSL4-yKX2aZyik,874
-sqlframe/spark/session.py,sha256=1kgi69uztJxJ6bJpgkpRxllOYgVrizKXA5iT88-jWKA,5421
+sqlframe/spark/session.py,sha256=9qG-J5L8gmiy384GZFSBT2tHF8akqqJNij23Y3pheMs,5651
 sqlframe/spark/table.py,sha256=puWV8h_CqA64zwpzq0ydY9LoygMAvprkODyxyzZeF9M,186
 sqlframe/spark/types.py,sha256=KwNyuXIo-2xVVd4bZED3YrQOobKCtemlxGrJL7DrTC8,34
 sqlframe/spark/udf.py,sha256=owB8NDaGVkUQ0WGm7SZt2t9zfvLFCfi0W48QiPfgjck,1153
@@ -129,8 +129,8 @@ sqlframe/standalone/udf.py,sha256=azmgtUjHNIPs0WMVNId05SHwiYn41MKVBhKXsQJ5dmY,27
 sqlframe/standalone/window.py,sha256=6GKPzuxeSapJakBaKBeT9VpED1ACdjggDv9JRILDyV0,35
 sqlframe/testing/__init__.py,sha256=VVCosQhitU74A3NnE52O4mNtGZONapuEXcc20QmSlnQ,132
 sqlframe/testing/utils.py,sha256=PFsGZpwNUE_4-g_f43_vstTqsK0AQ2lBneb5Eb6NkFo,13008
-sqlframe-3.15.1.dist-info/LICENSE,sha256=VZu79YgW780qxaFJMr0t5ZgbOYEh04xWoxaWOaqIGWk,1068
-sqlframe-3.15.1.dist-info/METADATA,sha256=-MxovSCoyQnT-6Ujd4BDA_yVpf9KWra2v1CQGN2TmG4,8970
-sqlframe-3.15.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
-sqlframe-3.15.1.dist-info/top_level.txt,sha256=T0_RpoygaZSF6heeWwIDQgaP0varUdSK1pzjeJZRjM8,9
-sqlframe-3.15.1.dist-info/RECORD,,
+sqlframe-3.17.0.dist-info/LICENSE,sha256=VZu79YgW780qxaFJMr0t5ZgbOYEh04xWoxaWOaqIGWk,1068
+sqlframe-3.17.0.dist-info/METADATA,sha256=K8kfOT5t6cEBs4YsIK76QCFBPW2NEcDcsPMkEhWCLUI,8970
+sqlframe-3.17.0.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+sqlframe-3.17.0.dist-info/top_level.txt,sha256=T0_RpoygaZSF6heeWwIDQgaP0varUdSK1pzjeJZRjM8,9
+sqlframe-3.17.0.dist-info/RECORD,,