sqlframe 3.16.0__py3-none-any.whl → 3.17.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sqlframe/_version.py +2 -2
- sqlframe/base/column.py +1 -0
- sqlframe/base/dataframe.py +71 -24
- sqlframe/base/functions.py +12 -4
- sqlframe/base/session.py +27 -10
- sqlframe/bigquery/session.py +1 -1
- sqlframe/databricks/session.py +1 -1
- sqlframe/duckdb/session.py +1 -1
- sqlframe/postgres/session.py +1 -1
- sqlframe/snowflake/session.py +1 -1
- sqlframe/spark/session.py +13 -12
- sqlframe/standalone/session.py +1 -1
- {sqlframe-3.16.0.dist-info → sqlframe-3.17.1.dist-info}/METADATA +3 -3
- {sqlframe-3.16.0.dist-info → sqlframe-3.17.1.dist-info}/RECORD +17 -17
- {sqlframe-3.16.0.dist-info → sqlframe-3.17.1.dist-info}/LICENSE +0 -0
- {sqlframe-3.16.0.dist-info → sqlframe-3.17.1.dist-info}/WHEEL +0 -0
- {sqlframe-3.16.0.dist-info → sqlframe-3.17.1.dist-info}/top_level.txt +0 -0
sqlframe/_version.py
CHANGED
sqlframe/base/column.py
CHANGED
@@ -291,6 +291,7 @@ class Column:
             this=self.column_expression,
             alias=alias.this if isinstance(alias, exp.Column) else alias,
         )
+        new_expression._meta = {"display_name": name, **(new_expression._meta or {})}
         return Column(new_expression)

     def asc(self) -> Column:

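The new line in Column.alias stores the alias exactly as the caller typed it in the expression's metadata, which the dataframe.py changes below read back. A minimal sketch of that metadata mechanism using sqlglot directly (the identifier name here is made up):

    from sqlglot import exp

    # Attach the same style of metadata the diff adds to an aliased expression.
    ident = exp.to_identifier("EmployeeId", quoted=True)
    ident._meta = {"display_name": "EmployeeId", **(ident._meta or {})}

    # The metadata rides along with the expression tree and can be read back later.
    assert ident.meta["display_name"] == "EmployeeId"
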
sqlframe/base/dataframe.py
CHANGED
@@ -233,6 +233,7 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         last_op: Operation = Operation.INIT,
         pending_hints: t.Optional[t.List[exp.Expression]] = None,
         output_expression_container: t.Optional[OutputExpressionContainer] = None,
+        display_name_mapping: t.Optional[t.Dict[str, str]] = None,
         **kwargs,
     ):
         self.session = session

@@ -246,6 +247,7 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         self.pending_hints = pending_hints or []
         self.output_expression_container = output_expression_container or exp.Select()
         self.temp_views: t.List[exp.Select] = []
+        self.display_name_mapping = display_name_mapping or {}

     def __getattr__(self, column_name: str) -> Column:
         return self[column_name]

@@ -385,13 +387,14 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         return Column.ensure_cols(ensure_list(cols))  # type: ignore

     def _ensure_and_normalize_cols(
-        self, cols, expression: t.Optional[exp.Select] = None
+        self, cols, expression: t.Optional[exp.Select] = None, skip_star_expansion: bool = False
     ) -> t.List[Column]:
         from sqlframe.base.normalize import normalize

         cols = self._ensure_list_of_columns(cols)
         normalize(self.session, expression or self.expression, cols)
-        cols = list(flatten([self._expand_star(col) for col in cols]))
+        if not skip_star_expansion:
+            cols = list(flatten([self._expand_star(col) for col in cols]))
         self._resolve_ambiguous_columns(cols)
         return cols

@@ -592,6 +595,23 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
             )
         return [col]

+    def _update_display_name_mapping(
+        self, normalized_columns: t.List[Column], user_input: t.Iterable[ColumnOrName]
+    ) -> None:
+        from sqlframe.base.column import Column
+
+        normalized_aliases = [x.alias_or_name for x in normalized_columns]
+        user_display_names = [
+            x.expression.meta.get("display_name") if isinstance(x, Column) else x
+            for x in user_input
+        ]
+        zipped = {
+            k: v
+            for k, v in dict(zip(normalized_aliases, user_display_names)).items()
+            if v is not None
+        }
+        self.display_name_mapping.update(zipped)
+
     def _get_expressions(
         self,
         optimize: bool = True,

@@ -611,6 +631,16 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         select_expression = select_expression.transform(
             replace_id_value, replacement_mapping
         ).assert_is(exp.Select)
+        for index, column in enumerate(select_expression.expressions):
+            column_name = quote_preserving_alias_or_name(column)
+            if column_name in self.display_name_mapping:
+                display_name_identifier = exp.to_identifier(
+                    self.display_name_mapping[column_name], quoted=True
+                )
+                display_name_identifier._meta = {"case_sensitive": True, **(column._meta or {})}
+                select_expression.expressions[index] = exp.alias_(
+                    column.unalias(), display_name_identifier, quoted=True
+                )
         if optimize:
             select_expression = t.cast(
                 exp.Select,

@@ -792,8 +822,9 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
                 if cte:
                     resolved_column_position[ambiguous_col] += 1
                 else:
-                    cte = ctes_with_column
-
+                    cte = seq_get(ctes_with_column, resolved_column_position[ambiguous_col])
+                if cte:
+                    ambiguous_col.set("table", exp.to_identifier(cte.alias_or_name))

     @operation(Operation.SELECT)
     def select(self, *cols, **kwargs) -> Self:

@@ -803,6 +834,17 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         if isinstance(cols[0], list):
             cols = cols[0]  # type: ignore
         columns = self._ensure_and_normalize_cols(cols)
+        if "skip_update_display_name_mapping" not in kwargs:
+            unexpanded_columns = self._ensure_and_normalize_cols(cols, skip_star_expansion=True)
+            user_cols = list(cols)
+            star_columns = []
+            for index, user_col in enumerate(cols):
+                if "*" in (user_col if isinstance(user_col, str) else user_col.alias_or_name):
+                    star_columns.append(index)
+            for index in star_columns:
+                unexpanded_columns.pop(index)
+                user_cols.pop(index)
+            self._update_display_name_mapping(unexpanded_columns, user_cols)
         kwargs["append"] = kwargs.get("append", False)
         # If an expression is `CAST(x AS DATETYPE)` then we want to alias so that `x` is the result column name
         columns = [

@@ -852,6 +894,7 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
     @operation(Operation.SELECT)
     def agg(self, *exprs, **kwargs) -> Self:
         cols = self._ensure_and_normalize_cols(exprs)
+        self._update_display_name_mapping(cols, exprs)
         return self.groupBy().agg(*cols)

     @operation(Operation.FROM)

@@ -1051,7 +1094,9 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         new_df = self.copy(expression=join_expression)
         new_df.pending_join_hints.extend(self.pending_join_hints)
         new_df.pending_hints.extend(other_df.pending_hints)
-        new_df = new_df.select.__wrapped__(
+        new_df = new_df.select.__wrapped__(  # type: ignore
+            new_df, *select_column_names, skip_update_display_name_mapping=True
+        )
         return new_df

     @operation(Operation.ORDER_BY)

@@ -1441,20 +1486,18 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
     def withColumnRenamed(self, existing: str, new: str) -> Self:
         expression = self.expression.copy()
         existing = self.session._normalize_string(existing)
-
-
-
-
-        if
-
-
+        columns = self._get_outer_select_columns(expression)
+        results = []
+        found_match = False
+        for column in columns:
+            if column.alias_or_name == existing:
+                column = column.alias(new)
+                self._update_display_name_mapping([column], [new])
+                found_match = True
+            results.append(column)
+        if not found_match:
             raise ValueError("Tried to rename a column that doesn't exist")
-
-        if isinstance(existing_column, exp.Column):
-            existing_column.replace(exp.alias_(existing_column, new))
-        else:
-            existing_column.set("alias", exp.to_identifier(new))
-        return self.copy(expression=expression)
+        return self.select.__wrapped__(self, *results, skip_update_display_name_mapping=True)  # type: ignore

     @operation(Operation.SELECT)
     def withColumns(self, *colsMap: t.Dict[str, Column]) -> Self:

@@ -1495,23 +1538,27 @@ class BaseDataFrame(t.Generic[SESSION, WRITER, NA, STAT, GROUP_DATA]):
         if len(colsMap) != 1:
             raise ValueError("Only a single map is supported")
         col_map = {
-            self._ensure_and_normalize_col(k)
+            self._ensure_and_normalize_col(k): (self._ensure_and_normalize_col(v), k)
             for k, v in colsMap[0].items()
         }
         existing_cols = self._get_outer_select_columns(self.expression)
         existing_col_names = [x.alias_or_name for x in existing_cols]
         select_columns = existing_cols
-        for
+        for col, (col_value, display_name) in col_map.items():
+            column_name = col.alias_or_name
             existing_col_index = (
                 existing_col_names.index(column_name) if column_name in existing_col_names else None
             )
             if existing_col_index is not None:
                 select_columns[existing_col_index] = col_value.alias(  # type: ignore
-
-                )
+                    display_name
+                )
             else:
-                select_columns.append(col_value.alias(
-
+                select_columns.append(col_value.alias(display_name))
+        self._update_display_name_mapping(
+            [col for col in col_map], [name for _, name in col_map.values()]
+        )
+        return self.select.__wrapped__(self, *select_columns, skip_update_display_name_mapping=True)  # type: ignore

     @operation(Operation.SELECT)
     def drop(self, *cols: t.Union[str, Column]) -> Self:

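Taken together, these dataframe.py changes thread a display_name_mapping from the user's input through to the final generated SELECT, so selected, renamed, and aggregated columns keep the spelling the caller used. A rough usage sketch of the intended effect, assuming a DuckDB-backed session (the data and column names are illustrative, and the exact result casing ultimately depends on the engine):

    from sqlframe.duckdb import DuckDBSession

    session = DuckDBSession()
    df = session.createDataFrame([{"employee_id": 1, "employee_name": "Jack"}])

    # The rename's new name is recorded in display_name_mapping, so the outer
    # SELECT should alias the column back to this exact spelling.
    renamed = df.withColumnRenamed("employee_id", "EmployeeID")
    print(renamed.columns)  # expected: ['EmployeeID', 'employee_name']
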
sqlframe/base/functions.py
CHANGED
@@ -39,11 +39,19 @@ def col(column_name: t.Union[ColumnOrName, t.Any]) -> Column:

     dialect = _BaseSession().input_dialect
     if isinstance(column_name, str):
-
-
-                dialect.normalize_identifier
-            )
+        col_expression = expression.to_column(column_name, dialect=dialect).transform(
+            dialect.normalize_identifier
         )
+        case_sensitive_expression = expression.to_column(column_name, dialect=dialect)
+        if not isinstance(
+            case_sensitive_expression, (expression.Star, expression.Literal, expression.Null)
+        ):
+            col_expression._meta = {
+                "display_name": case_sensitive_expression.this.this,
+                **(col_expression._meta or {}),
+            }
+
+        return Column(col_expression)
     return Column(column_name)

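With this change, col() still normalizes the identifier for the target dialect but, unless the argument is a star, literal, or null, it also records the name exactly as typed as display_name metadata, which the dataframe changes above turn back into a quoted alias. A hedged sketch of what that looks like in use (the session type and column names are assumptions, not taken from the diff):

    from sqlframe.duckdb import DuckDBSession
    from sqlframe.duckdb import functions as F

    session = DuckDBSession()
    df = session.createDataFrame([{"FirstName": "Ada", "LastName": "Lovelace"}])

    # "FirstName" is normalized for the engine internally, but the original
    # spelling is kept as display_name metadata and restored in the result.
    print(df.select(F.col("FirstName")).columns)  # expected: ['FirstName']
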
sqlframe/base/session.py
CHANGED
@@ -2,6 +2,7 @@

 from __future__ import annotations

+import contextlib
 import datetime
 import logging
 import sys

@@ -213,13 +214,16 @@ class _BaseSession(t.Generic[CATALOG, READER, WRITER, DF, TABLE, CONN, UDF_REGIS

     def createDataFrame(
         self,
-        data: t.
-            t.
-                t.
-
-
-
-
+        data: t.Union[
+            t.Sequence[
+                t.Union[
+                    t.Dict[str, ColumnLiterals],
+                    t.List[ColumnLiterals],
+                    t.Tuple[ColumnLiterals, ...],
+                    ColumnLiterals,
+                ],
+            ],
+            pd.DataFrame,
         ],
         schema: t.Optional[SchemaInput] = None,
         samplingRatio: t.Optional[float] = None,

@@ -240,11 +244,18 @@ class _BaseSession(t.Generic[CATALOG, READER, WRITER, DF, TABLE, CONN, UDF_REGIS
         ):
             raise NotImplementedError("Only schema of either list or string of list supported")

+        with contextlib.suppress(ImportError):
+            from pandas import DataFrame as pd_DataFrame
+
+            if isinstance(data, pd_DataFrame):
+                data = data.to_dict("records")  # type: ignore
+
         column_mapping: t.Mapping[str, t.Optional[exp.DataType]]
         if schema is not None:
             column_mapping = get_column_mapping_from_schema_input(
                 schema, dialect=self.input_dialect
             )
+
         elif data:
             if isinstance(data[0], Row):
                 column_mapping = {col_name.strip(): None for col_name in data[0].__fields__}

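With the suppressed-import block above, createDataFrame now also accepts a pandas DataFrame when pandas is installed, converting it to a list of record dicts before the usual schema handling. A brief sketch of the call (pandas availability and the column names are assumptions):

    import pandas as pd

    from sqlframe.duckdb import DuckDBSession

    session = DuckDBSession()
    pdf = pd.DataFrame({"id": [1, 2], "score": [0.5, 0.9]})

    # Internally this is treated like passing pdf.to_dict("records").
    df = session.createDataFrame(pdf)
    df.show()
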
@@ -386,7 +397,8 @@ class _BaseSession(t.Generic[CATALOG, READER, WRITER, DF, TABLE, CONN, UDF_REGIS
         dialect = Dialect.get_or_raise(dialect or self.input_dialect)
         expression = (
             sqlglot.parse_one(
-                normalize_string(sqlQuery, from_dialect=dialect, is_query=True),
+                normalize_string(sqlQuery, from_dialect=dialect, is_query=True),
+                read=dialect,
             )
             if isinstance(sqlQuery, str)
             else sqlQuery

@@ -507,9 +519,14 @@ class _BaseSession(t.Generic[CATALOG, READER, WRITER, DF, TABLE, CONN, UDF_REGIS
         result = self._cur.fetchall()
         if not self._cur.description:
             return []
+        case_sensitive_cols = []
+        for col in self._cur.description:
+            col_id = exp.parse_identifier(col[0], dialect=self.execution_dialect)
+            col_id._meta = {"case_sensitive": True, **(col_id._meta or {})}
+            case_sensitive_cols.append(col_id)
         columns = [
-            normalize_string(x
-            for x in
+            normalize_string(x, from_dialect="execution", to_dialect="output")
+            for x in case_sensitive_cols
         ]
         return [self._to_row(columns, row) for row in result]

sqlframe/bigquery/session.py
CHANGED
sqlframe/databricks/session.py
CHANGED
sqlframe/duckdb/session.py
CHANGED
sqlframe/postgres/session.py
CHANGED
sqlframe/snowflake/session.py
CHANGED
sqlframe/spark/session.py
CHANGED
@@ -34,7 +34,7 @@ class SparkSession(
         SparkDataFrameWriter,
         SparkDataFrame,
         SparkTable,
-        PySparkSession,
+        PySparkSession,  # type: ignore
         SparkUDFRegistration,
     ],
 ):

@@ -79,17 +79,18 @@ class SparkSession(
         if skip_rows:
             return []
         assert self._last_df is not None
-
-
-
-
-
-
-
-
-
-
-
+        results = []
+        for row in self._last_df.collect():
+            rows_normalized = {}
+            for k, v in row.asDict().items():
+                col_id = exp.parse_identifier(k, dialect=self.execution_dialect)
+                col_id._meta = {"case_sensitive": True, **(col_id._meta or {})}
+                col_name = normalize_string(
+                    col_id, from_dialect="execution", to_dialect="output", is_column=True
+                )
+                rows_normalized[col_name] = v
+            results.append(Row(**rows_normalized))
+        return results

     def _execute(self, sql: str) -> None:
         self._last_df = self.spark_session.sql(sql)

sqlframe/standalone/session.py
CHANGED
{sqlframe-3.16.0.dist-info → sqlframe-3.17.1.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sqlframe
-Version: 3.16.0
+Version: 3.17.1
 Summary: Turning PySpark Into a Universal DataFrame API
 Home-page: https://github.com/eakmanrq/sqlframe
 Author: Ryan Eakman

@@ -17,7 +17,7 @@ Requires-Python: >=3.9
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: prettytable <4
-Requires-Dist: sqlglot <26.
+Requires-Dist: sqlglot <26.5,>=24.0.0
 Requires-Dist: typing-extensions
 Provides-Extra: bigquery
 Requires-Dist: google-cloud-bigquery-storage <3,>=2 ; extra == 'bigquery'

@@ -27,7 +27,7 @@ Requires-Dist: databricks-sql-connector <5,>=3.6 ; extra == 'databricks'
 Provides-Extra: dev
 Requires-Dist: duckdb <1.2,>=0.9 ; extra == 'dev'
 Requires-Dist: findspark <3,>=2 ; extra == 'dev'
-Requires-Dist: mypy <1.
+Requires-Dist: mypy <1.16,>=1.10.0 ; extra == 'dev'
 Requires-Dist: openai <2,>=1.30 ; extra == 'dev'
 Requires-Dist: pandas-stubs <3,>=2 ; extra == 'dev'
 Requires-Dist: pandas <3,>=2 ; extra == 'dev'

{sqlframe-3.16.0.dist-info → sqlframe-3.17.1.dist-info}/RECORD
CHANGED

@@ -1,19 +1,19 @@
 sqlframe/__init__.py,sha256=wfqm98eLoLid9oV_FzzpG5loKC6LxOhj2lXpfN7SARo,3138
-sqlframe/_version.py,sha256=
+sqlframe/_version.py,sha256=c661E5W7jBk0KLcY4zPGbS19JVM6PufmNidbT4Hw5Ok,413
 sqlframe/base/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sqlframe/base/_typing.py,sha256=b2clI5HI1zEZKB_3Msx3FeAJQyft44ubUifJwQRVXyQ,1298
 sqlframe/base/catalog.py,sha256=SzFQalTWdhWzxUY-4ut1f9TfOECp_JmJEgNPfrRKCe0,38457
-sqlframe/base/column.py,sha256=
-sqlframe/base/dataframe.py,sha256=
+sqlframe/base/column.py,sha256=oHVwkSWABO3ZlAbgBShsxSSlgbI06BOup5XJrRhgqJI,18097
+sqlframe/base/dataframe.py,sha256=KcB9HhnXuPojHOpksvYwmgIwcXUD6g9--EEvEBytDuY,79010
 sqlframe/base/decorators.py,sha256=ms-CvDOIW3T8IVB9VqDmLwAiaEsqXLYRXEqVQaxktiM,1890
 sqlframe/base/exceptions.py,sha256=9Uwvqn2eAkDpqm4BrRgbL61qM-GMCbJEMAW8otxO46s,370
 sqlframe/base/function_alternatives.py,sha256=NV31IaEhVYmfUSWetAEFISAvLzs2DxQ7bp-iMNgj0hQ,53786
-sqlframe/base/functions.py,sha256=
+sqlframe/base/functions.py,sha256=1LHxazgC9tZ_GzyWNsjU945SRnAsQjUH2easMJLU3h4,221012
 sqlframe/base/group.py,sha256=fsyG5990_Pd7gFPjTFrH9IEoAquL_wEkVpIlBAIkZJU,4091
 sqlframe/base/normalize.py,sha256=nXAJ5CwxVf4DV0GsH-q1w0p8gmjSMlv96k_ez1eVul8,3880
 sqlframe/base/operations.py,sha256=xSPw74e59wYvNd6U1AlwziNCTG6Aftrbl4SybN9u9VE,3450
 sqlframe/base/readerwriter.py,sha256=w8926cqIrXF7NGHiINw5UHzP_3xpjsqbijTBTzycBRM,26605
-sqlframe/base/session.py,sha256=
+sqlframe/base/session.py,sha256=gvg_yZaivR0N6Kyo3tiUitoTc9NSYekGp0-x-fPhAqA,26967
 sqlframe/base/table.py,sha256=rCeh1W5SWbtEVfkLAUiexzrZwNgmZeptLEmLcM1ABkE,6961
 sqlframe/base/transforms.py,sha256=y0j3SGDz3XCmNGrvassk1S-owllUWfkHyMgZlY6SFO4,467
 sqlframe/base/types.py,sha256=iBNk9bpFtb2NBIogYS8i7OlQZMRvpR6XxqzBebsjQDU,12280

@@ -33,7 +33,7 @@ sqlframe/bigquery/functions.py,sha256=MYLs6-sXXqe5o6ghJHHtEpFJlYMeyKzx9-rT3wwXlc
 sqlframe/bigquery/functions.pyi,sha256=KXgV46eZFNIXwXIhPuSJ08BG18iLQzDCQjyI3REBEXg,11925
 sqlframe/bigquery/group.py,sha256=UVBNBRTo8OqS-_cS5YwvTeJYgYxeG-d6R3kfyHmlFqw,391
 sqlframe/bigquery/readwriter.py,sha256=2uQhGe0THiLPb-_NF5jDdbizwjYJk854MuhEcnLghaE,949
-sqlframe/bigquery/session.py,sha256=
+sqlframe/bigquery/session.py,sha256=5-SNh1sHd4Q6zPEmlinL9LLwsqlzBTqT-ZNRAFpU1_s,2931
 sqlframe/bigquery/table.py,sha256=pSSRFeKcStyFuE1B4uiheP22tHHq5SdQ-uuaNQpbsfI,661
 sqlframe/bigquery/types.py,sha256=KwNyuXIo-2xVVd4bZED3YrQOobKCtemlxGrJL7DrTC8,34
 sqlframe/bigquery/udf.py,sha256=ZZ1-P1zWZhQqmhBqwAxfNeKl31nDkkZgkuz7Dn28P_0,264

@@ -46,7 +46,7 @@ sqlframe/databricks/functions.py,sha256=La8rjAwO0hD4FBO0QxW5CtZtFAPvOrVc6lG4OtPG
 sqlframe/databricks/functions.pyi,sha256=FzVBpzXCJzxIp73sIAo_R8Wx8uOJrix-W12HsgyeTcQ,23799
 sqlframe/databricks/group.py,sha256=dU3g0DVLRlfOSCamKchQFXRd1WTFbdxoXkpEX8tPD6Y,399
 sqlframe/databricks/readwriter.py,sha256=qhdw1zozrBixdUWsaQT2jy4JtZo0nuTjnRyg2GiFy9g,3370
-sqlframe/databricks/session.py,sha256=
+sqlframe/databricks/session.py,sha256=iw4uczkJHkpVO8vusEEmfCrhxHWyAHpCFmOZ-0qlkms,2343
 sqlframe/databricks/table.py,sha256=Q0Vnrl5aUqnqFTQpTwfWMRyQ9AQnagtpnSnXmP6IKRs,678
 sqlframe/databricks/types.py,sha256=KwNyuXIo-2xVVd4bZED3YrQOobKCtemlxGrJL7DrTC8,34
 sqlframe/databricks/udf.py,sha256=3rmxv_6zSLfIxH8P8P050ZO-ki0aqBb9wWuUQBtl4m8,272

@@ -59,7 +59,7 @@ sqlframe/duckdb/functions.py,sha256=ix2efGGD4HLaY1rtCtEd3IrsicGEVGiBAeKOo5OD8rA,
 sqlframe/duckdb/functions.pyi,sha256=P0ky6k-J7LdCDrQ0OjfRC3ARIYNHPmAmmaB_jBEO5L0,12383
 sqlframe/duckdb/group.py,sha256=IkhbW42Ng1U5YT3FkIdiB4zBqRkW4QyTb-1detY1e_4,383
 sqlframe/duckdb/readwriter.py,sha256=5EP8DEoX3N_xYavWpetsZdzvtYF-oCrAz3n-siNE8yY,4938
-sqlframe/duckdb/session.py,sha256=
+sqlframe/duckdb/session.py,sha256=Uf7im6eBbBYRvIhVGV0VCTCF76FQ00A5FbKPCdNllzw,2898
 sqlframe/duckdb/table.py,sha256=AmEKoH2TZo98loS5NbNaTuqv0eg76SY_OckVBMmQ6Co,410
 sqlframe/duckdb/types.py,sha256=KwNyuXIo-2xVVd4bZED3YrQOobKCtemlxGrJL7DrTC8,34
 sqlframe/duckdb/udf.py,sha256=Du9LnOtT1lJvB90D4HSR2tB7MXy179jZngDR-EjVjQk,656

@@ -72,7 +72,7 @@ sqlframe/postgres/functions.py,sha256=yOuDlt4GHXHg4MWhrAjlpVkEf1-zefxLQ1JfWy4JqI
 sqlframe/postgres/functions.pyi,sha256=PozoVOSao-KG9LKqN7XuTETO_nd9ujGhQUMxwtMP6_c,11001
 sqlframe/postgres/group.py,sha256=KUXeSFKWTSH9yCRJAhW85OvjZaG6Zr4In9LR_ie3yGU,391
 sqlframe/postgres/readwriter.py,sha256=o3cJWBlmQX5gpII8dLmQQUxg0ZRDO06XyFS6rBdHGQs,949
-sqlframe/postgres/session.py,sha256=
+sqlframe/postgres/session.py,sha256=iKmxA46tmcy-86s46eXjYWawGWlZm4QRqix40aqV2pw,2664
 sqlframe/postgres/table.py,sha256=LZeomgZQnibZ3zO36McKfxAbT670AcNqI4ftptY3B5A,661
 sqlframe/postgres/types.py,sha256=KwNyuXIo-2xVVd4bZED3YrQOobKCtemlxGrJL7DrTC8,34
 sqlframe/postgres/udf.py,sha256=TylVxrmPzycAqnpTiueGvvetiMCfCX31QatgQMIgpME,264

@@ -97,7 +97,7 @@ sqlframe/snowflake/functions.py,sha256=g3ELesLGvN7tyRyiEAnPneVyL0rQhSSOxYFvBFuOi
 sqlframe/snowflake/functions.pyi,sha256=sEqNdyqQBtgqoKtRViUb70QT5yrvpVoP3ZN7VxHNxbw,12612
 sqlframe/snowflake/group.py,sha256=pPP1l2RRo_LgkXrji8a87n2PKo-63ZRPT-WUtvVcBME,395
 sqlframe/snowflake/readwriter.py,sha256=TxvufwdA83xjMoMjgyHerz0Qri3sPFB1nEPHmQyGNtg,966
-sqlframe/snowflake/session.py,sha256=
+sqlframe/snowflake/session.py,sha256=nHQ8DfKJNUCUjAZqnF68_1JiRX7mucJHLtvZVm3_0Ig,3512
 sqlframe/snowflake/table.py,sha256=A5SpA-0mnYBkVXIxaa10LY-QJ8OuNjOEXfJK8k-iDkQ,618
 sqlframe/snowflake/types.py,sha256=KwNyuXIo-2xVVd4bZED3YrQOobKCtemlxGrJL7DrTC8,34
 sqlframe/snowflake/udf.py,sha256=yzMmky-n5BXFbdldgfzLP8hCrVm0DgruSUCUUy1_3sk,268

@@ -110,7 +110,7 @@ sqlframe/spark/functions.py,sha256=MYCgHsjRQWylT-rezWRBuLV6BivcaVarbaQtP4T0toQ,3
 sqlframe/spark/functions.pyi,sha256=GyOdUzv2Z7Qt99JAKEPKgV2t2Rn274OuqwAfcoAXlN0,24259
 sqlframe/spark/group.py,sha256=MrvV_v-YkBc6T1zz882WrEqtWjlooWIyHBCmTQg3fCA,379
 sqlframe/spark/readwriter.py,sha256=zXZcCPWpQMMN90wdIx8AD4Y5tWBcpRSL4-yKX2aZyik,874
-sqlframe/spark/session.py,sha256=
+sqlframe/spark/session.py,sha256=fYu8aVSDRAJ7ZnA7zgba7acXjP8ROJshfX5UYmEq5mI,5667
 sqlframe/spark/table.py,sha256=puWV8h_CqA64zwpzq0ydY9LoygMAvprkODyxyzZeF9M,186
 sqlframe/spark/types.py,sha256=KwNyuXIo-2xVVd4bZED3YrQOobKCtemlxGrJL7DrTC8,34
 sqlframe/spark/udf.py,sha256=owB8NDaGVkUQ0WGm7SZt2t9zfvLFCfi0W48QiPfgjck,1153

@@ -122,15 +122,15 @@ sqlframe/standalone/dataframe.py,sha256=K5zx14KzUTe2_KBo7XsLhe9jLiTHwYkf6VQzcnS9
 sqlframe/standalone/functions.py,sha256=cA5Lbk6UUH4vTmq1aNtPsBb3uskOxsrT6V1STHYdtNI,46
 sqlframe/standalone/group.py,sha256=oGEbAQMSm6AlkwnBxNI8r9enZWRwsRxc8zpzoz3rArk,399
 sqlframe/standalone/readwriter.py,sha256=KP2qNJZD0uhthRblLVTloE4Fv5rh_rwA95q7pjAXEww,716
-sqlframe/standalone/session.py,sha256=
+sqlframe/standalone/session.py,sha256=INTyERn5Rnqx9aVOcYr5OWzOp3hZkIp4kQosxdCMdjM,1517
 sqlframe/standalone/table.py,sha256=FN-qYKisHyFjdzpklb1vsbzGFXaKvgbNyiEXxyu-95o,216
 sqlframe/standalone/types.py,sha256=KwNyuXIo-2xVVd4bZED3YrQOobKCtemlxGrJL7DrTC8,34
 sqlframe/standalone/udf.py,sha256=azmgtUjHNIPs0WMVNId05SHwiYn41MKVBhKXsQJ5dmY,272
 sqlframe/standalone/window.py,sha256=6GKPzuxeSapJakBaKBeT9VpED1ACdjggDv9JRILDyV0,35
 sqlframe/testing/__init__.py,sha256=VVCosQhitU74A3NnE52O4mNtGZONapuEXcc20QmSlnQ,132
 sqlframe/testing/utils.py,sha256=PFsGZpwNUE_4-g_f43_vstTqsK0AQ2lBneb5Eb6NkFo,13008
-sqlframe-3.16.0.dist-info/LICENSE,sha256=
-sqlframe-3.16.0.dist-info/METADATA,sha256=
-sqlframe-3.16.0.dist-info/WHEEL,sha256=
-sqlframe-3.16.0.dist-info/top_level.txt,sha256=
-sqlframe-3.16.0.dist-info/RECORD,,
+sqlframe-3.17.1.dist-info/LICENSE,sha256=VZu79YgW780qxaFJMr0t5ZgbOYEh04xWoxaWOaqIGWk,1068
+sqlframe-3.17.1.dist-info/METADATA,sha256=7YgdpN6-xqFqBAQlqR5V8IaeByzgFlcW9aQl1fG8Ey0,8970
+sqlframe-3.17.1.dist-info/WHEEL,sha256=oiQVh_5PnQM0E3gPdiz09WCNmwiHDMaGer_elqB3coM,92
+sqlframe-3.17.1.dist-info/top_level.txt,sha256=T0_RpoygaZSF6heeWwIDQgaP0varUdSK1pzjeJZRjM8,9
+sqlframe-3.17.1.dist-info/RECORD,,

{sqlframe-3.16.0.dist-info → sqlframe-3.17.1.dist-info}/LICENSE
File without changes

{sqlframe-3.16.0.dist-info → sqlframe-3.17.1.dist-info}/WHEEL
File without changes

{sqlframe-3.16.0.dist-info → sqlframe-3.17.1.dist-info}/top_level.txt
File without changes