sqlglot 26.28.1__py3-none-any.whl → 26.30.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sqlglot/_version.py +2 -2
- sqlglot/dialects/__init__.py +1 -0
- sqlglot/dialects/athena.py +1 -0
- sqlglot/dialects/bigquery.py +1 -0
- sqlglot/dialects/clickhouse.py +2 -0
- sqlglot/dialects/dialect.py +16 -5
- sqlglot/dialects/duckdb.py +8 -0
- sqlglot/dialects/fabric.py +88 -0
- sqlglot/dialects/hive.py +1 -0
- sqlglot/dialects/oracle.py +15 -0
- sqlglot/dialects/presto.py +1 -0
- sqlglot/dialects/prql.py +5 -1
- sqlglot/dialects/redshift.py +1 -0
- sqlglot/dialects/snowflake.py +39 -1
- sqlglot/dialects/spark.py +17 -0
- sqlglot/dialects/sqlite.py +4 -3
- sqlglot/dialects/tsql.py +2 -2
- sqlglot/expressions.py +9 -1
- sqlglot/generator.py +20 -3
- sqlglot/optimizer/annotate_types.py +44 -1
- sqlglot/optimizer/qualify_columns.py +7 -0
- sqlglot/optimizer/scope.py +14 -1
- sqlglot/parser.py +191 -87
- {sqlglot-26.28.1.dist-info → sqlglot-26.30.0.dist-info}/METADATA +1 -1
- {sqlglot-26.28.1.dist-info → sqlglot-26.30.0.dist-info}/RECORD +28 -27
- {sqlglot-26.28.1.dist-info → sqlglot-26.30.0.dist-info}/WHEEL +0 -0
- {sqlglot-26.28.1.dist-info → sqlglot-26.30.0.dist-info}/licenses/LICENSE +0 -0
- {sqlglot-26.28.1.dist-info → sqlglot-26.30.0.dist-info}/top_level.txt +0 -0
sqlglot/_version.py
CHANGED
sqlglot/dialects/__init__.py
CHANGED
sqlglot/dialects/athena.py
CHANGED
sqlglot/dialects/bigquery.py
CHANGED
@@ -524,6 +524,7 @@ class BigQuery(Dialect):
         PREFIXED_PIVOT_COLUMNS = True
         LOG_DEFAULTS_TO_LN = True
         SUPPORTS_IMPLICIT_UNNEST = True
+        JOINS_HAVE_EQUAL_PRECEDENCE = True
 
         # BigQuery does not allow ASC/DESC to be used as an identifier
         ID_VAR_TOKENS = parser.Parser.ID_VAR_TOKENS - {TokenType.ASC, TokenType.DESC}
sqlglot/dialects/clickhouse.py
CHANGED
@@ -297,6 +297,7 @@ class ClickHouse(Dialect):
         MODIFIERS_ATTACHED_TO_SET_OP = False
         INTERVAL_SPANS = False
         OPTIONAL_ALIAS_TOKEN_CTE = False
+        JOINS_HAVE_EQUAL_PRECEDENCE = True
 
         FUNCTIONS = {
             **parser.Parser.FUNCTIONS,
@@ -691,6 +692,7 @@ class ClickHouse(Dialect):
             parse_bracket: bool = False,
             is_db_reference: bool = False,
             parse_partition: bool = False,
+            consume_pipe: bool = False,
         ) -> t.Optional[exp.Expression]:
             this = super()._parse_table(
                 schema=schema,
sqlglot/dialects/dialect.py
CHANGED
@@ -77,6 +77,7 @@ class Dialects(str, Enum):
     DRUID = "druid"
     DUCKDB = "duckdb"
     DUNE = "dune"
+    FABRIC = "fabric"
     HIVE = "hive"
     MATERIALIZE = "materialize"
     MYSQL = "mysql"
@@ -1621,7 +1622,10 @@ def map_date_part(part, dialect: DialectType = Dialect):
     mapped = (
         Dialect.get_or_raise(dialect).DATE_PART_MAPPING.get(part.name.upper()) if part else None
     )
-    return exp.var(mapped) if mapped else part
+    if mapped:
+        return exp.Literal.string(mapped) if part.is_string else exp.var(mapped)
+
+    return part
 
 
 def no_last_day_sql(self: Generator, expression: exp.LastDay) -> str:
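Note: `map_date_part` now preserves how the date part was written, returning a string literal for string inputs instead of always coercing to a var. A minimal sketch (the DOW → DAYOFWEEK mapping is assumed to exist in Snowflake's DATE_PART_MAPPING; treat the exact mapped name as an assumption):

    from sqlglot import exp
    from sqlglot.dialects.dialect import map_date_part

    # A string-typed date part should stay a Literal after mapping...
    print(repr(map_date_part(exp.Literal.string("dow"), "snowflake")))

    # ...while an identifier-style part should still map to a Var.
    print(repr(map_date_part(exp.var("dow"), "snowflake")))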
@@ -1903,12 +1907,19 @@ def groupconcat_sql(
 
 def build_timetostr_or_tochar(args: t.List, dialect: Dialect) -> exp.TimeToStr | exp.ToChar:
     this = seq_get(args, 0)
+    format = seq_get(args, 1)
 
-    if this and not this.type:
-        from sqlglot.optimizer.annotate_types import annotate_types
+    if this:
+        if not this.type:
+            from sqlglot.optimizer.annotate_types import annotate_types
+
+            annotate_types(this, dialect=dialect)
 
-        annotate_types(this, dialect=dialect)
-        if this.is_type(*exp.DataType.TEMPORAL_TYPES):
+        from sqlglot.dialects import Snowflake
+
+        if this.is_type(*exp.DataType.TEMPORAL_TYPES) or (
+            isinstance(format, exp.Literal) and format.name in Snowflake.TIME_MAPPING
+        ):
             dialect_name = dialect.__class__.__name__.lower()
             return build_formatted_time(exp.TimeToStr, dialect_name, default=True)(args)
 
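Note: the builder now also inspects the format argument, so a `TO_CHAR` whose format token appears in Snowflake's TIME_MAPPING is treated as `TimeToStr` even when the first argument's type can't be inferred. A hedged sketch of the already-working temporal path (the exact AST shape is an assumption, not asserted by this diff):

    from sqlglot import exp, parse_one

    ast = parse_one(
        "SELECT TO_CHAR(CAST(x AS TIMESTAMP), 'yyyy-mm-dd') FROM t", read="snowflake"
    )
    # A temporal first argument should build exp.TimeToStr rather than exp.ToChar.
    print(ast.find(exp.TimeToStr) is not None)  # expected: True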
sqlglot/dialects/duckdb.py
CHANGED
@@ -290,6 +290,12 @@ class DuckDB(Dialect):
     # https://duckdb.org/docs/sql/introduction.html#creating-a-new-table
     NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_INSENSITIVE
 
+    DATE_PART_MAPPING = {
+        **Dialect.DATE_PART_MAPPING,
+        "DAYOFWEEKISO": "ISODOW",
+    }
+    DATE_PART_MAPPING.pop("WEEKDAY")
+
     def to_json_path(self, path: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
         if isinstance(path, exp.Literal):
             # DuckDB also supports the JSON pointer syntax, where every path starts with a `/`.
@@ -502,6 +508,7 @@ class DuckDB(Dialect):
             parse_bracket: bool = False,
             is_db_reference: bool = False,
             parse_partition: bool = False,
+            consume_pipe: bool = False,
         ) -> t.Optional[exp.Expression]:
             # DuckDB supports prefix aliases, e.g. FROM foo: bar
             if self._next and self._next.token_type == TokenType.COLON:
@@ -620,6 +627,7 @@ class DuckDB(Dialect):
         PAD_FILL_PATTERN_IS_REQUIRED = True
         ARRAY_CONCAT_IS_VAR_LEN = False
         ARRAY_SIZE_DIM_REQUIRED = False
+        NORMALIZE_EXTRACT_DATE_PARTS = True
 
         TRANSFORMS = {
             **generator.Generator.TRANSFORMS,
sqlglot/dialects/fabric.py
ADDED
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+from sqlglot import exp
+from sqlglot.dialects.dialect import NormalizationStrategy
+from sqlglot.dialects.tsql import TSQL
+
+
+class Fabric(TSQL):
+    """
+    Microsoft Fabric Data Warehouse dialect that inherits from T-SQL.
+
+    Microsoft Fabric is a cloud-based analytics platform that provides a unified
+    data warehouse experience. While it shares much of T-SQL's syntax, it has
+    specific differences and limitations that this dialect addresses.
+
+    Key differences from T-SQL:
+    - Case-sensitive identifiers (unlike T-SQL which is case-insensitive)
+    - Limited data type support with mappings to supported alternatives
+    - Temporal types (DATETIME2, DATETIMEOFFSET, TIME) limited to 6 digits precision
+    - Certain legacy types (MONEY, SMALLMONEY, etc.) are not supported
+    - Unicode types (NCHAR, NVARCHAR) are mapped to non-unicode equivalents
+
+    References:
+    - Data Types: https://learn.microsoft.com/en-us/fabric/data-warehouse/data-types
+    - T-SQL Surface Area: https://learn.microsoft.com/en-us/fabric/data-warehouse/tsql-surface-area
+    """
+
+    # Fabric is case-sensitive unlike T-SQL which is case-insensitive
+    NORMALIZATION_STRATEGY = NormalizationStrategy.CASE_SENSITIVE
+
+    class Generator(TSQL.Generator):
+        # Fabric-specific type mappings - override T-SQL types that aren't supported
+        # Reference: https://learn.microsoft.com/en-us/fabric/data-warehouse/data-types
+        TYPE_MAPPING = {
+            **TSQL.Generator.TYPE_MAPPING,
+            # Fabric doesn't support these types, map to alternatives
+            exp.DataType.Type.MONEY: "DECIMAL",
+            exp.DataType.Type.SMALLMONEY: "DECIMAL",
+            exp.DataType.Type.DATETIME: "DATETIME2(6)",
+            exp.DataType.Type.SMALLDATETIME: "DATETIME2(6)",
+            exp.DataType.Type.NCHAR: "CHAR",
+            exp.DataType.Type.NVARCHAR: "VARCHAR",
+            exp.DataType.Type.TEXT: "VARCHAR(MAX)",
+            exp.DataType.Type.IMAGE: "VARBINARY",
+            exp.DataType.Type.TINYINT: "SMALLINT",
+            exp.DataType.Type.UTINYINT: "SMALLINT",  # T-SQL parses TINYINT as UTINYINT
+            exp.DataType.Type.JSON: "VARCHAR",
+            exp.DataType.Type.XML: "VARCHAR",
+            exp.DataType.Type.UUID: "VARBINARY(MAX)",  # UNIQUEIDENTIFIER has limitations in Fabric
+            # Override T-SQL mappings that use different names in Fabric
+            exp.DataType.Type.DECIMAL: "DECIMAL",  # T-SQL uses NUMERIC
+            exp.DataType.Type.DOUBLE: "FLOAT",
+            exp.DataType.Type.INT: "INT",  # T-SQL uses INTEGER
+        }
+
+        def datatype_sql(self, expression: exp.DataType) -> str:
+            """
+            Override datatype generation to handle Fabric-specific precision limitations.
+
+            Fabric limits temporal types (TIME, DATETIME2, DATETIMEOFFSET) to max 6 digits precision.
+            When no precision is specified, we default to 6 digits.
+            """
+            if expression.is_type(
+                exp.DataType.Type.TIME,
+                exp.DataType.Type.DATETIME2,
+                exp.DataType.Type.TIMESTAMPTZ,  # DATETIMEOFFSET in Fabric
+            ):
+                # Get the current precision (first expression if it exists)
+                precision = expression.find(exp.DataTypeParam)
+
+                # Determine the target precision
+                if precision is None:
+                    # No precision specified, default to 6
+                    target_precision = 6
+                elif precision.this.is_int:
+                    # Cap precision at 6
+                    current_precision = precision.this.to_py()
+                    target_precision = min(current_precision, 6)
+
+                # Create a new expression with the target precision
+                new_expression = exp.DataType(
+                    this=expression.this,
+                    expressions=[exp.DataTypeParam(this=exp.Literal.number(target_precision))],
+                )
+
+                return super().datatype_sql(new_expression)
+
+            return super().datatype_sql(expression)
sqlglot/dialects/hive.py
CHANGED
sqlglot/dialects/oracle.py
CHANGED
@@ -128,6 +128,7 @@ class Oracle(Dialect):
             "NEXT": lambda self: self._parse_next_value_for(),
             "PRIOR": lambda self: self.expression(exp.Prior, this=self._parse_bitwise()),
             "SYSDATE": lambda self: self.expression(exp.CurrentTimestamp, sysdate=True),
+            "DBMS_RANDOM": lambda self: self._parse_dbms_random(),
         }
 
         FUNCTION_PARSERS: t.Dict[str, t.Callable] = {
@@ -177,6 +178,19 @@ class Oracle(Dialect):
             ),
         }
 
+        def _parse_dbms_random(self) -> t.Optional[exp.Expression]:
+            if self._match_text_seq(".", "VALUE"):
+                lower, upper = None, None
+                if self._match(TokenType.L_PAREN, advance=False):
+                    lower_upper = self._parse_wrapped_csv(self._parse_bitwise)
+                    if len(lower_upper) == 2:
+                        lower, upper = lower_upper
+
+                return exp.Rand(lower=lower, upper=upper)
+
+            self._retreat(self._index - 1)
+            return None
+
         def _parse_json_array(self, expr_type: t.Type[E], **kwargs) -> E:
             return self.expression(
                 expr_type,
@@ -299,6 +313,7 @@ class Oracle(Dialect):
             exp.LogicalOr: rename_func("MAX"),
             exp.LogicalAnd: rename_func("MIN"),
             exp.Mod: rename_func("MOD"),
+            exp.Rand: rename_func("DBMS_RANDOM.VALUE"),
             exp.Select: transforms.preprocess(
                 [
                     transforms.eliminate_distinct_on,
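Note: the parser hook and the `exp.Rand` generator mapping together should give `DBMS_RANDOM.VALUE` a lossless round trip through the Oracle dialect. A hedged sketch:

    import sqlglot

    # Parses into exp.Rand(lower=1, upper=10) and renders back via rename_func.
    sql = "SELECT DBMS_RANDOM.VALUE(1, 10) FROM DUAL"
    print(sqlglot.transpile(sql, read="oracle", write="oracle")[0])
    # expected: SELECT DBMS_RANDOM.VALUE(1, 10) FROM DUAL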
sqlglot/dialects/presto.py
CHANGED
sqlglot/dialects/prql.py
CHANGED
@@ -189,11 +189,15 @@ class PRQL(Dialect):
             parse_bracket: bool = False,
             is_db_reference: bool = False,
             parse_partition: bool = False,
+            consume_pipe: bool = False,
         ) -> t.Optional[exp.Expression]:
             return self._parse_table_parts()
 
         def _parse_from(
-            self, joins: bool = False, skip_from_token: bool = False
+            self,
+            joins: bool = False,
+            skip_from_token: bool = False,
+            consume_pipe: bool = False,
         ) -> t.Optional[exp.From]:
             if not skip_from_token and not self._match(TokenType.FROM):
                 return None
sqlglot/dialects/redshift.py
CHANGED
@@ -90,6 +90,7 @@ class Redshift(Postgres):
             parse_bracket: bool = False,
             is_db_reference: bool = False,
             parse_partition: bool = False,
+            consume_pipe: bool = False,
         ) -> t.Optional[exp.Expression]:
             # Redshift supports UNPIVOTing SUPER objects, e.g. `UNPIVOT foo.obj[0] AS val AT attr`
             unpivot = self._match(TokenType.UNPIVOT)
sqlglot/dialects/snowflake.py
CHANGED
@@ -31,6 +31,7 @@ from sqlglot.dialects.dialect import (
 )
 from sqlglot.generator import unsupported_args
 from sqlglot.helper import flatten, is_float, is_int, seq_get
+from sqlglot.optimizer.scope import find_all_in_scope
 from sqlglot.tokens import TokenType
 
 if t.TYPE_CHECKING:
@@ -333,6 +334,34 @@ def _json_extract_value_array_sql(
     return self.func("TRANSFORM", json_extract, transform_lambda)
 
 
+def _eliminate_dot_variant_lookup(expression: exp.Expression) -> exp.Expression:
+    if isinstance(expression, exp.Select):
+        # This transformation is used to facilitate transpilation of BigQuery `UNNEST` operations
+        # to Snowflake. It should not affect roundtrip because `Unnest` nodes cannot be produced
+        # by Snowflake's parser.
+        #
+        # Additionally, at the time of writing this, BigQuery is the only dialect that produces a
+        # `TableAlias` node that only fills `columns` and not `this`, due to `UNNEST_COLUMN_ONLY`.
+        unnest_aliases = set()
+        for unnest in find_all_in_scope(expression, exp.Unnest):
+            unnest_alias = unnest.args.get("alias")
+            if (
+                isinstance(unnest_alias, exp.TableAlias)
+                and not unnest_alias.this
+                and len(unnest_alias.columns) == 1
+            ):
+                unnest_aliases.add(unnest_alias.columns[0].name)
+
+        if unnest_aliases:
+            for c in find_all_in_scope(expression, exp.Column):
+                if c.table in unnest_aliases:
+                    bracket_lhs = c.args["table"]
+                    bracket_rhs = exp.Literal.string(c.name)
+                    c.replace(exp.Bracket(this=bracket_lhs, expressions=[bracket_rhs]))
+
+    return expression
+
+
 class Snowflake(Dialect):
     # https://docs.snowflake.com/en/sql-reference/identifiers-syntax
     NORMALIZATION_STRATEGY = NormalizationStrategy.UPPERCASE
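Note: the transform rewrites `alias.field` into `alias['field']` for single-column UNNEST aliases, which matches how a flattened value is addressed in Snowflake. A hedged sketch of the BigQuery-to-Snowflake path it targets (the exact FLATTEN-based output is not asserted here):

    import sqlglot

    # BigQuery's UNNEST_COLUMN_ONLY aliasing produces the TableAlias shape described above.
    sql = "SELECT item.name FROM t, UNNEST(t.items) AS item"
    print(sqlglot.transpile(sql, read="bigquery", write="snowflake")[0])
    # The point: item.name should now render as a variant lookup (item['name'])
    # rather than as a column reference on a table called item.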
@@ -770,6 +799,7 @@ class Snowflake(Dialect):
             parse_bracket: bool = False,
             is_db_reference: bool = False,
             parse_partition: bool = False,
+            consume_pipe: bool = False,
         ) -> t.Optional[exp.Expression]:
             table = super()._parse_table(
                 schema=schema,
@@ -1096,6 +1126,7 @@ class Snowflake(Dialect):
                     transforms.explode_projection_to_unnest(),
                     transforms.eliminate_semi_and_anti_joins,
                     _transform_generate_date_array,
+                    _eliminate_dot_variant_lookup,
                 ]
             ),
             exp.SHA: rename_func("SHA1"),
@@ -1314,7 +1345,14 @@ class Snowflake(Dialect):
             start = f" START {start}" if start else ""
             increment = expression.args.get("increment")
             increment = f" INCREMENT {increment}" if increment else ""
-            return f"AUTOINCREMENT{start}{increment}"
+
+            order = expression.args.get("order")
+            if order is not None:
+                order_clause = " ORDER" if order else " NOORDER"
+            else:
+                order_clause = ""
+
+            return f"AUTOINCREMENT{start}{increment}{order_clause}"
 
         def cluster_sql(self, expression: exp.Cluster) -> str:
            return f"CLUSTER BY ({self.expressions(expression, flat=True)})"
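Note: together with the matching `_parse_generated_as_identity` change in parser.py below, Snowflake's ORDER/NOORDER autoincrement option should now survive a round trip. A hedged sketch:

    import sqlglot

    sql = "CREATE TABLE t (id INT AUTOINCREMENT START 1 INCREMENT 1 NOORDER)"
    print(sqlglot.transpile(sql, read="snowflake", write="snowflake")[0])
    # expected to preserve NOORDER; when neither keyword is given, order is None
    # and no clause is emitted.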
sqlglot/dialects/spark.py
CHANGED
@@ -7,6 +7,7 @@ from sqlglot.dialects.dialect import rename_func, unit_to_var, timestampdiff_sql
 from sqlglot.dialects.hive import _build_with_ignore_nulls
 from sqlglot.dialects.spark2 import Spark2, temporary_storage_provider, _build_as_cast
 from sqlglot.helper import ensure_list, seq_get
+from sqlglot.tokens import TokenType
 from sqlglot.transforms import (
     ctas_with_tmp_tables_to_create_tmp_view,
     remove_unique_constraints,
@@ -121,6 +122,16 @@ class Spark(Spark2):
             ),
         }
 
+        PLACEHOLDER_PARSERS = {
+            **Spark2.Parser.PLACEHOLDER_PARSERS,
+            TokenType.L_BRACE: lambda self: self._parse_query_parameter(),
+        }
+
+        def _parse_query_parameter(self) -> t.Optional[exp.Expression]:
+            this = self._parse_id_var()
+            self._match(TokenType.R_BRACE)
+            return self.expression(exp.Placeholder, this=this, widget=True)
+
         def _parse_generated_as_identity(
             self,
         ) -> (
@@ -200,3 +211,9 @@ class Spark(Spark2):
                 return self.func("DATEDIFF", unit_to_var(expression), start, end)
 
             return self.func("DATEDIFF", end, start)
+
+        def placeholder_sql(self, expression: exp.Placeholder) -> str:
+            if not expression.args.get("widget"):
+                return super().placeholder_sql(expression)
+
+            return f"{{{expression.name}}}"
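Note: this makes Databricks-style `{param}` query parameters parse into `exp.Placeholder(widget=True)` and render back unchanged in Spark. A hedged sketch:

    import sqlglot

    sql = "SELECT * FROM sales WHERE ds = {start_date}"
    print(sqlglot.transpile(sql, read="spark", write="spark")[0])
    # expected: SELECT * FROM sales WHERE ds = {start_date}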
sqlglot/dialects/sqlite.py
CHANGED
@@ -102,6 +102,10 @@ class SQLite(Dialect):
         COMMANDS = {*tokens.Tokenizer.COMMANDS, TokenType.REPLACE}
 
     class Parser(parser.Parser):
+        STRING_ALIASES = True
+        ALTER_RENAME_REQUIRES_COLUMN = False
+        JOINS_HAVE_EQUAL_PRECEDENCE = True
+
         FUNCTIONS = {
             **parser.Parser.FUNCTIONS,
             "EDITDIST3": exp.Levenshtein.from_arg_list,
@@ -110,9 +114,6 @@ class SQLite(Dialect):
             "TIME": lambda args: exp.Anonymous(this="TIME", expressions=args),
         }
 
-        STRING_ALIASES = True
-        ALTER_RENAME_REQUIRES_COLUMN = False
-
         def _parse_unique(self) -> exp.UniqueColumnConstraint:
             # Do not consume more tokens if UNIQUE is used as a standalone constraint, e.g:
             # CREATE TABLE foo (bar TEXT UNIQUE REFERENCES baz ...)
sqlglot/dialects/tsql.py
CHANGED
@@ -1224,8 +1224,6 @@ class TSQL(Dialect):
             # to amend the AST by moving the CTEs to the CREATE VIEW statement's query.
             ctas_expression.set("with", with_.pop())
 
-        sql = super().create_sql(expression)
-
         table = expression.find(exp.Table)
 
         # Convert CTAS statement to SELECT .. INTO ..
@@ -1243,6 +1241,8 @@ class TSQL(Dialect):
                 select_into.limit(0, copy=False)
 
             sql = self.sql(select_into)
+        else:
+            sql = super().create_sql(expression)
 
         if exists:
             identifier = self.sql(exp.Literal.string(exp.table_name(table) if table else ""))
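Note: the reorder matters because a CTAS is rewritten to `SELECT .. INTO ..` on T-SQL; `super().create_sql` is now only invoked for statements that actually keep the CREATE shape, instead of being generated up front and discarded. A hedged sketch of the CTAS path:

    import sqlglot

    # CTAS becomes SELECT ... INTO on T-SQL.
    print(sqlglot.transpile("CREATE TABLE t AS SELECT 1 AS c", read="duckdb", write="tsql")[0])
    # expected: SELECT 1 AS c INTO t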
sqlglot/expressions.py
CHANGED
@@ -1947,6 +1947,7 @@ class GeneratedAsIdentityColumnConstraint(ColumnConstraintKind):
         "minvalue": False,
         "maxvalue": False,
         "cycle": False,
+        "order": False,
     }
 
 
@@ -4452,8 +4453,9 @@ class SessionParameter(Condition):
     arg_types = {"this": True, "kind": False}
 
 
+# https://www.databricks.com/blog/parameterized-queries-pyspark
 class Placeholder(Condition):
-    arg_types = {"this": False, "kind": False}
+    arg_types = {"this": False, "kind": False, "widget": False}
 
     @property
     def name(self) -> str:
@@ -7044,6 +7046,12 @@ class Semicolon(Expression):
     arg_types = {}
 
 
+# BigQuery allows SELECT t FROM t and treats the projection as a struct value. This expression
+# type is intended to be constructed by qualify so that we can properly annotate its type later
+class TableColumn(Expression):
+    pass
+
+
 def _norm_arg(arg):
     return arg.lower() if type(arg) is str else arg
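Note: qualify constructs this node when a BigQuery query references a table as a projection; combined with the annotate_types changes below, that projection can then be typed as a STRUCT. A hedged sketch (the printed AST shape is an expectation, not asserted by this diff):

    from sqlglot import parse_one
    from sqlglot.optimizer.qualify import qualify

    ast = qualify(
        parse_one("SELECT t FROM t", read="bigquery"),
        schema={"t": {"a": "INT64", "b": "STRING"}},
        dialect="bigquery",
    )
    # The lone projection should now contain a TableColumn node instead of a Column.
    print(repr(ast.selects[0]))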
sqlglot/generator.py
CHANGED
@@ -201,6 +201,7 @@ class Generator(metaclass=_Generator):
         exp.StreamingTableProperty: lambda *_: "STREAMING",
         exp.StrictProperty: lambda *_: "STRICT",
         exp.SwapTable: lambda self, e: f"SWAP WITH {self.sql(e, 'this')}",
+        exp.TableColumn: lambda self, e: self.sql(e.this),
         exp.Tags: lambda self, e: f"TAG ({self.expressions(e, flat=True)})",
         exp.TemporaryProperty: lambda *_: "TEMPORARY",
         exp.TitleColumnConstraint: lambda self, e: f"TITLE {self.sql(e, 'this')}",
@@ -463,6 +464,11 @@ class Generator(metaclass=_Generator):
     # Whether to wrap <props> in `AlterSet`, e.g., ALTER ... SET (<props>)
     ALTER_SET_WRAPPED = False
 
+    # Whether to normalize the date parts in EXTRACT(<date_part> FROM <expr>) into a common representation
+    # For instance, to extract the day of week in ISO semantics, one can use ISODOW, DAYOFWEEKISO etc depending on the dialect.
+    # TODO: The normalization should be done by default once we've tested it across all dialects.
+    NORMALIZE_EXTRACT_DATE_PARTS = False
+
     # The name to generate for the JSONPath expression. If `None`, only `this` will be generated
     PARSE_JSON_NAME: t.Optional[str] = "PARSE_JSON"
 
@@ -2909,9 +2915,17 @@ class Generator(metaclass=_Generator):
         return f"NEXT VALUE FOR {self.sql(expression, 'this')}{order}"
 
     def extract_sql(self, expression: exp.Extract) -> str:
-        this = self.sql(expression, "this") if self.EXTRACT_ALLOWS_QUOTES else expression.this.name
+        from sqlglot.dialects.dialect import map_date_part
+
+        this = (
+            map_date_part(expression.this, self.dialect)
+            if self.NORMALIZE_EXTRACT_DATE_PARTS
+            else expression.this
+        )
+        this_sql = self.sql(this) if self.EXTRACT_ALLOWS_QUOTES else this.name
         expression_sql = self.sql(expression, "expression")
-        return f"EXTRACT({this} FROM {expression_sql})"
+
+        return f"EXTRACT({this_sql} FROM {expression_sql})"
 
     def trim_sql(self, expression: exp.Trim) -> str:
         trim_type = self.sql(expression, "position")
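Note: with DuckDB opting in via `NORMALIZE_EXTRACT_DATE_PARTS` and its new `DATE_PART_MAPPING` (see the duckdb.py hunks above), dialect-specific day-of-week spellings should be normalized at generation time. A hedged sketch:

    import sqlglot

    # DAYOFWEEKISO is not native to DuckDB; its DATE_PART_MAPPING rewrites it to ISODOW.
    sql = "SELECT EXTRACT(DAYOFWEEKISO FROM x) FROM t"
    print(sqlglot.transpile(sql, read="duckdb", write="duckdb")[0])
    # expected: SELECT EXTRACT(ISODOW FROM x) FROM t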
@@ -4766,7 +4780,10 @@ class Generator(metaclass=_Generator):
 
     def detach_sql(self, expression: exp.Detach) -> str:
         this = self.sql(expression, "this")
-        exists_sql = " IF EXISTS" if expression.args.get("exists") else ""
+        # the DATABASE keyword is required if IF EXISTS is set
+        # without it, DuckDB throws an error: Parser Error: syntax error at or near "exists" (Line Number: 1)
+        # ref: https://duckdb.org/docs/stable/sql/statements/attach.html#detach-syntax
+        exists_sql = " DATABASE IF EXISTS" if expression.args.get("exists") else ""
 
         return f"DETACH{exists_sql} {this}"
 
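Note: a short sketch grounded in the comment above:

    import sqlglot

    # DuckDB rejects DETACH IF EXISTS without the DATABASE keyword.
    print(sqlglot.transpile("DETACH IF EXISTS db", read="duckdb", write="duckdb")[0])
    # expected: DETACH DATABASE IF EXISTS db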
sqlglot/optimizer/annotate_types.py
CHANGED
@@ -12,7 +12,7 @@ from sqlglot.helper import (
     seq_get,
 )
 from sqlglot.optimizer.scope import Scope, traverse_scope
-from sqlglot.schema import Schema, ensure_schema
+from sqlglot.schema import MappingSchema, Schema, ensure_schema
 from sqlglot.dialects.dialect import Dialect
 
 if t.TYPE_CHECKING:
@@ -290,9 +290,52 @@ class TypeAnnotator(metaclass=_TypeAnnotator):
             elif isinstance(source.expression, exp.Unnest):
                 self._set_type(col, source.expression.type)
 
+        if isinstance(self.schema, MappingSchema):
+            for table_column in scope.table_columns:
+                source = scope.sources.get(table_column.name)
+
+                if isinstance(source, exp.Table):
+                    schema = self.schema.find(
+                        source, raise_on_missing=False, ensure_data_types=True
+                    )
+                    if not isinstance(schema, dict):
+                        continue
+
+                    struct_type = exp.DataType(
+                        this=exp.DataType.Type.STRUCT,
+                        expressions=[
+                            exp.ColumnDef(this=exp.to_identifier(c), kind=kind)
+                            for c, kind in schema.items()
+                        ],
+                        nested=True,
+                    )
+                    self._set_type(table_column, struct_type)
+                elif (
+                    isinstance(source, Scope)
+                    and isinstance(source.expression, exp.Query)
+                    and source.expression.is_type(exp.DataType.Type.STRUCT)
+                ):
+                    self._set_type(table_column, source.expression.type)
+
         # Then (possibly) annotate the remaining expressions in the scope
         self._maybe_annotate(scope.expression)
 
+        if self.schema.dialect == "bigquery" and isinstance(scope.expression, exp.Query):
+            struct_type = exp.DataType(
+                this=exp.DataType.Type.STRUCT,
+                expressions=[
+                    exp.ColumnDef(this=exp.to_identifier(select.output_name), kind=select.type)
+                    for select in scope.expression.selects
+                ],
+                nested=True,
+            )
+            if not any(
+                cd.kind.is_type(exp.DataType.Type.UNKNOWN)
+                for cd in struct_type.expressions
+                if cd.kind
+            ):
+                self._set_type(scope.expression, struct_type)
+
     def _maybe_annotate(self, expression: E) -> E:
         if id(expression) in self._visited:
             return expression  # We've already inferred the expression's type
sqlglot/optimizer/qualify_columns.py
CHANGED
@@ -529,6 +529,13 @@ def _qualify_columns(scope: Scope, resolver: Resolver, allow_partial_qualificati
             column_table = resolver.get_table(column_name)
             if column_table:
                 column.set("table", column_table)
+        elif (
+            resolver.schema.dialect == "bigquery"
+            and len(column.parts) == 1
+            and column_name in scope.selected_sources
+        ):
+            # BigQuery allows tables to be referenced as columns, treating them as structs
+            scope.replace(column, exp.TableColumn(this=column.this))
 
     for pivot in scope.pivots:
         for column in pivot.find_all(exp.Column):
sqlglot/optimizer/scope.py
CHANGED
@@ -88,6 +88,7 @@ class Scope:
     def clear_cache(self):
         self._collected = False
         self._raw_columns = None
+        self._table_columns = None
         self._stars = None
         self._derived_tables = None
         self._udtfs = None
@@ -125,6 +126,7 @@ class Scope:
         self._derived_tables = []
         self._udtfs = []
         self._raw_columns = []
+        self._table_columns = []
         self._stars = []
         self._join_hints = []
         self._semi_anti_join_tables = set()
@@ -156,6 +158,8 @@ class Scope:
                 self._derived_tables.append(node)
             elif isinstance(node, exp.UNWRAPPED_QUERIES):
                 self._subqueries.append(node)
+            elif isinstance(node, exp.TableColumn):
+                self._table_columns.append(node)
 
         self._collected = True
 
@@ -309,6 +313,13 @@ class Scope:
 
         return self._columns
 
+    @property
+    def table_columns(self):
+        if self._table_columns is None:
+            self._ensure_collected()
+
+        return self._table_columns
+
     @property
     def selected_sources(self):
         """
@@ -849,12 +860,14 @@ def walk_in_scope(expression, bfs=True, prune=None):
 
         if node is expression:
             continue
+
         if (
             isinstance(node, exp.CTE)
             or (
                 isinstance(node.parent, (exp.From, exp.Join, exp.Subquery))
-                and (_is_derived_table(node) or isinstance(node.parent, exp.UDTF))
+                and _is_derived_table(node)
             )
+            or (isinstance(node.parent, exp.UDTF) and isinstance(node, exp.Query))
             or isinstance(node, exp.UNWRAPPED_QUERIES)
         ):
             crossed_scope_boundary = True
sqlglot/parser.py
CHANGED
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import logging
+import re
 import typing as t
 import itertools
 from collections import defaultdict
@@ -23,6 +24,9 @@ logger = logging.getLogger("sqlglot")
 
 OPTIONS_TYPE = t.Dict[str, t.Sequence[t.Union[t.Sequence[str], str]]]
 
+# Used to detect alphabetical characters and +/- in timestamp literals
+TIME_ZONE_RE: t.Pattern[str] = re.compile(r":.*?[a-zA-Z\+\-]")
+
 
 def build_var_map(args: t.List) -> exp.StarMap | exp.VarMap:
     if len(args) == 1 and args[0].is_star:
@@ -931,15 +935,20 @@ class Parser(metaclass=_Parser):
     }
 
     PIPE_SYNTAX_TRANSFORM_PARSERS = {
-        "
-        "
+        "AGGREGATE": lambda self, query: self._parse_pipe_syntax_aggregate(query),
+        "AS": lambda self, query: self._build_pipe_cte(
+            query, [exp.Star()], self._parse_table_alias()
+        ),
+        "EXTEND": lambda self, query: self._parse_pipe_syntax_extend(query),
+        "LIMIT": lambda self, query: self._parse_pipe_syntax_limit(query),
         "ORDER BY": lambda self, query: query.order_by(
             self._parse_order(), append=False, copy=False
         ),
-        "LIMIT": lambda self, query: self._parse_pipe_syntax_limit(query),
-        "AGGREGATE": lambda self, query: self._parse_pipe_syntax_aggregate(query),
         "PIVOT": lambda self, query: self._parse_pipe_syntax_pivot(query),
+        "SELECT": lambda self, query: self._parse_pipe_syntax_select(query),
+        "TABLESAMPLE": lambda self, query: self._parse_pipe_syntax_tablesample(query),
         "UNPIVOT": lambda self, query: self._parse_pipe_syntax_pivot(query),
+        "WHERE": lambda self, query: query.where(self._parse_where(), copy=False),
     }
 
     PROPERTY_PARSERS: t.Dict[str, t.Callable] = {
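Note: these handlers drive BigQuery-style pipe syntax; each `|>` stage is folded into a CTE-wrapped SELECT via `_build_pipe_cte` further down. A hedged sketch (the exact rewritten SQL is not asserted):

    import sqlglot

    sql = "FROM t |> WHERE x > 1 |> SELECT x"
    # The pipe stages should be rewritten into ordinary CTE'd SELECTs.
    print(sqlglot.transpile(sql, read="bigquery", write="duckdb")[0])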
@@ -1511,6 +1520,15 @@ class Parser(metaclass=_Parser):
     # Whether renaming a column with an ALTER statement requires the presence of the COLUMN keyword
     ALTER_RENAME_REQUIRES_COLUMN = True
 
+    # Whether all join types have the same precedence, i.e., they "naturally" produce a left-deep tree.
+    # In standard SQL, joins that use the JOIN keyword take higher precedence than comma-joins. That is
+    # to say, JOIN operators happen before comma operators. This is not the case in some dialects, such
+    # as BigQuery, where all joins have the same precedence.
+    JOINS_HAVE_EQUAL_PRECEDENCE = False
+
+    # Whether TIMESTAMP <literal> can produce a zone-aware timestamp
+    ZONE_AWARE_TIMESTAMP_CONSTRUCTOR = False
+
     __slots__ = (
         "error_level",
         "error_message_context",
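Note: a sketch of what `JOINS_HAVE_EQUAL_PRECEDENCE` changes, using BigQuery, which enables it in this release (see the bigquery.py hunk above). With the flag on, a comma join is given an explicit CROSS kind (see the `_parse_join` hunk below), so it no longer binds more loosely than a subsequent JOIN keyword:

    from sqlglot import parse_one

    ast = parse_one("SELECT * FROM a, b", read="bigquery")
    print(ast.args["joins"][0].kind)  # expected: CROSS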
@@ -3135,7 +3153,7 @@ class Parser(metaclass=_Parser):
                 is_unpivot=self._prev.token_type == TokenType.UNPIVOT
             )
         elif self._match(TokenType.FROM):
-            from_ = self._parse_from(skip_from_token=True)
+            from_ = self._parse_from(skip_from_token=True, consume_pipe=True)
             # Support parentheses for duckdb FROM-first syntax
             select = self._parse_select()
             if select:
@@ -3145,7 +3163,7 @@ class Parser(metaclass=_Parser):
                 this = exp.select("*").from_(t.cast(exp.From, from_))
         else:
             this = (
-                self._parse_table()
+                self._parse_table(consume_pipe=True)
                 if table
                 else self._parse_select(nested=True, parse_set_operation=False)
             )
@@ -3166,6 +3184,31 @@ class Parser(metaclass=_Parser):
         table: bool = False,
         parse_subquery_alias: bool = True,
         parse_set_operation: bool = True,
+        consume_pipe: bool = True,
+    ) -> t.Optional[exp.Expression]:
+        query = self._parse_select_query(
+            nested=nested,
+            table=table,
+            parse_subquery_alias=parse_subquery_alias,
+            parse_set_operation=parse_set_operation,
+        )
+
+        if (
+            consume_pipe
+            and self._match(TokenType.PIPE_GT, advance=False)
+            and isinstance(query, exp.Query)
+        ):
+            query = self._parse_pipe_syntax_query(query)
+            query = query.subquery(copy=False) if query and table else query
+
+        return query
+
+    def _parse_select_query(
+        self,
+        nested: bool = False,
+        table: bool = False,
+        parse_subquery_alias: bool = True,
+        parse_set_operation: bool = True,
     ) -> t.Optional[exp.Expression]:
         cte = self._parse_with()
 
@@ -3185,7 +3228,11 @@ class Parser(metaclass=_Parser):
             return this
 
         # duckdb supports leading with FROM x
-        from_ = self._parse_from() if self._match(TokenType.FROM, advance=False) else None
+        from_ = (
+            self._parse_from(consume_pipe=True)
+            if self._match(TokenType.FROM, advance=False)
+            else None
+        )
 
         if self._match(TokenType.SELECT):
             comments = self._prev_comments
@@ -3252,10 +3299,6 @@ class Parser(metaclass=_Parser):
         elif self._match(TokenType.VALUES, advance=False):
             this = self._parse_derived_table_values()
         elif from_:
-            if self._match(TokenType.PIPE_GT, advance=False):
-                return self._parse_pipe_syntax_query(
-                    exp.Select().from_(from_.this, append=False, copy=False)
-                )
             this = exp.select("*").from_(from_.this, copy=False)
         elif self._match(TokenType.SUMMARIZE):
             table = self._match(TokenType.TABLE)
@@ -3516,13 +3559,18 @@ class Parser(metaclass=_Parser):
         )
 
     def _parse_from(
-        self, joins: bool = False, skip_from_token: bool = False
+        self,
+        joins: bool = False,
+        skip_from_token: bool = False,
+        consume_pipe: bool = False,
     ) -> t.Optional[exp.From]:
         if not skip_from_token and not self._match(TokenType.FROM):
             return None
 
         return self.expression(
-            exp.From, comments=self._prev_comments, this=self._parse_table(joins=joins)
+            exp.From,
+            comments=self._prev_comments,
+            this=self._parse_table(joins=joins, consume_pipe=consume_pipe),
         )
 
     def _parse_match_recognize_measure(self) -> exp.MatchRecognizeMeasure:
@@ -3697,9 +3745,12 @@ class Parser(metaclass=_Parser):
     ) -> t.Optional[exp.Join]:
         if self._match(TokenType.COMMA):
             table = self._try_parse(self._parse_table)
-            if table:
-                return self.expression(exp.Join, this=table)
-            return None
+            cross_join = self.expression(exp.Join, this=table) if table else None
+
+            if cross_join and self.JOINS_HAVE_EQUAL_PRECEDENCE:
+                cross_join.set("kind", "CROSS")
+
+            return cross_join
 
         index = self._index
         method, side, kind = self._parse_join_parts()
@@ -3948,6 +3999,7 @@ class Parser(metaclass=_Parser):
         parse_bracket: bool = False,
         is_db_reference: bool = False,
         parse_partition: bool = False,
+        consume_pipe: bool = False,
     ) -> t.Optional[exp.Expression]:
         lateral = self._parse_lateral()
         if lateral:
@@ -3961,7 +4013,7 @@ class Parser(metaclass=_Parser):
         if values:
             return values
 
-        subquery = self._parse_select(table=True)
+        subquery = self._parse_select(table=True, consume_pipe=consume_pipe)
         if subquery:
             if not subquery.args.get("pivots"):
                 subquery.set("pivots", self._parse_pivots())
@@ -4703,7 +4755,9 @@ class Parser(metaclass=_Parser):
 
         return locks
 
-    def parse_set_operation(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
+    def parse_set_operation(
+        self, this: t.Optional[exp.Expression], consume_pipe: bool = False
+    ) -> t.Optional[exp.Expression]:
         start = self._index
         _, side_token, kind_token = self._parse_join_parts()
 
@@ -4746,7 +4800,9 @@ class Parser(metaclass=_Parser):
         if by_name and self._match_texts(("ON", "BY")):
             on_column_list = self._parse_wrapped_csv(self._parse_column)
 
-        expression = self._parse_select(nested=True, parse_set_operation=False)
+        expression = self._parse_select(
+            nested=True, parse_set_operation=False, consume_pipe=consume_pipe
+        )
 
         return self.expression(
             operation,
@@ -5077,12 +5133,20 @@ class Parser(metaclass=_Parser):
         this = self._parse_primary()
 
         if isinstance(this, exp.Literal):
+            literal = this.name
             this = self._parse_column_ops(this)
 
             parser = self.TYPE_LITERAL_PARSERS.get(data_type.this)
             if parser:
                 return parser(self, this, data_type)
 
+            if (
+                self.ZONE_AWARE_TIMESTAMP_CONSTRUCTOR
+                and data_type.is_type(exp.DataType.Type.TIMESTAMP)
+                and TIME_ZONE_RE.search(literal)
+            ):
+                data_type = exp.DataType.build("TIMESTAMPTZ")
+
             return self.expression(exp.Cast, this=this, to=data_type)
 
         # The expressions arg gets set by the parser when we have something like DECIMAL(38, 0)
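Note: the regex only has to decide whether a TIMESTAMP literal carries zone information, i.e. a named zone or a +/- offset appearing after the time portion. A small self-contained check of `TIME_ZONE_RE` itself:

    from sqlglot.parser import TIME_ZONE_RE

    print(bool(TIME_ZONE_RE.search("2025-01-01 12:00:00+08:00")))       # True: offset after ':'
    print(bool(TIME_ZONE_RE.search("2025-01-01 12:00:00 Asia/Tokyo")))  # True: letters after ':'
    print(bool(TIME_ZONE_RE.search("2025-01-01 12:00:00")))             # False: digits only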
@@ -5543,6 +5607,37 @@ class Parser(metaclass=_Parser):
 
         return self._parse_colon_as_variant_extract(this) if self.COLON_IS_VARIANT_EXTRACT else this
 
+    def _parse_paren(self) -> t.Optional[exp.Expression]:
+        if not self._match(TokenType.L_PAREN):
+            return None
+
+        comments = self._prev_comments
+        query = self._parse_select()
+
+        if query:
+            expressions = [query]
+        else:
+            expressions = self._parse_expressions()
+
+        this = self._parse_query_modifiers(seq_get(expressions, 0))
+
+        if not this and self._match(TokenType.R_PAREN, advance=False):
+            this = self.expression(exp.Tuple)
+        elif isinstance(this, exp.UNWRAPPED_QUERIES):
+            this = self._parse_subquery(this=this, parse_alias=False)
+        elif isinstance(this, exp.Subquery):
+            this = self._parse_subquery(this=self._parse_set_operations(this), parse_alias=False)
+        elif len(expressions) > 1 or self._prev.token_type == TokenType.COMMA:
+            this = self.expression(exp.Tuple, expressions=expressions)
+        else:
+            this = self.expression(exp.Paren, this=this)
+
+        if this:
+            this.add_comments(comments)
+
+        self._match_r_paren(expression=this)
+        return this
+
     def _parse_primary(self) -> t.Optional[exp.Expression]:
         if self._match_set(self.PRIMARY_PARSERS):
             token_type = self._prev.token_type
@@ -5561,37 +5656,7 @@ class Parser(metaclass=_Parser):
         if self._match_pair(TokenType.DOT, TokenType.NUMBER):
             return exp.Literal.number(f"0.{self._prev.text}")
 
-        if self._match(TokenType.L_PAREN):
-            comments = self._prev_comments
-            query = self._parse_select()
-
-            if query:
-                expressions = [query]
-            else:
-                expressions = self._parse_expressions()
-
-            this = self._parse_query_modifiers(seq_get(expressions, 0))
-
-            if not this and self._match(TokenType.R_PAREN, advance=False):
-                this = self.expression(exp.Tuple)
-            elif isinstance(this, exp.UNWRAPPED_QUERIES):
-                this = self._parse_subquery(this=this, parse_alias=False)
-            elif isinstance(this, exp.Subquery):
-                this = self._parse_subquery(
-                    this=self._parse_set_operations(this), parse_alias=False
-                )
-            elif len(expressions) > 1 or self._prev.token_type == TokenType.COMMA:
-                this = self.expression(exp.Tuple, expressions=expressions)
-            else:
-                this = self.expression(exp.Paren, this=this)
-
-            if this:
-                this.add_comments(comments)
-
-            self._match_r_paren(expression=this)
-            return this
-
-        return None
+        return self._parse_paren()
 
     def _parse_field(
         self,
@@ -5913,6 +5978,7 @@ class Parser(metaclass=_Parser):
     ) -> exp.GeneratedAsIdentityColumnConstraint | exp.AutoIncrementColumnConstraint:
         start = None
         increment = None
+        order = None
 
         if self._match(TokenType.L_PAREN, advance=False):
             args = self._parse_wrapped_csv(self._parse_bitwise)
@@ -5922,10 +5988,14 @@ class Parser(metaclass=_Parser):
             start = self._parse_bitwise()
             self._match_text_seq("INCREMENT")
             increment = self._parse_bitwise()
+            if self._match_text_seq("ORDER"):
+                order = True
+            elif self._match_text_seq("NOORDER"):
+                order = False
 
         if start and increment:
             return exp.GeneratedAsIdentityColumnConstraint(
-                start=start, increment=increment, this=False
+                start=start, increment=increment, this=False, order=order
             )
 
         return exp.AutoIncrementColumnConstraint()
@@ -8328,12 +8398,18 @@ class Parser(metaclass=_Parser):
         expression.update_positions(token)
         return expression
 
-    def _build_pipe_cte(
-        self, query: exp.Query, expressions: t.List[exp.Expression]
-    ) -> exp.Select:
-        self._pipe_cte_counter += 1
-        new_cte = f"__tmp{self._pipe_cte_counter}"
+    def _build_pipe_cte(
+        self,
+        query: exp.Query,
+        expressions: t.List[exp.Expression],
+        alias_cte: t.Optional[exp.TableAlias] = None,
+    ) -> exp.Select:
+        new_cte: t.Optional[t.Union[str, exp.TableAlias]]
+        if alias_cte:
+            new_cte = alias_cte
+        else:
+            self._pipe_cte_counter += 1
+            new_cte = f"__tmp{self._pipe_cte_counter}"
 
         with_ = query.args.get("with")
         ctes = with_.pop() if with_ else None
@@ -8345,14 +8421,13 @@ class Parser(metaclass=_Parser):
         return new_select.with_(new_cte, as_=query, copy=False)
 
     def _parse_pipe_syntax_select(self, query: exp.Select) -> exp.Select:
-        select = self._parse_select()
+        select = self._parse_select(consume_pipe=False)
         if not select:
            return query
 
-
-
-
-        return self._build_pipe_cte(query, select.expressions)
+        return self._build_pipe_cte(
+            query=query.select(*select.expressions, append=False), expressions=[exp.Star()]
+        )
 
     def _parse_pipe_syntax_limit(self, query: exp.Select) -> exp.Select:
         limit = self._parse_limit()
@@ -8396,12 +8471,12 @@ class Parser(metaclass=_Parser):
             aggregates_or_groups.append(this)
 
         if group_by_exists:
-            query = query.select(*aggregates_or_groups, copy=False).group_by(
+            query.select(*aggregates_or_groups, copy=False).group_by(
                 *[projection.args.get("alias", projection) for projection in aggregates_or_groups],
                 copy=False,
             )
         else:
-            query = query.select(*aggregates_or_groups, copy=False)
+            query.select(*aggregates_or_groups, append=False, copy=False)
 
         if orders:
             return query.order_by(*orders, append=False, copy=False)
@@ -8417,41 +8492,48 @@ class Parser(metaclass=_Parser):
         ):
             query = self._parse_pipe_syntax_aggregate_group_order_by(query)
 
-        return self._build_pipe_cte(query, [exp.Star()])
+        return self._build_pipe_cte(query=query, expressions=[exp.Star()])
 
-    def _parse_pipe_syntax_set_operator(
-        self, query: t.Optional[exp.Query]
-    ) -> t.Optional[exp.Select]:
+    def _parse_pipe_syntax_set_operator(self, query: exp.Query) -> t.Optional[exp.Query]:
         first_setop = self.parse_set_operation(this=query)
-
-        if not first_setop or not query:
+        if not first_setop:
             return None
 
+        def _parse_and_unwrap_query() -> t.Optional[exp.Select]:
+            expr = self._parse_paren()
+            return expr.assert_is(exp.Subquery).unnest() if expr else None
+
         first_setop.this.pop()
-        distinct = first_setop.args.pop("distinct")
-        setops = [first_setop.expression.pop(), *self._parse_expressions()]
 
-
+        setops = [
+            first_setop.expression.pop().assert_is(exp.Subquery).unnest(),
+            *self._parse_csv(_parse_and_unwrap_query),
+        ]
+
+        query = self._build_pipe_cte(query=query, expressions=[exp.Star()])
         with_ = query.args.get("with")
         ctes = with_.pop() if with_ else None
 
         if isinstance(first_setop, exp.Union):
-            query = query.union(*setops,
+            query = query.union(*setops, copy=False, **first_setop.args)
         elif isinstance(first_setop, exp.Except):
-            query = query.except_(*setops,
+            query = query.except_(*setops, copy=False, **first_setop.args)
         else:
-            query = query.intersect(*setops,
+            query = query.intersect(*setops, copy=False, **first_setop.args)
 
         query.set("with", ctes)
 
-        return self._build_pipe_cte(query, [exp.Star()])
+        return self._build_pipe_cte(query=query, expressions=[exp.Star()])
 
-    def _parse_pipe_syntax_join(self, query: exp.
+    def _parse_pipe_syntax_join(self, query: exp.Query) -> t.Optional[exp.Query]:
         join = self._parse_join()
         if not join:
             return None
 
-
+        if isinstance(query, exp.Select):
+            return query.join(join, copy=False)
+
+        return query
 
     def _parse_pipe_syntax_pivot(self, query: exp.Select) -> exp.Select:
         pivots = self._parse_pivots()
@@ -8462,16 +8544,41 @@ class Parser(metaclass=_Parser):
         if from_:
             from_.this.set("pivots", pivots)
 
-        return self._build_pipe_cte(query, [exp.Star()])
+        return self._build_pipe_cte(query=query, expressions=[exp.Star()])
+
+    def _parse_pipe_syntax_extend(self, query: exp.Select) -> exp.Select:
+        self._match_text_seq("EXTEND")
+        query.select(*[exp.Star(), *self._parse_expressions()], append=False, copy=False)
+        return self._build_pipe_cte(query=query, expressions=[exp.Star()])
+
+    def _parse_pipe_syntax_tablesample(self, query: exp.Select) -> exp.Select:
+        sample = self._parse_table_sample()
+
+        with_ = query.args.get("with")
+        if with_:
+            with_.expressions[-1].this.set("sample", sample)
+        else:
+            query.set("sample", sample)
+
+        return query
+
+    def _parse_pipe_syntax_query(self, query: exp.Query) -> t.Optional[exp.Query]:
+        if isinstance(query, exp.Subquery):
+            query = exp.select("*").from_(query, copy=False)
+
+        if not query.args.get("from"):
+            query = exp.select("*").from_(query.subquery(copy=False), copy=False)
 
-    def _parse_pipe_syntax_query(self, query: exp.Select) -> t.Optional[exp.Select]:
         while self._match(TokenType.PIPE_GT):
             start = self._curr
             parser = self.PIPE_SYNTAX_TRANSFORM_PARSERS.get(self._curr.text.upper())
             if not parser:
-
-
-
+                # The set operators (UNION, etc) and the JOIN operator have a few common starting
+                # keywords, making it tricky to disambiguate them without lookahead. The approach
+                # here is to try and parse a set operation and if that fails, then try to parse a
+                # join operator. If that fails as well, then the operator is not supported.
+                parsed_query = self._parse_pipe_syntax_set_operator(query)
+                parsed_query = parsed_query or self._parse_pipe_syntax_join(query)
                 if not parsed_query:
                     self._retreat(start)
                     self.raise_error(f"Unsupported pipe syntax operator: '{start.text.upper()}'.")
@@ -8480,7 +8587,4 @@ class Parser(metaclass=_Parser):
             else:
                 query = parser(self, query)
 
-        if query and not query.selects:
-            return query.select("*", copy=False)
-
         return query
{sqlglot-26.28.1.dist-info → sqlglot-26.30.0.dist-info}/RECORD
CHANGED
@@ -1,15 +1,15 @@
 sqlglot/__init__.py,sha256=za08rtdPh2v7dOpGdNomttlIVGgTrKja7rPd6sQwaTg,5391
 sqlglot/__main__.py,sha256=022c173KqxsiABWTEpUIq_tJUxuNiW7a7ABsxBXqvu8,2069
 sqlglot/_typing.py,sha256=-1HPyr3w5COlSJWqlgt8jhFk2dyMvBuvVBqIX1wyVCM,642
-sqlglot/_version.py,sha256=
+sqlglot/_version.py,sha256=FTiVUaTaSWoKVYxiizmUH00aN8nwszEzKZ3Dh2gUm7s,515
 sqlglot/diff.py,sha256=PtOllQMQa1Sw1-V2Y8eypmDqGujXYPaTOp_WLsWkAWk,17314
 sqlglot/errors.py,sha256=QNKMr-pzLUDR-tuMmn_GK6iMHUIVdb_YSJ_BhGEvuso,2126
-sqlglot/expressions.py,sha256=
-sqlglot/generator.py,sha256=
+sqlglot/expressions.py,sha256=QTf40Yu04Ar6en5_Ncv0bpER5PRlNYtB7S7Ocr2vScw,243330
+sqlglot/generator.py,sha256=E1LjyN49nX9XfK-hysHWvpw7-qtws4xeb85sZi5x3M0,213345
 sqlglot/helper.py,sha256=9nZjFVRBtMKFC3EdzpDQ6jkazFO19po6BF8xHiNGZIo,15111
 sqlglot/jsonpath.py,sha256=dKdI3PNINNGimmSse2IIv-GbPN_3lXncXh_70QH7Lss,7664
 sqlglot/lineage.py,sha256=kXBDSErmZZluZx_kkrMj4MPEOAbkvcbX1tbOW7Bpl-U,15303
-sqlglot/parser.py,sha256=
+sqlglot/parser.py,sha256=xWm01SCq3tSHr7WIVz-h2taaf_JW5JvADsNufE8OAEw,324529
 sqlglot/planner.py,sha256=ql7Li-bWJRcyXzNaZy_n6bQ6B2ZfunEIB8Ztv2xaxq4,14634
 sqlglot/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sqlglot/schema.py,sha256=13H2qKQs27EKdTpDLOvcNnSTDAUbYNKjWtJs4aQCSOA,20509
@@ -18,42 +18,43 @@ sqlglot/time.py,sha256=Q62gv6kL40OiRBF6BMESxKJcMVn7ZLNw7sv8H34z5FI,18400
 sqlglot/tokens.py,sha256=R0B8GQSbQ9GoDc0NlaT5Tc8RjgEOx2IYIkYU5rY8Rg8,48742
 sqlglot/transforms.py,sha256=3jpbHeVTLK9hmQi5f3_vmK-5jZB32_ittCkO7poxCs4,40631
 sqlglot/trie.py,sha256=v27uXMrHfqrXlJ6GmeTSMovsB_3o0ctnlKhdNt7W6fI,2245
-sqlglot/dialects/__init__.py,sha256=
-sqlglot/dialects/athena.py,sha256=
-sqlglot/dialects/bigquery.py,sha256=
-sqlglot/dialects/clickhouse.py,sha256=
+sqlglot/dialects/__init__.py,sha256=Rcmnv_D8xjCrEPObi4R2_cs3upbfc5OrDKMIJhQTt4k,3513
+sqlglot/dialects/athena.py,sha256=gPE9ybRcbd6dVa1mrTFB_eVjsjQG36hErq5EpHyQmXo,6344
+sqlglot/dialects/bigquery.py,sha256=Dw_ZOCv_rCDiUqMBJbmkb6b_R2t9ZemiRmvqu9YLBmY,52756
+sqlglot/dialects/clickhouse.py,sha256=Dc0aXwEgN8b6coXKM6P8zh3IsyrXjBajNGB-cVhnu1Y,56603
 sqlglot/dialects/databricks.py,sha256=8PoaiP8PfiBjpheRiua-rO_HzX2TRUXqc3DnlQ8zYrg,4481
-sqlglot/dialects/dialect.py,sha256
+sqlglot/dialects/dialect.py,sha256=-u8403azEMX3F9KrLQnv7xOU6IaHpxL4pJH733oQlqs,68747
 sqlglot/dialects/doris.py,sha256=eC7Ct-iz7p4Usz659NkelUFhm-GmVolIZy5uaBvgjaA,14397
 sqlglot/dialects/drill.py,sha256=FOh7_KjPx_77pv0DiHKZog0CcmzqeF9_PEmGnJ1ESSM,5825
 sqlglot/dialects/druid.py,sha256=kh3snZtneehNOWqs3XcPjsrhNaRbkCQ8E4hHbWJ1fHM,690
-sqlglot/dialects/duckdb.py,sha256=
+sqlglot/dialects/duckdb.py,sha256=oGCgK0KjwJcCKy-YOZeiQnEo4v7Zc1r5AK0tCXO2VIc,48005
 sqlglot/dialects/dune.py,sha256=gALut-fFfN2qMsr8LvZ1NQK3F3W9z2f4PwMvTMXVVVg,375
-sqlglot/dialects/
+sqlglot/dialects/fabric.py,sha256=RfRvQq7AVcr7yT30rqsTk-QILmhTJHECXZXMOotmL6I,4104
+sqlglot/dialects/hive.py,sha256=yKCsVN4R8pIB2Lmx1YGiSR9b8Me3li6rsGuZrKjHTo4,31771
 sqlglot/dialects/materialize.py,sha256=_DPLPt8YrdQIIXNrGJw1IMcGOoAEJ9NO9X9pDfy4hxs,3494
 sqlglot/dialects/mysql.py,sha256=prZecn3zeoifZX7l54UuLG64ar7I-or_z9lF-rT8bds,49233
-sqlglot/dialects/oracle.py,sha256=
+sqlglot/dialects/oracle.py,sha256=o6On1cYWFt6TpQYKuzo4kCz5vKb8jQr8WSwc619h3Lg,15967
 sqlglot/dialects/postgres.py,sha256=KUyMoLkm1_sZKUbdjn6bjXx9xz7sbEMKa-fl5Mzfrsk,31025
-sqlglot/dialects/presto.py,sha256=
-sqlglot/dialects/prql.py,sha256=
-sqlglot/dialects/redshift.py,sha256=
+sqlglot/dialects/presto.py,sha256=xsbYSc_1-z-jSOsG85z9Pw7pd_V_BX0Dila7KsMsS04,33203
+sqlglot/dialects/prql.py,sha256=fwN-SPEGx-drwf1K0U2MByN-PkW3C_rOgQ3xeJeychg,7908
+sqlglot/dialects/redshift.py,sha256=UwfntKCfPpX63G6ow4vjadFpfmfaKrmFOGLoOuWN8Yg,15406
 sqlglot/dialects/risingwave.py,sha256=hwEOPjMw0ZM_3fjQcBUE00oy6I8V6mzYOOYmcwwS8mw,2898
-sqlglot/dialects/snowflake.py,sha256=
-sqlglot/dialects/spark.py,sha256=
+sqlglot/dialects/snowflake.py,sha256=kpoWQ_w3SJyb605QWSvr-BxBR3pP9tmlDbT4ix8p484,63438
+sqlglot/dialects/spark.py,sha256=bOUSXUoWtLfWaQ9fIjWaw4zLBJY6N7vxajdMbAxLdOk,8307
 sqlglot/dialects/spark2.py,sha256=8er7nHDm5Wc57m9AOxKN0sd_DVzbhAL44H_udlFh9O8,14258
-sqlglot/dialects/sqlite.py,sha256=
+sqlglot/dialects/sqlite.py,sha256=fwqmopeuoupD_2dh2q6rT3UFxWtFHkskZ1OXAYnPT9Q,12483
 sqlglot/dialects/starrocks.py,sha256=fHNgvq5Nz7dI4QUWCTOO5VDOYjasBxRRlcg9TbY0UZE,11235
 sqlglot/dialects/tableau.py,sha256=oIawDzUITxGCWaEMB8OaNMPWhbC3U-2y09pYPm4eazc,2190
 sqlglot/dialects/teradata.py,sha256=xWa-9kSTsT-eM1NePi_oIM1dPHmXW89GLU5Uda3_6Ao,14036
 sqlglot/dialects/trino.py,sha256=wgLsiX1NQvjGny_rgrU1e2r6kK1LD0KgaSdIDrYmjD0,4285
-sqlglot/dialects/tsql.py,sha256=
+sqlglot/dialects/tsql.py,sha256=kMa8hYAXp3D2-g4HzkuzHDsWeXU1WgbyZm2sNl2a8rE,54397
 sqlglot/executor/__init__.py,sha256=FslewzYQtQdDNg_0Ju2UaiP4vo4IMUgkfkmFsYUhcN0,2958
 sqlglot/executor/context.py,sha256=WJHJdYQCOeVXwLw0uSSrWSc25eBMn5Ix108RCvdsKRQ,3386
 sqlglot/executor/env.py,sha256=tQhU5PpTBMcxgZIFddFqxWMNPtHN0vOOz72voncY3KY,8276
 sqlglot/executor/python.py,sha256=09GYRzrPn3lZGfDJY9pbONOvmYxsRyeSWjUiqkSRHGo,16661
 sqlglot/executor/table.py,sha256=xkuJlgLVNYUXsSUaX0zTcnFekldXLLU8LqDyjR5K9wY,4419
 sqlglot/optimizer/__init__.py,sha256=FdAvVz6rQLLkiiH21-SD4RxB5zS3WDeU-s03PZkJ-F4,343
-sqlglot/optimizer/annotate_types.py,sha256
+sqlglot/optimizer/annotate_types.py,sha256=-JkNgc5R1jYh130D8lGv5nYSmPddv4Naf3BZiD5ZuTs,24137
 sqlglot/optimizer/canonicalize.py,sha256=RJpUbWDudjknRMtO_Kf8MGZ5Hv1twpPWac2u5kpV4Vw,7719
 sqlglot/optimizer/eliminate_ctes.py,sha256=fUBM0RUnPrm2sYptEWBux98B7fcx7W-BM1zVqfgDz9c,1448
 sqlglot/optimizer/eliminate_joins.py,sha256=5Whliegc7U8BnS6tlrl9wkeAgyP1NpgCCAPxChHzFfw,5874
@@ -67,13 +68,13 @@ sqlglot/optimizer/optimizer.py,sha256=vXEXDWHvbO-vJmSI7UqJuydM2WrD1xko7rETq2EtVJ
 sqlglot/optimizer/pushdown_predicates.py,sha256=H4lFc9Dsds8W7FOsE4wbK6PHJBu6SjgQU7mVtl4laps,8357
 sqlglot/optimizer/pushdown_projections.py,sha256=7NoK5NAUVYVhs0YnYyo6WuXfaO-BShSwS6lA8Y-ATQ4,6668
 sqlglot/optimizer/qualify.py,sha256=oAPfwub7dEkrlCrsptcJWpLya4BgKhN6M5SwIs_86LY,4002
-sqlglot/optimizer/qualify_columns.py,sha256=
+sqlglot/optimizer/qualify_columns.py,sha256=77aScPakXYaiagnoCWk2qwMxlKuRGsFTAK9sOQuR2vY,40872
 sqlglot/optimizer/qualify_tables.py,sha256=5f5enBAh-bpNB9ewF97W9fx9h1TGXj1Ih5fncvH42sY,6486
-sqlglot/optimizer/scope.py,sha256=
+sqlglot/optimizer/scope.py,sha256=r-2PaO7-woaIWaWrKC88J9eTgdQardNYQ1rIXXaPr1w,30501
 sqlglot/optimizer/simplify.py,sha256=S0Blqg5Mq2KRRWhWz-Eivch9sBjBhg9fRJA6EdBzj2g,50704
 sqlglot/optimizer/unnest_subqueries.py,sha256=kzWUVDlxs8z9nmRx-8U-pHXPtVZhEIwkKqmKhr2QLvc,10908
-sqlglot-26.
-sqlglot-26.
-sqlglot-26.
-sqlglot-26.
-sqlglot-26.
+sqlglot-26.30.0.dist-info/licenses/LICENSE,sha256=AI3__mHZfOtzY3EluR_pIYBm3_pE7TbVx7qaHxoZ114,1065
+sqlglot-26.30.0.dist-info/METADATA,sha256=rIvq32wg6apWdTgqTkYe5mYgGj5XTwMQ8rvqjoTQruI,20732
+sqlglot-26.30.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+sqlglot-26.30.0.dist-info/top_level.txt,sha256=5kRskCGA_gVADF9rSfSzPdLHXqvfMusDYeHePfNY2nQ,8
+sqlglot-26.30.0.dist-info/RECORD,,
{sqlglot-26.28.1.dist-info → sqlglot-26.30.0.dist-info}/WHEEL
File without changes
{sqlglot-26.28.1.dist-info → sqlglot-26.30.0.dist-info}/licenses/LICENSE
File without changes
{sqlglot-26.28.1.dist-info → sqlglot-26.30.0.dist-info}/top_level.txt
File without changes