altimate-code 0.5.1 → 0.5.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +35 -0
- package/README.md +1 -5
- package/bin/altimate +6 -0
- package/bin/altimate-code +6 -0
- package/dbt-tools/bin/altimate-dbt +2 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/altimate/__init__.py +0 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/altimate/fetch_schema.py +35 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/altimate/utils.py +353 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/altimate/validate_sql.py +114 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/__init__.py +178 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/__main__.py +96 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/_typing.py +17 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/__init__.py +3 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/__init__.py +18 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/_typing.py +18 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/column.py +332 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/dataframe.py +866 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/functions.py +1267 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/group.py +59 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/normalize.py +78 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/operations.py +53 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/readwriter.py +108 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/session.py +190 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/transforms.py +9 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/types.py +212 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/util.py +32 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/window.py +134 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/__init__.py +118 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/athena.py +166 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/bigquery.py +1331 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/clickhouse.py +1393 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/databricks.py +131 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/dialect.py +1915 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/doris.py +561 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/drill.py +157 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/druid.py +20 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/duckdb.py +1159 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/dune.py +16 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/hive.py +787 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/materialize.py +94 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/mysql.py +1324 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/oracle.py +378 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/postgres.py +778 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/presto.py +788 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/prql.py +203 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/redshift.py +448 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/risingwave.py +78 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/snowflake.py +1464 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/spark.py +202 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/spark2.py +349 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/sqlite.py +320 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/starrocks.py +343 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/tableau.py +61 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/teradata.py +356 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/trino.py +115 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/tsql.py +1403 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/diff.py +456 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/errors.py +93 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/executor/__init__.py +95 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/executor/context.py +101 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/executor/env.py +246 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/executor/python.py +460 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/executor/table.py +155 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/expressions.py +8870 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/generator.py +4993 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/helper.py +582 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/jsonpath.py +227 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/lineage.py +423 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/__init__.py +11 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/annotate_types.py +589 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/canonicalize.py +222 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/eliminate_ctes.py +43 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/eliminate_joins.py +181 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/eliminate_subqueries.py +189 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/isolate_table_selects.py +50 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/merge_subqueries.py +415 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/normalize.py +200 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/normalize_identifiers.py +64 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/optimize_joins.py +91 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/optimizer.py +94 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/pushdown_predicates.py +222 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/pushdown_projections.py +172 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/qualify.py +104 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/qualify_columns.py +1024 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/qualify_tables.py +155 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/scope.py +904 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/simplify.py +1587 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/unnest_subqueries.py +302 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/parser.py +8501 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/planner.py +463 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/schema.py +588 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/serde.py +68 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/time.py +687 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/tokens.py +1520 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/transforms.py +1020 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/trie.py +81 -0
- package/dbt-tools/dist/altimate_python_packages/dbt_core_integration.py +825 -0
- package/dbt-tools/dist/altimate_python_packages/dbt_utils.py +157 -0
- package/dbt-tools/dist/index.js +23859 -0
- package/package.json +13 -13
- package/postinstall.mjs +42 -0
- package/skills/altimate-setup/SKILL.md +31 -0
package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/postgres.py
ADDED
|
@@ -0,0 +1,778 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import typing as t
|
|
4
|
+
|
|
5
|
+
from sqlglot import exp, generator, parser, tokens, transforms
|
|
6
|
+
from sqlglot.dialects.dialect import (
|
|
7
|
+
DATE_ADD_OR_SUB,
|
|
8
|
+
Dialect,
|
|
9
|
+
JSON_EXTRACT_TYPE,
|
|
10
|
+
any_value_to_max_sql,
|
|
11
|
+
binary_from_function,
|
|
12
|
+
bool_xor_sql,
|
|
13
|
+
datestrtodate_sql,
|
|
14
|
+
build_formatted_time,
|
|
15
|
+
filter_array_using_unnest,
|
|
16
|
+
inline_array_sql,
|
|
17
|
+
json_extract_segments,
|
|
18
|
+
json_path_key_only_name,
|
|
19
|
+
max_or_greatest,
|
|
20
|
+
merge_without_target_sql,
|
|
21
|
+
min_or_least,
|
|
22
|
+
no_last_day_sql,
|
|
23
|
+
no_map_from_entries_sql,
|
|
24
|
+
no_paren_current_date_sql,
|
|
25
|
+
no_pivot_sql,
|
|
26
|
+
no_trycast_sql,
|
|
27
|
+
build_json_extract_path,
|
|
28
|
+
build_timestamp_trunc,
|
|
29
|
+
rename_func,
|
|
30
|
+
sha256_sql,
|
|
31
|
+
struct_extract_sql,
|
|
32
|
+
timestamptrunc_sql,
|
|
33
|
+
timestrtotime_sql,
|
|
34
|
+
trim_sql,
|
|
35
|
+
ts_or_ds_add_cast,
|
|
36
|
+
strposition_sql,
|
|
37
|
+
count_if_to_sum,
|
|
38
|
+
groupconcat_sql,
|
|
39
|
+
Version,
|
|
40
|
+
)
|
|
41
|
+
from sqlglot.generator import unsupported_args
|
|
42
|
+
from sqlglot.helper import is_int, seq_get
|
|
43
|
+
from sqlglot.parser import binary_range_parser
|
|
44
|
+
from sqlglot.tokens import TokenType
|
|
45
|
+
|
|
46
|
+
if t.TYPE_CHECKING:
|
|
47
|
+
from sqlglot.dialects.dialect import DialectType
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
# Suffix appended to an epoch-seconds difference to convert it into the
# requested DATE_DIFF unit. Units coarser than DAY are absent here and are
# handled separately via AGE() in _date_diff_sql.
DATE_DIFF_FACTOR = {
    "MICROSECOND": " * 1000000",
    "MILLISECOND": " * 1000",
    "SECOND": "",
    "MINUTE": " / 60",
    "HOUR": " / 3600",
    "DAY": " / 86400",
}
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def _date_add_sql(kind: str) -> t.Callable[[Postgres.Generator, DATE_ADD_OR_SUB], str]:
    """Build a renderer for date addition/subtraction.

    Produces SQL of the shape ``<expr> <kind> INTERVAL '<amount>' <unit>``,
    where *kind* is the arithmetic operator (e.g. ``+`` or ``-``).
    """

    def func(self: Postgres.Generator, expression: DATE_ADD_OR_SUB) -> str:
        # TS_OR_DS variants need their operand cast to a proper date type first.
        if isinstance(expression, exp.TsOrDsAdd):
            expression = ts_or_ds_add_cast(expression)

        target_sql = self.sql(expression, "this")
        unit = expression.args.get("unit")

        amount = self._simplify_unless_literal(expression.expression)
        if isinstance(amount, exp.Literal):
            # Postgres interval quantities must be written as string literals.
            amount.args["is_string"] = True
        elif amount.is_number:
            amount = exp.Literal.string(amount.to_py())
        else:
            self.unsupported("Cannot add non literal")

        interval_sql = self.sql(exp.Interval(this=amount, unit=unit))
        return f"{target_sql} {kind} {interval_sql}"

    return func
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
def _date_diff_sql(self: Postgres.Generator, expression: exp.DateDiff) -> str:
    """Render DATE_DIFF for Postgres.

    Sub-day units are computed by scaling the epoch-seconds difference
    (see DATE_DIFF_FACTOR); coarser units are derived from AGE().
    """
    unit = expression.text("unit").upper()
    factor = DATE_DIFF_FACTOR.get(unit)

    end = f"CAST({self.sql(expression, 'this')} AS TIMESTAMP)"
    start = f"CAST({self.sql(expression, 'expression')} AS TIMESTAMP)"

    if factor is not None:
        # Fine-grained unit: epoch difference scaled into the unit.
        return f"CAST(EXTRACT(epoch FROM {end} - {start}){factor} AS BIGINT)"

    age = f"AGE({end}, {start})"

    if unit == "WEEK":
        diff = f"EXTRACT(days FROM ({end} - {start})) / 7"
    elif unit == "MONTH":
        diff = f"EXTRACT(year FROM {age}) * 12 + EXTRACT(month FROM {age})"
    elif unit == "QUARTER":
        diff = f"EXTRACT(year FROM {age}) * 4 + EXTRACT(month FROM {age}) / 3"
    elif unit == "YEAR":
        diff = f"EXTRACT(year FROM {age})"
    else:
        # Unknown/absent unit: fall back to the raw AGE() interval.
        diff = age

    return f"CAST({diff} AS BIGINT)"
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
def _substring_sql(self: Postgres.Generator, expression: exp.Substring) -> str:
    """Render SUBSTRING using Postgres' keyword syntax: SUBSTRING(x FROM s FOR n)."""
    target = self.sql(expression, "this")
    start_sql = self.sql(expression, "start")
    length_sql = self.sql(expression, "length")

    pieces = [target]
    if start_sql:
        pieces.append(f" FROM {start_sql}")
    if length_sql:
        pieces.append(f" FOR {length_sql}")

    return f"SUBSTRING({''.join(pieces)})"
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def _auto_increment_to_serial(expression: exp.Expression) -> exp.Expression:
    """Rewrite an AUTO_INCREMENT column definition to the SERIAL type family.

    The AUTO_INCREMENT constraint is removed and the integer type is swapped
    for the corresponding serial type; the expression is mutated in place.
    """
    auto = expression.find(exp.AutoIncrementColumnConstraint)

    if auto:
        # The serial type implies auto-increment, so drop the constraint.
        expression.args["constraints"].remove(auto.parent)
        kind = expression.args["kind"]

        serial_for = {
            exp.DataType.Type.INT: exp.DataType.Type.SERIAL,
            exp.DataType.Type.SMALLINT: exp.DataType.Type.SMALLSERIAL,
            exp.DataType.Type.BIGINT: exp.DataType.Type.BIGSERIAL,
        }
        replacement = serial_for.get(kind.this)
        if replacement is not None:
            kind.replace(exp.DataType(this=replacement))

    return expression
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
def _serial_to_generated(expression: exp.Expression) -> exp.Expression:
    """Expand a SERIAL-family column type into its base integer type plus
    GENERATED AS IDENTITY and NOT NULL constraints (mutates in place)."""
    if not isinstance(expression, exp.ColumnDef):
        return expression
    kind = expression.kind
    if not kind:
        return expression

    base_type_for = {
        exp.DataType.Type.SERIAL: exp.DataType.Type.INT,
        exp.DataType.Type.SMALLSERIAL: exp.DataType.Type.SMALLINT,
        exp.DataType.Type.BIGSERIAL: exp.DataType.Type.BIGINT,
    }
    base = base_type_for.get(kind.this)

    if base is not None:
        expression.args["kind"].replace(exp.DataType(this=base))
        constraints = expression.args["constraints"]
        generated = exp.ColumnConstraint(
            kind=exp.GeneratedAsIdentityColumnConstraint(this=False)
        )
        notnull = exp.ColumnConstraint(kind=exp.NotNullColumnConstraint())

        # Prepend the constraints the serial type implied, avoiding duplicates.
        if notnull not in constraints:
            constraints.insert(0, notnull)
        if generated not in constraints:
            constraints.insert(0, generated)

    return expression
|
|
163
|
+
|
|
164
|
+
|
|
165
|
+
def _build_generate_series(args: t.List) -> exp.ExplodingGenerateSeries:
    """Parse GENERATE_SERIES, normalizing the optional step argument.

    Step values like ``'1 day'`` or ``INTERVAL '1 day'`` are converted into
    ``INTERVAL '1' day``. Postgres also allows two-argument calls, in which
    case the step defaults to 1 and no normalization is needed.
    """
    step = seq_get(args, 2)

    if step is None:
        return exp.ExplodingGenerateSeries.from_arg_list(args)

    if step.is_string:
        # Bare string step, e.g. '1 day'.
        args[2] = exp.to_interval(step.this)
    elif isinstance(step, exp.Interval) and not step.args.get("unit"):
        # INTERVAL literal whose unit is embedded in the string payload.
        args[2] = exp.to_interval(step.this.this)

    return exp.ExplodingGenerateSeries.from_arg_list(args)
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def _build_to_timestamp(args: t.List) -> exp.UnixToTime | exp.StrToTime:
    """Parse TO_TIMESTAMP, which accepts either one double (unix epoch) or
    two text arguments (value + format)."""
    if len(args) != 1:
        # https://www.postgresql.org/docs/current/functions-formatting.html
        return build_formatted_time(exp.StrToTime, "postgres")(args)

    # https://www.postgresql.org/docs/current/functions-datetime.html#FUNCTIONS-DATETIME-TABLE
    return exp.UnixToTime.from_arg_list(args)
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def _json_extract_sql(
    name: str, op: str
) -> t.Callable[[Postgres.Generator, JSON_EXTRACT_TYPE], str]:
    """Build a JSON-extraction renderer named *name*.

    When the expression is flagged ``only_json_types``, subscript indexes
    are emitted unquoted and the operator form *op* is used.
    """

    def _generate(self: Postgres.Generator, expression: JSON_EXTRACT_TYPE) -> str:
        if expression.args.get("only_json_types"):
            segmenter = json_extract_segments(name, quoted_index=False, op=op)
        else:
            segmenter = json_extract_segments(name)
        return segmenter(self, expression)

    return _generate
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
def _build_regexp_replace(args: t.List, dialect: DialectType = None) -> exp.RegexpReplace:
    """Parse REGEXP_REPLACE, peeling off a trailing flags argument when it is
    statically known to be text.

    The signature of REGEXP_REPLACE is:
        regexp_replace(source, pattern, replacement [, start [, N ]] [, flags ])

    Any one of `start`, `N` and `flags` can be column references, meaning that
    unless we can statically see that the last argument is a non-integer string
    (eg. not '0'), then it's not possible to construct the correct AST.
    """
    if len(args) > 3:
        last = args[-1]
        if not is_int(last.name):
            # Untyped trailing argument: run type annotation to decide whether
            # it is the textual flags argument or a numeric start/N.
            if not last.type or last.is_type(exp.DataType.Type.UNKNOWN, exp.DataType.Type.NULL):
                from sqlglot.optimizer.annotate_types import annotate_types

                last = annotate_types(last, dialect=dialect)

            if last.is_type(*exp.DataType.TEXT_TYPES):
                regexp_replace = exp.RegexpReplace.from_arg_list(args[:-1])
                regexp_replace.set("modifiers", last)
                return regexp_replace

    return exp.RegexpReplace.from_arg_list(args)
|
|
220
|
+
|
|
221
|
+
|
|
222
|
+
def _unix_to_time_sql(self: Postgres.Generator, expression: exp.UnixToTime) -> str:
    """Render UNIX_TO_TIME via TO_TIMESTAMP, dividing the epoch value by
    10^scale when a non-second scale is given."""
    scale = expression.args.get("scale")
    timestamp = expression.this

    if scale in (None, exp.UnixToTime.SECONDS):
        return self.func("TO_TIMESTAMP", timestamp, self.format_time(expression))

    scaled = exp.Div(this=timestamp, expression=exp.func("POW", 10, scale))
    return self.func("TO_TIMESTAMP", scaled, self.format_time(expression))
|
|
234
|
+
|
|
235
|
+
|
|
236
|
+
def _build_levenshtein_less_equal(args: t.List) -> exp.Levenshtein:
    """Parse LEVENSHTEIN_LESS_EQUAL.

    Postgres has two signatures, but in both cases max_dist is the last
    argument:
        levenshtein_less_equal(source, target, ins_cost, del_cost, sub_cost, max_d)
        levenshtein_less_equal(source, target, max_d)
    """
    max_dist = args.pop()

    positional = ("this", "expression", "ins_cost", "del_cost", "sub_cost")
    kwargs = {key: seq_get(args, index) for index, key in enumerate(positional)}

    return exp.Levenshtein(max_dist=max_dist, **kwargs)
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
def _levenshtein_sql(self: Postgres.Generator, expression: exp.Levenshtein) -> str:
    """Render LEVENSHTEIN, switching to LEVENSHTEIN_LESS_EQUAL when a
    max_dist bound is present."""
    if expression.args.get("max_dist"):
        return rename_func("LEVENSHTEIN_LESS_EQUAL")(self, expression)

    return rename_func("LEVENSHTEIN")(self, expression)
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
def _versioned_anyvalue_sql(self: Postgres.Generator, expression: exp.AnyValue) -> str:
    """Render ANY_VALUE natively on Postgres >= 16, falling back to MAX
    on older versions.

    See:
        https://www.postgresql.org/docs/16/functions-aggregate.html
        https://www.postgresql.org/about/featurematrix/
    """
    if self.dialect.version >= Version("16.0"):
        return rename_func("ANY_VALUE")(self, expression)

    # ANY_VALUE was only introduced in Postgres 16; MAX is the substitute.
    return any_value_to_max_sql(self, expression)
|
|
266
|
+
|
|
267
|
+
|
|
268
|
+
class Postgres(Dialect):
|
|
269
|
+
INDEX_OFFSET = 1
|
|
270
|
+
TYPED_DIVISION = True
|
|
271
|
+
CONCAT_COALESCE = True
|
|
272
|
+
NULL_ORDERING = "nulls_are_large"
|
|
273
|
+
TIME_FORMAT = "'YYYY-MM-DD HH24:MI:SS'"
|
|
274
|
+
TABLESAMPLE_SIZE_IS_PERCENT = True
|
|
275
|
+
|
|
276
|
+
TIME_MAPPING = {
|
|
277
|
+
"AM": "%p",
|
|
278
|
+
"PM": "%p",
|
|
279
|
+
"d": "%u", # 1-based day of week
|
|
280
|
+
"D": "%u", # 1-based day of week
|
|
281
|
+
"dd": "%d", # day of month
|
|
282
|
+
"DD": "%d", # day of month
|
|
283
|
+
"ddd": "%j", # zero padded day of year
|
|
284
|
+
"DDD": "%j", # zero padded day of year
|
|
285
|
+
"FMDD": "%-d", # - is no leading zero for Python; same for FM in postgres
|
|
286
|
+
"FMDDD": "%-j", # day of year
|
|
287
|
+
"FMHH12": "%-I", # 9
|
|
288
|
+
"FMHH24": "%-H", # 9
|
|
289
|
+
"FMMI": "%-M", # Minute
|
|
290
|
+
"FMMM": "%-m", # 1
|
|
291
|
+
"FMSS": "%-S", # Second
|
|
292
|
+
"HH12": "%I", # 09
|
|
293
|
+
"HH24": "%H", # 09
|
|
294
|
+
"mi": "%M", # zero padded minute
|
|
295
|
+
"MI": "%M", # zero padded minute
|
|
296
|
+
"mm": "%m", # 01
|
|
297
|
+
"MM": "%m", # 01
|
|
298
|
+
"OF": "%z", # utc offset
|
|
299
|
+
"ss": "%S", # zero padded second
|
|
300
|
+
"SS": "%S", # zero padded second
|
|
301
|
+
"TMDay": "%A", # TM is locale dependent
|
|
302
|
+
"TMDy": "%a",
|
|
303
|
+
"TMMon": "%b", # Sep
|
|
304
|
+
"TMMonth": "%B", # September
|
|
305
|
+
"TZ": "%Z", # uppercase timezone name
|
|
306
|
+
"US": "%f", # zero padded microsecond
|
|
307
|
+
"ww": "%U", # 1-based week of year
|
|
308
|
+
"WW": "%U", # 1-based week of year
|
|
309
|
+
"yy": "%y", # 15
|
|
310
|
+
"YY": "%y", # 15
|
|
311
|
+
"yyyy": "%Y", # 2015
|
|
312
|
+
"YYYY": "%Y", # 2015
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
class Tokenizer(tokens.Tokenizer):
|
|
316
|
+
BIT_STRINGS = [("b'", "'"), ("B'", "'")]
|
|
317
|
+
HEX_STRINGS = [("x'", "'"), ("X'", "'")]
|
|
318
|
+
BYTE_STRINGS = [("e'", "'"), ("E'", "'")]
|
|
319
|
+
HEREDOC_STRINGS = ["$"]
|
|
320
|
+
|
|
321
|
+
HEREDOC_TAG_IS_IDENTIFIER = True
|
|
322
|
+
HEREDOC_STRING_ALTERNATIVE = TokenType.PARAMETER
|
|
323
|
+
|
|
324
|
+
KEYWORDS = {
|
|
325
|
+
**tokens.Tokenizer.KEYWORDS,
|
|
326
|
+
"~": TokenType.RLIKE,
|
|
327
|
+
"@@": TokenType.DAT,
|
|
328
|
+
"@>": TokenType.AT_GT,
|
|
329
|
+
"<@": TokenType.LT_AT,
|
|
330
|
+
"|/": TokenType.PIPE_SLASH,
|
|
331
|
+
"||/": TokenType.DPIPE_SLASH,
|
|
332
|
+
"BEGIN": TokenType.COMMAND,
|
|
333
|
+
"BEGIN TRANSACTION": TokenType.BEGIN,
|
|
334
|
+
"BIGSERIAL": TokenType.BIGSERIAL,
|
|
335
|
+
"CONSTRAINT TRIGGER": TokenType.COMMAND,
|
|
336
|
+
"CSTRING": TokenType.PSEUDO_TYPE,
|
|
337
|
+
"DECLARE": TokenType.COMMAND,
|
|
338
|
+
"DO": TokenType.COMMAND,
|
|
339
|
+
"EXEC": TokenType.COMMAND,
|
|
340
|
+
"HSTORE": TokenType.HSTORE,
|
|
341
|
+
"INT8": TokenType.BIGINT,
|
|
342
|
+
"MONEY": TokenType.MONEY,
|
|
343
|
+
"NAME": TokenType.NAME,
|
|
344
|
+
"OID": TokenType.OBJECT_IDENTIFIER,
|
|
345
|
+
"ONLY": TokenType.ONLY,
|
|
346
|
+
"OPERATOR": TokenType.OPERATOR,
|
|
347
|
+
"REFRESH": TokenType.COMMAND,
|
|
348
|
+
"REINDEX": TokenType.COMMAND,
|
|
349
|
+
"RESET": TokenType.COMMAND,
|
|
350
|
+
"REVOKE": TokenType.COMMAND,
|
|
351
|
+
"SERIAL": TokenType.SERIAL,
|
|
352
|
+
"SMALLSERIAL": TokenType.SMALLSERIAL,
|
|
353
|
+
"TEMP": TokenType.TEMPORARY,
|
|
354
|
+
"REGCLASS": TokenType.OBJECT_IDENTIFIER,
|
|
355
|
+
"REGCOLLATION": TokenType.OBJECT_IDENTIFIER,
|
|
356
|
+
"REGCONFIG": TokenType.OBJECT_IDENTIFIER,
|
|
357
|
+
"REGDICTIONARY": TokenType.OBJECT_IDENTIFIER,
|
|
358
|
+
"REGNAMESPACE": TokenType.OBJECT_IDENTIFIER,
|
|
359
|
+
"REGOPER": TokenType.OBJECT_IDENTIFIER,
|
|
360
|
+
"REGOPERATOR": TokenType.OBJECT_IDENTIFIER,
|
|
361
|
+
"REGPROC": TokenType.OBJECT_IDENTIFIER,
|
|
362
|
+
"REGPROCEDURE": TokenType.OBJECT_IDENTIFIER,
|
|
363
|
+
"REGROLE": TokenType.OBJECT_IDENTIFIER,
|
|
364
|
+
"REGTYPE": TokenType.OBJECT_IDENTIFIER,
|
|
365
|
+
"FLOAT": TokenType.DOUBLE,
|
|
366
|
+
}
|
|
367
|
+
KEYWORDS.pop("/*+")
|
|
368
|
+
KEYWORDS.pop("DIV")
|
|
369
|
+
|
|
370
|
+
SINGLE_TOKENS = {
|
|
371
|
+
**tokens.Tokenizer.SINGLE_TOKENS,
|
|
372
|
+
"$": TokenType.HEREDOC_STRING,
|
|
373
|
+
}
|
|
374
|
+
|
|
375
|
+
VAR_SINGLE_TOKENS = {"$"}
|
|
376
|
+
|
|
377
|
+
class Parser(parser.Parser):
|
|
378
|
+
PROPERTY_PARSERS = {
|
|
379
|
+
**parser.Parser.PROPERTY_PARSERS,
|
|
380
|
+
"SET": lambda self: self.expression(exp.SetConfigProperty, this=self._parse_set()),
|
|
381
|
+
}
|
|
382
|
+
PROPERTY_PARSERS.pop("INPUT")
|
|
383
|
+
|
|
384
|
+
FUNCTIONS = {
|
|
385
|
+
**parser.Parser.FUNCTIONS,
|
|
386
|
+
"ASCII": exp.Unicode.from_arg_list,
|
|
387
|
+
"DATE_TRUNC": build_timestamp_trunc,
|
|
388
|
+
"DIV": lambda args: exp.cast(
|
|
389
|
+
binary_from_function(exp.IntDiv)(args), exp.DataType.Type.DECIMAL
|
|
390
|
+
),
|
|
391
|
+
"GENERATE_SERIES": _build_generate_series,
|
|
392
|
+
"JSON_EXTRACT_PATH": build_json_extract_path(exp.JSONExtract),
|
|
393
|
+
"JSON_EXTRACT_PATH_TEXT": build_json_extract_path(exp.JSONExtractScalar),
|
|
394
|
+
"LENGTH": lambda args: exp.Length(this=seq_get(args, 0), encoding=seq_get(args, 1)),
|
|
395
|
+
"MAKE_TIME": exp.TimeFromParts.from_arg_list,
|
|
396
|
+
"MAKE_TIMESTAMP": exp.TimestampFromParts.from_arg_list,
|
|
397
|
+
"NOW": exp.CurrentTimestamp.from_arg_list,
|
|
398
|
+
"REGEXP_REPLACE": _build_regexp_replace,
|
|
399
|
+
"TO_CHAR": build_formatted_time(exp.TimeToStr, "postgres"),
|
|
400
|
+
"TO_DATE": build_formatted_time(exp.StrToDate, "postgres"),
|
|
401
|
+
"TO_TIMESTAMP": _build_to_timestamp,
|
|
402
|
+
"UNNEST": exp.Explode.from_arg_list,
|
|
403
|
+
"SHA256": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(256)),
|
|
404
|
+
"SHA384": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(384)),
|
|
405
|
+
"SHA512": lambda args: exp.SHA2(this=seq_get(args, 0), length=exp.Literal.number(512)),
|
|
406
|
+
"LEVENSHTEIN_LESS_EQUAL": _build_levenshtein_less_equal,
|
|
407
|
+
"JSON_OBJECT_AGG": lambda args: exp.JSONObjectAgg(expressions=args),
|
|
408
|
+
"JSONB_OBJECT_AGG": exp.JSONBObjectAgg.from_arg_list,
|
|
409
|
+
}
|
|
410
|
+
|
|
411
|
+
NO_PAREN_FUNCTIONS = {
|
|
412
|
+
**parser.Parser.NO_PAREN_FUNCTIONS,
|
|
413
|
+
TokenType.CURRENT_SCHEMA: exp.CurrentSchema,
|
|
414
|
+
}
|
|
415
|
+
|
|
416
|
+
FUNCTION_PARSERS = {
|
|
417
|
+
**parser.Parser.FUNCTION_PARSERS,
|
|
418
|
+
"DATE_PART": lambda self: self._parse_date_part(),
|
|
419
|
+
"JSONB_EXISTS": lambda self: self._parse_jsonb_exists(),
|
|
420
|
+
}
|
|
421
|
+
|
|
422
|
+
BITWISE = {
|
|
423
|
+
**parser.Parser.BITWISE,
|
|
424
|
+
TokenType.HASH: exp.BitwiseXor,
|
|
425
|
+
}
|
|
426
|
+
|
|
427
|
+
EXPONENT = {
|
|
428
|
+
TokenType.CARET: exp.Pow,
|
|
429
|
+
}
|
|
430
|
+
|
|
431
|
+
RANGE_PARSERS = {
|
|
432
|
+
**parser.Parser.RANGE_PARSERS,
|
|
433
|
+
TokenType.DAMP: binary_range_parser(exp.ArrayOverlaps),
|
|
434
|
+
TokenType.DAT: lambda self, this: self.expression(
|
|
435
|
+
exp.MatchAgainst, this=self._parse_bitwise(), expressions=[this]
|
|
436
|
+
),
|
|
437
|
+
TokenType.OPERATOR: lambda self, this: self._parse_operator(this),
|
|
438
|
+
}
|
|
439
|
+
|
|
440
|
+
STATEMENT_PARSERS = {
|
|
441
|
+
**parser.Parser.STATEMENT_PARSERS,
|
|
442
|
+
TokenType.END: lambda self: self._parse_commit_or_rollback(),
|
|
443
|
+
}
|
|
444
|
+
|
|
445
|
+
JSON_ARROWS_REQUIRE_JSON_TYPE = True
|
|
446
|
+
|
|
447
|
+
COLUMN_OPERATORS = {
|
|
448
|
+
**parser.Parser.COLUMN_OPERATORS,
|
|
449
|
+
TokenType.ARROW: lambda self, this, path: build_json_extract_path(
|
|
450
|
+
exp.JSONExtract, arrow_req_json_type=self.JSON_ARROWS_REQUIRE_JSON_TYPE
|
|
451
|
+
)([this, path]),
|
|
452
|
+
TokenType.DARROW: lambda self, this, path: build_json_extract_path(
|
|
453
|
+
exp.JSONExtractScalar, arrow_req_json_type=self.JSON_ARROWS_REQUIRE_JSON_TYPE
|
|
454
|
+
)([this, path]),
|
|
455
|
+
}
|
|
456
|
+
|
|
457
|
+
def _parse_operator(self, this: t.Optional[exp.Expression]) -> t.Optional[exp.Expression]:
|
|
458
|
+
while True:
|
|
459
|
+
if not self._match(TokenType.L_PAREN):
|
|
460
|
+
break
|
|
461
|
+
|
|
462
|
+
op = ""
|
|
463
|
+
while self._curr and not self._match(TokenType.R_PAREN):
|
|
464
|
+
op += self._curr.text
|
|
465
|
+
self._advance()
|
|
466
|
+
|
|
467
|
+
this = self.expression(
|
|
468
|
+
exp.Operator,
|
|
469
|
+
comments=self._prev_comments,
|
|
470
|
+
this=this,
|
|
471
|
+
operator=op,
|
|
472
|
+
expression=self._parse_bitwise(),
|
|
473
|
+
)
|
|
474
|
+
|
|
475
|
+
if not self._match(TokenType.OPERATOR):
|
|
476
|
+
break
|
|
477
|
+
|
|
478
|
+
return this
|
|
479
|
+
|
|
480
|
+
def _parse_date_part(self) -> exp.Expression:
|
|
481
|
+
part = self._parse_type()
|
|
482
|
+
self._match(TokenType.COMMA)
|
|
483
|
+
value = self._parse_bitwise()
|
|
484
|
+
|
|
485
|
+
if part and isinstance(part, (exp.Column, exp.Literal)):
|
|
486
|
+
part = exp.var(part.name)
|
|
487
|
+
|
|
488
|
+
return self.expression(exp.Extract, this=part, expression=value)
|
|
489
|
+
|
|
490
|
+
def _parse_unique_key(self) -> t.Optional[exp.Expression]:
|
|
491
|
+
return None
|
|
492
|
+
|
|
493
|
+
def _parse_jsonb_exists(self) -> exp.JSONBExists:
|
|
494
|
+
return self.expression(
|
|
495
|
+
exp.JSONBExists,
|
|
496
|
+
this=self._parse_bitwise(),
|
|
497
|
+
path=self._match(TokenType.COMMA)
|
|
498
|
+
and self.dialect.to_json_path(self._parse_bitwise()),
|
|
499
|
+
)
|
|
500
|
+
|
|
501
|
+
def _parse_generated_as_identity(
|
|
502
|
+
self,
|
|
503
|
+
) -> (
|
|
504
|
+
exp.GeneratedAsIdentityColumnConstraint
|
|
505
|
+
| exp.ComputedColumnConstraint
|
|
506
|
+
| exp.GeneratedAsRowColumnConstraint
|
|
507
|
+
):
|
|
508
|
+
this = super()._parse_generated_as_identity()
|
|
509
|
+
|
|
510
|
+
if self._match_text_seq("STORED"):
|
|
511
|
+
this = self.expression(exp.ComputedColumnConstraint, this=this.expression)
|
|
512
|
+
|
|
513
|
+
return this
|
|
514
|
+
|
|
515
|
+
def _parse_user_defined_type(
|
|
516
|
+
self, identifier: exp.Identifier
|
|
517
|
+
) -> t.Optional[exp.Expression]:
|
|
518
|
+
udt_type: exp.Identifier | exp.Dot = identifier
|
|
519
|
+
|
|
520
|
+
while self._match(TokenType.DOT):
|
|
521
|
+
part = self._parse_id_var()
|
|
522
|
+
if part:
|
|
523
|
+
udt_type = exp.Dot(this=udt_type, expression=part)
|
|
524
|
+
|
|
525
|
+
return exp.DataType.build(udt_type, udt=True)
|
|
526
|
+
|
|
527
|
+
class Generator(generator.Generator):
    # Feature flags consumed by the base generator; each toggles a piece of
    # dialect-specific SQL emission behavior for Postgres.
    SINGLE_STRING_INTERVAL = True
    RENAME_TABLE_WITH_DB = False
    LOCKING_READS_SUPPORTED = True
    JOIN_HINTS = False
    TABLE_HINTS = False
    QUERY_HINTS = False
    NVL2_SUPPORTED = False
    # Positional parameters are rendered as $1, $2, ...
    PARAMETER_TOKEN = "$"
    TABLESAMPLE_SIZE_IS_ROWS = False
    TABLESAMPLE_SEED_KEYWORD = "REPEATABLE"
    SUPPORTS_SELECT_INTO = True
    JSON_TYPE_REQUIRED_FOR_EXTRACTION = True
    SUPPORTS_UNLOGGED_TABLES = True
    LIKE_PROPERTY_INSIDE_SCHEMA = True
    MULTI_ARG_DISTINCT = False
    CAN_IMPLEMENT_ARRAY_ANY = True
    SUPPORTS_WINDOW_EXCLUDE = True
    COPY_HAS_INTO_KEYWORD = False
    ARRAY_CONCAT_IS_VAR_LEN = False
    SUPPORTS_MEDIAN = False
    ARRAY_SIZE_DIM_REQUIRED = True

    # JSON path rendering supports only plain keys, the root and subscripts;
    # other path parts fall back to the base generator's unsupported handling.
    SUPPORTED_JSON_PATH_PARTS = {
        exp.JSONPathKey,
        exp.JSONPathRoot,
        exp.JSONPathSubscript,
    }

    # Type renames applied when emitting DDL and casts; entries extend (and
    # override) the base generator's mapping. Several binary-ish types all
    # collapse to BYTEA, Postgres's single binary type.
    TYPE_MAPPING = {
        **generator.Generator.TYPE_MAPPING,
        exp.DataType.Type.TINYINT: "SMALLINT",
        exp.DataType.Type.FLOAT: "REAL",
        exp.DataType.Type.DOUBLE: "DOUBLE PRECISION",
        exp.DataType.Type.BINARY: "BYTEA",
        exp.DataType.Type.VARBINARY: "BYTEA",
        exp.DataType.Type.ROWVERSION: "BYTEA",
        exp.DataType.Type.DATETIME: "TIMESTAMP",
        exp.DataType.Type.TIMESTAMPNTZ: "TIMESTAMP",
        exp.DataType.Type.BLOB: "BYTEA",
    }
|
|
568
|
+
|
|
569
|
+
# Expression -> SQL rendering overrides for Postgres. Extends the base
# generator's TRANSFORMS; keys listed later override earlier ones.
#
# Fix: the original dict literal listed exp.UnixToTime twice; the first
# entry (a TO_TIMESTAMP lambda) was dead code because a later duplicate
# key (_unix_to_time_sql) silently shadowed it. The dead entry is removed;
# the effective mapping is unchanged.
TRANSFORMS = {
    **generator.Generator.TRANSFORMS,
    exp.AnyValue: _versioned_anyvalue_sql,
    exp.ArrayConcat: lambda self, e: self.arrayconcat_sql(e, name="ARRAY_CAT"),
    exp.ArrayFilter: filter_array_using_unnest,
    exp.BitwiseXor: lambda self, e: self.binary(e, "#"),
    exp.ColumnDef: transforms.preprocess([_auto_increment_to_serial, _serial_to_generated]),
    exp.CurrentDate: no_paren_current_date_sql,
    exp.CurrentTimestamp: lambda *_: "CURRENT_TIMESTAMP",
    exp.CurrentUser: lambda *_: "CURRENT_USER",
    exp.DateAdd: _date_add_sql("+"),
    exp.DateDiff: _date_diff_sql,
    exp.DateStrToDate: datestrtodate_sql,
    exp.DateSub: _date_add_sql("-"),
    exp.Explode: rename_func("UNNEST"),
    exp.ExplodingGenerateSeries: rename_func("GENERATE_SERIES"),
    exp.GroupConcat: lambda self, e: groupconcat_sql(
        self, e, func_name="STRING_AGG", within_group=False
    ),
    exp.IntDiv: rename_func("DIV"),
    exp.JSONExtract: _json_extract_sql("JSON_EXTRACT_PATH", "->"),
    exp.JSONExtractScalar: _json_extract_sql("JSON_EXTRACT_PATH_TEXT", "->>"),
    exp.JSONBExtract: lambda self, e: self.binary(e, "#>"),
    exp.JSONBExtractScalar: lambda self, e: self.binary(e, "#>>"),
    exp.JSONBContains: lambda self, e: self.binary(e, "?"),
    exp.ParseJSON: lambda self, e: self.sql(exp.cast(e.this, exp.DataType.Type.JSON)),
    exp.JSONPathKey: json_path_key_only_name,
    exp.JSONPathRoot: lambda *_: "",
    exp.JSONPathSubscript: lambda self, e: self.json_path_part(e.this),
    exp.LastDay: no_last_day_sql,
    exp.LogicalOr: rename_func("BOOL_OR"),
    exp.LogicalAnd: rename_func("BOOL_AND"),
    exp.Max: max_or_greatest,
    exp.MapFromEntries: no_map_from_entries_sql,
    exp.Min: min_or_least,
    exp.Merge: merge_without_target_sql,
    exp.PartitionedByProperty: lambda self, e: f"PARTITION BY {self.sql(e, 'this')}",
    exp.PercentileCont: transforms.preprocess(
        [transforms.add_within_group_for_percentiles]
    ),
    exp.PercentileDisc: transforms.preprocess(
        [transforms.add_within_group_for_percentiles]
    ),
    exp.Pivot: no_pivot_sql,
    exp.Rand: rename_func("RANDOM"),
    exp.RegexpLike: lambda self, e: self.binary(e, "~"),
    exp.RegexpILike: lambda self, e: self.binary(e, "~*"),
    exp.Select: transforms.preprocess(
        [
            transforms.eliminate_semi_and_anti_joins,
            transforms.eliminate_qualify,
        ]
    ),
    exp.SHA2: sha256_sql,
    exp.StrPosition: lambda self, e: strposition_sql(self, e, func_name="POSITION"),
    exp.StrToDate: lambda self, e: self.func("TO_DATE", e.this, self.format_time(e)),
    exp.StrToTime: lambda self, e: self.func("TO_TIMESTAMP", e.this, self.format_time(e)),
    exp.StructExtract: struct_extract_sql,
    exp.Substring: _substring_sql,
    exp.TimeFromParts: rename_func("MAKE_TIME"),
    exp.TimestampFromParts: rename_func("MAKE_TIMESTAMP"),
    exp.TimestampTrunc: timestamptrunc_sql(zone=True),
    exp.TimeStrToTime: timestrtotime_sql,
    exp.TimeToStr: lambda self, e: self.func("TO_CHAR", e.this, self.format_time(e)),
    exp.ToChar: lambda self, e: self.function_fallback_sql(e),
    exp.Trim: trim_sql,
    exp.TryCast: no_trycast_sql,
    exp.TsOrDsAdd: _date_add_sql("+"),
    exp.TsOrDsDiff: _date_diff_sql,
    exp.UnixToTime: _unix_to_time_sql,
    exp.Uuid: lambda *_: "GEN_RANDOM_UUID()",
    exp.TimeToUnix: lambda self, e: self.func(
        "DATE_PART", exp.Literal.string("epoch"), e.this
    ),
    exp.VariancePop: rename_func("VAR_POP"),
    exp.Variance: rename_func("VAR_SAMP"),
    exp.Xor: bool_xor_sql,
    exp.Unicode: rename_func("ASCII"),
    exp.Levenshtein: _levenshtein_sql,
    exp.JSONObjectAgg: rename_func("JSON_OBJECT_AGG"),
    exp.JSONBObjectAgg: rename_func("JSONB_OBJECT_AGG"),
    exp.CountIf: count_if_to_sum,
}

# Postgres attaches comments via separate COMMENT ON statements; drop the
# inherited inline transform so commentcolumnconstraint_sql can report the
# construct as unsupported instead.
TRANSFORMS.pop(exp.CommentColumnConstraint)
|
|
655
|
+
|
|
656
|
+
# Where each property may appear in generated DDL; extends the base mapping.
# TRANSIENT and VOLATILE table properties have no Postgres equivalent and are
# marked unsupported so the generator can warn instead of emitting them.
PROPERTIES_LOCATION = {
    **generator.Generator.PROPERTIES_LOCATION,
    exp.PartitionedByProperty: exp.Properties.Location.POST_SCHEMA,
    exp.TransientProperty: exp.Properties.Location.UNSUPPORTED,
    exp.VolatileProperty: exp.Properties.Location.UNSUPPORTED,
}
|
|
662
|
+
|
|
663
|
+
def schemacommentproperty_sql(self, expression: exp.SchemaCommentProperty) -> str:
    """Drop table COMMENT properties: Postgres CREATE statements cannot carry them.

    Records an unsupported-feature warning and emits nothing.
    """
    self.unsupported("Table comments are not supported in the CREATE statement")
    return ""
|
|
666
|
+
|
|
667
|
+
def commentcolumnconstraint_sql(self, expression: exp.CommentColumnConstraint) -> str:
    """Drop column COMMENT constraints: Postgres CREATE statements cannot carry them.

    Records an unsupported-feature warning and emits nothing.
    """
    self.unsupported("Column comments are not supported in the CREATE statement")
    return ""
|
|
670
|
+
|
|
671
|
+
def unnest_sql(self, expression: exp.Unnest) -> str:
    """Render UNNEST, special-casing two single-argument forms.

    1. UNNEST(GENERATE_DATE_ARRAY(...)) becomes a GENERATE_SERIES call;
       when used as a table source it is wrapped in a subquery that casts
       the series values back to dates.
    2. UNNEST of an array<json> value becomes JSON_ARRAY_ELEMENTS.

    Anything else falls through to the base generator.
    """
    if len(expression.expressions) == 1:
        arg = expression.expressions[0]
        if isinstance(arg, exp.GenerateDateArray):
            generate_series: exp.Expression = exp.GenerateSeries(**arg.args)
            # As a FROM/JOIN source, wrap in SELECT value::date so callers see
            # dates rather than the raw series values.
            if isinstance(expression.parent, (exp.From, exp.Join)):
                generate_series = (
                    exp.select("value::date")
                    .from_(exp.Table(this=generate_series).as_("_t", table=["value"]))
                    .subquery(expression.args.get("alias") or "_unnested_generate_series")
                )
            return self.sql(generate_series)

        # Imported lazily, presumably to avoid a circular import at module
        # load time — TODO confirm.
        from sqlglot.optimizer.annotate_types import annotate_types

        this = annotate_types(arg, dialect=self.dialect)
        if this.is_type("array<json>"):
            # Strip any casts so JSON_ARRAY_ELEMENTS receives the bare value.
            while isinstance(this, exp.Cast):
                this = this.this

            arg_as_json = self.sql(exp.cast(this, exp.DataType.Type.JSON))
            alias = self.sql(expression, "alias")
            alias = f" AS {alias}" if alias else ""

            # JSON_ARRAY_ELEMENTS has no way to expose an ordinality offset.
            if expression.args.get("offset"):
                self.unsupported("Unsupported JSON_ARRAY_ELEMENTS with offset")

            return f"JSON_ARRAY_ELEMENTS({arg_as_json}){alias}"

    return super().unnest_sql(expression)
|
|
701
|
+
|
|
702
|
+
def bracket_sql(self, expression: exp.Bracket) -> str:
    """Render subscript access, parenthesizing array literals first.

    Postgres rejects ARRAY[1, 2, 3][3]; the literal must be wrapped as
    (ARRAY[1, 2, 3])[3] before subscripting.
    """
    target = expression.this
    if isinstance(target, exp.Array):
        expression.set("this", exp.paren(target, copy=False))

    return super().bracket_sql(expression)
|
|
708
|
+
|
|
709
|
+
def matchagainst_sql(self, expression: exp.MatchAgainst) -> str:
    """Render MATCH ... AGAINST as @@ full-text matches joined with OR.

    Multiple clauses are parenthesized as a group; a single clause is
    emitted bare.
    """
    target = self.sql(expression, "this")
    clauses = [f"{self.sql(query)} @@ {target}" for query in expression.expressions]
    joined = " OR ".join(clauses)
    return joined if len(clauses) <= 1 else f"({joined})"
|
|
714
|
+
|
|
715
|
+
def alterset_sql(self, expression: exp.AlterSet) -> str:
    """Render an ALTER ... SET clause.

    Exactly one of the pieces (parenthesized expression list, ACCESS
    METHOD, TABLESPACE, or a bare option) is expected to be present;
    absent pieces render as empty strings.
    """
    parts = []

    set_exprs = self.expressions(expression, flat=True)
    parts.append(f"({set_exprs})" if set_exprs else "")

    method = self.sql(expression, "access_method")
    parts.append(f"ACCESS METHOD {method}" if method else "")

    space = self.sql(expression, "tablespace")
    parts.append(f"TABLESPACE {space}" if space else "")

    parts.append(self.sql(expression, "option"))

    return "SET " + "".join(parts)
|
|
726
|
+
|
|
727
|
+
def datatype_sql(self, expression: exp.DataType) -> str:
    """Render a data type with Postgres-specific array and float handling."""
    if expression.is_type(exp.DataType.Type.ARRAY):
        if not expression.expressions:
            return "ARRAY"
        # Typed arrays render as inner_type[sizes].
        sizes = self.expressions(expression, key="values", flat=True)
        inner = self.expressions(expression, flat=True)
        return f"{inner}[{sizes}]"

    is_float_family = expression.is_type(
        exp.DataType.Type.DOUBLE, exp.DataType.Type.FLOAT
    )
    if is_float_family and expression.expressions:
        # Postgres doesn't support precision for REAL and DOUBLE PRECISION
        # types, so a parameterized float falls back to FLOAT(p).
        return f"FLOAT({self.expressions(expression, flat=True)})"

    return super().datatype_sql(expression)
|
|
742
|
+
|
|
743
|
+
def cast_sql(self, expression: exp.Cast, safe_prefix: t.Optional[str] = None) -> str:
    """Render a CAST, dropping the redundant decimal cast around DIV().

    Postgres casts DIV() to decimal during transpilation, but when
    roundtripping, that wrapper is superfluous and is elided.
    """
    inner = expression.this

    is_redundant_div_cast = isinstance(inner, exp.IntDiv) and expression.to == exp.DataType.build(
        "decimal"
    )
    if is_redundant_div_cast:
        return self.sql(inner)

    return super().cast_sql(expression, safe_prefix=safe_prefix)
|
|
751
|
+
|
|
752
|
+
def array_sql(self, expression: exp.Array) -> str:
    """Render an array constructor.

    ARRAY(SELECT ...) keeps the subquery form; otherwise the elements are
    emitted as an inline ARRAY[...] literal.
    """
    keyword = self.normalize_func("ARRAY")
    first = seq_get(expression.expressions, 0)

    if isinstance(first, exp.Select):
        return f"{keyword}({self.sql(first)})"

    return f"{keyword}{inline_array_sql(self, expression)}"
|
|
760
|
+
|
|
761
|
+
def computedcolumnconstraint_sql(self, expression: exp.ComputedColumnConstraint) -> str:
    """Render a computed column as Postgres generated-column syntax."""
    computed = self.sql(expression, 'this')
    return f"GENERATED ALWAYS AS ({computed}) STORED"
|
|
763
|
+
|
|
764
|
+
def isascii_sql(self, expression: exp.IsAscii) -> str:
    """Render IS_ASCII as a regex match against the POSIX [:ascii:] class."""
    target = self.sql(expression.this)
    return f"({target} ~ '^[[:ascii:]]*$')"
|
|
766
|
+
|
|
767
|
+
@unsupported_args("this")
|
|
768
|
+
def currentschema_sql(self, expression: exp.CurrentSchema) -> str:
|
|
769
|
+
return "CURRENT_SCHEMA"
|
|
770
|
+
|
|
771
|
+
def interval_sql(self, expression: exp.Interval) -> str:
    """Render an INTERVAL, rewriting quarter units into months.

    Postgres intervals have no QUARTER unit, so a literal quantity of
    quarters is converted in place to 3x that many MONTHs before
    delegating to the base generator.
    """
    unit = expression.text("unit").lower()

    # startswith covers both "quarter" and "quarters"; only literal
    # quantities can be converted numerically.
    if unit.startswith("quarter") and isinstance(expression.this, exp.Literal):
        # NOTE: mutates the AST in place via replace().
        expression.this.replace(exp.Literal.number(int(expression.this.to_py()) * 3))
        expression.args["unit"].replace(exp.var("MONTH"))

    return super().interval_sql(expression)