altimate-code 0.5.2 → 0.5.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +12 -0
- package/bin/altimate +6 -0
- package/bin/altimate-code +6 -0
- package/dbt-tools/bin/altimate-dbt +2 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/altimate/__init__.py +0 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/altimate/fetch_schema.py +35 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/altimate/utils.py +353 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/altimate/validate_sql.py +114 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/__init__.py +178 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/__main__.py +96 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/_typing.py +17 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/__init__.py +3 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/__init__.py +18 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/_typing.py +18 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/column.py +332 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/dataframe.py +866 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/functions.py +1267 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/group.py +59 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/normalize.py +78 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/operations.py +53 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/readwriter.py +108 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/session.py +190 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/transforms.py +9 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/types.py +212 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/util.py +32 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/window.py +134 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/__init__.py +118 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/athena.py +166 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/bigquery.py +1331 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/clickhouse.py +1393 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/databricks.py +131 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/dialect.py +1915 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/doris.py +561 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/drill.py +157 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/druid.py +20 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/duckdb.py +1159 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/dune.py +16 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/hive.py +787 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/materialize.py +94 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/mysql.py +1324 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/oracle.py +378 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/postgres.py +778 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/presto.py +788 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/prql.py +203 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/redshift.py +448 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/risingwave.py +78 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/snowflake.py +1464 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/spark.py +202 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/spark2.py +349 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/sqlite.py +320 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/starrocks.py +343 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/tableau.py +61 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/teradata.py +356 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/trino.py +115 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/tsql.py +1403 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/diff.py +456 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/errors.py +93 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/executor/__init__.py +95 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/executor/context.py +101 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/executor/env.py +246 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/executor/python.py +460 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/executor/table.py +155 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/expressions.py +8870 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/generator.py +4993 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/helper.py +582 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/jsonpath.py +227 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/lineage.py +423 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/__init__.py +11 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/annotate_types.py +589 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/canonicalize.py +222 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/eliminate_ctes.py +43 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/eliminate_joins.py +181 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/eliminate_subqueries.py +189 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/isolate_table_selects.py +50 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/merge_subqueries.py +415 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/normalize.py +200 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/normalize_identifiers.py +64 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/optimize_joins.py +91 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/optimizer.py +94 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/pushdown_predicates.py +222 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/pushdown_projections.py +172 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/qualify.py +104 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/qualify_columns.py +1024 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/qualify_tables.py +155 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/scope.py +904 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/simplify.py +1587 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/unnest_subqueries.py +302 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/parser.py +8501 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/planner.py +463 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/schema.py +588 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/serde.py +68 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/time.py +687 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/tokens.py +1520 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/transforms.py +1020 -0
- package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/trie.py +81 -0
- package/dbt-tools/dist/altimate_python_packages/dbt_core_integration.py +825 -0
- package/dbt-tools/dist/altimate_python_packages/dbt_utils.py +157 -0
- package/dbt-tools/dist/index.js +23859 -0
- package/package.json +13 -13
- package/postinstall.mjs +42 -0
- package/skills/altimate-setup/SKILL.md +31 -0
|
@@ -0,0 +1,460 @@
|
|
|
1
|
+
import ast
|
|
2
|
+
import collections
|
|
3
|
+
import itertools
|
|
4
|
+
import math
|
|
5
|
+
|
|
6
|
+
from sqlglot import exp, generator, planner, tokens
|
|
7
|
+
from sqlglot.dialects.dialect import Dialect, inline_array_sql
|
|
8
|
+
from sqlglot.errors import ExecuteError
|
|
9
|
+
from sqlglot.executor.context import Context
|
|
10
|
+
from sqlglot.executor.env import ENV
|
|
11
|
+
from sqlglot.executor.table import RowReader, Table
|
|
12
|
+
from sqlglot.helper import csv_reader, subclasses
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class PythonExecutor:
    """Executes a sqlglot ``planner.Plan`` entirely in Python.

    Each plan step (scan, join, aggregate, sort, set operation) is evaluated
    over in-memory ``Table`` objects.  SQL expressions are rendered to Python
    source by the ``Python`` dialect below and compiled to bytecode, then
    evaluated against a ``Context`` environment.
    """

    def __init__(self, env=None, tables=None):
        # Generator that renders expression trees as evaluable Python source.
        self.generator = Python().generator(identify=True, comments=False)
        # Evaluation environment: built-in helpers (ENV) overridable via `env`.
        self.env = {**ENV, **(env or {})}
        self.tables = tables or {}

    def execute(self, plan):
        """Run every step of `plan` in dependency order; return the root's table."""
        finished = set()
        queue = set(plan.leaves)
        contexts = {}

        while queue:
            node = queue.pop()
            try:
                # Merge the tables produced by all dependencies into one context.
                context = self.context(
                    {
                        name: table
                        for dep in node.dependencies
                        for name, table in contexts[dep].tables.items()
                    }
                )

                if isinstance(node, planner.Scan):
                    contexts[node] = self.scan(node, context)
                elif isinstance(node, planner.Aggregate):
                    contexts[node] = self.aggregate(node, context)
                elif isinstance(node, planner.Join):
                    contexts[node] = self.join(node, context)
                elif isinstance(node, planner.Sort):
                    contexts[node] = self.sort(node, context)
                elif isinstance(node, planner.SetOperation):
                    contexts[node] = self.set_operation(node, context)
                else:
                    raise NotImplementedError

                finished.add(node)

                # Schedule dependents whose inputs are now all available.
                for dep in node.dependents:
                    if all(d in contexts for d in dep.dependencies):
                        queue.add(dep)

                # Release contexts no remaining step will read.
                for dep in node.dependencies:
                    if all(d in finished for d in dep.dependents):
                        contexts.pop(dep)
            except Exception as e:
                raise ExecuteError(f"Step '{node.id}' failed: {e}") from e

        root = plan.root
        return contexts[root].tables[root.name]

    def generate(self, expression):
        """Convert a SQL expression into literal Python code and compile it into bytecode."""
        if not expression:
            return None

        sql = self.generator.generate(expression)
        # The generated source doubles as the "filename" for tracebacks.
        return compile(sql, sql, "eval", optimize=2)

    def generate_tuple(self, expressions):
        """Convert an array of SQL expressions into tuple of Python byte code."""
        if not expressions:
            return tuple()
        return tuple(self.generate(expression) for expression in expressions)

    def context(self, tables):
        """Wrap a name->Table mapping in a Context bound to this executor's env."""
        return Context(tables, env=self.env)

    def table(self, expressions):
        """Build an empty Table whose columns are named after `expressions`."""
        return Table(
            expression.alias_or_name if isinstance(expression, exp.Expression) else expression
            for expression in expressions
        )

    def scan(self, step, context):
        """Evaluate a Scan step: read rows from a table, CSV, or static source."""
        source = step.source

        if source and isinstance(source, exp.Expression):
            source = source.name or source.alias

        if source is None:
            # No source at all, e.g. SELECT 1 — a single synthetic row.
            context, table_iter = self.static()
        elif source in context:
            if not step.projections and not step.condition:
                # Pass-through: reuse the already-materialized table.
                return self.context({step.name: context.tables[source]})
            table_iter = context.table_iter(source)
        elif isinstance(step.source, exp.Table) and isinstance(step.source.this, exp.ReadCSV):
            table_iter = self.scan_csv(step)
            # scan_csv is a generator that first yields its context.
            context = next(table_iter)
        else:
            context, table_iter = self.scan_table(step)

        return self.context({step.name: self._project_and_filter(context, step, table_iter)})

    def _project_and_filter(self, context, step, table_iter):
        """Apply the step's condition and projections to each row, honoring `step.limit`."""
        sink = self.table(step.projections if step.projections else context.columns)
        condition = self.generate(step.condition)
        projections = self.generate_tuple(step.projections)

        for reader in table_iter:
            # step.limit may be math.inf, in which case this never trips.
            if len(sink) >= step.limit:
                break

            if condition and not context.eval(condition):
                continue

            if projections:
                sink.append(context.eval_tuple(projections))
            else:
                sink.append(reader.row)

        return sink

    def static(self):
        """Return an empty context plus a single empty row (for sourceless SELECTs)."""
        return self.context({}), [RowReader(())]

    def scan_table(self, step):
        """Look up a named table in this executor's schema and iterate it."""
        table = self.tables.find(step.source)
        context = self.context({step.source.alias_or_name: table})
        return context, iter(table)

    def scan_csv(self, step):
        """Generator: yield the scan context first, then a row reader per CSV row.

        Column types are inferred from the first data row via ast.literal_eval.
        """
        alias = step.source.alias
        source = step.source.this

        with csv_reader(source) as reader:
            columns = next(reader)
            table = Table(columns)
            context = self.context({alias: table})
            yield context
            types = []
            for row in reader:
                if not types:
                    for v in row:
                        try:
                            types.append(type(ast.literal_eval(v)))
                        except (ValueError, SyntaxError):
                            types.append(str)

                # We can't cast empty values ('') to non-string types, so we convert them to None instead
                context.set_row(
                    tuple(None if (t is not str and v == "") else t(v) for t, v in zip(types, row))
                )
                yield context.table.reader

    def join(self, step, context):
        """Evaluate a Join step, chaining hash or nested-loop joins over all sources."""
        source = step.source_name

        source_table = context.tables[source]
        source_context = self.context({source: source_table})
        # Track which slice of the combined row belongs to each source table.
        column_ranges = {source: range(0, len(source_table.columns))}

        for name, join in step.joins.items():
            table = context.tables[name]
            start = max(r.stop for r in column_ranges.values())
            column_ranges[name] = range(start, len(table.columns) + start)
            join_context = self.context({name: table})

            # Equi-joins (with a source_key) can use the hash join fast path.
            if join.get("source_key"):
                table = self.hash_join(join, source_context, join_context)
            else:
                table = self.nested_loop_join(join, source_context, join_context)

            # All names now alias the same combined table, windowed per source.
            source_context = self.context(
                {
                    name: Table(table.columns, table.rows, column_range)
                    for name, column_range in column_ranges.items()
                }
            )
            condition = self.generate(join["condition"])
            if condition:
                source_context.filter(condition)

        if not step.condition and not step.projections:
            return source_context

        sink = self._project_and_filter(
            source_context,
            step,
            (reader for reader, _ in iter(source_context)),
        )

        if step.projections:
            return self.context({step.name: sink})
        else:
            # No projections: preserve the per-source column windows.
            return self.context(
                {
                    name: Table(table.columns, sink.rows, table.column_range)
                    for name, table in source_context.tables.items()
                }
            )

    def nested_loop_join(self, _join, source_context, join_context):
        """Cartesian product of both sides; the join condition is applied later."""
        table = Table(source_context.columns + join_context.columns)

        for reader_a, _ in source_context:
            for reader_b, _ in join_context:
                table.append(reader_a.row + reader_b.row)

        return table

    def hash_join(self, join, source_context, join_context):
        """Equi-join: bucket both sides by key, then combine matching buckets.

        LEFT/RIGHT sides are padded with a row of Nones when unmatched.
        """
        source_key = self.generate_tuple(join["source_key"])
        join_key = self.generate_tuple(join["join_key"])
        left = join.get("side") == "LEFT"
        right = join.get("side") == "RIGHT"

        # key -> ([left-side rows], [right-side rows])
        results = collections.defaultdict(lambda: ([], []))

        for reader, ctx in source_context:
            results[ctx.eval_tuple(source_key)][0].append(reader.row)
        for reader, ctx in join_context:
            results[ctx.eval_tuple(join_key)][1].append(reader.row)

        table = Table(source_context.columns + join_context.columns)
        nulls = [(None,) * len(join_context.columns if left else source_context.columns)]

        for a_group, b_group in results.values():
            if left:
                b_group = b_group or nulls
            elif right:
                a_group = a_group or nulls

            for a_row, b_row in itertools.product(a_group, b_group):
                table.append(a_row + b_row)

        return table

    def aggregate(self, step, context):
        """Evaluate an Aggregate step: sort by group keys, then fold each group."""
        group_by = self.generate_tuple(step.group.values())
        aggregations = self.generate_tuple(step.aggregations)
        operands = self.generate_tuple(step.operands)

        if operands:
            # Pre-compute aggregation operands and append them as extra columns.
            operand_table = Table(self.table(step.operands).columns)

            for reader, ctx in context:
                operand_table.append(ctx.eval_tuple(operands))

            for i, (a, b) in enumerate(zip(context.table.rows, operand_table.rows)):
                context.table.rows[i] = a + b

            width = len(context.columns)
            context.add_columns(*operand_table.columns)

            operand_table = Table(
                context.columns,
                context.table.rows,
                range(width, width + len(operand_table.columns)),
            )

            context = self.context(
                {
                    None: operand_table,
                    **context.tables,
                }
            )

        # Sorting by the group keys makes each group a contiguous row range.
        context.sort(group_by)

        group = None
        start = 0
        end = 1
        length = len(context.table)
        table = self.table(list(step.group) + step.aggregations)

        def add_row():
            # Reads `group` from the enclosing scope at call time.
            table.append(group + context.eval_tuple(aggregations))

        if length:
            for i in range(length):
                context.set_index(i)
                key = context.eval_tuple(group_by)
                group = key if group is None else group
                end += 1
                if key != group:
                    # Key changed: emit the previous group's row range.
                    context.set_range(start, end - 2)
                    add_row()
                    group = key
                    start = end - 2
                if len(table.rows) >= step.limit:
                    break
                if i == length - 1:
                    # Flush the final group.
                    context.set_range(start, end - 1)
                    add_row()
        elif step.limit > 0 and not group_by:
            # Aggregation over an empty table with no GROUP BY still yields one row.
            context.set_range(0, 0)
            table.append(context.eval_tuple(aggregations))

        context = self.context({step.name: table, **{name: table for name in context.tables}})

        if step.projections or step.condition:
            return self.scan(step, context)
        return context

    def sort(self, step, context):
        """Evaluate a Sort step: project, sort by the step key, apply limit."""
        projections = self.generate_tuple(step.projections)
        projection_columns = [p.alias_or_name for p in step.projections]
        # Keep original columns alongside projections so sort keys can use either.
        all_columns = list(context.columns) + projection_columns
        sink = self.table(all_columns)
        for reader, ctx in context:
            sink.append(reader.row + ctx.eval_tuple(projections))

        sort_ctx = self.context(
            {
                None: sink,
                **{table: sink for table in context.tables},
            }
        )
        sort_ctx.sort(self.generate_tuple(step.key))

        if not math.isinf(step.limit):
            sort_ctx.table.rows = sort_ctx.table.rows[0 : step.limit]

        # Strip the helper columns, keeping only the projected ones.
        output = Table(
            projection_columns,
            rows=[r[len(context.columns) : len(all_columns)] for r in sort_ctx.table.rows],
        )
        return self.context({step.name: output})

    def set_operation(self, step, context):
        """Evaluate UNION / INTERSECT / EXCEPT between two result tables."""
        left = context.tables[step.left]
        right = context.tables[step.right]

        sink = self.table(left.columns)

        # NOTE(review): the set-based paths do not preserve row order — presumably
        # acceptable since SQL set operations are unordered; confirm callers agree.
        if issubclass(step.op, exp.Intersect):
            sink.rows = list(set(left.rows).intersection(set(right.rows)))
        elif issubclass(step.op, exp.Except):
            sink.rows = list(set(left.rows).difference(set(right.rows)))
        elif issubclass(step.op, exp.Union) and step.distinct:
            sink.rows = list(set(left.rows).union(set(right.rows)))
        else:
            sink.rows = left.rows + right.rows

        if not math.isinf(step.limit):
            sink.rows = sink.rows[0 : step.limit]

        return self.context({step.name: sink})
|
|
353
|
+
|
|
354
|
+
|
|
355
|
+
def _ordered_py(self, expression):
|
|
356
|
+
this = self.sql(expression, "this")
|
|
357
|
+
desc = "True" if expression.args.get("desc") else "False"
|
|
358
|
+
nulls_first = "True" if expression.args.get("nulls_first") else "False"
|
|
359
|
+
return f"ORDERED({this}, {desc}, {nulls_first})"
|
|
360
|
+
|
|
361
|
+
|
|
362
|
+
def _rename(self, e):
|
|
363
|
+
try:
|
|
364
|
+
values = list(e.args.values())
|
|
365
|
+
|
|
366
|
+
if len(values) == 1:
|
|
367
|
+
values = values[0]
|
|
368
|
+
if not isinstance(values, list):
|
|
369
|
+
return self.func(e.key, values)
|
|
370
|
+
return self.func(e.key, *values)
|
|
371
|
+
|
|
372
|
+
if isinstance(e, exp.Func) and e.is_var_len_args:
|
|
373
|
+
args = itertools.chain.from_iterable(x if isinstance(x, list) else [x] for x in values)
|
|
374
|
+
return self.func(e.key, *args)
|
|
375
|
+
|
|
376
|
+
return self.func(e.key, *values)
|
|
377
|
+
except Exception as ex:
|
|
378
|
+
raise Exception(f"Could not rename {repr(e)}") from ex
|
|
379
|
+
|
|
380
|
+
|
|
381
|
+
def _case_sql(self, expression):
|
|
382
|
+
this = self.sql(expression, "this")
|
|
383
|
+
chain = self.sql(expression, "default") or "None"
|
|
384
|
+
|
|
385
|
+
for e in reversed(expression.args["ifs"]):
|
|
386
|
+
true = self.sql(e, "true")
|
|
387
|
+
condition = self.sql(e, "this")
|
|
388
|
+
condition = f"{this} = ({condition})" if this else condition
|
|
389
|
+
chain = f"{true} if {condition} else ({chain})"
|
|
390
|
+
|
|
391
|
+
return chain
|
|
392
|
+
|
|
393
|
+
|
|
394
|
+
def _lambda_sql(self, e: exp.Lambda) -> str:
    """Render a SQL lambda (e.g. ``x -> x + 1``) as a Python lambda string.

    Identifiers matching the lambda's parameter names are rewritten to vars so
    they render as bare names instead of scope lookups.
    """
    param_names = {identifier.name.lower() for identifier in e.expressions}

    e = e.transform(
        lambda node: (
            exp.var(node.name)
            if isinstance(node, exp.Identifier) and node.name.lower() in param_names
            else node
        )
    ).assert_is(exp.Lambda)

    return f"lambda {self.expressions(e, flat=True)}: {self.sql(e, 'this')}"
|
|
404
|
+
|
|
405
|
+
|
|
406
|
+
def _div_sql(self: generator.Generator, e: exp.Div) -> str:
    """Render division as a DIV(...) helper call.

    ``safe`` division turns a zero denominator into None (NULL semantics);
    ``typed`` division truncates the result to an int.
    """
    denominator = self.sql(e, "expression")

    if e.args.get("safe"):
        # ``x or None`` maps a 0 denominator to None before DIV sees it.
        denominator += " or None"

    rendered = f"DIV({self.sql(e, 'this')}, {denominator})"

    if e.args.get("typed"):
        rendered = f"int({rendered})"

    return rendered
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
class Python(Dialect):
    """A sqlglot "dialect" whose generator emits evaluable Python source
    rather than SQL.  PythonExecutor compiles this output with ``compile``.
    """

    class Tokenizer(tokens.Tokenizer):
        # Python string literals use backslash escaping.
        STRING_ESCAPES = ["\\"]

    class Generator(generator.Generator):
        TRANSFORMS = {
            # Binary operators and all known functions default to rendering as
            # calls to same-named helpers provided by the executor ENV.
            **{klass: _rename for klass in subclasses(exp.__name__, exp.Binary)},
            **{klass: _rename for klass in exp.ALL_FUNCTIONS},
            exp.Case: _case_sql,
            # Aliases are stripped: only the underlying expression is evaluated.
            exp.Alias: lambda self, e: self.sql(e.this),
            exp.Array: inline_array_sql,
            exp.And: lambda self, e: self.binary(e, "and"),
            exp.Between: _rename,
            exp.Boolean: lambda self, e: "True" if e.this else "False",
            exp.Cast: lambda self, e: f"CAST({self.sql(e.this)}, exp.DataType.Type.{e.args['to']})",
            # Columns become lookups in the row-scope mapping: scope[table][column].
            exp.Column: lambda self, e: f"scope[{self.sql(e, 'table') or None}][{self.sql(e.this)}]",
            exp.Concat: lambda self, e: self.func(
                "SAFECONCAT" if e.args.get("safe") else "CONCAT", *e.expressions
            ),
            exp.Distinct: lambda self, e: f"set({self.sql(e, 'this')})",
            exp.Div: _div_sql,
            exp.Extract: lambda self, e: f"EXTRACT('{e.name.lower()}', {self.sql(e, 'expression')})",
            # IN renders as a Python set-membership test.
            exp.In: lambda self, e: f"{self.sql(e, 'this')} in {{{self.expressions(e, flat=True)}}}",
            exp.Interval: lambda self, e: f"INTERVAL({self.sql(e.this)}, '{self.sql(e.unit)}')",
            # IS against a literal compares by value; against NULL by identity.
            exp.Is: lambda self, e: (
                self.binary(e, "==") if isinstance(e.this, exp.Literal) else self.binary(e, "is")
            ),
            exp.JSONExtract: lambda self, e: self.func(e.key, e.this, e.expression, *e.expressions),
            # JSON paths render as a list of key/subscript strings (root skipped).
            exp.JSONPath: lambda self, e: f"[{','.join(self.sql(p) for p in e.expressions[1:])}]",
            exp.JSONPathKey: lambda self, e: f"'{self.sql(e.this)}'",
            exp.JSONPathSubscript: lambda self, e: f"'{e.this}'",
            exp.Lambda: _lambda_sql,
            exp.Not: lambda self, e: f"not {self.sql(e.this)}",
            exp.Null: lambda *_: "None",
            exp.Or: lambda self, e: self.binary(e, "or"),
            exp.Ordered: _ordered_py,
            # COUNT(*) counts rows, so * evaluates to a constant 1.
            exp.Star: lambda *_: "1",
        }
|
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import typing as t
|
|
4
|
+
|
|
5
|
+
from sqlglot.dialects.dialect import DialectType
|
|
6
|
+
from sqlglot.helper import dict_depth
|
|
7
|
+
from sqlglot.schema import AbstractMappingSchema, normalize_name
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class Table:
    """An in-memory table: a tuple of column names plus a list of row tuples.

    ``column_range`` optionally restricts which columns are visible through the
    readers — used by the executor to window one combined row per joined table.
    A single shared ``RowReader`` is repositioned on each ``__getitem__``, so
    readers must not be held across index accesses.
    """

    def __init__(
        self,
        columns: t.Iterable,
        rows: t.Optional[t.List] = None,
        column_range: t.Optional[range] = None,
    ) -> None:
        self.columns = tuple(columns)
        self.column_range = column_range
        # Shared reader, repositioned by __getitem__ / TableIter.
        self.reader = RowReader(self.columns, self.column_range)

        self.rows = rows or []
        if rows:
            # Rows must match the column count exactly.
            assert len(rows[0]) == len(self.columns)
        self.range_reader = RangeReader(self)

    def add_columns(self, *columns: str) -> None:
        """Append columns and rebuild the reader; widens the visible range."""
        self.columns += columns
        if self.column_range:
            self.column_range = range(
                self.column_range.start, self.column_range.stop + len(columns)
            )
        self.reader = RowReader(self.columns, self.column_range)

    def append(self, row: t.List) -> None:
        assert len(row) == len(self.columns)
        self.rows.append(row)

    def pop(self) -> None:
        self.rows.pop()

    def to_pylist(self) -> t.List:
        """Return rows as a list of column-name -> value dicts."""
        return [dict(zip(self.columns, row)) for row in self.rows]

    @property
    def width(self) -> int:
        # Number of columns (including any outside the visible range).
        return len(self.columns)

    def __len__(self) -> int:
        return len(self.rows)

    def __iter__(self) -> TableIter:
        return TableIter(self)

    def __getitem__(self, index: int) -> RowReader:
        # Reposition the shared reader on the requested row.
        self.reader.row = self.rows[index]
        return self.reader

    def __repr__(self) -> str:
        # Render up to ~11 rows, only the columns visible through column_range.
        columns = tuple(
            column
            for i, column in enumerate(self.columns)
            if not self.column_range or i in self.column_range
        )
        widths = {column: len(column) for column in columns}
        lines = [" ".join(column for column in columns)]

        for i, row in enumerate(self):
            if i > 10:
                break

            lines.append(
                " ".join(
                    str(row[column]).rjust(widths[column])[0 : widths[column]] for column in columns
                )
            )
        return "\n".join(lines)
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
class TableIter:
    """Iterator that yields the table's shared RowReader, positioned row by row."""

    def __init__(self, table: Table) -> None:
        self.table = table
        # Start just before the first row; advanced before each access.
        self.index = -1

    def __iter__(self) -> TableIter:
        return self

    def __next__(self) -> RowReader:
        self.index += 1
        if self.index >= len(self.table):
            raise StopIteration
        return self.table[self.index]
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
class RangeReader:
    """Reads one column across a contiguous range of table rows.

    The range starts empty; the executor's context repositions it to cover the
    current group of rows before evaluating aggregations.
    """

    def __init__(self, table: Table) -> None:
        self.table = table
        self.range = range(0)

    def __len__(self) -> int:
        return len(self.range)

    def __getitem__(self, column: str):
        # Lazily yield the column's value for each row in the active range.
        return (self.table[row_index][column] for row_index in self.range)
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
class RowReader:
    """Exposes a single row tuple as a mapping from column name to value.

    Only columns whose index falls inside ``column_range`` (when given) are
    addressable. ``row`` is assigned externally before each lookup.
    """

    def __init__(self, columns, column_range=None):
        visible = {}
        for position, column in enumerate(columns):
            # An empty/None range means every column is visible.
            if not column_range or position in column_range:
                visible[column] = position
        self.columns = visible
        self.row = None

    def __getitem__(self, column):
        return self.row[self.columns[column]]
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
class Tables(AbstractMappingSchema):
    """Mapping schema over executor ``Table`` objects, keyed by (nested) name."""

    pass
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
def ensure_tables(d: t.Optional[t.Dict], dialect: DialectType = None) -> Tables:
    """Coerce a (possibly nested) mapping of raw table data into a Tables schema."""
    normalized = _ensure_tables(d, dialect=dialect)
    return Tables(normalized)
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
def _ensure_tables(d: t.Optional[t.Dict], dialect: DialectType = None) -> t.Dict:
    """Recursively normalize names and convert raw row data into Table objects.

    Nested dicts (depth > 1) are treated as catalog/schema levels and recursed
    into; leaf values are either Table instances (kept as-is) or lists of
    column->value dicts, which are converted into Tables.
    """
    if not d:
        return {}

    depth = dict_depth(d)
    if depth > 1:
        # Still at a catalog/schema level: normalize the key and recurse.
        return {
            normalize_name(k, dialect=dialect, is_table=True).name: _ensure_tables(
                v, dialect=dialect
            )
            for k, v in d.items()
        }

    result = {}
    for table_name, table in d.items():
        table_name = normalize_name(table_name, dialect=dialect).name

        if isinstance(table, Table):
            result[table_name] = table
        else:
            # List of row dicts: normalize column names per row first.
            table = [
                {
                    normalize_name(column_name, dialect=dialect).name: value
                    for column_name, value in row.items()
                }
                for row in table
            ]
            # Column order is taken from the first row (empty table -> no columns).
            column_names = tuple(column_name for column_name in table[0]) if table else ()
            rows = [tuple(row[name] for name in column_names) for row in table]
            result[table_name] = Table(columns=column_names, rows=rows)

    return result
|