altimate-code 0.5.1 → 0.5.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (102)
  1. package/CHANGELOG.md +35 -0
  2. package/README.md +1 -5
  3. package/bin/altimate +6 -0
  4. package/bin/altimate-code +6 -0
  5. package/dbt-tools/bin/altimate-dbt +2 -0
  6. package/dbt-tools/dist/altimate_python_packages/altimate_packages/altimate/__init__.py +0 -0
  7. package/dbt-tools/dist/altimate_python_packages/altimate_packages/altimate/fetch_schema.py +35 -0
  8. package/dbt-tools/dist/altimate_python_packages/altimate_packages/altimate/utils.py +353 -0
  9. package/dbt-tools/dist/altimate_python_packages/altimate_packages/altimate/validate_sql.py +114 -0
  10. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/__init__.py +178 -0
  11. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/__main__.py +96 -0
  12. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/_typing.py +17 -0
  13. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/__init__.py +3 -0
  14. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/__init__.py +18 -0
  15. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/_typing.py +18 -0
  16. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/column.py +332 -0
  17. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/dataframe.py +866 -0
  18. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/functions.py +1267 -0
  19. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/group.py +59 -0
  20. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/normalize.py +78 -0
  21. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/operations.py +53 -0
  22. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/readwriter.py +108 -0
  23. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/session.py +190 -0
  24. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/transforms.py +9 -0
  25. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/types.py +212 -0
  26. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/util.py +32 -0
  27. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dataframe/sql/window.py +134 -0
  28. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/__init__.py +118 -0
  29. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/athena.py +166 -0
  30. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/bigquery.py +1331 -0
  31. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/clickhouse.py +1393 -0
  32. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/databricks.py +131 -0
  33. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/dialect.py +1915 -0
  34. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/doris.py +561 -0
  35. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/drill.py +157 -0
  36. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/druid.py +20 -0
  37. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/duckdb.py +1159 -0
  38. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/dune.py +16 -0
  39. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/hive.py +787 -0
  40. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/materialize.py +94 -0
  41. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/mysql.py +1324 -0
  42. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/oracle.py +378 -0
  43. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/postgres.py +778 -0
  44. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/presto.py +788 -0
  45. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/prql.py +203 -0
  46. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/redshift.py +448 -0
  47. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/risingwave.py +78 -0
  48. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/snowflake.py +1464 -0
  49. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/spark.py +202 -0
  50. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/spark2.py +349 -0
  51. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/sqlite.py +320 -0
  52. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/starrocks.py +343 -0
  53. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/tableau.py +61 -0
  54. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/teradata.py +356 -0
  55. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/trino.py +115 -0
  56. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/dialects/tsql.py +1403 -0
  57. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/diff.py +456 -0
  58. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/errors.py +93 -0
  59. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/executor/__init__.py +95 -0
  60. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/executor/context.py +101 -0
  61. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/executor/env.py +246 -0
  62. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/executor/python.py +460 -0
  63. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/executor/table.py +155 -0
  64. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/expressions.py +8870 -0
  65. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/generator.py +4993 -0
  66. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/helper.py +582 -0
  67. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/jsonpath.py +227 -0
  68. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/lineage.py +423 -0
  69. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/__init__.py +11 -0
  70. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/annotate_types.py +589 -0
  71. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/canonicalize.py +222 -0
  72. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/eliminate_ctes.py +43 -0
  73. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/eliminate_joins.py +181 -0
  74. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/eliminate_subqueries.py +189 -0
  75. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/isolate_table_selects.py +50 -0
  76. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/merge_subqueries.py +415 -0
  77. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/normalize.py +200 -0
  78. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/normalize_identifiers.py +64 -0
  79. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/optimize_joins.py +91 -0
  80. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/optimizer.py +94 -0
  81. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/pushdown_predicates.py +222 -0
  82. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/pushdown_projections.py +172 -0
  83. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/qualify.py +104 -0
  84. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/qualify_columns.py +1024 -0
  85. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/qualify_tables.py +155 -0
  86. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/scope.py +904 -0
  87. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/simplify.py +1587 -0
  88. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/optimizer/unnest_subqueries.py +302 -0
  89. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/parser.py +8501 -0
  90. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/planner.py +463 -0
  91. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/schema.py +588 -0
  92. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/serde.py +68 -0
  93. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/time.py +687 -0
  94. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/tokens.py +1520 -0
  95. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/transforms.py +1020 -0
  96. package/dbt-tools/dist/altimate_python_packages/altimate_packages/sqlglot/trie.py +81 -0
  97. package/dbt-tools/dist/altimate_python_packages/dbt_core_integration.py +825 -0
  98. package/dbt-tools/dist/altimate_python_packages/dbt_utils.py +157 -0
  99. package/dbt-tools/dist/index.js +23859 -0
  100. package/package.json +13 -13
  101. package/postinstall.mjs +42 -0
  102. package/skills/altimate-setup/SKILL.md +31 -0
@@ -0,0 +1,227 @@
1
+ from __future__ import annotations
2
+
3
+ import typing as t
4
+
5
+ import sqlglot.expressions as exp
6
+ from sqlglot.errors import ParseError
7
+ from sqlglot.tokens import Token, Tokenizer, TokenType
8
+
9
+ if t.TYPE_CHECKING:
10
+ from sqlglot._typing import Lit
11
+ from sqlglot.dialects.dialect import DialectType
12
+
13
+
14
+ class JSONPathTokenizer(Tokenizer):
15
+ SINGLE_TOKENS = {
16
+ "(": TokenType.L_PAREN,
17
+ ")": TokenType.R_PAREN,
18
+ "[": TokenType.L_BRACKET,
19
+ "]": TokenType.R_BRACKET,
20
+ ":": TokenType.COLON,
21
+ ",": TokenType.COMMA,
22
+ "-": TokenType.DASH,
23
+ ".": TokenType.DOT,
24
+ "?": TokenType.PLACEHOLDER,
25
+ "@": TokenType.PARAMETER,
26
+ "'": TokenType.QUOTE,
27
+ '"': TokenType.QUOTE,
28
+ "$": TokenType.DOLLAR,
29
+ "*": TokenType.STAR,
30
+ }
31
+
32
+ KEYWORDS = {
33
+ "..": TokenType.DOT,
34
+ }
35
+
36
+ IDENTIFIER_ESCAPES = ["\\"]
37
+ STRING_ESCAPES = ["\\"]
38
+
39
+
40
+ def parse(path: str, dialect: DialectType = None) -> exp.JSONPath:
41
+ """Takes in a JSON path string and parses it into a JSONPath expression."""
42
+ from sqlglot.dialects import Dialect
43
+
44
+ jsonpath_tokenizer = Dialect.get_or_raise(dialect).jsonpath_tokenizer
45
+ tokens = jsonpath_tokenizer.tokenize(path)
46
+ size = len(tokens)
47
+
48
+ i = 0
49
+
50
+ def _curr() -> t.Optional[TokenType]:
51
+ return tokens[i].token_type if i < size else None
52
+
53
+ def _prev() -> Token:
54
+ return tokens[i - 1]
55
+
56
+ def _advance() -> Token:
57
+ nonlocal i
58
+ i += 1
59
+ return _prev()
60
+
61
+ def _error(msg: str) -> str:
62
+ return f"{msg} at index {i}: {path}"
63
+
64
+ @t.overload
65
+ def _match(token_type: TokenType, raise_unmatched: Lit[True] = True) -> Token:
66
+ pass
67
+
68
+ @t.overload
69
+ def _match(token_type: TokenType, raise_unmatched: Lit[False] = False) -> t.Optional[Token]:
70
+ pass
71
+
72
+ def _match(token_type, raise_unmatched=False):
73
+ if _curr() == token_type:
74
+ return _advance()
75
+ if raise_unmatched:
76
+ raise ParseError(_error(f"Expected {token_type}"))
77
+ return None
78
+
79
+ def _parse_literal() -> t.Any:
80
+ token = _match(TokenType.STRING) or _match(TokenType.IDENTIFIER)
81
+ if token:
82
+ return token.text
83
+ if _match(TokenType.STAR):
84
+ return exp.JSONPathWildcard()
85
+ if _match(TokenType.PLACEHOLDER) or _match(TokenType.L_PAREN):
86
+ script = _prev().text == "("
87
+ start = i
88
+
89
+ while True:
90
+ if _match(TokenType.L_BRACKET):
91
+ _parse_bracket() # nested call which we can throw away
92
+ if _curr() in (TokenType.R_BRACKET, None):
93
+ break
94
+ _advance()
95
+
96
+ expr_type = exp.JSONPathScript if script else exp.JSONPathFilter
97
+ return expr_type(this=path[tokens[start].start : tokens[i].end])
98
+
99
+ number = "-" if _match(TokenType.DASH) else ""
100
+
101
+ token = _match(TokenType.NUMBER)
102
+ if token:
103
+ number += token.text
104
+
105
+ if number:
106
+ return int(number)
107
+
108
+ return False
109
+
110
+ def _parse_slice() -> t.Any:
111
+ start = _parse_literal()
112
+ end = _parse_literal() if _match(TokenType.COLON) else None
113
+ step = _parse_literal() if _match(TokenType.COLON) else None
114
+
115
+ if end is None and step is None:
116
+ return start
117
+
118
+ return exp.JSONPathSlice(start=start, end=end, step=step)
119
+
120
+ def _parse_bracket() -> exp.JSONPathPart:
121
+ literal = _parse_slice()
122
+
123
+ if isinstance(literal, str) or literal is not False:
124
+ indexes = [literal]
125
+ while _match(TokenType.COMMA):
126
+ literal = _parse_slice()
127
+
128
+ if literal:
129
+ indexes.append(literal)
130
+
131
+ if len(indexes) == 1:
132
+ if isinstance(literal, str):
133
+ node: exp.JSONPathPart = exp.JSONPathKey(this=indexes[0])
134
+ elif isinstance(literal, exp.JSONPathPart) and isinstance(
135
+ literal, (exp.JSONPathScript, exp.JSONPathFilter)
136
+ ):
137
+ node = exp.JSONPathSelector(this=indexes[0])
138
+ else:
139
+ node = exp.JSONPathSubscript(this=indexes[0])
140
+ else:
141
+ node = exp.JSONPathUnion(expressions=indexes)
142
+ else:
143
+ raise ParseError(_error("Cannot have empty segment"))
144
+
145
+ _match(TokenType.R_BRACKET, raise_unmatched=True)
146
+
147
+ return node
148
+
149
+ def _parse_var_text() -> str:
150
+ """
151
+ Consumes & returns the text for a var. In BigQuery it's valid to have a key with spaces
152
+ in it, e.g JSON_QUERY(..., '$. a b c ') should produce a single JSONPathKey(' a b c ').
153
+ This is done by merging "consecutive" vars until a key separator is found (dot, colon etc)
154
+ or the path string is exhausted.
155
+ """
156
+ prev_index = i - 2
157
+
158
+ while _match(TokenType.VAR):
159
+ pass
160
+
161
+ start = 0 if prev_index < 0 else tokens[prev_index].end + 1
162
+
163
+ if i >= len(tokens):
164
+ # This key is the last token for the path, so it's text is the remaining path
165
+ text = path[start:]
166
+ else:
167
+ text = path[start : tokens[i].start]
168
+
169
+ return text
170
+
171
+ # We canonicalize the JSON path AST so that it always starts with a
172
+ # "root" element, so paths like "field" will be generated as "$.field"
173
+ _match(TokenType.DOLLAR)
174
+ expressions: t.List[exp.JSONPathPart] = [exp.JSONPathRoot()]
175
+
176
+ while _curr():
177
+ if _match(TokenType.DOT) or _match(TokenType.COLON):
178
+ recursive = _prev().text == ".."
179
+
180
+ if _match(TokenType.VAR):
181
+ value: t.Optional[str | exp.JSONPathWildcard] = _parse_var_text()
182
+ elif _match(TokenType.IDENTIFIER):
183
+ value = _prev().text
184
+ elif _match(TokenType.STAR):
185
+ value = exp.JSONPathWildcard()
186
+ else:
187
+ value = None
188
+
189
+ if recursive:
190
+ expressions.append(exp.JSONPathRecursive(this=value))
191
+ elif value:
192
+ expressions.append(exp.JSONPathKey(this=value))
193
+ else:
194
+ raise ParseError(_error("Expected key name or * after DOT"))
195
+ elif _match(TokenType.L_BRACKET):
196
+ expressions.append(_parse_bracket())
197
+ elif _match(TokenType.VAR):
198
+ expressions.append(exp.JSONPathKey(this=_parse_var_text()))
199
+ elif _match(TokenType.IDENTIFIER):
200
+ expressions.append(exp.JSONPathKey(this=_prev().text))
201
+ elif _match(TokenType.STAR):
202
+ expressions.append(exp.JSONPathWildcard())
203
+ else:
204
+ raise ParseError(_error(f"Unexpected {tokens[i].token_type}"))
205
+
206
+ return exp.JSONPath(expressions=expressions)
207
+
208
+
209
+ JSON_PATH_PART_TRANSFORMS: t.Dict[t.Type[exp.Expression], t.Callable[..., str]] = {
210
+ exp.JSONPathFilter: lambda _, e: f"?{e.this}",
211
+ exp.JSONPathKey: lambda self, e: self._jsonpathkey_sql(e),
212
+ exp.JSONPathRecursive: lambda _, e: f"..{e.this or ''}",
213
+ exp.JSONPathRoot: lambda *_: "$",
214
+ exp.JSONPathScript: lambda _, e: f"({e.this}",
215
+ exp.JSONPathSelector: lambda self, e: f"[{self.json_path_part(e.this)}]",
216
+ exp.JSONPathSlice: lambda self, e: ":".join(
217
+ "" if p is False else self.json_path_part(p)
218
+ for p in [e.args.get("start"), e.args.get("end"), e.args.get("step")]
219
+ if p is not None
220
+ ),
221
+ exp.JSONPathSubscript: lambda self, e: self._jsonpathsubscript_sql(e),
222
+ exp.JSONPathUnion: lambda self,
223
+ e: f"[{','.join(self.json_path_part(p) for p in e.expressions)}]",
224
+ exp.JSONPathWildcard: lambda *_: "*",
225
+ }
226
+
227
+ ALL_JSON_PATH_PARTS = set(JSON_PATH_PART_TRANSFORMS)
@@ -0,0 +1,423 @@
1
+ from __future__ import annotations
2
+
3
+ import json
4
+ import logging
5
+ import typing as t
6
+ from dataclasses import dataclass, field
7
+
8
+ from sqlglot import Schema, exp, maybe_parse
9
+ from sqlglot.errors import SqlglotError
10
+ from sqlglot.optimizer import Scope, build_scope, find_all_in_scope, normalize_identifiers, qualify
11
+ from sqlglot.optimizer.scope import ScopeType
12
+
13
+ if t.TYPE_CHECKING:
14
+ from sqlglot.dialects.dialect import DialectType
15
+
16
+ logger = logging.getLogger("sqlglot")
17
+
18
+
19
+ @dataclass(frozen=True)
20
+ class Node:
21
+ name: str
22
+ expression: exp.Expression
23
+ source: exp.Expression
24
+ downstream: t.List[Node] = field(default_factory=list)
25
+ source_name: str = ""
26
+ reference_node_name: str = ""
27
+
28
+ def walk(self) -> t.Iterator[Node]:
29
+ yield self
30
+
31
+ for d in self.downstream:
32
+ yield from d.walk()
33
+
34
+ def to_html(self, dialect: DialectType = None, **opts) -> GraphHTML:
35
+ nodes = {}
36
+ edges = []
37
+
38
+ for node in self.walk():
39
+ if isinstance(node.expression, exp.Table):
40
+ label = f"FROM {node.expression.this}"
41
+ title = f"<pre>SELECT {node.name} FROM {node.expression.this}</pre>"
42
+ group = 1
43
+ else:
44
+ label = node.expression.sql(pretty=True, dialect=dialect)
45
+ source = node.source.transform(
46
+ lambda n: (
47
+ exp.Tag(this=n, prefix="<b>", postfix="</b>") if n is node.expression else n
48
+ ),
49
+ copy=False,
50
+ ).sql(pretty=True, dialect=dialect)
51
+ title = f"<pre>{source}</pre>"
52
+ group = 0
53
+
54
+ node_id = id(node)
55
+
56
+ nodes[node_id] = {
57
+ "id": node_id,
58
+ "label": label,
59
+ "title": title,
60
+ "group": group,
61
+ }
62
+
63
+ for d in node.downstream:
64
+ edges.append({"from": node_id, "to": id(d)})
65
+ return GraphHTML(nodes, edges, **opts)
66
+
67
+
68
+ def lineage(
69
+ column: str | exp.Column,
70
+ sql: str | exp.Expression,
71
+ schema: t.Optional[t.Dict | Schema] = None,
72
+ sources: t.Optional[t.Mapping[str, str | exp.Query]] = None,
73
+ dialect: DialectType = None,
74
+ scope: t.Optional[Scope] = None,
75
+ trim_selects: bool = True,
76
+ **kwargs,
77
+ ) -> Node:
78
+ """Build the lineage graph for a column of a SQL query.
79
+
80
+ Args:
81
+ column: The column to build the lineage for.
82
+ sql: The SQL string or expression.
83
+ schema: The schema of tables.
84
+ sources: A mapping of queries which will be used to continue building lineage.
85
+ dialect: The dialect of input SQL.
86
+ scope: A pre-created scope to use instead.
87
+ trim_selects: Whether or not to clean up selects by trimming to only relevant columns.
88
+ **kwargs: Qualification optimizer kwargs.
89
+
90
+ Returns:
91
+ A lineage node.
92
+ """
93
+
94
+ expression = maybe_parse(sql, dialect=dialect)
95
+ column = normalize_identifiers.normalize_identifiers(column, dialect=dialect).name
96
+
97
+ if sources:
98
+ expression = exp.expand(
99
+ expression,
100
+ {k: t.cast(exp.Query, maybe_parse(v, dialect=dialect)) for k, v in sources.items()},
101
+ dialect=dialect,
102
+ )
103
+
104
+ if not scope:
105
+ expression = qualify.qualify(
106
+ expression,
107
+ dialect=dialect,
108
+ schema=schema,
109
+ **{"validate_qualify_columns": False, "identify": False, **kwargs}, # type: ignore
110
+ )
111
+
112
+ scope = build_scope(expression)
113
+
114
+ if not scope:
115
+ raise SqlglotError("Cannot build lineage, sql must be SELECT")
116
+
117
+ if not any(select.alias_or_name == column for select in scope.expression.selects):
118
+ raise SqlglotError(f"Cannot find column '{column}' in query.")
119
+
120
+ return to_node(column, scope, dialect, trim_selects=trim_selects)
121
+
122
+
123
+ def to_node(
124
+ column: str | int,
125
+ scope: Scope,
126
+ dialect: DialectType,
127
+ scope_name: t.Optional[str] = None,
128
+ upstream: t.Optional[Node] = None,
129
+ source_name: t.Optional[str] = None,
130
+ reference_node_name: t.Optional[str] = None,
131
+ trim_selects: bool = True,
132
+ ) -> Node:
133
+ # Find the specific select clause that is the source of the column we want.
134
+ # This can either be a specific, named select or a generic `*` clause.
135
+ select = (
136
+ scope.expression.selects[column]
137
+ if isinstance(column, int)
138
+ else next(
139
+ (select for select in scope.expression.selects if select.alias_or_name == column),
140
+ exp.Star() if scope.expression.is_star else scope.expression,
141
+ )
142
+ )
143
+
144
+ if isinstance(scope.expression, exp.Subquery):
145
+ for source in scope.subquery_scopes:
146
+ return to_node(
147
+ column,
148
+ scope=source,
149
+ dialect=dialect,
150
+ upstream=upstream,
151
+ source_name=source_name,
152
+ reference_node_name=reference_node_name,
153
+ trim_selects=trim_selects,
154
+ )
155
+ if isinstance(scope.expression, exp.SetOperation):
156
+ name = type(scope.expression).__name__.upper()
157
+ upstream = upstream or Node(name=name, source=scope.expression, expression=select)
158
+
159
+ index = (
160
+ column
161
+ if isinstance(column, int)
162
+ else next(
163
+ (
164
+ i
165
+ for i, select in enumerate(scope.expression.selects)
166
+ if select.alias_or_name == column or select.is_star
167
+ ),
168
+ -1, # mypy will not allow a None here, but a negative index should never be returned
169
+ )
170
+ )
171
+
172
+ if index == -1:
173
+ raise ValueError(f"Could not find {column} in {scope.expression}")
174
+
175
+ for s in scope.union_scopes:
176
+ to_node(
177
+ index,
178
+ scope=s,
179
+ dialect=dialect,
180
+ upstream=upstream,
181
+ source_name=source_name,
182
+ reference_node_name=reference_node_name,
183
+ trim_selects=trim_selects,
184
+ )
185
+
186
+ return upstream
187
+
188
+ if trim_selects and isinstance(scope.expression, exp.Select):
189
+ # For better ergonomics in our node labels, replace the full select with
190
+ # a version that has only the column we care about.
191
+ # "x", SELECT x, y FROM foo
192
+ # => "x", SELECT x FROM foo
193
+ source = t.cast(exp.Expression, scope.expression.select(select, append=False))
194
+ else:
195
+ source = scope.expression
196
+
197
+ # Create the node for this step in the lineage chain, and attach it to the previous one.
198
+ node = Node(
199
+ name=f"{scope_name}.{column}" if scope_name else str(column),
200
+ source=source,
201
+ expression=select,
202
+ source_name=source_name or "",
203
+ reference_node_name=reference_node_name or "",
204
+ )
205
+
206
+ if upstream:
207
+ upstream.downstream.append(node)
208
+
209
+ subquery_scopes = {
210
+ id(subquery_scope.expression): subquery_scope for subquery_scope in scope.subquery_scopes
211
+ }
212
+
213
+ for subquery in find_all_in_scope(select, exp.UNWRAPPED_QUERIES):
214
+ subquery_scope = subquery_scopes.get(id(subquery))
215
+ if not subquery_scope:
216
+ logger.warning(f"Unknown subquery scope: {subquery.sql(dialect=dialect)}")
217
+ continue
218
+
219
+ for name in subquery.named_selects:
220
+ to_node(
221
+ name,
222
+ scope=subquery_scope,
223
+ dialect=dialect,
224
+ upstream=node,
225
+ trim_selects=trim_selects,
226
+ )
227
+
228
+ # if the select is a star add all scope sources as downstreams
229
+ if select.is_star:
230
+ for source in scope.sources.values():
231
+ if isinstance(source, Scope):
232
+ source = source.expression
233
+ node.downstream.append(
234
+ Node(name=select.sql(comments=False), source=source, expression=source)
235
+ )
236
+
237
+ # Find all columns that went into creating this one to list their lineage nodes.
238
+ source_columns = set(find_all_in_scope(select, exp.Column))
239
+
240
+ # If the source is a UDTF find columns used in the UDTF to generate the table
241
+ if isinstance(source, exp.UDTF):
242
+ source_columns |= set(source.find_all(exp.Column))
243
+ derived_tables = [
244
+ source.expression.parent
245
+ for source in scope.sources.values()
246
+ if isinstance(source, Scope) and source.is_derived_table
247
+ ]
248
+ else:
249
+ derived_tables = scope.derived_tables
250
+
251
+ source_names = {
252
+ dt.alias: dt.comments[0].split()[1]
253
+ for dt in derived_tables
254
+ if dt.comments and dt.comments[0].startswith("source: ")
255
+ }
256
+
257
+ pivots = scope.pivots
258
+ pivot = pivots[0] if len(pivots) == 1 and not pivots[0].unpivot else None
259
+ if pivot:
260
+ # For each aggregation function, the pivot creates a new column for each field in category
261
+ # combined with the aggfunc. So the columns parsed have this order: cat_a_value_sum, cat_a,
262
+ # b_value_sum, b. Because of this step wise manner the aggfunc 'sum(value) as value_sum'
263
+ # belongs to the column indices 0, 2, and the aggfunc 'max(price)' without an alias belongs
264
+ # to the column indices 1, 3. Here, only the columns used in the aggregations are of interest
265
+ # in the lineage, so lookup the pivot column name by index and map that with the columns used
266
+ # in the aggregation.
267
+ #
268
+ # Example: PIVOT (SUM(value) AS value_sum, MAX(price)) FOR category IN ('a' AS cat_a, 'b')
269
+ pivot_columns = pivot.args["columns"]
270
+ pivot_aggs_count = len(pivot.expressions)
271
+
272
+ pivot_column_mapping = {}
273
+ for i, agg in enumerate(pivot.expressions):
274
+ agg_cols = list(agg.find_all(exp.Column))
275
+ for col_index in range(i, len(pivot_columns), pivot_aggs_count):
276
+ pivot_column_mapping[pivot_columns[col_index].name] = agg_cols
277
+
278
+ for c in source_columns:
279
+ table = c.table
280
+ source = scope.sources.get(table)
281
+
282
+ if isinstance(source, Scope):
283
+ reference_node_name = None
284
+ if source.scope_type == ScopeType.DERIVED_TABLE and table not in source_names:
285
+ reference_node_name = table
286
+ elif source.scope_type == ScopeType.CTE:
287
+ selected_node, _ = scope.selected_sources.get(table, (None, None))
288
+ reference_node_name = selected_node.name if selected_node else None
289
+
290
+ # The table itself came from a more specific scope. Recurse into that one using the unaliased column name.
291
+ to_node(
292
+ c.name,
293
+ scope=source,
294
+ dialect=dialect,
295
+ scope_name=table,
296
+ upstream=node,
297
+ source_name=source_names.get(table) or source_name,
298
+ reference_node_name=reference_node_name,
299
+ trim_selects=trim_selects,
300
+ )
301
+ elif pivot and pivot.alias_or_name == c.table:
302
+ downstream_columns = []
303
+
304
+ column_name = c.name
305
+ if any(column_name == pivot_column.name for pivot_column in pivot_columns):
306
+ downstream_columns.extend(pivot_column_mapping[column_name])
307
+ else:
308
+ # The column is not in the pivot, so it must be an implicit column of the
309
+ # pivoted source -- adapt column to be from the implicit pivoted source.
310
+ downstream_columns.append(exp.column(c.this, table=pivot.parent.this))
311
+
312
+ for downstream_column in downstream_columns:
313
+ table = downstream_column.table
314
+ source = scope.sources.get(table)
315
+ if isinstance(source, Scope):
316
+ to_node(
317
+ downstream_column.name,
318
+ scope=source,
319
+ scope_name=table,
320
+ dialect=dialect,
321
+ upstream=node,
322
+ source_name=source_names.get(table) or source_name,
323
+ reference_node_name=reference_node_name,
324
+ trim_selects=trim_selects,
325
+ )
326
+ else:
327
+ source = source or exp.Placeholder()
328
+ node.downstream.append(
329
+ Node(
330
+ name=downstream_column.sql(comments=False),
331
+ source=source,
332
+ expression=source,
333
+ )
334
+ )
335
+ else:
336
+ # The source is not a scope and the column is not in any pivot - we've reached the end
337
+ # of the line. At this point, if a source is not found it means this column's lineage
338
+ # is unknown. This can happen if the definition of a source used in a query is not
339
+ # passed into the `sources` map.
340
+ source = source or exp.Placeholder()
341
+ node.downstream.append(
342
+ Node(name=c.sql(comments=False), source=source, expression=source)
343
+ )
344
+
345
+ return node
346
+
347
+
348
+ class GraphHTML:
349
+ """Node to HTML generator using vis.js.
350
+
351
+ https://visjs.github.io/vis-network/docs/network/
352
+ """
353
+
354
+ def __init__(
355
+ self, nodes: t.Dict, edges: t.List, imports: bool = True, options: t.Optional[t.Dict] = None
356
+ ):
357
+ self.imports = imports
358
+
359
+ self.options = {
360
+ "height": "500px",
361
+ "width": "100%",
362
+ "layout": {
363
+ "hierarchical": {
364
+ "enabled": True,
365
+ "nodeSpacing": 200,
366
+ "sortMethod": "directed",
367
+ },
368
+ },
369
+ "interaction": {
370
+ "dragNodes": False,
371
+ "selectable": False,
372
+ },
373
+ "physics": {
374
+ "enabled": False,
375
+ },
376
+ "edges": {
377
+ "arrows": "to",
378
+ },
379
+ "nodes": {
380
+ "font": "20px monaco",
381
+ "shape": "box",
382
+ "widthConstraint": {
383
+ "maximum": 300,
384
+ },
385
+ },
386
+ **(options or {}),
387
+ }
388
+
389
+ self.nodes = nodes
390
+ self.edges = edges
391
+
392
+ def __str__(self):
393
+ nodes = json.dumps(list(self.nodes.values()))
394
+ edges = json.dumps(self.edges)
395
+ options = json.dumps(self.options)
396
+ imports = (
397
+ """<script type="text/javascript" src="https://unpkg.com/vis-data@latest/peer/umd/vis-data.min.js"></script>
398
+ <script type="text/javascript" src="https://unpkg.com/vis-network@latest/peer/umd/vis-network.min.js"></script>
399
+ <link rel="stylesheet" type="text/css" href="https://unpkg.com/vis-network/styles/vis-network.min.css" />"""
400
+ if self.imports
401
+ else ""
402
+ )
403
+
404
+ return f"""<div>
405
+ <div id="sqlglot-lineage"></div>
406
+ {imports}
407
+ <script type="text/javascript">
408
+ var nodes = new vis.DataSet({nodes})
409
+ nodes.forEach(row => row["title"] = new DOMParser().parseFromString(row["title"], "text/html").body.childNodes[0])
410
+
411
+ new vis.Network(
412
+ document.getElementById("sqlglot-lineage"),
413
+ {{
414
+ nodes: nodes,
415
+ edges: new vis.DataSet({edges})
416
+ }},
417
+ {options},
418
+ )
419
+ </script>
420
+ </div>"""
421
+
422
+ def _repr_html_(self) -> str:
423
+ return self.__str__()
@@ -0,0 +1,11 @@
1
+ # ruff: noqa: F401
2
+
3
+ from sqlglot.optimizer.optimizer import RULES as RULES, optimize as optimize
4
+ from sqlglot.optimizer.scope import (
5
+ Scope as Scope,
6
+ build_scope as build_scope,
7
+ find_all_in_scope as find_all_in_scope,
8
+ find_in_scope as find_in_scope,
9
+ traverse_scope as traverse_scope,
10
+ walk_in_scope as walk_in_scope,
11
+ )