dvt_core-1.11.0b4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of dvt-core might be problematic.
- dvt/__init__.py +7 -0
- dvt/_pydantic_shim.py +26 -0
- dvt/adapters/__init__.py +16 -0
- dvt/adapters/multi_adapter_manager.py +268 -0
- dvt/artifacts/__init__.py +0 -0
- dvt/artifacts/exceptions/__init__.py +1 -0
- dvt/artifacts/exceptions/schemas.py +31 -0
- dvt/artifacts/resources/__init__.py +116 -0
- dvt/artifacts/resources/base.py +68 -0
- dvt/artifacts/resources/types.py +93 -0
- dvt/artifacts/resources/v1/analysis.py +10 -0
- dvt/artifacts/resources/v1/catalog.py +23 -0
- dvt/artifacts/resources/v1/components.py +275 -0
- dvt/artifacts/resources/v1/config.py +282 -0
- dvt/artifacts/resources/v1/documentation.py +11 -0
- dvt/artifacts/resources/v1/exposure.py +52 -0
- dvt/artifacts/resources/v1/function.py +53 -0
- dvt/artifacts/resources/v1/generic_test.py +32 -0
- dvt/artifacts/resources/v1/group.py +22 -0
- dvt/artifacts/resources/v1/hook.py +11 -0
- dvt/artifacts/resources/v1/macro.py +30 -0
- dvt/artifacts/resources/v1/metric.py +173 -0
- dvt/artifacts/resources/v1/model.py +146 -0
- dvt/artifacts/resources/v1/owner.py +10 -0
- dvt/artifacts/resources/v1/saved_query.py +112 -0
- dvt/artifacts/resources/v1/seed.py +42 -0
- dvt/artifacts/resources/v1/semantic_layer_components.py +72 -0
- dvt/artifacts/resources/v1/semantic_model.py +315 -0
- dvt/artifacts/resources/v1/singular_test.py +14 -0
- dvt/artifacts/resources/v1/snapshot.py +92 -0
- dvt/artifacts/resources/v1/source_definition.py +85 -0
- dvt/artifacts/resources/v1/sql_operation.py +10 -0
- dvt/artifacts/resources/v1/unit_test_definition.py +78 -0
- dvt/artifacts/schemas/__init__.py +0 -0
- dvt/artifacts/schemas/base.py +191 -0
- dvt/artifacts/schemas/batch_results.py +24 -0
- dvt/artifacts/schemas/catalog/__init__.py +12 -0
- dvt/artifacts/schemas/catalog/v1/__init__.py +0 -0
- dvt/artifacts/schemas/catalog/v1/catalog.py +60 -0
- dvt/artifacts/schemas/freshness/__init__.py +1 -0
- dvt/artifacts/schemas/freshness/v3/__init__.py +0 -0
- dvt/artifacts/schemas/freshness/v3/freshness.py +159 -0
- dvt/artifacts/schemas/manifest/__init__.py +2 -0
- dvt/artifacts/schemas/manifest/v12/__init__.py +0 -0
- dvt/artifacts/schemas/manifest/v12/manifest.py +212 -0
- dvt/artifacts/schemas/results.py +148 -0
- dvt/artifacts/schemas/run/__init__.py +2 -0
- dvt/artifacts/schemas/run/v5/__init__.py +0 -0
- dvt/artifacts/schemas/run/v5/run.py +184 -0
- dvt/artifacts/schemas/upgrades/__init__.py +4 -0
- dvt/artifacts/schemas/upgrades/upgrade_manifest.py +174 -0
- dvt/artifacts/schemas/upgrades/upgrade_manifest_dbt_version.py +2 -0
- dvt/artifacts/utils/validation.py +153 -0
- dvt/cli/__init__.py +1 -0
- dvt/cli/context.py +16 -0
- dvt/cli/exceptions.py +56 -0
- dvt/cli/flags.py +558 -0
- dvt/cli/main.py +971 -0
- dvt/cli/option_types.py +121 -0
- dvt/cli/options.py +79 -0
- dvt/cli/params.py +803 -0
- dvt/cli/requires.py +478 -0
- dvt/cli/resolvers.py +32 -0
- dvt/cli/types.py +40 -0
- dvt/clients/__init__.py +0 -0
- dvt/clients/checked_load.py +82 -0
- dvt/clients/git.py +164 -0
- dvt/clients/jinja.py +206 -0
- dvt/clients/jinja_static.py +245 -0
- dvt/clients/registry.py +192 -0
- dvt/clients/yaml_helper.py +68 -0
- dvt/compilation.py +833 -0
- dvt/compute/__init__.py +26 -0
- dvt/compute/base.py +288 -0
- dvt/compute/engines/__init__.py +13 -0
- dvt/compute/engines/duckdb_engine.py +368 -0
- dvt/compute/engines/spark_engine.py +273 -0
- dvt/compute/query_analyzer.py +212 -0
- dvt/compute/router.py +483 -0
- dvt/config/__init__.py +4 -0
- dvt/config/catalogs.py +95 -0
- dvt/config/compute_config.py +406 -0
- dvt/config/profile.py +411 -0
- dvt/config/profiles_v2.py +464 -0
- dvt/config/project.py +893 -0
- dvt/config/renderer.py +232 -0
- dvt/config/runtime.py +491 -0
- dvt/config/selectors.py +209 -0
- dvt/config/utils.py +78 -0
- dvt/connectors/.gitignore +6 -0
- dvt/connectors/README.md +306 -0
- dvt/connectors/catalog.yml +217 -0
- dvt/connectors/download_connectors.py +300 -0
- dvt/constants.py +29 -0
- dvt/context/__init__.py +0 -0
- dvt/context/base.py +746 -0
- dvt/context/configured.py +136 -0
- dvt/context/context_config.py +350 -0
- dvt/context/docs.py +82 -0
- dvt/context/exceptions_jinja.py +179 -0
- dvt/context/macro_resolver.py +195 -0
- dvt/context/macros.py +171 -0
- dvt/context/manifest.py +73 -0
- dvt/context/providers.py +2198 -0
- dvt/context/query_header.py +14 -0
- dvt/context/secret.py +59 -0
- dvt/context/target.py +74 -0
- dvt/contracts/__init__.py +0 -0
- dvt/contracts/files.py +413 -0
- dvt/contracts/graph/__init__.py +0 -0
- dvt/contracts/graph/manifest.py +1904 -0
- dvt/contracts/graph/metrics.py +98 -0
- dvt/contracts/graph/model_config.py +71 -0
- dvt/contracts/graph/node_args.py +42 -0
- dvt/contracts/graph/nodes.py +1806 -0
- dvt/contracts/graph/semantic_manifest.py +233 -0
- dvt/contracts/graph/unparsed.py +812 -0
- dvt/contracts/project.py +417 -0
- dvt/contracts/results.py +53 -0
- dvt/contracts/selection.py +23 -0
- dvt/contracts/sql.py +86 -0
- dvt/contracts/state.py +69 -0
- dvt/contracts/util.py +46 -0
- dvt/deprecations.py +347 -0
- dvt/deps/__init__.py +0 -0
- dvt/deps/base.py +153 -0
- dvt/deps/git.py +196 -0
- dvt/deps/local.py +80 -0
- dvt/deps/registry.py +131 -0
- dvt/deps/resolver.py +149 -0
- dvt/deps/tarball.py +121 -0
- dvt/docs/source/_ext/dbt_click.py +118 -0
- dvt/docs/source/conf.py +32 -0
- dvt/env_vars.py +64 -0
- dvt/event_time/event_time.py +40 -0
- dvt/event_time/sample_window.py +60 -0
- dvt/events/__init__.py +16 -0
- dvt/events/base_types.py +37 -0
- dvt/events/core_types_pb2.py +2 -0
- dvt/events/logging.py +109 -0
- dvt/events/types.py +2534 -0
- dvt/exceptions.py +1487 -0
- dvt/flags.py +89 -0
- dvt/graph/__init__.py +11 -0
- dvt/graph/cli.py +248 -0
- dvt/graph/graph.py +172 -0
- dvt/graph/queue.py +213 -0
- dvt/graph/selector.py +375 -0
- dvt/graph/selector_methods.py +976 -0
- dvt/graph/selector_spec.py +223 -0
- dvt/graph/thread_pool.py +18 -0
- dvt/hooks.py +21 -0
- dvt/include/README.md +49 -0
- dvt/include/__init__.py +3 -0
- dvt/include/global_project.py +4 -0
- dvt/include/starter_project/.gitignore +4 -0
- dvt/include/starter_project/README.md +15 -0
- dvt/include/starter_project/__init__.py +3 -0
- dvt/include/starter_project/analyses/.gitkeep +0 -0
- dvt/include/starter_project/dvt_project.yml +36 -0
- dvt/include/starter_project/macros/.gitkeep +0 -0
- dvt/include/starter_project/models/example/my_first_dbt_model.sql +27 -0
- dvt/include/starter_project/models/example/my_second_dbt_model.sql +6 -0
- dvt/include/starter_project/models/example/schema.yml +21 -0
- dvt/include/starter_project/seeds/.gitkeep +0 -0
- dvt/include/starter_project/snapshots/.gitkeep +0 -0
- dvt/include/starter_project/tests/.gitkeep +0 -0
- dvt/internal_deprecations.py +27 -0
- dvt/jsonschemas/__init__.py +3 -0
- dvt/jsonschemas/jsonschemas.py +309 -0
- dvt/jsonschemas/project/0.0.110.json +4717 -0
- dvt/jsonschemas/project/0.0.85.json +2015 -0
- dvt/jsonschemas/resources/0.0.110.json +2636 -0
- dvt/jsonschemas/resources/0.0.85.json +2536 -0
- dvt/jsonschemas/resources/latest.json +6773 -0
- dvt/links.py +4 -0
- dvt/materializations/__init__.py +0 -0
- dvt/materializations/incremental/__init__.py +0 -0
- dvt/materializations/incremental/microbatch.py +235 -0
- dvt/mp_context.py +8 -0
- dvt/node_types.py +37 -0
- dvt/parser/__init__.py +23 -0
- dvt/parser/analysis.py +21 -0
- dvt/parser/base.py +549 -0
- dvt/parser/common.py +267 -0
- dvt/parser/docs.py +52 -0
- dvt/parser/fixtures.py +51 -0
- dvt/parser/functions.py +30 -0
- dvt/parser/generic_test.py +100 -0
- dvt/parser/generic_test_builders.py +334 -0
- dvt/parser/hooks.py +119 -0
- dvt/parser/macros.py +137 -0
- dvt/parser/manifest.py +2204 -0
- dvt/parser/models.py +574 -0
- dvt/parser/partial.py +1179 -0
- dvt/parser/read_files.py +445 -0
- dvt/parser/schema_generic_tests.py +423 -0
- dvt/parser/schema_renderer.py +111 -0
- dvt/parser/schema_yaml_readers.py +936 -0
- dvt/parser/schemas.py +1467 -0
- dvt/parser/search.py +149 -0
- dvt/parser/seeds.py +28 -0
- dvt/parser/singular_test.py +20 -0
- dvt/parser/snapshots.py +44 -0
- dvt/parser/sources.py +557 -0
- dvt/parser/sql.py +63 -0
- dvt/parser/unit_tests.py +622 -0
- dvt/plugins/__init__.py +20 -0
- dvt/plugins/contracts.py +10 -0
- dvt/plugins/exceptions.py +2 -0
- dvt/plugins/manager.py +164 -0
- dvt/plugins/manifest.py +21 -0
- dvt/profiler.py +20 -0
- dvt/py.typed +1 -0
- dvt/runners/__init__.py +2 -0
- dvt/runners/exposure_runner.py +7 -0
- dvt/runners/no_op_runner.py +46 -0
- dvt/runners/saved_query_runner.py +7 -0
- dvt/selected_resources.py +8 -0
- dvt/task/__init__.py +0 -0
- dvt/task/base.py +504 -0
- dvt/task/build.py +197 -0
- dvt/task/clean.py +57 -0
- dvt/task/clone.py +162 -0
- dvt/task/compile.py +151 -0
- dvt/task/compute.py +366 -0
- dvt/task/debug.py +650 -0
- dvt/task/deps.py +280 -0
- dvt/task/docs/__init__.py +3 -0
- dvt/task/docs/generate.py +408 -0
- dvt/task/docs/index.html +250 -0
- dvt/task/docs/serve.py +28 -0
- dvt/task/freshness.py +323 -0
- dvt/task/function.py +122 -0
- dvt/task/group_lookup.py +46 -0
- dvt/task/init.py +374 -0
- dvt/task/list.py +237 -0
- dvt/task/printer.py +176 -0
- dvt/task/profiles.py +256 -0
- dvt/task/retry.py +175 -0
- dvt/task/run.py +1146 -0
- dvt/task/run_operation.py +142 -0
- dvt/task/runnable.py +802 -0
- dvt/task/seed.py +104 -0
- dvt/task/show.py +150 -0
- dvt/task/snapshot.py +57 -0
- dvt/task/sql.py +111 -0
- dvt/task/test.py +464 -0
- dvt/tests/fixtures/__init__.py +1 -0
- dvt/tests/fixtures/project.py +620 -0
- dvt/tests/util.py +651 -0
- dvt/tracking.py +529 -0
- dvt/utils/__init__.py +3 -0
- dvt/utils/artifact_upload.py +151 -0
- dvt/utils/utils.py +408 -0
- dvt/version.py +249 -0
- dvt_core-1.11.0b4.dist-info/METADATA +252 -0
- dvt_core-1.11.0b4.dist-info/RECORD +261 -0
- dvt_core-1.11.0b4.dist-info/WHEEL +5 -0
- dvt_core-1.11.0b4.dist-info/entry_points.txt +2 -0
- dvt_core-1.11.0b4.dist-info/top_level.txt +1 -0
dvt/compilation.py
ADDED
@@ -0,0 +1,833 @@
```python
import dataclasses
import json
import os
import pickle
from collections import defaultdict, deque
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple

import dvt.tracking
import networkx as nx  # type: ignore
import sqlparse
from dvt.clients import jinja
from dvt.context.providers import (
    generate_runtime_model_context,
    generate_runtime_unit_test_context,
)
from dvt.contracts.graph.manifest import Manifest, UniqueID
from dvt.contracts.graph.nodes import (
    GenericTestNode,
    GraphMemberNode,
    InjectedCTE,
    ManifestNode,
    ManifestSQLNode,
    ModelNode,
    SeedNode,
    UnitTestDefinition,
    UnitTestNode,
)
from dvt.events.types import FoundStats, WritingInjectedSQLForNode
from dvt.exceptions import (
    DbtInternalError,
    DbtRuntimeError,
    ForeignKeyConstraintToSyntaxError,
    GraphDependencyNotFoundError,
    ParsingError,
)
from dvt.flags import get_flags
from dvt.graph import Graph
from dvt.node_types import ModelLanguage, NodeType

from dbt.adapters.factory import get_adapter
from dbt_common.clients.system import make_directory
from dbt_common.contracts.constraints import ConstraintType
from dbt_common.events.contextvars import get_node_info
from dbt_common.events.format import pluralize
from dbt_common.events.functions import fire_event
from dbt_common.events.types import Note
from dbt_common.invocation import get_invocation_id

graph_file_name = "graph.gpickle"


def print_compile_stats(stats: Dict[NodeType, int]):
    # create tracking event for resource_counts
    if dbt.tracking.active_user is not None:
        resource_counts = {k.pluralize(): v for k, v in stats.items()}
        dbt.tracking.track_resource_counts(resource_counts)

    # do not include resource types that are not actually defined in the project
    stat_line = ", ".join(
        [pluralize(ct, t).replace("_", " ") for t, ct in stats.items() if ct != 0]
    )
    fire_event(FoundStats(stat_line=stat_line))


def _node_enabled(node: ManifestNode):
    # Disabled models are already excluded from the manifest
    if node.resource_type == NodeType.Test and not node.config.enabled:
        return False
    else:
        return True


def _generate_stats(manifest: Manifest) -> Dict[NodeType, int]:
    stats: Dict[NodeType, int] = defaultdict(int)
    for node in manifest.nodes.values():
        if _node_enabled(node):
            stats[node.resource_type] += 1

    # Disabled nodes don't appear in the following collections, so we don't check.
    stats[NodeType.Source] += len(manifest.sources)
    stats[NodeType.Exposure] += len(manifest.exposures)
    stats[NodeType.Metric] += len(manifest.metrics)
    stats[NodeType.Macro] += len(manifest.macros)
    stats[NodeType.Group] += len(manifest.groups)
    stats[NodeType.SemanticModel] += len(manifest.semantic_models)
    stats[NodeType.SavedQuery] += len(manifest.saved_queries)
    stats[NodeType.Unit] += len(manifest.unit_tests)

    # TODO: should we be counting dimensions + entities?

    return stats
```
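The stat line built in `print_compile_stats` can be previewed in isolation. A minimal sketch, assuming `dbt_common` is installed, with plain strings standing in for the `NodeType` keys and invented counts (with real keys, pluralization goes through `NodeType.pluralize()`):

```python
# Minimal sketch of the stat-line formatting in print_compile_stats above;
# counts and keys are invented for the example.
from dbt_common.events.format import pluralize

stats = {"model": 3, "test": 5, "unit_test": 1, "seed": 0}
stat_line = ", ".join(
    pluralize(ct, t).replace("_", " ") for t, ct in stats.items() if ct != 0
)
print(stat_line)  # 3 models, 5 tests, 1 unit test
```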
```python
def _add_prepended_cte(prepended_ctes, new_cte):
    for cte in prepended_ctes:
        if cte.id == new_cte.id and new_cte.sql:
            cte.sql = new_cte.sql
            return
    if new_cte.sql:
        prepended_ctes.append(new_cte)


def _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):
    for new_cte in new_prepended_ctes:
        _add_prepended_cte(prepended_ctes, new_cte)


def _get_tests_for_node(manifest: Manifest, unique_id: UniqueID) -> List[UniqueID]:
    """Get a list of tests that depend on the node with the
    provided unique id"""

    tests = []
    if unique_id in manifest.child_map:
        for child_unique_id in manifest.child_map[unique_id]:
            if child_unique_id.startswith("test."):
                tests.append(child_unique_id)

    return tests
```
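The `child_map` that `_get_tests_for_node` scans is a plain mapping from a node's unique id to its children's unique ids (populated by `manifest.build_parent_and_child_maps()`, called later in `compile()`). A standalone sketch with invented ids:

```python
# Standalone sketch of the child_map lookup in _get_tests_for_node;
# the ids below are invented for the example.
child_map = {
    "model.my_project.orders": [
        "test.my_project.not_null_orders_id",
        "model.my_project.order_items",
    ]
}

tests = [
    child
    for child in child_map.get("model.my_project.orders", [])
    if child.startswith("test.")
]
print(tests)  # ['test.my_project.not_null_orders_id']
```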
```python
@dataclasses.dataclass
class SeenDetails:
    node_id: UniqueID
    visits: int = 0
    ancestors: Set[UniqueID] = dataclasses.field(default_factory=set)
    awaits_tests: Set[Tuple[UniqueID, Tuple[UniqueID, ...]]] = dataclasses.field(
        default_factory=set
    )


class Linker:
    def __init__(self, data=None) -> None:
        if data is None:
            data = {}
        self.graph: nx.DiGraph = nx.DiGraph(**data)

    def edges(self):
        return self.graph.edges()

    def nodes(self):
        return self.graph.nodes()

    def find_cycles(self):
        try:
            cycle = nx.find_cycle(self.graph)
        except nx.NetworkXNoCycle:
            return None
        else:
            # cycles is a List[Tuple[str, ...]]
            return " --> ".join(c[0] for c in cycle)

    def dependency(self, node1, node2):
        "indicate that node1 depends on node2"
        self.graph.add_node(node1)
        self.graph.add_node(node2)
        self.graph.add_edge(node2, node1)

    def add_node(self, node):
        self.graph.add_node(node)

    def write_graph(self, outfile: str, manifest: Manifest):
        """Write the graph to a gpickle file. Before doing so, serialize and
        include all nodes in their corresponding graph entries.
        """
        out_graph = self.graph.copy()
        for node_id in self.graph:
            data = manifest.expect(node_id).to_dict(omit_none=True)
            out_graph.add_node(node_id, **data)
        with open(outfile, "wb") as outfh:
            pickle.dump(out_graph, outfh, protocol=pickle.HIGHEST_PROTOCOL)

    def link_node(self, node: GraphMemberNode, manifest: Manifest):
        self.add_node(node.unique_id)

        for dependency in node.depends_on_nodes:
            if dependency in manifest.nodes:
                self.dependency(node.unique_id, (manifest.nodes[dependency].unique_id))
            elif dependency in manifest.sources:
                self.dependency(node.unique_id, (manifest.sources[dependency].unique_id))
            elif dependency in manifest.metrics:
                self.dependency(node.unique_id, (manifest.metrics[dependency].unique_id))
            elif dependency in manifest.semantic_models:
                self.dependency(node.unique_id, (manifest.semantic_models[dependency].unique_id))
            elif dependency in manifest.functions:
                self.dependency(node.unique_id, (manifest.functions[dependency].unique_id))
            else:
                raise GraphDependencyNotFoundError(node, dependency)

    def link_graph(self, manifest: Manifest):
        for source in manifest.sources.values():
            self.add_node(source.unique_id)
        for node in manifest.nodes.values():
            self.link_node(node, manifest)
        for semantic_model in manifest.semantic_models.values():
            self.link_node(semantic_model, manifest)
        for exposure in manifest.exposures.values():
            self.link_node(exposure, manifest)
        for function in manifest.functions.values():
            self.link_node(function, manifest)
        for metric in manifest.metrics.values():
            self.link_node(metric, manifest)
        for unit_test in manifest.unit_tests.values():
            self.link_node(unit_test, manifest)
        for saved_query in manifest.saved_queries.values():
            self.link_node(saved_query, manifest)

        cycle = self.find_cycles()

        if cycle:
            raise RuntimeError("Found a cycle: {}".format(cycle))
```
```python
    def add_test_edges(self, manifest: Manifest) -> None:
        if not get_flags().USE_FAST_TEST_EDGES:
            self.add_test_edges_1(manifest)
        else:
            self.add_test_edges_2(manifest)

    def add_test_edges_1(self, manifest: Manifest) -> None:
        """This method adds additional edges to the DAG. For a given non-test
        executable node, add an edge from an upstream test to the given node if
        the set of nodes the test depends on is a subset of the upstream nodes
        for the given node."""

        # HISTORICAL NOTE: To understand the motivation behind this function,
        # consider a node A with tests and a node B which depends (either directly
        # or indirectly) on A. It would be nice if B were not executed until
        # all of the tests on A are finished. After all, we don't want to
        # propagate bad data. We can enforce that behavior by adding new
        # dependencies (edges) from tests to nodes that should wait on them.
        #
        # This function implements a rough approximation of the behavior just
        # described. In fact, for tests that only depend on a single node, it
        # always works.
        #
        # Things get trickier for tests that depend on multiple nodes. In that
        # case, if we are not careful, we will introduce cycles. That seems to
        # be the reason this function adds dependencies from a downstream node to
        # an upstream test if and only if the downstream node is already a
        # descendant of all the nodes the upstream test depends on. By following
        # that rule, it never makes the node dependent on new upstream nodes other
        # than the tests themselves, and no cycles will be created.
        #
        # One drawback (Drawback 1) of the approach taken in this function is
        # that it could still allow a downstream node to proceed before all
        # testing is done on its ancestors, if it happens to have ancestors that
        # are not also ancestors of a test with multiple dependencies.
        #
        # Another drawback (Drawback 2) is that the approach below adds far more
        # edges than are strictly needed. After all, if we have A -> B -> C,
        # there is no need to add a new edge A -> C. But this function often does.
        #
        # Drawback 2 is resolved in the new add_test_edges_2() implementation
        # below, which is also typically much faster. Drawback 1 has been left in
        # place in order to conservatively retain existing behavior, and so that
        # the new implementation can be verified against this existing
        # implementation by ensuring both resulting graphs have the same transitive
        # reduction.

        # MOTIVATING IDEA: Given a graph...
        #
        # model1 --> model2 --> model3
        #   |             |
        #   |            \/
        #  \/          test 2
        # test1
        #
        # ...produce the following...
        #
        # model1 --> model2 --> model3
        #   |       /\    |      /\ /\
        #   |       |    \/      |  |
        #  \/       |  test2 ----|  |
        # test1 ----|---------------|

        for node_id in self.graph:
            # If node is executable (in manifest.nodes) and does _not_
            # represent a test, continue.
            if (
                node_id in manifest.nodes
                and manifest.nodes[node_id].resource_type != NodeType.Test
            ):
                # Get *everything* upstream of the node
                all_upstream_nodes = nx.traversal.bfs_tree(self.graph, node_id, reverse=True)
                # Get the set of upstream nodes not including the current node.
                upstream_nodes = set([n for n in all_upstream_nodes if n != node_id])

                # Get all tests that depend on any upstream nodes.
                upstream_tests = []
                for upstream_node in upstream_nodes:
                    # This gets tests with unique_ids starting with "test."
                    upstream_tests += _get_tests_for_node(manifest, upstream_node)

                for upstream_test in upstream_tests:
                    # Get the set of all nodes that the test depends on
                    # including the upstream_node itself. This is necessary
                    # because tests can depend on multiple nodes (ex:
                    # relationship tests). Test nodes do not distinguish
                    # between what node the test is "testing" and what
                    # node(s) it depends on.
                    test_depends_on = set(manifest.nodes[upstream_test].depends_on_nodes)

                    # If the set of nodes that an upstream test depends on
                    # is a subset of all upstream nodes of the current node,
                    # add an edge from the upstream test to the current node.
                    if test_depends_on.issubset(upstream_nodes):
                        self.graph.add_edge(upstream_test, node_id, edge_type="parent_test")
```
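The subset rule can be replayed on the MOTIVATING IDEA graph above with plain networkx, no `Manifest` needed: test1 depends only on model1, test2 on model1 and model2.

```python
# Standalone replay of the subset rule in add_test_edges_1 on the
# MOTIVATING IDEA graph above (plain networkx, no Manifest).
import networkx as nx

g = nx.DiGraph()
g.add_edges_from([("model1", "model2"), ("model2", "model3")])
g.add_edges_from([("model1", "test1"), ("model1", "test2"), ("model2", "test2")])
test_deps = {"test1": {"model1"}, "test2": {"model1", "model2"}}

for node in ("model2", "model3"):
    upstream = set(nx.bfs_tree(g, node, reverse=True)) - {node}
    for test, deps in test_deps.items():
        if deps.issubset(upstream):
            print(f"{test} --> {node}")
# test1 --> model2
# test1 --> model3
# test2 --> model3
```

This reproduces exactly the three new edges drawn in the second diagram: test2 gates only model3, because model2 is not downstream of all of test2's dependencies.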
```python
    def add_test_edges_2(self, manifest: Manifest):
        graph = self.graph
        new_edges = self._get_test_edges_2(graph, manifest)
        for e in new_edges:
            graph.add_edge(e[0], e[1], edge_type="parent_test")

    @staticmethod
    def _get_test_edges_2(
        graph: nx.DiGraph, manifest: Manifest
    ) -> Iterable[Tuple[UniqueID, UniqueID]]:
        # This function enforces the same execution behavior as add_test_edges,
        # but executes far more quickly and adds far fewer edges. See the
        # HISTORICAL NOTE above.
        #
        # The idea is to first scan for "single-tested" nodes (which have tests
        # that depend only upon that node) and "multi-tested" nodes (which
        # have tests that depend on multiple nodes). Single-tested nodes are
        # handled quickly and easily.
        #
        # The less common but more complex case of multi-tested nodes is handled
        # by a specialized function.

        new_edges: List[Tuple[UniqueID, UniqueID]] = []

        source_nodes: List[UniqueID] = []
        executable_nodes: Set[UniqueID] = set()
        multi_tested_nodes = set()
        # Dictionary mapping nodes with single-dep tests to a list of those tests.
        single_tested_nodes: dict[UniqueID, List[UniqueID]] = defaultdict(list)
        for node_id in graph.nodes:
            manifest_node = manifest.nodes.get(node_id, None)
            if manifest_node is None:
                continue

            if next(graph.predecessors(node_id), None) is None:
                source_nodes.append(node_id)

            if manifest_node.resource_type != NodeType.Test:
                executable_nodes.add(node_id)
            else:
                test_deps = manifest_node.depends_on_nodes
                if len(test_deps) == 1:
                    single_tested_nodes[test_deps[0]].append(node_id)
                elif len(test_deps) > 1:
                    multi_tested_nodes.update(manifest_node.depends_on_nodes)

        # Now that we have all the necessary information conveniently organized,
        # add new edges for single-tested nodes.
        for node_id, test_ids in single_tested_nodes.items():
            succs = [s for s in graph.successors(node_id) if s in executable_nodes]
            for succ_id in succs:
                for test_id in test_ids:
                    new_edges.append((test_id, succ_id))

        # Get the edges for multi-tested nodes separately, if needed.
        if len(multi_tested_nodes) > 0:
            multi_test_edges = Linker._get_multi_test_edges(
                graph, manifest, source_nodes, executable_nodes, multi_tested_nodes
            )
            new_edges += multi_test_edges

        return new_edges
```
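A standalone sketch of the single- vs multi-tested split performed by the scan above, with invented test ids and dependency lists:

```python
# Standalone sketch of the single-/multi-tested classification in
# _get_test_edges_2; ids and dependency lists are invented.
from collections import defaultdict

test_deps = {
    "test.proj.not_null_orders_id": ["model.proj.orders"],
    "test.proj.relationships_orders_customers": [
        "model.proj.customers",
        "model.proj.orders",
    ],
}

single_tested_nodes = defaultdict(list)
multi_tested_nodes = set()
for test_id, deps in test_deps.items():
    if len(deps) == 1:
        single_tested_nodes[deps[0]].append(test_id)
    elif len(deps) > 1:
        multi_tested_nodes.update(deps)

print(dict(single_tested_nodes))
# {'model.proj.orders': ['test.proj.not_null_orders_id']}
print(sorted(multi_tested_nodes))
# ['model.proj.customers', 'model.proj.orders']
```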
```python
    @staticmethod
    def _get_multi_test_edges(
        graph: nx.DiGraph,
        manifest: Manifest,
        source_nodes: Iterable[UniqueID],
        executable_nodes: Set[UniqueID],
        multi_tested_nodes,
    ) -> List[Tuple[UniqueID, UniqueID]]:
        # Works through the graph in a breadth-first style, processing nodes from
        # a ready queue which initially consists of nodes with no ancestors,
        # and adding more nodes to the ready queue after all their ancestors
        # have been processed. All the while, the relevant details of all nodes
        # "seen" by the search so far are maintained in a SeenDetails record,
        # including its ancestor set and the tests it is "awaiting" (i.e. tests of
        # its ancestors). The processing step adds test edges when every dependency
        # of an awaited test is an ancestor of a node that is being processed.
        # Downstream nodes are then exempted from awaiting the test.
        #
        # Memory consumption is potentially O(n^2) with n the number of nodes in
        # the graph, since the average number of ancestors and tests being awaited
        # for each of the n nodes could itself be O(n). But we only track ancestors
        # that are multi-tested, which should keep things closer to O(n) in
        # real-world scenarios.

        new_edges: List[Tuple[UniqueID, UniqueID]] = []
        ready: deque = deque(source_nodes)
        details = {node_id: SeenDetails(node_id) for node_id in source_nodes}

        while len(ready) > 0:
            curr_details: SeenDetails = details[ready.pop()]
            test_ids = _get_tests_for_node(manifest, curr_details.node_id)
            new_awaits_for_succs = curr_details.awaits_tests.copy()
            for test_id in test_ids:
                deps: List[UniqueID] = sorted(manifest.nodes[test_id].depends_on_nodes)
                if len(deps) > 1:
                    # Tests with only one dep were already handled.
                    new_awaits_for_succs.add((test_id, tuple(deps)))

            for succ_id in [
                s for s in graph.successors(curr_details.node_id) if s in executable_nodes
            ]:
                suc_details = details.get(succ_id, None)
                if suc_details is None:
                    suc_details = SeenDetails(succ_id)
                    details[succ_id] = suc_details
                suc_details.visits += 1
                suc_details.awaits_tests.update(new_awaits_for_succs)
                suc_details.ancestors.update(curr_details.ancestors)
                if curr_details.node_id in multi_tested_nodes:
                    # Only track ancestry information for the set of nodes
                    # we will actually check against later.
                    suc_details.ancestors.add(curr_details.node_id)

                if suc_details.visits == graph.in_degree(succ_id):
                    if len(suc_details.awaits_tests) > 0:
                        removes = set()
                        for awt in suc_details.awaits_tests:
                            if not any(True for a in awt[1] if a not in suc_details.ancestors):
                                removes.add(awt)
                                new_edges.append((awt[0], succ_id))

                        suc_details.awaits_tests.difference_update(removes)
                    ready.appendleft(succ_id)

            # We are now done with the current node and all of its ancestors.
            # Discard its details to save memory.
            del details[curr_details.node_id]

        return new_edges
```
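The readiness bookkeeping above is Kahn-style: a successor enters the ready queue only once it has been visited along every incoming edge. A minimal sketch of just that rule (node names invented):

```python
# Minimal sketch of the Kahn-style readiness rule used by
# _get_multi_test_edges: a node is enqueued only after being visited
# once per incoming edge.
from collections import deque

import networkx as nx

g = nx.DiGraph([("a", "c"), ("b", "c")])
visits = {n: 0 for n in g}
ready = deque(n for n in g if g.in_degree(n) == 0)
order = []
while ready:
    node = ready.pop()
    order.append(node)
    for succ in g.successors(node):
        visits[succ] += 1
        if visits[succ] == g.in_degree(succ):
            ready.appendleft(succ)
print(order)  # e.g. ['b', 'a', 'c'] -- 'c' only after both parents
```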
```python
    def get_graph(self, manifest: Manifest) -> Graph:
        self.link_graph(manifest)
        return Graph(self.graph)

    def get_graph_summary(self, manifest: Manifest) -> Dict[int, Dict[str, Any]]:
        """Create a smaller summary of the graph, suitable for basic diagnostics
        and performance tuning. The summary includes only the edge structure,
        node types, and node names. Each of the n nodes is assigned an integer
        index 0, 1, 2,..., n-1 for compactness"""
        graph_nodes = dict()
        index_dict = dict()
        for node_index, node_name in enumerate(self.graph):
            index_dict[node_name] = node_index
            data = manifest.expect(node_name).to_dict(omit_none=True)
            graph_nodes[node_index] = {"name": node_name, "type": data["resource_type"]}

        for node_index, node in graph_nodes.items():
            successors = [index_dict[n] for n in self.graph.successors(node["name"])]
            if successors:
                node["succ"] = [index_dict[n] for n in self.graph.successors(node["name"])]

        return graph_nodes
```
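For reference, the summary maps integer indexes to small records; an illustrative shape for a two-node project (names and resource types invented):

```python
# Illustrative shape of a get_graph_summary() result; names and
# resource types are invented for the example.
summary = {
    0: {"name": "model.proj.stg_orders", "type": "model", "succ": [1]},
    1: {"name": "model.proj.orders", "type": "model"},
}
# "succ" lists integer indexes of downstream nodes and is omitted for
# nodes with no successors, exactly as in the loop above.
```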
```python
class Compiler:
    def __init__(self, config) -> None:
        self.config = config

    def initialize(self):
        make_directory(self.config.project_target_path)

    # creates a ModelContext which is converted to
    # a dict for jinja rendering of SQL
    def _create_node_context(
        self,
        node: ManifestSQLNode,
        manifest: Manifest,
        extra_context: Dict[str, Any],
    ) -> Dict[str, Any]:
        if isinstance(node, UnitTestNode):
            context = generate_runtime_unit_test_context(node, self.config, manifest)
        else:
            context = generate_runtime_model_context(node, self.config, manifest)
        context.update(extra_context)

        if isinstance(node, GenericTestNode):
            # for test nodes, add a special keyword args value to the context
            jinja.add_rendered_test_kwargs(context, node)

        return context

    def add_ephemeral_prefix(self, name: str):
        adapter = get_adapter(self.config)
        relation_cls = adapter.Relation
        return relation_cls.add_ephemeral_prefix(name)

    def _recursively_prepend_ctes(
        self,
        model: ManifestSQLNode,
        manifest: Manifest,
        extra_context: Optional[Dict[str, Any]],
    ) -> Tuple[ManifestSQLNode, List[InjectedCTE]]:
        """This method is called by the 'compile_node' method. Starting
        from the node that it is passed in, it will recursively call
        itself using the 'extra_ctes'. The 'ephemeral' models do
        not produce SQL that is executed directly, instead they
        are rolled up into the models that refer to them by
        inserting CTEs into the SQL.
        """
        if model.compiled_code is None:
            raise DbtRuntimeError("Cannot inject ctes into an uncompiled node", model)

        # tech debt: safe flag/arg access (#6259)
        if not getattr(self.config.args, "inject_ephemeral_ctes", True):
            return (model, [])

        # extra_ctes_injected flag says that we've already recursively injected the ctes
        if model.extra_ctes_injected:
            return (model, model.extra_ctes)

        # Just to make it plain that nothing is actually injected for this case
        if len(model.extra_ctes) == 0:
            # SeedNodes don't have compilation attributes
            if not isinstance(model, SeedNode):
                model.extra_ctes_injected = True
            return (model, [])

        # This stores the ctes which will all be recursively
        # gathered and then "injected" into the model.
        prepended_ctes: List[InjectedCTE] = []

        # extra_ctes are added to the model by
        # RuntimeRefResolver.create_relation, which adds an
        # extra_cte for every model relation which is an
        # ephemeral model. InjectedCTEs have a unique_id and sql.
        # extra_ctes start out with sql set to None, and the sql is set in this loop.
        for cte in model.extra_ctes:
            if cte.id not in manifest.nodes:
                raise DbtInternalError(
                    f"During compilation, found a cte reference that "
                    f"could not be resolved: {cte.id}"
                )
            cte_model = manifest.nodes[cte.id]
            assert not isinstance(cte_model, SeedNode)

            if not cte_model.is_ephemeral_model:
                raise DbtInternalError(f"{cte.id} is not ephemeral")

            # This model has already been compiled and extra_ctes_injected, so it's been
            # through here before. We already checked above for extra_ctes_injected, but
            # checking again because updates may have happened in another thread.
            if cte_model.compiled is True and cte_model.extra_ctes_injected is True:
                new_prepended_ctes = cte_model.extra_ctes

            # if the cte_model isn't compiled, i.e. first time here
            else:
                # This is an ephemeral parsed model that we can compile.
                # Render the raw_code and set compiled to True
                cte_model = self._compile_code(cte_model, manifest, extra_context)
                # recursively call this method, sets extra_ctes_injected to True
                cte_model, new_prepended_ctes = self._recursively_prepend_ctes(
                    cte_model, manifest, extra_context
                )
                # Write compiled SQL file
                self._write_node(cte_model)

            _extend_prepended_ctes(prepended_ctes, new_prepended_ctes)

            new_cte_name = self.add_ephemeral_prefix(cte_model.identifier)
            rendered_sql = cte_model._pre_injected_sql or cte_model.compiled_code
            sql = f" {new_cte_name} as (\n{rendered_sql}\n)"

            _add_prepended_cte(prepended_ctes, InjectedCTE(id=cte.id, sql=sql))

        # Check again before updating for multi-threading
        if not model.extra_ctes_injected:
            injected_sql = inject_ctes_into_sql(
                model.compiled_code,
                prepended_ctes,
            )
            model.extra_ctes_injected = True
            model._pre_injected_sql = model.compiled_code
            model.compiled_code = injected_sql
            model.extra_ctes = prepended_ctes

        # if model.extra_ctes is not set to prepended ctes, something went wrong
        return model, model.extra_ctes
```
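The CTE fragment assembled for each ephemeral parent follows the `" {name} as (\n{sql}\n)"` shape built above. A string-only sketch with invented names (the `__dbt__cte__` prefix comes from the adapter's `add_ephemeral_prefix`, as in the `inject_ctes_into_sql` docstring below):

```python
# String-only sketch of the CTE fragment built inside
# _recursively_prepend_ctes; identifier and SQL are invented.
new_cte_name = "__dbt__cte__stg_orders"
rendered_sql = "select * from raw.orders"
sql = f" {new_cte_name} as (\n{rendered_sql}\n)"
print(sql)
#  __dbt__cte__stg_orders as (
# select * from raw.orders
# )
```

Note the leading space: it is what separates the fragment from the `with` keyword once injected.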
```python
    # Sets compiled_code and compiled flag in the ManifestSQLNode passed in,
    # creates a "context" dictionary for jinja rendering,
    # and then renders the "compiled_code" using the node, the
    # raw_code and the context.
    def _compile_code(
        self,
        node: ManifestSQLNode,
        manifest: Manifest,
        extra_context: Optional[Dict[str, Any]] = None,
    ) -> ManifestSQLNode:
        if extra_context is None:
            extra_context = {}

        if node.language == ModelLanguage.python and node.resource_type == NodeType.Model:
            context = self._create_node_context(node, manifest, extra_context)

            postfix = jinja.get_rendered(
                "{{ py_script_postfix(model) }}",
                context,
                node,
            )
            # we should NOT jinja render the python model's 'raw code'
            node.compiled_code = f"{node.raw_code}\n\n{postfix}"

        else:
            context = self._create_node_context(node, manifest, extra_context)
            node.compiled_code = jinja.get_rendered(
                node.raw_code,
                context,
                node,
            )

        node.compiled = True

        # relation_name is set at parse time, except for tests without store_failures,
        # but cli param can turn on store_failures, so we set it here.
        if (
            node.resource_type == NodeType.Test
            and node.relation_name is None
            and node.is_relational
        ):
            adapter = get_adapter(self.config)
            relation_cls = adapter.Relation
            relation_name = str(relation_cls.create_from(self.config, node))
            node.relation_name = relation_name

        # Compile 'ref' and 'source' expressions in foreign key constraints
        if isinstance(node, ModelNode):
            for constraint in node.all_constraints:
                if constraint.type == ConstraintType.foreign_key and constraint.to:
                    constraint.to = self._compile_relation_for_foreign_key_constraint_to(
                        manifest, node, constraint.to
                    )

        return node

    def _compile_relation_for_foreign_key_constraint_to(
        self, manifest: Manifest, node: ManifestSQLNode, to_expression: str
    ) -> str:
        try:
            foreign_key_node = manifest.find_node_from_ref_or_source(to_expression)
        except ParsingError:
            raise ForeignKeyConstraintToSyntaxError(node, to_expression)

        if not foreign_key_node:
            raise GraphDependencyNotFoundError(node, to_expression)

        adapter = get_adapter(self.config)
        relation_name = str(adapter.Relation.create_from(self.config, foreign_key_node))
        return relation_name

    # This method doesn't actually "compile" any of the nodes. That is done by the
    # "compile_node" method. This creates a Linker and builds the networkx graph,
    # writes out the graph.gpickle file, and prints the stats, returning a Graph object.
    def compile(self, manifest: Manifest, write=True, add_test_edges=False) -> Graph:
        self.initialize()
        linker = Linker()
        linker.link_graph(manifest)

        # Create a file containing basic information about graph structure,
        # supporting diagnostics and performance analysis.
        summaries: Dict = dict()
        summaries["_invocation_id"] = get_invocation_id()
        summaries["linked"] = linker.get_graph_summary(manifest)

        # This is only called for the "build" command
        if add_test_edges:
            manifest.build_parent_and_child_maps()
            linker.add_test_edges(manifest)

            # Create another diagnostic summary, just as above, but this time
            # including the test edges.
            summaries["with_test_edges"] = linker.get_graph_summary(manifest)

        with open(
            os.path.join(self.config.project_target_path, "graph_summary.json"), "w"
        ) as out_stream:
            try:
                out_stream.write(json.dumps(summaries))
            except Exception as e:  # This is non-essential information, so merely note failures.
                fire_event(
                    Note(
                        msg=f"An error was encountered writing the graph summary information: {e}"
                    )
                )

        stats = _generate_stats(manifest)

        if write:
            self.write_graph_file(linker, manifest)

        # Do not print these for list command
        if self.config.args.which != "list":
            stats = _generate_stats(manifest)
            print_compile_stats(stats)

        return Graph(linker.graph)

    def write_graph_file(self, linker: Linker, manifest: Manifest):
        filename = graph_file_name
        graph_path = os.path.join(self.config.project_target_path, filename)
        flags = get_flags()
        if flags.WRITE_JSON:
            linker.write_graph(graph_path, manifest)

    # writes the "compiled_code" into the target/compiled directory
    def _write_node(
        self, node: ManifestSQLNode, split_suffix: Optional[str] = None
    ) -> ManifestSQLNode:
        if not node.extra_ctes_injected or node.resource_type in (
            NodeType.Snapshot,
            NodeType.Seed,
        ):
            return node
        fire_event(WritingInjectedSQLForNode(node_info=get_node_info()))

        if node.compiled_code:
            node.compiled_path = node.get_target_write_path(
                self.config.target_path, "compiled", split_suffix
            )
            node.write_node(self.config.project_root, node.compiled_path, node.compiled_code)
        return node

    def compile_node(
        self,
        node: ManifestSQLNode,
        manifest: Manifest,
        extra_context: Optional[Dict[str, Any]] = None,
        write: bool = True,
        split_suffix: Optional[str] = None,
    ) -> ManifestSQLNode:
        """This is the main entry point into this code. It's called by
        CompileRunner.compile, GenericRPCRunner.compile, and
        RunTask.get_hook_sql. It calls '_compile_code' to render
        the node's raw_code into compiled_code, and then calls the
        recursive method to "prepend" the ctes.
        """
        # REVIEW: UnitTestDefinition shouldn't be possible here because of the
        # type of node, and it is likewise an invalid return type.
        if isinstance(node, UnitTestDefinition):
            return node

        # Make sure Lexer for sqlparse 0.4.4 is initialized
        from sqlparse.lexer import Lexer  # type: ignore

        if hasattr(Lexer, "get_default_instance"):
            Lexer.get_default_instance()

        node = self._compile_code(node, manifest, extra_context)

        node, _ = self._recursively_prepend_ctes(node, manifest, extra_context)
        if write:
            self._write_node(node, split_suffix=split_suffix)
        return node


def inject_ctes_into_sql(sql: str, ctes: List[InjectedCTE]) -> str:
    """
    `ctes` is a list of InjectedCTEs like:

        [
            InjectedCTE(
                id="cte_id_1",
                sql="__dbt__cte__ephemeral as (select * from table)",
            ),
            InjectedCTE(
                id="cte_id_2",
                sql="__dbt__cte__events as (select id, type from events)",
            ),
        ]

    Given `sql` like:

        "with internal_cte as (select * from sessions)
        select * from internal_cte"

    This will spit out:

        "with __dbt__cte__ephemeral as (select * from table),
        __dbt__cte__events as (select id, type from events),
        internal_cte as (select * from sessions)
        select * from internal_cte"

    (Whitespace enhanced for readability.)
    """
    if len(ctes) == 0:
        return sql

    parsed_stmts = sqlparse.parse(sql)
    parsed = parsed_stmts[0]

    with_stmt = None
    for token in parsed.tokens:
        if token.is_keyword and token.normalized == "WITH":
            with_stmt = token
        elif token.is_keyword and token.normalized == "RECURSIVE" and with_stmt is not None:
            with_stmt = token
            break
        elif not token.is_whitespace and with_stmt is not None:
            break

    if with_stmt is None:
        # no with stmt, add one, and inject CTEs right at the beginning
        # [original_sql]
        first_token = parsed.token_first()
        with_token = sqlparse.sql.Token(sqlparse.tokens.Keyword, "with")
        parsed.insert_before(first_token, with_token)
        # [with][original_sql]
        injected_ctes = ", ".join(c.sql for c in ctes) + " "
        injected_ctes_token = sqlparse.sql.Token(sqlparse.tokens.Keyword, injected_ctes)
        parsed.insert_after(with_token, injected_ctes_token)
        # [with][joined_ctes][original_sql]
    else:
        # with stmt exists so we don't need to add one, but we do need to add a comma
        # between the injected ctes and the original sql
        # [with][original_sql]
        injected_ctes = ", ".join(c.sql for c in ctes)
        injected_ctes_token = sqlparse.sql.Token(sqlparse.tokens.Keyword, injected_ctes)
        parsed.insert_after(with_stmt, injected_ctes_token)
        # [with][joined_ctes][original_sql]
        comma_token = sqlparse.sql.Token(sqlparse.tokens.Punctuation, ", ")
        parsed.insert_after(injected_ctes_token, comma_token)
        # [with][joined_ctes][, ][original_sql]

    return str(parsed)
```
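A usage sketch for the no-`with` branch, assuming the module is importable as `dvt.compilation` per the wheel layout above. Note that each `InjectedCTE.sql` carries a leading space, as produced by `_recursively_prepend_ctes`, which is what separates it from the inserted `with` keyword:

```python
# Usage sketch for inject_ctes_into_sql; InjectedCTE is the dataclass
# imported at the top of the module, and the ids/SQL below are invented.
from dvt.compilation import inject_ctes_into_sql
from dvt.contracts.graph.nodes import InjectedCTE

ctes = [InjectedCTE(id="model.proj.ephem", sql=" __dbt__cte__ephem as (select 1 as id)")]
print(inject_ctes_into_sql("select * from __dbt__cte__ephem", ctes))
# with __dbt__cte__ephem as (select 1 as id) select * from __dbt__cte__ephem
```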