dvt-core 1.11.0b4 (dvt_core-1.11.0b4-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dvt-core might be problematic.

Files changed (261)
  1. dvt/__init__.py +7 -0
  2. dvt/_pydantic_shim.py +26 -0
  3. dvt/adapters/__init__.py +16 -0
  4. dvt/adapters/multi_adapter_manager.py +268 -0
  5. dvt/artifacts/__init__.py +0 -0
  6. dvt/artifacts/exceptions/__init__.py +1 -0
  7. dvt/artifacts/exceptions/schemas.py +31 -0
  8. dvt/artifacts/resources/__init__.py +116 -0
  9. dvt/artifacts/resources/base.py +68 -0
  10. dvt/artifacts/resources/types.py +93 -0
  11. dvt/artifacts/resources/v1/analysis.py +10 -0
  12. dvt/artifacts/resources/v1/catalog.py +23 -0
  13. dvt/artifacts/resources/v1/components.py +275 -0
  14. dvt/artifacts/resources/v1/config.py +282 -0
  15. dvt/artifacts/resources/v1/documentation.py +11 -0
  16. dvt/artifacts/resources/v1/exposure.py +52 -0
  17. dvt/artifacts/resources/v1/function.py +53 -0
  18. dvt/artifacts/resources/v1/generic_test.py +32 -0
  19. dvt/artifacts/resources/v1/group.py +22 -0
  20. dvt/artifacts/resources/v1/hook.py +11 -0
  21. dvt/artifacts/resources/v1/macro.py +30 -0
  22. dvt/artifacts/resources/v1/metric.py +173 -0
  23. dvt/artifacts/resources/v1/model.py +146 -0
  24. dvt/artifacts/resources/v1/owner.py +10 -0
  25. dvt/artifacts/resources/v1/saved_query.py +112 -0
  26. dvt/artifacts/resources/v1/seed.py +42 -0
  27. dvt/artifacts/resources/v1/semantic_layer_components.py +72 -0
  28. dvt/artifacts/resources/v1/semantic_model.py +315 -0
  29. dvt/artifacts/resources/v1/singular_test.py +14 -0
  30. dvt/artifacts/resources/v1/snapshot.py +92 -0
  31. dvt/artifacts/resources/v1/source_definition.py +85 -0
  32. dvt/artifacts/resources/v1/sql_operation.py +10 -0
  33. dvt/artifacts/resources/v1/unit_test_definition.py +78 -0
  34. dvt/artifacts/schemas/__init__.py +0 -0
  35. dvt/artifacts/schemas/base.py +191 -0
  36. dvt/artifacts/schemas/batch_results.py +24 -0
  37. dvt/artifacts/schemas/catalog/__init__.py +12 -0
  38. dvt/artifacts/schemas/catalog/v1/__init__.py +0 -0
  39. dvt/artifacts/schemas/catalog/v1/catalog.py +60 -0
  40. dvt/artifacts/schemas/freshness/__init__.py +1 -0
  41. dvt/artifacts/schemas/freshness/v3/__init__.py +0 -0
  42. dvt/artifacts/schemas/freshness/v3/freshness.py +159 -0
  43. dvt/artifacts/schemas/manifest/__init__.py +2 -0
  44. dvt/artifacts/schemas/manifest/v12/__init__.py +0 -0
  45. dvt/artifacts/schemas/manifest/v12/manifest.py +212 -0
  46. dvt/artifacts/schemas/results.py +148 -0
  47. dvt/artifacts/schemas/run/__init__.py +2 -0
  48. dvt/artifacts/schemas/run/v5/__init__.py +0 -0
  49. dvt/artifacts/schemas/run/v5/run.py +184 -0
  50. dvt/artifacts/schemas/upgrades/__init__.py +4 -0
  51. dvt/artifacts/schemas/upgrades/upgrade_manifest.py +174 -0
  52. dvt/artifacts/schemas/upgrades/upgrade_manifest_dbt_version.py +2 -0
  53. dvt/artifacts/utils/validation.py +153 -0
  54. dvt/cli/__init__.py +1 -0
  55. dvt/cli/context.py +16 -0
  56. dvt/cli/exceptions.py +56 -0
  57. dvt/cli/flags.py +558 -0
  58. dvt/cli/main.py +971 -0
  59. dvt/cli/option_types.py +121 -0
  60. dvt/cli/options.py +79 -0
  61. dvt/cli/params.py +803 -0
  62. dvt/cli/requires.py +478 -0
  63. dvt/cli/resolvers.py +32 -0
  64. dvt/cli/types.py +40 -0
  65. dvt/clients/__init__.py +0 -0
  66. dvt/clients/checked_load.py +82 -0
  67. dvt/clients/git.py +164 -0
  68. dvt/clients/jinja.py +206 -0
  69. dvt/clients/jinja_static.py +245 -0
  70. dvt/clients/registry.py +192 -0
  71. dvt/clients/yaml_helper.py +68 -0
  72. dvt/compilation.py +833 -0
  73. dvt/compute/__init__.py +26 -0
  74. dvt/compute/base.py +288 -0
  75. dvt/compute/engines/__init__.py +13 -0
  76. dvt/compute/engines/duckdb_engine.py +368 -0
  77. dvt/compute/engines/spark_engine.py +273 -0
  78. dvt/compute/query_analyzer.py +212 -0
  79. dvt/compute/router.py +483 -0
  80. dvt/config/__init__.py +4 -0
  81. dvt/config/catalogs.py +95 -0
  82. dvt/config/compute_config.py +406 -0
  83. dvt/config/profile.py +411 -0
  84. dvt/config/profiles_v2.py +464 -0
  85. dvt/config/project.py +893 -0
  86. dvt/config/renderer.py +232 -0
  87. dvt/config/runtime.py +491 -0
  88. dvt/config/selectors.py +209 -0
  89. dvt/config/utils.py +78 -0
  90. dvt/connectors/.gitignore +6 -0
  91. dvt/connectors/README.md +306 -0
  92. dvt/connectors/catalog.yml +217 -0
  93. dvt/connectors/download_connectors.py +300 -0
  94. dvt/constants.py +29 -0
  95. dvt/context/__init__.py +0 -0
  96. dvt/context/base.py +746 -0
  97. dvt/context/configured.py +136 -0
  98. dvt/context/context_config.py +350 -0
  99. dvt/context/docs.py +82 -0
  100. dvt/context/exceptions_jinja.py +179 -0
  101. dvt/context/macro_resolver.py +195 -0
  102. dvt/context/macros.py +171 -0
  103. dvt/context/manifest.py +73 -0
  104. dvt/context/providers.py +2198 -0
  105. dvt/context/query_header.py +14 -0
  106. dvt/context/secret.py +59 -0
  107. dvt/context/target.py +74 -0
  108. dvt/contracts/__init__.py +0 -0
  109. dvt/contracts/files.py +413 -0
  110. dvt/contracts/graph/__init__.py +0 -0
  111. dvt/contracts/graph/manifest.py +1904 -0
  112. dvt/contracts/graph/metrics.py +98 -0
  113. dvt/contracts/graph/model_config.py +71 -0
  114. dvt/contracts/graph/node_args.py +42 -0
  115. dvt/contracts/graph/nodes.py +1806 -0
  116. dvt/contracts/graph/semantic_manifest.py +233 -0
  117. dvt/contracts/graph/unparsed.py +812 -0
  118. dvt/contracts/project.py +417 -0
  119. dvt/contracts/results.py +53 -0
  120. dvt/contracts/selection.py +23 -0
  121. dvt/contracts/sql.py +86 -0
  122. dvt/contracts/state.py +69 -0
  123. dvt/contracts/util.py +46 -0
  124. dvt/deprecations.py +347 -0
  125. dvt/deps/__init__.py +0 -0
  126. dvt/deps/base.py +153 -0
  127. dvt/deps/git.py +196 -0
  128. dvt/deps/local.py +80 -0
  129. dvt/deps/registry.py +131 -0
  130. dvt/deps/resolver.py +149 -0
  131. dvt/deps/tarball.py +121 -0
  132. dvt/docs/source/_ext/dbt_click.py +118 -0
  133. dvt/docs/source/conf.py +32 -0
  134. dvt/env_vars.py +64 -0
  135. dvt/event_time/event_time.py +40 -0
  136. dvt/event_time/sample_window.py +60 -0
  137. dvt/events/__init__.py +16 -0
  138. dvt/events/base_types.py +37 -0
  139. dvt/events/core_types_pb2.py +2 -0
  140. dvt/events/logging.py +109 -0
  141. dvt/events/types.py +2534 -0
  142. dvt/exceptions.py +1487 -0
  143. dvt/flags.py +89 -0
  144. dvt/graph/__init__.py +11 -0
  145. dvt/graph/cli.py +248 -0
  146. dvt/graph/graph.py +172 -0
  147. dvt/graph/queue.py +213 -0
  148. dvt/graph/selector.py +375 -0
  149. dvt/graph/selector_methods.py +976 -0
  150. dvt/graph/selector_spec.py +223 -0
  151. dvt/graph/thread_pool.py +18 -0
  152. dvt/hooks.py +21 -0
  153. dvt/include/README.md +49 -0
  154. dvt/include/__init__.py +3 -0
  155. dvt/include/global_project.py +4 -0
  156. dvt/include/starter_project/.gitignore +4 -0
  157. dvt/include/starter_project/README.md +15 -0
  158. dvt/include/starter_project/__init__.py +3 -0
  159. dvt/include/starter_project/analyses/.gitkeep +0 -0
  160. dvt/include/starter_project/dvt_project.yml +36 -0
  161. dvt/include/starter_project/macros/.gitkeep +0 -0
  162. dvt/include/starter_project/models/example/my_first_dbt_model.sql +27 -0
  163. dvt/include/starter_project/models/example/my_second_dbt_model.sql +6 -0
  164. dvt/include/starter_project/models/example/schema.yml +21 -0
  165. dvt/include/starter_project/seeds/.gitkeep +0 -0
  166. dvt/include/starter_project/snapshots/.gitkeep +0 -0
  167. dvt/include/starter_project/tests/.gitkeep +0 -0
  168. dvt/internal_deprecations.py +27 -0
  169. dvt/jsonschemas/__init__.py +3 -0
  170. dvt/jsonschemas/jsonschemas.py +309 -0
  171. dvt/jsonschemas/project/0.0.110.json +4717 -0
  172. dvt/jsonschemas/project/0.0.85.json +2015 -0
  173. dvt/jsonschemas/resources/0.0.110.json +2636 -0
  174. dvt/jsonschemas/resources/0.0.85.json +2536 -0
  175. dvt/jsonschemas/resources/latest.json +6773 -0
  176. dvt/links.py +4 -0
  177. dvt/materializations/__init__.py +0 -0
  178. dvt/materializations/incremental/__init__.py +0 -0
  179. dvt/materializations/incremental/microbatch.py +235 -0
  180. dvt/mp_context.py +8 -0
  181. dvt/node_types.py +37 -0
  182. dvt/parser/__init__.py +23 -0
  183. dvt/parser/analysis.py +21 -0
  184. dvt/parser/base.py +549 -0
  185. dvt/parser/common.py +267 -0
  186. dvt/parser/docs.py +52 -0
  187. dvt/parser/fixtures.py +51 -0
  188. dvt/parser/functions.py +30 -0
  189. dvt/parser/generic_test.py +100 -0
  190. dvt/parser/generic_test_builders.py +334 -0
  191. dvt/parser/hooks.py +119 -0
  192. dvt/parser/macros.py +137 -0
  193. dvt/parser/manifest.py +2204 -0
  194. dvt/parser/models.py +574 -0
  195. dvt/parser/partial.py +1179 -0
  196. dvt/parser/read_files.py +445 -0
  197. dvt/parser/schema_generic_tests.py +423 -0
  198. dvt/parser/schema_renderer.py +111 -0
  199. dvt/parser/schema_yaml_readers.py +936 -0
  200. dvt/parser/schemas.py +1467 -0
  201. dvt/parser/search.py +149 -0
  202. dvt/parser/seeds.py +28 -0
  203. dvt/parser/singular_test.py +20 -0
  204. dvt/parser/snapshots.py +44 -0
  205. dvt/parser/sources.py +557 -0
  206. dvt/parser/sql.py +63 -0
  207. dvt/parser/unit_tests.py +622 -0
  208. dvt/plugins/__init__.py +20 -0
  209. dvt/plugins/contracts.py +10 -0
  210. dvt/plugins/exceptions.py +2 -0
  211. dvt/plugins/manager.py +164 -0
  212. dvt/plugins/manifest.py +21 -0
  213. dvt/profiler.py +20 -0
  214. dvt/py.typed +1 -0
  215. dvt/runners/__init__.py +2 -0
  216. dvt/runners/exposure_runner.py +7 -0
  217. dvt/runners/no_op_runner.py +46 -0
  218. dvt/runners/saved_query_runner.py +7 -0
  219. dvt/selected_resources.py +8 -0
  220. dvt/task/__init__.py +0 -0
  221. dvt/task/base.py +504 -0
  222. dvt/task/build.py +197 -0
  223. dvt/task/clean.py +57 -0
  224. dvt/task/clone.py +162 -0
  225. dvt/task/compile.py +151 -0
  226. dvt/task/compute.py +366 -0
  227. dvt/task/debug.py +650 -0
  228. dvt/task/deps.py +280 -0
  229. dvt/task/docs/__init__.py +3 -0
  230. dvt/task/docs/generate.py +408 -0
  231. dvt/task/docs/index.html +250 -0
  232. dvt/task/docs/serve.py +28 -0
  233. dvt/task/freshness.py +323 -0
  234. dvt/task/function.py +122 -0
  235. dvt/task/group_lookup.py +46 -0
  236. dvt/task/init.py +374 -0
  237. dvt/task/list.py +237 -0
  238. dvt/task/printer.py +176 -0
  239. dvt/task/profiles.py +256 -0
  240. dvt/task/retry.py +175 -0
  241. dvt/task/run.py +1146 -0
  242. dvt/task/run_operation.py +142 -0
  243. dvt/task/runnable.py +802 -0
  244. dvt/task/seed.py +104 -0
  245. dvt/task/show.py +150 -0
  246. dvt/task/snapshot.py +57 -0
  247. dvt/task/sql.py +111 -0
  248. dvt/task/test.py +464 -0
  249. dvt/tests/fixtures/__init__.py +1 -0
  250. dvt/tests/fixtures/project.py +620 -0
  251. dvt/tests/util.py +651 -0
  252. dvt/tracking.py +529 -0
  253. dvt/utils/__init__.py +3 -0
  254. dvt/utils/artifact_upload.py +151 -0
  255. dvt/utils/utils.py +408 -0
  256. dvt/version.py +249 -0
  257. dvt_core-1.11.0b4.dist-info/METADATA +252 -0
  258. dvt_core-1.11.0b4.dist-info/RECORD +261 -0
  259. dvt_core-1.11.0b4.dist-info/WHEEL +5 -0
  260. dvt_core-1.11.0b4.dist-info/entry_points.txt +2 -0
  261. dvt_core-1.11.0b4.dist-info/top_level.txt +1 -0
dvt/context/providers.py
@@ -0,0 +1,2198 @@
+import abc
+import os
+from copy import deepcopy
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Callable,
+    Dict,
+    Iterable,
+    List,
+    Mapping,
+    Optional,
+    Tuple,
+    Type,
+    TypeVar,
+    Union,
+)
+
+from dvt import selected_resources
+from dvt.artifacts.resources import (
+    NodeConfig,
+    NodeVersion,
+    RefArgs,
+    SeedConfig,
+    SourceConfig,
+)
+from dvt.clients.jinja import (
+    MacroGenerator,
+    MacroStack,
+    UnitTestMacroGenerator,
+    get_rendered,
+)
+from dvt.clients.jinja_static import statically_parse_unrendered_config
+from dvt.config import IsFQNResource, Project, RuntimeConfig
+from dvt.constants import DEFAULT_ENV_PLACEHOLDER
+from dvt.context.base import Var, contextmember, contextproperty
+from dvt.context.configured import FQNLookup
+from dvt.context.context_config import ContextConfig
+from dvt.context.exceptions_jinja import wrapped_exports
+from dvt.context.macro_resolver import MacroResolver, TestMacroNamespace
+from dvt.context.macros import MacroNamespace, MacroNamespaceBuilder
+from dvt.context.manifest import ManifestContext
+from dvt.contracts.graph.manifest import Disabled, Manifest
+from dvt.contracts.graph.metrics import MetricReference, ResolvedMetricReference
+from dvt.contracts.graph.nodes import (
+    AccessType,
+    Exposure,
+    FunctionNode,
+    Macro,
+    ManifestNode,
+    ModelNode,
+    Resource,
+    SeedNode,
+    SemanticModel,
+    SnapshotNode,
+    SourceDefinition,
+    UnitTestNode,
+)
+from dvt.exceptions import (
+    CompilationError,
+    ConflictingConfigKeysError,
+    DbtReferenceError,
+    EnvVarMissingError,
+    InlineModelConfigError,
+    LoadAgateTableNotSeedError,
+    LoadAgateTableValueError,
+    MacroDispatchArgError,
+    MacroResultAlreadyLoadedError,
+    MetricArgsError,
+    NumberSourceArgsError,
+    OperationsCannotRefEphemeralNodesError,
+    ParsingError,
+    PersistDocsValueTypeError,
+    RefArgsError,
+    RefBadContextError,
+    SecretEnvVarLocationError,
+    TargetNotFoundError,
+)
+from dvt.flags import get_flags
+from dvt.materializations.incremental.microbatch import MicrobatchBuilder
+from dvt.node_types import ModelLanguage, NodeType
+from dvt.utils import MultiDict, args_to_dict
+from typing_extensions import Protocol
+
+from dbt.adapters.base.column import Column
+from dbt.adapters.base.relation import EventTimeFilter, RelationType
+from dbt.adapters.contracts.connection import AdapterResponse
+from dbt.adapters.exceptions import MissingConfigError
+from dbt.adapters.factory import (
+    get_adapter,
+    get_adapter_package_names,
+    get_adapter_type_names,
+)
+from dbt_common.clients.jinja import MacroProtocol
+from dbt_common.constants import SECRET_ENV_PREFIX
+from dbt_common.context import get_invocation_context
+from dbt_common.events.functions import get_metadata_vars
+from dbt_common.exceptions import (
+    DbtInternalError,
+    DbtRuntimeError,
+    DbtValidationError,
+    MacrosSourcesUnWriteableError,
+)
+from dbt_common.utils import AttrDict, cast_to_str, merge
+
+if TYPE_CHECKING:
+    import agate
+
+
+_MISSING = object()
+
+
+# base classes
+class RelationProxy:
+    def __init__(self, adapter):
+        self._quoting_config = adapter.config.quoting
+        self._relation_type = adapter.Relation
+
+    def __getattr__(self, key):
+        return getattr(self._relation_type, key)
+
+    def create(self, *args, **kwargs):
+        kwargs["quote_policy"] = merge(self._quoting_config, kwargs.pop("quote_policy", {}))
+        return self._relation_type.create(*args, **kwargs)
+
+
+class BaseDatabaseWrapper:
+    """
+    Wrapper for runtime database interaction. Applies the runtime quote policy
+    via a relation proxy.
+    """
+
+    def __init__(self, adapter, namespace: MacroNamespace):
+        self._adapter = adapter
+        self.Relation = RelationProxy(adapter)
+        self._namespace = namespace
+
+    def __getattr__(self, name):
+        raise NotImplementedError("subclasses need to implement this")
+
+    @property
+    def config(self):
+        return self._adapter.config
+
+    def type(self):
+        return self._adapter.type()
+
+    def commit(self):
+        return self._adapter.commit_if_has_connection()
+
+    def _get_adapter_macro_prefixes(self) -> List[str]:
+        # order matters for dispatch:
+        # 1. current adapter
+        # 2. any parent adapters (dependencies)
+        # 3. 'default'
+        search_prefixes = get_adapter_type_names(self._adapter.type()) + ["default"]
+        return search_prefixes
+
+    def _get_search_packages(self, namespace: Optional[str] = None) -> List[Optional[str]]:
+        search_packages: List[Optional[str]] = [None]
+
+        if namespace is None:
+            search_packages = [None]
+        elif isinstance(namespace, str):
+            macro_search_order = self._adapter.config.get_macro_search_order(namespace)
+            if macro_search_order:
+                search_packages = macro_search_order
+            elif not macro_search_order and namespace in self._adapter.config.dependencies:
+                search_packages = [self.config.project_name, namespace]
+        else:
+            raise CompilationError(
+                f"In adapter.dispatch, got a {type(namespace)} macro_namespace argument "
+                f'("{namespace}"), but macro_namespace should be None or a string.'
+            )
+
+        return search_packages
+
+    def dispatch(
+        self,
+        macro_name: str,
+        macro_namespace: Optional[str] = None,
+        packages: Optional[List[str]] = None,  # eventually remove since it's fully deprecated
+    ) -> MacroGenerator:
+        search_packages: List[Optional[str]]
+
+        if "." in macro_name:
+            suggest_macro_namespace, suggest_macro_name = macro_name.split(".", 1)
+            msg = (
+                f'In adapter.dispatch, got a macro name of "{macro_name}", '
+                f'but "." is not a valid macro name component. Did you mean '
+                f'`adapter.dispatch("{suggest_macro_name}", '
+                f'macro_namespace="{suggest_macro_namespace}")`?'
+            )
+            raise CompilationError(msg)
+
+        if packages is not None:
+            raise MacroDispatchArgError(macro_name)
+
+        search_packages = self._get_search_packages(macro_namespace)
+
+        attempts = []
+
+        for package_name in search_packages:
+            for prefix in self._get_adapter_macro_prefixes():
+                search_name = f"{prefix}__{macro_name}"
+                try:
+                    # this uses the namespace from the context
+                    macro = self._namespace.get_from_package(package_name, search_name)
+                except CompilationError:
+                    # Only raise CompilationError if macro is not found in
+                    # any package
+                    macro = None
+
+                if package_name is None:
+                    attempts.append(search_name)
+                else:
+                    attempts.append(f"{package_name}.{search_name}")
+
+                if macro is not None:
+                    return macro
+
+        searched = ", ".join(repr(a) for a in attempts)
+        msg = f"In dispatch: No macro named '{macro_name}' found within namespace: '{macro_namespace}'\n Searched for: {searched}"
+        raise CompilationError(msg)
+
+
+class BaseResolver(metaclass=abc.ABCMeta):
+    def __init__(self, db_wrapper, model, config, manifest):
+        self.db_wrapper = db_wrapper
+        self.model = model
+        self.config = config
+        self.manifest = manifest
+
+    @property
+    def current_project(self):
+        return self.config.project_name
+
+    @property
+    def Relation(self):
+        return self.db_wrapper.Relation
+
+    @property
+    def resolve_limit(self) -> Optional[int]:
+        return 0 if getattr(self.config.args, "EMPTY", False) else None
+
+    def _resolve_event_time_field_name(self, target: ManifestNode) -> str:
+        """Get the event time field name with proper quoting based on configuration."""
+        # Default to False for quoting
+        should_quote = False
+        column_found = False
+        column = None
+
+        # Check if config has event_time attribute
+        if not hasattr(target.config, "event_time") or target.config.event_time is None:
+            return ""
+
+        # Check column-level quote configuration first (overrides source-level)
+        if hasattr(target, "columns") and target.columns and isinstance(target.columns, dict):
+            for _, column_info in target.columns.items():
+                if column_info.name == target.config.event_time:
+                    column_found = True
+                    # Create the column object
+                    column = Column.create(
+                        column_info.name, column_info.data_type if column_info.data_type else ""
+                    )
+                    # Column-level quote setting takes precedence
+                    if hasattr(column_info, "quote") and column_info.quote is not None:
+                        should_quote = column_info.quote
+                    # Fallback to source-level quote setting
+                    elif (
+                        hasattr(target, "quoting")
+                        and hasattr(target.quoting, "column")
+                        and target.quoting.column is not None
+                    ):
+                        should_quote = target.quoting.column
+                    break
+
+        # If column not found, fall back to source-level quote setting
+        if not column_found:
+            if (
+                hasattr(target, "quoting")
+                and hasattr(target.quoting, "column")
+                and target.quoting.column is not None
+            ):
+                should_quote = target.quoting.column
+            # Create column object for quoting
+            column = Column.create(target.config.event_time, "")
+
+        # Apply quoting logic
+        if should_quote and column is not None:
+            return column.quoted
+        else:
+            return target.config.event_time
+
+    def resolve_event_time_filter(self, target: ManifestNode) -> Optional[EventTimeFilter]:
+        event_time_filter = None
+        sample_mode = getattr(self.config.args, "sample", None) is not None
+        field_name = self._resolve_event_time_field_name(target)
+
+        # TODO The number of branches here is getting rough. We should consider ways to simplify
+        # what is going on to make it easier to maintain
+
+        # Only do event time filtering if the base node has the necessary event time configs
+        if (
+            isinstance(target.config, (NodeConfig, SeedConfig, SourceConfig))
+            and target.config.event_time
+            and isinstance(self.model, (ModelNode, SnapshotNode))
+        ):
+
+            # Handling of microbatch models
+            if (
+                isinstance(self.model, ModelNode)
+                and self.model.config.materialized == "incremental"
+                and self.model.config.incremental_strategy == "microbatch"
+                and self.manifest.use_microbatch_batches(project_name=self.config.project_name)
+                and self.model.batch is not None
+            ):
+                # Sample mode microbatch models
+                if sample_mode:
+                    start = (
+                        self.config.args.sample.start
+                        if self.config.args.sample.start > self.model.batch.event_time_start
+                        else self.model.batch.event_time_start
+                    )
+                    end = (
+                        self.config.args.sample.end
+                        if self.config.args.sample.end < self.model.batch.event_time_end
+                        else self.model.batch.event_time_end
+                    )
+                    event_time_filter = EventTimeFilter(
+                        field_name=field_name,
+                        start=start,
+                        end=end,
+                    )
+
+                # Regular microbatch models
+                else:
+                    event_time_filter = EventTimeFilter(
+                        field_name=field_name,
+                        start=self.model.batch.event_time_start,
+                        end=self.model.batch.event_time_end,
+                    )
+
+            # Sample mode _non_ microbatch models
+            elif sample_mode:
+                event_time_filter = EventTimeFilter(
+                    field_name=field_name,
+                    start=self.config.args.sample.start,
+                    end=self.config.args.sample.end,
+                )
+
+        return event_time_filter
+
+    @abc.abstractmethod
+    def __call__(self, *args: str) -> Union[str, RelationProxy, MetricReference]:
+        pass
+
+
+class BaseRefResolver(BaseResolver):
+    @abc.abstractmethod
+    def resolve(
+        self, name: str, package: Optional[str] = None, version: Optional[NodeVersion] = None
+    ) -> RelationProxy: ...
+
+    def _repack_args(
+        self, name: str, package: Optional[str], version: Optional[NodeVersion]
+    ) -> RefArgs:
+        return RefArgs(package=package, name=name, version=version)
+
+    def validate_args(self, name: str, package: Optional[str], version: Optional[NodeVersion]):
+        if not isinstance(name, str):
+            raise CompilationError(
+                f"The name argument to ref() must be a string, got {type(name)}"
+            )
+
+        if package is not None and not isinstance(package, str):
+            raise CompilationError(
+                f"The package argument to ref() must be a string or None, got {type(package)}"
+            )
+
+        if version is not None and not isinstance(version, (str, int, float)):
+            raise CompilationError(
+                f"The version argument to ref() must be a string, int, float, or None - got {type(version)}"
+            )
+
+    def __call__(self, *args: str, **kwargs) -> RelationProxy:
+        name: str
+        package: Optional[str] = None
+        version: Optional[NodeVersion] = None
+
+        if len(args) == 1:
+            name = args[0]
+        elif len(args) == 2:
+            package, name = args
+        else:
+            raise RefArgsError(node=self.model, args=args)
+
+        version = kwargs.get("version") or kwargs.get("v")
+        self.validate_args(name, package, version)
+        return self.resolve(name, package, version)
+
+
+class BaseSourceResolver(BaseResolver):
+    @abc.abstractmethod
+    def resolve(self, source_name: str, table_name: str):
+        pass
+
+    def validate_args(self, source_name: str, table_name: str):
+        if not isinstance(source_name, str):
+            raise CompilationError(
+                f"The source name (first) argument to source() must be a "
+                f"string, got {type(source_name)}"
+            )
+        if not isinstance(table_name, str):
+            raise CompilationError(
+                f"The table name (second) argument to source() must be a "
+                f"string, got {type(table_name)}"
+            )
+
+    def __call__(self, *args: str) -> RelationProxy:
+        if len(args) != 2:
+            raise NumberSourceArgsError(args, node=self.model)
+        self.validate_args(args[0], args[1])
+        return self.resolve(args[0], args[1])
+
+
+class BaseMetricResolver(BaseResolver):
+    @abc.abstractmethod
+    def resolve(self, name: str, package: Optional[str] = None) -> MetricReference: ...
+
+    def _repack_args(self, name: str, package: Optional[str]) -> List[str]:
+        if package is None:
+            return [name]
+        else:
+            return [package, name]
+
+    def validate_args(self, name: str, package: Optional[str]):
+        if not isinstance(name, str):
+            raise CompilationError(
+                f"The name argument to metric() must be a string, got {type(name)}"
+            )
+
+        if package is not None and not isinstance(package, str):
+            raise CompilationError(
+                f"The package argument to metric() must be a string or None, got {type(package)}"
+            )
+
+    def __call__(self, *args: str) -> MetricReference:
+        name: str
+        package: Optional[str] = None
+
+        if len(args) == 1:
+            name = args[0]
+        elif len(args) == 2:
+            package, name = args
+        else:
+            raise MetricArgsError(node=self.model, args=args)
+        self.validate_args(name, package)
+        return self.resolve(name, package)
+
+
+class BaseFunctionResolver(BaseResolver):
+    @abc.abstractmethod
+    def resolve(self, name: str, package: Optional[str] = None): ...
+
+    def _repack_args(self, name: str, package: Optional[str]) -> List[str]:
+        if package is None:
+            return [name]
+        else:
+            return [package, name]
+
+    def validate_args(self, name: str, package: Optional[str]):
+        if not isinstance(name, str):
+            raise CompilationError(
+                f"The name argument to function() must be a string, got {type(name)}"
+            )
+
+        if package is not None and not isinstance(package, str):
+            raise CompilationError(
+                f"The package argument to function() must be a string or None, got {type(package)}"
+            )
+
+    def __call__(self, *args: str):
+        name: str
+        package: Optional[str] = None
+
+        if len(args) == 1:
+            name = args[0]
+        elif len(args) == 2:
+            package, name = args
+        else:
+            raise RefArgsError(node=self.model, args=args)
+        self.validate_args(name, package)
+        return self.resolve(name, package)
+
+
+class Config(Protocol):
+    def __init__(self, model, context_config: Optional[ContextConfig]): ...
+
+
+# Implementation of "config(..)" calls in models
+class ParseConfigObject(Config):
+    def __init__(self, model, context_config: Optional[ContextConfig]):
+        self.model = model
+        self.context_config = context_config
+
+    def _transform_config(self, config):
+        for oldkey in ("pre_hook", "post_hook"):
+            if oldkey in config:
+                newkey = oldkey.replace("_", "-")
+                if newkey in config:
+                    raise ConflictingConfigKeysError(oldkey, newkey, node=self.model)
+                config[newkey] = config.pop(oldkey)
+        return config
+
+    def __call__(self, *args, **kwargs):
+        if len(args) == 1 and len(kwargs) == 0:
+            opts = args[0]
+        elif len(args) == 0 and len(kwargs) > 0:
+            opts = kwargs
+        else:
+            raise InlineModelConfigError(node=self.model)
+
+        opts = self._transform_config(opts)
+
+        # it's ok to have a parse context with no context config, but you must
+        # not call it!
+        if self.context_config is None:
+            raise DbtRuntimeError("At parse time, did not receive a context config")
+
+        # Track unrendered opts to build parsed node unrendered_config later on
+        if get_flags().state_modified_compare_more_unrendered_values:
+            unrendered_config = statically_parse_unrendered_config(self.model.raw_code)
+            if unrendered_config:
+                self.context_config.add_unrendered_config_call(unrendered_config)
+
+        # Use rendered opts to populate context_config
+        self.context_config.add_config_call(opts)
+        return ""
+
+    def set(self, name, value):
+        return self.__call__({name: value})
+
+    def require(self, name, validator=None):
+        return ""
+
+    def get(self, name, default=None, validator=None):
+        return ""
+
+    def persist_relation_docs(self) -> bool:
+        return False
+
+    def persist_column_docs(self) -> bool:
+        return False
+
+
+class RuntimeConfigObject(Config):
+    def __init__(self, model, context_config: Optional[ContextConfig] = None):
+        self.model = model
+        # we never use or get a config, only the parser cares
+
+    def __call__(self, *args, **kwargs):
+        return ""
+
+    def set(self, name, value):
+        return self.__call__({name: value})
+
+    def _validate(self, validator, value):
+        validator(value)
+
+    def _lookup(self, name, default=_MISSING):
+        # if this is a macro, there might be no `model.config`.
+        if not hasattr(self.model, "config"):
+            result = default
+        else:
+            result = self.model.config.get(name, default)
+        if result is _MISSING:
+            raise MissingConfigError(unique_id=self.model.unique_id, name=name)
+        return result
+
+    def require(self, name, validator=None):
+        to_return = self._lookup(name)
+
+        if validator is not None:
+            self._validate(validator, to_return)
+
+        return to_return
+
+    def get(self, name, default=None, validator=None):
+        to_return = self._lookup(name, default)
+
+        if validator is not None and default is not None:
+            self._validate(validator, to_return)
+
+        return to_return
+
+    def persist_relation_docs(self) -> bool:
+        persist_docs = self.get("persist_docs", default={})
+        if not isinstance(persist_docs, dict):
+            raise PersistDocsValueTypeError(persist_docs)
+
+        return persist_docs.get("relation", False)
+
+    def persist_column_docs(self) -> bool:
+        persist_docs = self.get("persist_docs", default={})
+        if not isinstance(persist_docs, dict):
+            raise PersistDocsValueTypeError(persist_docs)
+
+        return persist_docs.get("columns", False)
+
+
+# `adapter` implementations
+class ParseDatabaseWrapper(BaseDatabaseWrapper):
+    """The parser subclass of the database wrapper applies any explicit
+    parse-time overrides.
+    """
+
+    def __getattr__(self, name):
+        override = name in self._adapter._available_ and name in self._adapter._parse_replacements_
+
+        if override:
+            return self._adapter._parse_replacements_[name]
+        elif name in self._adapter._available_:
+            return getattr(self._adapter, name)
+        else:
+            raise AttributeError(
+                "'{}' object has no attribute '{}'".format(self.__class__.__name__, name)
+            )
+
+
+class RuntimeDatabaseWrapper(BaseDatabaseWrapper):
+    """The runtime database wrapper exposes everything the adapter marks
+    available.
+    """
+
+    def __getattr__(self, name):
+        if name in self._adapter._available_:
+            return getattr(self._adapter, name)
+        else:
+            raise AttributeError(
+                "'{}' object has no attribute '{}'".format(self.__class__.__name__, name)
+            )
+
+
+# `ref` implementations
+class ParseRefResolver(BaseRefResolver):
+    def resolve(
+        self, name: str, package: Optional[str] = None, version: Optional[NodeVersion] = None
+    ) -> RelationProxy:
+        self.model.refs.append(self._repack_args(name, package, version))
+
+        # This is not the ref for the "name" passed in, but for the current model.
+        return self.Relation.create_from(self.config, self.model)
+
+
+ResolveRef = Union[Disabled, ManifestNode]
+
+
+class RuntimeRefResolver(BaseRefResolver):
+    def resolve(
+        self,
+        target_name: str,
+        target_package: Optional[str] = None,
+        target_version: Optional[NodeVersion] = None,
+    ) -> RelationProxy:
+        target_model = self.manifest.resolve_ref(
+            self.model,
+            target_name,
+            target_package,
+            target_version,
+            self.current_project,
+            self.model.package_name,
+        )
+
+        # Raise an error if the reference target is missing
+        if target_model is None or isinstance(target_model, Disabled):
+            raise TargetNotFoundError(
+                node=self.model,
+                target_name=target_name,
+                target_kind="node",
+                target_package=target_package,
+                target_version=target_version,
+                disabled=isinstance(target_model, Disabled),
+            )
+
+        # Raise error if trying to reference a 'private' resource outside its 'group'
+        elif self.manifest.is_invalid_private_ref(
+            self.model, target_model, self.config.dependencies
+        ):
+            raise DbtReferenceError(
+                unique_id=self.model.unique_id,
+                ref_unique_id=target_model.unique_id,
+                access=AccessType.Private,
+                scope=cast_to_str(target_model.group),
+            )
+        # Or a 'protected' resource outside its project/package namespace
+        elif self.manifest.is_invalid_protected_ref(
+            self.model, target_model, self.config.dependencies
+        ):
+            raise DbtReferenceError(
+                unique_id=self.model.unique_id,
+                ref_unique_id=target_model.unique_id,
+                access=AccessType.Protected,
+                scope=target_model.package_name,
+            )
+        self.validate(target_model, target_name, target_package, target_version)
+        return self.create_relation(target_model)
+
+    def create_relation(self, target_model: ManifestNode) -> RelationProxy:
+        if target_model.is_ephemeral_model:
+            self.model.set_cte(target_model.unique_id, None)
+            return self.Relation.create_ephemeral_from(
+                target_model,
+                limit=self.resolve_limit,
+                event_time_filter=self.resolve_event_time_filter(target_model),
+            )
+        elif (
+            hasattr(target_model, "defer_relation")
+            and target_model.defer_relation
+            and self.config.args.defer
+            and (
+                # User has explicitly opted to prefer defer_relation for unselected resources
+                (
+                    self.config.args.favor_state
+                    and target_model.unique_id not in selected_resources.SELECTED_RESOURCES
+                )
+                # Or, this node's relation does not exist in the expected target location (cache lookup)
+                or not get_adapter(self.config).get_relation(
+                    target_model.database, target_model.schema, target_model.identifier
+                )
+            )
+        ):
+            return self.Relation.create_from(
+                self.config,
+                target_model.defer_relation,
+                limit=self.resolve_limit,
+                event_time_filter=self.resolve_event_time_filter(target_model),
+            )
+        else:
+            return self.Relation.create_from(
+                self.config,
+                target_model,
+                limit=self.resolve_limit,
+                event_time_filter=self.resolve_event_time_filter(target_model),
+            )
+
+    def validate(
+        self,
+        resolved: ManifestNode,
+        target_name: str,
+        target_package: Optional[str],
+        target_version: Optional[NodeVersion],
+    ) -> None:
+        if resolved.unique_id not in self.model.depends_on.nodes:
+            args = self._repack_args(target_name, target_package, target_version)
+            raise RefBadContextError(node=self.model, args=args)
+
+
+class OperationRefResolver(RuntimeRefResolver):
+    def validate(
+        self,
+        resolved: ManifestNode,
+        target_name: str,
+        target_package: Optional[str],
+        target_version: Optional[NodeVersion],
+    ) -> None:
+        pass
+
+    def create_relation(self, target_model: ManifestNode) -> RelationProxy:
+        if target_model.is_ephemeral_model:
+            # In operations, we can't ref() ephemeral nodes, because
+            # Macros do not support set_cte
+            raise OperationsCannotRefEphemeralNodesError(target_model.name, node=self.model)
+        else:
+            return super().create_relation(target_model)
+
+
+class RuntimeUnitTestRefResolver(RuntimeRefResolver):
+    @property
+    def resolve_limit(self) -> Optional[int]:
+        # Unit tests should never respect --empty flag or provide a limit since they are based on fake data.
+        return None
+
+    def resolve(
+        self,
+        target_name: str,
+        target_package: Optional[str] = None,
+        target_version: Optional[NodeVersion] = None,
+    ) -> RelationProxy:
+        return super().resolve(target_name, target_package, target_version)
+
+
+# `source` implementations
+class ParseSourceResolver(BaseSourceResolver):
+    def resolve(self, source_name: str, table_name: str):
+        # When you call source(), this is what happens at parse time
+        self.model.sources.append([source_name, table_name])
+        return self.Relation.create_from(self.config, self.model)
+
+
+class RuntimeSourceResolver(BaseSourceResolver):
+    def resolve(self, source_name: str, table_name: str):
+        target_source = self.manifest.resolve_source(
+            source_name,
+            table_name,
+            self.current_project,
+            self.model.package_name,
+        )
+
+        if target_source is None or isinstance(target_source, Disabled):
+            raise TargetNotFoundError(
+                node=self.model,
+                target_name=f"{source_name}.{table_name}",
+                target_kind="source",
+                disabled=(isinstance(target_source, Disabled)),
+            )
+
+        # Source quoting does _not_ respect global configs in dbt_project.yml, as documented here:
+        # https://docs.getdbt.com/reference/project-configs/quoting
+        # Use an object with an empty quoting field to bypass any settings in self.
+        class SourceQuotingBaseConfig:
+            quoting: Dict[str, Any] = {}
+
+        return self.Relation.create_from(
+            SourceQuotingBaseConfig(),
+            target_source,
+            limit=self.resolve_limit,
+            event_time_filter=self.resolve_event_time_filter(target_source),
+        )
+
+
+class RuntimeUnitTestSourceResolver(BaseSourceResolver):
+    @property
+    def resolve_limit(self) -> Optional[int]:
+        # Unit tests should never respect --empty flag or provide a limit since they are based on fake data.
+        return None
+
+    def resolve(self, source_name: str, table_name: str):
+        target_source = self.manifest.resolve_source(
+            source_name,
+            table_name,
+            self.current_project,
+            self.model.package_name,
+        )
+        if target_source is None or isinstance(target_source, Disabled):
+            raise TargetNotFoundError(
+                node=self.model,
+                target_name=f"{source_name}.{table_name}",
+                target_kind="source",
+                disabled=(isinstance(target_source, Disabled)),
+            )
+        # For unit tests, this isn't a "real" source, it's a ModelNode taking
+        # the place of a source. We don't really need to return the relation here,
+        # we just need to set_cte, but skipping it confuses typing. We *do* need
+        # the relation in the "this" property.
+        self.model.set_cte(target_source.unique_id, None)
+        return self.Relation.create_ephemeral_from(target_source)
+
+
+# `metric` implementations
+class ParseMetricResolver(BaseMetricResolver):
+    def resolve(self, name: str, package: Optional[str] = None) -> MetricReference:
+        self.model.metrics.append(self._repack_args(name, package))
+
+        return MetricReference(name, package)
+
+
+class RuntimeMetricResolver(BaseMetricResolver):
+    def resolve(self, target_name: str, target_package: Optional[str] = None) -> MetricReference:
+        target_metric = self.manifest.resolve_metric(
+            target_name,
+            target_package,
+            self.current_project,
+            self.model.package_name,
+        )
+
+        if target_metric is None or isinstance(target_metric, Disabled):
+            raise TargetNotFoundError(
+                node=self.model,
+                target_name=target_name,
+                target_kind="metric",
+                target_package=target_package,
+            )
+
+        return ResolvedMetricReference(target_metric, self.manifest)
+
+
+# `var` implementations.
+class ModelConfiguredVar(Var):
+    def __init__(
+        self,
+        context: Dict[str, Any],
+        config: RuntimeConfig,
+        node: Resource,
+    ) -> None:
+        self._node: Resource
+        self._config: RuntimeConfig = config
+        super().__init__(context, config.cli_vars, node=node)
+
+    def packages_for_node(self) -> Iterable[Project]:
+        dependencies = self._config.load_dependencies()
+        package_name = self._node.package_name
+
+        if package_name != self._config.project_name:
+            if package_name in dependencies:
+                yield dependencies[package_name]
+        yield self._config
+
+    def _generate_merged(self) -> Mapping[str, Any]:
+        search_node: IsFQNResource
+        if isinstance(self._node, IsFQNResource):
+            search_node = self._node
+        else:
+            search_node = FQNLookup(self._node.package_name)
+
+        adapter_type = self._config.credentials.type
+
+        merged = MultiDict()
+        for project in self.packages_for_node():
+            merged.add(project.vars.vars_for(search_node, adapter_type))
+        merged.add(self._cli_vars)
+        return merged
+
+
+class ParseVar(ModelConfiguredVar):
+    def get_missing_var(self, var_name):
+        # in the parser, just always return None.
+        return None
+
+
+class RuntimeVar(ModelConfiguredVar):
+    pass
+
+
+class UnitTestVar(RuntimeVar):
+    def __init__(
+        self,
+        context: Dict[str, Any],
+        config: RuntimeConfig,
+        node: Resource,
+    ) -> None:
+        config_copy = None
+        assert isinstance(node, UnitTestNode)
+        if node.overrides and node.overrides.vars:
+            config_copy = deepcopy(config)
+            config_copy.cli_vars.update(node.overrides.vars)
+
+        super().__init__(context, config_copy or config, node=node)
+
+
+# `function` implementations.
+class ParseFunctionResolver(BaseFunctionResolver):
+    def resolve(self, name: str, package: Optional[str] = None):
+        # When you call function(), this is what happens at parse time
+        self.model.functions.append(self._repack_args(name, package))
+        return self.Relation.create_from(self.config, self.model, type=RelationType.Function)
+
+
+class RuntimeFunctionResolver(BaseFunctionResolver):
+    def resolve(self, name: str, package: Optional[str] = None):
+        target_function = self.manifest.resolve_function(
+            name,
+            package,
+            self.current_project,
+            self.model.package_name,
+        )
+
+        if target_function is None or isinstance(target_function, Disabled):
+            raise TargetNotFoundError(
+                node=self.model,
+                target_name=name,
+                target_kind="function",
+                disabled=(isinstance(target_function, Disabled)),
+            )
+
+        # Source quoting does _not_ respect global configs in dbt_project.yml, as documented here:
+        # https://docs.getdbt.com/reference/project-configs/quoting
+        # Use an object with an empty quoting field to bypass any settings in self.
+        class SourceQuotingBaseConfig:
+            quoting: Dict[str, Any] = {}
+
+        return self.Relation.create_from(
+            SourceQuotingBaseConfig(),
+            target_function,
+            limit=self.resolve_limit,
+            event_time_filter=self.resolve_event_time_filter(target_function),
+            type=RelationType.Function,
+        )
+
+
+# TODO: Right now the RuntimeUnitTestProvider uses the RuntimeFunctionResolver for functions,
+# but for CT-12025 we'll likely need to create a separate RuntimeUnitTestFunctionResolver to
+# handle function overrides (mocking functions)
+
+
+# Providers
+class Provider(Protocol):
+    execute: bool
+    Config: Type[Config]
+    DatabaseWrapper: Type[BaseDatabaseWrapper]
+    Var: Type[ModelConfiguredVar]
+    ref: Type[BaseRefResolver]
+    source: Type[BaseSourceResolver]
+    metric: Type[BaseMetricResolver]
+    function: Type[BaseFunctionResolver]
+
+
+class ParseProvider(Provider):
+    execute = False
+    Config = ParseConfigObject
+    DatabaseWrapper = ParseDatabaseWrapper
+    Var = ParseVar
+    ref = ParseRefResolver
+    source = ParseSourceResolver
+    metric = ParseMetricResolver
+    function = ParseFunctionResolver
+
+
+class GenerateNameProvider(Provider):
+    execute = False
+    Config = RuntimeConfigObject
+    DatabaseWrapper = ParseDatabaseWrapper
+    Var = RuntimeVar
+    ref = ParseRefResolver
+    source = ParseSourceResolver
+    metric = ParseMetricResolver
+    function = ParseFunctionResolver
+
+
+class RuntimeProvider(Provider):
+    execute = True
+    Config = RuntimeConfigObject
+    DatabaseWrapper = RuntimeDatabaseWrapper
+    Var = RuntimeVar
+    ref = RuntimeRefResolver
+    source = RuntimeSourceResolver
+    metric = RuntimeMetricResolver
+    function = RuntimeFunctionResolver
+
+
+class RuntimeUnitTestProvider(Provider):
+    execute = True
+    Config = RuntimeConfigObject
+    DatabaseWrapper = RuntimeDatabaseWrapper
+    Var = UnitTestVar
+    ref = RuntimeUnitTestRefResolver
+    source = RuntimeUnitTestSourceResolver
+    metric = RuntimeMetricResolver
+    function = RuntimeFunctionResolver
+
+
+class OperationProvider(RuntimeProvider):
+    ref = OperationRefResolver
+
+
+T = TypeVar("T")
+
+
+# Base context collection, used for parsing configs.
+class ProviderContext(ManifestContext):
+    # subclasses are MacroContext, ModelContext, TestContext, SourceContext
+    def __init__(
+        self,
+        model,
+        config: RuntimeConfig,
+        manifest: Manifest,
+        provider: Provider,
+        context_config: Optional[ContextConfig],
+    ) -> None:
+        if provider is None:
+            raise DbtInternalError(f"Invalid provider given to context: {provider}")
+        # mypy appeasement - we know it'll be a RuntimeConfig
+        self.config: RuntimeConfig
+        self.model: Union[Macro, ManifestNode, SourceDefinition] = model
+        super().__init__(config, manifest, model.package_name)
+        self.sql_results: Dict[str, Optional[AttrDict]] = {}
+        self.context_config: Optional[ContextConfig] = context_config
+        self.provider: Provider = provider
+        self.adapter = get_adapter(self.config)
+        # The macro namespace is used in creating the DatabaseWrapper
+        self.db_wrapper = self.provider.DatabaseWrapper(self.adapter, self.namespace)
+
+    # This overrides the method in ManifestContext, and provides
+    # a model, which the ManifestContext builder does not
+    def _get_namespace_builder(self):
+        internal_packages = get_adapter_package_names(self.config.credentials.type)
+        return MacroNamespaceBuilder(
+            self.config.project_name,
+            self.search_package,
+            self.macro_stack,
+            internal_packages,
+            self.model,
+        )
+
+    @contextproperty()
+    def dbt_metadata_envs(self) -> Dict[str, str]:
+        return get_metadata_vars()
+
+    @contextproperty()
+    def invocation_args_dict(self):
+        return args_to_dict(self.config.args)
+
+    @contextproperty()
+    def _sql_results(self) -> Dict[str, Optional[AttrDict]]:
+        return self.sql_results
+
+    @contextmember()
+    def load_result(self, name: str) -> Optional[AttrDict]:
+        if name in self.sql_results:
+            # handle the special case of "main" macro
+            # See: https://github.com/dbt-labs/dbt-core/blob/ada8860e48b32ac712d92e8b0977b2c3c9749981/core/dbt/task/run.py#L228
+            if name == "main":
+                return self.sql_results["main"]
+
+            # handle a None, which indicates this name was populated but has since been loaded
+            elif self.sql_results[name] is None:
+                raise MacroResultAlreadyLoadedError(name)
+
+            # Handle the regular use case
+            else:
+                ret_val = self.sql_results[name]
+                self.sql_results[name] = None
+                return ret_val
+        else:
+            # Handle trying to load a result that was never stored
+            return None
+
+    @contextmember()
+    def store_result(
+        self, name: str, response: Any, agate_table: Optional["agate.Table"] = None
+    ) -> str:
+        from dbt_common.clients import agate_helper
+
+        if agate_table is None:
+            agate_table = agate_helper.empty_table()
+
+        self.sql_results[name] = AttrDict(
+            {
+                "response": response,
+                "data": agate_helper.as_matrix(agate_table),
+                "table": agate_table,
+            }
+        )
+        return ""
+
+    @contextmember()
+    def store_raw_result(
+        self,
+        name: str,
+        message=Optional[str],
+        code=Optional[str],
+        rows_affected=Optional[str],
+        agate_table: Optional["agate.Table"] = None,
+    ) -> str:
+        response = AdapterResponse(_message=message, code=code, rows_affected=rows_affected)
+        return self.store_result(name, response, agate_table)
+
+    @contextproperty()
+    def validation(self):
+        def validate_any(*args) -> Callable[[T], None]:
+            def inner(value: T) -> None:
+                for arg in args:
+                    if isinstance(arg, type) and isinstance(value, arg):
+                        return
+                    elif value == arg:
+                        return
+                raise DbtValidationError(
+                    'Expected value "{}" to be one of {}'.format(value, ",".join(map(str, args)))
+                )
+
+            return inner
+
+        return AttrDict(
+            {
+                "any": validate_any,
+            }
+        )
+
+    @contextmember()
+    def write(self, payload: str) -> str:
+        # macros/source defs aren't 'writeable'.
+        if isinstance(self.model, (Macro, SourceDefinition)):
+            raise MacrosSourcesUnWriteableError(node=self.model)
+
+        split_suffix = None
+        if (
+            isinstance(self.model, ModelNode)
+            and self.model.config.get("incremental_strategy") == "microbatch"
+            and self.model.batch is not None
+        ):
+            split_suffix = MicrobatchBuilder.format_batch_start(
+                self.model.batch.event_time_start,
+                self.model.config.batch_size,
+            )
+
+        self.model.build_path = self.model.get_target_write_path(
+            self.config.target_path, "run", split_suffix=split_suffix
+        )
+        self.model.write_node(self.config.project_root, self.model.build_path, payload)
+        return ""
+
+    @contextmember()
+    def render(self, string: str) -> str:
+        return get_rendered(string, self._ctx, self.model)
+
+    @contextmember()
+    def try_or_compiler_error(
+        self, message_if_exception: str, func: Callable, *args, **kwargs
+    ) -> Any:
+        try:
+            return func(*args, **kwargs)
+        except Exception:
+            raise CompilationError(message_if_exception, self.model)
+
+    @contextmember()
+    def load_agate_table(self) -> "agate.Table":
+        from dbt_common.clients import agate_helper
+
+        if not isinstance(self.model, SeedNode):
+            raise LoadAgateTableNotSeedError(self.model.resource_type, node=self.model)
+
+        # include package_path for seeds defined in packages
+        package_path = (
+            os.path.join(self.config.packages_install_path, self.model.package_name)
+            if self.model.package_name != self.config.project_name
+            else "."
+        )
+        path = os.path.join(self.config.project_root, package_path, self.model.original_file_path)
+        if not os.path.exists(path):
+            assert self.model.root_path
+            path = os.path.join(self.model.root_path, self.model.original_file_path)
+
+        column_types = self.model.config.column_types
+        delimiter = self.model.config.delimiter
+        try:
+            table = agate_helper.from_csv(path, text_columns=column_types, delimiter=delimiter)
+        except ValueError as e:
+            raise LoadAgateTableValueError(e, node=self.model)
+        # this is used by some adapters
+        table.original_abspath = os.path.abspath(path)  # type: ignore
+        return table
+
1243
+ @contextproperty()
1244
+ def ref(self) -> Callable:
1245
+ """The most important function in dbt is `ref()`; it's impossible to
1246
+ build even moderately complex models without it. `ref()` is how you
1247
+ reference one model within another. This is a very common behavior, as
1248
+ typically models are built to be "stacked" on top of one another. Here
1249
+ is how this looks in practice:
1250
+
1251
+ > model_a.sql:
1252
+
1253
+ select *
1254
+ from public.raw_data
1255
+
1256
+ > model_b.sql:
1257
+
1258
+ select *
1259
+ from {{ref('model_a')}}
1260
+
1261
+
1262
+ `ref()` is, under the hood, actually doing two important things. First,
1263
+ it is interpolating the schema into your model file to allow you to
1264
+ change your deployment schema via configuration. Second, it is using
1265
+ these references between models to automatically build the dependency
1266
+ graph. This will enable dbt to deploy models in the correct order when
1267
+ using dbt run.
1268
+
1269
+ The `ref` function returns a Relation object.
1270
+
1271
+ ## Advanced ref usage
1272
+
1273
+ There is also a two-argument variant of the `ref` function. With this
1274
+ variant, you can pass both a package name and model name to `ref` to
1275
+ avoid ambiguity. This functionality is not commonly required for
1276
+ typical dbt usage.
1277
+
1278
+ > model.sql:
1279
+
1280
+ select * from {{ ref('package_name', 'model_name') }}"
1281
+ """
1282
+ return self.provider.ref(self.db_wrapper, self.model, self.config, self.manifest)
1283
+
1284
+ @contextproperty()
1285
+ def source(self) -> Callable:
1286
+ return self.provider.source(self.db_wrapper, self.model, self.config, self.manifest)
1287
+
1288
+ @contextproperty()
1289
+ def metric(self) -> Callable:
1290
+ return self.provider.metric(self.db_wrapper, self.model, self.config, self.manifest)
1291
+
1292
+ @contextproperty()
1293
+ def function(self) -> Callable:
1294
+ return self.provider.function(self.db_wrapper, self.model, self.config, self.manifest)
1295
+
1296
+ @contextproperty("config")
1297
+ def ctx_config(self) -> Config:
1298
+ """The `config` variable exists to handle end-user configuration for
1299
+ custom materializations. Configs like `unique_key` can be implemented
1300
+ using the `config` variable in your own materializations.
1301
+
1302
+ For example, code in the `incremental` materialization like this:
1303
+
1304
+ {% materialization incremental, default -%}
1305
+ {%- set unique_key = config.get('unique_key') -%}
1306
+ ...
1307
+
1308
+ is responsible for handling model code that looks like this:
1309
+
1310
+ {{
1311
+ config(
1312
+ materialized='incremental',
1313
+ unique_key='id'
1314
+ )
1315
+ }}
1316
+
1317
+
1318
+ ## config.get
1319
+
1320
+ name: The name of the configuration variable (required)
1321
+ default: The default value to use if this configuration is not provided
1322
+ (optional)
1323
+
1324
+ The `config.get` function is used to get configurations for a model
1325
+ from the end-user. Configs defined in this way are optional, and a
1326
+ default value can be provided.
1327
+
1328
+ Example usage:
1329
+
1330
+ {% materialization incremental, default -%}
1331
+ -- Example w/ no default. unique_key will be None if the user does not provide this configuration
1332
+ {%- set unique_key = config.get('unique_key') -%}
1333
+ -- Example w/ default value. Default to 'id' if 'unique_key' not provided
1334
+ {%- set unique_key = config.get('unique_key', default='id') -%}
1335
+ ...
1336
+
1337
+ ## config.require
1338
+
1339
+ name: The name of the configuration variable (required)
1340
+
1341
+ The `config.require` function is used to get configurations for a model
1342
+ from the end-user. Configs defined using this function are required,
1343
+ and failure to provide them will result in a compilation error.
1344
+
1345
+ Example usage:
1346
+
1347
+ {% materialization incremental, default -%}
1348
+ {%- set unique_key = config.require('unique_key') -%}
1349
+ ...
1350
+ """ # noqa
1351
+ return self.provider.Config(self.model, self.context_config)
1352
+
1353
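The `provider.Config` object returned above is what backs `config.get` and `config.require` inside materializations. A minimal stand-in with the same two-method surface (an illustrative sketch, not the actual provider implementation) could look like:

    from typing import Any, Dict, Optional

    class ConfigSketch:
        """Toy config lookup mirroring the get/require semantics described above."""

        def __init__(self, model_config: Dict[str, Any]) -> None:
            self._config = model_config

        def get(self, name: str, default: Optional[Any] = None) -> Any:
            # Optional config: fall back to the default (None if not given).
            return self._config.get(name, default)

        def require(self, name: str) -> Any:
            # Required config: in dbt a missing value is a compilation error;
            # this sketch raises KeyError instead.
            if name not in self._config:
                raise KeyError(f"Required config '{name}' was not provided")
            return self._config[name]

    cfg = ConfigSketch({"materialized": "incremental", "unique_key": "id"})
    assert cfg.get("sort", default="id") == "id"
    assert cfg.require("unique_key") == "id"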
+ @contextproperty()
1354
+ def execute(self) -> bool:
1355
+ """`execute` is a Jinja variable that returns True when dbt is in
1356
+ "execute" mode.
1357
+
1358
+ When you execute a dbt compile or dbt run command, dbt:
1359
+
1360
+ - Reads all of the files in your project and generates a "manifest"
1361
+ comprised of models, tests, and other graph nodes present in your
1362
+ project. During this phase, dbt uses the `ref` statements it finds
1363
+ to generate the DAG for your project. *No SQL is run during this
1364
+ phase*, and `execute == False`.
1365
+ - Compiles (and runs) each node (eg. building models, or running
1366
+ tests). SQL is run during this phase, and `execute == True`.
1367
+
1368
+ Any Jinja that relies on a result being returned from the database will
1369
+ error during the parse phase. For example, this SQL will return an
1370
+ error:
1371
+
1372
+ > models/order_payment_methods.sql:
1373
+
1374
+ {% set payment_method_query %}
1375
+ select distinct
1376
+ payment_method
1377
+ from {{ ref('raw_payments') }}
1378
+ order by 1
1379
+ {% endset %}
1380
+ {% set results = run_query(payment_method_query) %}
1381
+ {# Return the first column #}
1382
+ {% set payment_methods = results.columns[0].values() %}
1383
+
1384
+ The error returned by dbt will look as follows:
1385
+
1386
+ Encountered an error:
1387
+ Compilation Error in model order_payment_methods (models/order_payment_methods.sql)
1388
+ 'None' has no attribute 'table'
1389
+
1390
+ This is because Line #11 assumes that a table has been returned, when,
1391
+ during the parse phase, this query hasn't been run.
1392
+
1393
+ To work around this, wrap any problematic Jinja in an
1394
+ `{% if execute %}` statement:
1395
+
1396
+ > models/order_payment_methods.sql:
1397
+
1398
+ {% set payment_method_query %}
1399
+ select distinct
1400
+ payment_method
1401
+ from {{ ref('raw_payments') }}
1402
+ order by 1
1403
+ {% endset %}
1404
+ {% set results = run_query(payment_method_query) %}
1405
+ {% if execute %}
1406
+ {# Return the first column #}
1407
+ {% set payment_methods = results.columns[0].values() %}
1408
+ {% else %}
1409
+ {% set payment_methods = [] %}
1410
+ {% endif %}
1411
+ """ # noqa
1412
+ return self.provider.execute
1413
+
1414
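Since the property simply returns `self.provider.execute`, the provider determines which phase is active: parse-time providers report False and run-time providers report True, which is why database-dependent Jinja must be guarded. A toy sketch of that two-phase behaviour (not the real Provider classes):

    class ParsePhase:
        execute = False   # manifest/DAG building only, no SQL is run

    class RunPhase:
        execute = True    # nodes are compiled and run against the warehouse

    def payment_methods(phase) -> list:
        # Mirrors the guarded Jinja example above: only touch query results
        # when we are actually executing.
        if phase.execute:
            return ["credit_card", "gift_card"]  # stand-in for a run_query() result
        return []

    assert payment_methods(ParsePhase()) == []
    assert payment_methods(RunPhase()) == ["credit_card", "gift_card"]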
+ @contextproperty()
1415
+ def exceptions(self) -> Dict[str, Any]:
1416
+ """The exceptions namespace can be used to raise warnings and errors in
1417
+ dbt userspace.
1418
+
1419
+
1420
+ ## raise_compiler_error
1421
+
1422
+ The `exceptions.raise_compiler_error` method will raise a compiler
1423
+ error with the provided message. This is typically only useful in
1424
+ macros or materializations when invalid arguments are provided by the
1425
+ calling model. Note that throwing an exception will cause a model to
1426
+ fail, so please use this function with care!
1427
+
1428
+ Example usage:
1429
+
1430
+ > exceptions.sql:
1431
+
1432
+ {% if number < 0 or number > 100 %}
1433
+ {{ exceptions.raise_compiler_error("Invalid `number`. Got: " ~ number) }}
1434
+ {% endif %}
1435
+
1436
+ ## warn
1437
+
1438
+ The `exceptions.warn` method will raise a compiler warning with the
1439
+ provided message. If the `--warn-error` flag is provided to dbt, then
1440
+ this warning will be elevated to an exception and raised.
1441
+
1442
+ Example usage:
1443
+
1444
+ > warn.sql:
1445
+
1446
+ {% if number < 0 or number > 100 %}
1447
+ {% do exceptions.warn("Invalid `number`. Got: " ~ number) %}
1448
+ {% endif %}
1449
+ """ # noqa
1450
+ return wrapped_exports(self.model)
1451
+
1452
+ @contextproperty()
1453
+ def database(self) -> str:
1454
+ return self.config.credentials.database
1455
+
1456
+ @contextproperty()
1457
+ def schema(self) -> str:
1458
+ return self.config.credentials.schema
1459
+
1460
+ @contextproperty()
1461
+ def var(self) -> ModelConfiguredVar:
1462
+ return self.provider.Var(
1463
+ context=self._ctx,
1464
+ config=self.config,
1465
+ node=self.model,
1466
+ )
1467
+
1468
+ @contextproperty("adapter")
1469
+ def ctx_adapter(self) -> BaseDatabaseWrapper:
1470
+ """`adapter` is a wrapper around the internal database adapter used by
1471
+ dbt. It allows users to make calls to the database in their dbt models.
1472
+ The adapter methods will be translated into specific SQL statements
1473
+ depending on the type of adapter your project is using.
1474
+ """
1475
+ return self.db_wrapper
1476
+
1477
+ @contextproperty()
1478
+ def api(self) -> Dict[str, Any]:
1479
+ return {
1480
+ "Relation": self.db_wrapper.Relation,
1481
+ "Column": self.adapter.Column,
1482
+ }
1483
+
1484
+ @contextproperty()
1485
+ def column(self) -> Type[Column]:
1486
+ return self.adapter.Column
1487
+
1488
+ @contextproperty()
1489
+ def env(self) -> Dict[str, Any]:
1490
+ return self.target
1491
+
1492
+ @contextproperty()
1493
+ def graph(self) -> Dict[str, Any]:
1494
+ """The `graph` context variable contains information about the nodes in
1495
+ your dbt project. Models, sources, tests, and snapshots are all
1496
+ examples of nodes in dbt projects.
1497
+
1498
+ ## The graph context variable
1499
+
1500
+ The graph context variable is a dictionary which maps node ids onto dictionary representations of those nodes. A simplified example might look like:
1501
+
1502
+ {
1503
+ "model.project_name.model_name": {
1504
+ "config": {"materialzed": "table", "sort": "id"},
1505
+ "tags": ["abc", "123"],
1506
+ "path": "models/path/to/model_name.sql",
1507
+ ...
1508
+ },
1509
+ "source.project_name.source_name": {
1510
+ "path": "models/path/to/schema.yml",
1511
+ "columns": {
1512
+ "id": { .... },
1513
+ "first_name": { .... },
1514
+ },
1515
+ ...
1516
+ }
1517
+ }
1518
+
1519
+ The exact contract for these model and source nodes is not currently
1520
+ documented, but that will change in the future.
1521
+
1522
+ ## Accessing models
1523
+
1524
+ The `model` entries in the `graph` dictionary will be incomplete or
1525
+ incorrect during parsing. If accessing the models in your project via
1526
+ the `graph` variable, be sure to use the `execute` flag to ensure that
1527
+ this code only executes at run-time and not at parse-time. Do not use
1528
+ the `graph` variable to build your DAG, as the resulting dbt behavior
1529
+ will be undefined and likely incorrect.
1530
+
1531
+ Example usage:
1532
+
1533
+ > graph-usage.sql:
1534
+
1535
+ /*
1536
+ Print information about all of the models in the Snowplow package
1537
+ */
1538
+ {% if execute %}
1539
+ {% for node in graph.nodes.values()
1540
+ | selectattr("resource_type", "equalto", "model")
1541
+ | selectattr("package_name", "equalto", "snowplow") %}
1542
+
1543
+ {% do log(node.unique_id ~ ", materialized: " ~ node.config.materialized, info=true) %}
1544
+
1545
+ {% endfor %}
1546
+ {% endif %}
1547
+ /*
1548
+ Example output
1549
+ ---------------------------------------------------------------
1550
+ model.snowplow.snowplow_id_map, materialized: incremental
1551
+ model.snowplow.snowplow_page_views, materialized: incremental
1552
+ model.snowplow.snowplow_web_events, materialized: incremental
1553
+ model.snowplow.snowplow_web_page_context, materialized: table
1554
+ model.snowplow.snowplow_web_events_scroll_depth, materialized: incremental
1555
+ model.snowplow.snowplow_web_events_time, materialized: incremental
1556
+ model.snowplow.snowplow_web_events_internal_fixed, materialized: ephemeral
1557
+ model.snowplow.snowplow_base_web_page_context, materialized: ephemeral
1558
+ model.snowplow.snowplow_base_events, materialized: ephemeral
1559
+ model.snowplow.snowplow_sessions_tmp, materialized: incremental
1560
+ model.snowplow.snowplow_sessions, materialized: table
1561
+ */
1562
+
1563
+ ## Accessing sources
1564
+
1565
+ To access the sources in your dbt project programmatically, use the "sources" attribute.
1566
+
1567
+ Example usage:
1568
+
1569
+ > models/events_unioned.sql
1570
+
1571
+ /*
1572
+ Union all of the Snowplow sources defined in the project
1573
+ which begin with the string "event_"
1574
+ */
1575
+ {% set sources = [] -%}
1576
+ {% for node in graph.sources.values() -%}
1577
+ {%- if node.name.startswith('event_') and node.source_name == 'snowplow' -%}
1578
+ {%- do sources.append(source(node.source_name, node.name)) -%}
1579
+ {%- endif -%}
1580
+ {%- endfor %}
1581
+ select * from (
1582
+ {%- for source in sources %}
1583
+ {{ source }} {% if not loop.last %} union all {% endif %}
1584
+ {% endfor %}
1585
+ )
1586
+ /*
1587
+ Example compiled SQL
1588
+ ---------------------------------------------------------------
1589
+ select * from (
1590
+ select * from raw.snowplow.event_add_to_cart union all
1591
+ select * from raw.snowplow.event_remove_from_cart union all
1592
+ select * from raw.snowplow.event_checkout
1593
+ )
1594
+ */
1595
+
1596
+ """ # noqa
1597
+ return self.manifest.flat_graph
1598
+
1599
+ @contextproperty("model")
1600
+ def ctx_model(self) -> Dict[str, Any]:
1601
+ model_dct = self.model.to_dict(omit_none=True)
1602
+ # Maintain direct use of compiled_sql
1603
+ # TODO add deprecation logic [CT-934]
1604
+ if "compiled_code" in model_dct:
1605
+ model_dct["compiled_sql"] = model_dct["compiled_code"]
1606
+
1607
+ if (
1608
+ hasattr(self.model, "contract")
1609
+ and self.model.contract.alias_types is True
1610
+ and "columns" in model_dct
1611
+ ):
1612
+ for column in model_dct["columns"].values():
1613
+ if "data_type" in column:
1614
+ orig_data_type = column["data_type"]
1615
+ # translate data_type to value in Column.TYPE_LABELS
1616
+ new_data_type = self.adapter.Column.translate_type(orig_data_type)
1617
+ column["data_type"] = new_data_type
1618
+ return model_dct
1619
+
1620
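The contract-aliasing branch above rewrites each column's `data_type` through the adapter's `Column.translate_type`, which maps generic type labels onto adapter-specific names via `TYPE_LABELS`. A hedged illustration of that idea (the mapping below is hypothetical, not any particular adapter's table):

    # Hypothetical label table; real adapters define their own TYPE_LABELS.
    TYPE_LABELS = {"STRING": "TEXT", "INTEGER": "INT"}

    def translate_type(dtype: str) -> str:
        return TYPE_LABELS.get(dtype.upper(), dtype)

    columns = {"id": {"data_type": "integer"}, "name": {"data_type": "string"}}
    for column in columns.values():
        column["data_type"] = translate_type(column["data_type"])

    assert columns["name"]["data_type"] == "TEXT"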
+ @contextproperty()
1621
+ def pre_hooks(self) -> Optional[List[Dict[str, Any]]]:
1622
+ return None
1623
+
1624
+ @contextproperty()
1625
+ def post_hooks(self) -> Optional[List[Dict[str, Any]]]:
1626
+ return None
1627
+
1628
+ @contextproperty()
1629
+ def sql(self) -> Optional[str]:
1630
+ return None
1631
+
1632
+ @contextproperty()
1633
+ def sql_now(self) -> str:
1634
+ return self.adapter.date_function()
1635
+
1636
+ @contextmember()
1637
+ def adapter_macro(self, name: str, *args, **kwargs):
1638
+ """This was deprecated in v0.18 in favor of adapter.dispatch"""
1639
+ msg = (
1640
+ 'The "adapter_macro" macro has been deprecated. Instead, use '
1641
+ "the `adapter.dispatch` method to find a macro and call the "
1642
+ "result. For more information, see: "
1643
+ "https://docs.getdbt.com/reference/dbt-jinja-functions/dispatch)"
1644
+ " adapter_macro was called for: {macro_name}".format(macro_name=name)
1645
+ )
1646
+ raise CompilationError(msg)
1647
+
1648
+ @contextmember()
1649
+ def env_var(self, var: str, default: Optional[str] = None) -> str:
1650
+ """The env_var() function. Return the environment variable named 'var'.
1651
+ If there is no such environment variable set, return the default.
1652
+
1653
+ If the default is None, raise an exception for an undefined variable.
1654
+ """
1655
+ return_value = None
1656
+ if var.startswith(SECRET_ENV_PREFIX):
1657
+ raise SecretEnvVarLocationError(var)
1658
+
1659
+ env = get_invocation_context().env
1660
+
1661
+ if var in env:
1662
+ return_value = env[var]
1663
+ elif default is not None:
1664
+ return_value = default
1665
+
1666
+ if return_value is not None:
1667
+ # Save the env_var value in the manifest and the var name in the source_file.
1668
+ # If this is compiling, do not save because it's irrelevant to parsing.
1669
+ compiling = (
1670
+ True
1671
+ if hasattr(self.model, "compiled")
1672
+ and getattr(self.model, "compiled", False) is True
1673
+ else False
1674
+ )
1675
+ if self.model and not compiling:
1676
+ # If the environment variable is set from a default, store a string indicating
1677
+ # that so we can skip partial parsing. Otherwise the file will be scheduled for
1678
+ # reparsing. If the default changes, the file will have been updated and therefore
1679
+ # will be scheduled for reparsing anyways.
1680
+ self.manifest.env_vars[var] = (
1681
+ return_value if var in env else DEFAULT_ENV_PLACEHOLDER
1682
+ )
1683
+
1684
+ # hooks come from dbt_project.yml which doesn't have a real file_id
1685
+ if self.model.file_id in self.manifest.files:
1686
+ source_file = self.manifest.files[self.model.file_id]
1687
+ # Schema files should never get here
1688
+ if source_file.parse_file_type != "schema":
1689
+ # TODO CT-211
1690
+ source_file.env_vars.append(var) # type: ignore[union-attr]
1691
+ return return_value
1692
+ else:
1693
+ raise EnvVarMissingError(var)
1694
+
1695
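Stripped of the manifest bookkeeping, the lookup order implemented above is: reject secret-prefixed names, prefer the invocation environment, then the default, and otherwise raise. A self-contained sketch of just that precedence (placeholder prefix and exceptions, not dbt's own):

    import os
    from typing import Optional

    SECRET_PREFIX = "DBT_ENV_SECRET_"  # placeholder for SECRET_ENV_PREFIX

    def env_var_sketch(var: str, default: Optional[str] = None) -> str:
        if var.startswith(SECRET_PREFIX):
            raise ValueError(f"Secret env vars are not allowed here: {var}")
        if var in os.environ:
            return os.environ[var]
        if default is not None:
            return default
        raise KeyError(f"Env var required but not provided: {var}")

    assert env_var_sketch("SOME_UNSET_VAR", default="fallback") == "fallback"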
+ @contextproperty()
1696
+ def selected_resources(self) -> List[str]:
1697
+ """The `selected_resources` variable contains a list of the resources
1698
+ selected based on the parameters provided to the dbt command.
1699
+ Currently, it is not populated for the `run-operation` command, which
1700
+ doesn't support `--select`.
1701
+ """
1702
+ return selected_resources.SELECTED_RESOURCES
1703
+
1704
+ @contextmember()
1705
+ def submit_python_job(self, parsed_model: Dict, compiled_code: str) -> AdapterResponse:
1706
+ # Check macro_stack and that the unique id is for a materialization macro
1707
+ if not (
1708
+ self.context_macro_stack.depth == 2
1709
+ and self.context_macro_stack.call_stack[1] == "macro.dbt.statement"
1710
+ and "materialization" in self.context_macro_stack.call_stack[0]
1711
+ ):
1712
+ raise DbtRuntimeError(
1713
+ f"submit_python_job is not intended to be called here, at model {parsed_model['alias']}, with macro call_stack {self.context_macro_stack.call_stack}."
1714
+ )
1715
+ return self.adapter.submit_python_job(parsed_model, compiled_code)
1716
+
1717
+
1718
+ class MacroContext(ProviderContext):
1719
+ """Internally, macros can be executed like nodes, with some restrictions:
1720
+
1721
+ - they don't have all values available that nodes do:
1722
+ - 'this', 'pre_hooks', 'post_hooks', and 'sql' are missing
1723
+ - 'schema' does not use any 'model' information
1724
+ - they can't be configured with config() directives
1725
+ """
1726
+
1727
+ def __init__(
1728
+ self,
1729
+ model: MacroProtocol,
1730
+ config: RuntimeConfig,
1731
+ manifest: Manifest,
1732
+ provider: Provider,
1733
+ search_package: Optional[str],
1734
+ ) -> None:
1735
+ super().__init__(model, config, manifest, provider, None)
1736
+ # override the model-based package with the given one
1737
+ if search_package is None:
1738
+ # if the search package name isn't specified, use the root project
1739
+ self._search_package = config.project_name
1740
+ else:
1741
+ self._search_package = search_package
1742
+
1743
+
1744
+ class SourceContext(ProviderContext):
1745
+ # SourceContext is being used to render jinja SQL during execution of
1746
+ # custom SQL in source freshness. It is not used for parsing.
1747
+ model: SourceDefinition
1748
+
1749
+ @contextproperty()
1750
+ def this(self) -> Optional[RelationProxy]:
1751
+ return self.db_wrapper.Relation.create_from(self.config, self.model)
1752
+
1753
+ @contextproperty()
1754
+ def source_node(self) -> SourceDefinition:
1755
+ return self.model
1756
+
1757
+
1758
+ class ModelContext(ProviderContext):
1759
+ model: ManifestNode
1760
+
1761
+ @contextproperty()
1762
+ def pre_hooks(self) -> List[Dict[str, Any]]:
1763
+ if self.model.resource_type in [NodeType.Source, NodeType.Test, NodeType.Unit]:
1764
+ return []
1765
+ # TODO CT-211
1766
+ return [
1767
+ h.to_dict(omit_none=True) for h in self.model.config.pre_hook # type: ignore[union-attr] # noqa
1768
+ ]
1769
+
1770
+ @contextproperty()
1771
+ def post_hooks(self) -> List[Dict[str, Any]]:
1772
+ if self.model.resource_type in [NodeType.Source, NodeType.Test, NodeType.Unit]:
1773
+ return []
1774
+ # TODO CT-211
1775
+ return [
1776
+ h.to_dict(omit_none=True) for h in self.model.config.post_hook # type: ignore[union-attr] # noqa
1777
+ ]
1778
+
1779
+ @contextproperty()
1780
+ def compiled_code(self) -> Optional[str]:
1781
+ # TODO: avoid routing on args.which if possible
1782
+ if getattr(self.model, "defer_relation", None) and self.config.args.which == "clone":
1783
+ # TODO https://github.com/dbt-labs/dbt-core/issues/7976
1784
+ return f"select * from {self.model.defer_relation.relation_name or str(self.defer_relation)}" # type: ignore[union-attr]
1785
+ elif getattr(self.model, "extra_ctes_injected", None):
1786
+ # TODO CT-211
1787
+ return self.model.compiled_code # type: ignore[union-attr]
1788
+ else:
1789
+ return None
1790
+
1791
+ @contextproperty()
1792
+ def sql(self) -> Optional[str]:
1793
+ # only set this for sql models, for backward compatibility
1794
+ if self.model.language == ModelLanguage.sql: # type: ignore[union-attr]
1795
+ return self.compiled_code
1796
+ else:
1797
+ return None
1798
+
1799
+ @contextproperty()
1800
+ def database(self) -> str:
1801
+ return getattr(self.model, "database", self.config.credentials.database)
1802
+
1803
+ @contextproperty()
1804
+ def schema(self) -> str:
1805
+ return getattr(self.model, "schema", self.config.credentials.schema)
1806
+
1807
+ @contextproperty()
1808
+ def this(self) -> Optional[RelationProxy]:
1809
+ """`this` makes available schema information about the currently
1810
+ executing model. It is useful in any context in which you need to
1811
+ write code that references the current model, for example when defining
1812
+ a `sql_where` clause for an incremental model and for writing pre- and
1813
+ post-model hooks that operate on the model in some way. Developers have
1814
+ options for how to use `this`:
1815
+
1816
+ |------------------|------------------|
1817
+ | dbt Model Syntax | Output |
1818
+ |------------------|------------------|
1819
+ | {{this}} | "schema"."table" |
1820
+ |------------------|------------------|
1821
+ | {{this.schema}} | schema |
1822
+ |------------------|------------------|
1823
+ | {{this.table}} | table |
1824
+ |------------------|------------------|
1825
+ | {{this.name}} | table |
1826
+ |------------------|------------------|
1827
+
1828
+ Here's an example of how to use `this` in `dbt_project.yml` to grant
1829
+ select rights on a table to a different db user.
1830
+
1831
+ > example.yml:
1832
+
1833
+ models:
1834
+ project-name:
1835
+ post-hook:
1836
+ - "grant select on {{ this }} to db_reader"
1837
+ """
1838
+ if self.model.resource_type == NodeType.Operation:
1839
+ return None
1840
+ return self.db_wrapper.Relation.create_from(self.config, self.model)
1841
+
1842
+ @contextproperty()
1843
+ def defer_relation(self) -> Optional[RelationProxy]:
1844
+ """
1845
+ For commands which add information about this node's corresponding
1846
+ production version (via a --state artifact), access the Relation
1847
+ object for that stateful other.
1848
+ """
1849
+ if getattr(self.model, "defer_relation", None):
1850
+ return self.db_wrapper.Relation.create_from(
1851
+ self.config, self.model.defer_relation # type: ignore
1852
+ )
1853
+ else:
1854
+ return None
1855
+
1856
+
1857
+ class UnitTestContext(ModelContext):
1858
+ model: UnitTestNode
1859
+
1860
+ @contextmember()
1861
+ def env_var(self, var: str, default: Optional[str] = None) -> str:
1862
+ """The env_var() function. Return the overriden unit test environment variable named 'var'.
1863
+
1864
+ If there is no unit test override, return the environment variable named 'var'.
1865
+
1866
+ If there is no such environment variable set, return the default.
1867
+
1868
+ If the default is None, raise an exception for an undefined variable.
1869
+ """
1870
+ if self.model.overrides and var in self.model.overrides.env_vars:
1871
+ return self.model.overrides.env_vars[var]
1872
+ else:
1873
+ return super().env_var(var, default)
1874
+
1875
+ @contextproperty()
1876
+ def this(self) -> Optional[str]:
1877
+ if self.model.this_input_node_unique_id:
1878
+ this_node = self.manifest.expect(self.model.this_input_node_unique_id)
1879
+ self.model.set_cte(this_node.unique_id, None) # type: ignore
1880
+ return self.adapter.Relation.add_ephemeral_prefix(this_node.identifier) # type: ignore
1881
+ return None
1882
+
1883
+
1884
+ class FunctionContext(ModelContext):
1885
+ model: FunctionNode
1886
+
1887
+ @contextproperty()
1888
+ def this(self) -> Optional[RelationProxy]:
1889
+ return self.db_wrapper.Relation.create_from(self.config, self.model)
1890
+
1891
+
1892
+ # This is called by '_context_for', used in 'render_with_context'
1893
+ def generate_parser_model_context(
1894
+ model: ManifestNode,
1895
+ config: RuntimeConfig,
1896
+ manifest: Manifest,
1897
+ context_config: ContextConfig,
1898
+ ) -> Dict[str, Any]:
1899
+ # The __init__ method of ModelContext also initializes
1900
+ # a ManifestContext object which creates a MacroNamespaceBuilder
1901
+ # which adds every macro in the Manifest.
1902
+ ctx = ModelContext(model, config, manifest, ParseProvider(), context_config)
1903
+ # The 'to_dict' method in ManifestContext moves all of the macro names
1904
+ # in the macro 'namespace' up to top level keys
1905
+ return ctx.to_dict()
1906
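As the comment notes, `to_dict` lifts every macro out of the namespace into top-level context keys, so templates can call `my_macro()` directly. A toy illustration of that flattening (not the real ManifestContext.to_dict):

    def flatten_context(base_context: dict, macro_namespace: dict) -> dict:
        ctx = dict(base_context)
        ctx.update(macro_namespace)  # macro names become top-level keys
        return ctx

    ctx = flatten_context(
        {"ref": "<ref fn>", "config": "<config obj>"},
        {"my_macro": "<macro>", "dbt": {"is_incremental": "<macro>"}},
    )
    assert "my_macro" in ctx and "dbt" in ctx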
+
1907
+
1908
+ def generate_generate_name_macro_context(
1909
+ macro: Macro,
1910
+ config: RuntimeConfig,
1911
+ manifest: Manifest,
1912
+ ) -> Dict[str, Any]:
1913
+ ctx = MacroContext(macro, config, manifest, GenerateNameProvider(), None)
1914
+ return ctx.to_dict()
1915
+
1916
+
1917
+ def generate_runtime_model_context(
1918
+ model: ManifestNode,
1919
+ config: RuntimeConfig,
1920
+ manifest: Manifest,
1921
+ ) -> Dict[str, Any]:
1922
+ ctx = ModelContext(model, config, manifest, RuntimeProvider(), None)
1923
+ return ctx.to_dict()
1924
+
1925
+
1926
+ def generate_runtime_macro_context(
1927
+ macro: MacroProtocol,
1928
+ config: RuntimeConfig,
1929
+ manifest: Manifest,
1930
+ package_name: Optional[str],
1931
+ ) -> Dict[str, Any]:
1932
+ ctx = MacroContext(macro, config, manifest, OperationProvider(), package_name)
1933
+ return ctx.to_dict()
1934
+
1935
+
1936
+ def generate_runtime_unit_test_context(
1937
+ unit_test: UnitTestNode,
1938
+ config: RuntimeConfig,
1939
+ manifest: Manifest,
1940
+ ) -> Dict[str, Any]:
1941
+ ctx = UnitTestContext(unit_test, config, manifest, RuntimeUnitTestProvider(), None)
1942
+ ctx_dict = ctx.to_dict()
1943
+
1944
+ if unit_test.overrides and unit_test.overrides.macros:
1945
+ global_macro_overrides: Dict[str, Any] = {}
1946
+ package_macro_overrides: Dict[Tuple[str, str], Any] = {}
1947
+
1948
+ # split macro overrides into global and package-namespaced collections
1949
+ for macro_name, macro_value in unit_test.overrides.macros.items():
1950
+ macro_name_split = macro_name.split(".")
1951
+ macro_package = macro_name_split[0] if len(macro_name_split) == 2 else None
1952
+ macro_name = macro_name_split[-1]
1953
+
1954
+ # macro overrides of global macros
1955
+ if macro_package is None and macro_name in ctx_dict:
1956
+ original_context_value = ctx_dict[macro_name]
1957
+ if isinstance(original_context_value, MacroGenerator):
1958
+ macro_value = UnitTestMacroGenerator(original_context_value, macro_value)
1959
+ global_macro_overrides[macro_name] = macro_value
1960
+
1961
+ # macro overrides of package-namespaced macros
1962
+ elif (
1963
+ macro_package
1964
+ and macro_package in ctx_dict
1965
+ and macro_name in ctx_dict[macro_package]
1966
+ ):
1967
+ original_context_value = ctx_dict[macro_package][macro_name]
1968
+ if isinstance(original_context_value, MacroGenerator):
1969
+ macro_value = UnitTestMacroGenerator(original_context_value, macro_value)
1970
+ package_macro_overrides[(macro_package, macro_name)] = macro_value
1971
+
1972
+ # macro overrides of package-namespaced macros
1973
+ for (macro_package, macro_name), macro_override_value in package_macro_overrides.items():
1974
+ ctx_dict[macro_package][macro_name] = macro_override_value
1975
+ # propagate override of namespaced dbt macro to global namespace
1976
+ if macro_package == "dbt":
1977
+ ctx_dict[macro_name] = macro_override_value
1978
+
1979
+ # macro overrides of global macros, which should take precedence over equivalent package-namespaced overrides
1980
+ for macro_name, macro_override_value in global_macro_overrides.items():
1981
+ ctx_dict[macro_name] = macro_override_value
1982
+ # propagate override of global dbt macro to dbt namespace
1983
+ if ctx_dict["dbt"].get(macro_name):
1984
+ ctx_dict["dbt"][macro_name] = macro_override_value
1985
+
1986
+ return ctx_dict
1987
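The override handling above first splits each key on "." to decide whether it targets a global macro or a package-namespaced one. In isolation, that split behaves like this (illustrative only):

    def split_macro_override(macro_name: str):
        parts = macro_name.split(".")
        package = parts[0] if len(parts) == 2 else None
        return package, parts[-1]

    assert split_macro_override("invocation_id") == (None, "invocation_id")
    assert split_macro_override("dbt.is_incremental") == ("dbt", "is_incremental")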
+
1988
+
1989
+ def generate_runtime_function_context(
1990
+ function: FunctionNode,
1991
+ config: RuntimeConfig,
1992
+ manifest: Manifest,
1993
+ ) -> Dict[str, Any]:
1994
+ ctx = FunctionContext(function, config, manifest, OperationProvider(), None)
1995
+ return ctx.to_dict()
1996
+
1997
+
1998
+ class ExposureRefResolver(BaseResolver):
1999
+ def __call__(self, *args, **kwargs) -> str:
2000
+ package = None
2001
+ if len(args) == 1:
2002
+ name = args[0]
2003
+ elif len(args) == 2:
2004
+ package, name = args
2005
+ else:
2006
+ raise RefArgsError(node=self.model, args=args)
2007
+
2008
+ version = kwargs.get("version") or kwargs.get("v")
2009
+
2010
+ self.model.refs.append(RefArgs(package=package, name=name, version=version))
2011
+ return ""
2012
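The resolver above accepts both the one- and two-argument `ref` forms plus an optional version keyword, and records each call rather than returning a relation. A small sketch of that argument handling (standalone, using a plain tuple instead of RefArgs):

    def parse_ref_args(*args, **kwargs):
        # Mirrors the branching above: ref('name') or ref('package', 'name').
        if len(args) == 1:
            package, name = None, args[0]
        elif len(args) == 2:
            package, name = args
        else:
            raise ValueError(f"Invalid ref arguments: {args}")
        version = kwargs.get("version") or kwargs.get("v")
        return (package, name, version)

    assert parse_ref_args("orders") == (None, "orders", None)
    assert parse_ref_args("jaffle_shop", "orders", v=2) == ("jaffle_shop", "orders", 2)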
+
2013
+
2014
+ class ExposureSourceResolver(BaseResolver):
2015
+ def __call__(self, *args) -> str:
2016
+ if len(args) != 2:
2017
+ raise NumberSourceArgsError(args, node=self.model)
2018
+ self.model.sources.append(list(args))
2019
+ return ""
2020
+
2021
+
2022
+ class ExposureMetricResolver(BaseResolver):
2023
+ def __call__(self, *args) -> str:
2024
+ if len(args) not in (1, 2):
2025
+ raise MetricArgsError(node=self.model, args=args)
2026
+ self.model.metrics.append(list(args))
2027
+ return ""
2028
+
2029
+
2030
+ def generate_parse_exposure(
2031
+ exposure: Exposure,
2032
+ config: RuntimeConfig,
2033
+ manifest: Manifest,
2034
+ package_name: str,
2035
+ ) -> Dict[str, Any]:
2036
+ project = config.load_dependencies()[package_name]
2037
+ return {
2038
+ "ref": ExposureRefResolver(
2039
+ None,
2040
+ exposure,
2041
+ project,
2042
+ manifest,
2043
+ ),
2044
+ "source": ExposureSourceResolver(
2045
+ None,
2046
+ exposure,
2047
+ project,
2048
+ manifest,
2049
+ ),
2050
+ "metric": ExposureMetricResolver(
2051
+ None,
2052
+ exposure,
2053
+ project,
2054
+ manifest,
2055
+ ),
2056
+ }
2057
+
2058
+
2059
+ # applies to SemanticModels
2060
+ class SemanticModelRefResolver(BaseResolver):
2061
+ def __call__(self, *args, **kwargs) -> str:
2062
+ package = None
2063
+ if len(args) == 1:
2064
+ name = args[0]
2065
+ elif len(args) == 2:
2066
+ package, name = args
2067
+ else:
2068
+ raise RefArgsError(node=self.model, args=args)
2069
+
2070
+ version = kwargs.get("version") or kwargs.get("v")
2071
+ self.validate_args(name, package, version)
2072
+
2073
+ # "model" here is any node
2074
+ self.model.refs.append(RefArgs(package=package, name=name, version=version))
2075
+ return ""
2076
+
2077
+ def validate_args(self, name, package, version):
2078
+ if not isinstance(name, str):
2079
+ raise ParsingError(
2080
+ f"In a semantic model or metrics section in {self.model.original_file_path} "
2081
+ "the name argument to ref() must be a string"
2082
+ )
2083
+
2084
+
2085
+ # used for semantic models
2086
+ def generate_parse_semantic_models(
2087
+ semantic_model: SemanticModel,
2088
+ config: RuntimeConfig,
2089
+ manifest: Manifest,
2090
+ package_name: str,
2091
+ ) -> Dict[str, Any]:
2092
+ project = config.load_dependencies()[package_name]
2093
+ return {
2094
+ "ref": SemanticModelRefResolver(
2095
+ None,
2096
+ semantic_model,
2097
+ project,
2098
+ manifest,
2099
+ ),
2100
+ }
2101
+
2102
+
2103
+ # This class is currently used by the schema parser in order
2104
+ # to limit the number of macros in the context by using
2105
+ # the TestMacroNamespace
2106
+ class TestContext(ProviderContext):
2107
+ def __init__(
2108
+ self,
2109
+ model,
2110
+ config: RuntimeConfig,
2111
+ manifest: Manifest,
2112
+ provider: Provider,
2113
+ context_config: Optional[ContextConfig],
2114
+ macro_resolver: MacroResolver,
2115
+ ) -> None:
2116
+ # this must be before super init so that macro_resolver exists for
2117
+ # build_namespace
2118
+ self.macro_resolver = macro_resolver
2119
+ self.thread_ctx = MacroStack()
2120
+ super().__init__(model, config, manifest, provider, context_config)
2121
+ self._build_test_namespace()
2122
+ # We need to rebuild this because it's already been built by
2123
+ # the ProviderContext with the wrong namespace.
2124
+ self.db_wrapper = self.provider.DatabaseWrapper(self.adapter, self.namespace)
2125
+
2126
+ def _build_namespace(self):
2127
+ return {}
2128
+
2129
+ # this overrides _build_namespace in ManifestContext which provides a
2130
+ # complete namespace of all macros to only specify macros in the depends_on
2131
+ # This only provides a namespace with macros in the test node
2132
+ # 'depends_on.macros' by using the TestMacroNamespace
2133
+ def _build_test_namespace(self):
2134
+ depends_on_macros = []
2135
+ # all generic tests use a macro named 'get_where_subquery' to wrap 'model' arg
2136
+ # see generic_test_builders.build_model_str
2137
+ get_where_subquery = self.macro_resolver.macros_by_name.get("get_where_subquery")
2138
+ if get_where_subquery:
2139
+ depends_on_macros.append(get_where_subquery.unique_id)
2140
+ if self.model.depends_on and self.model.depends_on.macros:
2141
+ depends_on_macros.extend(self.model.depends_on.macros)
2142
+ lookup_macros = depends_on_macros.copy()
2143
+ for macro_unique_id in lookup_macros:
2144
+ lookup_macro = self.macro_resolver.macros.get(macro_unique_id)
2145
+ if lookup_macro:
2146
+ depends_on_macros.extend(lookup_macro.depends_on.macros)
2147
+
2148
+ macro_namespace = TestMacroNamespace(
2149
+ self.macro_resolver, self._ctx, self.model, self.thread_ctx, depends_on_macros
2150
+ )
2151
+ self.namespace = macro_namespace
2152
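`_build_test_namespace` collects the test's `depends_on.macros` plus `get_where_subquery`, then widens the list by one level of each macro's own dependencies. The expansion step, shown on plain dictionaries (illustrative, not the real resolver):

    # Hypothetical macro dependency table: unique_id -> direct macro deps.
    macro_deps = {
        "macro.dbt.test_not_null": ["macro.dbt.default__test_not_null"],
        "macro.dbt.get_where_subquery": [],
    }

    depends_on = ["macro.dbt.get_where_subquery", "macro.dbt.test_not_null"]
    for unique_id in list(depends_on):          # iterate over a copy, as in the code above
        depends_on.extend(macro_deps.get(unique_id, []))

    assert "macro.dbt.default__test_not_null" in depends_on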
+
2153
+ @contextmember()
2154
+ def env_var(self, var: str, default: Optional[str] = None) -> str:
2155
+ return_value = None
2156
+ if var.startswith(SECRET_ENV_PREFIX):
2157
+ raise SecretEnvVarLocationError(var)
2158
+
2159
+ env = get_invocation_context().env
2160
+ if var in env:
2161
+ return_value = env[var]
2162
+ elif default is not None:
2163
+ return_value = default
2164
+
2165
+ if return_value is not None:
2166
+ # Save the env_var value in the manifest and the var name in the source_file
2167
+ if self.model:
2168
+ # If the environment variable is set from a default, store a string indicating
2169
+ # that so we can skip partial parsing. Otherwise the file will be scheduled for
2170
+ # reparsing. If the default changes, the file will have been updated and therefore
2171
+ # will be scheduled for reparsing anyways.
2172
+ self.manifest.env_vars[var] = (
2173
+ return_value if var in env else DEFAULT_ENV_PLACEHOLDER
2174
+ )
2175
+ # the "model" should only be test nodes, but just in case, check
2176
+ # TODO CT-211
2177
+ if self.model.resource_type == NodeType.Test and self.model.file_key_name: # type: ignore[union-attr] # noqa
2178
+ source_file = self.manifest.files[self.model.file_id]
2179
+ # TODO CT-211
2180
+ (yaml_key, name) = self.model.file_key_name.split(".") # type: ignore[union-attr] # noqa
2181
+ # TODO CT-211
2182
+ source_file.add_env_var(var, yaml_key, name) # type: ignore[union-attr]
2183
+ return return_value
2184
+ else:
2185
+ raise EnvVarMissingError(var)
2186
+
2187
+
2188
+ def generate_test_context(
2189
+ model: ManifestNode,
2190
+ config: RuntimeConfig,
2191
+ manifest: Manifest,
2192
+ context_config: ContextConfig,
2193
+ macro_resolver: MacroResolver,
2194
+ ) -> Dict[str, Any]:
2195
+ ctx = TestContext(model, config, manifest, ParseProvider(), context_config, macro_resolver)
2196
+ # The 'to_dict' method in ManifestContext moves all of the macro names
2197
+ # in the macro 'namespace' up to top level keys
2198
+ return ctx.to_dict()