dvt-core 0.58.6__cp311-cp311-macosx_10_9_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbt/__init__.py +7 -0
- dbt/_pydantic_shim.py +26 -0
- dbt/artifacts/__init__.py +0 -0
- dbt/artifacts/exceptions/__init__.py +1 -0
- dbt/artifacts/exceptions/schemas.py +31 -0
- dbt/artifacts/resources/__init__.py +116 -0
- dbt/artifacts/resources/base.py +67 -0
- dbt/artifacts/resources/types.py +93 -0
- dbt/artifacts/resources/v1/analysis.py +10 -0
- dbt/artifacts/resources/v1/catalog.py +23 -0
- dbt/artifacts/resources/v1/components.py +274 -0
- dbt/artifacts/resources/v1/config.py +277 -0
- dbt/artifacts/resources/v1/documentation.py +11 -0
- dbt/artifacts/resources/v1/exposure.py +51 -0
- dbt/artifacts/resources/v1/function.py +52 -0
- dbt/artifacts/resources/v1/generic_test.py +31 -0
- dbt/artifacts/resources/v1/group.py +21 -0
- dbt/artifacts/resources/v1/hook.py +11 -0
- dbt/artifacts/resources/v1/macro.py +29 -0
- dbt/artifacts/resources/v1/metric.py +172 -0
- dbt/artifacts/resources/v1/model.py +145 -0
- dbt/artifacts/resources/v1/owner.py +10 -0
- dbt/artifacts/resources/v1/saved_query.py +111 -0
- dbt/artifacts/resources/v1/seed.py +41 -0
- dbt/artifacts/resources/v1/semantic_layer_components.py +72 -0
- dbt/artifacts/resources/v1/semantic_model.py +314 -0
- dbt/artifacts/resources/v1/singular_test.py +14 -0
- dbt/artifacts/resources/v1/snapshot.py +91 -0
- dbt/artifacts/resources/v1/source_definition.py +84 -0
- dbt/artifacts/resources/v1/sql_operation.py +10 -0
- dbt/artifacts/resources/v1/unit_test_definition.py +77 -0
- dbt/artifacts/schemas/__init__.py +0 -0
- dbt/artifacts/schemas/base.py +191 -0
- dbt/artifacts/schemas/batch_results.py +24 -0
- dbt/artifacts/schemas/catalog/__init__.py +11 -0
- dbt/artifacts/schemas/catalog/v1/__init__.py +0 -0
- dbt/artifacts/schemas/catalog/v1/catalog.py +59 -0
- dbt/artifacts/schemas/freshness/__init__.py +1 -0
- dbt/artifacts/schemas/freshness/v3/__init__.py +0 -0
- dbt/artifacts/schemas/freshness/v3/freshness.py +158 -0
- dbt/artifacts/schemas/manifest/__init__.py +2 -0
- dbt/artifacts/schemas/manifest/v12/__init__.py +0 -0
- dbt/artifacts/schemas/manifest/v12/manifest.py +211 -0
- dbt/artifacts/schemas/results.py +147 -0
- dbt/artifacts/schemas/run/__init__.py +2 -0
- dbt/artifacts/schemas/run/v5/__init__.py +0 -0
- dbt/artifacts/schemas/run/v5/run.py +184 -0
- dbt/artifacts/schemas/upgrades/__init__.py +4 -0
- dbt/artifacts/schemas/upgrades/upgrade_manifest.py +174 -0
- dbt/artifacts/schemas/upgrades/upgrade_manifest_dbt_version.py +2 -0
- dbt/artifacts/utils/validation.py +153 -0
- dbt/cli/__init__.py +1 -0
- dbt/cli/context.py +17 -0
- dbt/cli/exceptions.py +57 -0
- dbt/cli/flags.py +560 -0
- dbt/cli/main.py +2403 -0
- dbt/cli/option_types.py +121 -0
- dbt/cli/options.py +80 -0
- dbt/cli/params.py +844 -0
- dbt/cli/requires.py +490 -0
- dbt/cli/resolvers.py +50 -0
- dbt/cli/types.py +40 -0
- dbt/clients/__init__.py +0 -0
- dbt/clients/checked_load.py +83 -0
- dbt/clients/git.py +164 -0
- dbt/clients/jinja.py +206 -0
- dbt/clients/jinja_static.py +245 -0
- dbt/clients/registry.py +192 -0
- dbt/clients/yaml_helper.py +68 -0
- dbt/compilation.py +876 -0
- dbt/compute/__init__.py +14 -0
- dbt/compute/engines/__init__.py +12 -0
- dbt/compute/engines/spark_engine.cpython-311-darwin.so +0 -0
- dbt/compute/engines/spark_engine.py +642 -0
- dbt/compute/federated_executor.cpython-311-darwin.so +0 -0
- dbt/compute/federated_executor.py +1080 -0
- dbt/compute/filter_pushdown.cpython-311-darwin.so +0 -0
- dbt/compute/filter_pushdown.py +273 -0
- dbt/compute/jar_provisioning.cpython-311-darwin.so +0 -0
- dbt/compute/jar_provisioning.py +255 -0
- dbt/compute/java_compat.cpython-311-darwin.so +0 -0
- dbt/compute/java_compat.py +689 -0
- dbt/compute/jdbc_utils.cpython-311-darwin.so +0 -0
- dbt/compute/jdbc_utils.py +678 -0
- dbt/compute/metadata/__init__.py +40 -0
- dbt/compute/metadata/adapters_registry.cpython-311-darwin.so +0 -0
- dbt/compute/metadata/adapters_registry.py +370 -0
- dbt/compute/metadata/registry.cpython-311-darwin.so +0 -0
- dbt/compute/metadata/registry.py +674 -0
- dbt/compute/metadata/store.cpython-311-darwin.so +0 -0
- dbt/compute/metadata/store.py +1499 -0
- dbt/compute/smart_selector.cpython-311-darwin.so +0 -0
- dbt/compute/smart_selector.py +377 -0
- dbt/compute/strategies/__init__.py +55 -0
- dbt/compute/strategies/base.cpython-311-darwin.so +0 -0
- dbt/compute/strategies/base.py +165 -0
- dbt/compute/strategies/dataproc.cpython-311-darwin.so +0 -0
- dbt/compute/strategies/dataproc.py +207 -0
- dbt/compute/strategies/emr.cpython-311-darwin.so +0 -0
- dbt/compute/strategies/emr.py +203 -0
- dbt/compute/strategies/local.cpython-311-darwin.so +0 -0
- dbt/compute/strategies/local.py +443 -0
- dbt/compute/strategies/standalone.cpython-311-darwin.so +0 -0
- dbt/compute/strategies/standalone.py +262 -0
- dbt/config/__init__.py +4 -0
- dbt/config/catalogs.py +94 -0
- dbt/config/compute.cpython-311-darwin.so +0 -0
- dbt/config/compute.py +513 -0
- dbt/config/dvt_profile.cpython-311-darwin.so +0 -0
- dbt/config/dvt_profile.py +342 -0
- dbt/config/profile.py +422 -0
- dbt/config/project.py +873 -0
- dbt/config/project_utils.py +28 -0
- dbt/config/renderer.py +231 -0
- dbt/config/runtime.py +553 -0
- dbt/config/selectors.py +208 -0
- dbt/config/utils.py +77 -0
- dbt/constants.py +28 -0
- dbt/context/__init__.py +0 -0
- dbt/context/base.py +745 -0
- dbt/context/configured.py +135 -0
- dbt/context/context_config.py +382 -0
- dbt/context/docs.py +82 -0
- dbt/context/exceptions_jinja.py +178 -0
- dbt/context/macro_resolver.py +195 -0
- dbt/context/macros.py +171 -0
- dbt/context/manifest.py +72 -0
- dbt/context/providers.py +2249 -0
- dbt/context/query_header.py +13 -0
- dbt/context/secret.py +58 -0
- dbt/context/target.py +74 -0
- dbt/contracts/__init__.py +0 -0
- dbt/contracts/files.py +413 -0
- dbt/contracts/graph/__init__.py +0 -0
- dbt/contracts/graph/manifest.py +1904 -0
- dbt/contracts/graph/metrics.py +97 -0
- dbt/contracts/graph/model_config.py +70 -0
- dbt/contracts/graph/node_args.py +42 -0
- dbt/contracts/graph/nodes.py +1806 -0
- dbt/contracts/graph/semantic_manifest.py +232 -0
- dbt/contracts/graph/unparsed.py +811 -0
- dbt/contracts/project.py +417 -0
- dbt/contracts/results.py +53 -0
- dbt/contracts/selection.py +23 -0
- dbt/contracts/sql.py +85 -0
- dbt/contracts/state.py +68 -0
- dbt/contracts/util.py +46 -0
- dbt/deprecations.py +348 -0
- dbt/deps/__init__.py +0 -0
- dbt/deps/base.py +152 -0
- dbt/deps/git.py +195 -0
- dbt/deps/local.py +79 -0
- dbt/deps/registry.py +130 -0
- dbt/deps/resolver.py +149 -0
- dbt/deps/tarball.py +120 -0
- dbt/docs/source/_ext/dbt_click.py +119 -0
- dbt/docs/source/conf.py +32 -0
- dbt/env_vars.py +64 -0
- dbt/event_time/event_time.py +40 -0
- dbt/event_time/sample_window.py +60 -0
- dbt/events/__init__.py +15 -0
- dbt/events/base_types.py +36 -0
- dbt/events/core_types_pb2.py +2 -0
- dbt/events/logging.py +108 -0
- dbt/events/types.py +2516 -0
- dbt/exceptions.py +1486 -0
- dbt/flags.py +89 -0
- dbt/graph/__init__.py +11 -0
- dbt/graph/cli.py +249 -0
- dbt/graph/graph.py +172 -0
- dbt/graph/queue.py +214 -0
- dbt/graph/selector.py +374 -0
- dbt/graph/selector_methods.py +975 -0
- dbt/graph/selector_spec.py +222 -0
- dbt/graph/thread_pool.py +18 -0
- dbt/hooks.py +21 -0
- dbt/include/README.md +49 -0
- dbt/include/__init__.py +3 -0
- dbt/include/data/adapters_registry.duckdb +0 -0
- dbt/include/data/build_registry.py +242 -0
- dbt/include/data/csv/adapter_queries.csv +33 -0
- dbt/include/data/csv/syntax_rules.csv +9 -0
- dbt/include/data/csv/type_mappings_bigquery.csv +28 -0
- dbt/include/data/csv/type_mappings_databricks.csv +30 -0
- dbt/include/data/csv/type_mappings_mysql.csv +40 -0
- dbt/include/data/csv/type_mappings_oracle.csv +30 -0
- dbt/include/data/csv/type_mappings_postgres.csv +56 -0
- dbt/include/data/csv/type_mappings_redshift.csv +33 -0
- dbt/include/data/csv/type_mappings_snowflake.csv +38 -0
- dbt/include/data/csv/type_mappings_sqlserver.csv +35 -0
- dbt/include/starter_project/.gitignore +4 -0
- dbt/include/starter_project/README.md +15 -0
- dbt/include/starter_project/__init__.py +3 -0
- dbt/include/starter_project/analyses/.gitkeep +0 -0
- dbt/include/starter_project/dbt_project.yml +36 -0
- dbt/include/starter_project/macros/.gitkeep +0 -0
- dbt/include/starter_project/models/example/my_first_dbt_model.sql +27 -0
- dbt/include/starter_project/models/example/my_second_dbt_model.sql +6 -0
- dbt/include/starter_project/models/example/schema.yml +21 -0
- dbt/include/starter_project/seeds/.gitkeep +0 -0
- dbt/include/starter_project/snapshots/.gitkeep +0 -0
- dbt/include/starter_project/tests/.gitkeep +0 -0
- dbt/internal_deprecations.py +26 -0
- dbt/jsonschemas/__init__.py +3 -0
- dbt/jsonschemas/jsonschemas.py +309 -0
- dbt/jsonschemas/project/0.0.110.json +4717 -0
- dbt/jsonschemas/project/0.0.85.json +2015 -0
- dbt/jsonschemas/resources/0.0.110.json +2636 -0
- dbt/jsonschemas/resources/0.0.85.json +2536 -0
- dbt/jsonschemas/resources/latest.json +6773 -0
- dbt/links.py +4 -0
- dbt/materializations/__init__.py +0 -0
- dbt/materializations/incremental/__init__.py +0 -0
- dbt/materializations/incremental/microbatch.py +236 -0
- dbt/mp_context.py +8 -0
- dbt/node_types.py +37 -0
- dbt/parser/__init__.py +23 -0
- dbt/parser/analysis.py +21 -0
- dbt/parser/base.py +548 -0
- dbt/parser/common.py +266 -0
- dbt/parser/docs.py +52 -0
- dbt/parser/fixtures.py +51 -0
- dbt/parser/functions.py +30 -0
- dbt/parser/generic_test.py +100 -0
- dbt/parser/generic_test_builders.py +333 -0
- dbt/parser/hooks.py +118 -0
- dbt/parser/macros.py +137 -0
- dbt/parser/manifest.py +2204 -0
- dbt/parser/models.py +573 -0
- dbt/parser/partial.py +1178 -0
- dbt/parser/read_files.py +445 -0
- dbt/parser/schema_generic_tests.py +422 -0
- dbt/parser/schema_renderer.py +111 -0
- dbt/parser/schema_yaml_readers.py +935 -0
- dbt/parser/schemas.py +1466 -0
- dbt/parser/search.py +149 -0
- dbt/parser/seeds.py +28 -0
- dbt/parser/singular_test.py +20 -0
- dbt/parser/snapshots.py +44 -0
- dbt/parser/sources.py +558 -0
- dbt/parser/sql.py +62 -0
- dbt/parser/unit_tests.py +621 -0
- dbt/plugins/__init__.py +20 -0
- dbt/plugins/contracts.py +9 -0
- dbt/plugins/exceptions.py +2 -0
- dbt/plugins/manager.py +163 -0
- dbt/plugins/manifest.py +21 -0
- dbt/profiler.py +20 -0
- dbt/py.typed +1 -0
- dbt/query_analyzer.cpython-311-darwin.so +0 -0
- dbt/query_analyzer.py +410 -0
- dbt/runners/__init__.py +2 -0
- dbt/runners/exposure_runner.py +7 -0
- dbt/runners/no_op_runner.py +45 -0
- dbt/runners/saved_query_runner.py +7 -0
- dbt/selected_resources.py +8 -0
- dbt/task/__init__.py +0 -0
- dbt/task/base.py +503 -0
- dbt/task/build.py +197 -0
- dbt/task/clean.py +56 -0
- dbt/task/clone.py +161 -0
- dbt/task/compile.py +150 -0
- dbt/task/compute.cpython-311-darwin.so +0 -0
- dbt/task/compute.py +458 -0
- dbt/task/debug.py +505 -0
- dbt/task/deps.py +280 -0
- dbt/task/docs/__init__.py +3 -0
- dbt/task/docs/api/__init__.py +23 -0
- dbt/task/docs/api/catalog.cpython-311-darwin.so +0 -0
- dbt/task/docs/api/catalog.py +204 -0
- dbt/task/docs/api/lineage.cpython-311-darwin.so +0 -0
- dbt/task/docs/api/lineage.py +234 -0
- dbt/task/docs/api/profile.cpython-311-darwin.so +0 -0
- dbt/task/docs/api/profile.py +204 -0
- dbt/task/docs/api/spark.cpython-311-darwin.so +0 -0
- dbt/task/docs/api/spark.py +186 -0
- dbt/task/docs/generate.py +947 -0
- dbt/task/docs/index.html +250 -0
- dbt/task/docs/serve.cpython-311-darwin.so +0 -0
- dbt/task/docs/serve.py +174 -0
- dbt/task/dvt_output.py +362 -0
- dbt/task/dvt_run.py +204 -0
- dbt/task/freshness.py +322 -0
- dbt/task/function.py +121 -0
- dbt/task/group_lookup.py +46 -0
- dbt/task/init.cpython-311-darwin.so +0 -0
- dbt/task/init.py +604 -0
- dbt/task/java.cpython-311-darwin.so +0 -0
- dbt/task/java.py +316 -0
- dbt/task/list.py +236 -0
- dbt/task/metadata.cpython-311-darwin.so +0 -0
- dbt/task/metadata.py +804 -0
- dbt/task/printer.py +175 -0
- dbt/task/profile.cpython-311-darwin.so +0 -0
- dbt/task/profile.py +1307 -0
- dbt/task/profile_serve.py +615 -0
- dbt/task/retract.py +438 -0
- dbt/task/retry.py +175 -0
- dbt/task/run.py +1387 -0
- dbt/task/run_operation.py +141 -0
- dbt/task/runnable.py +758 -0
- dbt/task/seed.py +103 -0
- dbt/task/show.py +149 -0
- dbt/task/snapshot.py +56 -0
- dbt/task/spark.cpython-311-darwin.so +0 -0
- dbt/task/spark.py +414 -0
- dbt/task/sql.py +110 -0
- dbt/task/target_sync.cpython-311-darwin.so +0 -0
- dbt/task/target_sync.py +766 -0
- dbt/task/test.py +464 -0
- dbt/tests/fixtures/__init__.py +1 -0
- dbt/tests/fixtures/project.py +620 -0
- dbt/tests/util.py +651 -0
- dbt/tracking.py +529 -0
- dbt/utils/__init__.py +3 -0
- dbt/utils/artifact_upload.py +151 -0
- dbt/utils/utils.py +408 -0
- dbt/version.py +270 -0
- dvt_cli/__init__.py +72 -0
- dvt_core-0.58.6.dist-info/METADATA +288 -0
- dvt_core-0.58.6.dist-info/RECORD +324 -0
- dvt_core-0.58.6.dist-info/WHEEL +5 -0
- dvt_core-0.58.6.dist-info/entry_points.txt +2 -0
- dvt_core-0.58.6.dist-info/top_level.txt +2 -0
dbt/task/dvt_run.py
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
# =============================================================================
|
|
2
|
+
# DVT Run Task with Rich UI
|
|
3
|
+
# =============================================================================
|
|
4
|
+
# Wrapper around standard RunTask that adds Rich progress display.
|
|
5
|
+
#
|
|
6
|
+
# DVT v0.58.0: Enhanced CLI output with Rich library
|
|
7
|
+
#
|
|
8
|
+
# IMPORTANT: This wrapper does NOT modify core dbt execution logic.
|
|
9
|
+
# All DVT compute rules are enforced in run.py's ModelRunner.execute().
|
|
10
|
+
#
|
|
11
|
+
# DVT Compute Rules (implemented in run.py):
|
|
12
|
+
# 1. Pushdown Preference: Same-target -> Adapter pushdown (no Spark)
|
|
13
|
+
# 2. Federation Path: Cross-target -> Spark execution
|
|
14
|
+
# 3. Compute Hierarchy: default < model config < CLI --target-compute
|
|
15
|
+
# 4. Target Hierarchy: default < model config < CLI --target
|
|
16
|
+
#
|
|
17
|
+
# =============================================================================
|
|
18
|
+
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
import sys
|
|
22
|
+
import time
|
|
23
|
+
import threading
|
|
24
|
+
from typing import Any, Dict, List, Optional, AbstractSet
|
|
25
|
+
|
|
26
|
+
from dbt.artifacts.schemas.results import NodeStatus
|
|
27
|
+
from dbt.artifacts.schemas.run import RunExecutionResult, RunResult
|
|
28
|
+
from dbt.cli.flags import Flags
|
|
29
|
+
from dbt.config import RuntimeConfig
|
|
30
|
+
from dbt.contracts.graph.manifest import Manifest
|
|
31
|
+
from dbt.task.run import RunTask, ModelRunner
|
|
32
|
+
from dbt.task.dvt_output import DVTProgressDisplay, HAS_RICH
|
|
33
|
+
|
|
34
|
+
# Lock for thread-safe Rich console updates
|
|
35
|
+
_console_lock = threading.Lock()
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
class DVTRunTask(RunTask):
    """Run task variant that layers a Rich progress UI on top of RunTask.

    Execution semantics are inherited unchanged from RunTask; this subclass
    only observes lifecycle hooks (before_run / _handle_result / after_run)
    to drive a DVTProgressDisplay. If Rich is unavailable, quiet mode is on,
    or any display call raises, output silently falls back to the standard
    dbt event logging.
    """

    def __init__(
        self,
        args: Flags,
        config: RuntimeConfig,
        manifest: Manifest,
        batch_map: Optional[Dict[str, Any]] = None,
    ) -> None:
        super().__init__(args, config, manifest, batch_map)
        # Progress display is created lazily in before_run().
        self._display: Optional[DVTProgressDisplay] = None
        self._use_rich_output = HAS_RICH and not getattr(args, 'QUIET', False)
        # Bookkeeping for the end-of-run summary.
        self._model_start_times: Dict[str, float] = {}
        self._execution_paths: Dict[str, str] = {}  # unique_id -> PUSHDOWN / FEDERATION

    def _get_target_info(self) -> str:
        """Name of the active target: CLI flag wins, then profile, then 'default'."""
        return getattr(self.config.args, 'TARGET', None) or self.config.target_name or "default"

    def _get_compute_info(self) -> str:
        """Name of the active compute engine: CLI flag or 'auto'."""
        return getattr(self.config.args, 'TARGET_COMPUTE', None) or "auto"

    def before_run(self, adapter, selected_uids: AbstractSet[str]):
        """Run the standard pre-run work, then bring up the Rich display."""
        outcome = super().before_run(adapter, selected_uids)

        if not (self._use_rich_output and self.num_nodes > 0):
            return outcome

        try:
            display = DVTProgressDisplay(
                title="DVT Run",
                subtitle="Executing models with DVT compute rules",
            )
            display.print_header(
                target=self._get_target_info(),
                compute=self._get_compute_info(),
            )
            display.start_progress(
                total=self.num_nodes,
                description="Running models..."
            )
            self._display = display
        except Exception:
            # Any Rich failure disables the fancy UI for the rest of the run.
            self._display = None
            self._use_rich_output = False

        return outcome

    def _handle_result(self, result) -> None:
        """Advance the progress bar for one finished node.

        Per-model log lines are still emitted by the parent class via dbt
        events, so only the bar and the execution-path stats are updated here.
        """
        super()._handle_result(result)

        if not (self._display and result.node):
            return

        try:
            # Map dbt node statuses onto the display's stat buckets.
            bucket = {
                NodeStatus.Error: "error",
                NodeStatus.Fail: "error",
                NodeStatus.Skipped: "skip",
                NodeStatus.Warn: "warn",
            }.get(result.status, "success")

            # Record whether the model ran via Spark federation or adapter pushdown.
            if "Federated" in (result.message or ""):
                self._execution_paths[result.node.unique_id] = "FEDERATION"
            elif result.status in (NodeStatus.Success, NodeStatus.Pass):
                self._execution_paths[result.node.unique_id] = "PUSHDOWN"

            # Rich console updates must be serialized across runner threads.
            with _console_lock:
                self._display.advance(bucket)
        except Exception:
            # Display problems must never break the run.
            pass

    def after_run(self, adapter, results) -> None:
        """Tear down the progress display and print the summary, then defer to parent."""
        if self._display:
            try:
                self._display.stop_progress()

                paths = list(self._execution_paths.values())
                extra: Dict[str, str] = {}
                n_pushdown = paths.count("PUSHDOWN")
                n_federation = paths.count("FEDERATION")
                if n_pushdown > 0:
                    extra["Pushdown"] = f"{n_pushdown} models"
                if n_federation > 0:
                    extra["Federation"] = f"{n_federation} models"

                self._display.print_summary(additional_info=extra)
            except Exception:
                pass

        # Parent handles end-of-run hooks.
        super().after_run(adapter, results)

    def task_end_messages(self, results) -> None:
        """Suppress the default end-of-task messages when Rich printed a summary."""
        if self._display:
            return
        super().task_end_messages(results)
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def create_dvt_run_task(
    args: Flags,
    config: RuntimeConfig,
    manifest: Manifest,
    batch_map: Optional[Dict[str, Any]] = None,
) -> RunTask:
    """Build the run task best suited to the current environment.

    Returns a DVTRunTask (Rich-enabled UI) when the Rich library is
    importable and quiet mode was not requested; otherwise a plain RunTask.

    Args:
        args: CLI flags
        config: Runtime configuration
        manifest: Project manifest
        batch_map: Optional batch map for retry

    Returns:
        RunTask instance (DVTRunTask or standard RunTask)
    """
    rich_wanted = HAS_RICH and not getattr(args, 'QUIET', False)
    task_cls = DVTRunTask if rich_wanted else RunTask
    return task_cls(args, config, manifest, batch_map)
|
dbt/task/freshness.py
ADDED
|
@@ -0,0 +1,322 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import threading
|
|
3
|
+
import time
|
|
4
|
+
from typing import AbstractSet, Dict, List, Optional, Type
|
|
5
|
+
|
|
6
|
+
from dbt import deprecations
|
|
7
|
+
from dbt.adapters.base import BaseAdapter
|
|
8
|
+
from dbt.adapters.base.impl import FreshnessResponse
|
|
9
|
+
from dbt.adapters.base.relation import BaseRelation
|
|
10
|
+
from dbt.adapters.capability import Capability
|
|
11
|
+
from dbt.adapters.contracts.connection import AdapterResponse
|
|
12
|
+
from dbt.artifacts.schemas.freshness import (
|
|
13
|
+
FreshnessResult,
|
|
14
|
+
FreshnessStatus,
|
|
15
|
+
PartialSourceFreshnessResult,
|
|
16
|
+
SourceFreshnessResult,
|
|
17
|
+
)
|
|
18
|
+
from dbt.clients import jinja
|
|
19
|
+
from dbt.constants import SOURCE_RESULT_FILE_NAME
|
|
20
|
+
from dbt.context.providers import RuntimeProvider, SourceContext
|
|
21
|
+
from dbt.contracts.graph.manifest import Manifest
|
|
22
|
+
from dbt.contracts.graph.nodes import HookNode, SourceDefinition
|
|
23
|
+
from dbt.contracts.results import RunStatus
|
|
24
|
+
from dbt.events.types import FreshnessCheckComplete, LogFreshnessResult, LogStartLine
|
|
25
|
+
from dbt.graph import ResourceTypeSelector
|
|
26
|
+
from dbt.node_types import NodeType, RunHookType
|
|
27
|
+
from dbt_common.events.base_types import EventLevel
|
|
28
|
+
from dbt_common.events.functions import fire_event
|
|
29
|
+
from dbt_common.events.types import Note
|
|
30
|
+
from dbt_common.exceptions import DbtInternalError, DbtRuntimeError
|
|
31
|
+
|
|
32
|
+
from .base import BaseRunner
|
|
33
|
+
from .printer import print_run_result_error
|
|
34
|
+
from .run import RunTask
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class FreshnessRunner(BaseRunner):
    """Runner that computes freshness for a single source node.

    Freshness can come from three places, tried in order in execute():
    a user-supplied `loaded_at_query`, a `loaded_at_field` column, or
    adapter metadata (table last-modified), optionally pre-warmed via a
    batch cache shared by the owning task.
    """

    def __init__(self, config, adapter, node, node_index, num_nodes) -> None:
        super().__init__(config, adapter, node, node_index, num_nodes)
        # Per-relation cache of metadata-based freshness, injected by the task.
        # NOTE(review): annotated as FreshnessResult but execute() unpacks
        # entries with **freshness like a FreshnessResponse mapping — confirm
        # the actual cached value type against populate_metadata_freshness_cache.
        self._metadata_freshness_cache: Dict[BaseRelation, FreshnessResult] = {}

    def set_metadata_freshness_cache(
        self, metadata_freshness_cache: Dict[BaseRelation, FreshnessResult]
    ) -> None:
        """Replace the metadata-freshness cache with the task-wide one."""
        self._metadata_freshness_cache = metadata_freshness_cache

    def on_skip(self):
        # Freshness checks have no upstream dependencies that could skip them.
        raise DbtRuntimeError("Freshness: nodes cannot be skipped!")

    def before_execute(self) -> None:
        """Fire the standard start-of-node log line for this source."""
        description = "freshness of {0.source_name}.{0.name}".format(self.node)
        fire_event(
            LogStartLine(
                description=description,
                index=self.node_index,
                total=self.num_nodes,
                node_info=self.node.node_info,
            )
        )

    def after_execute(self, result) -> None:
        """Fire the freshness result log line at a level derived from the status."""
        # A full SourceFreshnessResult carries the node; a partial/error result
        # carries the names directly.
        if hasattr(result, "node"):
            source_name = result.node.source_name
            table_name = result.node.name
        else:
            source_name = result.source_name
            table_name = result.table_name
        level = LogFreshnessResult.status_to_level(str(result.status))
        fire_event(
            LogFreshnessResult(
                status=result.status,
                source_name=source_name,
                table_name=table_name,
                index=self.node_index,
                total=self.num_nodes,
                execution_time=result.execution_time,
                node_info=self.node.node_info,
            ),
            level=level,
        )

    def error_result(self, node, message, start_time, timing_info):
        """Build a RuntimeErr partial result for a node that raised."""
        return self._build_run_result(
            node=node,
            start_time=start_time,
            status=FreshnessStatus.RuntimeErr,
            timing_info=timing_info,
            message=message,
        )

    def _build_run_result(self, node, start_time, status, timing_info, message):
        """Assemble a PartialSourceFreshnessResult with wall-clock execution time."""
        execution_time = time.time() - start_time
        thread_id = threading.current_thread().name
        return PartialSourceFreshnessResult(
            status=status,
            thread_id=thread_id,
            execution_time=execution_time,
            timing=timing_info,
            message=message,
            node=node,
            adapter_response={},
            failures=None,
        )

    def from_run_result(self, result, start_time, timing_info):
        """Stamp timing info onto an already-built result and return it."""
        result.execution_time = time.time() - start_time
        result.timing.extend(timing_info)
        return result

    def execute(self, compiled_node, manifest):
        """Calculate freshness for one source and return a SourceFreshnessResult.

        Raises:
            DbtRuntimeError: when no loaded_at mechanism is configured and the
                adapter cannot do metadata-based freshness.
        """
        relation = self.adapter.Relation.create_from(self.config, compiled_node)
        # given a Source, calculate its freshness.
        with self.adapter.connection_named(compiled_node.unique_id, compiled_node):
            # Freshness queries must not run inside a leftover transaction.
            self.adapter.clear_transaction()
            adapter_response: Optional[AdapterResponse] = None
            freshness: Optional[FreshnessResponse] = None

            if compiled_node.loaded_at_query is not None:
                # within the context user can have access to `this`, `source_node`(`model` will point to the same thing), etc
                compiled_code = jinja.get_rendered(
                    compiled_node.loaded_at_query,
                    SourceContext(
                        compiled_node, self.config, manifest, RuntimeProvider(), None
                    ).to_dict(),
                    compiled_node,
                )
                adapter_response, freshness = self.adapter.calculate_freshness_from_custom_sql(
                    relation,
                    compiled_code,
                    macro_resolver=manifest,
                )
                status = compiled_node.freshness.status(freshness["age"])
            elif compiled_node.loaded_at_field is not None:
                adapter_response, freshness = self.adapter.calculate_freshness(
                    relation,
                    compiled_node.loaded_at_field,
                    compiled_node.freshness.filter,
                    macro_resolver=manifest,
                )

                status = compiled_node.freshness.status(freshness["age"])
            elif self.adapter.supports(Capability.TableLastModifiedMetadata):
                # Filters only make sense for query-based checks; warn and ignore.
                if compiled_node.freshness.filter is not None:
                    fire_event(
                        Note(
                            msg=f"A filter cannot be applied to a metadata freshness check on source '{compiled_node.name}'."
                        ),
                        EventLevel.WARN,
                    )

                metadata_source = self.adapter.Relation.create_from(self.config, compiled_node)
                if metadata_source in self._metadata_freshness_cache:
                    # Cache hit: adapter_response stays None on this path.
                    freshness = self._metadata_freshness_cache[metadata_source]
                else:
                    adapter_response, freshness = self.adapter.calculate_freshness_from_metadata(
                        relation,
                        macro_resolver=manifest,
                    )

                status = compiled_node.freshness.status(freshness["age"])
            else:
                raise DbtRuntimeError(
                    f"Could not compute freshness for source {compiled_node.name}: no 'loaded_at_field' provided and {self.adapter.type()} adapter does not support metadata-based freshness checks."
                )
            # adapter_response was not returned in previous versions, so this will be None
            # we cannot call to_dict() on NoneType
            if adapter_response:
                adapter_response = adapter_response.to_dict(omit_none=True)

            return SourceFreshnessResult(
                node=compiled_node,
                status=status,
                thread_id=threading.current_thread().name,
                timing=[],
                execution_time=0,
                message=None,
                adapter_response=adapter_response or {},
                failures=None,
                **freshness,
            )

    def compile(self, manifest: Manifest):
        """Sources need no compilation; validate the node type and return it."""
        if self.node.resource_type != NodeType.Source:
            # should be unreachable...
            raise DbtRuntimeError("freshness runner: got a non-Source")
        # we don't do anything interesting when we compile a source node
        return self.node
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
class FreshnessSelector(ResourceTypeSelector):
    """Selector that matches only source nodes with a freshness configuration."""

    def node_is_match(self, node):
        """Return whether *node* is a freshness-enabled SourceDefinition."""
        return (
            super().node_is_match(node)
            and isinstance(node, SourceDefinition)
            and node.has_freshness
        )
|
|
197
|
+
|
|
198
|
+
|
|
199
|
+
class FreshnessTask(RunTask):
|
|
200
|
+
def __init__(self, args, config, manifest) -> None:
|
|
201
|
+
super().__init__(args, config, manifest)
|
|
202
|
+
|
|
203
|
+
if self.args.output:
|
|
204
|
+
deprecations.warn(
|
|
205
|
+
"custom-output-path-in-source-freshness-deprecation", path=str(self.args.output)
|
|
206
|
+
)
|
|
207
|
+
|
|
208
|
+
self._metadata_freshness_cache: Dict[BaseRelation, FreshnessResult] = {}
|
|
209
|
+
|
|
210
|
+
def result_path(self) -> str:
|
|
211
|
+
if self.args.output:
|
|
212
|
+
return os.path.realpath(self.args.output)
|
|
213
|
+
else:
|
|
214
|
+
return os.path.join(self.config.project_target_path, SOURCE_RESULT_FILE_NAME)
|
|
215
|
+
|
|
216
|
+
    def raise_on_first_error(self) -> bool:
        # Keep checking the remaining sources even if one freshness check fails.
        return False
|
|
218
|
+
|
|
219
|
+
def get_node_selector(self):
|
|
220
|
+
if self.manifest is None or self.graph is None:
|
|
221
|
+
raise DbtInternalError("manifest and graph must be set to get perform node selection")
|
|
222
|
+
return FreshnessSelector(
|
|
223
|
+
graph=self.graph,
|
|
224
|
+
manifest=self.manifest,
|
|
225
|
+
previous_state=self.previous_state,
|
|
226
|
+
resource_types=[NodeType.Source],
|
|
227
|
+
)
|
|
228
|
+
|
|
229
|
+
def before_run(self, adapter: BaseAdapter, selected_uids: AbstractSet[str]) -> RunStatus:
|
|
230
|
+
populate_metadata_freshness_cache_status = RunStatus.Success
|
|
231
|
+
|
|
232
|
+
before_run_status = super().before_run(adapter, selected_uids)
|
|
233
|
+
|
|
234
|
+
if before_run_status == RunStatus.Success and adapter.supports(
|
|
235
|
+
Capability.TableLastModifiedMetadataBatch
|
|
236
|
+
):
|
|
237
|
+
populate_metadata_freshness_cache_status = self.populate_metadata_freshness_cache(
|
|
238
|
+
adapter, selected_uids
|
|
239
|
+
)
|
|
240
|
+
|
|
241
|
+
if (
|
|
242
|
+
before_run_status == RunStatus.Success
|
|
243
|
+
and populate_metadata_freshness_cache_status == RunStatus.Success
|
|
244
|
+
):
|
|
245
|
+
return RunStatus.Success
|
|
246
|
+
else:
|
|
247
|
+
return RunStatus.Error
|
|
248
|
+
|
|
249
|
+
def get_runner(self, node) -> BaseRunner:
|
|
250
|
+
freshness_runner = super().get_runner(node)
|
|
251
|
+
assert isinstance(freshness_runner, FreshnessRunner)
|
|
252
|
+
freshness_runner.set_metadata_freshness_cache(self._metadata_freshness_cache)
|
|
253
|
+
return freshness_runner
|
|
254
|
+
|
|
255
|
+
def get_runner_type(self, _) -> Optional[Type[BaseRunner]]:
|
|
256
|
+
return FreshnessRunner
|
|
257
|
+
|
|
258
|
+
def get_result(self, results, elapsed_time, generated_at):
|
|
259
|
+
return FreshnessResult.from_node_results(
|
|
260
|
+
elapsed_time=elapsed_time, generated_at=generated_at, results=results
|
|
261
|
+
)
|
|
262
|
+
|
|
263
|
+
def task_end_messages(self, results) -> None:
|
|
264
|
+
for result in results:
|
|
265
|
+
if result.status in (
|
|
266
|
+
FreshnessStatus.Error,
|
|
267
|
+
FreshnessStatus.RuntimeErr,
|
|
268
|
+
RunStatus.Error,
|
|
269
|
+
):
|
|
270
|
+
print_run_result_error(result)
|
|
271
|
+
|
|
272
|
+
fire_event(FreshnessCheckComplete())
|
|
273
|
+
|
|
274
|
+
def get_hooks_by_type(self, hook_type: RunHookType) -> List[HookNode]:
|
|
275
|
+
hooks = super().get_hooks_by_type(hook_type)
|
|
276
|
+
if self.args.source_freshness_run_project_hooks:
|
|
277
|
+
return hooks
|
|
278
|
+
else:
|
|
279
|
+
if hooks:
|
|
280
|
+
deprecations.warn("source-freshness-project-hooks")
|
|
281
|
+
return []
|
|
282
|
+
|
|
283
|
+
def populate_metadata_freshness_cache(
|
|
284
|
+
self, adapter, selected_uids: AbstractSet[str]
|
|
285
|
+
) -> RunStatus:
|
|
286
|
+
if self.manifest is None:
|
|
287
|
+
raise DbtInternalError("Manifest must be set to populate metadata freshness cache")
|
|
288
|
+
|
|
289
|
+
batch_metadata_sources: List[BaseRelation] = []
|
|
290
|
+
for selected_source_uid in list(selected_uids):
|
|
291
|
+
source = self.manifest.sources.get(selected_source_uid)
|
|
292
|
+
if source and source.loaded_at_field is None:
|
|
293
|
+
metadata_source = adapter.Relation.create_from(self.config, source)
|
|
294
|
+
batch_metadata_sources.append(metadata_source)
|
|
295
|
+
|
|
296
|
+
fire_event(
|
|
297
|
+
Note(
|
|
298
|
+
msg=f"Pulling freshness from warehouse metadata tables for {len(batch_metadata_sources)} sources"
|
|
299
|
+
),
|
|
300
|
+
EventLevel.INFO,
|
|
301
|
+
)
|
|
302
|
+
|
|
303
|
+
try:
|
|
304
|
+
_, metadata_freshness_results = adapter.calculate_freshness_from_metadata_batch(
|
|
305
|
+
batch_metadata_sources
|
|
306
|
+
)
|
|
307
|
+
self._metadata_freshness_cache.update(metadata_freshness_results)
|
|
308
|
+
return RunStatus.Success
|
|
309
|
+
except Exception as e:
|
|
310
|
+
# This error handling is intentionally very coarse.
|
|
311
|
+
# If anything goes wrong during batch metadata calculation, we can safely
|
|
312
|
+
# leave _metadata_freshness_cache unpopulated.
|
|
313
|
+
# Downstream, this will be gracefully handled as a cache miss and non-batch
|
|
314
|
+
# metadata-based freshness will still be performed on a source-by-source basis.
|
|
315
|
+
fire_event(
|
|
316
|
+
Note(msg=f"Metadata freshness could not be computed in batch: {e}"),
|
|
317
|
+
EventLevel.WARN,
|
|
318
|
+
)
|
|
319
|
+
return RunStatus.Error
|
|
320
|
+
|
|
321
|
+
def get_freshness_metadata_cache(self) -> Dict[BaseRelation, FreshnessResult]:
|
|
322
|
+
return self._metadata_freshness_cache
|
dbt/task/function.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
import threading
|
|
2
|
+
from typing import Any, Dict
|
|
3
|
+
|
|
4
|
+
from dbt.adapters.exceptions import MissingMaterializationError
|
|
5
|
+
from dbt.artifacts.schemas.results import NodeStatus, RunStatus
|
|
6
|
+
from dbt.artifacts.schemas.run import RunResult
|
|
7
|
+
from dbt.clients.jinja import MacroGenerator
|
|
8
|
+
from dbt.context.providers import generate_runtime_function_context
|
|
9
|
+
from dbt.contracts.graph.manifest import Manifest
|
|
10
|
+
from dbt.contracts.graph.nodes import FunctionNode
|
|
11
|
+
from dbt.events.types import LogFunctionResult, LogStartLine
|
|
12
|
+
from dbt.task import group_lookup
|
|
13
|
+
from dbt.task.compile import CompileRunner
|
|
14
|
+
from dbt_common.clients.jinja import MacroProtocol
|
|
15
|
+
from dbt_common.events.base_types import EventLevel
|
|
16
|
+
from dbt_common.events.functions import fire_event
|
|
17
|
+
from dbt_common.exceptions import DbtValidationError
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class FunctionRunner(CompileRunner):
    """Runner that materializes a user-defined function (FunctionNode)."""

    def __init__(self, config, adapter, node, node_index: int, num_nodes: int) -> None:
        super().__init__(config, adapter, node, node_index, num_nodes)

        # Narrow the node type so the rest of the class gets proper hints.
        assert isinstance(node, FunctionNode)
        self.node = node

    def describe_node(self) -> str:
        """One-line human-readable description of this node."""
        return f"function {self.node.name}"  # TODO: add more info, similar to SeedRunner.describe_node

    def before_execute(self) -> None:
        """Fire the start-of-execution log line for this node."""
        fire_event(
            LogStartLine(
                description=self.describe_node(),
                index=self.node_index,
                total=self.num_nodes,
                node_info=self.node.node_info,
            )
        )

    def _get_materialization_macro(
        self, compiled_node: FunctionNode, manifest: Manifest
    ) -> MacroProtocol:
        """Look up the materialization macro for this node.

        Raises:
            MissingMaterializationError: if no macro matches the node's
                materialization for the current adapter type.
        """
        materialization_macro = manifest.find_materialization_macro_by_name(
            self.config.project_name, compiled_node.get_materialization(), self.adapter.type()
        )
        if materialization_macro is None:
            raise MissingMaterializationError(
                materialization=compiled_node.get_materialization(),
                adapter_type=self.adapter.type(),
            )

        return materialization_macro

    def _check_lang_supported(
        self, compiled_node: FunctionNode, materialization_macro: MacroProtocol
    ) -> None:
        """Validate that the node's language is accepted by the materialization.

        Raises:
            DbtValidationError: if the macro declares supported languages and
                the node's language is not among them.

        NOTE: MacroProtocol does not declare `supported_languages`, but concrete
        Macro objects do, hence the hasattr guard and the type-ignores below.
        This mirrors the logic in run.py; ideally the two would share a single
        implementation.
        """
        # Bug fix: the original evaluated
        # `compiled_node.language in materialization_macro.supported_languages`
        # unconditionally, so a macro without `supported_languages` raised
        # AttributeError and the hasattr guard was dead code. Access the
        # attribute only when it exists.
        if hasattr(materialization_macro, "supported_languages"):
            if compiled_node.language not in materialization_macro.supported_languages:  # type: ignore
                str_langs = [str(lang) for lang in materialization_macro.supported_languages]  # type: ignore
                raise DbtValidationError(
                    f'Materialization "{materialization_macro.name}" only supports languages {str_langs}; '
                    f'got "{compiled_node.language}"'
                )

    def build_result(self, compiled_node: FunctionNode, context: Dict[str, Any]) -> RunResult:
        """Build a successful RunResult from the materialization's main query."""
        loaded_result = context["load_result"]("main")

        return RunResult(
            node=compiled_node,
            status=RunStatus.Success,
            timing=[],
            thread_id=threading.current_thread().name,
            # This gets set later in `from_run_result` called by `BaseRunner.safe_run`
            execution_time=0.0,
            message=str(loaded_result.response),
            adapter_response=loaded_result.response.to_dict(omit_none=True),
            failures=loaded_result.get("failures"),
            batch_results=None,
        )

    def execute(self, compiled_node: FunctionNode, manifest: Manifest) -> RunResult:
        """Run the materialization macro for the node and build its result."""
        materialization_macro = self._get_materialization_macro(compiled_node, manifest)
        self._check_lang_supported(compiled_node, materialization_macro)
        context = generate_runtime_function_context(compiled_node, self.config, manifest)

        # Invoking the macro performs the actual materialization side effects.
        MacroGenerator(materialization_macro, context=context)()

        return self.build_result(compiled_node, context)

    def after_execute(self, result: RunResult) -> None:
        self.print_result_line(result)

    # def compile() defined on CompileRunner

    def print_result_line(self, result: RunResult) -> None:
        """Fire the per-node result log line at the appropriate level."""
        node = result.node
        assert isinstance(node, FunctionNode)

        group = group_lookup.get(node.unique_id)
        level = EventLevel.ERROR if result.status == NodeStatus.Error else EventLevel.INFO
        fire_event(
            LogFunctionResult(
                description=self.describe_node(),
                status=result.status,
                index=self.node_index,
                total=self.num_nodes,
                execution_time=result.execution_time,
                node_info=self.node.node_info,
                group=group,
            ),
            level=level,
        )
|
dbt/task/group_lookup.py
ADDED
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
from typing import AbstractSet, Dict, Optional, Union
|
|
2
|
+
|
|
3
|
+
from dbt.contracts.graph.manifest import Manifest
|
|
4
|
+
from dbt.contracts.graph.nodes import Group
|
|
5
|
+
|
|
6
|
+
# Module-level lookup state: node unique_id -> group name, and group name ->
# Group object (only for groups that contain at least one selected node).
_node_id_to_group_name_map: Dict[str, str] = {}
_group_name_to_group_map: Dict[str, Group] = {}


def init(manifest: Optional[Manifest], selected_ids: AbstractSet[str]) -> None:
    """Populate the module-level group lookup tables for the selected nodes."""
    if not manifest:
        return

    if not manifest.groups:
        return

    if not hasattr(manifest, "group_map"):
        manifest.build_group_map()

    groups_by_name = {group.name: group for group in manifest.groups.values()}

    for group_name, node_ids in manifest.group_map.items():
        for node_id in node_ids:
            # Only nodes selected for this invocation go into the lookup.
            if node_id not in selected_ids:
                continue
            _node_id_to_group_name_map[node_id] = group_name

            # Register the group the first time a selected member is seen.
            if group_name not in _group_name_to_group_map:
                _group_name_to_group_map[group_name] = groups_by_name[group_name]


def get(node_id: str) -> Optional[Dict[str, Union[str, Dict[str, str]]]]:
    """Return the logging dict for the node's group, or None if unknown."""
    group_name = _node_id_to_group_name_map.get(node_id)
    if group_name is None:
        return None

    group = _group_name_to_group_map.get(group_name)
    return group.to_logging_dict() if group is not None else None
|
|
Binary file
|