dvt-core 1.11.0b4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of dvt-core might be problematic. Click here for more details.
- dvt/__init__.py +7 -0
- dvt/_pydantic_shim.py +26 -0
- dvt/adapters/__init__.py +16 -0
- dvt/adapters/multi_adapter_manager.py +268 -0
- dvt/artifacts/__init__.py +0 -0
- dvt/artifacts/exceptions/__init__.py +1 -0
- dvt/artifacts/exceptions/schemas.py +31 -0
- dvt/artifacts/resources/__init__.py +116 -0
- dvt/artifacts/resources/base.py +68 -0
- dvt/artifacts/resources/types.py +93 -0
- dvt/artifacts/resources/v1/analysis.py +10 -0
- dvt/artifacts/resources/v1/catalog.py +23 -0
- dvt/artifacts/resources/v1/components.py +275 -0
- dvt/artifacts/resources/v1/config.py +282 -0
- dvt/artifacts/resources/v1/documentation.py +11 -0
- dvt/artifacts/resources/v1/exposure.py +52 -0
- dvt/artifacts/resources/v1/function.py +53 -0
- dvt/artifacts/resources/v1/generic_test.py +32 -0
- dvt/artifacts/resources/v1/group.py +22 -0
- dvt/artifacts/resources/v1/hook.py +11 -0
- dvt/artifacts/resources/v1/macro.py +30 -0
- dvt/artifacts/resources/v1/metric.py +173 -0
- dvt/artifacts/resources/v1/model.py +146 -0
- dvt/artifacts/resources/v1/owner.py +10 -0
- dvt/artifacts/resources/v1/saved_query.py +112 -0
- dvt/artifacts/resources/v1/seed.py +42 -0
- dvt/artifacts/resources/v1/semantic_layer_components.py +72 -0
- dvt/artifacts/resources/v1/semantic_model.py +315 -0
- dvt/artifacts/resources/v1/singular_test.py +14 -0
- dvt/artifacts/resources/v1/snapshot.py +92 -0
- dvt/artifacts/resources/v1/source_definition.py +85 -0
- dvt/artifacts/resources/v1/sql_operation.py +10 -0
- dvt/artifacts/resources/v1/unit_test_definition.py +78 -0
- dvt/artifacts/schemas/__init__.py +0 -0
- dvt/artifacts/schemas/base.py +191 -0
- dvt/artifacts/schemas/batch_results.py +24 -0
- dvt/artifacts/schemas/catalog/__init__.py +12 -0
- dvt/artifacts/schemas/catalog/v1/__init__.py +0 -0
- dvt/artifacts/schemas/catalog/v1/catalog.py +60 -0
- dvt/artifacts/schemas/freshness/__init__.py +1 -0
- dvt/artifacts/schemas/freshness/v3/__init__.py +0 -0
- dvt/artifacts/schemas/freshness/v3/freshness.py +159 -0
- dvt/artifacts/schemas/manifest/__init__.py +2 -0
- dvt/artifacts/schemas/manifest/v12/__init__.py +0 -0
- dvt/artifacts/schemas/manifest/v12/manifest.py +212 -0
- dvt/artifacts/schemas/results.py +148 -0
- dvt/artifacts/schemas/run/__init__.py +2 -0
- dvt/artifacts/schemas/run/v5/__init__.py +0 -0
- dvt/artifacts/schemas/run/v5/run.py +184 -0
- dvt/artifacts/schemas/upgrades/__init__.py +4 -0
- dvt/artifacts/schemas/upgrades/upgrade_manifest.py +174 -0
- dvt/artifacts/schemas/upgrades/upgrade_manifest_dbt_version.py +2 -0
- dvt/artifacts/utils/validation.py +153 -0
- dvt/cli/__init__.py +1 -0
- dvt/cli/context.py +16 -0
- dvt/cli/exceptions.py +56 -0
- dvt/cli/flags.py +558 -0
- dvt/cli/main.py +971 -0
- dvt/cli/option_types.py +121 -0
- dvt/cli/options.py +79 -0
- dvt/cli/params.py +803 -0
- dvt/cli/requires.py +478 -0
- dvt/cli/resolvers.py +32 -0
- dvt/cli/types.py +40 -0
- dvt/clients/__init__.py +0 -0
- dvt/clients/checked_load.py +82 -0
- dvt/clients/git.py +164 -0
- dvt/clients/jinja.py +206 -0
- dvt/clients/jinja_static.py +245 -0
- dvt/clients/registry.py +192 -0
- dvt/clients/yaml_helper.py +68 -0
- dvt/compilation.py +833 -0
- dvt/compute/__init__.py +26 -0
- dvt/compute/base.py +288 -0
- dvt/compute/engines/__init__.py +13 -0
- dvt/compute/engines/duckdb_engine.py +368 -0
- dvt/compute/engines/spark_engine.py +273 -0
- dvt/compute/query_analyzer.py +212 -0
- dvt/compute/router.py +483 -0
- dvt/config/__init__.py +4 -0
- dvt/config/catalogs.py +95 -0
- dvt/config/compute_config.py +406 -0
- dvt/config/profile.py +411 -0
- dvt/config/profiles_v2.py +464 -0
- dvt/config/project.py +893 -0
- dvt/config/renderer.py +232 -0
- dvt/config/runtime.py +491 -0
- dvt/config/selectors.py +209 -0
- dvt/config/utils.py +78 -0
- dvt/connectors/.gitignore +6 -0
- dvt/connectors/README.md +306 -0
- dvt/connectors/catalog.yml +217 -0
- dvt/connectors/download_connectors.py +300 -0
- dvt/constants.py +29 -0
- dvt/context/__init__.py +0 -0
- dvt/context/base.py +746 -0
- dvt/context/configured.py +136 -0
- dvt/context/context_config.py +350 -0
- dvt/context/docs.py +82 -0
- dvt/context/exceptions_jinja.py +179 -0
- dvt/context/macro_resolver.py +195 -0
- dvt/context/macros.py +171 -0
- dvt/context/manifest.py +73 -0
- dvt/context/providers.py +2198 -0
- dvt/context/query_header.py +14 -0
- dvt/context/secret.py +59 -0
- dvt/context/target.py +74 -0
- dvt/contracts/__init__.py +0 -0
- dvt/contracts/files.py +413 -0
- dvt/contracts/graph/__init__.py +0 -0
- dvt/contracts/graph/manifest.py +1904 -0
- dvt/contracts/graph/metrics.py +98 -0
- dvt/contracts/graph/model_config.py +71 -0
- dvt/contracts/graph/node_args.py +42 -0
- dvt/contracts/graph/nodes.py +1806 -0
- dvt/contracts/graph/semantic_manifest.py +233 -0
- dvt/contracts/graph/unparsed.py +812 -0
- dvt/contracts/project.py +417 -0
- dvt/contracts/results.py +53 -0
- dvt/contracts/selection.py +23 -0
- dvt/contracts/sql.py +86 -0
- dvt/contracts/state.py +69 -0
- dvt/contracts/util.py +46 -0
- dvt/deprecations.py +347 -0
- dvt/deps/__init__.py +0 -0
- dvt/deps/base.py +153 -0
- dvt/deps/git.py +196 -0
- dvt/deps/local.py +80 -0
- dvt/deps/registry.py +131 -0
- dvt/deps/resolver.py +149 -0
- dvt/deps/tarball.py +121 -0
- dvt/docs/source/_ext/dbt_click.py +118 -0
- dvt/docs/source/conf.py +32 -0
- dvt/env_vars.py +64 -0
- dvt/event_time/event_time.py +40 -0
- dvt/event_time/sample_window.py +60 -0
- dvt/events/__init__.py +16 -0
- dvt/events/base_types.py +37 -0
- dvt/events/core_types_pb2.py +2 -0
- dvt/events/logging.py +109 -0
- dvt/events/types.py +2534 -0
- dvt/exceptions.py +1487 -0
- dvt/flags.py +89 -0
- dvt/graph/__init__.py +11 -0
- dvt/graph/cli.py +248 -0
- dvt/graph/graph.py +172 -0
- dvt/graph/queue.py +213 -0
- dvt/graph/selector.py +375 -0
- dvt/graph/selector_methods.py +976 -0
- dvt/graph/selector_spec.py +223 -0
- dvt/graph/thread_pool.py +18 -0
- dvt/hooks.py +21 -0
- dvt/include/README.md +49 -0
- dvt/include/__init__.py +3 -0
- dvt/include/global_project.py +4 -0
- dvt/include/starter_project/.gitignore +4 -0
- dvt/include/starter_project/README.md +15 -0
- dvt/include/starter_project/__init__.py +3 -0
- dvt/include/starter_project/analyses/.gitkeep +0 -0
- dvt/include/starter_project/dvt_project.yml +36 -0
- dvt/include/starter_project/macros/.gitkeep +0 -0
- dvt/include/starter_project/models/example/my_first_dbt_model.sql +27 -0
- dvt/include/starter_project/models/example/my_second_dbt_model.sql +6 -0
- dvt/include/starter_project/models/example/schema.yml +21 -0
- dvt/include/starter_project/seeds/.gitkeep +0 -0
- dvt/include/starter_project/snapshots/.gitkeep +0 -0
- dvt/include/starter_project/tests/.gitkeep +0 -0
- dvt/internal_deprecations.py +27 -0
- dvt/jsonschemas/__init__.py +3 -0
- dvt/jsonschemas/jsonschemas.py +309 -0
- dvt/jsonschemas/project/0.0.110.json +4717 -0
- dvt/jsonschemas/project/0.0.85.json +2015 -0
- dvt/jsonschemas/resources/0.0.110.json +2636 -0
- dvt/jsonschemas/resources/0.0.85.json +2536 -0
- dvt/jsonschemas/resources/latest.json +6773 -0
- dvt/links.py +4 -0
- dvt/materializations/__init__.py +0 -0
- dvt/materializations/incremental/__init__.py +0 -0
- dvt/materializations/incremental/microbatch.py +235 -0
- dvt/mp_context.py +8 -0
- dvt/node_types.py +37 -0
- dvt/parser/__init__.py +23 -0
- dvt/parser/analysis.py +21 -0
- dvt/parser/base.py +549 -0
- dvt/parser/common.py +267 -0
- dvt/parser/docs.py +52 -0
- dvt/parser/fixtures.py +51 -0
- dvt/parser/functions.py +30 -0
- dvt/parser/generic_test.py +100 -0
- dvt/parser/generic_test_builders.py +334 -0
- dvt/parser/hooks.py +119 -0
- dvt/parser/macros.py +137 -0
- dvt/parser/manifest.py +2204 -0
- dvt/parser/models.py +574 -0
- dvt/parser/partial.py +1179 -0
- dvt/parser/read_files.py +445 -0
- dvt/parser/schema_generic_tests.py +423 -0
- dvt/parser/schema_renderer.py +111 -0
- dvt/parser/schema_yaml_readers.py +936 -0
- dvt/parser/schemas.py +1467 -0
- dvt/parser/search.py +149 -0
- dvt/parser/seeds.py +28 -0
- dvt/parser/singular_test.py +20 -0
- dvt/parser/snapshots.py +44 -0
- dvt/parser/sources.py +557 -0
- dvt/parser/sql.py +63 -0
- dvt/parser/unit_tests.py +622 -0
- dvt/plugins/__init__.py +20 -0
- dvt/plugins/contracts.py +10 -0
- dvt/plugins/exceptions.py +2 -0
- dvt/plugins/manager.py +164 -0
- dvt/plugins/manifest.py +21 -0
- dvt/profiler.py +20 -0
- dvt/py.typed +1 -0
- dvt/runners/__init__.py +2 -0
- dvt/runners/exposure_runner.py +7 -0
- dvt/runners/no_op_runner.py +46 -0
- dvt/runners/saved_query_runner.py +7 -0
- dvt/selected_resources.py +8 -0
- dvt/task/__init__.py +0 -0
- dvt/task/base.py +504 -0
- dvt/task/build.py +197 -0
- dvt/task/clean.py +57 -0
- dvt/task/clone.py +162 -0
- dvt/task/compile.py +151 -0
- dvt/task/compute.py +366 -0
- dvt/task/debug.py +650 -0
- dvt/task/deps.py +280 -0
- dvt/task/docs/__init__.py +3 -0
- dvt/task/docs/generate.py +408 -0
- dvt/task/docs/index.html +250 -0
- dvt/task/docs/serve.py +28 -0
- dvt/task/freshness.py +323 -0
- dvt/task/function.py +122 -0
- dvt/task/group_lookup.py +46 -0
- dvt/task/init.py +374 -0
- dvt/task/list.py +237 -0
- dvt/task/printer.py +176 -0
- dvt/task/profiles.py +256 -0
- dvt/task/retry.py +175 -0
- dvt/task/run.py +1146 -0
- dvt/task/run_operation.py +142 -0
- dvt/task/runnable.py +802 -0
- dvt/task/seed.py +104 -0
- dvt/task/show.py +150 -0
- dvt/task/snapshot.py +57 -0
- dvt/task/sql.py +111 -0
- dvt/task/test.py +464 -0
- dvt/tests/fixtures/__init__.py +1 -0
- dvt/tests/fixtures/project.py +620 -0
- dvt/tests/util.py +651 -0
- dvt/tracking.py +529 -0
- dvt/utils/__init__.py +3 -0
- dvt/utils/artifact_upload.py +151 -0
- dvt/utils/utils.py +408 -0
- dvt/version.py +249 -0
- dvt_core-1.11.0b4.dist-info/METADATA +252 -0
- dvt_core-1.11.0b4.dist-info/RECORD +261 -0
- dvt_core-1.11.0b4.dist-info/WHEEL +5 -0
- dvt_core-1.11.0b4.dist-info/entry_points.txt +2 -0
- dvt_core-1.11.0b4.dist-info/top_level.txt +1 -0
dvt/parser/unit_tests.py
ADDED
|
@@ -0,0 +1,622 @@
|
|
|
1
|
+
import csv
|
|
2
|
+
import os
|
|
3
|
+
from copy import deepcopy
|
|
4
|
+
from csv import DictReader
|
|
5
|
+
from io import StringIO
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any, Dict, List, Optional, Set
|
|
8
|
+
|
|
9
|
+
from dvt import utils
|
|
10
|
+
from dvt.artifacts.resources import ModelConfig, UnitTestConfig, UnitTestFormat
|
|
11
|
+
from dvt.config import RuntimeConfig
|
|
12
|
+
from dvt.context.context_config import ContextConfig
|
|
13
|
+
from dvt.context.providers import generate_parse_exposure, get_rendered
|
|
14
|
+
from dvt.contracts.files import FileHash, SchemaSourceFile
|
|
15
|
+
from dvt.contracts.graph.manifest import Manifest
|
|
16
|
+
from dvt.contracts.graph.model_config import UnitTestNodeConfig
|
|
17
|
+
from dvt.contracts.graph.nodes import (
|
|
18
|
+
DependsOn,
|
|
19
|
+
ModelNode,
|
|
20
|
+
UnitTestDefinition,
|
|
21
|
+
UnitTestNode,
|
|
22
|
+
UnitTestSourceDefinition,
|
|
23
|
+
)
|
|
24
|
+
from dvt.contracts.graph.unparsed import UnparsedUnitTest
|
|
25
|
+
from dvt.exceptions import InvalidUnitTestGivenInput, ParsingError
|
|
26
|
+
from dvt.graph import UniqueId
|
|
27
|
+
from dvt.node_types import NodeType
|
|
28
|
+
from dvt.parser.schemas import (
|
|
29
|
+
JSONValidationError,
|
|
30
|
+
ParseResult,
|
|
31
|
+
SchemaParser,
|
|
32
|
+
ValidationError,
|
|
33
|
+
YamlBlock,
|
|
34
|
+
YamlParseDictError,
|
|
35
|
+
YamlReader,
|
|
36
|
+
)
|
|
37
|
+
from dvt.utils import get_pseudo_test_path
|
|
38
|
+
|
|
39
|
+
from dbt_common.events.functions import fire_event
|
|
40
|
+
from dbt_common.events.types import SystemStdErr
|
|
41
|
+
from dbt_extractor import ExtractionError, py_extract_from_source # type: ignore
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class UnitTestManifestLoader:
    """Build a separate, self-contained Manifest holding only the selected unit tests.

    For each enabled, selected unit test this loader creates a UnitTestNode (wrapping
    the tested model's SQL) plus one ephemeral "input" model per `given` fixture, and
    registers them all in `self.unit_test_manifest`.
    """

    def __init__(self, manifest, root_project, selected) -> None:
        # The full, already-parsed project manifest (read from, never mutated here
        # except indirectly via shared macro objects).
        self.manifest: Manifest = manifest
        self.root_project: RuntimeConfig = root_project
        # selected comes from the initial selection against a "regular" manifest
        self.selected: Set[UniqueId] = selected
        # Fresh manifest for unit-test nodes only; macros are shared with the
        # source manifest so rendering can resolve them.
        self.unit_test_manifest = Manifest(macros=manifest.macros)

    def load(self) -> Manifest:
        """Populate and return the unit-test manifest from the selected unique ids.

        Ids in `self.selected` that are not unit tests, and unit tests disabled via
        config, are silently skipped.
        """
        for unique_id in self.selected:
            if unique_id in self.manifest.unit_tests:
                unit_test_case: UnitTestDefinition = self.manifest.unit_tests[unique_id]
                if not unit_test_case.config.enabled:
                    continue
                self.parse_unit_test_case(unit_test_case)
        return self.unit_test_manifest

    def parse_unit_test_case(self, test_case: UnitTestDefinition):
        """Convert one UnitTestDefinition into nodes in `self.unit_test_manifest`.

        Adds a UnitTestNode for the test itself and one ephemeral input node per
        `given` fixture, wiring depends_on edges between them. Assumes the tested
        model's unique id is the first entry of `test_case.depends_on.nodes`
        (populated earlier by the regular parsing flow).
        """
        # Create unit test node based on the node being tested
        # The tested_node has already been resolved and is in depends_on.nodes
        tested_node_unique_id = test_case.depends_on.nodes[0]
        tested_node = self.manifest.nodes[tested_node_unique_id]
        assert isinstance(tested_node, ModelNode)

        # Create UnitTestNode based on model being tested. Since selection has
        # already been done, we don't have to care about fields that are necessary
        # for selection.
        # Note: no depends_on, that's added later using input nodes
        name = test_case.name
        if tested_node.is_versioned:
            # Versioned models get a suffixed node name/alias so versions don't collide.
            name = name + f"_v{tested_node.version}"
        expected_sql: Optional[str] = None
        if test_case.expect.format == UnitTestFormat.SQL:
            # SQL-format expectations carry the SQL string in `rows`; the row list
            # stays empty in that case.
            expected_rows: List[Dict[str, Any]] = []
            expected_sql = test_case.expect.rows  # type: ignore
        else:
            assert isinstance(test_case.expect.rows, List)
            # Deep copy so later normalization cannot mutate the original definition.
            expected_rows = deepcopy(test_case.expect.rows)

        assert isinstance(expected_rows, List)
        unit_test_node = UnitTestNode(
            name=name,
            resource_type=NodeType.Unit,
            package_name=test_case.package_name,
            path=get_pseudo_test_path(name, test_case.original_file_path),
            original_file_path=test_case.original_file_path,
            unique_id=test_case.unique_id,
            config=UnitTestNodeConfig(
                materialized="unit", expected_rows=expected_rows, expected_sql=expected_sql
            ),
            raw_code=tested_node.raw_code,
            database=tested_node.database,
            schema=tested_node.schema,
            alias=name,
            fqn=test_case.unique_id.split("."),
            checksum=FileHash.empty(),
            tested_node_unique_id=tested_node.unique_id,
            overrides=test_case.overrides,
        )

        # Render the tested model's SQL in a parse context; rendering is done for
        # its side effect of populating the node's refs/sources/macro deps.
        ctx = generate_parse_exposure(
            unit_test_node,  # type: ignore
            self.root_project,
            self.manifest,
            test_case.package_name,
        )
        get_rendered(unit_test_node.raw_code, ctx, unit_test_node, capture_macros=True)
        # unit_test_node now has a populated refs/sources

        self.unit_test_manifest.nodes[unit_test_node.unique_id] = unit_test_node
        # Now create input_nodes for the test inputs
        """
        given:
          - input: ref('my_model_a')
            rows: []
          - input: ref('my_model_b')
            rows:
              - {id: 1, b: 2}
              - {id: 2, b: 2}
        """
        # Add the model "input" nodes, consisting of all referenced models in the unit test.
        # This creates an ephemeral model for every input in every test, so there may be multiple
        # input models substituting for the same input ref'd model. Note that since these are
        # always "ephemeral" they just wrap the tested_node SQL in additional CTEs. No actual table
        # or view is created.
        for given in test_case.given:
            # extract the original_input_node from the ref in the "input" key of the given list
            original_input_node = self._get_original_input_node(
                given.input, tested_node, test_case.name
            )
            input_name = original_input_node.name
            common_fields = {
                "resource_type": NodeType.Model,
                # root directory for input and output fixtures
                "original_file_path": unit_test_node.original_file_path,
                "config": ModelConfig(materialized="ephemeral"),
                "database": original_input_node.database,
                "alias": original_input_node.identifier,
                "schema": original_input_node.schema,
                "fqn": original_input_node.fqn,
                "checksum": FileHash.empty(),
                "raw_code": self._build_fixture_raw_code(given.rows, None, given.format),
                "package_name": original_input_node.package_name,
                "unique_id": f"model.{original_input_node.package_name}.{input_name}",
                "name": input_name,
                "path": f"{input_name}.sql",
            }
            resource_type = original_input_node.resource_type

            # NOTE(review): if resource_type is none of Model/Seed/Snapshot/Source,
            # `input_node` is never bound and the dictionary insert below would raise
            # NameError — presumably unreachable for valid inputs; confirm upstream.
            if resource_type in (
                NodeType.Model,
                NodeType.Seed,
                NodeType.Snapshot,
            ):

                input_node = ModelNode(
                    **common_fields,
                    defer_relation=original_input_node.defer_relation,
                )
                if resource_type == NodeType.Model:
                    # Carry version metadata through so versioned-ref lookups resolve.
                    if original_input_node.version:
                        input_node.version = original_input_node.version
                    if original_input_node.latest_version:
                        input_node.latest_version = original_input_node.latest_version

            elif resource_type == NodeType.Source:
                # We are reusing the database/schema/identifier from the original source,
                # but that shouldn't matter since this acts as an ephemeral model which just
                # wraps a CTE around the unit test node.
                input_node = UnitTestSourceDefinition(
                    **common_fields,
                    source_name=original_input_node.source_name,  # needed for source lookup
                )
                # Sources need to go in the sources dictionary in order to create the right lookup
                self.unit_test_manifest.sources[input_node.unique_id] = input_node  # type: ignore

            # Both ModelNode and UnitTestSourceDefinition need to go in nodes dictionary
            self.unit_test_manifest.nodes[input_node.unique_id] = input_node

            # Populate this_input_node_unique_id if input fixture represents node being tested
            if original_input_node == tested_node:
                unit_test_node.this_input_node_unique_id = input_node.unique_id

            # Add unique ids of input_nodes to depends_on
            unit_test_node.depends_on.nodes.append(input_node.unique_id)

        # Add functions to the manifest and depends_on
        for unique_id in tested_node.depends_on.nodes:
            if unique_id in self.manifest.functions:
                unit_test_node.depends_on.nodes.append(unique_id)
                self.unit_test_manifest.functions[unique_id] = self.manifest.functions[unique_id]

    def _build_fixture_raw_code(self, rows, column_name_to_data_types, fixture_format) -> str:
        """Return the raw SQL/Jinja body for a fixture's ephemeral input model.

        SQL-format fixtures pass their rows through verbatim (rows is the SQL
        string); other formats are wrapped in a `get_fixture_sql(...)` macro call.
        """
        # We're not currently using column_name_to_data_types, but leaving here for
        # possible future use.
        if fixture_format == UnitTestFormat.SQL:
            return rows
        else:
            # Doubled braces render literal `{{ ... }}` so the macro call is
            # evaluated later, at model-render time.
            return ("{{{{ get_fixture_sql({rows}, {column_name_to_data_types}) }}}}").format(
                rows=rows, column_name_to_data_types=column_name_to_data_types
            )

    def _get_original_input_node(self, input: str, tested_node: ModelNode, test_case_name: str):
        """
        Returns the original input node as defined in the project given an input reference
        and the node being tested.

        input: str representing how input node is referenced in tested model sql
          * examples:
            - "ref('my_model_a')"
            - "source('my_source_schema', 'my_source_name')"
            - "this"
        tested_node: ModelNode of representing node being tested

        Raises InvalidUnitTestGivenInput if `input` cannot be statically parsed as a
        ref() or source() call, and ParsingError if the lookup finds nothing.
        """
        if input.strip() == "this":
            original_input_node = tested_node
        else:
            try:
                # Wrap the expression in Jinja delimiters so the static extractor
                # can pull out the ref()/source() call.
                statically_parsed = py_extract_from_source(f"{{{{ {input} }}}}")
            except ExtractionError:
                raise InvalidUnitTestGivenInput(input=input)

            if statically_parsed["refs"]:
                # Only the first extracted ref is considered.
                ref = list(statically_parsed["refs"])[0]
                name = ref.get("name")
                package = ref.get("package")
                version = ref.get("version")
                # TODO: disabled lookup, versioned lookup, public models
                original_input_node = self.manifest.ref_lookup.find(
                    name, package, version, self.manifest
                )
            elif statically_parsed["sources"]:
                source = list(statically_parsed["sources"])[0]
                input_source_name, input_name = source
                original_input_node = self.manifest.source_lookup.find(
                    f"{input_source_name}.{input_name}",
                    None,
                    self.manifest,
                )
            else:
                raise InvalidUnitTestGivenInput(input=input)

        if not original_input_node:
            msg = f"Unit test '{test_case_name}' had an input ({input}) which was not found in the manifest."
            raise ParsingError(msg)

        return original_input_node
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
class UnitTestParser(YamlReader):
|
|
254
|
+
def __init__(self, schema_parser: SchemaParser, yaml: YamlBlock) -> None:
|
|
255
|
+
super().__init__(schema_parser, yaml, "unit_tests")
|
|
256
|
+
self.schema_parser = schema_parser
|
|
257
|
+
self.yaml = yaml
|
|
258
|
+
|
|
259
|
+
def parse(self) -> ParseResult:
|
|
260
|
+
for data in self.get_key_dicts():
|
|
261
|
+
unit_test: UnparsedUnitTest = self._get_unit_test(data)
|
|
262
|
+
tested_model_node = find_tested_model_node(
|
|
263
|
+
self.manifest, self.project.project_name, unit_test.model
|
|
264
|
+
)
|
|
265
|
+
unit_test_case_unique_id = (
|
|
266
|
+
f"{NodeType.Unit}.{self.project.project_name}.{unit_test.model}.{unit_test.name}"
|
|
267
|
+
)
|
|
268
|
+
unit_test_fqn = self._build_fqn(
|
|
269
|
+
self.project.project_name,
|
|
270
|
+
self.yaml.path.original_file_path,
|
|
271
|
+
unit_test.model,
|
|
272
|
+
unit_test.name,
|
|
273
|
+
)
|
|
274
|
+
unit_test_config = self._build_unit_test_config(unit_test_fqn, unit_test.config)
|
|
275
|
+
|
|
276
|
+
unit_test_definition = UnitTestDefinition(
|
|
277
|
+
name=unit_test.name,
|
|
278
|
+
model=unit_test.model,
|
|
279
|
+
resource_type=NodeType.Unit,
|
|
280
|
+
package_name=self.project.project_name,
|
|
281
|
+
path=self.yaml.path.relative_path,
|
|
282
|
+
original_file_path=self.yaml.path.original_file_path,
|
|
283
|
+
unique_id=unit_test_case_unique_id,
|
|
284
|
+
given=unit_test.given,
|
|
285
|
+
expect=unit_test.expect,
|
|
286
|
+
description=unit_test.description,
|
|
287
|
+
overrides=unit_test.overrides,
|
|
288
|
+
depends_on=DependsOn(),
|
|
289
|
+
fqn=unit_test_fqn,
|
|
290
|
+
config=unit_test_config,
|
|
291
|
+
versions=unit_test.versions,
|
|
292
|
+
)
|
|
293
|
+
|
|
294
|
+
if tested_model_node:
|
|
295
|
+
unit_test_definition.depends_on.nodes.append(tested_model_node.unique_id)
|
|
296
|
+
unit_test_definition.schema = tested_model_node.schema
|
|
297
|
+
|
|
298
|
+
# Check that format and type of rows matches for each given input,
|
|
299
|
+
# convert rows to a list of dictionaries, and add the unique_id of
|
|
300
|
+
# the unit_test_definition to the fixture source_file for partial parsing.
|
|
301
|
+
self._validate_and_normalize_given(unit_test_definition)
|
|
302
|
+
self._validate_and_normalize_expect(unit_test_definition)
|
|
303
|
+
|
|
304
|
+
# for calculating state:modified
|
|
305
|
+
unit_test_definition.build_unit_test_checksum()
|
|
306
|
+
assert isinstance(self.yaml.file, SchemaSourceFile)
|
|
307
|
+
if unit_test_config.enabled:
|
|
308
|
+
self.manifest.add_unit_test(self.yaml.file, unit_test_definition)
|
|
309
|
+
else:
|
|
310
|
+
self.manifest.add_disabled(self.yaml.file, unit_test_definition)
|
|
311
|
+
|
|
312
|
+
return ParseResult()
|
|
313
|
+
|
|
314
|
+
def _get_unit_test(self, data: Dict[str, Any]) -> UnparsedUnitTest:
|
|
315
|
+
try:
|
|
316
|
+
UnparsedUnitTest.validate(data)
|
|
317
|
+
return UnparsedUnitTest.from_dict(data)
|
|
318
|
+
except (ValidationError, JSONValidationError) as exc:
|
|
319
|
+
raise YamlParseDictError(self.yaml.path, self.key, data, exc)
|
|
320
|
+
|
|
321
|
+
def _build_unit_test_config(
|
|
322
|
+
self, unit_test_fqn: List[str], config_dict: Dict[str, Any]
|
|
323
|
+
) -> UnitTestConfig:
|
|
324
|
+
config = ContextConfig(
|
|
325
|
+
self.schema_parser.root_project,
|
|
326
|
+
unit_test_fqn,
|
|
327
|
+
NodeType.Unit,
|
|
328
|
+
self.schema_parser.project.project_name,
|
|
329
|
+
)
|
|
330
|
+
unit_test_config_dict = config.build_config_dict(patch_config_dict=config_dict)
|
|
331
|
+
unit_test_config_dict = self.render_entry(unit_test_config_dict)
|
|
332
|
+
|
|
333
|
+
return UnitTestConfig.from_dict(unit_test_config_dict)
|
|
334
|
+
|
|
335
|
+
def _build_fqn(self, package_name, original_file_path, model_name, test_name):
|
|
336
|
+
# This code comes from "get_fqn" and "get_fqn_prefix" in the base parser.
|
|
337
|
+
# We need to get the directories underneath the model-path.
|
|
338
|
+
path = Path(original_file_path)
|
|
339
|
+
relative_path = str(path.relative_to(*path.parts[:1]))
|
|
340
|
+
no_ext = os.path.splitext(relative_path)[0]
|
|
341
|
+
fqn = [package_name]
|
|
342
|
+
fqn.extend(utils.split_path(no_ext)[:-1])
|
|
343
|
+
fqn.append(model_name)
|
|
344
|
+
fqn.append(test_name)
|
|
345
|
+
return fqn
|
|
346
|
+
|
|
347
|
+
def _get_fixture(self, fixture_name: str, project_name: str):
|
|
348
|
+
fixture_unique_id = f"{NodeType.Fixture}.{project_name}.{fixture_name}"
|
|
349
|
+
if fixture_unique_id in self.manifest.fixtures:
|
|
350
|
+
fixture = self.manifest.fixtures[fixture_unique_id]
|
|
351
|
+
return fixture
|
|
352
|
+
else:
|
|
353
|
+
raise ParsingError(
|
|
354
|
+
f"File not found for fixture '{fixture_name}' in unit tests in {self.yaml.path.original_file_path}"
|
|
355
|
+
)
|
|
356
|
+
|
|
357
|
+
def _validate_and_normalize_given(self, unit_test_definition):
|
|
358
|
+
for ut_input in unit_test_definition.given:
|
|
359
|
+
self._validate_and_normalize_rows(ut_input, unit_test_definition, "input")
|
|
360
|
+
|
|
361
|
+
def _validate_and_normalize_expect(self, unit_test_definition):
|
|
362
|
+
self._validate_and_normalize_rows(
|
|
363
|
+
unit_test_definition.expect, unit_test_definition, "expected"
|
|
364
|
+
)
|
|
365
|
+
|
|
366
|
+
def _validate_and_normalize_rows(self, ut_fixture, unit_test_definition, fixture_type) -> None:
|
|
367
|
+
if ut_fixture.format == UnitTestFormat.Dict:
|
|
368
|
+
if ut_fixture.rows is None and ut_fixture.fixture is None: # This is a seed
|
|
369
|
+
ut_fixture.rows = self._load_rows_from_seed(ut_fixture.input)
|
|
370
|
+
if not isinstance(ut_fixture.rows, list):
|
|
371
|
+
raise ParsingError(
|
|
372
|
+
f"Unit test {unit_test_definition.name} has {fixture_type} rows "
|
|
373
|
+
f"which do not match format {ut_fixture.format}"
|
|
374
|
+
)
|
|
375
|
+
elif ut_fixture.format == UnitTestFormat.CSV:
|
|
376
|
+
if not (isinstance(ut_fixture.rows, str) or isinstance(ut_fixture.fixture, str)):
|
|
377
|
+
raise ParsingError(
|
|
378
|
+
f"Unit test {unit_test_definition.name} has {fixture_type} rows or fixtures "
|
|
379
|
+
f"which do not match format {ut_fixture.format}. Expected string."
|
|
380
|
+
)
|
|
381
|
+
|
|
382
|
+
if ut_fixture.fixture:
|
|
383
|
+
csv_rows = self.get_fixture_file_rows(
|
|
384
|
+
ut_fixture.fixture, self.project.project_name, unit_test_definition.unique_id
|
|
385
|
+
)
|
|
386
|
+
else:
|
|
387
|
+
csv_rows = self._convert_csv_to_list_of_dicts(ut_fixture.rows)
|
|
388
|
+
|
|
389
|
+
# Empty values (e.g. ,,) in a csv fixture should default to null, not ""
|
|
390
|
+
ut_fixture.rows = [
|
|
391
|
+
{k: (None if v == "" else v) for k, v in row.items()} for row in csv_rows
|
|
392
|
+
]
|
|
393
|
+
|
|
394
|
+
elif ut_fixture.format == UnitTestFormat.SQL:
|
|
395
|
+
if not (isinstance(ut_fixture.rows, str) or isinstance(ut_fixture.fixture, str)):
|
|
396
|
+
raise ParsingError(
|
|
397
|
+
f"Unit test {unit_test_definition.name} has {fixture_type} rows or fixtures "
|
|
398
|
+
f"which do not match format {ut_fixture.format}. Expected string."
|
|
399
|
+
)
|
|
400
|
+
|
|
401
|
+
if ut_fixture.fixture:
|
|
402
|
+
ut_fixture.rows = self.get_fixture_file_rows(
|
|
403
|
+
ut_fixture.fixture, self.project.project_name, unit_test_definition.unique_id
|
|
404
|
+
)
|
|
405
|
+
|
|
406
|
+
# sanitize order of input
|
|
407
|
+
if ut_fixture.rows and (
|
|
408
|
+
ut_fixture.format == UnitTestFormat.Dict or ut_fixture.format == UnitTestFormat.CSV
|
|
409
|
+
):
|
|
410
|
+
self._promote_first_non_none_row(ut_fixture)
|
|
411
|
+
|
|
412
|
+
def _promote_first_non_none_row(self, ut_fixture):
|
|
413
|
+
"""
|
|
414
|
+
Promote the first row with no None values to the top of the ut_fixture.rows list.
|
|
415
|
+
|
|
416
|
+
This function modifies the ut_fixture object in place.
|
|
417
|
+
|
|
418
|
+
Needed for databases like Redshift which uses the first value in a column to determine
|
|
419
|
+
the column type. If the first value is None, the type is assumed to be VARCHAR(1).
|
|
420
|
+
This leads to obscure type mismatch errors centered on a unit test fixture's `expect`.
|
|
421
|
+
See https://github.com/dbt-labs/dbt-redshift/issues/821 for more info.
|
|
422
|
+
"""
|
|
423
|
+
non_none_row_index = None
|
|
424
|
+
|
|
425
|
+
# Iterate through each row and its index
|
|
426
|
+
for index, row in enumerate(ut_fixture.rows):
|
|
427
|
+
# Check if all values in the row are not None
|
|
428
|
+
if all(value is not None for value in row.values()):
|
|
429
|
+
non_none_row_index = index
|
|
430
|
+
break
|
|
431
|
+
|
|
432
|
+
if non_none_row_index is None:
|
|
433
|
+
fire_event(
|
|
434
|
+
SystemStdErr(
|
|
435
|
+
bmsg="Unit Test fixtures benefit from having at least one row free of Null values to ensure consistent column types. Failure to meet this recommendation can result in type mismatch errors between unit test source models and `expected` fixtures."
|
|
436
|
+
)
|
|
437
|
+
)
|
|
438
|
+
else:
|
|
439
|
+
ut_fixture.rows[0], ut_fixture.rows[non_none_row_index] = (
|
|
440
|
+
ut_fixture.rows[non_none_row_index],
|
|
441
|
+
ut_fixture.rows[0],
|
|
442
|
+
)
|
|
443
|
+
|
|
444
|
+
def get_fixture_file_rows(self, fixture_name, project_name, utdef_unique_id):
|
|
445
|
+
# find fixture file object and store unit_test_definition unique_id
|
|
446
|
+
fixture = self._get_fixture(fixture_name, project_name)
|
|
447
|
+
fixture_source_file = self.manifest.files[fixture.file_id]
|
|
448
|
+
fixture_source_file.unit_tests.append(utdef_unique_id)
|
|
449
|
+
return fixture.rows
|
|
450
|
+
|
|
451
|
+
def _convert_csv_to_list_of_dicts(self, csv_string: str) -> List[Dict[str, Any]]:
|
|
452
|
+
dummy_file = StringIO(csv_string)
|
|
453
|
+
reader = csv.DictReader(dummy_file)
|
|
454
|
+
rows = []
|
|
455
|
+
for row in reader:
|
|
456
|
+
rows.append(row)
|
|
457
|
+
return rows
|
|
458
|
+
|
|
459
|
+
def _load_rows_from_seed(self, ref_str: str) -> List[Dict[str, Any]]:
    """Read fixture rows for a unit test from a seed's CSV file on disk.

    ``ref_str`` is a ref expression such as ``ref('my_seed')`` or
    ``ref('package', 'my_seed')``. It is parsed to locate the seed node in
    the manifest, and the seed's CSV file is read into a list of dicts
    (one per data row, keyed by the header row).

    Raises:
        ParsingError: if no seed node matching the ref can be found.
    """
    # Extract the ref target using the same jinja-extraction helper used
    # for model SQL, so package-qualified refs are handled consistently.
    ref = py_extract_from_source("{{ " + ref_str + " }}")["refs"][0]

    rows: List[Dict[str, Any]] = []

    seed_name = ref["name"]
    # A ref without an explicit package defaults to the current project.
    package_name = ref.get("package", self.project.project_name)

    seed_node = self.manifest.ref_lookup.find(seed_name, package_name, None, self.manifest)

    if not seed_node or seed_node.resource_type != NodeType.Seed:
        if package_name != self.project.project_name:
            # Seed not found in custom package specified
            raise ParsingError(
                f"Unable to find seed '{package_name}.{seed_name}' for unit tests in '{package_name}' package"
            )
        else:
            raise ParsingError(
                f"Unable to find seed '{package_name}.{seed_name}' for unit tests in directories: {self.project.seed_paths}"
            )

    seed_path = Path(self.project.project_root) / seed_node.original_file_path
    # newline="" is required by the csv module so that embedded newlines
    # inside quoted fields are parsed correctly.
    with open(seed_path, "r", newline="") as f:
        rows.extend(DictReader(f))

    return rows
|
|
487
|
+
|
|
488
|
+
|
|
489
|
+
def find_tested_model_node(
    manifest: Manifest, current_project: str, unit_test_model: str
) -> Optional[ModelNode]:
    """Resolve the model node a unit test targets.

    ``unit_test_model`` is either a bare model name or a whitespace-separated
    "<name> <version>" pair. Returns the matching node from the manifest's
    ref lookup, or None when no match exists.
    """
    parts = unit_test_model.split()
    name = parts[0]
    # Exactly two tokens means an explicit version was supplied.
    version = parts[1] if len(parts) == 2 else None
    return manifest.ref_lookup.find(name, current_project, version, manifest)
|
|
498
|
+
|
|
499
|
+
|
|
500
|
+
# This is called by the ManifestLoader after other processing has been done,
# so that model versions are available.
def process_models_for_unit_test(
    manifest: Manifest, current_project: str, unit_test_def: UnitTestDefinition, models_to_versions
):
    """Link a unit-test definition to its target model and fan out over versions.

    Resolves the tested model into ``unit_test_def.depends_on``, validates
    incremental-model override requirements, and — for versioned models —
    replaces the single unit-test definition in the manifest with one clone
    per model version selected by ``versions.include``/``versions.exclude``.

    Mutates ``manifest.unit_tests``, the schema source file's ``unit_tests``
    list, and ``unit_test_def`` in place.

    models_to_versions presumably maps package name -> model name -> list of
    versioned-model unique ids (that is how it is indexed below) — confirm
    against the caller in ManifestLoader.

    Raises ParsingError when the model cannot be resolved, when incremental
    override/'this'-input requirements are not met, or when version
    include/exclude selections are invalid.
    """
    # If the unit tests doesn't have a depends_on.nodes[0] then we weren't able to resolve
    # the model, either because versions hadn't been processed yet, or it's not a valid model name
    if not unit_test_def.depends_on.nodes:
        tested_node = find_tested_model_node(manifest, current_project, unit_test_def.model)
        if not tested_node:
            raise ParsingError(
                f"Unable to find model '{current_project}.{unit_test_def.model}' for "
                f"unit test '{unit_test_def.name}' in {unit_test_def.original_file_path}"
            )
        unit_test_def.depends_on.nodes.append(tested_node.unique_id)
        # The unit test materializes in the same schema as the tested model.
        unit_test_def.schema = tested_node.schema

    # The UnitTestDefinition should only have one "depends_on" at this point,
    # the one that's found by the "model" field.
    target_model_id = unit_test_def.depends_on.nodes[0]
    if target_model_id not in manifest.nodes:
        if target_model_id in manifest.disabled:
            # The model is disabled, so we don't need to do anything (#10540)
            return
        else:
            # If we've reached here and the model is not disabled, throw an error
            raise ParsingError(
                f"Unit test '{unit_test_def.name}' references a model that does not exist: {target_model_id}"
            )

    target_model = manifest.nodes[target_model_id]
    assert isinstance(target_model, ModelNode)

    # Incremental models must have an explicit boolean is_incremental override
    # so the unit test knows which branch of the model SQL to exercise.
    target_model_is_incremental = "macro.dbt.is_incremental" in target_model.depends_on.macros
    unit_test_def_has_incremental_override = unit_test_def.overrides and isinstance(
        unit_test_def.overrides.macros.get("is_incremental"), bool
    )

    if target_model_is_incremental and (not unit_test_def_has_incremental_override):
        raise ParsingError(
            f"Boolean override for 'is_incremental' must be provided for unit test '{unit_test_def.name}' in model '{target_model.name}'"
        )

    # When is_incremental is overridden to True, the model references {{ this }},
    # so the test must supply a 'this' input fixture.
    unit_test_def_incremental_override_true = (
        unit_test_def.overrides and unit_test_def.overrides.macros.get("is_incremental")
    )
    unit_test_def_has_this_input = "this" in [i.input for i in unit_test_def.given]

    if (
        target_model_is_incremental
        and unit_test_def_incremental_override_true
        and (not unit_test_def_has_this_input)
    ):
        raise ParsingError(
            f"Unit test '{unit_test_def.name}' for incremental model '{target_model.name}' must have a 'this' input"
        )

    # unit_test_versions = unit_test_def.versions
    # We're setting up unit tests for versioned models, so if
    # the model isn't versioned, we don't need to do anything
    if not target_model.is_versioned:
        if unit_test_def.versions and (
            unit_test_def.versions.include or unit_test_def.versions.exclude
        ):
            # If model is not versioned, we should not have an include or exclude
            msg = (
                f"Unit test '{unit_test_def.name}' should not have a versions include or exclude "
                f"when referencing non-versioned model '{target_model.name}'"
            )
            raise ParsingError(msg)
        else:
            return
    versioned_models = []
    if (
        target_model.package_name in models_to_versions
        and target_model.name in models_to_versions[target_model.package_name]
    ):
        versioned_models = models_to_versions[target_model.package_name][target_model.name]

    # Select which versioned models this unit test applies to:
    # no versions config -> all versions; otherwise honor exclude or include.
    versions_to_test = []
    if unit_test_def.versions is None:
        versions_to_test = versioned_models
    elif unit_test_def.versions.exclude:
        for model_unique_id in versioned_models:
            model = manifest.nodes[model_unique_id]
            assert isinstance(model, ModelNode)
            if model.version in unit_test_def.versions.exclude:
                continue
            else:
                versions_to_test.append(model.unique_id)
    elif unit_test_def.versions.include:
        for model_unique_id in versioned_models:
            model = manifest.nodes[model_unique_id]
            assert isinstance(model, ModelNode)
            if model.version in unit_test_def.versions.include:
                versions_to_test.append(model.unique_id)
            else:
                continue

    if not versions_to_test:
        msg = (
            f"Unit test '{unit_test_def.name}' referenced a version of '{target_model.name}' "
            "which was not found."
        )
        raise ParsingError(msg)
    else:
        # Create unit test definitions that match the model versions.
        # The original (unversioned) definition is removed from both the
        # manifest and its schema file, then cloned once per selected version.
        original_unit_test_def = manifest.unit_tests.pop(unit_test_def.unique_id)
        original_unit_test_dict = original_unit_test_def.to_dict()
        schema_file = manifest.files[original_unit_test_def.file_id]
        assert isinstance(schema_file, SchemaSourceFile)
        schema_file.unit_tests.remove(original_unit_test_def.unique_id)
        for versioned_model_unique_id in versions_to_test:
            versioned_model = manifest.nodes[versioned_model_unique_id]
            assert isinstance(versioned_model, ModelNode)
            versioned_unit_test_unique_id = f"{NodeType.Unit}.{unit_test_def.package_name}.{unit_test_def.model}.{unit_test_def.name}_v{versioned_model.version}"
            new_unit_test_def = UnitTestDefinition.from_dict(original_unit_test_dict)
            new_unit_test_def.unique_id = versioned_unit_test_unique_id
            new_unit_test_def.depends_on.nodes[0] = versioned_model_unique_id
            new_unit_test_def.version = versioned_model.version
            schema_file.unit_tests.append(versioned_unit_test_unique_id)
            # fqn?
            manifest.unit_tests[versioned_unit_test_unique_id] = new_unit_test_def
|
dvt/plugins/__init__.py
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from typing import Optional
|
|
2
|
+
|
|
3
|
+
# these are just exports, they need "noqa" so flake8 will not complain.
|
|
4
|
+
from .manager import PluginManager, dbt_hook, dbtPlugin # noqa
|
|
5
|
+
|
|
6
|
+
# Module-level singleton; populated lazily by set_up_plugin_manager().
PLUGIN_MANAGER: Optional[PluginManager] = None
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def set_up_plugin_manager(project_name: str) -> None:
    """Create the module-level PluginManager singleton for *project_name*.

    Unconditionally replaces any previously created manager.
    """
    global PLUGIN_MANAGER
    PLUGIN_MANAGER = PluginManager.from_modules(project_name)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def get_plugin_manager(project_name: str) -> PluginManager:
    """Return the module-level PluginManager, creating it on first use."""
    # Reading the module global needs no `global` statement;
    # set_up_plugin_manager performs the mutation.
    if not PLUGIN_MANAGER:
        set_up_plugin_manager(project_name)
    assert PLUGIN_MANAGER
    return PLUGIN_MANAGER
|
dvt/plugins/contracts.py
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
from typing import Dict
|
|
2
|
+
|
|
3
|
+
# just exports, they need "noqa" so flake8 will not complain.
|
|
4
|
+
from dvt.artifacts.schemas.base import ArtifactMixin as PluginArtifact # noqa
|
|
5
|
+
from dvt.artifacts.schemas.base import BaseArtifactMetadata # noqa
|
|
6
|
+
from dvt.artifacts.schemas.base import schema_version # noqa
|
|
7
|
+
|
|
8
|
+
from dbt_common.dataclass_schema import ExtensibleDbtClassMixin, dbtClassMixin # noqa
|
|
9
|
+
|
|
10
|
+
# Type alias: maps a string artifact identifier to its PluginArtifact.
PluginArtifacts = Dict[str, PluginArtifact]
|