dagster-dbt 0.23.3__py3-none-any.whl → 0.28.4__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Files changed (64)
  1. dagster_dbt/__init__.py +41 -140
  2. dagster_dbt/asset_decorator.py +49 -230
  3. dagster_dbt/asset_specs.py +65 -0
  4. dagster_dbt/asset_utils.py +655 -338
  5. dagster_dbt/cli/app.py +44 -43
  6. dagster_dbt/cloud/__init__.py +6 -4
  7. dagster_dbt/cloud/asset_defs.py +119 -177
  8. dagster_dbt/cloud/cli.py +3 -4
  9. dagster_dbt/cloud/ops.py +9 -6
  10. dagster_dbt/cloud/resources.py +9 -4
  11. dagster_dbt/cloud/types.py +12 -7
  12. dagster_dbt/cloud/utils.py +186 -0
  13. dagster_dbt/cloud_v2/__init__.py +10 -0
  14. dagster_dbt/cloud_v2/asset_decorator.py +81 -0
  15. dagster_dbt/cloud_v2/cli_invocation.py +67 -0
  16. dagster_dbt/cloud_v2/client.py +438 -0
  17. dagster_dbt/cloud_v2/resources.py +462 -0
  18. dagster_dbt/cloud_v2/run_handler.py +229 -0
  19. dagster_dbt/cloud_v2/sensor_builder.py +254 -0
  20. dagster_dbt/cloud_v2/types.py +143 -0
  21. dagster_dbt/compat.py +107 -0
  22. dagster_dbt/components/__init__.py +0 -0
  23. dagster_dbt/components/dbt_project/__init__.py +0 -0
  24. dagster_dbt/components/dbt_project/component.py +545 -0
  25. dagster_dbt/components/dbt_project/scaffolder.py +65 -0
  26. dagster_dbt/core/__init__.py +0 -10
  27. dagster_dbt/core/dbt_cli_event.py +612 -0
  28. dagster_dbt/core/dbt_cli_invocation.py +474 -0
  29. dagster_dbt/core/dbt_event_iterator.py +399 -0
  30. dagster_dbt/core/resource.py +733 -0
  31. dagster_dbt/core/utils.py +14 -279
  32. dagster_dbt/dagster_dbt_translator.py +317 -74
  33. dagster_dbt/dbt_core_version.py +1 -0
  34. dagster_dbt/dbt_manifest.py +6 -5
  35. dagster_dbt/dbt_manifest_asset_selection.py +62 -22
  36. dagster_dbt/dbt_project.py +179 -40
  37. dagster_dbt/dbt_project_manager.py +173 -0
  38. dagster_dbt/dbt_version.py +0 -0
  39. dagster_dbt/errors.py +9 -84
  40. dagster_dbt/freshness_builder.py +147 -0
  41. dagster_dbt/include/pyproject.toml.jinja +21 -0
  42. dagster_dbt/include/scaffold/assets.py.jinja +1 -8
  43. dagster_dbt/include/scaffold/definitions.py.jinja +0 -15
  44. dagster_dbt/include/scaffold/project.py.jinja +1 -0
  45. dagster_dbt/include/setup.py.jinja +2 -3
  46. dagster_dbt/metadata_set.py +18 -0
  47. dagster_dbt/utils.py +136 -234
  48. dagster_dbt/version.py +1 -1
  49. dagster_dbt-0.28.4.dist-info/METADATA +47 -0
  50. dagster_dbt-0.28.4.dist-info/RECORD +59 -0
  51. {dagster_dbt-0.23.3.dist-info → dagster_dbt-0.28.4.dist-info}/WHEEL +1 -1
  52. {dagster_dbt-0.23.3.dist-info → dagster_dbt-0.28.4.dist-info}/entry_points.txt +3 -0
  53. {dagster_dbt-0.23.3.dist-info → dagster_dbt-0.28.4.dist-info/licenses}/LICENSE +1 -1
  54. dagster_dbt/asset_defs.py +0 -1049
  55. dagster_dbt/core/resources.py +0 -527
  56. dagster_dbt/core/resources_v2.py +0 -1542
  57. dagster_dbt/core/types.py +0 -63
  58. dagster_dbt/dbt_resource.py +0 -220
  59. dagster_dbt/include/scaffold/constants.py.jinja +0 -21
  60. dagster_dbt/ops.py +0 -134
  61. dagster_dbt/types.py +0 -22
  62. dagster_dbt-0.23.3.dist-info/METADATA +0 -31
  63. dagster_dbt-0.23.3.dist-info/RECORD +0 -43
  64. {dagster_dbt-0.23.3.dist-info → dagster_dbt-0.28.4.dist-info}/top_level.txt +0 -0
dagster_dbt/cloud_v2/sensor_builder.py ADDED
@@ -0,0 +1,254 @@
+ from collections.abc import Iterator, Sequence
+ from datetime import timedelta
+ from typing import Optional, Union
+
+ from dagster import (
+     AssetCheckEvaluation,
+     AssetKey,
+     AssetMaterialization,
+     AssetObservation,
+     DefaultSensorStatus,
+     SensorDefinition,
+     SensorEvaluationContext,
+     SensorResult,
+     _check as check,
+     sensor,
+ )
+ from dagster._core.definitions.repository_definition.repository_definition import (
+     RepositoryDefinition,
+ )
+ from dagster._grpc.client import DEFAULT_SENSOR_GRPC_TIMEOUT
+ from dagster._record import record
+ from dagster._serdes import deserialize_value, serialize_value
+ from dagster._time import datetime_from_timestamp, get_current_datetime
+ from dagster_shared.serdes import whitelist_for_serdes
+
+ from dagster_dbt.cloud_v2.resources import DbtCloudWorkspace
+ from dagster_dbt.cloud_v2.run_handler import (
+     COMPLETED_AT_TIMESTAMP_METADATA_KEY,
+     DbtCloudJobRunResults,
+ )
+ from dagster_dbt.cloud_v2.types import DbtCloudRun
+ from dagster_dbt.dagster_dbt_translator import DagsterDbtTranslator
+ from dagster_dbt.utils import clean_name
+
+ MAIN_LOOP_TIMEOUT_SECONDS = DEFAULT_SENSOR_GRPC_TIMEOUT - 20
+ DEFAULT_DBT_CLOUD_SENSOR_INTERVAL_SECONDS = 30
+ START_LOOKBACK_SECONDS = 60  # Look back one minute when initializing the cursor.
+
+
+ @record
+ class BatchResult:
+     idx: int
+     asset_events: Sequence[AssetMaterialization]
+     all_asset_keys_materialized: set[AssetKey]
+
+
+ @whitelist_for_serdes
+ @record
+ class DbtCloudPollingSensorCursor:
+     """A cursor that stores the last effective timestamp and offset."""
+
+     finished_at_lower_bound: Optional[float] = None
+     finished_at_upper_bound: Optional[float] = None
+     offset: Optional[int] = None
+
+
+ def materializations_from_batch_iter(
+     context: SensorEvaluationContext,
+     finished_at_lower_bound: float,
+     finished_at_upper_bound: float,
+     offset: int,
+     workspace: DbtCloudWorkspace,
+     dagster_dbt_translator: DagsterDbtTranslator,
+ ) -> Iterator[Optional[BatchResult]]:
+     client = workspace.get_client()
+     workspace_data = workspace.get_or_fetch_workspace_data()
+
+     total_processed_runs = 0
+     while True:
+         latest_offset = total_processed_runs + offset
+         runs, total_runs = client.get_runs_batch(
+             project_id=workspace.project_id,
+             environment_id=workspace.environment_id,
+             finished_at_lower_bound=datetime_from_timestamp(finished_at_lower_bound),
+             finished_at_upper_bound=datetime_from_timestamp(finished_at_upper_bound),
+             offset=latest_offset,
+         )
+         if len(runs) == 0:
+             yield None
+             context.log.info("Received no runs. Breaking.")
+             break
+         context.log.info(
+             f"Processing {len(runs)}/{total_runs} runs for dbt Cloud workspace "
+             f"for project {workspace.project_name} and environment {workspace.environment_name}..."
+         )
+         for i, run_details in enumerate(runs):
+             run = DbtCloudRun.from_run_details(run_details=run_details)
+
+             if run.job_definition_id == workspace_data.adhoc_job_id:
+                 context.log.info(f"Run {run.id} was triggered by Dagster. Continuing.")
+                 continue
+
+             run_artifacts = client.list_run_artifacts(run_id=run.id)
+             if "run_results.json" not in run_artifacts:
+                 context.log.info(
+                     f"Run {run.id} does not have a run_results.json artifact. Continuing."
+                 )
+                 continue
+
+             run_results = DbtCloudJobRunResults.from_run_results_json(
+                 run_results_json=client.get_run_results_json(run_id=run.id)
+             )
+             events = run_results.to_default_asset_events(
+                 client=workspace.get_client(),
+                 manifest=workspace_data.manifest,
+                 dagster_dbt_translator=dagster_dbt_translator,
+             )
+             # Currently, only materializations are tracked.
+             mats = [event for event in events if isinstance(event, AssetMaterialization)]
+             context.log.info(f"Found {len(mats)} materializations for {run.id}")
+
+             all_asset_keys_materialized = {mat.asset_key for mat in mats}
+             yield (
+                 BatchResult(
+                     idx=i + latest_offset,
+                     asset_events=mats,
+                     all_asset_keys_materialized=all_asset_keys_materialized,
+                 )
+                 if mats
+                 else None
+             )
+         total_processed_runs += len(runs)
+         context.log.info(
+             f"Processed {total_processed_runs}/{total_runs} runs for dbt Cloud workspace "
+             f"for project {workspace.project_name} and environment {workspace.environment_name}..."
+         )
+         if total_processed_runs == total_runs:
+             yield None
+             context.log.info("Processed all runs. Breaking.")
+             break
+
+
+ def sorted_asset_events(
+     asset_events: Sequence[Union[AssetMaterialization, AssetObservation, AssetCheckEvaluation]],
+     repository_def: RepositoryDefinition,
+ ) -> list[Union[AssetMaterialization, AssetObservation, AssetCheckEvaluation]]:
+     """Sort asset events by end date and toposort order."""
+     topo_aks = repository_def.asset_graph.toposorted_asset_keys
+     materializations_and_timestamps = [
+         (mat.metadata[COMPLETED_AT_TIMESTAMP_METADATA_KEY].value, mat) for mat in asset_events
+     ]
+     return [
+         sorted_event[1]
+         for sorted_event in sorted(
+             materializations_and_timestamps, key=lambda x: (x[0], topo_aks.index(x[1].asset_key))
+         )
+     ]
+
+
+ def build_dbt_cloud_polling_sensor(
+     *,
+     workspace: DbtCloudWorkspace,
+     dagster_dbt_translator: Optional[DagsterDbtTranslator] = None,
+     minimum_interval_seconds: int = DEFAULT_DBT_CLOUD_SENSOR_INTERVAL_SECONDS,
+     default_sensor_status: Optional[DefaultSensorStatus] = None,
+ ) -> SensorDefinition:
+     """The constructed sensor polls the dbt Cloud workspace for activity and inserts asset events into Dagster's event log.
+
+     Args:
+         workspace (DbtCloudWorkspace): The dbt Cloud workspace to poll for runs.
+         dagster_dbt_translator (Optional[DagsterDbtTranslator], optional): The translator to use
+             to convert dbt Cloud content into :py:class:`dagster.AssetSpec`.
+             Defaults to :py:class:`DagsterDbtTranslator`.
+         minimum_interval_seconds (int, optional): The minimum interval in seconds between sensor runs. Defaults to 30.
+         default_sensor_status (Optional[DefaultSensorStatus], optional): The default status of the sensor.
+
+     Returns:
+         SensorDefinition: The constructed polling sensor.
+     """
+     dagster_dbt_translator = dagster_dbt_translator or DagsterDbtTranslator()
+
+     @sensor(
+         name=clean_name(
+             f"{workspace.account_name}_{workspace.project_name}_{workspace.environment_name}__run_status_sensor"
+         ),
+         description=(
+             f"dbt Cloud polling sensor for dbt Cloud workspace for account {workspace.account_name}, "
+             f"project {workspace.project_name} and environment {workspace.environment_name}"
+         ),
+         minimum_interval_seconds=minimum_interval_seconds,
+         default_status=default_sensor_status or DefaultSensorStatus.RUNNING,
+     )
+     def dbt_cloud_run_sensor(context: SensorEvaluationContext) -> SensorResult:
+         """Sensor to report materialization events for each asset as new runs come in."""
+         context.log.info(
+             f"************"
+             f"Running sensor for dbt Cloud workspace for account {workspace.account_name}, "
+             f"project {workspace.project_name} and environment {workspace.environment_name}"
+             f"***********"
+         )
+         try:
+             cursor = (
+                 deserialize_value(context.cursor, DbtCloudPollingSensorCursor)
+                 if context.cursor
+                 else DbtCloudPollingSensorCursor()
+             )
+         except Exception as e:
+             context.log.info(f"Failed to interpret cursor. Starting from scratch. Error: {e}")
+             cursor = DbtCloudPollingSensorCursor()
+         current_date = get_current_datetime()
+         current_offset = cursor.offset or 0
+         finished_at_lower_bound = (
+             cursor.finished_at_lower_bound
+             or (current_date - timedelta(seconds=START_LOOKBACK_SECONDS)).timestamp()
+         )
+         finished_at_upper_bound = cursor.finished_at_upper_bound or current_date.timestamp()
+         sensor_iter = materializations_from_batch_iter(
+             context=context,
+             finished_at_lower_bound=finished_at_lower_bound,
+             finished_at_upper_bound=finished_at_upper_bound,
+             offset=current_offset,
+             workspace=workspace,
+             dagster_dbt_translator=dagster_dbt_translator,
+         )
+
+         all_asset_events: list[AssetMaterialization] = []
+         latest_offset = current_offset
+         repository_def = check.not_none(context.repository_def)
+         batch_result = None
+         while get_current_datetime() - current_date < timedelta(seconds=MAIN_LOOP_TIMEOUT_SECONDS):
+             batch_result = next(sensor_iter, None)
+             if batch_result is None:
+                 context.log.info("Received no batch result. Breaking.")
+                 break
+             all_asset_events.extend(batch_result.asset_events)
+             latest_offset = batch_result.idx
+
+         if batch_result is not None:
+             new_cursor = DbtCloudPollingSensorCursor(
+                 finished_at_lower_bound=finished_at_lower_bound,
+                 finished_at_upper_bound=finished_at_upper_bound,
+                 offset=latest_offset + 1,
+             )
+         else:
+             # We have completed iteration for this range.
+             new_cursor = DbtCloudPollingSensorCursor(
+                 finished_at_lower_bound=finished_at_upper_bound,
+                 finished_at_upper_bound=None,
+                 offset=0,
+             )
+
+         context.update_cursor(serialize_value(new_cursor))
+
+         context.log.info(
+             f"************"
+             f"Exiting sensor for dbt Cloud workspace for account {workspace.account_name}, "
+             f"project {workspace.project_name} and environment {workspace.environment_name}"
+             f"***********"
+         )
+         return SensorResult(
+             asset_events=sorted_asset_events(all_asset_events, repository_def),
+         )
+
+     return dbt_cloud_run_sensor
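For orientation, the new `build_dbt_cloud_polling_sensor` is meant to be wired into a code location next to a `DbtCloudWorkspace` resource. A minimal usage sketch follows; the `DbtCloudCredentials` field names and environment variable names are illustrative guesses based on the `cloud_v2/resources.py` module added in this release, not verified signatures.

import os

import dagster as dg

from dagster_dbt.cloud_v2.resources import DbtCloudCredentials, DbtCloudWorkspace
from dagster_dbt.cloud_v2.sensor_builder import build_dbt_cloud_polling_sensor

# Assumed resource wiring; the credential field names are illustrative.
workspace = DbtCloudWorkspace(
    credentials=DbtCloudCredentials(
        account_id=int(os.environ["DBT_CLOUD_ACCOUNT_ID"]),
        token=os.environ["DBT_CLOUD_TOKEN"],
        access_url=os.environ["DBT_CLOUD_ACCESS_URL"],
    ),
    project_id=int(os.environ["DBT_CLOUD_PROJECT_ID"]),
    environment_id=int(os.environ["DBT_CLOUD_ENVIRONMENT_ID"]),
)

# The sensor defaults to RUNNING, polls every 30 seconds, and resumes from its
# serialized DbtCloudPollingSensorCursor on each tick.
dbt_cloud_sensor = build_dbt_cloud_polling_sensor(workspace=workspace)

defs = dg.Definitions(sensors=[dbt_cloud_sensor])

Because the cursor stores both a finished-at window and an offset, a tick that hits MAIN_LOOP_TIMEOUT_SECONDS can resume mid-window on the next tick instead of reprocessing the whole range.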
dagster_dbt/cloud_v2/types.py ADDED
@@ -0,0 +1,143 @@
+ from collections.abc import Mapping, Sequence
+ from enum import Enum
+ from typing import Any, Optional
+
+ from dagster import Failure, MetadataValue
+ from dagster._record import record
+ from dagster._serdes import whitelist_for_serdes
+ from dagster_shared.record import as_dict
+
+
+ @record
+ class DbtCloudAccount:
+     """Represents a dbt Cloud account, based on data as returned from the API."""
+
+     id: int
+     name: Optional[str]
+
+     @classmethod
+     def from_account_details(cls, account_details: Mapping[str, Any]) -> "DbtCloudAccount":
+         return cls(
+             id=account_details["id"],
+             name=account_details.get("name"),
+         )
+
+
+ @record
+ class DbtCloudProject:
+     """Represents a dbt Cloud project, based on data as returned from the API."""
+
+     id: int
+     name: Optional[str]
+
+     @classmethod
+     def from_project_details(cls, project_details: Mapping[str, Any]) -> "DbtCloudProject":
+         return cls(
+             id=project_details["id"],
+             name=project_details.get("name"),
+         )
+
+
+ @record
+ class DbtCloudEnvironment:
+     """Represents a dbt Cloud environment, based on data as returned from the API."""
+
+     id: int
+     name: Optional[str]
+
+     @classmethod
+     def from_environment_details(
+         cls, environment_details: Mapping[str, Any]
+     ) -> "DbtCloudEnvironment":
+         return cls(
+             id=environment_details["id"],
+             name=environment_details.get("name"),
+         )
+
+
+ @record
+ class DbtCloudJob:
+     """Represents a dbt Cloud job, based on data as returned from the API."""
+
+     id: int
+     account_id: Optional[int]
+     project_id: Optional[int]
+     environment_id: Optional[int]
+     name: Optional[str]
+
+     @classmethod
+     def from_job_details(cls, job_details: Mapping[str, Any]) -> "DbtCloudJob":
+         return cls(
+             id=job_details["id"],
+             account_id=job_details.get("account_id"),
+             project_id=job_details.get("project_id"),
+             environment_id=job_details.get("environment_id"),
+             name=job_details.get("name"),
+         )
+
+
+ class DbtCloudJobRunStatusType(int, Enum):
+     """Enum representing each status type for a run in dbt Cloud's ontology."""
+
+     QUEUED = 1
+     STARTING = 2
+     RUNNING = 3
+     SUCCESS = 10
+     ERROR = 20
+     CANCELLED = 30
+
+
+ @record
+ class DbtCloudRun:
+     """Represents a dbt Cloud run, based on data as returned from the API."""
+
+     id: int
+     trigger_id: Optional[int]
+     account_id: Optional[int]
+     project_id: Optional[int]
+     environment_id: Optional[int]
+     job_definition_id: Optional[int]
+     status: Optional[DbtCloudJobRunStatusType]
+     url: Optional[str]
+
+     @classmethod
+     def from_run_details(cls, run_details: Mapping[str, Any]) -> "DbtCloudRun":
+         return cls(
+             id=run_details["id"],
+             trigger_id=run_details.get("trigger_id"),
+             account_id=run_details.get("account_id"),
+             project_id=run_details.get("project_id"),
+             environment_id=run_details.get("environment_id"),
+             job_definition_id=run_details.get("job_definition_id"),
+             status=DbtCloudJobRunStatusType(run_details.get("status"))
+             if run_details.get("status")
+             else None,
+             url=run_details.get("href"),
+         )
+
+     def raise_for_status(self) -> None:
+         if self.status in {
+             DbtCloudJobRunStatusType.ERROR,
+             DbtCloudJobRunStatusType.CANCELLED,
+         }:
+             raise Failure(
+                 f"dbt Cloud run '{self.id}' failed!",
+                 metadata={
+                     "run_details": MetadataValue.json(as_dict(self)),
+                 },
+             )
+
+
+ @whitelist_for_serdes
+ @record
+ class DbtCloudWorkspaceData:
+     """Represents the data of a dbt Cloud workspace, given a project and environment."""
+
+     project_id: int
+     environment_id: int
+     # The ID of the ad hoc dbt Cloud job created by Dagster.
+     # This job is used to parse the dbt Cloud project.
+     # It is also used to kick off CLI invocations when users do not specify a job ID.
+     adhoc_job_id: int
+     manifest: Mapping[str, Any]
+     jobs: Sequence[Mapping[str, Any]]
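The records above are permissive wrappers over raw API payloads: required fields are indexed directly, everything else goes through `.get`. A short sketch of `DbtCloudRun` consuming a payload and surfacing a terminal status; the payload values are fabricated for illustration, not captured from a real dbt Cloud API response.

from dagster import Failure

from dagster_dbt.cloud_v2.types import DbtCloudJobRunStatusType, DbtCloudRun

# Fabricated payload with the fields from_run_details reads.
run_details = {
    "id": 42,
    "trigger_id": 7,
    "account_id": 1,
    "project_id": 2,
    "environment_id": 3,
    "job_definition_id": 99,
    "status": 20,  # dbt Cloud's integer code for an errored run
    "href": "https://cloud.getdbt.com/",  # placeholder URL
}

run = DbtCloudRun.from_run_details(run_details=run_details)
assert run.status is DbtCloudJobRunStatusType.ERROR

try:
    run.raise_for_status()
except Failure as failure:
    print(failure.metadata["run_details"])  # the full run record as JSON metadata

Note that `from_run_details` maps a missing or falsy `status` to `None` rather than raising, so callers must treat `status` as optional.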
dagster_dbt/compat.py ADDED
@@ -0,0 +1,107 @@
+ import logging
+ from enum import Enum
+ from typing import TYPE_CHECKING, Any, TypeAlias
+
+ from packaging import version
+
+ # It's unclear exactly which dbt import adds a handler to the root logger, but something certainly
+ # does! Here we record the set of handlers on the root logger BEFORE any dbt imports happen. At
+ # the end of this file, we reset the root logger's handlers to this original set, so that loading
+ # this module leaves the root logger's handlers unchanged.
+ existing_root_logger_handlers = [*logging.getLogger().handlers]
+
+
+ try:
+     from dbt.version import __version__ as dbt_version
+
+     DBT_PYTHON_VERSION = version.parse(dbt_version)
+ except ImportError:
+     DBT_PYTHON_VERSION = None
+
+ # Conditionally define the various types we use from the dbt-core package.
+ if TYPE_CHECKING:
+     from dbt.adapters.base.impl import (
+         BaseAdapter as _BaseAdapter,
+         BaseColumn as _BaseColumn,
+         BaseRelation as _BaseRelation,
+     )
+     from dbt.contracts.results import (
+         NodeStatus as _NodeStatus,
+         TestStatus as _TestStatus,
+     )
+     from dbt.node_types import NodeType as _NodeType
+
+     BaseAdapter: TypeAlias = _BaseAdapter
+     BaseColumn: TypeAlias = _BaseColumn
+     BaseRelation: TypeAlias = _BaseRelation
+     NodeStatus: TypeAlias = _NodeStatus
+     NodeType: TypeAlias = _NodeType
+     TestStatus: TypeAlias = _TestStatus
+     REFABLE_NODE_TYPES: list[str] = []
+ else:
+     if DBT_PYTHON_VERSION is not None:
+         from dbt.adapters.base.impl import (
+             BaseAdapter as BaseAdapter,
+             BaseColumn as BaseColumn,
+             BaseRelation as BaseRelation,
+         )
+         from dbt.contracts.results import NodeStatus, TestStatus
+         from dbt.node_types import NodeType as NodeType
+
+         if DBT_PYTHON_VERSION < version.parse("1.8.0"):
+             from dbt.node_types import NodeType
+
+             REFABLE_NODE_TYPES = NodeType.refable()
+         else:
+             from dbt.node_types import REFABLE_NODE_TYPES as REFABLE_NODE_TYPES
+     else:
+         # Here, we define fallback implementations for types that are unavailable when dbt-core
+         # is not installed.
+         BaseAdapter = Any
+         BaseColumn = Any
+         BaseRelation = Any
+         REFABLE_NODE_TYPES = ["model", "seed", "snapshot"]
+
+         class StrEnum(str, Enum):
+             def _generate_next_value_(name, *_):
+                 return name
+
+         class NodeType(StrEnum):
+             Model = "model"
+             Analysis = "analysis"
+             Test = "test"
+             Snapshot = "snapshot"
+             Operation = "operation"
+             Seed = "seed"
+             RPCCall = "rpc"
+             SqlOperation = "sql_operation"
+             Documentation = "doc"
+             Source = "source"
+             Macro = "macro"
+             Exposure = "exposure"
+             Metric = "metric"
+             Group = "group"
+             SavedQuery = "saved_query"
+             SemanticModel = "semantic_model"
+             Unit = "unit_test"
+             Fixture = "fixture"
+
+         class NodeStatus(StrEnum):
+             Success = "success"
+             Error = "error"
+             Fail = "fail"
+             Warn = "warn"
+             Skipped = "skipped"
+             PartialSuccess = "partial success"
+             Pass = "pass"
+             RuntimeErr = "runtime error"
+
+         class TestStatus(StrEnum):
+             Pass = NodeStatus.Pass
+             Error = NodeStatus.Error
+             Fail = NodeStatus.Fail
+             Warn = NodeStatus.Warn
+             Skipped = NodeStatus.Skipped
+
+
+ logging.getLogger().handlers = existing_root_logger_handlers
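The handler bookkeeping at the top and bottom of `compat.py` is a self-contained pattern for any import with logging side effects: snapshot the root logger's handlers, perform the import, restore the snapshot. A standalone sketch, simulating the side effect directly rather than importing dbt:

import logging

# Snapshot the root logger's handlers before the side-effecting import.
saved_handlers = [*logging.getLogger().handlers]

# Stand-in for an `import dbt...` that installs a handler on the root logger.
logging.getLogger().addHandler(logging.StreamHandler())
assert logging.getLogger().handlers != saved_handlers

# Restore the snapshot so the import leaves global logging config unchanged.
logging.getLogger().handlers = saved_handlers
assert logging.getLogger().handlers == saved_handlers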