dagster 1.12.11__py3-none-any.whl → 1.12.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. dagster/_cli/asset.py +15 -4
  2. dagster/_cli/job.py +8 -3
  3. dagster/_core/asset_graph_view/asset_graph_view.py +83 -19
  4. dagster/_core/asset_graph_view/entity_subset.py +14 -9
  5. dagster/_core/asset_graph_view/serializable_entity_subset.py +15 -0
  6. dagster/_core/code_pointer.py +8 -1
  7. dagster/_core/definitions/asset_checks/asset_check_evaluation.py +41 -68
  8. dagster/_core/definitions/asset_checks/asset_check_result.py +10 -0
  9. dagster/_core/definitions/asset_checks/asset_check_spec.py +11 -0
  10. dagster/_core/definitions/assets/graph/asset_graph.py +1 -0
  11. dagster/_core/definitions/assets/graph/base_asset_graph.py +29 -2
  12. dagster/_core/definitions/assets/graph/remote_asset_graph.py +9 -5
  13. dagster/_core/definitions/declarative_automation/legacy/valid_asset_subset.py +4 -4
  14. dagster/_core/definitions/declarative_automation/operands/operands.py +10 -4
  15. dagster/_core/definitions/declarative_automation/serialized_objects.py +36 -0
  16. dagster/_core/definitions/decorators/asset_check_decorator.py +6 -0
  17. dagster/_core/definitions/decorators/asset_decorator.py +13 -13
  18. dagster/_core/event_api.py +10 -0
  19. dagster/_core/execution/context/asset_check_execution_context.py +39 -0
  20. dagster/_core/execution/plan/execute_step.py +4 -3
  21. dagster/_core/execution/run_cancellation_thread.py +1 -0
  22. dagster/_core/instance/runs/run_domain.py +73 -90
  23. dagster/_core/remote_representation/external_data.py +6 -0
  24. dagster/_core/storage/asset_check_execution_record.py +49 -5
  25. dagster/_core/storage/asset_check_state.py +263 -0
  26. dagster/_core/storage/dagster_run.py +77 -0
  27. dagster/_core/storage/event_log/base.py +59 -1
  28. dagster/_core/storage/event_log/sql_event_log.py +174 -7
  29. dagster/_core/storage/event_log/sqlite/sqlite_event_log.py +6 -1
  30. dagster/_core/storage/legacy_storage.py +26 -5
  31. dagster/_core/telemetry.py +3 -0
  32. dagster/_core/workspace/load_target.py +1 -1
  33. dagster/_daemon/monitoring/run_monitoring.py +5 -1
  34. dagster/_generate/download.py +1 -0
  35. dagster/_utils/__init__.py +11 -0
  36. dagster/components/list/list.py +4 -1
  37. dagster/version.py +1 -1
  38. {dagster-1.12.11.dist-info → dagster-1.12.13.dist-info}/METADATA +4 -4
  39. {dagster-1.12.11.dist-info → dagster-1.12.13.dist-info}/RECORD +43 -42
  40. {dagster-1.12.11.dist-info → dagster-1.12.13.dist-info}/WHEEL +1 -1
  41. {dagster-1.12.11.dist-info → dagster-1.12.13.dist-info}/entry_points.txt +0 -0
  42. {dagster-1.12.11.dist-info → dagster-1.12.13.dist-info}/licenses/LICENSE +0 -0
  43. {dagster-1.12.11.dist-info → dagster-1.12.13.dist-info}/top_level.txt +0 -0
@@ -480,6 +480,9 @@ class RemoteAssetGraph(BaseAssetGraph[TRemoteAssetNode], ABC, Generic[TRemoteAss
480
480
  remote_node.asset_check.description,
481
481
  remote_node.asset_check.automation_condition,
482
482
  {}, # metadata not yet on AssetCheckNodeSnap
483
+ remote_node.asset_check.partitions_def_snapshot.get_partitions_definition()
484
+ if remote_node.asset_check.partitions_def_snapshot
485
+ else None,
483
486
  )
484
487
 
485
488
  ##### COMMON ASSET GRAPH INTERFACE
@@ -744,12 +747,13 @@ class RemoteWorkspaceAssetGraph(RemoteAssetGraph[RemoteWorkspaceAssetNode]):
744
747
  def get_repo_scoped_node(
745
748
  self, key: EntityKey, repository_selector: "RepositorySelector"
746
749
  ) -> Optional[Union[RemoteRepositoryAssetNode, RemoteAssetCheckNode]]:
747
- if isinstance(key, AssetKey):
748
- if not self.has(key):
749
- return None
750
- return self.get(key).resolve_to_repo_scoped_node(repository_selector)
750
+ if not self.has(key):
751
+ return None
752
+ node = self.get(key)
753
+ if isinstance(node, RemoteWorkspaceAssetNode):
754
+ return node.resolve_to_repo_scoped_node(repository_selector)
751
755
  else:
752
- raise Exception("Key must be an asset key for get_repo_scoped_node")
756
+ return node # type: ignore
753
757
 
754
758
  def split_entity_keys_by_repository(
755
759
  self, keys: AbstractSet[EntityKey]
@@ -98,14 +98,14 @@ class ValidAssetSubset(SerializableEntitySubset[AssetKey]):
98
98
  key=asset_key, value=AllPartitionsSubset(partitions_def, ctx)
99
99
  )
100
100
 
101
- @staticmethod
101
+ @classmethod
102
102
  def empty(
103
- asset_key: AssetKey, partitions_def: Optional[PartitionsDefinition]
103
+ cls, key: AssetKey, partitions_def: Optional[PartitionsDefinition]
104
104
  ) -> "ValidAssetSubset":
105
105
  if partitions_def is None:
106
- return ValidAssetSubset(key=asset_key, value=False)
106
+ return cls(key=key, value=False)
107
107
  else:
108
- return ValidAssetSubset(key=asset_key, value=partitions_def.empty_subset())
108
+ return cls(key=key, value=partitions_def.empty_subset())
109
109
 
110
110
  @staticmethod
111
111
  def from_asset_partitions_set(
@@ -102,7 +102,9 @@ class RunInProgressAutomationCondition(SubsetAutomationCondition):
102
102
  return "run_in_progress"
103
103
 
104
104
  async def compute_subset(self, context: AutomationContext) -> EntitySubset: # pyright: ignore[reportIncompatibleMethodOverride]
105
- return await context.asset_graph_view.compute_run_in_progress_subset(key=context.key)
105
+ return await context.asset_graph_view.compute_run_in_progress_subset(
106
+ key=context.key, from_subset=context.candidate_subset
107
+ )
106
108
 
107
109
 
108
110
  @whitelist_for_serdes
@@ -113,7 +115,9 @@ class BackfillInProgressAutomationCondition(SubsetAutomationCondition):
113
115
  return "backfill_in_progress"
114
116
 
115
117
  async def compute_subset(self, context: AutomationContext) -> EntitySubset: # pyright: ignore[reportIncompatibleMethodOverride]
116
- return await context.asset_graph_view.compute_backfill_in_progress_subset(key=context.key)
118
+ return await context.asset_graph_view.compute_backfill_in_progress_subset(
119
+ key=context.key, from_subset=context.candidate_subset
120
+ )
117
121
 
118
122
 
119
123
  @whitelist_for_serdes(storage_name="FailedAutomationCondition")
@@ -124,7 +128,9 @@ class ExecutionFailedAutomationCondition(SubsetAutomationCondition):
124
128
  return "execution_failed"
125
129
 
126
130
  async def compute_subset(self, context: AutomationContext) -> EntitySubset: # pyright: ignore[reportIncompatibleMethodOverride]
127
- return await context.asset_graph_view.compute_execution_failed_subset(key=context.key)
131
+ return await context.asset_graph_view.compute_execution_failed_subset(
132
+ key=context.key, from_subset=context.candidate_subset
133
+ )
128
134
 
129
135
 
130
136
  @whitelist_for_serdes
@@ -322,5 +328,5 @@ class CheckResultCondition(SubsetAutomationCondition[AssetCheckKey]):
322
328
  else AssetCheckExecutionResolvedStatus.FAILED
323
329
  )
324
330
  return await context.asset_graph_view.compute_subset_with_status(
325
- key=context.key, status=target_status
331
+ key=context.key, status=target_status, from_subset=context.candidate_subset
326
332
  )
@@ -1,3 +1,4 @@
1
+ import itertools
1
2
  from collections.abc import Iterator, Mapping, Sequence
2
3
  from dataclasses import dataclass
3
4
  from typing import ( # noqa: UP035
@@ -312,3 +313,38 @@ class AutomationConditionEvaluationState:
312
313
  @property
313
314
  def true_subset(self) -> SerializableEntitySubset:
314
315
  return self.previous_evaluation.true_subset
316
+
317
+
318
+ def get_expanded_label(
319
+ item: Union[AutomationConditionEvaluation, AutomationConditionSnapshot],
320
+ use_label=False,
321
+ ) -> Sequence[str]:
322
+ if isinstance(item, AutomationConditionSnapshot):
323
+ label, name, description, children = (
324
+ item.node_snapshot.label,
325
+ item.node_snapshot.name,
326
+ item.node_snapshot.description,
327
+ item.children,
328
+ )
329
+ else:
330
+ snapshot = item.condition_snapshot
331
+ label, name, description, children = (
332
+ snapshot.label,
333
+ snapshot.name,
334
+ snapshot.description,
335
+ item.child_evaluations,
336
+ )
337
+
338
+ if use_label and label is not None:
339
+ return [label]
340
+ node_text = name or description
341
+ child_labels = [f"({' '.join(get_expanded_label(c, use_label=True))})" for c in children]
342
+ if len(child_labels) == 0:
343
+ return [node_text]
344
+ elif len(child_labels) == 1:
345
+ return [node_text, f"{child_labels[0]}"]
346
+ else:
347
+ # intersperses node_text (e.g. AND) between each child label
348
+ return list(itertools.chain(*itertools.zip_longest(child_labels, [], fillvalue=node_text)))[
349
+ :-1
350
+ ]
@@ -28,6 +28,9 @@ from dagster._core.definitions.decorators.decorator_assets_definition_builder im
28
28
  from dagster._core.definitions.decorators.op_decorator import _Op
29
29
  from dagster._core.definitions.events import AssetKey, CoercibleToAssetKey
30
30
  from dagster._core.definitions.output import Out
31
+ from dagster._core.definitions.partitions.definition.partitions_definition import (
32
+ PartitionsDefinition,
33
+ )
31
34
  from dagster._core.definitions.policy import RetryPolicy
32
35
  from dagster._core.definitions.source_asset import SourceAsset
33
36
  from dagster._core.definitions.utils import DEFAULT_OUTPUT
@@ -113,6 +116,7 @@ def asset_check(
113
116
  metadata: Optional[Mapping[str, Any]] = None,
114
117
  automation_condition: Optional[AutomationCondition[AssetCheckKey]] = None,
115
118
  pool: Optional[str] = None,
119
+ partitions_def: Optional[PartitionsDefinition] = None,
116
120
  ) -> Callable[[AssetCheckFunction], AssetChecksDefinition]:
117
121
  """Create a definition for how to execute an asset check.
118
122
 
@@ -151,6 +155,7 @@ def asset_check(
151
155
  automation_condition (Optional[AutomationCondition]): An AutomationCondition which determines
152
156
  when this check should be executed.
153
157
  pool (Optional[str]): A string that identifies the concurrency pool that governs this asset check's execution.
158
+ partitions_def (Optional[PartitionsDefinition]): The PartitionsDefinition for this asset check.
154
159
 
155
160
  Produces an :py:class:`AssetChecksDefinition` object.
156
161
 
@@ -218,6 +223,7 @@ def asset_check(
218
223
  blocking=blocking,
219
224
  metadata=metadata,
220
225
  automation_condition=automation_condition,
226
+ partitions_def=partitions_def,
221
227
  )
222
228
 
223
229
  resource_defs_for_execution = wrap_resources_for_execution(resource_defs)
@@ -82,11 +82,11 @@ def asset(
82
82
  io_manager_def: Optional[object] = ...,
83
83
  io_manager_key: Optional[str] = ...,
84
84
  dagster_type: Optional[DagsterType] = ...,
85
- partitions_def: Optional[PartitionsDefinition] = ...,
85
+ partitions_def: Optional[PartitionsDefinition[str]] = ...,
86
86
  op_tags: Optional[Mapping[str, Any]] = ...,
87
87
  group_name: Optional[str] = ...,
88
88
  output_required: bool = ...,
89
- automation_condition: Optional[AutomationCondition] = ...,
89
+ automation_condition: Optional[AutomationCondition[AssetKey]] = ...,
90
90
  backfill_policy: Optional[BackfillPolicy] = ...,
91
91
  retry_policy: Optional[RetryPolicy] = ...,
92
92
  code_version: Optional[str] = ...,
@@ -95,14 +95,14 @@ def asset(
95
95
  owners: Optional[Sequence[str]] = ...,
96
96
  kinds: Optional[AbstractSet[str]] = ...,
97
97
  pool: Optional[str] = ...,
98
- **kwargs,
98
+ **kwargs: Any,
99
99
  ) -> Callable[[Callable[..., Any]], AssetsDefinition]: ...
100
100
 
101
101
 
102
102
  @overload
103
103
  def asset(
104
104
  compute_fn: Callable[..., Any],
105
- **kwargs,
105
+ **kwargs: Any,
106
106
  ) -> AssetsDefinition: ...
107
107
 
108
108
 
@@ -168,11 +168,11 @@ def asset(
168
168
  io_manager_def: Optional[object] = None,
169
169
  io_manager_key: Optional[str] = None,
170
170
  dagster_type: Optional[DagsterType] = None,
171
- partitions_def: Optional[PartitionsDefinition] = None,
171
+ partitions_def: Optional[PartitionsDefinition[str]] = None,
172
172
  op_tags: Optional[Mapping[str, Any]] = None,
173
173
  group_name: Optional[str] = None,
174
174
  output_required: bool = True,
175
- automation_condition: Optional[AutomationCondition] = None,
175
+ automation_condition: Optional[AutomationCondition[AssetKey]] = None,
176
176
  freshness_policy: Optional[FreshnessPolicy] = None,
177
177
  backfill_policy: Optional[BackfillPolicy] = None,
178
178
  retry_policy: Optional[RetryPolicy] = None,
@@ -182,7 +182,7 @@ def asset(
182
182
  owners: Optional[Sequence[str]] = None,
183
183
  kinds: Optional[AbstractSet[str]] = None,
184
184
  pool: Optional[str] = None,
185
- **kwargs,
185
+ **kwargs: Any,
186
186
  ) -> Union[AssetsDefinition, Callable[[Callable[..., Any]], AssetsDefinition]]:
187
187
  """Create a definition for how to compute an asset.
188
188
 
@@ -590,7 +590,7 @@ def multi_asset(
590
590
  config_schema: Optional[UserConfigSchema] = None,
591
591
  required_resource_keys: Optional[AbstractSet[str]] = None,
592
592
  internal_asset_deps: Optional[Mapping[str, set[AssetKey]]] = None,
593
- partitions_def: Optional[PartitionsDefinition] = None,
593
+ partitions_def: Optional[PartitionsDefinition[str]] = None,
594
594
  hooks: Optional[AbstractSet[HookDefinition]] = None,
595
595
  backfill_policy: Optional[BackfillPolicy] = None,
596
596
  op_tags: Optional[Mapping[str, Any]] = None,
@@ -769,7 +769,7 @@ def graph_asset(
769
769
  config: Optional[Union[ConfigMapping, Mapping[str, Any]]] = None,
770
770
  key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,
771
771
  group_name: Optional[str] = None,
772
- partitions_def: Optional[PartitionsDefinition] = None,
772
+ partitions_def: Optional[PartitionsDefinition[str]] = None,
773
773
  hooks: Optional[AbstractSet[HookDefinition]] = None,
774
774
  metadata: Optional[RawMetadataMapping] = ...,
775
775
  tags: Optional[Mapping[str, str]] = ...,
@@ -777,7 +777,7 @@ def graph_asset(
777
777
  kinds: Optional[AbstractSet[str]] = None,
778
778
  legacy_freshness_policy: Optional[LegacyFreshnessPolicy] = ...,
779
779
  auto_materialize_policy: Optional[AutoMaterializePolicy] = ...,
780
- automation_condition: Optional[AutomationCondition] = ...,
780
+ automation_condition: Optional[AutomationCondition[AssetKey]] = ...,
781
781
  backfill_policy: Optional[BackfillPolicy] = ...,
782
782
  resource_defs: Optional[Mapping[str, ResourceDefinition]] = ...,
783
783
  check_specs: Optional[Sequence[AssetCheckSpec]] = None,
@@ -806,19 +806,19 @@ def graph_asset(
806
806
  config: Optional[Union[ConfigMapping, Mapping[str, Any]]] = None,
807
807
  key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,
808
808
  group_name: Optional[str] = None,
809
- partitions_def: Optional[PartitionsDefinition] = None,
809
+ partitions_def: Optional[PartitionsDefinition[str]] = None,
810
810
  hooks: Optional[AbstractSet[HookDefinition]] = None,
811
811
  metadata: Optional[RawMetadataMapping] = None,
812
812
  tags: Optional[Mapping[str, str]] = None,
813
813
  owners: Optional[Sequence[str]] = None,
814
- automation_condition: Optional[AutomationCondition] = None,
814
+ automation_condition: Optional[AutomationCondition[AssetKey]] = None,
815
815
  backfill_policy: Optional[BackfillPolicy] = None,
816
816
  resource_defs: Optional[Mapping[str, ResourceDefinition]] = None,
817
817
  check_specs: Optional[Sequence[AssetCheckSpec]] = None,
818
818
  code_version: Optional[str] = None,
819
819
  key: Optional[CoercibleToAssetKey] = None,
820
820
  kinds: Optional[AbstractSet[str]] = None,
821
- **kwargs,
821
+ **kwargs: Any,
822
822
  ) -> Union[AssetsDefinition, Callable[[Callable[..., Any]], AssetsDefinition]]:
823
823
  """Creates a software-defined asset that's computed using a graph of ops.
824
824
 
@@ -4,6 +4,7 @@ from datetime import datetime
4
4
  from enum import Enum
5
5
  from typing import Literal, NamedTuple, Optional, TypeAlias, Union
6
6
 
7
+ from dagster_shared.record import record
7
8
  from dagster_shared.seven import json
8
9
 
9
10
  import dagster._check as check
@@ -342,6 +343,15 @@ class AssetRecordsFilter(
342
343
  return None
343
344
 
344
345
 
346
+ @record
347
+ class PartitionKeyFilter:
348
+ """Filter for the partition keys that should be included in the result. Allows for filtering on
349
+ unpartitioned assets, specific partition keys, or a combination of both.
350
+ """
351
+
352
+ key: Optional[str]
353
+
354
+
345
355
  @whitelist_for_serdes
346
356
  class RunStatusChangeRecordsFilter(
347
357
  NamedTuple(
@@ -6,6 +6,8 @@ from dagster._annotations import public
6
6
  from dagster._core.definitions.asset_checks.asset_check_spec import AssetCheckKey, AssetCheckSpec
7
7
  from dagster._core.definitions.job_definition import JobDefinition
8
8
  from dagster._core.definitions.op_definition import OpDefinition
9
+ from dagster._core.definitions.partitions.partition_key_range import PartitionKeyRange
10
+ from dagster._core.definitions.partitions.utils.time_window import TimeWindow
9
11
  from dagster._core.definitions.repository_definition.repository_definition import (
10
12
  RepositoryDefinition,
11
13
  )
@@ -135,6 +137,43 @@ class AssetCheckExecutionContext:
135
137
  def get_step_execution_context(self) -> StepExecutionContext:
136
138
  return self.op_execution_context.get_step_execution_context()
137
139
 
140
+ #### partition related
141
+ @public
142
+ @property
143
+ @_copy_docs_from_op_execution_context
144
+ def has_partition_key(self) -> bool:
145
+ return self.op_execution_context.has_partition_key
146
+
147
+ @public
148
+ @property
149
+ @_copy_docs_from_op_execution_context
150
+ def partition_key(self) -> str:
151
+ return self.op_execution_context.partition_key
152
+
153
+ @public
154
+ @property
155
+ @_copy_docs_from_op_execution_context
156
+ def partition_keys(self) -> Sequence[str]:
157
+ return self.op_execution_context.partition_keys
158
+
159
+ @public
160
+ @property
161
+ @_copy_docs_from_op_execution_context
162
+ def has_partition_key_range(self) -> bool:
163
+ return self.op_execution_context.has_partition_key_range
164
+
165
+ @public
166
+ @property
167
+ @_copy_docs_from_op_execution_context
168
+ def partition_key_range(self) -> PartitionKeyRange:
169
+ return self.op_execution_context.partition_key_range
170
+
171
+ @public
172
+ @property
173
+ @_copy_docs_from_op_execution_context
174
+ def partition_time_window(self) -> TimeWindow:
175
+ return self.op_execution_context.partition_time_window
176
+
138
177
  # misc
139
178
 
140
179
  @public
@@ -97,9 +97,6 @@ def _process_user_event(
97
97
  asset_key = _resolve_asset_result_asset_key(user_event, assets_def)
98
98
  output_name = assets_def.get_output_name_for_asset_key(asset_key)
99
99
 
100
- for check_result in user_event.check_results or []:
101
- yield from _process_user_event(step_context, check_result)
102
-
103
100
  with disable_dagster_warnings():
104
101
  if isinstance(user_event, MaterializeResult):
105
102
  value = user_event.value
@@ -112,6 +109,10 @@ def _process_user_event(
112
109
  data_version=user_event.data_version,
113
110
  tags=user_event.tags,
114
111
  )
112
+
113
+ for check_result in user_event.check_results or []:
114
+ yield from _process_user_event(step_context, check_result)
115
+
115
116
  elif isinstance(user_event, AssetCheckResult):
116
117
  asset_check_evaluation = user_event.to_asset_check_evaluation(step_context)
117
118
  assets_def = _get_assets_def_for_step(step_context, user_event)
@@ -21,6 +21,7 @@ def _kill_on_cancel(instance_ref: InstanceRef, run_id, shutdown_event):
21
21
  if run.status in [
22
22
  DagsterRunStatus.CANCELING,
23
23
  DagsterRunStatus.CANCELED,
24
+ DagsterRunStatus.FAILURE,
24
25
  ]:
25
26
  print( # noqa: T201
26
27
  f"Detected run status {run.status}, sending interrupt to main thread"
@@ -2,7 +2,7 @@ import logging
2
2
  import os
3
3
  import warnings
4
4
  from collections.abc import Mapping, Sequence, Set
5
- from typing import TYPE_CHECKING, Any, Optional, cast
5
+ from typing import TYPE_CHECKING, Any, Optional, cast, overload
6
6
 
7
7
  import dagster._check as check
8
8
  from dagster._core.definitions.asset_checks.asset_check_evaluation import (
@@ -50,10 +50,13 @@ from dagster._utils.warnings import disable_dagster_warnings
50
50
  if TYPE_CHECKING:
51
51
  from dagster._core.definitions.asset_checks.asset_check_spec import AssetCheckKey
52
52
  from dagster._core.definitions.assets.graph.base_asset_graph import (
53
+ AssetCheckNode,
53
54
  BaseAssetGraph,
54
55
  BaseAssetNode,
56
+ BaseEntityNode,
55
57
  )
56
58
  from dagster._core.definitions.job_definition import JobDefinition
59
+ from dagster._core.definitions.partitions.definition import PartitionsDefinition
57
60
  from dagster._core.definitions.repository_definition.repository_definition import (
58
61
  RepositoryLoadData,
59
62
  )
@@ -66,10 +69,7 @@ if TYPE_CHECKING:
66
69
  from dagster._core.remote_representation.code_location import CodeLocation
67
70
  from dagster._core.remote_representation.external import RemoteJob
68
71
  from dagster._core.snap import ExecutionPlanSnapshot, JobSnap
69
- from dagster._core.snap.execution_plan_snapshot import (
70
- ExecutionStepOutputSnap,
71
- ExecutionStepSnap,
72
- )
72
+ from dagster._core.snap.execution_plan_snapshot import ExecutionStepSnap
73
73
  from dagster._core.workspace.context import BaseWorkspaceRequestContext
74
74
 
75
75
 
@@ -108,6 +108,7 @@ class RunDomain:
108
108
  """Create a run with the given parameters."""
109
109
  from dagster._core.definitions.asset_key import AssetCheckKey
110
110
  from dagster._core.definitions.assets.graph.remote_asset_graph import RemoteAssetGraph
111
+ from dagster._core.definitions.partitions.context import partition_loading_context
111
112
  from dagster._core.remote_origin import RemoteJobOrigin
112
113
  from dagster._core.snap import ExecutionPlanSnapshot, JobSnap
113
114
  from dagster._utils.tags import normalize_tags
@@ -256,7 +257,8 @@ class RunDomain:
256
257
  dagster_run = self._instance.run_storage.add_run(dagster_run)
257
258
 
258
259
  if execution_plan_snapshot and not assets_are_externally_managed(dagster_run):
259
- self._log_asset_planned_events(dagster_run, execution_plan_snapshot, asset_graph)
260
+ with partition_loading_context(dynamic_partitions_store=self._instance):
261
+ self._log_asset_planned_events(dagster_run, execution_plan_snapshot, asset_graph)
260
262
 
261
263
  return dagster_run
262
264
 
@@ -315,8 +317,8 @@ class RunDomain:
315
317
  adjusted_output = output
316
318
 
317
319
  if asset_key:
318
- asset_node = self._get_repo_scoped_asset_node(
319
- asset_graph, asset_key, remote_job_origin
320
+ asset_node = self._get_repo_scoped_entity_node(
321
+ asset_key, asset_graph, remote_job_origin
320
322
  )
321
323
  if asset_node:
322
324
  partitions_definition = asset_node.partitions_def
@@ -767,12 +769,28 @@ class RunDomain:
767
769
  {key for key in to_reexecute if isinstance(key, AssetCheckKey)},
768
770
  )
769
771
 
770
- def _get_repo_scoped_asset_node(
772
+ @overload
773
+ def _get_repo_scoped_entity_node(
771
774
  self,
775
+ key: AssetKey,
776
+ asset_graph: "BaseAssetGraph",
777
+ remote_job_origin: Optional["RemoteJobOrigin"] = None,
778
+ ) -> Optional["BaseAssetNode"]: ...
779
+
780
+ @overload
781
+ def _get_repo_scoped_entity_node(
782
+ self,
783
+ key: "AssetCheckKey",
772
784
  asset_graph: "BaseAssetGraph",
773
- asset_key: AssetKey,
774
785
  remote_job_origin: Optional["RemoteJobOrigin"] = None,
775
- ) -> Optional["BaseAssetNode"]:
786
+ ) -> Optional["AssetCheckNode"]: ...
787
+
788
+ def _get_repo_scoped_entity_node(
789
+ self,
790
+ key: "EntityKey",
791
+ asset_graph: "BaseAssetGraph",
792
+ remote_job_origin: Optional["RemoteJobOrigin"] = None,
793
+ ) -> Optional["BaseEntityNode"]:
776
794
  from dagster._core.definitions.assets.graph.remote_asset_graph import (
777
795
  RemoteWorkspaceAssetGraph,
778
796
  )
@@ -783,16 +801,29 @@ class RunDomain:
783
801
  # in all cases, return the BaseAssetNode for the supplied asset key if it exists.
784
802
  if isinstance(asset_graph, RemoteWorkspaceAssetGraph):
785
803
  return cast(
786
- "Optional[BaseAssetNode]",
804
+ "Optional[BaseEntityNode]",
787
805
  asset_graph.get_repo_scoped_node(
788
- asset_key, check.not_none(remote_job_origin).repository_origin.get_selector()
806
+ key, check.not_none(remote_job_origin).repository_origin.get_selector()
789
807
  ),
790
808
  )
791
809
 
792
- if not asset_graph.has(asset_key):
810
+ if not asset_graph.has(key):
793
811
  return None
794
812
 
795
- return asset_graph.get(asset_key)
813
+ return asset_graph.get(key)
814
+
815
+ def _get_partitions_def(
816
+ self,
817
+ key: "EntityKey",
818
+ asset_graph: "BaseAssetGraph",
819
+ remote_job_origin: Optional["RemoteJobOrigin"],
820
+ run: "DagsterRun",
821
+ ) -> Optional["PartitionsDefinition"]:
822
+ # don't fetch the partitions def if the run is not partitioned
823
+ if not run.is_partitioned:
824
+ return None
825
+ entity_node = self._get_repo_scoped_entity_node(key, asset_graph, remote_job_origin)
826
+ return entity_node.partitions_def if entity_node else None
796
827
 
797
828
  def _log_asset_planned_events(
798
829
  self,
@@ -819,7 +850,7 @@ class RunDomain:
819
850
  if asset_key:
820
851
  events.extend(
821
852
  self.get_materialization_planned_events_for_asset(
822
- dagster_run, asset_key, job_name, step, output, asset_graph
853
+ dagster_run, asset_key, job_name, step, asset_graph
823
854
  )
824
855
  )
825
856
 
@@ -830,6 +861,13 @@ class RunDomain:
830
861
  target_asset_key = asset_check_key.asset_key
831
862
  check_name = asset_check_key.name
832
863
 
864
+ partitions_def = self._get_partitions_def(
865
+ asset_check_key, asset_graph, dagster_run.remote_job_origin, dagster_run
866
+ )
867
+ partitions_subset = dagster_run.get_resolved_partitions_subset_for_events(
868
+ partitions_def
869
+ )
870
+
833
871
  event = DagsterEvent(
834
872
  event_type_value=DagsterEventType.ASSET_CHECK_EVALUATION_PLANNED.value,
835
873
  job_name=job_name,
@@ -838,8 +876,9 @@ class RunDomain:
838
876
  f" asset {target_asset_key.to_string()}"
839
877
  ),
840
878
  event_specific_data=AssetCheckEvaluationPlanned(
841
- target_asset_key,
879
+ asset_key=target_asset_key,
842
880
  check_name=check_name,
881
+ partitions_subset=partitions_subset,
843
882
  ),
844
883
  step_key=step.key,
845
884
  )
@@ -878,94 +917,26 @@ class RunDomain:
878
917
  asset_key: AssetKey,
879
918
  job_name: str,
880
919
  step: "ExecutionStepSnap",
881
- output: "ExecutionStepOutputSnap",
882
920
  asset_graph: "BaseAssetGraph[BaseAssetNode]",
883
921
  ) -> Sequence["DagsterEvent"]:
884
922
  """Moved from DagsterInstance._log_materialization_planned_event_for_asset."""
885
- from dagster._core.definitions.partitions.context import partition_loading_context
886
- from dagster._core.definitions.partitions.definition import DynamicPartitionsDefinition
887
923
  from dagster._core.events import AssetMaterializationPlannedData, DagsterEvent
888
924
 
889
925
  events = []
890
926
 
891
- partition_tag = dagster_run.tags.get(PARTITION_NAME_TAG)
892
- partition_range_start, partition_range_end = (
893
- dagster_run.tags.get(ASSET_PARTITION_RANGE_START_TAG),
894
- dagster_run.tags.get(ASSET_PARTITION_RANGE_END_TAG),
895
- )
896
-
897
- if partition_tag and (partition_range_start or partition_range_end):
898
- raise DagsterInvariantViolationError(
899
- f"Cannot have {ASSET_PARTITION_RANGE_START_TAG} or"
900
- f" {ASSET_PARTITION_RANGE_END_TAG} set along with"
901
- f" {PARTITION_NAME_TAG}"
902
- )
903
-
904
- partitions_subset = None
905
- individual_partitions = None
906
- if partition_range_start or partition_range_end:
907
- if not partition_range_start or not partition_range_end:
908
- raise DagsterInvariantViolationError(
909
- f"Cannot have {ASSET_PARTITION_RANGE_START_TAG} or"
910
- f" {ASSET_PARTITION_RANGE_END_TAG} set without the other"
911
- )
912
-
913
- asset_node = check.not_none(
914
- self._get_repo_scoped_asset_node(
915
- asset_graph, asset_key, dagster_run.remote_job_origin
916
- )
917
- )
918
-
919
- partitions_def = asset_node.partitions_def
920
- if (
921
- isinstance(partitions_def, DynamicPartitionsDefinition)
922
- and partitions_def.name is None
923
- ):
924
- raise DagsterInvariantViolationError(
925
- "Creating a run targeting a partition range is not supported for assets partitioned with function-based dynamic partitions"
926
- )
927
-
928
- if partitions_def is not None:
929
- with partition_loading_context(dynamic_partitions_store=self._instance):
930
- if self._instance.event_log_storage.supports_partition_subset_in_asset_materialization_planned_events:
931
- partitions_subset = partitions_def.subset_with_partition_keys(
932
- partitions_def.get_partition_keys_in_range(
933
- PartitionKeyRange(partition_range_start, partition_range_end),
934
- )
935
- ).to_serializable_subset()
936
- individual_partitions = []
937
- else:
938
- individual_partitions = partitions_def.get_partition_keys_in_range(
939
- PartitionKeyRange(partition_range_start, partition_range_end),
940
- )
941
- elif check.not_none(output.properties).is_asset_partitioned and partition_tag:
942
- individual_partitions = [partition_tag]
943
-
944
- assert not (individual_partitions and partitions_subset), (
945
- "Should set either individual_partitions or partitions_subset, but not both"
927
+ partitions_def = self._get_partitions_def(
928
+ asset_key, asset_graph, dagster_run.remote_job_origin, dagster_run
946
929
  )
947
930
 
948
- if not individual_partitions and not partitions_subset:
931
+ partitions_subset = dagster_run.get_resolved_partitions_subset_for_events(partitions_def)
932
+ if partitions_subset is None:
949
933
  materialization_planned = DagsterEvent.build_asset_materialization_planned_event(
950
934
  job_name,
951
935
  step.key,
952
936
  AssetMaterializationPlannedData(asset_key, partition=None, partitions_subset=None),
953
937
  )
954
938
  events.append(materialization_planned)
955
- elif individual_partitions:
956
- for individual_partition in individual_partitions:
957
- materialization_planned = DagsterEvent.build_asset_materialization_planned_event(
958
- job_name,
959
- step.key,
960
- AssetMaterializationPlannedData(
961
- asset_key,
962
- partition=individual_partition,
963
- partitions_subset=partitions_subset,
964
- ),
965
- )
966
- events.append(materialization_planned)
967
-
968
- else:
939
+ elif self._instance.event_log_storage.supports_partition_subset_in_asset_materialization_planned_events:
969
940
  materialization_planned = DagsterEvent.build_asset_materialization_planned_event(
970
941
  job_name,
971
942
  step.key,
@@ -974,6 +945,18 @@ class RunDomain:
974
945
  ),
975
946
  )
976
947
  events.append(materialization_planned)
948
+ else:
949
+ for partition_key in partitions_subset.get_partition_keys():
950
+ materialization_planned = DagsterEvent.build_asset_materialization_planned_event(
951
+ job_name,
952
+ step.key,
953
+ AssetMaterializationPlannedData(
954
+ asset_key,
955
+ partition=partition_key,
956
+ partitions_subset=None,
957
+ ),
958
+ )
959
+ events.append(materialization_planned)
977
960
 
978
961
  return events
979
962