prefect-client 3.0.1__py3-none-any.whl → 3.0.3__py3-none-any.whl

This diff shows the content changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (48)
  1. prefect/_internal/compatibility/deprecated.py +1 -1
  2. prefect/blocks/core.py +5 -4
  3. prefect/blocks/notifications.py +21 -0
  4. prefect/blocks/webhook.py +17 -1
  5. prefect/cache_policies.py +98 -28
  6. prefect/client/orchestration.py +42 -20
  7. prefect/client/schemas/actions.py +10 -2
  8. prefect/client/schemas/filters.py +4 -2
  9. prefect/client/schemas/objects.py +48 -6
  10. prefect/client/schemas/responses.py +15 -1
  11. prefect/client/types/flexible_schedule_list.py +1 -1
  12. prefect/concurrency/asyncio.py +45 -6
  13. prefect/concurrency/services.py +1 -1
  14. prefect/concurrency/sync.py +21 -27
  15. prefect/concurrency/v1/asyncio.py +3 -0
  16. prefect/concurrency/v1/sync.py +4 -5
  17. prefect/context.py +6 -6
  18. prefect/deployments/runner.py +43 -5
  19. prefect/events/actions.py +6 -0
  20. prefect/flow_engine.py +12 -4
  21. prefect/flows.py +15 -11
  22. prefect/locking/filesystem.py +243 -0
  23. prefect/logging/handlers.py +0 -2
  24. prefect/logging/loggers.py +0 -18
  25. prefect/logging/logging.yml +1 -0
  26. prefect/main.py +19 -5
  27. prefect/plugins.py +9 -1
  28. prefect/records/base.py +12 -0
  29. prefect/records/filesystem.py +6 -2
  30. prefect/records/memory.py +6 -0
  31. prefect/records/result_store.py +6 -0
  32. prefect/results.py +192 -29
  33. prefect/runner/runner.py +74 -6
  34. prefect/settings.py +31 -1
  35. prefect/states.py +34 -17
  36. prefect/task_engine.py +58 -43
  37. prefect/transactions.py +113 -52
  38. prefect/utilities/asyncutils.py +7 -0
  39. prefect/utilities/collections.py +3 -2
  40. prefect/utilities/engine.py +20 -9
  41. prefect/utilities/importtools.py +1 -0
  42. prefect/utilities/urls.py +70 -12
  43. prefect/workers/base.py +10 -8
  44. {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/METADATA +1 -1
  45. {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/RECORD +48 -47
  46. {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/LICENSE +0 -0
  47. {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/WHEEL +0 -0
  48. {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/top_level.txt +0 -0
prefect/_internal/compatibility/deprecated.py CHANGED
@@ -30,7 +30,7 @@ M = TypeVar("M", bound=BaseModel)
 
 
 DEPRECATED_WARNING = (
-    "{name} has been deprecated{when}. It will not be available after {end_date}."
+    "{name} has been deprecated{when}. It will not be available in new releases after {end_date}."
     " {help}"
 )
 DEPRECATED_MOVED_WARNING = (
prefect/blocks/core.py CHANGED
@@ -40,7 +40,6 @@ from pydantic import (
 from pydantic.json_schema import GenerateJsonSchema
 from typing_extensions import Literal, ParamSpec, Self, get_args
 
-import prefect
 import prefect.exceptions
 from prefect.client.schemas import (
     DEFAULT_BLOCK_SCHEMA_VERSION,
@@ -52,6 +51,7 @@ from prefect.client.schemas import (
 from prefect.client.utilities import inject_client
 from prefect.events import emit_event
 from prefect.logging.loggers import disable_logger
+from prefect.plugins import load_prefect_collections
 from prefect.types import SecretDict
 from prefect.utilities.asyncutils import sync_compatible
 from prefect.utilities.collections import listrepr, remove_nested_keys, visit_collection
@@ -86,7 +86,7 @@ class InvalidBlockRegistration(Exception):
     """
 
 
-def _collect_nested_reference_strings(obj: Dict):
+def _collect_nested_reference_strings(obj: Dict) -> List[str]:
     """
     Collects all nested reference strings (e.g. #/definitions/Model) from a given object.
     """
@@ -739,9 +739,10 @@ class Block(BaseModel, ABC):
         """
         Retrieve the block class implementation given a key.
         """
+
         # Ensure collections are imported and have the opportunity to register types
-        # before looking up the block class
-        prefect.plugins.load_prefect_collections()
+        # before looking up the block class, but only do this once
+        load_prefect_collections()
 
         return lookup_type(cls, key)
 
prefect/blocks/notifications.py CHANGED
@@ -10,6 +10,7 @@ from prefect.logging import LogEavesdropper
 from prefect.types import SecretDict
 from prefect.utilities.asyncutils import sync_compatible
 from prefect.utilities.templating import apply_values, find_placeholders
+from prefect.utilities.urls import validate_restricted_url
 
 PREFECT_NOTIFY_TYPE_DEFAULT = "prefect_default"
 
@@ -80,6 +81,26 @@ class AppriseNotificationBlock(AbstractAppriseNotificationBlock, ABC):
         description="Incoming webhook URL used to send notifications.",
         examples=["https://hooks.example.com/XXX"],
     )
+    allow_private_urls: bool = Field(
+        default=True,
+        description="Whether to allow notifications to private URLs. Defaults to True.",
+    )
+
+    @sync_compatible
+    async def notify(
+        self,
+        body: str,
+        subject: Optional[str] = None,
+    ):
+        if not self.allow_private_urls:
+            try:
+                validate_restricted_url(self.url.get_secret_value())
+            except ValueError as exc:
+                if self._raise_on_failure:
+                    raise NotificationError(str(exc))
+                raise
+
+        await super().notify(body, subject)
 
 
 # TODO: Move to prefect-slack once collection block auto-registration is
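A minimal sketch of how the new guard behaves, assuming one of the Apprise-based subclasses such as `SlackWebhook`; the webhook URL below is hypothetical:

```python
from prefect.blocks.notifications import SlackWebhook

block = SlackWebhook(
    url="https://hooks.slack.com/services/XXX",  # hypothetical webhook URL
    allow_private_urls=False,  # opt in to rejecting private/loopback addresses
)

# With allow_private_urls=False, notify() runs validate_restricted_url() on the
# secret URL first and raises (NotificationError when _raise_on_failure is set,
# otherwise the original ValueError) before anything is sent.
block.notify("Flow run finished")  # notify() is sync_compatible, so callable from sync code
```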
prefect/blocks/webhook.py CHANGED
@@ -6,10 +6,12 @@ from typing_extensions import Literal
 
 from prefect.blocks.core import Block
 from prefect.types import SecretDict
+from prefect.utilities.urls import validate_restricted_url
 
 # Use a global HTTP transport to maintain a process-wide connection pool for
 # interservice requests
 _http_transport = AsyncHTTPTransport()
+_insecure_http_transport = AsyncHTTPTransport(verify=False)
 
 
 class Webhook(Block):
@@ -39,9 +41,20 @@ class Webhook(Block):
         title="Webhook Headers",
         description="A dictionary of headers to send with the webhook request.",
     )
+    allow_private_urls: bool = Field(
+        default=True,
+        description="Whether to allow notifications to private URLs. Defaults to True.",
+    )
+    verify: bool = Field(
+        default=True,
+        description="Whether or not to enforce a secure connection to the webhook.",
+    )
 
     def block_initialization(self):
-        self._client = AsyncClient(transport=_http_transport)
+        if self.verify:
+            self._client = AsyncClient(transport=_http_transport)
+        else:
+            self._client = AsyncClient(transport=_insecure_http_transport)
 
     async def call(self, payload: Optional[dict] = None) -> Response:
         """
@@ -50,6 +63,9 @@ class Webhook(Block):
         Args:
             payload: an optional payload to send when calling the webhook.
         """
+        if not self.allow_private_urls:
+            validate_restricted_url(self.url.get_secret_value())
+
         async with self._client:
             return await self._client.request(
                 method=self.method,
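A hedged usage sketch of the two new `Webhook` fields; the endpoint URL is hypothetical:

```python
from prefect.blocks.webhook import Webhook

webhook = Webhook(
    method="POST",
    url="https://hooks.internal.example/notify",  # hypothetical endpoint
    allow_private_urls=False,  # call() validates the URL and refuses private targets
    verify=False,              # uses the module-level insecure transport (no TLS verification)
)

async def fire():
    # Raises a ValueError from validate_restricted_url if the URL resolves to a
    # restricted address; otherwise sends the request over the chosen transport.
    return await webhook.call(payload={"status": "ok"})
```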
prefect/cache_policies.py CHANGED
@@ -1,10 +1,19 @@
 import inspect
-from dataclasses import dataclass
-from typing import Any, Callable, Dict, Optional
+from copy import deepcopy
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Literal, Optional, Union
+
+from typing_extensions import Self
 
 from prefect.context import TaskRunContext
 from prefect.utilities.hashing import hash_objects
 
+if TYPE_CHECKING:
+    from prefect.filesystems import WritableFileSystem
+    from prefect.locking.protocol import LockManager
+    from prefect.transactions import IsolationLevel
+
 
 @dataclass
 class CachePolicy:
@@ -12,6 +21,14 @@ class CachePolicy:
     Base class for all cache policies.
     """
 
+    key_storage: Union["WritableFileSystem", str, Path, None] = None
+    isolation_level: Union[
+        Literal["READ_COMMITTED", "SERIALIZABLE"],
+        "IsolationLevel",
+        None,
+    ] = None
+    lock_manager: Optional["LockManager"] = None
+
     @classmethod
     def from_cache_key_fn(
         cls, cache_key_fn: Callable[["TaskRunContext", Dict[str, Any]], Optional[str]]
@@ -21,6 +38,37 @@
         """
         return CacheKeyFnPolicy(cache_key_fn=cache_key_fn)
 
+    def configure(
+        self,
+        key_storage: Union["WritableFileSystem", str, Path, None] = None,
+        lock_manager: Optional["LockManager"] = None,
+        isolation_level: Union[
+            Literal["READ_COMMITTED", "SERIALIZABLE"], "IsolationLevel", None
+        ] = None,
+    ) -> Self:
+        """
+        Configure the cache policy with the given key storage, lock manager, and isolation level.
+
+        Args:
+            key_storage: The storage to use for cache keys. If not provided,
+                the current key storage will be used.
+            lock_manager: The lock manager to use for the cache policy. If not provided,
+                the current lock manager will be used.
+            isolation_level: The isolation level to use for the cache policy. If not provided,
+                the current isolation level will be used.
+
+        Returns:
+            A new cache policy with the given key storage, lock manager, and isolation level.
+        """
+        new = deepcopy(self)
+        if key_storage is not None:
+            new.key_storage = key_storage
+        if lock_manager is not None:
+            new.lock_manager = lock_manager
+        if isolation_level is not None:
+            new.isolation_level = isolation_level
+        return new
+
     def compute_key(
         self,
         task_ctx: TaskRunContext,
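As a sketch, `configure()` returns a modified deep copy, so module-level policies such as `INPUTS` can be specialized without mutating the shared instance; the storage path below is illustrative:

```python
from prefect.cache_policies import INPUTS

# key_storage accepts a WritableFileSystem block, a string, or a Path;
# isolation_level accepts the literal strings or an IsolationLevel member.
policy = INPUTS.configure(
    key_storage="/tmp/prefect-cache-keys",
    isolation_level="SERIALIZABLE",
)

assert policy is not INPUTS          # a deep copy is returned
assert INPUTS.key_storage is None    # the shared instance is untouched
```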
@@ -30,35 +78,48 @@
     ) -> Optional[str]:
         raise NotImplementedError
 
-    def __sub__(self, other: str) -> "CompoundCachePolicy":
+    def __sub__(self, other: str) -> "CachePolicy":
         if not isinstance(other, str):
             raise TypeError("Can only subtract strings from key policies.")
-        if isinstance(self, Inputs):
-            exclude = self.exclude or []
-            return Inputs(exclude=exclude + [other])
-        elif isinstance(self, CompoundCachePolicy):
-            new = Inputs(exclude=[other])
-            policies = self.policies or []
-            return CompoundCachePolicy(policies=policies + [new])
-        else:
-            new = Inputs(exclude=[other])
-            return CompoundCachePolicy(policies=[self, new])
-
-    def __add__(self, other: "CachePolicy") -> "CompoundCachePolicy":
+        new = Inputs(exclude=[other])
+        return CompoundCachePolicy(policies=[self, new])
+
+    def __add__(self, other: "CachePolicy") -> "CachePolicy":
         # adding _None is a no-op
         if isinstance(other, _None):
             return self
-        elif isinstance(self, _None):
-            return other
 
-        if isinstance(self, CompoundCachePolicy):
-            policies = self.policies or []
-            return CompoundCachePolicy(policies=policies + [other])
-        elif isinstance(other, CompoundCachePolicy):
-            policies = other.policies or []
-            return CompoundCachePolicy(policies=policies + [self])
-        else:
-            return CompoundCachePolicy(policies=[self, other])
+        if (
+            other.key_storage is not None
+            and self.key_storage is not None
+            and other.key_storage != self.key_storage
+        ):
+            raise ValueError(
+                "Cannot add CachePolicies with different storage locations."
+            )
+        if (
+            other.isolation_level is not None
+            and self.isolation_level is not None
+            and other.isolation_level != self.isolation_level
+        ):
+            raise ValueError(
+                "Cannot add CachePolicies with different isolation levels."
+            )
+        if (
+            other.lock_manager is not None
+            and self.lock_manager is not None
+            and other.lock_manager != self.lock_manager
+        ):
+            raise ValueError(
+                "Cannot add CachePolicies with different lock implementations."
+            )
+
+        return CompoundCachePolicy(
+            policies=[self, other],
+            key_storage=self.key_storage or other.key_storage,
+            isolation_level=self.isolation_level or other.isolation_level,
+            lock_manager=self.lock_manager or other.lock_manager,
+        )
 
 
 @dataclass
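Illustrative of the new composition rules: addition always produces a `CompoundCachePolicy` and merges the storage, isolation, and lock settings, refusing conflicting ones, while subtraction excludes an input name:

```python
from prefect.cache_policies import INPUTS, NONE, Inputs

combined = Inputs() + NONE      # adding NONE is a no-op; returns the left-hand policy
trimmed = INPUTS - "verbose"    # Inputs.__sub__ -> Inputs(exclude=["verbose"])

a = Inputs().configure(key_storage="/tmp/keys-a")
b = Inputs().configure(key_storage="/tmp/keys-b")
try:
    a + b
except ValueError:
    # "Cannot add CachePolicies with different storage locations."
    pass
```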
@@ -93,7 +154,7 @@ class CompoundCachePolicy(CachePolicy):
     Any keys that return `None` will be ignored.
     """
 
-    policies: Optional[list] = None
+    policies: List[CachePolicy] = field(default_factory=list)
 
     def compute_key(
         self,
@@ -103,7 +164,7 @@
         **kwargs,
     ) -> Optional[str]:
         keys = []
-        for policy in self.policies or []:
+        for policy in self.policies:
             policy_key = policy.compute_key(
                 task_ctx=task_ctx,
                 inputs=inputs,
@@ -133,6 +194,10 @@ class _None(CachePolicy):
     ) -> Optional[str]:
         return None
 
+    def __add__(self, other: "CachePolicy") -> "CachePolicy":
+        # adding _None is a no-op
+        return other
+
 
 @dataclass
 class TaskSource(CachePolicy):
@@ -208,7 +273,7 @@ class Inputs(CachePolicy):
     Policy that computes a cache key based on a hash of the runtime inputs provided to the task..
     """
 
-    exclude: Optional[list] = None
+    exclude: List[str] = field(default_factory=list)
 
     def compute_key(
         self,
@@ -230,6 +295,11 @@
 
         return hash_objects(hashed_inputs)
 
+    def __sub__(self, other: str) -> "CachePolicy":
+        if not isinstance(other, str):
+            raise TypeError("Can only subtract strings from key policies.")
+        return Inputs(exclude=self.exclude + [other])
+
 
 INPUTS = Inputs()
 NONE = _None()
prefect/client/orchestration.py CHANGED
@@ -86,6 +86,7 @@ from prefect.client.schemas.objects import (
     BlockSchema,
     BlockType,
     ConcurrencyLimit,
+    ConcurrencyOptions,
     Constant,
     DeploymentSchedule,
     Flow,
@@ -94,7 +95,6 @@
     FlowRunPolicy,
     Log,
     Parameter,
-    QueueFilter,
     TaskRunPolicy,
     TaskRunResult,
     Variable,
@@ -994,7 +994,6 @@ class PrefectClient:
     async def create_work_queue(
         self,
         name: str,
-        tags: Optional[List[str]] = None,
         description: Optional[str] = None,
         is_paused: Optional[bool] = None,
         concurrency_limit: Optional[int] = None,
@@ -1006,8 +1005,6 @@
 
         Args:
             name: a unique name for the work queue
-            tags: DEPRECATED: an optional list of tags to filter on; only work scheduled with these tags
-                will be included in the queue. This option will be removed on 2023-02-23.
            description: An optional description for the work queue.
             is_paused: Whether or not the work queue is paused.
             concurrency_limit: An optional concurrency limit for the work queue.
@@ -1021,18 +1018,7 @@
         Returns:
             The created work queue
         """
-        if tags:
-            warnings.warn(
-                (
-                    "The use of tags for creating work queue filters is deprecated."
-                    " This option will be removed on 2023-02-23."
-                ),
-                DeprecationWarning,
-            )
-            filter = QueueFilter(tags=tags)
-        else:
-            filter = None
-        create_model = WorkQueueCreate(name=name, filter=filter)
+        create_model = WorkQueueCreate(name=name, filter=None)
         if description is not None:
             create_model.description = description
         if is_paused is not None:
@@ -1654,6 +1640,7 @@
         version: Optional[str] = None,
         schedules: Optional[List[DeploymentScheduleCreate]] = None,
         concurrency_limit: Optional[int] = None,
+        concurrency_options: Optional[ConcurrencyOptions] = None,
         parameters: Optional[Dict[str, Any]] = None,
         description: Optional[str] = None,
         work_queue_name: Optional[str] = None,
@@ -1712,6 +1699,7 @@
             paused=paused,
             schedules=schedules or [],
             concurrency_limit=concurrency_limit,
+            concurrency_options=concurrency_options,
             pull_steps=pull_steps,
             enforce_parameter_schema=enforce_parameter_schema,
         )
@@ -2158,7 +2146,10 @@
         try:
             response = await self._client.post(
                 f"/flow_runs/{flow_run_id}/set_state",
-                json=dict(state=state_create.model_dump(mode="json"), force=force),
+                json=dict(
+                    state=state_create.model_dump(mode="json", serialize_as_any=True),
+                    force=force,
+                ),
             )
         except httpx.HTTPStatusError as e:
             if e.response.status_code == status.HTTP_404_NOT_FOUND:
@@ -3055,7 +3046,11 @@
         return response.json()
 
     async def increment_concurrency_slots(
-        self, names: List[str], slots: int, mode: str, create_if_missing: Optional[bool]
+        self,
+        names: List[str],
+        slots: int,
+        mode: str,
+        create_if_missing: Optional[bool] = None,
     ) -> httpx.Response:
         return await self._client.post(
             "/v2/concurrency_limits/increment",
@@ -3063,7 +3058,7 @@
                 "names": names,
                 "slots": slots,
                 "mode": mode,
-                "create_if_missing": create_if_missing,
+                "create_if_missing": create_if_missing if create_if_missing else False,
             },
         )
 
@@ -3140,6 +3135,30 @@
         else:
             raise
 
+    async def upsert_global_concurrency_limit_by_name(self, name: str, limit: int):
+        """Creates a global concurrency limit with the given name and limit if one does not already exist.
+
+        If one does already exist matching the name then update it's limit if it is different.
+
+        Note: This is not done atomically.
+        """
+        try:
+            existing_limit = await self.read_global_concurrency_limit_by_name(name)
+        except prefect.exceptions.ObjectNotFound:
+            existing_limit = None
+
+        if not existing_limit:
+            await self.create_global_concurrency_limit(
+                GlobalConcurrencyLimitCreate(
+                    name=name,
+                    limit=limit,
+                )
+            )
+        elif existing_limit.limit != limit:
+            await self.update_global_concurrency_limit(
+                name, GlobalConcurrencyLimitUpdate(limit=limit)
+            )
+
     async def read_global_concurrency_limits(
         self, limit: int = 10, offset: int = 0
     ) -> List[GlobalConcurrencyLimitResponse]:
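A usage sketch for the new helper, shown through `get_client`; the limit name and value are illustrative:

```python
from prefect.client.orchestration import get_client

async def ensure_db_limit() -> None:
    async with get_client() as client:
        # Creates the limit if it does not exist, or updates it when the stored
        # limit differs; the read and write are separate calls (not atomic).
        await client.upsert_global_concurrency_limit_by_name(name="database", limit=10)
```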
@@ -3934,7 +3953,10 @@ class SyncPrefectClient:
         try:
             response = self._client.post(
                 f"/flow_runs/{flow_run_id}/set_state",
-                json=dict(state=state_create.model_dump(mode="json"), force=force),
+                json=dict(
+                    state=state_create.model_dump(mode="json", serialize_as_any=True),
+                    force=force,
+                ),
             )
         except httpx.HTTPStatusError as e:
             if e.response.status_code == status.HTTP_404_NOT_FOUND:
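For context, `serialize_as_any=True` is a Pydantic v2 option for duck-typed serialization: nested values are dumped according to their runtime type rather than the declared annotation. A minimal illustration with stand-in models (not Prefect's own) of why this matters for `StateCreate.data`:

```python
from pydantic import BaseModel

class Base(BaseModel):
    type: str

class Persisted(Base):
    storage_key: str

class Envelope(BaseModel):
    data: Base  # declared as the base type, like the union members of StateCreate.data

env = Envelope(data=Persisted(type="reference", storage_key="results/abc"))
print(env.model_dump(mode="json"))                         # {'data': {'type': 'reference'}}
print(env.model_dump(mode="json", serialize_as_any=True))  # also includes 'storage_key'
```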
prefect/client/schemas/actions.py CHANGED
@@ -38,7 +38,7 @@ from prefect.utilities.collections import listrepr
 from prefect.utilities.pydantic import get_class_fields_only
 
 if TYPE_CHECKING:
-    from prefect.results import BaseResult
+    from prefect.results import BaseResult, ResultRecordMetadata
 
 R = TypeVar("R")
 
@@ -50,7 +50,7 @@ class StateCreate(ActionBaseModel):
     name: Optional[str] = Field(default=None)
     message: Optional[str] = Field(default=None, examples=["Run started"])
     state_details: StateDetails = Field(default_factory=StateDetails)
-    data: Union["BaseResult[R]", Any] = Field(
+    data: Union["BaseResult[R]", "ResultRecordMetadata", Any] = Field(
         default=None,
     )
 
@@ -161,6 +161,10 @@ class DeploymentCreate(ActionBaseModel):
         default=None,
         description="The concurrency limit for the deployment.",
     )
+    concurrency_options: Optional[objects.ConcurrencyOptions] = Field(
+        default=None,
+        description="The concurrency options for the deployment.",
+    )
     enforce_parameter_schema: Optional[bool] = Field(
         default=None,
         description=(
@@ -237,6 +241,10 @@ class DeploymentUpdate(ActionBaseModel):
         default=None,
         description="The concurrency limit for the deployment.",
     )
+    concurrency_options: Optional[objects.ConcurrencyOptions] = Field(
+        default=None,
+        description="The concurrency options for the deployment.",
+    )
     tags: List[str] = Field(default_factory=list)
     work_queue_name: Optional[str] = Field(None)
     work_pool_name: Optional[str] = Field(
prefect/client/schemas/filters.py CHANGED
@@ -506,7 +506,7 @@ class DeploymentFilterTags(PrefectBaseModel, OperatorMixin):
 
 
 class DeploymentFilterConcurrencyLimit(PrefectBaseModel):
-    """Filter by `Deployment.concurrency_limit`."""
+    """DEPRECATED: Prefer `Deployment.concurrency_limit_id` over `Deployment.concurrency_limit`."""
 
     ge_: Optional[int] = Field(
         default=None,
@@ -538,7 +538,9 @@ class DeploymentFilter(PrefectBaseModel, OperatorMixin):
         default=None, description="Filter criteria for `Deployment.work_queue_name`"
     )
     concurrency_limit: Optional[DeploymentFilterConcurrencyLimit] = Field(
-        default=None, description="Filter criteria for `Deployment.concurrency_limit`"
+        default=None,
+        description="DEPRECATED: Prefer `Deployment.concurrency_limit_id` over `Deployment.concurrency_limit`. If provided, will be ignored for backwards-compatibility. Will be removed after December 2024.",
+        deprecated=True,
     )
 
 
prefect/client/schemas/objects.py CHANGED
@@ -3,6 +3,7 @@ import warnings
 from functools import partial
 from typing import (
     TYPE_CHECKING,
+    Annotated,
     Any,
     Dict,
     Generic,
@@ -17,10 +18,12 @@ import orjson
 import pendulum
 from pydantic import (
     ConfigDict,
+    Discriminator,
     Field,
     HttpUrl,
     IPvAnyNetwork,
     SerializationInfo,
+    Tag,
     field_validator,
     model_serializer,
     model_validator,
@@ -59,7 +62,7 @@ from prefect.utilities.names import generate_slug
 from prefect.utilities.pydantic import handle_secret_render
 
 if TYPE_CHECKING:
-    from prefect.results import BaseResult
+    from prefect.results import BaseResult, ResultRecordMetadata
 
 
 R = TypeVar("R", default=Any)
@@ -138,6 +141,30 @@ class WorkQueueStatus(AutoEnum):
     PAUSED = AutoEnum.auto()
 
 
+class ConcurrencyLimitStrategy(AutoEnum):
+    """Enumeration of concurrency limit strategies."""
+
+    ENQUEUE = AutoEnum.auto()
+    CANCEL_NEW = AutoEnum.auto()
+
+
+class ConcurrencyOptions(PrefectBaseModel):
+    """
+    Class for storing the concurrency config in database.
+    """
+
+    collision_strategy: ConcurrencyLimitStrategy
+
+
+class ConcurrencyLimitConfig(PrefectBaseModel):
+    """
+    Class for storing the concurrency limit config in database.
+    """
+
+    limit: int
+    collision_strategy: ConcurrencyLimitStrategy = ConcurrencyLimitStrategy.ENQUEUE
+
+
 class StateDetails(PrefectBaseModel):
     flow_run_id: Optional[UUID] = None
     task_run_id: Optional[UUID] = None
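The two new models are plain Pydantic schemas; a short construction example (how deployments consume them is outside this hunk):

```python
from prefect.client.schemas.objects import (
    ConcurrencyLimitConfig,
    ConcurrencyLimitStrategy,
)

# Allow one active run and cancel, rather than enqueue, any run that would
# exceed the limit.
config = ConcurrencyLimitConfig(
    limit=1,
    collision_strategy=ConcurrencyLimitStrategy.CANCEL_NEW,
)
print(config.collision_strategy)  # ConcurrencyLimitStrategy.CANCEL_NEW
```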
@@ -158,6 +185,14 @@ class StateDetails(PrefectBaseModel):
     task_parameters_id: Optional[UUID] = None
 
 
+def data_discriminator(x: Any) -> str:
+    if isinstance(x, dict) and "type" in x:
+        return "BaseResult"
+    elif isinstance(x, dict) and "storage_key" in x:
+        return "ResultRecordMetadata"
+    return "Any"
+
+
 class State(ObjectBaseModel, Generic[R]):
     """
     The state of a run.
@@ -168,9 +203,14 @@ class State(ObjectBaseModel, Generic[R]):
     timestamp: DateTime = Field(default_factory=lambda: pendulum.now("UTC"))
     message: Optional[str] = Field(default=None, examples=["Run started"])
     state_details: StateDetails = Field(default_factory=StateDetails)
-    data: Union["BaseResult[R]", Any] = Field(
-        default=None,
-    )
+    data: Annotated[
+        Union[
+            Annotated["BaseResult[R]", Tag("BaseResult")],
+            Annotated["ResultRecordMetadata", Tag("ResultRecordMetadata")],
+            Annotated[Any, Tag("Any")],
+        ],
+        Discriminator(data_discriminator),
+    ] = Field(default=None)
 
     @overload
     def result(self: "State[R]", raise_on_failure: bool = True) -> R:
@@ -276,10 +316,12 @@
         results should be sent to the API. Other data is only available locally.
         """
         from prefect.client.schemas.actions import StateCreate
-        from prefect.results import BaseResult
+        from prefect.results import BaseResult, ResultRecord, should_persist_result
 
-        if isinstance(self.data, BaseResult) and self.data.serialize_to_none is False:
+        if isinstance(self.data, BaseResult):
             data = self.data
+        elif isinstance(self.data, ResultRecord) and should_persist_result():
+            data = self.data.metadata
         else:
             data = None
 
prefect/client/schemas/responses.py CHANGED
@@ -314,11 +314,25 @@ class DeploymentResponse(ObjectBaseModel):
         default=..., description="The flow id associated with the deployment."
     )
     concurrency_limit: Optional[int] = Field(
-        default=None, description="The concurrency limit for the deployment."
+        default=None,
+        description="DEPRECATED: Prefer `global_concurrency_limit`. Will always be None for backwards compatibility. Will be removed after December 2024.",
+        deprecated=True,
+    )
+    global_concurrency_limit: Optional["GlobalConcurrencyLimitResponse"] = Field(
+        default=None,
+        description="The global concurrency limit object for enforcing the maximum number of flow runs that can be active at once.",
+    )
+    concurrency_options: Optional[objects.ConcurrencyOptions] = Field(
+        default=None,
+        description="The concurrency options for the deployment.",
     )
     paused: bool = Field(
         default=False, description="Whether or not the deployment is paused."
     )
+    concurrency_options: Optional[objects.ConcurrencyOptions] = Field(
+        default=None,
+        description="The concurrency options for the deployment.",
+    )
     schedules: List[objects.DeploymentSchedule] = Field(
         default_factory=list, description="A list of schedules for the deployment."
     )
prefect/client/types/flexible_schedule_list.py CHANGED
@@ -7,5 +7,5 @@ if TYPE_CHECKING:
     from prefect.client.schemas.schedules import SCHEDULE_TYPES
 
 FlexibleScheduleList: TypeAlias = Sequence[
-    Union[DeploymentScheduleCreate, dict[str, Any], "SCHEDULE_TYPES"]
+    Union["DeploymentScheduleCreate", dict[str, Any], "SCHEDULE_TYPES"]
 ]