prefect-client 3.0.2__py3-none-any.whl → 3.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
prefect/events/clients.py CHANGED
@@ -34,6 +34,7 @@ from prefect.settings import (
     PREFECT_API_KEY,
     PREFECT_API_URL,
     PREFECT_CLOUD_API_URL,
+    PREFECT_DEBUG_MODE,
     PREFECT_SERVER_ALLOW_EPHEMERAL_MODE,
 )
 
@@ -67,6 +68,18 @@ EVENT_WEBSOCKET_CHECKPOINTS = Counter(
 logger = get_logger(__name__)
 
 
+def http_to_ws(url: str):
+    return url.replace("https://", "wss://").replace("http://", "ws://").rstrip("/")
+
+
+def events_in_socket_from_api_url(url: str):
+    return http_to_ws(url) + "/events/in"
+
+
+def events_out_socket_from_api_url(url: str):
+    return http_to_ws(url) + "/events/out"
+
+
 def get_events_client(
     reconnection_attempts: int = 10,
     checkpoint_every: int = 700,
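These helpers centralize the http(s)-to-ws(s) URL conversion that 3.0.2 duplicated in `PrefectEventsClient` and `PrefectEventSubscriber`. The expected behavior, illustrated (not part of the diff; assumes the implementation above):

from prefect.events.clients import (
    events_in_socket_from_api_url,
    events_out_socket_from_api_url,
)

# trailing slashes are stripped before the path suffix is appended
assert events_in_socket_from_api_url("https://api.prefect.cloud/api/") == "wss://api.prefect.cloud/api/events/in"
assert events_out_socket_from_api_url("http://localhost:4200/api") == "ws://localhost:4200/api/events/out"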
@@ -251,12 +264,7 @@ class PrefectEventsClient(EventsClient):
                 "api_url must be provided or set in the Prefect configuration"
             )
 
-        self._events_socket_url = (
-            api_url.replace("https://", "wss://")
-            .replace("http://", "ws://")
-            .rstrip("/")
-            + "/events/in"
-        )
+        self._events_socket_url = events_in_socket_from_api_url(api_url)
         self._connect = connect(self._events_socket_url)
         self._websocket = None
         self._reconnection_attempts = reconnection_attempts
@@ -285,11 +293,26 @@ class PrefectEventsClient(EventsClient):
             self._websocket = None
             await self._connect.__aexit__(None, None, None)
 
-        self._websocket = await self._connect.__aenter__()
-
-        # make sure we have actually connected
-        pong = await self._websocket.ping()
-        await pong
+        try:
+            self._websocket = await self._connect.__aenter__()
+            # make sure we have actually connected
+            pong = await self._websocket.ping()
+            await pong
+        except Exception as e:
+            # The client is frequently run in a background thread
+            # so we log an additional warning to ensure
+            # surfacing the error to the user.
+            logger.warning(
+                "Unable to connect to %r. "
+                "Please check your network settings to ensure websocket connections "
+                "to the API are allowed. Otherwise event data (including task run data) may be lost. "
+                "Reason: %s. "
+                "Set PREFECT_DEBUG_MODE=1 to see the full error.",
+                self._events_socket_url,
+                str(e),
+                exc_info=PREFECT_DEBUG_MODE,
+            )
+            raise
 
         events_to_resend = self._unconfirmed_events
         # Clear the unconfirmed events here, because they are going back through emit
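Note that `exc_info=PREFECT_DEBUG_MODE` attaches the full traceback only when the setting is truthy, while the warning itself always fires. The pattern in isolation (a minimal sketch with a stand-in flag, not Prefect's settings object):

import logging

logger = logging.getLogger(__name__)
DEBUG_MODE = False  # stand-in for PREFECT_DEBUG_MODE

try:
    raise ConnectionError("handshake refused")
except Exception as e:
    # the warning is always emitted; the traceback rides along only when exc_info is truthy
    logger.warning("Unable to connect: %s", e, exc_info=DEBUG_MODE)
    raise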
@@ -412,7 +435,6 @@ class PrefectCloudEventsClient(PrefectEventsClient):
             reconnection_attempts=reconnection_attempts,
             checkpoint_every=checkpoint_every,
         )
-
         self._connect = connect(
             self._events_socket_url,
             extra_headers={"Authorization": f"bearer {api_key}"},
@@ -468,11 +490,7 @@ class PrefectEventSubscriber:
         self._filter = filter or EventFilter()  # type: ignore[call-arg]
         self._seen_events = TTLCache(maxsize=SEEN_EVENTS_SIZE, ttl=SEEN_EVENTS_TTL)
 
-        socket_url = (
-            api_url.replace("https://", "wss://")
-            .replace("http://", "ws://")
-            .rstrip("/")
-        ) + "/events/out"
+        socket_url = events_out_socket_from_api_url(api_url)
 
         logger.debug("Connecting to %s", socket_url)
 
@@ -527,11 +545,11 @@ class PrefectEventSubscriber:
                 f"Reason: {e.args[0]}"
             )
         except ConnectionClosedError as e:
-            raise Exception(
-                "Unable to authenticate to the event stream. Please ensure the "
-                "provided api_key you are using is valid for this environment. "
-                f"Reason: {e.reason}"
-            ) from e
+            reason = getattr(e.rcvd, "reason", None)
+            msg = "Unable to authenticate to the event stream. Please ensure the "
+            msg += "provided api_key you are using is valid for this environment. "
+            msg += f"Reason: {reason}" if reason else ""
+            raise Exception(msg) from e
 
         from prefect.events.filters import EventOccurredFilter
 
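The old code read `e.reason`, which assumes the peer sent a close frame; in the `websockets` library, `e.rcvd` holds the received close frame and is `None` when the connection dropped without one. The `getattr` guard handles both cases. In miniature (runnable sketch with a stand-in for `websockets.frames.Close`):

class Close:  # minimal stand-in for websockets.frames.Close
    def __init__(self, code: int, reason: str):
        self.code = code
        self.reason = reason

# e.rcvd may be a Close frame or None when the peer vanished
for rcvd in (None, Close(1008, "policy violation")):
    print(getattr(rcvd, "reason", None))  # None, then "policy violation"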
prefect/filesystems.py CHANGED
@@ -5,7 +5,7 @@ from typing import Any, Dict, Optional
 
 import anyio
 import fsspec
-from pydantic import Field, SecretStr, field_validator
+from pydantic import BaseModel, Field, SecretStr, field_validator
 
 from prefect._internal.schemas.validators import (
     stringify_path,
@@ -519,4 +519,29 @@ class SMB(WritableFileSystem, WritableDeploymentStorage):
         return await self.filesystem.write_path(path=path, content=content)
 
 
+class NullFileSystem(BaseModel):
+    """
+    A file system that does not store any data.
+    """
+
+    async def read_path(self, path: str) -> None:
+        pass
+
+    async def write_path(self, path: str, content: bytes) -> None:
+        pass
+
+    async def get_directory(
+        self, from_path: Optional[str] = None, local_path: Optional[str] = None
+    ) -> None:
+        pass
+
+    async def put_directory(
+        self,
+        local_path: Optional[str] = None,
+        to_path: Optional[str] = None,
+        ignore_file: Optional[str] = None,
+    ) -> None:
+        pass
+
+
 __getattr__ = getattr_migration(__name__)
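`NullFileSystem` gives `ResultStore` (changed below in prefect/results.py) an explicit "persist no metadata" sentinel for `metadata_storage`, distinct from the `None` default. A hedged sketch of the no-op behavior:

import asyncio

from prefect.filesystems import NullFileSystem

async def demo() -> None:
    fs = NullFileSystem()
    await fs.write_path("unused/key", b"ignored")    # no-op
    assert await fs.read_path("unused/key") is None  # nothing was stored

asyncio.run(demo())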
prefect/flows.py CHANGED
@@ -51,8 +51,8 @@ from prefect._internal.concurrency.api import create_call, from_async
 from prefect.blocks.core import Block
 from prefect.client.orchestration import get_client
 from prefect.client.schemas.actions import DeploymentScheduleCreate
+from prefect.client.schemas.objects import ConcurrencyLimitConfig, FlowRun
 from prefect.client.schemas.objects import Flow as FlowSchema
-from prefect.client.schemas.objects import FlowRun
 from prefect.client.utilities import client_injector
 from prefect.docker.docker_image import DockerImage
 from prefect.events import DeploymentTriggerTypes, TriggerTypes
@@ -258,11 +258,11 @@ class Flow(Generic[P, R]):
         if not callable(fn):
             raise TypeError("'fn' must be callable")
 
-        # Validate name if given
-        if name:
-            _raise_on_name_with_banned_characters(name)
-
-        self.name = name or fn.__name__.replace("_", "-")
+        self.name = name or fn.__name__.replace("_", "-").replace(
+            "<lambda>",
+            "unknown-lambda",  # prefect API will not accept "<" or ">" in flow names
+        )
+        _raise_on_name_with_banned_characters(self.name)
 
         if flow_run_name is not None:
             if not isinstance(flow_run_name, str) and not callable(flow_run_name):
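A quick illustration of the new naming rule (assuming the `flow` decorator reaches this constructor):

from prefect import flow

anonymous = flow(lambda: 42)
assert anonymous.name == "unknown-lambda"  # "<lambda>" would be rejected by the API

@flow
def my_etl_job():
    ...

assert my_etl_job.name == "my-etl-job"     # underscores still become dashes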
@@ -535,7 +535,7 @@ class Flow(Generic[P, R]):
 
         def resolve_block_reference(data: Any) -> Any:
             if isinstance(data, dict) and "$ref" in data:
-                return Block.load_from_ref(data["$ref"])
+                return Block.load_from_ref(data["$ref"], _sync=True)
             return data
 
         try:
@@ -643,7 +643,7 @@ class Flow(Generic[P, R]):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
-        concurrency_limit: Optional[int] = None,
+        concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -715,6 +715,7 @@ class Flow(Generic[P, R]):
                 storage=self._storage,
                 entrypoint=self._entrypoint,
                 name=name,
+                flow_name=self.name,
                 interval=interval,
                 cron=cron,
                 rrule=rrule,
@@ -733,7 +734,7 @@ class Flow(Generic[P, R]):
             )  # type: ignore # TODO: remove sync_compatible
         else:
             return RunnerDeployment.from_flow(
-                self,
+                flow=self,
                 name=name,
                 interval=interval,
                 cron=cron,
@@ -798,6 +799,7 @@ class Flow(Generic[P, R]):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
+        global_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         parameters: Optional[dict] = None,
         description: Optional[str] = None,
@@ -827,6 +829,7 @@ class Flow(Generic[P, R]):
             paused: Whether or not to set this deployment as paused.
             schedules: A list of schedule objects defining when to execute runs of this deployment.
                 Used to define multiple schedules or additional scheduling options like `timezone`.
+            global_limit: The maximum number of concurrent runs allowed across all served flow instances associated with the same deployment.
             parameters: A dictionary of default parameter values to pass to runs of this deployment.
             description: A description for the created deployment. Defaults to the flow's
                 description if not provided.
@@ -838,7 +841,7 @@ class Flow(Generic[P, R]):
             pause_on_shutdown: If True, provided schedule will be paused when the serve function is stopped.
                 If False, the schedules will continue running.
             print_starting_message: Whether or not to print the starting message when flow is served.
-            limit: The maximum number of runs that can be executed concurrently.
+            limit: The maximum number of runs that can be executed concurrently by the created runner; only applies to this served flow. To apply a limit across multiple served flows, use `global_limit`.
             webserver: Whether or not to start a monitoring webserver for this flow.
             entrypoint_type: Type of entrypoint to use for the deployment. When using a module path
                 entrypoint, ensure that the module will be importable in the execution environment.
@@ -890,6 +893,7 @@ class Flow(Generic[P, R]):
             rrule=rrule,
             paused=paused,
             schedules=schedules,
+            concurrency_limit=global_limit,
             parameters=parameters,
             description=description,
             tags=tags,
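Hypothetical usage of the new option (the parameter names come from this diff; the deployment name is illustrative):

from prefect import flow

@flow
def ingest():
    ...

# limit: at most 3 concurrent runs in this process's runner.
# global_limit: forwarded as concurrency_limit to the deployment, capping
# concurrent runs across every process serving this deployment at 5.
ingest.serve(name="ingest-prod", limit=3, global_limit=5)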
@@ -1057,7 +1061,7 @@ class Flow(Generic[P, R]):
         rrule: Optional[str] = None,
         paused: Optional[bool] = None,
         schedules: Optional[List[DeploymentScheduleCreate]] = None,
-        concurrency_limit: Optional[int] = None,
+        concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         parameters: Optional[dict] = None,
         description: Optional[str] = None,
@@ -1252,6 +1256,15 @@ class Flow(Generic[P, R]):
     ) -> T:
         ...
 
+    @overload
+    def __call__(
+        self: "Flow[P, Coroutine[Any, Any, T]]",
+        *args: P.args,
+        return_state: Literal[True],
+        **kwargs: P.kwargs,
+    ) -> Awaitable[State[T]]:
+        ...
+
     @overload
     def __call__(
         self: "Flow[P, T]",
@@ -1622,7 +1635,7 @@ def flow(
     )
 
 
-def _raise_on_name_with_banned_characters(name: str) -> str:
+def _raise_on_name_with_banned_characters(name: Optional[str]) -> Optional[str]:
     """
     Raise an InvalidNameError if the given name contains any invalid
     characters.
prefect/locking/filesystem.py CHANGED
@@ -38,10 +38,10 @@ class FileSystemLockManager(LockManager):
     """
 
     def __init__(self, lock_files_directory: Path):
-        self.lock_files_directory = lock_files_directory
+        self.lock_files_directory = lock_files_directory.expanduser().resolve()
         self._locks: Dict[str, _LockInfo] = {}
 
-    def _ensure_records_directory_exists(self):
+    def _ensure_lock_files_directory_exists(self):
         self.lock_files_directory.mkdir(parents=True, exist_ok=True)
 
     def _lock_path_for_key(self, key: str) -> Path:
@@ -98,7 +98,7 @@ class FileSystemLockManager(LockManager):
         acquire_timeout: Optional[float] = None,
         hold_timeout: Optional[float] = None,
     ) -> bool:
-        self._ensure_records_directory_exists()
+        self._ensure_lock_files_directory_exists()
        lock_path = self._lock_path_for_key(key)
 
         if self.is_locked(key) and not self.is_lock_holder(key, holder):
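Normalizing the directory up front means lock paths derived from it stay stable regardless of the working directory or a `~` in configuration:

from pathlib import Path

p = Path("~/.prefect/locks")
# expanduser() resolves "~"; resolve() makes the path absolute and
# normalizes ".." segments, so the same key always maps to the same file
print(p.expanduser().resolve())  # e.g. /home/user/.prefect/locks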
prefect/plugins.py CHANGED
@@ -14,6 +14,8 @@ from typing import Any, Dict, Union
 import prefect.settings
 from prefect.utilities.compat import EntryPoints, entry_points
 
+COLLECTIONS: Union[None, Dict[str, Union[ModuleType, Exception]]] = None
+
 
 def safe_load_entrypoints(entrypoints: EntryPoints) -> Dict[str, Union[Exception, Any]]:
     """
@@ -38,11 +40,16 @@ def safe_load_entrypoints(entrypoints: EntryPoints) -> Dict[str, Union[Exception
     return results
 
 
-def load_prefect_collections() -> Dict[str, ModuleType]:
+def load_prefect_collections() -> Dict[str, Union[ModuleType, Exception]]:
     """
     Load all Prefect collections that define an entrypoint in the group
     `prefect.collections`.
     """
+    global COLLECTIONS
+
+    if COLLECTIONS is not None:
+        return COLLECTIONS
+
     collection_entrypoints: EntryPoints = entry_points(group="prefect.collections")
     collections = safe_load_entrypoints(collection_entrypoints)
@@ -61,4 +68,5 @@ def load_prefect_collections() -> Dict[str, ModuleType]:
         if prefect.settings.PREFECT_DEBUG_MODE:
             print(f"Loaded collection {name!r}.")
 
+    COLLECTIONS = collections
     return collections
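Entry-point scanning now runs once per process; later calls return the module-level cache. The memoization pattern in miniature (a sketch; `expensive_entrypoint_scan` is a hypothetical stand-in for the entry-point loading above):

from types import ModuleType
from typing import Dict, Union

_CACHE: Union[None, Dict[str, Union[ModuleType, Exception]]] = None

def expensive_entrypoint_scan() -> Dict[str, Union[ModuleType, Exception]]:
    return {}  # stand-in for entry_points(...) + safe_load_entrypoints(...)

def load_once() -> Dict[str, Union[ModuleType, Exception]]:
    global _CACHE
    if _CACHE is None:  # only the first call pays the scan cost
        _CACHE = expensive_entrypoint_scan()
    return _CACHE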
prefect/results.py CHANGED
@@ -8,6 +8,7 @@ from functools import partial
 from pathlib import Path
 from typing import (
     TYPE_CHECKING,
+    Annotated,
     Any,
     Callable,
     Dict,
@@ -25,8 +26,10 @@ from cachetools import LRUCache
 from pydantic import (
     BaseModel,
     ConfigDict,
+    Discriminator,
     Field,
     PrivateAttr,
+    Tag,
     ValidationError,
     model_serializer,
     model_validator,
@@ -47,6 +50,7 @@ from prefect.exceptions import (
 )
 from prefect.filesystems import (
     LocalFileSystem,
+    NullFileSystem,
     WritableFileSystem,
 )
 from prefect.locking.protocol import LockManager
@@ -91,18 +95,26 @@ async def get_default_result_storage() -> WritableFileSystem:
     Generate a default file system for result storage.
     """
     default_block = PREFECT_DEFAULT_RESULT_STORAGE_BLOCK.value()
+    basepath = PREFECT_LOCAL_STORAGE_PATH.value()
+
+    cache_key = (str(default_block), str(basepath))
+
+    if cache_key in _default_storages:
+        return _default_storages[cache_key]
 
     if default_block is not None:
-        return await resolve_result_storage(default_block)
+        storage = await resolve_result_storage(default_block)
+    else:
+        # Use the local file system
+        storage = LocalFileSystem(basepath=str(basepath))
 
-    # otherwise, use the local file system
-    basepath = PREFECT_LOCAL_STORAGE_PATH.value()
-    return LocalFileSystem(basepath=str(basepath))
+    _default_storages[cache_key] = storage
+    return storage
 
 
 @sync_compatible
 async def resolve_result_storage(
-    result_storage: Union[ResultStorage, UUID],
+    result_storage: Union[ResultStorage, UUID, Path],
 ) -> WritableFileSystem:
     """
     Resolve one of the valid `ResultStorage` input types into a saved block
@@ -119,6 +131,8 @@ async def resolve_result_storage(
             storage_block_id = storage_block._block_document_id
         else:
             storage_block_id = None
+    elif isinstance(result_storage, Path):
+        storage_block = LocalFileSystem(basepath=str(result_storage))
     elif isinstance(result_storage, str):
         storage_block = await Block.load(result_storage, client=client)
         storage_block_id = storage_block._block_document_id
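With the new `Path` branch, callers can hand `resolve_result_storage` a bare path and get a `LocalFileSystem` back. A hedged usage sketch (the function is `@sync_compatible`, so the synchronous call below should work from non-async code):

from pathlib import Path

from prefect.results import resolve_result_storage

storage = resolve_result_storage(Path("/tmp/my-results"))
# storage is a LocalFileSystem over /tmp/my-results, not a saved block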
@@ -208,6 +222,19 @@ def _format_user_supplied_storage_key(key: str) -> str:
 T = TypeVar("T")
 
 
+def result_storage_discriminator(x: Any) -> str:
+    if isinstance(x, dict):
+        if "block_type_slug" in x:
+            return "WritableFileSystem"
+        else:
+            return "NullFileSystem"
+    if isinstance(x, WritableFileSystem):
+        return "WritableFileSystem"
+    if isinstance(x, NullFileSystem):
+        return "NullFileSystem"
+    return "None"
+
+
 @deprecated_field(
     "persist_result",
     when=lambda x: x is not None,
@@ -235,7 +262,14 @@ class ResultStore(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
 
     result_storage: Optional[WritableFileSystem] = Field(default=None)
-    metadata_storage: Optional[WritableFileSystem] = Field(default=None)
+    metadata_storage: Annotated[
+        Union[
+            Annotated[WritableFileSystem, Tag("WritableFileSystem")],
+            Annotated[NullFileSystem, Tag("NullFileSystem")],
+            Annotated[None, Tag("None")],
+        ],
+        Discriminator(result_storage_discriminator),
+    ] = Field(default=None)
     lock_manager: Optional[LockManager] = Field(default=None)
     cache_result_in_memory: bool = Field(default=True)
     serializer: Serializer = Field(default_factory=get_default_result_serializer)
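Because block-based filesystems carry no literal field pydantic can key on, the store uses pydantic v2's callable-discriminator feature: `result_storage_discriminator` maps each input to a `Tag`, and validation only attempts the tagged union member. The same pattern in a self-contained example (illustrative, not prefect code):

from typing import Annotated, Any, Union

from pydantic import BaseModel, Discriminator, Tag

class Cat(BaseModel):
    meow: str = "meow"

class Dog(BaseModel):
    bark: str = "woof"

def pet_discriminator(v: Any) -> str:
    # route dicts by shape and instances by type, mirroring result_storage_discriminator
    if isinstance(v, dict):
        return "cat" if "meow" in v else "dog"
    return "cat" if isinstance(v, Cat) else "dog"

class Owner(BaseModel):
    pet: Annotated[
        Union[Annotated[Cat, Tag("cat")], Annotated[Dog, Tag("dog")]],
        Discriminator(pet_discriminator),
    ]

print(Owner.model_validate({"pet": {"meow": "mrrp"}}))  # validates as Cat directly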
@@ -271,6 +305,7 @@ class ResultStore(BaseModel):
             update["cache_result_in_memory"] = flow.cache_result_in_memory
         if self.result_storage is None and update.get("result_storage") is None:
             update["result_storage"] = await get_default_result_storage()
+            update["metadata_storage"] = NullFileSystem()
         return self.model_copy(update=update)
 
     @sync_compatible
@@ -284,6 +319,8 @@ class ResultStore(BaseModel):
         Returns:
             An updated result store.
         """
+        from prefect.transactions import get_transaction
+
         update = {}
         if task.result_storage is not None:
             update["result_storage"] = await resolve_result_storage(task.result_storage)
@@ -295,8 +332,30 @@ class ResultStore(BaseModel):
             update["storage_key_fn"] = partial(
                 _format_user_supplied_storage_key, task.result_storage_key
             )
+
+        # use the lock manager from a parent transaction if it exists
+        if (current_txn := get_transaction()) and isinstance(
+            current_txn.store, ResultStore
+        ):
+            update["lock_manager"] = current_txn.store.lock_manager
+
+        if task.cache_policy is not None and task.cache_policy is not NotSet:
+            if task.cache_policy.key_storage is not None:
+                storage = task.cache_policy.key_storage
+                if isinstance(storage, str) and not len(storage.split("/")) == 2:
+                    storage = Path(storage)
+                update["metadata_storage"] = await resolve_result_storage(storage)
+            # if the cache policy has a lock manager, it takes precedence over the parent transaction
+            if task.cache_policy.lock_manager is not None:
+                update["lock_manager"] = task.cache_policy.lock_manager
+
         if self.result_storage is None and update.get("result_storage") is None:
             update["result_storage"] = await get_default_result_storage()
+        if (
+            isinstance(self.metadata_storage, NullFileSystem)
+            and update.get("metadata_storage", NotSet) is NotSet
+        ):
+            update["metadata_storage"] = None
         return self.model_copy(update=update)
 
     @staticmethod
@@ -414,7 +473,9 @@ class ResultStore(BaseModel):
             )
         else:
             content = await self.result_storage.read_path(key)
-            result_record = ResultRecord.deserialize(content)
+            result_record = ResultRecord.deserialize(
+                content, backup_serializer=self.serializer
+            )
 
         if self.cache_result_in_memory:
             if self.result_storage_block_id is None and hasattr(
@@ -427,26 +488,36 @@ class ResultStore(BaseModel):
                 self.cache[cache_key] = result_record
         return result_record
 
-    def read(self, key: str, holder: Optional[str] = None) -> "ResultRecord":
+    def read(
+        self,
+        key: str,
+        holder: Optional[str] = None,
+    ) -> "ResultRecord":
         """
         Read a result record from storage.
 
         Args:
             key: The key to read the result record from.
             holder: The holder of the lock if a lock was set on the record.
+
         Returns:
             A result record.
         """
         holder = holder or self.generate_default_holder()
         return self._read(key=key, holder=holder, _sync=True)
 
-    async def aread(self, key: str, holder: Optional[str] = None) -> "ResultRecord":
+    async def aread(
+        self,
+        key: str,
+        holder: Optional[str] = None,
+    ) -> "ResultRecord":
         """
         Read a result record from storage.
 
         Args:
             key: The key to read the result record from.
             holder: The holder of the lock if a lock was set on the record.
+
 
         Returns:
             A result record.
         """
@@ -1007,17 +1078,31 @@ class ResultRecord(BaseModel, Generic[R]):
         )
 
     @classmethod
-    def deserialize(cls, data: bytes) -> "ResultRecord[R]":
+    def deserialize(
+        cls, data: bytes, backup_serializer: Optional[Serializer] = None
+    ) -> "ResultRecord[R]":
         """
         Deserialize a record from bytes.
 
         Args:
             data: the serialized record
+            backup_serializer: The serializer to use to deserialize the result record. Only
+                necessary if the provided data does not specify a serializer.
 
         Returns:
             ResultRecord: the deserialized record
         """
-        instance = cls.model_validate_json(data)
+        try:
+            instance = cls.model_validate_json(data)
+        except ValidationError:
+            if backup_serializer is None:
+                raise
+            else:
+                result = backup_serializer.loads(data)
+                return cls(
+                    metadata=ResultRecordMetadata(serializer=backup_serializer),
+                    result=result,
+                )
         if isinstance(instance.result, bytes):
             instance.result = instance.serializer.loads(instance.result)
         elif isinstance(instance.result, str):
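The fallback lets `_read` recover payloads that were written as raw serialized bytes rather than a full `ResultRecord` JSON envelope. A hedged sketch of the behavior (assumes `JSONSerializer` from `prefect.serializers`):

from prefect.results import ResultRecord
from prefect.serializers import JSONSerializer

# not a ResultRecord envelope, so model_validate_json raises ValidationError
# and the backup serializer decodes the raw bytes instead
record = ResultRecord.deserialize(
    b'{"plain": "payload"}', backup_serializer=JSONSerializer()
)
print(record.result)  # {'plain': 'payload'}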
prefect/runner/runner.py CHANGED
@@ -64,12 +64,16 @@ from prefect.client.schemas.filters import (
     FlowRunFilterStateName,
     FlowRunFilterStateType,
 )
+from prefect.client.schemas.objects import (
+    ConcurrencyLimitConfig,
+    FlowRun,
+    State,
+    StateType,
+)
 from prefect.client.schemas.objects import Flow as APIFlow
-from prefect.client.schemas.objects import FlowRun, State, StateType
 from prefect.concurrency.asyncio import (
     AcquireConcurrencySlotTimeoutError,
     ConcurrencySlotAcquisitionError,
-    concurrency,
 )
 from prefect.events import DeploymentTriggerTypes, TriggerTypes
 from prefect.events.related import tags_as_related_resources
@@ -87,7 +91,6 @@ from prefect.settings import (
     get_current_settings,
 )
 from prefect.states import (
-    AwaitingConcurrencySlot,
     Crashed,
     Pending,
     exception_to_failed_state,
@@ -236,7 +239,7 @@ class Runner:
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
-        concurrency_limit: Optional[int] = None,
+        concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -1042,28 +1045,12 @@ class Runner:
     ) -> Union[Optional[int], Exception]:
         run_logger = self._get_flow_run_logger(flow_run)
 
-        if flow_run.deployment_id:
-            deployment = await self._client.read_deployment(flow_run.deployment_id)
-            if deployment and deployment.concurrency_limit:
-                limit_name = f"deployment:{deployment.id}"
-                concurrency_ctx = concurrency
-
-                # ensure that the global concurrency limit is available
-                # and up-to-date before attempting to acquire a slot
-                await self._client.upsert_global_concurrency_limit_by_name(
-                    limit_name, deployment.concurrency_limit
-                )
-            else:
-                limit_name = ""
-                concurrency_ctx = asyncnullcontext
-
         try:
-            async with concurrency_ctx(limit_name, max_retries=0, strict=True):
-                status_code = await self._run_process(
-                    flow_run=flow_run,
-                    task_status=task_status,
-                    entrypoint=entrypoint,
-                )
+            status_code = await self._run_process(
+                flow_run=flow_run,
+                task_status=task_status,
+                entrypoint=entrypoint,
+            )
         except (
             AcquireConcurrencySlotTimeoutError,
             ConcurrencySlotAcquisitionError,
@@ -1165,26 +1152,6 @@ class Runner:
                 exc_info=True,
             )
 
-    async def _propose_scheduled_state(self, flow_run: "FlowRun") -> None:
-        run_logger = self._get_flow_run_logger(flow_run)
-        try:
-            state = await propose_state(
-                self._client,
-                AwaitingConcurrencySlot(),
-                flow_run_id=flow_run.id,
-            )
-            self._logger.info(f"Flow run {flow_run.id} now has state {state.name}")
-        except Abort as exc:
-            run_logger.info(
-                (
-                    f"Aborted rescheduling of flow run '{flow_run.id}'. "
-                    f"Server sent an abort signal: {exc}"
-                ),
-            )
-            pass
-        except Exception:
-            run_logger.exception(f"Failed to update state of flow run '{flow_run.id}'")
-
     async def _propose_crashed_state(self, flow_run: "FlowRun", message: str) -> None:
         run_logger = self._get_flow_run_logger(flow_run)
         try: