prefect-client 3.3.8.dev4__py3-none-any.whl → 3.4.1.dev1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
prefect/_build_info.py CHANGED
@@ -1,5 +1,5 @@
 # Generated by versioningit
-__version__ = "3.3.8.dev4"
-__build_date__ = "2025-05-02 08:08:48.097027+00:00"
-__git_commit__ = "5d6e0c61baba4513aa9744c942297d645fc4db2c"
+__version__ = "3.4.1.dev1"
+__build_date__ = "2025-05-04 08:07:31.383855+00:00"
+__git_commit__ = "378fc4f6b72aace32343264ee6cea198d949ba6a"
 __dirty__ = False
prefect/_versioning.py CHANGED
@@ -422,6 +422,8 @@ async def get_inferred_version_info(
         get_git_version_info,
     ]
 
+    if version_type is VersionType.SIMPLE:
+        return None
     if version_type:
        if version_type not in type_to_getter:
            raise ValueError(f"Unknown version type: {version_type}")
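Explicitly requesting the simple scheme now bypasses inference entirely. A minimal sketch of the resulting behavior (call signature inferred from the hunk above):

import asyncio

from prefect._versioning import VersionType, get_inferred_version_info

async def main() -> None:
    # SIMPLE short-circuits to None instead of consulting the getters, so
    # callers fall back to a plain, user-supplied version string.
    assert await get_inferred_version_info(VersionType.SIMPLE) is None

    # With no explicit type, inference still walks the available getters
    # (git, etc.) and returns the first match, or None.
    print(await get_inferred_version_info(None))

asyncio.run(main())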
prefect/blocks/core.py CHANGED
@@ -95,6 +95,12 @@ class InvalidBlockRegistration(Exception):
     """
 
 
+class UnknownBlockType(Exception):
+    """
+    Raised when a block type is not found in the registry.
+    """
+
+
 def _collect_nested_reference_strings(
     obj: dict[str, Any] | list[Any],
 ) -> list[dict[str, Any]]:
@@ -786,7 +792,20 @@ class Block(BaseModel, ABC):
         # before looking up the block class, but only do this once
         load_prefect_collections()
 
-        return lookup_type(cls, key)
+        try:
+            return lookup_type(cls, key)
+        except KeyError:
+            message = f"No block class found for slug {key!r}."
+            # Handle common blocks types used for storage, which is the primary use case for looking up blocks by key
+            if key == "s3-bucket":
+                message += " Please ensure that `prefect-aws` is installed."
+            elif key == "gcs-bucket":
+                message += " Please ensure that `prefect-gcp` is installed."
+            elif key == "azure-blob-storage-container":
+                message += " Please ensure that `prefect-azure` is installed."
+            else:
+                message += " Please ensure that the block class is available in the current environment."
+            raise UnknownBlockType(message)
 
     def _define_metadata_on_nested_blocks(
         self, block_document_references: dict[str, dict[str, Any]]
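Callers that previously saw a bare KeyError can now catch a friendlier error with an install hint. A hedged sketch (assuming the lookup is reached through `Block.get_block_class_from_key`, the classmethod this hunk modifies):

from prefect.blocks.core import Block, UnknownBlockType

try:
    Block.get_block_class_from_key("s3-bucket")
except UnknownBlockType as exc:
    # e.g. "No block class found for slug 's3-bucket'.
    #       Please ensure that `prefect-aws` is installed."
    print(exc)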
prefect/client/schemas/objects.py CHANGED
@@ -1459,6 +1459,10 @@ class WorkPoolStorageConfiguration(PrefectBaseModel):
     bundle_execution_step: Optional[dict[str, Any]] = Field(
         default=None, description="The bundle execution step for the work pool."
     )
+    default_result_storage_block_id: Optional[UUID] = Field(
+        default=None,
+        description="The block document ID of the default result storage block.",
+    )
 
 
 class WorkPool(ObjectBaseModel):
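For reference, a minimal sketch of the new schema field (the UUID is a placeholder):

from uuid import UUID

from prefect.client.schemas.objects import WorkPoolStorageConfiguration

config = WorkPoolStorageConfiguration(
    default_result_storage_block_id=UUID("00000000-0000-0000-0000-000000000000"),
)
print(config.default_result_storage_block_id)  # block document ID, or None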
prefect/deployments/runner.py CHANGED
@@ -35,7 +35,15 @@ import importlib
 import tempfile
 from datetime import datetime, timedelta
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, ClassVar, Iterable, List, Optional, Union
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    ClassVar,
+    Iterable,
+    List,
+    Optional,
+    Union,
+)
 from uuid import UUID
 
 from pydantic import (
@@ -58,6 +66,7 @@ from prefect._internal.schemas.validators import (
     reconcile_paused_deployment,
     reconcile_schedules_runner,
 )
+from prefect._versioning import VersionType, get_inferred_version_info
 from prefect.client.base import ServerType
 from prefect.client.orchestration import PrefectClient, get_client
 from prefect.client.schemas.actions import DeploymentScheduleCreate, DeploymentUpdate
@@ -154,6 +163,13 @@ class RunnerDeployment(BaseModel):
     version: Optional[str] = Field(
         default=None, description="An optional version for the deployment."
     )
+    version_type: Optional[VersionType] = Field(
+        default=None,
+        description=(
+            "The type of version information to use for the deployment. The version type"
+            " will be inferred if not provided."
+        ),
+    )
     tags: ListOfNonEmptyStrings = Field(
         default_factory=list,
         description="One of more tags to apply to this deployment.",
@@ -219,6 +235,7 @@ class RunnerDeployment(BaseModel):
             " a built runner."
         ),
     )
+
     # (Experimental) SLA configuration for the deployment. May be removed or modified at any time. Currently only supported on Prefect Cloud.
     _sla: Optional[Union[SlaTypes, list[SlaTypes]]] = PrivateAttr(
         default=None,
@@ -232,6 +249,9 @@ class RunnerDeployment(BaseModel):
     _parameter_openapi_schema: ParameterSchema = PrivateAttr(
         default_factory=ParameterSchema,
     )
+    _version_from_flow: bool = PrivateAttr(
+        default=False,
+    )
 
     @property
     def entrypoint_type(self) -> EntrypointType:
@@ -241,6 +261,20 @@ class RunnerDeployment(BaseModel):
     def full_name(self) -> str:
         return f"{self.flow_name}/{self.name}"
 
+    def _get_deployment_version_info(
+        self, version_type: Optional[VersionType] = None
+    ) -> VersionInfo:
+        if inferred_version := run_coro_as_sync(
+            get_inferred_version_info(version_type)
+        ):
+            if not self.version or self._version_from_flow:
+                self.version = inferred_version.version  # TODO: maybe reconsider
+
+            inferred_version.version = self.version
+            return inferred_version
+
+        return VersionInfo(version=self.version or "", type="prefect:simple")
+
     @field_validator("name", mode="before")
     @classmethod
     def validate_name(cls, value: str) -> str:
@@ -387,7 +421,7 @@ class RunnerDeployment(BaseModel):
         update_payload = self.model_dump(
             mode="json",
             exclude_unset=True,
-            exclude={"storage", "name", "flow_name", "triggers"},
+            exclude={"storage", "name", "flow_name", "triggers", "version_type"},
         )
 
         if self.storage:
@@ -444,7 +478,7 @@ class RunnerDeployment(BaseModel):
         self,
         work_pool_name: Optional[str] = None,
         image: Optional[str] = None,
-        version_info: VersionInfo | None = None,
+        version_info: Optional[VersionInfo] = None,
     ) -> UUID:
         """
         Registers this deployment with the API and returns the deployment's ID.
@@ -455,11 +489,15 @@ class RunnerDeployment(BaseModel):
             image: The registry, name, and tag of the Docker image to
                 use for this deployment. Only used when the deployment is
                 deployed to a work pool.
-            version_info: Version information for the deployment.
+            version_info: The version information to use for the deployment.
         Returns:
             The ID of the created deployment.
         """
 
+        version_info = version_info or self._get_deployment_version_info(
+            self.version_type
+        )
+
         async with get_client() as client:
             try:
                 deployment = await client.read_deployment_by_name(self.full_name)
@@ -570,6 +608,7 @@ class RunnerDeployment(BaseModel):
 
         if not self.version:
             self.version = flow.version
+            self._version_from_flow = True
         if not self.description:
             self.description = flow.description
 
@@ -592,6 +631,7 @@ class RunnerDeployment(BaseModel):
         description: Optional[str] = None,
         tags: Optional[List[str]] = None,
         version: Optional[str] = None,
+        version_type: Optional[VersionType] = None,
         enforce_parameter_schema: bool = True,
         work_pool_name: Optional[str] = None,
         work_queue_name: Optional[str] = None,
@@ -622,6 +662,7 @@ class RunnerDeployment(BaseModel):
             tags: A list of tags to associate with the created deployment for organizational
                 purposes.
             version: A version for the created deployment. Defaults to the flow's version.
+            version_type: The type of version information to use for the deployment.
             enforce_parameter_schema: Whether or not the Prefect API should enforce the
                 parameter schema for this deployment.
             work_pool_name: The name of the work pool to use for this deployment.
@@ -662,6 +703,7 @@ class RunnerDeployment(BaseModel):
             parameters=parameters or {},
             description=description,
             version=version,
+            version_type=version_type,
             enforce_parameter_schema=enforce_parameter_schema,
             work_pool_name=work_pool_name,
             work_queue_name=work_queue_name,
@@ -837,6 +879,7 @@ class RunnerDeployment(BaseModel):
         description: Optional[str] = None,
         tags: Optional[List[str]] = None,
         version: Optional[str] = None,
+        version_type: Optional[VersionType] = None,
         enforce_parameter_schema: bool = True,
         work_pool_name: Optional[str] = None,
         work_queue_name: Optional[str] = None,
@@ -870,6 +913,8 @@ class RunnerDeployment(BaseModel):
             tags: A list of tags to associate with the created deployment for organizational
                 purposes.
             version: A version for the created deployment. Defaults to the flow's version.
+            version_type: The type of version information to use for the deployment. The version type
+                will be inferred if not provided.
             enforce_parameter_schema: Whether or not the Prefect API should enforce the
                 parameter schema for this deployment.
             work_pool_name: The name of the work pool to use for this deployment.
@@ -921,6 +966,7 @@ class RunnerDeployment(BaseModel):
             parameters=parameters or {},
             description=description,
             version=version,
+            version_type=version_type,
             entrypoint=entrypoint,
             enforce_parameter_schema=enforce_parameter_schema,
             storage=storage,
@@ -959,6 +1005,7 @@ class RunnerDeployment(BaseModel):
         description: Optional[str] = None,
         tags: Optional[List[str]] = None,
         version: Optional[str] = None,
+        version_type: Optional[VersionType] = None,
         enforce_parameter_schema: bool = True,
         work_pool_name: Optional[str] = None,
         work_queue_name: Optional[str] = None,
@@ -992,6 +1039,8 @@ class RunnerDeployment(BaseModel):
             tags: A list of tags to associate with the created deployment for organizational
                 purposes.
             version: A version for the created deployment. Defaults to the flow's version.
+            version_type: The type of version information to use for the deployment. The version type
+                will be inferred if not provided.
             enforce_parameter_schema: Whether or not the Prefect API should enforce the
                 parameter schema for this deployment.
             work_pool_name: The name of the work pool to use for this deployment.
@@ -1041,6 +1090,7 @@ class RunnerDeployment(BaseModel):
             parameters=parameters or {},
             description=description,
             version=version,
+            version_type=version_type,
             entrypoint=entrypoint,
             enforce_parameter_schema=enforce_parameter_schema,
             storage=storage,
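Together these hunks make `apply()` resolve version information itself when no `version_info` is passed. A standalone restatement of the precedence in `_get_deployment_version_info` (a sketch, not the actual method):

from typing import Optional

def choose_version(
    user_version: Optional[str],
    flow_version: Optional[str],
    inferred: Optional[str],
) -> str:
    # An inferred version (e.g. a git SHA) is adopted only when the user gave
    # no version, or the version merely defaulted from the flow object.
    version = user_version or flow_version
    if inferred and not user_version:
        version = inferred
    return version or ""  # otherwise fall back to the "prefect:simple" type

print(choose_version(None, "1.0", "5d6e0c6"))  # -> "5d6e0c6"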
prefect/events/clients.py CHANGED
@@ -684,8 +684,8 @@ class PrefectEventSubscriber:
 
     async def __aexit__(
         self,
-        exc_type: Optional[Type[Exception]],
-        exc_val: Optional[Exception],
+        exc_type: Optional[Type[BaseException]],
+        exc_val: Optional[BaseException],
         exc_tb: Optional[TracebackType],
     ) -> None:
         self._websocket = None
prefect/flows.py CHANGED
@@ -49,6 +49,7 @@ from typing_extensions import Literal, ParamSpec
 
 from prefect._experimental.sla.objects import SlaTypes
 from prefect._internal.concurrency.api import create_call, from_async
+from prefect._versioning import VersionType
 from prefect.blocks.core import Block
 from prefect.client.schemas.filters import WorkerFilter, WorkerFilterStatus
 from prefect.client.schemas.objects import ConcurrencyLimitConfig, FlowRun
@@ -704,6 +705,7 @@ class Flow(Generic[P, R]):
         description: Optional[str] = None,
         tags: Optional[list[str]] = None,
         version: Optional[str] = None,
+        version_type: Optional[VersionType] = None,
         enforce_parameter_schema: bool = True,
         work_pool_name: Optional[str] = None,
         work_queue_name: Optional[str] = None,
@@ -733,6 +735,8 @@ class Flow(Generic[P, R]):
             tags: A list of tags to associate with the created deployment for organizational
                 purposes.
             version: A version for the created deployment. Defaults to the flow's version.
+            version_type: The type of version to use for the created deployment. The version type
+                will be inferred if not provided.
             enforce_parameter_schema: Whether or not the Prefect API should enforce the
                 parameter schema for the created deployment.
             work_pool_name: The name of the work pool to use for this deployment.
@@ -787,6 +791,7 @@ class Flow(Generic[P, R]):
             parameters=parameters or {},
             description=description,
             version=version,
+            version_type=version_type,
             enforce_parameter_schema=enforce_parameter_schema,
             work_pool_name=work_pool_name,
             work_queue_name=work_queue_name,
@@ -809,6 +814,7 @@ class Flow(Generic[P, R]):
             parameters=parameters or {},
             description=description,
             version=version,
+            version_type=version_type,
             enforce_parameter_schema=enforce_parameter_schema,
             work_pool_name=work_pool_name,
             work_queue_name=work_queue_name,
@@ -840,6 +846,7 @@ class Flow(Generic[P, R]):
         description: Optional[str] = None,
         tags: Optional[list[str]] = None,
         version: Optional[str] = None,
+        version_type: Optional[VersionType] = None,
         enforce_parameter_schema: bool = True,
         work_pool_name: Optional[str] = None,
         work_queue_name: Optional[str] = None,
@@ -869,6 +876,8 @@ class Flow(Generic[P, R]):
             tags: A list of tags to associate with the created deployment for organizational
                 purposes.
             version: A version for the created deployment. Defaults to the flow's version.
+            version_type: The type of version to use for the created deployment. The version type
+                will be inferred if not provided.
             enforce_parameter_schema: Whether or not the Prefect API should enforce the
                 parameter schema for the created deployment.
             work_pool_name: The name of the work pool to use for this deployment.
@@ -925,6 +934,7 @@ class Flow(Generic[P, R]):
             parameters=parameters or {},
             description=description,
             version=version,
+            version_type=version_type,
             enforce_parameter_schema=enforce_parameter_schema,
             work_pool_name=work_pool_name,
             work_queue_name=work_queue_name,
@@ -949,6 +959,7 @@ class Flow(Generic[P, R]):
             parameters=parameters or {},
             description=description,
             version=version,
+            version_type=version_type,
             enforce_parameter_schema=enforce_parameter_schema,
             work_pool_name=work_pool_name,
             work_queue_name=work_queue_name,
@@ -1375,6 +1386,7 @@ class Flow(Generic[P, R]):
         description: Optional[str] = None,
         tags: Optional[list[str]] = None,
         version: Optional[str] = None,
+        version_type: Optional[VersionType] = None,
         enforce_parameter_schema: bool = True,
         entrypoint_type: EntrypointType = EntrypointType.FILE_PATH,
         print_next_steps: bool = True,
@@ -1426,6 +1438,8 @@ class Flow(Generic[P, R]):
             tags: A list of tags to associate with the created deployment for organizational
                 purposes.
             version: A version for the created deployment. Defaults to the flow's version.
+            version_type: The type of version to use for the created deployment. The version type
+                will be inferred if not provided.
             enforce_parameter_schema: Whether or not the Prefect API should enforce the
                 parameter schema for the created deployment.
             entrypoint_type: Type of entrypoint to use for the deployment. When using a module path
@@ -1510,6 +1524,7 @@ class Flow(Generic[P, R]):
             description=description,
             tags=tags,
             version=version,
+            version_type=version_type,
             enforce_parameter_schema=enforce_parameter_schema,
             work_queue_name=work_queue_name,
             job_variables=job_variables,
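The new `version_type` parameter is threaded through all of the deployment-creating methods above. A hedged usage sketch (pool and image names are placeholders):

from prefect import flow
from prefect._versioning import VersionType

@flow
def etl() -> None:
    ...

if __name__ == "__main__":
    etl.deploy(
        name="etl-prod",
        work_pool_name="my-pool",  # placeholder
        image="registry.example.com/etl:latest",  # placeholder
        version_type=VersionType.SIMPLE,  # skip inference; use `version` as given
    )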
prefect/infrastructure/provisioners/cloud_run.py CHANGED
@@ -133,6 +133,7 @@ class CloudRunPushProvisioner:
            await self._run_command(
                f"gcloud iam service-accounts create {self._service_account_name}"
                ' --display-name "Prefect Cloud Run Service Account"'
+                f" --project={self._project}"
            )
        except subprocess.CalledProcessError as e:
            if "already exists" not in e.output.decode("utf-8"):
prefect/runner/_observers.py ADDED
@@ -0,0 +1,60 @@
+import asyncio
+import uuid
+from contextlib import AsyncExitStack
+from typing import Any, Protocol
+
+from prefect.events.clients import PrefectEventSubscriber, get_events_subscriber
+from prefect.events.filters import EventFilter, EventNameFilter
+from prefect.logging.loggers import get_logger
+
+
+class OnCancellingCallback(Protocol):
+    def __call__(self, flow_run_id: uuid.UUID) -> None: ...
+
+
+class FlowRunCancellingObserver:
+    def __init__(self, on_cancelling: OnCancellingCallback):
+        self.logger = get_logger("FlowRunCancellingObserver")
+        self.on_cancelling = on_cancelling
+        self._events_subscriber: PrefectEventSubscriber | None
+        self._exit_stack = AsyncExitStack()
+
+    async def _consume_events(self):
+        if self._events_subscriber is None:
+            raise RuntimeError(
+                "Events subscriber not initialized. Please use `async with` to initialize the observer."
+            )
+        async for event in self._events_subscriber:
+            try:
+                flow_run_id = uuid.UUID(
+                    event.resource["prefect.resource.id"].replace(
+                        "prefect.flow-run.", ""
+                    )
+                )
+                self.on_cancelling(flow_run_id)
+            except ValueError:
+                self.logger.debug(
+                    "Received event with invalid flow run ID: %s",
+                    event.resource["prefect.resource.id"],
+                )
+
+    async def __aenter__(self):
+        self._events_subscriber = await self._exit_stack.enter_async_context(
+            get_events_subscriber(
+                filter=EventFilter(
+                    event=EventNameFilter(name=["prefect.flow-run.Cancelling"])
+                )
+            )
+        )
+        self._consumer_task = asyncio.create_task(self._consume_events())
+        return self
+
+    async def __aexit__(self, *exc_info: Any):
+        await self._exit_stack.__aexit__(*exc_info)
+        self._consumer_task.cancel()
+        try:
+            await self._consumer_task
+        except asyncio.CancelledError:
+            pass
+        except Exception:
+            self.logger.exception("Error consuming events")
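This new module replaces the runner's polling loop for cancellation with an event subscription. A hedged usage sketch (requires a reachable Prefect API for the events websocket):

import asyncio
import uuid

from prefect.runner._observers import FlowRunCancellingObserver

def on_cancelling(flow_run_id: uuid.UUID) -> None:
    print(f"flow run {flow_run_id} is being cancelled")

async def main() -> None:
    async with FlowRunCancellingObserver(on_cancelling=on_cancelling):
        # The callback fires for each prefect.flow-run.Cancelling event
        # observed while the context is open.
        await asyncio.sleep(60)

if __name__ == "__main__":
    asyncio.run(main())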
prefect/runner/runner.py CHANGED
@@ -46,6 +46,8 @@ import subprocess
 import sys
 import tempfile
 import threading
+import uuid
+from contextlib import AsyncExitStack
 from copy import deepcopy
 from functools import partial
 from pathlib import Path
@@ -80,13 +82,6 @@ from prefect._internal.concurrency.api import (
     from_sync,
 )
 from prefect.client.orchestration import PrefectClient, get_client
-from prefect.client.schemas.filters import (
-    FlowRunFilter,
-    FlowRunFilterId,
-    FlowRunFilterState,
-    FlowRunFilterStateName,
-    FlowRunFilterStateType,
-)
 from prefect.client.schemas.objects import (
     ConcurrencyLimitConfig,
     State,
@@ -100,6 +95,7 @@ from prefect.events.utilities import emit_event
 from prefect.exceptions import Abort, ObjectNotFound
 from prefect.flows import Flow, FlowStateHook, load_flow_from_flow_run
 from prefect.logging.loggers import PrefectLogAdapter, flow_run_logger, get_logger
+from prefect.runner._observers import FlowRunCancellingObserver
 from prefect.runner.storage import RunnerStorage
 from prefect.schedules import Schedule
 from prefect.settings import (
@@ -229,6 +225,7 @@ class Runner:
            raise ValueError("Heartbeat must be 30 seconds or greater.")
        self._heartbeat_task: asyncio.Task[None] | None = None
 
+        self._exit_stack = AsyncExitStack()
        self._limiter: anyio.CapacityLimiter | None = None
        self._client: PrefectClient = get_client()
        self._submitting_flow_run_ids: set[UUID] = set()
@@ -501,15 +498,6 @@ class Runner:
                    jitter_range=0.3,
                )
            )
-            loops_task_group.start_soon(
-                partial(
-                    critical_service_loop,
-                    workload=runner._check_for_cancelled_flow_runs,
-                    interval=self.query_seconds * 2,
-                    run_once=run_once,
-                    jitter_range=0.3,
-                )
-            )
 
    def execute_in_background(
        self, func: Callable[..., Any], *args: Any, **kwargs: Any
@@ -583,58 +571,42 @@ class Runner:
        if not self._acquire_limit_slot(flow_run_id):
            return
 
-        async with anyio.create_task_group() as tg:
-            with anyio.CancelScope():
-                self._submitting_flow_run_ids.add(flow_run_id)
-                flow_run = await self._client.read_flow_run(flow_run_id)
-
-                process: (
-                    anyio.abc.Process | Exception
-                ) = await self._runs_task_group.start(
-                    partial(
-                        self._submit_run_and_capture_errors,
-                        flow_run=flow_run,
-                        entrypoint=entrypoint,
-                        command=command,
-                        cwd=cwd,
-                        env=env,
-                        stream_output=stream_output,
-                    ),
-                )
-                if isinstance(process, Exception):
-                    return
+        self._submitting_flow_run_ids.add(flow_run_id)
+        flow_run = await self._client.read_flow_run(flow_run_id)
 
-                task_status.started(process.pid)
+        process: anyio.abc.Process | Exception = await self._runs_task_group.start(
+            partial(
+                self._submit_run_and_capture_errors,
+                flow_run=flow_run,
+                entrypoint=entrypoint,
+                command=command,
+                cwd=cwd,
+                env=env,
+                stream_output=stream_output,
+            ),
+        )
+        if isinstance(process, Exception):
+            return
 
-                if self.heartbeat_seconds is not None:
-                    await self._emit_flow_run_heartbeat(flow_run)
+        task_status.started(process.pid)
 
-                async with self._flow_run_process_map_lock:
-                    # Only add the process to the map if it is still running
-                    if process.returncode is None:
-                        self._flow_run_process_map[flow_run.id] = ProcessMapEntry(
-                            pid=process.pid, flow_run=flow_run
-                        )
+        if self.heartbeat_seconds is not None:
+            await self._emit_flow_run_heartbeat(flow_run)
 
-                # We want this loop to stop when the flow run process exits
-                # so we'll check if the flow run process is still alive on
-                # each iteration and cancel the task group if it is not.
-                workload = partial(
-                    self._check_for_cancelled_flow_runs,
-                    should_stop=lambda: not self._flow_run_process_map,
-                    on_stop=tg.cancel_scope.cancel,
+        async with self._flow_run_process_map_lock:
+            # Only add the process to the map if it is still running
+            if process.returncode is None:
+                self._flow_run_process_map[flow_run.id] = ProcessMapEntry(
+                    pid=process.pid, flow_run=flow_run
                )
 
-                tg.start_soon(
-                    partial(
-                        critical_service_loop,
-                        workload=workload,
-                        interval=self.query_seconds,
-                        jitter_range=0.3,
-                    )
-                )
+        while True:
+            # Wait until flow run execution is complete and the process has been removed from the map
+            await anyio.sleep(0.1)
+            if self._flow_run_process_map.get(flow_run.id) is None:
+                break
 
-        return process
+        return process
 
    async def execute_bundle(
        self,
@@ -673,24 +645,8 @@ class Runner:
        )
        self._flow_run_bundle_map[flow_run.id] = bundle
 
-        tasks: list[asyncio.Task[None]] = []
-        tasks.append(
-            asyncio.create_task(
-                critical_service_loop(
-                    workload=self._check_for_cancelled_flow_runs,
-                    interval=self.query_seconds,
-                    jitter_range=0.3,
-                )
-            )
-        )
-
        await anyio.to_thread.run_sync(process.join)
 
-        for task in tasks:
-            task.cancel()
-
-        await asyncio.gather(*tasks, return_exceptions=True)
-
        self._flow_run_process_map.pop(flow_run.id)
 
        flow_run_logger = self._get_flow_run_logger(flow_run)
@@ -1000,83 +956,11 @@ class Runner:
        self.last_polled: datetime.datetime = now("UTC")
        return await self._submit_scheduled_flow_runs(flow_run_response=runs_response)
 
-    async def _check_for_cancelled_flow_runs(
-        self,
-        should_stop: Callable[[], bool] = lambda: False,
-        on_stop: Callable[[], None] = lambda: None,
+    async def _cancel_run(
+        self, flow_run: "FlowRun | uuid.UUID", state_msg: Optional[str] = None
    ):
-        """
-        Checks for flow runs with CANCELLING a cancelling state and attempts to
-        cancel them.
-
-        Args:
-            should_stop: A callable that returns a boolean indicating whether or not
-                the runner should stop checking for cancelled flow runs.
-            on_stop: A callable that is called when the runner should stop checking
-                for cancelled flow runs.
-        """
-        if self.stopping:
-            return
-        if not self.started:
-            raise RuntimeError(
-                "Runner is not set up. Please make sure you are running this runner "
-                "as an async context manager."
-            )
-
-        if should_stop():
-            self._logger.debug(
-                "Runner has no active flow runs or deployments. Sending message to loop"
-                " service that no further cancellation checks are needed."
-            )
-            on_stop()
-
-        self._logger.debug("Checking for cancelled flow runs...")
-
-        named_cancelling_flow_runs = await self._client.read_flow_runs(
-            flow_run_filter=FlowRunFilter(
-                state=FlowRunFilterState(
-                    type=FlowRunFilterStateType(any_=[StateType.CANCELLED]),
-                    name=FlowRunFilterStateName(any_=["Cancelling"]),
-                ),
-                # Avoid duplicate cancellation calls
-                id=FlowRunFilterId(
-                    any_=list(
-                        self._flow_run_process_map.keys()
-                        - self._cancelling_flow_run_ids
-                    )
-                ),
-            ),
-        )
-
-        typed_cancelling_flow_runs = await self._client.read_flow_runs(
-            flow_run_filter=FlowRunFilter(
-                state=FlowRunFilterState(
-                    type=FlowRunFilterStateType(any_=[StateType.CANCELLING]),
-                ),
-                # Avoid duplicate cancellation calls
-                id=FlowRunFilterId(
-                    any_=list(
-                        self._flow_run_process_map.keys()
-                        - self._cancelling_flow_run_ids
-                    )
-                ),
-            ),
-        )
-
-        cancelling_flow_runs = named_cancelling_flow_runs + typed_cancelling_flow_runs
-
-        if cancelling_flow_runs:
-            self._logger.info(
-                f"Found {len(cancelling_flow_runs)} flow runs awaiting cancellation."
-            )
-
-        for flow_run in cancelling_flow_runs:
-            self._cancelling_flow_run_ids.add(flow_run.id)
-            self._runs_task_group.start_soon(self._cancel_run, flow_run)
-
-        return cancelling_flow_runs
-
-    async def _cancel_run(self, flow_run: "FlowRun", state_msg: Optional[str] = None):
+        if isinstance(flow_run, uuid.UUID):
+            flow_run = await self._client.read_flow_run(flow_run)
        run_logger = self._get_flow_run_logger(flow_run)
 
        process_map_entry = self._flow_run_process_map.get(flow_run.id)
@@ -1301,7 +1185,7 @@ class Runner:
        except anyio.WouldBlock:
            if TYPE_CHECKING:
                assert self._limiter is not None
-            self._logger.info(
+            self._logger.debug(
                f"Flow run limit reached; {self._limiter.borrowed_tokens} flow runs"
                " in progress. You can control this limit by adjusting the "
                "PREFECT_RUNNER_PROCESS_LIMIT setting."
@@ -1543,43 +1427,6 @@ class Runner:
 
        await self._client.set_flow_run_state(flow_run.id, state, force=True)
 
-        # Do not remove the flow run from the cancelling set immediately because
-        # the API caches responses for the `read_flow_runs` and we do not want to
-        # duplicate cancellations.
-        await self._schedule_task(
-            60 * 10, self._cancelling_flow_run_ids.remove, flow_run.id
-        )
-
-    async def _schedule_task(
-        self, __in_seconds: int, fn: Callable[..., Any], *args: Any, **kwargs: Any
-    ) -> None:
-        """
-        Schedule a background task to start after some time.
-
-        These tasks will be run immediately when the runner exits instead of waiting.
-
-        The function may be async or sync. Async functions will be awaited.
-        """
-
-        async def wrapper(task_status: anyio.abc.TaskStatus[None]) -> None:
-            # If we are shutting down, do not sleep; otherwise sleep until the scheduled
-            # time or shutdown
-            if self.started:
-                with anyio.CancelScope() as scope:
-                    self._scheduled_task_scopes.add(scope)
-                    task_status.started()
-                    await anyio.sleep(__in_seconds)
-
-                    self._scheduled_task_scopes.remove(scope)
-            else:
-                task_status.started()
-
-            result = fn(*args, **kwargs)
-            if asyncio.iscoroutine(result):
-                await result
-
-        await self._runs_task_group.start(wrapper)
-
    async def _run_on_cancellation_hooks(
        self,
        flow_run: "FlowRun",
@@ -1647,11 +1494,18 @@ class Runner:
        if not hasattr(self, "_loop") or not self._loop:
            self._loop = asyncio.get_event_loop()
 
-        await self._client.__aenter__()
+        await self._exit_stack.enter_async_context(
+            FlowRunCancellingObserver(
+                on_cancelling=lambda flow_run_id: self._runs_task_group.start_soon(
+                    self._cancel_run, flow_run_id
+                )
+            )
+        )
+        await self._exit_stack.enter_async_context(self._client)
 
        if not hasattr(self, "_runs_task_group") or not self._runs_task_group:
            self._runs_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
-            await self._runs_task_group.__aenter__()
+            await self._exit_stack.enter_async_context(self._runs_task_group)
 
        if not hasattr(self, "_loops_task_group") or not self._loops_task_group:
            self._loops_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
@@ -1677,11 +1531,7 @@ class Runner:
        for scope in self._scheduled_task_scopes:
            scope.cancel()
 
-        if self._runs_task_group:
-            await self._runs_task_group.__aexit__(*exc_info)
-
-        if self._client:
-            await self._client.__aexit__(*exc_info)
+        await self._exit_stack.__aexit__(*exc_info)
 
        shutil.rmtree(str(self._tmp_dir))
        del self._runs_task_group, self._loops_task_group
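The runner's setup and teardown now funnel through a single AsyncExitStack instead of paired __aenter__/__aexit__ calls. A generic sketch of the pattern (not runner-specific):

import asyncio
from contextlib import AsyncExitStack

class Resource:
    def __init__(self, name: str) -> None:
        self.name = name
    async def __aenter__(self) -> "Resource":
        print(f"open {self.name}")
        return self
    async def __aexit__(self, *exc_info: object) -> None:
        print(f"close {self.name}")

async def demo() -> None:
    stack = AsyncExitStack()
    # e.g. the cancelling observer, the API client, and the task group
    await stack.enter_async_context(Resource("observer"))
    await stack.enter_async_context(Resource("client"))
    await stack.__aexit__(None, None, None)  # closes both, in reverse order

asyncio.run(demo())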
prefect/server/api/server.py CHANGED
@@ -62,6 +62,19 @@ from prefect.settings import (
 )
 from prefect.utilities.hashing import hash_objects
 
+if os.environ.get("PREFECT_LOGFIRE_ENABLED"):
+    import logfire  # pyright: ignore
+
+    token: str | None = os.environ.get("PREFECT_LOGFIRE_WRITE_TOKEN")
+    if token is None:
+        raise ValueError(
+            "PREFECT_LOGFIRE_WRITE_TOKEN must be set when PREFECT_LOGFIRE_ENABLED is true"
+        )
+
+    logfire.configure(token=token)  # pyright: ignore
+else:
+    logfire = None
+
 if TYPE_CHECKING:
     import logging
 
@@ -250,7 +263,7 @@ def copy_directory(directory: str, path: str) -> None:
                shutil.rmtree(destination)
            shutil.copytree(source, destination, symlinks=True)
            # ensure copied files are writeable
-            for root, dirs, files in os.walk(destination):
+            for root, _, files in os.walk(destination):
                for f in files:
                    os.chmod(os.path.join(root, f), 0o700)
        else:
@@ -329,6 +342,10 @@ def create_api_app(
 
    fast_api_app_kwargs = fast_api_app_kwargs or {}
    api_app = FastAPI(title=API_TITLE, **fast_api_app_kwargs)
+
+    if logfire:
+        logfire.instrument_fastapi(api_app)  # pyright: ignore
+
    api_app.add_middleware(GZipMiddleware)
 
    @api_app.get(health_check_path, tags=["Root"])
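The Logfire hook above runs at import time of prefect.server.api.server, so the environment variables must be set before the module is imported. A hedged sketch (token value is a placeholder; requires the optional logfire package):

import os

os.environ["PREFECT_LOGFIRE_ENABLED"] = "1"
os.environ["PREFECT_LOGFIRE_WRITE_TOKEN"] = "<logfire-write-token>"  # placeholder

from prefect.server.api.server import create_api_app  # noqa: E402

app = create_api_app()  # FastAPI app; logfire.instrument_fastapi is applied when enabled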
prefect/server/api/workers.py CHANGED
@@ -14,6 +14,7 @@ from fastapi import (
     Path,
     status,
 )
+from packaging.version import Version
 from sqlalchemy.ext.asyncio import AsyncSession
 
 import prefect.server.api.dependencies as dependencies
@@ -157,6 +158,9 @@ class WorkerLookups:
 async def create_work_pool(
     work_pool: schemas.actions.WorkPoolCreate,
     db: PrefectDBInterface = Depends(provide_database_interface),
+    prefect_client_version: Optional[str] = Depends(
+        dependencies.get_prefect_client_version
+    ),
 ) -> schemas.core.WorkPool:
     """
     Creates a new work pool. If a work pool with the same
@@ -186,7 +190,14 @@ async def create_work_pool(
            work_pool=model,
        )
 
-        return schemas.core.WorkPool.model_validate(model, from_attributes=True)
+        ret = schemas.core.WorkPool.model_validate(model, from_attributes=True)
+        if prefect_client_version and Version(prefect_client_version) <= Version(
+            "3.3.7"
+        ):
+            # Client versions 3.3.7 and below do not support the default_result_storage_block_id field and will error
+            # when receiving it.
+            del ret.storage_configuration.default_result_storage_block_id
+        return ret
 
    except sa.exc.IntegrityError:
        raise HTTPException(
@@ -200,6 +211,9 @@ async def read_work_pool(
    work_pool_name: str = Path(..., description="The work pool name", alias="name"),
    worker_lookups: WorkerLookups = Depends(WorkerLookups),
    db: PrefectDBInterface = Depends(provide_database_interface),
+    prefect_client_version: Optional[str] = Depends(
+        dependencies.get_prefect_client_version
+    ),
 ) -> schemas.core.WorkPool:
    """
    Read a work pool by name
@@ -212,7 +226,18 @@ async def read_work_pool(
        orm_work_pool = await models.workers.read_work_pool(
            session=session, work_pool_id=work_pool_id
        )
-        return schemas.core.WorkPool.model_validate(orm_work_pool, from_attributes=True)
+        work_pool = schemas.core.WorkPool.model_validate(
+            orm_work_pool, from_attributes=True
+        )
+
+        if prefect_client_version and Version(prefect_client_version) <= Version(
+            "3.3.7"
+        ):
+            # Client versions 3.3.7 and below do not support the default_result_storage_block_id field and will error
+            # when receiving it.
+            del work_pool.storage_configuration.default_result_storage_block_id
+
+        return work_pool
 
 
 @router.post("/filter")
@@ -220,8 +245,10 @@ async def read_work_pools(
    work_pools: Optional[schemas.filters.WorkPoolFilter] = None,
    limit: int = dependencies.LimitBody(),
    offset: int = Body(0, ge=0),
-    worker_lookups: WorkerLookups = Depends(WorkerLookups),
    db: PrefectDBInterface = Depends(provide_database_interface),
+    prefect_client_version: Optional[str] = Depends(
+        dependencies.get_prefect_client_version
+    ),
 ) -> List[schemas.core.WorkPool]:
    """
    Read multiple work pools
@@ -233,10 +260,18 @@ async def read_work_pools(
            offset=offset,
            limit=limit,
        )
-        return [
+        ret = [
            schemas.core.WorkPool.model_validate(w, from_attributes=True)
            for w in orm_work_pools
        ]
+        if prefect_client_version and Version(prefect_client_version) <= Version(
+            "3.3.7"
+        ):
+            # Client versions 3.3.7 and below do not support the default_result_storage_block_id field and will error
+            # when receiving it.
+            for work_pool in ret:
+                del work_pool.storage_configuration.default_result_storage_block_id
+        return ret
 
 
 @router.post("/count")
prefect/settings/base.py CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import inspect
 from functools import partial
-from typing import Any, Dict, Tuple, Type
+from typing import Any
 
 from pydantic import (
     AliasChoices,
@@ -32,12 +32,12 @@ class PrefectBaseSettings(BaseSettings):
     @classmethod
     def settings_customise_sources(
         cls,
-        settings_cls: Type[BaseSettings],
+        settings_cls: type[BaseSettings],
         init_settings: PydanticBaseSettingsSource,
         env_settings: PydanticBaseSettingsSource,
         dotenv_settings: PydanticBaseSettingsSource,
         file_secret_settings: PydanticBaseSettingsSource,
-    ) -> Tuple[PydanticBaseSettingsSource, ...]:
+    ) -> tuple[PydanticBaseSettingsSource, ...]:
        """
        Define an order for Prefect settings sources.
 
@@ -93,9 +93,9 @@ class PrefectBaseSettings(BaseSettings):
        exclude_unset: bool = False,
        include_secrets: bool = True,
        include_aliases: bool = False,
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
        """Convert the settings object to a dictionary of environment variables."""
-        env: Dict[str, Any] = self.model_dump(
+        env: dict[str, Any] = self.model_dump(
            exclude_unset=exclude_unset,
            mode="json",
            context={"include_secrets": include_secrets},
@@ -192,7 +192,7 @@ class PrefectSettingsConfigDict(SettingsConfigDict, total=False):
 
 
 def _add_environment_variables(
-    schema: Dict[str, Any], model: Type[PrefectBaseSettings]
+    schema: dict[str, Any], model: type[PrefectBaseSettings]
 ) -> None:
    for property in schema["properties"]:
        env_vars: list[str] = []
@@ -212,7 +212,7 @@ def _add_environment_variables(
 
 
 def build_settings_config(
-    path: Tuple[str, ...] = tuple(), frozen: bool = False
+    path: tuple[str, ...] = tuple(), frozen: bool = False
 ) -> PrefectSettingsConfigDict:
    env_prefix = f"PREFECT_{'_'.join(path).upper()}_" if path else "PREFECT_"
    return PrefectSettingsConfigDict(
prefect/settings/models/experiments.py CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from typing import ClassVar
 
 from pydantic import AliasChoices, AliasPath, Field
prefect/workers/base.py CHANGED
@@ -55,6 +55,7 @@ from prefect.exceptions import (
     Abort,
     ObjectNotFound,
 )
+from prefect.filesystems import LocalFileSystem
 from prefect.futures import PrefectFlowRunFuture
 from prefect.logging.loggers import (
     PrefectLogAdapter,
@@ -722,15 +723,6 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
        if self._runs_task_group is None:
            raise RuntimeError("Worker not properly initialized")
 
-        from prefect.results import get_result_store
-
-        current_result_store = get_result_store()
-        if current_result_store.result_storage is None and flow.result_storage is None:
-            self._logger.warning(
-                f"Flow {flow.name!r} has no result storage configured. Please configure "
-                "result storage for the flow if you want to retrieve the result for the flow run."
-            )
-
        flow_run = await self._runs_task_group.start(
            partial(
                self._submit_adhoc_run,
@@ -766,6 +758,32 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
                "work-pool storage configure`."
            )
 
+        from prefect.results import aresolve_result_storage, get_result_store
+
+        current_result_store = get_result_store()
+        # Check result storage and use the work pool default if needed
+        if (
+            current_result_store.result_storage is None
+            or isinstance(current_result_store.result_storage, LocalFileSystem)
+            and flow.result_storage is None
+        ):
+            if (
+                self.work_pool.storage_configuration.default_result_storage_block_id
+                is None
+            ):
+                self._logger.warning(
+                    f"Flow {flow.name!r} has no result storage configured. Please configure "
+                    "result storage for the flow if you want to retrieve the result for the flow run."
+                )
+            else:
+                # Use the work pool's default result storage block for the flow run to ensure the caller can retrieve the result
+                flow = flow.with_options(
+                    result_storage=await aresolve_result_storage(
+                        self.work_pool.storage_configuration.default_result_storage_block_id
+                    ),
+                    persist_result=True,
+                )
+
        bundle_key = str(uuid.uuid4())
        upload_command = convert_step_to_command(
            self.work_pool.storage_configuration.bundle_upload_step,
@@ -778,8 +796,9 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
 
        job_variables = (job_variables or {}) | {"command": " ".join(execute_command)}
        parameters = parameters or {}
-        parent_task_run = None
 
+        # Create a parent task run if this is a child flow run to ensure it shows up as a child flow in the UI
+        parent_task_run = None
        if flow_run_ctx := FlowRunContext.get():
            parent_task = Task[Any, Any](
                name=flow.name,
@@ -821,6 +840,8 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
 
        bundle = create_bundle_for_flow_run(flow=flow, flow_run=flow_run)
 
+        # Write the bundle to a temporary directory so it can be uploaded to the bundle storage
+        # via the upload command
        with tempfile.TemporaryDirectory() as temp_dir:
            await (
                anyio.Path(temp_dir)
@@ -843,6 +864,8 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
        logger.debug("Successfully uploaded execution bundle")
 
        try:
+            # Call the implementation-specific run method with the constructed configuration. This is where the
+            # rubber meets the road.
            result = await self.run(flow_run, configuration)
 
            if result.status_code != 0:
@@ -1128,7 +1151,7 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
            if self._limiter:
                self._limiter.acquire_on_behalf_of_nowait(flow_run.id)
        except anyio.WouldBlock:
-            self._logger.info(
+            self._logger.debug(
                f"Flow run limit reached; {self.limiter.borrowed_tokens} flow runs"
                " in progress."
            )
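In effect, ad-hoc (bundle) submissions now fall back to the work pool's default result storage instead of only warning. A distilled, self-contained sketch of the precedence (stub types; not the worker's actual signatures):

from dataclasses import dataclass
from typing import Optional
from uuid import UUID

@dataclass
class Storage:
    kind: str  # e.g. "local", "s3"

def pick_result_storage(
    flow_storage: Optional[Storage],
    context_storage: Optional[Storage],
    pool_default_block_id: Optional[UUID],
) -> Optional[object]:
    if flow_storage is not None:
        return flow_storage  # flow-level configuration always wins
    if context_storage is not None and context_storage.kind != "local":
        return context_storage  # a non-local context store is usable remotely
    if pool_default_block_id is not None:
        # the worker resolves this block and sets persist_result=True
        return pool_default_block_id
    return None  # nothing usable: the worker only logs a warning

print(pick_result_storage(None, Storage("local"), None))  # -> None (warning path)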
prefect_client-3.3.8.dev4.dist-info/METADATA → prefect_client-3.4.1.dev1.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prefect-client
-Version: 3.3.8.dev4
+Version: 3.4.1.dev1
 Summary: Workflow orchestration and management.
 Project-URL: Changelog, https://github.com/PrefectHQ/prefect/releases
 Project-URL: Documentation, https://docs.prefect.io
@@ -40,7 +40,7 @@ Requires-Dist: jsonpatch<2.0,>=1.32
 Requires-Dist: jsonschema<5.0.0,>=4.0.0
 Requires-Dist: opentelemetry-api<2.0.0,>=1.27.0
 Requires-Dist: orjson<4.0,>=3.7
-Requires-Dist: packaging<24.3,>=21.3
+Requires-Dist: packaging<25.1,>=21.3
 Requires-Dist: pathspec>=0.8.0
 Requires-Dist: pendulum<4,>=3.0.0; python_version < '3.13'
 Requires-Dist: prometheus-client>=0.20.0
@@ -62,7 +62,7 @@ Requires-Dist: typing-extensions<5.0.0,>=4.10.0
 Requires-Dist: ujson<6.0.0,>=5.8.0
 Requires-Dist: uvicorn!=0.29.0,>=0.14.0
 Requires-Dist: websockets<16.0,>=13.0
-Requires-Dist: whenever<0.8.0,>=0.7.3; python_version >= '3.13'
+Requires-Dist: whenever<0.9.0,>=0.7.3; python_version >= '3.13'
 Provides-Extra: notifications
 Requires-Dist: apprise<2.0.0,>=1.1.0; extra == 'notifications'
 Description-Content-Type: text/markdown
prefect_client-3.3.8.dev4.dist-info/RECORD → prefect_client-3.4.1.dev1.dist-info/RECORD RENAMED
@@ -1,9 +1,9 @@
 prefect/.prefectignore,sha256=awSprvKT0vI8a64mEOLrMxhxqcO-b0ERQeYpA2rNKVQ,390
 prefect/__init__.py,sha256=iCdcC5ZmeewikCdnPEP6YBAjPNV5dvfxpYCTpw30Hkw,3685
 prefect/__main__.py,sha256=WFjw3kaYJY6pOTA7WDOgqjsz8zUEUZHCcj3P5wyVa-g,66
-prefect/_build_info.py,sha256=fPIQpc-QDiKnHO_7x3zhljsGJda7vTLJ-4VzvCenG_U,185
+prefect/_build_info.py,sha256=S2FR6y1xQ3ZKOstDGJZLDW2Sml9MbnhZ9zMjp4hX7qw,185
 prefect/_result_records.py,sha256=S6QmsODkehGVSzbMm6ig022PYbI6gNKz671p_8kBYx4,7789
-prefect/_versioning.py,sha256=nRawjhBY2XpAzS5dHm4ajj8GlSNCr_YOjg2Zbez69j0,14069
+prefect/_versioning.py,sha256=YqR5cxXrY4P6LM1Pmhd8iMo7v_G2KJpGNdsf4EvDFQ0,14132
 prefect/_waiters.py,sha256=Ia2ITaXdHzevtyWIgJoOg95lrEXQqNEOquHvw3T33UQ,9026
 prefect/agent.py,sha256=dPvG1jDGD5HSH7aM2utwtk6RaJ9qg13XjkA0lAIgQmY,287
 prefect/artifacts.py,sha256=dMBUOAWnUamzjb5HSqwB5-GR2Qb-Gxee26XG5NDCUuw,22720
@@ -15,7 +15,7 @@ prefect/exceptions.py,sha256=wZLQQMRB_DyiYkeEdIC5OKwbba5A94Dlnics-lrWI7A,11581
 prefect/filesystems.py,sha256=v5YqGB4uXf9Ew2VuB9VCSkawvYMMVvEtZf7w1VmAmr8,18036
 prefect/flow_engine.py,sha256=hZpTYEtwTPMtwVoTCrfD93igN7rlKeG_0kyCvdU4aYE,58876
 prefect/flow_runs.py,sha256=d3jfmrIPP3C19IJREvpkuN6fxksX3Lzo-LlHOB-_E2I,17419
-prefect/flows.py,sha256=UCBwsb99wtPTGPu2PneKCfAMlMBA2GhXJb5rzMBxw1s,118041
+prefect/flows.py,sha256=dxy3xfNexZ1NCEYM4UdHI0dQsn6QQJCPx0XEEoYFOBk,118900
 prefect/futures.py,sha256=5wVHLtniwG2au0zuxM-ucqo08x0B5l6e8Z1Swbe8R9s,23720
 prefect/main.py,sha256=8V-qLB4GjEVCkGRgGXeaIk-JIXY8Z9FozcNluj4Sm9E,2589
 prefect/plugins.py,sha256=FPRLR2mWVBMuOnlzeiTD9krlHONZH2rtYLD753JQDNQ,2516
@@ -71,7 +71,7 @@ prefect/_vendor/croniter/__init__.py,sha256=NUFzdbyPcTQhIOFtzmFM0nbClAvBbKh2mlnT
 prefect/_vendor/croniter/croniter.py,sha256=eJ2HzStNAYV-vNiLOgDXl4sYWWHOsSA0dgwbkQoguhY,53009
 prefect/blocks/__init__.py,sha256=D0hB72qMfgqnBB2EMZRxUxlX9yLfkab5zDChOwJZmkY,220
 prefect/blocks/abstract.py,sha256=mpOAWopSR_RrzdxeurBTXVSKisP8ne-k8LYos-tp7go,17021
-prefect/blocks/core.py,sha256=ZUMmbBiw2O9_k_wOzR1itXJp8rj-E10tr4RPEoSfA4s,62028
+prefect/blocks/core.py,sha256=iP-g6guW9HFkt-sFpgH8WCyWhwnH5zIoUJuI2ykImG0,62894
 prefect/blocks/fields.py,sha256=1m507VVmkpOnMF_7N-qboRjtw4_ceIuDneX3jZ3Jm54,63
 prefect/blocks/notifications.py,sha256=UpNNxc4Bwx0nSlDj-vZQOv2XyUCUB2PaO4uBPO1Y6XM,34162
 prefect/blocks/redis.py,sha256=lt_f1SIcS5OVvthCY6KRWiy5DyUZNRlHqkKhKF25P8c,5770
@@ -114,7 +114,7 @@ prefect/client/orchestration/_work_pools/client.py,sha256=s1DfUQQBgB2sLiVVPhLNTl
 prefect/client/schemas/__init__.py,sha256=InZcDzdeWA2oaV0TlyvoMcyLcbi_aaqU1U9D6Gx-eoU,2747
 prefect/client/schemas/actions.py,sha256=EigGRTOwa_aWBMfqiTvNaUO8e78M1kIxorEzp1bigcI,33148
 prefect/client/schemas/filters.py,sha256=qa--NNZduuSOcL1xw-YMd4FVIKMrDnBwPPY4m5Di0GA,35963
-prefect/client/schemas/objects.py,sha256=uhmwZDw1kZQ8pZNjpoFtyMJIHFc_02s3HI0elgbGEOE,57679
+prefect/client/schemas/objects.py,sha256=NevDxRA0TEmM0W01q-FMOHVsW7c76vuwk9puJpFneZE,57850
 prefect/client/schemas/responses.py,sha256=Zdcx7jlIaluEa2uYIOE5mK1HsJvWPErRAcaWM20oY_I,17336
 prefect/client/schemas/schedules.py,sha256=sxLFk0SmFY7X1Y9R9HyGDqOS3U5NINBWTciUU7vTTic,14836
 prefect/client/schemas/sorting.py,sha256=L-2Mx-igZPtsUoRUguTcG3nIEstMEMPD97NwPM2Ox5s,2579
@@ -138,7 +138,7 @@ prefect/deployments/__init__.py,sha256=_wb7NxDKhq11z9MjYsPckmT3o6MRhGLRgCV9TmvYt
 prefect/deployments/base.py,sha256=YY7g8MN6qzjNEjEA8wQXPxCrd47WnACIUeSRtI4nrEk,11849
 prefect/deployments/deployments.py,sha256=K3Rgnpjxo_T8I8LMwlq24OKqZiZBTE8-YnPg-YGUStM,171
 prefect/deployments/flow_runs.py,sha256=NYe-Bphsy6ENLqSSfywQuX5cRZt-uVgzqGmOsf3Sqw4,7643
-prefect/deployments/runner.py,sha256=_VqbkXvPVvdyFVkRsr5emi26cJmu5-2uhtVUoE0EkNA,54805
+prefect/deployments/runner.py,sha256=SyhFJTdllbml2a1niuR6zfkngTbudGSfTaKvMtaHtEg,56622
 prefect/deployments/schedules.py,sha256=2eL1-w8qXtwKVkgfUK7cuamwpKK3X6tN1QYTDa_gWxU,2190
 prefect/deployments/steps/__init__.py,sha256=Dlz9VqMRyG1Gal8dj8vfGpPr0LyQhZdvcciozkK8WoY,206
 prefect/deployments/steps/core.py,sha256=ulSgBFSx1lhBt1fP-UxebrernkumBDlympR6IPffV1g,6900
@@ -148,7 +148,7 @@ prefect/docker/__init__.py,sha256=z6wdc6UFfiBG2jb9Jk64uCWVM04JKVWeVyDWwuuon8M,52
 prefect/docker/docker_image.py,sha256=bR_pEq5-FDxlwTj8CP_7nwZ_MiGK6KxIi8v7DRjy1Kg,3138
 prefect/events/__init__.py,sha256=GtKl2bE--pJduTxelH2xy7SadlLJmmis8WR1EYixhuA,2094
 prefect/events/actions.py,sha256=A7jS8bo4zWGnrt3QfSoQs0uYC1xfKXio3IfU0XtTb5s,9129
-prefect/events/clients.py,sha256=c5ZTt-ZVslvDr6-OrbsWb_7XUocWptGyAuVAVtAzokY,27589
+prefect/events/clients.py,sha256=gp3orepQav99303OC-zK6uz3dpyLlLpQ9ZWJEDol0cs,27597
 prefect/events/filters.py,sha256=2hVfzc3Rdgy0mBHDutWxT__LJY0zpVM8greWX3y6kjM,8233
 prefect/events/related.py,sha256=CTeexYUmmA93V4gsR33GIFmw-SS-X_ouOpRg-oeq-BU,6672
 prefect/events/utilities.py,sha256=ww34bTMENCNwcp6RhhgzG0KgXOvKGe0MKmBdSJ8NpZY,3043
@@ -163,7 +163,7 @@ prefect/events/schemas/labelling.py,sha256=bU-XYaHXhI2MEBIHngth96R9D02m8HHb85KNc
 prefect/infrastructure/__init__.py,sha256=dPvG1jDGD5HSH7aM2utwtk6RaJ9qg13XjkA0lAIgQmY,287
 prefect/infrastructure/base.py,sha256=dPvG1jDGD5HSH7aM2utwtk6RaJ9qg13XjkA0lAIgQmY,287
 prefect/infrastructure/provisioners/__init__.py,sha256=NTDdbkBE37FiBcroja5huuyWr4xYljjQp3ZnD7oplrA,1801
-prefect/infrastructure/provisioners/cloud_run.py,sha256=kEiVLGNH54zkDl9mqf3PPC20Xfyk_3RngLPUmpJEYYg,17799
+prefect/infrastructure/provisioners/cloud_run.py,sha256=q3wd1dyPViOXM1L6NZ3EPX8GmVyFHmljXdMv1ERw2TE,17845
 prefect/infrastructure/provisioners/coiled.py,sha256=VgZt35fnAMmtXMsqPhPNhXLMHs3mLTscJwxdQnZMRp8,9011
 prefect/infrastructure/provisioners/container_instance.py,sha256=Ai8Tx48uu5IsqRaRFKfEYHgQg0iAj_9DAB9yWpO_gRk,41366
 prefect/infrastructure/provisioners/ecs.py,sha256=xd7ymfb55TLPIKOyXufLZIIKFMEEMZBS4fW0OM0PckU,48410
@@ -184,7 +184,8 @@ prefect/logging/highlighters.py,sha256=BCf_LNhFInIfGPqwuu8YVrGa4wVxNc4YXo2pYgftp
 prefect/logging/loggers.py,sha256=rwFJv0i3dhdKr25XX-xUkQy4Vv4dy18bTy366jrC0OQ,12741
 prefect/logging/logging.yml,sha256=tT7gTyC4NmngFSqFkCdHaw7R0GPNPDDsTCGZQByiJAQ,3169
 prefect/runner/__init__.py,sha256=pQBd9wVrUVUDUFJlgiweKSnbahoBZwqnd2O2jkhrULY,158
-prefect/runner/runner.py,sha256=jv87XyaJ89uK0VzKpMzL3HfXgKZky8JlRs-gW04no5Y,65117
+prefect/runner/_observers.py,sha256=PpyXQL5bjp86AnDFEzcFPS5ayL6ExqcYgyuBMMQCO9Q,2183
+prefect/runner/runner.py,sha256=DFgZQTkKwmCDMmfA640xY1oTOCURzTOo7HOtwQxRVwA,59443
 prefect/runner/server.py,sha256=YRYFNoYddA9XfiTIYtudxrnD1vCX-PaOLhvyGUOb9AQ,11966
 prefect/runner/storage.py,sha256=n-65YoEf7KNVInnmMPeP5TVFJOa2zOS8w9en9MHi6uo,31328
 prefect/runner/submit.py,sha256=qOEj-NChQ6RYFV35hHEVMTklrNmKwaGs2mR78ku9H0o,9474
@@ -217,7 +218,7 @@ prefect/server/api/middleware.py,sha256=WkyuyeJIfo9Q0GAIVU5gO6yIGNVwoHwuBah5AB5o
 prefect/server/api/root.py,sha256=CeumFYIM_BDvPicJH9ry5PO_02PZTLeMqbLMGGTh90o,942
 prefect/server/api/run_history.py,sha256=EW-GTPxZAQ5zXiAqHzmS-iAN_Bn6ZSgVQksDT-ZTsyc,5995
 prefect/server/api/saved_searches.py,sha256=UjoqLLe245QVIs6q5Vk4vdODCOoYzciEEjhi7B8sYCE,3233
-prefect/server/api/server.py,sha256=eEZkZJgXcx6N_qpz9PS7wNVYEz-SZgEdVzG8i9ovjSI,32893
+prefect/server/api/server.py,sha256=xSi2km9KhhHPHSKEFHVntii0hRz2OINtB5zCUNajt6A,33356
 prefect/server/api/task_run_states.py,sha256=e63OPpxPudv_CIB5oKr8Z8rfQ-Osjm9Zq0iHe8obnMo,1647
 prefect/server/api/task_runs.py,sha256=86lXKGUJJSElhkVcxX-kbjctrNe98nUe3U0McDCfTMw,13904
 prefect/server/api/task_workers.py,sha256=cFP9M8tsApDL_JpySn-x6fOYy9RnOeOgKiqOl_UVVQM,1042
@@ -225,7 +226,7 @@ prefect/server/api/templates.py,sha256=92bLFfcahZUp5PVNTZPjl8uJSDj4ZYRTVdmTzZXkE
 prefect/server/api/validation.py,sha256=HxSNyH8yb_tI-kOfjXESRjJp6WQK6hYWBJsaBxUvY34,14490
 prefect/server/api/variables.py,sha256=SJaKuqInfQIEdMlJOemptBDN43KLFhlf_u9QwupDu7A,6185
 prefect/server/api/work_queues.py,sha256=wBcbmkZDaQ5Ddi9wc8tNs6kYG_FdNtYwTCR0VkhPj2o,7588
-prefect/server/api/workers.py,sha256=sGQzJED7E3uMP1jMdWAyB3d44xWBRtoHcTGY0oiEbm4,22602
+prefect/server/api/workers.py,sha256=-y8J9R47zeINvA07wd5P-5PCHjZmJVMm81CdfKMraww,24086
 prefect/server/api/collections_data/views/aggregate-worker-metadata.json,sha256=f6t13GRkIcLqGYB3OnXluAHEFoSqZM2SQP22vpcu0Mk,79793
 prefect/server/api/static/prefect-logo-mark-gradient.png,sha256=ylRjJkI_JHCw8VbQasNnXQHwZW-sH-IQiUGSD3aWP1E,73430
 prefect/server/api/ui/__init__.py,sha256=TCXO4ZUZCqCbm2QoNvWNTErkzWiX2nSACuO-0Tiomvg,93
@@ -234,7 +235,7 @@ prefect/server/api/ui/flows.py,sha256=W4kwqOCJ_2vROmMCmemH2Mq3uWbWZyu5q5uTZPBdYw
 prefect/server/api/ui/schemas.py,sha256=NVWA1RFnHW-MMU1s6WbNmp_S5mhbrN-_P41I4O2XtMg,2085
 prefect/server/api/ui/task_runs.py,sha256=6CMrHmY-ybJGHXz7YlVVP2ZTmvq7w-XA9GUHqCcw_7o,7319
 prefect/settings/__init__.py,sha256=3jDLzExmq9HsRWo1kTSE16BO_3B3JlVsk5pR0s4PWEQ,2136
-prefect/settings/base.py,sha256=HGukXOXOokfqmrVirgejNskKtf1x2QheZ-ldRakxPJA,9701
+prefect/settings/base.py,sha256=VtBSwBLowLvtBVDq3ZY5oKAwosMqsDMt2gcXLAiFf5k,9682
 prefect/settings/constants.py,sha256=5NjVLG1Km9J9I-a6wrq-qmi_dTkPdwEk3IrY9bSxWvw,281
 prefect/settings/context.py,sha256=yKxnaDJHX8e2jmAVtw1RF9o7X4V3AOcz61sVeQyPX2c,2195
 prefect/settings/legacy.py,sha256=KG00GwaURl1zbwfCKAjwNRdJjB2UdTyo80gYF7U60jk,5693
@@ -247,7 +248,7 @@ prefect/settings/models/cli.py,sha256=U-KwO1mfwj-hsyrR0KfS4eHg1-M1rr6VllqOt-VzoB
 prefect/settings/models/client.py,sha256=GuI4GLIxeuYzlOJQ23oFgqpETFZDiYVvwUFZ9DEEzDA,3134
 prefect/settings/models/cloud.py,sha256=TZ_Z8WwB5vZb5SslhITd_Fs3J9ytItRL2HYaLtkTRkU,2006
 prefect/settings/models/deployments.py,sha256=SAP8AX1bhJbK71X6cK2ndR0GueoaQGicP3JjXiCkdaA,1322
-prefect/settings/models/experiments.py,sha256=Qyot4ACvCV9JZUquG8QCfwzqeus5UtIQgoYtFu5qlks,868
+prefect/settings/models/experiments.py,sha256=SU1tghg36ivnxZS03Ih_M7hRz_poLkWnaciUukcNaEY,904
 prefect/settings/models/flows.py,sha256=kQ_sCA7TUqaEs9wWuGHkGQOuAIEZ5elD4UzeKRe00Vk,1143
 prefect/settings/models/internal.py,sha256=KUb16dg3lH5gwlnUnVJub6JHFXHRyZf1voINBvC_Ysc,718
 prefect/settings/models/logging.py,sha256=Sj9GDNr5QMFaP6CN0WJyfpwhpOk4p1yhv45dyQMRzHM,4729
@@ -312,13 +313,13 @@ prefect/utilities/schema_tools/__init__.py,sha256=At3rMHd2g_Em2P3_dFQlFgqR_EpBwr
 prefect/utilities/schema_tools/hydration.py,sha256=NkRhWkNfxxFmVGhNDfmxdK_xeKaEhs3a42q83Sg9cT4,9436
 prefect/utilities/schema_tools/validation.py,sha256=Wix26IVR-ZJ32-6MX2pHhrwm3reB-Q4iB6_phn85OKE,10743
 prefect/workers/__init__.py,sha256=EaM1F0RZ-XIJaGeTKLsXDnfOPHzVWk5bk0_c4BVS44M,64
-prefect/workers/base.py,sha256=B3K80V-bZ1oI-5iwM2jw93is9srTSCLNN2lvVtlmB7g,60267
+prefect/workers/base.py,sha256=gqXHTFFhjIzCZoBh4FDlA8AAm4l2j6GW6wS7Q7Hbb20,61504
 prefect/workers/block.py,sha256=dPvG1jDGD5HSH7aM2utwtk6RaJ9qg13XjkA0lAIgQmY,287
 prefect/workers/cloud.py,sha256=dPvG1jDGD5HSH7aM2utwtk6RaJ9qg13XjkA0lAIgQmY,287
 prefect/workers/process.py,sha256=Yi5D0U5AQ51wHT86GdwtImXSefe0gJf3LGq4r4z9zwM,11090
 prefect/workers/server.py,sha256=2pmVeJZiVbEK02SO6BEZaBIvHMsn6G8LzjW8BXyiTtk,1952
 prefect/workers/utilities.py,sha256=VfPfAlGtTuDj0-Kb8WlMgAuOfgXCdrGAnKMapPSBrwc,2483
-prefect_client-3.3.8.dev4.dist-info/METADATA,sha256=RnRvdjpOPN4o--OI4fqJHfRiPVRabPGWr4leEn2kHUw,7471
-prefect_client-3.3.8.dev4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-prefect_client-3.3.8.dev4.dist-info/licenses/LICENSE,sha256=MCxsn8osAkzfxKC4CC_dLcUkU8DZLkyihZ8mGs3Ah3Q,11357
-prefect_client-3.3.8.dev4.dist-info/RECORD,,
+prefect_client-3.4.1.dev1.dist-info/METADATA,sha256=IFwUWcPPlE7Z-2A_Xn2tZef9WEqSXbfjlRRx0TQnbfI,7471
+prefect_client-3.4.1.dev1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+prefect_client-3.4.1.dev1.dist-info/licenses/LICENSE,sha256=MCxsn8osAkzfxKC4CC_dLcUkU8DZLkyihZ8mGs3Ah3Q,11357
+prefect_client-3.4.1.dev1.dist-info/RECORD,,