prefect-client 3.3.8.dev4__py3-none-any.whl → 3.4.1__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (35)
  1. prefect/_build_info.py +3 -3
  2. prefect/_experimental/bundles/__init__.py +1 -1
  3. prefect/_internal/schemas/bases.py +11 -1
  4. prefect/_internal/schemas/validators.py +0 -98
  5. prefect/_internal/uuid7.py +11 -0
  6. prefect/_versioning.py +2 -0
  7. prefect/blocks/core.py +20 -1
  8. prefect/client/orchestration/__init__.py +16 -8
  9. prefect/client/schemas/actions.py +13 -35
  10. prefect/client/schemas/objects.py +30 -22
  11. prefect/client/subscriptions.py +18 -9
  12. prefect/deployments/runner.py +54 -4
  13. prefect/events/clients.py +6 -6
  14. prefect/events/filters.py +25 -11
  15. prefect/events/schemas/automations.py +3 -1
  16. prefect/events/schemas/events.py +3 -2
  17. prefect/flows.py +94 -28
  18. prefect/infrastructure/provisioners/cloud_run.py +1 -0
  19. prefect/runner/_observers.py +60 -0
  20. prefect/runner/runner.py +72 -214
  21. prefect/server/api/server.py +18 -1
  22. prefect/server/api/workers.py +42 -6
  23. prefect/settings/base.py +7 -7
  24. prefect/settings/models/experiments.py +2 -0
  25. prefect/task_runners.py +2 -1
  26. prefect/tasks.py +3 -2
  27. prefect/types/__init__.py +24 -36
  28. prefect/types/names.py +139 -0
  29. prefect/utilities/dockerutils.py +18 -8
  30. prefect/utilities/importtools.py +12 -4
  31. prefect/workers/base.py +66 -21
  32. {prefect_client-3.3.8.dev4.dist-info → prefect_client-3.4.1.dist-info}/METADATA +4 -3
  33. {prefect_client-3.3.8.dev4.dist-info → prefect_client-3.4.1.dist-info}/RECORD +35 -32
  34. {prefect_client-3.3.8.dev4.dist-info → prefect_client-3.4.1.dist-info}/WHEEL +0 -0
  35. {prefect_client-3.3.8.dev4.dist-info → prefect_client-3.4.1.dist-info}/licenses/LICENSE +0 -0
prefect/deployments/runner.py CHANGED
@@ -35,7 +35,15 @@ import importlib
  import tempfile
  from datetime import datetime, timedelta
  from pathlib import Path
- from typing import TYPE_CHECKING, Any, ClassVar, Iterable, List, Optional, Union
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     ClassVar,
+     Iterable,
+     List,
+     Optional,
+     Union,
+ )
  from uuid import UUID

  from pydantic import (
@@ -58,6 +66,7 @@ from prefect._internal.schemas.validators import (
      reconcile_paused_deployment,
      reconcile_schedules_runner,
  )
+ from prefect._versioning import VersionType, get_inferred_version_info
  from prefect.client.base import ServerType
  from prefect.client.orchestration import PrefectClient, get_client
  from prefect.client.schemas.actions import DeploymentScheduleCreate, DeploymentUpdate
@@ -154,6 +163,13 @@ class RunnerDeployment(BaseModel):
      version: Optional[str] = Field(
          default=None, description="An optional version for the deployment."
      )
+     version_type: Optional[VersionType] = Field(
+         default=None,
+         description=(
+             "The type of version information to use for the deployment. The version type"
+             " will be inferred if not provided."
+         ),
+     )
      tags: ListOfNonEmptyStrings = Field(
          default_factory=list,
          description="One of more tags to apply to this deployment.",
@@ -219,6 +235,7 @@ class RunnerDeployment(BaseModel):
              " a built runner."
          ),
      )
+ 
      # (Experimental) SLA configuration for the deployment. May be removed or modified at any time. Currently only supported on Prefect Cloud.
      _sla: Optional[Union[SlaTypes, list[SlaTypes]]] = PrivateAttr(
          default=None,
@@ -232,6 +249,9 @@ class RunnerDeployment(BaseModel):
      _parameter_openapi_schema: ParameterSchema = PrivateAttr(
          default_factory=ParameterSchema,
      )
+     _version_from_flow: bool = PrivateAttr(
+         default=False,
+     )

      @property
      def entrypoint_type(self) -> EntrypointType:
@@ -241,6 +261,20 @@ class RunnerDeployment(BaseModel):
      def full_name(self) -> str:
          return f"{self.flow_name}/{self.name}"

+     def _get_deployment_version_info(
+         self, version_type: Optional[VersionType] = None
+     ) -> VersionInfo:
+         if inferred_version := run_coro_as_sync(
+             get_inferred_version_info(version_type)
+         ):
+             if not self.version or self._version_from_flow:
+                 self.version = inferred_version.version  # TODO: maybe reconsider
+ 
+             inferred_version.version = self.version
+             return inferred_version
+ 
+         return VersionInfo(version=self.version or "", type="prefect:simple")
+ 
      @field_validator("name", mode="before")
      @classmethod
      def validate_name(cls, value: str) -> str:
@@ -387,7 +421,7 @@ class RunnerDeployment(BaseModel):
          update_payload = self.model_dump(
              mode="json",
              exclude_unset=True,
-             exclude={"storage", "name", "flow_name", "triggers"},
+             exclude={"storage", "name", "flow_name", "triggers", "version_type"},
          )

          if self.storage:
@@ -444,7 +478,7 @@ class RunnerDeployment(BaseModel):
          self,
          work_pool_name: Optional[str] = None,
          image: Optional[str] = None,
-         version_info: VersionInfo | None = None,
+         version_info: Optional[VersionInfo] = None,
      ) -> UUID:
          """
          Registers this deployment with the API and returns the deployment's ID.
@@ -455,11 +489,15 @@ class RunnerDeployment(BaseModel):
              image: The registry, name, and tag of the Docker image to
                  use for this deployment. Only used when the deployment is
                  deployed to a work pool.
-             version_info: Version information for the deployment.
+             version_info: The version information to use for the deployment.

          Returns:
              The ID of the created deployment.
          """

+         version_info = version_info or self._get_deployment_version_info(
+             self.version_type
+         )
+ 
          async with get_client() as client:
              try:
                  deployment = await client.read_deployment_by_name(self.full_name)
@@ -570,6 +608,7 @@ class RunnerDeployment(BaseModel):

          if not self.version:
              self.version = flow.version
+             self._version_from_flow = True
          if not self.description:
              self.description = flow.description

@@ -592,6 +631,7 @@ class RunnerDeployment(BaseModel):
          description: Optional[str] = None,
          tags: Optional[List[str]] = None,
          version: Optional[str] = None,
+         version_type: Optional[VersionType] = None,
          enforce_parameter_schema: bool = True,
          work_pool_name: Optional[str] = None,
          work_queue_name: Optional[str] = None,
@@ -622,6 +662,7 @@ class RunnerDeployment(BaseModel):
              tags: A list of tags to associate with the created deployment for organizational
                  purposes.
              version: A version for the created deployment. Defaults to the flow's version.
+             version_type: The type of version information to use for the deployment.
              enforce_parameter_schema: Whether or not the Prefect API should enforce the
                  parameter schema for this deployment.
              work_pool_name: The name of the work pool to use for this deployment.
@@ -662,6 +703,7 @@ class RunnerDeployment(BaseModel):
              parameters=parameters or {},
              description=description,
              version=version,
+             version_type=version_type,
              enforce_parameter_schema=enforce_parameter_schema,
              work_pool_name=work_pool_name,
              work_queue_name=work_queue_name,
@@ -837,6 +879,7 @@ class RunnerDeployment(BaseModel):
          description: Optional[str] = None,
          tags: Optional[List[str]] = None,
          version: Optional[str] = None,
+         version_type: Optional[VersionType] = None,
          enforce_parameter_schema: bool = True,
          work_pool_name: Optional[str] = None,
          work_queue_name: Optional[str] = None,
@@ -870,6 +913,8 @@ class RunnerDeployment(BaseModel):
              tags: A list of tags to associate with the created deployment for organizational
                  purposes.
              version: A version for the created deployment. Defaults to the flow's version.
+             version_type: The type of version information to use for the deployment. The version type
+                 will be inferred if not provided.
              enforce_parameter_schema: Whether or not the Prefect API should enforce the
                  parameter schema for this deployment.
              work_pool_name: The name of the work pool to use for this deployment.
@@ -921,6 +966,7 @@ class RunnerDeployment(BaseModel):
              parameters=parameters or {},
              description=description,
              version=version,
+             version_type=version_type,
              entrypoint=entrypoint,
              enforce_parameter_schema=enforce_parameter_schema,
              storage=storage,
@@ -959,6 +1005,7 @@ class RunnerDeployment(BaseModel):
          description: Optional[str] = None,
          tags: Optional[List[str]] = None,
          version: Optional[str] = None,
+         version_type: Optional[VersionType] = None,
          enforce_parameter_schema: bool = True,
          work_pool_name: Optional[str] = None,
          work_queue_name: Optional[str] = None,
@@ -992,6 +1039,8 @@ class RunnerDeployment(BaseModel):
              tags: A list of tags to associate with the created deployment for organizational
                  purposes.
              version: A version for the created deployment. Defaults to the flow's version.
+             version_type: The type of version information to use for the deployment. The version type
+                 will be inferred if not provided.
              enforce_parameter_schema: Whether or not the Prefect API should enforce the
                  parameter schema for this deployment.
              work_pool_name: The name of the work pool to use for this deployment.
@@ -1041,6 +1090,7 @@ class RunnerDeployment(BaseModel):
              parameters=parameters or {},
              description=description,
              version=version,
+             version_type=version_type,
              entrypoint=entrypoint,
              enforce_parameter_schema=enforce_parameter_schema,
              storage=storage,
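The main change in prefect/deployments/runner.py is the new `version_type` field on `RunnerDeployment`, threaded through its constructors and used to build `VersionInfo` at apply time. A rough sketch of opting into it from a flow deployment (the flow, deployment name, and work pool below are placeholders; passing the string `"prefect:simple"` is an assumption based on the fallback `VersionInfo(type="prefect:simple")` shown above):

```python
from prefect import flow


@flow
def my_flow() -> str:
    return "hello"


if __name__ == "__main__":
    # Hypothetical deployment; "my-work-pool" must already exist.
    # When version_type is omitted, the type is inferred
    # (see _get_deployment_version_info above).
    my_flow.deploy(
        name="example-deployment",
        work_pool_name="my-work-pool",
        version_type="prefect:simple",  # assumption: string form of VersionType
    )
```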
prefect/events/clients.py CHANGED
@@ -251,8 +251,8 @@ class EventsClient(abc.ABC):

      async def __aexit__(
          self,
-         exc_type: Optional[Type[Exception]],
-         exc_val: Optional[Exception],
+         exc_type: Optional[Type[BaseException]],
+         exc_val: Optional[BaseException],
          exc_tb: Optional[TracebackType],
      ) -> None:
          del self._in_context
@@ -360,8 +360,8 @@ class PrefectEventsClient(EventsClient):

      async def __aexit__(
          self,
-         exc_type: Optional[Type[Exception]],
-         exc_val: Optional[Exception],
+         exc_type: Optional[Type[BaseException]],
+         exc_val: Optional[BaseException],
          exc_tb: Optional[TracebackType],
      ) -> None:
          self._websocket = None
@@ -684,8 +684,8 @@ class PrefectEventSubscriber:

      async def __aexit__(
          self,
-         exc_type: Optional[Type[Exception]],
-         exc_val: Optional[Exception],
+         exc_type: Optional[Type[BaseException]],
+         exc_val: Optional[BaseException],
          exc_tb: Optional[TracebackType],
      ) -> None:
          self._websocket = None
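The annotation change here is subtle but meaningful: `__aexit__` can receive exceptions that derive from `BaseException` but not `Exception`, such as `asyncio.CancelledError` or `KeyboardInterrupt`, so the narrower `Optional[Type[Exception]]` hints were incorrect. A minimal sketch of the widened signature (the `ExampleClient` class is illustrative, not part of Prefect):

```python
import asyncio
from types import TracebackType
from typing import Optional, Type


class ExampleClient:
    async def __aenter__(self) -> "ExampleClient":
        return self

    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],  # covers CancelledError, KeyboardInterrupt
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        # cleanup runs even when the surrounding task is cancelled
        pass


async def main() -> None:
    async with ExampleClient():
        await asyncio.sleep(0)


if __name__ == "__main__":
    asyncio.run(main())
```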
prefect/events/filters.py CHANGED
@@ -1,5 +1,7 @@
+ from __future__ import annotations
+ 
  import datetime
- from typing import Optional
+ from typing import Optional, Union
  from uuid import UUID

  from pydantic import Field
@@ -43,11 +45,21 @@ class EventDataFilter(PrefectBaseModel, extra="forbid"): # type: ignore[call-ar
      """A base class for filtering event data."""

      def get_filters(self) -> list["EventDataFilter"]:
-         filters: list["EventDataFilter"] = [
-             filter
-             for filter in [getattr(self, name) for name in type(self).model_fields]
-             if isinstance(filter, EventDataFilter)
-         ]
+         filters: list[EventDataFilter] = []
+         for filter in [
+             getattr(self, name) for name in self.__class__.model_fields.keys()
+         ]:
+             # Any embedded list of filters are flattened and thus ANDed together
+             subfilters: list[EventDataFilter] = (
+                 filter if isinstance(filter, list) else [filter]
+             )
+ 
+             for subfilter in subfilters:
+                 if not isinstance(subfilter, EventDataFilter):
+                     continue
+ 
+                 filters.append(subfilter)
+ 
          return filters

      def includes(self, event: Event) -> bool:
@@ -233,18 +245,20 @@ class EventFilter(EventDataFilter):
          default=None,
          description="Filter criteria for the event name",
      )
-     any_resource: Optional[EventAnyResourceFilter] = Field(
-         default=None,
-         description="Filter criteria for any resource involved in the event",
-     )
      resource: Optional[EventResourceFilter] = Field(
          default=None,
          description="Filter criteria for the resource of the event",
      )
-     related: Optional[EventRelatedFilter] = Field(
+     related: Optional[Union[EventRelatedFilter, list[EventRelatedFilter]]] = Field(
          default=None,
          description="Filter criteria for the related resources of the event",
      )
+     any_resource: Optional[
+         Union[EventAnyResourceFilter, list[EventAnyResourceFilter]]
+     ] = Field(
+         default=None,
+         description="Filter criteria for any resource involved in the event",
+     )
      id: EventIDFilter = Field(
          default_factory=lambda: EventIDFilter(id=[]),
          description="Filter criteria for the events' ID",
prefect/events/schemas/automations.py CHANGED
@@ -1,3 +1,5 @@
+ from __future__ import annotations
+ 
  import abc
  import textwrap
  from datetime import timedelta
@@ -103,7 +105,7 @@ class ResourceTrigger(Trigger, abc.ABC):
          default_factory=lambda: ResourceSpecification.model_validate({}),
          description="Labels for resources which this trigger will match.",
      )
-     match_related: ResourceSpecification = Field(
+     match_related: Union[ResourceSpecification, list[ResourceSpecification]] = Field(
          default_factory=lambda: ResourceSpecification.model_validate({}),
          description="Labels for related resources which this trigger will match.",
      )
prefect/events/schemas/events.py CHANGED
@@ -13,7 +13,7 @@ from typing import (
      Tuple,
      Union,
  )
- from uuid import UUID, uuid4
+ from uuid import UUID

  from pydantic import (
      AfterValidator,
@@ -26,6 +26,7 @@ from typing_extensions import Annotated, Self

  import prefect.types._datetime
  from prefect._internal.schemas.bases import PrefectBaseModel
+ from prefect._internal.uuid7 import uuid7
  from prefect.logging import get_logger
  from prefect.settings import (
      PREFECT_EVENTS_MAXIMUM_LABELS_PER_RESOURCE,
@@ -135,7 +136,7 @@ class Event(PrefectBaseModel):
          description="An open-ended set of data describing what happened",
      )
      id: UUID = Field(
-         default_factory=uuid4,
+         default_factory=uuid7,
          description="The client-provided identifier of this event",
      )
      follows: Optional[UUID] = Field(
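Event ids switch from `uuid4` to a new internal `uuid7` helper (added in `prefect/_internal/uuid7.py`), so client-generated ids sort roughly by creation time. The helper itself is not shown in this diff; the sketch below is a generic UUIDv7-style generator following the RFC 9562 layout (48-bit millisecond timestamp, then version and variant bits over random data), not Prefect's implementation:

```python
import os
import time
import uuid


def uuid7_sketch() -> uuid.UUID:
    """Generic UUIDv7-style id: 48-bit ms timestamp, version 7, variant 10, random tail."""
    ts_ms = int(time.time() * 1000) & ((1 << 48) - 1)
    value = (ts_ms << 80) | int.from_bytes(os.urandom(10), "big")  # 80 random bits
    value = (value & ~(0xF << 76)) | (0x7 << 76)  # version bits -> 7
    value = (value & ~(0x3 << 62)) | (0x2 << 62)  # variant bits -> 0b10
    return uuid.UUID(int=value)


# ids generated later carry a larger timestamp prefix, so they sort by creation time
print(uuid7_sketch(), uuid7_sketch())
```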
prefect/flows.py CHANGED
@@ -27,6 +27,7 @@ from typing import (
      Coroutine,
      Generic,
      Iterable,
+     List,
      NoReturn,
      Optional,
      Protocol,
@@ -49,6 +50,7 @@ from typing_extensions import Literal, ParamSpec

  from prefect._experimental.sla.objects import SlaTypes
  from prefect._internal.concurrency.api import create_call, from_async
+ from prefect._versioning import VersionType
  from prefect.blocks.core import Block
  from prefect.client.schemas.filters import WorkerFilter, WorkerFilterStatus
  from prefect.client.schemas.objects import ConcurrencyLimitConfig, FlowRun
@@ -704,6 +706,7 @@ class Flow(Generic[P, R]):
          description: Optional[str] = None,
          tags: Optional[list[str]] = None,
          version: Optional[str] = None,
+         version_type: Optional[VersionType] = None,
          enforce_parameter_schema: bool = True,
          work_pool_name: Optional[str] = None,
          work_queue_name: Optional[str] = None,
@@ -733,6 +736,8 @@ class Flow(Generic[P, R]):
              tags: A list of tags to associate with the created deployment for organizational
                  purposes.
              version: A version for the created deployment. Defaults to the flow's version.
+             version_type: The type of version to use for the created deployment. The version type
+                 will be inferred if not provided.
              enforce_parameter_schema: Whether or not the Prefect API should enforce the
                  parameter schema for the created deployment.
              work_pool_name: The name of the work pool to use for this deployment.
@@ -787,6 +792,7 @@ class Flow(Generic[P, R]):
              parameters=parameters or {},
              description=description,
              version=version,
+             version_type=version_type,
              enforce_parameter_schema=enforce_parameter_schema,
              work_pool_name=work_pool_name,
              work_queue_name=work_queue_name,
@@ -809,6 +815,7 @@ class Flow(Generic[P, R]):
              parameters=parameters or {},
              description=description,
              version=version,
+             version_type=version_type,
              enforce_parameter_schema=enforce_parameter_schema,
              work_pool_name=work_pool_name,
              work_queue_name=work_queue_name,
@@ -840,6 +847,7 @@ class Flow(Generic[P, R]):
          description: Optional[str] = None,
          tags: Optional[list[str]] = None,
          version: Optional[str] = None,
+         version_type: Optional[VersionType] = None,
          enforce_parameter_schema: bool = True,
          work_pool_name: Optional[str] = None,
          work_queue_name: Optional[str] = None,
@@ -869,6 +877,8 @@ class Flow(Generic[P, R]):
              tags: A list of tags to associate with the created deployment for organizational
                  purposes.
              version: A version for the created deployment. Defaults to the flow's version.
+             version_type: The type of version to use for the created deployment. The version type
+                 will be inferred if not provided.
              enforce_parameter_schema: Whether or not the Prefect API should enforce the
                  parameter schema for the created deployment.
              work_pool_name: The name of the work pool to use for this deployment.
@@ -925,6 +935,7 @@ class Flow(Generic[P, R]):
              parameters=parameters or {},
              description=description,
              version=version,
+             version_type=version_type,
              enforce_parameter_schema=enforce_parameter_schema,
              work_pool_name=work_pool_name,
              work_queue_name=work_queue_name,
@@ -949,6 +960,7 @@ class Flow(Generic[P, R]):
              parameters=parameters or {},
              description=description,
              version=version,
+             version_type=version_type,
              enforce_parameter_schema=enforce_parameter_schema,
              work_pool_name=work_pool_name,
              work_queue_name=work_queue_name,
@@ -1375,6 +1387,7 @@ class Flow(Generic[P, R]):
          description: Optional[str] = None,
          tags: Optional[list[str]] = None,
          version: Optional[str] = None,
+         version_type: Optional[VersionType] = None,
          enforce_parameter_schema: bool = True,
          entrypoint_type: EntrypointType = EntrypointType.FILE_PATH,
          print_next_steps: bool = True,
@@ -1426,6 +1439,8 @@ class Flow(Generic[P, R]):
              tags: A list of tags to associate with the created deployment for organizational
                  purposes.
              version: A version for the created deployment. Defaults to the flow's version.
+             version_type: The type of version to use for the created deployment. The version type
+                 will be inferred if not provided.
              enforce_parameter_schema: Whether or not the Prefect API should enforce the
                  parameter schema for the created deployment.
              entrypoint_type: Type of entrypoint to use for the deployment. When using a module path
@@ -1510,6 +1525,7 @@ class Flow(Generic[P, R]):
              description=description,
              tags=tags,
              version=version,
+             version_type=version_type,
              enforce_parameter_schema=enforce_parameter_schema,
              work_queue_name=work_queue_name,
              job_variables=job_variables,
@@ -2294,8 +2310,9 @@ def load_flow_from_entrypoint(
      Extract a flow object from a script at an entrypoint by running all of the code in the file.

      Args:
-         entrypoint: a string in the format `<path_to_script>:<flow_func_name>` or a module path
-             to a flow function
+         entrypoint: a string in the format `<path_to_script>:<flow_func_name>`
+             or a string in the format `<path_to_script>:<class_name>.<flow_method_name>`
+             or a module path to a flow function
          use_placeholder_flow: if True, use a placeholder Flow object if the actual flow object
              cannot be loaded from the entrypoint (e.g. dependencies are missing)

@@ -2685,26 +2702,55 @@ def load_placeholder_flow(entrypoint: str, raises: Exception) -> Flow[P, Any]:

  def safe_load_flow_from_entrypoint(entrypoint: str) -> Optional[Flow[P, Any]]:
      """
-     Load a flow from an entrypoint and return None if an exception is raised.
+     Safely load a Prefect flow from an entrypoint string. Returns None if loading fails.

      Args:
-         entrypoint: a string in the format `<path_to_script>:<flow_func_name>`
-             or a module path to a flow function
+         entrypoint (str): A string identifying the flow to load. Can be in one of the following formats:
+             - `<path_to_script>:<flow_func_name>`
+             - `<path_to_script>:<class_name>.<flow_method_name>`
+             - `<module_path>.<flow_func_name>`
+ 
+     Returns:
+         Optional[Flow]: The loaded Prefect flow object, or None if loading fails due to errors
+             (e.g. unresolved dependencies, syntax errors, or missing objects).
      """
-     func_def, source_code = _entrypoint_definition_and_source(entrypoint)
-     path = None
-     if ":" in entrypoint:
-         path = entrypoint.rsplit(":")[0]
+     func_or_cls_def, source_code, parts = _entrypoint_definition_and_source(entrypoint)
+ 
+     path = entrypoint.rsplit(":", maxsplit=1)[0] if ":" in entrypoint else None
      namespace = safe_load_namespace(source_code, filepath=path)
-     if func_def.name in namespace:
-         return namespace[func_def.name]
-     else:
-         # If the function is not in the namespace, if may be due to missing dependencies
-         # for the function. We will attempt to compile each annotation and default value
-         # and remove them from the function definition to see if the function can be
-         # compiled without them.

-         return _sanitize_and_load_flow(func_def, namespace)
+     if parts[0] not in namespace:
+         # If the object is not in the namespace, it may be due to missing dependencies
+         # in annotations or default values. We will attempt to sanitize them by removing
+         # anything that cannot be compiled, and then recompile the function or class.
+         if isinstance(func_or_cls_def, (ast.FunctionDef, ast.AsyncFunctionDef)):
+             return _sanitize_and_load_flow(func_or_cls_def, namespace)
+         elif (
+             isinstance(func_or_cls_def, ast.ClassDef)
+             and len(parts) >= 2
+             and func_or_cls_def.name == parts[0]
+         ):
+             method_name = parts[1]
+             method_def = next(
+                 (
+                     stmt
+                     for stmt in func_or_cls_def.body
+                     if isinstance(stmt, (ast.FunctionDef, ast.AsyncFunctionDef))
+                     and stmt.name == method_name
+                 ),
+                 None,
+             )
+             if method_def is not None:
+                 return _sanitize_and_load_flow(method_def, namespace)
+         else:
+             return None
+ 
+     obj = namespace.get(parts[0])
+     for part in parts[1:]:
+         obj = getattr(obj, part, None)
+         if obj is None:
+             return None
+     return obj


  def _sanitize_and_load_flow(
@@ -2838,7 +2884,7 @@ def load_flow_arguments_from_entrypoint(
              or a module path to a flow function
      """

-     func_def, source_code = _entrypoint_definition_and_source(entrypoint)
+     func_def, source_code, _ = _entrypoint_definition_and_source(entrypoint)
      path = None
      if ":" in entrypoint:
          path = entrypoint.rsplit(":")[0]
@@ -2915,26 +2961,45 @@ def is_entrypoint_async(entrypoint: str) -> bool:
      Returns:
          True if the function is asynchronous, False otherwise.
      """
-     func_def, _ = _entrypoint_definition_and_source(entrypoint)
+     func_def, _, _ = _entrypoint_definition_and_source(entrypoint)
      return isinstance(func_def, ast.AsyncFunctionDef)


  def _entrypoint_definition_and_source(
      entrypoint: str,
- ) -> Tuple[Union[ast.FunctionDef, ast.AsyncFunctionDef], str]:
+ ) -> Tuple[Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef], str, List[str]]:
+     """
+     Resolves and parses the source definition of a given entrypoint.
+ 
+     The entrypoint can be provided in one of the following formats:
+     - '<path_to_script>:<flow_func_name>'
+     - '<path_to_script>:<class_name>.<flow_method_name>'
+     - '<module_path.to.flow_function>'
+ 
+     Returns:
+         A tuple containing:
+         - The AST node (FunctionDef, AsyncFunctionDef, or ClassDef) of the base object.
+         - The full source code of the file or module as a string.
+         - A list of attribute access parts from the object path (e.g., ['MyFlowClass', 'run']).
+ 
+     Raises:
+         ValueError: If the module or target object cannot be found.
+     """
      if ":" in entrypoint:
-         # Split by the last colon once to handle Windows paths with drive letters i.e C:\path\to\file.py:do_stuff
-         path, func_name = entrypoint.rsplit(":", maxsplit=1)
+         path, object_path = entrypoint.rsplit(":", maxsplit=1)
          source_code = Path(path).read_text()
      else:
-         path, func_name = entrypoint.rsplit(".", maxsplit=1)
+         path, object_path = entrypoint.rsplit(".", maxsplit=1)
          spec = importlib.util.find_spec(path)
          if not spec or not spec.origin:
              raise ValueError(f"Could not find module {path!r}")
          source_code = Path(spec.origin).read_text()

      parsed_code = ast.parse(source_code)
-     func_def = next(
+     parts = object_path.split(".")
+     base_name = parts[0]
+ 
+     base_def = next(
          (
              node
              for node in ast.walk(parsed_code)
@@ -2943,14 +3008,15 @@ def _entrypoint_definition_and_source(
                  (
                      ast.FunctionDef,
                      ast.AsyncFunctionDef,
+                     ast.ClassDef,  # flow can be staticmethod/classmethod
                  ),
              )
-             and node.name == func_name
+             and node.name == base_name
          ),
          None,
      )

-     if not func_def:
-         raise ValueError(f"Could not find flow {func_name!r} in {path!r}")
+     if not base_def:
+         raise ValueError(f"Could not find object {base_name!r} in {path!r}")

-     return func_def, source_code
+     return base_def, source_code, parts
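Together these changes let an entrypoint address a flow defined as a method on a class, using the form `<path_to_script>:<class_name>.<flow_method_name>`. A minimal sketch of a file such an entrypoint could point at (the file name, class, and decorator ordering are illustrative):

```python
# contents of a hypothetical my_flows.py
from prefect import flow


class MyFlows:
    @flow
    @staticmethod
    def hello() -> str:
        return "hello"


# elsewhere, the flow could be loaded with the class-qualified entrypoint form:
#   from prefect.flows import load_flow_from_entrypoint
#   hello_flow = load_flow_from_entrypoint("my_flows.py:MyFlows.hello")
```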
prefect/infrastructure/provisioners/cloud_run.py CHANGED
@@ -133,6 +133,7 @@ class CloudRunPushProvisioner:
              await self._run_command(
                  f"gcloud iam service-accounts create {self._service_account_name}"
                  ' --display-name "Prefect Cloud Run Service Account"'
+                 f" --project={self._project}"
              )
          except subprocess.CalledProcessError as e:
              if "already exists" not in e.output.decode("utf-8"):
prefect/runner/_observers.py ADDED
@@ -0,0 +1,60 @@
+ import asyncio
+ import uuid
+ from contextlib import AsyncExitStack
+ from typing import Any, Protocol
+ 
+ from prefect.events.clients import PrefectEventSubscriber, get_events_subscriber
+ from prefect.events.filters import EventFilter, EventNameFilter
+ from prefect.logging.loggers import get_logger
+ 
+ 
+ class OnCancellingCallback(Protocol):
+     def __call__(self, flow_run_id: uuid.UUID) -> None: ...
+ 
+ 
+ class FlowRunCancellingObserver:
+     def __init__(self, on_cancelling: OnCancellingCallback):
+         self.logger = get_logger("FlowRunCancellingObserver")
+         self.on_cancelling = on_cancelling
+         self._events_subscriber: PrefectEventSubscriber | None
+         self._exit_stack = AsyncExitStack()
+ 
+     async def _consume_events(self):
+         if self._events_subscriber is None:
+             raise RuntimeError(
+                 "Events subscriber not initialized. Please use `async with` to initialize the observer."
+             )
+         async for event in self._events_subscriber:
+             try:
+                 flow_run_id = uuid.UUID(
+                     event.resource["prefect.resource.id"].replace(
+                         "prefect.flow-run.", ""
+                     )
+                 )
+                 self.on_cancelling(flow_run_id)
+             except ValueError:
+                 self.logger.debug(
+                     "Received event with invalid flow run ID: %s",
+                     event.resource["prefect.resource.id"],
+                 )
+ 
+     async def __aenter__(self):
+         self._events_subscriber = await self._exit_stack.enter_async_context(
+             get_events_subscriber(
+                 filter=EventFilter(
+                     event=EventNameFilter(name=["prefect.flow-run.Cancelling"])
+                 )
+             )
+         )
+         self._consumer_task = asyncio.create_task(self._consume_events())
+         return self
+ 
+     async def __aexit__(self, *exc_info: Any):
+         await self._exit_stack.__aexit__(*exc_info)
+         self._consumer_task.cancel()
+         try:
+             await self._consumer_task
+         except asyncio.CancelledError:
+             pass
+         except Exception:
+             self.logger.exception("Error consuming events")
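The new `FlowRunCancellingObserver` is an async context manager that subscribes to `prefect.flow-run.Cancelling` events and invokes a callback with each cancelled flow run's id (the large rewrite in `prefect/runner/runner.py`, not shown above, is presumably its consumer). A rough usage sketch with an illustrative callback:

```python
import asyncio
import uuid

from prefect.runner._observers import FlowRunCancellingObserver


def on_cancelling(flow_run_id: uuid.UUID) -> None:
    # illustrative callback: a real runner would terminate the matching subprocess here
    print(f"Flow run {flow_run_id} is being cancelled")


async def main() -> None:
    async with FlowRunCancellingObserver(on_cancelling=on_cancelling):
        # keep the observer alive while flow runs are executing
        await asyncio.sleep(60)


if __name__ == "__main__":
    asyncio.run(main())
```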