prefect-client 2.14.9__py3-none-any.whl → 2.14.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. prefect/__init__.py +4 -1
  2. prefect/_internal/pydantic/v2_schema.py +9 -2
  3. prefect/client/orchestration.py +51 -4
  4. prefect/client/schemas/objects.py +16 -1
  5. prefect/deployments/runner.py +34 -3
  6. prefect/engine.py +302 -25
  7. prefect/events/clients.py +216 -5
  8. prefect/events/filters.py +214 -0
  9. prefect/exceptions.py +4 -0
  10. prefect/flows.py +16 -0
  11. prefect/infrastructure/base.py +106 -1
  12. prefect/infrastructure/container.py +52 -0
  13. prefect/infrastructure/kubernetes.py +64 -0
  14. prefect/infrastructure/process.py +38 -0
  15. prefect/infrastructure/provisioners/__init__.py +2 -0
  16. prefect/infrastructure/provisioners/cloud_run.py +206 -34
  17. prefect/infrastructure/provisioners/container_instance.py +1080 -0
  18. prefect/infrastructure/provisioners/ecs.py +483 -48
  19. prefect/input/__init__.py +11 -0
  20. prefect/input/actions.py +88 -0
  21. prefect/input/run_input.py +107 -0
  22. prefect/runner/runner.py +5 -0
  23. prefect/runner/server.py +92 -8
  24. prefect/runner/utils.py +92 -0
  25. prefect/settings.py +34 -9
  26. prefect/states.py +26 -3
  27. prefect/utilities/dockerutils.py +31 -0
  28. prefect/utilities/processutils.py +5 -2
  29. prefect/utilities/services.py +10 -0
  30. prefect/utilities/validation.py +63 -0
  31. prefect/workers/__init__.py +1 -0
  32. prefect/workers/block.py +226 -0
  33. prefect/workers/utilities.py +2 -2
  34. {prefect_client-2.14.9.dist-info → prefect_client-2.14.11.dist-info}/METADATA +2 -1
  35. {prefect_client-2.14.9.dist-info → prefect_client-2.14.11.dist-info}/RECORD +38 -30
  36. {prefect_client-2.14.9.dist-info → prefect_client-2.14.11.dist-info}/LICENSE +0 -0
  37. {prefect_client-2.14.9.dist-info → prefect_client-2.14.11.dist-info}/WHEEL +0 -0
  38. {prefect_client-2.14.9.dist-info → prefect_client-2.14.11.dist-info}/top_level.txt +0 -0
prefect/__init__.py CHANGED
@@ -44,7 +44,7 @@ from prefect.context import tags
44
44
  from prefect.manifests import Manifest
45
45
  from prefect.utilities.annotations import unmapped, allow_failure
46
46
  from prefect.results import BaseResult
47
- from prefect.engine import pause_flow_run, resume_flow_run
47
+ from prefect.engine import pause_flow_run, resume_flow_run, suspend_flow_run
48
48
  from prefect.client.orchestration import get_client, PrefectClient
49
49
  from prefect.client.cloud import get_cloud_client, CloudClient
50
50
  import prefect.variables
@@ -172,4 +172,7 @@ __all__ = [
172
172
  "Runner",
173
173
  "serve",
174
174
  "deploy",
175
+ "pause_flow_run",
176
+ "resume_flow_run",
177
+ "suspend_flow_run",
175
178
  ]
@@ -82,11 +82,18 @@ def process_v2_params(
82
82
  return name, type_, field
83
83
 
84
84
 
85
- def create_v2_schema(name_: str, model_cfg: ConfigDict, **model_fields):
85
+ def create_v2_schema(
86
+ name_: str,
87
+ model_cfg: t.Optional[ConfigDict] = None,
88
+ model_base: t.Optional[t.Type[V2BaseModel]] = None,
89
+ **model_fields
90
+ ):
86
91
  """
87
92
  Create a pydantic v2 model and craft a v1 compatible schema from it.
88
93
  """
89
- model = create_model(name_, __config__=model_cfg, **model_fields)
94
+ model = create_model(
95
+ name_, __config__=model_cfg, __base__=model_base, **model_fields
96
+ )
90
97
  adapter = TypeAdapter(model)
91
98
 
92
99
  # root model references under #definitions
@@ -80,6 +80,7 @@ from prefect.client.schemas.objects import (
80
80
  Constant,
81
81
  Deployment,
82
82
  Flow,
83
+ FlowRunInput,
83
84
  FlowRunNotificationPolicy,
84
85
  FlowRunPolicy,
85
86
  Log,
@@ -1780,18 +1781,23 @@ class PrefectClient:
1780
1781
  raise
1781
1782
  return FlowRun.parse_obj(response.json())
1782
1783
 
1783
- async def resume_flow_run(self, flow_run_id: UUID) -> OrchestrationResult:
1784
+ async def resume_flow_run(
1785
+ self, flow_run_id: UUID, run_input: Optional[Dict] = None
1786
+ ) -> OrchestrationResult:
1784
1787
  """
1785
1788
  Resumes a paused flow run.
1786
1789
 
1787
1790
  Args:
1788
1791
  flow_run_id: the flow run ID of interest
1792
+ run_input: the input to resume the flow run with
1789
1793
 
1790
1794
  Returns:
1791
1795
  an OrchestrationResult model representation of state orchestration output
1792
1796
  """
1793
1797
  try:
1794
- response = await self._client.post(f"/flow_runs/{flow_run_id}/resume")
1798
+ response = await self._client.post(
1799
+ f"/flow_runs/{flow_run_id}/resume", json={"run_input": run_input}
1800
+ )
1795
1801
  except httpx.HTTPStatusError:
1796
1802
  raise
1797
1803
 
@@ -2181,7 +2187,7 @@ class PrefectClient:
2181
2187
  limit: int = None,
2182
2188
  offset: int = None,
2183
2189
  sort: LogSort = LogSort.TIMESTAMP_ASC,
2184
- ) -> None:
2190
+ ) -> List[Log]:
2185
2191
  """
2186
2192
  Read flow and task run logs.
2187
2193
  """
@@ -2491,7 +2497,6 @@ class PrefectClient:
2491
2497
  f"/work_pools/{work_pool_name}/get_scheduled_flow_runs",
2492
2498
  json=body,
2493
2499
  )
2494
-
2495
2500
  return pydantic.parse_obj_as(List[WorkerFlowRunResponse], response.json())
2496
2501
 
2497
2502
  async def create_artifact(
@@ -2689,6 +2694,48 @@ class PrefectClient:
2689
2694
  },
2690
2695
  )
2691
2696
 
2697
+ async def create_flow_run_input(self, flow_run_id: UUID, key: str, value: str):
2698
+ """
2699
+ Creates a flow run input.
2700
+
2701
+ Args:
2702
+ flow_run_id: The flow run id.
2703
+ key: The input key.
2704
+ value: The input value.
2705
+ """
2706
+
2707
+ # Initialize the input to ensure that the key is valid.
2708
+ FlowRunInput(flow_run_id=flow_run_id, key=key, value=value)
2709
+
2710
+ response = await self._client.post(
2711
+ f"/flow_runs/{flow_run_id}/input",
2712
+ json={"key": key, "value": value},
2713
+ )
2714
+ response.raise_for_status()
2715
+
2716
+ async def read_flow_run_input(self, flow_run_id: UUID, key: str) -> str:
2717
+ """
2718
+ Reads a flow run input.
2719
+
2720
+ Args:
2721
+ flow_run_id: The flow run id.
2722
+ key: The input key.
2723
+ """
2724
+ response = await self._client.get(f"/flow_runs/{flow_run_id}/input/{key}")
2725
+ response.raise_for_status()
2726
+ return response.content.decode()
2727
+
2728
+ async def delete_flow_run_input(self, flow_run_id: UUID, key: str):
2729
+ """
2730
+ Deletes a flow run input.
2731
+
2732
+ Args:
2733
+ flow_run_id: The flow run id.
2734
+ key: The input key.
2735
+ """
2736
+ response = await self._client.delete(f"/flow_runs/{flow_run_id}/input/{key}")
2737
+ response.raise_for_status()
2738
+
2692
2739
  async def __aenter__(self):
2693
2740
  """
2694
2741
  Start the client.
@@ -25,7 +25,10 @@ from typing_extensions import Literal
25
25
 
26
26
  from prefect._internal.schemas.bases import ObjectBaseModel, PrefectBaseModel
27
27
  from prefect._internal.schemas.fields import CreatedBy, DateTimeTZ, UpdatedBy
28
- from prefect._internal.schemas.validators import raise_on_name_with_banned_characters
28
+ from prefect._internal.schemas.validators import (
29
+ raise_on_name_alphanumeric_dashes_only,
30
+ raise_on_name_with_banned_characters,
31
+ )
29
32
  from prefect.client.schemas.schedules import SCHEDULE_TYPES
30
33
  from prefect.settings import PREFECT_CLOUD_API_URL
31
34
  from prefect.utilities.collections import AutoEnum, listrepr
@@ -105,6 +108,7 @@ class StateDetails(PrefectBaseModel):
105
108
  pause_timeout: DateTimeTZ = None
106
109
  pause_reschedule: bool = False
107
110
  pause_key: str = None
111
+ run_input_keyset: Optional[Dict[str, str]] = None
108
112
  refresh_cache: bool = None
109
113
 
110
114
 
@@ -1523,3 +1527,14 @@ class Variable(ObjectBaseModel):
1523
1527
  description="A list of variable tags",
1524
1528
  example=["tag-1", "tag-2"],
1525
1529
  )
1530
+
1531
+
1532
+ class FlowRunInput(ObjectBaseModel):
1533
+ flow_run_id: UUID = Field(description="The flow run ID associated with the input.")
1534
+ key: str = Field(description="The key of the input.")
1535
+ value: str = Field(description="The value of the input.")
1536
+
1537
+ @validator("key", check_fields=False)
1538
+ def validate_name_characters(cls, v):
1539
+ raise_on_name_alphanumeric_dashes_only(v)
1540
+ return v
@@ -44,7 +44,11 @@ from rich.table import Table
44
44
  from prefect._internal.concurrency.api import create_call, from_async
45
45
  from prefect._internal.pydantic import HAS_PYDANTIC_V2
46
46
  from prefect.runner.storage import RunnerStorage
47
- from prefect.settings import PREFECT_DEFAULT_WORK_POOL_NAME, PREFECT_UI_URL
47
+ from prefect.settings import (
48
+ PREFECT_DEFAULT_DOCKER_BUILD_NAMESPACE,
49
+ PREFECT_DEFAULT_WORK_POOL_NAME,
50
+ PREFECT_UI_URL,
51
+ )
48
52
  from prefect.utilities.collections import get_from_dict
49
53
 
50
54
  if HAS_PYDANTIC_V2:
@@ -70,6 +74,7 @@ from prefect.utilities.dockerutils import (
70
74
  docker_client,
71
75
  generate_default_dockerfile,
72
76
  parse_image_tag,
77
+ split_repository_path,
73
78
  )
74
79
  from prefect.utilities.slugify import slugify
75
80
 
@@ -356,6 +361,7 @@ class RunnerDeployment(BaseModel):
356
361
  cron: Optional[str] = None,
357
362
  rrule: Optional[str] = None,
358
363
  schedule: Optional[SCHEDULE_TYPES] = None,
364
+ is_schedule_active: Optional[bool] = None,
359
365
  parameters: Optional[dict] = None,
360
366
  triggers: Optional[List[DeploymentTrigger]] = None,
361
367
  description: Optional[str] = None,
@@ -378,6 +384,9 @@ class RunnerDeployment(BaseModel):
378
384
  rrule: An rrule schedule of when to execute runs of this flow.
379
385
  schedule: A schedule object of when to execute runs of this flow. Used for
380
386
  advanced scheduling options like timezone.
387
+ is_schedule_active: Whether or not to set the schedule for this deployment as active. If
388
+ not provided when creating a deployment, the schedule will be set as active. If not
389
+ provided when updating a deployment, the schedule's activation will not be changed.
381
390
  triggers: A list of triggers that should kick off a run of this flow.
382
391
  parameters: A dictionary of default parameter values to pass to runs of this flow.
383
392
  description: A description for the created deployment. Defaults to the flow's
@@ -404,6 +413,7 @@ class RunnerDeployment(BaseModel):
404
413
  name=Path(name).stem,
405
414
  flow_name=flow.name,
406
415
  schedule=schedule,
416
+ is_schedule_active=is_schedule_active,
407
417
  tags=tags or [],
408
418
  triggers=triggers or [],
409
419
  parameters=parameters or {},
@@ -461,6 +471,7 @@ class RunnerDeployment(BaseModel):
461
471
  cron: Optional[str] = None,
462
472
  rrule: Optional[str] = None,
463
473
  schedule: Optional[SCHEDULE_TYPES] = None,
474
+ is_schedule_active: Optional[bool] = None,
464
475
  parameters: Optional[dict] = None,
465
476
  triggers: Optional[List[DeploymentTrigger]] = None,
466
477
  description: Optional[str] = None,
@@ -484,6 +495,9 @@ class RunnerDeployment(BaseModel):
484
495
  rrule: An rrule schedule of when to execute runs of this flow.
485
496
  schedule: A schedule object of when to execute runs of this flow. Used for
486
497
  advanced scheduling options like timezone.
498
+ is_schedule_active: Whether or not to set the schedule for this deployment as active. If
499
+ not provided when creating a deployment, the schedule will be set as active. If not
500
+ provided when updating a deployment, the schedule's activation will not be changed.
487
501
  triggers: A list of triggers that should kick off a run of this flow.
488
502
  parameters: A dictionary of default parameter values to pass to runs of this flow.
489
503
  description: A description for the created deployment. Defaults to the flow's
@@ -516,6 +530,7 @@ class RunnerDeployment(BaseModel):
516
530
  name=Path(name).stem,
517
531
  flow_name=flow.name,
518
532
  schedule=schedule,
533
+ is_schedule_active=is_schedule_active,
519
534
  tags=tags or [],
520
535
  triggers=triggers or [],
521
536
  parameters=parameters or {},
@@ -544,6 +559,7 @@ class RunnerDeployment(BaseModel):
544
559
  cron: Optional[str] = None,
545
560
  rrule: Optional[str] = None,
546
561
  schedule: Optional[SCHEDULE_TYPES] = None,
562
+ is_schedule_active: Optional[bool] = None,
547
563
  parameters: Optional[dict] = None,
548
564
  triggers: Optional[List[DeploymentTrigger]] = None,
549
565
  description: Optional[str] = None,
@@ -570,6 +586,9 @@ class RunnerDeployment(BaseModel):
570
586
  rrule: An rrule schedule of when to execute runs of this flow.
571
587
  schedule: A schedule object of when to execute runs of this flow. Used for
572
588
  advanced scheduling options like timezone.
589
+ is_schedule_active: Whether or not to set the schedule for this deployment as active. If
590
+ not provided when creating a deployment, the schedule will be set as active. If not
591
+ provided when updating a deployment, the schedule's activation will not be changed.
573
592
  triggers: A list of triggers that should kick off a run of this flow.
574
593
  parameters: A dictionary of default parameter values to pass to runs of this flow.
575
594
  description: A description for the created deployment. Defaults to the flow's
@@ -606,6 +625,7 @@ class RunnerDeployment(BaseModel):
606
625
  name=Path(name).stem,
607
626
  flow_name=flow.name,
608
627
  schedule=schedule,
628
+ is_schedule_active=is_schedule_active,
609
629
  tags=tags or [],
610
630
  triggers=triggers or [],
611
631
  parameters=parameters or {},
@@ -650,7 +670,14 @@ class DeploymentImage:
650
670
  f"Only one tag can be provided - both {image_tag!r} and {tag!r} were"
651
671
  " provided as tags."
652
672
  )
653
- self.name = image_name
673
+ namespace, repository = split_repository_path(image_name)
674
+ # if the provided image name does not include a namespace (registry URL or user/org name),
675
+ # use the default namespace
676
+ if not namespace:
677
+ namespace = PREFECT_DEFAULT_DOCKER_BUILD_NAMESPACE.value()
678
+ # join the namespace and repository to create the full image name
679
+ # ignore namespace if it is None
680
+ self.name = "/".join(filter(None, [namespace, repository]))
654
681
  self.tag = tag or image_tag or slugify(pendulum.now("utc").isoformat())
655
682
  self.dockerfile = dockerfile
656
683
  self.build_kwargs = build_kwargs
@@ -774,7 +801,11 @@ async def deploy(
774
801
  is_docker_based_work_pool = get_from_dict(
775
802
  work_pool.base_job_template, "variables.properties.image", False
776
803
  )
777
- if not is_docker_based_work_pool:
804
+ is_block_based_work_pool = get_from_dict(
805
+ work_pool.base_job_template, "variables.properties.block", False
806
+ )
807
+ # carve out an exception for block based work pools that only have a block in their base job template
808
+ if not is_docker_based_work_pool and not is_block_based_work_pool:
778
809
  raise ValueError(
779
810
  f"Work pool {work_pool_name!r} does not support custom Docker images. "
780
811
  "Please use a work pool with an `image` variable in its base job template."