prefect-client 2.14.8__py3-none-any.whl → 2.14.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,214 @@
1
+ from typing import List, Optional, Tuple, cast
2
+ from uuid import UUID
3
+
4
+ import pendulum
5
+
6
+ from prefect._internal.pydantic import HAS_PYDANTIC_V2
7
+ from prefect.events.schemas import Event, Resource, ResourceSpecification
8
+ from prefect.server.utilities.schemas import DateTimeTZ, PrefectBaseModel
9
+
10
+ if HAS_PYDANTIC_V2:
11
+ from pydantic.v1 import Field
12
+ else:
13
+ from pydantic import Field
14
+
15
+
16
class EventDataFilter(PrefectBaseModel):
    """A base class for filtering event data.

    Subclasses declare filter criteria as pydantic fields; nested fields whose
    type is itself an ``EventDataFilter`` are applied recursively by
    ``includes``.
    """

    class Config:
        # Reject unknown fields so a malformed filter fails loudly.
        extra = "forbid"

    def get_filters(self) -> List["EventDataFilter"]:
        """Return all truthy nested filters declared as fields on this model.

        Fixes two issues with the original implementation: the loop variable
        shadowed the builtin `filter`, and `issubclass` was called on
        `field.type_` without checking it is a class — typing constructs such
        as `Tuple[str, str]` (see EventRelatedFilter.resources_in_roles) raise
        `TypeError` in `issubclass`.
        """
        nested = (
            getattr(self, name)
            for name, field in self.__fields__.items()
            if isinstance(field.type_, type)
            and issubclass(field.type_, EventDataFilter)
        )
        return [child for child in nested if child]

    def includes(self, event: Event) -> bool:
        """Does the given event match the criteria of this filter?"""
        return all(child.includes(event) for child in self.get_filters())

    def excludes(self, event: Event) -> bool:
        """Would the given filter exclude this event?"""
        return not self.includes(event)
40
+
41
+
42
class EventOccurredFilter(EventDataFilter):
    """Restricts events to a time window; both endpoints are inclusive."""

    # Defaults to the last 180 days (measured from the start of today, UTC).
    since: DateTimeTZ = Field(
        default_factory=lambda: cast(
            DateTimeTZ,
            pendulum.now("UTC").start_of("day").subtract(days=180),
        ),
        description="Only include events after this time (inclusive)",
    )
    # Defaults to "now" at the moment the filter is constructed.
    until: DateTimeTZ = Field(
        default_factory=lambda: cast(DateTimeTZ, pendulum.now("UTC")),
        description="Only include events prior to this time (inclusive)",
    )

    def includes(self, event: Event) -> bool:
        """True when the event's occurred time falls within [since, until]."""
        occurred = event.occurred
        return self.since <= occurred and occurred <= self.until
57
+
58
+
59
class EventNameFilter(EventDataFilter):
    """Filters events by their event-name string, by prefix or exact match."""

    prefix: Optional[List[str]] = Field(
        default=None,
        description="Only include events matching one of these prefixes",
    )
    exclude_prefix: Optional[List[str]] = Field(
        default=None,
        description="Exclude events matching one of these prefixes",
    )

    name: Optional[List[str]] = Field(
        default=None,
        description="Only include events matching one of these names exactly",
    )
    exclude_name: Optional[List[str]] = Field(
        default=None,
        description="Exclude events matching one of these names exactly",
    )

    def includes(self, event: Event) -> bool:
        """Apply prefix/name inclusion and exclusion rules to the event name."""
        event_name = event.event

        # str.startswith accepts a tuple of candidates, replacing any(...)
        if self.prefix and not event_name.startswith(tuple(self.prefix)):
            return False
        if self.exclude_prefix and event_name.startswith(tuple(self.exclude_prefix)):
            return False

        if self.name and event_name not in self.name:
            return False
        if self.exclude_name and event_name in self.exclude_name:
            return False

        return True
92
+
93
+
94
class EventResourceFilter(EventDataFilter):
    """Filters events by attributes of their primary resource."""

    id: Optional[List[str]] = Field(
        default=None,
        description="Only include events for resources with these IDs",
    )
    id_prefix: Optional[List[str]] = Field(
        default=None,
        description=(
            "Only include events for resources with IDs starting with these prefixes."
        ),
    )
    labels: Optional[ResourceSpecification] = Field(
        default=None,
        description="Only include events for resources with these labels",
    )

    def includes(self, event: Event) -> bool:
        """True when the event's primary resource satisfies every configured rule."""
        resource = event.resource

        if self.id and resource.id not in self.id:
            return False
        if self.id_prefix and not resource.id.startswith(tuple(self.id_prefix)):
            return False
        if self.labels and not self.labels.matches(resource):
            return False

        return True
124
+
125
+
126
class EventRelatedFilter(EventDataFilter):
    """Filter criteria applied to an event's related resources.

    NOTE(review): this filter declares criteria only — no `includes` override
    is visible here, so in-memory matching presumably happens elsewhere.
    """

    id: Optional[List[str]] = Field(
        default=None,
        description="Only include events for related resources with these IDs",
    )
    role: Optional[List[str]] = Field(
        default=None,
        description="Only include events for related resources in these roles",
    )
    resources_in_roles: Optional[List[Tuple[str, str]]] = Field(
        default=None,
        description=(
            "Only include events with specific related resources in specific roles"
        ),
    )
    labels: Optional[ResourceSpecification] = Field(
        default=None,
        description="Only include events for related resources with these labels",
    )
142
+
143
+
144
class EventAnyResourceFilter(EventDataFilter):
    """Matches events where ANY involved resource (primary or related)
    satisfies the configured criteria."""

    id: Optional[List[str]] = Field(
        default=None,
        description="Only include events for resources with these IDs",
    )
    id_prefix: Optional[List[str]] = Field(
        default=None,
        description=(
            "Only include events for resources with IDs starting with these prefixes"
        ),
    )
    labels: Optional[ResourceSpecification] = Field(
        default=None,
        description="Only include events for related resources with these labels",
    )

    def includes(self, event: Event) -> bool:
        """True when at least one of the event's resources matches."""
        candidates = [event.resource, *event.related]
        return any(self._includes(candidate) for candidate in candidates)

    def _includes(self, resource: Resource) -> bool:
        """Check a single resource against all configured criteria."""
        if self.id and resource.id not in self.id:
            return False
        if self.id_prefix and not resource.id.startswith(tuple(self.id_prefix)):
            return False
        if self.labels and not self.labels.matches(resource):
            return False
        return True
178
+
179
+
180
class EventIDFilter(EventDataFilter):
    """Filters events by their unique event ID."""

    id: Optional[List[UUID]] = Field(
        default=None,
        description="Only include events with one of these IDs",
    )

    def includes(self, event: Event) -> bool:
        """True when no ID criterion is set, or the event's ID is listed."""
        return not self.id or event.id in self.id
191
+
192
+
193
class EventFilter(EventDataFilter):
    """Top-level composite filter for querying events.

    Each field is itself an EventDataFilter; the base class applies every
    non-empty nested filter when evaluating an event.
    """

    # Always present: defaults to the trailing-180-day window.
    occurred: EventOccurredFilter = Field(
        default_factory=EventOccurredFilter,
        description="Filter criteria for when the events occurred",
    )
    event: Optional[EventNameFilter] = Field(
        default=None,
        description="Filter criteria for the event name",
    )
    any_resource: Optional[EventAnyResourceFilter] = Field(
        default=None,
        description="Filter criteria for any resource involved in the event",
    )
    resource: Optional[EventResourceFilter] = Field(
        default=None,
        description="Filter criteria for the resource of the event",
    )
    related: Optional[EventRelatedFilter] = Field(
        default=None,
        description="Filter criteria for the related resources of the event",
    )
    # Always present: empty by default (matches every ID).
    id: EventIDFilter = Field(
        default_factory=EventIDFilter,
        description="Filter criteria for the events' ID",
    )
prefect/exceptions.py CHANGED
@@ -296,6 +296,10 @@ class Pause(PrefectSignal):
296
296
  Raised when a flow run is PAUSED and needs to exit for resubmission.
297
297
  """
298
298
 
299
def __init__(self, *args, state=None, **kwargs):
    """Carry the PAUSED state on the signal so the engine can resubmit it.

    Args:
        state: the paused flow-run state, if available; stored on the instance.
    """
    super().__init__(*args, **kwargs)
    self.state = state
302
+
299
303
 
300
304
  class ExternalSignal(BaseException):
301
305
  """
@@ -11,22 +11,27 @@ from prefect._internal.compatibility.experimental import (
11
11
  experiment_enabled,
12
12
  )
13
13
  from prefect._internal.pydantic import HAS_PYDANTIC_V2
14
+ from prefect.client.schemas.actions import WorkPoolCreate
15
+ from prefect.exceptions import ObjectAlreadyExists
14
16
 
15
17
  if HAS_PYDANTIC_V2:
16
18
  import pydantic.v1 as pydantic
17
19
  else:
18
20
  import pydantic
19
21
 
22
+ from rich.console import Console
20
23
  from typing_extensions import Self
21
24
 
22
25
  import prefect
23
- from prefect.blocks.core import Block
26
+ from prefect.blocks.core import Block, BlockNotSavedError
24
27
  from prefect.logging import get_logger
25
28
  from prefect.settings import (
26
29
  PREFECT_EXPERIMENTAL_WARN,
27
30
  PREFECT_EXPERIMENTAL_WARN_ENHANCED_CANCELLATION,
31
+ PREFECT_UI_URL,
28
32
  get_current_settings,
29
33
  )
34
+ from prefect.utilities.asyncutils import sync_compatible
30
35
 
31
36
  MIN_COMPAT_PREFECT_VERSION = "2.0b12"
32
37
 
@@ -66,6 +71,106 @@ class Infrastructure(Block, abc.ABC):
66
71
  description="The command to run in the infrastructure.",
67
72
  )
68
73
 
74
async def generate_work_pool_base_job_template(self):
    """Build a base job template that wires this saved block into a work pool.

    The template exposes a single required "block" variable whose default
    points at this block's saved document, so workers resolve the block at
    job-creation time.

    Raises:
        BlockNotSavedError: if the block was never ``.save()``d (no document id).
    """
    if self._block_document_id is None:
        raise BlockNotSavedError(
            "Cannot publish as work pool, block has not been saved. Please call"
            " `.save()` on your block before publishing."
        )

    cls_name = self.__class__.__name__
    block_schema = self.__class__.schema()

    block_variable = {
        "title": "Block",
        "description": "The infrastructure block to use for job creation.",
        # Reference the schema definition registered below.
        "allOf": [{"$ref": f"#/definitions/{cls_name}"}],
        # Default resolves to this specific saved block document.
        "default": {"$ref": {"block_document_id": str(self._block_document_id)}},
    }

    return {
        "job_configuration": {"block": "{{ block }}"},
        "variables": {
            "type": "object",
            "properties": {"block": block_variable},
            "required": ["block"],
            "definitions": {cls_name: block_schema},
        },
    }
102
+
103
def get_corresponding_worker_type(self):
    """Generic infrastructure blocks map to the catch-all "block" worker type."""
    return "block"
105
+
106
@sync_compatible
async def publish_as_work_pool(self, work_pool_name: Optional[str] = None):
    """
    Creates a work pool configured to use the given block as the job creator.

    Used to migrate from an agent setup to a worker setup.

    Args:
        work_pool_name: The name to give to the created work pool. If not provided, the name of the current
            block will be used.

    Raises:
        ValueError: if no name was given and the block has never been saved
            (so it has no document name to fall back on).
    """

    base_job_template = await self.generate_work_pool_base_job_template()
    work_pool_name = work_pool_name or self._block_document_name

    if work_pool_name is None:
        raise ValueError(
            "`work_pool_name` must be provided if the block has not been saved."
        )

    console = Console()

    try:
        async with prefect.get_client() as client:
            work_pool = await client.create_work_pool(
                work_pool=WorkPoolCreate(
                    name=work_pool_name,
                    type=self.get_corresponding_worker_type(),
                    base_job_template=base_job_template,
                )
            )
    except ObjectAlreadyExists:
        # Creation is not idempotent; report and bail rather than clobber.
        console.print(
            (
                f"Work pool with name {work_pool_name!r} already exists, please use"
                " a different name."
            ),
            style="red",
        )
        return

    console.print(
        f"Work pool {work_pool.name} created!",
        style="green",
    )
    if PREFECT_UI_URL:
        console.print(
            "You see your new work pool in the UI at"
            f" {PREFECT_UI_URL.value()}/work-pools/work-pool/{work_pool.name}"
        )

    # BUG FIX: this string was missing its `f` prefix, so the literal text
    # "{work_pool.name}" was printed instead of the actual pool name.
    deploy_script = (
        f"my_flow.deploy(work_pool_name='{work_pool.name}', image='my_image:tag')"
    )
    if not hasattr(self, "image"):
        # Image-less infrastructure needs a source-based deployment example.
        deploy_script = (
            "my_flow.from_source(source='https://github.com/org/repo.git',"
            f" entrypoint='flow.py:my_flow').deploy(work_pool_name='{work_pool.name}')"
        )
    console.print(
        "\nYou can deploy a flow to this work pool by calling"
        f" [blue].deploy[/]:\n\n\t{deploy_script}\n"
    )
    console.print(
        "\nTo start a worker to execute flow runs in this work pool run:\n"
    )
    console.print(f"\t[blue]prefect worker start --pool {work_pool.name}[/]\n")
173
+
69
174
  @abc.abstractmethod
70
175
  async def run(
71
176
  self,
@@ -1,5 +1,6 @@
1
1
  import json
2
2
  import re
3
+ import shlex
3
4
  import sys
4
5
  import urllib.parse
5
6
  import warnings
@@ -385,6 +386,57 @@ class DockerContainer(Infrastructure):
385
386
  finally:
386
387
  docker_client.close()
387
388
 
389
async def generate_work_pool_base_job_template(self):
    """Project this block's customized fields onto the default "docker"
    work pool base job template.

    Falls back to the generic block-wrapping template when no default
    docker template is available.
    """
    from prefect.workers.utilities import (
        get_default_base_job_template_for_infrastructure_type,
    )

    template = await get_default_base_job_template_for_infrastructure_type(
        self.get_corresponding_worker_type()
    )
    if template is None:
        return await super().generate_work_pool_base_job_template()

    # Internal block bookkeeping fields that must not become variables.
    ignored_keys = {
        "type",
        "block_type_slug",
        "_block_document_id",
        "_block_document_name",
        "_is_anonymous",
    }
    # Map enum pull policies onto the worker template's string values.
    pull_policy_names = {
        ImagePullPolicy.ALWAYS: "Always",
        ImagePullPolicy.NEVER: "Never",
        ImagePullPolicy.IF_NOT_PRESENT: "IfNotPresent",
    }

    properties = template["variables"]["properties"]
    for key, value in self.dict(exclude_unset=True, exclude_defaults=True).items():
        if key == "command":
            # The worker template expects a shell string, not an argv list.
            properties["command"]["default"] = shlex.join(value)
        elif key == "image_registry":
            self.logger.warning(
                "Image registry blocks are not supported by Docker"
                " work pools. Please authenticate to your registry using"
                " the `docker login` command on your worker instances."
            )
        elif key in ignored_keys:
            continue
        elif key == "image_pull_policy":
            # Unknown/unset policies fall through to None, as before.
            properties[key]["default"] = pull_policy_names.get(value)
        elif key in properties:
            properties[key]["default"] = value
        else:
            self.logger.warning(
                f"Variable {key!r} is not supported by Docker work pools. Skipping."
            )

    return template
436
+
437
def get_corresponding_worker_type(self):
    """DockerContainer blocks migrate to the "docker" worker type."""
    return "docker"
439
+
388
440
  def _get_infrastructure_pid(self, container_id: str) -> str:
389
441
  """Generates a Docker infrastructure_pid string in the form of
390
442
  `<docker_host_base_url>:<container_id>`.
@@ -1,6 +1,7 @@
1
1
  import asyncio
2
2
  import contextlib
3
3
  import os
4
+ import shlex
4
5
  import signal
5
6
  import socket
6
7
  import subprocess
@@ -247,6 +248,43 @@ class Process(Infrastructure):
247
248
  def _base_flow_run_command(self):
248
249
  return [get_sys_executable(), "-m", "prefect.engine"]
249
250
 
251
def get_corresponding_worker_type(self):
    """Process blocks migrate to the "process" worker type."""
    return "process"
253
+
254
async def generate_work_pool_base_job_template(self):
    """Project this block's customized fields onto the default "process"
    work pool base job template.

    Raises:
        ValueError: if no default base job template exists for the process
            worker type.
    """
    from prefect.workers.utilities import (
        get_default_base_job_template_for_infrastructure_type,
    )

    base_job_template = await get_default_base_job_template_for_infrastructure_type(
        self.get_corresponding_worker_type(),
    )
    # BUG FIX: previously a bare `assert`, which is stripped under `python -O`
    # and would let a None template flow into the loop below; raise explicitly.
    if base_job_template is None:
        raise ValueError(
            "Failed to generate default base job template for Process worker."
        )

    # Internal block bookkeeping fields that must not become variables.
    ignored_keys = {
        "type",
        "block_type_slug",
        "_block_document_id",
        "_block_document_name",
        "_is_anonymous",
    }
    properties = base_job_template["variables"]["properties"]
    for key, value in self.dict(exclude_unset=True, exclude_defaults=True).items():
        if key == "command":
            # The worker template expects a shell string, not an argv list.
            properties["command"]["default"] = shlex.join(value)
        elif key in ignored_keys:
            continue
        elif key in properties:
            properties[key]["default"] = value
        else:
            self.logger.warning(
                f"Variable {key!r} is not supported by Process work pools."
                " Skipping."
            )

    return base_job_template
287
+
250
288
 
251
289
  class ProcessResult(InfrastructureResult):
252
290
  """Contains information about the final state of a completed process"""
@@ -1,11 +1,15 @@
1
- from typing import Any, Dict, Optional, Protocol
1
+ from typing import Any, Dict, Optional, Protocol, Type
2
2
 
3
3
  from prefect.client.orchestration import PrefectClient
4
4
  from .cloud_run import CloudRunPushProvisioner
5
+ from .container_instance import ContainerInstancePushProvisioner
6
+ from .ecs import ElasticContainerServicePushProvisioner
5
7
  import rich.console
6
8
 
7
9
# Registry mapping "work-pool-type:push" slugs to their provisioner classes;
# consulted by get_infrastructure_provisioner_for_work_pool_type.
_provisioners = {
    "cloud-run:push": CloudRunPushProvisioner,
    "azure-container-instance:push": ContainerInstancePushProvisioner,
    "ecs:push": ElasticContainerServicePushProvisioner,
}
10
14
 
11
15
 
@@ -29,7 +33,7 @@ class Provisioner(Protocol):
29
33
 
30
34
  def get_infrastructure_provisioner_for_work_pool_type(
31
35
  work_pool_type: str,
32
- ) -> Provisioner:
36
+ ) -> Type[Provisioner]:
33
37
  """
34
38
  Retrieve an instance of the infrastructure provisioner for the given work pool type.
35
39
 
@@ -259,6 +259,12 @@ class CloudRunPushProvisioner:
259
259
  ] = {"$ref": {"block_document_id": str(block_doc_id)}}
260
260
  progress.advance(task)
261
261
 
262
- self._console.print("Infrastructure successfully provisioned!", style="green")
262
+ self._console.print(
263
+ (
264
+ f"Infrastructure successfully provisioned for '{work_pool_name}' work"
265
+ " pool!"
266
+ ),
267
+ style="green",
268
+ )
263
269
 
264
270
  return base_job_template_copy