prefect-client 3.0.0rc4__py3-none-any.whl → 3.0.0rc6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/__init__.py +0 -2
- prefect/{records/cache_policies.py → cache_policies.py} +78 -23
- prefect/client/schemas/schedules.py +9 -2
- prefect/client/types/__init__.py +0 -0
- prefect/client/types/flexible_schedule_list.py +11 -0
- prefect/concurrency/asyncio.py +14 -4
- prefect/concurrency/services.py +29 -22
- prefect/concurrency/sync.py +3 -5
- prefect/context.py +0 -114
- prefect/deployments/__init__.py +1 -1
- prefect/deployments/runner.py +11 -93
- prefect/deployments/schedules.py +5 -7
- prefect/docker/__init__.py +20 -0
- prefect/docker/docker_image.py +82 -0
- prefect/flow_engine.py +14 -18
- prefect/flows.py +24 -93
- prefect/futures.py +13 -1
- prefect/infrastructure/provisioners/cloud_run.py +2 -2
- prefect/infrastructure/provisioners/container_instance.py +2 -2
- prefect/infrastructure/provisioners/ecs.py +2 -2
- prefect/records/result_store.py +5 -1
- prefect/results.py +78 -11
- prefect/runner/runner.py +5 -3
- prefect/runner/server.py +6 -2
- prefect/states.py +13 -3
- prefect/task_engine.py +10 -1
- prefect/tasks.py +8 -6
- prefect/transactions.py +2 -2
- prefect/types/entrypoint.py +13 -0
- prefect/utilities/dockerutils.py +2 -1
- {prefect_client-3.0.0rc4.dist-info → prefect_client-3.0.0rc6.dist-info}/METADATA +1 -1
- {prefect_client-3.0.0rc4.dist-info → prefect_client-3.0.0rc6.dist-info}/RECORD +35 -30
- {prefect_client-3.0.0rc4.dist-info → prefect_client-3.0.0rc6.dist-info}/LICENSE +0 -0
- {prefect_client-3.0.0rc4.dist-info → prefect_client-3.0.0rc6.dist-info}/WHEEL +0 -0
- {prefect_client-3.0.0rc4.dist-info → prefect_client-3.0.0rc6.dist-info}/top_level.txt +0 -0
prefect/__init__.py
CHANGED
@@ -49,8 +49,6 @@ import prefect.blocks.system
 # Initialize the process-wide profile and registry at import time
 import prefect.context
 
-prefect.context.initialize_object_registry()
-
 # Perform any forward-ref updates needed for Pydantic models
 import prefect.client.schemas
 
prefect/{records/cache_policies.py → cache_policies.py}
RENAMED
@@ -8,6 +8,10 @@ from prefect.utilities.hashing import hash_objects
 
 @dataclass
 class CachePolicy:
+    """
+    Base class for all cache policies.
+    """
+
     @classmethod
     def from_cache_key_fn(
         cls, cache_key_fn: Callable[["TaskRunContext", Dict[str, Any]], Optional[str]]
@@ -59,6 +63,11 @@ class CachePolicy:
 
 @dataclass
 class CacheKeyFnPolicy(CachePolicy):
+    """
+    This policy accepts a custom function with signature f(task_run_context, task_parameters, flow_parameters) -> str
+    and uses it to compute a task run cache key.
+    """
+
     # making it optional for tests
     cache_key_fn: Optional[
         Callable[["TaskRunContext", Dict[str, Any]], Optional[str]]
@@ -77,6 +86,13 @@ class CacheKeyFnPolicy(CachePolicy):
 
 @dataclass
 class CompoundCachePolicy(CachePolicy):
+    """
+    This policy is constructed from two or more other cache policies and works by computing the keys
+    for each policy individually, and then hashing a sorted tuple of all computed keys.
+
+    Any keys that return `None` will be ignored.
+    """
+
     policies: Optional[list] = None
 
     def compute_key(
@@ -88,20 +104,25 @@ class CompoundCachePolicy(CachePolicy):
     ) -> Optional[str]:
         keys = []
         for policy in self.policies or []:
-            keys.append(
-                policy.compute_key(
-                    task_ctx=task_ctx,
-                    inputs=inputs,
-                    flow_parameters=flow_parameters,
-                    **kwargs,
-                )
+            policy_key = policy.compute_key(
+                task_ctx=task_ctx,
+                inputs=inputs,
+                flow_parameters=flow_parameters,
+                **kwargs,
             )
+            if policy_key is not None:
+                keys.append(policy_key)
+        if not keys:
+            return None
         return hash_objects(*keys)
 
 
 @dataclass
-class Default(CachePolicy):
-    "
+class _None(CachePolicy):
+    """
+    Policy that always returns `None` for the computed cache key.
+    This policy prevents persistence.
+    """
 
     def compute_key(
         self,
@@ -110,12 +131,14 @@ class Default(CachePolicy):
         flow_parameters: Dict[str, Any],
         **kwargs,
     ) -> Optional[str]:
-        return
+        return None
 
 
 @dataclass
-class _None(CachePolicy):
-    "
+class TaskSource(CachePolicy):
+    """
+    Policy for computing a cache key based on the source code of the task.
+    """
 
     def compute_key(
         self,
@@ -124,11 +147,22 @@ class _None(CachePolicy):
         flow_parameters: Dict[str, Any],
         **kwargs,
     ) -> Optional[str]:
-
+        if not task_ctx:
+            return None
+        try:
+            lines = inspect.getsource(task_ctx.task)
+        except TypeError:
+            lines = inspect.getsource(task_ctx.task.fn.__class__)
+
+        return hash_objects(lines)
 
 
 @dataclass
-class TaskDef(CachePolicy):
+class FlowParameters(CachePolicy):
+    """
+    Policy that computes the cache key based on a hash of the flow parameters.
+    """
+
     def compute_key(
         self,
         task_ctx: TaskRunContext,
@@ -136,21 +170,37 @@ class TaskDef(CachePolicy):
         flow_parameters: Dict[str, Any],
         **kwargs,
     ) -> Optional[str]:
-
-
+        if not flow_parameters:
+            return None
+        return hash_objects(flow_parameters)
 
 
 @dataclass
-class
-
+class RunId(CachePolicy):
+    """
+    Returns either the prevailing flow run ID, or if not found, the prevailing task
+    run ID.
+    """
+
+    def compute_key(
+        self,
+        task_ctx: TaskRunContext,
+        inputs: Dict[str, Any],
+        flow_parameters: Dict[str, Any],
+        **kwargs,
+    ) -> Optional[str]:
+        if not task_ctx:
+            return None
+        run_id = task_ctx.task_run.flow_run_id
+        if run_id is None:
+            run_id = task_ctx.task_run.id
+        return str(run_id)
 
 
 @dataclass
 class Inputs(CachePolicy):
     """
-
-
-    And exclude/include config.
+    Policy that computes a cache key based on a hash of the runtime inputs provided to the task..
     """
 
     exclude: Optional[list] = None
@@ -166,6 +216,9 @@ class Inputs(CachePolicy):
         inputs = inputs or {}
         exclude = self.exclude or []
 
+        if not inputs:
+            return None
+
         for key, val in inputs.items():
             if key not in exclude:
                 hashed_inputs[key] = val
@@ -173,7 +226,9 @@ class Inputs(CachePolicy):
         return hash_objects(hashed_inputs)
 
 
-DEFAULT = Default()
 INPUTS = Inputs()
 NONE = _None()
-
+TASK_SOURCE = TaskSource()
+FLOW_PARAMETERS = FlowParameters()
+RUN_ID = RunId()
+DEFAULT = INPUTS + TASK_SOURCE + RUN_ID
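A rough sketch of how the renamed module's building blocks compose, based only on the hunks above (the `policies` field and the keyword-based `compute_key` signature); the input and flow-parameter values are made up for illustration. Note that the new `DEFAULT` export is itself a composition (`INPUTS + TASK_SOURCE + RUN_ID`) rather than a dedicated class.

from prefect.cache_policies import FLOW_PARAMETERS, INPUTS, CompoundCachePolicy

# Combine two of the building-block policies. Per the diff above, sub-keys
# that come back as None are ignored, and the compound key is None only if
# every sub-key is None.
policy = CompoundCachePolicy(policies=[INPUTS, FLOW_PARAMETERS])

key = policy.compute_key(
    task_ctx=None,                   # no task run context in this sketch
    inputs={"x": 1, "y": 2},         # hypothetical task call arguments
    flow_parameters={"env": "dev"},  # hypothetical flow parameters
)
print(key)  # a stable hash derived from the non-None sub-keys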
prefect/client/schemas/schedules.py
CHANGED
@@ -3,13 +3,14 @@ Schedule schemas
 """
 
 import datetime
-from typing import Annotated, Optional, Union
+from typing import Annotated, Any, Optional, Union
 
 import dateutil
 import dateutil.rrule
 import pendulum
 from pydantic import AfterValidator, ConfigDict, Field, field_validator, model_validator
 from pydantic_extra_types.pendulum_dt import DateTime
+from typing_extensions import TypeAlias, TypeGuard
 
 from prefect._internal.schemas.bases import PrefectBaseModel
 from prefect._internal.schemas.validators import (
@@ -279,7 +280,13 @@ class NoSchedule(PrefectBaseModel):
     model_config = ConfigDict(extra="forbid")
 
 
-SCHEDULE_TYPES = Union[IntervalSchedule, CronSchedule, RRuleSchedule, NoSchedule]
+SCHEDULE_TYPES: TypeAlias = Union[
+    IntervalSchedule, CronSchedule, RRuleSchedule, NoSchedule
+]
+
+
+def is_schedule_type(obj: Any) -> TypeGuard[SCHEDULE_TYPES]:
+    return isinstance(obj, (IntervalSchedule, CronSchedule, RRuleSchedule, NoSchedule))
 
 
 def construct_schedule(
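A small sketch of the new `is_schedule_type` guard; the `CronSchedule` value is just an illustrative placeholder.

from prefect.client.schemas.schedules import CronSchedule, is_schedule_type

candidate: object = CronSchedule(cron="0 9 * * 1-5")

if is_schedule_type(candidate):
    # The TypeGuard return type lets static checkers narrow `candidate`
    # to SCHEDULE_TYPES inside this branch.
    print(type(candidate).__name__)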
prefect/client/types/__init__.py
ADDED
File without changes

prefect/client/types/flexible_schedule_list.py
ADDED
@@ -0,0 +1,11 @@
+from typing import TYPE_CHECKING, Any, Sequence, Union
+
+from typing_extensions import TypeAlias
+
+if TYPE_CHECKING:
+    from prefect.client.schemas.actions import DeploymentScheduleCreate
+    from prefect.client.schemas.schedules import SCHEDULE_TYPES
+
+FlexibleScheduleList: TypeAlias = Sequence[
+    Union[DeploymentScheduleCreate, dict[str, Any], "SCHEDULE_TYPES"]
+]
prefect/concurrency/asyncio.py
CHANGED
@@ -13,7 +13,6 @@ except ImportError:
 
 from prefect.client.orchestration import get_client
 from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
-from prefect.utilities.timeout import timeout_async
 
 from .events import (
     _emit_concurrency_acquisition_events,
@@ -26,6 +25,10 @@ class ConcurrencySlotAcquisitionError(Exception):
     """Raised when an unhandlable occurs while acquiring concurrency slots."""
 
 
+class AcquireConcurrencySlotTimeoutError(TimeoutError):
+    """Raised when acquiring a concurrency slot times out."""
+
+
 @asynccontextmanager
 async def concurrency(
     names: Union[str, List[str]],
@@ -58,8 +61,9 @@ async def concurrency(
     ```
     """
     names = names if isinstance(names, list) else [names]
-    with timeout_async(seconds=timeout_seconds):
-        limits = await _acquire_concurrency_slots(names, occupy)
+    limits = await _acquire_concurrency_slots(
+        names, occupy, timeout_seconds=timeout_seconds
+    )
     acquisition_time = pendulum.now("UTC")
     emitted_events = _emit_concurrency_acquisition_events(limits, occupy)
 
@@ -91,12 +95,18 @@ async def _acquire_concurrency_slots(
     names: List[str],
     slots: int,
     mode: Union[Literal["concurrency"], Literal["rate_limit"]] = "concurrency",
+    timeout_seconds: Optional[float] = None,
 ) -> List[MinimalConcurrencyLimitResponse]:
     service = ConcurrencySlotAcquisitionService.instance(frozenset(names))
-    future = service.send((slots, mode))
+    future = service.send((slots, mode, timeout_seconds))
     response_or_exception = await asyncio.wrap_future(future)
 
     if isinstance(response_or_exception, Exception):
+        if isinstance(response_or_exception, TimeoutError):
+            raise AcquireConcurrencySlotTimeoutError(
+                f"Attempt to acquire concurrency slots timed out after {timeout_seconds} second(s)"
+            ) from response_or_exception
+
         raise ConcurrencySlotAcquisitionError(
             f"Unable to acquire concurrency slots on {names!r}"
         ) from response_or_exception
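A minimal sketch of the new timeout behaviour in the async API, assuming a concurrency limit named "database" has already been configured; if no slot is granted within the window, the new `AcquireConcurrencySlotTimeoutError` is raised instead of waiting indefinitely.

import asyncio

from prefect.concurrency.asyncio import AcquireConcurrencySlotTimeoutError, concurrency


async def main():
    try:
        # Wait at most 5 seconds for a slot on the "database" limit.
        async with concurrency("database", occupy=1, timeout_seconds=5):
            await asyncio.sleep(1)  # stand-in for the governed work
    except AcquireConcurrencySlotTimeoutError:
        print("timed out waiting for a concurrency slot")


asyncio.run(main())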
prefect/concurrency/services.py
CHANGED
@@ -4,6 +4,7 @@ from contextlib import asynccontextmanager
 from typing import (
     TYPE_CHECKING,
     FrozenSet,
+    Optional,
     Tuple,
 )
 
@@ -13,6 +14,7 @@ from starlette import status
 from prefect._internal.concurrency import logger
 from prefect._internal.concurrency.services import QueueService
 from prefect.client.orchestration import get_client
+from prefect.utilities.timeout import timeout_async
 
 if TYPE_CHECKING:
     from prefect.client.orchestration import PrefectClient
@@ -30,10 +32,12 @@ class ConcurrencySlotAcquisitionService(QueueService):
         self._client = client
         yield
 
-    async def _handle(self, item: Tuple[int, str, concurrent.futures.Future]):
-        occupy, mode, future = item
+    async def _handle(
+        self, item: Tuple[int, str, Optional[float], concurrent.futures.Future]
+    ):
+        occupy, mode, timeout_seconds, future = item
         try:
-            response = await self.acquire_slots(occupy, mode)
+            response = await self.acquire_slots(occupy, mode, timeout_seconds)
         except Exception as exc:
             # If the request to the increment endpoint fails in a non-standard
             # way, we need to set the future's result so that the caller can
@@ -43,25 +47,28 @@ class ConcurrencySlotAcquisitionService(QueueService):
         else:
             future.set_result(response)
 
-    async def acquire_slots(self, slots: int, mode: str) -> httpx.Response:
-        while True:
-            try:
-                response = await self._client.increment_concurrency_slots(
-                    names=self.concurrency_limit_names, slots=slots, mode=mode
-                )
-            except Exception as exc:
-                if (
-                    isinstance(exc, httpx.HTTPStatusError)
-                    and exc.response.status_code == status.HTTP_423_LOCKED
-                ):
-                    retry_after = float(exc.response.headers["Retry-After"])
-                    await asyncio.sleep(retry_after)
+    async def acquire_slots(
+        self, slots: int, mode: str, timeout_seconds: Optional[float] = None
+    ) -> httpx.Response:
+        with timeout_async(seconds=timeout_seconds):
+            while True:
+                try:
+                    response = await self._client.increment_concurrency_slots(
+                        names=self.concurrency_limit_names, slots=slots, mode=mode
+                    )
+                except Exception as exc:
+                    if (
+                        isinstance(exc, httpx.HTTPStatusError)
+                        and exc.response.status_code == status.HTTP_423_LOCKED
+                    ):
+                        retry_after = float(exc.response.headers["Retry-After"])
+                        await asyncio.sleep(retry_after)
+                    else:
+                        raise exc
                 else:
-                    raise exc
-            else:
-                return response
+                    return response
 
-    def send(self, item: Tuple[int, str]):
+    def send(self, item: Tuple[int, str, Optional[float]]) -> concurrent.futures.Future:
         with self._lock:
             if self._stopped:
                 raise RuntimeError("Cannot put items in a stopped service instance.")
@@ -69,7 +76,7 @@ class ConcurrencySlotAcquisitionService(QueueService):
         logger.debug("Service %r enqueuing item %r", self, item)
         future: concurrent.futures.Future = concurrent.futures.Future()
 
-        occupy, mode = item
-        self._queue.put_nowait((occupy, mode, future))
+        occupy, mode, timeout_seconds = item
+        self._queue.put_nowait((occupy, mode, timeout_seconds, future))
 
         return future
prefect/concurrency/sync.py
CHANGED
@@ -12,7 +12,6 @@ except ImportError:
 from prefect._internal.concurrency.api import create_call, from_sync
 from prefect._internal.concurrency.event_loop import get_running_loop
 from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
-from prefect.utilities.timeout import timeout
 
 from .asyncio import (
     _acquire_concurrency_slots,
@@ -57,10 +56,9 @@ def concurrency(
     """
     names = names if isinstance(names, list) else [names]
 
-    with timeout(seconds=timeout_seconds):
-        limits: List[MinimalConcurrencyLimitResponse] = _call_async_function_from_sync(
-            _acquire_concurrency_slots, names, occupy
-        )
+    limits: List[MinimalConcurrencyLimitResponse] = _call_async_function_from_sync(
+        _acquire_concurrency_slots, names, occupy, timeout_seconds=timeout_seconds
+    )
     acquisition_time = pendulum.now("UTC")
     emitted_events = _emit_concurrency_acquisition_events(limits, occupy)
 
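The synchronous context manager forwards the same `timeout_seconds` to the shared acquisition path, so the blocking counterpart of the sketch above (again assuming a "database" limit exists) looks like:

from prefect.concurrency.asyncio import AcquireConcurrencySlotTimeoutError
from prefect.concurrency.sync import concurrency

try:
    with concurrency("database", occupy=1, timeout_seconds=5):
        ...  # the work governed by the limit
except AcquireConcurrencySlotTimeoutError:
    print("timed out waiting for a concurrency slot")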
prefect/context.py
CHANGED
@@ -9,21 +9,16 @@ For more user-accessible information about the current run, see [`prefect.runtim
 import os
 import sys
 import warnings
-from collections import defaultdict
 from contextlib import ExitStack, contextmanager
 from contextvars import ContextVar, Token
-from functools import update_wrapper
 from pathlib import Path
 from typing import (
     TYPE_CHECKING,
     Any,
-    ContextManager,
     Dict,
     Generator,
-    List,
     Optional,
     Set,
-    Tuple,
     Type,
     TypeVar,
     Union,
@@ -46,7 +41,6 @@ from prefect.settings import PREFECT_HOME, Profile, Settings
 from prefect.states import State
 from prefect.task_runners import TaskRunner
 from prefect.utilities.asyncutils import run_coro_as_sync
-from prefect.utilities.importtools import load_script_as_module
 
 T = TypeVar("T")
 
@@ -180,86 +174,6 @@ class ContextModel(BaseModel):
         return self.model_dump(exclude_unset=True)
 
 
-class PrefectObjectRegistry(ContextModel):
-    """
-    A context that acts as a registry for all Prefect objects that are
-    registered during load and execution.
-
-    Attributes:
-        start_time: The time the object registry was created.
-        block_code_execution: If set, flow calls will be ignored.
-        capture_failures: If set, failures during __init__ will be silenced and tracked.
-    """
-
-    start_time: DateTime = Field(default_factory=lambda: pendulum.now("UTC"))
-
-    _instance_registry: Dict[Type[T], List[T]] = PrivateAttr(
-        default_factory=lambda: defaultdict(list)
-    )
-
-    # Failures will be a tuple of (exception, instance, args, kwargs)
-    _instance_init_failures: Dict[
-        Type[T], List[Tuple[Exception, T, Tuple, Dict]]
-    ] = PrivateAttr(default_factory=lambda: defaultdict(list))
-
-    block_code_execution: bool = False
-    capture_failures: bool = False
-
-    __var__ = ContextVar("object_registry")
-
-    def get_instances(self, type_: Type[T]) -> List[T]:
-        instances = []
-        for registered_type, type_instances in self._instance_registry.items():
-            if type_ in registered_type.mro():
-                instances.extend(type_instances)
-        return instances
-
-    def get_instance_failures(
-        self, type_: Type[T]
-    ) -> List[Tuple[Exception, T, Tuple, Dict]]:
-        failures = []
-        for type__ in type_.mro():
-            failures.extend(self._instance_init_failures[type__])
-        return failures
-
-    def register_instance(self, object):
-        # TODO: Consider using a 'Set' to avoid duplicate entries
-        self._instance_registry[type(object)].append(object)
-
-    def register_init_failure(
-        self, exc: Exception, object: Any, init_args: Tuple, init_kwargs: Dict
-    ):
-        self._instance_init_failures[type(object)].append(
-            (exc, object, init_args, init_kwargs)
-        )
-
-    @classmethod
-    def register_instances(cls, type_: Type[T]) -> Type[T]:
-        """
-        Decorator for a class that adds registration to the `PrefectObjectRegistry`
-        on initialization of instances.
-        """
-        original_init = type_.__init__
-
-        def __register_init__(__self__: T, *args: Any, **kwargs: Any) -> None:
-            registry = cls.get()
-            try:
-                original_init(__self__, *args, **kwargs)
-            except Exception as exc:
-                if not registry or not registry.capture_failures:
-                    raise
-                else:
-                    registry.register_init_failure(exc, __self__, args, kwargs)
-            else:
-                if registry:
-                    registry.register_instance(__self__)
-
-        update_wrapper(__register_init__, original_init)
-
-        type_.__init__ = __register_init__
-        return type_
-
-
 class ClientContext(ContextModel):
     """
     A context for managing the Prefect client instances.
@@ -594,23 +508,6 @@ def tags(*new_tags: str) -> Generator[Set[str], None, None]:
     yield new_tags
 
 
-def registry_from_script(
-    path: str,
-    block_code_execution: bool = True,
-    capture_failures: bool = True,
-) -> PrefectObjectRegistry:
-    """
-    Return a fresh registry with instances populated from execution of a script.
-    """
-    with PrefectObjectRegistry(
-        block_code_execution=block_code_execution,
-        capture_failures=capture_failures,
-    ) as registry:
-        load_script_as_module(path)
-
-        return registry
-
-
 @contextmanager
 def use_profile(
     profile: Union[Profile, str],
@@ -711,14 +608,3 @@ def root_settings_context():
 
 
 GLOBAL_SETTINGS_CONTEXT: SettingsContext = root_settings_context()
-GLOBAL_OBJECT_REGISTRY: Optional[ContextManager[PrefectObjectRegistry]] = None
-
-
-def initialize_object_registry():
-    global GLOBAL_OBJECT_REGISTRY
-
-    if GLOBAL_OBJECT_REGISTRY:
-        return
-
-    GLOBAL_OBJECT_REGISTRY = PrefectObjectRegistry()
-    GLOBAL_OBJECT_REGISTRY.__enter__()