prefect-client 3.0.2__py3-none-any.whl → 3.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/blocks/core.py +5 -4
- prefect/blocks/webhook.py +9 -1
- prefect/cache_policies.py +98 -28
- prefect/client/orchestration.py +3 -0
- prefect/client/schemas/actions.py +8 -0
- prefect/client/schemas/filters.py +4 -2
- prefect/client/schemas/objects.py +24 -0
- prefect/client/schemas/responses.py +15 -1
- prefect/context.py +1 -5
- prefect/deployments/runner.py +42 -5
- prefect/flows.py +15 -11
- prefect/locking/filesystem.py +3 -3
- prefect/plugins.py +9 -1
- prefect/results.py +24 -5
- prefect/runner/runner.py +9 -10
- prefect/settings.py +30 -0
- prefect/task_engine.py +28 -7
- prefect/transactions.py +9 -3
- prefect/utilities/asyncutils.py +7 -0
- prefect/utilities/collections.py +3 -2
- prefect/utilities/engine.py +4 -1
- prefect/workers/base.py +2 -8
- {prefect_client-3.0.2.dist-info → prefect_client-3.0.3.dist-info}/METADATA +1 -1
- {prefect_client-3.0.2.dist-info → prefect_client-3.0.3.dist-info}/RECORD +27 -27
- {prefect_client-3.0.2.dist-info → prefect_client-3.0.3.dist-info}/LICENSE +0 -0
- {prefect_client-3.0.2.dist-info → prefect_client-3.0.3.dist-info}/WHEEL +0 -0
- {prefect_client-3.0.2.dist-info → prefect_client-3.0.3.dist-info}/top_level.txt +0 -0
prefect/blocks/core.py
CHANGED
@@ -40,7 +40,6 @@ from pydantic import (
 from pydantic.json_schema import GenerateJsonSchema
 from typing_extensions import Literal, ParamSpec, Self, get_args

-import prefect
 import prefect.exceptions
 from prefect.client.schemas import (
     DEFAULT_BLOCK_SCHEMA_VERSION,
@@ -52,6 +51,7 @@ from prefect.client.schemas import (
 from prefect.client.utilities import inject_client
 from prefect.events import emit_event
 from prefect.logging.loggers import disable_logger
+from prefect.plugins import load_prefect_collections
 from prefect.types import SecretDict
 from prefect.utilities.asyncutils import sync_compatible
 from prefect.utilities.collections import listrepr, remove_nested_keys, visit_collection
@@ -86,7 +86,7 @@ class InvalidBlockRegistration(Exception):
     """


-def _collect_nested_reference_strings(obj: Dict):
+def _collect_nested_reference_strings(obj: Dict) -> List[str]:
     """
     Collects all nested reference strings (e.g. #/definitions/Model) from a given object.
     """
@@ -739,9 +739,10 @@ class Block(BaseModel, ABC):
         """
         Retrieve the block class implementation given a key.
         """
+
         # Ensure collections are imported and have the opportunity to register types
-        # before looking up the block class
-
+        # before looking up the block class, but only do this once
+        load_prefect_collections()

         return lookup_type(cls, key)

prefect/blocks/webhook.py
CHANGED
@@ -11,6 +11,7 @@ from prefect.utilities.urls import validate_restricted_url
 # Use a global HTTP transport to maintain a process-wide connection pool for
 # interservice requests
 _http_transport = AsyncHTTPTransport()
+_insecure_http_transport = AsyncHTTPTransport(verify=False)


 class Webhook(Block):
@@ -44,9 +45,16 @@ class Webhook(Block):
         default=True,
         description="Whether to allow notifications to private URLs. Defaults to True.",
     )
+    verify: bool = Field(
+        default=True,
+        description="Whether or not to enforce a secure connection to the webhook.",
+    )

     def block_initialization(self):
-        self.
+        if self.verify:
+            self._client = AsyncClient(transport=_http_transport)
+        else:
+            self._client = AsyncClient(transport=_insecure_http_transport)

     async def call(self, payload: Optional[dict] = None) -> Response:
         """

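The new `verify` field selects between the two transports above. A minimal usage sketch (the URL and payload are placeholder values, not part of this diff):

    from prefect.blocks.webhook import Webhook

    # verify=False routes requests through the non-verifying transport;
    # the default (verify=True) keeps TLS certificate checks enabled.
    webhook = Webhook(url="https://internal.example.com/hook", verify=False)

    async def notify() -> None:
        # `call` posts the optional payload and returns an httpx.Response
        response = await webhook.call(payload={"status": "ok"})
        response.raise_for_status()
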
prefect/cache_policies.py
CHANGED
@@ -1,10 +1,19 @@
 import inspect
-from
-from
+from copy import deepcopy
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Callable, Dict, List, Literal, Optional, Union
+
+from typing_extensions import Self

 from prefect.context import TaskRunContext
 from prefect.utilities.hashing import hash_objects

+if TYPE_CHECKING:
+    from prefect.filesystems import WritableFileSystem
+    from prefect.locking.protocol import LockManager
+    from prefect.transactions import IsolationLevel
+

 @dataclass
 class CachePolicy:
@@ -12,6 +21,14 @@ class CachePolicy:
     Base class for all cache policies.
     """

+    key_storage: Union["WritableFileSystem", str, Path, None] = None
+    isolation_level: Union[
+        Literal["READ_COMMITTED", "SERIALIZABLE"],
+        "IsolationLevel",
+        None,
+    ] = None
+    lock_manager: Optional["LockManager"] = None
+
     @classmethod
     def from_cache_key_fn(
         cls, cache_key_fn: Callable[["TaskRunContext", Dict[str, Any]], Optional[str]]
@@ -21,6 +38,37 @@ class CachePolicy:
         """
         return CacheKeyFnPolicy(cache_key_fn=cache_key_fn)

+    def configure(
+        self,
+        key_storage: Union["WritableFileSystem", str, Path, None] = None,
+        lock_manager: Optional["LockManager"] = None,
+        isolation_level: Union[
+            Literal["READ_COMMITTED", "SERIALIZABLE"], "IsolationLevel", None
+        ] = None,
+    ) -> Self:
+        """
+        Configure the cache policy with the given key storage, lock manager, and isolation level.
+
+        Args:
+            key_storage: The storage to use for cache keys. If not provided,
+                the current key storage will be used.
+            lock_manager: The lock manager to use for the cache policy. If not provided,
+                the current lock manager will be used.
+            isolation_level: The isolation level to use for the cache policy. If not provided,
+                the current isolation level will be used.
+
+        Returns:
+            A new cache policy with the given key storage, lock manager, and isolation level.
+        """
+        new = deepcopy(self)
+        if key_storage is not None:
+            new.key_storage = key_storage
+        if lock_manager is not None:
+            new.lock_manager = lock_manager
+        if isolation_level is not None:
+            new.isolation_level = isolation_level
+        return new
+
     def compute_key(
         self,
         task_ctx: TaskRunContext,
@@ -30,35 +78,48 @@ class CachePolicy:
     ) -> Optional[str]:
         raise NotImplementedError

-    def __sub__(self, other: str) -> "
+    def __sub__(self, other: str) -> "CachePolicy":
         if not isinstance(other, str):
             raise TypeError("Can only subtract strings from key policies.")
-
-
-
-
-            new = Inputs(exclude=[other])
-            policies = self.policies or []
-            return CompoundCachePolicy(policies=policies + [new])
-        else:
-            new = Inputs(exclude=[other])
-            return CompoundCachePolicy(policies=[self, new])
-
-    def __add__(self, other: "CachePolicy") -> "CompoundCachePolicy":
+        new = Inputs(exclude=[other])
+        return CompoundCachePolicy(policies=[self, new])
+
+    def __add__(self, other: "CachePolicy") -> "CachePolicy":
         # adding _None is a no-op
         if isinstance(other, _None):
             return self
-        elif isinstance(self, _None):
-            return other

-        if
-
-
-
-
-
-
-
+        if (
+            other.key_storage is not None
+            and self.key_storage is not None
+            and other.key_storage != self.key_storage
+        ):
+            raise ValueError(
+                "Cannot add CachePolicies with different storage locations."
+            )
+        if (
+            other.isolation_level is not None
+            and self.isolation_level is not None
+            and other.isolation_level != self.isolation_level
+        ):
+            raise ValueError(
+                "Cannot add CachePolicies with different isolation levels."
+            )
+        if (
+            other.lock_manager is not None
+            and self.lock_manager is not None
+            and other.lock_manager != self.lock_manager
+        ):
+            raise ValueError(
+                "Cannot add CachePolicies with different lock implementations."
+            )
+
+        return CompoundCachePolicy(
+            policies=[self, other],
+            key_storage=self.key_storage or other.key_storage,
+            isolation_level=self.isolation_level or other.isolation_level,
+            lock_manager=self.lock_manager or other.lock_manager,
+        )


 @dataclass
@@ -93,7 +154,7 @@ class CompoundCachePolicy(CachePolicy):
     Any keys that return `None` will be ignored.
     """

-    policies:
+    policies: List[CachePolicy] = field(default_factory=list)

     def compute_key(
         self,
@@ -103,7 +164,7 @@ class CompoundCachePolicy(CachePolicy):
         **kwargs,
     ) -> Optional[str]:
         keys = []
-        for policy in self.policies
+        for policy in self.policies:
             policy_key = policy.compute_key(
                 task_ctx=task_ctx,
                 inputs=inputs,
@@ -133,6 +194,10 @@ class _None(CachePolicy):
     ) -> Optional[str]:
         return None

+    def __add__(self, other: "CachePolicy") -> "CachePolicy":
+        # adding _None is a no-op
+        return other
+

 @dataclass
 class TaskSource(CachePolicy):
@@ -208,7 +273,7 @@ class Inputs(CachePolicy):
     Policy that computes a cache key based on a hash of the runtime inputs provided to the task..
     """

-    exclude:
+    exclude: List[str] = field(default_factory=list)

     def compute_key(
         self,
@@ -230,6 +295,11 @@ class Inputs(CachePolicy):

         return hash_objects(hashed_inputs)

+    def __sub__(self, other: str) -> "CachePolicy":
+        if not isinstance(other, str):
+            raise TypeError("Can only subtract strings from key policies.")
+        return Inputs(exclude=self.exclude + [other])
+

 INPUTS = Inputs()
 NONE = _None()

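A sketch of how the new fields compose (this assumes the module-level `INPUTS` and `TASK_SOURCE` policies and uses illustrative paths):

    from pathlib import Path

    from prefect.cache_policies import INPUTS, TASK_SOURCE
    from prefect.locking.filesystem import FileSystemLockManager

    # `+` merges policies, `-` excludes an input from the key, and
    # `configure` returns a copy with storage, locking, and isolation set.
    policy = (INPUTS + TASK_SOURCE - "debug").configure(
        key_storage=Path("/tmp/prefect-cache-keys"),
        lock_manager=FileSystemLockManager(lock_files_directory=Path("/tmp/prefect-locks")),
        isolation_level="SERIALIZABLE",
    )
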
prefect/client/orchestration.py
CHANGED
@@ -86,6 +86,7 @@ from prefect.client.schemas.objects import (
     BlockSchema,
     BlockType,
     ConcurrencyLimit,
+    ConcurrencyOptions,
     Constant,
     DeploymentSchedule,
     Flow,
@@ -1639,6 +1640,7 @@ class PrefectClient:
         version: Optional[str] = None,
         schedules: Optional[List[DeploymentScheduleCreate]] = None,
         concurrency_limit: Optional[int] = None,
+        concurrency_options: Optional[ConcurrencyOptions] = None,
         parameters: Optional[Dict[str, Any]] = None,
         description: Optional[str] = None,
         work_queue_name: Optional[str] = None,
@@ -1697,6 +1699,7 @@ class PrefectClient:
             paused=paused,
             schedules=schedules or [],
             concurrency_limit=concurrency_limit,
+            concurrency_options=concurrency_options,
             pull_steps=pull_steps,
             enforce_parameter_schema=enforce_parameter_schema,
         )

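A rough sketch of the new client argument (the flow id and deployment name are placeholders, and other `create_deployment` arguments are omitted):

    from prefect.client.orchestration import get_client
    from prefect.client.schemas.objects import ConcurrencyLimitStrategy, ConcurrencyOptions

    async def create_limited_deployment(flow_id):
        async with get_client() as client:
            # Limit the deployment to two active runs; extra runs are enqueued.
            return await client.create_deployment(
                flow_id=flow_id,
                name="my-deployment",
                concurrency_limit=2,
                concurrency_options=ConcurrencyOptions(
                    collision_strategy=ConcurrencyLimitStrategy.ENQUEUE
                ),
            )
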
prefect/client/schemas/actions.py
CHANGED
@@ -161,6 +161,10 @@ class DeploymentCreate(ActionBaseModel):
         default=None,
         description="The concurrency limit for the deployment.",
     )
+    concurrency_options: Optional[objects.ConcurrencyOptions] = Field(
+        default=None,
+        description="The concurrency options for the deployment.",
+    )
     enforce_parameter_schema: Optional[bool] = Field(
         default=None,
         description=(
@@ -237,6 +241,10 @@ class DeploymentUpdate(ActionBaseModel):
         default=None,
         description="The concurrency limit for the deployment.",
     )
+    concurrency_options: Optional[objects.ConcurrencyOptions] = Field(
+        default=None,
+        description="The concurrency options for the deployment.",
+    )
     tags: List[str] = Field(default_factory=list)
     work_queue_name: Optional[str] = Field(None)
     work_pool_name: Optional[str] = Field(

prefect/client/schemas/filters.py
CHANGED
@@ -506,7 +506,7 @@ class DeploymentFilterTags(PrefectBaseModel, OperatorMixin):


 class DeploymentFilterConcurrencyLimit(PrefectBaseModel):
-    """
+    """DEPRECATED: Prefer `Deployment.concurrency_limit_id` over `Deployment.concurrency_limit`."""

     ge_: Optional[int] = Field(
         default=None,
@@ -538,7 +538,9 @@ class DeploymentFilter(PrefectBaseModel, OperatorMixin):
         default=None, description="Filter criteria for `Deployment.work_queue_name`"
     )
     concurrency_limit: Optional[DeploymentFilterConcurrencyLimit] = Field(
-        default=None,
+        default=None,
+        description="DEPRECATED: Prefer `Deployment.concurrency_limit_id` over `Deployment.concurrency_limit`. If provided, will be ignored for backwards-compatibility. Will be removed after December 2024.",
+        deprecated=True,
     )


prefect/client/schemas/objects.py
CHANGED
@@ -141,6 +141,30 @@ class WorkQueueStatus(AutoEnum):
     PAUSED = AutoEnum.auto()


+class ConcurrencyLimitStrategy(AutoEnum):
+    """Enumeration of concurrency limit strategies."""
+
+    ENQUEUE = AutoEnum.auto()
+    CANCEL_NEW = AutoEnum.auto()
+
+
+class ConcurrencyOptions(PrefectBaseModel):
+    """
+    Class for storing the concurrency config in database.
+    """
+
+    collision_strategy: ConcurrencyLimitStrategy
+
+
+class ConcurrencyLimitConfig(PrefectBaseModel):
+    """
+    Class for storing the concurrency limit config in database.
+    """
+
+    limit: int
+    collision_strategy: ConcurrencyLimitStrategy = ConcurrencyLimitStrategy.ENQUEUE
+
+
 class StateDetails(PrefectBaseModel):
     flow_run_id: Optional[UUID] = None
     task_run_id: Optional[UUID] = None

prefect/client/schemas/responses.py
CHANGED
@@ -314,11 +314,25 @@ class DeploymentResponse(ObjectBaseModel):
         default=..., description="The flow id associated with the deployment."
     )
     concurrency_limit: Optional[int] = Field(
-        default=None,
+        default=None,
+        description="DEPRECATED: Prefer `global_concurrency_limit`. Will always be None for backwards compatibility. Will be removed after December 2024.",
+        deprecated=True,
+    )
+    global_concurrency_limit: Optional["GlobalConcurrencyLimitResponse"] = Field(
+        default=None,
+        description="The global concurrency limit object for enforcing the maximum number of flow runs that can be active at once.",
+    )
+    concurrency_options: Optional[objects.ConcurrencyOptions] = Field(
+        default=None,
+        description="The concurrency options for the deployment.",
     )
     paused: bool = Field(
         default=False, description="Whether or not the deployment is paused."
     )
+    concurrency_options: Optional[objects.ConcurrencyOptions] = Field(
+        default=None,
+        description="The concurrency options for the deployment.",
+    )
     schedules: List[objects.DeploymentSchedule] = Field(
         default_factory=list, description="A list of schedules for the deployment."
     )

prefect/context.py
CHANGED
@@ -9,7 +9,6 @@ For more user-accessible information about the current run, see [`prefect.runtim
 import os
 import sys
 import warnings
-import weakref
 from contextlib import ExitStack, asynccontextmanager, contextmanager
 from contextvars import ContextVar, Token
 from pathlib import Path
@@ -353,10 +352,7 @@ class EngineContext(RunContext):

     # Tracking for result from task runs in this flow run for dependency tracking
     # Holds the ID of the object returned by the task run and task run state
-
-    task_run_results: Mapping[int, State] = Field(
-        default_factory=weakref.WeakValueDictionary
-    )
+    task_run_results: Mapping[int, State] = Field(default_factory=dict)

     # Events worker to emit events
     events: Optional[EventsWorker] = None

prefect/deployments/runner.py
CHANGED
@@ -54,6 +54,7 @@ from prefect._internal.schemas.validators import (
 )
 from prefect.client.orchestration import get_client
 from prefect.client.schemas.actions import DeploymentScheduleCreate
+from prefect.client.schemas.objects import ConcurrencyLimitConfig, ConcurrencyOptions
 from prefect.client.schemas.schedules import (
     SCHEDULE_TYPES,
     construct_schedule,
@@ -147,6 +148,10 @@ class RunnerDeployment(BaseModel):
         default=None,
         description="The maximum number of concurrent runs of this deployment.",
     )
+    concurrency_options: Optional[ConcurrencyOptions] = Field(
+        default=None,
+        description="The concurrency limit config for the deployment.",
+    )
     paused: Optional[bool] = Field(
         default=None, description="Whether or not the deployment is paused."
     )
@@ -279,6 +284,7 @@ class RunnerDeployment(BaseModel):
             paused=self.paused,
             schedules=self.schedules,
             concurrency_limit=self.concurrency_limit,
+            concurrency_options=self.concurrency_options,
             parameters=self.parameters,
             description=self.description,
             tags=self.tags,
@@ -437,7 +443,7 @@ class RunnerDeployment(BaseModel):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
-        concurrency_limit: Optional[int] = None,
+        concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -488,11 +494,20 @@ class RunnerDeployment(BaseModel):

         job_variables = job_variables or {}

+        if isinstance(concurrency_limit, ConcurrencyLimitConfig):
+            concurrency_options = {
+                "collision_strategy": concurrency_limit.collision_strategy
+            }
+            concurrency_limit = concurrency_limit.limit
+        else:
+            concurrency_options = None
+
         deployment = cls(
             name=Path(name).stem,
             flow_name=flow.name,
             schedules=constructed_schedules,
             concurrency_limit=concurrency_limit,
+            concurrency_options=concurrency_options,
             paused=paused,
             tags=tags or [],
             triggers=triggers or [],
@@ -559,6 +574,7 @@ class RunnerDeployment(BaseModel):
         cls,
         entrypoint: str,
         name: str,
+        flow_name: Optional[str] = None,
         interval: Optional[
             Union[Iterable[Union[int, float, timedelta]], int, float, timedelta]
         ] = None,
@@ -566,7 +582,7 @@ class RunnerDeployment(BaseModel):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
-        concurrency_limit: Optional[int] = None,
+        concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -584,6 +600,7 @@ class RunnerDeployment(BaseModel):
             entrypoint: The path to a file containing a flow and the name of the flow function in
                 the format `./path/to/file.py:flow_func_name`.
             name: A name for the deployment
+            flow_name: The name of the flow to deploy
             interval: An interval on which to execute the current flow. Accepts either a number
                 or a timedelta object. If a number is given, it will be interpreted as seconds.
             cron: A cron schedule of when to execute runs of this flow.
@@ -619,11 +636,20 @@ class RunnerDeployment(BaseModel):
             schedules=schedules,
         )

+        if isinstance(concurrency_limit, ConcurrencyLimitConfig):
+            concurrency_options = {
+                "collision_strategy": concurrency_limit.collision_strategy
+            }
+            concurrency_limit = concurrency_limit.limit
+        else:
+            concurrency_options = None
+
         deployment = cls(
             name=Path(name).stem,
-            flow_name=flow.name,
+            flow_name=flow_name or flow.name,
             schedules=constructed_schedules,
             concurrency_limit=concurrency_limit,
+            concurrency_options=concurrency_options,
             paused=paused,
             tags=tags or [],
             triggers=triggers or [],
@@ -649,6 +675,7 @@ class RunnerDeployment(BaseModel):
         storage: RunnerStorage,
         entrypoint: str,
         name: str,
+        flow_name: Optional[str] = None,
         interval: Optional[
             Union[Iterable[Union[int, float, timedelta]], int, float, timedelta]
         ] = None,
@@ -656,7 +683,7 @@ class RunnerDeployment(BaseModel):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
-        concurrency_limit: Optional[int] = None,
+        concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -675,6 +702,7 @@ class RunnerDeployment(BaseModel):
             entrypoint: The path to a file containing a flow and the name of the flow function in
                 the format `./path/to/file.py:flow_func_name`.
             name: A name for the deployment
+            flow_name: The name of the flow to deploy
             storage: A storage object to use for retrieving flow code. If not provided, a
                 URL must be provided.
             interval: An interval on which to execute the current flow. Accepts either a number
@@ -706,6 +734,14 @@ class RunnerDeployment(BaseModel):
             schedules=schedules,
         )

+        if isinstance(concurrency_limit, ConcurrencyLimitConfig):
+            concurrency_options = {
+                "collision_strategy": concurrency_limit.collision_strategy
+            }
+            concurrency_limit = concurrency_limit.limit
+        else:
+            concurrency_options = None
+
         job_variables = job_variables or {}

         with tempfile.TemporaryDirectory() as tmpdir:
@@ -719,9 +755,10 @@ class RunnerDeployment(BaseModel):

         deployment = cls(
             name=Path(name).stem,
-            flow_name=flow.name,
+            flow_name=flow_name or flow.name,
             schedules=constructed_schedules,
             concurrency_limit=concurrency_limit,
+            concurrency_options=concurrency_options,
             paused=paused,
             tags=tags or [],
             triggers=triggers or [],

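A sketch of passing a `ConcurrencyLimitConfig` to `from_flow`; per the hunks above it is split internally into the numeric `concurrency_limit` plus `concurrency_options` (flow and deployment names are placeholders):

    from prefect import flow
    from prefect.client.schemas.objects import ConcurrencyLimitConfig, ConcurrencyLimitStrategy
    from prefect.deployments.runner import RunnerDeployment

    @flow
    def etl():
        ...

    # Unpacked internally into concurrency_limit=1 and
    # concurrency_options={"collision_strategy": CANCEL_NEW}.
    deployment = RunnerDeployment.from_flow(
        flow=etl,
        name="nightly",
        concurrency_limit=ConcurrencyLimitConfig(
            limit=1, collision_strategy=ConcurrencyLimitStrategy.CANCEL_NEW
        ),
    )
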
prefect/flows.py
CHANGED
@@ -51,8 +51,8 @@ from prefect._internal.concurrency.api import create_call, from_async
 from prefect.blocks.core import Block
 from prefect.client.orchestration import get_client
 from prefect.client.schemas.actions import DeploymentScheduleCreate
+from prefect.client.schemas.objects import ConcurrencyLimitConfig, FlowRun
 from prefect.client.schemas.objects import Flow as FlowSchema
-from prefect.client.schemas.objects import FlowRun
 from prefect.client.utilities import client_injector
 from prefect.docker.docker_image import DockerImage
 from prefect.events import DeploymentTriggerTypes, TriggerTypes
@@ -258,11 +258,11 @@ class Flow(Generic[P, R]):
         if not callable(fn):
             raise TypeError("'fn' must be callable")

-
-
-
-
-        self.name
+        self.name = name or fn.__name__.replace("_", "-").replace(
+            "<lambda>",
+            "unknown-lambda",  # prefect API will not accept "<" or ">" in flow names
+        )
+        _raise_on_name_with_banned_characters(self.name)

         if flow_run_name is not None:
             if not isinstance(flow_run_name, str) and not callable(flow_run_name):
@@ -643,7 +643,7 @@ class Flow(Generic[P, R]):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
-        concurrency_limit: Optional[int] = None,
+        concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -715,6 +715,7 @@ class Flow(Generic[P, R]):
             storage=self._storage,
             entrypoint=self._entrypoint,
             name=name,
+            flow_name=self.name,
             interval=interval,
             cron=cron,
             rrule=rrule,
@@ -733,7 +734,7 @@ class Flow(Generic[P, R]):
             )  # type: ignore # TODO: remove sync_compatible
         else:
             return RunnerDeployment.from_flow(
-                self,
+                flow=self,
                 name=name,
                 interval=interval,
                 cron=cron,
@@ -798,6 +799,7 @@ class Flow(Generic[P, R]):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
+        global_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         parameters: Optional[dict] = None,
         description: Optional[str] = None,
@@ -827,6 +829,7 @@ class Flow(Generic[P, R]):
             paused: Whether or not to set this deployment as paused.
             schedules: A list of schedule objects defining when to execute runs of this deployment.
                 Used to define multiple schedules or additional scheduling options like `timezone`.
+            global_limit: The maximum number of concurrent runs allowed across all served flow instances associated with the same deployment.
             parameters: A dictionary of default parameter values to pass to runs of this deployment.
             description: A description for the created deployment. Defaults to the flow's
                 description if not provided.
@@ -838,7 +841,7 @@ class Flow(Generic[P, R]):
             pause_on_shutdown: If True, provided schedule will be paused when the serve function is stopped.
                 If False, the schedules will continue running.
             print_starting_message: Whether or not to print the starting message when flow is served.
-            limit: The maximum number of runs that can be executed concurrently.
+            limit: The maximum number of runs that can be executed concurrently by the created runner; only applies to this served flow. To apply a limit across multiple served flows, use `global_limit`.
             webserver: Whether or not to start a monitoring webserver for this flow.
             entrypoint_type: Type of entrypoint to use for the deployment. When using a module path
                 entrypoint, ensure that the module will be importable in the execution environment.
@@ -890,6 +893,7 @@ class Flow(Generic[P, R]):
             rrule=rrule,
             paused=paused,
             schedules=schedules,
+            concurrency_limit=global_limit,
             parameters=parameters,
             description=description,
             tags=tags,
@@ -1057,7 +1061,7 @@ class Flow(Generic[P, R]):
         rrule: Optional[str] = None,
         paused: Optional[bool] = None,
         schedules: Optional[List[DeploymentScheduleCreate]] = None,
-        concurrency_limit: Optional[int] = None,
+        concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         parameters: Optional[dict] = None,
         description: Optional[str] = None,
@@ -1622,7 +1626,7 @@ def flow(
     )


-def _raise_on_name_with_banned_characters(name: str) -> str:
+def _raise_on_name_with_banned_characters(name: Optional[str]) -> Optional[str]:
     """
     Raise an InvalidNameError if the given name contains any invalid
     characters.

prefect/locking/filesystem.py
CHANGED
@@ -38,10 +38,10 @@ class FileSystemLockManager(LockManager):
     """

     def __init__(self, lock_files_directory: Path):
-        self.lock_files_directory = lock_files_directory
+        self.lock_files_directory = lock_files_directory.expanduser().resolve()
         self._locks: Dict[str, _LockInfo] = {}

-    def
+    def _ensure_lock_files_directory_exists(self):
         self.lock_files_directory.mkdir(parents=True, exist_ok=True)

     def _lock_path_for_key(self, key: str) -> Path:
@@ -98,7 +98,7 @@ class FileSystemLockManager(LockManager):
         acquire_timeout: Optional[float] = None,
         hold_timeout: Optional[float] = None,
     ) -> bool:
-        self.
+        self._ensure_lock_files_directory_exists()
         lock_path = self._lock_path_for_key(key)

         if self.is_locked(key) and not self.is_lock_holder(key, holder):

prefect/plugins.py
CHANGED
@@ -14,6 +14,8 @@ from typing import Any, Dict, Union
 import prefect.settings
 from prefect.utilities.compat import EntryPoints, entry_points

+COLLECTIONS: Union[None, Dict[str, Union[ModuleType, Exception]]] = None
+

 def safe_load_entrypoints(entrypoints: EntryPoints) -> Dict[str, Union[Exception, Any]]:
     """
@@ -38,11 +40,16 @@ def safe_load_entrypoints(entrypoints: EntryPoints) -> Dict[str, Union[Exception
     return results


-def load_prefect_collections() -> Dict[str, ModuleType]:
+def load_prefect_collections() -> Dict[str, Union[ModuleType, Exception]]:
     """
     Load all Prefect collections that define an entrypoint in the group
     `prefect.collections`.
     """
+    global COLLECTIONS
+
+    if COLLECTIONS is not None:
+        return COLLECTIONS
+
     collection_entrypoints: EntryPoints = entry_points(group="prefect.collections")
     collections = safe_load_entrypoints(collection_entrypoints)

@@ -61,4 +68,5 @@ def load_prefect_collections() -> Dict[str, ModuleType]:
         if prefect.settings.PREFECT_DEBUG_MODE:
             print(f"Loaded collection {name!r}.")

+    COLLECTIONS = collections
     return collections

prefect/results.py
CHANGED
@@ -91,18 +91,26 @@ async def get_default_result_storage() -> WritableFileSystem:
     Generate a default file system for result storage.
     """
     default_block = PREFECT_DEFAULT_RESULT_STORAGE_BLOCK.value()
+    basepath = PREFECT_LOCAL_STORAGE_PATH.value()
+
+    cache_key = (str(default_block), str(basepath))
+
+    if cache_key in _default_storages:
+        return _default_storages[cache_key]

     if default_block is not None:
-
+        storage = await resolve_result_storage(default_block)
+    else:
+        # Use the local file system
+        storage = LocalFileSystem(basepath=str(basepath))

-
-
-    return LocalFileSystem(basepath=str(basepath))
+    _default_storages[cache_key] = storage
+    return storage


 @sync_compatible
 async def resolve_result_storage(
-    result_storage: Union[ResultStorage, UUID],
+    result_storage: Union[ResultStorage, UUID, Path],
 ) -> WritableFileSystem:
     """
     Resolve one of the valid `ResultStorage` input types into a saved block
@@ -119,6 +127,8 @@ async def resolve_result_storage(
             storage_block_id = storage_block._block_document_id
         else:
             storage_block_id = None
+    elif isinstance(result_storage, Path):
+        storage_block = LocalFileSystem(basepath=str(result_storage))
     elif isinstance(result_storage, str):
         storage_block = await Block.load(result_storage, client=client)
         storage_block_id = storage_block._block_document_id
@@ -295,6 +305,15 @@ class ResultStore(BaseModel):
             update["storage_key_fn"] = partial(
                 _format_user_supplied_storage_key, task.result_storage_key
            )
+        if task.cache_policy is not None and task.cache_policy is not NotSet:
+            if task.cache_policy.key_storage is not None:
+                storage = task.cache_policy.key_storage
+                if isinstance(storage, str) and not len(storage.split("/")) == 2:
+                    storage = Path(storage)
+                update["metadata_storage"] = await resolve_result_storage(storage)
+            if task.cache_policy.lock_manager is not None:
+                update["lock_manager"] = task.cache_policy.lock_manager
+
         if self.result_storage is None and update.get("result_storage") is None:
             update["result_storage"] = await get_default_result_storage()
         return self.model_copy(update=update)

prefect/runner/runner.py
CHANGED
@@ -64,8 +64,13 @@ from prefect.client.schemas.filters import (
     FlowRunFilterStateName,
     FlowRunFilterStateType,
 )
+from prefect.client.schemas.objects import (
+    ConcurrencyLimitConfig,
+    FlowRun,
+    State,
+    StateType,
+)
 from prefect.client.schemas.objects import Flow as APIFlow
-from prefect.client.schemas.objects import FlowRun, State, StateType
 from prefect.concurrency.asyncio import (
     AcquireConcurrencySlotTimeoutError,
     ConcurrencySlotAcquisitionError,
@@ -236,7 +241,7 @@ class Runner:
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
-        concurrency_limit: Optional[int] = None,
+        concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -1044,15 +1049,9 @@ class Runner:

         if flow_run.deployment_id:
             deployment = await self._client.read_deployment(flow_run.deployment_id)
-            if deployment and deployment.
-                limit_name =
+            if deployment and deployment.global_concurrency_limit:
+                limit_name = deployment.global_concurrency_limit.name
                 concurrency_ctx = concurrency
-
-                # ensure that the global concurrency limit is available
-                # and up-to-date before attempting to acquire a slot
-                await self._client.upsert_global_concurrency_limit_by_name(
-                    limit_name, deployment.concurrency_limit
-                )
             else:
                 limit_name = ""
                 concurrency_ctx = asyncnullcontext

prefect/settings.py
CHANGED
@@ -1288,6 +1288,36 @@ compromise. Adjust this setting based on your specific security requirements
 and usage patterns.
 """

+PREFECT_SERVER_CORS_ALLOWED_ORIGINS = Setting(
+    str,
+    default="*",
+)
+"""
+A comma-separated list of origins that are authorized to make cross-origin requests to the API.
+
+By default, this is set to `*`, which allows requests from all origins.
+"""
+
+PREFECT_SERVER_CORS_ALLOWED_METHODS = Setting(
+    str,
+    default="*",
+)
+"""
+A comma-separated list of methods that are authorized to make cross-origin requests to the API.
+
+By default, this is set to `*`, which allows requests with all methods.
+"""
+
+PREFECT_SERVER_CORS_ALLOWED_HEADERS = Setting(
+    str,
+    default="*",
+)
+"""
+A comma-separated list of headers that are authorized to make cross-origin requests to the API.
+
+By default, this is set to `*`, which allows requests with all headers.
+"""
+
 PREFECT_SERVER_ALLOW_EPHEMERAL_MODE = Setting(bool, default=False)
 """
 Controls whether or not a subprocess server can be started when no API URL is provided.

prefect/task_engine.py
CHANGED
@@ -77,7 +77,7 @@ from prefect.states import (
     exception_to_failed_state,
     return_value_to_state,
 )
-from prefect.transactions import Transaction, transaction
+from prefect.transactions import IsolationLevel, Transaction, transaction
 from prefect.utilities.annotations import NotSet
 from prefect.utilities.asyncutils import run_coro_as_sync
 from prefect.utilities.callables import call_with_parameters, parameters_to_args_kwargs
@@ -364,7 +364,6 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             new_state = Running()

         self.task_run.start_time = new_state.timestamp
-        self.task_run.run_count += 1

         flow_run_context = FlowRunContext.get()
         if flow_run_context and flow_run_context.flow_run:
@@ -412,6 +411,9 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         self.task_run.state_type = new_state.type
         self.task_run.state_name = new_state.name

+        if new_state.is_running():
+            self.task_run.run_count += 1
+
         if new_state.is_final():
             if isinstance(state.data, BaseResult) and state.data.has_cached_object():
                 # Avoid fetching the result unless it is cached, otherwise we defeat
@@ -511,7 +513,6 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         else:
             delay = None
             new_state = Retrying()
-            self.task_run.run_count += 1

         self.logger.info(
             "Task run failed with exception: %r - " "Retry %s/%s will start %s",
@@ -692,8 +693,9 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             if scheduled_time := self.state.state_details.scheduled_time:
                 sleep_time = (scheduled_time - pendulum.now("utc")).total_seconds()
                 await anyio.sleep(sleep_time if sleep_time > 0 else 0)
+            new_state = Retrying() if self.state.name == "AwaitingRetry" else Running()
             self.set_state(
-
+                new_state,
                 force=True,
             )
@@ -725,12 +727,21 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             else PREFECT_TASKS_REFRESH_CACHE.value()
         )

+        isolation_level = (
+            IsolationLevel(self.task.cache_policy.isolation_level)
+            if self.task.cache_policy
+            and self.task.cache_policy is not NotSet
+            and self.task.cache_policy.isolation_level is not None
+            else None
+        )
+
         with transaction(
             key=self.compute_transaction_key(),
             store=get_result_store(),
             overwrite=overwrite,
             logger=self.logger,
             write_on_commit=should_persist_result(),
+            isolation_level=isolation_level,
         ) as txn:
             yield txn

@@ -874,7 +885,6 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             new_state = Running()

         self.task_run.start_time = new_state.timestamp
-        self.task_run.run_count += 1

         flow_run_context = FlowRunContext.get()
         if flow_run_context:
@@ -922,6 +932,9 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         self.task_run.state_type = new_state.type
         self.task_run.state_name = new_state.name

+        if new_state.is_running():
+            self.task_run.run_count += 1
+
         if new_state.is_final():
             if (
                 isinstance(new_state.data, BaseResult)
@@ -1017,7 +1030,6 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         else:
             delay = None
             new_state = Retrying()
-            self.task_run.run_count += 1

         self.logger.info(
             "Task run failed with exception: %r - " "Retry %s/%s will start %s",
@@ -1190,8 +1202,9 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             if scheduled_time := self.state.state_details.scheduled_time:
                 sleep_time = (scheduled_time - pendulum.now("utc")).total_seconds()
                 await anyio.sleep(sleep_time if sleep_time > 0 else 0)
+            new_state = Retrying() if self.state.name == "AwaitingRetry" else Running()
             await self.set_state(
-
+                new_state,
                 force=True,
             )
@@ -1224,6 +1237,13 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             if self.task.refresh_cache is not None
             else PREFECT_TASKS_REFRESH_CACHE.value()
         )
+        isolation_level = (
+            IsolationLevel(self.task.cache_policy.isolation_level)
+            if self.task.cache_policy
+            and self.task.cache_policy is not NotSet
+            and self.task.cache_policy.isolation_level is not None
+            else None
+        )

         with transaction(
             key=self.compute_transaction_key(),
@@ -1231,6 +1251,7 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             overwrite=overwrite,
             logger=self.logger,
             write_on_commit=should_persist_result(),
+            isolation_level=isolation_level,
         ) as txn:
             yield txn

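A sketch of a task whose cache policy carries an isolation level; per the hunks above, the engine converts it to an `IsolationLevel` and passes it to the surrounding transaction (paths are illustrative, and SERIALIZABLE requires a lock manager):

    from pathlib import Path

    from prefect import task
    from prefect.cache_policies import INPUTS
    from prefect.locking.filesystem import FileSystemLockManager

    @task(
        cache_policy=INPUTS.configure(
            isolation_level="SERIALIZABLE",
            lock_manager=FileSystemLockManager(lock_files_directory=Path("/tmp/prefect-locks")),
        )
    )
    def compute(x: int) -> int:
        return x * 2
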
prefect/transactions.py
CHANGED
@@ -18,7 +18,11 @@ from pydantic import Field, PrivateAttr
 from typing_extensions import Self

 from prefect.context import ContextModel
-from prefect.exceptions import
+from prefect.exceptions import (
+    ConfigurationError,
+    MissingContextError,
+    SerializationError,
+)
 from prefect.logging.loggers import get_logger, get_run_logger
 from prefect.records import RecordStore
 from prefect.records.base import TransactionRecord
@@ -194,8 +198,10 @@ class Transaction(ContextModel):
             and self.key
             and not self.store.supports_isolation_level(self.isolation_level)
         ):
-            raise
-                f"Isolation level {self.isolation_level.name} is not supported by provided
+            raise ConfigurationError(
+                f"Isolation level {self.isolation_level.name} is not supported by provided "
+                "configuration. Please ensure you've provided a lock file directory or lock "
+                "manager when using the SERIALIZABLE isolation level."
             )

         # this needs to go before begin, which could set the state to committed

prefect/utilities/asyncutils.py
CHANGED
@@ -341,6 +341,13 @@ def sync_compatible(
     will submit the async method to the event loop.
     - If we cannot find an event loop, we will create a new one and run the async method
       then tear down the loop.
+
+    Note: Type checkers will infer functions decorated with `@sync_compatible` are synchronous. If
+    you want to use the decorated function in an async context, you will need to ignore the types
+    and "cast" the return type to a coroutine. For example:
+    ```python
+    result: Coroutine = sync_compatible(my_async_function)(arg1, arg2) # type: ignore
+    ```
     """

     @wraps(async_fn)

prefect/utilities/collections.py
CHANGED
@@ -18,7 +18,6 @@ from typing import (
     Generator,
     Hashable,
     Iterable,
-    Iterator,
     List,
     Optional,
     Set,
@@ -192,7 +191,9 @@ def extract_instances(
     return ret


-def batched_iterable(
+def batched_iterable(
+    iterable: Iterable[T], size: int
+) -> Generator[Tuple[T, ...], None, None]:
     """
     Yield batches of a certain size from an iterable

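Usage of the now fully annotated helper:

    from prefect.utilities.collections import batched_iterable

    # Yields tuples of at most `size` items: (0, 1, 2), (3, 4, 5), (6,)
    for batch in batched_iterable(range(7), size=3):
        print(batch)
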
prefect/utilities/engine.py
CHANGED
@@ -627,6 +627,9 @@ def link_state_to_result(state: State, result: Any) -> None:
     """

     flow_run_context = FlowRunContext.get()
+    # Drop the data field to avoid holding a strong reference to the result
+    # Holding large user objects in memory can cause memory bloat
+    linked_state = state.model_copy(update={"data": None})

     def link_if_trackable(obj: Any) -> None:
         """Track connection between a task run result and its associated state if it has a unique ID.
@@ -643,7 +646,7 @@ def link_state_to_result(state: State, result: Any) -> None:
         ):
             state.state_details.untrackable_result = True
             return
-        flow_run_context.task_run_results[id(obj)] =
+        flow_run_context.task_run_results[id(obj)] = linked_state

     if flow_run_context:
         visit_collection(expr=result, visit_fn=link_if_trackable, max_depth=1)

prefect/workers/base.py
CHANGED
@@ -869,15 +869,9 @@ class BaseWorker(abc.ABC):

         if flow_run.deployment_id:
             deployment = await self._client.read_deployment(flow_run.deployment_id)
-            if deployment and deployment.
-                limit_name =
+            if deployment and deployment.global_concurrency_limit:
+                limit_name = deployment.global_concurrency_limit.name
                 concurrency_ctx = concurrency
-
-                # ensure that the global concurrency limit is available
-                # and up-to-date before attempting to acquire a slot
-                await self._client.upsert_global_concurrency_limit_by_name(
-                    limit_name, deployment.concurrency_limit
-                )
             else:
                 limit_name = ""
                 concurrency_ctx = asyncnullcontext

{prefect_client-3.0.2.dist-info → prefect_client-3.0.3.dist-info}/RECORD
RENAMED
@@ -4,29 +4,29 @@ prefect/_version.py,sha256=I9JsXwt7BjAAbMEZgtmE3a6dJ2jqV-wqWto9D6msb3k,24597
 prefect/agent.py,sha256=BOVVY5z-vUIQ2u8LwMTXDaNys2fjOZSS5YGDwJmTQjI,230
 prefect/artifacts.py,sha256=dsxFWmdg2r9zbHM3KgKOR5YbJ29_dXUYF9kipJpbxkE,13009
 prefect/automations.py,sha256=NlQ62GPJzy-gnWQqX7c6CQJKw7p60WLGDAFcy82vtg4,5613
-prefect/cache_policies.py,sha256=
-prefect/context.py,sha256=
+prefect/cache_policies.py,sha256=thYNj0CcJjM4TJQvXsLKTIQl7t0qjEnSWzxPWPONcRw,9118
+prefect/context.py,sha256=J4GS70ZG_dkJ2v_dQWkdbuiN88iumFpoJhTu3hg7d60,21821
 prefect/engine.py,sha256=BpmDbe6miZcTl1vRkxfCPYcWSXADLigGPCagFwucMz0,1976
 prefect/exceptions.py,sha256=ondjUe0fIXXjhoFnqg8twqgLoPMR02HuQv5Az-kSG50,11348
 prefect/filesystems.py,sha256=7tqufyXIfEnMs2VE-hov3tJfBiELMhU9Dn9snmDh4B8,17304
 prefect/flow_engine.py,sha256=Z6xOO1ONAGwVNcvyvEIkJv_LB0VE5iBptV4ZWgTFqbc,30000
 prefect/flow_runs.py,sha256=EaXRIQTOnwnA0fO7_EjwafFRmS57K_CRy0Xsz3JDIhc,16070
-prefect/flows.py,sha256=
+prefect/flows.py,sha256=1NisFNzfK2owGjNdXeYWuJBTqHx7AXIeWFF_t6I1rr8,89364
 prefect/futures.py,sha256=1Uq-Q3ommCHSku_lsASuP1s3yFuYoL980fGcHdCFg30,16298
 prefect/main.py,sha256=IdtnJR5-IwP8EZsfhMFKj92ylMhNyau9X_eMcTP2ZjM,2336
-prefect/plugins.py,sha256=
+prefect/plugins.py,sha256=HY7Z7OJlltqzsUiPMEL1Y_hQbHw0CeZKayWiK-k8DP4,2435
 prefect/profiles.toml,sha256=kTvqDNMzjH3fsm5OEI-NKY4dMmipor5EvQXRB6rPEjY,522
 prefect/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-prefect/results.py,sha256=
+prefect/results.py,sha256=Rq5WQtAvGfvSdOm604LTSEG7PAO3VGl85JTCHLyIqE0,45327
 prefect/serializers.py,sha256=Lo41EM0_qGzcfB_63390Izeo3DdK6cY6VZfxa9hpSGQ,8712
-prefect/settings.py,sha256=
+prefect/settings.py,sha256=LCZEVO0cPzlDG7bR4cbUHVr_J715cRLZ87Pn22FgQcM,73286
 prefect/states.py,sha256=2lysq6X5AvqPfE3eD3D0HYt-KpFA2OUgA0c4ZQ22A_U,24906
-prefect/task_engine.py,sha256=
+prefect/task_engine.py,sha256=rcCPPrX01CxiOPhnf_7WcN0wGHbmB5VV7_OG7PKYOrY,57943
 prefect/task_runners.py,sha256=W1n0yMwbDIqnvffFVJADo9MGEbLaYkzWk52rqgnkMY4,15019
 prefect/task_runs.py,sha256=jkaQOkRKOHS8fgHUijteriFpjMSKv4zldn1D8tZHkUI,8777
 prefect/task_worker.py,sha256=a8Uw78Ms4p3ikt_la50lENmPLIa-jjbuvunvjVXvRKQ,16785
 prefect/tasks.py,sha256=35eOv7VfhziiC3hL9FxB3spYtG6tpxZBLzk5KP_8Ux8,68371
-prefect/transactions.py,sha256=
+prefect/transactions.py,sha256=XnP6Jz7uXIyU3mV1QVWii_PdnnsxdJLV238MOCtYoFw,16500
 prefect/variables.py,sha256=023cfSj_ydwvz6lyChRKnjHFfkdoYZKK_zdTtuSxrYo,4665
 prefect/_internal/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 prefect/_internal/_logging.py,sha256=HvNHY-8P469o5u4LYEDBTem69XZEt1QUeUaLToijpak,810
@@ -61,25 +61,25 @@ prefect/_internal/schemas/serializers.py,sha256=G_RGHfObjisUiRvd29p-zc6W4bwt5rE1
 prefect/_internal/schemas/validators.py,sha256=Y8bHb3EsLJTiHsffg_TPbknj0Nmln8vd6qySLFbfGzY,26546
 prefect/blocks/__init__.py,sha256=BUfh6gIwA6HEjRyVCAiv0he3M1zfM-oY-JrlBfeWeY8,182
 prefect/blocks/abstract.py,sha256=YLzCaf3yXv6wFCF5ZqCIHJNwH7fME1rLxC-SijARHzk,16319
-prefect/blocks/core.py,sha256=
+prefect/blocks/core.py,sha256=l_56oggt9uJOABHus-NCXLQ4akeY4kzyDUO37ZyosX0,52783
 prefect/blocks/fields.py,sha256=1m507VVmkpOnMF_7N-qboRjtw4_ceIuDneX3jZ3Jm54,63
 prefect/blocks/notifications.py,sha256=NEQqFobAOYWmvqbs6wPGHklrHPocJQSEsJow_CczwqE,29794
 prefect/blocks/redis.py,sha256=GUKYyx2QLtyNvgf5FT_dJxbgQcOzWCja3I23J1-AXhM,5629
 prefect/blocks/system.py,sha256=OacB-LLXaNiLY49bPx7aAjmvdEdBxNoaOdzsCUcDr2c,4563
-prefect/blocks/webhook.py,sha256=
+prefect/blocks/webhook.py,sha256=F0u1WSO17Gda8qwr9gYaA84Nfc8Qkic6HhhJMYXRzug,2496
 prefect/client/__init__.py,sha256=fFtCXsGIsBCsAMFKlUPgRVUoIeqq_CsGtFE1knhbHlU,593
 prefect/client/base.py,sha256=2K8UiWzorZNNM4c8c-OiGeZ5i5ViUfZ_Q31oPobbOO0,24956
 prefect/client/cloud.py,sha256=SOqPXvXmFxAatubTyRQQe9i3DkAf4-mZZIpSO3Oy-hA,5819
 prefect/client/collections.py,sha256=u-96saqu0RALAazRI0YaZCJahnuafMppY21KN6ggx80,1059
 prefect/client/constants.py,sha256=Z_GG8KF70vbbXxpJuqW5pLnwzujTVeHbcYYRikNmGH0,29
-prefect/client/orchestration.py,sha256=
+prefect/client/orchestration.py,sha256=XImn-8TKOYJ8LBAZ83FEC4DOf0RP6WE9BeLpDXfYX4A,149371
 prefect/client/subscriptions.py,sha256=J9uK9NGHO4VX4Y3NGgBJ4pIG_0cf-dJWPhF3f3PGYL4,3388
 prefect/client/utilities.py,sha256=89fmza0cRMOayxgXRdO51TKb11TczJ0ByOZmcZVrt44,3286
 prefect/client/schemas/__init__.py,sha256=KlyqFV-hMulMkNstBn_0ijoHoIwJZaBj6B1r07UmgvE,607
-prefect/client/schemas/actions.py,sha256=
-prefect/client/schemas/filters.py,sha256=
-prefect/client/schemas/objects.py,sha256=
-prefect/client/schemas/responses.py,sha256=
+prefect/client/schemas/actions.py,sha256=GT1VlvwV5koV690H7ViGFH3tpW7_PvDf0QJoYTcOLDg,28862
+prefect/client/schemas/filters.py,sha256=oYUBj59SC6keYHaQ8-qFaVynEAcHV8BABrQaob2mI6c,35864
+prefect/client/schemas/objects.py,sha256=UFdNqcHknHstXoVBlu-pP78fxBD1YmJyh1VOfYBJPrk,55564
+prefect/client/schemas/responses.py,sha256=tV06W8npA8oCjV9d0ZNvjro4QcbHxayb8PC4LmanXjo,15467
 prefect/client/schemas/schedules.py,sha256=8rpqjOYtknu2-1n5_WD4cOplgu93P3mCyX86B22LfL4,13070
 prefect/client/schemas/sorting.py,sha256=L-2Mx-igZPtsUoRUguTcG3nIEstMEMPD97NwPM2Ox5s,2579
 prefect/client/types/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -100,7 +100,7 @@ prefect/deployments/__init__.py,sha256=_wb7NxDKhq11z9MjYsPckmT3o6MRhGLRgCV9TmvYt
 prefect/deployments/base.py,sha256=rEMb-AXUuO66a7Qwq0KFUI1L0Xrl_-8z7cgAKaysfwg,16136
 prefect/deployments/deployments.py,sha256=EvC9qBdvJRc8CHJqRjFTqtzx75SE8bpZOl5C-2eULyA,109
 prefect/deployments/flow_runs.py,sha256=tH6lpEkgHhQ5Ipr0bhVAjN6AeOoDwY7UKrkbJihJ6D0,6567
-prefect/deployments/runner.py,sha256=
+prefect/deployments/runner.py,sha256=b7jD1DHL7y2jeBXgdBfSsnBMJPHShs4Tt1c5jAeG5Dk,41823
 prefect/deployments/schedules.py,sha256=KCYA6dOmLAoElHZuoWqdJn4Yno4TtOZtXfPOpTLb1cE,2046
 prefect/deployments/steps/__init__.py,sha256=Dlz9VqMRyG1Gal8dj8vfGpPr0LyQhZdvcciozkK8WoY,206
 prefect/deployments/steps/core.py,sha256=5vFf6BSpu992kkaYsvcPpsz-nZxFmayMIDmY9h0Hb8M,6846
@@ -133,7 +133,7 @@ prefect/input/__init__.py,sha256=Ue2h-YhYP71nEtsVJaslqMwO6C0ckjhjTYwwEgp-E3g,701
 prefect/input/actions.py,sha256=IGdWjVcesnRjLmPCzB4ZM7FkRWXDKCku6yhE-7p0vKk,3777
 prefect/input/run_input.py,sha256=2wG-0L3N0spwh61Z3xI0PM8AAjHEIQZcDN703Er_gLo,18728
 prefect/locking/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-prefect/locking/filesystem.py,sha256=
+prefect/locking/filesystem.py,sha256=GiZlLLj51cLH6QQgq7IeU6jUK6vGi0wMnOG0zaO95-c,8025
 prefect/locking/memory.py,sha256=Y1fsMSUAk3jUILzRivbxlrE9Xv8OcVbaylVf-aiEGNc,7495
 prefect/locking/protocol.py,sha256=o5-48SxvEDAdVwW8RIn7rCN32CmvIsaVHTztESUXuHU,4232
 prefect/logging/__init__.py,sha256=zx9f5_dWrR4DbcTOFBpNGOPoCZ1QcPFudr7zxb2XRpA,148
@@ -150,7 +150,7 @@ prefect/records/filesystem.py,sha256=X-h7r5deiHH5IaaDk4ugOCmR5ZKnJeU2cLgp0AkMt0E
 prefect/records/memory.py,sha256=YdzQvEfb-CX0sKxAZK5TaNxVvAlyYlZse9qdoer6Xbk,6447
 prefect/records/result_store.py,sha256=3ZUFNHCCv_qBQhmIFdvlK_GMnPZcFacaI9dVdDKWdwA,2431
 prefect/runner/__init__.py,sha256=7U-vAOXFkzMfRz1q8Uv6Otsvc0OrPYLLP44srwkJ_8s,89
-prefect/runner/runner.py,sha256=
+prefect/runner/runner.py,sha256=P1r2X59rlGz7k5QNjKcvajs4-IfaA8fpu6Ag6u2Wpxk,49969
 prefect/runner/server.py,sha256=2o5vhrL7Zbn-HBStWhCjqqViex5Ye9GiQ1EW9RSEzdo,10500
 prefect/runner/storage.py,sha256=OsBa4nWdFxOTiAMNLFpexBdi5K3iuxidQx4YWZwditE,24734
 prefect/runner/submit.py,sha256=RuyDr-ved9wjYYarXiehY5oJVFf_HE3XKKACNWpxpPc,8131
@@ -165,14 +165,14 @@ prefect/types/__init__.py,sha256=SAHJDtWEGidTKXQACJ38nj6fq8r57Gj0Pwo4Gy7pVWs,223
 prefect/types/entrypoint.py,sha256=2FF03-wLPgtnqR_bKJDB2BsXXINPdu8ptY9ZYEZnXg8,328
 prefect/utilities/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 prefect/utilities/annotations.py,sha256=Ocj2s5zhnGr8uXUBnOli-OrybXVJdu4-uZvCRpKpV_Q,2820
-prefect/utilities/asyncutils.py,sha256=
+prefect/utilities/asyncutils.py,sha256=jWj2bMx2yLOd2QTouMOQFOtqy2DLnfefJNlujbMZZYU,20198
 prefect/utilities/callables.py,sha256=53yqDgkx7Zb_uS4v1_ltrPrvdqjwkHvqK8A0E958dFk,24859
-prefect/utilities/collections.py,sha256=
+prefect/utilities/collections.py,sha256=_YVHZfT49phrXq7aDUmn4pqWwEtJQTPy2nJD0M1sz0o,17264
 prefect/utilities/compat.py,sha256=mNQZDnzyKaOqy-OV-DnmH_dc7CNF5nQgW_EsA4xMr7g,906
 prefect/utilities/context.py,sha256=BThuUW94-IYgFYTeMIM9KMo8ShT3oiI7w5ajZHzU1j0,1377
 prefect/utilities/dispatch.py,sha256=EthEmyRwv-4W8z2BJclrsOQHJ_pJoZYL0t2cyYPEa-E,6098
 prefect/utilities/dockerutils.py,sha256=kRozGQ7JO6Uxl-ljWtDryzxhf96rHL78aHYDh255Em4,20324
-prefect/utilities/engine.py,sha256=
+prefect/utilities/engine.py,sha256=KaGtKWNZ-EaSTTppL7zpqWWjDLpMcPTVK0Gfd4zXpRM,32087
 prefect/utilities/filesystem.py,sha256=frAyy6qOeYa7c-jVbEUGZQEe6J1yF8I_SvUepPd59gI,4415
 prefect/utilities/hashing.py,sha256=EOwZLmoIZImuSTxAvVqInabxJ-4RpEfYeg9e2EDQF8o,1752
 prefect/utilities/importtools.py,sha256=aO-xhf2h2KzsLGvSKwRAZLB4ITeW9rsV0Ys-gwq3i7o,19426
@@ -192,14 +192,14 @@ prefect/utilities/schema_tools/__init__.py,sha256=KsFsTEHQqgp89TkDpjggkgBBywoHQP
 prefect/utilities/schema_tools/hydration.py,sha256=k12qVCdLLrK-mNo1hPCdhxM5f_N14Nj0vJdtiWYWffk,8858
 prefect/utilities/schema_tools/validation.py,sha256=2GCjxwApTFwzey40ul9OkcAXrU3r-kWK__9ucMo0qbk,9744
 prefect/workers/__init__.py,sha256=8dP8SLZbWYyC_l9DRTQSE3dEbDgns5DZDhxkp_NfsbQ,35
-prefect/workers/base.py,sha256=
+prefect/workers/base.py,sha256=p3rZBZ5rmiAkpuR7GYK6O6Qn4emt-pqAKDeMCgEv9Ag,45880
 prefect/workers/block.py,sha256=BOVVY5z-vUIQ2u8LwMTXDaNys2fjOZSS5YGDwJmTQjI,230
 prefect/workers/cloud.py,sha256=BOVVY5z-vUIQ2u8LwMTXDaNys2fjOZSS5YGDwJmTQjI,230
 prefect/workers/process.py,sha256=tcJ3fbiraLCfpVGpv8dOHwMSfVzeD_kyguUOvPuIz6I,19796
 prefect/workers/server.py,sha256=lgh2FfSuaNU7b6HPxSFm8JtKvAvHsZGkiOo4y4tW1Cw,2022
 prefect/workers/utilities.py,sha256=VfPfAlGtTuDj0-Kb8WlMgAuOfgXCdrGAnKMapPSBrwc,2483
-prefect_client-3.0.
-prefect_client-3.0.
-prefect_client-3.0.
-prefect_client-3.0.
-prefect_client-3.0.
+prefect_client-3.0.3.dist-info/LICENSE,sha256=MCxsn8osAkzfxKC4CC_dLcUkU8DZLkyihZ8mGs3Ah3Q,11357
+prefect_client-3.0.3.dist-info/METADATA,sha256=WJVB5YHCoG2EQzn1y8HlCSgGcy2gtWZtNHChFI7CBE4,7332
+prefect_client-3.0.3.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
+prefect_client-3.0.3.dist-info/top_level.txt,sha256=MJZYJgFdbRc2woQCeB4vM6T33tr01TmkEhRcns6H_H4,8
+prefect_client-3.0.3.dist-info/RECORD,,

{prefect_client-3.0.2.dist-info → prefect_client-3.0.3.dist-info}/LICENSE: file without changes
{prefect_client-3.0.2.dist-info → prefect_client-3.0.3.dist-info}/WHEEL: file without changes
{prefect_client-3.0.2.dist-info → prefect_client-3.0.3.dist-info}/top_level.txt: file without changes