prefect-client 3.0.1__py3-none-any.whl → 3.0.3__py3-none-any.whl
This diff compares the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- prefect/_internal/compatibility/deprecated.py +1 -1
- prefect/blocks/core.py +5 -4
- prefect/blocks/notifications.py +21 -0
- prefect/blocks/webhook.py +17 -1
- prefect/cache_policies.py +98 -28
- prefect/client/orchestration.py +42 -20
- prefect/client/schemas/actions.py +10 -2
- prefect/client/schemas/filters.py +4 -2
- prefect/client/schemas/objects.py +48 -6
- prefect/client/schemas/responses.py +15 -1
- prefect/client/types/flexible_schedule_list.py +1 -1
- prefect/concurrency/asyncio.py +45 -6
- prefect/concurrency/services.py +1 -1
- prefect/concurrency/sync.py +21 -27
- prefect/concurrency/v1/asyncio.py +3 -0
- prefect/concurrency/v1/sync.py +4 -5
- prefect/context.py +6 -6
- prefect/deployments/runner.py +43 -5
- prefect/events/actions.py +6 -0
- prefect/flow_engine.py +12 -4
- prefect/flows.py +15 -11
- prefect/locking/filesystem.py +243 -0
- prefect/logging/handlers.py +0 -2
- prefect/logging/loggers.py +0 -18
- prefect/logging/logging.yml +1 -0
- prefect/main.py +19 -5
- prefect/plugins.py +9 -1
- prefect/records/base.py +12 -0
- prefect/records/filesystem.py +6 -2
- prefect/records/memory.py +6 -0
- prefect/records/result_store.py +6 -0
- prefect/results.py +192 -29
- prefect/runner/runner.py +74 -6
- prefect/settings.py +31 -1
- prefect/states.py +34 -17
- prefect/task_engine.py +58 -43
- prefect/transactions.py +113 -52
- prefect/utilities/asyncutils.py +7 -0
- prefect/utilities/collections.py +3 -2
- prefect/utilities/engine.py +20 -9
- prefect/utilities/importtools.py +1 -0
- prefect/utilities/urls.py +70 -12
- prefect/workers/base.py +10 -8
- {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/METADATA +1 -1
- {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/RECORD +48 -47
- {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/LICENSE +0 -0
- {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/WHEEL +0 -0
- {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/top_level.txt +0 -0
prefect/concurrency/asyncio.py
CHANGED
@@ -6,6 +6,8 @@ import anyio
 import httpx
 import pendulum
 
+from prefect._internal.compatibility.deprecated import deprecated_parameter
+
 try:
     from pendulum import Interval
 except ImportError:
@@ -14,6 +16,8 @@ except ImportError:
 
 from prefect.client.orchestration import get_client
 from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
+from prefect.logging.loggers import get_run_logger
+from prefect.utilities.asyncutils import sync_compatible
 
 from .context import ConcurrencyContext
 from .events import (
@@ -36,8 +40,9 @@ async def concurrency(
     names: Union[str, List[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
-    create_if_missing: bool = True,
     max_retries: Optional[int] = None,
+    create_if_missing: Optional[bool] = None,
+    strict: bool = False,
 ) -> AsyncGenerator[None, None]:
     """A context manager that acquires and releases concurrency slots from the
     given concurrency limits.
@@ -47,11 +52,13 @@ async def concurrency(
         occupy: The number of slots to acquire and hold from each limit.
         timeout_seconds: The number of seconds to wait for the slots to be acquired before
             raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
-        create_if_missing: Whether to create the concurrency limits if they do not exist.
         max_retries: The maximum number of retries to acquire the concurrency slots.
+        strict: A boolean specifying whether to raise an error if the concurrency limit does not exist.
+            Defaults to `False`.
 
     Raises:
         TimeoutError: If the slots are not acquired within the given timeout.
+        ConcurrencySlotAcquisitionError: If the concurrency limit does not exist and `strict` is `True`.
 
     Example:
         A simple example of using the async `concurrency` context manager:
@@ -78,6 +85,7 @@ async def concurrency(
         timeout_seconds=timeout_seconds,
         create_if_missing=create_if_missing,
         max_retries=max_retries,
+        strict=strict,
     )
     acquisition_time = pendulum.now("UTC")
     emitted_events = _emit_concurrency_acquisition_events(limits, occupy)
@@ -106,7 +114,8 @@ async def rate_limit(
     names: Union[str, List[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
-    create_if_missing: Optional[bool] =
+    create_if_missing: Optional[bool] = None,
+    strict: bool = False,
 ) -> None:
     """Block execution until an `occupy` number of slots of the concurrency
     limits given in `names` are acquired. Requires that all given concurrency
@@ -117,7 +126,12 @@ async def rate_limit(
         occupy: The number of slots to acquire and hold from each limit.
         timeout_seconds: The number of seconds to wait for the slots to be acquired before
             raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
-
+        strict: A boolean specifying whether to raise an error if the concurrency limit does not exist.
+            Defaults to `False`.
+
+    Raises:
+        TimeoutError: If the slots are not acquired within the given timeout.
+        ConcurrencySlotAcquisitionError: If the concurrency limit does not exist and `strict` is `True`.
     """
     if not names:
         return
@@ -130,17 +144,27 @@ async def rate_limit(
         mode="rate_limit",
         timeout_seconds=timeout_seconds,
         create_if_missing=create_if_missing,
+        strict=strict,
     )
     _emit_concurrency_acquisition_events(limits, occupy)
 
 
+@sync_compatible
+@deprecated_parameter(
+    name="create_if_missing",
+    start_date="Sep 2024",
+    end_date="Oct 2024",
+    when=lambda x: x is not None,
+    help="Limits must be explicitly created before acquiring concurrency slots; see `strict` if you want to enforce this behavior.",
+)
 async def _acquire_concurrency_slots(
     names: List[str],
     slots: int,
     mode: Union[Literal["concurrency"], Literal["rate_limit"]] = "concurrency",
     timeout_seconds: Optional[float] = None,
-    create_if_missing: Optional[bool] =
+    create_if_missing: Optional[bool] = None,
     max_retries: Optional[int] = None,
+    strict: bool = False,
 ) -> List[MinimalConcurrencyLimitResponse]:
     service = ConcurrencySlotAcquisitionService.instance(frozenset(names))
     future = service.send(
@@ -158,9 +182,24 @@ async def _acquire_concurrency_slots(
             f"Unable to acquire concurrency slots on {names!r}"
         ) from response_or_exception
 
-    return _response_to_minimal_concurrency_limit_response(response_or_exception)
+    retval = _response_to_minimal_concurrency_limit_response(response_or_exception)
+
+    if strict and not retval:
+        raise ConcurrencySlotAcquisitionError(
+            f"Concurrency limits {names!r} must be created before acquiring slots"
+        )
+    elif not retval:
+        try:
+            logger = get_run_logger()
+            logger.warning(
+                f"Concurrency limits {names!r} do not exist - skipping acquisition."
+            )
+        except Exception:
+            pass
+    return retval
 
 
+@sync_compatible
 async def _release_concurrency_slots(
     names: List[str], slots: int, occupancy_seconds: float
 ) -> List[MinimalConcurrencyLimitResponse]:
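The net effect for callers of the async `concurrency` context manager is that limits are no longer auto-created and a new `strict` flag controls what happens when a named limit is missing. A minimal sketch of the new call site (assuming a concurrency limit named "database" has already been created through the Prefect API, CLI, or UI):

import asyncio

from prefect.concurrency.asyncio import concurrency


async def process_order(order_id: str) -> None:
    # With strict=True, a missing "database" limit raises
    # ConcurrencySlotAcquisitionError instead of logging a warning and
    # proceeding without holding a slot.
    async with concurrency("database", occupy=1, strict=True):
        print(f"processing {order_id} while holding a slot")


if __name__ == "__main__":
    asyncio.run(process_order("order-1"))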
prefect/concurrency/services.py
CHANGED
@@ -63,7 +63,7 @@ class ConcurrencySlotAcquisitionService(QueueService):
         slots: int,
         mode: str,
         timeout_seconds: Optional[float] = None,
-        create_if_missing: Optional[bool] =
+        create_if_missing: Optional[bool] = None,
         max_retries: Optional[int] = None,
     ) -> httpx.Response:
         with timeout_async(seconds=timeout_seconds):
prefect/concurrency/sync.py
CHANGED
@@ -1,8 +1,5 @@
 from contextlib import contextmanager
 from typing import (
-    Any,
-    Awaitable,
-    Callable,
     Generator,
     List,
     Optional,
@@ -19,8 +16,6 @@ except ImportError:
     # pendulum < 3
     from pendulum.period import Period as Interval  # type: ignore
 
-from prefect._internal.concurrency.api import create_call, from_sync
-from prefect._internal.concurrency.event_loop import get_running_loop
 from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
 
 from .asyncio import (
@@ -40,8 +35,9 @@ def concurrency(
     names: Union[str, List[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
-    create_if_missing: bool = True,
     max_retries: Optional[int] = None,
+    strict: bool = False,
+    create_if_missing: Optional[bool] = None,
 ) -> Generator[None, None, None]:
     """A context manager that acquires and releases concurrency slots from the
     given concurrency limits.
@@ -51,11 +47,13 @@ def concurrency(
         occupy: The number of slots to acquire and hold from each limit.
         timeout_seconds: The number of seconds to wait for the slots to be acquired before
             raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
-        create_if_missing: Whether to create the concurrency limits if they do not exist.
         max_retries: The maximum number of retries to acquire the concurrency slots.
+        strict: A boolean specifying whether to raise an error if the concurrency limit does not exist.
+            Defaults to `False`.
 
     Raises:
         TimeoutError: If the slots are not acquired within the given timeout.
+        ConcurrencySlotAcquisitionError: If the concurrency limit does not exist and `strict` is `True`.
 
     Example:
         A simple example of using the sync `concurrency` context manager:
@@ -76,13 +74,14 @@ def concurrency(
 
     names = names if isinstance(names, list) else [names]
 
-    limits: List[MinimalConcurrencyLimitResponse] = _call_async_function_from_sync(
-        _acquire_concurrency_slots,
+    limits: List[MinimalConcurrencyLimitResponse] = _acquire_concurrency_slots(
         names,
         occupy,
         timeout_seconds=timeout_seconds,
         create_if_missing=create_if_missing,
+        strict=strict,
         max_retries=max_retries,
+        _sync=True,
     )
     acquisition_time = pendulum.now("UTC")
     emitted_events = _emit_concurrency_acquisition_events(limits, occupy)
@@ -91,11 +90,11 @@ def concurrency(
         yield
     finally:
         occupancy_period = cast(Interval, pendulum.now("UTC") - acquisition_time)
-        _call_async_function_from_sync(
-            _release_concurrency_slots,
+        _release_concurrency_slots(
             names,
             occupy,
             occupancy_period.total_seconds(),
+            _sync=True,
         )
         _emit_concurrency_release_events(limits, occupy, emitted_events)
 
@@ -104,7 +103,8 @@ def rate_limit(
     names: Union[str, List[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
-    create_if_missing: Optional[bool] =
+    create_if_missing: Optional[bool] = None,
+    strict: bool = False,
 ) -> None:
     """Block execution until an `occupy` number of slots of the concurrency
     limits given in `names` are acquired. Requires that all given concurrency
@@ -115,31 +115,25 @@ def rate_limit(
         occupy: The number of slots to acquire and hold from each limit.
         timeout_seconds: The number of seconds to wait for the slots to be acquired before
             raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
-
+        strict: A boolean specifying whether to raise an error if the concurrency limit does not exist.
+            Defaults to `False`.
+
+    Raises:
+        TimeoutError: If the slots are not acquired within the given timeout.
+        ConcurrencySlotAcquisitionError: If the concurrency limit does not exist and `strict` is `True`.
     """
     if not names:
         return
 
     names = names if isinstance(names, list) else [names]
 
-    limits = _call_async_function_from_sync(
-        _acquire_concurrency_slots,
+    limits = _acquire_concurrency_slots(
         names,
         occupy,
         mode="rate_limit",
         timeout_seconds=timeout_seconds,
         create_if_missing=create_if_missing,
+        strict=strict,
+        _sync=True,
     )
     _emit_concurrency_acquisition_events(limits, occupy)
-
-
-def _call_async_function_from_sync(
-    fn: Callable[..., Awaitable[T]], *args: Any, **kwargs: Any
-) -> T:
-    loop = get_running_loop()
-    call = create_call(fn, *args, **kwargs)
-
-    if loop is not None:
-        return from_sync.call_soon_in_loop_thread(call).result()
-    else:
-        return call()  # type: ignore [return-value]
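The synchronous wrappers now call the `@sync_compatible` helpers directly with `_sync=True` instead of routing through the removed `_call_async_function_from_sync` shim, and they gain the same `strict` flag. A small sketch of the sync `rate_limit` call site under the new signature (the "notifications" limit name is illustrative):

from prefect.concurrency.sync import rate_limit


def send_notifications(user_ids: list) -> None:
    for user_id in user_ids:
        # Blocks until a "notifications" slot is available; with strict=True a
        # missing limit raises ConcurrencySlotAcquisitionError rather than
        # silently skipping throttling.
        rate_limit("notifications", occupy=1, strict=True)
        print(f"notifying {user_id}")


if __name__ == "__main__":
    send_notifications(["u1", "u2"])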
prefect/concurrency/v1/asyncio.py
CHANGED
@@ -16,6 +16,7 @@ except ImportError:
     from pendulum.period import Period as Interval  # type: ignore
 
 from prefect.client.orchestration import get_client
+from prefect.utilities.asyncutils import sync_compatible
 
 from .context import ConcurrencyContext
 from .events import (
@@ -98,6 +99,7 @@ async def concurrency(
         _emit_concurrency_release_events(limits, emitted_events, task_run_id)
 
 
+@sync_compatible
 async def _acquire_concurrency_slots(
     names: List[str],
     task_run_id: UUID,
@@ -120,6 +122,7 @@ async def _acquire_concurrency_slots(
     return _response_to_concurrency_limit_response(response_or_exception)
 
 
+@sync_compatible
 async def _release_concurrency_slots(
     names: List[str],
     task_run_id: UUID,
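Here the only change is decorating the async acquire/release helpers with `@sync_compatible`, which is what lets the synchronous code paths elsewhere in this release call them with `_sync=True`. A minimal sketch of that general pattern (the `fetch_value` function is illustrative, not part of Prefect):

from prefect.utilities.asyncutils import sync_compatible


@sync_compatible
async def fetch_value(key: str) -> str:
    # Awaitable from async code; runs to completion when called with
    # _sync=True from synchronous code.
    return f"value-for-{key}"


async def async_caller() -> str:
    return await fetch_value("a")


def sync_caller() -> str:
    return fetch_value("b", _sync=True)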
prefect/concurrency/v1/sync.py
CHANGED
@@ -12,7 +12,6 @@ from uuid import UUID
 import pendulum
 
 from ...client.schemas.responses import MinimalConcurrencyLimitResponse
-from ..sync import _call_async_function_from_sync
 
 try:
     from pendulum import Interval
@@ -70,11 +69,11 @@ def concurrency(
 
     names = names if isinstance(names, list) else [names]
 
-    limits: List[MinimalConcurrencyLimitResponse] = _call_async_function_from_sync(
-        _acquire_concurrency_slots,
+    limits: List[MinimalConcurrencyLimitResponse] = _acquire_concurrency_slots(
         names,
         timeout_seconds=timeout_seconds,
         task_run_id=task_run_id,
+        _sync=True,
     )
     acquisition_time = pendulum.now("UTC")
     emitted_events = _emit_concurrency_acquisition_events(limits, task_run_id)
@@ -83,10 +82,10 @@ def concurrency(
         yield
     finally:
         occupancy_period = cast(Interval, pendulum.now("UTC") - acquisition_time)
-        _call_async_function_from_sync(
-            _release_concurrency_slots,
+        _release_concurrency_slots(
             names,
             task_run_id,
             occupancy_period.total_seconds(),
+            _sync=True,
         )
         _emit_concurrency_release_events(limits, emitted_events, task_run_id)
prefect/context.py
CHANGED
@@ -9,7 +9,6 @@ For more user-accessible information about the current run, see [`prefect.runtim
 import os
 import sys
 import warnings
-import weakref
 from contextlib import ExitStack, asynccontextmanager, contextmanager
 from contextvars import ContextVar, Token
 from pathlib import Path
@@ -40,7 +39,7 @@ from prefect.client.orchestration import PrefectClient, SyncPrefectClient, get_c
 from prefect.client.schemas import FlowRun, TaskRun
 from prefect.events.worker import EventsWorker
 from prefect.exceptions import MissingContextError
-from prefect.results import ResultStore
+from prefect.results import ResultStore, get_default_persist_setting
 from prefect.settings import PREFECT_HOME, Profile, Settings
 from prefect.states import State
 from prefect.task_runners import TaskRunner
@@ -343,6 +342,7 @@ class EngineContext(RunContext):
 
     # Result handling
     result_store: ResultStore
+    persist_result: bool = Field(default_factory=get_default_persist_setting)
 
     # Counter for task calls allowing unique
     task_run_dynamic_keys: Dict[str, int] = Field(default_factory=dict)
@@ -352,10 +352,7 @@ class EngineContext(RunContext):
 
     # Tracking for result from task runs in this flow run for dependency tracking
     # Holds the ID of the object returned by the task run and task run state
-
-    task_run_results: Mapping[int, State] = Field(
-        default_factory=weakref.WeakValueDictionary
-    )
+    task_run_results: Mapping[int, State] = Field(default_factory=dict)
 
     # Events worker to emit events
     events: Optional[EventsWorker] = None
@@ -372,6 +369,7 @@ class EngineContext(RunContext):
             "start_time",
             "input_keyset",
             "result_store",
+            "persist_result",
         },
         exclude_unset=True,
     )
@@ -397,6 +395,7 @@ class TaskRunContext(RunContext):
 
     # Result handling
     result_store: ResultStore
+    persist_result: bool = Field(default_factory=get_default_persist_setting)
 
     __var__ = ContextVar("task_run")
 
@@ -410,6 +409,7 @@ class TaskRunContext(RunContext):
             "start_time",
             "input_keyset",
             "result_store",
+            "persist_result",
         },
         exclude_unset=True,
    )
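Both run contexts now carry a `persist_result` flag, defaulting to `get_default_persist_setting()` and included in their serialized form. A short sketch of reading it from inside a task (assuming the engine has populated the context as usual):

from prefect import flow, task
from prefect.context import TaskRunContext


@task
def report_persistence() -> bool:
    ctx = TaskRunContext.get()
    # The new field reflects whether this task run's result will be persisted.
    return ctx.persist_result


@flow
def my_flow() -> bool:
    return report_persistence()


if __name__ == "__main__":
    print(my_flow())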
prefect/deployments/runner.py
CHANGED
@@ -54,6 +54,7 @@ from prefect._internal.schemas.validators import (
 )
 from prefect.client.orchestration import get_client
 from prefect.client.schemas.actions import DeploymentScheduleCreate
+from prefect.client.schemas.objects import ConcurrencyLimitConfig, ConcurrencyOptions
 from prefect.client.schemas.schedules import (
     SCHEDULE_TYPES,
     construct_schedule,
@@ -147,6 +148,10 @@ class RunnerDeployment(BaseModel):
         default=None,
         description="The maximum number of concurrent runs of this deployment.",
     )
+    concurrency_options: Optional[ConcurrencyOptions] = Field(
+        default=None,
+        description="The concurrency limit config for the deployment.",
+    )
     paused: Optional[bool] = Field(
         default=None, description="Whether or not the deployment is paused."
     )
@@ -279,6 +284,7 @@ class RunnerDeployment(BaseModel):
                 paused=self.paused,
                 schedules=self.schedules,
                 concurrency_limit=self.concurrency_limit,
+                concurrency_options=self.concurrency_options,
                 parameters=self.parameters,
                 description=self.description,
                 tags=self.tags,
@@ -437,7 +443,7 @@ class RunnerDeployment(BaseModel):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
-        concurrency_limit: Optional[int] = None,
+        concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -462,6 +468,7 @@ class RunnerDeployment(BaseModel):
             paused: Whether or not to set this deployment as paused.
             schedules: A list of schedule objects defining when to execute runs of this deployment.
                 Used to define multiple schedules or additional scheduling options like `timezone`.
+            concurrency_limit: The maximum number of concurrent runs this deployment will allow.
             triggers: A list of triggers that should kick of a run of this flow.
             parameters: A dictionary of default parameter values to pass to runs of this flow.
             description: A description for the created deployment. Defaults to the flow's
@@ -487,11 +494,20 @@ class RunnerDeployment(BaseModel):
 
         job_variables = job_variables or {}
 
+        if isinstance(concurrency_limit, ConcurrencyLimitConfig):
+            concurrency_options = {
+                "collision_strategy": concurrency_limit.collision_strategy
+            }
+            concurrency_limit = concurrency_limit.limit
+        else:
+            concurrency_options = None
+
         deployment = cls(
             name=Path(name).stem,
             flow_name=flow.name,
             schedules=constructed_schedules,
             concurrency_limit=concurrency_limit,
+            concurrency_options=concurrency_options,
             paused=paused,
             tags=tags or [],
             triggers=triggers or [],
@@ -558,6 +574,7 @@ class RunnerDeployment(BaseModel):
         cls,
         entrypoint: str,
         name: str,
+        flow_name: Optional[str] = None,
         interval: Optional[
             Union[Iterable[Union[int, float, timedelta]], int, float, timedelta]
         ] = None,
@@ -565,7 +582,7 @@ class RunnerDeployment(BaseModel):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
-        concurrency_limit: Optional[int] = None,
+        concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -583,6 +600,7 @@ class RunnerDeployment(BaseModel):
             entrypoint: The path to a file containing a flow and the name of the flow function in
                 the format `./path/to/file.py:flow_func_name`.
             name: A name for the deployment
+            flow_name: The name of the flow to deploy
             interval: An interval on which to execute the current flow. Accepts either a number
                 or a timedelta object. If a number is given, it will be interpreted as seconds.
             cron: A cron schedule of when to execute runs of this flow.
@@ -618,11 +636,20 @@ class RunnerDeployment(BaseModel):
             schedules=schedules,
         )
 
+        if isinstance(concurrency_limit, ConcurrencyLimitConfig):
+            concurrency_options = {
+                "collision_strategy": concurrency_limit.collision_strategy
+            }
+            concurrency_limit = concurrency_limit.limit
+        else:
+            concurrency_options = None
+
         deployment = cls(
             name=Path(name).stem,
-            flow_name=flow.name,
+            flow_name=flow_name or flow.name,
             schedules=constructed_schedules,
             concurrency_limit=concurrency_limit,
+            concurrency_options=concurrency_options,
             paused=paused,
             tags=tags or [],
             triggers=triggers or [],
@@ -648,6 +675,7 @@ class RunnerDeployment(BaseModel):
         storage: RunnerStorage,
         entrypoint: str,
         name: str,
+        flow_name: Optional[str] = None,
         interval: Optional[
             Union[Iterable[Union[int, float, timedelta]], int, float, timedelta]
         ] = None,
@@ -655,7 +683,7 @@ class RunnerDeployment(BaseModel):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
-        concurrency_limit: Optional[int] = None,
+        concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -674,6 +702,7 @@ class RunnerDeployment(BaseModel):
             entrypoint: The path to a file containing a flow and the name of the flow function in
                 the format `./path/to/file.py:flow_func_name`.
             name: A name for the deployment
+            flow_name: The name of the flow to deploy
             storage: A storage object to use for retrieving flow code. If not provided, a
                 URL must be provided.
             interval: An interval on which to execute the current flow. Accepts either a number
@@ -705,6 +734,14 @@ class RunnerDeployment(BaseModel):
             schedules=schedules,
         )
 
+        if isinstance(concurrency_limit, ConcurrencyLimitConfig):
+            concurrency_options = {
+                "collision_strategy": concurrency_limit.collision_strategy
+            }
+            concurrency_limit = concurrency_limit.limit
+        else:
+            concurrency_options = None
+
         job_variables = job_variables or {}
 
         with tempfile.TemporaryDirectory() as tmpdir:
@@ -718,9 +755,10 @@ class RunnerDeployment(BaseModel):
 
             deployment = cls(
                 name=Path(name).stem,
-                flow_name=flow.name,
+                flow_name=flow_name or flow.name,
                 schedules=constructed_schedules,
                 concurrency_limit=concurrency_limit,
+                concurrency_options=concurrency_options,
                 paused=paused,
                 tags=tags or [],
                 triggers=triggers or [],
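With these changes a deployment's concurrency limit can be supplied either as a plain integer or as a `ConcurrencyLimitConfig`, whose `collision_strategy` is split off into `concurrency_options`. A hedged sketch against the new `from_flow` signature (the `"CANCEL_NEW"` strategy value is an assumption; only the `limit` and `collision_strategy` fields appear in this diff):

from prefect import flow
from prefect.client.schemas.objects import ConcurrencyLimitConfig
from prefect.deployments.runner import RunnerDeployment


@flow
def nightly_etl() -> None:
    print("running ETL")


# Plain integer: only concurrency_limit is set.
simple = RunnerDeployment.from_flow(nightly_etl, name="etl-simple", concurrency_limit=3)

# ConcurrencyLimitConfig: the limit plus a collision strategy, which from_flow
# splits into concurrency_limit and concurrency_options.
configured = RunnerDeployment.from_flow(
    nightly_etl,
    name="etl-configured",
    concurrency_limit=ConcurrencyLimitConfig(limit=3, collision_strategy="CANCEL_NEW"),
)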
prefect/events/actions.py
CHANGED
@@ -113,6 +113,12 @@ class CancelFlowRun(Action):
     type: Literal["cancel-flow-run"] = "cancel-flow-run"
 
 
+class ResumeFlowRun(Action):
+    """Resumes a flow run associated with the trigger"""
+
+    type: Literal["resume-flow-run"] = "resume-flow-run"
+
+
 class SuspendFlowRun(Action):
     """Suspends a flow run associated with the trigger"""
 
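`ResumeFlowRun` joins the existing flow-run actions as another discriminated-union member keyed by its `type` field; a minimal sketch of constructing it (the surrounding automation and trigger setup is not part of this diff):

from prefect.events.actions import CancelFlowRun, ResumeFlowRun, SuspendFlowRun

resume = ResumeFlowRun()
assert resume.type == "resume-flow-run"

# Actions can be mixed in the same list when defining an automation's response.
actions = [SuspendFlowRun(), ResumeFlowRun(), CancelFlowRun()]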
prefect/flow_engine.py
CHANGED
@@ -47,7 +47,12 @@ from prefect.logging.loggers import (
     get_run_logger,
     patch_print,
 )
-from prefect.results import
+from prefect.results import (
+    BaseResult,
+    ResultStore,
+    get_result_store,
+    should_persist_result,
+)
 from prefect.settings import PREFECT_DEBUG_MODE
 from prefect.states import (
     Failed,
@@ -202,7 +207,7 @@ class FlowRunEngine(Generic[P, R]):
                 self.handle_exception(
                     exc,
                     msg=message,
-                    result_store=
+                    result_store=get_result_store().update_for_flow(
                         self.flow, _sync=True
                     ),
                 )
@@ -271,7 +276,7 @@ class FlowRunEngine(Generic[P, R]):
                 return_value_to_state(
                     resolved_result,
                     result_store=result_store,
-                    write_result=
+                    write_result=should_persist_result(),
                 )
             )
             self.set_state(terminal_state)
@@ -507,10 +512,13 @@ class FlowRunEngine(Generic[P, R]):
                     flow_run=self.flow_run,
                     parameters=self.parameters,
                     client=client,
-                    result_store=
+                    result_store=get_result_store().update_for_flow(
                         self.flow, _sync=True
                     ),
                     task_runner=task_runner,
+                    persist_result=self.flow.persist_result
+                    if self.flow.persist_result is not None
+                    else should_persist_result(),
                 )
             )
             stack.enter_context(ConcurrencyContextV1())
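The engine now seeds the run context's `persist_result` from the flow's own setting when one is given, falling back to `should_persist_result()`. In user code that corresponds to the existing `persist_result` flag on the decorator, e.g.:

from prefect import flow


@flow(persist_result=True)
def always_persisted() -> int:
    # The engine forwards this flag into EngineContext.persist_result.
    return 42


@flow
def uses_default() -> int:
    # No explicit setting: the engine falls back to should_persist_result().
    return 7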
|