prefect-client 3.2.7__py3-none-any.whl → 3.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/_build_info.py +3 -3
- prefect/_experimental/bundles.py +79 -0
- prefect/_waiters.py +254 -0
- prefect/client/subscriptions.py +2 -1
- prefect/events/clients.py +19 -17
- prefect/flow_runs.py +67 -35
- prefect/flows.py +3 -1
- prefect/futures.py +192 -22
- prefect/runner/runner.py +106 -39
- prefect/server/api/artifacts.py +5 -0
- prefect/server/api/automations.py +5 -0
- prefect/server/api/block_capabilities.py +5 -0
- prefect/server/api/block_documents.py +2 -0
- prefect/server/api/block_schemas.py +5 -0
- prefect/server/api/block_types.py +3 -1
- prefect/server/api/concurrency_limits.py +5 -0
- prefect/server/api/concurrency_limits_v2.py +5 -0
- prefect/server/api/deployments.py +2 -0
- prefect/server/api/events.py +5 -1
- prefect/server/api/flow_run_notification_policies.py +2 -0
- prefect/server/api/flow_run_states.py +2 -0
- prefect/server/api/flow_runs.py +2 -0
- prefect/server/api/flows.py +2 -0
- prefect/server/api/logs.py +5 -1
- prefect/server/api/task_run_states.py +2 -0
- prefect/server/api/task_runs.py +2 -0
- prefect/server/api/task_workers.py +5 -1
- prefect/server/api/variables.py +5 -0
- prefect/server/api/work_queues.py +2 -0
- prefect/server/api/workers.py +4 -0
- prefect/settings/profiles.py +6 -5
- prefect/task_worker.py +3 -3
- prefect/telemetry/instrumentation.py +2 -2
- prefect/utilities/templating.py +50 -11
- prefect/workers/base.py +3 -3
- prefect/workers/process.py +22 -319
- {prefect_client-3.2.7.dist-info → prefect_client-3.2.9.dist-info}/METADATA +2 -2
- {prefect_client-3.2.7.dist-info → prefect_client-3.2.9.dist-info}/RECORD +40 -39
- {prefect_client-3.2.7.dist-info → prefect_client-3.2.9.dist-info}/WHEEL +0 -0
- {prefect_client-3.2.7.dist-info → prefect_client-3.2.9.dist-info}/licenses/LICENSE +0 -0
prefect/flow_runs.py
CHANGED
@@ -1,7 +1,8 @@
+from __future__ import annotations
+
 from typing import (
     TYPE_CHECKING,
     Any,
-    Optional,
     Type,
     TypeVar,
     overload,
@@ -13,6 +14,7 @@ import anyio
 from prefect.client.orchestration import PrefectClient, get_client
 from prefect.client.schemas import FlowRun
 from prefect.client.schemas.objects import (
+    State,
     StateType,
 )
 from prefect.client.schemas.responses import SetStateStatus
@@ -21,6 +23,8 @@ from prefect.context import (
     FlowRunContext,
     TaskRunContext,
 )
+from prefect.events.clients import get_events_subscriber
+from prefect.events.filters import EventFilter, EventNameFilter, EventResourceFilter
 from prefect.exceptions import (
     Abort,
     FlowPauseTimeout,
@@ -52,9 +56,9 @@ if TYPE_CHECKING:
 @inject_client
 async def wait_for_flow_run(
     flow_run_id: UUID,
-    timeout:
-    poll_interval: int =
-    client:
+    timeout: int | None = 10800,
+    poll_interval: int | None = None,
+    client: "PrefectClient | None" = None,
     log_states: bool = False,
 ) -> FlowRun:
     """
@@ -63,7 +67,9 @@ async def wait_for_flow_run(
     Args:
         flow_run_id: The flow run ID for the flow run to wait for.
         timeout: The wait timeout in seconds. Defaults to 10800 (3 hours).
-        poll_interval:
+        poll_interval: Deprecated; polling is no longer used to wait for flow runs.
+        client: Optional Prefect client. If not provided, one will be injected.
+        log_states: If True, log state changes. Defaults to False.
 
     Returns:
         FlowRun: The finished flow run.
@@ -113,17 +119,37 @@ async def wait_for_flow_run(
 
         ```
     """
+    if poll_interval is not None:
+        get_logger().warning(
+            "The `poll_interval` argument is deprecated and will be removed in a future release. "
+        )
+
     assert client is not None, "Client injection failed"
     logger = get_logger()
+
+    event_filter = EventFilter(
+        event=EventNameFilter(prefix=["prefect.flow-run"]),
+        resource=EventResourceFilter(id=[f"prefect.flow-run.{flow_run_id}"]),
+    )
+
     with anyio.move_on_after(timeout):
-
+        async with get_events_subscriber(filter=event_filter) as subscriber:
             flow_run = await client.read_flow_run(flow_run_id)
-
-
-
-            if flow_state and flow_state.is_final():
+            if flow_run.state and flow_run.state.is_final():
+                if log_states:
+                    logger.info(f"Flow run is in state {flow_run.state.name!r}")
                 return flow_run
-
+
+            async for event in subscriber:
+                state_type = StateType(event.resource["prefect.state-type"])
+                state = State(type=state_type)
+
+                if log_states:
+                    logger.info(f"Flow run is in state {state.name!r}")
+
+                if state.is_final():
+                    return await client.read_flow_run(flow_run_id)
+
     raise FlowRunWaitTimeout(
         f"Flow run with ID {flow_run_id} exceeded watch timeout of {timeout} seconds"
     )
@@ -138,7 +164,7 @@ async def pause_flow_run(
     wait_for_input: None = None,
     timeout: int = 3600,
     poll_interval: int = 10,
-    key:
+    key: str | None = None,
 ) -> None: ...
 
 
@@ -147,17 +173,17 @@ async def pause_flow_run(
     wait_for_input: Type[T],
     timeout: int = 3600,
     poll_interval: int = 10,
-    key:
+    key: str | None = None,
 ) -> T: ...
 
 
 @sync_compatible
 async def pause_flow_run(
-    wait_for_input:
+    wait_for_input: Type[T] | None = None,
     timeout: int = 3600,
     poll_interval: int = 10,
-    key:
-) ->
+    key: str | None = None,
+) -> T | None:
     """
     Pauses the current flow run by blocking execution until resumed.
 
@@ -213,10 +239,13 @@ async def pause_flow_run(
 async def _in_process_pause(
     timeout: int = 3600,
     poll_interval: int = 10,
-    key:
-    client=None,
-    wait_for_input:
-) ->
+    key: str | None = None,
+    client: "PrefectClient | None" = None,
+    wait_for_input: Type[T] | None = None,
+) -> T | None:
+    if TYPE_CHECKING:
+        assert client is not None
+
     if TaskRunContext.get():
         raise RuntimeError("Cannot pause task runs.")
 
@@ -302,32 +331,32 @@ async def _in_process_pause(
 @overload
 async def suspend_flow_run(
     wait_for_input: None = None,
-    flow_run_id:
-    timeout:
-    key:
-    client:
+    flow_run_id: UUID | None = None,
+    timeout: int | None = 3600,
+    key: str | None = None,
+    client: "PrefectClient | None" = None,
 ) -> None: ...
 
 
 @overload
 async def suspend_flow_run(
     wait_for_input: Type[T],
-    flow_run_id:
-    timeout:
-    key:
-    client:
+    flow_run_id: UUID | None = None,
+    timeout: int | None = 3600,
+    key: str | None = None,
+    client: "PrefectClient | None" = None,
 ) -> T: ...
 
 
 @sync_compatible
 @inject_client
 async def suspend_flow_run(
-    wait_for_input:
-    flow_run_id:
-    timeout:
-    key:
-    client:
-) ->
+    wait_for_input: Type[T] | None = None,
+    flow_run_id: UUID | None = None,
+    timeout: int | None = 3600,
+    key: str | None = None,
+    client: "PrefectClient | None" = None,
+) -> T | None:
     """
     Suspends a flow run by stopping code execution until resumed.
 
@@ -357,6 +386,9 @@ async def suspend_flow_run(
         resumed with the input, the flow will resume and the input will be
         loaded and returned from this function.
     """
+    if TYPE_CHECKING:
+        assert client is not None
+
     context = FlowRunContext.get()
 
     if flow_run_id is None:
@@ -427,7 +459,7 @@ async def suspend_flow_run(
 
 @sync_compatible
 async def resume_flow_run(
-    flow_run_id: UUID, run_input:
+    flow_run_id: UUID, run_input: dict[str, Any] | None = None
 ) -> None:
     """
     Resumes a paused flow.
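For reference, a minimal usage sketch of the reworked `wait_for_flow_run` (the flow run ID below is hypothetical, and a reachable Prefect API with a run already being executed is assumed):

    import asyncio
    from uuid import UUID

    from prefect.flow_runs import wait_for_flow_run

    async def main() -> None:
        # Hypothetical ID of a flow run that a worker is already executing.
        flow_run_id = UUID("00000000-0000-0000-0000-000000000000")

        # As of 3.2.9, completion is detected by subscribing to prefect.flow-run.*
        # events rather than polling; passing poll_interval now only emits a
        # deprecation warning.
        flow_run = await wait_for_flow_run(flow_run_id, timeout=600, log_states=True)
        print(flow_run.state)

    asyncio.run(main())

If the run does not reach a final state within `timeout` seconds, `FlowRunWaitTimeout` is raised, matching the behavior shown in the diff above.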
prefect/flows.py
CHANGED
@@ -2353,7 +2353,9 @@ async def load_flow_from_flow_run(
     from prefect.deployments.steps.core import StepExecutionError, run_steps
 
     try:
-        output = await run_steps(
+        output = await run_steps(
+            deployment.pull_steps, print_function=run_logger.info
+        )
     except StepExecutionError as e:
         e = e.__cause__ or e
         run_logger.error(str(e))
prefect/futures.py
CHANGED
@@ -1,14 +1,18 @@
+from __future__ import annotations
+
 import abc
 import asyncio
 import concurrent.futures
 import threading
 import uuid
+import warnings
 from collections.abc import Generator, Iterator
 from functools import partial
-from typing import TYPE_CHECKING, Any, Callable, Generic
+from typing import TYPE_CHECKING, Any, Callable, Generic
 
 from typing_extensions import NamedTuple, Self, TypeVar
 
+from prefect._waiters import FlowRunWaiter
 from prefect.client.orchestration import get_client
 from prefect.exceptions import ObjectNotFound
 from prefect.logging.loggers import get_logger, get_run_logger
@@ -31,22 +35,42 @@ logger: "logging.Logger" = get_logger(__name__)
 class PrefectFuture(abc.ABC, Generic[R]):
     """
     Abstract base class for Prefect futures. A Prefect future is a handle to the
-    asynchronous execution of a
-    to complete and to retrieve the result of the
+    asynchronous execution of a run. It provides methods to wait for the
+    to complete and to retrieve the result of the run.
     """
 
     def __init__(self, task_run_id: uuid.UUID):
+        warnings.warn(
+            "The __init__ method of PrefectFuture is deprecated and will be removed in a future release. "
+            "If you are subclassing PrefectFuture, please implement the __init__ method in your subclass or "
+            "subclass PrefectTaskRunFuture instead.",
+            DeprecationWarning,
+        )
         self._task_run_id = task_run_id
-        self._final_state:
+        self._final_state: State[R] | None = None
 
     @property
     def task_run_id(self) -> uuid.UUID:
         """The ID of the task run associated with this future"""
+        warnings.warn(
+            "The task_run_id property of PrefectFuture is deprecated and will be removed in a future release. "
+            "If you are subclassing PrefectFuture, please implement the task_run_id property in your subclass or "
+            "subclass PrefectTaskRunFuture instead.",
+            DeprecationWarning,
+        )
+
         return self._task_run_id
 
     @property
     def state(self) -> State:
         """The current state of the task run associated with this future"""
+        warnings.warn(
+            "The state property of PrefectFuture is deprecated and will be removed in a future release. "
+            "If you are subclassing PrefectFuture, please implement the state property in your subclass or "
+            "subclass PrefectTaskRunFuture instead.",
+            DeprecationWarning,
+        )
+
         if self._final_state:
             return self._final_state
         client = get_client(sync_client=True)
@@ -59,7 +83,7 @@ class PrefectFuture(abc.ABC, Generic[R]):
         return task_run.state or Pending()
 
     @abc.abstractmethod
-    def wait(self, timeout:
+    def wait(self, timeout: float | None = None) -> None:
         ...
         """
         Wait for the task run to complete.
@@ -74,7 +98,7 @@ class PrefectFuture(abc.ABC, Generic[R]):
     @abc.abstractmethod
     def result(
         self,
-        timeout:
+        timeout: float | None = None,
         raise_on_failure: bool = True,
     ) -> R:
         ...
@@ -103,7 +127,36 @@ class PrefectFuture(abc.ABC, Generic[R]):
         ...
 
 
-class
+class PrefectTaskRunFuture(PrefectFuture[R]):
+    """
+    A Prefect future that represents the eventual execution of a task run.
+    """
+
+    def __init__(self, task_run_id: uuid.UUID):
+        self._task_run_id = task_run_id
+        self._final_state: State[R] | None = None
+
+    @property
+    def task_run_id(self) -> uuid.UUID:
+        """The ID of the task run associated with this future"""
+        return self._task_run_id
+
+    @property
+    def state(self) -> State:
+        """The current state of the task run associated with this future"""
+        if self._final_state:
+            return self._final_state
+        client = get_client(sync_client=True)
+        try:
+            task_run = client.read_task_run(task_run_id=self.task_run_id)
+        except ObjectNotFound:
+            # We'll be optimistic and assume this task will eventually start
+            # TODO: Consider using task run events to wait for the task to start
+            return Pending()
+        return task_run.state or Pending()
+
+
+class PrefectWrappedFuture(PrefectTaskRunFuture[R], abc.ABC, Generic[R, F]):
     """
     A Prefect future that wraps another future object.
 
@@ -140,7 +193,7 @@ class PrefectConcurrentFuture(PrefectWrappedFuture[R, concurrent.futures.Future[
     when the task run is submitted to a ThreadPoolExecutor.
     """
 
-    def wait(self, timeout:
+    def wait(self, timeout: float | None = None) -> None:
         try:
             result = self._wrapped_future.result(timeout=timeout)
         except concurrent.futures.TimeoutError:
@@ -150,7 +203,7 @@ class PrefectConcurrentFuture(PrefectWrappedFuture[R, concurrent.futures.Future[
 
     def result(
         self,
-        timeout:
+        timeout: float | None = None,
         raise_on_failure: bool = True,
     ) -> R:
         if not self._final_state:
@@ -190,7 +243,7 @@ class PrefectConcurrentFuture(PrefectWrappedFuture[R, concurrent.futures.Future[
         )
 
 
-class PrefectDistributedFuture(PrefectFuture[R]):
+class PrefectDistributedFuture(PrefectTaskRunFuture[R]):
     """
     Represents the result of a computation happening anywhere.
 
@@ -202,10 +255,10 @@ class PrefectDistributedFuture(PrefectFuture[R]):
     done_callbacks: list[Callable[[PrefectFuture[R]], None]] = []
     waiter = None
 
-    def wait(self, timeout:
+    def wait(self, timeout: float | None = None) -> None:
         return run_coro_as_sync(self.wait_async(timeout=timeout))
 
-    async def wait_async(self, timeout:
+    async def wait_async(self, timeout: float | None = None) -> None:
         if self._final_state:
             logger.debug(
                 "Final state already set for %s. Returning...", self.task_run_id
@@ -244,7 +297,7 @@ class PrefectDistributedFuture(PrefectFuture[R]):
 
     def result(
         self,
-        timeout:
+        timeout: float | None = None,
         raise_on_failure: bool = True,
     ) -> R:
         return run_coro_as_sync(
@@ -253,7 +306,7 @@ class PrefectDistributedFuture(PrefectFuture[R]):
 
     async def result_async(
         self,
-        timeout:
+        timeout: float | None = None,
         raise_on_failure: bool = True,
     ) -> R:
         if not self._final_state:
@@ -289,6 +342,123 @@ class PrefectDistributedFuture(PrefectFuture[R]):
         return hash(self.task_run_id)
 
 
+class PrefectFlowRunFuture(PrefectFuture[R]):
+    """
+    A Prefect future that represents the eventual execution of a flow run.
+    """
+
+    def __init__(self, flow_run_id: uuid.UUID):
+        self._flow_run_id = flow_run_id
+        self._final_state: State[R] | None = None
+
+    @property
+    def flow_run_id(self) -> uuid.UUID:
+        """The ID of the flow run associated with this future"""
+        return self._flow_run_id
+
+    @property
+    def state(self) -> State:
+        """The current state of the flow run associated with this future"""
+        if self._final_state:
+            return self._final_state
+        client = get_client(sync_client=True)
+        state = Pending()
+        try:
+            flow_run = client.read_flow_run(flow_run_id=self.flow_run_id)
+            if flow_run.state:
+                state = flow_run.state
+        except ObjectNotFound:
+            # We'll be optimistic and assume this flow run will eventually start
+            pass
+        return state
+
+    def wait(self, timeout: float | None = None) -> None:
+        return run_coro_as_sync(self.wait_async(timeout=timeout))
+
+    async def wait_async(self, timeout: float | None = None) -> None:
+        if self._final_state:
+            logger.debug(
+                "Final state already set for %s. Returning...", self.task_run_id
+            )
+            return
+
+        # Ask for the instance of FlowRunWaiter _now_ so that it's already running and
+        # can catch the completion event if it happens before we start listening for it.
+        FlowRunWaiter.instance()
+
+        # Read task run to see if it is still running
+        async with get_client() as client:
+            flow_run = await client.read_flow_run(flow_run_id=self._flow_run_id)
+            if flow_run.state is None:
+                raise RuntimeError(
+                    f"Flow run {self.flow_run_id} has no state which means it hasn't started yet."
+                )
+            if flow_run.state and flow_run.state.is_final():
+                logger.debug(
+                    "Flow run %s already finished. Returning...",
+                    self.flow_run_id,
+                )
+                self._final_state = flow_run.state
+                return
+
+            # If still running, wait for a completed event from the server
+            logger.debug(
+                "Waiting for completed event for flow run %s...",
+                self.flow_run_id,
+            )
+            await FlowRunWaiter.wait_for_flow_run(self._flow_run_id, timeout=timeout)
+            flow_run = await client.read_flow_run(flow_run_id=self._flow_run_id)
+            if flow_run.state and flow_run.state.is_final():
+                self._final_state = flow_run.state
+            return
+
+    def result(
+        self,
+        timeout: float | None = None,
+        raise_on_failure: bool = True,
+    ) -> R:
+        return run_coro_as_sync(
+            self.aresult(timeout=timeout, raise_on_failure=raise_on_failure)
+        )
+
+    async def aresult(
+        self,
+        timeout: float | None = None,
+        raise_on_failure: bool = True,
+    ) -> R:
+        if not self._final_state:
+            await self.wait_async(timeout=timeout)
+            if not self._final_state:
+                raise TimeoutError(
+                    f"Task run {self.task_run_id} did not complete within {timeout} seconds"
+                )
+
+        return await self._final_state.result(
+            raise_on_failure=raise_on_failure, fetch=True
+        )
+
+    def add_done_callback(self, fn: Callable[[PrefectFuture[R]], None]) -> None:
+        if self._final_state:
+            fn(self)
+            return
+        FlowRunWaiter.instance()
+        with get_client(sync_client=True) as client:
+            flow_run = client.read_flow_run(flow_run_id=self._flow_run_id)
+            if flow_run.state and flow_run.state.is_final():
+                self._final_state = flow_run.state
+                fn(self)
+                return
+        FlowRunWaiter.add_done_callback(self._flow_run_id, partial(fn, self))
+
+    def __eq__(self, other: Any) -> bool:
+        if not isinstance(other, PrefectFlowRunFuture):
+            return False
+        return self.flow_run_id == other.flow_run_id
+
+    def __hash__(self) -> int:
+        return hash(self.flow_run_id)
+
+
 class PrefectFutureList(list[PrefectFuture[R]], Iterator[PrefectFuture[R]]):
     """
     A list of Prefect futures.
@@ -297,7 +467,7 @@ class PrefectFutureList(list[PrefectFuture[R]], Iterator[PrefectFuture[R]]):
     in the list to complete and to retrieve the results of all task runs.
     """
 
-    def wait(self, timeout:
+    def wait(self, timeout: float | None = None) -> None:
         """
         Wait for all futures in the list to complete.
 
@@ -309,7 +479,7 @@ class PrefectFutureList(list[PrefectFuture[R]], Iterator[PrefectFuture[R]]):
 
     def result(
         self: Self,
-        timeout:
+        timeout: float | None = None,
         raise_on_failure: bool = True,
     ) -> list[R]:
         """
@@ -341,7 +511,7 @@ class PrefectFutureList(list[PrefectFuture[R]], Iterator[PrefectFuture[R]]):
 
 
 def as_completed(
-    futures: list[PrefectFuture[R]], timeout:
+    futures: list[PrefectFuture[R]], timeout: float | None = None
 ) -> Generator[PrefectFuture[R], None]:
     unique_futures: set[PrefectFuture[R]] = set(futures)
     total_futures = len(unique_futures)
@@ -392,7 +562,7 @@ class DoneAndNotDoneFutures(NamedTuple, Generic[R]):
 
 
 def wait(
-    futures: list[PrefectFuture[R]], timeout:
+    futures: list[PrefectFuture[R]], timeout: float | None = None
 ) -> DoneAndNotDoneFutures[R]:
     """
     Wait for the futures in the given sequence to complete.
@@ -442,8 +612,8 @@ def wait(
 
 
 def resolve_futures_to_states(
-    expr:
-) ->
+    expr: PrefectFuture[R] | Any,
+) -> PrefectFuture[R] | Any:
     """
     Given a Python built-in collection, recursively find `PrefectFutures` and build a
     new collection with the same structure with futures resolved to their final states.
@@ -454,8 +624,8 @@ def resolve_futures_to_states(
     futures: set[PrefectFuture[R]] = set()
 
     def _collect_futures(
-        futures: set[PrefectFuture[R]], expr: Any, context: Any
-    ) ->
+        futures: set[PrefectFuture[R]], expr: Any | PrefectFuture[R], context: Any
+    ) -> Any | PrefectFuture[R]:
         # Expressions inside quotes should not be traversed
         if isinstance(context.get("annotation"), quote):
             raise StopVisiting()