prefect-client 3.0.0rc7__py3-none-any.whl → 3.0.0rc9__py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions exactly as they appear in the public registry.
- prefect/__init__.py +75 -57
- prefect/_internal/compatibility/deprecated.py +53 -0
- prefect/_internal/compatibility/migration.py +8 -6
- prefect/_internal/integrations.py +7 -0
- prefect/blocks/core.py +1 -1
- prefect/client/__init__.py +4 -0
- prefect/client/orchestration.py +0 -3
- prefect/client/schemas/actions.py +0 -2
- prefect/client/schemas/objects.py +16 -8
- prefect/client/schemas/responses.py +0 -6
- prefect/client/utilities.py +4 -4
- prefect/deployments/steps/core.py +6 -0
- prefect/engine.py +4 -4
- prefect/flow_engine.py +30 -7
- prefect/flow_runs.py +1 -1
- prefect/flows.py +22 -20
- prefect/futures.py +78 -13
- prefect/logging/loggers.py +1 -1
- prefect/main.py +70 -0
- prefect/plugins.py +1 -64
- prefect/results.py +48 -71
- prefect/runner/storage.py +75 -6
- prefect/settings.py +13 -137
- prefect/states.py +54 -5
- prefect/task_engine.py +11 -0
- prefect/task_runners.py +111 -6
- prefect/task_worker.py +13 -2
- prefect/tasks.py +75 -28
- prefect/transactions.py +39 -1
- prefect/utilities/asyncutils.py +29 -5
- prefect/utilities/collections.py +1 -1
- {prefect_client-3.0.0rc7.dist-info → prefect_client-3.0.0rc9.dist-info}/METADATA +2 -1
- {prefect_client-3.0.0rc7.dist-info → prefect_client-3.0.0rc9.dist-info}/RECORD +36 -34
- {prefect_client-3.0.0rc7.dist-info → prefect_client-3.0.0rc9.dist-info}/LICENSE +0 -0
- {prefect_client-3.0.0rc7.dist-info → prefect_client-3.0.0rc9.dist-info}/WHEEL +0 -0
- {prefect_client-3.0.0rc7.dist-info → prefect_client-3.0.0rc9.dist-info}/top_level.txt +0 -0
prefect/settings.py
CHANGED
@@ -92,21 +92,6 @@ T = TypeVar("T")
 
 DEFAULT_PROFILES_PATH = Path(__file__).parent.joinpath("profiles.toml")
 
-# When we remove the experimental settings we also want to add them to the set of REMOVED_EXPERIMENTAL_FLAGS.
-# The reason for this is removing the settings entirely causes the CLI to crash for anyone who has them in one or more of their profiles.
-# Adding them to REMOVED_EXPERIMENTAL_FLAGS will make it so that the user is warned about it and they have time to take action.
-REMOVED_EXPERIMENTAL_FLAGS = {
-    "PREFECT_EXPERIMENTAL_ENABLE_ENHANCED_SCHEDULING_UI",
-    "PREFECT_EXPERIMENTAL_ENABLE_ENHANCED_DEPLOYMENT_PARAMETERS",
-    "PREFECT_EXPERIMENTAL_ENABLE_EVENTS_CLIENT",
-    "PREFECT_EXPERIMENTAL_ENABLE_EVENTS",
-    "PREFECT_EXPERIMENTAL_EVENTS",
-    "PREFECT_EXPERIMENTAL_WARN_EVENTS_CLIENT",
-    "PREFECT_EXPERIMENTAL_ENABLE_FLOW_RUN_INFRA_OVERRIDES",
-    "PREFECT_EXPERIMENTAL_WARN_FLOW_RUN_INFRA_OVERRIDES",
-    "PREFECT_EXPERIMENTAL_ENABLE_WORK_POOLS",
-}
-
 
 class Setting(Generic[T]):
     """
@@ -423,7 +408,7 @@ def default_result_storage_block_name(
     settings: Optional["Settings"] = None, value: Optional[str] = None
 ):
     """
-    `value_callback` for `
+    `value_callback` for `PREFECT_DEFAULT_RESULT_STORAGE_BLOCK` that sets the default
     value to the hostname of the machine.
     """
     if value is None:
@@ -959,41 +944,6 @@ interpreted and lead to incomplete output, e.g.
 `DROP TABLE [dbo].[SomeTable];"` outputs `DROP TABLE .[SomeTable];`.
 """
 
-PREFECT_TASK_INTROSPECTION_WARN_THRESHOLD = Setting(
-    float,
-    default=10.0,
-)
-"""
-Threshold time in seconds for logging a warning if task parameter introspection
-exceeds this duration. Parameter introspection can be a significant performance hit
-when the parameter is a large collection object, e.g. a large dictionary or DataFrame,
-and each element needs to be inspected. See `prefect.utilities.annotations.quote`
-for more details.
-Defaults to `10.0`.
-Set to `0` to disable logging the warning.
-"""
-
-PREFECT_AGENT_QUERY_INTERVAL = Setting(
-    float,
-    default=15,
-)
-"""
-The agent loop interval, in seconds. Agents will check for new runs this often.
-Defaults to `15`.
-"""
-
-PREFECT_AGENT_PREFETCH_SECONDS = Setting(
-    int,
-    default=15,
-)
-"""
-Agents will look for scheduled runs this many seconds in
-the future and attempt to run them. This accounts for any additional
-infrastructure spin-up time or latency in preparing a flow run. Note
-flow runs will not start before their scheduled time, even if they are
-prefetched. Defaults to `15`.
-"""
-
 PREFECT_ASYNC_FETCH_STATE_RESULT = Setting(bool, default=False)
 """
 Determines whether `State.result()` fetches results automatically or not.
@@ -1373,16 +1323,6 @@ PREFECT_API_MAX_FLOW_RUN_GRAPH_ARTIFACTS = Setting(int, default=10000)
 The maximum number of artifacts to show on a flow run graph on the v2 API
 """
 
-PREFECT_EXPERIMENTAL_ENABLE_ARTIFACTS_ON_FLOW_RUN_GRAPH = Setting(bool, default=True)
-"""
-Whether or not to enable artifacts on the flow run graph.
-"""
-
-PREFECT_EXPERIMENTAL_ENABLE_STATES_ON_FLOW_RUN_GRAPH = Setting(bool, default=True)
-"""
-Whether or not to enable flow run states on the flow run graph.
-"""
-
 PREFECT_EXPERIMENTAL_ENABLE_WORKERS = Setting(bool, default=True)
 """
 Whether or not to enable experimental Prefect workers.
@@ -1393,11 +1333,6 @@ PREFECT_EXPERIMENTAL_WARN_WORKERS = Setting(bool, default=False)
 Whether or not to warn when experimental Prefect workers are used.
 """
 
-PREFECT_EXPERIMENTAL_WARN_VISUALIZE = Setting(bool, default=False)
-"""
-Whether or not to warn when experimental Prefect visualize is used.
-"""
-
 PREFECT_EXPERIMENTAL_ENABLE_ENHANCED_CANCELLATION = Setting(bool, default=True)
 """
 Whether or not to enable experimental enhanced flow run cancellation.
@@ -1408,26 +1343,6 @@ PREFECT_EXPERIMENTAL_WARN_ENHANCED_CANCELLATION = Setting(bool, default=False)
 Whether or not to warn when experimental enhanced flow run cancellation is used.
 """
 
-PREFECT_EXPERIMENTAL_ENABLE_DEPLOYMENT_STATUS = Setting(bool, default=True)
-"""
-Whether or not to enable deployment status in the UI
-"""
-
-PREFECT_EXPERIMENTAL_WARN_DEPLOYMENT_STATUS = Setting(bool, default=False)
-"""
-Whether or not to warn when deployment status is used.
-"""
-
-PREFECT_EXPERIMENTAL_FLOW_RUN_INPUT = Setting(bool, default=False)
-"""
-Whether or not to enable flow run input.
-"""
-
-PREFECT_EXPERIMENTAL_WARN_FLOW_RUN_INPUT = Setting(bool, default=True)
-"""
-Whether or not to enable flow run input.
-"""
-
 
 # Prefect Events feature flags
 
@@ -1554,31 +1469,6 @@ PREFECT_EXPERIMENTAL_ENABLE_EXTRA_RUNNER_ENDPOINTS = Setting(bool, default=False
 Whether or not to enable experimental worker webserver endpoints.
 """
 
-PREFECT_EXPERIMENTAL_ENABLE_ARTIFACTS = Setting(bool, default=True)
-"""
-Whether or not to enable experimental Prefect artifacts.
-"""
-
-PREFECT_EXPERIMENTAL_WARN_ARTIFACTS = Setting(bool, default=False)
-"""
-Whether or not to warn when experimental Prefect artifacts are used.
-"""
-
-PREFECT_EXPERIMENTAL_ENABLE_WORKSPACE_DASHBOARD = Setting(bool, default=True)
-"""
-Whether or not to enable the experimental workspace dashboard.
-"""
-
-PREFECT_EXPERIMENTAL_WARN_WORKSPACE_DASHBOARD = Setting(bool, default=False)
-"""
-Whether or not to warn when the experimental workspace dashboard is enabled.
-"""
-
-PREFECT_EXPERIMENTAL_ENABLE_WORK_QUEUE_STATUS = Setting(bool, default=True)
-"""
-Whether or not to enable experimental work queue status in-place of work queue health.
-"""
-
 PREFECT_EXPERIMENTAL_DISABLE_SYNC_COMPAT = Setting(bool, default=False)
 """
 Whether or not to disable the sync_compatible decorator utility.
@@ -1663,11 +1553,6 @@ PREFECT_EVENTS_MAXIMUM_SIZE_BYTES = Setting(int, default=1_500_000)
 The maximum size of an Event when serialized to JSON
 """
 
-PREFECT_API_SERVICES_EVENT_LOGGER_ENABLED = Setting(bool, default=True)
-"""
-Whether or not to start the event debug logger service in the server application.
-"""
-
 PREFECT_API_SERVICES_TRIGGERS_ENABLED = Setting(bool, default=True)
 """
 Whether or not to start the triggers service in the server application.
@@ -1715,6 +1600,18 @@ PREFECT_API_EVENTS_RELATED_RESOURCE_CACHE_TTL = Setting(
 How long to cache related resource data for emitting server-side vents
 """
 
+PREFECT_EVENTS_MAXIMUM_WEBSOCKET_BACKFILL = Setting(
+    timedelta, default=timedelta(minutes=15)
+)
+"""
+The maximum range to look back for backfilling events for a websocket subscriber
+"""
+
+PREFECT_EVENTS_WEBSOCKET_BACKFILL_PAGE_SIZE = Setting(int, default=250, gt=0)
+"""
+The page size for the queries to backfill events for websocket subscribers
+"""
+
 
 # Deprecated settings ------------------------------------------------------------------
 
@@ -2215,26 +2112,6 @@ class ProfilesCollection:
         )
 
 
-def _handle_removed_flags(
-    profile_name: str, settings: Dict[str, Any]
-) -> Dict[str, Any]:
-    to_remove = [name for name in settings if name in REMOVED_EXPERIMENTAL_FLAGS]
-
-    for name in to_remove:
-        warnings.warn(
-            (
-                f"Experimental flag {name!r} has been removed, please "
-                f"update your {profile_name!r} profile."
-            ),
-            UserWarning,
-            stacklevel=3,
-        )
-
-        settings.pop(name)
-
-    return settings
-
-
 def _read_profiles_from(path: Path) -> ProfilesCollection:
     """
     Read profiles from a path into a new `ProfilesCollection`.
@@ -2253,7 +2130,6 @@ def _read_profiles_from(path: Path) -> ProfilesCollection:
 
     profiles = []
     for name, settings in raw_profiles.items():
-        settings = _handle_removed_flags(name, settings)
         profiles.append(Profile(name=name, settings=settings, source=path))
 
     return ProfilesCollection(profiles, active=active_profile)
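The two settings added in this file, PREFECT_EVENTS_MAXIMUM_WEBSOCKET_BACKFILL and PREFECT_EVENTS_WEBSOCKET_BACKFILL_PAGE_SIZE, behave like any other Setting. A minimal sketch of overriding them, assuming the existing prefect.settings.temporary_settings helper (which is not shown in this diff):

    from datetime import timedelta

    from prefect.settings import (
        PREFECT_EVENTS_MAXIMUM_WEBSOCKET_BACKFILL,
        PREFECT_EVENTS_WEBSOCKET_BACKFILL_PAGE_SIZE,
        temporary_settings,
    )

    # Temporarily shrink the websocket backfill window and page size; the
    # previous values are restored when the context manager exits.
    with temporary_settings(
        updates={
            PREFECT_EVENTS_MAXIMUM_WEBSOCKET_BACKFILL: timedelta(minutes=5),
            PREFECT_EVENTS_WEBSOCKET_BACKFILL_PAGE_SIZE: 100,
        }
    ):
        ...

Like other Prefect settings, both can also be set through environment variables of the same name.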
prefect/states.py
CHANGED
@@ -1,3 +1,4 @@
+import asyncio
 import datetime
 import sys
 import traceback
@@ -22,15 +23,21 @@ from prefect.exceptions import (
     TerminationSignal,
     UnfinishedRun,
 )
+from prefect.logging.loggers import get_logger
 from prefect.results import BaseResult, R, ResultFactory
 from prefect.settings import PREFECT_ASYNC_FETCH_STATE_RESULT
 from prefect.utilities.annotations import BaseAnnotation
 from prefect.utilities.asyncutils import in_async_main_thread, sync_compatible
 from prefect.utilities.collections import ensure_iterable
 
+logger = get_logger("states")
+
 
 def get_state_result(
-    state: State[R],
+    state: State[R],
+    raise_on_failure: bool = True,
+    fetch: Optional[bool] = None,
+    retry_result_failure: bool = True,
 ) -> R:
     """
     Get the result from a state.
@@ -58,11 +65,50 @@ def get_state_result(
 
         return state.data
     else:
-        return _get_state_result(
+        return _get_state_result(
+            state,
+            raise_on_failure=raise_on_failure,
+            retry_result_failure=retry_result_failure,
+        )
+
+
+RESULT_READ_MAXIMUM_ATTEMPTS = 10
+RESULT_READ_RETRY_DELAY = 0.25
+
+
+async def _get_state_result_data_with_retries(
+    state: State[R], retry_result_failure: bool = True
+) -> R:
+    # Results may be written asynchronously, possibly after their corresponding
+    # state has been written and events have been emitted, so we should give some
+    # grace here about missing results. The exception below could come in the form
+    # of a missing file, a short read, or other types of errors depending on the
+    # result storage backend.
+    if retry_result_failure is False:
+        max_attempts = 1
+    else:
+        max_attempts = RESULT_READ_MAXIMUM_ATTEMPTS
+
+    for i in range(1, max_attempts + 1):
+        try:
+            return await state.data.get()
+        except Exception as e:
+            if i == max_attempts:
+                raise
+            logger.debug(
+                "Exception %r while reading result, retry %s/%s in %ss...",
+                e,
+                i,
+                max_attempts,
+                RESULT_READ_RETRY_DELAY,
+            )
+            await asyncio.sleep(RESULT_READ_RETRY_DELAY)
 
 
 @sync_compatible
-async def _get_state_result(
+async def _get_state_result(
+    state: State[R], raise_on_failure: bool, retry_result_failure: bool = True
+) -> R:
     """
     Internal implementation for `get_state_result` without async backwards compatibility
     """
@@ -81,7 +127,10 @@ async def _get_state_result(state: State[R], raise_on_failure: bool) -> R:
         raise await get_state_exception(state)
 
     if isinstance(state.data, BaseResult):
-        result = await
+        result = await _get_state_result_data_with_retries(
+            state, retry_result_failure=retry_result_failure
+        )
+
     elif state.data is None:
         if state.is_failed() or state.is_crashed() or state.is_cancelled():
             return await get_state_exception(state)
@@ -352,7 +401,7 @@ async def get_state_exception(state: State) -> BaseException:
         raise ValueError(f"Expected failed or crashed state got {state!r}.")
 
     if isinstance(state.data, BaseResult):
-        result = await state
+        result = await _get_state_result_data_with_retries(state)
     elif state.data is None:
         result = None
     else:
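The core of this change is a bounded retry around reading a persisted result, to tolerate results that are written slightly after their state. Stripped of Prefect specifics, the pattern is the sketch below (read_with_retries and read_result are illustrative placeholders, not Prefect APIs):

    import asyncio

    MAX_ATTEMPTS = 10   # mirrors RESULT_READ_MAXIMUM_ATTEMPTS above
    RETRY_DELAY = 0.25  # seconds, mirrors RESULT_READ_RETRY_DELAY above

    async def read_with_retries(read_result):
        # Retry a read that may transiently fail (missing file, short read, ...)
        # and re-raise only once the attempt budget is exhausted.
        for attempt in range(1, MAX_ATTEMPTS + 1):
            try:
                return await read_result()
            except Exception:
                if attempt == MAX_ATTEMPTS:
                    raise
                await asyncio.sleep(RETRY_DELAY)

Callers that want to fail fast pass retry_result_failure=False, which collapses the loop to a single attempt.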
prefect/task_engine.py
CHANGED
@@ -249,6 +249,16 @@ class TaskRunEngine(Generic[P, R]):
         new_state = Running()
         state = self.set_state(new_state)
 
+        # TODO: this is temporary until the API stops rejecting state transitions
+        # and the client / transaction store becomes the source of truth
+        # this is a bandaid caused by the API storing a Completed state with a bad
+        # result reference that no longer exists
+        if state.is_completed():
+            try:
+                state.result(retry_result_failure=False, _sync=True)
+            except Exception:
+                state = self.set_state(new_state, force=True)
+
         BACKOFF_MAX = 10
         backoff_count = 0
 
@@ -592,6 +602,7 @@ class TaskRunEngine(Generic[P, R]):
             key=self.compute_transaction_key(),
             store=ResultFactoryStore(result_factory=result_factory),
             overwrite=overwrite,
+            logger=self.logger,
         ) as txn:
             yield txn
 
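The guard added to the first hunk uses the new retry_result_failure=False escape hatch to detect a Completed state whose persisted result can no longer be read, and forces a fresh run in that case. A rough caller-side equivalent of that check, assuming a state object retrieved from the API:

    from prefect.client.schemas.objects import State

    def completed_result_is_readable(state: State) -> bool:
        # Only Completed states carry a result reference worth checking here.
        if not state.is_completed():
            return False
        try:
            # Fail fast instead of waiting out the read-retry grace period.
            state.result(retry_result_failure=False, _sync=True)
            return True
        except Exception:
            return False

The second hunk threads the engine's logger into the transaction, so transaction log messages flow through the task run's logger.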
prefect/task_runners.py
CHANGED
@@ -4,7 +4,18 @@ import sys
 import uuid
 from concurrent.futures import ThreadPoolExecutor
 from contextvars import copy_context
-from typing import
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Coroutine,
+    Dict,
+    Generic,
+    Iterable,
+    List,
+    Optional,
+    Set,
+    overload,
+)
 
 from typing_extensions import ParamSpec, Self, TypeVar
 
@@ -14,6 +25,7 @@ from prefect.futures import (
     PrefectConcurrentFuture,
     PrefectDistributedFuture,
     PrefectFuture,
+    PrefectFutureList,
 )
 from prefect.logging.loggers import get_logger, get_run_logger
 from prefect.utilities.annotations import allow_failure, quote, unmapped
@@ -30,6 +42,7 @@ if TYPE_CHECKING:
 P = ParamSpec("P")
 T = TypeVar("T")
 F = TypeVar("F", bound=PrefectFuture)
+R = TypeVar("R")
 
 
 class TaskRunner(abc.ABC, Generic[F]):
@@ -85,7 +98,7 @@ class TaskRunner(abc.ABC, Generic[F]):
         task: "Task",
         parameters: Dict[str, Any],
         wait_for: Optional[Iterable[PrefectFuture]] = None,
-    ) ->
+    ) -> PrefectFutureList[F]:
         """
         Submit multiple tasks to the task run engine.
 
@@ -157,7 +170,7 @@ class TaskRunner(abc.ABC, Generic[F]):
 
         map_length = list(lengths)[0]
 
-        futures = []
+        futures: List[PrefectFuture] = []
         for i in range(map_length):
             call_parameters = {
                 key: value[i] for key, value in iterable_parameters.items()
@@ -187,7 +200,7 @@ class TaskRunner(abc.ABC, Generic[F]):
                 )
             )
 
-        return futures
+        return PrefectFutureList(futures)
 
     def __enter__(self):
         if self._started:
@@ -211,13 +224,33 @@ class ThreadPoolTaskRunner(TaskRunner[PrefectConcurrentFuture]):
     def duplicate(self) -> "ThreadPoolTaskRunner":
         return type(self)(max_workers=self._max_workers)
 
+    @overload
+    def submit(
+        self,
+        task: "Task[P, Coroutine[Any, Any, R]]",
+        parameters: Dict[str, Any],
+        wait_for: Optional[Iterable[PrefectFuture]] = None,
+        dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None,
+    ) -> PrefectConcurrentFuture[R]:
+        ...
+
+    @overload
+    def submit(
+        self,
+        task: "Task[Any, R]",
+        parameters: Dict[str, Any],
+        wait_for: Optional[Iterable[PrefectFuture]] = None,
+        dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None,
+    ) -> PrefectConcurrentFuture[R]:
+        ...
+
     def submit(
         self,
         task: "Task",
         parameters: Dict[str, Any],
         wait_for: Optional[Iterable[PrefectFuture]] = None,
         dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None,
-    )
+    ):
         """
         Submit a task to the task run engine running in a separate thread.
 
@@ -278,6 +311,32 @@ class ThreadPoolTaskRunner(TaskRunner[PrefectConcurrentFuture]):
         )
         return prefect_future
 
+    @overload
+    def map(
+        self,
+        task: "Task[P, Coroutine[Any, Any, R]]",
+        parameters: Dict[str, Any],
+        wait_for: Optional[Iterable[PrefectFuture]] = None,
+    ) -> PrefectFutureList[PrefectConcurrentFuture[R]]:
+        ...
+
+    @overload
+    def map(
+        self,
+        task: "Task[Any, R]",
+        parameters: Dict[str, Any],
+        wait_for: Optional[Iterable[PrefectFuture]] = None,
+    ) -> PrefectFutureList[PrefectConcurrentFuture[R]]:
+        ...
+
+    def map(
+        self,
+        task: "Task",
+        parameters: Dict[str, Any],
+        wait_for: Optional[Iterable[PrefectFuture]] = None,
+    ):
+        return super().map(task, parameters, wait_for)
+
     def __enter__(self):
         super().__enter__()
         self._executor = ThreadPoolExecutor(max_workers=self._max_workers)
@@ -306,13 +365,33 @@ class PrefectTaskRunner(TaskRunner[PrefectDistributedFuture]):
     def duplicate(self) -> "PrefectTaskRunner":
         return type(self)()
 
+    @overload
+    def submit(
+        self,
+        task: "Task[P, Coroutine[Any, Any, R]]",
+        parameters: Dict[str, Any],
+        wait_for: Optional[Iterable[PrefectFuture]] = None,
+        dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None,
+    ) -> PrefectDistributedFuture[R]:
+        ...
+
+    @overload
+    def submit(
+        self,
+        task: "Task[Any, R]",
+        parameters: Dict[str, Any],
+        wait_for: Optional[Iterable[PrefectFuture]] = None,
+        dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None,
+    ) -> PrefectDistributedFuture[R]:
+        ...
+
     def submit(
        self,
        task: "Task",
        parameters: Dict[str, Any],
        wait_for: Optional[Iterable[PrefectFuture]] = None,
        dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None,
-    )
+    ):
         """
         Submit a task to the task run engine running in a separate thread.
 
@@ -342,3 +421,29 @@ class PrefectTaskRunner(TaskRunner[PrefectDistributedFuture]):
         return task.apply_async(
             kwargs=parameters, wait_for=wait_for, dependencies=dependencies
         )
+
+    @overload
+    def map(
+        self,
+        task: "Task[P, Coroutine[Any, Any, R]]",
+        parameters: Dict[str, Any],
+        wait_for: Optional[Iterable[PrefectFuture]] = None,
+    ) -> PrefectFutureList[PrefectDistributedFuture[R]]:
+        ...
+
+    @overload
+    def map(
+        self,
+        task: "Task[Any, R]",
+        parameters: Dict[str, Any],
+        wait_for: Optional[Iterable[PrefectFuture]] = None,
+    ) -> PrefectFutureList[PrefectDistributedFuture[R]]:
+        ...
+
+    def map(
+        self,
+        task: "Task",
+        parameters: Dict[str, Any],
+        wait_for: Optional[Iterable[PrefectFuture]] = None,
+    ):
+        return super().map(task, parameters, wait_for)
prefect/task_worker.py
CHANGED
@@ -7,7 +7,7 @@ import sys
 from concurrent.futures import ThreadPoolExecutor
 from contextlib import AsyncExitStack
 from contextvars import copy_context
-from typing import
+from typing import Optional
 from uuid import UUID
 
 import anyio
@@ -20,6 +20,7 @@ from websockets.exceptions import InvalidStatusCode
 
 from prefect import Task
 from prefect._internal.concurrency.api import create_call, from_sync
+from prefect.cache_policies import DEFAULT, NONE
 from prefect.client.orchestration import get_client
 from prefect.client.schemas.objects import TaskRun
 from prefect.client.subscriptions import Subscription
@@ -32,6 +33,7 @@ from prefect.settings import (
 )
 from prefect.states import Pending
 from prefect.task_engine import run_task_async, run_task_sync
+from prefect.utilities.annotations import NotSet
 from prefect.utilities.asyncutils import asyncnullcontext, sync_compatible
 from prefect.utilities.engine import emit_task_run_state_change_event, propose_state
 from prefect.utilities.processutils import _register_signal
@@ -76,7 +78,16 @@ class TaskWorker:
         *tasks: Task,
         limit: Optional[int] = 10,
     ):
-        self.tasks
+        self.tasks = []
+        for t in tasks:
+            if isinstance(t, Task):
+                if t.cache_policy in [None, NONE, NotSet]:
+                    self.tasks.append(
+                        t.with_options(persist_result=True, cache_policy=DEFAULT)
+                    )
+                else:
+                    self.tasks.append(t.with_options(persist_result=True))
+
         self.task_keys = set(t.task_key for t in tasks if isinstance(t, Task))
 
         self._started_at: Optional[pendulum.DateTime] = None
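
With this change, every task handed to a TaskWorker is re-registered with persist_result=True, and tasks that have no explicit cache policy also get the DEFAULT policy. A minimal serving sketch, assuming the prefect.task_worker.serve entry point (not part of this diff):

    from prefect import task
    from prefect.task_worker import serve

    @task
    def add(x: int, y: int) -> int:
        return x + y

    if __name__ == "__main__":
        # The worker keeps a copy of `add` configured with persist_result=True
        # (and cache_policy=DEFAULT, since `add` sets none), so results of runs
        # submitted to this worker are persisted and can be read back later.
        serve(add)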
|