prefect-client 2.16.2__py3-none-any.whl → 2.16.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/_internal/concurrency/services.py +5 -0
- prefect/_internal/concurrency/threads.py +3 -0
- prefect/deployments/deployments.py +29 -6
- prefect/deployments/runner.py +15 -33
- prefect/deployments/schedules.py +37 -0
- prefect/engine.py +59 -22
- prefect/events/schemas.py +253 -43
- prefect/flows.py +26 -2
- prefect/runner/runner.py +2 -2
- prefect/server/api/collections_data/views/aggregate-worker-metadata.json +9 -2
- prefect/settings.py +34 -9
- prefect/task_engine.py +16 -8
- prefect/tasks.py +39 -4
- prefect/utilities/schema_tools/__init__.py +0 -0
- prefect/utilities/schema_tools/hydration.py +218 -0
- prefect/utilities/schema_tools/validation.py +240 -0
- {prefect_client-2.16.2.dist-info → prefect_client-2.16.3.dist-info}/METADATA +52 -49
- {prefect_client-2.16.2.dist-info → prefect_client-2.16.3.dist-info}/RECORD +21 -17
- {prefect_client-2.16.2.dist-info → prefect_client-2.16.3.dist-info}/LICENSE +0 -0
- {prefect_client-2.16.2.dist-info → prefect_client-2.16.3.dist-info}/WHEEL +0 -0
- {prefect_client-2.16.2.dist-info → prefect_client-2.16.3.dist-info}/top_level.txt +0 -0
prefect/_internal/concurrency/services.py
CHANGED
@@ -91,6 +91,11 @@ class QueueService(abc.ABC, Generic[T]):
         self._remove_instance()
         self._stopped = True
 
+        # Allow asyncio task to be garbage-collected. Its context may contain
+        # references to all Prefect Task calls made during a flow run, through
+        # EngineContext. Issue #10338.
+        self._task = None
+
         # Signal completion to the loop
         self._queue.put_nowait(None)
 
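The change above drops the service's reference to its finished asyncio task so that the task's captured contextvars context, and anything reachable from it such as Prefect's EngineContext, can be garbage-collected. A minimal, self-contained sketch of the mechanism, assuming nothing about Prefect internals (Service and EngineState are illustrative names, not Prefect APIs):

import asyncio
import contextvars
import gc
import weakref


class EngineState:
    """Stands in for a large per-run object (e.g. an EngineContext)."""


current_state = contextvars.ContextVar("current_state")


class Service:
    def __init__(self):
        self._task = None

    async def run(self):
        # create_task() copies the current contextvars.Context and the Task
        # keeps that copy for its whole lifetime, so anything bound to a
        # ContextVar stays alive as long as the finished Task is referenced.
        self._task = asyncio.create_task(asyncio.sleep(0))
        await self._task

    def drop_task(self):
        # Releasing the finished task releases its captured context too.
        self._task = None


async def main():
    service = Service()
    state = EngineState()
    ref = weakref.ref(state)
    current_state.set(state)
    await service.run()

    # Remove our own references; only the finished task's copied context
    # still points at the EngineState instance.
    current_state.set(None)
    del state
    gc.collect()
    print("collected before dropping task:", ref() is None)  # expected: False

    service.drop_task()
    gc.collect()
    print("collected after dropping task:", ref() is None)   # expected: True


asyncio.run(main())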
prefect/_internal/concurrency/threads.py
CHANGED
@@ -214,6 +214,9 @@ class EventLoopThread(Portal):
         for call in self._on_shutdown:
             await self._run_call(call)
 
+        # Empty the list to allow calls to be garbage collected. Issue #10338.
+        self._on_shutdown = []
+
     async def _run_call(self, call: Call) -> None:
         task = call.run()
         if task is not None:
prefect/deployments/deployments.py
CHANGED
@@ -36,6 +36,10 @@ from prefect.client.schemas.objects import (
 from prefect.client.schemas.schedules import SCHEDULE_TYPES
 from prefect.client.utilities import inject_client
 from prefect.context import FlowRunContext, PrefectObjectRegistry, TaskRunContext
+from prefect.deployments.schedules import (
+    FlexibleScheduleList,
+    normalize_to_minimal_deployment_schedules,
+)
 from prefect.deployments.steps.core import run_steps
 from prefect.events.schemas import DeploymentTrigger
 from prefect.exceptions import (
@@ -649,11 +653,17 @@ class Deployment(BaseModel):
         cls._validate_schedule(value)
         return value
 
-    @validator("schedules")
-    def validate_schedules(cls, value):
-        for schedule in value:
-            cls._validate_schedule(schedule.schedule)
-        return value
+    @root_validator(pre=True)
+    def validate_schedules(cls, values):
+        if "schedules" in values:
+            values["schedules"] = normalize_to_minimal_deployment_schedules(
+                values["schedules"]
+            )
+
+            for schedule in values["schedules"]:
+                cls._validate_schedule(schedule.schedule)
+
+        return values
 
     @classmethod
     @sync_compatible
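With the root validator above, `Deployment` accepts schedules as `MinimalDeploymentSchedule` objects, dicts, or raw schedule types and normalizes them before validating each one. An illustrative sketch (the deployment name, flow name, and cron strings are made up):

from prefect.client.schemas.schedules import CronSchedule
from prefect.deployments import Deployment

# Dicts and raw schedule objects are normalized to MinimalDeploymentSchedule
# by the validate_schedules root validator shown above.
deployment = Deployment(
    name="nightly-report",
    flow_name="report-flow",
    schedules=[
        CronSchedule(cron="0 2 * * *"),                                  # schedule object
        {"schedule": CronSchedule(cron="0 14 * * *"), "active": False},  # dict form
    ],
)

for ds in deployment.schedules:
    print(type(ds).__name__, ds.active)
# MinimalDeploymentSchedule True
# MinimalDeploymentSchedule False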
@@ -936,6 +946,7 @@ class Deployment(BaseModel):
         ignore_file: str = ".prefectignore",
         apply: bool = False,
         load_existing: bool = True,
+        schedules: Optional[FlexibleScheduleList] = None,
         **kwargs,
     ) -> "Deployment":
         """
@@ -955,6 +966,14 @@
             load_existing: if True, load any settings that may already be configured for
                 the named deployment server-side (e.g., schedules, default parameter
                 values, etc.)
+            schedules: An optional list of schedules. Each item in the list can be:
+                - An instance of `MinimalDeploymentSchedule`.
+                - A dictionary with a `schedule` key, and optionally, an
+                    `active` key. The `schedule` key should correspond to a
+                    schedule type, and `active` is a boolean indicating whether
+                    the schedule is active or not.
+                - An instance of one of the predefined schedule types:
+                    `IntervalSchedule`, `CronSchedule`, or `RRuleSchedule`.
             **kwargs: other keyword arguments to pass to the constructor for the
                 `Deployment` class
         """
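For instance, the new `schedules` argument documented above could be passed to `build_from_flow` like this (a sketch: the flow, names, and schedule values are invented, and `load_existing=False` is used so the call does not need to look up existing server-side settings):

from datetime import timedelta

from prefect import flow
from prefect.client.schemas.schedules import CronSchedule, IntervalSchedule
from prefect.deployments import Deployment


@flow
def nightly_report():
    ...


# build_from_flow is sync-compatible, so it can be called from synchronous code.
deployment = Deployment.build_from_flow(
    flow=nightly_report,
    name="nightly-report",
    schedules=[
        IntervalSchedule(interval=timedelta(hours=1)),                  # schedule object
        {"schedule": CronSchedule(cron="0 0 * * *"), "active": False},  # dict form
    ],
    load_existing=False,
    apply=False,
)
print([type(s).__name__ for s in deployment.schedules])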
@@ -963,7 +982,11 @@
 
         # note that `deployment.load` only updates settings that were *not*
         # provided at initialization
-        deployment = cls(name=name, **kwargs)
+        deployment = cls(
+            name=name,
+            schedules=schedules,
+            **kwargs,
+        )
         deployment.flow_name = flow.name
         if not deployment.entrypoint:
             ## first see if an entrypoint can be determined
prefect/deployments/runner.py
CHANGED
@@ -34,7 +34,7 @@ import importlib
 import tempfile
 from datetime import datetime, timedelta
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union, get_args
+from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Union
 from uuid import UUID
 
 import pendulum
@@ -63,6 +63,11 @@ from prefect.client.schemas.schedules import (
     SCHEDULE_TYPES,
     construct_schedule,
 )
+from prefect.deployments.schedules import (
+    FlexibleScheduleList,
+    create_minimal_deployment_schedule,
+    normalize_to_minimal_deployment_schedules,
+)
 from prefect.events.schemas import DeploymentTrigger
 from prefect.exceptions import (
     ObjectNotFound,
@@ -83,18 +88,9 @@ from prefect.utilities.slugify import slugify
 if TYPE_CHECKING:
     from prefect.flows import Flow
 
-    FlexibleScheduleList = Union[MinimalDeploymentSchedule, dict, SCHEDULE_TYPES]
-
 __all__ = ["RunnerDeployment"]
 
 
-def _to_deployment_schedule(
-    schedule: Optional[SCHEDULE_TYPES] = None,
-    active: Optional[bool] = True,
-) -> MinimalDeploymentSchedule:
-    return MinimalDeploymentSchedule(schedule=schedule, active=active)
-
-
 class DeploymentApplyError(RuntimeError):
     """
     Raised when an error occurs while applying a deployment.
@@ -264,23 +260,9 @@ class RunnerDeployment(BaseModel):
         schedules = values.get("schedules")
 
         if schedules is None and schedule is not None:
-            values["schedules"] = [_to_deployment_schedule(schedule)]
+            values["schedules"] = [create_minimal_deployment_schedule(schedule)]
         elif schedules is not None and len(schedules) > 0:
-            reconciled = []
-            for obj in schedules:
-                if isinstance(obj, get_args(SCHEDULE_TYPES)):
-                    reconciled.append(_to_deployment_schedule(obj))
-                elif isinstance(obj, dict):
-                    reconciled.append(_to_deployment_schedule(**obj))
-                elif isinstance(obj, MinimalDeploymentSchedule):
-                    reconciled.append(obj)
-                else:
-                    raise ValueError(
-                        "Invalid schedule provided. Must be a schedule object, a dict,"
-                        " or a MinimalDeploymentSchedule."
-                    )
-
-            values["schedules"] = reconciled
+            values["schedules"] = normalize_to_minimal_deployment_schedules(schedules)
 
         return values
 
@@ -389,8 +371,8 @@
         rrule: Optional[Union[Iterable[str], str]] = None,
         timezone: Optional[str] = None,
         schedule: Optional[SCHEDULE_TYPES] = None,
-        schedules: Optional["FlexibleScheduleList"] = None,
-    ) -> Union[List[MinimalDeploymentSchedule], "FlexibleScheduleList"]:
+        schedules: Optional[FlexibleScheduleList] = None,
+    ) -> Union[List[MinimalDeploymentSchedule], FlexibleScheduleList]:
         """
         Construct a schedule or schedules from the provided arguments.
 
@@ -448,7 +430,7 @@
                 value = [value]
 
             return [
-                _to_deployment_schedule(
+                create_minimal_deployment_schedule(
                     construct_schedule(
                         **{
                             schedule_type: v,
@@ -460,7 +442,7 @@
                 for v in value
             ]
         else:
-            return [_to_deployment_schedule(schedule)]
+            return [create_minimal_deployment_schedule(schedule)]
 
     def _set_defaults_from_flow(self, flow: "Flow"):
         self._parameter_openapi_schema = parameter_schema(flow)
@@ -481,7 +463,7 @@
         cron: Optional[Union[Iterable[str], str]] = None,
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
-        schedules: Optional["FlexibleScheduleList"] = None,
+        schedules: Optional[FlexibleScheduleList] = None,
         schedule: Optional[SCHEDULE_TYPES] = None,
         is_schedule_active: Optional[bool] = None,
         parameters: Optional[dict] = None,
@@ -617,7 +599,7 @@
         cron: Optional[Union[Iterable[str], str]] = None,
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
-        schedules: Optional["FlexibleScheduleList"] = None,
+        schedules: Optional[FlexibleScheduleList] = None,
         schedule: Optional[SCHEDULE_TYPES] = None,
         is_schedule_active: Optional[bool] = None,
         parameters: Optional[dict] = None,
@@ -715,7 +697,7 @@
         cron: Optional[Union[Iterable[str], str]] = None,
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
-        schedules: Optional["FlexibleScheduleList"] = None,
+        schedules: Optional[FlexibleScheduleList] = None,
        schedule: Optional[SCHEDULE_TYPES] = None,
         is_schedule_active: Optional[bool] = None,
         parameters: Optional[dict] = None,
prefect/deployments/schedules.py
ADDED
@@ -0,0 +1,37 @@
+from typing import List, Optional, Sequence, Union, get_args
+
+from prefect.client.schemas.objects import MinimalDeploymentSchedule
+from prefect.client.schemas.schedules import SCHEDULE_TYPES
+
+FlexibleScheduleList = Sequence[Union[MinimalDeploymentSchedule, dict, SCHEDULE_TYPES]]
+
+
+def create_minimal_deployment_schedule(
+    schedule: SCHEDULE_TYPES,
+    active: Optional[bool] = True,
+) -> MinimalDeploymentSchedule:
+    return MinimalDeploymentSchedule(
+        schedule=schedule,
+        active=active if active is not None else True,
+    )
+
+
+def normalize_to_minimal_deployment_schedules(
+    schedules: Optional[FlexibleScheduleList],
+) -> List[MinimalDeploymentSchedule]:
+    normalized = []
+    if schedules is not None:
+        for obj in schedules:
+            if isinstance(obj, get_args(SCHEDULE_TYPES)):
+                normalized.append(create_minimal_deployment_schedule(obj))
+            elif isinstance(obj, dict):
+                normalized.append(create_minimal_deployment_schedule(**obj))
+            elif isinstance(obj, MinimalDeploymentSchedule):
+                normalized.append(obj)
+            else:
+                raise ValueError(
+                    "Invalid schedule provided. Must be a schedule object, a dict,"
+                    " or a MinimalDeploymentSchedule."
+                )
+
+    return normalized
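Usage of the new helpers follows directly from the module above (the schedule values here are illustrative):

from datetime import timedelta

from prefect.client.schemas.schedules import CronSchedule, IntervalSchedule
from prefect.deployments.schedules import (
    create_minimal_deployment_schedule,
    normalize_to_minimal_deployment_schedules,
)

# Mixed input shapes are all normalized to MinimalDeploymentSchedule.
normalized = normalize_to_minimal_deployment_schedules(
    [
        CronSchedule(cron="0 0 * * *"),                                   # schedule object
        {"schedule": IntervalSchedule(interval=timedelta(hours=1)), "active": False},
        create_minimal_deployment_schedule(CronSchedule(cron="30 6 * * 1")),
    ]
)

for ds in normalized:
    print(type(ds.schedule).__name__, ds.active)

# Anything else raises ValueError:
# normalize_to_minimal_deployment_schedules(["not-a-schedule"])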
prefect/engine.py
CHANGED
@@ -170,6 +170,7 @@ from prefect.logging.loggers import (
 from prefect.results import BaseResult, ResultFactory, UnknownResult
 from prefect.settings import (
     PREFECT_DEBUG_MODE,
+    PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING,
     PREFECT_LOGGING_LOG_PRINTS,
     PREFECT_TASK_INTROSPECTION_WARN_THRESHOLD,
     PREFECT_TASKS_REFRESH_CACHE,
@@ -800,6 +801,8 @@ async def orchestrate_flow_run(
     # flag to ensure we only update the flow run name once
     run_name_set = False
 
+    await _run_flow_hooks(flow=flow, flow_run=flow_run, state=state)
+
     while state.is_running():
         waited_for_task_runs = False
 
@@ -955,7 +958,6 @@
                     f"Received non-final state {state.name!r} when proposing final"
                     f" state {terminal_state.name!r} and will attempt to run again..."
                 ),
-                extra={"send_to_api": False},
             )
             # Attempt to enter a running state again
             state = await propose_state(client, Running(), flow_run_id=flow_run.id)
@@ -1369,10 +1371,28 @@ def enter_task_run_engine(
     flow_run_context = FlowRunContext.get()
 
     if not flow_run_context:
-
-
-
-
+        if (
+            not PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING.value()
+            or return_type == "future"
+            or mapped
+        ):
+            raise RuntimeError(
+                "Tasks cannot be run outside of a flow by default."
+                " If you meant to submit an autonomous task, you need to set"
+                " `prefect config set PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING=true`"
+                " and use `your_task.submit()` instead of `your_task()`."
+                " Mapping autonomous tasks is not yet supported."
+            )
+        from prefect.task_engine import submit_autonomous_task_run_to_engine
+
+        return submit_autonomous_task_run_to_engine(
+            task=task,
+            task_run=None,
+            parameters=parameters,
+            task_runner=task_runner,
+            wait_for=wait_for,
+            return_type=return_type,
+            client=get_client(),
         )
 
     if TaskRunContext.get():
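As the new error message spells out, calling a task outside of a flow still fails by default; with the experimental setting enabled, `.submit()` hands the call to the engine as an autonomous task run. A hedged sketch of the calling side (the task is invented, and actually executing the scheduled run requires additional infrastructure not shown here):

# Prerequisite, taken from the error message above:
#   prefect config set PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING=true
from prefect import task


@task
def add(x: int, y: int) -> int:
    return x + y


if __name__ == "__main__":
    # Outside of a flow, add(1, 2) raises the RuntimeError shown above.
    # With the setting enabled, .submit() creates an autonomous task run instead.
    task_run = add.submit(1, 2)
    print(task_run)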
@@ -1403,12 +1423,13 @@
 
 async def begin_task_map(
     task: Task,
-    flow_run_context: FlowRunContext,
+    flow_run_context: Optional[FlowRunContext],
     parameters: Dict[str, Any],
     wait_for: Optional[Iterable[PrefectFuture]],
     return_type: EngineReturnType,
     task_runner: Optional[BaseTaskRunner],
-) -> List[Union[PrefectFuture, Awaitable[PrefectFuture]]]:
+    autonomous: bool = False,
+) -> List[Union[PrefectFuture, Awaitable[PrefectFuture], TaskRun]]:
     """Async entrypoint for task mapping"""
     # We need to resolve some futures to map over their data, collect the upstream
     # links beforehand to retain relationship tracking.
@@ -1475,18 +1496,29 @@
         # Collapse any previously exploded kwargs
         call_parameters = collapse_variadic_parameters(task.fn, call_parameters)
 
-        task_runs.append(
-            partial(
-                get_task_call_return_value,
-                task=task,
-                flow_run_context=flow_run_context,
-                parameters=call_parameters,
-                wait_for=wait_for,
-                return_type=return_type,
-                task_runner=task_runner,
-                extra_task_inputs=task_inputs,
+        if autonomous:
+            task_runs.append(
+                await create_autonomous_task_run(
+                    task=task,
+                    parameters=call_parameters,
+                )
             )
-        )
+        else:
+            task_runs.append(
+                partial(
+                    get_task_call_return_value,
+                    task=task,
+                    flow_run_context=flow_run_context,
+                    parameters=call_parameters,
+                    wait_for=wait_for,
+                    return_type=return_type,
+                    task_runner=task_runner,
+                    extra_task_inputs=task_inputs,
+                )
+            )
+
+    if autonomous:
+        return task_runs
 
     # Maintain the order of the task runs when using the sequential task runner
     runner = task_runner if task_runner else flow_run_context.task_runner
@@ -1698,7 +1730,10 @@ async def create_task_run(
         task_inputs=task_inputs,
     )
 
-    logger.info(f"Created task run {task_run.name!r} for task {task.name!r}")
+    if flow_run_context.flow_run:
+        logger.info(f"Created task run {task_run.name!r} for task {task.name!r}")
+    else:
+        engine_logger.info(f"Created task run {task_run.name!r} for task {task.name!r}")
 
     return task_run
 
@@ -1716,7 +1751,7 @@ async def submit_task_run(
 
     if (
         task_runner.concurrency_type == TaskConcurrencyType.SEQUENTIAL
-        and
+        and flow_run_context.flow_run
     ):
         logger.info(f"Executing {task_run.name!r} immediately...")
 
@@ -2182,7 +2217,6 @@ async def orchestrate_task_run(
                     f" state {terminal_state.name!r} and will attempt to run"
                     " again..."
                 ),
-                extra={"send_to_api": False},
             )
             # Attempt to enter a running state again
             state = await propose_state(client, Running(), task_run_id=task_run.id)
@@ -2802,7 +2836,10 @@ async def _run_flow_hooks(flow: Flow, flow_run: FlowRun, state: State) -> None:
         os.environ.get("PREFECT__ENABLE_CANCELLATION_AND_CRASHED_HOOKS", "true").lower()
         == "true"
     )
-    if state.is_failed() and flow.on_failure:
+
+    if state.is_running() and flow.on_running:
+        hooks = flow.on_running
+    elif state.is_failed() and flow.on_failure:
         hooks = flow.on_failure
     elif state.is_completed() and flow.on_completion:
         hooks = flow.on_completion
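This hook dispatch pairs with the flows.py changes in this release that add `on_running` hooks. A sketch of registering such a hook on a flow (the hook and flow are illustrative):

from prefect import flow


def notify_running(flow, flow_run, state):
    # Standard Prefect hook signature: the flow, the flow run, and the new state.
    print(f"{flow.name!r} run {flow_run.name!r} entered state {state.name!r}")


@flow(on_running=[notify_running])
def my_flow():
    return 42


if __name__ == "__main__":
    my_flow()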