prefect-client 3.3.8.dev4__py3-none-any.whl → 3.4.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their respective registries. It is provided for informational purposes only.
- prefect/_build_info.py +3 -3
- prefect/_experimental/bundles/__init__.py +1 -1
- prefect/_internal/schemas/bases.py +11 -1
- prefect/_internal/schemas/validators.py +0 -98
- prefect/_internal/uuid7.py +11 -0
- prefect/_versioning.py +2 -0
- prefect/blocks/core.py +20 -1
- prefect/client/orchestration/__init__.py +16 -8
- prefect/client/schemas/actions.py +13 -35
- prefect/client/schemas/objects.py +30 -22
- prefect/client/subscriptions.py +18 -9
- prefect/deployments/runner.py +54 -4
- prefect/events/clients.py +6 -6
- prefect/events/filters.py +25 -11
- prefect/events/schemas/automations.py +3 -1
- prefect/events/schemas/events.py +3 -2
- prefect/flows.py +94 -28
- prefect/infrastructure/provisioners/cloud_run.py +1 -0
- prefect/runner/_observers.py +60 -0
- prefect/runner/runner.py +72 -214
- prefect/server/api/server.py +18 -1
- prefect/server/api/workers.py +42 -6
- prefect/settings/base.py +7 -7
- prefect/settings/models/experiments.py +2 -0
- prefect/task_runners.py +2 -1
- prefect/tasks.py +3 -2
- prefect/types/__init__.py +24 -36
- prefect/types/names.py +139 -0
- prefect/utilities/dockerutils.py +18 -8
- prefect/utilities/importtools.py +12 -4
- prefect/workers/base.py +66 -21
- {prefect_client-3.3.8.dev4.dist-info → prefect_client-3.4.1.dist-info}/METADATA +4 -3
- {prefect_client-3.3.8.dev4.dist-info → prefect_client-3.4.1.dist-info}/RECORD +35 -32
- {prefect_client-3.3.8.dev4.dist-info → prefect_client-3.4.1.dist-info}/WHEEL +0 -0
- {prefect_client-3.3.8.dev4.dist-info → prefect_client-3.4.1.dist-info}/licenses/LICENSE +0 -0
prefect/runner/runner.py
CHANGED
```diff
@@ -46,6 +46,8 @@ import subprocess
 import sys
 import tempfile
 import threading
+import uuid
+from contextlib import AsyncExitStack
 from copy import deepcopy
 from functools import partial
 from pathlib import Path
```
```diff
@@ -80,13 +82,6 @@ from prefect._internal.concurrency.api import (
     from_sync,
 )
 from prefect.client.orchestration import PrefectClient, get_client
-from prefect.client.schemas.filters import (
-    FlowRunFilter,
-    FlowRunFilterId,
-    FlowRunFilterState,
-    FlowRunFilterStateName,
-    FlowRunFilterStateType,
-)
 from prefect.client.schemas.objects import (
     ConcurrencyLimitConfig,
     State,
```
```diff
@@ -94,12 +89,13 @@ from prefect.client.schemas.objects import (
 )
 from prefect.client.schemas.objects import Flow as APIFlow
 from prefect.events import DeploymentTriggerTypes, TriggerTypes
+from prefect.events.clients import EventsClient, get_events_client
 from prefect.events.related import tags_as_related_resources
-from prefect.events.schemas.events import RelatedResource
-from prefect.events.utilities import emit_event
+from prefect.events.schemas.events import Event, RelatedResource, Resource
 from prefect.exceptions import Abort, ObjectNotFound
 from prefect.flows import Flow, FlowStateHook, load_flow_from_flow_run
 from prefect.logging.loggers import PrefectLogAdapter, flow_run_logger, get_logger
+from prefect.runner._observers import FlowRunCancellingObserver
 from prefect.runner.storage import RunnerStorage
 from prefect.schedules import Schedule
 from prefect.settings import (
```
```diff
@@ -228,7 +224,9 @@ class Runner:
         if self.heartbeat_seconds is not None and self.heartbeat_seconds < 30:
             raise ValueError("Heartbeat must be 30 seconds or greater.")
         self._heartbeat_task: asyncio.Task[None] | None = None
+        self._events_client: EventsClient = get_events_client(checkpoint_every=1)

+        self._exit_stack = AsyncExitStack()
         self._limiter: anyio.CapacityLimiter | None = None
         self._client: PrefectClient = get_client()
         self._submitting_flow_run_ids: set[UUID] = set()
```
```diff
@@ -501,15 +499,6 @@ class Runner:
                     jitter_range=0.3,
                 )
             )
-            loops_task_group.start_soon(
-                partial(
-                    critical_service_loop,
-                    workload=runner._check_for_cancelled_flow_runs,
-                    interval=self.query_seconds * 2,
-                    run_once=run_once,
-                    jitter_range=0.3,
-                )
-            )

     def execute_in_background(
         self, func: Callable[..., Any], *args: Any, **kwargs: Any
```
```diff
@@ -583,58 +572,42 @@ class Runner:
         if not self._acquire_limit_slot(flow_run_id):
             return

-
-
-                    self._submitting_flow_run_ids.add(flow_run_id)
-                    flow_run = await self._client.read_flow_run(flow_run_id)
-
-                    process: (
-                        anyio.abc.Process | Exception
-                    ) = await self._runs_task_group.start(
-                        partial(
-                            self._submit_run_and_capture_errors,
-                            flow_run=flow_run,
-                            entrypoint=entrypoint,
-                            command=command,
-                            cwd=cwd,
-                            env=env,
-                            stream_output=stream_output,
-                        ),
-                    )
-                    if isinstance(process, Exception):
-                        return
+        self._submitting_flow_run_ids.add(flow_run_id)
+        flow_run = await self._client.read_flow_run(flow_run_id)

-
+        process: anyio.abc.Process | Exception = await self._runs_task_group.start(
+            partial(
+                self._submit_run_and_capture_errors,
+                flow_run=flow_run,
+                entrypoint=entrypoint,
+                command=command,
+                cwd=cwd,
+                env=env,
+                stream_output=stream_output,
+            ),
+        )
+        if isinstance(process, Exception):
+            return

-
-                    await self._emit_flow_run_heartbeat(flow_run)
+        task_status.started(process.pid)

-
-
-                    if process.returncode is None:
-                        self._flow_run_process_map[flow_run.id] = ProcessMapEntry(
-                            pid=process.pid, flow_run=flow_run
-                        )
+        if self.heartbeat_seconds is not None:
+            await self._emit_flow_run_heartbeat(flow_run)

-
-
-
-
-
-                        should_stop=lambda: not self._flow_run_process_map,
-                        on_stop=tg.cancel_scope.cancel,
+        async with self._flow_run_process_map_lock:
+            # Only add the process to the map if it is still running
+            if process.returncode is None:
+                self._flow_run_process_map[flow_run.id] = ProcessMapEntry(
+                    pid=process.pid, flow_run=flow_run
                 )

-
-
-
-
-
-                        jitter_range=0.3,
-                    )
-                )
+        while True:
+            # Wait until flow run execution is complete and the process has been removed from the map
+            await anyio.sleep(0.1)
+            if self._flow_run_process_map.get(flow_run.id) is None:
+                break

-
+        return process

     async def execute_bundle(
         self,
```
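The rewritten `execute_flow_run` above hands the child PID back through anyio's `task_status` and then waits for the run to finish by watching the shared process map. A minimal, self-contained sketch of that pattern (illustrative names, not the Runner's real bookkeeping):

```python
import anyio

process_map: dict[int, str] = {}


async def submit(*, task_status=anyio.TASK_STATUS_IGNORED) -> None:
    process_map[42] = "flow-run"
    task_status.started(42)  # hand the "pid" back to tg.start()
    await anyio.sleep(0.3)  # stand-in for the flow run process
    process_map.pop(42)  # removal from the map signals completion


async def main() -> None:
    async with anyio.create_task_group() as tg:
        pid = await tg.start(submit)
        print(f"started pid {pid}")
        while process_map.get(42) is not None:  # wait for the entry to disappear
            await anyio.sleep(0.1)
        print("flow run finished")


anyio.run(main)
```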
```diff
@@ -673,24 +646,8 @@ class Runner:
         )
         self._flow_run_bundle_map[flow_run.id] = bundle

-        tasks: list[asyncio.Task[None]] = []
-        tasks.append(
-            asyncio.create_task(
-                critical_service_loop(
-                    workload=self._check_for_cancelled_flow_runs,
-                    interval=self.query_seconds,
-                    jitter_range=0.3,
-                )
-            )
-        )
-
         await anyio.to_thread.run_sync(process.join)

-        for task in tasks:
-            task.cancel()
-
-        await asyncio.gather(*tasks, return_exceptions=True)
-
         self._flow_run_process_map.pop(flow_run.id)

         flow_run_logger = self._get_flow_run_logger(flow_run)
```
```diff
@@ -1000,83 +957,11 @@ class Runner:
         self.last_polled: datetime.datetime = now("UTC")
         return await self._submit_scheduled_flow_runs(flow_run_response=runs_response)

-    async def _check_for_cancelled_flow_runs(
-        self,
-        should_stop: Callable[[], bool] = lambda: False,
-        on_stop: Callable[[], None] = lambda: None,
+    async def _cancel_run(
+        self, flow_run: "FlowRun | uuid.UUID", state_msg: Optional[str] = None
     ):
-        """
-        Checks for flow runs with CANCELLING states and instructs the runner to
-        cancel them.
-
-        Args:
-            should_stop: A callable that returns a boolean indicating whether or not
-                the runner should stop checking for cancelled flow runs.
-            on_stop: A callable that is called when the runner should stop checking
-                for cancelled flow runs.
-        """
-        if self.stopping:
-            return
-        if not self.started:
-            raise RuntimeError(
-                "Runner is not set up. Please make sure you are running this runner "
-                "as an async context manager."
-            )
-
-        if should_stop():
-            self._logger.debug(
-                "Runner has no active flow runs or deployments. Sending message to loop"
-                " service that no further cancellation checks are needed."
-            )
-            on_stop()
-
-        self._logger.debug("Checking for cancelled flow runs...")
-
-        named_cancelling_flow_runs = await self._client.read_flow_runs(
-            flow_run_filter=FlowRunFilter(
-                state=FlowRunFilterState(
-                    type=FlowRunFilterStateType(any_=[StateType.CANCELLED]),
-                    name=FlowRunFilterStateName(any_=["Cancelling"]),
-                ),
-                # Avoid duplicate cancellation calls
-                id=FlowRunFilterId(
-                    any_=list(
-                        self._flow_run_process_map.keys()
-                        - self._cancelling_flow_run_ids
-                    )
-                ),
-            ),
-        )
-
-        typed_cancelling_flow_runs = await self._client.read_flow_runs(
-            flow_run_filter=FlowRunFilter(
-                state=FlowRunFilterState(
-                    type=FlowRunFilterStateType(any_=[StateType.CANCELLING]),
-                ),
-                # Avoid duplicate cancellation calls
-                id=FlowRunFilterId(
-                    any_=list(
-                        self._flow_run_process_map.keys()
-                        - self._cancelling_flow_run_ids
-                    )
-                ),
-            ),
-        )
-
-        cancelling_flow_runs = named_cancelling_flow_runs + typed_cancelling_flow_runs
-
-        if cancelling_flow_runs:
-            self._logger.info(
-                f"Found {len(cancelling_flow_runs)} flow runs awaiting cancellation."
-            )
-
-        for flow_run in cancelling_flow_runs:
-            self._cancelling_flow_run_ids.add(flow_run.id)
-            self._runs_task_group.start_soon(self._cancel_run, flow_run)
-
-        return cancelling_flow_runs
-
-    async def _cancel_run(self, flow_run: "FlowRun", state_msg: Optional[str] = None):
+        if isinstance(flow_run, uuid.UUID):
+            flow_run = await self._client.read_flow_run(flow_run)
         run_logger = self._get_flow_run_logger(flow_run)

         process_map_entry = self._flow_run_process_map.get(flow_run.id)
```
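The polling method deleted above is superseded by the event-driven `FlowRunCancellingObserver` wired up in `__aenter__` later in this file, and the observer only knows a flow run's ID. That is why `_cancel_run` now accepts either a `FlowRun` or a bare UUID and hydrates the model itself. A sketch of that normalization with stand-in types (the real method reads the run via `self._client.read_flow_run`):

```python
import asyncio
import uuid
from dataclasses import dataclass


@dataclass
class FlowRun:  # stand-in for prefect.client.schemas.objects.FlowRun
    id: uuid.UUID
    name: str = "example-run"


async def read_flow_run(flow_run_id: uuid.UUID) -> FlowRun:
    return FlowRun(id=flow_run_id)  # stand-in for the API client call


async def cancel_run(flow_run: "FlowRun | uuid.UUID") -> FlowRun:
    if isinstance(flow_run, uuid.UUID):
        flow_run = await read_flow_run(flow_run)  # hydrate from the bare id
    return flow_run


print(asyncio.run(cancel_run(uuid.uuid4())).name)
```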
```diff
@@ -1121,7 +1006,7 @@ class Runner:
         )

         flow, deployment = await self._get_flow_and_deployment(flow_run)
-        self._emit_flow_run_cancelled_event(
+        await self._emit_flow_run_cancelled_event(
             flow_run=flow_run, flow=flow, deployment=deployment
         )
         run_logger.info(f"Cancelled flow run '{flow_run.name}'!")
```
```diff
@@ -1180,14 +1065,18 @@ class Runner:
         related = [RelatedResource.model_validate(r) for r in related]
         related += tags_as_related_resources(set(tags))

-        emit_event(
-            event="prefect.flow-run.heartbeat",
-            resource={
-                "prefect.resource.id": f"prefect.flow-run.{flow_run.id}",
-                "prefect.resource.name": flow_run.name,
-                "prefect.version": __version__,
-            },
-            related=related,
+        await self._events_client.emit(
+            Event(
+                event="prefect.flow-run.heartbeat",
+                resource=Resource(
+                    {
+                        "prefect.resource.id": f"prefect.flow-run.{flow_run.id}",
+                        "prefect.resource.name": flow_run.name,
+                        "prefect.version": __version__,
+                    }
+                ),
+                related=related,
+            )
         )

     def _event_resource(self):
```
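Heartbeats are now emitted through the runner's own `EventsClient` (created with `checkpoint_every=1` in `__init__`, so each event is flushed promptly) rather than the fire-and-forget `emit_event` helper. The event built above can be constructed standalone; `Event` and `Resource` are the classes brought in by this file's import hunk, and the schema supplies defaults for fields like `occurred` and `id`:

```python
import uuid

from prefect.events.schemas.events import Event, Resource

flow_run_id = uuid.uuid4()
event = Event(
    event="prefect.flow-run.heartbeat",
    resource=Resource(
        {
            "prefect.resource.id": f"prefect.flow-run.{flow_run_id}",
            "prefect.resource.name": "my-flow-run",
            "prefect.version": "3.4.1",
        }
    ),
)
print(event.event, event.resource.id)
```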
```diff
@@ -1199,7 +1088,7 @@ class Runner:
             "prefect.version": __version__,
         }

-    def _emit_flow_run_cancelled_event(
+    async def _emit_flow_run_cancelled_event(
         self,
         flow_run: "FlowRun",
         flow: "Optional[APIFlow]",
```
```diff
@@ -1234,10 +1123,12 @@ class Runner:
         related = [RelatedResource.model_validate(r) for r in related]
         related += tags_as_related_resources(set(tags))

-        emit_event(
-            event="prefect.runner.cancelled-flow-run",
-            resource=self._event_resource(),
-            related=related,
+        await self._events_client.emit(
+            Event(
+                event="prefect.runner.cancelled-flow-run",
+                resource=Resource(self._event_resource()),
+                related=related,
+            )
         )
         self._logger.debug(f"Emitted flow run heartbeat event for {flow_run.id}")

```
```diff
@@ -1301,7 +1192,7 @@ class Runner:
         except anyio.WouldBlock:
             if TYPE_CHECKING:
                 assert self._limiter is not None
-            self._logger.info(
+            self._logger.debug(
                 f"Flow run limit reached; {self._limiter.borrowed_tokens} flow runs"
                 " in progress. You can control this limit by adjusting the "
                 "PREFECT_RUNNER_PROCESS_LIMIT setting."
```
```diff
@@ -1543,43 +1434,6 @@ class Runner:

         await self._client.set_flow_run_state(flow_run.id, state, force=True)

-        # Do not remove the flow run from the cancelling set immediately because
-        # the API caches responses for the `read_flow_runs` and we do not want to
-        # duplicate cancellations.
-        await self._schedule_task(
-            60 * 10, self._cancelling_flow_run_ids.remove, flow_run.id
-        )
-
-    async def _schedule_task(
-        self, __in_seconds: int, fn: Callable[..., Any], *args: Any, **kwargs: Any
-    ) -> None:
-        """
-        Schedule a background task to start after some time.
-
-        These tasks will be run immediately when the runner exits instead of waiting.
-
-        The function may be async or sync. Async functions will be awaited.
-        """
-
-        async def wrapper(task_status: anyio.abc.TaskStatus[None]) -> None:
-            # If we are shutting down, do not sleep; otherwise sleep until the scheduled
-            # time or shutdown
-            if self.started:
-                with anyio.CancelScope() as scope:
-                    self._scheduled_task_scopes.add(scope)
-                    task_status.started()
-                    await anyio.sleep(__in_seconds)
-
-                self._scheduled_task_scopes.remove(scope)
-            else:
-                task_status.started()
-
-            result = fn(*args, **kwargs)
-            if asyncio.iscoroutine(result):
-                await result
-
-        await self._runs_task_group.start(wrapper)
-
     async def _run_on_cancellation_hooks(
         self,
         flow_run: "FlowRun",
```
```diff
@@ -1647,11 +1501,19 @@ class Runner:
         if not hasattr(self, "_loop") or not self._loop:
             self._loop = asyncio.get_event_loop()

-        await self._client.__aenter__()
+        await self._exit_stack.enter_async_context(
+            FlowRunCancellingObserver(
+                on_cancelling=lambda flow_run_id: self._runs_task_group.start_soon(
+                    self._cancel_run, flow_run_id
+                )
+            )
+        )
+        await self._exit_stack.enter_async_context(self._client)
+        await self._exit_stack.enter_async_context(self._events_client)

         if not hasattr(self, "_runs_task_group") or not self._runs_task_group:
             self._runs_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
-        await self._runs_task_group.__aenter__()
+        await self._exit_stack.enter_async_context(self._runs_task_group)

         if not hasattr(self, "_loops_task_group") or not self._loops_task_group:
             self._loops_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
```
```diff
@@ -1677,11 +1539,7 @@ class Runner:
         for scope in self._scheduled_task_scopes:
             scope.cancel()

-        if self._runs_task_group:
-            await self._runs_task_group.__aexit__(*exc_info)
-
-        if self._client:
-            await self._client.__aexit__(*exc_info)
+        await self._exit_stack.__aexit__(*exc_info)

         shutil.rmtree(str(self._tmp_dir))
         del self._runs_task_group, self._loops_task_group
```
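The thread running through this file's changes: setup and teardown that were previously hand-rolled (paired `__aenter__()`/`__aexit__(*exc_info)` calls per resource) are now funneled through one `AsyncExitStack`, which unwinds everything in reverse order from a single call. A minimal sketch of the pattern with illustrative resources, not Prefect's actual ones:

```python
import asyncio
from contextlib import AsyncExitStack, asynccontextmanager


@asynccontextmanager
async def resource(name: str):
    print(f"open {name}")
    try:
        yield name
    finally:
        print(f"close {name}")


class Service:
    def __init__(self) -> None:
        self._exit_stack = AsyncExitStack()

    async def __aenter__(self) -> "Service":
        # Register each resource once; no matching teardown code is needed.
        await self._exit_stack.enter_async_context(resource("observer"))
        await self._exit_stack.enter_async_context(resource("client"))
        await self._exit_stack.enter_async_context(resource("events"))
        return self

    async def __aexit__(self, *exc_info) -> None:
        # Unwinds in reverse order: events, then client, then observer.
        await self._exit_stack.__aexit__(*exc_info)


async def main() -> None:
    async with Service():
        print("running")


asyncio.run(main())
```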
prefect/server/api/server.py
CHANGED
```diff
@@ -62,6 +62,19 @@ from prefect.settings import (
 )
 from prefect.utilities.hashing import hash_objects

+if os.environ.get("PREFECT_LOGFIRE_ENABLED"):
+    import logfire  # pyright: ignore
+
+    token: str | None = os.environ.get("PREFECT_LOGFIRE_WRITE_TOKEN")
+    if token is None:
+        raise ValueError(
+            "PREFECT_LOGFIRE_WRITE_TOKEN must be set when PREFECT_LOGFIRE_ENABLED is true"
+        )
+
+    logfire.configure(token=token)  # pyright: ignore
+else:
+    logfire = None
+
 if TYPE_CHECKING:
     import logging

```
```diff
@@ -250,7 +263,7 @@ def copy_directory(directory: str, path: str) -> None:
             shutil.rmtree(destination)
         shutil.copytree(source, destination, symlinks=True)
         # ensure copied files are writeable
-        for root, dirs, files in os.walk(destination):
+        for root, _, files in os.walk(destination):
             for f in files:
                 os.chmod(os.path.join(root, f), 0o700)
     else:
```
```diff
@@ -329,6 +342,10 @@ def create_api_app(

     fast_api_app_kwargs = fast_api_app_kwargs or {}
     api_app = FastAPI(title=API_TITLE, **fast_api_app_kwargs)
+
+    if logfire:
+        logfire.instrument_fastapi(api_app)  # pyright: ignore
+
     api_app.add_middleware(GZipMiddleware)

     @api_app.get(health_check_path, tags=["Root"])
```
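The Logfire hook is opt-in: nothing is imported or configured unless `PREFECT_LOGFIRE_ENABLED` is set, and a missing write token fails fast at import time rather than silently dropping telemetry. A runnable sketch of the same gate with a stand-in tracer (assumption: any instrumentation client with configure/instrument-style calls would slot in the same way):

```python
import os


class _Tracer:
    """Stand-in for an instrumentation client such as logfire."""

    def __init__(self, token: str) -> None:
        self.token = token

    def instrument(self, app: object) -> None:
        print(f"instrumenting {app!r}")


tracer: "_Tracer | None" = None

if os.environ.get("MYAPP_TRACING_ENABLED"):
    token = os.environ.get("MYAPP_TRACING_TOKEN")
    if token is None:
        # Fail at startup: a half-configured integration is worse than none.
        raise ValueError("MYAPP_TRACING_TOKEN must be set when tracing is enabled")
    tracer = _Tracer(token)


def create_app() -> str:
    app = "api_app"
    if tracer:  # instrumentation stays a no-op unless explicitly enabled
        tracer.instrument(app)
    return app


create_app()
```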
prefect/server/api/workers.py
CHANGED
```diff
@@ -3,7 +3,7 @@ Routes for interacting with work queue objects.
 """

 from typing import TYPE_CHECKING, List, Optional
-from uuid import UUID, uuid4
+from uuid import UUID

 import sqlalchemy as sa
 from fastapi import (
```
```diff
@@ -14,11 +14,13 @@ from fastapi import (
     Path,
     status,
 )
+from packaging.version import Version
 from sqlalchemy.ext.asyncio import AsyncSession

 import prefect.server.api.dependencies as dependencies
 import prefect.server.models as models
 import prefect.server.schemas as schemas
+from prefect._internal.uuid7 import uuid7
 from prefect.server.api.validation import validate_job_variable_defaults_for_work_pool
 from prefect.server.database import PrefectDBInterface, provide_database_interface
 from prefect.server.models.deployments import mark_deployments_ready
```
```diff
@@ -157,6 +159,9 @@ class WorkerLookups:
 async def create_work_pool(
     work_pool: schemas.actions.WorkPoolCreate,
     db: PrefectDBInterface = Depends(provide_database_interface),
+    prefect_client_version: Optional[str] = Depends(
+        dependencies.get_prefect_client_version
+    ),
 ) -> schemas.core.WorkPool:
     """
     Creates a new work pool. If a work pool with the same
```
```diff
@@ -180,13 +185,20 @@ async def create_work_pool(
         )

         await emit_work_pool_status_event(
-            event_id=uuid4(),
+            event_id=uuid7(),
             occurred=now("UTC"),
             pre_update_work_pool=None,
             work_pool=model,
         )

-        return schemas.core.WorkPool.model_validate(model, from_attributes=True)
+        ret = schemas.core.WorkPool.model_validate(model, from_attributes=True)
+        if prefect_client_version and Version(prefect_client_version) <= Version(
+            "3.3.7"
+        ):
+            # Client versions 3.3.7 and below do not support the default_result_storage_block_id field and will error
+            # when receiving it.
+            del ret.storage_configuration.default_result_storage_block_id
+        return ret

     except sa.exc.IntegrityError:
         raise HTTPException(
```
```diff
@@ -200,6 +212,9 @@ async def read_work_pool(
     work_pool_name: str = Path(..., description="The work pool name", alias="name"),
     worker_lookups: WorkerLookups = Depends(WorkerLookups),
     db: PrefectDBInterface = Depends(provide_database_interface),
+    prefect_client_version: Optional[str] = Depends(
+        dependencies.get_prefect_client_version
+    ),
 ) -> schemas.core.WorkPool:
     """
     Read a work pool by name
```
```diff
@@ -212,7 +227,18 @@ async def read_work_pool(
         orm_work_pool = await models.workers.read_work_pool(
             session=session, work_pool_id=work_pool_id
         )
-        return schemas.core.WorkPool.model_validate(orm_work_pool, from_attributes=True)
+        work_pool = schemas.core.WorkPool.model_validate(
+            orm_work_pool, from_attributes=True
+        )
+
+        if prefect_client_version and Version(prefect_client_version) <= Version(
+            "3.3.7"
+        ):
+            # Client versions 3.3.7 and below do not support the default_result_storage_block_id field and will error
+            # when receiving it.
+            del work_pool.storage_configuration.default_result_storage_block_id
+
+        return work_pool


 @router.post("/filter")
```
```diff
@@ -220,8 +246,10 @@ async def read_work_pools(
     work_pools: Optional[schemas.filters.WorkPoolFilter] = None,
     limit: int = dependencies.LimitBody(),
     offset: int = Body(0, ge=0),
-    worker_lookups: WorkerLookups = Depends(WorkerLookups),
     db: PrefectDBInterface = Depends(provide_database_interface),
+    prefect_client_version: Optional[str] = Depends(
+        dependencies.get_prefect_client_version
+    ),
 ) -> List[schemas.core.WorkPool]:
     """
     Read multiple work pools
```
```diff
@@ -233,10 +261,18 @@ async def read_work_pools(
             offset=offset,
             limit=limit,
         )
-        return [
+        ret = [
             schemas.core.WorkPool.model_validate(w, from_attributes=True)
             for w in orm_work_pools
         ]
+    if prefect_client_version and Version(prefect_client_version) <= Version(
+        "3.3.7"
+    ):
+        # Client versions 3.3.7 and below do not support the default_result_storage_block_id field and will error
+        # when receiving it.
+        for work_pool in ret:
+            del work_pool.storage_configuration.default_result_storage_block_id
+    return ret


 @router.post("/count")
```
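All three endpoints share one compatibility trick: the server inspects the client's reported version (via the `get_prefect_client_version` dependency) and strips the `default_result_storage_block_id` field for clients at or below 3.3.7, which would reject it. `packaging.version.Version` gives correct ordering for dev and pre-releases, which plain string comparison would not. A small sketch of the gate, not the server's actual serialization path:

```python
from typing import Optional

from packaging.version import Version


def strip_unsupported_fields(payload: dict, client_version: Optional[str]) -> dict:
    # Old clients reject the newer field, so drop it for them only.
    if client_version and Version(client_version) <= Version("3.3.7"):
        payload = {
            k: v
            for k, v in payload.items()
            if k != "default_result_storage_block_id"
        }
    return payload


work_pool = {"name": "my-pool", "default_result_storage_block_id": None}
assert "default_result_storage_block_id" not in strip_unsupported_fields(work_pool, "3.3.5")
assert "default_result_storage_block_id" in strip_unsupported_fields(work_pool, "3.4.1")
# Version parsing orders dev releases correctly: 3.3.8.dev4 is newer than 3.3.7.
assert Version("3.3.7") < Version("3.3.8.dev4") < Version("3.4.1")
```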
prefect/settings/base.py
CHANGED
```diff
@@ -2,7 +2,7 @@ from __future__ import annotations

 import inspect
 from functools import partial
-from typing import Any, Dict, Tuple, Type
+from typing import Any

 from pydantic import (
     AliasChoices,
```
```diff
@@ -32,12 +32,12 @@ class PrefectBaseSettings(BaseSettings):
     @classmethod
     def settings_customise_sources(
         cls,
-        settings_cls: Type[BaseSettings],
+        settings_cls: type[BaseSettings],
         init_settings: PydanticBaseSettingsSource,
         env_settings: PydanticBaseSettingsSource,
         dotenv_settings: PydanticBaseSettingsSource,
         file_secret_settings: PydanticBaseSettingsSource,
-    ) -> Tuple[PydanticBaseSettingsSource, ...]:
+    ) -> tuple[PydanticBaseSettingsSource, ...]:
         """
         Define an order for Prefect settings sources.

```
```diff
@@ -93,9 +93,9 @@ class PrefectBaseSettings(BaseSettings):
         exclude_unset: bool = False,
         include_secrets: bool = True,
         include_aliases: bool = False,
-    ) -> Dict[str, str]:
+    ) -> dict[str, str]:
         """Convert the settings object to a dictionary of environment variables."""
-        env: Dict[str, Any] = self.model_dump(
+        env: dict[str, Any] = self.model_dump(
             exclude_unset=exclude_unset,
             mode="json",
             context={"include_secrets": include_secrets},
```
```diff
@@ -192,7 +192,7 @@ class PrefectSettingsConfigDict(SettingsConfigDict, total=False):


 def _add_environment_variables(
-    schema: Dict[str, Any], model: Type[PrefectBaseSettings]
+    schema: dict[str, Any], model: type[PrefectBaseSettings]
 ) -> None:
     for property in schema["properties"]:
         env_vars: list[str] = []
```
```diff
@@ -212,7 +212,7 @@ def _add_environment_variables(


 def build_settings_config(
-    path: Tuple[str, ...] = tuple(), frozen: bool = False
+    path: tuple[str, ...] = tuple(), frozen: bool = False
 ) -> PrefectSettingsConfigDict:
     env_prefix = f"PREFECT_{'_'.join(path).upper()}_" if path else "PREFECT_"
     return PrefectSettingsConfigDict(
```
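The edits in this file are annotation modernization: `typing.Dict`/`Tuple`/`Type` become the builtin generics `dict`/`tuple`/`type`, which `from __future__ import annotations` (visible in this hunk's header context) makes safe to spell in signatures. For context, here is what a `settings_customise_sources` override with the signature above controls, namely the precedence of configuration sources (illustrative subclass, not Prefect's actual ordering):

```python
from pydantic_settings import BaseSettings, PydanticBaseSettingsSource


class ExampleSettings(BaseSettings):
    api_url: str = "http://127.0.0.1:4200/api"

    @classmethod
    def settings_customise_sources(
        cls,
        settings_cls: type[BaseSettings],
        init_settings: PydanticBaseSettingsSource,
        env_settings: PydanticBaseSettingsSource,
        dotenv_settings: PydanticBaseSettingsSource,
        file_secret_settings: PydanticBaseSettingsSource,
    ) -> tuple[PydanticBaseSettingsSource, ...]:
        # Earlier sources win: environment variables now beat init kwargs.
        return env_settings, init_settings, dotenv_settings, file_secret_settings


print(ExampleSettings().api_url)
```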
prefect/task_runners.py
CHANGED
```diff
@@ -21,6 +21,7 @@ from typing import (

 from typing_extensions import ParamSpec, Self, TypeVar

+from prefect._internal.uuid7 import uuid7
 from prefect.client.schemas.objects import TaskRunInput
 from prefect.exceptions import MappingLengthMismatch, MappingMissingIterable
 from prefect.futures import (
```
```diff
@@ -290,7 +291,7 @@ class ThreadPoolTaskRunner(TaskRunner[PrefectConcurrentFuture[R]]):
         from prefect.context import FlowRunContext
         from prefect.task_engine import run_task_async, run_task_sync

-        task_run_id = uuid4()
+        task_run_id = uuid7()
         cancel_event = threading.Event()
         self._cancel_events[task_run_id] = cancel_event
         context = copy_context()
```
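`uuid7()` (the vendored helper added in `prefect/_internal/uuid7.py`, per the file list above) replaces `uuid4()` for task run IDs. UUIDv7 values embed a millisecond timestamp in their high bits, so IDs sort in creation order, which is friendlier to database indexes and event ordering than random v4 IDs. A quick check of that property, assuming the helper returns a standard `uuid.UUID`:

```python
import time

from prefect._internal.uuid7 import uuid7

first = uuid7()
time.sleep(0.002)  # move to a later millisecond timestamp
second = uuid7()

# UUID comparison is by integer value; v7's timestamp prefix makes
# creation order and sort order agree.
assert first < second
print(first, "<", second)
```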
prefect/tasks.py
CHANGED
```diff
@@ -32,6 +32,7 @@ from uuid import UUID, uuid4
 from typing_extensions import Literal, ParamSpec, Self, TypeAlias, TypeIs

 import prefect.states
+from prefect._internal.uuid7 import uuid7
 from prefect.cache_policies import DEFAULT, NO_CACHE, CachePolicy
 from prefect.client.orchestration import get_client
 from prefect.client.schemas import TaskRun
```
```diff
@@ -953,7 +954,7 @@ class Task(Generic[P, R]):
             if flow_run_context and flow_run_context.flow_run
             else None
         )
-        task_run_id = id or uuid4()
+        task_run_id = id or uuid7()
         state = prefect.states.Pending(
             state_details=StateDetails(
                 task_run_id=task_run_id,
```
```diff
@@ -1551,7 +1552,7 @@ class Task(Generic[P, R]):
             validated_state=task_run.state,
         )

-        if task_run_url := url_for(task_run):
+        if get_current_settings().ui_url and (task_run_url := url_for(task_run)):
             logger.info(
                 f"Created task run {task_run.name!r}. View it in the UI at {task_run_url!r}"
             )
```