prefect-client 3.2.7__py3-none-any.whl → 3.2.9__py3-none-any.whl
This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- prefect/_build_info.py +3 -3
- prefect/_experimental/bundles.py +79 -0
- prefect/_waiters.py +254 -0
- prefect/client/subscriptions.py +2 -1
- prefect/events/clients.py +19 -17
- prefect/flow_runs.py +67 -35
- prefect/flows.py +3 -1
- prefect/futures.py +192 -22
- prefect/runner/runner.py +106 -39
- prefect/server/api/artifacts.py +5 -0
- prefect/server/api/automations.py +5 -0
- prefect/server/api/block_capabilities.py +5 -0
- prefect/server/api/block_documents.py +2 -0
- prefect/server/api/block_schemas.py +5 -0
- prefect/server/api/block_types.py +3 -1
- prefect/server/api/concurrency_limits.py +5 -0
- prefect/server/api/concurrency_limits_v2.py +5 -0
- prefect/server/api/deployments.py +2 -0
- prefect/server/api/events.py +5 -1
- prefect/server/api/flow_run_notification_policies.py +2 -0
- prefect/server/api/flow_run_states.py +2 -0
- prefect/server/api/flow_runs.py +2 -0
- prefect/server/api/flows.py +2 -0
- prefect/server/api/logs.py +5 -1
- prefect/server/api/task_run_states.py +2 -0
- prefect/server/api/task_runs.py +2 -0
- prefect/server/api/task_workers.py +5 -1
- prefect/server/api/variables.py +5 -0
- prefect/server/api/work_queues.py +2 -0
- prefect/server/api/workers.py +4 -0
- prefect/settings/profiles.py +6 -5
- prefect/task_worker.py +3 -3
- prefect/telemetry/instrumentation.py +2 -2
- prefect/utilities/templating.py +50 -11
- prefect/workers/base.py +3 -3
- prefect/workers/process.py +22 -319
- {prefect_client-3.2.7.dist-info → prefect_client-3.2.9.dist-info}/METADATA +2 -2
- {prefect_client-3.2.7.dist-info → prefect_client-3.2.9.dist-info}/RECORD +40 -39
- {prefect_client-3.2.7.dist-info → prefect_client-3.2.9.dist-info}/WHEEL +0 -0
- {prefect_client-3.2.7.dist-info → prefect_client-3.2.9.dist-info}/licenses/LICENSE +0 -0
prefect/runner/runner.py
CHANGED
@@ -39,6 +39,7 @@ import datetime
 import inspect
 import logging
 import os
+import shlex
 import shutil
 import signal
 import subprocess
@@ -113,6 +114,7 @@ from prefect.states import (
 )
 from prefect.types._datetime import DateTime
 from prefect.types.entrypoint import EntrypointType
+from prefect.utilities.annotations import NotSet
 from prefect.utilities.asyncutils import (
     asyncnullcontext,
     is_async_fn,
@@ -152,7 +154,7 @@ class Runner:
         query_seconds: Optional[float] = None,
         prefetch_seconds: float = 10,
         heartbeat_seconds: Optional[float] = None,
-        limit:
+        limit: int | type[NotSet] | None = NotSet,
         pause_on_shutdown: bool = True,
         webserver: bool = False,
     ):
@@ -168,7 +170,8 @@ class Runner:
             heartbeat_seconds: The number of seconds to wait between emitting
                 flow run heartbeats. The runner will not emit heartbeats if the value is None.
                 Defaults to `PREFECT_RUNNER_HEARTBEAT_FREQUENCY`.
-            limit: The maximum number of flow runs this runner should be running at
+            limit: The maximum number of flow runs this runner should be running at. Provide `None` for no limit.
+                If not provided, the runner will use the value of `PREFECT_RUNNER_PROCESS_LIMIT`.
             pause_on_shutdown: A boolean for whether or not to automatically pause
                 deployment schedules on shutdown; defaults to `True`
             webserver: a boolean flag for whether to start a webserver for this runner
@@ -209,7 +212,11 @@ class Runner:
         self.started: bool = False
         self.stopping: bool = False
         self.pause_on_shutdown: bool = pause_on_shutdown
-        self.limit: int | None =
+        self.limit: int | None = (
+            settings.runner.process_limit
+            if limit is NotSet or isinstance(limit, type)
+            else limit
+        )
         self.webserver: bool = webserver

         self.query_seconds: float = query_seconds or settings.runner.poll_frequency
@@ -227,6 +234,7 @@ class Runner:
         self._scheduled_task_scopes: set[anyio.abc.CancelScope] = set()
         self._deployment_ids: set[UUID] = set()
         self._flow_run_process_map: dict[UUID, ProcessMapEntry] = dict()
+        self.__flow_run_process_map_lock: asyncio.Lock | None = None
         self._flow_run_bundle_map: dict[UUID, SerializedBundle] = dict()

         self._tmp_dir: Path = (
@@ -243,6 +251,12 @@ class Runner:
         )
         self._flow_cache: LRUCache[UUID, "APIFlow"] = LRUCache(maxsize=100)

+    @property
+    def _flow_run_process_map_lock(self) -> asyncio.Lock:
+        if self.__flow_run_process_map_lock is None:
+            self.__flow_run_process_map_lock = asyncio.Lock()
+        return self.__flow_run_process_map_lock
+
     @sync_compatible
     async def add_deployment(
         self,
@@ -550,13 +564,23 @@ class Runner:
         )

     async def execute_flow_run(
-        self,
-
+        self,
+        flow_run_id: UUID,
+        entrypoint: str | None = None,
+        command: str | None = None,
+        cwd: Path | None = None,
+        env: dict[str, str | None] | None = None,
+        task_status: anyio.abc.TaskStatus[int] | None = None,
+        stream_output: bool = True,
+    ) -> anyio.abc.Process | None:
         """
         Executes a single flow run with the given ID.

         Execution will wait to monitor for cancellation requests. Exits once
         the flow run process has exited.
+
+        Returns:
+            The flow run process.
         """
         self.pause_on_shutdown = False
         context = self if not self.started else asyncnullcontext()
@@ -570,17 +594,26 @@
             self._submitting_flow_run_ids.add(flow_run_id)
             flow_run = await self._client.read_flow_run(flow_run_id)

-
+            process: anyio.abc.Process = await self._runs_task_group.start(
                 partial(
                     self._submit_run_and_capture_errors,
                     flow_run=flow_run,
                     entrypoint=entrypoint,
+                    command=command,
+                    cwd=cwd,
+                    env=env,
+                    stream_output=stream_output,
                 ),
             )
-
-
-
-
+            if task_status:
+                task_status.started(process.pid)
+
+            async with self._flow_run_process_map_lock:
+                # Only add the process to the map if it is still running
+                if process.returncode is None:
+                    self._flow_run_process_map[flow_run.id] = ProcessMapEntry(
+                        pid=process.pid, flow_run=flow_run
+                    )

             # We want this loop to stop when the flow run process exits
             # so we'll check if the flow run process is still alive on
@@ -609,6 +642,8 @@
                 )
             )

+        return process
+
     async def execute_bundle(self, bundle: SerializedBundle) -> None:
         """
         Executes a bundle in a subprocess.
@@ -696,7 +731,7 @@
             )
         elif (
             sys.platform == "win32"
-            and process.
+            and process.exitcode == STATUS_CONTROL_C_EXIT
         ):
             level = logging.INFO
             help_message = (
@@ -733,9 +768,13 @@
     async def _run_process(
         self,
         flow_run: "FlowRun",
-        task_status:
-        entrypoint:
-
+        task_status: anyio.abc.TaskStatus[anyio.abc.Process] | None = None,
+        entrypoint: str | None = None,
+        command: str | None = None,
+        cwd: Path | None = None,
+        env: dict[str, str | None] | None = None,
+        stream_output: bool = True,
+    ) -> anyio.abc.Process:
         """
         Runs the given flow run in a subprocess.

@@ -747,7 +786,10 @@
             task_status: anyio task status used to send a message to the caller
                 than the flow run process has started.
         """
-        command
+        if command is None:
+            runner_command = [get_sys_executable(), "-m", "prefect.engine"]
+        else:
+            runner_command = shlex.split(command)

         flow_run_logger = self._get_flow_run_logger(flow_run)

@@ -760,7 +802,9 @@

         flow_run_logger.info("Opening process...")

-        env
+        if env is None:
+            env = {}
+        env.update(get_current_settings().to_environment_variables(exclude_unset=True))
         env.update(
             {
                 **{
@@ -798,12 +842,12 @@
             setattr(storage, "last_adhoc_pull", datetime.datetime.now())

         process = await run_process(
-            command=
-            stream_output=
+            command=runner_command,
+            stream_output=stream_output,
             task_status=task_status,
-            task_status_handler=
+            task_status_handler=lambda process: process,
             env=env,
-            cwd=storage.destination if storage else
+            cwd=storage.destination if storage else cwd,
             **kwargs,
         )

@@ -852,7 +896,7 @@
                 f"Process for flow run {flow_run.name!r} exited cleanly."
             )

-        return process
+        return process

     async def _kill_process(
         self,
@@ -1242,8 +1286,8 @@ class Runner:
             assert self._limiter is not None
             self._logger.info(
                 f"Flow run limit reached; {self._limiter.borrowed_tokens} flow runs"
-                " in progress. You can control this limit by
-                "
+                " in progress. You can control this limit by adjusting the "
+                "PREFECT_RUNNER_PROCESS_LIMIT setting."
             )
             return False

@@ -1316,9 +1360,10 @@
             )

         if readiness_result and not isinstance(readiness_result, Exception):
-            self.
-
-
+            async with self._flow_run_process_map_lock:
+                self._flow_run_process_map[flow_run.id] = ProcessMapEntry(
+                    pid=readiness_result, flow_run=flow_run
+                )
         # Heartbeats are opt-in and only emitted if a heartbeat frequency is set
         if self.heartbeat_seconds is not None:
             await self._emit_flow_run_heartbeat(flow_run)
@@ -1333,17 +1378,26 @@
     async def _submit_run_and_capture_errors(
         self,
         flow_run: "FlowRun",
-        task_status: anyio.abc.TaskStatus[
-        entrypoint:
+        task_status: anyio.abc.TaskStatus[anyio.abc.Process | Exception],
+        entrypoint: str | None = None,
+        command: str | None = None,
+        cwd: Path | None = None,
+        env: dict[str, str | None] | None = None,
+        stream_output: bool = True,
     ) -> Union[Optional[int], Exception]:
         run_logger = self._get_flow_run_logger(flow_run)

         try:
-
+            process = await self._run_process(
                 flow_run=flow_run,
                 task_status=task_status,
                 entrypoint=entrypoint,
+                command=command,
+                cwd=cwd,
+                env=env,
+                stream_output=stream_output,
             )
+            status_code = process.returncode
         except Exception as exc:
             if task_status:
                 # This flow run was being submitted and did not start successfully
@@ -1363,7 +1417,9 @@
             return exc
         finally:
             self._release_limit_slot(flow_run.id)
-
+
+            async with self._flow_run_process_map_lock:
+                self._flow_run_process_map.pop(flow_run.id, None)

         if status_code != 0:
             await self._propose_crashed_state(
@@ -1513,6 +1569,7 @@ class Runner:
         """
         Run the hooks for a flow.
         """
+        run_logger = self._get_flow_run_logger(flow_run)
         if state.is_cancelling():
             try:
                 if flow_run.id in self._flow_run_bundle_map:
@@ -1520,6 +1577,7 @@
                         self._flow_run_bundle_map[flow_run.id]
                     )
                 else:
+                    run_logger.info("Loading flow to check for on_cancellation hooks")
                     flow = await load_flow_from_flow_run(
                         flow_run, storage_base_path=str(self._tmp_dir)
                     )
@@ -1529,7 +1587,7 @@
             except ObjectNotFound:
                 run_logger = self._get_flow_run_logger(flow_run)
                 run_logger.warning(
-                    f"Runner
+                    f"Runner failed to retrieve flow to execute on_cancellation hooks for flow run {flow_run.id!r}."
                 )

     async def _run_on_crashed_hooks(
@@ -1540,16 +1598,25 @@
         """
         Run the hooks for a flow.
         """
+        run_logger = self._get_flow_run_logger(flow_run)
         if state.is_crashed():
-
-
-
-
-
-
-
+            try:
+                if flow_run.id in self._flow_run_bundle_map:
+                    flow = extract_flow_from_bundle(
+                        self._flow_run_bundle_map[flow_run.id]
+                    )
+                else:
+                    run_logger.info("Loading flow to check for on_crashed hooks")
+                    flow = await load_flow_from_flow_run(
+                        flow_run, storage_base_path=str(self._tmp_dir)
+                    )
+                hooks = flow.on_crashed_hooks or []

-
+                await _run_hooks(hooks, flow_run, flow, state)
+            except ObjectNotFound:
+                run_logger.warning(
+                    f"Runner failed to retrieve flow to execute on_crashed hooks for flow run {flow_run.id!r}."
+                )

     async def __aenter__(self) -> Self:
         self._logger.debug("Starting runner...")
prefect/server/api/artifacts.py
CHANGED
@@ -26,6 +26,11 @@ async def create_artifact(
     response: Response,
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> core.Artifact:
+    """
+    Create an artifact.
+
+    For more information, see https://docs.prefect.io/v3/develop/artifacts.
+    """
     artifact = core.Artifact(**artifact.model_dump())

     right_now = now("UTC")
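
Most of the remaining prefect/server/api changes follow the same pattern as artifacts.py above: a docstring, or a docs link appended to an existing docstring, on a route handler. This matters because FastAPI lifts a handler's docstring into the operation description of the generated OpenAPI schema, so the added links surface in the rendered API reference. A minimal illustration with a plain `APIRouter` and a stub body (Prefect itself uses its `PrefectRouter` wrapper, and the real handlers hit the database):

from fastapi import APIRouter

router = APIRouter(prefix="/artifacts", tags=["Artifacts"])


@router.post("/")
async def create_artifact() -> dict[str, str]:
    """
    Create an artifact.

    For more information, see https://docs.prefect.io/v3/develop/artifacts.
    """
    # FastAPI copies this docstring into the OpenAPI operation description,
    # which is how the documentation links added in this release become
    # visible to API consumers.
    return {"status": "created"}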
prefect/server/api/automations.py
CHANGED
@@ -39,6 +39,11 @@ async def create_automation(
     automation: AutomationCreate,
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> Automation:
+    """
+    Create an automation.
+
+    For more information, see https://docs.prefect.io/v3/automate.
+    """
     # reset any client-provided IDs on the provided triggers
     automation.trigger.reset_ids()

prefect/server/api/block_capabilities.py
CHANGED
@@ -19,6 +19,11 @@ router: PrefectRouter = PrefectRouter(
 async def read_available_block_capabilities(
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> List[str]:
+    """
+    Get available block capabilities.
+
+    For more information, see https://docs.prefect.io/v3/develop/blocks.
+    """
     async with db.session_context() as session:
         return await models.block_schemas.read_available_block_capabilities(
             session=session
prefect/server/api/block_documents.py
CHANGED
@@ -24,6 +24,8 @@ async def create_block_document(
 ) -> schemas.core.BlockDocument:
     """
     Create a new block document.
+
+    For more information, see https://docs.prefect.io/v3/develop/blocks.
     """
     async with db.session_context(begin_transaction=True) as session:
         if block_document.name is not None:
prefect/server/api/block_schemas.py
CHANGED
@@ -30,6 +30,11 @@ async def create_block_schema(
     response: Response,
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> schemas.core.BlockSchema:
+    """
+    Create a block schema.
+
+    For more information, see https://docs.prefect.io/v3/develop/blocks.
+    """
     from prefect.blocks.core import Block

     async with db.session_context(begin_transaction=True) as session:
prefect/server/api/block_types.py
CHANGED
@@ -19,7 +19,9 @@ async def create_block_type(
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> schemas.core.BlockType:
     """
-    Create a new block type
+    Create a new block type.
+
+    For more information, see https://docs.prefect.io/v3/develop/blocks.
     """
     # API-created blocks cannot start with the word "Prefect"
     # as it is reserved for system use
prefect/server/api/concurrency_limits.py
CHANGED
@@ -28,6 +28,11 @@ async def create_concurrency_limit(
     response: Response,
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> schemas.core.ConcurrencyLimit:
+    """
+    Create a task run concurrency limit.
+
+    For more information, see https://docs.prefect.io/v3/develop/task-run-limits.
+    """
     # hydrate the input model into a full model
     concurrency_limit_model = schemas.core.ConcurrencyLimit(
         **concurrency_limit.model_dump()
prefect/server/api/concurrency_limits_v2.py
CHANGED
@@ -21,6 +21,11 @@ async def create_concurrency_limit_v2(
     concurrency_limit: actions.ConcurrencyLimitV2Create,
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> schemas.core.ConcurrencyLimitV2:
+    """
+    Create a task run concurrency limit.
+
+    For more information, see https://docs.prefect.io/v3/develop/global-concurrency-limits.
+    """
     async with db.session_context(begin_transaction=True) as session:
         model = await models.concurrency_limits_v2.create_concurrency_limit(
             session=session, concurrency_limit=concurrency_limit
prefect/server/api/deployments.py
CHANGED
@@ -68,6 +68,8 @@ async def create_deployment(

     If the deployment has an active schedule, flow runs will be scheduled.
     When upserting, any scheduled runs from the existing deployment will be deleted.
+
+    For more information, see https://docs.prefect.io/v3/deploy.
     """

     data = deployment.model_dump(exclude_unset=True)
prefect/server/api/events.py
CHANGED
@@ -48,7 +48,11 @@ async def create_events(
     events: List[Event],
     ephemeral_request: bool = Depends(is_ephemeral_request),
 ) -> None:
-    """
+    """
+    Record a batch of Events.
+
+    For more information, see https://docs.prefect.io/v3/automate/events/events.
+    """
     if ephemeral_request:
         await EventsPipeline().process_events(events)
     else:
prefect/server/api/flow_run_notification_policies.py
CHANGED
@@ -25,6 +25,8 @@ async def create_flow_run_notification_policy(
 ) -> schemas.core.FlowRunNotificationPolicy:
     """
     Creates a new flow run notification policy.
+
+    For more information, see https://docs.prefect.io/v3/automate/events/automations-triggers#sending-notifications-with-automations.
     """
     async with db.session_context(begin_transaction=True) as session:
         return await models.flow_run_notification_policies.create_flow_run_notification_policy(
prefect/server/api/flow_run_states.py
CHANGED
@@ -26,6 +26,8 @@ async def read_flow_run_state(
 ) -> schemas.states.State:
     """
     Get a flow run state by id.
+
+    For more information, see https://docs.prefect.io/v3/develop/write-flows#final-state-determination.
     """
     async with db.session_context() as session:
         flow_run_state = await models.flow_run_states.read_flow_run_state(
prefect/server/api/flow_runs.py
CHANGED
@@ -74,6 +74,8 @@ async def create_flow_run(
     idempotency key already exists, the existing flow run will be returned.

     If no state is provided, the flow run will be created in a PENDING state.
+
+    For more information, see https://docs.prefect.io/v3/develop/write-flows.
     """
     # hydrate the input model into a full flow run / state model
     flow_run_object = schemas.core.FlowRun(
prefect/server/api/flows.py
CHANGED
@@ -27,6 +27,8 @@ async def create_flow(
 ) -> schemas.core.Flow:
     """Gracefully creates a new flow from the provided schema. If a flow with the
     same name already exists, the existing flow is returned.
+
+    For more information, see https://docs.prefect.io/v3/develop/write-flows.
     """
     # hydrate the input model into a full flow model
     flow = schemas.core.Flow(**flow.model_dump())
prefect/server/api/logs.py
CHANGED
@@ -20,7 +20,11 @@ async def create_logs(
     logs: List[schemas.actions.LogCreate],
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> None:
-    """
+    """
+    Create new logs from the provided schema.
+
+    For more information, see https://docs.prefect.io/v3/develop/logging.
+    """
     for batch in models.logs.split_logs_into_batches(logs):
         async with db.session_context(begin_transaction=True) as session:
             await models.logs.create_logs(session=session, logs=batch)
prefect/server/api/task_run_states.py
CHANGED
@@ -26,6 +26,8 @@ async def read_task_run_state(
 ) -> schemas.states.State:
     """
     Get a task run state by id.
+
+    For more information, see https://docs.prefect.io/v3/develop/write-tasks.
     """
     async with db.session_context() as session:
         task_run_state = await models.task_run_states.read_task_run_state(
prefect/server/api/task_runs.py
CHANGED
@@ -57,6 +57,8 @@ async def create_task_run(
     run will be returned.

     If no state is provided, the task run will be created in a PENDING state.
+
+    For more information, see https://docs.prefect.io/v3/develop/write-tasks.
     """
     # hydrate the input model into a full task run / state model
     task_run_dict = task_run.model_dump()
prefect/server/api/task_workers.py
CHANGED
@@ -20,7 +20,11 @@ async def read_task_workers(
         default=None, description="The task worker filter", embed=True
     ),
 ) -> List[TaskWorkerResponse]:
-    """
+    """
+    Read active task workers. Optionally filter by task keys.
+
+    For more information, see https://docs.prefect.io/v3/develop/deferred-tasks.
+    """

     if task_worker_filter and task_worker_filter.task_keys:
         return await models.task_workers.get_workers_for_task_keys(
prefect/server/api/variables.py
CHANGED
@@ -57,6 +57,11 @@ async def create_variable(
     variable: actions.VariableCreate,
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> core.Variable:
+    """
+    Create a variable.
+
+    For more information, see https://docs.prefect.io/v3/develop/variables.
+    """
     async with db.session_context(begin_transaction=True) as session:
         try:
             model = await models.variables.create_variable(
prefect/server/api/workers.py
CHANGED
@@ -161,6 +161,8 @@ async def create_work_pool(
     """
     Creates a new work pool. If a work pool with the same
     name already exists, an error will be raised.
+
+    For more information, see https://docs.prefect.io/v3/deploy/infrastructure-concepts/work-pools.
     """
     if work_pool.name.lower().startswith("prefect"):
         raise HTTPException(
@@ -408,6 +410,8 @@ async def create_work_queue(
     """
     Creates a new work pool queue. If a work pool queue with the same
     name already exists, an error will be raised.
+
+    For more information, see https://docs.prefect.io/v3/deploy/infrastructure-concepts/work-pools#work-queues.
     """

     try:
prefect/settings/profiles.py
CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import inspect
 import warnings
 from pathlib import Path
@@ -9,7 +11,6 @@ from typing import (
     Iterable,
     Iterator,
     Optional,
-    Set,
     Union,
 )

@@ -32,8 +33,8 @@ from prefect.settings.models.root import Settings


 def _cast_settings(
-    settings:
-) ->
+    settings: dict[str | Setting, Any] | Any,
+) -> dict[Setting, Any]:
     """For backwards compatibility, allow either Settings objects as keys or string references to settings."""
     if not isinstance(settings, dict):
         raise ValueError("Settings must be a dictionary.")
@@ -63,7 +64,7 @@ class Profile(BaseModel):
     )

     name: str
-    settings: Annotated[
+    settings: Annotated[dict[Setting, Any], BeforeValidator(_cast_settings)] = Field(
         default_factory=dict
     )
     source: Optional[Path] = None
@@ -114,7 +115,7 @@ class ProfilesCollection:
         self.active_name = active

     @property
-    def names(self) ->
+    def names(self) -> set[str]:
         """
         Return a set of profile names in this collection.
         """
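
The profiles.py change tightens the `_cast_settings` signature and the `Profile.settings` annotation using Pydantic v2's `Annotated` + `BeforeValidator` pattern, which runs the coercion on raw input before the field itself is validated, so callers can keep passing plain string keys. A simplified, runnable sketch of that pattern, with a small set of string names standing in for Prefect's `Setting` objects (names below are illustrative, not Prefect's):

from typing import Annotated, Any

from pydantic import BaseModel, BeforeValidator, Field

# Hypothetical stand-in for Prefect's Setting registry, used only to make the
# coercion concrete.
KNOWN_SETTINGS = {"PREFECT_API_URL", "PREFECT_LOGGING_LEVEL"}


def _cast_settings(settings: dict[str, Any] | Any) -> dict[str, Any]:
    """Allow plain string keys, rejecting anything that is not a known setting."""
    if not isinstance(settings, dict):
        raise ValueError("Settings must be a dictionary.")
    for key in settings:
        if key not in KNOWN_SETTINGS:
            raise ValueError(f"Unknown setting: {key!r}")
    return dict(settings)


class Profile(BaseModel):
    name: str
    # BeforeValidator applies _cast_settings to the raw input before the dict
    # field is validated, matching the Annotated[...] form in the diff.
    settings: Annotated[dict[str, Any], BeforeValidator(_cast_settings)] = Field(
        default_factory=dict
    )


print(Profile(name="dev", settings={"PREFECT_LOGGING_LEVEL": "DEBUG"}).settings)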
prefect/task_worker.py
CHANGED
@@ -18,7 +18,7 @@ import uvicorn
 from exceptiongroup import BaseExceptionGroup  # novermin
 from fastapi import FastAPI
 from typing_extensions import ParamSpec, Self, TypeVar
-from websockets.exceptions import
+from websockets.exceptions import InvalidStatus

 from prefect import Task
 from prefect._internal.concurrency.api import create_call, from_sync
@@ -181,8 +181,8 @@ class TaskWorker:
         logger.info("Starting task worker...")
         try:
             await self._subscribe_to_task_scheduling()
-        except
-            if exc.status_code == 403:
+        except InvalidStatus as exc:
+            if exc.response.status_code == 403:
                 logger.error(
                     "403: Could not establish a connection to the `/task_runs/subscriptions/scheduled`"
                     f" endpoint found at:\n\n {PREFECT_API_URL.value()}"
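
The task_worker.py change tracks the newer `websockets` exception API: handshake rejections now raise `InvalidStatus`, which carries the server's response object, so the HTTP status is read from `exc.response.status_code` rather than from a top-level `status_code` attribute as the removed code expected. A small sketch of the pattern, with a placeholder URL rather than a real Prefect endpoint:

import websockets
from websockets.exceptions import InvalidStatus


async def subscribe(url: str) -> None:
    try:
        async with websockets.connect(url) as ws:
            async for message in ws:
                print(message)
    except InvalidStatus as exc:
        # The rejected handshake response is attached to the exception, so a
        # 403 can be surfaced as a clearer, actionable error.
        if exc.response.status_code == 403:
            raise RuntimeError(f"Not authorized to subscribe at {url}") from exc
        raise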
prefect/telemetry/instrumentation.py
CHANGED
@@ -95,7 +95,7 @@ def _setup_meter_provider(
     resource: Resource, headers: dict[str, str], telemetry_url: str
 ) -> MeterProvider:
     metric_reader = PeriodicExportingMetricReader(
-        OTLPMetricExporter(
+        OTLPMetricExporter(  # pyright: ignore[reportArgumentType] `preferred_temporality` and `preferred_aggregation` default to `None`, but otel's typing doesn't include it
             endpoint=_url_join(telemetry_url, "v1/metrics"),
             headers=headers,
         )
@@ -109,7 +109,7 @@ def _setup_meter_provider(
 def _setup_logger_provider(
     resource: Resource, headers: dict[str, str], telemetry_url: str
 ) -> LoggerProvider:
-    logger_provider = LoggerProvider(resource=resource)
+    logger_provider = LoggerProvider(resource=resource)  # pyright: ignore[reportArgumentType] `multi_log_record_processor` defaults to `None` but otel's typing doesn't include it
     queueing_log_exporter = QueueingLogExporter.instance(
         _url_join(telemetry_url, "v1/logs"), tuple(headers.items())
     )
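
The telemetry changes are type-checker appeasements rather than behavior changes: each call gets a rule-scoped `# pyright: ignore[reportArgumentType]` with an inline note explaining why the diagnostic is a false positive in the OpenTelemetry stubs. A generic illustration of the convention, using a hypothetical third-party constructor rather than the real OTLP exporter:

from typing import Any


def MetricExporter(endpoint: str, preferred_temporality: Any = ...) -> None:
    """Stand-in for a third-party class whose stubs omit a usable default."""


# Scoping the suppression to a single rule on a single line keeps the rest of
# the file under full checking and records exactly what is being silenced.
MetricExporter(  # pyright: ignore[reportArgumentType] stub marks the default as required
    endpoint="https://telemetry.example/v1/metrics",
)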