prefect-client 3.2.7__py3-none-any.whl → 3.2.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/_build_info.py +3 -3
- prefect/_experimental/bundles.py +73 -0
- prefect/_waiters.py +254 -0
- prefect/client/subscriptions.py +2 -1
- prefect/events/clients.py +19 -17
- prefect/flow_runs.py +67 -35
- prefect/flows.py +3 -1
- prefect/futures.py +192 -22
- prefect/runner/runner.py +95 -34
- prefect/server/api/artifacts.py +5 -0
- prefect/server/api/automations.py +5 -0
- prefect/server/api/block_capabilities.py +5 -0
- prefect/server/api/block_documents.py +2 -0
- prefect/server/api/block_schemas.py +5 -0
- prefect/server/api/block_types.py +3 -1
- prefect/server/api/concurrency_limits.py +5 -0
- prefect/server/api/concurrency_limits_v2.py +5 -0
- prefect/server/api/deployments.py +2 -0
- prefect/server/api/events.py +5 -1
- prefect/server/api/flow_run_notification_policies.py +2 -0
- prefect/server/api/flow_run_states.py +2 -0
- prefect/server/api/flow_runs.py +2 -0
- prefect/server/api/flows.py +2 -0
- prefect/server/api/logs.py +5 -1
- prefect/server/api/task_run_states.py +2 -0
- prefect/server/api/task_runs.py +2 -0
- prefect/server/api/task_workers.py +5 -1
- prefect/server/api/variables.py +5 -0
- prefect/server/api/work_queues.py +2 -0
- prefect/server/api/workers.py +4 -0
- prefect/settings/profiles.py +6 -5
- prefect/task_worker.py +3 -3
- prefect/telemetry/instrumentation.py +2 -2
- prefect/utilities/templating.py +50 -11
- prefect/workers/base.py +3 -3
- prefect/workers/process.py +22 -319
- {prefect_client-3.2.7.dist-info → prefect_client-3.2.8.dist-info}/METADATA +2 -2
- {prefect_client-3.2.7.dist-info → prefect_client-3.2.8.dist-info}/RECORD +40 -39
- {prefect_client-3.2.7.dist-info → prefect_client-3.2.8.dist-info}/WHEEL +0 -0
- {prefect_client-3.2.7.dist-info → prefect_client-3.2.8.dist-info}/licenses/LICENSE +0 -0
prefect/runner/runner.py
CHANGED
```diff
@@ -39,6 +39,7 @@ import datetime
 import inspect
 import logging
 import os
+import shlex
 import shutil
 import signal
 import subprocess
@@ -227,6 +228,7 @@ class Runner:
         self._scheduled_task_scopes: set[anyio.abc.CancelScope] = set()
         self._deployment_ids: set[UUID] = set()
         self._flow_run_process_map: dict[UUID, ProcessMapEntry] = dict()
+        self.__flow_run_process_map_lock: asyncio.Lock | None = None
         self._flow_run_bundle_map: dict[UUID, SerializedBundle] = dict()

         self._tmp_dir: Path = (
@@ -243,6 +245,12 @@ class Runner:
         )
         self._flow_cache: LRUCache[UUID, "APIFlow"] = LRUCache(maxsize=100)

+    @property
+    def _flow_run_process_map_lock(self) -> asyncio.Lock:
+        if self.__flow_run_process_map_lock is None:
+            self.__flow_run_process_map_lock = asyncio.Lock()
+        return self.__flow_run_process_map_lock
+
     @sync_compatible
     async def add_deployment(
         self,
```
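The name-mangled attribute plus lazy property is a deliberate pattern: the `Runner` can be constructed synchronously, before any event loop exists, and the `asyncio.Lock` is only created on first access, inside the loop that will eventually await it. On Pythons before 3.10, `asyncio.Lock()` bound itself to the current event loop at construction time, so creating the lock eagerly in `__init__` risked "attached to a different loop" errors. A minimal standalone sketch of the same idea (names here are illustrative, not Prefect's):

```python
import asyncio


class ProcessTracker:
    def __init__(self) -> None:
        # safe to construct with no running event loop
        self._processes: dict[int, str] = {}
        self.__lock: asyncio.Lock | None = None

    @property
    def _lock(self) -> asyncio.Lock:
        # created on first use, inside the loop that will await it
        if self.__lock is None:
            self.__lock = asyncio.Lock()
        return self.__lock

    async def add(self, pid: int, name: str) -> None:
        async with self._lock:
            self._processes[pid] = name

    async def remove(self, pid: int) -> None:
        async with self._lock:
            self._processes.pop(pid, None)


tracker = ProcessTracker()  # no loop required at construction time
asyncio.run(tracker.add(1234, "my-flow-run"))
```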
```diff
@@ -550,13 +558,23 @@ class Runner:
         )

     async def execute_flow_run(
-        self, …
-        …
+        self,
+        flow_run_id: UUID,
+        entrypoint: str | None = None,
+        command: str | None = None,
+        cwd: Path | None = None,
+        env: dict[str, str | None] | None = None,
+        task_status: anyio.abc.TaskStatus[int] | None = None,
+        stream_output: bool = True,
+    ) -> anyio.abc.Process | None:
         """
         Executes a single flow run with the given ID.

         Execution will wait to monitor for cancellation requests. Exits once
         the flow run process has exited.
+
+        Returns:
+            The flow run process.
         """
         self.pause_on_shutdown = False
         context = self if not self.started else asyncnullcontext()
@@ -570,17 +588,26 @@ class Runner:
             self._submitting_flow_run_ids.add(flow_run_id)
             flow_run = await self._client.read_flow_run(flow_run_id)

-            …
+            process: anyio.abc.Process = await self._runs_task_group.start(
                 partial(
                     self._submit_run_and_capture_errors,
                     flow_run=flow_run,
                     entrypoint=entrypoint,
+                    command=command,
+                    cwd=cwd,
+                    env=env,
+                    stream_output=stream_output,
                 ),
             )
-            …
-            …
-            …
-            …
+            if task_status:
+                task_status.started(process.pid)
+
+            async with self._flow_run_process_map_lock:
+                # Only add the process to the map if it is still running
+                if process.returncode is None:
+                    self._flow_run_process_map[flow_run.id] = ProcessMapEntry(
+                        pid=process.pid, flow_run=flow_run
+                    )

             # We want this loop to stop when the flow run process exits
             # so we'll check if the flow run process is still alive on
```
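The `await self._runs_task_group.start(...)` call relies on anyio's structured-concurrency handshake: `TaskGroup.start()` runs the child task until it calls `task_status.started(value)`, then returns that value to the caller — which is how `execute_flow_run` gets a live process object back out of `_submit_run_and_capture_errors` (note the new `task_status_handler=lambda process: process` further down, which passes the whole process through rather than just a pid). A runnable sketch of the handshake, with illustrative names:

```python
import anyio
import anyio.abc


async def serve(
    *, task_status: anyio.abc.TaskStatus[int] = anyio.TASK_STATUS_IGNORED
) -> None:
    port = 8080  # pretend we just bound a socket here
    task_status.started(port)  # unblocks the caller of tg.start()
    await anyio.sleep(1)  # background work continues after the handshake


async def main() -> None:
    async with anyio.create_task_group() as tg:
        # blocks until serve() calls task_status.started(...),
        # and returns the value that was passed to started()
        port = await tg.start(serve)
        print(f"ready on port {port}")


anyio.run(main)
```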
```diff
@@ -609,6 +636,8 @@ class Runner:
                 )
             )

+        return process
+
     async def execute_bundle(self, bundle: SerializedBundle) -> None:
         """
         Executes a bundle in a subprocess.
@@ -696,7 +725,7 @@ class Runner:
             )
         elif (
             sys.platform == "win32"
-            and process.…
+            and process.exitcode == STATUS_CONTROL_C_EXIT
         ):
             level = logging.INFO
             help_message = (
@@ -733,9 +762,13 @@ class Runner:
     async def _run_process(
         self,
         flow_run: "FlowRun",
-        task_status: …
-        entrypoint: …
-        …
+        task_status: anyio.abc.TaskStatus[anyio.abc.Process] | None = None,
+        entrypoint: str | None = None,
+        command: str | None = None,
+        cwd: Path | None = None,
+        env: dict[str, str | None] | None = None,
+        stream_output: bool = True,
+    ) -> anyio.abc.Process:
         """
         Runs the given flow run in a subprocess.

```
```diff
@@ -747,7 +780,10 @@ class Runner:
         task_status: anyio task status used to send a message to the caller
             than the flow run process has started.
         """
-        command…
+        if command is None:
+            runner_command = [get_sys_executable(), "-m", "prefect.engine"]
+        else:
+            runner_command = shlex.split(command)

         flow_run_logger = self._get_flow_run_logger(flow_run)

```
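`shlex.split` is a good fit for the new `command` override: it tokenizes shell-style quoting without ever invoking a shell, so the result can be passed straight to a subprocess API as an argument list. For example (the command strings below are just illustrations):

```python
import shlex

print(shlex.split("python -m prefect.engine"))
# ['python', '-m', 'prefect.engine']

print(shlex.split('uv run --with "prefect>=3.2" python -m prefect.engine'))
# ['uv', 'run', '--with', 'prefect>=3.2', 'python', '-m', 'prefect.engine']
```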
```diff
@@ -760,7 +796,9 @@ class Runner:

         flow_run_logger.info("Opening process...")

-        env…
+        if env is None:
+            env = {}
+        env.update(get_current_settings().to_environment_variables(exclude_unset=True))
         env.update(
             {
                 **{
@@ -798,12 +836,12 @@ class Runner:
             setattr(storage, "last_adhoc_pull", datetime.datetime.now())

         process = await run_process(
-            command=…
-            stream_output=…
+            command=runner_command,
+            stream_output=stream_output,
             task_status=task_status,
-            task_status_handler=…
+            task_status_handler=lambda process: process,
             env=env,
-            cwd=storage.destination if storage else …
+            cwd=storage.destination if storage else cwd,
             **kwargs,
         )

@@ -852,7 +890,7 @@ class Runner:
                 f"Process for flow run {flow_run.name!r} exited cleanly."
             )

-        return process…
+        return process

     async def _kill_process(
         self,
@@ -1316,9 +1354,10 @@ class Runner:
             )

         if readiness_result and not isinstance(readiness_result, Exception):
-            self.…
-            …
-            …
+            async with self._flow_run_process_map_lock:
+                self._flow_run_process_map[flow_run.id] = ProcessMapEntry(
+                    pid=readiness_result, flow_run=flow_run
+                )
         # Heartbeats are opt-in and only emitted if a heartbeat frequency is set
         if self.heartbeat_seconds is not None:
             await self._emit_flow_run_heartbeat(flow_run)
@@ -1333,17 +1372,26 @@ class Runner:
     async def _submit_run_and_capture_errors(
         self,
         flow_run: "FlowRun",
-        task_status: anyio.abc.TaskStatus[…
-        entrypoint: …
+        task_status: anyio.abc.TaskStatus[anyio.abc.Process | Exception],
+        entrypoint: str | None = None,
+        command: str | None = None,
+        cwd: Path | None = None,
+        env: dict[str, str | None] | None = None,
+        stream_output: bool = True,
     ) -> Union[Optional[int], Exception]:
         run_logger = self._get_flow_run_logger(flow_run)

         try:
-            …
+            process = await self._run_process(
                 flow_run=flow_run,
                 task_status=task_status,
                 entrypoint=entrypoint,
+                command=command,
+                cwd=cwd,
+                env=env,
+                stream_output=stream_output,
             )
+            status_code = process.returncode
         except Exception as exc:
             if task_status:
                 # This flow run was being submitted and did not start successfully
@@ -1363,7 +1411,9 @@ class Runner:
             return exc
         finally:
             self._release_limit_slot(flow_run.id)
-            …
+
+            async with self._flow_run_process_map_lock:
+                self._flow_run_process_map.pop(flow_run.id, None)

         if status_code != 0:
             await self._propose_crashed_state(
@@ -1513,6 +1563,7 @@ class Runner:
         """
         Run the hooks for a flow.
         """
+        run_logger = self._get_flow_run_logger(flow_run)
         if state.is_cancelling():
             try:
                 if flow_run.id in self._flow_run_bundle_map:
@@ -1520,6 +1571,7 @@ class Runner:
                         self._flow_run_bundle_map[flow_run.id]
                     )
                 else:
+                    run_logger.info("Loading flow to check for on_cancellation hooks")
                     flow = await load_flow_from_flow_run(
                         flow_run, storage_base_path=str(self._tmp_dir)
                     )
@@ -1529,7 +1581,7 @@ class Runner:
             except ObjectNotFound:
                 run_logger = self._get_flow_run_logger(flow_run)
                 run_logger.warning(
-                    f"Runner …
+                    f"Runner failed to retrieve flow to execute on_cancellation hooks for flow run {flow_run.id!r}."
                 )

     async def _run_on_crashed_hooks(
@@ -1540,16 +1592,25 @@ class Runner:
         """
         Run the hooks for a flow.
         """
+        run_logger = self._get_flow_run_logger(flow_run)
         if state.is_crashed():
-            …
-            …
-            …
-            …
-            …
-            …
-            …
+            try:
+                if flow_run.id in self._flow_run_bundle_map:
+                    flow = extract_flow_from_bundle(
+                        self._flow_run_bundle_map[flow_run.id]
+                    )
+                else:
+                    run_logger.info("Loading flow to check for on_crashed hooks")
+                    flow = await load_flow_from_flow_run(
+                        flow_run, storage_base_path=str(self._tmp_dir)
+                    )
+                hooks = flow.on_crashed_hooks or []

-            …
+                await _run_hooks(hooks, flow_run, flow, state)
+            except ObjectNotFound:
+                run_logger.warning(
+                    f"Runner failed to retrieve flow to execute on_crashed hooks for flow run {flow_run.id!r}."
+                )

     async def __aenter__(self) -> Self:
         self._logger.debug("Starting runner...")
```
prefect/server/api/artifacts.py
CHANGED
```diff
@@ -26,6 +26,11 @@ async def create_artifact(
     response: Response,
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> core.Artifact:
+    """
+    Create an artifact.
+
+    For more information, see https://docs.prefect.io/v3/develop/artifacts.
+    """
     artifact = core.Artifact(**artifact.model_dump())

     right_now = now("UTC")
```
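The same change repeats across the server API modules below: each route handler gains a docstring ending in a docs link. Because these are FastAPI handlers, the docstring is lifted into the generated OpenAPI schema as the operation description, so the links surface in the interactive API reference. A standalone illustration (not Prefect's code):

```python
from fastapi import FastAPI

app = FastAPI()


@app.post("/artifacts/")
async def create_artifact() -> dict[str, str]:
    """
    Create an artifact.

    For more information, see https://docs.prefect.io/v3/develop/artifacts.
    """
    return {"status": "created"}


# the docstring becomes the operation's OpenAPI description
print(app.openapi()["paths"]["/artifacts/"]["post"]["description"])
```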
prefect/server/api/automations.py
CHANGED
```diff
@@ -39,6 +39,11 @@ async def create_automation(
     automation: AutomationCreate,
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> Automation:
+    """
+    Create an automation.
+
+    For more information, see https://docs.prefect.io/v3/automate.
+    """
     # reset any client-provided IDs on the provided triggers
     automation.trigger.reset_ids()

```
prefect/server/api/block_capabilities.py
CHANGED
```diff
@@ -19,6 +19,11 @@ router: PrefectRouter = PrefectRouter(
 async def read_available_block_capabilities(
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> List[str]:
+    """
+    Get available block capabilities.
+
+    For more information, see https://docs.prefect.io/v3/develop/blocks.
+    """
     async with db.session_context() as session:
         return await models.block_schemas.read_available_block_capabilities(
             session=session
```
prefect/server/api/block_documents.py
CHANGED
```diff
@@ -24,6 +24,8 @@ async def create_block_document(
 ) -> schemas.core.BlockDocument:
     """
     Create a new block document.
+
+    For more information, see https://docs.prefect.io/v3/develop/blocks.
     """
     async with db.session_context(begin_transaction=True) as session:
         if block_document.name is not None:
```
prefect/server/api/block_schemas.py
CHANGED
```diff
@@ -30,6 +30,11 @@ async def create_block_schema(
     response: Response,
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> schemas.core.BlockSchema:
+    """
+    Create a block schema.
+
+    For more information, see https://docs.prefect.io/v3/develop/blocks.
+    """
     from prefect.blocks.core import Block

     async with db.session_context(begin_transaction=True) as session:
```
prefect/server/api/block_types.py
CHANGED
```diff
@@ -19,7 +19,9 @@ async def create_block_type(
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> schemas.core.BlockType:
     """
-    Create a new block type
+    Create a new block type.
+
+    For more information, see https://docs.prefect.io/v3/develop/blocks.
     """
     # API-created blocks cannot start with the word "Prefect"
     # as it is reserved for system use
```
prefect/server/api/concurrency_limits.py
CHANGED
```diff
@@ -28,6 +28,11 @@ async def create_concurrency_limit(
     response: Response,
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> schemas.core.ConcurrencyLimit:
+    """
+    Create a task run concurrency limit.
+
+    For more information, see https://docs.prefect.io/v3/develop/task-run-limits.
+    """
     # hydrate the input model into a full model
     concurrency_limit_model = schemas.core.ConcurrencyLimit(
         **concurrency_limit.model_dump()
```
prefect/server/api/concurrency_limits_v2.py
CHANGED
```diff
@@ -21,6 +21,11 @@ async def create_concurrency_limit_v2(
     concurrency_limit: actions.ConcurrencyLimitV2Create,
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> schemas.core.ConcurrencyLimitV2:
+    """
+    Create a task run concurrency limit.
+
+    For more information, see https://docs.prefect.io/v3/develop/global-concurrency-limits.
+    """
     async with db.session_context(begin_transaction=True) as session:
         model = await models.concurrency_limits_v2.create_concurrency_limit(
             session=session, concurrency_limit=concurrency_limit
```
prefect/server/api/deployments.py
CHANGED
```diff
@@ -68,6 +68,8 @@ async def create_deployment(

     If the deployment has an active schedule, flow runs will be scheduled.
     When upserting, any scheduled runs from the existing deployment will be deleted.
+
+    For more information, see https://docs.prefect.io/v3/deploy.
     """

     data = deployment.model_dump(exclude_unset=True)
```
prefect/server/api/events.py
CHANGED
```diff
@@ -48,7 +48,11 @@ async def create_events(
     events: List[Event],
     ephemeral_request: bool = Depends(is_ephemeral_request),
 ) -> None:
-    """…
+    """
+    Record a batch of Events.
+
+    For more information, see https://docs.prefect.io/v3/automate/events/events.
+    """
     if ephemeral_request:
         await EventsPipeline().process_events(events)
     else:
```
prefect/server/api/flow_run_notification_policies.py
CHANGED
```diff
@@ -25,6 +25,8 @@ async def create_flow_run_notification_policy(
 ) -> schemas.core.FlowRunNotificationPolicy:
     """
     Creates a new flow run notification policy.
+
+    For more information, see https://docs.prefect.io/v3/automate/events/automations-triggers#sending-notifications-with-automations.
     """
     async with db.session_context(begin_transaction=True) as session:
         return await models.flow_run_notification_policies.create_flow_run_notification_policy(
```
prefect/server/api/flow_run_states.py
CHANGED
```diff
@@ -26,6 +26,8 @@ async def read_flow_run_state(
 ) -> schemas.states.State:
     """
     Get a flow run state by id.
+
+    For more information, see https://docs.prefect.io/v3/develop/write-flows#final-state-determination.
     """
     async with db.session_context() as session:
         flow_run_state = await models.flow_run_states.read_flow_run_state(
```
prefect/server/api/flow_runs.py
CHANGED
```diff
@@ -74,6 +74,8 @@ async def create_flow_run(
     idempotency key already exists, the existing flow run will be returned.

     If no state is provided, the flow run will be created in a PENDING state.
+
+    For more information, see https://docs.prefect.io/v3/develop/write-flows.
     """
     # hydrate the input model into a full flow run / state model
     flow_run_object = schemas.core.FlowRun(
```
prefect/server/api/flows.py
CHANGED
```diff
@@ -27,6 +27,8 @@ async def create_flow(
 ) -> schemas.core.Flow:
     """Gracefully creates a new flow from the provided schema. If a flow with the
     same name already exists, the existing flow is returned.
+
+    For more information, see https://docs.prefect.io/v3/develop/write-flows.
     """
     # hydrate the input model into a full flow model
     flow = schemas.core.Flow(**flow.model_dump())
```
prefect/server/api/logs.py
CHANGED
```diff
@@ -20,7 +20,11 @@ async def create_logs(
     logs: List[schemas.actions.LogCreate],
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> None:
-    """…
+    """
+    Create new logs from the provided schema.
+
+    For more information, see https://docs.prefect.io/v3/develop/logging.
+    """
     for batch in models.logs.split_logs_into_batches(logs):
         async with db.session_context(begin_transaction=True) as session:
             await models.logs.create_logs(session=session, logs=batch)
```
prefect/server/api/task_run_states.py
CHANGED
```diff
@@ -26,6 +26,8 @@ async def read_task_run_state(
 ) -> schemas.states.State:
     """
     Get a task run state by id.
+
+    For more information, see https://docs.prefect.io/v3/develop/write-tasks.
     """
     async with db.session_context() as session:
         task_run_state = await models.task_run_states.read_task_run_state(
```
prefect/server/api/task_runs.py
CHANGED
```diff
@@ -57,6 +57,8 @@ async def create_task_run(
     run will be returned.

     If no state is provided, the task run will be created in a PENDING state.
+
+    For more information, see https://docs.prefect.io/v3/develop/write-tasks.
     """
     # hydrate the input model into a full task run / state model
     task_run_dict = task_run.model_dump()
```
prefect/server/api/task_workers.py
CHANGED
```diff
@@ -20,7 +20,11 @@ async def read_task_workers(
         default=None, description="The task worker filter", embed=True
     ),
 ) -> List[TaskWorkerResponse]:
-    """…
+    """
+    Read active task workers. Optionally filter by task keys.
+
+    For more information, see https://docs.prefect.io/v3/develop/deferred-tasks.
+    """

     if task_worker_filter and task_worker_filter.task_keys:
         return await models.task_workers.get_workers_for_task_keys(
```
prefect/server/api/variables.py
CHANGED
```diff
@@ -57,6 +57,11 @@ async def create_variable(
     variable: actions.VariableCreate,
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> core.Variable:
+    """
+    Create a variable.
+
+    For more information, see https://docs.prefect.io/v3/develop/variables.
+    """
     async with db.session_context(begin_transaction=True) as session:
         try:
             model = await models.variables.create_variable(
```
prefect/server/api/workers.py
CHANGED
```diff
@@ -161,6 +161,8 @@ async def create_work_pool(
     """
     Creates a new work pool. If a work pool with the same
     name already exists, an error will be raised.
+
+    For more information, see https://docs.prefect.io/v3/deploy/infrastructure-concepts/work-pools.
     """
     if work_pool.name.lower().startswith("prefect"):
         raise HTTPException(
@@ -408,6 +410,8 @@ async def create_work_queue(
     """
     Creates a new work pool queue. If a work pool queue with the same
     name already exists, an error will be raised.
+
+    For more information, see https://docs.prefect.io/v3/deploy/infrastructure-concepts/work-pools#work-queues.
     """

     try:
```
prefect/settings/profiles.py
CHANGED
```diff
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import inspect
 import warnings
 from pathlib import Path
@@ -9,7 +11,6 @@ from typing import (
     Iterable,
     Iterator,
     Optional,
-    Set,
     Union,
 )

@@ -32,8 +33,8 @@ from prefect.settings.models.root import Settings


 def _cast_settings(
-    settings: …
-) -> …
+    settings: dict[str | Setting, Any] | Any,
+) -> dict[Setting, Any]:
     """For backwards compatibility, allow either Settings objects as keys or string references to settings."""
     if not isinstance(settings, dict):
         raise ValueError("Settings must be a dictionary.")
@@ -63,7 +64,7 @@ class Profile(BaseModel):
     )

     name: str
-    settings: Annotated[…
+    settings: Annotated[dict[Setting, Any], BeforeValidator(_cast_settings)] = Field(
         default_factory=dict
     )
     source: Optional[Path] = None
```
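`_cast_settings` is wired in through `Annotated[..., BeforeValidator(...)]`, Pydantic v2's hook for coercing raw input before the field's declared type is validated — here letting callers keep passing string setting names while the model stores `Setting` keys. A self-contained sketch of the mechanism, with a hypothetical coercion function:

```python
from typing import Annotated, Any

from pydantic import BaseModel, BeforeValidator, Field


def _lowercase_keys(value: Any) -> Any:
    # runs before Pydantic validates the field's declared type
    if isinstance(value, dict):
        return {str(k).lower(): v for k, v in value.items()}
    return value


class ExampleProfile(BaseModel):
    name: str
    settings: Annotated[
        dict[str, Any], BeforeValidator(_lowercase_keys)
    ] = Field(default_factory=dict)


profile = ExampleProfile(name="dev", settings={"API_URL": "http://localhost:4200"})
print(profile.settings)  # {'api_url': 'http://localhost:4200'}
```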
```diff
@@ -114,7 +115,7 @@ class ProfilesCollection:
         self.active_name = active

     @property
-    def names(self) -> …
+    def names(self) -> set[str]:
         """
         Return a set of profile names in this collection.
         """
```
prefect/task_worker.py
CHANGED
```diff
@@ -18,7 +18,7 @@ import uvicorn
 from exceptiongroup import BaseExceptionGroup  # novermin
 from fastapi import FastAPI
 from typing_extensions import ParamSpec, Self, TypeVar
-from websockets.exceptions import …
+from websockets.exceptions import InvalidStatus

 from prefect import Task
 from prefect._internal.concurrency.api import create_call, from_sync
@@ -181,8 +181,8 @@ class TaskWorker:
         logger.info("Starting task worker...")
         try:
             await self._subscribe_to_task_scheduling()
-        except …
-            if exc.status_code == 403:
+        except InvalidStatus as exc:
+            if exc.response.status_code == 403:
                 logger.error(
                     "403: Could not establish a connection to the `/task_runs/subscriptions/scheduled`"
                     f" endpoint found at:\n\n {PREFECT_API_URL.value()}"
```
prefect/telemetry/instrumentation.py
CHANGED
```diff
@@ -95,7 +95,7 @@ def _setup_meter_provider(
     resource: Resource, headers: dict[str, str], telemetry_url: str
 ) -> MeterProvider:
     metric_reader = PeriodicExportingMetricReader(
-        OTLPMetricExporter(
+        OTLPMetricExporter(  # pyright: ignore[reportArgumentType] `preferred_temporality` and `preferred_aggregation` default to `None`, but otel's typing doesn't include it
             endpoint=_url_join(telemetry_url, "v1/metrics"),
             headers=headers,
         )
@@ -109,7 +109,7 @@ def _setup_meter_provider(
 def _setup_logger_provider(
     resource: Resource, headers: dict[str, str], telemetry_url: str
 ) -> LoggerProvider:
-    logger_provider = LoggerProvider(resource=resource)
+    logger_provider = LoggerProvider(resource=resource)  # pyright: ignore[reportArgumentType] `multi_log_record_processor` defaults to `None` but otel's typing doesn't include it
     queueing_log_exporter = QueueingLogExporter.instance(
         _url_join(telemetry_url, "v1/logs"), tuple(headers.items())
     )
```