prefect-client 2.18.0__py3-none-any.whl → 2.18.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/_internal/schemas/fields.py +31 -12
- prefect/automations.py +162 -0
- prefect/blocks/core.py +1 -1
- prefect/blocks/notifications.py +2 -2
- prefect/blocks/system.py +2 -3
- prefect/client/orchestration.py +309 -30
- prefect/client/schemas/objects.py +11 -8
- prefect/client/schemas/sorting.py +9 -0
- prefect/client/utilities.py +25 -3
- prefect/concurrency/asyncio.py +11 -5
- prefect/concurrency/events.py +3 -3
- prefect/concurrency/services.py +1 -1
- prefect/concurrency/sync.py +9 -5
- prefect/deployments/deployments.py +27 -18
- prefect/deployments/runner.py +34 -26
- prefect/engine.py +3 -1
- prefect/events/actions.py +2 -1
- prefect/events/cli/automations.py +207 -46
- prefect/events/clients.py +53 -20
- prefect/events/filters.py +31 -4
- prefect/events/instrument.py +40 -40
- prefect/events/related.py +2 -1
- prefect/events/schemas/automations.py +52 -7
- prefect/events/schemas/deployment_triggers.py +16 -228
- prefect/events/schemas/events.py +18 -11
- prefect/events/schemas/labelling.py +1 -1
- prefect/events/utilities.py +1 -1
- prefect/events/worker.py +10 -7
- prefect/flows.py +42 -24
- prefect/input/actions.py +9 -9
- prefect/input/run_input.py +51 -37
- prefect/new_flow_engine.py +444 -0
- prefect/new_task_engine.py +488 -0
- prefect/results.py +3 -2
- prefect/runner/runner.py +3 -2
- prefect/server/api/collections_data/views/aggregate-worker-metadata.json +45 -4
- prefect/settings.py +47 -0
- prefect/states.py +25 -19
- prefect/tasks.py +146 -19
- prefect/utilities/asyncutils.py +41 -0
- prefect/utilities/engine.py +6 -4
- prefect/utilities/schema_tools/validation.py +1 -1
- prefect/workers/process.py +2 -1
- {prefect_client-2.18.0.dist-info → prefect_client-2.18.2.dist-info}/METADATA +1 -1
- {prefect_client-2.18.0.dist-info → prefect_client-2.18.2.dist-info}/RECORD +48 -46
- prefect/concurrency/common.py +0 -0
- {prefect_client-2.18.0.dist-info → prefect_client-2.18.2.dist-info}/LICENSE +0 -0
- {prefect_client-2.18.0.dist-info → prefect_client-2.18.2.dist-info}/WHEEL +0 -0
- {prefect_client-2.18.0.dist-info → prefect_client-2.18.2.dist-info}/top_level.txt +0 -0
prefect/settings.py
CHANGED
@@ -1214,6 +1214,36 @@ PREFECT_API_SERVICES_CANCELLATION_CLEANUP_LOOP_SECONDS = Setting(
 this often. Defaults to `20`.
 """
 
+PREFECT_API_SERVICES_FOREMAN_ENABLED = Setting(bool, default=True)
+"""Whether or not to start the Foreman service in the server application."""
+
+PREFECT_API_SERVICES_FOREMAN_LOOP_SECONDS = Setting(float, default=15)
+"""The number of seconds to wait between each iteration of the Foreman loop which checks
+for offline workers and updates work pool status."""
+
+
+PREFECT_API_SERVICES_FOREMAN_INACTIVITY_HEARTBEAT_MULTIPLE = Setting(int, default=3)
+"The number of heartbeats that must be missed before a worker is marked as offline."
+
+PREFECT_API_SERVICES_FOREMAN_FALLBACK_HEARTBEAT_INTERVAL_SECONDS = Setting(
+    int, default=30
+)
+"""The number of seconds to use for online/offline evaluation if a worker's heartbeat
+interval is not set."""
+
+PREFECT_API_SERVICES_FOREMAN_DEPLOYMENT_LAST_POLLED_TIMEOUT_SECONDS = Setting(
+    int, default=60
+)
+"""The number of seconds before a deployment is marked as not ready if it has not been
+polled."""
+
+PREFECT_API_SERVICES_FOREMAN_WORK_QUEUE_LAST_POLLED_TIMEOUT_SECONDS = Setting(
+    int, default=60
+)
+"""The number of seconds before a work queue is marked as not ready if it has not been
+polled."""
+
+
 PREFECT_API_DEFAULT_LIMIT = Setting(
     int,
     default=200,
@@ -1579,6 +1609,11 @@ PREFECT_EXPERIMENTAL_ENABLE_WORK_QUEUE_STATUS = Setting(bool, default=True)
 Whether or not to enable experimental work queue status in-place of work queue health.
 """
 
+PREFECT_EXPERIMENTAL_ENABLE_NEW_ENGINE = Setting(bool, default=False)
+"""
+Whether or not to enable experimental new engine.
+"""
+
 
 # Defaults -----------------------------------------------------------------------------
 
@@ -1693,11 +1728,23 @@ PREFECT_API_SERVICES_EVENT_PERSISTER_FLUSH_INTERVAL = Setting(float, default=5,
 The maximum number of seconds between flushes of the event persister.
 """
 
+PREFECT_EVENTS_RETENTION_PERIOD = Setting(timedelta, default=timedelta(days=7))
+"""
+The amount of time to retain events in the database.
+"""
+
 PREFECT_API_EVENTS_STREAM_OUT_ENABLED = Setting(bool, default=True)
 """
 Whether or not to allow streaming events out of via websockets.
 """
 
+PREFECT_API_EVENTS_RELATED_RESOURCE_CACHE_TTL = Setting(
+    timedelta, default=timedelta(minutes=5)
+)
+"""
+How long to cache related resource data for emitting server-side vents
+"""
+
 # Deprecated settings ------------------------------------------------------------------
 
 
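The new settings above follow the usual Prefect settings API. A minimal sketch of reading and temporarily overriding them; the override values are illustrative only, and `temporary_settings` is the standard helper exported by `prefect.settings`:

```python
from prefect.settings import (
    PREFECT_API_SERVICES_FOREMAN_LOOP_SECONDS,
    PREFECT_EXPERIMENTAL_ENABLE_NEW_ENGINE,
    temporary_settings,
)

# Read the current values (defaults shown in the diff above: 15 and False).
print(PREFECT_API_SERVICES_FOREMAN_LOOP_SECONDS.value())
print(PREFECT_EXPERIMENTAL_ENABLE_NEW_ENGINE.value())

# Override them for a scoped block, e.g. in a test; the values are illustrative.
with temporary_settings(
    updates={
        PREFECT_API_SERVICES_FOREMAN_LOOP_SECONDS: 5,
        PREFECT_EXPERIMENTAL_ENABLE_NEW_ENGINE: True,
    }
):
    assert PREFECT_EXPERIMENTAL_ENABLE_NEW_ENGINE.value() is True
```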
prefect/states.py
CHANGED
@@ -486,8 +486,10 @@ class StateGroup:
 
 
 def Scheduled(
-    cls: Type[State] = State, scheduled_time: datetime.datetime = None, **kwargs
-) -> State:
+    cls: Type[State[R]] = State,
+    scheduled_time: Optional[datetime.datetime] = None,
+    **kwargs: Any,
+) -> State[R]:
     """Convenience function for creating `Scheduled` states.
 
     Returns:
@@ -503,7 +505,7 @@ def Scheduled(
     return cls(type=StateType.SCHEDULED, state_details=state_details, **kwargs)
 
 
-def Completed(cls: Type[State] = State, **kwargs) -> State:
+def Completed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     """Convenience function for creating `Completed` states.
 
     Returns:
@@ -512,7 +514,7 @@ def Completed(cls: Type[State] = State, **kwargs) -> State:
     return cls(type=StateType.COMPLETED, **kwargs)
 
 
-def Running(cls: Type[State] = State, **kwargs) -> State:
+def Running(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     """Convenience function for creating `Running` states.
 
     Returns:
@@ -521,7 +523,7 @@ def Running(cls: Type[State] = State, **kwargs) -> State:
     return cls(type=StateType.RUNNING, **kwargs)
 
 
-def Failed(cls: Type[State] = State, **kwargs) -> State:
+def Failed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     """Convenience function for creating `Failed` states.
 
     Returns:
@@ -530,7 +532,7 @@ def Failed(cls: Type[State] = State, **kwargs) -> State:
     return cls(type=StateType.FAILED, **kwargs)
 
 
-def Crashed(cls: Type[State] = State, **kwargs) -> State:
+def Crashed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     """Convenience function for creating `Crashed` states.
 
     Returns:
@@ -539,7 +541,7 @@ def Crashed(cls: Type[State] = State, **kwargs) -> State:
     return cls(type=StateType.CRASHED, **kwargs)
 
 
-def Cancelling(cls: Type[State] = State, **kwargs) -> State:
+def Cancelling(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     """Convenience function for creating `Cancelling` states.
 
     Returns:
@@ -548,7 +550,7 @@ def Cancelling(cls: Type[State] = State, **kwargs) -> State:
     return cls(type=StateType.CANCELLING, **kwargs)
 
 
-def Cancelled(cls: Type[State] = State, **kwargs) -> State:
+def Cancelled(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     """Convenience function for creating `Cancelled` states.
 
     Returns:
@@ -557,7 +559,7 @@ def Cancelled(cls: Type[State] = State, **kwargs) -> State:
     return cls(type=StateType.CANCELLED, **kwargs)
 
 
-def Pending(cls: Type[State] = State, **kwargs) -> State:
+def Pending(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     """Convenience function for creating `Pending` states.
 
     Returns:
@@ -567,13 +569,13 @@ def Pending(cls: Type[State] = State, **kwargs) -> State:
 
 
 def Paused(
-    cls: Type[State] = State,
+    cls: Type[State[R]] = State,
     timeout_seconds: Optional[int] = None,
     pause_expiration_time: Optional[datetime.datetime] = None,
     reschedule: bool = False,
     pause_key: Optional[str] = None,
-    **kwargs,
-) -> State:
+    **kwargs: Any,
+) -> State[R]:
     """Convenience function for creating `Paused` states.
 
     Returns:
@@ -603,11 +605,11 @@ def Paused(
 
 
 def Suspended(
-    cls: Type[State] = State,
+    cls: Type[State[R]] = State,
     timeout_seconds: Optional[int] = None,
     pause_expiration_time: Optional[datetime.datetime] = None,
     pause_key: Optional[str] = None,
-    **kwargs,
+    **kwargs: Any,
 ):
     """Convenience function for creating `Suspended` states.
 
@@ -626,8 +628,10 @@ def Suspended(
 
 
 def AwaitingRetry(
-    cls: Type[State] = State, scheduled_time: datetime.datetime = None, **kwargs
-) -> State:
+    cls: Type[State[R]] = State,
+    scheduled_time: Optional[datetime.datetime] = None,
+    **kwargs: Any,
+) -> State[R]:
     """Convenience function for creating `AwaitingRetry` states.
 
     Returns:
@@ -638,7 +642,7 @@ def AwaitingRetry(
     )
 
 
-def Retrying(cls: Type[State] = State, **kwargs) -> State:
+def Retrying(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     """Convenience function for creating `Retrying` states.
 
     Returns:
@@ -648,8 +652,10 @@ def Retrying(cls: Type[State] = State, **kwargs) -> State:
 
 
 def Late(
-    cls: Type[State] = State, scheduled_time: datetime.datetime = None, **kwargs
-) -> State:
+    cls: Type[State[R]] = State,
+    scheduled_time: Optional[datetime.datetime] = None,
+    **kwargs: Any,
+) -> State[R]:
     """Convenience function for creating `Late` states.
 
     Returns:
prefect/tasks.py
CHANGED
@@ -22,25 +22,30 @@ from typing import (
     List,
     NoReturn,
     Optional,
+    Set,
     TypeVar,
     Union,
     cast,
     overload,
 )
+from uuid import uuid4
 
 from typing_extensions import Literal, ParamSpec
 
 from prefect._internal.concurrency.api import create_call, from_async, from_sync
 from prefect.client.schemas import TaskRun
-from prefect.
+from prefect.client.schemas.objects import TaskRunInput
+from prefect.context import FlowRunContext, PrefectObjectRegistry, TagsContext
 from prefect.futures import PrefectFuture
+from prefect.logging.loggers import get_logger, get_run_logger
 from prefect.results import ResultSerializer, ResultStorage
 from prefect.settings import (
+    PREFECT_EXPERIMENTAL_ENABLE_NEW_ENGINE,
     PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING,
     PREFECT_TASK_DEFAULT_RETRIES,
     PREFECT_TASK_DEFAULT_RETRY_DELAY_SECONDS,
 )
-from prefect.states import State
+from prefect.states import Pending, State
 from prefect.task_runners import BaseTaskRunner
 from prefect.utilities.annotations import NotSet
 from prefect.utilities.asyncutils import Async, Sync
@@ -64,6 +69,8 @@ T = TypeVar("T")  # Generic type var for capturing the inner return type of asyn
 R = TypeVar("R")  # The return type of the user's function
 P = ParamSpec("P")  # The parameters of the task
 
+logger = get_logger("tasks")
+
 
 def task_input_hash(
     context: "TaskRunContext", arguments: Dict[str, Any]
@@ -189,14 +196,14 @@ class Task(Generic[P, R]):
     def __init__(
         self,
         fn: Callable[P, R],
-        name: str = None,
-        description: str = None,
-        tags: Iterable[str] = None,
-        version: str = None,
-        cache_key_fn: Callable[
-            ["TaskRunContext", Dict[str, Any]], Optional[str]
+        name: Optional[str] = None,
+        description: Optional[str] = None,
+        tags: Optional[Iterable[str]] = None,
+        version: Optional[str] = None,
+        cache_key_fn: Optional[
+            Callable[["TaskRunContext", Dict[str, Any]], Optional[str]]
         ] = None,
-        cache_expiration: datetime.timedelta = None,
+        cache_expiration: Optional[datetime.timedelta] = None,
         task_run_name: Optional[Union[Callable[[], str], str]] = None,
         retries: Optional[int] = None,
         retry_delay_seconds: Optional[
@@ -213,7 +220,7 @@ class Task(Generic[P, R]):
         result_serializer: Optional[ResultSerializer] = None,
         result_storage_key: Optional[str] = None,
         cache_result_in_memory: bool = True,
-        timeout_seconds: Union[int, float] = None,
+        timeout_seconds: Union[int, float, None] = None,
         log_prints: Optional[bool] = False,
         refresh_cache: Optional[bool] = None,
         on_completion: Optional[List[Callable[["Task", TaskRun, State], None]]] = None,
@@ -325,6 +332,7 @@ class Task(Generic[P, R]):
         self.result_serializer = result_serializer
         self.result_storage_key = result_storage_key
         self.cache_result_in_memory = cache_result_in_memory
+
         self.timeout_seconds = float(timeout_seconds) if timeout_seconds else None
         # Warn if this task's `name` conflicts with another task while having a
         # different function. This is to detect the case where two or more tasks
@@ -529,6 +537,53 @@ class Task(Generic[P, R]):
             viz_return_value=viz_return_value or self.viz_return_value,
         )
 
+    async def create_run(
+        self,
+        flow_run_context: FlowRunContext,
+        parameters: Dict[str, Any],
+        wait_for: Optional[Iterable[PrefectFuture]],
+        extra_task_inputs: Optional[Dict[str, Set[TaskRunInput]]] = None,
+    ) -> TaskRun:
+        # TODO: Investigate if we can replace create_task_run on the task run engine
+        # with this method. Would require updating to work without the flow run context.
+        from prefect.utilities.engine import (
+            _dynamic_key_for_task_run,
+            collect_task_run_inputs,
+        )
+
+        dynamic_key = _dynamic_key_for_task_run(flow_run_context, self)
+        task_inputs = {
+            k: await collect_task_run_inputs(v) for k, v in parameters.items()
+        }
+        if wait_for:
+            task_inputs["wait_for"] = await collect_task_run_inputs(wait_for)
+
+        # Join extra task inputs
+        extra_task_inputs = extra_task_inputs or {}
+        for k, extras in extra_task_inputs.items():
+            task_inputs[k] = task_inputs[k].union(extras)
+
+        flow_run_logger = get_run_logger(flow_run_context)
+
+        task_run = await flow_run_context.client.create_task_run(
+            task=self,
+            name=f"{self.name} - {dynamic_key}",
+            flow_run_id=flow_run_context.flow_run.id,
+            dynamic_key=dynamic_key,
+            state=Pending(),
+            extra_tags=TagsContext.get().current_tags,
+            task_inputs=task_inputs,
+        )
+
+        if flow_run_context.flow_run:
+            flow_run_logger.info(
+                f"Created task run {task_run.name!r} for task {self.name!r}"
+            )
+        else:
+            logger.info(f"Created task run {task_run.name!r} for task {self.name!r}")
+
+        return task_run
+
     @overload
     def __call__(
         self: "Task[P, NoReturn]",
@@ -582,6 +637,22 @@ class Task(Generic[P, R]):
                 self.isasync, self.name, parameters, self.viz_return_value
             )
 
+        # new engine currently only compatible with async tasks
+        if PREFECT_EXPERIMENTAL_ENABLE_NEW_ENGINE.value():
+            from prefect.new_task_engine import run_task, run_task_sync
+
+            run_kwargs = dict(
+                task=self,
+                parameters=parameters,
+                wait_for=wait_for,
+                return_type=return_type,
+            )
+            if self.isasync:
+                # this returns an awaitable coroutine
+                return run_task(**run_kwargs)
+            else:
+                return run_task_sync(**run_kwargs)
+
         if (
             PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING.value()
             and not FlowRunContext.get()
@@ -810,6 +881,7 @@ class Task(Generic[P, R]):
         # Convert the call args/kwargs to a parameter dict
         parameters = get_call_parameters(self.fn, args, kwargs)
         return_type = "state" if return_state else "future"
+        flow_run_context = FlowRunContext.get()
 
         task_viz_tracker = get_task_viz_tracker()
         if task_viz_tracker:
@@ -817,10 +889,7 @@ class Task(Generic[P, R]):
                 "`task.submit()` is not currently supported by `flow.visualize()`"
             )
 
-        if (
-            PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING.value()
-            and not FlowRunContext.get()
-        ):
+        if PREFECT_EXPERIMENTAL_ENABLE_TASK_SCHEDULING and not flow_run_context:
             create_autonomous_task_run_call = create_call(
                 create_autonomous_task_run, task=self, parameters=parameters
             )
@@ -832,16 +901,74 @@ class Task(Generic[P, R]):
             return from_sync.wait_for_call_in_loop_thread(
                 create_autonomous_task_run_call
             )
+        if PREFECT_EXPERIMENTAL_ENABLE_NEW_ENGINE and flow_run_context:
+            if self.isasync:
+                return self._submit_async(
+                    parameters=parameters,
+                    flow_run_context=flow_run_context,
+                    wait_for=wait_for,
+                    return_state=return_state,
+                )
+            else:
+                raise NotImplementedError(
+                    "Submitting sync tasks with the new engine has not be implemented yet."
+                )
 
-        return enter_task_run_engine(
-            self,
+        else:
+            return enter_task_run_engine(
+                self,
+                parameters=parameters,
+                wait_for=wait_for,
+                return_type=return_type,
+                task_runner=None,  # Use the flow's task runner
+                mapped=False,
+            )
+
+    async def _submit_async(
+        self,
+        parameters: Dict[str, Any],
+        flow_run_context: FlowRunContext,
+        wait_for: Optional[Iterable[PrefectFuture]],
+        return_state: bool,
+    ):
+        from prefect.new_task_engine import run_task
+
+        task_runner = flow_run_context.task_runner
+
+        task_run = await self.create_run(
+            flow_run_context=flow_run_context,
             parameters=parameters,
             wait_for=wait_for,
-            return_type=return_type,
-            task_runner=None,  # Use the flow's task runner
-            mapped=False,
         )
 
+        future = PrefectFuture(
+            name=task_run.name,
+            key=uuid4(),
+            task_runner=task_runner,
+            asynchronous=(self.isasync and flow_run_context.flow.isasync),
+        )
+        future.task_run = task_run
+        flow_run_context.task_run_futures.append(future)
+        await task_runner.submit(
+            key=future.key,
+            call=partial(
+                run_task,
+                task=self,
+                task_run=task_run,
+                parameters=parameters,
+                wait_for=wait_for,
+                return_type="state",
+            ),
+        )
+        # TODO: I don't like this. Can we move responsibility for creating the future
+        # and setting this anyio.Event to the task runner?
+        future._submitted.set()
+
+        if return_state:
+            return await future.wait()
+        else:
+            return future
+
     @overload
     def map(
         self: "Task[P, NoReturn]",
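The `__call__` branch above routes async tasks through `prefect.new_task_engine.run_task` when the experiment is enabled. A minimal, hedged sketch of opting in; the flow and task names are illustrative, and in this release only async tasks take the new-engine path:

```python
import asyncio

from prefect import flow, task
from prefect.settings import PREFECT_EXPERIMENTAL_ENABLE_NEW_ENGINE, temporary_settings


@task
async def add_one(x: int) -> int:
    return x + 1


@flow
async def demo() -> int:
    # With the experiment enabled, this async task call returns a coroutine
    # driven by run_task rather than entering the legacy task run engine.
    return await add_one(1)


if __name__ == "__main__":
    with temporary_settings(updates={PREFECT_EXPERIMENTAL_ENABLE_NEW_ENGINE: True}):
        asyncio.run(demo())
```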
prefect/utilities/asyncutils.py
CHANGED
@@ -1,12 +1,15 @@
 """
 Utilities for interoperability with async functions and workers from various contexts.
 """
+
 import asyncio
 import ctypes
 import inspect
 import threading
 import warnings
+from concurrent.futures import ThreadPoolExecutor
 from contextlib import asynccontextmanager
+from contextvars import copy_context
 from functools import partial, wraps
 from threading import Thread
 from typing import (
@@ -20,6 +23,7 @@ from typing import (
     Type,
     TypeVar,
     Union,
+    cast,
 )
 from uuid import UUID, uuid4
 
@@ -78,6 +82,43 @@ def is_async_gen_fn(func):
     return inspect.isasyncgenfunction(func)
 
 
+def run_sync(coroutine: Coroutine[Any, Any, T]) -> T:
+    """
+    Runs a coroutine from a synchronous context. A thread will be spawned
+    to run the event loop if necessary, which allows coroutines to run in
+    environments like Jupyter notebooks where the event loop runs on the main
+    thread.
+
+    Args:
+        coroutine: The coroutine to run.
+
+    Returns:
+        The return value of the coroutine.
+
+    Example:
+        Basic usage:
+        ```python
+        async def my_async_function(x: int) -> int:
+            return x + 1
+
+        run_sync(my_async_function(1))
+        ```
+    """
+    # ensure context variables are properly copied to the async frame
+    context = copy_context()
+    try:
+        loop = asyncio.get_running_loop()
+    except RuntimeError:
+        loop = None
+
+    if loop and loop.is_running():
+        with ThreadPoolExecutor() as executor:
+            future = executor.submit(context.run, asyncio.run, coroutine)
+            return cast(T, future.result())
+    else:
+        return context.run(asyncio.run, coroutine)
+
+
 async def run_sync_in_worker_thread(
     __fn: Callable[..., T], *args: Any, **kwargs: Any
 ) -> T:
prefect/utilities/engine.py
CHANGED
@@ -11,6 +11,7 @@ from typing import (
     Iterable,
     Optional,
     Set,
+    TypeVar,
     Union,
 )
 from uuid import UUID, uuid4
@@ -66,6 +67,7 @@ from prefect.utilities.text import truncated_to
 API_HEALTHCHECKS = {}
 UNTRACKABLE_TYPES = {bool, type(None), type(...), type(NotImplemented)}
 engine_logger = get_logger("engine")
+T = TypeVar("T")
 
 
 async def collect_task_run_inputs(expr: Any, max_depth: int = -1) -> Set[TaskRunInput]:
@@ -308,11 +310,11 @@ async def resolve_inputs(
 
 async def propose_state(
     client: PrefectClient,
-    state: State,
+    state: State[object],
     force: bool = False,
-    task_run_id: UUID = None,
-    flow_run_id: UUID = None,
-) -> State:
+    task_run_id: Optional[UUID] = None,
+    flow_run_id: Optional[UUID] = None,
+) -> State[object]:
     """
     Propose a new state for a flow run or task run, invoking Prefect orchestration logic.
 

prefect/utilities/schema_tools/validation.py
CHANGED
@@ -232,7 +232,7 @@ def preprocess_schema(schema):
         process_properties(schema["properties"], required_fields)
 
     if "definitions" in schema:  # Also process definitions for reused models
-        for definition in schema["definitions"].values():
+        for definition in (schema["definitions"] or {}).values():
            if "properties" in definition:
                required_fields = definition.get("required", [])
                process_properties(definition["properties"], required_fields)
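The one-line `preprocess_schema` change guards against a parameter schema whose `definitions` key is present but `None`. A standalone sketch of the pattern; the schema content here is illustrative:

```python
schema = {
    "title": "Parameters",
    "properties": {"x": {"type": "integer"}},
    "definitions": None,  # present but empty, as some generated schemas emit
}

if "definitions" in schema:
    # Before the fix, schema["definitions"].values() raised AttributeError here.
    for definition in (schema["definitions"] or {}).values():
        if "properties" in definition:
            required_fields = definition.get("required", [])
            print(required_fields)
```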
prefect/workers/process.py
CHANGED
@@ -13,6 +13,7 @@ to poll for flow runs.
 For more information about work pools and workers,
 checkout out the [Prefect docs](/concepts/work-pools/).
 """
+
 import asyncio
 import contextlib
 import os
@@ -142,7 +143,7 @@ class ProcessWorker(BaseWorker):
         "Execute flow runs as subprocesses on a worker. Works well for local execution"
         " when first getting started."
     )
-    _display_name = "
+    _display_name = "Process"
     _documentation_url = (
         "https://docs.prefect.io/latest/api-ref/prefect/workers/process/"
     )