prefect-client 3.1.4__py3-none-any.whl → 3.1.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/__init__.py +3 -0
- prefect/_internal/compatibility/migration.py +1 -1
- prefect/_internal/concurrency/api.py +52 -52
- prefect/_internal/concurrency/calls.py +59 -35
- prefect/_internal/concurrency/cancellation.py +34 -18
- prefect/_internal/concurrency/event_loop.py +7 -6
- prefect/_internal/concurrency/threads.py +41 -33
- prefect/_internal/concurrency/waiters.py +28 -21
- prefect/_internal/pydantic/v1_schema.py +2 -2
- prefect/_internal/pydantic/v2_schema.py +10 -9
- prefect/_internal/schemas/bases.py +10 -11
- prefect/_internal/schemas/validators.py +2 -1
- prefect/_version.py +3 -3
- prefect/automations.py +53 -47
- prefect/blocks/abstract.py +12 -10
- prefect/blocks/core.py +4 -2
- prefect/cache_policies.py +11 -11
- prefect/client/__init__.py +3 -1
- prefect/client/base.py +36 -37
- prefect/client/cloud.py +26 -19
- prefect/client/collections.py +2 -2
- prefect/client/orchestration.py +366 -277
- prefect/client/schemas/__init__.py +24 -0
- prefect/client/schemas/actions.py +132 -120
- prefect/client/schemas/filters.py +5 -0
- prefect/client/schemas/objects.py +113 -85
- prefect/client/schemas/responses.py +21 -18
- prefect/client/schemas/schedules.py +136 -93
- prefect/client/subscriptions.py +28 -14
- prefect/client/utilities.py +32 -36
- prefect/concurrency/asyncio.py +6 -9
- prefect/concurrency/services.py +3 -0
- prefect/concurrency/sync.py +35 -5
- prefect/context.py +39 -31
- prefect/deployments/flow_runs.py +3 -5
- prefect/docker/__init__.py +1 -1
- prefect/events/schemas/events.py +25 -20
- prefect/events/utilities.py +1 -2
- prefect/filesystems.py +3 -3
- prefect/flow_engine.py +755 -138
- prefect/flow_runs.py +3 -3
- prefect/flows.py +214 -170
- prefect/logging/configuration.py +1 -1
- prefect/logging/highlighters.py +1 -2
- prefect/logging/loggers.py +30 -20
- prefect/main.py +17 -24
- prefect/runner/runner.py +43 -21
- prefect/runner/server.py +30 -32
- prefect/runner/submit.py +3 -6
- prefect/runner/utils.py +6 -6
- prefect/runtime/flow_run.py +7 -0
- prefect/settings/constants.py +2 -2
- prefect/settings/legacy.py +1 -1
- prefect/settings/models/server/events.py +10 -0
- prefect/settings/sources.py +9 -2
- prefect/task_engine.py +72 -19
- prefect/task_runners.py +2 -2
- prefect/tasks.py +46 -33
- prefect/telemetry/bootstrap.py +15 -2
- prefect/telemetry/run_telemetry.py +107 -0
- prefect/transactions.py +14 -14
- prefect/types/__init__.py +20 -3
- prefect/utilities/_engine.py +96 -0
- prefect/utilities/annotations.py +25 -18
- prefect/utilities/asyncutils.py +126 -140
- prefect/utilities/callables.py +87 -78
- prefect/utilities/collections.py +278 -117
- prefect/utilities/compat.py +13 -21
- prefect/utilities/context.py +6 -5
- prefect/utilities/dispatch.py +23 -12
- prefect/utilities/dockerutils.py +33 -32
- prefect/utilities/engine.py +126 -239
- prefect/utilities/filesystem.py +18 -15
- prefect/utilities/hashing.py +10 -11
- prefect/utilities/importtools.py +40 -27
- prefect/utilities/math.py +9 -5
- prefect/utilities/names.py +3 -3
- prefect/utilities/processutils.py +121 -57
- prefect/utilities/pydantic.py +41 -36
- prefect/utilities/render_swagger.py +22 -12
- prefect/utilities/schema_tools/__init__.py +2 -1
- prefect/utilities/schema_tools/hydration.py +50 -43
- prefect/utilities/schema_tools/validation.py +52 -42
- prefect/utilities/services.py +13 -12
- prefect/utilities/templating.py +45 -45
- prefect/utilities/text.py +2 -1
- prefect/utilities/timeout.py +4 -4
- prefect/utilities/urls.py +9 -4
- prefect/utilities/visualization.py +46 -24
- prefect/variables.py +9 -8
- prefect/workers/base.py +18 -10
- {prefect_client-3.1.4.dist-info → prefect_client-3.1.6.dist-info}/METADATA +5 -5
- {prefect_client-3.1.4.dist-info → prefect_client-3.1.6.dist-info}/RECORD +96 -94
- {prefect_client-3.1.4.dist-info → prefect_client-3.1.6.dist-info}/WHEEL +1 -1
- {prefect_client-3.1.4.dist-info → prefect_client-3.1.6.dist-info}/LICENSE +0 -0
- {prefect_client-3.1.4.dist-info → prefect_client-3.1.6.dist-info}/top_level.txt +0 -0
prefect/flow_engine.py
CHANGED
@@ -2,7 +2,7 @@ import asyncio
 import logging
 import os
 import time
-from contextlib import ExitStack, contextmanager
+from contextlib import ExitStack, asynccontextmanager, contextmanager, nullcontext
 from dataclasses import dataclass, field
 from typing import (
     Any,
@@ -22,19 +22,25 @@ from typing import (
 )
 from uuid import UUID
 
-from
+from anyio import CancelScope
+from opentelemetry import propagate, trace
 from opentelemetry.trace import Tracer, get_tracer
 from typing_extensions import ParamSpec
 
 import prefect
 from prefect import Task
-from prefect.client.orchestration import SyncPrefectClient, get_client
+from prefect.client.orchestration import PrefectClient, SyncPrefectClient, get_client
 from prefect.client.schemas import FlowRun, TaskRun
 from prefect.client.schemas.filters import FlowRunFilter
 from prefect.client.schemas.sorting import FlowRunSort
 from prefect.concurrency.context import ConcurrencyContext
 from prefect.concurrency.v1.context import ConcurrencyContext as ConcurrencyContextV1
-from prefect.context import
+from prefect.context import (
+    AsyncClientContext,
+    FlowRunContext,
+    SyncClientContext,
+    TagsContext,
+)
 from prefect.exceptions import (
     Abort,
     Pause,
@@ -66,6 +72,9 @@ from prefect.states import (
     exception_to_failed_state,
     return_value_to_state,
 )
+from prefect.telemetry.run_telemetry import OTELSetter
+from prefect.types import KeyValueLabels
+from prefect.utilities._engine import get_hook_name, resolve_custom_flow_run_name
 from prefect.utilities.annotations import NotSet
 from prefect.utilities.asyncutils import run_coro_as_sync
 from prefect.utilities.callables import (
@@ -75,10 +84,9 @@ from prefect.utilities.callables import (
 )
 from prefect.utilities.collections import visit_collection
 from prefect.utilities.engine import (
-    _get_hook_name,
-    _resolve_custom_flow_run_name,
     capture_sigterm,
     link_state_to_result,
+    propose_state,
     propose_state_sync,
     resolve_to_final_result,
 )
@@ -87,6 +95,8 @@ from prefect.utilities.urls import url_for
 
 P = ParamSpec("P")
 R = TypeVar("R")
+LABELS_TRACEPARENT_KEY = "__OTEL_TRACEPARENT"
+TRACEPARENT_KEY = "traceparent"
 
 
 class FlowRunTimeoutError(TimeoutError):
@@ -112,7 +122,7 @@ def load_flow_and_flow_run(flow_run_id: UUID) -> Tuple[FlowRun, Flow]:
 
 
 @dataclass
-class FlowRunEngine(Generic[P, R]):
+class BaseFlowRunEngine(Generic[P, R]):
     flow: Union[Flow[P, R], Flow[P, Coroutine[Any, Any, R]]]
     parameters: Optional[Dict[str, Any]] = None
     flow_run: Optional[FlowRun] = None
@@ -124,7 +134,6 @@ class FlowRunEngine(Generic[P, R]):
     # holds the exception raised by the user code, if any
     _raised: Union[Exception, Type[NotSet]] = NotSet
     _is_started: bool = False
-    _client: Optional[SyncPrefectClient] = None
     short_circuit: bool = False
     _flow_run_name_set: bool = False
     _tracer: Tracer = field(
@@ -139,16 +148,81 @@ class FlowRunEngine(Generic[P, R]):
         if self.parameters is None:
             self.parameters = {}
 
+    @property
+    def state(self) -> State:
+        return self.flow_run.state  # type: ignore
+
+    def _end_span_on_success(self):
+        if not self._span:
+            return
+        self._span.set_status(trace.Status(trace.StatusCode.OK))
+        self._span.end(time.time_ns())
+        self._span = None
+
+    def _end_span_on_error(self, exc: BaseException, description: Optional[str]):
+        if not self._span:
+            return
+        self._span.record_exception(exc)
+        self._span.set_status(trace.Status(trace.StatusCode.ERROR, description))
+        self._span.end(time.time_ns())
+        self._span = None
+
+    def is_running(self) -> bool:
+        if getattr(self, "flow_run", None) is None:
+            return False
+        return getattr(self, "flow_run").state.is_running()
+
+    def is_pending(self) -> bool:
+        if getattr(self, "flow_run", None) is None:
+            return False  # TODO: handle this differently?
+        return getattr(self, "flow_run").state.is_pending()
+
+    def cancel_all_tasks(self):
+        if hasattr(self.flow.task_runner, "cancel_all"):
+            self.flow.task_runner.cancel_all()  # type: ignore
+
+    def _update_otel_labels(
+        self, span: trace.Span, client: Union[SyncPrefectClient, PrefectClient]
+    ):
+        parent_flow_run_ctx = FlowRunContext.get()
+        if parent_flow_run_ctx and parent_flow_run_ctx.flow_run:
+            if traceparent := parent_flow_run_ctx.flow_run.labels.get(
+                LABELS_TRACEPARENT_KEY
+            ):
+                carrier: KeyValueLabels = {TRACEPARENT_KEY: traceparent}
+                propagate.get_global_textmap().inject(
+                    carrier={TRACEPARENT_KEY: traceparent},
+                    setter=OTELSetter(),
+                )
+            else:
+                carrier: KeyValueLabels = {}
+                propagate.get_global_textmap().inject(
+                    carrier,
+                    context=trace.set_span_in_context(span),
+                    setter=OTELSetter(),
+                )
+            if carrier.get(TRACEPARENT_KEY):
+                if self.flow_run:
+                    client.update_flow_run_labels(
+                        flow_run_id=self.flow_run.id,
+                        labels={LABELS_TRACEPARENT_KEY: carrier[TRACEPARENT_KEY]},
+                    )
+                else:
+                    self.logger.info(
+                        f"Tried to set traceparent {carrier[TRACEPARENT_KEY]} for flow run, but None was found"
+                    )
+
+
+@dataclass
+class FlowRunEngine(BaseFlowRunEngine[P, R]):
+    _client: Optional[SyncPrefectClient] = None
+
     @property
     def client(self) -> SyncPrefectClient:
         if not self._is_started or self._client is None:
             raise RuntimeError("Engine has not started.")
         return self._client
 
-    @property
-    def state(self) -> State:
-        return self.flow_run.state  # type: ignore
-
     def _resolve_parameters(self):
         if not self.parameters:
             return {}
@@ -243,7 +317,7 @@ class FlowRunEngine(Generic[P, R]):
 
         if self._span:
             self._span.add_event(
-                state.name,
+                state.name or state.type,
                 {
                     "prefect.state.message": state.message or "",
                     "prefect.state.type": state.type,
@@ -362,22 +436,7 @@ class FlowRunEngine(Generic[P, R]):
         self.set_state(state, force=True)
         self._raised = exc
 
-        self._end_span_on_error(exc, state.message)
-
-    def _end_span_on_success(self):
-        if not self._span:
-            return
-        self._span.set_status(trace.Status(trace.StatusCode.OK))
-        self._span.end(time.time_ns())
-        self._span = None
-
-    def _end_span_on_error(self, exc: BaseException, description: Optional[str]):
-        if not self._span:
-            return
-        self._span.record_exception(exc)
-        self._span.set_status(trace.Status(trace.StatusCode.ERROR, description))
-        self._span.end(time.time_ns())
-        self._span = None
+        self._end_span_on_error(exc, state.message if state else "")
 
     def load_subflow_run(
         self,
@@ -512,7 +571,7 @@ class FlowRunEngine(Generic[P, R]):
             hooks = None
 
         for hook in hooks or []:
-            hook_name = _get_hook_name(hook)
+            hook_name = get_hook_name(hook)
 
             try:
                 self.logger.info(
@@ -575,7 +634,7 @@ class FlowRunEngine(Generic[P, R]):
 
             # update the flow run name if necessary
             if not self._flow_run_name_set and self.flow.flow_run_name:
-                flow_run_name = _resolve_custom_flow_run_name(
+                flow_run_name = resolve_custom_flow_run_name(
                     flow=self.flow, parameters=self.parameters
                 )
                 self.client.set_flow_run_name(
@@ -622,7 +681,7 @@ class FlowRunEngine(Generic[P, R]):
                     empirical_policy=self.flow_run.empirical_policy,
                 )
 
-
+            span = self._tracer.start_span(
                 name=self.flow_run.name,
                 attributes={
                     **self.flow_run.labels,
@@ -632,6 +691,9 @@ class FlowRunEngine(Generic[P, R]):
                     "prefect.flow.name": self.flow.name,
                 },
             )
+            self._update_otel_labels(span, self.client)
+
+            self._span = span
 
             try:
                 yield self
@@ -665,20 +727,6 @@ class FlowRunEngine(Generic[P, R]):
             self._is_started = False
             self._client = None
 
-    def is_running(self) -> bool:
-        if getattr(self, "flow_run", None) is None:
-            return False
-        return getattr(self, "flow_run").state.is_running()
-
-    def is_pending(self) -> bool:
-        if getattr(self, "flow_run", None) is None:
-            return False  # TODO: handle this differently?
-        return getattr(self, "flow_run").state.is_pending()
-
-    def cancel_all_tasks(self):
-        if hasattr(self.flow.task_runner, "cancel_all"):
-            self.flow.task_runner.cancel_all()  # type: ignore
-
     # --------------------------
     #
     # The following methods compose the main task run loop
@@ -687,12 +735,13 @@ class FlowRunEngine(Generic[P, R]):
 
     @contextmanager
     def start(self) -> Generator[None, None, None]:
-        with self.initialize_run()
-            self.
+        with self.initialize_run():
+            with trace.use_span(self._span) if self._span else nullcontext():
+                self.begin_run()
 
-
-
-
+                if self.state.is_running():
+                    self.call_hooks()
+                yield
 
     @contextmanager
     def run_context(self):
@@ -734,126 +783,694 @@ class FlowRunEngine(Generic[P, R]):
             self.handle_success(result)
 
 
-
-
-
-
-    wait_for: Optional[Iterable[PrefectFuture]] = None,
-    return_type: Literal["state", "result"] = "result",
-) -> Union[R, State, None]:
-    engine = FlowRunEngine[P, R](
-        flow=flow,
-        parameters=parameters,
-        flow_run=flow_run,
-        wait_for=wait_for,
-    )
+@dataclass
+class AsyncFlowRunEngine(BaseFlowRunEngine[P, R]):
+    """
+    Async version of the flow run engine.
 
-
-
-
-                engine.call_flow_fn()
+    NOTE: This has not been fully asyncified yet which may lead to async flows
+    not being fully asyncified.
+    """
 
-
+    _client: Optional[PrefectClient] = None
 
+    @property
+    def client(self) -> PrefectClient:
+        if not self._is_started or self._client is None:
+            raise RuntimeError("Engine has not started.")
+        return self._client
 
-
-
-
-    parameters: Optional[Dict[str, Any]] = None,
-    wait_for: Optional[Iterable[PrefectFuture]] = None,
-    return_type: Literal["state", "result"] = "result",
-) -> Union[R, State, None]:
-    engine = FlowRunEngine[P, R](
-        flow=flow, parameters=parameters, flow_run=flow_run, wait_for=wait_for
-    )
+    def _resolve_parameters(self):
+        if not self.parameters:
+            return {}
 
-
-
-
-
+        resolved_parameters = {}
+        for parameter, value in self.parameters.items():
+            try:
+                resolved_parameters[parameter] = visit_collection(
+                    value,
+                    visit_fn=resolve_to_final_result,
+                    return_data=True,
+                    max_depth=-1,
+                    remove_annotations=True,
+                    context={},
+                )
+            except UpstreamTaskError:
+                raise
+            except Exception as exc:
+                raise PrefectException(
+                    f"Failed to resolve inputs in parameter {parameter!r}. If your"
+                    " parameter type is not supported, consider using the `quote`"
+                    " annotation to skip resolution of inputs."
+                ) from exc
 
-
+        self.parameters = resolved_parameters
 
+    def _wait_for_dependencies(self):
+        if not self.wait_for:
+            return
 
-
-
-
-
-
-
-
-
-        raise ValueError("The return_type for a generator flow must be 'result'")
+        visit_collection(
+            self.wait_for,
+            visit_fn=resolve_to_final_result,
+            return_data=False,
+            max_depth=-1,
+            remove_annotations=True,
+            context={},
+        )
 
-
-
-
+    async def begin_run(self) -> State:
+        try:
+            self._resolve_parameters()
+            self._wait_for_dependencies()
+        except UpstreamTaskError as upstream_exc:
+            state = await self.set_state(
+                Pending(
+                    name="NotReady",
+                    message=str(upstream_exc),
+                ),
+                # if orchestrating a run already in a pending state, force orchestration to
+                # update the state name
+                force=self.state.is_pending(),
+            )
+            return state
 
-
-
-
-
-
+        # validate prior to context so that context receives validated params
+        if self.flow.should_validate_parameters:
+            try:
+                self.parameters = self.flow.validate_parameters(self.parameters or {})
+            except Exception as exc:
+                message = "Validation of flow parameters failed with error:"
+                self.logger.error("%s %s", message, exc)
+                await self.handle_exception(
+                    exc,
+                    msg=message,
+                    result_store=get_result_store().update_for_flow(
+                        self.flow, _sync=True
+                    ),
                 )
-
-
-            while True:
-                gen_result = next(gen)
-                # link the current state to the result for dependency tracking
-                link_state_to_result(engine.state, gen_result)
-                yield gen_result
-        except StopIteration as exc:
-            engine.handle_success(exc.value)
-        except GeneratorExit as exc:
-            engine.handle_success(None)
-            gen.throw(exc)
+                self.short_circuit = True
+                await self.call_hooks()
 
-
+        new_state = Running()
+        state = await self.set_state(new_state)
+        while state.is_pending():
+            await asyncio.sleep(0.2)
+            state = await self.set_state(new_state)
+        return state
 
+    async def set_state(self, state: State, force: bool = False) -> State:
+        """ """
+        # prevents any state-setting activity
+        if self.short_circuit:
+            return self.state
 
-
-
-
-
-
-
-) -> AsyncGenerator[R, None]:
-    if return_type != "result":
-        raise ValueError("The return_type for a generator flow must be 'result'")
+        state = await propose_state(
+            self.client, state, flow_run_id=self.flow_run.id, force=force
+        )  # type: ignore
+        self.flow_run.state = state  # type: ignore
+        self.flow_run.state_name = state.name  # type: ignore
+        self.flow_run.state_type = state.type  # type: ignore
 
-
-
-
+        if self._span:
+            self._span.add_event(
+                state.name or state.type,
+                {
+                    "prefect.state.message": state.message or "",
+                    "prefect.state.type": state.type,
+                    "prefect.state.name": state.name or state.type,
+                    "prefect.state.id": str(state.id),
+                },
+            )
+        return state
 
-
-
-
-
-
-    )
-
-
-
+    async def result(self, raise_on_failure: bool = True) -> "Union[R, State, None]":
+        if self._return_value is not NotSet and not isinstance(
+            self._return_value, State
+        ):
+            if isinstance(self._return_value, BaseResult):
+                _result = self._return_value.get()
+            else:
+                _result = self._return_value
+
+            if asyncio.iscoroutine(_result):
+                # getting the value for a BaseResult may return an awaitable
+                # depending on whether the parent frame is sync or not
+                _result = await _result
+            return _result
+
+        if self._raised is not NotSet:
+            if raise_on_failure:
+                raise self._raised
+            return self._raised
+
+        # This is a fall through case which leans on the existing state result mechanics to get the
+        # return value. This is necessary because we currently will return a State object if the
+        # the State was Prefect-created.
+        # TODO: Remove the need to get the result from a State except in cases where the return value
+        # is a State object.
+        _result = self.state.result(raise_on_failure=raise_on_failure, fetch=True)  # type: ignore
+        # state.result is a `sync_compatible` function that may or may not return an awaitable
+        # depending on whether the parent frame is sync or not
+        if asyncio.iscoroutine(_result):
+            _result = await _result
+        return _result
+
+    async def handle_success(self, result: R) -> R:
+        result_store = getattr(FlowRunContext.get(), "result_store", None)
+        if result_store is None:
+            raise ValueError("Result store is not set")
+        resolved_result = resolve_futures_to_states(result)
+        terminal_state = await return_value_to_state(
+            resolved_result,
+            result_store=result_store,
+            write_result=should_persist_result(),
+        )
+        await self.set_state(terminal_state)
+        self._return_value = resolved_result
+
+        self._end_span_on_success()
+
+        return result
+
+    async def handle_exception(
+        self,
+        exc: Exception,
+        msg: Optional[str] = None,
+        result_store: Optional[ResultStore] = None,
+    ) -> State:
+        context = FlowRunContext.get()
+        terminal_state = cast(
+            State,
+            await exception_to_failed_state(
+                exc,
+                message=msg or "Flow run encountered an exception:",
+                result_store=result_store or getattr(context, "result_store", None),
+                write_result=True,
+            ),
+        )
+        state = await self.set_state(terminal_state)
+        if self.state.is_scheduled():
+            self.logger.info(
+                (
+                    f"Received non-final state {state.name!r} when proposing final"
+                    f" state {terminal_state.name!r} and will attempt to run again..."
+                ),
+            )
+            state = await self.set_state(Running())
+        self._raised = exc
+
+        self._end_span_on_error(exc, state.message)
+
+        return state
+
+    async def handle_timeout(self, exc: TimeoutError) -> None:
+        if isinstance(exc, FlowRunTimeoutError):
+            message = (
+                f"Flow run exceeded timeout of {self.flow.timeout_seconds} second(s)"
+            )
+        else:
+            message = f"Flow run failed due to timeout: {exc!r}"
+        self.logger.error(message)
+        state = Failed(
+            data=exc,
+            message=message,
+            name="TimedOut",
+        )
+        await self.set_state(state)
+        self._raised = exc
+
+        self._end_span_on_error(exc, message)
+
+    async def handle_crash(self, exc: BaseException) -> None:
+        # need to shield from asyncio cancellation to ensure we update the state
+        # on the server before exiting
+        with CancelScope(shield=True):
+            state = await exception_to_crashed_state(exc)
+            self.logger.error(f"Crash detected! {state.message}")
+            self.logger.debug("Crash details:", exc_info=exc)
+            await self.set_state(state, force=True)
+            self._raised = exc
+
+            self._end_span_on_error(exc, state.message)
+
+    async def load_subflow_run(
+        self,
+        parent_task_run: TaskRun,
+        client: PrefectClient,
+        context: FlowRunContext,
+    ) -> Union[FlowRun, None]:
+        """
+        This method attempts to load an existing flow run for a subflow task
+        run, if appropriate.
+
+        If the parent task run is in a final but not COMPLETED state, and not
+        being rerun, then we attempt to load an existing flow run instead of
+        creating a new one. This will prevent the engine from running the
+        subflow again.
+
+        If no existing flow run is found, or if the subflow should be rerun,
+        then no flow run is returned.
+        """
+
+        # check if the parent flow run is rerunning
+        rerunning = (
+            context.flow_run.run_count > 1
+            if getattr(context, "flow_run", None)
+            and isinstance(context.flow_run, FlowRun)
+            else False
+        )
+
+        # if the parent task run is in a final but not completed state, and
+        # not rerunning, then retrieve the most recent flow run instead of
+        # creating a new one. This effectively loads a cached flow run for
+        # situations where we are confident the flow should not be run
+        # again.
+        assert isinstance(parent_task_run.state, State)
+        if parent_task_run.state.is_final() and not (
+            rerunning and not parent_task_run.state.is_completed()
+        ):
+            # return the most recent flow run, if it exists
+            flow_runs = await client.read_flow_runs(
+                flow_run_filter=FlowRunFilter(
+                    parent_task_run_id={"any_": [parent_task_run.id]}
+                ),
+                sort=FlowRunSort.EXPECTED_START_TIME_ASC,
+                limit=1,
+            )
+            if flow_runs:
+                loaded_flow_run = flow_runs[-1]
+                self._return_value = loaded_flow_run.state
+                return loaded_flow_run
+
+    async def create_flow_run(self, client: PrefectClient) -> FlowRun:
+        flow_run_ctx = FlowRunContext.get()
+        parameters = self.parameters or {}
+
+        parent_task_run = None
+
+        # this is a subflow run
+        if flow_run_ctx:
+            # add a task to a parent flow run that represents the execution of a subflow run
+            parent_task = Task(
+                name=self.flow.name, fn=self.flow.fn, version=self.flow.version
+            )
+
+            parent_task_run = await parent_task.create_run(
+                flow_run_context=flow_run_ctx,
+                parameters=self.parameters,
+                wait_for=self.wait_for,
+            )
+
+            # check if there is already a flow run for this subflow
+            if subflow_run := await self.load_subflow_run(
+                parent_task_run=parent_task_run, client=client, context=flow_run_ctx
+            ):
+                return subflow_run
+
+        flow_run = await client.create_flow_run(
+            flow=self.flow,
+            parameters=self.flow.serialize_parameters(parameters),
+            state=Pending(),
+            parent_task_run_id=getattr(parent_task_run, "id", None),
+            tags=TagsContext.get().current_tags,
+        )
+        if flow_run_ctx:
+            parent_logger = get_run_logger(flow_run_ctx)
+            parent_logger.info(
+                f"Created subflow run {flow_run.name!r} for flow {self.flow.name!r}"
+            )
+        else:
+            self.logger.info(
+                f"Created flow run {flow_run.name!r} for flow {self.flow.name!r}"
+            )
+
+        return flow_run
+
+    async def call_hooks(self, state: Optional[State] = None):
+        if state is None:
+            state = self.state
+        flow = self.flow
+        flow_run = self.flow_run
+
+        if not flow_run:
+            raise ValueError("Flow run is not set")
+
+        enable_cancellation_and_crashed_hooks = (
+            os.environ.get(
+                "PREFECT__ENABLE_CANCELLATION_AND_CRASHED_HOOKS", "true"
+            ).lower()
+            == "true"
+        )
+
+        if state.is_failed() and flow.on_failure_hooks:
+            hooks = flow.on_failure_hooks
+        elif state.is_completed() and flow.on_completion_hooks:
+            hooks = flow.on_completion_hooks
+        elif (
+            enable_cancellation_and_crashed_hooks
+            and state.is_cancelling()
+            and flow.on_cancellation_hooks
+        ):
+            hooks = flow.on_cancellation_hooks
+        elif (
+            enable_cancellation_and_crashed_hooks
+            and state.is_crashed()
+            and flow.on_crashed_hooks
+        ):
+            hooks = flow.on_crashed_hooks
+        elif state.is_running() and flow.on_running_hooks:
+            hooks = flow.on_running_hooks
+        else:
+            hooks = None
+
+        for hook in hooks or []:
+            hook_name = get_hook_name(hook)
+
+            try:
+                self.logger.info(
+                    f"Running hook {hook_name!r} in response to entering state"
+                    f" {state.name!r}"
+                )
+                result = hook(flow, flow_run, state)
+                if asyncio.iscoroutine(result):
+                    await result
+            except Exception:
+                self.logger.error(
+                    f"An error was encountered while running hook {hook_name!r}",
+                    exc_info=True,
+                )
+            else:
+                self.logger.info(f"Hook {hook_name!r} finished running successfully")
+
+    @asynccontextmanager
+    async def setup_run_context(self, client: Optional[PrefectClient] = None):
+        from prefect.utilities.engine import (
+            should_log_prints,
+        )
+
+        if client is None:
+            client = self.client
+        if not self.flow_run:
+            raise ValueError("Flow run not set")
+
+        self.flow_run = await client.read_flow_run(self.flow_run.id)
+        log_prints = should_log_prints(self.flow)
+
+        with ExitStack() as stack:
+            # TODO: Explore closing task runner before completing the flow to
+            # wait for futures to complete
+            stack.enter_context(capture_sigterm())
+            if log_prints:
+                stack.enter_context(patch_print())
+            task_runner = stack.enter_context(self.flow.task_runner.duplicate())
+            stack.enter_context(
+                FlowRunContext(
+                    flow=self.flow,
+                    log_prints=log_prints,
+                    flow_run=self.flow_run,
+                    parameters=self.parameters,
+                    client=client,
+                    result_store=get_result_store().update_for_flow(
+                        self.flow, _sync=True
+                    ),
+                    task_runner=task_runner,
+                    persist_result=self.flow.persist_result
+                    if self.flow.persist_result is not None
+                    else should_persist_result(),
+                )
+            )
+            stack.enter_context(ConcurrencyContextV1())
+            stack.enter_context(ConcurrencyContext())
+
+            # set the logger to the flow run logger
+            self.logger = flow_run_logger(flow_run=self.flow_run, flow=self.flow)
+
+            # update the flow run name if necessary
+            if not self._flow_run_name_set and self.flow.flow_run_name:
+                flow_run_name = resolve_custom_flow_run_name(
+                    flow=self.flow, parameters=self.parameters
+                )
+                await self.client.set_flow_run_name(
+                    flow_run_id=self.flow_run.id, name=flow_run_name
+                )
+                self.logger.extra["flow_run_name"] = flow_run_name
+                self.logger.debug(
+                    f"Renamed flow run {self.flow_run.name!r} to {flow_run_name!r}"
+                )
+                self.flow_run.name = flow_run_name
+                self._flow_run_name_set = True
+            yield
+
+    @asynccontextmanager
+    async def initialize_run(self):
+        """
+        Enters a client context and creates a flow run if needed.
+        """
+        async with AsyncClientContext.get_or_create() as client_ctx:
+            self._client = client_ctx.client
+            self._is_started = True
+
+            if not self.flow_run:
+                self.flow_run = await self.create_flow_run(self.client)
+                flow_run_url = url_for(self.flow_run)
+
+                if flow_run_url:
+                    self.logger.info(
+                        f"View at {flow_run_url}", extra={"send_to_api": False}
+                    )
+            else:
+                # Update the empirical policy to match the flow if it is not set
+                if self.flow_run.empirical_policy.retry_delay is None:
+                    self.flow_run.empirical_policy.retry_delay = (
+                        self.flow.retry_delay_seconds
+                    )
+
+                if self.flow_run.empirical_policy.retries is None:
+                    self.flow_run.empirical_policy.retries = self.flow.retries
+
+                await self.client.update_flow_run(
+                    flow_run_id=self.flow_run.id,
+                    flow_version=self.flow.version,
+                    empirical_policy=self.flow_run.empirical_policy,
+                )
+
+            span = self._tracer.start_span(
+                name=self.flow_run.name,
+                attributes={
+                    **self.flow_run.labels,
+                    "prefect.run.type": "flow",
+                    "prefect.run.id": str(self.flow_run.id),
+                    "prefect.tags": self.flow_run.tags,
+                    "prefect.flow.name": self.flow.name,
+                },
+            )
+            self._update_otel_labels(span, self.client)
+            self._span = span
+
+            try:
+                yield self
+
+            except TerminationSignal as exc:
+                self.cancel_all_tasks()
+                await self.handle_crash(exc)
+                raise
+            except Exception:
+                # regular exceptions are caught and re-raised to the user
+                raise
+            except (Abort, Pause):
+                raise
+            except GeneratorExit:
+                # Do not capture generator exits as crashes
+                raise
+            except BaseException as exc:
+                # BaseExceptions are caught and handled as crashes
+                await self.handle_crash(exc)
+                raise
+            finally:
+                # If debugging, use the more complete `repr` than the usual `str` description
+                display_state = (
+                    repr(self.state) if PREFECT_DEBUG_MODE else str(self.state)
+                )
+                self.logger.log(
+                    level=logging.INFO if self.state.is_completed() else logging.ERROR,
+                    msg=f"Finished in state {display_state}",
+                )
+
+                self._is_started = False
+                self._client = None
+
+    # --------------------------
+    #
+    # The following methods compose the main task run loop
+    #
+    # --------------------------
+
+    @asynccontextmanager
+    async def start(self) -> AsyncGenerator[None, None]:
+        async with self.initialize_run():
+            with trace.use_span(self._span) if self._span else nullcontext():
+                await self.begin_run()
+
+                if self.state.is_running():
+                    await self.call_hooks()
+                yield
+
+    @asynccontextmanager
+    async def run_context(self):
+        timeout_context = timeout_async if self.flow.isasync else timeout
+        # reenter the run context to ensure it is up to date for every run
+        async with self.setup_run_context():
+            try:
+                with timeout_context(
+                    seconds=self.flow.timeout_seconds,
+                    timeout_exc_type=FlowRunTimeoutError,
+                ):
+                    self.logger.debug(
+                        f"Executing flow {self.flow.name!r} for flow run {self.flow_run.name!r}..."
+                    )
+                    yield self
+            except TimeoutError as exc:
+                await self.handle_timeout(exc)
+            except Exception as exc:
+                self.logger.exception("Encountered exception during execution: %r", exc)
+                await self.handle_exception(exc)
+            finally:
+                if self.state.is_final() or self.state.is_cancelling():
+                    await self.call_hooks()
+
+    async def call_flow_fn(self) -> Coroutine[Any, Any, R]:
+        """
+        Convenience method to call the flow function. Returns a coroutine if the
+        flow is async.
+        """
+        assert self.flow.isasync, "Flow must be async to be run with AsyncFlowRunEngine"
+
+        result = await call_with_parameters(self.flow.fn, self.parameters)
+        await self.handle_success(result)
+        return result
+
+
+def run_flow_sync(
+    flow: Flow[P, R],
+    flow_run: Optional[FlowRun] = None,
+    parameters: Optional[Dict[str, Any]] = None,
+    wait_for: Optional[Iterable[PrefectFuture]] = None,
+    return_type: Literal["state", "result"] = "result",
+) -> Union[R, State, None]:
+    engine = FlowRunEngine[P, R](
+        flow=flow,
+        parameters=parameters,
+        flow_run=flow_run,
+        wait_for=wait_for,
+    )
+
+    with engine.start():
+        while engine.is_running():
+            with engine.run_context():
+                engine.call_flow_fn()
+
+    return engine.state if return_type == "state" else engine.result()
+
+
+async def run_flow_async(
+    flow: Flow[P, R],
+    flow_run: Optional[FlowRun] = None,
+    parameters: Optional[Dict[str, Any]] = None,
+    wait_for: Optional[Iterable[PrefectFuture]] = None,
+    return_type: Literal["state", "result"] = "result",
+) -> Union[R, State, None]:
+    engine = AsyncFlowRunEngine[P, R](
+        flow=flow, parameters=parameters, flow_run=flow_run, wait_for=wait_for
+    )
+
+    async with engine.start():
+        while engine.is_running():
+            async with engine.run_context():
+                await engine.call_flow_fn()
+
+    return engine.state if return_type == "state" else await engine.result()
+
+
+def run_generator_flow_sync(
+    flow: Flow[P, R],
+    flow_run: Optional[FlowRun] = None,
+    parameters: Optional[Dict[str, Any]] = None,
+    wait_for: Optional[Iterable[PrefectFuture]] = None,
+    return_type: Literal["state", "result"] = "result",
+) -> Generator[R, None, None]:
+    if return_type != "result":
+        raise ValueError("The return_type for a generator flow must be 'result'")
+
+    engine = FlowRunEngine[P, R](
+        flow=flow, parameters=parameters, flow_run=flow_run, wait_for=wait_for
+    )
+
+    with engine.start():
+        while engine.is_running():
+            with engine.run_context():
+                call_args, call_kwargs = parameters_to_args_kwargs(
+                    flow.fn, engine.parameters or {}
+                )
+                gen = flow.fn(*call_args, **call_kwargs)
+                try:
+                    while True:
+                        gen_result = next(gen)
+                        # link the current state to the result for dependency tracking
+                        link_state_to_result(engine.state, gen_result)
+                        yield gen_result
+                except StopIteration as exc:
+                    engine.handle_success(exc.value)
+                except GeneratorExit as exc:
+                    engine.handle_success(None)
+                    gen.throw(exc)
+
+    return engine.result()
+
+
+async def run_generator_flow_async(
+    flow: Flow[P, R],
+    flow_run: Optional[FlowRun] = None,
+    parameters: Optional[Dict[str, Any]] = None,
+    wait_for: Optional[Iterable[PrefectFuture[R]]] = None,
+    return_type: Literal["state", "result"] = "result",
+) -> AsyncGenerator[R, None]:
+    if return_type != "result":
+        raise ValueError("The return_type for a generator flow must be 'result'")
+
+    engine = AsyncFlowRunEngine[P, R](
+        flow=flow, parameters=parameters, flow_run=flow_run, wait_for=wait_for
+    )
+
+    async with engine.start():
+        while engine.is_running():
+            async with engine.run_context():
+                call_args, call_kwargs = parameters_to_args_kwargs(
+                    flow.fn, engine.parameters or {}
+                )
+                gen = flow.fn(*call_args, **call_kwargs)
+                try:
+                    while True:
                         # can't use anext in Python < 3.10
                         gen_result = await gen.__anext__()
                         # link the current state to the result for dependency tracking
                         link_state_to_result(engine.state, gen_result)
                         yield gen_result
                 except (StopAsyncIteration, GeneratorExit) as exc:
-                    engine.handle_success(None)
+                    await engine.handle_success(None)
                     if isinstance(exc, GeneratorExit):
                         gen.throw(exc)
 
    # async generators can't return, but we can raise failures here
    if engine.state.is_failed():
-        engine.result()
+        await engine.result()
 
 
 def run_flow(
     flow: Flow[P, R],
     flow_run: Optional[FlowRun] = None,
     parameters: Optional[Dict[str, Any]] = None,
-    wait_for: Optional[Iterable[PrefectFuture]] = None,
+    wait_for: Optional[Iterable[PrefectFuture[R]]] = None,
     return_type: Literal["state", "result"] = "result",
 ) -> Union[R, State, None]:
     kwargs = dict(