prefect-client 3.4.0__py3-none-any.whl → 3.4.1__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only.
- prefect/_build_info.py +3 -3
- prefect/_experimental/bundles/__init__.py +1 -1
- prefect/_internal/schemas/bases.py +11 -1
- prefect/_internal/schemas/validators.py +0 -98
- prefect/_internal/uuid7.py +11 -0
- prefect/client/orchestration/__init__.py +16 -8
- prefect/client/schemas/actions.py +13 -35
- prefect/client/schemas/objects.py +26 -22
- prefect/client/subscriptions.py +18 -9
- prefect/events/clients.py +6 -6
- prefect/events/filters.py +25 -11
- prefect/events/schemas/automations.py +3 -1
- prefect/events/schemas/events.py +3 -2
- prefect/flows.py +79 -28
- prefect/runner/_observers.py +60 -0
- prefect/runner/runner.py +71 -213
- prefect/server/api/workers.py +3 -2
- prefect/task_runners.py +2 -1
- prefect/tasks.py +3 -2
- prefect/types/__init__.py +24 -36
- prefect/types/names.py +139 -0
- prefect/utilities/dockerutils.py +18 -8
- prefect/utilities/importtools.py +12 -4
- prefect/workers/base.py +32 -10
- {prefect_client-3.4.0.dist-info → prefect_client-3.4.1.dist-info}/METADATA +2 -1
- {prefect_client-3.4.0.dist-info → prefect_client-3.4.1.dist-info}/RECORD +28 -25
- {prefect_client-3.4.0.dist-info → prefect_client-3.4.1.dist-info}/WHEEL +0 -0
- {prefect_client-3.4.0.dist-info → prefect_client-3.4.1.dist-info}/licenses/LICENSE +0 -0
prefect/events/schemas/automations.py
CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import abc
 import textwrap
 from datetime import timedelta
@@ -103,7 +105,7 @@ class ResourceTrigger(Trigger, abc.ABC):
         default_factory=lambda: ResourceSpecification.model_validate({}),
         description="Labels for resources which this trigger will match.",
     )
-    match_related: ResourceSpecification = Field(
+    match_related: Union[ResourceSpecification, list[ResourceSpecification]] = Field(
         default_factory=lambda: ResourceSpecification.model_validate({}),
         description="Labels for related resources which this trigger will match.",
     )
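The widened annotation means a trigger can now match against several related-resource specifications at once. A minimal sketch of what the new `match_related` type permits (not taken from this diff; the trigger's other fields are assumed to keep their defaults):

from prefect.events.schemas.automations import EventTrigger
from prefect.events.schemas.events import ResourceSpecification

# `match_related` previously took a single ResourceSpecification; it can now
# also be a list, e.g. to constrain several related resources at once.
trigger = EventTrigger(
    expect={"prefect.flow-run.Completed"},
    match_related=[
        ResourceSpecification.model_validate({"prefect.resource.role": "deployment"}),
        ResourceSpecification.model_validate({"prefect.resource.id": "prefect.deployment.*"}),
    ],
)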
prefect/events/schemas/events.py
CHANGED
@@ -13,7 +13,7 @@ from typing import (
     Tuple,
     Union,
 )
-from uuid import UUID, uuid4
+from uuid import UUID
 
 from pydantic import (
     AfterValidator,
@@ -26,6 +26,7 @@ from typing_extensions import Annotated, Self
 
 import prefect.types._datetime
 from prefect._internal.schemas.bases import PrefectBaseModel
+from prefect._internal.uuid7 import uuid7
 from prefect.logging import get_logger
 from prefect.settings import (
     PREFECT_EVENTS_MAXIMUM_LABELS_PER_RESOURCE,
@@ -135,7 +136,7 @@ class Event(PrefectBaseModel):
         description="An open-ended set of data describing what happened",
     )
     id: UUID = Field(
-        default_factory=uuid4,
+        default_factory=uuid7,
         description="The client-provided identifier of this event",
     )
     follows: Optional[UUID] = Field(
prefect/flows.py
CHANGED
@@ -27,6 +27,7 @@ from typing import (
     Coroutine,
     Generic,
     Iterable,
+    List,
     NoReturn,
     Optional,
     Protocol,
@@ -2309,8 +2310,9 @@ def load_flow_from_entrypoint(
     Extract a flow object from a script at an entrypoint by running all of the code in the file.
 
     Args:
-        entrypoint: a string in the format `<path_to_script>:<flow_func_name>`
-          or a module path to a flow function
+        entrypoint: a string in the format `<path_to_script>:<flow_func_name>`
+          or a string in the format `<path_to_script>:<class_name>.<flow_method_name>`
+          or a module path to a flow function
         use_placeholder_flow: if True, use a placeholder Flow object if the actual flow object
             cannot be loaded from the entrypoint (e.g. dependencies are missing)
 
@@ -2700,26 +2702,55 @@ def load_placeholder_flow(entrypoint: str, raises: Exception) -> Flow[P, Any]:
 
 def safe_load_flow_from_entrypoint(entrypoint: str) -> Optional[Flow[P, Any]]:
     """
-    Load a flow from an entrypoint and return None if an exception is raised.
+    Safely load a Prefect flow from an entrypoint string. Returns None if loading fails.
 
     Args:
-        entrypoint: a string in the format `<path_to_script>:<flow_func_name>`
-          or a module path to a flow function
+        entrypoint (str): A string identifying the flow to load. Can be in one of the following formats:
+            - `<path_to_script>:<flow_func_name>`
+            - `<path_to_script>:<class_name>.<flow_method_name>`
+            - `<module_path>.<flow_func_name>`
+
+    Returns:
+        Optional[Flow]: The loaded Prefect flow object, or None if loading fails due to errors
+        (e.g. unresolved dependencies, syntax errors, or missing objects).
     """
-
-    func_def, source_code = _entrypoint_definition_and_source(entrypoint)
-    if ":" in entrypoint:
-        path = entrypoint.rsplit(":")[0]
+    func_or_cls_def, source_code, parts = _entrypoint_definition_and_source(entrypoint)
+
+    path = entrypoint.rsplit(":", maxsplit=1)[0] if ":" in entrypoint else None
     namespace = safe_load_namespace(source_code, filepath=path)
-    if func_def.name in namespace:
-        return namespace[func_def.name]
-    else:
-        # If the function is not in the namespace, if may be due to missing dependencies
-        # for the function. We will attempt to compile each annotation and default value
-        # and remove them from the function definition to see if the function can be
-        # compiled without them.
 
-        return _sanitize_and_load_flow(func_def, namespace)
+    if parts[0] not in namespace:
+        # If the object is not in the namespace, it may be due to missing dependencies
+        # in annotations or default values. We will attempt to sanitize them by removing
+        # anything that cannot be compiled, and then recompile the function or class.
+        if isinstance(func_or_cls_def, (ast.FunctionDef, ast.AsyncFunctionDef)):
+            return _sanitize_and_load_flow(func_or_cls_def, namespace)
+        elif (
+            isinstance(func_or_cls_def, ast.ClassDef)
+            and len(parts) >= 2
+            and func_or_cls_def.name == parts[0]
+        ):
+            method_name = parts[1]
+            method_def = next(
+                (
+                    stmt
+                    for stmt in func_or_cls_def.body
+                    if isinstance(stmt, (ast.FunctionDef, ast.AsyncFunctionDef))
+                    and stmt.name == method_name
+                ),
+                None,
+            )
+            if method_def is not None:
+                return _sanitize_and_load_flow(method_def, namespace)
+        else:
+            return None
+
+    obj = namespace.get(parts[0])
+    for part in parts[1:]:
+        obj = getattr(obj, part, None)
+        if obj is None:
+            return None
+    return obj
 
 
 def _sanitize_and_load_flow(
@@ -2853,7 +2884,7 @@ def load_flow_arguments_from_entrypoint(
           or a module path to a flow function
     """
 
-    func_def, source_code = _entrypoint_definition_and_source(entrypoint)
+    func_def, source_code, _ = _entrypoint_definition_and_source(entrypoint)
     path = None
     if ":" in entrypoint:
         path = entrypoint.rsplit(":")[0]
@@ -2930,26 +2961,45 @@ def is_entrypoint_async(entrypoint: str) -> bool:
     Returns:
         True if the function is asynchronous, False otherwise.
     """
-    func_def, _ = _entrypoint_definition_and_source(entrypoint)
+    func_def, _, _ = _entrypoint_definition_and_source(entrypoint)
    return isinstance(func_def, ast.AsyncFunctionDef)
 
 
 def _entrypoint_definition_and_source(
     entrypoint: str,
-) -> Tuple[Union[ast.FunctionDef, ast.AsyncFunctionDef], str]:
+) -> Tuple[Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef], str, List[str]]:
+    """
+    Resolves and parses the source definition of a given entrypoint.
+
+    The entrypoint can be provided in one of the following formats:
+    - '<path_to_script>:<flow_func_name>'
+    - '<path_to_script>:<class_name>.<flow_method_name>'
+    - '<module_path.to.flow_function>'
+
+    Returns:
+        A tuple containing:
+        - The AST node (FunctionDef, AsyncFunctionDef, or ClassDef) of the base object.
+        - The full source code of the file or module as a string.
+        - A list of attribute access parts from the object path (e.g., ['MyFlowClass', 'run']).
+
+    Raises:
+        ValueError: If the module or target object cannot be found.
+    """
     if ":" in entrypoint:
-        # split by the last colon once to handle Windows paths with drive letters
-        path, func_name = entrypoint.rsplit(":", maxsplit=1)
+        path, object_path = entrypoint.rsplit(":", maxsplit=1)
         source_code = Path(path).read_text()
     else:
-        path, func_name = entrypoint.rsplit(".", maxsplit=1)
+        path, object_path = entrypoint.rsplit(".", maxsplit=1)
         spec = importlib.util.find_spec(path)
         if not spec or not spec.origin:
             raise ValueError(f"Could not find module {path!r}")
         source_code = Path(spec.origin).read_text()
 
     parsed_code = ast.parse(source_code)
-    func_def = next(
+    parts = object_path.split(".")
+    base_name = parts[0]
+
+    base_def = next(
         (
             node
             for node in ast.walk(parsed_code)
@@ -2958,14 +3008,15 @@ def _entrypoint_definition_and_source(
                 (
                     ast.FunctionDef,
                     ast.AsyncFunctionDef,
+                    ast.ClassDef,  # flow can be staticmethod/classmethod
                 ),
             )
-            and node.name == func_name
+            and node.name == base_name
         ),
         None,
     )
 
-    if not func_def:
-        raise ValueError(f"Could not find flow {func_name!r} in {path!r}")
+    if not base_def:
+        raise ValueError(f"Could not find object {base_name!r} in {path!r}")
 
-    return func_def, source_code
+    return base_def, source_code, parts
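The practical effect of these changes is that a flow defined as a static or class method can now be addressed with the `<path_to_script>:<class_name>.<flow_method_name>` entrypoint format. A hedged sketch (the module and class names are hypothetical):

# example_flows.py (hypothetical)
from prefect import flow

class MyFlows:
    @staticmethod
    @flow
    def hello() -> str:
        return "hello"

# The new entrypoint format resolves the class first, then the method:
from prefect.flows import safe_load_flow_from_entrypoint

loaded = safe_load_flow_from_entrypoint("example_flows.py:MyFlows.hello")
assert loaded is not None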
prefect/runner/_observers.py
ADDED
@@ -0,0 +1,60 @@
+import asyncio
+import uuid
+from contextlib import AsyncExitStack
+from typing import Any, Protocol
+
+from prefect.events.clients import PrefectEventSubscriber, get_events_subscriber
+from prefect.events.filters import EventFilter, EventNameFilter
+from prefect.logging.loggers import get_logger
+
+
+class OnCancellingCallback(Protocol):
+    def __call__(self, flow_run_id: uuid.UUID) -> None: ...
+
+
+class FlowRunCancellingObserver:
+    def __init__(self, on_cancelling: OnCancellingCallback):
+        self.logger = get_logger("FlowRunCancellingObserver")
+        self.on_cancelling = on_cancelling
+        self._events_subscriber: PrefectEventSubscriber | None = None
+        self._exit_stack = AsyncExitStack()
+
+    async def _consume_events(self):
+        if self._events_subscriber is None:
+            raise RuntimeError(
+                "Events subscriber not initialized. Please use `async with` to initialize the observer."
+            )
+        async for event in self._events_subscriber:
+            try:
+                flow_run_id = uuid.UUID(
+                    event.resource["prefect.resource.id"].replace(
+                        "prefect.flow-run.", ""
+                    )
+                )
+                self.on_cancelling(flow_run_id)
+            except ValueError:
+                self.logger.debug(
+                    "Received event with invalid flow run ID: %s",
+                    event.resource["prefect.resource.id"],
+                )
+
+    async def __aenter__(self):
+        self._events_subscriber = await self._exit_stack.enter_async_context(
+            get_events_subscriber(
+                filter=EventFilter(
+                    event=EventNameFilter(name=["prefect.flow-run.Cancelling"])
+                )
+            )
+        )
+        self._consumer_task = asyncio.create_task(self._consume_events())
+        return self
+
+    async def __aexit__(self, *exc_info: Any):
+        await self._exit_stack.__aexit__(*exc_info)
+        self._consumer_task.cancel()
+        try:
+            await self._consumer_task
+        except asyncio.CancelledError:
+            pass
+        except Exception:
+            self.logger.exception("Error consuming events")
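The new module gives the runner a push-based replacement for cancellation polling: the observer subscribes to `prefect.flow-run.Cancelling` events and invokes a callback with the parsed flow run ID. A minimal usage sketch:

import asyncio
import uuid

from prefect.runner._observers import FlowRunCancellingObserver

async def main() -> None:
    def on_cancelling(flow_run_id: uuid.UUID) -> None:
        print(f"Cancellation requested for flow run {flow_run_id}")

    # The subscription is opened on __aenter__; __aexit__ closes it and then
    # cancels the consumer task.
    async with FlowRunCancellingObserver(on_cancelling=on_cancelling):
        await asyncio.sleep(60)  # react to cancellation events for a minute

asyncio.run(main())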
prefect/runner/runner.py
CHANGED
@@ -46,6 +46,8 @@ import subprocess
 import sys
 import tempfile
 import threading
+import uuid
+from contextlib import AsyncExitStack
 from copy import deepcopy
 from functools import partial
 from pathlib import Path
@@ -80,13 +82,6 @@ from prefect._internal.concurrency.api import (
     from_sync,
 )
 from prefect.client.orchestration import PrefectClient, get_client
-from prefect.client.schemas.filters import (
-    FlowRunFilter,
-    FlowRunFilterId,
-    FlowRunFilterState,
-    FlowRunFilterStateName,
-    FlowRunFilterStateType,
-)
 from prefect.client.schemas.objects import (
     ConcurrencyLimitConfig,
     State,
@@ -94,12 +89,13 @@ from prefect.client.schemas.objects import (
 )
 from prefect.client.schemas.objects import Flow as APIFlow
 from prefect.events import DeploymentTriggerTypes, TriggerTypes
+from prefect.events.clients import EventsClient, get_events_client
 from prefect.events.related import tags_as_related_resources
-from prefect.events.schemas.events import RelatedResource
-from prefect.events.utilities import emit_event
+from prefect.events.schemas.events import Event, RelatedResource, Resource
 from prefect.exceptions import Abort, ObjectNotFound
 from prefect.flows import Flow, FlowStateHook, load_flow_from_flow_run
 from prefect.logging.loggers import PrefectLogAdapter, flow_run_logger, get_logger
+from prefect.runner._observers import FlowRunCancellingObserver
 from prefect.runner.storage import RunnerStorage
 from prefect.schedules import Schedule
 from prefect.settings import (
@@ -228,7 +224,9 @@ class Runner:
         if self.heartbeat_seconds is not None and self.heartbeat_seconds < 30:
             raise ValueError("Heartbeat must be 30 seconds or greater.")
         self._heartbeat_task: asyncio.Task[None] | None = None
+        self._events_client: EventsClient = get_events_client(checkpoint_every=1)
 
+        self._exit_stack = AsyncExitStack()
         self._limiter: anyio.CapacityLimiter | None = None
         self._client: PrefectClient = get_client()
         self._submitting_flow_run_ids: set[UUID] = set()
@@ -501,15 +499,6 @@ class Runner:
                     jitter_range=0.3,
                 )
             )
-            loops_task_group.start_soon(
-                partial(
-                    critical_service_loop,
-                    workload=runner._check_for_cancelled_flow_runs,
-                    interval=self.query_seconds * 2,
-                    run_once=run_once,
-                    jitter_range=0.3,
-                )
-            )
 
     def execute_in_background(
         self, func: Callable[..., Any], *args: Any, **kwargs: Any
@@ -583,58 +572,42 @@ class Runner:
             if not self._acquire_limit_slot(flow_run_id):
                 return
 
-            async with anyio.create_task_group() as tg:
-                with anyio.CancelScope():
-                    self._submitting_flow_run_ids.add(flow_run_id)
-                    flow_run = await self._client.read_flow_run(flow_run_id)
-
-                    process: (
-                        anyio.abc.Process | Exception
-                    ) = await self._runs_task_group.start(
-                        partial(
-                            self._submit_run_and_capture_errors,
-                            flow_run=flow_run,
-                            entrypoint=entrypoint,
-                            command=command,
-                            cwd=cwd,
-                            env=env,
-                            stream_output=stream_output,
-                        ),
-                    )
-                    if isinstance(process, Exception):
-                        return
+            self._submitting_flow_run_ids.add(flow_run_id)
+            flow_run = await self._client.read_flow_run(flow_run_id)
 
-                    task_status.started(process.pid)
+            process: anyio.abc.Process | Exception = await self._runs_task_group.start(
+                partial(
+                    self._submit_run_and_capture_errors,
+                    flow_run=flow_run,
+                    entrypoint=entrypoint,
+                    command=command,
+                    cwd=cwd,
+                    env=env,
+                    stream_output=stream_output,
+                ),
+            )
+            if isinstance(process, Exception):
+                return
 
-                    if self.heartbeat_seconds is not None:
-                        await self._emit_flow_run_heartbeat(flow_run)
+            task_status.started(process.pid)
 
-                    async with self._flow_run_process_map_lock:
-                        # Only add the process to the map if it is still running
-                        if process.returncode is None:
-                            self._flow_run_process_map[flow_run.id] = ProcessMapEntry(
-                                pid=process.pid, flow_run=flow_run
-                            )
+            if self.heartbeat_seconds is not None:
+                await self._emit_flow_run_heartbeat(flow_run)
 
-                # Cancellation checks should stop, and the task group should be
-                # cancelled, once the flow run process exits and is removed
-                # from the process map
-                workload = partial(
-                    self._check_for_cancelled_flow_runs,
-                    should_stop=lambda: not self._flow_run_process_map,
-                    on_stop=tg.cancel_scope.cancel,
-                )
+            async with self._flow_run_process_map_lock:
+                # Only add the process to the map if it is still running
+                if process.returncode is None:
+                    self._flow_run_process_map[flow_run.id] = ProcessMapEntry(
+                        pid=process.pid, flow_run=flow_run
+                    )
 
-                tg.start_soon(
-                    partial(
-                        critical_service_loop,
-                        workload=workload,
-                        interval=self.query_seconds,
-                        jitter_range=0.3,
-                    )
-                )
+            while True:
+                # Wait until flow run execution is complete and the process has been removed from the map
+                await anyio.sleep(0.1)
+                if self._flow_run_process_map.get(flow_run.id) is None:
+                    break
 
-                return process
+            return process
 
     async def execute_bundle(
         self,
@@ -673,24 +646,8 @@ class Runner:
             )
             self._flow_run_bundle_map[flow_run.id] = bundle
 
-            tasks: list[asyncio.Task[None]] = []
-            tasks.append(
-                asyncio.create_task(
-                    critical_service_loop(
-                        workload=self._check_for_cancelled_flow_runs,
-                        interval=self.query_seconds,
-                        jitter_range=0.3,
-                    )
-                )
-            )
-
             await anyio.to_thread.run_sync(process.join)
 
-            for task in tasks:
-                task.cancel()
-
-            await asyncio.gather(*tasks, return_exceptions=True)
-
             self._flow_run_process_map.pop(flow_run.id)
 
             flow_run_logger = self._get_flow_run_logger(flow_run)
@@ -1000,83 +957,11 @@ class Runner:
         self.last_polled: datetime.datetime = now("UTC")
         return await self._submit_scheduled_flow_runs(flow_run_response=runs_response)
 
-    async def _check_for_cancelled_flow_runs(
-        self,
-        should_stop: Callable[[], bool] = lambda: False,
-        on_stop: Callable[[], None] = lambda: None,
+    async def _cancel_run(
+        self, flow_run: "FlowRun | uuid.UUID", state_msg: Optional[str] = None
     ):
-        """
-        Checks for flow runs with CANCELLING states and, if found, attempts to
-        cancel them.
-
-        Args:
-            should_stop: A callable that returns a boolean indicating whether or not
-                the runner should stop checking for cancelled flow runs.
-            on_stop: A callable that is called when the runner should stop checking
-                for cancelled flow runs.
-        """
-        if self.stopping:
-            return
-        if not self.started:
-            raise RuntimeError(
-                "Runner is not set up. Please make sure you are running this runner "
-                "as an async context manager."
-            )
-
-        if should_stop():
-            self._logger.debug(
-                "Runner has no active flow runs or deployments. Sending message to loop"
-                " service that no further cancellation checks are needed."
-            )
-            on_stop()
-
-        self._logger.debug("Checking for cancelled flow runs...")
-
-        named_cancelling_flow_runs = await self._client.read_flow_runs(
-            flow_run_filter=FlowRunFilter(
-                state=FlowRunFilterState(
-                    type=FlowRunFilterStateType(any_=[StateType.CANCELLED]),
-                    name=FlowRunFilterStateName(any_=["Cancelling"]),
-                ),
-                # Avoid duplicate cancellation calls
-                id=FlowRunFilterId(
-                    any_=list(
-                        self._flow_run_process_map.keys()
-                        - self._cancelling_flow_run_ids
-                    )
-                ),
-            ),
-        )
-
-        typed_cancelling_flow_runs = await self._client.read_flow_runs(
-            flow_run_filter=FlowRunFilter(
-                state=FlowRunFilterState(
-                    type=FlowRunFilterStateType(any_=[StateType.CANCELLING]),
-                ),
-                # Avoid duplicate cancellation calls
-                id=FlowRunFilterId(
-                    any_=list(
-                        self._flow_run_process_map.keys()
-                        - self._cancelling_flow_run_ids
-                    )
-                ),
-            ),
-        )
-
-        cancelling_flow_runs = named_cancelling_flow_runs + typed_cancelling_flow_runs
-
-        if cancelling_flow_runs:
-            self._logger.info(
-                f"Found {len(cancelling_flow_runs)} flow runs awaiting cancellation."
-            )
-
-        for flow_run in cancelling_flow_runs:
-            self._cancelling_flow_run_ids.add(flow_run.id)
-            self._runs_task_group.start_soon(self._cancel_run, flow_run)
-
-        return cancelling_flow_runs
-
-    async def _cancel_run(self, flow_run: "FlowRun", state_msg: Optional[str] = None):
+        if isinstance(flow_run, uuid.UUID):
+            flow_run = await self._client.read_flow_run(flow_run)
         run_logger = self._get_flow_run_logger(flow_run)
 
         process_map_entry = self._flow_run_process_map.get(flow_run.id)
@@ -1121,7 +1006,7 @@ class Runner:
             )
 
             flow, deployment = await self._get_flow_and_deployment(flow_run)
-            self._emit_flow_run_cancelled_event(
+            await self._emit_flow_run_cancelled_event(
                 flow_run=flow_run, flow=flow, deployment=deployment
             )
             run_logger.info(f"Cancelled flow run '{flow_run.name}'!")
@@ -1180,14 +1065,18 @@ class Runner:
         related = [RelatedResource.model_validate(r) for r in related]
         related += tags_as_related_resources(set(tags))
 
-        emit_event(
-            event="prefect.flow-run.heartbeat",
-            resource={
-                "prefect.resource.id": f"prefect.flow-run.{flow_run.id}",
-                "prefect.resource.name": flow_run.name,
-                "prefect.version": __version__,
-            },
-            related=related,
+        await self._events_client.emit(
+            Event(
+                event="prefect.flow-run.heartbeat",
+                resource=Resource(
+                    {
+                        "prefect.resource.id": f"prefect.flow-run.{flow_run.id}",
+                        "prefect.resource.name": flow_run.name,
+                        "prefect.version": __version__,
+                    }
+                ),
+                related=related,
+            )
         )
 
     def _event_resource(self):
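Both emission sites now await an `EventsClient` instead of calling the fire-and-forget `emit_event` helper; with `checkpoint_every=1` (set in `Runner.__init__` above) each event is checkpointed as soon as it is emitted. A minimal sketch of the pattern, using only calls that appear in this diff:

from prefect.events.clients import get_events_client
from prefect.events.schemas.events import Event, Resource

async def emit_heartbeat_example() -> None:
    # get_events_client() returns an async context manager; emit() is awaited,
    # so delivery problems surface to the caller instead of being dropped.
    async with get_events_client(checkpoint_every=1) as events_client:
        await events_client.emit(
            Event(
                event="prefect.flow-run.heartbeat",
                resource=Resource({"prefect.resource.id": "prefect.flow-run.example"}),
            )
        )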
@@ -1199,7 +1088,7 @@ class Runner:
             "prefect.version": __version__,
         }
 
-    def _emit_flow_run_cancelled_event(
+    async def _emit_flow_run_cancelled_event(
         self,
         flow_run: "FlowRun",
         flow: "Optional[APIFlow]",
@@ -1234,10 +1123,12 @@ class Runner:
         related = [RelatedResource.model_validate(r) for r in related]
         related += tags_as_related_resources(set(tags))
 
-        emit_event(
-            event="prefect.runner.cancelled-flow-run",
-            resource=self._event_resource(),
-            related=related,
+        await self._events_client.emit(
+            Event(
+                event="prefect.runner.cancelled-flow-run",
+                resource=Resource(self._event_resource()),
+                related=related,
+            )
         )
         self._logger.debug(f"Emitted flow run heartbeat event for {flow_run.id}")
@@ -1543,43 +1434,6 @@ class Runner:
 
         await self._client.set_flow_run_state(flow_run.id, state, force=True)
 
-        # Do not remove the flow run from the cancelling set immediately because
-        # the API caches responses for the `read_flow_runs` and we do not want to
-        # duplicate cancellations.
-        await self._schedule_task(
-            60 * 10, self._cancelling_flow_run_ids.remove, flow_run.id
-        )
-
-    async def _schedule_task(
-        self, __in_seconds: int, fn: Callable[..., Any], *args: Any, **kwargs: Any
-    ) -> None:
-        """
-        Schedule a background task to start after some time.
-
-        These tasks will be run immediately when the runner exits instead of waiting.
-
-        The function may be async or sync. Async functions will be awaited.
-        """
-
-        async def wrapper(task_status: anyio.abc.TaskStatus[None]) -> None:
-            # If we are shutting down, do not sleep; otherwise sleep until the scheduled
-            # time or shutdown
-            if self.started:
-                with anyio.CancelScope() as scope:
-                    self._scheduled_task_scopes.add(scope)
-                    task_status.started()
-                    await anyio.sleep(__in_seconds)
-
-                self._scheduled_task_scopes.remove(scope)
-            else:
-                task_status.started()
-
-            result = fn(*args, **kwargs)
-            if asyncio.iscoroutine(result):
-                await result
-
-        await self._runs_task_group.start(wrapper)
-
     async def _run_on_cancellation_hooks(
         self,
         flow_run: "FlowRun",
@@ -1647,11 +1501,19 @@ class Runner:
         if not hasattr(self, "_loop") or not self._loop:
             self._loop = asyncio.get_event_loop()
 
-        await self._client.__aenter__()
+        await self._exit_stack.enter_async_context(
+            FlowRunCancellingObserver(
+                on_cancelling=lambda flow_run_id: self._runs_task_group.start_soon(
+                    self._cancel_run, flow_run_id
+                )
+            )
+        )
+        await self._exit_stack.enter_async_context(self._client)
+        await self._exit_stack.enter_async_context(self._events_client)
 
         if not hasattr(self, "_runs_task_group") or not self._runs_task_group:
             self._runs_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
-        await self._runs_task_group.__aenter__()
+        await self._exit_stack.enter_async_context(self._runs_task_group)
 
         if not hasattr(self, "_loops_task_group") or not self._loops_task_group:
             self._loops_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
@@ -1677,11 +1539,7 @@ class Runner:
         for scope in self._scheduled_task_scopes:
             scope.cancel()
 
-        if self._runs_task_group:
-            await self._runs_task_group.__aexit__(*exc_info)
-
-        if self._client:
-            await self._client.__aexit__(*exc_info)
+        await self._exit_stack.__aexit__(*exc_info)
 
         shutil.rmtree(str(self._tmp_dir))
         del self._runs_task_group, self._loops_task_group
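The `AsyncExitStack` adopted throughout this file replaces the hand-rolled `__aenter__`/`__aexit__` bookkeeping above: everything entered via `enter_async_context` is closed by a single `__aexit__` call, in reverse (LIFO) order. A standalone sketch of the pattern:

import asyncio
from contextlib import AsyncExitStack, asynccontextmanager

@asynccontextmanager
async def resource(name: str):
    print(f"enter {name}")
    try:
        yield name
    finally:
        print(f"exit {name}")

async def main() -> None:
    stack = AsyncExitStack()
    await stack.enter_async_context(resource("observer"))
    await stack.enter_async_context(resource("client"))
    await stack.enter_async_context(resource("task group"))
    # Exits run in reverse order: task group, client, observer.
    await stack.__aexit__(None, None, None)

asyncio.run(main())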