prefect-client 3.0.0rc1__py3-none-any.whl → 3.0.0rc3__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
- prefect/_internal/compatibility/migration.py +124 -0
- prefect/_internal/concurrency/__init__.py +2 -2
- prefect/_internal/concurrency/primitives.py +1 -0
- prefect/_internal/pydantic/annotations/pendulum.py +2 -2
- prefect/_internal/pytz.py +1 -1
- prefect/blocks/core.py +1 -1
- prefect/blocks/redis.py +168 -0
- prefect/client/orchestration.py +113 -23
- prefect/client/schemas/actions.py +1 -1
- prefect/client/schemas/filters.py +6 -0
- prefect/client/schemas/objects.py +22 -11
- prefect/client/subscriptions.py +3 -2
- prefect/concurrency/asyncio.py +1 -1
- prefect/concurrency/services.py +1 -1
- prefect/context.py +1 -27
- prefect/deployments/__init__.py +3 -0
- prefect/deployments/base.py +11 -3
- prefect/deployments/deployments.py +3 -0
- prefect/deployments/steps/pull.py +1 -0
- prefect/deployments/steps/utility.py +2 -1
- prefect/engine.py +3 -0
- prefect/events/cli/automations.py +1 -1
- prefect/events/clients.py +7 -1
- prefect/events/schemas/events.py +2 -0
- prefect/exceptions.py +9 -0
- prefect/filesystems.py +22 -11
- prefect/flow_engine.py +118 -156
- prefect/flow_runs.py +2 -2
- prefect/flows.py +91 -35
- prefect/futures.py +44 -43
- prefect/infrastructure/provisioners/container_instance.py +1 -0
- prefect/infrastructure/provisioners/ecs.py +2 -2
- prefect/input/__init__.py +4 -0
- prefect/input/run_input.py +4 -2
- prefect/logging/formatters.py +2 -2
- prefect/logging/handlers.py +2 -2
- prefect/logging/loggers.py +1 -1
- prefect/plugins.py +1 -0
- prefect/records/cache_policies.py +179 -0
- prefect/records/result_store.py +10 -3
- prefect/results.py +27 -55
- prefect/runner/runner.py +1 -1
- prefect/runner/server.py +1 -1
- prefect/runtime/__init__.py +1 -0
- prefect/runtime/deployment.py +1 -0
- prefect/runtime/flow_run.py +1 -0
- prefect/runtime/task_run.py +1 -0
- prefect/settings.py +21 -5
- prefect/states.py +17 -4
- prefect/task_engine.py +337 -209
- prefect/task_runners.py +15 -5
- prefect/task_runs.py +203 -0
- prefect/{task_server.py → task_worker.py} +66 -36
- prefect/tasks.py +180 -77
- prefect/transactions.py +92 -16
- prefect/types/__init__.py +1 -1
- prefect/utilities/asyncutils.py +3 -3
- prefect/utilities/callables.py +90 -7
- prefect/utilities/dockerutils.py +5 -3
- prefect/utilities/engine.py +11 -0
- prefect/utilities/filesystem.py +4 -5
- prefect/utilities/importtools.py +34 -5
- prefect/utilities/services.py +2 -2
- prefect/utilities/urls.py +195 -0
- prefect/utilities/visualization.py +1 -0
- prefect/variables.py +19 -10
- prefect/workers/base.py +46 -1
- {prefect_client-3.0.0rc1.dist-info → prefect_client-3.0.0rc3.dist-info}/METADATA +3 -2
- {prefect_client-3.0.0rc1.dist-info → prefect_client-3.0.0rc3.dist-info}/RECORD +72 -66
- {prefect_client-3.0.0rc1.dist-info → prefect_client-3.0.0rc3.dist-info}/LICENSE +0 -0
- {prefect_client-3.0.0rc1.dist-info → prefect_client-3.0.0rc3.dist-info}/WHEEL +0 -0
- {prefect_client-3.0.0rc1.dist-info → prefect_client-3.0.0rc3.dist-info}/top_level.txt +0 -0
prefect/task_runners.py
CHANGED
@@ -202,12 +202,13 @@ class TaskRunner(abc.ABC, Generic[F]):
 
 
 class ThreadPoolTaskRunner(TaskRunner[PrefectConcurrentFuture]):
-    def __init__(self):
+    def __init__(self, max_workers: Optional[int] = None):
         super().__init__()
         self._executor: Optional[ThreadPoolExecutor] = None
+        self._max_workers = max_workers
 
     def duplicate(self) -> "ThreadPoolTaskRunner":
-        return type(self)()
+        return type(self)(max_workers=self._max_workers)
 
     def submit(
         self,
@@ -278,7 +279,7 @@ class ThreadPoolTaskRunner(TaskRunner[PrefectConcurrentFuture]):
 
     def __enter__(self):
         super().__enter__()
-        self._executor = ThreadPoolExecutor()
+        self._executor = ThreadPoolExecutor(max_workers=self._max_workers)
         return self
 
     def __exit__(self, exc_type, exc_value, traceback):
@@ -287,6 +288,15 @@ class ThreadPoolTaskRunner(TaskRunner[PrefectConcurrentFuture]):
         self._executor = None
         super().__exit__(exc_type, exc_value, traceback)
 
+    def __eq__(self, value: object) -> bool:
+        if not isinstance(value, ThreadPoolTaskRunner):
+            return False
+        return self._max_workers == value._max_workers
+
+
+# Here, we alias ConcurrentTaskRunner to ThreadPoolTaskRunner for backwards compatibility
+ConcurrentTaskRunner = ThreadPoolTaskRunner
+
 
 class PrefectTaskRunner(TaskRunner[PrefectDistributedFuture]):
     def __init__(self):
@@ -321,11 +331,11 @@ class PrefectTaskRunner(TaskRunner[PrefectDistributedFuture]):
         flow_run_ctx = FlowRunContext.get()
         if flow_run_ctx:
             get_run_logger(flow_run_ctx).info(
-                f"Submitting task {task.name} to for execution by a Prefect task
+                f"Submitting task {task.name} to for execution by a Prefect task worker..."
             )
         else:
             self.logger.info(
-                f"Submitting task {task.name} to for execution by a Prefect task
+                f"Submitting task {task.name} to for execution by a Prefect task worker..."
            )
 
         return task.apply_async(
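In effect, `ThreadPoolTaskRunner` now accepts a `max_workers` cap that is carried through `duplicate()` and passed to the underlying `ThreadPoolExecutor`, compares equal to other runners with the same `max_workers`, and keeps `ConcurrentTaskRunner` as a backwards-compatible alias. A minimal usage sketch (the flow and task below are illustrative, not part of the diff):

```python
from prefect import flow, task
from prefect.task_runners import ThreadPoolTaskRunner


@task
def double(x: int) -> int:
    return x * 2


# Cap concurrent task threads at 4; omitting max_workers keeps the executor default.
@flow(task_runner=ThreadPoolTaskRunner(max_workers=4))
def my_flow():
    futures = [double.submit(i) for i in range(10)]
    return [f.result() for f in futures]


if __name__ == "__main__":
    my_flow()
```

Because `duplicate()` now preserves `max_workers`, copied runners keep the same concurrency limit, and the new `__eq__` makes `ThreadPoolTaskRunner(max_workers=4) == ConcurrentTaskRunner(max_workers=4)` hold.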
prefect/task_runs.py
ADDED
@@ -0,0 +1,203 @@
+import asyncio
+import atexit
+import threading
+import uuid
+from typing import Dict, Optional
+
+import anyio
+from cachetools import TTLCache
+from typing_extensions import Self
+
+from prefect._internal.concurrency.api import create_call, from_async, from_sync
+from prefect._internal.concurrency.threads import get_global_loop
+from prefect.client.schemas.objects import TERMINAL_STATES
+from prefect.events.clients import get_events_subscriber
+from prefect.events.filters import EventFilter, EventNameFilter
+from prefect.logging.loggers import get_logger
+
+
+class TaskRunWaiter:
+    """
+    A service used for waiting for a task run to finish.
+
+    This service listens for task run events and provides a way to wait for a specific
+    task run to finish. This is useful for waiting for a task run to finish before
+    continuing execution.
+
+    The service is a singleton and must be started before use. The service will
+    automatically start when the first instance is created. A single websocket
+    connection is used to listen for task run events.
+
+    The service can be used to wait for a task run to finish by calling
+    `TaskRunWaiter.wait_for_task_run` with the task run ID to wait for. The method
+    will return when the task run has finished or the timeout has elapsed.
+
+    The service will automatically stop when the Python process exits or when the
+    global loop thread is stopped.
+
+    Example:
+    ```python
+    import asyncio
+    from uuid import uuid4
+
+    from prefect import task
+    from prefect.task_engine import run_task_async
+    from prefect.task_runs import TaskRunWaiter
+
+
+    @task
+    async def test_task():
+        await asyncio.sleep(5)
+        print("Done!")
+
+
+    async def main():
+        task_run_id = uuid4()
+        asyncio.create_task(run_task_async(task=test_task, task_run_id=task_run_id))
+
+        await TaskRunWaiter.wait_for_task_run(task_run_id)
+        print("Task run finished")
+
+
+    if __name__ == "__main__":
+        asyncio.run(main())
+    ```
+    """
+
+    _instance: Optional[Self] = None
+    _instance_lock = threading.Lock()
+
+    def __init__(self):
+        self.logger = get_logger("TaskRunWaiter")
+        self._consumer_task: Optional[asyncio.Task] = None
+        self._observed_completed_task_runs: TTLCache[uuid.UUID, bool] = TTLCache(
+            maxsize=10000, ttl=600
+        )
+        self._completion_events: Dict[uuid.UUID, asyncio.Event] = {}
+        self._loop: Optional[asyncio.AbstractEventLoop] = None
+        self._observed_completed_task_runs_lock = threading.Lock()
+        self._completion_events_lock = threading.Lock()
+        self._started = False
+
+    def start(self):
+        """
+        Start the TaskRunWaiter service.
+        """
+        if self._started:
+            return
+        self.logger.debug("Starting TaskRunWaiter")
+        loop_thread = get_global_loop()
+
+        if not asyncio.get_running_loop() == loop_thread._loop:
+            raise RuntimeError("TaskRunWaiter must run on the global loop thread.")
+
+        self._loop = loop_thread._loop
+        self._consumer_task = self._loop.create_task(self._consume_events())
+
+        loop_thread.add_shutdown_call(create_call(self.stop))
+        atexit.register(self.stop)
+        self._started = True
+
+    async def _consume_events(self):
+        async with get_events_subscriber(
+            filter=EventFilter(
+                event=EventNameFilter(
+                    name=[
+                        f"prefect.task-run.{state.name.title()}"
+                        for state in TERMINAL_STATES
+                    ],
+                )
+            )
+        ) as subscriber:
+            async for event in subscriber:
+                try:
+                    self.logger.debug(
+                        f"Received event: {event.resource['prefect.resource.id']}"
+                    )
+                    task_run_id = uuid.UUID(
+                        event.resource["prefect.resource.id"].replace(
+                            "prefect.task-run.", ""
+                        )
+                    )
+                    with self._observed_completed_task_runs_lock:
+                        # Cache the task run ID for a short period of time to avoid
+                        # unnecessary waits
+                        self._observed_completed_task_runs[task_run_id] = True
+                    with self._completion_events_lock:
+                        # Set the event for the task run ID if it is in the cache
+                        # so the waiter can wake up the waiting coroutine
+                        if task_run_id in self._completion_events:
+                            self._completion_events[task_run_id].set()
+                except Exception as exc:
+                    self.logger.error(f"Error processing event: {exc}")
+
+    def stop(self):
+        """
+        Stop the TaskRunWaiter service.
+        """
+        self.logger.debug("Stopping TaskRunWaiter")
+        if self._consumer_task:
+            self._consumer_task.cancel()
+            self._consumer_task = None
+        self.__class__._instance = None
+        self._started = False
+
+    @classmethod
+    async def wait_for_task_run(
+        cls, task_run_id: uuid.UUID, timeout: Optional[float] = None
+    ):
+        """
+        Wait for a task run to finish.
+
+        Note this relies on a websocket connection to receive events from the server
+        and will not work with an ephemeral server.
+
+        Args:
+            task_run_id: The ID of the task run to wait for.
+            timeout: The maximum time to wait for the task run to
+                finish. Defaults to None.
+        """
+        instance = cls.instance()
+        with instance._observed_completed_task_runs_lock:
+            if task_run_id in instance._observed_completed_task_runs:
+                return
+
+        # Need to create event in loop thread to ensure it can be set
+        # from the loop thread
+        finished_event = await from_async.wait_for_call_in_loop_thread(
+            create_call(asyncio.Event)
+        )
+        with instance._completion_events_lock:
+            # Cache the event for the task run ID so the consumer can set it
+            # when the event is received
+            instance._completion_events[task_run_id] = finished_event
+
+        with anyio.move_on_after(delay=timeout):
+            await from_async.wait_for_call_in_loop_thread(
+                create_call(finished_event.wait)
+            )
+
+        with instance._completion_events_lock:
+            # Remove the event from the cache after it has been waited on
+            instance._completion_events.pop(task_run_id, None)
+
+    @classmethod
+    def instance(cls):
+        """
+        Get the singleton instance of TaskRunWaiter.
+        """
+        with cls._instance_lock:
+            if cls._instance is None:
+                cls._instance = cls._new_instance()
+            return cls._instance
+
+    @classmethod
+    def _new_instance(cls):
+        instance = cls()
+
+        if threading.get_ident() == get_global_loop().thread.ident:
+            instance.start()
+        else:
+            from_sync.call_soon_in_loop_thread(create_call(instance.start)).result()
+
+        return instance
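The class docstring above shows the basic wait; the sketch below exercises the optional `timeout` argument of `wait_for_task_run`, assuming the same `run_task_async` entry point used in that docstring and a non-ephemeral API (the waiter depends on a websocket event subscription):

```python
import asyncio
from uuid import uuid4

from prefect import task
from prefect.task_engine import run_task_async
from prefect.task_runs import TaskRunWaiter


@task
async def slow_task():
    await asyncio.sleep(30)


async def main():
    task_run_id = uuid4()
    asyncio.create_task(run_task_async(task=slow_task, task_run_id=task_run_id))

    # Returns when a terminal task-run event is observed or after 10 seconds,
    # whichever comes first.
    await TaskRunWaiter.wait_for_task_run(task_run_id, timeout=10)


if __name__ == "__main__":
    asyncio.run(main())
```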
prefect/{task_server.py → task_worker.py}
RENAMED
@@ -11,10 +11,12 @@ from typing import List, Optional
 
 import anyio
 import anyio.abc
+from exceptiongroup import BaseExceptionGroup  # novermin
 from websockets.exceptions import InvalidStatusCode
 
-from prefect import Task
+from prefect import Task
 from prefect._internal.concurrency.api import create_call, from_sync
+from prefect.client.orchestration import get_client
 from prefect.client.schemas.objects import TaskRun
 from prefect.client.subscriptions import Subscription
 from prefect.exceptions import Abort, PrefectHTTPStatusError
@@ -30,11 +32,11 @@ from prefect.utilities.asyncutils import asyncnullcontext, sync_compatible
 from prefect.utilities.engine import emit_task_run_state_change_event, propose_state
 from prefect.utilities.processutils import _register_signal
 
-logger = get_logger("
+logger = get_logger("task_worker")
 
 
-class
-    """Raised when the task
+class StopTaskWorker(Exception):
+    """Raised when the task worker is stopped."""
 
     pass
 
@@ -49,11 +51,11 @@ def should_try_to_read_parameters(task: Task, task_run: TaskRun) -> bool:
     return new_enough_state_details and task_accepts_parameters
 
 
-class
+class TaskWorker:
     """This class is responsible for serving tasks that may be executed in the background
     by a task runner via the traditional engine machinery.
 
-    When `start()` is called, the task
+    When `start()` is called, the task worker will open a websocket connection to a
     server-side queue of scheduled task runs. When a scheduled task run is found, the
     scheduled task run is submitted to the engine for execution with a minimal `EngineContext`
     so that the task run can be governed by orchestration rules.
@@ -70,7 +72,7 @@ class TaskServer:
         *tasks: Task,
         limit: Optional[int] = 10,
     ):
-        self.tasks: List[Task] = tasks
+        self.tasks: List[Task] = list(tasks)
 
         self.started: bool = False
         self.stopping: bool = False
@@ -80,11 +82,11 @@ class TaskServer:
 
         if not asyncio.get_event_loop().is_running():
             raise RuntimeError(
-                "
+                "TaskWorker must be initialized within an async context."
             )
 
         self._runs_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
-        self._executor = ThreadPoolExecutor()
+        self._executor = ThreadPoolExecutor(max_workers=limit if limit else None)
         self._limiter = anyio.CapacityLimiter(limit) if limit else None
 
     @property
@@ -93,7 +95,7 @@ class TaskServer:
 
     def handle_sigterm(self, signum, frame):
         """
-        Shuts down the task
+        Shuts down the task worker when a SIGTERM is received.
         """
         logger.info("SIGTERM received, initiating graceful shutdown...")
         from_sync.call_in_loop_thread(create_call(self.stop))
@@ -103,18 +105,18 @@ class TaskServer:
     @sync_compatible
     async def start(self) -> None:
         """
-        Starts a task
+        Starts a task worker, which runs the tasks provided in the constructor.
         """
         _register_signal(signal.SIGTERM, self.handle_sigterm)
 
         async with asyncnullcontext() if self.started else self:
-            logger.info("Starting task
+            logger.info("Starting task worker...")
             try:
                 await self._subscribe_to_task_scheduling()
             except InvalidStatusCode as exc:
                 if exc.status_code == 403:
                     logger.error(
-                        "Could not establish a connection to the `/task_runs/subscriptions/scheduled`"
+                        "403: Could not establish a connection to the `/task_runs/subscriptions/scheduled`"
                         f" endpoint found at:\n\n {PREFECT_API_URL.value()}"
                         "\n\nPlease double-check the values of your"
                         " `PREFECT_API_URL` and `PREFECT_API_KEY` environment variables."
@@ -124,32 +126,54 @@ class TaskServer:
 
     @sync_compatible
     async def stop(self):
-        """Stops the task
+        """Stops the task worker's polling cycle."""
         if not self.started:
             raise RuntimeError(
-                "Task
+                "Task worker has not yet started. Please start the task worker by"
                 " calling .start()"
             )
 
         self.started = False
         self.stopping = True
 
-        raise
+        raise StopTaskWorker
 
     async def _subscribe_to_task_scheduling(self):
-
-
+        base_url = PREFECT_API_URL.value()
+        if base_url is None:
+            raise ValueError(
+                "`PREFECT_API_URL` must be set to use the task worker. "
+                "Task workers are not compatible with the ephemeral API."
+            )
+        task_keys_repr = " | ".join(
+            t.task_key.split(".")[-1].split("-")[0] for t in self.tasks
         )
+        logger.info(f"Subscribing to runs of task(s): {task_keys_repr}")
         async for task_run in Subscription(
             model=TaskRun,
             path="/task_runs/subscriptions/scheduled",
             keys=[task.task_key for task in self.tasks],
             client_id=self._client_id,
+            base_url=base_url,
         ):
+            logger.info(f"Received task run: {task_run.id} - {task_run.name}")
             if self._limiter:
                 await self._limiter.acquire_on_behalf_of(task_run.id)
-
-
+            self._runs_task_group.start_soon(
+                self._safe_submit_scheduled_task_run, task_run
+            )
+
+    async def _safe_submit_scheduled_task_run(self, task_run: TaskRun):
+        try:
+            await self._submit_scheduled_task_run(task_run)
+        except BaseException as exc:
+            logger.exception(
+                f"Failed to submit task run {task_run.id!r}",
+                exc_info=exc,
+            )
+        finally:
+            if self._limiter:
+                self._limiter.release_on_behalf_of(task_run.id)
 
     async def _submit_scheduled_task_run(self, task_run: TaskRun):
         logger.debug(
@@ -159,11 +183,11 @@ class TaskServer:
         task = next((t for t in self.tasks if t.task_key == task_run.task_key), None)
 
         if not task:
-            if PREFECT_TASK_SCHEDULING_DELETE_FAILED_SUBMISSIONS
+            if PREFECT_TASK_SCHEDULING_DELETE_FAILED_SUBMISSIONS:
                 logger.warning(
-                    f"Task {task_run.name!r} not found in task
+                    f"Task {task_run.name!r} not found in task worker registry."
                 )
-                await self._client._client.delete(f"/task_runs/{task_run.id}")
+                await self._client._client.delete(f"/task_runs/{task_run.id}")  # type: ignore
 
             return
 
@@ -256,18 +280,16 @@ class TaskServer:
                 context=run_context,
             )
             await asyncio.wrap_future(future)
-            if self._limiter:
-                self._limiter.release_on_behalf_of(task_run.id)
 
     async def execute_task_run(self, task_run: TaskRun):
-        """Execute a task run in the task
+        """Execute a task run in the task worker."""
        async with self if not self.started else asyncnullcontext():
             if self._limiter:
                 await self._limiter.acquire_on_behalf_of(task_run.id)
-            await self.
+            await self._safe_submit_scheduled_task_run(task_run)
 
     async def __aenter__(self):
-        logger.debug("Starting task
+        logger.debug("Starting task worker...")
 
         if self._client._closed:
             self._client = get_client()
@@ -280,7 +302,7 @@ class TaskServer:
         return self
 
     async def __aexit__(self, *exc_info):
-        logger.debug("Stopping task
+        logger.debug("Stopping task worker...")
         self.started = False
         await self._exit_stack.__aexit__(*exc_info)
 
@@ -300,7 +322,7 @@ async def serve(*tasks: Task, limit: Optional[int] = 10):
     Example:
         ```python
         from prefect import task
-        from prefect.
+        from prefect.task_worker import serve
 
         @task(log_prints=True)
         def say(message: str):
@@ -315,13 +337,21 @@ async def serve(*tasks: Task, limit: Optional[int] = 10):
             serve(say, yell)
         ```
     """
-
+    task_worker = TaskWorker(*tasks, limit=limit)
 
     try:
-        await
+        await task_worker.start()
+
+    except BaseExceptionGroup as exc:  # novermin
+        exceptions = exc.exceptions
+        n_exceptions = len(exceptions)
+        logger.error(
+            f"Task worker stopped with {n_exceptions} exception{'s' if n_exceptions != 1 else ''}:"
+            f"\n" + "\n".join(str(e) for e in exceptions)
+        )
 
-    except
-        logger.info("Task
+    except StopTaskWorker:
+        logger.info("Task worker stopped.")
 
-    except asyncio.CancelledError:
-        logger.info("Task
+    except (asyncio.CancelledError, KeyboardInterrupt):
+        logger.info("Task worker interrupted, stopping...")