prefect-client 3.1.8__py3-none-any.whl → 3.1.10__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- prefect/__init__.py +53 -59
- prefect/_internal/concurrency/services.py +6 -4
- prefect/_version.py +3 -3
- prefect/agent.py +3 -1
- prefect/artifacts.py +61 -74
- prefect/automations.py +27 -7
- prefect/client/cloud.py +0 -21
- prefect/client/schemas/objects.py +11 -0
- prefect/client/utilities.py +1 -15
- prefect/context.py +16 -27
- prefect/deployments/deployments.py +4 -2
- prefect/deployments/runner.py +3 -1
- prefect/engine.py +2 -1
- prefect/events/filters.py +2 -8
- prefect/exceptions.py +31 -41
- prefect/filesystems.py +2 -2
- prefect/flow_engine.py +2 -2
- prefect/flows.py +230 -186
- prefect/futures.py +42 -27
- prefect/infrastructure/__init__.py +3 -1
- prefect/infrastructure/base.py +3 -1
- prefect/locking/filesystem.py +8 -7
- prefect/locking/memory.py +5 -3
- prefect/locking/protocol.py +1 -1
- prefect/plugins.py +12 -10
- prefect/results.py +76 -19
- prefect/runner/runner.py +2 -3
- prefect/states.py +22 -10
- prefect/task_engine.py +1 -1
- prefect/telemetry/instrumentation.py +9 -10
- prefect/telemetry/processors.py +6 -6
- prefect/telemetry/services.py +68 -0
- prefect/utilities/engine.py +15 -1
- prefect/utilities/importtools.py +28 -21
- prefect/variables.py +2 -2
- prefect/workers/__init__.py +2 -0
- prefect/workers/base.py +6 -12
- prefect/workers/block.py +3 -1
- prefect/workers/cloud.py +3 -1
- {prefect_client-3.1.8.dist-info → prefect_client-3.1.10.dist-info}/METADATA +1 -1
- {prefect_client-3.1.8.dist-info → prefect_client-3.1.10.dist-info}/RECORD +44 -43
- {prefect_client-3.1.8.dist-info → prefect_client-3.1.10.dist-info}/LICENSE +0 -0
- {prefect_client-3.1.8.dist-info → prefect_client-3.1.10.dist-info}/WHEEL +0 -0
- {prefect_client-3.1.8.dist-info → prefect_client-3.1.10.dist-info}/top_level.txt +0 -0
prefect/futures.py
CHANGED
@@ -1,17 +1,15 @@
 import abc
 import asyncio
-import collections
 import concurrent.futures
 import threading
 import uuid
 from collections.abc import Generator, Iterator
 from functools import partial
-from typing import Any, Callable, Generic,
+from typing import Any, Callable, Generic, Optional, Union

-from typing_extensions import TypeVar
+from typing_extensions import NamedTuple, Self, TypeVar

 from prefect.client.orchestration import get_client
-from prefect.client.schemas.objects import TaskRun
 from prefect.exceptions import ObjectNotFound
 from prefect.logging.loggers import get_logger, get_run_logger
 from prefect.states import Pending, State
@@ -50,7 +48,7 @@ class PrefectFuture(abc.ABC, Generic[R]):
 return self._final_state
 client = get_client(sync_client=True)
 try:
-task_run =
+task_run = client.read_task_run(task_run_id=self.task_run_id)
 except ObjectNotFound:
 # We'll be optimistic and assume this task will eventually start
 # TODO: Consider using task run events to wait for the task to start
@@ -92,7 +90,7 @@ class PrefectFuture(abc.ABC, Generic[R]):
 """

 @abc.abstractmethod
-def add_done_callback(self, fn):
+def add_done_callback(self, fn: Callable[["PrefectFuture[R]"], None]):
 """
 Add a callback to be run when the future completes or is cancelled.

@@ -102,13 +100,17 @@ class PrefectFuture(abc.ABC, Generic[R]):
 ...


-class PrefectWrappedFuture(PrefectFuture, abc.ABC, Generic[R, F]):
+class PrefectWrappedFuture(PrefectFuture[R], abc.ABC, Generic[R, F]):
 """
 A Prefect future that wraps another future object.
+
+Type Parameters:
+    R: The return type of the future
+    F: The type of the wrapped future
 """

 def __init__(self, task_run_id: uuid.UUID, wrapped_future: F):
-self._wrapped_future = wrapped_future
+self._wrapped_future: F = wrapped_future
 super().__init__(task_run_id)

 @property
@@ -116,10 +118,11 @@ class PrefectWrappedFuture(PrefectFuture, abc.ABC, Generic[R, F]):
 """The underlying future object wrapped by this Prefect future"""
 return self._wrapped_future

-def add_done_callback(self, fn: Callable[[PrefectFuture[R]], None]):
+def add_done_callback(self, fn: Callable[[PrefectFuture[R]], None]) -> None:
+"""Add a callback to be executed when the future completes."""
 if not self._final_state:

-def call_with_self(future):
+def call_with_self(future: F):
 """Call the callback with self as the argument, this is necessary to ensure we remove the future from the pending set"""
 fn(self)

@@ -128,7 +131,7 @@ class PrefectWrappedFuture(PrefectFuture, abc.ABC, Generic[R, F]):
 fn(self)


-class PrefectConcurrentFuture(PrefectWrappedFuture[R, concurrent.futures.Future]):
+class PrefectConcurrentFuture(PrefectWrappedFuture[R, concurrent.futures.Future[R]]):
 """
 A Prefect future that wraps a concurrent.futures.Future. This future is used
 when the task run is submitted to a ThreadPoolExecutor.
@@ -193,7 +196,7 @@ class PrefectDistributedFuture(PrefectFuture[R]):
 any task run scheduled in Prefect's API.
 """

-done_callbacks:
+done_callbacks: list[Callable[[PrefectFuture[R]], None]] = []
 waiter = None

 def wait(self, timeout: Optional[float] = None) -> None:
@@ -270,7 +273,7 @@ class PrefectDistributedFuture(PrefectFuture[R]):
 return
 TaskRunWaiter.add_done_callback(self._task_run_id, partial(fn, self))

-def __eq__(self, other):
+def __eq__(self, other: Any) -> bool:
 if not isinstance(other, PrefectDistributedFuture):
 return False
 return self.task_run_id == other.task_run_id
@@ -279,7 +282,7 @@ class PrefectDistributedFuture(PrefectFuture[R]):
 return hash(self.task_run_id)


-class PrefectFutureList(list, Iterator
+class PrefectFutureList(list[PrefectFuture[R]], Iterator[PrefectFuture[R]]):
 """
 A list of Prefect futures.

@@ -298,10 +301,10 @@ class PrefectFutureList(list, Iterator, Generic[F]):
 wait(self, timeout=timeout)

 def result(
-self:
+self: Self,
 timeout: Optional[float] = None,
 raise_on_failure: bool = True,
-) ->
+) -> list[R]:
 """
 Get the results of all task runs associated with the futures in the list.

@@ -331,21 +334,22 @@ class PrefectFutureList(list, Iterator, Generic[F]):


 def as_completed(
-futures:
+futures: list[PrefectFuture[R]], timeout: Optional[float] = None
 ) -> Generator[PrefectFuture[R], None]:
-unique_futures:
+unique_futures: set[PrefectFuture[R]] = set(futures)
 total_futures = len(unique_futures)
+pending = unique_futures
 try:
 with timeout_context(timeout):
-done = {f for f in unique_futures if f._final_state}
+done = {f for f in unique_futures if f._final_state}  # type: ignore[privateUsage]
 pending = unique_futures - done
 yield from done

 finished_event = threading.Event()
 finished_lock = threading.Lock()
-finished_futures = []
+finished_futures: list[PrefectFuture[R]] = []

-def add_to_done(future):
+def add_to_done(future: PrefectFuture[R]):
 with finished_lock:
 finished_futures.append(future)
 finished_event.set()
@@ -370,10 +374,19 @@ def as_completed(
 )


-DoneAndNotDoneFutures
+class DoneAndNotDoneFutures(NamedTuple, Generic[R]):
+"""A named 2-tuple of sets.
+
+multiple inheritance supported in 3.11+, use typing_extensions.NamedTuple
+"""
+
+done: set[PrefectFuture[R]]
+not_done: set[PrefectFuture[R]]


-def wait(
+def wait(
+futures: list[PrefectFuture[R]], timeout: Optional[float] = None
+) -> DoneAndNotDoneFutures[R]:
 """
 Wait for the futures in the given sequence to complete.

@@ -431,9 +444,11 @@ def resolve_futures_to_states(

 Unsupported object types will be returned without modification.
 """
-futures:
+futures: set[PrefectFuture[R]] = set()

-def _collect_futures(
+def _collect_futures(
+futures: set[PrefectFuture[R]], expr: Any, context: Any
+) -> Union[PrefectFuture[R], Any]:
 # Expressions inside quotes should not be traversed
 if isinstance(context.get("annotation"), quote):
 raise StopVisiting()
@@ -455,14 +470,14 @@ def resolve_futures_to_states(
 return expr

 # Get final states for each future
-states = []
+states: list[State] = []
 for future in futures:
 future.wait()
 states.append(future.state)

 states_by_future = dict(zip(futures, states))

-def replace_futures_with_states(expr, context):
+def replace_futures_with_states(expr: Any, context: Any) -> Any:
 # Expressions inside quotes should not be modified
 if isinstance(context.get("annotation"), quote):
 raise StopVisiting()
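The new DoneAndNotDoneFutures above relies on generic NamedTuple support, which plain typing.NamedTuple only accepts from Python 3.11 onward; typing_extensions backports it. A minimal standalone sketch of the pattern (the SplitFutures name and the int payload are illustrative, not part of Prefect):

from typing import Generic, TypeVar

from typing_extensions import NamedTuple

R = TypeVar("R")


class SplitFutures(NamedTuple, Generic[R]):
    # Generic NamedTuple: mixing in Generic is only accepted by typing.NamedTuple
    # on Python 3.11+, so typing_extensions.NamedTuple is used instead.
    done: set[R]
    not_done: set[R]


# Behaves like any NamedTuple, but the element type is parameterized.
split: SplitFutures[int] = SplitFutures(done={1, 2}, not_done={3})
assert isinstance(split, tuple) and split.done == {1, 2}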
prefect/infrastructure/__init__.py
CHANGED
@@ -1,6 +1,8 @@
 """
 2024-06-27: This surfaces an actionable error message for moved or removed objects in Prefect 3.0 upgrade.
 """
+from typing import Any, Callable
+
 from prefect._internal.compatibility.migration import getattr_migration

-__getattr__ = getattr_migration(__name__)
+__getattr__: Callable[[str], Any] = getattr_migration(__name__)
prefect/infrastructure/base.py
CHANGED
@@ -1,6 +1,8 @@
 """
 2024-06-27: This surfaces an actionable error message for moved or removed objects in Prefect 3.0 upgrade.
 """
+from typing import Any, Callable
+
 from prefect._internal.compatibility.migration import getattr_migration

-__getattr__ = getattr_migration(__name__)
+__getattr__: Callable[[str], Any] = getattr_migration(__name__)
prefect/locking/filesystem.py
CHANGED
@@ -1,6 +1,7 @@
 import time
+from logging import Logger
 from pathlib import Path
-from typing import
+from typing import Optional

 import anyio
 import pendulum
@@ -11,7 +12,7 @@ from prefect.logging.loggers import get_logger

 from .protocol import LockManager

-logger = get_logger(__name__)
+logger: Logger = get_logger(__name__)


 class _LockInfo(TypedDict):
@@ -37,11 +38,11 @@ class FileSystemLockManager(LockManager):
 lock_files_directory: the directory where lock files are stored
 """

-def __init__(self, lock_files_directory: Path):
-self.lock_files_directory = lock_files_directory.expanduser().resolve()
-self._locks:
+def __init__(self, lock_files_directory: Path) -> None:
+self.lock_files_directory: Path = lock_files_directory.expanduser().resolve()
+self._locks: dict[str, _LockInfo] = {}

-def _ensure_lock_files_directory_exists(self):
+def _ensure_lock_files_directory_exists(self) -> None:
 self.lock_files_directory.mkdir(parents=True, exist_ok=True)

 def _lock_path_for_key(self, key: str) -> Path:
@@ -49,7 +50,7 @@ class FileSystemLockManager(LockManager):
 return lock_info["path"]
 return self.lock_files_directory.joinpath(key).with_suffix(".lock")

-def _get_lock_info(self, key: str, use_cache=True) -> Optional[_LockInfo]:
+def _get_lock_info(self, key: str, use_cache: bool = True) -> Optional[_LockInfo]:
 if use_cache:
 if (lock_info := self._locks.get(key)) is not None:
 return lock_info
prefect/locking/memory.py
CHANGED
@@ -1,6 +1,8 @@
 import asyncio
 import threading
-from typing import
+from typing import Any, Optional, TypedDict
+
+from typing_extensions import Self

 from .protocol import LockManager

@@ -30,14 +32,14 @@ class MemoryLockManager(LockManager):

 _instance = None

-def __new__(cls, *args, **kwargs):
+def __new__(cls, *args: Any, **kwargs: Any) -> Self:
 if cls._instance is None:
 cls._instance = super().__new__(cls)
 return cls._instance

 def __init__(self):
 self._locks_dict_lock = threading.Lock()
-self._locks:
+self._locks: dict[str, _LockInfo] = {}

 def _expire_lock(self, key: str):
 """
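The `__new__` annotation change above follows the usual typed-singleton pattern, with `Self` keeping the return type correct for subclasses. A minimal standalone sketch (the Registry class name is illustrative, not Prefect's):

from typing import Any, Optional

from typing_extensions import Self


class Registry:
    _instance: Optional["Registry"] = None

    def __new__(cls, *args: Any, **kwargs: Any) -> Self:
        # Reuse the single shared instance across all constructions.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance


assert Registry() is Registry()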
prefect/locking/protocol.py
CHANGED
prefect/plugins.py
CHANGED
@@ -9,15 +9,15 @@ Currently supported entrypoints:
 """

 from types import ModuleType
-from typing import Any,
+from typing import Any, Union

 import prefect.settings
 from prefect.utilities.compat import EntryPoints, entry_points

-
+_collections: Union[None, dict[str, Union[ModuleType, Exception]]] = None


-def safe_load_entrypoints(entrypoints: EntryPoints) ->
+def safe_load_entrypoints(entrypoints: EntryPoints) -> dict[str, Union[Exception, Any]]:
 """
 Load entry points for a group capturing any exceptions that occur.
 """
@@ -26,7 +26,7 @@ def safe_load_entrypoints(entrypoints: EntryPoints) -> Dict[str, Union[Exception
 # also want to validate the type for the group for entrypoints that have
 # a specific type we expect.

-results = {}
+results: dict[str, Union[Exception, Any]] = {}

 for entrypoint in entrypoints:
 result = None
@@ -40,18 +40,20 @@ def safe_load_entrypoints(entrypoints: EntryPoints) -> Dict[str, Union[Exception
 return results


-def load_prefect_collections() ->
+def load_prefect_collections() -> dict[str, Union[ModuleType, Exception]]:
 """
 Load all Prefect collections that define an entrypoint in the group
 `prefect.collections`.
 """
-global
+global _collections

-if
-return
+if _collections is not None:
+return _collections

 collection_entrypoints: EntryPoints = entry_points(group="prefect.collections")
-collections = safe_load_entrypoints(
+collections: dict[str, Union[Exception, Any]] = safe_load_entrypoints(
+collection_entrypoints
+)

 # TODO: Consider the utility of this once we've established this pattern.
 # We cannot use a logger here because logging is not yet initialized.
@@ -68,5 +70,5 @@ def load_prefect_collections() -> Dict[str, Union[ModuleType, Exception]]:
 if prefect.settings.PREFECT_DEBUG_MODE:
 print(f"Loaded collection {name!r}.")

-
+_collections = collections
 return collections
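The plugins.py change above memoizes loaded collections in a module-level `_collections` cache so repeated calls skip re-loading entry points. A rough standalone sketch of that load-once pattern using importlib.metadata directly (Python 3.10+ selection API; the group and helper names are illustrative, not Prefect's internals):

from importlib.metadata import entry_points
from types import ModuleType
from typing import Optional, Union

_cache: Optional[dict[str, Union[ModuleType, Exception]]] = None


def load_collections_once(group: str = "prefect.collections") -> dict[str, Union[ModuleType, Exception]]:
    global _cache
    if _cache is not None:
        return _cache  # already loaded in this process

    loaded: dict[str, Union[ModuleType, Exception]] = {}
    for ep in entry_points(group=group):
        try:
            loaded[ep.name] = ep.load()
        except Exception as exc:  # capture the failure instead of raising
            loaded[ep.name] = exc

    _cache = loaded
    return loaded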
prefect/results.py
CHANGED
@@ -233,6 +233,29 @@ def _format_user_supplied_storage_key(key: str) -> str:
 return key.format(**runtime_vars, parameters=prefect.runtime.task_run.parameters)


+async def _call_explicitly_async_block_method(
+block: Union[WritableFileSystem, NullFileSystem],
+method: str,
+args: tuple[Any, ...],
+kwargs: dict[str, Any],
+) -> Any:
+"""
+TODO: remove this once we have explicit async methods on all storage blocks
+
+see https://github.com/PrefectHQ/prefect/issues/15008
+"""
+if hasattr(block, f"a{method}"):  # explicit async method
+return await getattr(block, f"a{method}")(*args, **kwargs)
+elif hasattr(getattr(block, method, None), "aio"):  # sync_compatible
+return await getattr(block, method).aio(block, *args, **kwargs)
+else:  # should not happen in prefect, but users can override impls
+maybe_coro = getattr(block, method)(*args, **kwargs)
+if inspect.isawaitable(maybe_coro):
+return await maybe_coro
+else:
+return maybe_coro
+
+
 T = TypeVar("T")

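The helper added above picks the best available I/O path on a storage block: an explicit `a<method>` coroutine, a `sync_compatible` method's `.aio` binding, or a plain call that is awaited only if it returns an awaitable. A simplified sketch of the same dispatch outside Prefect (it omits the `.aio` branch, and the DemoBlock class is illustrative):

import asyncio
import inspect
from typing import Any


async def call_preferring_async(obj: Any, method: str, *args: Any, **kwargs: Any) -> Any:
    if hasattr(obj, f"a{method}"):
        # Explicit async variant, e.g. an `aread_path` defined alongside `read_path`.
        return await getattr(obj, f"a{method}")(*args, **kwargs)
    result = getattr(obj, method)(*args, **kwargs)
    # Await only when the plain call handed back a coroutine or other awaitable.
    return await result if inspect.isawaitable(result) else result


class DemoBlock:
    def read_path(self, path: str) -> bytes:
        return path.encode()


print(asyncio.run(call_preferring_async(DemoBlock(), "read_path", "results/abc")))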
@@ -405,7 +428,9 @@ class ResultStore(BaseModel):
 # TODO: Add an `exists` method to commonly used storage blocks
 # so the entire payload doesn't need to be read
 try:
-metadata_content = await
+metadata_content = await _call_explicitly_async_block_method(
+self.metadata_storage, "read_path", (key,), {}
+)
 if metadata_content is None:
 return False
 metadata = ResultRecordMetadata.load_bytes(metadata_content)
@@ -414,7 +439,9 @@ class ResultStore(BaseModel):
 return False
 else:
 try:
-content = await
+content = await _call_explicitly_async_block_method(
+self.result_storage, "read_path", (key,), {}
+)
 if content is None:
 return False
 record = ResultRecord.deserialize(content)
@@ -491,12 +518,22 @@ class ResultStore(BaseModel):
 self.result_storage = await get_default_result_storage()

 if self.metadata_storage is not None:
-metadata_content = await
+metadata_content = await _call_explicitly_async_block_method(
+self.metadata_storage,
+"read_path",
+(key,),
+{},
+)
 metadata = ResultRecordMetadata.load_bytes(metadata_content)
 assert (
 metadata.storage_key is not None
 ), "Did not find storage key in metadata"
-result_content = await
+result_content = await _call_explicitly_async_block_method(
+self.result_storage,
+"read_path",
+(metadata.storage_key,),
+{},
+)
 result_record: ResultRecord[
 Any
 ] = ResultRecord.deserialize_from_result_and_metadata(
@@ -504,7 +541,12 @@ class ResultStore(BaseModel):
 )
 await emit_result_read_event(self, resolved_key_path)
 else:
-content = await
+content = await _call_explicitly_async_block_method(
+self.result_storage,
+"read_path",
+(key,),
+{},
+)
 result_record: ResultRecord[Any] = ResultRecord.deserialize(
 content, backup_serializer=self.serializer
 )
@@ -555,7 +597,7 @@ class ResultStore(BaseModel):
 obj: Any,
 key: Optional[str] = None,
 expiration: Optional[DateTime] = None,
-) -> "ResultRecord":
+) -> "ResultRecord[Any]":
 """
 Create a result record.

@@ -671,19 +713,26 @@ class ResultStore(BaseModel):

 # If metadata storage is configured, write result and metadata separately
 if self.metadata_storage is not None:
-await
-
-
+await _call_explicitly_async_block_method(
+self.result_storage,
+"write_path",
+(result_record.metadata.storage_key,),
+{"content": result_record.serialize_result()},
 )
-await
-
-
+await _call_explicitly_async_block_method(
+self.metadata_storage,
+"write_path",
+(base_key,),
+{"content": result_record.serialize_metadata()},
 )
 await emit_result_write_event(self, result_record.metadata.storage_key)
 # Otherwise, write the result metadata and result together
 else:
-await
-
+await _call_explicitly_async_block_method(
+self.result_storage,
+"write_path",
+(result_record.metadata.storage_key,),
+{"content": result_record.serialize()},
 )
 await emit_result_write_event(self, result_record.metadata.storage_key)
 if self.cache_result_in_memory:
@@ -910,8 +959,11 @@ class ResultStore(BaseModel):
 serializer=self.serializer, storage_key=str(identifier)
 ),
 )
-await
-
+await _call_explicitly_async_block_method(
+self.result_storage,
+"write_path",
+(f"parameters/{identifier}",),
+{"content": record.serialize()},
 )

 @sync_compatible
@@ -921,7 +973,12 @@ class ResultStore(BaseModel):
 "Result store is not configured - must have a result storage block to read parameters"
 )
 record = ResultRecord.deserialize(
-await
+await _call_explicitly_async_block_method(
+self.result_storage,
+"read_path",
+(f"parameters/{identifier}",),
+{},
+)
 )
 return record.result

@@ -976,7 +1033,7 @@ class ResultRecordMetadata(BaseModel):
 """
 return cls.model_validate_json(data)

-def __eq__(self, other):
+def __eq__(self, other: Any) -> bool:
 if not isinstance(other, ResultRecordMetadata):
 return False
 return (
@@ -1050,7 +1107,7 @@ class ResultRecord(BaseModel, Generic[R]):

 @model_validator(mode="before")
 @classmethod
-def coerce_old_format(cls, value: Any):
+def coerce_old_format(cls, value: Any) -> Any:
 if isinstance(value, dict):
 if "data" in value:
 value["result"] = value.pop("data")
prefect/runner/runner.py
CHANGED
@@ -1209,9 +1209,8 @@ class Runner:
 )
 # Mark the task as started to prevent agent crash
 task_status.started(exc)
-
-
-)
+message = f"Flow run process could not be started:\n{exc!r}"
+await self._propose_crashed_state(flow_run, message)
 else:
 run_logger.exception(
 f"An error occurred while monitoring flow run '{flow_run.id}'. "
prefect/states.py
CHANGED
@@ -11,6 +11,7 @@ from typing import Any, Dict, Iterable, Optional, Type
 import anyio
 import httpx
 import pendulum
+from opentelemetry import propagate
 from typing_extensions import TypeGuard

 from prefect._internal.compatibility import deprecated
@@ -588,6 +589,16 @@ class StateGroup:
 return f"StateGroup<{self.counts_message()}>"


+def _traced(cls: Type[State[R]], **kwargs: Any) -> State[R]:
+state_details = StateDetails.model_validate(kwargs.pop("state_details", {}))
+
+carrier = {}
+propagate.inject(carrier)
+state_details.traceparent = carrier.get("traceparent")
+
+return cls(**kwargs, state_details=state_details)
+
+
 def Scheduled(
 cls: Type[State[R]] = State,
 scheduled_time: Optional[datetime.datetime] = None,
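The new `_traced` helper above stamps the current OpenTelemetry context onto each state's details before construction. A minimal sketch of how `propagate.inject` yields a W3C `traceparent` string, assuming the OpenTelemetry SDK is installed (tracer and span names are illustrative):

from opentelemetry import propagate, trace
from opentelemetry.sdk.trace import TracerProvider

# Without a configured provider the no-op tracer is used and nothing is injected.
trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer("example")

with tracer.start_as_current_span("create-state"):
    carrier: dict[str, str] = {}
    propagate.inject(carrier)  # writes W3C trace context headers into the dict
    print(carrier.get("traceparent"))  # e.g. "00-<trace-id>-<span-id>-01"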
@@ -605,7 +616,7 @@ def Scheduled(
 raise ValueError("An extra scheduled_time was provided in state_details")
 state_details.scheduled_time = scheduled_time

-return cls
+return _traced(cls, type=StateType.SCHEDULED, state_details=state_details, **kwargs)


 def Completed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -614,7 +625,8 @@ def Completed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
 Returns:
 State: a Completed state
 """
-
+
+return _traced(cls, type=StateType.COMPLETED, **kwargs)


 def Running(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -623,7 +635,7 @@ def Running(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
 Returns:
 State: a Running state
 """
-return cls
+return _traced(cls, type=StateType.RUNNING, **kwargs)


 def Failed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -632,7 +644,7 @@ def Failed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
 Returns:
 State: a Failed state
 """
-return cls
+return _traced(cls, type=StateType.FAILED, **kwargs)


 def Crashed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -641,7 +653,7 @@ def Crashed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
 Returns:
 State: a Crashed state
 """
-return cls
+return _traced(cls, type=StateType.CRASHED, **kwargs)


 def Cancelling(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -650,7 +662,7 @@ def Cancelling(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
 Returns:
 State: a Cancelling state
 """
-return cls
+return _traced(cls, type=StateType.CANCELLING, **kwargs)


 def Cancelled(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -659,7 +671,7 @@ def Cancelled(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
 Returns:
 State: a Cancelled state
 """
-return cls
+return _traced(cls, type=StateType.CANCELLED, **kwargs)


 def Pending(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -668,7 +680,7 @@ def Pending(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
 Returns:
 State: a Pending state
 """
-return cls
+return _traced(cls, type=StateType.PENDING, **kwargs)


 def Paused(
@@ -704,7 +716,7 @@ def Paused(
 state_details.pause_reschedule = reschedule
 state_details.pause_key = pause_key

-return cls
+return _traced(cls, type=StateType.PAUSED, state_details=state_details, **kwargs)


 def Suspended(
@@ -766,7 +778,7 @@ def Retrying(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
 Returns:
 State: a Retrying state
 """
-return cls
+return _traced(cls, type=StateType.RUNNING, name="Retrying", **kwargs)


 def Late(
|