prefect-client 3.0.3__py3-none-any.whl → 3.0.5__py3-none-any.whl
This diff compares publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- prefect/_internal/retries.py +1 -3
- prefect/_internal/schemas/validators.py +1 -1
- prefect/cache_policies.py +1 -1
- prefect/client/cloud.py +9 -0
- prefect/client/orchestration.py +30 -2
- prefect/client/schemas/objects.py +17 -2
- prefect/client/subscriptions.py +3 -3
- prefect/context.py +11 -19
- prefect/deployments/base.py +12 -0
- prefect/deployments/flow_runs.py +8 -0
- prefect/events/clients.py +40 -22
- prefect/exceptions.py +22 -3
- prefect/filesystems.py +26 -1
- prefect/flow_engine.py +10 -7
- prefect/flows.py +11 -2
- prefect/logging/configuration.py +4 -8
- prefect/logging/handlers.py +3 -4
- prefect/results.py +73 -7
- prefect/runner/runner.py +5 -37
- prefect/settings.py +1364 -1582
- prefect/transactions.py +9 -9
- prefect/types/__init__.py +51 -1
- prefect/utilities/pydantic.py +2 -1
- prefect/utilities/text.py +13 -1
- prefect/workers/base.py +7 -56
- {prefect_client-3.0.3.dist-info → prefect_client-3.0.5.dist-info}/METADATA +1 -1
- {prefect_client-3.0.3.dist-info → prefect_client-3.0.5.dist-info}/RECORD +30 -31
- prefect/_internal/compatibility/experimental.py +0 -195
- {prefect_client-3.0.3.dist-info → prefect_client-3.0.5.dist-info}/LICENSE +0 -0
- {prefect_client-3.0.3.dist-info → prefect_client-3.0.5.dist-info}/WHEEL +0 -0
- {prefect_client-3.0.3.dist-info → prefect_client-3.0.5.dist-info}/top_level.txt +0 -0
prefect/results.py
CHANGED
@@ -8,6 +8,7 @@ from functools import partial
 from pathlib import Path
 from typing import (
     TYPE_CHECKING,
+    Annotated,
     Any,
     Callable,
     Dict,
@@ -25,8 +26,10 @@ from cachetools import LRUCache
 from pydantic import (
     BaseModel,
     ConfigDict,
+    Discriminator,
     Field,
     PrivateAttr,
+    Tag,
     ValidationError,
     model_serializer,
     model_validator,
@@ -47,6 +50,7 @@ from prefect.exceptions import (
 )
 from prefect.filesystems import (
     LocalFileSystem,
+    NullFileSystem,
     WritableFileSystem,
 )
 from prefect.locking.protocol import LockManager
@@ -218,6 +222,19 @@ def _format_user_supplied_storage_key(key: str) -> str:
 T = TypeVar("T")
 
 
+def result_storage_discriminator(x: Any) -> str:
+    if isinstance(x, dict):
+        if "block_type_slug" in x:
+            return "WritableFileSystem"
+        else:
+            return "NullFileSystem"
+    if isinstance(x, WritableFileSystem):
+        return "WritableFileSystem"
+    if isinstance(x, NullFileSystem):
+        return "NullFileSystem"
+    return "None"
+
+
 @deprecated_field(
     "persist_result",
     when=lambda x: x is not None,
@@ -245,7 +262,14 @@ class ResultStore(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
 
     result_storage: Optional[WritableFileSystem] = Field(default=None)
-    metadata_storage:
+    metadata_storage: Annotated[
+        Union[
+            Annotated[WritableFileSystem, Tag("WritableFileSystem")],
+            Annotated[NullFileSystem, Tag("NullFileSystem")],
+            Annotated[None, Tag("None")],
+        ],
+        Discriminator(result_storage_discriminator),
+    ] = Field(default=None)
     lock_manager: Optional[LockManager] = Field(default=None)
     cache_result_in_memory: bool = Field(default=True)
     serializer: Serializer = Field(default_factory=get_default_result_serializer)
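The new annotation pairs Pydantic's callable Discriminator with Tag-labeled union members, so validation picks WritableFileSystem, NullFileSystem, or None from the shape of the input rather than from a literal discriminator field. A minimal sketch of the same pattern, using hypothetical Cat/Dog/Owner models instead of Prefect's block types:

    from typing import Annotated, Any, Union

    from pydantic import BaseModel, Discriminator, Field, Tag


    class Cat(BaseModel):
        meow: str = "meow"


    class Dog(BaseModel):
        bark: str = "woof"


    def pet_discriminator(value: Any) -> str:
        # Raw dicts are tagged by inspecting their keys; model instances by type;
        # anything else falls through to the "none" member.
        if isinstance(value, dict):
            return "cat" if "meow" in value else "dog"
        if isinstance(value, Cat):
            return "cat"
        if isinstance(value, Dog):
            return "dog"
        return "none"


    class Owner(BaseModel):
        pet: Annotated[
            Union[
                Annotated[Cat, Tag("cat")],
                Annotated[Dog, Tag("dog")],
                Annotated[None, Tag("none")],
            ],
            Discriminator(pet_discriminator),
        ] = Field(default=None)


    assert isinstance(Owner(pet={"meow": "hi"}).pet, Cat)
    assert isinstance(Owner(pet={"bark": "woof"}).pet, Dog)
    assert Owner().pet is None

Raw dicts (for example, data loaded from the API) and already-instantiated objects both pass through the same discriminator callable, which is why the Prefect version checks for the block_type_slug key as well as using isinstance.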
@@ -281,6 +305,7 @@ class ResultStore(BaseModel):
         update["cache_result_in_memory"] = flow.cache_result_in_memory
         if self.result_storage is None and update.get("result_storage") is None:
             update["result_storage"] = await get_default_result_storage()
+            update["metadata_storage"] = NullFileSystem()
         return self.model_copy(update=update)
 
     @sync_compatible
@@ -294,6 +319,8 @@ class ResultStore(BaseModel):
         Returns:
             An updated result store.
         """
+        from prefect.transactions import get_transaction
+
         update = {}
         if task.result_storage is not None:
             update["result_storage"] = await resolve_result_storage(task.result_storage)
@@ -305,17 +332,30 @@ class ResultStore(BaseModel):
             update["storage_key_fn"] = partial(
                 _format_user_supplied_storage_key, task.result_storage_key
             )
+
+        # use the lock manager from a parent transaction if it exists
+        if (current_txn := get_transaction()) and isinstance(
+            current_txn.store, ResultStore
+        ):
+            update["lock_manager"] = current_txn.store.lock_manager
+
         if task.cache_policy is not None and task.cache_policy is not NotSet:
             if task.cache_policy.key_storage is not None:
                 storage = task.cache_policy.key_storage
                 if isinstance(storage, str) and not len(storage.split("/")) == 2:
                     storage = Path(storage)
                 update["metadata_storage"] = await resolve_result_storage(storage)
+            # if the cache policy has a lock manager, it takes precedence over the parent transaction
             if task.cache_policy.lock_manager is not None:
                 update["lock_manager"] = task.cache_policy.lock_manager
 
         if self.result_storage is None and update.get("result_storage") is None:
             update["result_storage"] = await get_default_result_storage()
+        if (
+            isinstance(self.metadata_storage, NullFileSystem)
+            and update.get("metadata_storage", NotSet) is NotSet
+        ):
+            update["metadata_storage"] = None
         return self.model_copy(update=update)
 
     @staticmethod
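The final added block uses the NotSet sentinel to tell "metadata_storage was never mentioned in the update" apart from "metadata_storage was explicitly set to None", clearing an inherited NullFileSystem only in the former case. A rough sketch of that check with stand-in classes (not Prefect's NotSet or NullFileSystem implementations):

    class _NotSet:
        def __repr__(self) -> str:
            return "NotSet"


    NotSet = _NotSet()


    class NullFileSystem:
        """Stand-in for a storage block that intentionally persists nothing."""


    def finalize_metadata_storage(current, update: dict) -> dict:
        # update.get(..., NotSet) is NotSet means the caller never touched the key,
        # which is different from the caller explicitly passing None.
        if isinstance(current, NullFileSystem) and update.get("metadata_storage", NotSet) is NotSet:
            update["metadata_storage"] = None
        return update


    # An inherited NullFileSystem is cleared only when no explicit value was supplied.
    assert finalize_metadata_storage(NullFileSystem(), {}) == {"metadata_storage": None}
    assert finalize_metadata_storage(NullFileSystem(), {"metadata_storage": "s3-block"}) == {"metadata_storage": "s3-block"}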
@@ -433,7 +473,9 @@ class ResultStore(BaseModel):
             )
         else:
             content = await self.result_storage.read_path(key)
-            result_record = ResultRecord.deserialize(
+            result_record = ResultRecord.deserialize(
+                content, backup_serializer=self.serializer
+            )
 
         if self.cache_result_in_memory:
             if self.result_storage_block_id is None and hasattr(
@@ -446,26 +488,36 @@ class ResultStore(BaseModel):
             self.cache[cache_key] = result_record
         return result_record
 
-    def read(
+    def read(
+        self,
+        key: str,
+        holder: Optional[str] = None,
+    ) -> "ResultRecord":
         """
         Read a result record from storage.
 
         Args:
             key: The key to read the result record from.
             holder: The holder of the lock if a lock was set on the record.
+
         Returns:
             A result record.
         """
         holder = holder or self.generate_default_holder()
         return self._read(key=key, holder=holder, _sync=True)
 
-    async def aread(
+    async def aread(
+        self,
+        key: str,
+        holder: Optional[str] = None,
+    ) -> "ResultRecord":
         """
         Read a result record from storage.
 
         Args:
             key: The key to read the result record from.
             holder: The holder of the lock if a lock was set on the record.
+
         Returns:
             A result record.
         """
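read and aread now spell out the same (key, holder) signature and both delegate to the shared async _read; in Prefect the synchronous path runs through @sync_compatible and the _sync=True flag. A generic sketch of that one-implementation, two-entry-points layout (SketchStore is illustrative, not Prefect's API):

    import asyncio
    from typing import Optional


    class SketchStore:
        async def _read(self, key: str, holder: Optional[str] = None) -> str:
            # Placeholder for the real storage lookup.
            return f"{key} (held by {holder or 'anonymous'})"

        def read(self, key: str, holder: Optional[str] = None) -> str:
            """Blocking entry point for callers outside an event loop."""
            return asyncio.run(self._read(key=key, holder=holder))

        async def aread(self, key: str, holder: Optional[str] = None) -> str:
            """Awaitable entry point for async callers."""
            return await self._read(key=key, holder=holder)


    print(SketchStore().read("my-key"))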
@@ -1026,17 +1078,31 @@ class ResultRecord(BaseModel, Generic[R]):
         )
 
     @classmethod
-    def deserialize(
+    def deserialize(
+        cls, data: bytes, backup_serializer: Optional[Serializer] = None
+    ) -> "ResultRecord[R]":
         """
         Deserialize a record from bytes.
 
         Args:
             data: the serialized record
+            backup_serializer: The serializer to use to deserialize the result record. Only
+                necessary if the provided data does not specify a serializer.
 
         Returns:
             ResultRecord: the deserialized record
         """
-
+        try:
+            instance = cls.model_validate_json(data)
+        except ValidationError:
+            if backup_serializer is None:
+                raise
+            else:
+                result = backup_serializer.loads(data)
+                return cls(
+                    metadata=ResultRecordMetadata(serializer=backup_serializer),
+                    result=result,
+                )
         if isinstance(instance.result, bytes):
             instance.result = instance.serializer.loads(instance.result)
         elif isinstance(instance.result, str):
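The rewritten deserialize first tries to validate the payload as a self-describing record and only falls back to the supplied backup_serializer when that validation fails. A stripped-down sketch of the same try/fallback shape, using a toy Record model and JSONSerializer rather than Prefect's classes:

    import json
    from typing import Any, Optional

    from pydantic import BaseModel, ValidationError


    class JSONSerializer:
        """Toy stand-in for a result serializer."""

        def loads(self, blob: bytes) -> Any:
            return json.loads(blob)


    class Record(BaseModel):
        result: Any = None

        @classmethod
        def deserialize(cls, data: bytes, backup_serializer: Optional[JSONSerializer] = None) -> "Record":
            try:
                # Preferred path: the payload is a self-describing record.
                return cls.model_validate_json(data)
            except ValidationError:
                if backup_serializer is None:
                    raise  # nothing to fall back to; surface the original error
                # Fallback path: treat the payload as a bare result and wrap it.
                return cls(result=backup_serializer.loads(data))


    # A bare JSON payload fails record validation but is recovered via the backup serializer.
    assert Record.deserialize(b"[1, 2, 3]", backup_serializer=JSONSerializer()).result == [1, 2, 3]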
@@ -1089,7 +1155,7 @@ class BaseResult(BaseModel, abc.ABC, Generic[R]):
             try:
                 subcls = lookup_type(cls, dispatch_key=kwargs["type"])
             except KeyError as exc:
-                raise
+                raise ValueError(f"Invalid type: {kwargs['type']}") from exc
             return super().__new__(subcls)
         else:
             return super().__new__(cls)
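The except branch now raises a ValueError that names the unknown dispatch key and chains the original KeyError with "from exc" instead of re-raising it bare. A small sketch of that pattern against a made-up registry (not Prefect's lookup_type machinery):

    _REGISTRY = {"literal": dict, "reference": list}


    def lookup(dispatch_key: str) -> type:
        try:
            return _REGISTRY[dispatch_key]
        except KeyError as exc:
            # "from exc" keeps the original KeyError in __cause__ for debugging
            # while giving callers a message that names the offending key.
            raise ValueError(f"Invalid type: {dispatch_key}") from exc


    try:
        lookup("unknown")
    except ValueError as err:
        assert isinstance(err.__cause__, KeyError)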
prefect/runner/runner.py
CHANGED
@@ -74,7 +74,6 @@ from prefect.client.schemas.objects import Flow as APIFlow
 from prefect.concurrency.asyncio import (
     AcquireConcurrencySlotTimeoutError,
     ConcurrencySlotAcquisitionError,
-    concurrency,
 )
 from prefect.events import DeploymentTriggerTypes, TriggerTypes
 from prefect.events.related import tags_as_related_resources
@@ -92,7 +91,6 @@ from prefect.settings import (
     get_current_settings,
 )
 from prefect.states import (
-    AwaitingConcurrencySlot,
     Crashed,
     Pending,
     exception_to_failed_state,
@@ -1047,22 +1045,12 @@ class Runner:
     ) -> Union[Optional[int], Exception]:
         run_logger = self._get_flow_run_logger(flow_run)
 
-        if flow_run.deployment_id:
-            deployment = await self._client.read_deployment(flow_run.deployment_id)
-            if deployment and deployment.global_concurrency_limit:
-                limit_name = deployment.global_concurrency_limit.name
-                concurrency_ctx = concurrency
-            else:
-                limit_name = ""
-                concurrency_ctx = asyncnullcontext
-
         try:
-
-
-
-
-
-            )
+            status_code = await self._run_process(
+                flow_run=flow_run,
+                task_status=task_status,
+                entrypoint=entrypoint,
+            )
         except (
             AcquireConcurrencySlotTimeoutError,
             ConcurrencySlotAcquisitionError,
@@ -1164,26 +1152,6 @@ class Runner:
                 exc_info=True,
             )
 
-    async def _propose_scheduled_state(self, flow_run: "FlowRun") -> None:
-        run_logger = self._get_flow_run_logger(flow_run)
-        try:
-            state = await propose_state(
-                self._client,
-                AwaitingConcurrencySlot(),
-                flow_run_id=flow_run.id,
-            )
-            self._logger.info(f"Flow run {flow_run.id} now has state {state.name}")
-        except Abort as exc:
-            run_logger.info(
-                (
-                    f"Aborted rescheduling of flow run '{flow_run.id}'. "
-                    f"Server sent an abort signal: {exc}"
-                ),
-            )
-            pass
-        except Exception:
-            run_logger.exception(f"Failed to update state of flow run '{flow_run.id}'")
-
     async def _propose_crashed_state(self, flow_run: "FlowRun", message: str) -> None:
         run_logger = self._get_flow_run_logger(flow_run)
         try: