prefect-client 3.0.1__py3-none-any.whl → 3.0.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/_internal/compatibility/deprecated.py +1 -1
- prefect/blocks/notifications.py +21 -0
- prefect/blocks/webhook.py +8 -0
- prefect/client/orchestration.py +39 -20
- prefect/client/schemas/actions.py +2 -2
- prefect/client/schemas/objects.py +24 -6
- prefect/client/types/flexible_schedule_list.py +1 -1
- prefect/concurrency/asyncio.py +45 -6
- prefect/concurrency/services.py +1 -1
- prefect/concurrency/sync.py +21 -27
- prefect/concurrency/v1/asyncio.py +3 -0
- prefect/concurrency/v1/sync.py +4 -5
- prefect/context.py +5 -1
- prefect/deployments/runner.py +1 -0
- prefect/events/actions.py +6 -0
- prefect/flow_engine.py +12 -4
- prefect/locking/filesystem.py +243 -0
- prefect/logging/handlers.py +0 -2
- prefect/logging/loggers.py +0 -18
- prefect/logging/logging.yml +1 -0
- prefect/main.py +19 -5
- prefect/records/base.py +12 -0
- prefect/records/filesystem.py +6 -2
- prefect/records/memory.py +6 -0
- prefect/records/result_store.py +6 -0
- prefect/results.py +169 -25
- prefect/runner/runner.py +74 -5
- prefect/settings.py +1 -1
- prefect/states.py +34 -17
- prefect/task_engine.py +31 -37
- prefect/transactions.py +105 -50
- prefect/utilities/engine.py +16 -8
- prefect/utilities/importtools.py +1 -0
- prefect/utilities/urls.py +70 -12
- prefect/workers/base.py +14 -6
- {prefect_client-3.0.1.dist-info → prefect_client-3.0.2.dist-info}/METADATA +1 -1
- {prefect_client-3.0.1.dist-info → prefect_client-3.0.2.dist-info}/RECORD +40 -39
- {prefect_client-3.0.1.dist-info → prefect_client-3.0.2.dist-info}/LICENSE +0 -0
- {prefect_client-3.0.1.dist-info → prefect_client-3.0.2.dist-info}/WHEEL +0 -0
- {prefect_client-3.0.1.dist-info → prefect_client-3.0.2.dist-info}/top_level.txt +0 -0
prefect/results.py
CHANGED
```diff
@@ -5,6 +5,7 @@ import socket
 import threading
 import uuid
 from functools import partial
+from pathlib import Path
 from typing import (
     TYPE_CHECKING,
     Any,
@@ -19,6 +20,8 @@ from typing import (
 )
 from uuid import UUID

+import pendulum
+from cachetools import LRUCache
 from pydantic import (
     BaseModel,
     ConfigDict,
@@ -33,6 +36,8 @@ from pydantic_extra_types.pendulum_dt import DateTime
 from typing_extensions import ParamSpec, Self

 import prefect
+from prefect._internal.compatibility import deprecated
+from prefect._internal.compatibility.deprecated import deprecated_field
 from prefect.blocks.core import Block
 from prefect.client.utilities import inject_client
 from prefect.exceptions import (
@@ -97,7 +102,7 @@ async def get_default_result_storage() -> WritableFileSystem:

 @sync_compatible
 async def resolve_result_storage(
-    result_storage: ResultStorage,
+    result_storage: Union[ResultStorage, UUID],
 ) -> WritableFileSystem:
     """
     Resolve one of the valid `ResultStorage` input types into a saved block
@@ -118,6 +123,9 @@ async def resolve_result_storage(
         storage_block = await Block.load(result_storage, client=client)
         storage_block_id = storage_block._block_document_id
         assert storage_block_id is not None, "Loaded storage blocks must have ids"
+    elif isinstance(result_storage, UUID):
+        block_document = await client.read_block_document(result_storage)
+        storage_block = Block._from_block_document(block_document)
    else:
        raise TypeError(
            "Result storage must be one of the following types: 'UUID', 'Block', "
```
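With the new `UUID` branch above, result storage can now be resolved straight from a block document id. A minimal sketch of the new call path; the id below is a placeholder for a block document saved in your own workspace:

```python
from uuid import UUID

from prefect.results import resolve_result_storage

# Placeholder id: substitute the id of a block document you have saved.
block_document_id = UUID("00000000-0000-0000-0000-000000000000")

# `resolve_result_storage` is @sync_compatible, so it can be called
# synchronously outside an event loop; the client parameter is injected.
storage_block = resolve_result_storage(block_document_id)
```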
```diff
@@ -171,6 +179,25 @@ def get_default_persist_setting() -> bool:
     return PREFECT_RESULTS_PERSIST_BY_DEFAULT.value()


+def should_persist_result() -> bool:
+    """
+    Return the default option for result persistence determined by the current run context.
+
+    If there is no current run context, the default value set by
+    `PREFECT_RESULTS_PERSIST_BY_DEFAULT` will be returned.
+    """
+    from prefect.context import FlowRunContext, TaskRunContext
+
+    task_run_context = TaskRunContext.get()
+    if task_run_context is not None:
+        return task_run_context.persist_result
+    flow_run_context = FlowRunContext.get()
+    if flow_run_context is not None:
+        return flow_run_context.persist_result
+
+    return PREFECT_RESULTS_PERSIST_BY_DEFAULT.value()
+
+
 def _format_user_supplied_storage_key(key: str) -> str:
     # Note here we are pinning to task runs since flow runs do not support storage keys
     # yet; we'll need to split logic in the future or have two separate functions
```
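A sketch of how `should_persist_result` resolves: inside a run it reads `persist_result` off the active context, and outside a run it falls back to the setting. This assumes the run contexts carry a `persist_result` attribute, as the function above and this release's `prefect/context.py` change imply:

```python
from prefect import flow
from prefect.results import should_persist_result


@flow(persist_result=True)
def my_flow() -> bool:
    # Resolved from the FlowRunContext, so this reflects persist_result=True.
    return should_persist_result()


if __name__ == "__main__":
    # No run context here: falls back to PREFECT_RESULTS_PERSIST_BY_DEFAULT.
    print(should_persist_result())
    print(my_flow())  # True
```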
```diff
@@ -178,6 +205,16 @@ def _format_user_supplied_storage_key(key: str) -> str:
     return key.format(**runtime_vars, parameters=prefect.runtime.task_run.parameters)


+T = TypeVar("T")
+
+
+@deprecated_field(
+    "persist_result",
+    when=lambda x: x is not None,
+    when_message="use the `should_persist_result` utility function instead",
+    start_date="Sep 2024",
+    end_date="Nov 2024",
+)
 class ResultStore(BaseModel):
     """
     Manages the storage and retrieval of results.
@@ -200,10 +237,13 @@ class ResultStore(BaseModel):
     result_storage: Optional[WritableFileSystem] = Field(default=None)
     metadata_storage: Optional[WritableFileSystem] = Field(default=None)
     lock_manager: Optional[LockManager] = Field(default=None)
-    persist_result: bool = Field(default_factory=get_default_persist_setting)
     cache_result_in_memory: bool = Field(default=True)
     serializer: Serializer = Field(default_factory=get_default_result_serializer)
     storage_key_fn: Callable[[], str] = Field(default=DEFAULT_STORAGE_KEY_FN)
+    cache: LRUCache = Field(default_factory=lambda: LRUCache(maxsize=1000))
+
+    # Deprecated fields
+    persist_result: Optional[bool] = Field(default=None)

     @property
     def result_storage_block_id(self) -> Optional[UUID]:
```
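Given the `deprecated_field` decorator above, explicitly setting `persist_result` on a `ResultStore` should now emit a deprecation warning, while the `when=lambda x: x is not None` guard keeps the default silent. An illustrative sketch (the exact warning class is an assumption, so this filters by message):

```python
import warnings

from prefect.results import ResultStore

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    ResultStore(persist_result=True)  # deprecated: prefer should_persist_result()
    ResultStore()                     # default of None: no warning expected

print(any("persist_result" in str(w.message) for w in caught))  # expected: True
```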
```diff
@@ -227,8 +267,6 @@
             update["result_storage"] = await resolve_result_storage(flow.result_storage)
         if flow.result_serializer is not None:
             update["serializer"] = resolve_serializer(flow.result_serializer)
-        if flow.persist_result is not None:
-            update["persist_result"] = flow.persist_result
         if flow.cache_result_in_memory is not None:
             update["cache_result_in_memory"] = flow.cache_result_in_memory
         if self.result_storage is None and update.get("result_storage") is None:
@@ -251,8 +289,6 @@
             update["result_storage"] = await resolve_result_storage(task.result_storage)
         if task.result_serializer is not None:
             update["serializer"] = resolve_serializer(task.result_serializer)
-        if task.persist_result is not None:
-            update["persist_result"] = task.persist_result
         if task.cache_result_in_memory is not None:
             update["cache_result_in_memory"] = task.cache_result_in_memory
         if task.result_storage_key is not None:
@@ -293,16 +329,30 @@
             # so the entire payload doesn't need to be read
             try:
                 metadata_content = await self.metadata_storage.read_path(key)
-                return metadata_content is not None
+                if metadata_content is None:
+                    return False
+                metadata = ResultRecordMetadata.load_bytes(metadata_content)
+
             except Exception:
                 return False
         else:
             try:
                 content = await self.result_storage.read_path(key)
-                return content is not None
+                if content is None:
+                    return False
+                record = ResultRecord.deserialize(content)
+                metadata = record.metadata
             except Exception:
                 return False

+        if metadata.expiration:
+            # if the result has an expiration,
+            # check if it is still in the future
+            exists = metadata.expiration > pendulum.now("utc")
+        else:
+            exists = True
+        return exists
+
     def exists(self, key: str) -> bool:
         """
         Check if a result record exists in storage.
```
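With the expiration check above, an expired record is reported as missing even though its payload is still on disk. A sketch under assumed local-filesystem defaults (the key name is illustrative):

```python
import pendulum

from prefect.results import ResultStore

store = ResultStore()  # assumes the default local result storage

# Write a record that expired a minute ago.
store.write(
    obj={"answer": 42},
    key="expired-example",
    expiration=pendulum.now("utc").subtract(minutes=1),
)

# The file exists, but the expiration is in the past.
print(store.exists("expired-example"))  # expected: False
```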
```diff
@@ -342,9 +392,13 @@
         Returns:
             A result record.
         """
+
         if self.lock_manager is not None and not self.is_lock_holder(key, holder):
             await self.await_for_lock(key)

+        if key in self.cache:
+            return self.cache[key]
+
         if self.result_storage is None:
             self.result_storage = await get_default_result_storage()

@@ -355,12 +409,23 @@
                 metadata.storage_key is not None
             ), "Did not find storage key in metadata"
             result_content = await self.result_storage.read_path(metadata.storage_key)
-            return ResultRecord.deserialize_from_result_and_metadata(
+            result_record = ResultRecord.deserialize_from_result_and_metadata(
                 result=result_content, metadata=metadata_content
             )
         else:
             content = await self.result_storage.read_path(key)
-            return ResultRecord.deserialize(content)
+            result_record = ResultRecord.deserialize(content)
+
+        if self.cache_result_in_memory:
+            if self.result_storage_block_id is None and hasattr(
+                self.result_storage, "_resolve_path"
+            ):
+                cache_key = str(self.result_storage._resolve_path(key))
+            else:
+                cache_key = key
+
+            self.cache[cache_key] = result_record
+        return result_record

     def read(self, key: str, holder: Optional[str] = None) -> "ResultRecord":
         """
```
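Reads now populate the new LRU `cache` (keyed by the resolved storage path when the store has no block id), so repeated reads of the same key can be served from memory. A minimal sketch:

```python
from prefect.results import ResultStore

store = ResultStore()  # cache defaults to LRUCache(maxsize=1000)

store.write(obj="hello", key="cache-example")
record = store.read("cache-example")

print(record.result)          # "hello"
print(len(store.cache) >= 1)  # the record is now held in the in-memory cache
```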
```diff
@@ -390,10 +455,10 @@

     def create_result_record(
         self,
-        key: str,
         obj: Any,
+        key: Optional[str] = None,
         expiration: Optional[DateTime] = None,
-    ):
+    ) -> "ResultRecord":
         """
         Create a result record.

@@ -404,6 +469,13 @@
         """
         key = key or self.storage_key_fn()

+        if self.result_storage is None:
+            self.result_storage = get_default_result_storage(_sync=True)
+
+        if self.result_storage_block_id is None:
+            if hasattr(self.result_storage, "_resolve_path"):
+                key = str(self.result_storage._resolve_path(key))
+
         return ResultRecord(
             result=obj,
             metadata=ResultRecordMetadata(
@@ -416,8 +488,8 @@

     def write(
         self,
-        key: str,
         obj: Any,
+        key: Optional[str] = None,
         expiration: Optional[DateTime] = None,
         holder: Optional[str] = None,
     ):
@@ -433,17 +505,18 @@
             holder: The holder of the lock if a lock was set on the record.
         """
         holder = holder or self.generate_default_holder()
+        result_record = self.create_result_record(
+            key=key, obj=obj, expiration=expiration
+        )
         return self.persist_result_record(
-            result_record=self.create_result_record(
-                key=key, obj=obj, expiration=expiration
-            ),
+            result_record=result_record,
             holder=holder,
         )

     async def awrite(
         self,
-        key: str,
         obj: Any,
+        key: Optional[str] = None,
         expiration: Optional[DateTime] = None,
         holder: Optional[str] = None,
     ):
```
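Since `key` is now optional on `create_result_record`, `write`, and `awrite`, callers can let `storage_key_fn` generate one. A short sketch:

```python
from prefect.results import ResultStore

store = ResultStore()

# No key supplied: create_result_record falls back to storage_key_fn()
# and, for local storage, resolves it to a full path.
record = store.create_result_record(obj={"status": "ok"})
print(record.metadata.storage_key)
```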
```diff
@@ -478,13 +551,22 @@
         ), "Storage key is required on result record"

         key = result_record.metadata.storage_key
+        if result_record.metadata.storage_block_id is None:
+            basepath = (
+                self.result_storage._resolve_path("")
+                if hasattr(self.result_storage, "_resolve_path")
+                else Path(".").resolve()
+            )
+            base_key = str(Path(key).relative_to(basepath))
+        else:
+            base_key = key
         if (
             self.lock_manager is not None
-            and self.is_locked(key)
-            and not self.is_lock_holder(key, holder)
+            and self.is_locked(base_key)
+            and not self.is_lock_holder(base_key, holder)
         ):
             raise RuntimeError(
-                f"Cannot write to result record with key {key} because it is locked by "
+                f"Cannot write to result record with key {base_key} because it is locked by "
                 f"another holder."
             )
         if self.result_storage is None:
@@ -497,7 +579,7 @@
                 content=result_record.serialize_result(),
             )
             await self.metadata_storage.write_path(
-                key,
+                base_key,
                 content=result_record.serialize_metadata(),
             )
         # Otherwise, write the result metadata and result together
@@ -506,6 +588,9 @@
                 result_record.metadata.storage_key, content=result_record.serialize()
             )

+        if self.cache_result_in_memory:
+            self.cache[key] = result_record
+
     def persist_result_record(
         self, result_record: "ResultRecord", holder: Optional[str] = None
     ):
@@ -671,6 +756,11 @@
             )
         return await self.lock_manager.await_for_lock(key, timeout)

+    @deprecated.deprecated_callable(
+        start_date="Sep 2024",
+        end_date="Nov 2024",
+        help="Use `create_result_record` instead.",
+    )
     @sync_compatible
     async def create_result(
         self,
@@ -683,6 +773,11 @@
         """
         # Null objects are "cached" in memory at no cost
         should_cache_object = self.cache_result_in_memory or obj is None
+        should_persist_result = (
+            self.persist_result
+            if self.persist_result is not None
+            else not should_cache_object
+        )

         if key:

@@ -704,7 +799,7 @@
             serializer=self.serializer,
             cache_object=should_cache_object,
             expiration=expiration,
-            serialize_to_none=not self.persist_result,
+            serialize_to_none=not should_persist_result,
         )

     # TODO: These two methods need to find a new home
@@ -729,7 +824,7 @@
         return record.result


-def
+def get_result_store() -> ResultStore:
     """
     Get the current result store.
     """
@@ -779,6 +874,17 @@ class ResultRecordMetadata(BaseModel):
         """
         return cls.model_validate_json(data)

+    def __eq__(self, other):
+        if not isinstance(other, ResultRecordMetadata):
+            return False
+        return (
+            self.storage_key == other.storage_key
+            and self.expiration == other.expiration
+            and self.serializer == other.serializer
+            and self.prefect_version == other.prefect_version
+            and self.storage_block_id == other.storage_block_id
+        )
+

 class ResultRecord(BaseModel, Generic[R]):
     """
@@ -856,6 +962,31 @@ class ResultRecord(BaseModel, Generic[R]):
             value["metadata"]["prefect_version"] = value.pop("prefect_version")
         return value

+    @classmethod
+    async def _from_metadata(cls, metadata: ResultRecordMetadata) -> "ResultRecord[R]":
+        """
+        Create a result record from metadata.
+
+        Will use the result record metadata to fetch data via a result store.
+
+        Args:
+            metadata: The metadata to create the result record from.
+
+        Returns:
+            ResultRecord: The result record.
+        """
+        if metadata.storage_block_id is None:
+            storage_block = None
+        else:
+            storage_block = await resolve_result_storage(
+                metadata.storage_block_id, _sync=False
+            )
+        store = ResultStore(
+            result_storage=storage_block, serializer=metadata.serializer
+        )
+        result = await store.aread(metadata.storage_key)
+        return result
+
     def serialize_metadata(self) -> bytes:
         return self.metadata.dump_bytes()
```
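The `__eq__` implementations added to `ResultRecordMetadata` above and to `ResultRecord` in the next hunk make serialization round trips easy to verify, since records now compare field-by-field instead of by identity. A sketch:

```python
from prefect.results import ResultRecord, ResultStore

store = ResultStore()
record = store.create_result_record(obj=[1, 2, 3])

# serialize() emits result plus metadata as bytes; deserialize() rebuilds
# the record, and the new __eq__ compares metadata and result values.
round_tripped = ResultRecord.deserialize(record.serialize())
print(round_tripped == record)  # expected: True
```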
```diff
@@ -913,7 +1044,15 @@
             result=result_record_metadata.serializer.loads(result),
         )

+    def __eq__(self, other):
+        if not isinstance(other, ResultRecord):
+            return False
+        return self.metadata == other.metadata and self.result == other.result

+
+@deprecated.deprecated_class(
+    start_date="Sep 2024", end_date="Nov 2024", help="Use `ResultRecord` instead."
+)
 @register_base_type
 class BaseResult(BaseModel, abc.ABC, Generic[R]):
     model_config = ConfigDict(extra="forbid")
@@ -964,6 +1103,9 @@ class BaseResult(BaseModel, abc.ABC, Generic[R]):
         return cls.__name__ if isinstance(default, PydanticUndefinedType) else default


+@deprecated.deprecated_class(
+    start_date="Sep 2024", end_date="Nov 2024", help="Use `ResultRecord` instead."
+)
 class PersistedResult(BaseResult):
     """
     Result type which stores a reference to a persisted result.
@@ -1057,7 +1199,6 @@
         """
         Write the result to the storage block.
         """
-
         if self._persisted or self.serialize_to_none:
             # don't double write or overwrite
             return
@@ -1078,7 +1219,10 @@
         # this could error if the serializer requires kwargs
         serializer = Serializer(type=self.serializer_type)

-        result_store = ResultStore(result_storage=storage_block, serializer=serializer)
+        result_store = ResultStore(
+            result_storage=storage_block,
+            serializer=serializer,
+        )
         await result_store.awrite(
             obj=obj, key=self.storage_key, expiration=self.expiration
         )
```
prefect/runner/runner.py
CHANGED
```diff
@@ -66,6 +66,11 @@ from prefect.client.schemas.filters import (
 )
 from prefect.client.schemas.objects import Flow as APIFlow
 from prefect.client.schemas.objects import FlowRun, State, StateType
+from prefect.concurrency.asyncio import (
+    AcquireConcurrencySlotTimeoutError,
+    ConcurrencySlotAcquisitionError,
+    concurrency,
+)
 from prefect.events import DeploymentTriggerTypes, TriggerTypes
 from prefect.events.related import tags_as_related_resources
 from prefect.events.schemas.events import RelatedResource
@@ -81,7 +86,12 @@ from prefect.settings import (
     PREFECT_RUNNER_SERVER_ENABLE,
     get_current_settings,
 )
-from prefect.states import Crashed, Pending, exception_to_failed_state
+from prefect.states import (
+    AwaitingConcurrencySlot,
+    Crashed,
+    Pending,
+    exception_to_failed_state,
+)
 from prefect.types.entrypoint import EntrypointType
 from prefect.utilities.asyncutils import (
     asyncnullcontext,
@@ -226,6 +236,7 @@ class Runner:
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
+        concurrency_limit: Optional[int] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -248,6 +259,10 @@
                 or a timedelta object. If a number is given, it will be interpreted as seconds.
             cron: A cron schedule of when to execute runs of this flow.
             rrule: An rrule schedule of when to execute runs of this flow.
+            paused: Whether or not to set the created deployment as paused.
+            schedules: A list of schedule objects defining when to execute runs of this flow.
+                Used to define multiple schedules or additional scheduling options like `timezone`.
+            concurrency_limit: The maximum number of concurrent runs of this flow to allow.
             triggers: A list of triggers that should kick of a run of this flow.
             parameters: A dictionary of default parameter values to pass to runs of this flow.
             description: A description for the created deployment. Defaults to the flow's
@@ -280,6 +295,7 @@
             version=version,
             enforce_parameter_schema=enforce_parameter_schema,
             entrypoint_type=entrypoint_type,
+            concurrency_limit=concurrency_limit,
         )
         return await self.add_deployment(deployment)
```
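A sketch of opting into the new per-deployment concurrency limit when registering a flow on a runner. This assumes the method modified above is `Runner.add_flow`, whose signature the hunk matches:

```python
from prefect import flow
from prefect.runner import Runner


@flow
def etl():
    ...


async def main():
    runner = Runner(name="example-runner")
    # At most two runs of this deployment execute at once; extra runs
    # are rescheduled via AwaitingConcurrencySlot (see the hunks below).
    await runner.add_flow(etl, name="etl-deployment", concurrency_limit=2)
    await runner.start()
```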
```diff
@@ -959,6 +975,7 @@
         """
         submittable_flow_runs = flow_run_response
         submittable_flow_runs.sort(key=lambda run: run.next_scheduled_start_time)
+
         for i, flow_run in enumerate(submittable_flow_runs):
             if flow_run.id in self._submitting_flow_run_ids:
                 continue
@@ -1025,12 +1042,44 @@
     ) -> Union[Optional[int], Exception]:
         run_logger = self._get_flow_run_logger(flow_run)

+        if flow_run.deployment_id:
+            deployment = await self._client.read_deployment(flow_run.deployment_id)
+            if deployment and deployment.concurrency_limit:
+                limit_name = f"deployment:{deployment.id}"
+                concurrency_ctx = concurrency
+
+                # ensure that the global concurrency limit is available
+                # and up-to-date before attempting to acquire a slot
+                await self._client.upsert_global_concurrency_limit_by_name(
+                    limit_name, deployment.concurrency_limit
+                )
+            else:
+                limit_name = ""
+                concurrency_ctx = asyncnullcontext
+
         try:
-            status_code = await self._run_process(
-                flow_run=flow_run,
-                task_status=task_status,
-                entrypoint=entrypoint,
+            async with concurrency_ctx(limit_name, max_retries=0, strict=True):
+                status_code = await self._run_process(
+                    flow_run=flow_run,
+                    task_status=task_status,
+                    entrypoint=entrypoint,
+                )
+        except (
+            AcquireConcurrencySlotTimeoutError,
+            ConcurrencySlotAcquisitionError,
+        ) as exc:
+            self._logger.info(
+                (
+                    "Deployment %s reached its concurrency limit when attempting to execute flow run %s. Will attempt to execute later."
+                ),
+                flow_run.deployment_id,
+                flow_run.name,
             )
+            await self._propose_scheduled_state(flow_run)
+
+            if not task_status._future.done():
+                task_status.started(exc)
+            return exc
         except Exception as exc:
             if not task_status._future.done():
                 # This flow run was being submitted and did not start successfully
```
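The context manager used above comes from `prefect.concurrency.asyncio`. With `max_retries=0` it fails fast instead of waiting for a slot, and `strict=True` raises if the limit is missing, which is what lets the runner catch the errors and reschedule. A standalone sketch against a hypothetical limit name:

```python
from prefect.concurrency.asyncio import (
    AcquireConcurrencySlotTimeoutError,
    ConcurrencySlotAcquisitionError,
    concurrency,
)


async def run_guarded():
    try:
        async with concurrency("deployment:example", max_retries=0, strict=True):
            ...  # do the work while holding the slot
    except (AcquireConcurrencySlotTimeoutError, ConcurrencySlotAcquisitionError):
        # No slot available: defer the work, as the runner does by
        # proposing an AwaitingConcurrencySlot state.
        pass
```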
```diff
@@ -1116,6 +1165,26 @@
                 exc_info=True,
             )

+    async def _propose_scheduled_state(self, flow_run: "FlowRun") -> None:
+        run_logger = self._get_flow_run_logger(flow_run)
+        try:
+            state = await propose_state(
+                self._client,
+                AwaitingConcurrencySlot(),
+                flow_run_id=flow_run.id,
+            )
+            self._logger.info(f"Flow run {flow_run.id} now has state {state.name}")
+        except Abort as exc:
+            run_logger.info(
+                (
+                    f"Aborted rescheduling of flow run '{flow_run.id}'. "
+                    f"Server sent an abort signal: {exc}"
+                ),
+            )
+            pass
+        except Exception:
+            run_logger.exception(f"Failed to update state of flow run '{flow_run.id}'")
+
     async def _propose_crashed_state(self, flow_run: "FlowRun", message: str) -> None:
         run_logger = self._get_flow_run_logger(flow_run)
         try:
```
prefect/settings.py
CHANGED
```diff
@@ -637,7 +637,7 @@ PREFECT_API_KEY = Setting(
 )
 """API key used to authenticate with a the Prefect API. Defaults to `None`."""

-PREFECT_API_ENABLE_HTTP2 = Setting(bool, default=True)
+PREFECT_API_ENABLE_HTTP2 = Setting(bool, default=False)
 """
 If true, enable support for HTTP/2 for communicating with an API.
```
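Since HTTP/2 is now disabled by default, clients that relied on it must opt back in, for example through the environment. A sketch reading the effective value (assumes httpx's `h2` extra is installed if you enable it):

```python
import os

# Opt back in before the Prefect client is created.
os.environ["PREFECT_API_ENABLE_HTTP2"] = "true"

from prefect.settings import PREFECT_API_ENABLE_HTTP2

print(PREFECT_API_ENABLE_HTTP2.value())  # True
```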