prefect-client 3.0.0rc18__py3-none-any.whl → 3.0.0rc19__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as published to their public registry. It is provided for informational purposes only and reflects the changes between those versions.
- prefect/_internal/concurrency/services.py +14 -0
- prefect/_internal/schemas/bases.py +1 -0
- prefect/blocks/core.py +36 -29
- prefect/client/orchestration.py +97 -2
- prefect/concurrency/v1/__init__.py +0 -0
- prefect/concurrency/v1/asyncio.py +143 -0
- prefect/concurrency/v1/context.py +27 -0
- prefect/concurrency/v1/events.py +61 -0
- prefect/concurrency/v1/services.py +116 -0
- prefect/concurrency/v1/sync.py +92 -0
- prefect/context.py +2 -2
- prefect/deployments/flow_runs.py +0 -7
- prefect/deployments/runner.py +11 -0
- prefect/events/clients.py +41 -0
- prefect/events/related.py +72 -73
- prefect/events/utilities.py +2 -0
- prefect/events/worker.py +12 -3
- prefect/flow_engine.py +2 -0
- prefect/flows.py +7 -0
- prefect/records/base.py +74 -18
- prefect/records/filesystem.py +207 -0
- prefect/records/memory.py +16 -3
- prefect/records/result_store.py +19 -14
- prefect/results.py +11 -0
- prefect/runner/runner.py +7 -4
- prefect/settings.py +0 -8
- prefect/task_engine.py +98 -209
- prefect/task_worker.py +7 -39
- prefect/tasks.py +0 -7
- prefect/transactions.py +67 -19
- prefect/utilities/asyncutils.py +3 -3
- prefect/utilities/callables.py +1 -3
- prefect/utilities/engine.py +1 -4
- {prefect_client-3.0.0rc18.dist-info → prefect_client-3.0.0rc19.dist-info}/METADATA +3 -4
- {prefect_client-3.0.0rc18.dist-info → prefect_client-3.0.0rc19.dist-info}/RECORD +38 -31
- {prefect_client-3.0.0rc18.dist-info → prefect_client-3.0.0rc19.dist-info}/LICENSE +0 -0
- {prefect_client-3.0.0rc18.dist-info → prefect_client-3.0.0rc19.dist-info}/WHEEL +0 -0
- {prefect_client-3.0.0rc18.dist-info → prefect_client-3.0.0rc19.dist-info}/top_level.txt +0 -0
prefect/concurrency/v1/sync.py ADDED
@@ -0,0 +1,92 @@
+from contextlib import contextmanager
+from typing import (
+    Generator,
+    List,
+    Optional,
+    TypeVar,
+    Union,
+    cast,
+)
+from uuid import UUID
+
+import pendulum
+
+from ...client.schemas.responses import MinimalConcurrencyLimitResponse
+from ..sync import _call_async_function_from_sync
+
+try:
+    from pendulum import Interval
+except ImportError:
+    # pendulum < 3
+    from pendulum.period import Period as Interval  # type: ignore
+
+from .asyncio import (
+    _acquire_concurrency_slots,
+    _release_concurrency_slots,
+)
+from .events import (
+    _emit_concurrency_acquisition_events,
+    _emit_concurrency_release_events,
+)
+
+T = TypeVar("T")
+
+
+@contextmanager
+def concurrency(
+    names: Union[str, List[str]],
+    task_run_id: UUID,
+    timeout_seconds: Optional[float] = None,
+) -> Generator[None, None, None]:
+    """
+    A context manager that acquires and releases concurrency slots from the
+    given concurrency limits.
+
+    Args:
+        names: The names of the concurrency limits to acquire.
+        task_run_id: The task run ID acquiring the limits.
+        timeout_seconds: The number of seconds to wait to acquire the limits before
+            raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
+
+    Raises:
+        TimeoutError: If the limits are not acquired within the given timeout.
+
+    Example:
+    A simple example of using the sync `concurrency` context manager:
+    ```python
+    from prefect.concurrency.v1.sync import concurrency
+
+    def resource_heavy():
+        with concurrency("test"):
+            print("Resource heavy task")
+
+    def main():
+        resource_heavy()
+    ```
+    """
+    if not names:
+        yield
+        return
+
+    names = names if isinstance(names, list) else [names]
+
+    limits: List[MinimalConcurrencyLimitResponse] = _call_async_function_from_sync(
+        _acquire_concurrency_slots,
+        names,
+        timeout_seconds=timeout_seconds,
+        task_run_id=task_run_id,
+    )
+    acquisition_time = pendulum.now("UTC")
+    emitted_events = _emit_concurrency_acquisition_events(limits, task_run_id)
+
+    try:
+        yield
+    finally:
+        occupancy_period = cast(Interval, pendulum.now("UTC") - acquisition_time)
+        _call_async_function_from_sync(
+            _release_concurrency_slots,
+            names,
+            task_run_id,
+            occupancy_period.total_seconds(),
+        )
+        _emit_concurrency_release_events(limits, emitted_events, task_run_id)
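The v1 `concurrency` context manager differs from the task-agnostic API in that it requires a `task_run_id`. A minimal sketch of calling it directly, assuming a concurrency limit named `database` exists on the server and using a random UUID where the engine would normally supply the real task run ID:

```python
from uuid import uuid4

from prefect.concurrency.v1.sync import concurrency


def resource_heavy():
    # In real use the task run ID comes from the runtime context;
    # uuid4() here is purely illustrative.
    with concurrency("database", task_run_id=uuid4(), timeout_seconds=30):
        print("holding a slot on the 'database' limit")


if __name__ == "__main__":
    resource_heavy()
```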
prefect/context.py CHANGED
@@ -131,7 +131,7 @@ class ContextModel(BaseModel):
         extra="forbid",
     )
 
-    def __enter__(self):
+    def __enter__(self) -> Self:
         if self._token is not None:
             raise RuntimeError(
                 "Context already entered. Context enter calls cannot be nested."
@@ -290,7 +290,7 @@ class AsyncClientContext(ContextModel):
         if ctx:
            yield ctx
        else:
-            with cls() as ctx:
+            async with cls() as ctx:
                yield ctx
 
 
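With `__enter__` typed to return `Self`, type checkers can now infer the concrete context type bound by a `with` statement on any `ContextModel` subclass. A small sketch, assuming `TagsContext` still exposes its `current_tags` field:

```python
from prefect.context import TagsContext

# `ctx` is inferred as TagsContext rather than an untyped value,
# so the attribute access below type-checks cleanly.
with TagsContext(current_tags={"nightly"}) as ctx:
    print(ctx.current_tags)
```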
prefect/deployments/flow_runs.py CHANGED
@@ -5,7 +5,6 @@ from uuid import UUID
 import anyio
 import pendulum
 
-from prefect._internal.compatibility.deprecated import deprecated_parameter
 from prefect.client.schemas import FlowRun
 from prefect.client.utilities import inject_client
 from prefect.context import FlowRunContext, TaskRunContext
@@ -24,11 +23,6 @@ logger = get_logger(__name__)
 
 
 @sync_compatible
-@deprecated_parameter(
-    "infra_overrides",
-    start_date="Apr 2024",
-    help="Use `job_variables` instead.",
-)
 @inject_client
 async def run_deployment(
     name: Union[str, UUID],
@@ -42,7 +36,6 @@ async def run_deployment(
     idempotency_key: Optional[str] = None,
     work_queue_name: Optional[str] = None,
     as_subflow: Optional[bool] = True,
-    infra_overrides: Optional[dict] = None,
     job_variables: Optional[dict] = None,
 ) -> "FlowRun":
     """
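With the deprecated `infra_overrides` parameter gone, infrastructure overrides are passed only through `job_variables`. A brief sketch of the updated call shape, assuming a deployment named `my-flow/my-deployment` exists; the parameter values are illustrative:

```python
from prefect.deployments import run_deployment

flow_run = run_deployment(
    name="my-flow/my-deployment",
    parameters={"batch_size": 100},
    job_variables={"env": {"LOG_LEVEL": "DEBUG"}},
)
```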
prefect/deployments/runner.py CHANGED
@@ -143,6 +143,10 @@ class RunnerDeployment(BaseModel):
         default=None,
         description="The schedules that should cause this deployment to run.",
     )
+    concurrency_limit: Optional[int] = Field(
+        default=None,
+        description="The maximum number of concurrent runs of this deployment.",
+    )
     paused: Optional[bool] = Field(
         default=None, description="Whether or not the deployment is paused."
     )
@@ -274,6 +278,7 @@ class RunnerDeployment(BaseModel):
             version=self.version,
             paused=self.paused,
             schedules=self.schedules,
+            concurrency_limit=self.concurrency_limit,
             parameters=self.parameters,
             description=self.description,
             tags=self.tags,
@@ -432,6 +437,7 @@ class RunnerDeployment(BaseModel):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
+        concurrency_limit: Optional[int] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -485,6 +491,7 @@ class RunnerDeployment(BaseModel):
             name=Path(name).stem,
             flow_name=flow.name,
             schedules=constructed_schedules,
+            concurrency_limit=concurrency_limit,
             paused=paused,
             tags=tags or [],
             triggers=triggers or [],
@@ -558,6 +565,7 @@ class RunnerDeployment(BaseModel):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
+        concurrency_limit: Optional[int] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -614,6 +622,7 @@ class RunnerDeployment(BaseModel):
             name=Path(name).stem,
             flow_name=flow.name,
             schedules=constructed_schedules,
+            concurrency_limit=concurrency_limit,
             paused=paused,
             tags=tags or [],
             triggers=triggers or [],
@@ -646,6 +655,7 @@ class RunnerDeployment(BaseModel):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
+        concurrency_limit: Optional[int] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -710,6 +720,7 @@ class RunnerDeployment(BaseModel):
             name=Path(name).stem,
             flow_name=flow.name,
             schedules=constructed_schedules,
+            concurrency_limit=concurrency_limit,
             paused=paused,
             tags=tags or [],
             triggers=triggers or [],
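The new `concurrency_limit` field is accepted by the `RunnerDeployment` constructors shown above. A sketch of building a deployment object with the limit set, assuming a flow defined with the `@flow` decorator; the flow and deployment names are illustrative:

```python
from prefect import flow
from prefect.deployments.runner import RunnerDeployment


@flow
def etl():
    ...


deployment = RunnerDeployment.from_flow(
    etl,
    name="etl-hourly",
    concurrency_limit=2,  # at most two concurrent runs of this deployment
)
```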
prefect/events/clients.py CHANGED
@@ -346,6 +346,47 @@ class PrefectEventsClient(EventsClient):
             await asyncio.sleep(1)
 
 
+class AssertingPassthroughEventsClient(PrefectEventsClient):
+    """A Prefect Events client that BOTH records all events sent to it for inspection
+    during tests AND sends them to a Prefect server."""
+
+    last: ClassVar["Optional[AssertingPassthroughEventsClient]"] = None
+    all: ClassVar[List["AssertingPassthroughEventsClient"]] = []
+
+    args: Tuple
+    kwargs: Dict[str, Any]
+    events: List[Event]
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        AssertingPassthroughEventsClient.last = self
+        AssertingPassthroughEventsClient.all.append(self)
+        self.args = args
+        self.kwargs = kwargs
+
+    @classmethod
+    def reset(cls) -> None:
+        cls.last = None
+        cls.all = []
+
+    def pop_events(self) -> List[Event]:
+        events = self.events
+        self.events = []
+        return events
+
+    async def _emit(self, event: Event) -> None:
+        # actually send the event to the server
+        await super()._emit(event)
+
+        # record the event for inspection
+        self.events.append(event)
+
+    async def __aenter__(self) -> Self:
+        await super().__aenter__()
+        self.events = []
+        return self
+
+
 class PrefectCloudEventsClient(PrefectEventsClient):
     """A Prefect Events client that streams events to a Prefect Cloud Workspace"""
 
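`AssertingPassthroughEventsClient` targets tests that want to assert on emitted events without suppressing delivery. A sketch of how a test might inspect captured events, assuming the events worker has been configured to use this client type and that the code under test emitted a hypothetical `my.app.batch.completed` event:

```python
from prefect.events.clients import AssertingPassthroughEventsClient


def assert_batch_completed_event_was_emitted():
    # The most recent instance and its captured events are exposed as
    # class attributes after the code under test has run.
    client = AssertingPassthroughEventsClient.last
    assert client is not None
    events = client.pop_events()
    assert any(event.event == "my.app.batch.completed" for event in events)
```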
prefect/events/related.py CHANGED
@@ -21,6 +21,7 @@ from .schemas.events import RelatedResource
 
 if TYPE_CHECKING:
     from prefect._internal.schemas.bases import ObjectBaseModel
+    from prefect.client.orchestration import PrefectClient
 
 ResourceCacheEntry = Dict[str, Union[str, "ObjectBaseModel", None]]
 RelatedResourceCache = Dict[str, Tuple[ResourceCacheEntry, DateTime]]
@@ -54,9 +55,9 @@ def object_as_related_resource(kind: str, role: str, object: Any) -> RelatedResource:
 
 
 async def related_resources_from_run_context(
+    client: "PrefectClient",
     exclude: Optional[Set[str]] = None,
 ) -> List[RelatedResource]:
-    from prefect.client.orchestration import get_client
     from prefect.client.schemas.objects import FlowRun
     from prefect.context import FlowRunContext, TaskRunContext
 
@@ -77,86 +78,84 @@ async def related_resources_from_run_context(
 
     related_objects: List[ResourceCacheEntry] = []
 
-    async with get_client() as client:
-
-        async def dummy_read():
-            return {}
-
-        if flow_run_context:
-            related_objects.append(
-                {
-                    "kind": "flow-run",
-                    "role": "flow-run",
-                    "object": flow_run_context.flow_run,
-                },
-            )
-        else:
-            related_objects.append(
-                await _get_and_cache_related_object(
-                    kind="flow-run",
-                    role="flow-run",
-                    client_method=client.read_flow_run,
-                    obj_id=flow_run_id,
-                    cache=RESOURCE_CACHE,
-                )
-            )
-
-        if task_run_context:
-            related_objects.append(
-                {
-                    "kind": "task-run",
-                    "role": "task-run",
-                    "object": task_run_context.task_run,
-                },
-            )
-
-        flow_run = related_objects[0]["object"]
-
-        if isinstance(flow_run, FlowRun):
-            related_objects += list(
-                await asyncio.gather(
-                    _get_and_cache_related_object(
-                        kind="flow",
-                        role="flow",
-                        client_method=client.read_flow,
-                        obj_id=flow_run.flow_id,
-                        cache=RESOURCE_CACHE,
-                    ),
-                    (
-                        _get_and_cache_related_object(
-                            kind="deployment",
-                            role="deployment",
-                            client_method=client.read_deployment,
-                            obj_id=flow_run.deployment_id,
-                            cache=RESOURCE_CACHE,
-                        )
-                        if flow_run.deployment_id
-                        else dummy_read()
-                    ),
-                    (
-                        _get_and_cache_related_object(
-                            kind="work-queue",
-                            role="work-queue",
-                            client_method=client.read_work_queue,
-                            obj_id=flow_run.work_queue_id,
-                            cache=RESOURCE_CACHE,
-                        )
-                        if flow_run.work_queue_id
-                        else dummy_read()
-                    ),
-                    (
-                        _get_and_cache_related_object(
-                            kind="work-pool",
-                            role="work-pool",
-                            client_method=client.read_work_pool,
-                            obj_id=flow_run.work_pool_name,
-                            cache=RESOURCE_CACHE,
-                        )
-                        if flow_run.work_pool_name
-                        else dummy_read()
-                    ),
-                )
-            )
+    async def dummy_read():
+        return {}
+
+    if flow_run_context:
+        related_objects.append(
+            {
+                "kind": "flow-run",
+                "role": "flow-run",
+                "object": flow_run_context.flow_run,
+            },
+        )
+    else:
+        related_objects.append(
+            await _get_and_cache_related_object(
+                kind="flow-run",
+                role="flow-run",
+                client_method=client.read_flow_run,
+                obj_id=flow_run_id,
+                cache=RESOURCE_CACHE,
+            )
+        )
+
+    if task_run_context:
+        related_objects.append(
+            {
+                "kind": "task-run",
+                "role": "task-run",
+                "object": task_run_context.task_run,
+            },
+        )
+
+    flow_run = related_objects[0]["object"]
+
+    if isinstance(flow_run, FlowRun):
+        related_objects += list(
+            await asyncio.gather(
+                _get_and_cache_related_object(
+                    kind="flow",
+                    role="flow",
+                    client_method=client.read_flow,
+                    obj_id=flow_run.flow_id,
+                    cache=RESOURCE_CACHE,
+                ),
+                (
+                    _get_and_cache_related_object(
+                        kind="deployment",
+                        role="deployment",
+                        client_method=client.read_deployment,
+                        obj_id=flow_run.deployment_id,
+                        cache=RESOURCE_CACHE,
+                    )
+                    if flow_run.deployment_id
+                    else dummy_read()
+                ),
+                (
+                    _get_and_cache_related_object(
+                        kind="work-queue",
+                        role="work-queue",
+                        client_method=client.read_work_queue,
+                        obj_id=flow_run.work_queue_id,
+                        cache=RESOURCE_CACHE,
+                    )
+                    if flow_run.work_queue_id
+                    else dummy_read()
+                ),
+                (
+                    _get_and_cache_related_object(
+                        kind="work-pool",
+                        role="work-pool",
+                        client_method=client.read_work_pool,
+                        obj_id=flow_run.work_pool_name,
+                        cache=RESOURCE_CACHE,
+                    )
+                    if flow_run.work_pool_name
+                    else dummy_read()
+                ),
+            )
+        )
 
     related = []
     tags = set()
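`related_resources_from_run_context` now takes the orchestration client as an explicit argument instead of opening one per call. A sketch of invoking it directly, assuming it runs inside a flow or task run context:

```python
from prefect.client.orchestration import get_client
from prefect.events.related import related_resources_from_run_context


async def gather_related_resources():
    # The caller owns the client lifecycle; previously the function opened
    # and closed its own client on every invocation.
    async with get_client() as client:
        return await related_resources_from_run_context(client=client)
```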
prefect/events/utilities.py CHANGED
@@ -7,6 +7,7 @@ from pydantic_extra_types.pendulum_dt import DateTime
 
 from .clients import (
     AssertingEventsClient,
+    AssertingPassthroughEventsClient,
     PrefectCloudEventsClient,
     PrefectEventsClient,
 )
@@ -49,6 +50,7 @@ def emit_event(
         return None
 
     operational_clients = [
+        AssertingPassthroughEventsClient,
         AssertingEventsClient,
         PrefectCloudEventsClient,
         PrefectEventsClient,
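Because the passthrough client is registered among the operational clients, `emit_event` behaves the same whichever client is active. A reminder of the basic `emit_event` call shape, with an illustrative event name, resource ID, and payload:

```python
from prefect.events import emit_event

emit_event(
    event="my.app.batch.completed",
    resource={"prefect.resource.id": "my.app.batch.1234"},
    payload={"rows": 10_000},
)
```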
prefect/events/worker.py CHANGED
@@ -1,6 +1,6 @@
 from contextlib import asynccontextmanager
 from contextvars import Context, copy_context
-from typing import Any, Dict, Optional, Tuple, Type
+from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Type
 from uuid import UUID
 
 from typing_extensions import Self
@@ -22,6 +22,9 @@ from .clients import (
 from .related import related_resources_from_run_context
 from .schemas.events import Event
 
+if TYPE_CHECKING:
+    from prefect.client.orchestration import PrefectClient
+
 
 def should_emit_events() -> bool:
     return (
@@ -55,14 +58,18 @@ class EventsWorker(QueueService[Event]):
         self.client_type = client_type
         self.client_options = client_options
         self._client: EventsClient
+        self._orchestration_client: "PrefectClient"
         self._context_cache: Dict[UUID, Context] = {}
 
     @asynccontextmanager
     async def _lifespan(self):
         self._client = self.client_type(**{k: v for k, v in self.client_options})
+        from prefect.client.orchestration import get_client
 
+        self._orchestration_client = get_client()
         async with self._client:
-            yield
+            async with self._orchestration_client:
+                yield
 
     def _prepare_item(self, event: Event) -> Event:
         self._context_cache[event.id] = copy_context()
@@ -77,7 +84,9 @@ class EventsWorker(QueueService[Event]):
 
     async def attach_related_resources_from_context(self, event: Event):
         exclude = {resource.id for resource in event.involved_resources}
-        event.related += await related_resources_from_run_context(exclude=exclude)
+        event.related += await related_resources_from_run_context(
+            client=self._orchestration_client, exclude=exclude
+        )
 
     @classmethod
     def instance(
prefect/flow_engine.py CHANGED
@@ -30,6 +30,7 @@ from prefect.client.schemas import FlowRun, TaskRun
 from prefect.client.schemas.filters import FlowRunFilter
 from prefect.client.schemas.sorting import FlowRunSort
 from prefect.concurrency.context import ConcurrencyContext
+from prefect.concurrency.v1.context import ConcurrencyContext as ConcurrencyContextV1
 from prefect.context import FlowRunContext, SyncClientContext, TagsContext
 from prefect.exceptions import (
     Abort,
@@ -506,6 +507,7 @@ class FlowRunEngine(Generic[P, R]):
                     task_runner=task_runner,
                 )
             )
+            stack.enter_context(ConcurrencyContextV1())
             stack.enter_context(ConcurrencyContext())
 
             # set the logger to the flow run logger
prefect/flows.py CHANGED
@@ -643,6 +643,7 @@ class Flow(Generic[P, R]):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional[List["FlexibleScheduleList"]] = None,
+        concurrency_limit: Optional[int] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -666,6 +667,7 @@ class Flow(Generic[P, R]):
             paused: Whether or not to set this deployment as paused.
             schedules: A list of schedule objects defining when to execute runs of this deployment.
                 Used to define multiple schedules or additional scheduling options such as `timezone`.
+            concurrency_limit: The maximum number of runs of this deployment that can run at the same time.
             parameters: A dictionary of default parameter values to pass to runs of this deployment.
             triggers: A list of triggers that will kick off runs of this deployment.
             description: A description for the created deployment. Defaults to the flow's
@@ -718,6 +720,7 @@ class Flow(Generic[P, R]):
                 rrule=rrule,
                 paused=paused,
                 schedules=schedules,
+                concurrency_limit=concurrency_limit,
                 tags=tags,
                 triggers=triggers,
                 parameters=parameters or {},
@@ -737,6 +740,7 @@ class Flow(Generic[P, R]):
                 rrule=rrule,
                 paused=paused,
                 schedules=schedules,
+                concurrency_limit=concurrency_limit,
                 tags=tags,
                 triggers=triggers,
                 parameters=parameters or {},
@@ -1055,6 +1059,7 @@ class Flow(Generic[P, R]):
         rrule: Optional[str] = None,
         paused: Optional[bool] = None,
         schedules: Optional[List[DeploymentScheduleCreate]] = None,
+        concurrency_limit: Optional[int] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         parameters: Optional[dict] = None,
         description: Optional[str] = None,
@@ -1101,6 +1106,7 @@ class Flow(Generic[P, R]):
             paused: Whether or not to set this deployment as paused.
             schedules: A list of schedule objects defining when to execute runs of this deployment.
                 Used to define multiple schedules or additional scheduling options like `timezone`.
+            concurrency_limit: The maximum number of runs that can be executed concurrently.
             parameters: A dictionary of default parameter values to pass to runs of this deployment.
             description: A description for the created deployment. Defaults to the flow's
                 description if not provided.
@@ -1175,6 +1181,7 @@ class Flow(Generic[P, R]):
             cron=cron,
             rrule=rrule,
             schedules=schedules,
+            concurrency_limit=concurrency_limit,
             paused=paused,
             triggers=triggers,
             parameters=parameters,
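At the `Flow` level, `concurrency_limit` is now accepted by `to_deployment` and `deploy`. A sketch of serving a flow with the limit applied, assuming a flow defined with the `@flow` decorator; the names and schedule are illustrative:

```python
from prefect import flow, serve


@flow
def nightly_report():
    ...


if __name__ == "__main__":
    deployment = nightly_report.to_deployment(
        name="nightly",
        cron="0 2 * * *",
        concurrency_limit=1,  # never run two reports at once
    )
    serve(deployment)
```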
prefect/records/base.py CHANGED
@@ -1,22 +1,88 @@
+import abc
 import os
 import socket
 import threading
 from contextlib import contextmanager
 from dataclasses import dataclass
-from typing import Optional
+from typing import TYPE_CHECKING, Optional
 
-from prefect.results import BaseResult
+if TYPE_CHECKING:
+    from prefect.results import BaseResult
+    from prefect.transactions import IsolationLevel
 
 
+@dataclass
+class TransactionRecord:
+    """
+    A dataclass representation of a transaction record.
+    """
 
+    key: str
+    result: "BaseResult"
+
+
+class RecordStore(abc.ABC):
+    @abc.abstractmethod
+    def read(
+        self, key: str, holder: Optional[str] = None
+    ) -> Optional[TransactionRecord]:
+        """
+        Read the transaction record with the given key.
+
+        Args:
+            key: Unique identifier for the transaction record.
+            holder: Unique identifier for the holder of the lock. If a lock exists on
+                the record being written, the read will be blocked until the lock is
+                released if the provided holder does not match the holder of the lock.
+                If not provided, a default holder based on the current host, process,
+                and thread will be used.
+
+        Returns:
+            TransactionRecord: The transaction record with the given key.
+        """
+        ...
+
+    @abc.abstractmethod
+    def write(self, key: str, result: "BaseResult", holder: Optional[str] = None):
+        """
+        Write the transaction record with the given key.
+
+        Args:
+            key: Unique identifier for the transaction record.
+            record: The transaction record to write.
+            holder: Unique identifier for the holder of the lock. If a lock exists on
+                the record being written, the write will be rejected if the provided
+                holder does not match the holder of the lock. If not provided,
+                a default holder based on the current host, process, and thread will
+                be used.
+        """
+        ...
 
+    @abc.abstractmethod
     def exists(self, key: str) -> bool:
+        """
+        Check if the transaction record with the given key exists.
+
+        Args:
+            key: Unique identifier for the transaction record.
+
+        Returns:
+            bool: True if the record exists; False otherwise.
+        """
+        ...
+
+    @abc.abstractmethod
+    def supports_isolation_level(self, isolation_level: "IsolationLevel") -> bool:
+        """
+        Check if the record store supports the given isolation level.
+
+        Args:
+            isolation_level: The isolation level to check.
+
+        Returns:
+            bool: True if the record store supports the isolation level; False otherwise.
+        """
+        ...
 
     def acquire_lock(
         self,
@@ -155,13 +221,3 @@ class RecordStore:
             yield
         finally:
             self.release_lock(key=key, holder=holder)
-
-
-@dataclass
-class TransactionRecord:
-    """
-    A dataclass representation of a transaction record.
-    """
-
-    key: str
-    result: Optional[BaseResult] = None