prefect-client 3.1.10__py3-none-any.whl → 3.1.12__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
- prefect/_experimental/lineage.py +7 -8
- prefect/_experimental/sla/__init__.py +0 -0
- prefect/_experimental/sla/client.py +66 -0
- prefect/_experimental/sla/objects.py +53 -0
- prefect/_internal/_logging.py +15 -3
- prefect/_internal/compatibility/async_dispatch.py +22 -16
- prefect/_internal/compatibility/deprecated.py +42 -18
- prefect/_internal/compatibility/migration.py +2 -2
- prefect/_internal/concurrency/inspection.py +12 -14
- prefect/_internal/concurrency/primitives.py +2 -2
- prefect/_internal/concurrency/services.py +154 -80
- prefect/_internal/concurrency/waiters.py +13 -9
- prefect/_internal/pydantic/annotations/pendulum.py +7 -7
- prefect/_internal/pytz.py +4 -3
- prefect/_internal/retries.py +10 -5
- prefect/_internal/schemas/bases.py +19 -10
- prefect/_internal/schemas/validators.py +227 -388
- prefect/_version.py +3 -3
- prefect/automations.py +236 -30
- prefect/blocks/__init__.py +3 -3
- prefect/blocks/abstract.py +53 -30
- prefect/blocks/core.py +183 -84
- prefect/blocks/notifications.py +133 -73
- prefect/blocks/redis.py +13 -9
- prefect/blocks/system.py +24 -11
- prefect/blocks/webhook.py +7 -5
- prefect/cache_policies.py +3 -2
- prefect/client/orchestration/__init__.py +1957 -0
- prefect/client/orchestration/_artifacts/__init__.py +0 -0
- prefect/client/orchestration/_artifacts/client.py +239 -0
- prefect/client/orchestration/_automations/__init__.py +0 -0
- prefect/client/orchestration/_automations/client.py +329 -0
- prefect/client/orchestration/_blocks_documents/__init__.py +0 -0
- prefect/client/orchestration/_blocks_documents/client.py +334 -0
- prefect/client/orchestration/_blocks_schemas/__init__.py +0 -0
- prefect/client/orchestration/_blocks_schemas/client.py +200 -0
- prefect/client/orchestration/_blocks_types/__init__.py +0 -0
- prefect/client/orchestration/_blocks_types/client.py +380 -0
- prefect/client/orchestration/_concurrency_limits/__init__.py +0 -0
- prefect/client/orchestration/_concurrency_limits/client.py +762 -0
- prefect/client/orchestration/_deployments/__init__.py +0 -0
- prefect/client/orchestration/_deployments/client.py +1128 -0
- prefect/client/orchestration/_flow_runs/__init__.py +0 -0
- prefect/client/orchestration/_flow_runs/client.py +903 -0
- prefect/client/orchestration/_flows/__init__.py +0 -0
- prefect/client/orchestration/_flows/client.py +343 -0
- prefect/client/orchestration/_logs/__init__.py +0 -0
- prefect/client/orchestration/_logs/client.py +97 -0
- prefect/client/orchestration/_variables/__init__.py +0 -0
- prefect/client/orchestration/_variables/client.py +157 -0
- prefect/client/orchestration/base.py +46 -0
- prefect/client/orchestration/routes.py +145 -0
- prefect/client/schemas/__init__.py +68 -28
- prefect/client/schemas/actions.py +2 -2
- prefect/client/schemas/filters.py +5 -0
- prefect/client/schemas/objects.py +8 -15
- prefect/client/schemas/schedules.py +22 -10
- prefect/concurrency/_asyncio.py +87 -0
- prefect/concurrency/{events.py → _events.py} +10 -10
- prefect/concurrency/asyncio.py +20 -104
- prefect/concurrency/context.py +6 -4
- prefect/concurrency/services.py +26 -74
- prefect/concurrency/sync.py +23 -44
- prefect/concurrency/v1/_asyncio.py +63 -0
- prefect/concurrency/v1/{events.py → _events.py} +13 -15
- prefect/concurrency/v1/asyncio.py +27 -80
- prefect/concurrency/v1/context.py +6 -4
- prefect/concurrency/v1/services.py +33 -79
- prefect/concurrency/v1/sync.py +18 -37
- prefect/context.py +66 -45
- prefect/deployments/base.py +10 -144
- prefect/deployments/flow_runs.py +12 -2
- prefect/deployments/runner.py +53 -4
- prefect/deployments/steps/pull.py +13 -0
- prefect/engine.py +17 -4
- prefect/events/clients.py +7 -1
- prefect/events/schemas/events.py +3 -2
- prefect/filesystems.py +6 -2
- prefect/flow_engine.py +101 -85
- prefect/flows.py +10 -1
- prefect/input/run_input.py +2 -1
- prefect/logging/logging.yml +1 -1
- prefect/main.py +1 -3
- prefect/results.py +2 -307
- prefect/runner/runner.py +4 -2
- prefect/runner/storage.py +87 -21
- prefect/serializers.py +32 -25
- prefect/settings/legacy.py +4 -4
- prefect/settings/models/api.py +3 -3
- prefect/settings/models/cli.py +3 -3
- prefect/settings/models/client.py +5 -3
- prefect/settings/models/cloud.py +8 -3
- prefect/settings/models/deployments.py +3 -3
- prefect/settings/models/experiments.py +4 -7
- prefect/settings/models/flows.py +3 -3
- prefect/settings/models/internal.py +4 -2
- prefect/settings/models/logging.py +4 -3
- prefect/settings/models/results.py +3 -3
- prefect/settings/models/root.py +3 -2
- prefect/settings/models/runner.py +4 -4
- prefect/settings/models/server/api.py +3 -3
- prefect/settings/models/server/database.py +11 -4
- prefect/settings/models/server/deployments.py +6 -2
- prefect/settings/models/server/ephemeral.py +4 -2
- prefect/settings/models/server/events.py +3 -2
- prefect/settings/models/server/flow_run_graph.py +6 -2
- prefect/settings/models/server/root.py +3 -3
- prefect/settings/models/server/services.py +26 -11
- prefect/settings/models/server/tasks.py +6 -3
- prefect/settings/models/server/ui.py +3 -3
- prefect/settings/models/tasks.py +5 -5
- prefect/settings/models/testing.py +3 -3
- prefect/settings/models/worker.py +5 -3
- prefect/settings/profiles.py +15 -2
- prefect/states.py +61 -45
- prefect/task_engine.py +54 -75
- prefect/task_runners.py +56 -55
- prefect/task_worker.py +2 -2
- prefect/tasks.py +90 -36
- prefect/telemetry/bootstrap.py +10 -9
- prefect/telemetry/run_telemetry.py +13 -8
- prefect/telemetry/services.py +4 -0
- prefect/transactions.py +4 -15
- prefect/utilities/_git.py +34 -0
- prefect/utilities/asyncutils.py +1 -1
- prefect/utilities/engine.py +3 -19
- prefect/utilities/generics.py +18 -0
- prefect/utilities/templating.py +25 -1
- prefect/workers/base.py +6 -3
- prefect/workers/process.py +1 -1
- {prefect_client-3.1.10.dist-info → prefect_client-3.1.12.dist-info}/METADATA +2 -2
- {prefect_client-3.1.10.dist-info → prefect_client-3.1.12.dist-info}/RECORD +135 -109
- prefect/client/orchestration.py +0 -4523
- prefect/records/__init__.py +0 -1
- prefect/records/base.py +0 -235
- prefect/records/filesystem.py +0 -213
- prefect/records/memory.py +0 -184
- prefect/records/result_store.py +0 -70
- {prefect_client-3.1.10.dist-info → prefect_client-3.1.12.dist-info}/LICENSE +0 -0
- {prefect_client-3.1.10.dist-info → prefect_client-3.1.12.dist-info}/WHEEL +0 -0
- {prefect_client-3.1.10.dist-info → prefect_client-3.1.12.dist-info}/top_level.txt +0 -0
prefect/concurrency/asyncio.py
CHANGED
@@ -1,42 +1,25 @@
-import asyncio
+from collections.abc import AsyncGenerator
 from contextlib import asynccontextmanager
-from typing import AsyncGenerator, List, Literal, Optional, Union, cast
+from typing import Optional, Union
 
 import anyio
-import httpx
 import pendulum
 
-from prefect._internal.compatibility.deprecated import deprecated_parameter
-
-try:
-    from pendulum import Interval
-except ImportError:
-    # pendulum < 3
-    from pendulum.period import Period as Interval  # type: ignore
-
-from prefect.client.orchestration import get_client
-from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
-from prefect.logging.loggers import get_run_logger
-
-from .context import ConcurrencyContext
-from .events import (
-    _emit_concurrency_acquisition_events,
-    _emit_concurrency_release_events,
+from ._asyncio import (
+    AcquireConcurrencySlotTimeoutError as AcquireConcurrencySlotTimeoutError,
 )
-from .services import ConcurrencySlotAcquisitionService
-
-
-class ConcurrencySlotAcquisitionError(Exception):
-    """Raised when an unhandlable occurs while acquiring concurrency slots."""
-
-
-class AcquireConcurrencySlotTimeoutError(TimeoutError):
-    """Raised when acquiring a concurrency slot times out."""
+from ._asyncio import ConcurrencySlotAcquisitionError as ConcurrencySlotAcquisitionError
+from ._asyncio import aacquire_concurrency_slots, arelease_concurrency_slots
+from ._events import (
+    emit_concurrency_acquisition_events,
+    emit_concurrency_release_events,
+)
+from .context import ConcurrencyContext
 
 
 @asynccontextmanager
 async def concurrency(
-    names: Union[str, List[str]],
+    names: Union[str, list[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
     max_retries: Optional[int] = None,
@@ -78,7 +61,7 @@ async def concurrency(
 
     names = names if isinstance(names, list) else [names]
 
-    limits = await _aacquire_concurrency_slots(
+    limits = await aacquire_concurrency_slots(
         names,
         occupy,
         timeout_seconds=timeout_seconds,
@@ -87,14 +70,14 @@ async def concurrency(
         strict=strict,
     )
     acquisition_time = pendulum.now("UTC")
-    emitted_events = _emit_concurrency_acquisition_events(limits, occupy)
+    emitted_events = emit_concurrency_acquisition_events(limits, occupy)
 
     try:
         yield
     finally:
-        occupancy_period = cast(Interval, pendulum.now("UTC") - acquisition_time)
+        occupancy_period = pendulum.now("UTC") - acquisition_time
         try:
-            await _arelease_concurrency_slots(
+            await arelease_concurrency_slots(
                 names, occupy, occupancy_period.total_seconds()
             )
         except anyio.get_cancelled_exc_class():
@@ -106,11 +89,11 @@ async def concurrency(
                 (names, occupy, occupancy_period.total_seconds())
             )
 
-    _emit_concurrency_release_events(limits, occupy, emitted_events)
+    emit_concurrency_release_events(limits, occupy, emitted_events)
 
 
 async def rate_limit(
-    names: Union[str, List[str]],
+    names: Union[str, list[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
     create_if_missing: Optional[bool] = None,
@@ -137,7 +120,7 @@ async def rate_limit(
 
     names = names if isinstance(names, list) else [names]
 
-    limits = await _aacquire_concurrency_slots(
+    limits = await aacquire_concurrency_slots(
         names,
         occupy,
         mode="rate_limit",
@@ -145,71 +128,4 @@ async def rate_limit(
         create_if_missing=create_if_missing,
         strict=strict,
     )
-    _emit_concurrency_acquisition_events(limits, occupy)
-
-
-@deprecated_parameter(
-    name="create_if_missing",
-    start_date="Sep 2024",
-    end_date="Oct 2024",
-    when=lambda x: x is not None,
-    help="Limits must be explicitly created before acquiring concurrency slots; see `strict` if you want to enforce this behavior.",
-)
-async def _aacquire_concurrency_slots(
-    names: List[str],
-    slots: int,
-    mode: Literal["concurrency", "rate_limit"] = "concurrency",
-    timeout_seconds: Optional[float] = None,
-    create_if_missing: Optional[bool] = None,
-    max_retries: Optional[int] = None,
-    strict: bool = False,
-) -> List[MinimalConcurrencyLimitResponse]:
-    service = ConcurrencySlotAcquisitionService.instance(frozenset(names))
-    future = service.send(
-        (slots, mode, timeout_seconds, create_if_missing, max_retries)
-    )
-    response_or_exception = await asyncio.wrap_future(future)
-
-    if isinstance(response_or_exception, Exception):
-        if isinstance(response_or_exception, TimeoutError):
-            raise AcquireConcurrencySlotTimeoutError(
-                f"Attempt to acquire concurrency slots timed out after {timeout_seconds} second(s)"
-            ) from response_or_exception
-
-        raise ConcurrencySlotAcquisitionError(
-            f"Unable to acquire concurrency slots on {names!r}"
-        ) from response_or_exception
-
-    retval = _response_to_minimal_concurrency_limit_response(response_or_exception)
-
-    if strict and not retval:
-        raise ConcurrencySlotAcquisitionError(
-            f"Concurrency limits {names!r} must be created before acquiring slots"
-        )
-    elif not retval:
-        try:
-            logger = get_run_logger()
-            logger.warning(
-                f"Concurrency limits {names!r} do not exist - skipping acquisition."
-            )
-        except Exception:
-            pass
-    return retval
-
-
-async def _arelease_concurrency_slots(
-    names: List[str], slots: int, occupancy_seconds: float
-) -> List[MinimalConcurrencyLimitResponse]:
-    async with get_client() as client:
-        response = await client.release_concurrency_slots(
-            names=names, slots=slots, occupancy_seconds=occupancy_seconds
-        )
-        return _response_to_minimal_concurrency_limit_response(response)
-
-
-def _response_to_minimal_concurrency_limit_response(
-    response: httpx.Response,
-) -> List[MinimalConcurrencyLimitResponse]:
-    return [
-        MinimalConcurrencyLimitResponse.model_validate(obj_) for obj_ in response.json()
-    ]
+    emit_concurrency_acquisition_events(limits, occupy)
prefect/concurrency/context.py
CHANGED
@@ -1,19 +1,21 @@
 from contextvars import ContextVar
-from typing import List, Tuple
+from typing import Any, ClassVar
+
+from typing_extensions import Self
 
 from prefect.client.orchestration import get_client
 from prefect.context import ContextModel, Field
 
 
 class ConcurrencyContext(ContextModel):
-    __var__: ContextVar = ContextVar("concurrency")
+    __var__: ClassVar[ContextVar[Self]] = ContextVar("concurrency")
 
     # Track the slots that have been acquired but were not able to be released
     # due to cancellation or some other error. These slots are released when
     # the context manager exits.
-    cleanup_slots: List[Tuple[List[str], int, float]] = Field(default_factory=list)
+    cleanup_slots: list[tuple[list[str], int, float]] = Field(default_factory=list)
 
-    def __exit__(self, *exc_info):
+    def __exit__(self, *exc_info: Any) -> None:
         if self.cleanup_slots:
             with get_client(sync_client=True) as client:
                 for names, occupy, occupancy_seconds in self.cleanup_slots:
prefect/concurrency/services.py
CHANGED
@@ -1,31 +1,30 @@
 import asyncio
-import concurrent.futures
+from collections.abc import AsyncGenerator
 from contextlib import asynccontextmanager
-from typing import (
-    TYPE_CHECKING,
-    AsyncGenerator,
-    FrozenSet,
-    Optional,
-    Tuple,
-)
+from typing import TYPE_CHECKING, Optional
 
 import httpx
 from starlette import status
+from typing_extensions import TypeAlias, Unpack
 
 from prefect._internal.concurrency import logger
-from prefect._internal.concurrency.services import QueueService
+from prefect._internal.concurrency.services import FutureQueueService
 from prefect.client.orchestration import get_client
 from prefect.utilities.timeout import timeout_async
 
 if TYPE_CHECKING:
     from prefect.client.orchestration import PrefectClient
 
+_Item: TypeAlias = tuple[int, str, Optional[float], Optional[bool], Optional[int]]
 
-class ConcurrencySlotAcquisitionService(QueueService):
-    def __init__(self, concurrency_limit_names: FrozenSet[str]):
+
+class ConcurrencySlotAcquisitionService(
+    FutureQueueService[Unpack[_Item], httpx.Response]
+):
+    def __init__(self, concurrency_limit_names: frozenset[str]):
         super().__init__(concurrency_limit_names)
-        self._client: "PrefectClient"
-        self.concurrency_limit_names = sorted(list(concurrency_limit_names))
+        self._client: PrefectClient
+        self.concurrency_limit_names: list[str] = sorted(list(concurrency_limit_names))
 
     @asynccontextmanager
     async def _lifespan(self) -> AsyncGenerator[None, None]:
@@ -33,32 +32,7 @@ class ConcurrencySlotAcquisitionService(QueueService):
             self._client = client
             yield
 
-    async def _handle(
-        self,
-        item: Tuple[
-            int,
-            str,
-            Optional[float],
-            concurrent.futures.Future,
-            Optional[bool],
-            Optional[int],
-        ],
-    ) -> None:
-        occupy, mode, timeout_seconds, future, create_if_missing, max_retries = item
-        try:
-            response = await self.acquire_slots(
-                occupy, mode, timeout_seconds, create_if_missing, max_retries
-            )
-        except Exception as exc:
-            # If the request to the increment endpoint fails in a non-standard
-            # way, we need to set the future's result so that the caller can
-            # handle the exception and then re-raise.
-            future.set_result(exc)
-            raise exc
-        else:
-            future.set_result(response)
-
-    async def acquire_slots(
+    async def acquire(
         self,
         slots: int,
         mode: str,
@@ -69,44 +43,22 @@ class ConcurrencySlotAcquisitionService(QueueService):
         with timeout_async(seconds=timeout_seconds):
             while True:
                 try:
-                    response = await self._client.increment_concurrency_slots(
+                    return await self._client.increment_concurrency_slots(
                         names=self.concurrency_limit_names,
                         slots=slots,
                         mode=mode,
                         create_if_missing=create_if_missing,
                     )
-                except Exception as exc:
-                    if (
-                        isinstance(exc, httpx.HTTPStatusError)
-                        and exc.response.status_code == status.HTTP_423_LOCKED
-                    ):
-                        if max_retries is not None and max_retries <= 0:
-                            raise exc
-                        retry_after = float(exc.response.headers["Retry-After"])
-                        logger.debug(
-                            f"Unable to acquire concurrency slot. Retrying in {retry_after} second(s)."
-                        )
-                        await asyncio.sleep(retry_after)
-                        if max_retries is not None:
-                            max_retries -= 1
-                    else:
-                        raise exc
-                else:
-                    return response
-
-    def send(
-        self, item: Tuple[int, str, Optional[float], Optional[bool], Optional[int]]
-    ) -> concurrent.futures.Future:
-        with self._lock:
-            if self._stopped:
-                raise RuntimeError("Cannot put items in a stopped service instance.")
+                except httpx.HTTPStatusError as exc:
+                    if not exc.response.status_code == status.HTTP_423_LOCKED:
+                        raise
 
-            logger.debug("Service %r enqueuing item %r", self, item)
-            future: concurrent.futures.Future = concurrent.futures.Future()
-
-            occupy, mode, timeout_seconds, create_if_missing, max_retries = item
-            self._queue.put_nowait(
-                (occupy, mode, timeout_seconds, future, create_if_missing, max_retries)
-            )
-
-        return future
+                    if max_retries is not None and max_retries <= 0:
+                        raise exc
+                    retry_after = float(exc.response.headers["Retry-After"])
+                    logger.debug(
+                        f"Unable to acquire concurrency slot. Retrying in {retry_after} second(s)."
+                    )
+                    await asyncio.sleep(retry_after)
+                    if max_retries is not None:
+                        max_retries -= 1
prefect/concurrency/sync.py
CHANGED
@@ -1,71 +1,54 @@
+from collections.abc import Generator
 from contextlib import contextmanager
-from typing import (
-    Generator,
-    List,
-    Optional,
-    TypeVar,
-    Union,
-    cast,
-)
+from typing import Optional, TypeVar, Union
 
 import pendulum
 from typing_extensions import Literal
 
-from prefect.utilities.asyncutils import run_coro_as_sync
-
-try:
-    from pendulum import Interval
-except ImportError:
-    # pendulum < 3
-    from pendulum.period import Period as Interval  # type: ignore
-
 from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
+from prefect.utilities.asyncutils import run_coro_as_sync
 
-from .asyncio import (
-    _aacquire_concurrency_slots,
-    _arelease_concurrency_slots,
+from ._asyncio import (
+    aacquire_concurrency_slots,
+    arelease_concurrency_slots,
 )
-from .events import (
-    _emit_concurrency_acquisition_events,
-    _emit_concurrency_release_events,
+from ._events import (
+    emit_concurrency_acquisition_events,
+    emit_concurrency_release_events,
 )
 
 T = TypeVar("T")
 
 
 def _release_concurrency_slots(
-    names: List[str], slots: int, occupancy_seconds: float
-) -> List[MinimalConcurrencyLimitResponse]:
+    names: list[str], slots: int, occupancy_seconds: float
+) -> list[MinimalConcurrencyLimitResponse]:
     result = run_coro_as_sync(
-        _arelease_concurrency_slots(names, slots, occupancy_seconds)
+        arelease_concurrency_slots(names, slots, occupancy_seconds)
     )
-    if result is None:
-        raise RuntimeError("Failed to release concurrency slots")
     return result
 
 
 def _acquire_concurrency_slots(
-    names: List[str],
+    names: list[str],
     slots: int,
     mode: Literal["concurrency", "rate_limit"] = "concurrency",
     timeout_seconds: Optional[float] = None,
    create_if_missing: Optional[bool] = None,
     max_retries: Optional[int] = None,
     strict: bool = False,
-) -> List[MinimalConcurrencyLimitResponse]:
+) -> list[MinimalConcurrencyLimitResponse]:
     result = run_coro_as_sync(
-        _aacquire_concurrency_slots(
+        aacquire_concurrency_slots(
             names, slots, mode, timeout_seconds, create_if_missing, max_retries, strict
         )
     )
-    if result is None:
-        raise RuntimeError("Failed to acquire concurrency slots")
     return result
 
 
 @contextmanager
 def concurrency(
-    names: Union[str, List[str]],
+    names: Union[str, list[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
     max_retries: Optional[int] = None,
@@ -107,7 +90,7 @@ def concurrency(
 
     names = names if isinstance(names, list) else [names]
 
-    limits: List[MinimalConcurrencyLimitResponse] = _acquire_concurrency_slots(
+    limits: list[MinimalConcurrencyLimitResponse] = _acquire_concurrency_slots(
         names,
         occupy,
         timeout_seconds=timeout_seconds,
@@ -116,22 +99,18 @@ def concurrency(
         max_retries=max_retries,
     )
     acquisition_time = pendulum.now("UTC")
-    emitted_events = _emit_concurrency_acquisition_events(limits, occupy)
+    emitted_events = emit_concurrency_acquisition_events(limits, occupy)
 
     try:
         yield
     finally:
-        occupancy_period = cast(Interval, pendulum.now("UTC") - acquisition_time)
-        _release_concurrency_slots(
-            names,
-            occupy,
-            occupancy_period.total_seconds(),
-        )
-        _emit_concurrency_release_events(limits, occupy, emitted_events)
+        occupancy_period = pendulum.now("UTC") - acquisition_time
+        _release_concurrency_slots(names, occupy, occupancy_period.total_seconds())
+        emit_concurrency_release_events(limits, occupy, emitted_events)
 
 
 def rate_limit(
-    names: Union[str, List[str]],
+    names: Union[str, list[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
     create_if_missing: Optional[bool] = None,
@@ -166,4 +145,4 @@ def rate_limit(
         create_if_missing=create_if_missing,
         strict=strict,
     )
-    _emit_concurrency_acquisition_events(limits, occupy)
+    emit_concurrency_acquisition_events(limits, occupy)
prefect/concurrency/v1/_asyncio.py
ADDED
@@ -0,0 +1,63 @@
+import asyncio
+from typing import Optional
+from uuid import UUID
+
+import httpx
+
+from prefect.client.orchestration import get_client
+from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
+from prefect.utilities.asyncutils import sync_compatible
+
+from .services import ConcurrencySlotAcquisitionService
+
+
+class ConcurrencySlotAcquisitionError(Exception):
+    """Raised when an unhandlable occurs while acquiring concurrency slots."""
+
+
+class AcquireConcurrencySlotTimeoutError(TimeoutError):
+    """Raised when acquiring a concurrency slot times out."""
+
+
+@sync_compatible
+async def acquire_concurrency_slots(
+    names: list[str],
+    task_run_id: UUID,
+    timeout_seconds: Optional[float] = None,
+) -> list[MinimalConcurrencyLimitResponse]:
+    service = ConcurrencySlotAcquisitionService.instance(frozenset(names))
+    future = service.send((task_run_id, timeout_seconds))
+    try:
+        response = await asyncio.wrap_future(future)
+    except TimeoutError as timeout:
+        raise AcquireConcurrencySlotTimeoutError(
+            f"Attempt to acquire concurrency limits timed out after {timeout_seconds} second(s)"
+        ) from timeout
+    except Exception as exc:
+        raise ConcurrencySlotAcquisitionError(
+            f"Unable to acquire concurrency limits {names!r}"
+        ) from exc
+    else:
+        return _response_to_concurrency_limit_response(response)
+
+
+@sync_compatible
+async def release_concurrency_slots(
+    names: list[str], task_run_id: UUID, occupancy_seconds: float
+) -> list[MinimalConcurrencyLimitResponse]:
+    async with get_client() as client:
+        response = await client.decrement_v1_concurrency_slots(
+            names=names,
+            task_run_id=task_run_id,
+            occupancy_seconds=occupancy_seconds,
+        )
+        return _response_to_concurrency_limit_response(response)
+
+
+def _response_to_concurrency_limit_response(
+    response: httpx.Response,
+) -> list[MinimalConcurrencyLimitResponse]:
+    data: list[MinimalConcurrencyLimitResponse] = response.json() or []
+    return [
+        MinimalConcurrencyLimitResponse.model_validate(limit) for limit in data if data
+    ]
prefect/concurrency/v1/{events.py → _events.py}
RENAMED
@@ -1,18 +1,18 @@
-from typing import Dict, List, Literal, Optional, Union
+from typing import Literal, Optional, Union
 from uuid import UUID
 
 from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
 from prefect.events import Event, RelatedResource, emit_event
 
 
-def _emit_concurrency_event(
+def emit_concurrency_event(
     phase: Union[Literal["acquired"], Literal["released"]],
     primary_limit: MinimalConcurrencyLimitResponse,
-    related_limits: List[MinimalConcurrencyLimitResponse],
+    related_limits: list[MinimalConcurrencyLimitResponse],
     task_run_id: UUID,
     follows: Union[Event, None] = None,
 ) -> Union[Event, None]:
-    resource: Dict[str, str] = {
+    resource: dict[str, str] = {
         "prefect.resource.id": f"prefect.concurrency-limit.v1.{primary_limit.id}",
         "prefect.resource.name": primary_limit.name,
         "limit": str(primary_limit.limit),
@@ -38,24 +38,22 @@ def _emit_concurrency_event(
     )
 
 
-def _emit_concurrency_acquisition_events(
-    limits: List[MinimalConcurrencyLimitResponse],
+def emit_concurrency_acquisition_events(
+    limits: list[MinimalConcurrencyLimitResponse],
     task_run_id: UUID,
-) -> Dict[UUID, Optional[Event]]:
-    events = {}
+) -> dict[UUID, Optional[Event]]:
+    events: dict[UUID, Optional[Event]] = {}
     for limit in limits:
-        event = _emit_concurrency_event("acquired", limit, limits, task_run_id)
+        event = emit_concurrency_event("acquired", limit, limits, task_run_id)
         events[limit.id] = event
 
     return events
 
 
-def _emit_concurrency_release_events(
-    limits: List[MinimalConcurrencyLimitResponse],
-    events: Dict[UUID, Optional[Event]],
+def emit_concurrency_release_events(
+    limits: list[MinimalConcurrencyLimitResponse],
+    events: dict[UUID, Optional[Event]],
     task_run_id: UUID,
 ) -> None:
     for limit in limits:
-        _emit_concurrency_event(
-            "released", limit, limits, task_run_id, events[limit.id]
-        )
+        emit_concurrency_event("released", limit, limits, task_run_id, events[limit.id])