prefect-client 3.1.9__py3-none-any.whl → 3.1.11__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (113)
  1. prefect/_experimental/lineage.py +7 -8
  2. prefect/_internal/_logging.py +15 -3
  3. prefect/_internal/compatibility/async_dispatch.py +22 -16
  4. prefect/_internal/compatibility/deprecated.py +42 -18
  5. prefect/_internal/compatibility/migration.py +2 -2
  6. prefect/_internal/concurrency/inspection.py +12 -14
  7. prefect/_internal/concurrency/primitives.py +2 -2
  8. prefect/_internal/concurrency/services.py +154 -80
  9. prefect/_internal/concurrency/waiters.py +13 -9
  10. prefect/_internal/pydantic/annotations/pendulum.py +7 -7
  11. prefect/_internal/pytz.py +4 -3
  12. prefect/_internal/retries.py +10 -5
  13. prefect/_internal/schemas/bases.py +19 -10
  14. prefect/_internal/schemas/validators.py +227 -388
  15. prefect/_version.py +3 -3
  16. prefect/artifacts.py +61 -74
  17. prefect/automations.py +27 -7
  18. prefect/blocks/core.py +3 -3
  19. prefect/client/{orchestration.py → orchestration/__init__.py} +38 -701
  20. prefect/client/orchestration/_artifacts/__init__.py +0 -0
  21. prefect/client/orchestration/_artifacts/client.py +239 -0
  22. prefect/client/orchestration/_concurrency_limits/__init__.py +0 -0
  23. prefect/client/orchestration/_concurrency_limits/client.py +762 -0
  24. prefect/client/orchestration/_logs/__init__.py +0 -0
  25. prefect/client/orchestration/_logs/client.py +95 -0
  26. prefect/client/orchestration/_variables/__init__.py +0 -0
  27. prefect/client/orchestration/_variables/client.py +157 -0
  28. prefect/client/orchestration/base.py +46 -0
  29. prefect/client/orchestration/routes.py +145 -0
  30. prefect/client/schemas/actions.py +2 -2
  31. prefect/client/schemas/filters.py +5 -0
  32. prefect/client/schemas/objects.py +3 -10
  33. prefect/client/schemas/schedules.py +22 -10
  34. prefect/concurrency/_asyncio.py +87 -0
  35. prefect/concurrency/{events.py → _events.py} +10 -10
  36. prefect/concurrency/asyncio.py +20 -104
  37. prefect/concurrency/context.py +6 -4
  38. prefect/concurrency/services.py +26 -74
  39. prefect/concurrency/sync.py +23 -44
  40. prefect/concurrency/v1/_asyncio.py +63 -0
  41. prefect/concurrency/v1/{events.py → _events.py} +13 -15
  42. prefect/concurrency/v1/asyncio.py +27 -80
  43. prefect/concurrency/v1/context.py +6 -4
  44. prefect/concurrency/v1/services.py +33 -79
  45. prefect/concurrency/v1/sync.py +18 -37
  46. prefect/context.py +66 -70
  47. prefect/deployments/base.py +4 -144
  48. prefect/deployments/flow_runs.py +12 -2
  49. prefect/deployments/runner.py +11 -3
  50. prefect/deployments/steps/pull.py +13 -0
  51. prefect/events/clients.py +7 -1
  52. prefect/events/schemas/events.py +3 -2
  53. prefect/flow_engine.py +54 -47
  54. prefect/flows.py +2 -1
  55. prefect/futures.py +42 -27
  56. prefect/input/run_input.py +2 -1
  57. prefect/locking/filesystem.py +8 -7
  58. prefect/locking/memory.py +5 -3
  59. prefect/locking/protocol.py +1 -1
  60. prefect/main.py +1 -3
  61. prefect/plugins.py +12 -10
  62. prefect/results.py +3 -308
  63. prefect/runner/storage.py +87 -21
  64. prefect/serializers.py +32 -25
  65. prefect/settings/legacy.py +4 -4
  66. prefect/settings/models/api.py +3 -3
  67. prefect/settings/models/cli.py +3 -3
  68. prefect/settings/models/client.py +5 -3
  69. prefect/settings/models/cloud.py +3 -3
  70. prefect/settings/models/deployments.py +3 -3
  71. prefect/settings/models/experiments.py +4 -2
  72. prefect/settings/models/flows.py +3 -3
  73. prefect/settings/models/internal.py +4 -2
  74. prefect/settings/models/logging.py +4 -3
  75. prefect/settings/models/results.py +3 -3
  76. prefect/settings/models/root.py +3 -2
  77. prefect/settings/models/runner.py +4 -4
  78. prefect/settings/models/server/api.py +3 -3
  79. prefect/settings/models/server/database.py +11 -4
  80. prefect/settings/models/server/deployments.py +6 -2
  81. prefect/settings/models/server/ephemeral.py +4 -2
  82. prefect/settings/models/server/events.py +3 -2
  83. prefect/settings/models/server/flow_run_graph.py +6 -2
  84. prefect/settings/models/server/root.py +3 -3
  85. prefect/settings/models/server/services.py +26 -11
  86. prefect/settings/models/server/tasks.py +6 -3
  87. prefect/settings/models/server/ui.py +3 -3
  88. prefect/settings/models/tasks.py +5 -5
  89. prefect/settings/models/testing.py +3 -3
  90. prefect/settings/models/worker.py +5 -3
  91. prefect/settings/profiles.py +15 -2
  92. prefect/states.py +4 -7
  93. prefect/task_engine.py +54 -75
  94. prefect/tasks.py +84 -32
  95. prefect/telemetry/processors.py +6 -6
  96. prefect/telemetry/run_telemetry.py +13 -8
  97. prefect/telemetry/services.py +32 -31
  98. prefect/transactions.py +4 -15
  99. prefect/utilities/_git.py +34 -0
  100. prefect/utilities/asyncutils.py +1 -1
  101. prefect/utilities/engine.py +3 -19
  102. prefect/utilities/generics.py +18 -0
  103. prefect/workers/__init__.py +2 -0
  104. {prefect_client-3.1.9.dist-info → prefect_client-3.1.11.dist-info}/METADATA +1 -1
  105. {prefect_client-3.1.9.dist-info → prefect_client-3.1.11.dist-info}/RECORD +108 -99
  106. prefect/records/__init__.py +0 -1
  107. prefect/records/base.py +0 -235
  108. prefect/records/filesystem.py +0 -213
  109. prefect/records/memory.py +0 -184
  110. prefect/records/result_store.py +0 -70
  111. {prefect_client-3.1.9.dist-info → prefect_client-3.1.11.dist-info}/LICENSE +0 -0
  112. {prefect_client-3.1.9.dist-info → prefect_client-3.1.11.dist-info}/WHEEL +0 -0
  113. {prefect_client-3.1.9.dist-info → prefect_client-3.1.11.dist-info}/top_level.txt +0 -0
prefect/concurrency/{events.py → _events.py}

@@ -1,4 +1,4 @@
-from typing import Dict, List, Literal, Optional, Union
+from typing import Literal, Optional, Union
 from uuid import UUID
 
 from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
@@ -8,11 +8,11 @@ from prefect.events import Event, RelatedResource, emit_event
 def _emit_concurrency_event(
     phase: Union[Literal["acquired"], Literal["released"]],
     primary_limit: MinimalConcurrencyLimitResponse,
-    related_limits: List[MinimalConcurrencyLimitResponse],
+    related_limits: list[MinimalConcurrencyLimitResponse],
     slots: int,
     follows: Union[Event, None] = None,
 ) -> Union[Event, None]:
-    resource: Dict[str, str] = {
+    resource: dict[str, str] = {
         "prefect.resource.id": f"prefect.concurrency-limit.{primary_limit.id}",
         "prefect.resource.name": primary_limit.name,
         "slots-acquired": str(slots),
@@ -38,11 +38,11 @@ def _emit_concurrency_event(
     )
 
 
-def _emit_concurrency_acquisition_events(
-    limits: List[MinimalConcurrencyLimitResponse],
+def emit_concurrency_acquisition_events(
+    limits: list[MinimalConcurrencyLimitResponse],
     occupy: int,
-) -> Dict[UUID, Optional[Event]]:
-    events = {}
+) -> dict[UUID, Optional[Event]]:
+    events: dict[UUID, Optional[Event]] = {}
     for limit in limits:
         event = _emit_concurrency_event("acquired", limit, limits, occupy)
         events[limit.id] = event
@@ -50,10 +50,10 @@ def _emit_concurrency_acquisition_events(
     return events
 
 
-def _emit_concurrency_release_events(
-    limits: List[MinimalConcurrencyLimitResponse],
+def emit_concurrency_release_events(
+    limits: list[MinimalConcurrencyLimitResponse],
     occupy: int,
-    events: Dict[UUID, Optional[Event]],
+    events: dict[UUID, Optional[Event]],
 ) -> None:
     for limit in limits:
         _emit_concurrency_event("released", limit, limits, occupy, events[limit.id])
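The event helpers lose their leading underscores while the module itself becomes private (`events.py` → `_events.py`); behavior is unchanged. Acquisition still returns a dict of events keyed by limit ID, and release passes each event back as `follows` so every "released" event is linked to its "acquired" counterpart. A minimal sketch of that pairing, using a stand-in limit object (real callers get these back from the orchestration API):

    from uuid import uuid4

    from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
    from prefect.concurrency._events import (
        emit_concurrency_acquisition_events,
        emit_concurrency_release_events,
    )

    # Stand-in limit; real code receives these from the increment endpoint.
    limits = [MinimalConcurrencyLimitResponse(id=uuid4(), name="database", limit=5)]

    # Returns {limit.id: Event | None}; passing that dict to the release helper
    # lets each "released" event follow its "acquired" counterpart.
    events = emit_concurrency_acquisition_events(limits, occupy=1)
    emit_concurrency_release_events(limits, occupy=1, events=events)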
prefect/concurrency/asyncio.py

@@ -1,42 +1,25 @@
-import asyncio
+from collections.abc import AsyncGenerator
 from contextlib import asynccontextmanager
-from typing import AsyncGenerator, List, Literal, Optional, Union, cast
+from typing import Optional, Union
 
 import anyio
-import httpx
 import pendulum
 
-from prefect._internal.compatibility.deprecated import deprecated_parameter
-
-try:
-    from pendulum import Interval
-except ImportError:
-    # pendulum < 3
-    from pendulum.period import Period as Interval  # type: ignore
-
-from prefect.client.orchestration import get_client
-from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
-from prefect.logging.loggers import get_run_logger
-
-from .context import ConcurrencyContext
-from .events import (
-    _emit_concurrency_acquisition_events,
-    _emit_concurrency_release_events,
+from ._asyncio import (
+    AcquireConcurrencySlotTimeoutError as AcquireConcurrencySlotTimeoutError,
 )
-from .services import ConcurrencySlotAcquisitionService
-
-
-class ConcurrencySlotAcquisitionError(Exception):
-    """Raised when an unhandlable occurs while acquiring concurrency slots."""
-
-
-class AcquireConcurrencySlotTimeoutError(TimeoutError):
-    """Raised when acquiring a concurrency slot times out."""
+from ._asyncio import ConcurrencySlotAcquisitionError as ConcurrencySlotAcquisitionError
+from ._asyncio import aacquire_concurrency_slots, arelease_concurrency_slots
+from ._events import (
+    emit_concurrency_acquisition_events,
+    emit_concurrency_release_events,
+)
+from .context import ConcurrencyContext
 
 
 @asynccontextmanager
 async def concurrency(
-    names: Union[str, List[str]],
+    names: Union[str, list[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
     max_retries: Optional[int] = None,
@@ -78,7 +61,7 @@ async def concurrency(
 
     names = names if isinstance(names, list) else [names]
 
-    limits = await _aacquire_concurrency_slots(
+    limits = await aacquire_concurrency_slots(
         names,
         occupy,
         timeout_seconds=timeout_seconds,
@@ -87,14 +70,14 @@ async def concurrency(
         strict=strict,
     )
     acquisition_time = pendulum.now("UTC")
-    emitted_events = _emit_concurrency_acquisition_events(limits, occupy)
+    emitted_events = emit_concurrency_acquisition_events(limits, occupy)
 
     try:
         yield
     finally:
-        occupancy_period = cast(Interval, (pendulum.now("UTC") - acquisition_time))
+        occupancy_period = pendulum.now("UTC") - acquisition_time
         try:
-            await _arelease_concurrency_slots(
+            await arelease_concurrency_slots(
                 names, occupy, occupancy_period.total_seconds()
             )
         except anyio.get_cancelled_exc_class():
@@ -106,11 +89,11 @@ async def concurrency(
                     (names, occupy, occupancy_period.total_seconds())
                 )
 
-        _emit_concurrency_release_events(limits, occupy, emitted_events)
+        emit_concurrency_release_events(limits, occupy, emitted_events)
 
 
 async def rate_limit(
-    names: Union[str, List[str]],
+    names: Union[str, list[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
     create_if_missing: Optional[bool] = None,
@@ -137,7 +120,7 @@ async def rate_limit(
 
     names = names if isinstance(names, list) else [names]
 
-    limits = await _aacquire_concurrency_slots(
+    limits = await aacquire_concurrency_slots(
         names,
         occupy,
         mode="rate_limit",
@@ -145,71 +128,4 @@ async def rate_limit(
         create_if_missing=create_if_missing,
         strict=strict,
     )
-    _emit_concurrency_acquisition_events(limits, occupy)
-
-
-@deprecated_parameter(
-    name="create_if_missing",
-    start_date="Sep 2024",
-    end_date="Oct 2024",
-    when=lambda x: x is not None,
-    help="Limits must be explicitly created before acquiring concurrency slots; see `strict` if you want to enforce this behavior.",
-)
-async def _aacquire_concurrency_slots(
-    names: List[str],
-    slots: int,
-    mode: Literal["concurrency", "rate_limit"] = "concurrency",
-    timeout_seconds: Optional[float] = None,
-    create_if_missing: Optional[bool] = None,
-    max_retries: Optional[int] = None,
-    strict: bool = False,
-) -> List[MinimalConcurrencyLimitResponse]:
-    service = ConcurrencySlotAcquisitionService.instance(frozenset(names))
-    future = service.send(
-        (slots, mode, timeout_seconds, create_if_missing, max_retries)
-    )
-    response_or_exception = await asyncio.wrap_future(future)
-
-    if isinstance(response_or_exception, Exception):
-        if isinstance(response_or_exception, TimeoutError):
-            raise AcquireConcurrencySlotTimeoutError(
-                f"Attempt to acquire concurrency slots timed out after {timeout_seconds} second(s)"
-            ) from response_or_exception
-
-        raise ConcurrencySlotAcquisitionError(
-            f"Unable to acquire concurrency slots on {names!r}"
-        ) from response_or_exception
-
-    retval = _response_to_minimal_concurrency_limit_response(response_or_exception)
-
-    if strict and not retval:
-        raise ConcurrencySlotAcquisitionError(
-            f"Concurrency limits {names!r} must be created before acquiring slots"
-        )
-    elif not retval:
-        try:
-            logger = get_run_logger()
-            logger.warning(
-                f"Concurrency limits {names!r} do not exist - skipping acquisition."
-            )
-        except Exception:
-            pass
-    return retval
-
-
-async def _arelease_concurrency_slots(
-    names: List[str], slots: int, occupancy_seconds: float
-) -> List[MinimalConcurrencyLimitResponse]:
-    async with get_client() as client:
-        response = await client.release_concurrency_slots(
-            names=names, slots=slots, occupancy_seconds=occupancy_seconds
-        )
-        return _response_to_minimal_concurrency_limit_response(response)
-
-
-def _response_to_minimal_concurrency_limit_response(
-    response: httpx.Response,
-) -> List[MinimalConcurrencyLimitResponse]:
-    return [
-        MinimalConcurrencyLimitResponse.model_validate(obj_) for obj_ in response.json()
-    ]
+    emit_concurrency_acquisition_events(limits, occupy)
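The public surface here is unchanged: `concurrency` and `rate_limit` behave as before, only the slot-acquisition plumbing moved into the new private `_asyncio` module and the pendulum `Interval` cast was dropped. A quick usage sketch of the async API; the limit names are placeholders for global concurrency limits you have already created:

    import asyncio

    from prefect.concurrency.asyncio import concurrency, rate_limit


    async def main() -> None:
        # Wait for a free slot on the "database" limit, run the block, then
        # release the slot; acquire/release events are emitted around the block.
        async with concurrency("database", occupy=1, timeout_seconds=30):
            ...  # the concurrency-sensitive work

        # rate_limit only acquires slots (against a limit configured with slot
        # decay) and returns without releasing them.
        await rate_limit("outbound-api", occupy=1)


    if __name__ == "__main__":
        asyncio.run(main())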
prefect/concurrency/context.py

@@ -1,19 +1,21 @@
 from contextvars import ContextVar
-from typing import List, Tuple
+from typing import Any, ClassVar
+
+from typing_extensions import Self
 
 from prefect.client.orchestration import get_client
 from prefect.context import ContextModel, Field
 
 
 class ConcurrencyContext(ContextModel):
-    __var__: ContextVar = ContextVar("concurrency")
+    __var__: ClassVar[ContextVar[Self]] = ContextVar("concurrency")
 
     # Track the slots that have been acquired but were not able to be released
     # due to cancellation or some other error. These slots are released when
     # the context manager exits.
-    cleanup_slots: List[Tuple[List[str], int, float]] = Field(default_factory=list)
+    cleanup_slots: list[tuple[list[str], int, float]] = Field(default_factory=list)
 
-    def __exit__(self, *exc_info):
+    def __exit__(self, *exc_info: Any) -> None:
         if self.cleanup_slots:
             with get_client(sync_client=True) as client:
                 for names, occupy, occupancy_seconds in self.cleanup_slots:
prefect/concurrency/services.py

@@ -1,31 +1,30 @@
 import asyncio
-import concurrent.futures
+from collections.abc import AsyncGenerator
 from contextlib import asynccontextmanager
-from typing import (
-    TYPE_CHECKING,
-    AsyncGenerator,
-    FrozenSet,
-    Optional,
-    Tuple,
-)
+from typing import TYPE_CHECKING, Optional
 
 import httpx
 from starlette import status
+from typing_extensions import TypeAlias, Unpack
 
 from prefect._internal.concurrency import logger
-from prefect._internal.concurrency.services import QueueService
+from prefect._internal.concurrency.services import FutureQueueService
 from prefect.client.orchestration import get_client
 from prefect.utilities.timeout import timeout_async
 
 if TYPE_CHECKING:
     from prefect.client.orchestration import PrefectClient
 
+_Item: TypeAlias = tuple[int, str, Optional[float], Optional[bool], Optional[int]]
 
-class ConcurrencySlotAcquisitionService(QueueService):
-    def __init__(self, concurrency_limit_names: FrozenSet[str]):
+
+class ConcurrencySlotAcquisitionService(
+    FutureQueueService[Unpack[_Item], httpx.Response]
+):
+    def __init__(self, concurrency_limit_names: frozenset[str]):
         super().__init__(concurrency_limit_names)
-        self._client: "PrefectClient"
-        self.concurrency_limit_names = sorted(list(concurrency_limit_names))
+        self._client: PrefectClient
+        self.concurrency_limit_names: list[str] = sorted(list(concurrency_limit_names))
 
     @asynccontextmanager
     async def _lifespan(self) -> AsyncGenerator[None, None]:
@@ -33,32 +32,7 @@ class ConcurrencySlotAcquisitionService(QueueService):
             self._client = client
             yield
 
-    async def _handle(
-        self,
-        item: Tuple[
-            int,
-            str,
-            Optional[float],
-            concurrent.futures.Future,
-            Optional[bool],
-            Optional[int],
-        ],
-    ) -> None:
-        occupy, mode, timeout_seconds, future, create_if_missing, max_retries = item
-        try:
-            response = await self.acquire_slots(
-                occupy, mode, timeout_seconds, create_if_missing, max_retries
-            )
-        except Exception as exc:
-            # If the request to the increment endpoint fails in a non-standard
-            # way, we need to set the future's result so that the caller can
-            # handle the exception and then re-raise.
-            future.set_result(exc)
-            raise exc
-        else:
-            future.set_result(response)
-
-    async def acquire_slots(
+    async def acquire(
         self,
         slots: int,
         mode: str,
@@ -69,44 +43,22 @@ class ConcurrencySlotAcquisitionService(QueueService):
         with timeout_async(seconds=timeout_seconds):
             while True:
                 try:
-                    response = await self._client.increment_concurrency_slots(
+                    return await self._client.increment_concurrency_slots(
                         names=self.concurrency_limit_names,
                         slots=slots,
                         mode=mode,
                         create_if_missing=create_if_missing,
                     )
-                except Exception as exc:
-                    if (
-                        isinstance(exc, httpx.HTTPStatusError)
-                        and exc.response.status_code == status.HTTP_423_LOCKED
-                    ):
-                        if max_retries is not None and max_retries <= 0:
-                            raise exc
-                        retry_after = float(exc.response.headers["Retry-After"])
-                        logger.debug(
-                            f"Unable to acquire concurrency slot. Retrying in {retry_after} second(s)."
-                        )
-                        await asyncio.sleep(retry_after)
-                        if max_retries is not None:
-                            max_retries -= 1
-                    else:
-                        raise exc
-                else:
-                    return response
-
-    def send(
-        self, item: Tuple[int, str, Optional[float], Optional[bool], Optional[int]]
-    ) -> concurrent.futures.Future:
-        with self._lock:
-            if self._stopped:
-                raise RuntimeError("Cannot put items in a stopped service instance.")
+                except httpx.HTTPStatusError as exc:
+                    if not exc.response.status_code == status.HTTP_423_LOCKED:
+                        raise
 
-            logger.debug("Service %r enqueuing item %r", self, item)
-            future: concurrent.futures.Future = concurrent.futures.Future()
-
-            occupy, mode, timeout_seconds, create_if_missing, max_retries = item
-            self._queue.put_nowait(
-                (occupy, mode, timeout_seconds, future, create_if_missing, max_retries)
-            )
-
-            return future
+                    if max_retries is not None and max_retries <= 0:
+                        raise exc
+                    retry_after = float(exc.response.headers["Retry-After"])
+                    logger.debug(
+                        f"Unable to acquire concurrency slot. Retrying in {retry_after} second(s)."
+                    )
+                    await asyncio.sleep(retry_after)
+                    if max_retries is not None:
+                        max_retries -= 1
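The service now builds on the generic `FutureQueueService` from `prefect._internal.concurrency.services`, so the hand-rolled `send()`/`_handle()` future bookkeeping is gone, and only 423 Locked responses are retried (honoring the `Retry-After` header). Based on the removed `_aacquire_concurrency_slots` above and the v1 `_asyncio.py` added below, the caller side presumably still looks roughly like this sketch (item values are illustrative):

    import asyncio

    from prefect.concurrency.services import ConcurrencySlotAcquisitionService


    async def acquire_slots_directly(names: list[str], slots: int) -> list[dict]:
        # One shared service instance per unique set of limit names.
        service = ConcurrencySlotAcquisitionService.instance(frozenset(names))

        # Enqueue an _Item tuple: (slots, mode, timeout_seconds, create_if_missing,
        # max_retries). The service worker calls `acquire()` and resolves the
        # returned future with the httpx.Response; failures propagate on await.
        future = service.send((slots, "concurrency", None, None, None))
        response = await asyncio.wrap_future(future)
        return response.json()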
prefect/concurrency/sync.py

@@ -1,71 +1,54 @@
+from collections.abc import Generator
 from contextlib import contextmanager
-from typing import (
-    Generator,
-    List,
-    Optional,
-    TypeVar,
-    Union,
-    cast,
-)
+from typing import Optional, TypeVar, Union
 
 import pendulum
 from typing_extensions import Literal
 
-from prefect.utilities.asyncutils import run_coro_as_sync
-
-try:
-    from pendulum import Interval
-except ImportError:
-    # pendulum < 3
-    from pendulum.period import Period as Interval  # type: ignore
-
 from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
+from prefect.utilities.asyncutils import run_coro_as_sync
 
-from .asyncio import (
-    _aacquire_concurrency_slots,
-    _arelease_concurrency_slots,
+from ._asyncio import (
+    aacquire_concurrency_slots,
+    arelease_concurrency_slots,
 )
-from .events import (
-    _emit_concurrency_acquisition_events,
-    _emit_concurrency_release_events,
+from ._events import (
+    emit_concurrency_acquisition_events,
+    emit_concurrency_release_events,
 )
 
 T = TypeVar("T")
 
 
 def _release_concurrency_slots(
-    names: List[str], slots: int, occupancy_seconds: float
-) -> List[MinimalConcurrencyLimitResponse]:
+    names: list[str], slots: int, occupancy_seconds: float
+) -> list[MinimalConcurrencyLimitResponse]:
     result = run_coro_as_sync(
-        _arelease_concurrency_slots(names, slots, occupancy_seconds)
+        arelease_concurrency_slots(names, slots, occupancy_seconds)
     )
-    if result is None:
-        raise RuntimeError("Failed to release concurrency slots")
     return result
 
 
 def _acquire_concurrency_slots(
-    names: List[str],
+    names: list[str],
     slots: int,
     mode: Literal["concurrency", "rate_limit"] = "concurrency",
     timeout_seconds: Optional[float] = None,
     create_if_missing: Optional[bool] = None,
     max_retries: Optional[int] = None,
     strict: bool = False,
-) -> List[MinimalConcurrencyLimitResponse]:
+) -> list[MinimalConcurrencyLimitResponse]:
     result = run_coro_as_sync(
-        _aacquire_concurrency_slots(
+        aacquire_concurrency_slots(
            names, slots, mode, timeout_seconds, create_if_missing, max_retries, strict
        )
    )
-    if result is None:
-        raise RuntimeError("Failed to acquire concurrency slots")
     return result
 
 
 @contextmanager
 def concurrency(
-    names: Union[str, List[str]],
+    names: Union[str, list[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
     max_retries: Optional[int] = None,
@@ -107,7 +90,7 @@ def concurrency(
 
     names = names if isinstance(names, list) else [names]
 
-    limits: List[MinimalConcurrencyLimitResponse] = _acquire_concurrency_slots(
+    limits: list[MinimalConcurrencyLimitResponse] = _acquire_concurrency_slots(
         names,
         occupy,
         timeout_seconds=timeout_seconds,
@@ -116,22 +99,18 @@ def concurrency(
         max_retries=max_retries,
     )
     acquisition_time = pendulum.now("UTC")
-    emitted_events = _emit_concurrency_acquisition_events(limits, occupy)
+    emitted_events = emit_concurrency_acquisition_events(limits, occupy)
 
     try:
         yield
     finally:
-        occupancy_period = cast(Interval, pendulum.now("UTC") - acquisition_time)
-        _release_concurrency_slots(
-            names,
-            occupy,
-            occupancy_period.total_seconds(),
-        )
-        _emit_concurrency_release_events(limits, occupy, emitted_events)
+        occupancy_period = pendulum.now("UTC") - acquisition_time
+        _release_concurrency_slots(names, occupy, occupancy_period.total_seconds())
+        emit_concurrency_release_events(limits, occupy, emitted_events)
 
 
 def rate_limit(
-    names: Union[str, List[str]],
+    names: Union[str, list[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
     create_if_missing: Optional[bool] = None,
@@ -166,4 +145,4 @@ def rate_limit(
         create_if_missing=create_if_missing,
         strict=strict,
     )
-    _emit_concurrency_acquisition_events(limits, occupy)
+    emit_concurrency_acquisition_events(limits, occupy)
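The sync module mirrors the asyncio changes: the pendulum `Interval` cast and the redundant `result is None` guards are gone, and the helpers now come from `_asyncio`/`_events`. Usage is unchanged; a minimal sketch with placeholder limit names:

    from prefect.concurrency.sync import concurrency, rate_limit


    def load_batch(rows: list[dict]) -> None:
        # Hold one slot on the "database" limit for the duration of the block.
        with concurrency("database", occupy=1, timeout_seconds=60):
            ...  # write the rows

        # Acquire (without releasing) a slot on a decay-based limit before calling out.
        rate_limit("outbound-api")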
prefect/concurrency/v1/_asyncio.py (new file)

@@ -0,0 +1,63 @@
+import asyncio
+from typing import Optional
+from uuid import UUID
+
+import httpx
+
+from prefect.client.orchestration import get_client
+from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
+from prefect.utilities.asyncutils import sync_compatible
+
+from .services import ConcurrencySlotAcquisitionService
+
+
+class ConcurrencySlotAcquisitionError(Exception):
+    """Raised when an unhandlable occurs while acquiring concurrency slots."""
+
+
+class AcquireConcurrencySlotTimeoutError(TimeoutError):
+    """Raised when acquiring a concurrency slot times out."""
+
+
+@sync_compatible
+async def acquire_concurrency_slots(
+    names: list[str],
+    task_run_id: UUID,
+    timeout_seconds: Optional[float] = None,
+) -> list[MinimalConcurrencyLimitResponse]:
+    service = ConcurrencySlotAcquisitionService.instance(frozenset(names))
+    future = service.send((task_run_id, timeout_seconds))
+    try:
+        response = await asyncio.wrap_future(future)
+    except TimeoutError as timeout:
+        raise AcquireConcurrencySlotTimeoutError(
+            f"Attempt to acquire concurrency limits timed out after {timeout_seconds} second(s)"
+        ) from timeout
+    except Exception as exc:
+        raise ConcurrencySlotAcquisitionError(
+            f"Unable to acquire concurrency limits {names!r}"
+        ) from exc
+    else:
+        return _response_to_concurrency_limit_response(response)
+
+
+@sync_compatible
+async def release_concurrency_slots(
+    names: list[str], task_run_id: UUID, occupancy_seconds: float
+) -> list[MinimalConcurrencyLimitResponse]:
+    async with get_client() as client:
+        response = await client.decrement_v1_concurrency_slots(
+            names=names,
+            task_run_id=task_run_id,
+            occupancy_seconds=occupancy_seconds,
+        )
+        return _response_to_concurrency_limit_response(response)
+
+
+def _response_to_concurrency_limit_response(
+    response: httpx.Response,
+) -> list[MinimalConcurrencyLimitResponse]:
+    data: list[MinimalConcurrencyLimitResponse] = response.json() or []
+    return [
+        MinimalConcurrencyLimitResponse.model_validate(limit) for limit in data if data
+    ]
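This new module holds the v1 (tag-based, task-run scoped) counterparts: both helpers are `@sync_compatible`, keyed by `task_run_id`, and release goes through the client's `decrement_v1_concurrency_slots` endpoint. Normally the task engine drives these; a rough standalone sketch, assuming a v1 concurrency limit exists for the tag "db":

    import asyncio
    import time
    from typing import Optional
    from uuid import UUID, uuid4

    from prefect.concurrency.v1._asyncio import (
        acquire_concurrency_slots,
        release_concurrency_slots,
    )


    async def run_under_tag_limit(task_run_id: Optional[UUID] = None) -> None:
        # Illustrative ID; in real use this is the actual task run's ID.
        task_run_id = task_run_id or uuid4()
        start = time.monotonic()

        # Blocks (via the acquisition service) until the "db" tag has capacity.
        await acquire_concurrency_slots(["db"], task_run_id, timeout_seconds=30)
        try:
            ...  # the task's work
        finally:
            await release_concurrency_slots(
                ["db"], task_run_id, occupancy_seconds=time.monotonic() - start
            )


    if __name__ == "__main__":
        asyncio.run(run_under_tag_limit())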
prefect/concurrency/v1/{events.py → _events.py}

@@ -1,18 +1,18 @@
-from typing import Dict, List, Literal, Optional, Union
+from typing import Literal, Optional, Union
 from uuid import UUID
 
 from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
 from prefect.events import Event, RelatedResource, emit_event
 
 
-def _emit_concurrency_event(
+def emit_concurrency_event(
     phase: Union[Literal["acquired"], Literal["released"]],
     primary_limit: MinimalConcurrencyLimitResponse,
-    related_limits: List[MinimalConcurrencyLimitResponse],
+    related_limits: list[MinimalConcurrencyLimitResponse],
     task_run_id: UUID,
     follows: Union[Event, None] = None,
 ) -> Union[Event, None]:
-    resource: Dict[str, str] = {
+    resource: dict[str, str] = {
         "prefect.resource.id": f"prefect.concurrency-limit.v1.{primary_limit.id}",
         "prefect.resource.name": primary_limit.name,
         "limit": str(primary_limit.limit),
@@ -38,24 +38,22 @@ def _emit_concurrency_event(
     )
 
 
-def _emit_concurrency_acquisition_events(
-    limits: List[MinimalConcurrencyLimitResponse],
+def emit_concurrency_acquisition_events(
+    limits: list[MinimalConcurrencyLimitResponse],
     task_run_id: UUID,
-) -> Dict[UUID, Optional[Event]]:
-    events = {}
+) -> dict[UUID, Optional[Event]]:
+    events: dict[UUID, Optional[Event]] = {}
     for limit in limits:
-        event = _emit_concurrency_event("acquired", limit, limits, task_run_id)
+        event = emit_concurrency_event("acquired", limit, limits, task_run_id)
         events[limit.id] = event
 
     return events
 
 
-def _emit_concurrency_release_events(
-    limits: List[MinimalConcurrencyLimitResponse],
-    events: Dict[UUID, Optional[Event]],
+def emit_concurrency_release_events(
+    limits: list[MinimalConcurrencyLimitResponse],
+    events: dict[UUID, Optional[Event]],
     task_run_id: UUID,
 ) -> None:
     for limit in limits:
-        _emit_concurrency_event(
-            "released", limit, limits, task_run_id, events[limit.id]
-        )
+        emit_concurrency_event("released", limit, limits, task_run_id, events[limit.id])