prefect-client 3.0.0rc18__py3-none-any.whl → 3.0.0rc20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. prefect/__init__.py +0 -3
  2. prefect/_internal/concurrency/services.py +14 -0
  3. prefect/_internal/schemas/bases.py +1 -0
  4. prefect/blocks/core.py +41 -30
  5. prefect/blocks/system.py +48 -12
  6. prefect/client/cloud.py +56 -7
  7. prefect/client/collections.py +1 -1
  8. prefect/client/orchestration.py +111 -8
  9. prefect/client/schemas/objects.py +40 -2
  10. prefect/concurrency/asyncio.py +8 -2
  11. prefect/concurrency/services.py +16 -6
  12. prefect/concurrency/sync.py +4 -1
  13. prefect/concurrency/v1/__init__.py +0 -0
  14. prefect/concurrency/v1/asyncio.py +143 -0
  15. prefect/concurrency/v1/context.py +27 -0
  16. prefect/concurrency/v1/events.py +61 -0
  17. prefect/concurrency/v1/services.py +116 -0
  18. prefect/concurrency/v1/sync.py +92 -0
  19. prefect/context.py +2 -2
  20. prefect/deployments/flow_runs.py +0 -7
  21. prefect/deployments/runner.py +11 -0
  22. prefect/events/clients.py +41 -0
  23. prefect/events/related.py +72 -73
  24. prefect/events/utilities.py +2 -0
  25. prefect/events/worker.py +12 -3
  26. prefect/exceptions.py +6 -0
  27. prefect/flow_engine.py +5 -0
  28. prefect/flows.py +9 -2
  29. prefect/logging/handlers.py +4 -1
  30. prefect/main.py +8 -6
  31. prefect/records/base.py +74 -18
  32. prefect/records/filesystem.py +207 -0
  33. prefect/records/memory.py +16 -3
  34. prefect/records/result_store.py +19 -14
  35. prefect/results.py +232 -169
  36. prefect/runner/runner.py +7 -4
  37. prefect/settings.py +14 -15
  38. prefect/states.py +73 -18
  39. prefect/task_engine.py +127 -221
  40. prefect/task_worker.py +7 -39
  41. prefect/tasks.py +0 -7
  42. prefect/transactions.py +89 -27
  43. prefect/utilities/annotations.py +4 -3
  44. prefect/utilities/asyncutils.py +4 -4
  45. prefect/utilities/callables.py +1 -3
  46. prefect/utilities/dispatch.py +16 -11
  47. prefect/utilities/engine.py +1 -4
  48. prefect/utilities/schema_tools/hydration.py +13 -0
  49. prefect/workers/base.py +78 -18
  50. {prefect_client-3.0.0rc18.dist-info → prefect_client-3.0.0rc20.dist-info}/METADATA +3 -4
  51. {prefect_client-3.0.0rc18.dist-info → prefect_client-3.0.0rc20.dist-info}/RECORD +54 -48
  52. prefect/manifests.py +0 -21
  53. {prefect_client-3.0.0rc18.dist-info → prefect_client-3.0.0rc20.dist-info}/LICENSE +0 -0
  54. {prefect_client-3.0.0rc18.dist-info → prefect_client-3.0.0rc20.dist-info}/WHEEL +0 -0
  55. {prefect_client-3.0.0rc18.dist-info → prefect_client-3.0.0rc20.dist-info}/top_level.txt +0 -0
prefect/concurrency/services.py CHANGED
@@ -36,13 +36,18 @@ class ConcurrencySlotAcquisitionService(QueueService):
     async def _handle(
         self,
         item: Tuple[
-            int, str, Optional[float], concurrent.futures.Future, Optional[bool]
+            int,
+            str,
+            Optional[float],
+            concurrent.futures.Future,
+            Optional[bool],
+            Optional[int],
         ],
     ) -> None:
-        occupy, mode, timeout_seconds, future, create_if_missing = item
+        occupy, mode, timeout_seconds, future, create_if_missing, max_retries = item
         try:
             response = await self.acquire_slots(
-                occupy, mode, timeout_seconds, create_if_missing
+                occupy, mode, timeout_seconds, create_if_missing, max_retries
             )
         except Exception as exc:
             # If the request to the increment endpoint fails in a non-standard
@@ -59,6 +64,7 @@ class ConcurrencySlotAcquisitionService(QueueService):
         mode: str,
         timeout_seconds: Optional[float] = None,
         create_if_missing: Optional[bool] = False,
+        max_retries: Optional[int] = None,
     ) -> httpx.Response:
         with timeout_async(seconds=timeout_seconds):
             while True:
@@ -74,15 +80,19 @@ class ConcurrencySlotAcquisitionService(QueueService):
                         isinstance(exc, httpx.HTTPStatusError)
                         and exc.response.status_code == status.HTTP_423_LOCKED
                     ):
+                        if max_retries is not None and max_retries <= 0:
+                            raise exc
                         retry_after = float(exc.response.headers["Retry-After"])
                         await asyncio.sleep(retry_after)
+                        if max_retries is not None:
+                            max_retries -= 1
                     else:
                         raise exc
                 else:
                     return response

     def send(
-        self, item: Tuple[int, str, Optional[float], Optional[bool]]
+        self, item: Tuple[int, str, Optional[float], Optional[bool], Optional[int]]
     ) -> concurrent.futures.Future:
         with self._lock:
             if self._stopped:
@@ -91,9 +101,9 @@ class ConcurrencySlotAcquisitionService(QueueService):
             logger.debug("Service %r enqueuing item %r", self, item)
             future: concurrent.futures.Future = concurrent.futures.Future()

-            occupy, mode, timeout_seconds, create_if_missing = item
+            occupy, mode, timeout_seconds, create_if_missing, max_retries = item
             self._queue.put_nowait(
-                (occupy, mode, timeout_seconds, future, create_if_missing)
+                (occupy, mode, timeout_seconds, future, create_if_missing, max_retries)
             )

         return future
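The retry logic above is worth spelling out: each `423 Locked` response sleeps for the server-provided `Retry-After` interval and then consumes one retry, while a `max_retries` of `None` keeps the old unbounded behavior. Here is a minimal, self-contained sketch of the same pattern; `fetch` and `RetryAfterError` are hypothetical stand-ins for the HTTP call and the 423 response:

```python
import time
from typing import Callable, Optional


class RetryAfterError(Exception):
    """Hypothetical stand-in for an HTTP 423 Locked response with a Retry-After header."""

    def __init__(self, retry_after: float) -> None:
        super().__init__(f"locked, retry after {retry_after}s")
        self.retry_after = retry_after


def acquire_with_retries(
    fetch: Callable[[], object], max_retries: Optional[int] = None
) -> object:
    # Mirrors the loop above: None retries forever; otherwise each 423
    # consumes one retry, and a 423 arriving with no retries left re-raises.
    while True:
        try:
            return fetch()
        except RetryAfterError as exc:
            if max_retries is not None and max_retries <= 0:
                raise
            time.sleep(exc.retry_after)
            if max_retries is not None:
                max_retries -= 1
```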
prefect/concurrency/sync.py CHANGED
@@ -40,7 +40,8 @@ def concurrency(
     names: Union[str, List[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
-    create_if_missing: Optional[bool] = True,
+    create_if_missing: bool = True,
+    max_retries: Optional[int] = None,
 ) -> Generator[None, None, None]:
     """A context manager that acquires and releases concurrency slots from the
     given concurrency limits.
@@ -51,6 +52,7 @@ def concurrency(
         timeout_seconds: The number of seconds to wait for the slots to be acquired before
             raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
         create_if_missing: Whether to create the concurrency limits if they do not exist.
+        max_retries: The maximum number of retries to acquire the concurrency slots.

     Raises:
         TimeoutError: If the slots are not acquired within the given timeout.
@@ -80,6 +82,7 @@ def concurrency(
         occupy,
         timeout_seconds=timeout_seconds,
         create_if_missing=create_if_missing,
+        max_retries=max_retries,
     )
     acquisition_time = pendulum.now("UTC")
     emitted_events = _emit_concurrency_acquisition_events(limits, occupy)
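A usage sketch of the new parameter on the public sync API; the limit name "database" is illustrative:

```python
from prefect.concurrency.sync import concurrency


def process_batch(batch: list) -> None:
    # Tolerate at most three 423 Locked responses before giving up,
    # rather than retrying indefinitely; create_if_missing still
    # defaults to True.
    with concurrency("database", occupy=1, timeout_seconds=30, max_retries=3):
        ...  # resource-limited work
```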
prefect/concurrency/v1/__init__.py ADDED (empty file, no content to diff)

prefect/concurrency/v1/asyncio.py ADDED
@@ -0,0 +1,143 @@
+import asyncio
+from contextlib import asynccontextmanager
+from typing import AsyncGenerator, List, Optional, Union, cast
+from uuid import UUID
+
+import anyio
+import httpx
+import pendulum
+
+from ...client.schemas.responses import MinimalConcurrencyLimitResponse
+
+try:
+    from pendulum import Interval
+except ImportError:
+    # pendulum < 3
+    from pendulum.period import Period as Interval  # type: ignore
+
+from prefect.client.orchestration import get_client
+
+from .context import ConcurrencyContext
+from .events import (
+    _emit_concurrency_acquisition_events,
+    _emit_concurrency_release_events,
+)
+from .services import ConcurrencySlotAcquisitionService
+
+
+class ConcurrencySlotAcquisitionError(Exception):
+    """Raised when an unhandleable error occurs while acquiring concurrency slots."""
+
+
+class AcquireConcurrencySlotTimeoutError(TimeoutError):
+    """Raised when acquiring a concurrency slot times out."""
+
+
+@asynccontextmanager
+async def concurrency(
+    names: Union[str, List[str]],
+    task_run_id: UUID,
+    timeout_seconds: Optional[float] = None,
+) -> AsyncGenerator[None, None]:
+    """A context manager that acquires and releases concurrency slots from the
+    given concurrency limits.
+
+    Args:
+        names: The names of the concurrency limits to acquire slots from.
+        task_run_id: The ID of the task run that is incrementing the slots.
+        timeout_seconds: The number of seconds to wait for the slots to be acquired before
+            raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
+
+    Raises:
+        TimeoutError: If the slots are not acquired within the given timeout.
+
+    Example:
+        A simple example of using the async `concurrency` context manager:
+        ```python
+        from prefect.concurrency.v1.asyncio import concurrency
+
+        async def resource_heavy():
+            async with concurrency("test", task_run_id):
+                print("Resource heavy task")
+
+        async def main():
+            await resource_heavy()
+        ```
+    """
+    if not names:
+        yield
+        return
+
+    names_normalized: List[str] = names if isinstance(names, list) else [names]
+
+    limits = await _acquire_concurrency_slots(
+        names_normalized,
+        task_run_id=task_run_id,
+        timeout_seconds=timeout_seconds,
+    )
+    acquisition_time = pendulum.now("UTC")
+    emitted_events = _emit_concurrency_acquisition_events(limits, task_run_id)
+
+    try:
+        yield
+    finally:
+        occupancy_period = cast(Interval, (pendulum.now("UTC") - acquisition_time))
+        try:
+            await _release_concurrency_slots(
+                names_normalized, task_run_id, occupancy_period.total_seconds()
+            )
+        except anyio.get_cancelled_exc_class():
+            # The task was cancelled before it could release the slots. Add the
+            # slots to the cleanup list so they can be released when the
+            # concurrency context is exited.
+            if ctx := ConcurrencyContext.get():
+                ctx.cleanup_slots.append(
+                    (names_normalized, occupancy_period.total_seconds(), task_run_id)
+                )
+
+        _emit_concurrency_release_events(limits, emitted_events, task_run_id)
+
+
+async def _acquire_concurrency_slots(
+    names: List[str],
+    task_run_id: UUID,
+    timeout_seconds: Optional[float] = None,
+) -> List[MinimalConcurrencyLimitResponse]:
+    service = ConcurrencySlotAcquisitionService.instance(frozenset(names))
+    future = service.send((task_run_id, timeout_seconds))
+    response_or_exception = await asyncio.wrap_future(future)
+
+    if isinstance(response_or_exception, Exception):
+        if isinstance(response_or_exception, TimeoutError):
+            raise AcquireConcurrencySlotTimeoutError(
+                f"Attempt to acquire concurrency limits timed out after {timeout_seconds} second(s)"
+            ) from response_or_exception
+
+        raise ConcurrencySlotAcquisitionError(
+            f"Unable to acquire concurrency limits {names!r}"
+        ) from response_or_exception
+
+    return _response_to_concurrency_limit_response(response_or_exception)
+
+
+async def _release_concurrency_slots(
+    names: List[str],
+    task_run_id: UUID,
+    occupancy_seconds: float,
+) -> List[MinimalConcurrencyLimitResponse]:
+    async with get_client() as client:
+        response = await client.decrement_v1_concurrency_slots(
+            names=names,
+            task_run_id=task_run_id,
+            occupancy_seconds=occupancy_seconds,
+        )
+        return _response_to_concurrency_limit_response(response)
+
+
+def _response_to_concurrency_limit_response(
+    response: httpx.Response,
+) -> List[MinimalConcurrencyLimitResponse]:
+    data = response.json() or []
+    return [MinimalConcurrencyLimitResponse.model_validate(limit) for limit in data]
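A quick driving example for the new module, as a sketch: the `task_run_id` here is fabricated with `uuid4()` for illustration, whereas in real use the task engine supplies the ID of the running task:

```python
import asyncio
from uuid import uuid4

from prefect.concurrency.v1.asyncio import concurrency


async def main() -> None:
    task_run_id = uuid4()  # fabricated; normally the real task run's ID
    async with concurrency("database", task_run_id, timeout_seconds=30):
        print("holding a v1 concurrency slot for this task run")


asyncio.run(main())
```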
prefect/concurrency/v1/context.py ADDED
@@ -0,0 +1,27 @@
+from contextvars import ContextVar
+from typing import List, Tuple
+from uuid import UUID
+
+from prefect.client.orchestration import get_client
+from prefect.context import ContextModel, Field
+
+
+class ConcurrencyContext(ContextModel):
+    __var__: ContextVar = ContextVar("concurrency_v1")
+
+    # Track the limits that have been acquired but were not able to be released
+    # due to cancellation or some other error. These limits are released when
+    # the context manager exits.
+    cleanup_slots: List[Tuple[List[str], float, UUID]] = Field(default_factory=list)
+
+    def __exit__(self, *exc_info):
+        if self.cleanup_slots:
+            with get_client(sync_client=True) as client:
+                for names, occupancy_seconds, task_run_id in self.cleanup_slots:
+                    client.decrement_v1_concurrency_slots(
+                        names=names,
+                        occupancy_seconds=occupancy_seconds,
+                        task_run_id=task_run_id,
+                    )
+
+        return super().__exit__(*exc_info)
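A sketch of how this context is intended to be used (the engine normally enters it on the caller's behalf): slots whose release was interrupted by cancellation are parked in `cleanup_slots`, and `__exit__` decrements them synchronously on the way out.

```python
from prefect.concurrency.v1.context import ConcurrencyContext

with ConcurrencyContext():
    # Run v1-concurrency-guarded work here. If a release call inside
    # `concurrency(...)` is cancelled, the slot lands in cleanup_slots.
    ...
# Exiting the context replays any deferred decrements against the API.
```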
prefect/concurrency/v1/events.py ADDED
@@ -0,0 +1,61 @@
+from typing import Dict, List, Literal, Optional, Union
+from uuid import UUID
+
+from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
+from prefect.events import Event, RelatedResource, emit_event
+
+
+def _emit_concurrency_event(
+    phase: Union[Literal["acquired"], Literal["released"]],
+    primary_limit: MinimalConcurrencyLimitResponse,
+    related_limits: List[MinimalConcurrencyLimitResponse],
+    task_run_id: UUID,
+    follows: Union[Event, None] = None,
+) -> Union[Event, None]:
+    resource: Dict[str, str] = {
+        "prefect.resource.id": f"prefect.concurrency-limit.v1.{primary_limit.id}",
+        "prefect.resource.name": primary_limit.name,
+        "limit": str(primary_limit.limit),
+        "task_run_id": str(task_run_id),
+    }
+
+    related = [
+        RelatedResource.model_validate(
+            {
+                "prefect.resource.id": f"prefect.concurrency-limit.v1.{limit.id}",
+                "prefect.resource.role": "concurrency-limit",
+            }
+        )
+        for limit in related_limits
+        if limit.id != primary_limit.id
+    ]
+
+    return emit_event(
+        f"prefect.concurrency-limit.v1.{phase}",
+        resource=resource,
+        related=related,
+        follows=follows,
+    )
+
+
+def _emit_concurrency_acquisition_events(
+    limits: List[MinimalConcurrencyLimitResponse],
+    task_run_id: UUID,
+) -> Dict[UUID, Optional[Event]]:
+    events = {}
+    for limit in limits:
+        event = _emit_concurrency_event("acquired", limit, limits, task_run_id)
+        events[limit.id] = event
+
+    return events
+
+
+def _emit_concurrency_release_events(
+    limits: List[MinimalConcurrencyLimitResponse],
+    events: Dict[UUID, Optional[Event]],
+    task_run_id: UUID,
+) -> None:
+    for limit in limits:
+        _emit_concurrency_event(
+            "released", limit, limits, task_run_id, events[limit.id]
+        )
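As a concrete illustration of what `_emit_concurrency_event` builds, an `acquired` event for a limit named "database" would carry a resource payload like the one below, with every other limit acquired in the same call attached as a `concurrency-limit` related resource. The UUIDs are fabricated for illustration:

```python
# Hypothetical payload derived from _emit_concurrency_event above.
resource = {
    "prefect.resource.id": (
        "prefect.concurrency-limit.v1.00000000-0000-0000-0000-000000000001"
    ),
    "prefect.resource.name": "database",
    "limit": "10",
    "task_run_id": "00000000-0000-0000-0000-000000000002",
}
# Emitted as event "prefect.concurrency-limit.v1.acquired"; the matching
# "released" event passes the acquired event via `follows` to link the pair.
```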
prefect/concurrency/v1/services.py ADDED
@@ -0,0 +1,116 @@
+import asyncio
+import concurrent.futures
+from contextlib import asynccontextmanager
+from json import JSONDecodeError
+from typing import (
+    TYPE_CHECKING,
+    AsyncGenerator,
+    FrozenSet,
+    Optional,
+    Tuple,
+)
+from uuid import UUID
+
+import httpx
+from starlette import status
+
+from prefect._internal.concurrency import logger
+from prefect._internal.concurrency.services import QueueService
+from prefect.client.orchestration import get_client
+from prefect.utilities.timeout import timeout_async
+
+if TYPE_CHECKING:
+    from prefect.client.orchestration import PrefectClient
+
+
+class ConcurrencySlotAcquisitionServiceError(Exception):
+    """Raised when an error occurs while acquiring concurrency slots."""
+
+
+class ConcurrencySlotAcquisitionService(QueueService):
+    def __init__(self, concurrency_limit_names: FrozenSet[str]):
+        super().__init__(concurrency_limit_names)
+        self._client: "PrefectClient"
+        self.concurrency_limit_names = sorted(list(concurrency_limit_names))
+
+    @asynccontextmanager
+    async def _lifespan(self) -> AsyncGenerator[None, None]:
+        async with get_client() as client:
+            self._client = client
+            yield
+
+    async def _handle(
+        self,
+        item: Tuple[
+            UUID,
+            concurrent.futures.Future,
+            Optional[float],
+        ],
+    ) -> None:
+        task_run_id, future, timeout_seconds = item
+        try:
+            response = await self.acquire_slots(task_run_id, timeout_seconds)
+        except Exception as exc:
+            # If the request to the increment endpoint fails in a non-standard
+            # way, we need to set the future's result so that the caller can
+            # handle the exception and then re-raise.
+            future.set_result(exc)
+            raise exc
+        else:
+            future.set_result(response)
+
+    async def acquire_slots(
+        self,
+        task_run_id: UUID,
+        timeout_seconds: Optional[float] = None,
+    ) -> httpx.Response:
+        with timeout_async(seconds=timeout_seconds):
+            while True:
+                try:
+                    response = await self._client.increment_v1_concurrency_slots(
+                        task_run_id=task_run_id,
+                        names=self.concurrency_limit_names,
+                    )
+                except Exception as exc:
+                    if (
+                        isinstance(exc, httpx.HTTPStatusError)
+                        and exc.response.status_code == status.HTTP_423_LOCKED
+                    ):
+                        retry_after = exc.response.headers.get("Retry-After")
+                        if retry_after:
+                            retry_after = float(retry_after)
+                            await asyncio.sleep(retry_after)
+                        else:
+                            # We received a 423 but no Retry-After header. This
+                            # should indicate that the server told us to abort
+                            # because the concurrency limit is set to 0, i.e.
+                            # effectively disabled.
+                            try:
+                                reason = exc.response.json()["detail"]
+                            except (JSONDecodeError, KeyError):
+                                logger.error(
+                                    "Failed to parse response from concurrency limit 423 Locked response: %s",
+                                    exc.response.content,
+                                )
+                                reason = "Concurrency limit is locked (server did not specify the reason)"
+                            raise ConcurrencySlotAcquisitionServiceError(
+                                reason
+                            ) from exc
+
+                    else:
+                        raise exc  # type: ignore
+                else:
+                    return response
+
+    def send(self, item: Tuple[UUID, Optional[float]]) -> concurrent.futures.Future:
+        with self._lock:
+            if self._stopped:
+                raise RuntimeError("Cannot put items in a stopped service instance.")
+
+            logger.debug("Service %r enqueuing item %r", self, item)
+            future: concurrent.futures.Future = concurrent.futures.Future()
+
+            task_run_id, timeout_seconds = item
+            self._queue.put_nowait((task_run_id, future, timeout_seconds))
+
+        return future
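The send/await handshake of this service has one subtlety worth showing: on failure the future resolves to the exception instance rather than raising, so callers must check and re-raise, as `_acquire_concurrency_slots` does above. A minimal sketch, with a fabricated task run ID and an assumed "database" limit:

```python
import asyncio
from uuid import uuid4

from prefect.concurrency.v1.services import ConcurrencySlotAcquisitionService


async def main() -> None:
    # One shared service instance exists per frozenset of limit names.
    service = ConcurrencySlotAcquisitionService.instance(frozenset(["database"]))
    future = service.send((uuid4(), 30.0))  # (task_run_id, timeout_seconds)
    result = await asyncio.wrap_future(future)
    # httpx.Response on success; the exception instance on failure.
    if isinstance(result, Exception):
        raise result
```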
prefect/concurrency/v1/sync.py ADDED
@@ -0,0 +1,92 @@
+from contextlib import contextmanager
+from typing import (
+    Generator,
+    List,
+    Optional,
+    TypeVar,
+    Union,
+    cast,
+)
+from uuid import UUID
+
+import pendulum
+
+from ...client.schemas.responses import MinimalConcurrencyLimitResponse
+from ..sync import _call_async_function_from_sync
+
+try:
+    from pendulum import Interval
+except ImportError:
+    # pendulum < 3
+    from pendulum.period import Period as Interval  # type: ignore
+
+from .asyncio import (
+    _acquire_concurrency_slots,
+    _release_concurrency_slots,
+)
+from .events import (
+    _emit_concurrency_acquisition_events,
+    _emit_concurrency_release_events,
+)
+
+T = TypeVar("T")
+
+
+@contextmanager
+def concurrency(
+    names: Union[str, List[str]],
+    task_run_id: UUID,
+    timeout_seconds: Optional[float] = None,
+) -> Generator[None, None, None]:
+    """
+    A context manager that acquires and releases concurrency slots from the
+    given concurrency limits.
+
+    Args:
+        names: The names of the concurrency limits to acquire.
+        task_run_id: The task run ID acquiring the limits.
+        timeout_seconds: The number of seconds to wait to acquire the limits before
+            raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
+
+    Raises:
+        TimeoutError: If the limits are not acquired within the given timeout.
+
+    Example:
+        A simple example of using the sync `concurrency` context manager:
+        ```python
+        from prefect.concurrency.v1.sync import concurrency
+
+        def resource_heavy():
+            with concurrency("test", task_run_id):
+                print("Resource heavy task")
+
+        def main():
+            resource_heavy()
+        ```
+    """
+    if not names:
+        yield
+        return
+
+    names = names if isinstance(names, list) else [names]
+
+    limits: List[MinimalConcurrencyLimitResponse] = _call_async_function_from_sync(
+        _acquire_concurrency_slots,
+        names,
+        timeout_seconds=timeout_seconds,
+        task_run_id=task_run_id,
+    )
+    acquisition_time = pendulum.now("UTC")
+    emitted_events = _emit_concurrency_acquisition_events(limits, task_run_id)
+
+    try:
+        yield
+    finally:
+        occupancy_period = cast(Interval, pendulum.now("UTC") - acquisition_time)
+        _call_async_function_from_sync(
+            _release_concurrency_slots,
+            names,
+            task_run_id,
+            occupancy_period.total_seconds(),
+        )
+        _emit_concurrency_release_events(limits, emitted_events, task_run_id)
prefect/context.py CHANGED
@@ -131,7 +131,7 @@ class ContextModel(BaseModel):
         extra="forbid",
     )

-    def __enter__(self):
+    def __enter__(self) -> Self:
         if self._token is not None:
             raise RuntimeError(
                 "Context already entered. Context enter calls cannot be nested."
@@ -290,7 +290,7 @@ class AsyncClientContext(ContextModel):
         if ctx:
             yield ctx
         else:
-            with cls() as ctx:
+            async with cls() as ctx:
                 yield ctx

prefect/deployments/flow_runs.py CHANGED
@@ -5,7 +5,6 @@ from uuid import UUID
 import anyio
 import pendulum

-from prefect._internal.compatibility.deprecated import deprecated_parameter
 from prefect.client.schemas import FlowRun
 from prefect.client.utilities import inject_client
 from prefect.context import FlowRunContext, TaskRunContext
@@ -24,11 +23,6 @@ logger = get_logger(__name__)


 @sync_compatible
-@deprecated_parameter(
-    "infra_overrides",
-    start_date="Apr 2024",
-    help="Use `job_variables` instead.",
-)
 @inject_client
 async def run_deployment(
     name: Union[str, UUID],
@@ -42,7 +36,6 @@ async def run_deployment(
     idempotency_key: Optional[str] = None,
     work_queue_name: Optional[str] = None,
     as_subflow: Optional[bool] = True,
-    infra_overrides: Optional[dict] = None,
     job_variables: Optional[dict] = None,
 ) -> "FlowRun":
     """
prefect/deployments/runner.py CHANGED
@@ -143,6 +143,10 @@ class RunnerDeployment(BaseModel):
         default=None,
         description="The schedules that should cause this deployment to run.",
     )
+    concurrency_limit: Optional[int] = Field(
+        default=None,
+        description="The maximum number of concurrent runs of this deployment.",
+    )
     paused: Optional[bool] = Field(
         default=None, description="Whether or not the deployment is paused."
     )
@@ -274,6 +278,7 @@ class RunnerDeployment(BaseModel):
             version=self.version,
             paused=self.paused,
             schedules=self.schedules,
+            concurrency_limit=self.concurrency_limit,
             parameters=self.parameters,
             description=self.description,
             tags=self.tags,
@@ -432,6 +437,7 @@ class RunnerDeployment(BaseModel):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
+        concurrency_limit: Optional[int] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -485,6 +491,7 @@ class RunnerDeployment(BaseModel):
             name=Path(name).stem,
             flow_name=flow.name,
             schedules=constructed_schedules,
+            concurrency_limit=concurrency_limit,
             paused=paused,
             tags=tags or [],
             triggers=triggers or [],
@@ -558,6 +565,7 @@ class RunnerDeployment(BaseModel):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
+        concurrency_limit: Optional[int] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -614,6 +622,7 @@ class RunnerDeployment(BaseModel):
             name=Path(name).stem,
             flow_name=flow.name,
             schedules=constructed_schedules,
+            concurrency_limit=concurrency_limit,
             paused=paused,
             tags=tags or [],
             triggers=triggers or [],
@@ -646,6 +655,7 @@ class RunnerDeployment(BaseModel):
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
+        concurrency_limit: Optional[int] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -710,6 +720,7 @@ class RunnerDeployment(BaseModel):
             name=Path(name).stem,
             flow_name=flow.name,
             schedules=constructed_schedules,
+            concurrency_limit=concurrency_limit,
             paused=paused,
             tags=tags or [],
             triggers=triggers or [],
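The new field threads through each `RunnerDeployment` constructor shown above. A usage sketch, with an illustrative flow and deployment name:

```python
from prefect import flow
from prefect.deployments.runner import RunnerDeployment


@flow
def etl() -> None:
    ...


# Cap this deployment at two simultaneous flow runs.
deployment = RunnerDeployment.from_flow(
    flow=etl,
    name="etl-deployment",
    concurrency_limit=2,
)
```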
prefect/events/clients.py CHANGED
@@ -346,6 +346,47 @@ class PrefectEventsClient(EventsClient):
                     await asyncio.sleep(1)


+class AssertingPassthroughEventsClient(PrefectEventsClient):
+    """A Prefect Events client that BOTH records all events sent to it for inspection
+    during tests AND sends them to a Prefect server."""
+
+    last: ClassVar["Optional[AssertingPassthroughEventsClient]"] = None
+    all: ClassVar[List["AssertingPassthroughEventsClient"]] = []
+
+    args: Tuple
+    kwargs: Dict[str, Any]
+    events: List[Event]
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        AssertingPassthroughEventsClient.last = self
+        AssertingPassthroughEventsClient.all.append(self)
+        self.args = args
+        self.kwargs = kwargs
+
+    @classmethod
+    def reset(cls) -> None:
+        cls.last = None
+        cls.all = []
+
+    def pop_events(self) -> List[Event]:
+        events = self.events
+        self.events = []
+        return events
+
+    async def _emit(self, event: Event) -> None:
+        # actually send the event to the server
+        await super()._emit(event)
+
+        # record the event for inspection
+        self.events.append(event)
+
+    async def __aenter__(self) -> Self:
+        await super().__aenter__()
+        self.events = []
+        return self
+
+
 class PrefectCloudEventsClient(PrefectEventsClient):
     """A Prefect Events client that streams events to a Prefect Cloud Workspace"""
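A sketch of how this test double is intended to be used, assuming the test harness substitutes it for the regular events client; the asserted event name is illustrative:

```python
from prefect.events.clients import AssertingPassthroughEventsClient

# Events flow through to the server AND are captured for assertions.
AssertingPassthroughEventsClient.reset()

# ... exercise code under test that emits events ...

client = AssertingPassthroughEventsClient.last
assert client is not None
events = client.pop_events()  # returns and clears the captured events
assert any(e.event.startswith("prefect.concurrency-limit") for e in events)
```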