prefect-client 3.0.0rc13__py3-none-any.whl → 3.0.0rc15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. prefect/_internal/compatibility/deprecated.py +0 -53
  2. prefect/blocks/core.py +132 -4
  3. prefect/blocks/notifications.py +26 -3
  4. prefect/client/base.py +30 -24
  5. prefect/client/orchestration.py +121 -47
  6. prefect/client/utilities.py +4 -4
  7. prefect/concurrency/asyncio.py +48 -7
  8. prefect/concurrency/context.py +24 -0
  9. prefect/concurrency/services.py +24 -8
  10. prefect/concurrency/sync.py +30 -3
  11. prefect/context.py +85 -24
  12. prefect/events/clients.py +93 -60
  13. prefect/events/utilities.py +0 -2
  14. prefect/events/worker.py +9 -2
  15. prefect/flow_engine.py +6 -3
  16. prefect/flows.py +176 -12
  17. prefect/futures.py +84 -7
  18. prefect/profiles.toml +16 -2
  19. prefect/runner/runner.py +6 -1
  20. prefect/runner/storage.py +4 -0
  21. prefect/settings.py +108 -14
  22. prefect/task_engine.py +901 -285
  23. prefect/task_runs.py +24 -1
  24. prefect/task_worker.py +7 -1
  25. prefect/tasks.py +9 -5
  26. prefect/utilities/asyncutils.py +0 -6
  27. prefect/utilities/callables.py +5 -3
  28. prefect/utilities/engine.py +3 -0
  29. prefect/utilities/importtools.py +138 -58
  30. prefect/utilities/schema_tools/validation.py +30 -0
  31. prefect/utilities/services.py +32 -0
  32. {prefect_client-3.0.0rc13.dist-info → prefect_client-3.0.0rc15.dist-info}/METADATA +39 -39
  33. {prefect_client-3.0.0rc13.dist-info → prefect_client-3.0.0rc15.dist-info}/RECORD +36 -35
  34. {prefect_client-3.0.0rc13.dist-info → prefect_client-3.0.0rc15.dist-info}/WHEEL +1 -1
  35. {prefect_client-3.0.0rc13.dist-info → prefect_client-3.0.0rc15.dist-info}/LICENSE +0 -0
  36. {prefect_client-3.0.0rc13.dist-info → prefect_client-3.0.0rc15.dist-info}/top_level.txt +0 -0
prefect/concurrency/asyncio.py CHANGED
@@ -2,6 +2,7 @@ import asyncio
 from contextlib import asynccontextmanager
 from typing import AsyncGenerator, List, Literal, Optional, Union, cast
 
+import anyio
 import httpx
 import pendulum
 
@@ -14,6 +15,7 @@ except ImportError:
 from prefect.client.orchestration import get_client
 from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
 
+from .context import ConcurrencyContext
 from .events import (
     _emit_concurrency_acquisition_events,
     _emit_concurrency_release_events,
@@ -34,6 +36,7 @@ async def concurrency(
     names: Union[str, List[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
+    create_if_missing: Optional[bool] = True,
 ) -> AsyncGenerator[None, None]:
     """A context manager that acquires and releases concurrency slots from the
     given concurrency limits.
@@ -43,6 +46,7 @@ async def concurrency(
         occupy: The number of slots to acquire and hold from each limit.
         timeout_seconds: The number of seconds to wait for the slots to be acquired before
             raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
+        create_if_missing: Whether to create the concurrency limits if they do not exist.
 
     Raises:
         TimeoutError: If the slots are not acquired within the given timeout.
@@ -60,9 +64,17 @@ async def concurrency(
         await resource_heavy()
     ```
     """
+    if not names:
+        yield
+        return
+
     names = names if isinstance(names, list) else [names]
+
     limits = await _acquire_concurrency_slots(
-        names, occupy, timeout_seconds=timeout_seconds
+        names,
+        occupy,
+        timeout_seconds=timeout_seconds,
+        create_if_missing=create_if_missing,
     )
     acquisition_time = pendulum.now("UTC")
     emitted_events = _emit_concurrency_acquisition_events(limits, occupy)
@@ -71,13 +83,28 @@ async def concurrency(
         yield
     finally:
         occupancy_period = cast(Interval, (pendulum.now("UTC") - acquisition_time))
-        await _release_concurrency_slots(
-            names, occupy, occupancy_period.total_seconds()
-        )
+        try:
+            await _release_concurrency_slots(
+                names, occupy, occupancy_period.total_seconds()
+            )
+        except anyio.get_cancelled_exc_class():
+            # The task was cancelled before it could release the slots. Add the
+            # slots to the cleanup list so they can be released when the
+            # concurrency context is exited.
+            if ctx := ConcurrencyContext.get():
+                ctx.cleanup_slots.append(
+                    (names, occupy, occupancy_period.total_seconds())
+                )
+
         _emit_concurrency_release_events(limits, occupy, emitted_events)
 
 
-async def rate_limit(names: Union[str, List[str]], occupy: int = 1) -> None:
+async def rate_limit(
+    names: Union[str, List[str]],
+    occupy: int = 1,
+    timeout_seconds: Optional[float] = None,
+    create_if_missing: Optional[bool] = True,
+) -> None:
     """Block execution until an `occupy` number of slots of the concurrency
     limits given in `names` are acquired. Requires that all given concurrency
     limits have a slot decay.
@@ -85,9 +112,22 @@ async def rate_limit(names: Union[str, List[str]], occupy: int = 1) -> None:
     Args:
         names: The names of the concurrency limits to acquire slots from.
         occupy: The number of slots to acquire and hold from each limit.
+        timeout_seconds: The number of seconds to wait for the slots to be acquired before
+            raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
+        create_if_missing: Whether to create the concurrency limits if they do not exist.
     """
+    if not names:
+        return
+
     names = names if isinstance(names, list) else [names]
-    limits = await _acquire_concurrency_slots(names, occupy, mode="rate_limit")
+
+    limits = await _acquire_concurrency_slots(
+        names,
+        occupy,
+        mode="rate_limit",
+        timeout_seconds=timeout_seconds,
+        create_if_missing=create_if_missing,
+    )
     _emit_concurrency_acquisition_events(limits, occupy)
 
 
@@ -96,9 +136,10 @@ async def _acquire_concurrency_slots(
     slots: int,
     mode: Union[Literal["concurrency"], Literal["rate_limit"]] = "concurrency",
     timeout_seconds: Optional[float] = None,
+    create_if_missing: Optional[bool] = True,
 ) -> List[MinimalConcurrencyLimitResponse]:
     service = ConcurrencySlotAcquisitionService.instance(frozenset(names))
-    future = service.send((slots, mode, timeout_seconds))
+    future = service.send((slots, mode, timeout_seconds, create_if_missing))
     response_or_exception = await asyncio.wrap_future(future)
 
     if isinstance(response_or_exception, Exception):
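For callers of the async helpers, the practical effect of this diff is that empty name lists become a no-op and both `concurrency` and `rate_limit` now accept `timeout_seconds` and `create_if_missing`. A minimal sketch of the updated call sites, assuming a reachable Prefect API; the limit names and the `process_batch` coroutine are illustrative only:

    import asyncio

    from prefect.concurrency.asyncio import concurrency, rate_limit

    async def process_batch(items):
        # Hold two slots of the "database" limit, creating the limit on first
        # use, and raise TimeoutError if slots are not acquired within 30s.
        async with concurrency("database", occupy=2, timeout_seconds=30, create_if_missing=True):
            ...

        # rate_limit accepts the same keyword arguments as of this release.
        await rate_limit("outbound-api", timeout_seconds=10, create_if_missing=True)

    asyncio.run(process_batch([]))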
prefect/concurrency/context.py ADDED
@@ -0,0 +1,24 @@
+from contextvars import ContextVar
+from typing import List, Tuple
+
+from prefect.client.orchestration import get_client
+from prefect.context import ContextModel, Field
+
+
+class ConcurrencyContext(ContextModel):
+    __var__: ContextVar = ContextVar("concurrency")
+
+    # Track the slots that have been acquired but were not able to be released
+    # due to cancellation or some other error. These slots are released when
+    # the context manager exits.
+    cleanup_slots: List[Tuple[List[str], int, float]] = Field(default_factory=list)
+
+    def __exit__(self, *exc_info):
+        if self.cleanup_slots:
+            with get_client(sync_client=True) as client:
+                for names, occupy, occupancy_seconds in self.cleanup_slots:
+                    client.release_concurrency_slots(
+                        names=names, slots=occupy, occupancy_seconds=occupancy_seconds
+                    )
+
+        return super().__exit__(*exc_info)
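This new module backs the cancellation handling in the asyncio.py diff above: slots that could not be released are parked in `cleanup_slots` and released synchronously when the context exits. A hedged sketch of entering the context manually (the run engines are expected to enter it themselves; the function and limit name here are illustrative):

    from prefect.concurrency.asyncio import concurrency
    from prefect.concurrency.context import ConcurrencyContext

    async def guarded_work():
        # Any slots left unreleased due to cancellation inside this block are
        # released when ConcurrencyContext exits.
        with ConcurrencyContext():
            async with concurrency("database", occupy=1):
                ...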
prefect/concurrency/services.py CHANGED
@@ -34,11 +34,16 @@ class ConcurrencySlotAcquisitionService(QueueService):
             yield
 
     async def _handle(
-        self, item: Tuple[int, str, Optional[float], concurrent.futures.Future]
+        self,
+        item: Tuple[
+            int, str, Optional[float], concurrent.futures.Future, Optional[bool]
+        ],
     ) -> None:
-        occupy, mode, timeout_seconds, future = item
+        occupy, mode, timeout_seconds, future, create_if_missing = item
         try:
-            response = await self.acquire_slots(occupy, mode, timeout_seconds)
+            response = await self.acquire_slots(
+                occupy, mode, timeout_seconds, create_if_missing
+            )
         except Exception as exc:
             # If the request to the increment endpoint fails in a non-standard
             # way, we need to set the future's result so that the caller can
@@ -49,13 +54,20 @@ class ConcurrencySlotAcquisitionService(QueueService):
             future.set_result(response)
 
     async def acquire_slots(
-        self, slots: int, mode: str, timeout_seconds: Optional[float] = None
+        self,
+        slots: int,
+        mode: str,
+        timeout_seconds: Optional[float] = None,
+        create_if_missing: Optional[bool] = False,
     ) -> httpx.Response:
         with timeout_async(seconds=timeout_seconds):
             while True:
                 try:
                     response = await self._client.increment_concurrency_slots(
-                        names=self.concurrency_limit_names, slots=slots, mode=mode
+                        names=self.concurrency_limit_names,
+                        slots=slots,
+                        mode=mode,
+                        create_if_missing=create_if_missing,
                     )
                 except Exception as exc:
                     if (
@@ -69,7 +81,9 @@ class ConcurrencySlotAcquisitionService(QueueService):
                 else:
                     return response
 
-    def send(self, item: Tuple[int, str, Optional[float]]) -> concurrent.futures.Future:
+    def send(
+        self, item: Tuple[int, str, Optional[float], Optional[bool]]
+    ) -> concurrent.futures.Future:
         with self._lock:
             if self._stopped:
                 raise RuntimeError("Cannot put items in a stopped service instance.")
@@ -77,7 +91,9 @@ class ConcurrencySlotAcquisitionService(QueueService):
             logger.debug("Service %r enqueuing item %r", self, item)
             future: concurrent.futures.Future = concurrent.futures.Future()
 
-            occupy, mode, timeout_seconds = item
-            self._queue.put_nowait((occupy, mode, timeout_seconds, future))
+            occupy, mode, timeout_seconds, create_if_missing = item
+            self._queue.put_nowait(
+                (occupy, mode, timeout_seconds, future, create_if_missing)
+            )
 
         return future
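For reference, the producer side of this queue protocol after the change, as exercised by `_acquire_concurrency_slots` in the asyncio.py diff above: callers send a four-item tuple, the service inserts its own `Future` before enqueuing, and `_handle` therefore unpacks five items. A hedged sketch, assuming a running event loop and a limit named "database":

    import asyncio

    from prefect.concurrency.services import ConcurrencySlotAcquisitionService

    async def acquire_one_slot():
        service = ConcurrencySlotAcquisitionService.instance(frozenset(["database"]))
        # (slots, mode, timeout_seconds, create_if_missing)
        future = service.send((1, "concurrency", None, True))
        return await asyncio.wrap_future(future)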
prefect/concurrency/sync.py CHANGED
@@ -40,6 +40,7 @@ def concurrency(
     names: Union[str, List[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
+    create_if_missing: Optional[bool] = True,
 ) -> Generator[None, None, None]:
     """A context manager that acquires and releases concurrency slots from the
     given concurrency limits.
@@ -49,6 +50,7 @@ def concurrency(
         occupy: The number of slots to acquire and hold from each limit.
         timeout_seconds: The number of seconds to wait for the slots to be acquired before
             raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
+        create_if_missing: Whether to create the concurrency limits if they do not exist.
 
     Raises:
         TimeoutError: If the slots are not acquired within the given timeout.
@@ -66,10 +68,18 @@ def concurrency(
         resource_heavy()
     ```
     """
+    if not names:
+        yield
+        return
+
     names = names if isinstance(names, list) else [names]
 
     limits: List[MinimalConcurrencyLimitResponse] = _call_async_function_from_sync(
-        _acquire_concurrency_slots, names, occupy, timeout_seconds=timeout_seconds
+        _acquire_concurrency_slots,
+        names,
+        occupy,
+        timeout_seconds=timeout_seconds,
+        create_if_missing=create_if_missing,
     )
     acquisition_time = pendulum.now("UTC")
     emitted_events = _emit_concurrency_acquisition_events(limits, occupy)
@@ -87,7 +97,12 @@ def concurrency(
         _emit_concurrency_release_events(limits, occupy, emitted_events)
 
 
-def rate_limit(names: Union[str, List[str]], occupy: int = 1) -> None:
+def rate_limit(
+    names: Union[str, List[str]],
+    occupy: int = 1,
+    timeout_seconds: Optional[float] = None,
+    create_if_missing: Optional[bool] = True,
+) -> None:
     """Block execution until an `occupy` number of slots of the concurrency
     limits given in `names` are acquired. Requires that all given concurrency
     limits have a slot decay.
@@ -95,10 +110,22 @@ def rate_limit(names: Union[str, List[str]], occupy: int = 1) -> None:
     Args:
         names: The names of the concurrency limits to acquire slots from.
         occupy: The number of slots to acquire and hold from each limit.
+        timeout_seconds: The number of seconds to wait for the slots to be acquired before
+            raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
+        create_if_missing: Whether to create the concurrency limits if they do not exist.
     """
+    if not names:
+        return
+
     names = names if isinstance(names, list) else [names]
+
     limits = _call_async_function_from_sync(
-        _acquire_concurrency_slots, names, occupy, mode="rate_limit"
+        _acquire_concurrency_slots,
+        names,
+        occupy,
+        mode="rate_limit",
+        timeout_seconds=timeout_seconds,
+        create_if_missing=create_if_missing,
     )
     _emit_concurrency_acquisition_events(limits, occupy)
 
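The synchronous API mirrors the async changes. A minimal sketch, again assuming a reachable Prefect API and illustrative limit names:

    from prefect.concurrency.sync import concurrency, rate_limit

    def process_batch(items):
        # Same keyword arguments as the async version, usable from plain
        # synchronous code.
        with concurrency("database", occupy=2, timeout_seconds=30, create_if_missing=True):
            ...

        rate_limit("outbound-api", timeout_seconds=10, create_if_missing=True)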
prefect/context.py CHANGED
@@ -10,12 +10,13 @@ import os
 import sys
 import warnings
 import weakref
-from contextlib import ExitStack, contextmanager
+from contextlib import ExitStack, asynccontextmanager, contextmanager
 from contextvars import ContextVar, Token
 from pathlib import Path
 from typing import (
     TYPE_CHECKING,
     Any,
+    AsyncGenerator,
     Dict,
     Generator,
     Mapping,
@@ -44,6 +45,7 @@ from prefect.settings import PREFECT_HOME, Profile, Settings
 from prefect.states import State
 from prefect.task_runners import TaskRunner
 from prefect.utilities.asyncutils import run_coro_as_sync
+from prefect.utilities.services import start_client_metrics_server
 
 T = TypeVar("T")
 
@@ -89,11 +91,12 @@ def hydrated_context(
             client = client or get_client(sync_client=True)
             if flow_run_context := serialized_context.get("flow_run_context"):
                 flow = flow_run_context["flow"]
+                task_runner = stack.enter_context(flow.task_runner.duplicate())
                 flow_run_context = FlowRunContext(
                     **flow_run_context,
                     client=client,
                     result_factory=run_coro_as_sync(ResultFactory.from_flow(flow)),
-                    task_runner=flow.task_runner.duplicate(),
+                    task_runner=task_runner,
                     detached=True,
                 )
                 stack.enter_context(flow_run_context)
@@ -177,36 +180,34 @@ class ContextModel(BaseModel):
         return self.model_dump(exclude_unset=True)
 
 
-class ClientContext(ContextModel):
+class SyncClientContext(ContextModel):
     """
-    A context for managing the Prefect client instances.
+    A context for managing the sync Prefect client instances.
 
     Clients were formerly tracked on the TaskRunContext and FlowRunContext, but
     having two separate places and the addition of both sync and async clients
     made it difficult to manage. This context is intended to be the single
-    source for clients.
+    source for sync clients.
 
-    The client creates both sync and async clients, which can either be read
-    directly from the context object OR loaded with get_client, inject_client,
-    or other Prefect utilities.
+    The client creates a sync client, which can either be read directly from
+    the context object OR loaded with get_client, inject_client, or other
+    Prefect utilities.
 
-    with ClientContext.get_or_create() as ctx:
+    with SyncClientContext.get_or_create() as ctx:
         c1 = get_client(sync_client=True)
         c2 = get_client(sync_client=True)
         assert c1 is c2
-        assert c1 is ctx.sync_client
+        assert c1 is ctx.client
     """
 
-    __var__ = ContextVar("clients")
-    sync_client: SyncPrefectClient
-    async_client: PrefectClient
+    __var__ = ContextVar("sync-client-context")
+    client: SyncPrefectClient
     _httpx_settings: Optional[dict[str, Any]] = PrivateAttr(None)
     _context_stack: int = PrivateAttr(0)
 
     def __init__(self, httpx_settings: Optional[dict[str, Any]] = None):
         super().__init__(
-            sync_client=get_client(sync_client=True, httpx_settings=httpx_settings),
-            async_client=get_client(sync_client=False, httpx_settings=httpx_settings),
+            client=get_client(sync_client=True, httpx_settings=httpx_settings),
         )
         self._httpx_settings = httpx_settings
         self._context_stack = 0
@@ -214,8 +215,7 @@ class ClientContext(ContextModel):
     def __enter__(self):
         self._context_stack += 1
         if self._context_stack == 1:
-            self.sync_client.__enter__()
-            run_coro_as_sync(self.async_client.__aenter__())
+            self.client.__enter__()
             return super().__enter__()
         else:
             return self
@@ -223,18 +223,74 @@ class ClientContext(ContextModel):
     def __exit__(self, *exc_info):
         self._context_stack -= 1
         if self._context_stack == 0:
-            self.sync_client.__exit__(*exc_info)
-            run_coro_as_sync(self.async_client.__aexit__(*exc_info))
+            self.client.__exit__(*exc_info)
         return super().__exit__(*exc_info)
 
     @classmethod
     @contextmanager
-    def get_or_create(cls) -> Generator["ClientContext", None, None]:
-        ctx = ClientContext.get()
+    def get_or_create(cls) -> Generator["SyncClientContext", None, None]:
+        ctx = SyncClientContext.get()
         if ctx:
             yield ctx
         else:
-            with ClientContext() as ctx:
+            with SyncClientContext() as ctx:
+                yield ctx
+
+
+class AsyncClientContext(ContextModel):
+    """
+    A context for managing the async Prefect client instances.
+
+    Clients were formerly tracked on the TaskRunContext and FlowRunContext, but
+    having two separate places and the addition of both sync and async clients
+    made it difficult to manage. This context is intended to be the single
+    source for async clients.
+
+    The client creates an async client, which can either be read directly from
+    the context object OR loaded with get_client, inject_client, or other
+    Prefect utilities.
+
+    with AsyncClientContext.get_or_create() as ctx:
+        c1 = get_client(sync_client=False)
+        c2 = get_client(sync_client=False)
+        assert c1 is c2
+        assert c1 is ctx.client
+    """
+
+    __var__ = ContextVar("async-client-context")
+    client: PrefectClient
+    _httpx_settings: Optional[dict[str, Any]] = PrivateAttr(None)
+    _context_stack: int = PrivateAttr(0)
+
+    def __init__(self, httpx_settings: Optional[dict[str, Any]] = None):
+        super().__init__(
+            client=get_client(sync_client=False, httpx_settings=httpx_settings),
+        )
+        self._httpx_settings = httpx_settings
+        self._context_stack = 0
+
+    async def __aenter__(self):
+        self._context_stack += 1
+        if self._context_stack == 1:
+            await self.client.__aenter__()
+            return super().__enter__()
+        else:
+            return self
+
+    async def __aexit__(self, *exc_info):
+        self._context_stack -= 1
+        if self._context_stack == 0:
+            await self.client.__aexit__(*exc_info)
+        return super().__exit__(*exc_info)
+
+    @classmethod
+    @asynccontextmanager
+    async def get_or_create(cls) -> AsyncGenerator[Self, None]:
+        ctx = cls.get()
+        if ctx:
+            yield ctx
+        else:
+            with cls() as ctx:
                 yield ctx
 
 
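A hedged sketch of the split client contexts in use, following the docstrings above (note that the async context's `get_or_create` is an `asynccontextmanager`, so it is entered with `async with`; a configured Prefect API is assumed for real use):

    import asyncio

    from prefect.client.orchestration import get_client
    from prefect.context import AsyncClientContext, SyncClientContext

    def sync_usage():
        with SyncClientContext.get_or_create() as ctx:
            c1 = get_client(sync_client=True)
            c2 = get_client(sync_client=True)
            assert c1 is c2 is ctx.client

    async def async_usage():
        async with AsyncClientContext.get_or_create() as ctx:
            c1 = get_client(sync_client=False)
            c2 = get_client(sync_client=False)
            assert c1 is c2 is ctx.client

    sync_usage()
    asyncio.run(async_usage())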
@@ -248,6 +304,11 @@ class RunContext(ContextModel):
         client: The Prefect client instance being used for API communication
     """
 
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        start_client_metrics_server()
+
     start_time: DateTime = Field(default_factory=lambda: pendulum.now("UTC"))
     input_keyset: Optional[Dict[str, Dict[str, str]]] = None
     client: Union[PrefectClient, SyncPrefectClient]
@@ -300,7 +361,7 @@ class EngineContext(RunContext):
         default_factory=weakref.WeakValueDictionary
     )
 
-    # Events worker to emit events to Prefect Cloud
+    # Events worker to emit events
    events: Optional[EventsWorker] = None
 
     __var__: ContextVar = ContextVar("flow_run")
@@ -601,7 +662,7 @@ def root_settings_context():
             ),
             file=sys.stderr,
         )
-        active_name = "default"
+        active_name = "ephemeral"
 
    with use_profile(
        profiles[active_name],