prefect-client 3.1.9__py3-none-any.whl → 3.1.11__py3-none-any.whl
This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- prefect/_experimental/lineage.py +7 -8
- prefect/_internal/_logging.py +15 -3
- prefect/_internal/compatibility/async_dispatch.py +22 -16
- prefect/_internal/compatibility/deprecated.py +42 -18
- prefect/_internal/compatibility/migration.py +2 -2
- prefect/_internal/concurrency/inspection.py +12 -14
- prefect/_internal/concurrency/primitives.py +2 -2
- prefect/_internal/concurrency/services.py +154 -80
- prefect/_internal/concurrency/waiters.py +13 -9
- prefect/_internal/pydantic/annotations/pendulum.py +7 -7
- prefect/_internal/pytz.py +4 -3
- prefect/_internal/retries.py +10 -5
- prefect/_internal/schemas/bases.py +19 -10
- prefect/_internal/schemas/validators.py +227 -388
- prefect/_version.py +3 -3
- prefect/artifacts.py +61 -74
- prefect/automations.py +27 -7
- prefect/blocks/core.py +3 -3
- prefect/client/{orchestration.py → orchestration/__init__.py} +38 -701
- prefect/client/orchestration/_artifacts/__init__.py +0 -0
- prefect/client/orchestration/_artifacts/client.py +239 -0
- prefect/client/orchestration/_concurrency_limits/__init__.py +0 -0
- prefect/client/orchestration/_concurrency_limits/client.py +762 -0
- prefect/client/orchestration/_logs/__init__.py +0 -0
- prefect/client/orchestration/_logs/client.py +95 -0
- prefect/client/orchestration/_variables/__init__.py +0 -0
- prefect/client/orchestration/_variables/client.py +157 -0
- prefect/client/orchestration/base.py +46 -0
- prefect/client/orchestration/routes.py +145 -0
- prefect/client/schemas/actions.py +2 -2
- prefect/client/schemas/filters.py +5 -0
- prefect/client/schemas/objects.py +3 -10
- prefect/client/schemas/schedules.py +22 -10
- prefect/concurrency/_asyncio.py +87 -0
- prefect/concurrency/{events.py → _events.py} +10 -10
- prefect/concurrency/asyncio.py +20 -104
- prefect/concurrency/context.py +6 -4
- prefect/concurrency/services.py +26 -74
- prefect/concurrency/sync.py +23 -44
- prefect/concurrency/v1/_asyncio.py +63 -0
- prefect/concurrency/v1/{events.py → _events.py} +13 -15
- prefect/concurrency/v1/asyncio.py +27 -80
- prefect/concurrency/v1/context.py +6 -4
- prefect/concurrency/v1/services.py +33 -79
- prefect/concurrency/v1/sync.py +18 -37
- prefect/context.py +66 -70
- prefect/deployments/base.py +4 -144
- prefect/deployments/flow_runs.py +12 -2
- prefect/deployments/runner.py +11 -3
- prefect/deployments/steps/pull.py +13 -0
- prefect/events/clients.py +7 -1
- prefect/events/schemas/events.py +3 -2
- prefect/flow_engine.py +54 -47
- prefect/flows.py +2 -1
- prefect/futures.py +42 -27
- prefect/input/run_input.py +2 -1
- prefect/locking/filesystem.py +8 -7
- prefect/locking/memory.py +5 -3
- prefect/locking/protocol.py +1 -1
- prefect/main.py +1 -3
- prefect/plugins.py +12 -10
- prefect/results.py +3 -308
- prefect/runner/storage.py +87 -21
- prefect/serializers.py +32 -25
- prefect/settings/legacy.py +4 -4
- prefect/settings/models/api.py +3 -3
- prefect/settings/models/cli.py +3 -3
- prefect/settings/models/client.py +5 -3
- prefect/settings/models/cloud.py +3 -3
- prefect/settings/models/deployments.py +3 -3
- prefect/settings/models/experiments.py +4 -2
- prefect/settings/models/flows.py +3 -3
- prefect/settings/models/internal.py +4 -2
- prefect/settings/models/logging.py +4 -3
- prefect/settings/models/results.py +3 -3
- prefect/settings/models/root.py +3 -2
- prefect/settings/models/runner.py +4 -4
- prefect/settings/models/server/api.py +3 -3
- prefect/settings/models/server/database.py +11 -4
- prefect/settings/models/server/deployments.py +6 -2
- prefect/settings/models/server/ephemeral.py +4 -2
- prefect/settings/models/server/events.py +3 -2
- prefect/settings/models/server/flow_run_graph.py +6 -2
- prefect/settings/models/server/root.py +3 -3
- prefect/settings/models/server/services.py +26 -11
- prefect/settings/models/server/tasks.py +6 -3
- prefect/settings/models/server/ui.py +3 -3
- prefect/settings/models/tasks.py +5 -5
- prefect/settings/models/testing.py +3 -3
- prefect/settings/models/worker.py +5 -3
- prefect/settings/profiles.py +15 -2
- prefect/states.py +4 -7
- prefect/task_engine.py +54 -75
- prefect/tasks.py +84 -32
- prefect/telemetry/processors.py +6 -6
- prefect/telemetry/run_telemetry.py +13 -8
- prefect/telemetry/services.py +32 -31
- prefect/transactions.py +4 -15
- prefect/utilities/_git.py +34 -0
- prefect/utilities/asyncutils.py +1 -1
- prefect/utilities/engine.py +3 -19
- prefect/utilities/generics.py +18 -0
- prefect/workers/__init__.py +2 -0
- {prefect_client-3.1.9.dist-info → prefect_client-3.1.11.dist-info}/METADATA +1 -1
- {prefect_client-3.1.9.dist-info → prefect_client-3.1.11.dist-info}/RECORD +108 -99
- prefect/records/__init__.py +0 -1
- prefect/records/base.py +0 -235
- prefect/records/filesystem.py +0 -213
- prefect/records/memory.py +0 -184
- prefect/records/result_store.py +0 -70
- {prefect_client-3.1.9.dist-info → prefect_client-3.1.11.dist-info}/LICENSE +0 -0
- {prefect_client-3.1.9.dist-info → prefect_client-3.1.11.dist-info}/WHEEL +0 -0
- {prefect_client-3.1.9.dist-info → prefect_client-3.1.11.dist-info}/top_level.txt +0 -0
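The headline structural change in this release is the split of the monolithic `prefect/client/orchestration.py` into a package of per-resource clients (`_artifacts`, `_concurrency_limits`, `_logs`, `_variables`) sharing a common `base.py` and a central `routes.py`. In the new concurrency-limits client reproduced below, every call goes through `self.request(method, route, path_params=..., json=...)` inherited from `BaseClient`/`BaseAsyncClient`. As rough orientation only, and not the actual `prefect.client.orchestration.base` implementation, a base helper of that shape could look like the following sketch (the class name `SketchBaseClient` and the `_client` attribute are assumptions):

```python
# Hypothetical sketch, not the real prefect.client.orchestration.base code.
from __future__ import annotations

from typing import Any

import httpx


class SketchBaseClient:
    """Resolves route templates like "/concurrency_limits/tag/{tag}" and delegates to httpx."""

    def __init__(self, api_url: str) -> None:
        self._client = httpx.Client(base_url=api_url)

    def request(
        self,
        method: str,
        path: str,
        path_params: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> httpx.Response:
        # Fill the route template from path_params, send the request, and raise
        # HTTPStatusError on 4xx/5xx so callers can translate 404s into ObjectNotFound.
        url = path.format(**(path_params or {}))
        response = self._client.request(method, url, **kwargs)
        response.raise_for_status()
        return response
```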
The largest single addition is the new module `prefect/client/orchestration/_concurrency_limits/client.py` (762 added lines), shown below.

@@ -0,0 +1,762 @@
from __future__ import annotations

from typing import TYPE_CHECKING, Any

from httpx import HTTPStatusError, RequestError

from prefect.client.orchestration.base import BaseAsyncClient, BaseClient
from prefect.exceptions import ObjectNotFound

if TYPE_CHECKING:
    from uuid import UUID

    from httpx import Response

    from prefect.client.schemas.actions import (
        GlobalConcurrencyLimitCreate,
        GlobalConcurrencyLimitUpdate,
    )
    from prefect.client.schemas.objects import ConcurrencyLimit
    from prefect.client.schemas.responses import GlobalConcurrencyLimitResponse


class ConcurrencyLimitClient(BaseClient):
    def create_concurrency_limit(
        self,
        tag: str,
        concurrency_limit: int,
    ) -> "UUID":
        """
        Create a tag concurrency limit in the Prefect API. These limits govern concurrently
        running tasks.

        Args:
            tag: a tag the concurrency limit is applied to
            concurrency_limit: the maximum number of concurrent task runs for a given tag

        Raises:
            httpx.RequestError: if the concurrency limit was not created for any reason

        Returns:
            the ID of the concurrency limit in the backend
        """
        from prefect.client.schemas.actions import ConcurrencyLimitCreate

        concurrency_limit_create = ConcurrencyLimitCreate(
            tag=tag,
            concurrency_limit=concurrency_limit,
        )
        response = self.request(
            "POST",
            "/concurrency_limits/",
            json=concurrency_limit_create.model_dump(mode="json"),
        )

        concurrency_limit_id = response.json().get("id")

        if not concurrency_limit_id:
            raise RequestError(f"Malformed response: {response}")
        from uuid import UUID

        return UUID(concurrency_limit_id)

    def read_concurrency_limit_by_tag(
        self,
        tag: str,
    ) -> "ConcurrencyLimit":
        """
        Read the concurrency limit set on a specific tag.

        Args:
            tag: a tag the concurrency limit is applied to

        Raises:
            ObjectNotFound: If request returns 404
            httpx.RequestError: if the concurrency limit was not created for any reason

        Returns:
            the concurrency limit set on a specific tag
        """
        try:
            response = self.request(
                "GET",
                "/concurrency_limits/tag/{tag}",
                path_params={"tag": tag},
            )
        except HTTPStatusError as e:
            if e.response.status_code == 404:
                raise ObjectNotFound(http_exc=e) from e
            else:
                raise

        concurrency_limit_id = response.json().get("id")

        if not concurrency_limit_id:
            raise RequestError(f"Malformed response: {response}")
        from prefect.client.schemas.objects import ConcurrencyLimit

        return ConcurrencyLimit.model_validate(response.json())

    def read_concurrency_limits(
        self,
        limit: int,
        offset: int,
    ) -> list["ConcurrencyLimit"]:
        """
        Lists concurrency limits set on task run tags.

        Args:
            limit: the maximum number of concurrency limits returned
            offset: the concurrency limit query offset

        Returns:
            a list of concurrency limits
        """

        body = {
            "limit": limit,
            "offset": offset,
        }

        response = self.request("POST", "/concurrency_limits/filter", json=body)
        from prefect.client.schemas.objects import ConcurrencyLimit

        return ConcurrencyLimit.model_validate_list(response.json())

    def reset_concurrency_limit_by_tag(
        self,
        tag: str,
        slot_override: list["UUID | str"] | None = None,
    ) -> None:
        """
        Resets the concurrency limit slots set on a specific tag.

        Args:
            tag: a tag the concurrency limit is applied to
            slot_override: a list of task run IDs that are currently using a
                concurrency slot, please check that any task run IDs included in
                `slot_override` are currently running, otherwise those concurrency
                slots will never be released.

        Raises:
            ObjectNotFound: If request returns 404
            httpx.RequestError: If request fails

        """
        if slot_override is not None:
            slot_override = [str(slot) for slot in slot_override]

        try:
            self.request(
                "POST",
                "/concurrency_limits/tag/{tag}/reset",
                path_params={"tag": tag},
                json=dict(slot_override=slot_override),
            )
        except HTTPStatusError as e:
            if e.response.status_code == 404:
                raise ObjectNotFound(http_exc=e) from e
            else:
                raise

    def delete_concurrency_limit_by_tag(
        self,
        tag: str,
    ) -> None:
        """
        Delete the concurrency limit set on a specific tag.

        Args:
            tag: a tag the concurrency limit is applied to

        Raises:
            ObjectNotFound: If request returns 404
            httpx.RequestError: If request fails

        """
        try:
            self.request(
                "DELETE",
                "/concurrency_limits/tag/{tag}",
                path_params={"tag": tag},
            )
        except HTTPStatusError as e:
            if e.response.status_code == 404:
                raise ObjectNotFound(http_exc=e) from e
            else:
                raise

    def increment_v1_concurrency_slots(
        self,
        names: list[str],
        task_run_id: "UUID",
    ) -> "Response":
        """
        Increment concurrency limit slots for the specified limits.

        Args:
            names (List[str]): A list of limit names for which to increment limits.
            task_run_id (UUID): The task run ID incrementing the limits.
        """
        data: dict[str, Any] = {
            "names": names,
            "task_run_id": str(task_run_id),
        }

        return self.request(
            "POST",
            "/concurrency_limits/increment",
            json=data,
        )

    def decrement_v1_concurrency_slots(
        self,
        names: list[str],
        task_run_id: "UUID",
        occupancy_seconds: float,
    ) -> "Response":
        """
        Decrement concurrency limit slots for the specified limits.

        Args:
            names: A list of limit names to decrement.
            task_run_id: The task run ID that incremented the limits.
            occupancy_seconds (float): The duration in seconds that the limits
                were held.

        Returns:
            "Response": The HTTP response from the server.
        """
        data: dict[str, Any] = {
            "names": names,
            "task_run_id": str(task_run_id),
            "occupancy_seconds": occupancy_seconds,
        }

        return self.request(
            "POST",
            "/concurrency_limits/decrement",
            json=data,
        )

    def increment_concurrency_slots(
        self,
        names: list[str],
        slots: int,
        mode: str,
        create_if_missing: bool | None = None,
    ) -> "Response":
        return self.request(
            "POST",
            "/v2/concurrency_limits/increment",
            json={
                "names": names,
                "slots": slots,
                "mode": mode,
                "create_if_missing": create_if_missing if create_if_missing else False,
            },
        )

    def release_concurrency_slots(
        self, names: list[str], slots: int, occupancy_seconds: float
    ) -> "Response":
        """
        Release concurrency slots for the specified limits.

        Args:
            names: A list of limit names for which to release slots.
            slots: The number of concurrency slots to release.
            occupancy_seconds (float): The duration in seconds that the slots
                were occupied.

        Returns:
            "Response": The HTTP response from the server.
        """

        return self.request(
            "POST",
            "/v2/concurrency_limits/decrement",
            json={
                "names": names,
                "slots": slots,
                "occupancy_seconds": occupancy_seconds,
            },
        )

    def create_global_concurrency_limit(
        self, concurrency_limit: "GlobalConcurrencyLimitCreate"
    ) -> "UUID":
        response = self.request(
            "POST",
            "/v2/concurrency_limits/",
            json=concurrency_limit.model_dump(mode="json", exclude_unset=True),
        )
        from uuid import UUID

        return UUID(response.json()["id"])

    def update_global_concurrency_limit(
        self, name: str, concurrency_limit: "GlobalConcurrencyLimitUpdate"
    ) -> "Response":
        try:
            response = self.request(
                "PATCH",
                "/v2/concurrency_limits/{id_or_name}",
                path_params={"id_or_name": name},
                json=concurrency_limit.model_dump(mode="json", exclude_unset=True),
            )
            return response
        except HTTPStatusError as e:
            if e.response.status_code == 404:
                raise ObjectNotFound(http_exc=e) from e
            else:
                raise

    def delete_global_concurrency_limit_by_name(self, name: str) -> "Response":
        try:
            response = self.request(
                "DELETE",
                "/v2/concurrency_limits/{id_or_name}",
                path_params={"id_or_name": name},
            )
            return response
        except HTTPStatusError as e:
            if e.response.status_code == 404:
                raise ObjectNotFound(http_exc=e) from e
            else:
                raise

    def read_global_concurrency_limit_by_name(
        self, name: str
    ) -> "GlobalConcurrencyLimitResponse":
        try:
            response = self.request(
                "GET",
                "/v2/concurrency_limits/{id_or_name}",
                path_params={"id_or_name": name},
            )
            from prefect.client.schemas.responses import GlobalConcurrencyLimitResponse

            return GlobalConcurrencyLimitResponse.model_validate(response.json())
        except HTTPStatusError as e:
            if e.response.status_code == 404:
                raise ObjectNotFound(http_exc=e) from e
            else:
                raise

    def upsert_global_concurrency_limit_by_name(self, name: str, limit: int) -> None:
        """Creates a global concurrency limit with the given name and limit if one does not already exist.

        If one does already exist matching the name then update it's limit if it is different.

        Note: This is not done atomically.
        """
        from prefect.client.schemas.actions import (
            GlobalConcurrencyLimitCreate,
            GlobalConcurrencyLimitUpdate,
        )

        try:
            existing_limit = self.read_global_concurrency_limit_by_name(name)
        except ObjectNotFound:
            existing_limit = None

        if not existing_limit:
            self.create_global_concurrency_limit(
                GlobalConcurrencyLimitCreate(
                    name=name,
                    limit=limit,
                )
            )
        elif existing_limit.limit != limit:
            self.update_global_concurrency_limit(
                name, GlobalConcurrencyLimitUpdate(limit=limit)
            )

    def read_global_concurrency_limits(
        self, limit: int = 10, offset: int = 0
    ) -> list["GlobalConcurrencyLimitResponse"]:
        response = self.request(
            "POST",
            "/v2/concurrency_limits/filter",
            json={
                "limit": limit,
                "offset": offset,
            },
        )

        from prefect.client.schemas.responses import GlobalConcurrencyLimitResponse

        return GlobalConcurrencyLimitResponse.model_validate_list(response.json())


class ConcurrencyLimitAsyncClient(BaseAsyncClient):
    async def create_concurrency_limit(
        self,
        tag: str,
        concurrency_limit: int,
    ) -> "UUID":
        """
        Create a tag concurrency limit in the Prefect API. These limits govern concurrently
        running tasks.

        Args:
            tag: a tag the concurrency limit is applied to
            concurrency_limit: the maximum number of concurrent task runs for a given tag

        Raises:
            httpx.RequestError: if the concurrency limit was not created for any reason

        Returns:
            the ID of the concurrency limit in the backend
        """
        from prefect.client.schemas.actions import ConcurrencyLimitCreate

        concurrency_limit_create = ConcurrencyLimitCreate(
            tag=tag,
            concurrency_limit=concurrency_limit,
        )
        response = await self.request(
            "POST",
            "/concurrency_limits/",
            json=concurrency_limit_create.model_dump(mode="json"),
        )

        concurrency_limit_id = response.json().get("id")

        if not concurrency_limit_id:
            raise RequestError(f"Malformed response: {response}")
        from uuid import UUID

        return UUID(concurrency_limit_id)

    async def read_concurrency_limit_by_tag(
        self,
        tag: str,
    ) -> "ConcurrencyLimit":
        """
        Read the concurrency limit set on a specific tag.

        Args:
            tag: a tag the concurrency limit is applied to

        Raises:
            ObjectNotFound: If request returns 404
            httpx.RequestError: if the concurrency limit was not created for any reason

        Returns:
            the concurrency limit set on a specific tag
        """
        try:
            response = await self.request(
                "GET",
                "/concurrency_limits/tag/{tag}",
                path_params={"tag": tag},
            )
        except HTTPStatusError as e:
            if e.response.status_code == 404:
                raise ObjectNotFound(http_exc=e) from e
            else:
                raise

        concurrency_limit_id = response.json().get("id")

        if not concurrency_limit_id:
            raise RequestError(f"Malformed response: {response}")
        from prefect.client.schemas.objects import ConcurrencyLimit

        return ConcurrencyLimit.model_validate(response.json())

    async def read_concurrency_limits(
        self,
        limit: int,
        offset: int,
    ) -> list["ConcurrencyLimit"]:
        """
        Lists concurrency limits set on task run tags.

        Args:
            limit: the maximum number of concurrency limits returned
            offset: the concurrency limit query offset

        Returns:
            a list of concurrency limits
        """

        body = {
            "limit": limit,
            "offset": offset,
        }

        response = await self.request("POST", "/concurrency_limits/filter", json=body)
        from prefect.client.schemas.objects import ConcurrencyLimit

        return ConcurrencyLimit.model_validate_list(response.json())

    async def reset_concurrency_limit_by_tag(
        self,
        tag: str,
        slot_override: list["UUID | str"] | None = None,
    ) -> None:
        """
        Resets the concurrency limit slots set on a specific tag.

        Args:
            tag: a tag the concurrency limit is applied to
            slot_override: a list of task run IDs that are currently using a
                concurrency slot, please check that any task run IDs included in
                `slot_override` are currently running, otherwise those concurrency
                slots will never be released.

        Raises:
            ObjectNotFound: If request returns 404
            httpx.RequestError: If request fails

        """
        if slot_override is not None:
            slot_override = [str(slot) for slot in slot_override]

        try:
            await self.request(
                "POST",
                "/concurrency_limits/tag/{tag}/reset",
                path_params={"tag": tag},
                json=dict(slot_override=slot_override),
            )
        except HTTPStatusError as e:
            if e.response.status_code == 404:
                raise ObjectNotFound(http_exc=e) from e
            else:
                raise

    async def delete_concurrency_limit_by_tag(
        self,
        tag: str,
    ) -> None:
        """
        Delete the concurrency limit set on a specific tag.

        Args:
            tag: a tag the concurrency limit is applied to

        Raises:
            ObjectNotFound: If request returns 404
            httpx.RequestError: If request fails

        """
        try:
            await self.request(
                "DELETE",
                "/concurrency_limits/tag/{tag}",
                path_params={"tag": tag},
            )
        except HTTPStatusError as e:
            if e.response.status_code == 404:
                raise ObjectNotFound(http_exc=e) from e
            else:
                raise

    async def increment_v1_concurrency_slots(
        self,
        names: list[str],
        task_run_id: "UUID",
    ) -> "Response":
        """
        Increment concurrency limit slots for the specified limits.

        Args:
            names: A list of limit names for which to increment limits.
            task_run_id: The task run ID incrementing the limits.
        """
        data: dict[str, Any] = {
            "names": names,
            "task_run_id": str(task_run_id),
        }

        return await self.request(
            "POST",
            "/concurrency_limits/increment",
            json=data,
        )

    async def decrement_v1_concurrency_slots(
        self,
        names: list[str],
        task_run_id: "UUID",
        occupancy_seconds: float,
    ) -> "Response":
        """
        Decrement concurrency limit slots for the specified limits.

        Args:
            names: A list of limit names to decrement.
            task_run_id: The task run ID that incremented the limits.
            occupancy_seconds (float): The duration in seconds that the limits
                were held.

        Returns:
            "Response": The HTTP response from the server.
        """
        data: dict[str, Any] = {
            "names": names,
            "task_run_id": str(task_run_id),
            "occupancy_seconds": occupancy_seconds,
        }

        return await self.request(
            "POST",
            "/concurrency_limits/decrement",
            json=data,
        )

    async def increment_concurrency_slots(
        self,
        names: list[str],
        slots: int,
        mode: str,
        create_if_missing: bool | None = None,
    ) -> "Response":
        return await self.request(
            "POST",
            "/v2/concurrency_limits/increment",
            json={
                "names": names,
                "slots": slots,
                "mode": mode,
                "create_if_missing": create_if_missing if create_if_missing else False,
            },
        )

    async def release_concurrency_slots(
        self, names: list[str], slots: int, occupancy_seconds: float
    ) -> "Response":
        """
        Release concurrency slots for the specified limits.

        Args:
            names: A list of limit names for which to release slots.
            slots: The number of concurrency slots to release.
            occupancy_seconds (float): The duration in seconds that the slots
                were occupied.

        Returns:
            "Response": The HTTP response from the server.
        """

        return await self.request(
            "POST",
            "/v2/concurrency_limits/decrement",
            json={
                "names": names,
                "slots": slots,
                "occupancy_seconds": occupancy_seconds,
            },
        )

    async def create_global_concurrency_limit(
        self, concurrency_limit: "GlobalConcurrencyLimitCreate"
    ) -> "UUID":
        response = await self.request(
            "POST",
            "/v2/concurrency_limits/",
            json=concurrency_limit.model_dump(mode="json", exclude_unset=True),
        )
        from uuid import UUID

        return UUID(response.json()["id"])

    async def update_global_concurrency_limit(
        self, name: str, concurrency_limit: "GlobalConcurrencyLimitUpdate"
    ) -> "Response":
        try:
            response = await self.request(
                "PATCH",
                "/v2/concurrency_limits/{id_or_name}",
                path_params={"id_or_name": name},
                json=concurrency_limit.model_dump(mode="json", exclude_unset=True),
            )
            return response
        except HTTPStatusError as e:
            if e.response.status_code == 404:
                raise ObjectNotFound(http_exc=e) from e
            else:
                raise

    async def delete_global_concurrency_limit_by_name(self, name: str) -> "Response":
        try:
            response = await self.request(
                "DELETE",
                "/v2/concurrency_limits/{id_or_name}",
                path_params={"id_or_name": name},
            )
            return response
        except HTTPStatusError as e:
            if e.response.status_code == 404:
                raise ObjectNotFound(http_exc=e) from e
            else:
                raise

    async def read_global_concurrency_limit_by_name(
        self, name: str
    ) -> "GlobalConcurrencyLimitResponse":
        try:
            response = await self.request(
                "GET",
                "/v2/concurrency_limits/{id_or_name}",
                path_params={"id_or_name": name},
            )
            from prefect.client.schemas.responses import GlobalConcurrencyLimitResponse

            return GlobalConcurrencyLimitResponse.model_validate(response.json())
        except HTTPStatusError as e:
            if e.response.status_code == 404:
                raise ObjectNotFound(http_exc=e) from e
            else:
                raise

    async def upsert_global_concurrency_limit_by_name(
        self, name: str, limit: int
    ) -> None:
        """Creates a global concurrency limit with the given name and limit if one does not already exist.

        If one does already exist matching the name then update it's limit if it is different.

        Note: This is not done atomically.
        """
        from prefect.client.schemas.actions import (
            GlobalConcurrencyLimitCreate,
            GlobalConcurrencyLimitUpdate,
        )

        try:
            existing_limit = await self.read_global_concurrency_limit_by_name(name)
        except ObjectNotFound:
            existing_limit = None

        if not existing_limit:
            await self.create_global_concurrency_limit(
                GlobalConcurrencyLimitCreate(
                    name=name,
                    limit=limit,
                )
            )
        elif existing_limit.limit != limit:
            await self.update_global_concurrency_limit(
                name, GlobalConcurrencyLimitUpdate(limit=limit)
            )

    async def read_global_concurrency_limits(
        self, limit: int = 10, offset: int = 0
    ) -> list["GlobalConcurrencyLimitResponse"]:
        response = await self.request(
            "POST",
            "/v2/concurrency_limits/filter",
            json={
                "limit": limit,
                "offset": offset,
            },
        )

        from prefect.client.schemas.responses import GlobalConcurrencyLimitResponse

        return GlobalConcurrencyLimitResponse.model_validate_list(response.json())
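For orientation, these are the same tag-based and global concurrency-limit operations that previously lived directly on `PrefectClient`; in application code they are normally reached through `prefect.get_client()` rather than by instantiating these classes. A minimal usage sketch, assuming a reachable Prefect API and that `get_client()` continues to expose these methods as it did in 3.1.9:

```python
import asyncio

from prefect import get_client


async def main() -> None:
    async with get_client() as client:
        # Create a tag-based concurrency limit, then read it back by tag.
        limit_id = await client.create_concurrency_limit(
            tag="database", concurrency_limit=5
        )
        print(f"Created concurrency limit {limit_id}")

        limit = await client.read_concurrency_limit_by_tag(tag="database")
        print(f"Tag 'database' allows {limit.concurrency_limit} concurrent task runs")


asyncio.run(main())
```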