prefect-client 3.1.10__py3-none-any.whl → 3.1.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/_experimental/lineage.py +7 -8
- prefect/_experimental/sla/__init__.py +0 -0
- prefect/_experimental/sla/client.py +66 -0
- prefect/_experimental/sla/objects.py +53 -0
- prefect/_internal/_logging.py +15 -3
- prefect/_internal/compatibility/async_dispatch.py +22 -16
- prefect/_internal/compatibility/deprecated.py +42 -18
- prefect/_internal/compatibility/migration.py +2 -2
- prefect/_internal/concurrency/inspection.py +12 -14
- prefect/_internal/concurrency/primitives.py +2 -2
- prefect/_internal/concurrency/services.py +154 -80
- prefect/_internal/concurrency/waiters.py +13 -9
- prefect/_internal/pydantic/annotations/pendulum.py +7 -7
- prefect/_internal/pytz.py +4 -3
- prefect/_internal/retries.py +10 -5
- prefect/_internal/schemas/bases.py +19 -10
- prefect/_internal/schemas/validators.py +227 -388
- prefect/_version.py +3 -3
- prefect/automations.py +236 -30
- prefect/blocks/__init__.py +3 -3
- prefect/blocks/abstract.py +53 -30
- prefect/blocks/core.py +183 -84
- prefect/blocks/notifications.py +133 -73
- prefect/blocks/redis.py +13 -9
- prefect/blocks/system.py +24 -11
- prefect/blocks/webhook.py +7 -5
- prefect/cache_policies.py +3 -2
- prefect/client/orchestration/__init__.py +1957 -0
- prefect/client/orchestration/_artifacts/__init__.py +0 -0
- prefect/client/orchestration/_artifacts/client.py +239 -0
- prefect/client/orchestration/_automations/__init__.py +0 -0
- prefect/client/orchestration/_automations/client.py +329 -0
- prefect/client/orchestration/_blocks_documents/__init__.py +0 -0
- prefect/client/orchestration/_blocks_documents/client.py +334 -0
- prefect/client/orchestration/_blocks_schemas/__init__.py +0 -0
- prefect/client/orchestration/_blocks_schemas/client.py +200 -0
- prefect/client/orchestration/_blocks_types/__init__.py +0 -0
- prefect/client/orchestration/_blocks_types/client.py +380 -0
- prefect/client/orchestration/_concurrency_limits/__init__.py +0 -0
- prefect/client/orchestration/_concurrency_limits/client.py +762 -0
- prefect/client/orchestration/_deployments/__init__.py +0 -0
- prefect/client/orchestration/_deployments/client.py +1128 -0
- prefect/client/orchestration/_flow_runs/__init__.py +0 -0
- prefect/client/orchestration/_flow_runs/client.py +903 -0
- prefect/client/orchestration/_flows/__init__.py +0 -0
- prefect/client/orchestration/_flows/client.py +343 -0
- prefect/client/orchestration/_logs/__init__.py +0 -0
- prefect/client/orchestration/_logs/client.py +97 -0
- prefect/client/orchestration/_variables/__init__.py +0 -0
- prefect/client/orchestration/_variables/client.py +157 -0
- prefect/client/orchestration/base.py +46 -0
- prefect/client/orchestration/routes.py +145 -0
- prefect/client/schemas/__init__.py +68 -28
- prefect/client/schemas/actions.py +2 -2
- prefect/client/schemas/filters.py +5 -0
- prefect/client/schemas/objects.py +8 -15
- prefect/client/schemas/schedules.py +22 -10
- prefect/concurrency/_asyncio.py +87 -0
- prefect/concurrency/{events.py → _events.py} +10 -10
- prefect/concurrency/asyncio.py +20 -104
- prefect/concurrency/context.py +6 -4
- prefect/concurrency/services.py +26 -74
- prefect/concurrency/sync.py +23 -44
- prefect/concurrency/v1/_asyncio.py +63 -0
- prefect/concurrency/v1/{events.py → _events.py} +13 -15
- prefect/concurrency/v1/asyncio.py +27 -80
- prefect/concurrency/v1/context.py +6 -4
- prefect/concurrency/v1/services.py +33 -79
- prefect/concurrency/v1/sync.py +18 -37
- prefect/context.py +66 -45
- prefect/deployments/base.py +10 -144
- prefect/deployments/flow_runs.py +12 -2
- prefect/deployments/runner.py +53 -4
- prefect/deployments/steps/pull.py +13 -0
- prefect/engine.py +17 -4
- prefect/events/clients.py +7 -1
- prefect/events/schemas/events.py +3 -2
- prefect/filesystems.py +6 -2
- prefect/flow_engine.py +101 -85
- prefect/flows.py +10 -1
- prefect/input/run_input.py +2 -1
- prefect/logging/logging.yml +1 -1
- prefect/main.py +1 -3
- prefect/results.py +2 -307
- prefect/runner/runner.py +4 -2
- prefect/runner/storage.py +87 -21
- prefect/serializers.py +32 -25
- prefect/settings/legacy.py +4 -4
- prefect/settings/models/api.py +3 -3
- prefect/settings/models/cli.py +3 -3
- prefect/settings/models/client.py +5 -3
- prefect/settings/models/cloud.py +8 -3
- prefect/settings/models/deployments.py +3 -3
- prefect/settings/models/experiments.py +4 -7
- prefect/settings/models/flows.py +3 -3
- prefect/settings/models/internal.py +4 -2
- prefect/settings/models/logging.py +4 -3
- prefect/settings/models/results.py +3 -3
- prefect/settings/models/root.py +3 -2
- prefect/settings/models/runner.py +4 -4
- prefect/settings/models/server/api.py +3 -3
- prefect/settings/models/server/database.py +11 -4
- prefect/settings/models/server/deployments.py +6 -2
- prefect/settings/models/server/ephemeral.py +4 -2
- prefect/settings/models/server/events.py +3 -2
- prefect/settings/models/server/flow_run_graph.py +6 -2
- prefect/settings/models/server/root.py +3 -3
- prefect/settings/models/server/services.py +26 -11
- prefect/settings/models/server/tasks.py +6 -3
- prefect/settings/models/server/ui.py +3 -3
- prefect/settings/models/tasks.py +5 -5
- prefect/settings/models/testing.py +3 -3
- prefect/settings/models/worker.py +5 -3
- prefect/settings/profiles.py +15 -2
- prefect/states.py +61 -45
- prefect/task_engine.py +54 -75
- prefect/task_runners.py +56 -55
- prefect/task_worker.py +2 -2
- prefect/tasks.py +90 -36
- prefect/telemetry/bootstrap.py +10 -9
- prefect/telemetry/run_telemetry.py +13 -8
- prefect/telemetry/services.py +4 -0
- prefect/transactions.py +4 -15
- prefect/utilities/_git.py +34 -0
- prefect/utilities/asyncutils.py +1 -1
- prefect/utilities/engine.py +3 -19
- prefect/utilities/generics.py +18 -0
- prefect/utilities/templating.py +25 -1
- prefect/workers/base.py +6 -3
- prefect/workers/process.py +1 -1
- {prefect_client-3.1.10.dist-info → prefect_client-3.1.12.dist-info}/METADATA +2 -2
- {prefect_client-3.1.10.dist-info → prefect_client-3.1.12.dist-info}/RECORD +135 -109
- prefect/client/orchestration.py +0 -4523
- prefect/records/__init__.py +0 -1
- prefect/records/base.py +0 -235
- prefect/records/filesystem.py +0 -213
- prefect/records/memory.py +0 -184
- prefect/records/result_store.py +0 -70
- {prefect_client-3.1.10.dist-info → prefect_client-3.1.12.dist-info}/LICENSE +0 -0
- {prefect_client-3.1.10.dist-info → prefect_client-3.1.12.dist-info}/WHEEL +0 -0
- {prefect_client-3.1.10.dist-info → prefect_client-3.1.12.dist-info}/top_level.txt +0 -0
prefect/client/orchestration.py
DELETED
@@ -1,4523 +0,0 @@
|
|
1
|
-
import asyncio
|
2
|
-
import base64
|
3
|
-
import datetime
|
4
|
-
import ssl
|
5
|
-
import warnings
|
6
|
-
from collections.abc import Iterable
|
7
|
-
from contextlib import AsyncExitStack
|
8
|
-
from logging import Logger
|
9
|
-
from typing import TYPE_CHECKING, Any, Literal, NoReturn, Optional, Union, overload
|
10
|
-
from uuid import UUID, uuid4
|
11
|
-
|
12
|
-
import certifi
|
13
|
-
import httpcore
|
14
|
-
import httpx
|
15
|
-
import pendulum
|
16
|
-
import pydantic
|
17
|
-
from asgi_lifespan import LifespanManager
|
18
|
-
from packaging import version
|
19
|
-
from starlette import status
|
20
|
-
from typing_extensions import ParamSpec, Self, TypeVar
|
21
|
-
|
22
|
-
import prefect
|
23
|
-
import prefect.exceptions
|
24
|
-
import prefect.settings
|
25
|
-
import prefect.states
|
26
|
-
from prefect.client.constants import SERVER_API_VERSION
|
27
|
-
from prefect.client.schemas import FlowRun, OrchestrationResult, TaskRun, sorting
|
28
|
-
from prefect.client.schemas.actions import (
|
29
|
-
ArtifactCreate,
|
30
|
-
ArtifactUpdate,
|
31
|
-
BlockDocumentCreate,
|
32
|
-
BlockDocumentUpdate,
|
33
|
-
BlockSchemaCreate,
|
34
|
-
BlockTypeCreate,
|
35
|
-
BlockTypeUpdate,
|
36
|
-
ConcurrencyLimitCreate,
|
37
|
-
DeploymentCreate,
|
38
|
-
DeploymentFlowRunCreate,
|
39
|
-
DeploymentScheduleCreate,
|
40
|
-
DeploymentScheduleUpdate,
|
41
|
-
DeploymentUpdate,
|
42
|
-
FlowCreate,
|
43
|
-
FlowRunCreate,
|
44
|
-
FlowRunNotificationPolicyCreate,
|
45
|
-
FlowRunNotificationPolicyUpdate,
|
46
|
-
FlowRunUpdate,
|
47
|
-
GlobalConcurrencyLimitCreate,
|
48
|
-
GlobalConcurrencyLimitUpdate,
|
49
|
-
LogCreate,
|
50
|
-
TaskRunCreate,
|
51
|
-
TaskRunUpdate,
|
52
|
-
VariableCreate,
|
53
|
-
VariableUpdate,
|
54
|
-
WorkPoolCreate,
|
55
|
-
WorkPoolUpdate,
|
56
|
-
WorkQueueCreate,
|
57
|
-
WorkQueueUpdate,
|
58
|
-
)
|
59
|
-
from prefect.client.schemas.filters import (
|
60
|
-
ArtifactCollectionFilter,
|
61
|
-
ArtifactFilter,
|
62
|
-
DeploymentFilter,
|
63
|
-
FlowFilter,
|
64
|
-
FlowRunFilter,
|
65
|
-
FlowRunNotificationPolicyFilter,
|
66
|
-
LogFilter,
|
67
|
-
TaskRunFilter,
|
68
|
-
WorkerFilter,
|
69
|
-
WorkPoolFilter,
|
70
|
-
WorkQueueFilter,
|
71
|
-
WorkQueueFilterName,
|
72
|
-
)
|
73
|
-
from prefect.client.schemas.objects import (
|
74
|
-
Artifact,
|
75
|
-
ArtifactCollection,
|
76
|
-
BlockDocument,
|
77
|
-
BlockSchema,
|
78
|
-
BlockType,
|
79
|
-
ConcurrencyLimit,
|
80
|
-
ConcurrencyOptions,
|
81
|
-
Constant,
|
82
|
-
DeploymentSchedule,
|
83
|
-
Flow,
|
84
|
-
FlowRunInput,
|
85
|
-
FlowRunNotificationPolicy,
|
86
|
-
FlowRunPolicy,
|
87
|
-
Log,
|
88
|
-
Parameter,
|
89
|
-
TaskRunPolicy,
|
90
|
-
TaskRunResult,
|
91
|
-
Variable,
|
92
|
-
Worker,
|
93
|
-
WorkerMetadata,
|
94
|
-
WorkPool,
|
95
|
-
WorkQueue,
|
96
|
-
WorkQueueStatusDetail,
|
97
|
-
)
|
98
|
-
from prefect.client.schemas.responses import (
|
99
|
-
DeploymentResponse,
|
100
|
-
FlowRunResponse,
|
101
|
-
GlobalConcurrencyLimitResponse,
|
102
|
-
WorkerFlowRunResponse,
|
103
|
-
)
|
104
|
-
from prefect.client.schemas.schedules import SCHEDULE_TYPES
|
105
|
-
from prefect.client.schemas.sorting import (
|
106
|
-
ArtifactCollectionSort,
|
107
|
-
ArtifactSort,
|
108
|
-
DeploymentSort,
|
109
|
-
FlowRunSort,
|
110
|
-
FlowSort,
|
111
|
-
LogSort,
|
112
|
-
TaskRunSort,
|
113
|
-
)
|
114
|
-
from prefect.events import filters
|
115
|
-
from prefect.events.schemas.automations import Automation, AutomationCore
|
116
|
-
from prefect.logging import get_logger
|
117
|
-
from prefect.settings import (
|
118
|
-
PREFECT_API_AUTH_STRING,
|
119
|
-
PREFECT_API_DATABASE_CONNECTION_URL,
|
120
|
-
PREFECT_API_ENABLE_HTTP2,
|
121
|
-
PREFECT_API_KEY,
|
122
|
-
PREFECT_API_REQUEST_TIMEOUT,
|
123
|
-
PREFECT_API_SSL_CERT_FILE,
|
124
|
-
PREFECT_API_TLS_INSECURE_SKIP_VERIFY,
|
125
|
-
PREFECT_API_URL,
|
126
|
-
PREFECT_CLIENT_CSRF_SUPPORT_ENABLED,
|
127
|
-
PREFECT_CLOUD_API_URL,
|
128
|
-
PREFECT_SERVER_ALLOW_EPHEMERAL_MODE,
|
129
|
-
PREFECT_TESTING_UNIT_TEST_MODE,
|
130
|
-
get_current_settings,
|
131
|
-
)
|
132
|
-
from prefect.types import KeyValueLabelsField
|
133
|
-
|
134
|
-
if TYPE_CHECKING:
|
135
|
-
from prefect.flows import Flow as FlowObject
|
136
|
-
from prefect.tasks import Task as TaskObject
|
137
|
-
|
138
|
-
from prefect.client.base import (
|
139
|
-
ASGIApp,
|
140
|
-
PrefectHttpxAsyncClient,
|
141
|
-
PrefectHttpxSyncClient,
|
142
|
-
ServerType,
|
143
|
-
app_lifespan_context,
|
144
|
-
)
|
145
|
-
|
146
|
-
P = ParamSpec("P")
|
147
|
-
R = TypeVar("R", infer_variance=True)
|
148
|
-
T = TypeVar("T")
|
149
|
-
|
150
|
-
|
151
|
-
@overload
|
152
|
-
def get_client(
|
153
|
-
*,
|
154
|
-
httpx_settings: Optional[dict[str, Any]] = ...,
|
155
|
-
sync_client: Literal[False] = False,
|
156
|
-
) -> "PrefectClient":
|
157
|
-
...
|
158
|
-
|
159
|
-
|
160
|
-
@overload
|
161
|
-
def get_client(
|
162
|
-
*, httpx_settings: Optional[dict[str, Any]] = ..., sync_client: Literal[True] = ...
|
163
|
-
) -> "SyncPrefectClient":
|
164
|
-
...
|
165
|
-
|
166
|
-
|
167
|
-
def get_client(
|
168
|
-
httpx_settings: Optional[dict[str, Any]] = None, sync_client: bool = False
|
169
|
-
) -> Union["SyncPrefectClient", "PrefectClient"]:
|
170
|
-
"""
|
171
|
-
Retrieve a HTTP client for communicating with the Prefect REST API.
|
172
|
-
|
173
|
-
The client must be context managed; for example:
|
174
|
-
|
175
|
-
```python
|
176
|
-
async with get_client() as client:
|
177
|
-
await client.hello()
|
178
|
-
```
|
179
|
-
|
180
|
-
To return a synchronous client, pass sync_client=True:
|
181
|
-
|
182
|
-
```python
|
183
|
-
with get_client(sync_client=True) as client:
|
184
|
-
client.hello()
|
185
|
-
```
|
186
|
-
"""
|
187
|
-
import prefect.context
|
188
|
-
|
189
|
-
# try to load clients from a client context, if possible
|
190
|
-
# only load clients that match the provided config / loop
|
191
|
-
try:
|
192
|
-
loop = asyncio.get_running_loop()
|
193
|
-
except RuntimeError:
|
194
|
-
loop = None
|
195
|
-
|
196
|
-
if sync_client:
|
197
|
-
if client_ctx := prefect.context.SyncClientContext.get():
|
198
|
-
if (
|
199
|
-
client_ctx.client
|
200
|
-
and getattr(client_ctx, "_httpx_settings", None) == httpx_settings
|
201
|
-
):
|
202
|
-
return client_ctx.client
|
203
|
-
else:
|
204
|
-
if client_ctx := prefect.context.AsyncClientContext.get():
|
205
|
-
if (
|
206
|
-
client_ctx.client
|
207
|
-
and getattr(client_ctx, "_httpx_settings", None) == httpx_settings
|
208
|
-
and loop in (getattr(client_ctx.client, "_loop", None), None)
|
209
|
-
):
|
210
|
-
return client_ctx.client
|
211
|
-
|
212
|
-
api: str = PREFECT_API_URL.value()
|
213
|
-
server_type = None
|
214
|
-
|
215
|
-
if not api and PREFECT_SERVER_ALLOW_EPHEMERAL_MODE:
|
216
|
-
# create an ephemeral API if none was provided
|
217
|
-
from prefect.server.api.server import SubprocessASGIServer
|
218
|
-
|
219
|
-
server = SubprocessASGIServer()
|
220
|
-
server.start()
|
221
|
-
assert server.server_process is not None, "Server process did not start"
|
222
|
-
|
223
|
-
api = server.api_url
|
224
|
-
server_type = ServerType.EPHEMERAL
|
225
|
-
elif not api and not PREFECT_SERVER_ALLOW_EPHEMERAL_MODE:
|
226
|
-
raise ValueError(
|
227
|
-
"No Prefect API URL provided. Please set PREFECT_API_URL to the address of a running Prefect server."
|
228
|
-
)
|
229
|
-
|
230
|
-
if sync_client:
|
231
|
-
return SyncPrefectClient(
|
232
|
-
api,
|
233
|
-
auth_string=PREFECT_API_AUTH_STRING.value(),
|
234
|
-
api_key=PREFECT_API_KEY.value(),
|
235
|
-
httpx_settings=httpx_settings,
|
236
|
-
server_type=server_type,
|
237
|
-
)
|
238
|
-
else:
|
239
|
-
return PrefectClient(
|
240
|
-
api,
|
241
|
-
auth_string=PREFECT_API_AUTH_STRING.value(),
|
242
|
-
api_key=PREFECT_API_KEY.value(),
|
243
|
-
httpx_settings=httpx_settings,
|
244
|
-
server_type=server_type,
|
245
|
-
)
|
246
|
-
|
247
|
-
|
248
|
-
class PrefectClient:
|
249
|
-
"""
|
250
|
-
An asynchronous client for interacting with the [Prefect REST API](/api-ref/rest-api/).
|
251
|
-
|
252
|
-
Args:
|
253
|
-
api: the REST API URL or FastAPI application to connect to
|
254
|
-
api_key: An optional API key for authentication.
|
255
|
-
api_version: The API version this client is compatible with.
|
256
|
-
httpx_settings: An optional dictionary of settings to pass to the underlying
|
257
|
-
`httpx.AsyncClient`
|
258
|
-
|
259
|
-
Examples:
|
260
|
-
|
261
|
-
Say hello to a Prefect REST API
|
262
|
-
|
263
|
-
<div class="terminal">
|
264
|
-
```
|
265
|
-
>>> async with get_client() as client:
|
266
|
-
>>> response = await client.hello()
|
267
|
-
>>>
|
268
|
-
>>> print(response.json())
|
269
|
-
👋
|
270
|
-
```
|
271
|
-
</div>
|
272
|
-
"""
|
273
|
-
|
274
|
-
def __init__(
|
275
|
-
self,
|
276
|
-
api: Union[str, ASGIApp],
|
277
|
-
*,
|
278
|
-
auth_string: Optional[str] = None,
|
279
|
-
api_key: Optional[str] = None,
|
280
|
-
api_version: Optional[str] = None,
|
281
|
-
httpx_settings: Optional[dict[str, Any]] = None,
|
282
|
-
server_type: Optional[ServerType] = None,
|
283
|
-
) -> None:
|
284
|
-
httpx_settings = httpx_settings.copy() if httpx_settings else {}
|
285
|
-
httpx_settings.setdefault("headers", {})
|
286
|
-
|
287
|
-
if PREFECT_API_TLS_INSECURE_SKIP_VERIFY:
|
288
|
-
# Create an unverified context for insecure connections
|
289
|
-
ctx = ssl.create_default_context()
|
290
|
-
ctx.check_hostname = False
|
291
|
-
ctx.verify_mode = ssl.CERT_NONE
|
292
|
-
httpx_settings.setdefault("verify", ctx)
|
293
|
-
else:
|
294
|
-
cert_file = PREFECT_API_SSL_CERT_FILE.value()
|
295
|
-
if not cert_file:
|
296
|
-
cert_file = certifi.where()
|
297
|
-
# Create a verified context with the certificate file
|
298
|
-
ctx = ssl.create_default_context(cafile=cert_file)
|
299
|
-
httpx_settings.setdefault("verify", ctx)
|
300
|
-
|
301
|
-
if api_version is None:
|
302
|
-
api_version = SERVER_API_VERSION
|
303
|
-
httpx_settings["headers"].setdefault("X-PREFECT-API-VERSION", api_version)
|
304
|
-
if api_key:
|
305
|
-
httpx_settings["headers"].setdefault("Authorization", f"Bearer {api_key}")
|
306
|
-
|
307
|
-
if auth_string:
|
308
|
-
token = base64.b64encode(auth_string.encode("utf-8")).decode("utf-8")
|
309
|
-
httpx_settings["headers"].setdefault("Authorization", f"Basic {token}")
|
310
|
-
|
311
|
-
# Context management
|
312
|
-
self._context_stack: int = 0
|
313
|
-
self._exit_stack = AsyncExitStack()
|
314
|
-
self._ephemeral_app: Optional[ASGIApp] = None
|
315
|
-
self.manage_lifespan = True
|
316
|
-
self.server_type: ServerType
|
317
|
-
|
318
|
-
# Only set if this client started the lifespan of the application
|
319
|
-
self._ephemeral_lifespan: Optional[LifespanManager] = None
|
320
|
-
|
321
|
-
self._closed = False
|
322
|
-
self._started = False
|
323
|
-
|
324
|
-
# Connect to an external application
|
325
|
-
if isinstance(api, str):
|
326
|
-
if httpx_settings.get("app"):
|
327
|
-
raise ValueError(
|
328
|
-
"Invalid httpx settings: `app` cannot be set when providing an "
|
329
|
-
"api url. `app` is only for use with ephemeral instances. Provide "
|
330
|
-
"it as the `api` parameter instead."
|
331
|
-
)
|
332
|
-
httpx_settings.setdefault("base_url", api)
|
333
|
-
|
334
|
-
# See https://www.python-httpx.org/advanced/#pool-limit-configuration
|
335
|
-
httpx_settings.setdefault(
|
336
|
-
"limits",
|
337
|
-
httpx.Limits(
|
338
|
-
# We see instability when allowing the client to open many connections at once.
|
339
|
-
# Limiting concurrency results in more stable performance.
|
340
|
-
max_connections=16,
|
341
|
-
max_keepalive_connections=8,
|
342
|
-
# The Prefect Cloud LB will keep connections alive for 30s.
|
343
|
-
# Only allow the client to keep connections alive for 25s.
|
344
|
-
keepalive_expiry=25,
|
345
|
-
),
|
346
|
-
)
|
347
|
-
|
348
|
-
# See https://www.python-httpx.org/http2/
|
349
|
-
# Enabling HTTP/2 support on the client does not necessarily mean that your requests
|
350
|
-
# and responses will be transported over HTTP/2, since both the client and the server
|
351
|
-
# need to support HTTP/2. If you connect to a server that only supports HTTP/1.1 the
|
352
|
-
# client will use a standard HTTP/1.1 connection instead.
|
353
|
-
httpx_settings.setdefault("http2", PREFECT_API_ENABLE_HTTP2.value())
|
354
|
-
|
355
|
-
if server_type:
|
356
|
-
self.server_type = server_type
|
357
|
-
else:
|
358
|
-
self.server_type = (
|
359
|
-
ServerType.CLOUD
|
360
|
-
if api.startswith(PREFECT_CLOUD_API_URL.value())
|
361
|
-
else ServerType.SERVER
|
362
|
-
)
|
363
|
-
|
364
|
-
# Connect to an in-process application
|
365
|
-
else:
|
366
|
-
self._ephemeral_app = api
|
367
|
-
self.server_type = ServerType.EPHEMERAL
|
368
|
-
|
369
|
-
# When using an ephemeral server, server-side exceptions can be raised
|
370
|
-
# client-side breaking all of our response error code handling. To work
|
371
|
-
# around this, we create an ASGI transport with application exceptions
|
372
|
-
# disabled instead of using the application directly.
|
373
|
-
# refs:
|
374
|
-
# - https://github.com/PrefectHQ/prefect/pull/9637
|
375
|
-
# - https://github.com/encode/starlette/blob/d3a11205ed35f8e5a58a711db0ff59c86fa7bb31/starlette/middleware/errors.py#L184
|
376
|
-
# - https://github.com/tiangolo/fastapi/blob/8cc967a7605d3883bd04ceb5d25cc94ae079612f/fastapi/applications.py#L163-L164
|
377
|
-
httpx_settings.setdefault(
|
378
|
-
"transport",
|
379
|
-
httpx.ASGITransport(
|
380
|
-
app=self._ephemeral_app, raise_app_exceptions=False
|
381
|
-
),
|
382
|
-
)
|
383
|
-
httpx_settings.setdefault("base_url", "http://ephemeral-prefect/api")
|
384
|
-
|
385
|
-
# See https://www.python-httpx.org/advanced/#timeout-configuration
|
386
|
-
httpx_settings.setdefault(
|
387
|
-
"timeout",
|
388
|
-
httpx.Timeout(
|
389
|
-
connect=PREFECT_API_REQUEST_TIMEOUT.value(),
|
390
|
-
read=PREFECT_API_REQUEST_TIMEOUT.value(),
|
391
|
-
write=PREFECT_API_REQUEST_TIMEOUT.value(),
|
392
|
-
pool=PREFECT_API_REQUEST_TIMEOUT.value(),
|
393
|
-
),
|
394
|
-
)
|
395
|
-
|
396
|
-
if not PREFECT_TESTING_UNIT_TEST_MODE:
|
397
|
-
httpx_settings.setdefault("follow_redirects", True)
|
398
|
-
|
399
|
-
enable_csrf_support = (
|
400
|
-
self.server_type != ServerType.CLOUD
|
401
|
-
and PREFECT_CLIENT_CSRF_SUPPORT_ENABLED.value()
|
402
|
-
)
|
403
|
-
|
404
|
-
self._client = PrefectHttpxAsyncClient(
|
405
|
-
**httpx_settings, enable_csrf_support=enable_csrf_support
|
406
|
-
)
|
407
|
-
self._loop = None
|
408
|
-
|
409
|
-
# See https://www.python-httpx.org/advanced/#custom-transports
|
410
|
-
#
|
411
|
-
# If we're using an HTTP/S client (not the ephemeral client), adjust the
|
412
|
-
# transport to add retries _after_ it is instantiated. If we alter the transport
|
413
|
-
# before instantiation, the transport will not be aware of proxies unless we
|
414
|
-
# reproduce all of the logic to make it so.
|
415
|
-
#
|
416
|
-
# Only alter the transport to set our default of 3 retries, don't modify any
|
417
|
-
# transport a user may have provided via httpx_settings.
|
418
|
-
#
|
419
|
-
# Making liberal use of getattr and isinstance checks here to avoid any
|
420
|
-
# surprises if the internals of httpx or httpcore change on us
|
421
|
-
if isinstance(api, str) and not httpx_settings.get("transport"):
|
422
|
-
transport_for_url = getattr(self._client, "_transport_for_url", None)
|
423
|
-
if callable(transport_for_url):
|
424
|
-
server_transport = transport_for_url(httpx.URL(api))
|
425
|
-
if isinstance(server_transport, httpx.AsyncHTTPTransport):
|
426
|
-
pool = getattr(server_transport, "_pool", None)
|
427
|
-
if isinstance(pool, httpcore.AsyncConnectionPool):
|
428
|
-
setattr(pool, "_retries", 3)
|
429
|
-
|
430
|
-
self.logger: Logger = get_logger("client")
|
431
|
-
|
432
|
-
@property
|
433
|
-
def api_url(self) -> httpx.URL:
|
434
|
-
"""
|
435
|
-
Get the base URL for the API.
|
436
|
-
"""
|
437
|
-
return self._client.base_url
|
438
|
-
|
439
|
-
# API methods ----------------------------------------------------------------------
|
440
|
-
|
441
|
-
async def api_healthcheck(self) -> Optional[Exception]:
|
442
|
-
"""
|
443
|
-
Attempts to connect to the API and returns the encountered exception if not
|
444
|
-
successful.
|
445
|
-
|
446
|
-
If successful, returns `None`.
|
447
|
-
"""
|
448
|
-
try:
|
449
|
-
await self._client.get("/health")
|
450
|
-
return None
|
451
|
-
except Exception as exc:
|
452
|
-
return exc
|
453
|
-
|
454
|
-
async def hello(self) -> httpx.Response:
|
455
|
-
"""
|
456
|
-
Send a GET request to /hello for testing purposes.
|
457
|
-
"""
|
458
|
-
return await self._client.get("/hello")
|
459
|
-
|
460
|
-
async def create_flow(self, flow: "FlowObject[Any, Any]") -> UUID:
|
461
|
-
"""
|
462
|
-
Create a flow in the Prefect API.
|
463
|
-
|
464
|
-
Args:
|
465
|
-
flow: a [Flow][prefect.flows.Flow] object
|
466
|
-
|
467
|
-
Raises:
|
468
|
-
httpx.RequestError: if a flow was not created for any reason
|
469
|
-
|
470
|
-
Returns:
|
471
|
-
the ID of the flow in the backend
|
472
|
-
"""
|
473
|
-
return await self.create_flow_from_name(flow.name)
|
474
|
-
|
475
|
-
async def create_flow_from_name(self, flow_name: str) -> UUID:
|
476
|
-
"""
|
477
|
-
Create a flow in the Prefect API.
|
478
|
-
|
479
|
-
Args:
|
480
|
-
flow_name: the name of the new flow
|
481
|
-
|
482
|
-
Raises:
|
483
|
-
httpx.RequestError: if a flow was not created for any reason
|
484
|
-
|
485
|
-
Returns:
|
486
|
-
the ID of the flow in the backend
|
487
|
-
"""
|
488
|
-
flow_data = FlowCreate(name=flow_name)
|
489
|
-
response = await self._client.post(
|
490
|
-
"/flows/", json=flow_data.model_dump(mode="json")
|
491
|
-
)
|
492
|
-
|
493
|
-
flow_id = response.json().get("id")
|
494
|
-
if not flow_id:
|
495
|
-
raise httpx.RequestError(f"Malformed response: {response}")
|
496
|
-
|
497
|
-
# Return the id of the created flow
|
498
|
-
return UUID(flow_id)
|
499
|
-
|
500
|
-
async def read_flow(self, flow_id: UUID) -> Flow:
|
501
|
-
"""
|
502
|
-
Query the Prefect API for a flow by id.
|
503
|
-
|
504
|
-
Args:
|
505
|
-
flow_id: the flow ID of interest
|
506
|
-
|
507
|
-
Returns:
|
508
|
-
a [Flow model][prefect.client.schemas.objects.Flow] representation of the flow
|
509
|
-
"""
|
510
|
-
response = await self._client.get(f"/flows/{flow_id}")
|
511
|
-
return Flow.model_validate(response.json())
|
512
|
-
|
513
|
-
async def delete_flow(self, flow_id: UUID) -> None:
|
514
|
-
"""
|
515
|
-
Delete a flow by UUID.
|
516
|
-
|
517
|
-
Args:
|
518
|
-
flow_id: ID of the flow to be deleted
|
519
|
-
Raises:
|
520
|
-
prefect.exceptions.ObjectNotFound: If request returns 404
|
521
|
-
httpx.RequestError: If requests fail
|
522
|
-
"""
|
523
|
-
try:
|
524
|
-
await self._client.delete(f"/flows/{flow_id}")
|
525
|
-
except httpx.HTTPStatusError as e:
|
526
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
527
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
528
|
-
else:
|
529
|
-
raise
|
530
|
-
|
531
|
-
async def read_flows(
|
532
|
-
self,
|
533
|
-
*,
|
534
|
-
flow_filter: Optional[FlowFilter] = None,
|
535
|
-
flow_run_filter: Optional[FlowRunFilter] = None,
|
536
|
-
task_run_filter: Optional[TaskRunFilter] = None,
|
537
|
-
deployment_filter: Optional[DeploymentFilter] = None,
|
538
|
-
work_pool_filter: Optional[WorkPoolFilter] = None,
|
539
|
-
work_queue_filter: Optional[WorkQueueFilter] = None,
|
540
|
-
sort: Optional[FlowSort] = None,
|
541
|
-
limit: Optional[int] = None,
|
542
|
-
offset: int = 0,
|
543
|
-
) -> list[Flow]:
|
544
|
-
"""
|
545
|
-
Query the Prefect API for flows. Only flows matching all criteria will
|
546
|
-
be returned.
|
547
|
-
|
548
|
-
Args:
|
549
|
-
flow_filter: filter criteria for flows
|
550
|
-
flow_run_filter: filter criteria for flow runs
|
551
|
-
task_run_filter: filter criteria for task runs
|
552
|
-
deployment_filter: filter criteria for deployments
|
553
|
-
work_pool_filter: filter criteria for work pools
|
554
|
-
work_queue_filter: filter criteria for work pool queues
|
555
|
-
sort: sort criteria for the flows
|
556
|
-
limit: limit for the flow query
|
557
|
-
offset: offset for the flow query
|
558
|
-
|
559
|
-
Returns:
|
560
|
-
a list of Flow model representations of the flows
|
561
|
-
"""
|
562
|
-
body: dict[str, Any] = {
|
563
|
-
"flows": flow_filter.model_dump(mode="json") if flow_filter else None,
|
564
|
-
"flow_runs": (
|
565
|
-
flow_run_filter.model_dump(mode="json", exclude_unset=True)
|
566
|
-
if flow_run_filter
|
567
|
-
else None
|
568
|
-
),
|
569
|
-
"task_runs": (
|
570
|
-
task_run_filter.model_dump(mode="json") if task_run_filter else None
|
571
|
-
),
|
572
|
-
"deployments": (
|
573
|
-
deployment_filter.model_dump(mode="json") if deployment_filter else None
|
574
|
-
),
|
575
|
-
"work_pools": (
|
576
|
-
work_pool_filter.model_dump(mode="json") if work_pool_filter else None
|
577
|
-
),
|
578
|
-
"work_queues": (
|
579
|
-
work_queue_filter.model_dump(mode="json") if work_queue_filter else None
|
580
|
-
),
|
581
|
-
"sort": sort,
|
582
|
-
"limit": limit,
|
583
|
-
"offset": offset,
|
584
|
-
}
|
585
|
-
|
586
|
-
response = await self._client.post("/flows/filter", json=body)
|
587
|
-
return pydantic.TypeAdapter(list[Flow]).validate_python(response.json())
|
588
|
-
|
589
|
-
async def read_flow_by_name(
|
590
|
-
self,
|
591
|
-
flow_name: str,
|
592
|
-
) -> Flow:
|
593
|
-
"""
|
594
|
-
Query the Prefect API for a flow by name.
|
595
|
-
|
596
|
-
Args:
|
597
|
-
flow_name: the name of a flow
|
598
|
-
|
599
|
-
Returns:
|
600
|
-
a fully hydrated Flow model
|
601
|
-
"""
|
602
|
-
response = await self._client.get(f"/flows/name/{flow_name}")
|
603
|
-
return Flow.model_validate(response.json())
|
604
|
-
|
605
|
-
async def create_flow_run_from_deployment(
|
606
|
-
self,
|
607
|
-
deployment_id: UUID,
|
608
|
-
*,
|
609
|
-
parameters: Optional[dict[str, Any]] = None,
|
610
|
-
context: Optional[dict[str, Any]] = None,
|
611
|
-
state: Optional[prefect.states.State[Any]] = None,
|
612
|
-
name: Optional[str] = None,
|
613
|
-
tags: Optional[Iterable[str]] = None,
|
614
|
-
idempotency_key: Optional[str] = None,
|
615
|
-
parent_task_run_id: Optional[UUID] = None,
|
616
|
-
work_queue_name: Optional[str] = None,
|
617
|
-
job_variables: Optional[dict[str, Any]] = None,
|
618
|
-
) -> FlowRun:
|
619
|
-
"""
|
620
|
-
Create a flow run for a deployment.
|
621
|
-
|
622
|
-
Args:
|
623
|
-
deployment_id: The deployment ID to create the flow run from
|
624
|
-
parameters: Parameter overrides for this flow run. Merged with the
|
625
|
-
deployment defaults
|
626
|
-
context: Optional run context data
|
627
|
-
state: The initial state for the run. If not provided, defaults to
|
628
|
-
`Scheduled` for now. Should always be a `Scheduled` type.
|
629
|
-
name: An optional name for the flow run. If not provided, the server will
|
630
|
-
generate a name.
|
631
|
-
tags: An optional iterable of tags to apply to the flow run; these tags
|
632
|
-
are merged with the deployment's tags.
|
633
|
-
idempotency_key: Optional idempotency key for creation of the flow run.
|
634
|
-
If the key matches the key of an existing flow run, the existing run will
|
635
|
-
be returned instead of creating a new one.
|
636
|
-
parent_task_run_id: if a subflow run is being created, the placeholder task
|
637
|
-
run identifier in the parent flow
|
638
|
-
work_queue_name: An optional work queue name to add this run to. If not provided,
|
639
|
-
will default to the deployment's set work queue. If one is provided that does not
|
640
|
-
exist, a new work queue will be created within the deployment's work pool.
|
641
|
-
job_variables: Optional variables that will be supplied to the flow run job.
|
642
|
-
|
643
|
-
Raises:
|
644
|
-
httpx.RequestError: if the Prefect API does not successfully create a run for any reason
|
645
|
-
|
646
|
-
Returns:
|
647
|
-
The flow run model
|
648
|
-
"""
|
649
|
-
parameters = parameters or {}
|
650
|
-
context = context or {}
|
651
|
-
state = state or prefect.states.Scheduled()
|
652
|
-
tags = tags or []
|
653
|
-
|
654
|
-
flow_run_create = DeploymentFlowRunCreate(
|
655
|
-
parameters=parameters,
|
656
|
-
context=context,
|
657
|
-
state=state.to_state_create(),
|
658
|
-
tags=list(tags),
|
659
|
-
name=name,
|
660
|
-
idempotency_key=idempotency_key,
|
661
|
-
parent_task_run_id=parent_task_run_id,
|
662
|
-
job_variables=job_variables,
|
663
|
-
)
|
664
|
-
|
665
|
-
# done separately to avoid including this field in payloads sent to older API versions
|
666
|
-
if work_queue_name:
|
667
|
-
flow_run_create.work_queue_name = work_queue_name
|
668
|
-
|
669
|
-
response = await self._client.post(
|
670
|
-
f"/deployments/{deployment_id}/create_flow_run",
|
671
|
-
json=flow_run_create.model_dump(mode="json", exclude_unset=True),
|
672
|
-
)
|
673
|
-
return FlowRun.model_validate(response.json())
|
674
|
-
|
675
|
-
async def create_flow_run(
|
676
|
-
self,
|
677
|
-
flow: "FlowObject[Any, R]",
|
678
|
-
name: Optional[str] = None,
|
679
|
-
parameters: Optional[dict[str, Any]] = None,
|
680
|
-
context: Optional[dict[str, Any]] = None,
|
681
|
-
tags: Optional[Iterable[str]] = None,
|
682
|
-
parent_task_run_id: Optional[UUID] = None,
|
683
|
-
state: Optional["prefect.states.State[R]"] = None,
|
684
|
-
) -> FlowRun:
|
685
|
-
"""
|
686
|
-
Create a flow run for a flow.
|
687
|
-
|
688
|
-
Args:
|
689
|
-
flow: The flow model to create the flow run for
|
690
|
-
name: An optional name for the flow run
|
691
|
-
parameters: Parameter overrides for this flow run.
|
692
|
-
context: Optional run context data
|
693
|
-
tags: a list of tags to apply to this flow run
|
694
|
-
parent_task_run_id: if a subflow run is being created, the placeholder task
|
695
|
-
run identifier in the parent flow
|
696
|
-
state: The initial state for the run. If not provided, defaults to
|
697
|
-
`Scheduled` for now. Should always be a `Scheduled` type.
|
698
|
-
|
699
|
-
Raises:
|
700
|
-
httpx.RequestError: if the Prefect API does not successfully create a run for any reason
|
701
|
-
|
702
|
-
Returns:
|
703
|
-
The flow run model
|
704
|
-
"""
|
705
|
-
parameters = parameters or {}
|
706
|
-
context = context or {}
|
707
|
-
|
708
|
-
if state is None:
|
709
|
-
state = prefect.states.Pending()
|
710
|
-
|
711
|
-
# Retrieve the flow id
|
712
|
-
flow_id = await self.create_flow(flow)
|
713
|
-
|
714
|
-
flow_run_create = FlowRunCreate(
|
715
|
-
flow_id=flow_id,
|
716
|
-
flow_version=flow.version,
|
717
|
-
name=name,
|
718
|
-
parameters=parameters,
|
719
|
-
context=context,
|
720
|
-
tags=list(tags or []),
|
721
|
-
parent_task_run_id=parent_task_run_id,
|
722
|
-
state=state.to_state_create(),
|
723
|
-
empirical_policy=FlowRunPolicy(
|
724
|
-
retries=flow.retries,
|
725
|
-
retry_delay=int(flow.retry_delay_seconds or 0),
|
726
|
-
),
|
727
|
-
)
|
728
|
-
|
729
|
-
flow_run_create_json = flow_run_create.model_dump(mode="json")
|
730
|
-
response = await self._client.post("/flow_runs/", json=flow_run_create_json)
|
731
|
-
flow_run = FlowRun.model_validate(response.json())
|
732
|
-
|
733
|
-
# Restore the parameters to the local objects to retain expectations about
|
734
|
-
# Python objects
|
735
|
-
flow_run.parameters = parameters
|
736
|
-
|
737
|
-
return flow_run
|
738
|
-
|
739
|
-
async def update_flow_run(
|
740
|
-
self,
|
741
|
-
flow_run_id: UUID,
|
742
|
-
flow_version: Optional[str] = None,
|
743
|
-
parameters: Optional[dict[str, Any]] = None,
|
744
|
-
name: Optional[str] = None,
|
745
|
-
tags: Optional[Iterable[str]] = None,
|
746
|
-
empirical_policy: Optional[FlowRunPolicy] = None,
|
747
|
-
infrastructure_pid: Optional[str] = None,
|
748
|
-
job_variables: Optional[dict[str, Any]] = None,
|
749
|
-
) -> httpx.Response:
|
750
|
-
"""
|
751
|
-
Update a flow run's details.
|
752
|
-
|
753
|
-
Args:
|
754
|
-
flow_run_id: The identifier for the flow run to update.
|
755
|
-
flow_version: A new version string for the flow run.
|
756
|
-
parameters: A dictionary of parameter values for the flow run. This will not
|
757
|
-
be merged with any existing parameters.
|
758
|
-
name: A new name for the flow run.
|
759
|
-
empirical_policy: A new flow run orchestration policy. This will not be
|
760
|
-
merged with any existing policy.
|
761
|
-
tags: An iterable of new tags for the flow run. These will not be merged with
|
762
|
-
any existing tags.
|
763
|
-
infrastructure_pid: The id of flow run as returned by an
|
764
|
-
infrastructure block.
|
765
|
-
|
766
|
-
Returns:
|
767
|
-
an `httpx.Response` object from the PATCH request
|
768
|
-
"""
|
769
|
-
params: dict[str, Any] = {}
|
770
|
-
if flow_version is not None:
|
771
|
-
params["flow_version"] = flow_version
|
772
|
-
if parameters is not None:
|
773
|
-
params["parameters"] = parameters
|
774
|
-
if name is not None:
|
775
|
-
params["name"] = name
|
776
|
-
if tags is not None:
|
777
|
-
params["tags"] = tags
|
778
|
-
if empirical_policy is not None:
|
779
|
-
params["empirical_policy"] = empirical_policy
|
780
|
-
if infrastructure_pid:
|
781
|
-
params["infrastructure_pid"] = infrastructure_pid
|
782
|
-
if job_variables is not None:
|
783
|
-
params["job_variables"] = job_variables
|
784
|
-
|
785
|
-
flow_run_data = FlowRunUpdate(**params)
|
786
|
-
|
787
|
-
return await self._client.patch(
|
788
|
-
f"/flow_runs/{flow_run_id}",
|
789
|
-
json=flow_run_data.model_dump(mode="json", exclude_unset=True),
|
790
|
-
)
|
791
|
-
|
792
|
-
async def delete_flow_run(
|
793
|
-
self,
|
794
|
-
flow_run_id: UUID,
|
795
|
-
) -> None:
|
796
|
-
"""
|
797
|
-
Delete a flow run by UUID.
|
798
|
-
|
799
|
-
Args:
|
800
|
-
flow_run_id: The flow run UUID of interest.
|
801
|
-
Raises:
|
802
|
-
prefect.exceptions.ObjectNotFound: If request returns 404
|
803
|
-
httpx.RequestError: If requests fails
|
804
|
-
"""
|
805
|
-
try:
|
806
|
-
await self._client.delete(f"/flow_runs/{flow_run_id}")
|
807
|
-
except httpx.HTTPStatusError as e:
|
808
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
809
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
810
|
-
else:
|
811
|
-
raise
|
812
|
-
|
813
|
-
async def create_concurrency_limit(
|
814
|
-
self,
|
815
|
-
tag: str,
|
816
|
-
concurrency_limit: int,
|
817
|
-
) -> UUID:
|
818
|
-
"""
|
819
|
-
Create a tag concurrency limit in the Prefect API. These limits govern concurrently
|
820
|
-
running tasks.
|
821
|
-
|
822
|
-
Args:
|
823
|
-
tag: a tag the concurrency limit is applied to
|
824
|
-
concurrency_limit: the maximum number of concurrent task runs for a given tag
|
825
|
-
|
826
|
-
Raises:
|
827
|
-
httpx.RequestError: if the concurrency limit was not created for any reason
|
828
|
-
|
829
|
-
Returns:
|
830
|
-
the ID of the concurrency limit in the backend
|
831
|
-
"""
|
832
|
-
|
833
|
-
concurrency_limit_create = ConcurrencyLimitCreate(
|
834
|
-
tag=tag,
|
835
|
-
concurrency_limit=concurrency_limit,
|
836
|
-
)
|
837
|
-
response = await self._client.post(
|
838
|
-
"/concurrency_limits/",
|
839
|
-
json=concurrency_limit_create.model_dump(mode="json"),
|
840
|
-
)
|
841
|
-
|
842
|
-
concurrency_limit_id = response.json().get("id")
|
843
|
-
|
844
|
-
if not concurrency_limit_id:
|
845
|
-
raise httpx.RequestError(f"Malformed response: {response}")
|
846
|
-
|
847
|
-
return UUID(concurrency_limit_id)
|
848
|
-
|
849
|
-
async def read_concurrency_limit_by_tag(
|
850
|
-
self,
|
851
|
-
tag: str,
|
852
|
-
) -> ConcurrencyLimit:
|
853
|
-
"""
|
854
|
-
Read the concurrency limit set on a specific tag.
|
855
|
-
|
856
|
-
Args:
|
857
|
-
tag: a tag the concurrency limit is applied to
|
858
|
-
|
859
|
-
Raises:
|
860
|
-
prefect.exceptions.ObjectNotFound: If request returns 404
|
861
|
-
httpx.RequestError: if the concurrency limit was not created for any reason
|
862
|
-
|
863
|
-
Returns:
|
864
|
-
the concurrency limit set on a specific tag
|
865
|
-
"""
|
866
|
-
try:
|
867
|
-
response = await self._client.get(
|
868
|
-
f"/concurrency_limits/tag/{tag}",
|
869
|
-
)
|
870
|
-
except httpx.HTTPStatusError as e:
|
871
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
872
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
873
|
-
else:
|
874
|
-
raise
|
875
|
-
|
876
|
-
concurrency_limit_id = response.json().get("id")
|
877
|
-
|
878
|
-
if not concurrency_limit_id:
|
879
|
-
raise httpx.RequestError(f"Malformed response: {response}")
|
880
|
-
|
881
|
-
concurrency_limit = ConcurrencyLimit.model_validate(response.json())
|
882
|
-
return concurrency_limit
|
883
|
-
|
884
|
-
async def read_concurrency_limits(
|
885
|
-
self,
|
886
|
-
limit: int,
|
887
|
-
offset: int,
|
888
|
-
) -> list[ConcurrencyLimit]:
|
889
|
-
"""
|
890
|
-
Lists concurrency limits set on task run tags.
|
891
|
-
|
892
|
-
Args:
|
893
|
-
limit: the maximum number of concurrency limits returned
|
894
|
-
offset: the concurrency limit query offset
|
895
|
-
|
896
|
-
Returns:
|
897
|
-
a list of concurrency limits
|
898
|
-
"""
|
899
|
-
|
900
|
-
body = {
|
901
|
-
"limit": limit,
|
902
|
-
"offset": offset,
|
903
|
-
}
|
904
|
-
|
905
|
-
response = await self._client.post("/concurrency_limits/filter", json=body)
|
906
|
-
return pydantic.TypeAdapter(list[ConcurrencyLimit]).validate_python(
|
907
|
-
response.json()
|
908
|
-
)
|
909
|
-
|
910
|
-
async def reset_concurrency_limit_by_tag(
|
911
|
-
self,
|
912
|
-
tag: str,
|
913
|
-
slot_override: Optional[list[Union[UUID, str]]] = None,
|
914
|
-
) -> None:
|
915
|
-
"""
|
916
|
-
Resets the concurrency limit slots set on a specific tag.
|
917
|
-
|
918
|
-
Args:
|
919
|
-
tag: a tag the concurrency limit is applied to
|
920
|
-
slot_override: a list of task run IDs that are currently using a
|
921
|
-
concurrency slot, please check that any task run IDs included in
|
922
|
-
`slot_override` are currently running, otherwise those concurrency
|
923
|
-
slots will never be released.
|
924
|
-
|
925
|
-
Raises:
|
926
|
-
prefect.exceptions.ObjectNotFound: If request returns 404
|
927
|
-
httpx.RequestError: If request fails
|
928
|
-
|
929
|
-
"""
|
930
|
-
if slot_override is not None:
|
931
|
-
slot_override = [str(slot) for slot in slot_override]
|
932
|
-
|
933
|
-
try:
|
934
|
-
await self._client.post(
|
935
|
-
f"/concurrency_limits/tag/{tag}/reset",
|
936
|
-
json=dict(slot_override=slot_override),
|
937
|
-
)
|
938
|
-
except httpx.HTTPStatusError as e:
|
939
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
940
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
941
|
-
else:
|
942
|
-
raise
|
943
|
-
|
944
|
-
async def delete_concurrency_limit_by_tag(
|
945
|
-
self,
|
946
|
-
tag: str,
|
947
|
-
) -> None:
|
948
|
-
"""
|
949
|
-
Delete the concurrency limit set on a specific tag.
|
950
|
-
|
951
|
-
Args:
|
952
|
-
tag: a tag the concurrency limit is applied to
|
953
|
-
|
954
|
-
Raises:
|
955
|
-
prefect.exceptions.ObjectNotFound: If request returns 404
|
956
|
-
httpx.RequestError: If request fails
|
957
|
-
|
958
|
-
"""
|
959
|
-
try:
|
960
|
-
await self._client.delete(
|
961
|
-
f"/concurrency_limits/tag/{tag}",
|
962
|
-
)
|
963
|
-
except httpx.HTTPStatusError as e:
|
964
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
965
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
966
|
-
else:
|
967
|
-
raise
|
968
|
-
|
969
|
-
async def increment_v1_concurrency_slots(
|
970
|
-
self,
|
971
|
-
names: list[str],
|
972
|
-
task_run_id: UUID,
|
973
|
-
) -> httpx.Response:
|
974
|
-
"""
|
975
|
-
Increment concurrency limit slots for the specified limits.
|
976
|
-
|
977
|
-
Args:
|
978
|
-
names (List[str]): A list of limit names for which to increment limits.
|
979
|
-
task_run_id (UUID): The task run ID incrementing the limits.
|
980
|
-
"""
|
981
|
-
data: dict[str, Any] = {
|
982
|
-
"names": names,
|
983
|
-
"task_run_id": str(task_run_id),
|
984
|
-
}
|
985
|
-
|
986
|
-
return await self._client.post(
|
987
|
-
"/concurrency_limits/increment",
|
988
|
-
json=data,
|
989
|
-
)
|
990
|
-
|
991
|
-
async def decrement_v1_concurrency_slots(
|
992
|
-
self,
|
993
|
-
names: list[str],
|
994
|
-
task_run_id: UUID,
|
995
|
-
occupancy_seconds: float,
|
996
|
-
) -> httpx.Response:
|
997
|
-
"""
|
998
|
-
Decrement concurrency limit slots for the specified limits.
|
999
|
-
|
1000
|
-
Args:
|
1001
|
-
names (List[str]): A list of limit names to decrement.
|
1002
|
-
task_run_id (UUID): The task run ID that incremented the limits.
|
1003
|
-
occupancy_seconds (float): The duration in seconds that the limits
|
1004
|
-
were held.
|
1005
|
-
|
1006
|
-
Returns:
|
1007
|
-
httpx.Response: The HTTP response from the server.
|
1008
|
-
"""
|
1009
|
-
data: dict[str, Any] = {
|
1010
|
-
"names": names,
|
1011
|
-
"task_run_id": str(task_run_id),
|
1012
|
-
"occupancy_seconds": occupancy_seconds,
|
1013
|
-
}
|
1014
|
-
|
1015
|
-
return await self._client.post(
|
1016
|
-
"/concurrency_limits/decrement",
|
1017
|
-
json=data,
|
1018
|
-
)
|
1019
|
-
|
1020
|
-
async def create_work_queue(
|
1021
|
-
self,
|
1022
|
-
name: str,
|
1023
|
-
description: Optional[str] = None,
|
1024
|
-
is_paused: Optional[bool] = None,
|
1025
|
-
concurrency_limit: Optional[int] = None,
|
1026
|
-
priority: Optional[int] = None,
|
1027
|
-
work_pool_name: Optional[str] = None,
|
1028
|
-
) -> WorkQueue:
|
1029
|
-
"""
|
1030
|
-
Create a work queue.
|
1031
|
-
|
1032
|
-
Args:
|
1033
|
-
name: a unique name for the work queue
|
1034
|
-
description: An optional description for the work queue.
|
1035
|
-
is_paused: Whether or not the work queue is paused.
|
1036
|
-
concurrency_limit: An optional concurrency limit for the work queue.
|
1037
|
-
priority: The queue's priority. Lower values are higher priority (1 is the highest).
|
1038
|
-
work_pool_name: The name of the work pool to use for this queue.
|
1039
|
-
|
1040
|
-
Raises:
|
1041
|
-
prefect.exceptions.ObjectAlreadyExists: If request returns 409
|
1042
|
-
httpx.RequestError: If request fails
|
1043
|
-
|
1044
|
-
Returns:
|
1045
|
-
The created work queue
|
1046
|
-
"""
|
1047
|
-
create_model = WorkQueueCreate(name=name, filter=None)
|
1048
|
-
if description is not None:
|
1049
|
-
create_model.description = description
|
1050
|
-
if is_paused is not None:
|
1051
|
-
create_model.is_paused = is_paused
|
1052
|
-
if concurrency_limit is not None:
|
1053
|
-
create_model.concurrency_limit = concurrency_limit
|
1054
|
-
if priority is not None:
|
1055
|
-
create_model.priority = priority
|
1056
|
-
|
1057
|
-
data = create_model.model_dump(mode="json")
|
1058
|
-
try:
|
1059
|
-
if work_pool_name is not None:
|
1060
|
-
response = await self._client.post(
|
1061
|
-
f"/work_pools/{work_pool_name}/queues", json=data
|
1062
|
-
)
|
1063
|
-
else:
|
1064
|
-
response = await self._client.post("/work_queues/", json=data)
|
1065
|
-
except httpx.HTTPStatusError as e:
|
1066
|
-
if e.response.status_code == status.HTTP_409_CONFLICT:
|
1067
|
-
raise prefect.exceptions.ObjectAlreadyExists(http_exc=e) from e
|
1068
|
-
elif e.response.status_code == status.HTTP_404_NOT_FOUND:
|
1069
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
1070
|
-
else:
|
1071
|
-
raise
|
1072
|
-
return WorkQueue.model_validate(response.json())
|
1073
|
-
|
1074
|
-
async def read_work_queue_by_name(
|
1075
|
-
self,
|
1076
|
-
name: str,
|
1077
|
-
work_pool_name: Optional[str] = None,
|
1078
|
-
) -> WorkQueue:
|
1079
|
-
"""
|
1080
|
-
Read a work queue by name.
|
1081
|
-
|
1082
|
-
Args:
|
1083
|
-
name (str): a unique name for the work queue
|
1084
|
-
work_pool_name (str, optional): the name of the work pool
|
1085
|
-
the queue belongs to.
|
1086
|
-
|
1087
|
-
Raises:
|
1088
|
-
prefect.exceptions.ObjectNotFound: if no work queue is found
|
1089
|
-
httpx.HTTPStatusError: other status errors
|
1090
|
-
|
1091
|
-
Returns:
|
1092
|
-
WorkQueue: a work queue API object
|
1093
|
-
"""
|
1094
|
-
try:
|
1095
|
-
if work_pool_name is not None:
|
1096
|
-
response = await self._client.get(
|
1097
|
-
f"/work_pools/{work_pool_name}/queues/{name}"
|
1098
|
-
)
|
1099
|
-
else:
|
1100
|
-
response = await self._client.get(f"/work_queues/name/{name}")
|
1101
|
-
except httpx.HTTPStatusError as e:
|
1102
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
1103
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
1104
|
-
else:
|
1105
|
-
raise
|
1106
|
-
|
1107
|
-
return WorkQueue.model_validate(response.json())
|
1108
|
-
|
1109
|
-
async def update_work_queue(self, id: UUID, **kwargs: Any) -> None:
|
1110
|
-
"""
|
1111
|
-
Update properties of a work queue.
|
1112
|
-
|
1113
|
-
Args:
|
1114
|
-
id: the ID of the work queue to update
|
1115
|
-
**kwargs: the fields to update
|
1116
|
-
|
1117
|
-
Raises:
|
1118
|
-
ValueError: if no kwargs are provided
|
1119
|
-
prefect.exceptions.ObjectNotFound: if request returns 404
|
1120
|
-
httpx.RequestError: if the request fails
|
1121
|
-
|
1122
|
-
"""
|
1123
|
-
if not kwargs:
|
1124
|
-
raise ValueError("No fields provided to update.")
|
1125
|
-
|
1126
|
-
data = WorkQueueUpdate(**kwargs).model_dump(mode="json", exclude_unset=True)
|
1127
|
-
try:
|
1128
|
-
await self._client.patch(f"/work_queues/{id}", json=data)
|
1129
|
-
except httpx.HTTPStatusError as e:
|
1130
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
1131
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
1132
|
-
else:
|
1133
|
-
raise
|
1134
|
-
|
1135
|
-
async def get_runs_in_work_queue(
|
1136
|
-
self,
|
1137
|
-
id: UUID,
|
1138
|
-
limit: int = 10,
|
1139
|
-
scheduled_before: Optional[datetime.datetime] = None,
|
1140
|
-
) -> list[FlowRun]:
|
1141
|
-
"""
|
1142
|
-
Read flow runs off a work queue.
|
1143
|
-
|
1144
|
-
Args:
|
1145
|
-
id: the id of the work queue to read from
|
1146
|
-
limit: a limit on the number of runs to return
|
1147
|
-
scheduled_before: a timestamp; only runs scheduled before this time will be returned.
|
1148
|
-
Defaults to now.
|
1149
|
-
|
1150
|
-
Raises:
|
1151
|
-
prefect.exceptions.ObjectNotFound: If request returns 404
|
1152
|
-
httpx.RequestError: If request fails
|
1153
|
-
|
1154
|
-
Returns:
|
1155
|
-
List[FlowRun]: a list of FlowRun objects read from the queue
|
1156
|
-
"""
|
1157
|
-
if scheduled_before is None:
|
1158
|
-
scheduled_before = pendulum.now("UTC")
|
1159
|
-
|
1160
|
-
try:
|
1161
|
-
response = await self._client.post(
|
1162
|
-
f"/work_queues/{id}/get_runs",
|
1163
|
-
json={
|
1164
|
-
"limit": limit,
|
1165
|
-
"scheduled_before": scheduled_before.isoformat(),
|
1166
|
-
},
|
1167
|
-
)
|
1168
|
-
except httpx.HTTPStatusError as e:
|
1169
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
1170
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
1171
|
-
else:
|
1172
|
-
raise
|
1173
|
-
return pydantic.TypeAdapter(list[FlowRun]).validate_python(response.json())
|
1174
|
-
|
1175
|
-
async def read_work_queue(
|
1176
|
-
self,
|
1177
|
-
id: UUID,
|
1178
|
-
) -> WorkQueue:
|
1179
|
-
"""
|
1180
|
-
Read a work queue.
|
1181
|
-
|
1182
|
-
Args:
|
1183
|
-
id: the id of the work queue to load
|
1184
|
-
|
1185
|
-
Raises:
|
1186
|
-
prefect.exceptions.ObjectNotFound: If request returns 404
|
1187
|
-
httpx.RequestError: If request fails
|
1188
|
-
|
1189
|
-
Returns:
|
1190
|
-
WorkQueue: an instantiated WorkQueue object
|
1191
|
-
"""
|
1192
|
-
try:
|
1193
|
-
response = await self._client.get(f"/work_queues/{id}")
|
1194
|
-
except httpx.HTTPStatusError as e:
|
1195
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
1196
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
1197
|
-
else:
|
1198
|
-
raise
|
1199
|
-
return WorkQueue.model_validate(response.json())
|
1200
|
-
|
1201
|
-
async def read_work_queue_status(
|
1202
|
-
self,
|
1203
|
-
id: UUID,
|
1204
|
-
) -> WorkQueueStatusDetail:
|
1205
|
-
"""
|
1206
|
-
Read a work queue status.
|
1207
|
-
|
1208
|
-
Args:
|
1209
|
-
id: the id of the work queue to load
|
1210
|
-
|
1211
|
-
Raises:
|
1212
|
-
prefect.exceptions.ObjectNotFound: If request returns 404
|
1213
|
-
httpx.RequestError: If request fails
|
1214
|
-
|
1215
|
-
Returns:
|
1216
|
-
WorkQueueStatus: an instantiated WorkQueueStatus object
|
1217
|
-
"""
|
1218
|
-
try:
|
1219
|
-
response = await self._client.get(f"/work_queues/{id}/status")
|
1220
|
-
except httpx.HTTPStatusError as e:
|
1221
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
1222
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
1223
|
-
else:
|
1224
|
-
raise
|
1225
|
-
return WorkQueueStatusDetail.model_validate(response.json())
|
1226
|
-
|
1227
|
-
async def match_work_queues(
|
1228
|
-
self,
|
1229
|
-
prefixes: list[str],
|
1230
|
-
work_pool_name: Optional[str] = None,
|
1231
|
-
) -> list[WorkQueue]:
|
1232
|
-
"""
|
1233
|
-
Query the Prefect API for work queues with names with a specific prefix.
|
1234
|
-
|
1235
|
-
Args:
|
1236
|
-
prefixes: a list of strings used to match work queue name prefixes
|
1237
|
-
work_pool_name: an optional work pool name to scope the query to
|
1238
|
-
|
1239
|
-
Returns:
|
1240
|
-
a list of WorkQueue model representations
|
1241
|
-
of the work queues
|
1242
|
-
"""
|
1243
|
-
page_length = 100
|
1244
|
-
current_page = 0
|
1245
|
-
work_queues: list[WorkQueue] = []
|
1246
|
-
|
1247
|
-
while True:
|
1248
|
-
new_queues = await self.read_work_queues(
|
1249
|
-
work_pool_name=work_pool_name,
|
1250
|
-
offset=current_page * page_length,
|
1251
|
-
limit=page_length,
|
1252
|
-
work_queue_filter=WorkQueueFilter(
|
1253
|
-
name=WorkQueueFilterName(startswith_=prefixes)
|
1254
|
-
),
|
1255
|
-
)
|
1256
|
-
if not new_queues:
|
1257
|
-
break
|
1258
|
-
work_queues += new_queues
|
1259
|
-
current_page += 1
|
1260
|
-
|
1261
|
-
return work_queues
|
1262
|
-
|
1263
|
-
async def delete_work_queue_by_id(
|
1264
|
-
self,
|
1265
|
-
id: UUID,
|
1266
|
-
) -> None:
|
1267
|
-
"""
|
1268
|
-
Delete a work queue by its ID.
|
1269
|
-
|
1270
|
-
Args:
|
1271
|
-
id: the id of the work queue to delete
|
1272
|
-
|
1273
|
-
Raises:
|
1274
|
-
prefect.exceptions.ObjectNotFound: If request returns 404
|
1275
|
-
httpx.RequestError: If requests fails
|
1276
|
-
"""
|
1277
|
-
try:
|
1278
|
-
await self._client.delete(
|
1279
|
-
f"/work_queues/{id}",
|
1280
|
-
)
|
1281
|
-
except httpx.HTTPStatusError as e:
|
1282
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
1283
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
1284
|
-
else:
|
1285
|
-
raise
|
1286
|
-
|
1287
|
-
async def create_block_type(self, block_type: BlockTypeCreate) -> BlockType:
|
1288
|
-
"""
|
1289
|
-
Create a block type in the Prefect API.
|
1290
|
-
"""
|
1291
|
-
try:
|
1292
|
-
response = await self._client.post(
|
1293
|
-
"/block_types/",
|
1294
|
-
json=block_type.model_dump(
|
1295
|
-
mode="json", exclude_unset=True, exclude={"id"}
|
1296
|
-
),
|
1297
|
-
)
|
1298
|
-
except httpx.HTTPStatusError as e:
|
1299
|
-
if e.response.status_code == status.HTTP_409_CONFLICT:
|
1300
|
-
raise prefect.exceptions.ObjectAlreadyExists(http_exc=e) from e
|
1301
|
-
else:
|
1302
|
-
raise
|
1303
|
-
return BlockType.model_validate(response.json())
|
1304
|
-
|
1305
|
-
async def create_block_schema(self, block_schema: BlockSchemaCreate) -> BlockSchema:
|
1306
|
-
"""
|
1307
|
-
Create a block schema in the Prefect API.
|
1308
|
-
"""
|
1309
|
-
try:
|
1310
|
-
response = await self._client.post(
|
1311
|
-
"/block_schemas/",
|
1312
|
-
json=block_schema.model_dump(
|
1313
|
-
mode="json",
|
1314
|
-
exclude_unset=True,
|
1315
|
-
exclude={"id", "block_type", "checksum"},
|
1316
|
-
),
|
1317
|
-
)
|
1318
|
-
except httpx.HTTPStatusError as e:
|
1319
|
-
if e.response.status_code == status.HTTP_409_CONFLICT:
|
1320
|
-
raise prefect.exceptions.ObjectAlreadyExists(http_exc=e) from e
|
1321
|
-
else:
|
1322
|
-
raise
|
1323
|
-
return BlockSchema.model_validate(response.json())
|
1324
|
-
|
1325
|
-
async def create_block_document(
|
1326
|
-
self,
|
1327
|
-
block_document: Union[BlockDocument, BlockDocumentCreate],
|
1328
|
-
include_secrets: bool = True,
|
1329
|
-
) -> BlockDocument:
|
1330
|
-
"""
|
1331
|
-
Create a block document in the Prefect API. This data is used to configure a
|
1332
|
-
corresponding Block.
|
1333
|
-
|
1334
|
-
Args:
|
1335
|
-
include_secrets (bool): whether to include secret values
|
1336
|
-
on the stored Block, corresponding to Pydantic's `SecretStr` and
|
1337
|
-
`SecretBytes` fields. Note Blocks may not work as expected if
|
1338
|
-
this is set to `False`.
|
1339
|
-
"""
|
1340
|
-
block_document_data = block_document.model_dump(
|
1341
|
-
mode="json",
|
1342
|
-
exclude_unset=True,
|
1343
|
-
exclude={"id", "block_schema", "block_type"},
|
1344
|
-
context={"include_secrets": include_secrets},
|
1345
|
-
serialize_as_any=True,
|
1346
|
-
)
|
1347
|
-
try:
|
1348
|
-
response = await self._client.post(
|
1349
|
-
"/block_documents/",
|
1350
|
-
json=block_document_data,
|
1351
|
-
)
|
1352
|
-
except httpx.HTTPStatusError as e:
|
1353
|
-
if e.response.status_code == status.HTTP_409_CONFLICT:
|
1354
|
-
raise prefect.exceptions.ObjectAlreadyExists(http_exc=e) from e
|
1355
|
-
else:
|
1356
|
-
raise
|
1357
|
-
return BlockDocument.model_validate(response.json())
-
-    async def update_block_document(
-        self,
-        block_document_id: UUID,
-        block_document: BlockDocumentUpdate,
-    ) -> None:
-        """
-        Update a block document in the Prefect API.
-        """
-        try:
-            await self._client.patch(
-                f"/block_documents/{block_document_id}",
-                json=block_document.model_dump(
-                    mode="json",
-                    exclude_unset=True,
-                    include={"data", "merge_existing_data", "block_schema_id"},
-                ),
-            )
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def delete_block_document(self, block_document_id: UUID) -> None:
-        """
-        Delete a block document.
-        """
-        try:
-            await self._client.delete(f"/block_documents/{block_document_id}")
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == 404:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def read_block_type_by_slug(self, slug: str) -> BlockType:
-        """
-        Read a block type by its slug.
-        """
-        try:
-            response = await self._client.get(f"/block_types/slug/{slug}")
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-        return BlockType.model_validate(response.json())
-
-    async def read_block_schema_by_checksum(
-        self, checksum: str, version: Optional[str] = None
-    ) -> BlockSchema:
-        """
-        Look up a block schema by checksum
-        """
-        try:
-            url = f"/block_schemas/checksum/{checksum}"
-            if version is not None:
-                url = f"{url}?version={version}"
-            response = await self._client.get(url)
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-        return BlockSchema.model_validate(response.json())
-
-    async def update_block_type(
-        self, block_type_id: UUID, block_type: BlockTypeUpdate
-    ) -> None:
-        """
-        Update a block type in the Prefect API.
-        """
-        try:
-            await self._client.patch(
-                f"/block_types/{block_type_id}",
-                json=block_type.model_dump(
-                    mode="json",
-                    exclude_unset=True,
-                    include=BlockTypeUpdate.updatable_fields(),
-                ),
-            )
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def delete_block_type(self, block_type_id: UUID) -> None:
-        """
-        Delete a block type.
-        """
-        try:
-            await self._client.delete(f"/block_types/{block_type_id}")
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == 404:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            elif (
-                e.response.status_code == status.HTTP_403_FORBIDDEN
-                and e.response.json()["detail"]
-                == "protected block types cannot be deleted."
-            ):
-                raise prefect.exceptions.ProtectedBlockError(
-                    "Protected block types cannot be deleted."
-                ) from e
-            else:
-                raise
-
-    async def read_block_types(self) -> list[BlockType]:
-        """
-        Read all block types
-        Raises:
-            httpx.RequestError: if the block types were not found
-
-        Returns:
-            List of BlockTypes.
-        """
-        response = await self._client.post("/block_types/filter", json={})
-        return pydantic.TypeAdapter(list[BlockType]).validate_python(response.json())
-
-    async def read_block_schemas(self) -> list[BlockSchema]:
-        """
-        Read all block schemas
-        Raises:
-            httpx.RequestError: if a valid block schema was not found
-
-        Returns:
-            A list of BlockSchemas.
-        """
-        response = await self._client.post("/block_schemas/filter", json={})
-        return pydantic.TypeAdapter(list[BlockSchema]).validate_python(response.json())
-
-    async def get_most_recent_block_schema_for_block_type(
-        self,
-        block_type_id: UUID,
-    ) -> Optional[BlockSchema]:
-        """
-        Fetches the most recent block schema for a specified block type ID.
-
-        Args:
-            block_type_id: The ID of the block type.
-
-        Raises:
-            httpx.RequestError: If the request fails for any reason.
-
-        Returns:
-            The most recent block schema or None.
-        """
-        try:
-            response = await self._client.post(
-                "/block_schemas/filter",
-                json={
-                    "block_schemas": {"block_type_id": {"any_": [str(block_type_id)]}},
-                    "limit": 1,
-                },
-            )
-        except httpx.HTTPStatusError:
-            raise
-        return (
-            BlockSchema.model_validate(response.json()[0]) if response.json() else None
-        )
-
-    async def read_block_document(
-        self,
-        block_document_id: UUID,
-        include_secrets: bool = True,
-    ) -> BlockDocument:
-        """
-        Read the block document with the specified ID.
-
-        Args:
-            block_document_id: the block document id
-            include_secrets (bool): whether to include secret values
-                on the Block, corresponding to Pydantic's `SecretStr` and
-                `SecretBytes` fields. These fields are automatically obfuscated
-                by Pydantic, but users can additionally choose not to receive
-                their values from the API. Note that any business logic on the
-                Block may not work if this is `False`.
-
-        Raises:
-            httpx.RequestError: if the block document was not found for any reason
-
-        Returns:
-            A block document.
-        """
-        assert (
-            block_document_id is not None
-        ), "Unexpected ID on block document. Was it persisted?"
-        try:
-            response = await self._client.get(
-                f"/block_documents/{block_document_id}",
-                params=dict(include_secrets=include_secrets),
-            )
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-        return BlockDocument.model_validate(response.json())
-
-    async def read_block_document_by_name(
-        self,
-        name: str,
-        block_type_slug: str,
-        include_secrets: bool = True,
-    ) -> BlockDocument:
-        """
-        Read the block document with the specified name that corresponds to a
-        specific block type name.
-
-        Args:
-            name: The block document name.
-            block_type_slug: The block type slug.
-            include_secrets (bool): whether to include secret values
-                on the Block, corresponding to Pydantic's `SecretStr` and
-                `SecretBytes` fields. These fields are automatically obfuscated
-                by Pydantic, but users can additionally choose not to receive
-                their values from the API. Note that any business logic on the
-                Block may not work if this is `False`.
-
-        Raises:
-            httpx.RequestError: if the block document was not found for any reason
-
-        Returns:
-            A block document.
-        """
-        try:
-            response = await self._client.get(
-                f"/block_types/slug/{block_type_slug}/block_documents/name/{name}",
-                params=dict(include_secrets=include_secrets),
-            )
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-        return BlockDocument.model_validate(response.json())
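
A sketch of reading a stored document back by type slug and name (placeholders again; a missing document surfaces as `ObjectNotFound`, per the handler above):

```python
import asyncio

from prefect.client.orchestration import get_client


async def main() -> None:
    async with get_client() as client:
        # Hits /block_types/slug/secret/block_documents/name/my-api-key
        doc = await client.read_block_document_by_name(
            name="my-api-key", block_type_slug="secret"
        )
        print(doc.id, doc.block_type.slug)


asyncio.run(main())
```
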
-
-    async def read_block_documents(
-        self,
-        block_schema_type: Optional[str] = None,
-        offset: Optional[int] = None,
-        limit: Optional[int] = None,
-        include_secrets: bool = True,
-    ) -> list[BlockDocument]:
-        """
-        Read block documents
-
-        Args:
-            block_schema_type: an optional block schema type
-            offset: an offset
-            limit: the number of blocks to return
-            include_secrets (bool): whether to include secret values
-                on the Block, corresponding to Pydantic's `SecretStr` and
-                `SecretBytes` fields. These fields are automatically obfuscated
-                by Pydantic, but users can additionally choose not to receive
-                their values from the API. Note that any business logic on the
-                Block may not work if this is `False`.
-
-        Returns:
-            A list of block documents
-        """
-        response = await self._client.post(
-            "/block_documents/filter",
-            json=dict(
-                block_schema_type=block_schema_type,
-                offset=offset,
-                limit=limit,
-                include_secrets=include_secrets,
-            ),
-        )
-        return pydantic.TypeAdapter(list[BlockDocument]).validate_python(
-            response.json()
-        )
-
-    async def read_block_documents_by_type(
-        self,
-        block_type_slug: str,
-        offset: Optional[int] = None,
-        limit: Optional[int] = None,
-        include_secrets: bool = True,
-    ) -> list[BlockDocument]:
-        """Retrieve block documents by block type slug.
-
-        Args:
-            block_type_slug: The block type slug.
-            offset: an offset
-            limit: the number of blocks to return
-            include_secrets: whether to include secret values
-
-        Returns:
-            A list of block documents
-        """
-        response = await self._client.get(
-            f"/block_types/slug/{block_type_slug}/block_documents",
-            params=dict(
-                offset=offset,
-                limit=limit,
-                include_secrets=include_secrets,
-            ),
-        )
-
-        return pydantic.TypeAdapter(list[BlockDocument]).validate_python(
-            response.json()
-        )
-
-    async def create_deployment(
-        self,
-        flow_id: UUID,
-        name: str,
-        version: Optional[str] = None,
-        schedules: Optional[list[DeploymentScheduleCreate]] = None,
-        concurrency_limit: Optional[int] = None,
-        concurrency_options: Optional[ConcurrencyOptions] = None,
-        parameters: Optional[dict[str, Any]] = None,
-        description: Optional[str] = None,
-        work_queue_name: Optional[str] = None,
-        work_pool_name: Optional[str] = None,
-        tags: Optional[list[str]] = None,
-        storage_document_id: Optional[UUID] = None,
-        path: Optional[str] = None,
-        entrypoint: Optional[str] = None,
-        infrastructure_document_id: Optional[UUID] = None,
-        parameter_openapi_schema: Optional[dict[str, Any]] = None,
-        paused: Optional[bool] = None,
-        pull_steps: Optional[list[dict[str, Any]]] = None,
-        enforce_parameter_schema: Optional[bool] = None,
-        job_variables: Optional[dict[str, Any]] = None,
-    ) -> UUID:
-        """
-        Create a deployment.
-
-        Args:
-            flow_id: the flow ID to create a deployment for
-            name: the name of the deployment
-            version: an optional version string for the deployment
-            tags: an optional list of tags to apply to the deployment
-            storage_document_id: a reference to the storage block document
-                used for the deployed flow
-            infrastructure_document_id: a reference to the infrastructure block document
-                to use for this deployment
-            job_variables: A dictionary of dot delimited infrastructure overrides that
-                will be applied at runtime; for example `env.CONFIG_KEY=config_value` or
-                `namespace='prefect'`. This argument was previously named `infra_overrides`.
-                Both arguments are supported for backwards compatibility.
-
-        Raises:
-            httpx.RequestError: if the deployment was not created for any reason
-
-        Returns:
-            the ID of the deployment in the backend
-        """
-
-        if parameter_openapi_schema is None:
-            parameter_openapi_schema = {}
-        deployment_create = DeploymentCreate(
-            flow_id=flow_id,
-            name=name,
-            version=version,
-            parameters=dict(parameters or {}),
-            tags=list(tags or []),
-            work_queue_name=work_queue_name,
-            description=description,
-            storage_document_id=storage_document_id,
-            path=path,
-            entrypoint=entrypoint,
-            infrastructure_document_id=infrastructure_document_id,
-            job_variables=dict(job_variables or {}),
-            parameter_openapi_schema=parameter_openapi_schema,
-            paused=paused,
-            schedules=schedules or [],
-            concurrency_limit=concurrency_limit,
-            concurrency_options=concurrency_options,
-            pull_steps=pull_steps,
-            enforce_parameter_schema=enforce_parameter_schema,
-        )
-
-        if work_pool_name is not None:
-            deployment_create.work_pool_name = work_pool_name
-
-        # Exclude newer fields that are not set to avoid compatibility issues
-        exclude = {
-            field
-            for field in ["work_pool_name", "work_queue_name"]
-            if field not in deployment_create.model_fields_set
-        }
-
-        if deployment_create.paused is None:
-            exclude.add("paused")
-
-        if deployment_create.pull_steps is None:
-            exclude.add("pull_steps")
-
-        if deployment_create.enforce_parameter_schema is None:
-            exclude.add("enforce_parameter_schema")
-
-        json = deployment_create.model_dump(mode="json", exclude=exclude)
-        response = await self._client.post(
-            "/deployments/",
-            json=json,
-        )
-        deployment_id = response.json().get("id")
-        if not deployment_id:
-            raise httpx.RequestError(f"Malformed response: {response}")
-
-        return UUID(deployment_id)
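
A sketch of calling `create_deployment` directly; most users reach it through higher-level deployment tooling, and the flow record must already exist (created here via `create_flow_by_name`; all names are placeholders):

```python
import asyncio

from prefect.client.orchestration import get_client


async def main() -> None:
    async with get_client() as client:
        # A deployment always references an existing flow record.
        flow_id = await client.create_flow_by_name("etl")
        deployment_id = await client.create_deployment(
            flow_id=flow_id,
            name="etl-nightly",
            work_pool_name="default",
            parameters={"source": "s3://bucket/raw"},
            tags=["nightly"],
        )
        print(deployment_id)


asyncio.run(main())
```
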
-
-    async def set_deployment_paused_state(
-        self, deployment_id: UUID, paused: bool
-    ) -> None:
-        await self._client.patch(
-            f"/deployments/{deployment_id}", json={"paused": paused}
-        )
-
-    async def update_deployment(
-        self,
-        deployment_id: UUID,
-        deployment: DeploymentUpdate,
-    ) -> None:
-        await self._client.patch(
-            f"/deployments/{deployment_id}",
-            json=deployment.model_dump(mode="json", exclude_unset=True),
-        )
-
-    async def _create_deployment_from_schema(self, schema: DeploymentCreate) -> UUID:
-        """
-        Create a deployment from a prepared `DeploymentCreate` schema.
-        """
-        # TODO: We are likely to remove this method once we have considered the
-        # packaging interface for deployments further.
-        response = await self._client.post(
-            "/deployments/", json=schema.model_dump(mode="json")
-        )
-        deployment_id = response.json().get("id")
-        if not deployment_id:
-            raise httpx.RequestError(f"Malformed response: {response}")
-
-        return UUID(deployment_id)
-
-    async def read_deployment(
-        self,
-        deployment_id: Union[UUID, str],
-    ) -> DeploymentResponse:
-        """
-        Query the Prefect API for a deployment by id.
-
-        Args:
-            deployment_id: the deployment ID of interest
-
-        Returns:
-            a [Deployment model][prefect.client.schemas.objects.Deployment] representation of the deployment
-        """
-        if not isinstance(deployment_id, UUID):
-            try:
-                deployment_id = UUID(deployment_id)
-            except ValueError:
-                raise ValueError(f"Invalid deployment ID: {deployment_id}")
-
-        try:
-            response = await self._client.get(f"/deployments/{deployment_id}")
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-        return DeploymentResponse.model_validate(response.json())
-
-    async def read_deployment_by_name(
-        self,
-        name: str,
-    ) -> DeploymentResponse:
-        """
-        Query the Prefect API for a deployment by name.
-
-        Args:
-            name: A deployed flow's name: <FLOW_NAME>/<DEPLOYMENT_NAME>
-
-        Raises:
-            prefect.exceptions.ObjectNotFound: If request returns 404
-            httpx.RequestError: If request fails
-
-        Returns:
-            a Deployment model representation of the deployment
-        """
-        try:
-            response = await self._client.get(f"/deployments/name/{name}")
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                from prefect.utilities.text import fuzzy_match_string
-
-                deployments = await self.read_deployments()
-                flow_name_map = {
-                    flow.id: flow.name
-                    for flow in await asyncio.gather(
-                        *[
-                            self.read_flow(flow_id)
-                            for flow_id in {d.flow_id for d in deployments}
-                        ]
-                    )
-                }
-
-                raise prefect.exceptions.ObjectNotFound(
-                    http_exc=e,
-                    help_message=(
-                        f"Deployment {name!r} not found; did you mean {fuzzy_match!r}?"
-                        if (
-                            fuzzy_match := fuzzy_match_string(
-                                name,
-                                [
-                                    f"{flow_name_map[d.flow_id]}/{d.name}"
-                                    for d in deployments
-                                ],
-                            )
-                        )
-                        else f"Deployment {name!r} not found. Try `prefect deployment ls` to find available deployments."
-                    ),
-                ) from e
-            else:
-                raise
-
-        return DeploymentResponse.model_validate(response.json())
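
Names take the `<FLOW_NAME>/<DEPLOYMENT_NAME>` form noted in the docstring; a sketch that also shows the fuzzy-match help message on a miss:

```python
import asyncio

from prefect.client.orchestration import get_client
from prefect.exceptions import ObjectNotFound


async def main() -> None:
    async with get_client() as client:
        try:
            deployment = await client.read_deployment_by_name("etl/etl-nightly")
            print(deployment.id, deployment.work_pool_name)
        except ObjectNotFound as exc:
            # The handler above attaches a "did you mean ...?" suggestion
            # when a close match exists.
            print(exc)


asyncio.run(main())
```
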
-
-    async def read_deployments(
-        self,
-        *,
-        flow_filter: Optional[FlowFilter] = None,
-        flow_run_filter: Optional[FlowRunFilter] = None,
-        task_run_filter: Optional[TaskRunFilter] = None,
-        deployment_filter: Optional[DeploymentFilter] = None,
-        work_pool_filter: Optional[WorkPoolFilter] = None,
-        work_queue_filter: Optional[WorkQueueFilter] = None,
-        limit: Optional[int] = None,
-        sort: Optional[DeploymentSort] = None,
-        offset: int = 0,
-    ) -> list[DeploymentResponse]:
-        """
-        Query the Prefect API for deployments. Only deployments matching all
-        the provided criteria will be returned.
-
-        Args:
-            flow_filter: filter criteria for flows
-            flow_run_filter: filter criteria for flow runs
-            task_run_filter: filter criteria for task runs
-            deployment_filter: filter criteria for deployments
-            work_pool_filter: filter criteria for work pools
-            work_queue_filter: filter criteria for work pool queues
-            limit: a limit for the deployment query
-            offset: an offset for the deployment query
-
-        Returns:
-            a list of Deployment model representations
-                of the deployments
-        """
-        body: dict[str, Any] = {
-            "flows": flow_filter.model_dump(mode="json") if flow_filter else None,
-            "flow_runs": (
-                flow_run_filter.model_dump(mode="json", exclude_unset=True)
-                if flow_run_filter
-                else None
-            ),
-            "task_runs": (
-                task_run_filter.model_dump(mode="json") if task_run_filter else None
-            ),
-            "deployments": (
-                deployment_filter.model_dump(mode="json") if deployment_filter else None
-            ),
-            "work_pools": (
-                work_pool_filter.model_dump(mode="json") if work_pool_filter else None
-            ),
-            "work_pool_queues": (
-                work_queue_filter.model_dump(mode="json") if work_queue_filter else None
-            ),
-            "limit": limit,
-            "offset": offset,
-            "sort": sort,
-        }
-
-        response = await self._client.post("/deployments/filter", json=body)
-        return pydantic.TypeAdapter(list[DeploymentResponse]).validate_python(
-            response.json()
-        )
-
-    async def delete_deployment(
-        self,
-        deployment_id: UUID,
-    ) -> None:
-        """
-        Delete deployment by id.
-
-        Args:
-            deployment_id: The deployment id of interest.
-        Raises:
-            prefect.exceptions.ObjectNotFound: If request returns 404
-            httpx.RequestError: If the request fails
-        """
-        try:
-            await self._client.delete(f"/deployments/{deployment_id}")
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == 404:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def create_deployment_schedules(
-        self,
-        deployment_id: UUID,
-        schedules: list[tuple[SCHEDULE_TYPES, bool]],
-    ) -> list[DeploymentSchedule]:
-        """
-        Create deployment schedules.
-
-        Args:
-            deployment_id: the deployment ID
-            schedules: a list of tuples containing the schedule to create
-                and whether or not it should be active.
-
-        Raises:
-            httpx.RequestError: if the schedules were not created for any reason
-
-        Returns:
-            the list of schedules created in the backend
-        """
-        deployment_schedule_create = [
-            DeploymentScheduleCreate(schedule=schedule[0], active=schedule[1])
-            for schedule in schedules
-        ]
-
-        json = [
-            deployment_schedule_create.model_dump(mode="json")
-            for deployment_schedule_create in deployment_schedule_create
-        ]
-        response = await self._client.post(
-            f"/deployments/{deployment_id}/schedules", json=json
-        )
-        return pydantic.TypeAdapter(list[DeploymentSchedule]).validate_python(
-            response.json()
-        )
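
Each tuple pairs a schedule with its active flag; a sketch using `IntervalSchedule` from `prefect.client.schemas.schedules`, with a placeholder deployment id:

```python
import asyncio
from datetime import timedelta
from uuid import UUID

from prefect.client.orchestration import get_client
from prefect.client.schemas.schedules import IntervalSchedule


async def main(deployment_id: UUID) -> None:
    async with get_client() as client:
        schedules = await client.create_deployment_schedules(
            deployment_id,
            [
                (IntervalSchedule(interval=timedelta(hours=1)), True),  # active
                (IntervalSchedule(interval=timedelta(days=1)), False),  # inactive
            ],
        )
        for s in schedules:
            print(s.id, s.active)


asyncio.run(main(UUID("00000000-0000-0000-0000-000000000000")))
```
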
-
-    async def read_deployment_schedules(
-        self,
-        deployment_id: UUID,
-    ) -> list[DeploymentSchedule]:
-        """
-        Query the Prefect API for a deployment's schedules.
-
-        Args:
-            deployment_id: the deployment ID
-
-        Returns:
-            a list of DeploymentSchedule model representations of the deployment schedules
-        """
-        try:
-            response = await self._client.get(f"/deployments/{deployment_id}/schedules")
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-        return pydantic.TypeAdapter(list[DeploymentSchedule]).validate_python(
-            response.json()
-        )
-
-    async def update_deployment_schedule(
-        self,
-        deployment_id: UUID,
-        schedule_id: UUID,
-        active: Optional[bool] = None,
-        schedule: Optional[SCHEDULE_TYPES] = None,
-    ) -> None:
-        """
-        Update a deployment schedule by ID.
-
-        Args:
-            deployment_id: the deployment ID
-            schedule_id: the deployment schedule ID of interest
-            active: whether or not the schedule should be active
-            schedule: the cron, rrule, or interval schedule this deployment schedule should use
-        """
-        kwargs: dict[str, Any] = {}
-        if active is not None:
-            kwargs["active"] = active
-        if schedule is not None:
-            kwargs["schedule"] = schedule
-
-        deployment_schedule_update = DeploymentScheduleUpdate(**kwargs)
-        json = deployment_schedule_update.model_dump(mode="json", exclude_unset=True)
-
-        try:
-            await self._client.patch(
-                f"/deployments/{deployment_id}/schedules/{schedule_id}", json=json
-            )
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def delete_deployment_schedule(
-        self,
-        deployment_id: UUID,
-        schedule_id: UUID,
-    ) -> None:
-        """
-        Delete a deployment schedule.
-
-        Args:
-            deployment_id: the deployment ID
-            schedule_id: the ID of the deployment schedule to delete.
-
-        Raises:
-            httpx.RequestError: if the schedules were not deleted for any reason
-        """
-        try:
-            await self._client.delete(
-                f"/deployments/{deployment_id}/schedules/{schedule_id}"
-            )
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == 404:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def read_flow_run(self, flow_run_id: UUID) -> FlowRun:
-        """
-        Query the Prefect API for a flow run by id.
-
-        Args:
-            flow_run_id: the flow run ID of interest
-
-        Returns:
-            a Flow Run model representation of the flow run
-        """
-        try:
-            response = await self._client.get(f"/flow_runs/{flow_run_id}")
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == 404:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-        return FlowRun.model_validate(response.json())
-
-    async def resume_flow_run(
-        self, flow_run_id: UUID, run_input: Optional[dict[str, Any]] = None
-    ) -> OrchestrationResult[Any]:
-        """
-        Resumes a paused flow run.
-
-        Args:
-            flow_run_id: the flow run ID of interest
-            run_input: the input to resume the flow run with
-
-        Returns:
-            an OrchestrationResult model representation of state orchestration output
-        """
-        try:
-            response = await self._client.post(
-                f"/flow_runs/{flow_run_id}/resume", json={"run_input": run_input}
-            )
-        except httpx.HTTPStatusError:
-            raise
-
-        result: OrchestrationResult[Any] = OrchestrationResult.model_validate(
-            response.json()
-        )
-        return result
-
-    async def read_flow_runs(
-        self,
-        *,
-        flow_filter: Optional[FlowFilter] = None,
-        flow_run_filter: Optional[FlowRunFilter] = None,
-        task_run_filter: Optional[TaskRunFilter] = None,
-        deployment_filter: Optional[DeploymentFilter] = None,
-        work_pool_filter: Optional[WorkPoolFilter] = None,
-        work_queue_filter: Optional[WorkQueueFilter] = None,
-        sort: Optional[FlowRunSort] = None,
-        limit: Optional[int] = None,
-        offset: int = 0,
-    ) -> list[FlowRun]:
-        """
-        Query the Prefect API for flow runs. Only flow runs matching all criteria will
-        be returned.
-
-        Args:
-            flow_filter: filter criteria for flows
-            flow_run_filter: filter criteria for flow runs
-            task_run_filter: filter criteria for task runs
-            deployment_filter: filter criteria for deployments
-            work_pool_filter: filter criteria for work pools
-            work_queue_filter: filter criteria for work pool queues
-            sort: sort criteria for the flow runs
-            limit: limit for the flow run query
-            offset: offset for the flow run query
-
-        Returns:
-            a list of Flow Run model representations
-                of the flow runs
-        """
-        body: dict[str, Any] = {
-            "flows": flow_filter.model_dump(mode="json") if flow_filter else None,
-            "flow_runs": (
-                flow_run_filter.model_dump(mode="json", exclude_unset=True)
-                if flow_run_filter
-                else None
-            ),
-            "task_runs": (
-                task_run_filter.model_dump(mode="json") if task_run_filter else None
-            ),
-            "deployments": (
-                deployment_filter.model_dump(mode="json") if deployment_filter else None
-            ),
-            "work_pools": (
-                work_pool_filter.model_dump(mode="json") if work_pool_filter else None
-            ),
-            "work_pool_queues": (
-                work_queue_filter.model_dump(mode="json") if work_queue_filter else None
-            ),
-            "sort": sort,
-            "limit": limit,
-            "offset": offset,
-        }
-
-        response = await self._client.post("/flow_runs/filter", json=body)
-        return pydantic.TypeAdapter(list[FlowRun]).validate_python(response.json())
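
A sketch of a filtered query; the filter and sort models come from `prefect.client.schemas.filters` and `prefect.client.schemas.sorting`:

```python
import asyncio

from prefect.client.orchestration import get_client
from prefect.client.schemas.filters import (
    FlowRunFilter,
    FlowRunFilterState,
    FlowRunFilterStateType,
)
from prefect.client.schemas.objects import StateType
from prefect.client.schemas.sorting import FlowRunSort


async def main() -> None:
    async with get_client() as client:
        # The ten most recently started runs currently in a RUNNING state.
        runs = await client.read_flow_runs(
            flow_run_filter=FlowRunFilter(
                state=FlowRunFilterState(
                    type=FlowRunFilterStateType(any_=[StateType.RUNNING])
                )
            ),
            sort=FlowRunSort.START_TIME_DESC,
            limit=10,
        )
        for run in runs:
            print(run.id, run.name, run.state_name)


asyncio.run(main())
```
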
-
-    async def set_flow_run_state(
-        self,
-        flow_run_id: Union[UUID, str],
-        state: "prefect.states.State[T]",
-        force: bool = False,
-    ) -> OrchestrationResult[T]:
-        """
-        Set the state of a flow run.
-
-        Args:
-            flow_run_id: the id of the flow run
-            state: the state to set
-            force: if True, disregard orchestration logic when setting the state,
-                forcing the Prefect API to accept the state
-
-        Returns:
-            an OrchestrationResult model representation of state orchestration output
-        """
-        flow_run_id = (
-            flow_run_id if isinstance(flow_run_id, UUID) else UUID(flow_run_id)
-        )
-        state_create = state.to_state_create()
-        state_create.state_details.flow_run_id = flow_run_id
-        state_create.state_details.transition_id = uuid4()
-        try:
-            response = await self._client.post(
-                f"/flow_runs/{flow_run_id}/set_state",
-                json=dict(
-                    state=state_create.model_dump(mode="json", serialize_as_any=True),
-                    force=force,
-                ),
-            )
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-        result: OrchestrationResult[T] = OrchestrationResult.model_validate(
-            response.json()
-        )
-        return result
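
A sketch of proposing a state transition (states come from `prefect.states`; the flow run id is a placeholder, and `result.status` reports whether the server accepted, rejected, or aborted the transition):

```python
import asyncio
from uuid import UUID

import prefect.states
from prefect.client.orchestration import get_client


async def cancel(flow_run_id: UUID) -> None:
    async with get_client() as client:
        result = await client.set_flow_run_state(
            flow_run_id=flow_run_id,
            state=prefect.states.Cancelling(),
            force=False,  # leave orchestration rules in effect
        )
        print(result.status)


asyncio.run(cancel(UUID("00000000-0000-0000-0000-000000000000")))
```
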
-
-    async def read_flow_run_states(
-        self, flow_run_id: UUID
-    ) -> list[prefect.states.State]:
-        """
-        Query for the states of a flow run
-
-        Args:
-            flow_run_id: the id of the flow run
-
-        Returns:
-            a list of State model representations
-                of the flow run states
-        """
-        response = await self._client.get(
-            "/flow_run_states/", params=dict(flow_run_id=str(flow_run_id))
-        )
-        return pydantic.TypeAdapter(list[prefect.states.State]).validate_python(
-            response.json()
-        )
-
-    async def set_flow_run_name(self, flow_run_id: UUID, name: str) -> httpx.Response:
-        flow_run_data = FlowRunUpdate(name=name)
-        return await self._client.patch(
-            f"/flow_runs/{flow_run_id}",
-            json=flow_run_data.model_dump(mode="json", exclude_unset=True),
-        )
-
-    async def set_task_run_name(self, task_run_id: UUID, name: str) -> httpx.Response:
-        task_run_data = TaskRunUpdate(name=name)
-        return await self._client.patch(
-            f"/task_runs/{task_run_id}",
-            json=task_run_data.model_dump(mode="json", exclude_unset=True),
-        )
-
-    async def create_task_run(
-        self,
-        task: "TaskObject[P, R]",
-        flow_run_id: Optional[UUID],
-        dynamic_key: str,
-        id: Optional[UUID] = None,
-        name: Optional[str] = None,
-        extra_tags: Optional[Iterable[str]] = None,
-        state: Optional[prefect.states.State[R]] = None,
-        task_inputs: Optional[
-            dict[
-                str,
-                list[
-                    Union[
-                        TaskRunResult,
-                        Parameter,
-                        Constant,
-                    ]
-                ],
-            ]
-        ] = None,
-    ) -> TaskRun:
-        """
-        Create a task run
-
-        Args:
-            task: The Task to run
-            flow_run_id: The flow run id with which to associate the task run
-            dynamic_key: A key unique to this particular run of a Task within the flow
-            id: An optional ID for the task run. If not provided, one will be generated
-                server-side.
-            name: An optional name for the task run
-            extra_tags: an optional list of extra tags to apply to the task run in
-                addition to `task.tags`
-            state: The initial state for the run. If not provided, defaults to
-                `Pending` for now. Should always be a `Scheduled` type.
-            task_inputs: the set of inputs passed to the task
-
-        Returns:
-            The created task run.
-        """
-        tags = set(task.tags).union(extra_tags or [])
-
-        if state is None:
-            state = prefect.states.Pending()
-
-        retry_delay = task.retry_delay_seconds
-        if isinstance(retry_delay, list):
-            retry_delay = [int(rd) for rd in retry_delay]
-        elif isinstance(retry_delay, float):
-            retry_delay = int(retry_delay)
-
-        task_run_data = TaskRunCreate(
-            id=id,
-            name=name,
-            flow_run_id=flow_run_id,
-            task_key=task.task_key,
-            dynamic_key=str(dynamic_key),
-            tags=list(tags),
-            task_version=task.version,
-            empirical_policy=TaskRunPolicy(
-                retries=task.retries,
-                retry_delay=retry_delay,
-                retry_jitter_factor=task.retry_jitter_factor,
-            ),
-            state=state.to_state_create(),
-            task_inputs=task_inputs or {},
-        )
-        content = task_run_data.model_dump_json(exclude={"id"} if id is None else None)
-
-        response = await self._client.post("/task_runs/", content=content)
-        return TaskRun.model_validate(response.json())
-
-    async def read_task_run(self, task_run_id: UUID) -> TaskRun:
-        """
-        Query the Prefect API for a task run by id.
-
-        Args:
-            task_run_id: the task run ID of interest
-
-        Returns:
-            a Task Run model representation of the task run
-        """
-        try:
-            response = await self._client.get(f"/task_runs/{task_run_id}")
-            return TaskRun.model_validate(response.json())
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def read_task_runs(
-        self,
-        *,
-        flow_filter: Optional[FlowFilter] = None,
-        flow_run_filter: Optional[FlowRunFilter] = None,
-        task_run_filter: Optional[TaskRunFilter] = None,
-        deployment_filter: Optional[DeploymentFilter] = None,
-        sort: Optional[TaskRunSort] = None,
-        limit: Optional[int] = None,
-        offset: int = 0,
-    ) -> list[TaskRun]:
-        """
-        Query the Prefect API for task runs. Only task runs matching all criteria will
-        be returned.
-
-        Args:
-            flow_filter: filter criteria for flows
-            flow_run_filter: filter criteria for flow runs
-            task_run_filter: filter criteria for task runs
-            deployment_filter: filter criteria for deployments
-            sort: sort criteria for the task runs
-            limit: a limit for the task run query
-            offset: an offset for the task run query
-
-        Returns:
-            a list of Task Run model representations
-                of the task runs
-        """
-        body: dict[str, Any] = {
-            "flows": flow_filter.model_dump(mode="json") if flow_filter else None,
-            "flow_runs": (
-                flow_run_filter.model_dump(mode="json", exclude_unset=True)
-                if flow_run_filter
-                else None
-            ),
-            "task_runs": (
-                task_run_filter.model_dump(mode="json") if task_run_filter else None
-            ),
-            "deployments": (
-                deployment_filter.model_dump(mode="json") if deployment_filter else None
-            ),
-            "sort": sort,
-            "limit": limit,
-            "offset": offset,
-        }
-        response = await self._client.post("/task_runs/filter", json=body)
-        return pydantic.TypeAdapter(list[TaskRun]).validate_python(response.json())
-
-    async def delete_task_run(self, task_run_id: UUID) -> None:
-        """
-        Delete a task run by id.
-
-        Args:
-            task_run_id: the task run ID of interest
-        Raises:
-            prefect.exceptions.ObjectNotFound: If request returns 404
-            httpx.RequestError: If the request fails
-        """
-        try:
-            await self._client.delete(f"/task_runs/{task_run_id}")
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == 404:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def set_task_run_state(
-        self,
-        task_run_id: UUID,
-        state: prefect.states.State[T],
-        force: bool = False,
-    ) -> OrchestrationResult[T]:
-        """
-        Set the state of a task run.
-
-        Args:
-            task_run_id: the id of the task run
-            state: the state to set
-            force: if True, disregard orchestration logic when setting the state,
-                forcing the Prefect API to accept the state
-
-        Returns:
-            an OrchestrationResult model representation of state orchestration output
-        """
-        state_create = state.to_state_create()
-        state_create.state_details.task_run_id = task_run_id
-        response = await self._client.post(
-            f"/task_runs/{task_run_id}/set_state",
-            json=dict(state=state_create.model_dump(mode="json"), force=force),
-        )
-        result: OrchestrationResult[T] = OrchestrationResult.model_validate(
-            response.json()
-        )
-        return result
-
-    async def read_task_run_states(
-        self, task_run_id: UUID
-    ) -> list[prefect.states.State]:
-        """
-        Query for the states of a task run
-
-        Args:
-            task_run_id: the id of the task run
-
-        Returns:
-            a list of State model representations of the task run states
-        """
-        response = await self._client.get(
-            "/task_run_states/", params=dict(task_run_id=str(task_run_id))
-        )
-        return pydantic.TypeAdapter(list[prefect.states.State]).validate_python(
-            response.json()
-        )
-
-    async def create_logs(
-        self, logs: Iterable[Union[LogCreate, dict[str, Any]]]
-    ) -> None:
-        """
-        Create logs for a flow or task run
-
-        Args:
-            logs: An iterable of `LogCreate` objects or already json-compatible dicts
-        """
-        serialized_logs = [
-            log.model_dump(mode="json") if isinstance(log, LogCreate) else log
-            for log in logs
-        ]
-        await self._client.post("/logs/", json=serialized_logs)
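
Because plain JSON-compatible dicts are accepted alongside `LogCreate`, a sketch can post dicts directly; the field names mirror the log schema, and the flow run id is a placeholder:

```python
import asyncio
from datetime import datetime, timezone

from prefect.client.orchestration import get_client


async def main() -> None:
    async with get_client() as client:
        await client.create_logs(
            [
                {
                    "name": "my-logger",
                    "level": 20,  # logging.INFO
                    "message": "hello from the client",
                    "timestamp": datetime.now(timezone.utc).isoformat(),
                    "flow_run_id": "00000000-0000-0000-0000-000000000000",
                }
            ]
        )


asyncio.run(main())
```
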
-
-    async def create_flow_run_notification_policy(
-        self,
-        block_document_id: UUID,
-        is_active: bool = True,
-        tags: Optional[list[str]] = None,
-        state_names: Optional[list[str]] = None,
-        message_template: Optional[str] = None,
-    ) -> UUID:
-        """
-        Create a notification policy for flow runs
-
-        Args:
-            block_document_id: The block document UUID
-            is_active: Whether the notification policy is active
-            tags: List of flow tags
-            state_names: List of state names
-            message_template: Notification message template
-        """
-        if tags is None:
-            tags = []
-        if state_names is None:
-            state_names = []
-
-        policy = FlowRunNotificationPolicyCreate(
-            block_document_id=block_document_id,
-            is_active=is_active,
-            tags=tags,
-            state_names=state_names,
-            message_template=message_template,
-        )
-        response = await self._client.post(
-            "/flow_run_notification_policies/",
-            json=policy.model_dump(mode="json"),
-        )
-
-        policy_id = response.json().get("id")
-        if not policy_id:
-            raise httpx.RequestError(f"Malformed response: {response}")
-
-        return UUID(policy_id)
-
-    async def delete_flow_run_notification_policy(
-        self,
-        id: UUID,
-    ) -> None:
-        """
-        Delete a flow run notification policy by id.
-
-        Args:
-            id: UUID of the flow run notification policy to delete.
-        Raises:
-            prefect.exceptions.ObjectNotFound: If request returns 404
-            httpx.RequestError: If the request fails
-        """
-        try:
-            await self._client.delete(f"/flow_run_notification_policies/{id}")
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def update_flow_run_notification_policy(
-        self,
-        id: UUID,
-        block_document_id: Optional[UUID] = None,
-        is_active: Optional[bool] = None,
-        tags: Optional[list[str]] = None,
-        state_names: Optional[list[str]] = None,
-        message_template: Optional[str] = None,
-    ) -> None:
-        """
-        Update a notification policy for flow runs
-
-        Args:
-            id: UUID of the notification policy
-            block_document_id: The block document UUID
-            is_active: Whether the notification policy is active
-            tags: List of flow tags
-            state_names: List of state names
-            message_template: Notification message template
-        Raises:
-            prefect.exceptions.ObjectNotFound: If request returns 404
-            httpx.RequestError: If the request fails
-        """
-        params: dict[str, Any] = {}
-        if block_document_id is not None:
-            params["block_document_id"] = block_document_id
-        if is_active is not None:
-            params["is_active"] = is_active
-        if tags is not None:
-            params["tags"] = tags
-        if state_names is not None:
-            params["state_names"] = state_names
-        if message_template is not None:
-            params["message_template"] = message_template
-
-        policy = FlowRunNotificationPolicyUpdate(**params)
-
-        try:
-            await self._client.patch(
-                f"/flow_run_notification_policies/{id}",
-                json=policy.model_dump(mode="json", exclude_unset=True),
-            )
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def read_flow_run_notification_policies(
-        self,
-        flow_run_notification_policy_filter: FlowRunNotificationPolicyFilter,
-        limit: Optional[int] = None,
-        offset: int = 0,
-    ) -> list[FlowRunNotificationPolicy]:
-        """
-        Query the Prefect API for flow run notification policies. Only policies matching all criteria will
-        be returned.
-
-        Args:
-            flow_run_notification_policy_filter: filter criteria for notification policies
-            limit: a limit for the notification policies query
-            offset: an offset for the notification policies query
-
-        Returns:
-            a list of FlowRunNotificationPolicy model representations
-                of the notification policies
-        """
-        body: dict[str, Any] = {
-            "flow_run_notification_policy_filter": (
-                flow_run_notification_policy_filter.model_dump(mode="json")
-                if flow_run_notification_policy_filter
-                else None
-            ),
-            "limit": limit,
-            "offset": offset,
-        }
-        response = await self._client.post(
-            "/flow_run_notification_policies/filter", json=body
-        )
-        return pydantic.TypeAdapter(list[FlowRunNotificationPolicy]).validate_python(
-            response.json()
-        )
-
-    async def read_logs(
-        self,
-        log_filter: Optional[LogFilter] = None,
-        limit: Optional[int] = None,
-        offset: Optional[int] = None,
-        sort: LogSort = LogSort.TIMESTAMP_ASC,
-    ) -> list[Log]:
-        """
-        Read flow and task run logs.
-        """
-        body: dict[str, Any] = {
-            "logs": log_filter.model_dump(mode="json") if log_filter else None,
-            "limit": limit,
-            "offset": offset,
-            "sort": sort,
-        }
-
-        response = await self._client.post("/logs/filter", json=body)
-        return pydantic.TypeAdapter(list[Log]).validate_python(response.json())
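
A sketch of pulling logs for one flow run, assuming `LogFilter`/`LogFilterFlowRunId` from `prefect.client.schemas.filters`:

```python
import asyncio
from uuid import UUID

from prefect.client.orchestration import get_client
from prefect.client.schemas.filters import LogFilter, LogFilterFlowRunId


async def main(flow_run_id: UUID) -> None:
    async with get_client() as client:
        logs = await client.read_logs(
            log_filter=LogFilter(flow_run_id=LogFilterFlowRunId(any_=[flow_run_id])),
            limit=100,
        )
        for log in logs:
            print(log.timestamp, log.level, log.message)


asyncio.run(main(UUID("00000000-0000-0000-0000-000000000000")))
```
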
-
-    async def send_worker_heartbeat(
-        self,
-        work_pool_name: str,
-        worker_name: str,
-        heartbeat_interval_seconds: Optional[float] = None,
-        get_worker_id: bool = False,
-        worker_metadata: Optional[WorkerMetadata] = None,
-    ) -> Optional[UUID]:
-        """
-        Sends a worker heartbeat for a given work pool.
-
-        Args:
-            work_pool_name: The name of the work pool to heartbeat against.
-            worker_name: The name of the worker sending the heartbeat.
-            get_worker_id: Whether to return the worker ID. Note: will return `None`
-                if the connected server does not support returning worker IDs, even
-                if `get_worker_id` is `True`.
-            worker_metadata: Metadata about the worker to send to the server.
-        """
-        params: dict[str, Any] = {
-            "name": worker_name,
-            "heartbeat_interval_seconds": heartbeat_interval_seconds,
-        }
-        if worker_metadata:
-            params["metadata"] = worker_metadata.model_dump(mode="json")
-        if get_worker_id:
-            params["return_id"] = get_worker_id
-
-        resp = await self._client.post(
-            f"/work_pools/{work_pool_name}/workers/heartbeat",
-            json=params,
-        )
-
-        if (
-            (
-                self.server_type == ServerType.CLOUD
-                or get_current_settings().testing.test_mode
-            )
-            and get_worker_id
-            and resp.status_code == 200
-        ):
-            return UUID(resp.text)
-        else:
-            return None
-
-    async def read_workers_for_work_pool(
-        self,
-        work_pool_name: str,
-        worker_filter: Optional[WorkerFilter] = None,
-        offset: Optional[int] = None,
-        limit: Optional[int] = None,
-    ) -> list[Worker]:
-        """
-        Reads workers for a given work pool.
-
-        Args:
-            work_pool_name: The name of the work pool for which to get
-                member workers.
-            worker_filter: Criteria by which to filter workers.
-            limit: Limit for the worker query.
-            offset: Offset for the worker query.
-        """
-        response = await self._client.post(
-            f"/work_pools/{work_pool_name}/workers/filter",
-            json={
-                "workers": (
-                    worker_filter.model_dump(mode="json", exclude_unset=True)
-                    if worker_filter
-                    else None
-                ),
-                "offset": offset,
-                "limit": limit,
-            },
-        )
-
-        return pydantic.TypeAdapter(list[Worker]).validate_python(response.json())
-
-    async def read_work_pool(self, work_pool_name: str) -> WorkPool:
-        """
-        Reads information for a given work pool
-
-        Args:
-            work_pool_name: The name of the work pool for which to get
-                information.
-
-        Returns:
-            Information about the requested work pool.
-        """
-        try:
-            response = await self._client.get(f"/work_pools/{work_pool_name}")
-            return WorkPool.model_validate(response.json())
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def read_work_pools(
-        self,
-        limit: Optional[int] = None,
-        offset: int = 0,
-        work_pool_filter: Optional[WorkPoolFilter] = None,
-    ) -> list[WorkPool]:
-        """
-        Reads work pools.
-
-        Args:
-            limit: Limit for the work pool query.
-            offset: Offset for the work pool query.
-            work_pool_filter: Criteria by which to filter work pools.
-
-        Returns:
-            A list of work pools.
-        """
-
-        body: dict[str, Any] = {
-            "limit": limit,
-            "offset": offset,
-            "work_pools": (
-                work_pool_filter.model_dump(mode="json") if work_pool_filter else None
-            ),
-        }
-        response = await self._client.post("/work_pools/filter", json=body)
-        return pydantic.TypeAdapter(list[WorkPool]).validate_python(response.json())
-
-    async def create_work_pool(
-        self,
-        work_pool: WorkPoolCreate,
-        overwrite: bool = False,
-    ) -> WorkPool:
-        """
-        Creates a work pool with the provided configuration.
-
-        Args:
-            work_pool: Desired configuration for the new work pool.
-
-        Returns:
-            Information about the newly created work pool.
-        """
-        try:
-            response = await self._client.post(
-                "/work_pools/",
-                json=work_pool.model_dump(mode="json", exclude_unset=True),
-            )
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_409_CONFLICT:
-                if overwrite:
-                    existing_work_pool = await self.read_work_pool(
-                        work_pool_name=work_pool.name
-                    )
-                    if existing_work_pool.type != work_pool.type:
-                        warnings.warn(
-                            "Overwriting work pool type is not supported. Ignoring provided type.",
-                            category=UserWarning,
-                        )
-                    await self.update_work_pool(
-                        work_pool_name=work_pool.name,
-                        work_pool=WorkPoolUpdate.model_validate(
-                            work_pool.model_dump(exclude={"name", "type"})
-                        ),
-                    )
-                    response = await self._client.get(f"/work_pools/{work_pool.name}")
-                else:
-                    raise prefect.exceptions.ObjectAlreadyExists(http_exc=e) from e
-            else:
-                raise
-
-        return WorkPool.model_validate(response.json())
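
A sketch of creating (or, on a 409 with `overwrite=True`, updating) a process-type pool; `WorkPoolCreate` is the action model from `prefect.client.schemas.actions`, and the pool name is a placeholder:

```python
import asyncio

from prefect.client.orchestration import get_client
from prefect.client.schemas.actions import WorkPoolCreate


async def main() -> None:
    async with get_client() as client:
        pool = await client.create_work_pool(
            WorkPoolCreate(name="my-process-pool", type="process"),
            overwrite=True,
        )
        print(pool.id, pool.type)


asyncio.run(main())
```
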
|
2812
|
-
|
2813
|
-
async def update_work_pool(
|
2814
|
-
self,
|
2815
|
-
work_pool_name: str,
|
2816
|
-
work_pool: WorkPoolUpdate,
|
2817
|
-
) -> None:
|
2818
|
-
"""
|
2819
|
-
Updates a work pool.
|
2820
|
-
|
2821
|
-
Args:
|
2822
|
-
work_pool_name: Name of the work pool to update.
|
2823
|
-
work_pool: Fields to update in the work pool.
|
2824
|
-
"""
|
2825
|
-
try:
|
2826
|
-
await self._client.patch(
|
2827
|
-
f"/work_pools/{work_pool_name}",
|
2828
|
-
json=work_pool.model_dump(mode="json", exclude_unset=True),
|
2829
|
-
)
|
2830
|
-
except httpx.HTTPStatusError as e:
|
2831
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
2832
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
2833
|
-
else:
|
2834
|
-
raise
|
2835
|
-
|
2836
|
-
async def delete_work_pool(
|
2837
|
-
self,
|
2838
|
-
work_pool_name: str,
|
2839
|
-
) -> None:
|
2840
|
-
"""
|
2841
|
-
Deletes a work pool.
|
2842
|
-
|
2843
|
-
Args:
|
2844
|
-
work_pool_name: Name of the work pool to delete.
|
2845
|
-
"""
|
2846
|
-
try:
|
2847
|
-
await self._client.delete(f"/work_pools/{work_pool_name}")
|
2848
|
-
except httpx.HTTPStatusError as e:
|
2849
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
2850
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
2851
|
-
else:
|
2852
|
-
raise
|
2853
|
-
|
2854
|
-
async def read_work_queues(
|
2855
|
-
self,
|
2856
|
-
work_pool_name: Optional[str] = None,
|
2857
|
-
work_queue_filter: Optional[WorkQueueFilter] = None,
|
2858
|
-
limit: Optional[int] = None,
|
2859
|
-
offset: Optional[int] = None,
|
2860
|
-
) -> list[WorkQueue]:
|
2861
|
-
"""
|
2862
|
-
Retrieves queues for a work pool.
|
2863
|
-
|
2864
|
-
Args:
|
2865
|
-
work_pool_name: Name of the work pool for which to get queues.
|
2866
|
-
work_queue_filter: Criteria by which to filter queues.
|
2867
|
-
limit: Limit for the queue query.
|
2868
|
-
offset: Limit for the queue query.
|
2869
|
-
|
2870
|
-
Returns:
|
2871
|
-
List of queues for the specified work pool.
|
2872
|
-
"""
|
2873
|
-
json: dict[str, Any] = {
|
2874
|
-
"work_queues": (
|
2875
|
-
work_queue_filter.model_dump(mode="json", exclude_unset=True)
|
2876
|
-
if work_queue_filter
|
2877
|
-
else None
|
2878
|
-
),
|
2879
|
-
"limit": limit,
|
2880
|
-
"offset": offset,
|
2881
|
-
}
|
2882
|
-
|
2883
|
-
if work_pool_name:
|
2884
|
-
try:
|
2885
|
-
response = await self._client.post(
|
2886
|
-
f"/work_pools/{work_pool_name}/queues/filter",
|
2887
|
-
json=json,
|
2888
|
-
)
|
2889
|
-
except httpx.HTTPStatusError as e:
|
2890
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
2891
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
2892
|
-
else:
|
2893
|
-
raise
|
2894
|
-
else:
|
2895
|
-
response = await self._client.post("/work_queues/filter", json=json)
|
2896
|
-
|
2897
|
-
return pydantic.TypeAdapter(list[WorkQueue]).validate_python(response.json())
|
2898
|
-
|
2899
|
-    async def get_scheduled_flow_runs_for_deployments(
-        self,
-        deployment_ids: list[UUID],
-        scheduled_before: Optional[datetime.datetime] = None,
-        limit: Optional[int] = None,
-    ) -> list[FlowRunResponse]:
-        body: dict[str, Any] = dict(deployment_ids=[str(id) for id in deployment_ids])
-        if scheduled_before:
-            body["scheduled_before"] = str(scheduled_before)
-        if limit:
-            body["limit"] = limit
-
-        response = await self._client.post(
-            "/deployments/get_scheduled_flow_runs",
-            json=body,
-        )
-
-        return pydantic.TypeAdapter(list[FlowRunResponse]).validate_python(
-            response.json()
-        )
-
-    async def get_scheduled_flow_runs_for_work_pool(
-        self,
-        work_pool_name: str,
-        work_queue_names: Optional[list[str]] = None,
-        scheduled_before: Optional[datetime.datetime] = None,
-    ) -> list[WorkerFlowRunResponse]:
-        """
-        Retrieves scheduled flow runs for the provided set of work pool queues.
-
-        Args:
-            work_pool_name: The name of the work pool that the work pool
-                queues are associated with.
-            work_queue_names: The names of the work pool queues from which
-                to get scheduled flow runs.
-            scheduled_before: Datetime used to filter returned flow runs. Flow runs
-                scheduled for after the given datetime string will not be returned.
-
-        Returns:
-            A list of worker flow run responses containing information about the
-            retrieved flow runs.
-        """
-        body: dict[str, Any] = {}
-        if work_queue_names is not None:
-            body["work_queue_names"] = list(work_queue_names)
-        if scheduled_before:
-            body["scheduled_before"] = str(scheduled_before)
-
-        response = await self._client.post(
-            f"/work_pools/{work_pool_name}/get_scheduled_flow_runs",
-            json=body,
-        )
-        return pydantic.TypeAdapter(list[WorkerFlowRunResponse]).validate_python(
-            response.json()
-        )
-
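This is the polling entry point a worker uses to discover upcoming work. A sketch of a single poll cycle, assuming a pool named `my-pool` with a `default` queue exists:

```python
import asyncio
import datetime

from prefect.client.orchestration import get_client


async def poll_once() -> None:
    async with get_client() as client:
        # Only runs scheduled up to one minute from now are returned.
        cutoff = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(
            minutes=1
        )
        responses = await client.get_scheduled_flow_runs_for_work_pool(
            work_pool_name="my-pool",  # hypothetical pool name
            work_queue_names=["default"],
            scheduled_before=cutoff,
        )
        for worker_response in responses:
            print(worker_response.flow_run.id)


asyncio.run(poll_once())
```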
-    async def create_artifact(
-        self,
-        artifact: ArtifactCreate,
-    ) -> Artifact:
-        """
-        Creates an artifact with the provided configuration.
-
-        Args:
-            artifact: Desired configuration for the new artifact.
-        Returns:
-            Information about the newly created artifact.
-        """
-
-        response = await self._client.post(
-            "/artifacts/",
-            json=artifact.model_dump(mode="json", exclude_unset=True),
-        )
-
-        return Artifact.model_validate(response.json())
-
-    async def update_artifact(
-        self,
-        artifact_id: UUID,
-        artifact: ArtifactUpdate,
-    ) -> None:
-        """
-        Updates an artifact.
-
-        Args:
-            artifact: Desired values for the updated artifact.
-        Returns:
-            Information about the updated artifact.
-        """
-
-        await self._client.patch(
-            f"/artifacts/{artifact_id}",
-            json=artifact.model_dump(mode="json", exclude_unset=True),
-        )
-
-    async def read_artifacts(
-        self,
-        *,
-        artifact_filter: Optional[ArtifactFilter] = None,
-        flow_run_filter: Optional[FlowRunFilter] = None,
-        task_run_filter: Optional[TaskRunFilter] = None,
-        sort: Optional[ArtifactSort] = None,
-        limit: Optional[int] = None,
-        offset: int = 0,
-    ) -> list[Artifact]:
-        """
-        Query the Prefect API for artifacts. Only artifacts matching all criteria will
-        be returned.
-        Args:
-            artifact_filter: filter criteria for artifacts
-            flow_run_filter: filter criteria for flow runs
-            task_run_filter: filter criteria for task runs
-            sort: sort criteria for the artifacts
-            limit: limit for the artifact query
-            offset: offset for the artifact query
-        Returns:
-            a list of Artifact model representations of the artifacts
-        """
-        body: dict[str, Any] = {
-            "artifacts": (
-                artifact_filter.model_dump(mode="json") if artifact_filter else None
-            ),
-            "flow_runs": (
-                flow_run_filter.model_dump(mode="json") if flow_run_filter else None
-            ),
-            "task_runs": (
-                task_run_filter.model_dump(mode="json") if task_run_filter else None
-            ),
-            "sort": sort,
-            "limit": limit,
-            "offset": offset,
-        }
-        response = await self._client.post("/artifacts/filter", json=body)
-        return pydantic.TypeAdapter(list[Artifact]).validate_python(response.json())
-
-    async def read_latest_artifacts(
-        self,
-        *,
-        artifact_filter: Optional[ArtifactCollectionFilter] = None,
-        flow_run_filter: Optional[FlowRunFilter] = None,
-        task_run_filter: Optional[TaskRunFilter] = None,
-        sort: Optional[ArtifactCollectionSort] = None,
-        limit: Optional[int] = None,
-        offset: int = 0,
-    ) -> list[ArtifactCollection]:
-        """
-        Query the Prefect API for artifacts. Only artifacts matching all criteria will
-        be returned.
-        Args:
-            artifact_filter: filter criteria for artifacts
-            flow_run_filter: filter criteria for flow runs
-            task_run_filter: filter criteria for task runs
-            sort: sort criteria for the artifacts
-            limit: limit for the artifact query
-            offset: offset for the artifact query
-        Returns:
-            a list of Artifact model representations of the artifacts
-        """
-        body: dict[str, Any] = {
-            "artifacts": (
-                artifact_filter.model_dump(mode="json") if artifact_filter else None
-            ),
-            "flow_runs": (
-                flow_run_filter.model_dump(mode="json") if flow_run_filter else None
-            ),
-            "task_runs": (
-                task_run_filter.model_dump(mode="json") if task_run_filter else None
-            ),
-            "sort": sort,
-            "limit": limit,
-            "offset": offset,
-        }
-        response = await self._client.post("/artifacts/latest/filter", json=body)
-        return pydantic.TypeAdapter(list[ArtifactCollection]).validate_python(
-            response.json()
-        )
-
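Unlike `read_artifacts`, the `/artifacts/latest/filter` route collapses history to one `ArtifactCollection` row per artifact key. A minimal sketch of querying it:

```python
import asyncio

from prefect.client.orchestration import get_client


async def latest_artifacts() -> None:
    async with get_client() as client:
        # One row per key, pointing at the most recent artifact via latest_id.
        collections = await client.read_latest_artifacts(limit=5)
        for collection in collections:
            print(collection.key, collection.latest_id)


asyncio.run(latest_artifacts())
```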
-    async def delete_artifact(self, artifact_id: UUID) -> None:
-        """
-        Deletes an artifact with the provided id.
-
-        Args:
-            artifact_id: The id of the artifact to delete.
-        """
-        try:
-            await self._client.delete(f"/artifacts/{artifact_id}")
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == 404:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def create_variable(self, variable: VariableCreate) -> Variable:
-        """
-        Creates a variable with the provided configuration.
-
-        Args:
-            variable: Desired configuration for the new variable.
-        Returns:
-            Information about the newly created variable.
-        """
-        response = await self._client.post(
-            "/variables/",
-            json=variable.model_dump(mode="json", exclude_unset=True),
-        )
-        return Variable(**response.json())
-
-    async def update_variable(self, variable: VariableUpdate) -> None:
-        """
-        Updates a variable with the provided configuration.
-
-        Args:
-            variable: Desired configuration for the updated variable.
-        Returns:
-            Information about the updated variable.
-        """
-        await self._client.patch(
-            f"/variables/name/{variable.name}",
-            json=variable.model_dump(mode="json", exclude_unset=True),
-        )
-
-    async def read_variable_by_name(self, name: str) -> Optional[Variable]:
-        """Reads a variable by name. Returns None if no variable is found."""
-        try:
-            response = await self._client.get(f"/variables/name/{name}")
-            return Variable(**response.json())
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                return None
-            else:
-                raise
-
-    async def delete_variable_by_name(self, name: str) -> None:
-        """Deletes a variable by name."""
-        try:
-            await self._client.delete(f"/variables/name/{name}")
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == 404:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def read_variables(self, limit: Optional[int] = None) -> list[Variable]:
-        """Reads all variables."""
-        response = await self._client.post("/variables/filter", json={"limit": limit})
-        return pydantic.TypeAdapter(list[Variable]).validate_python(response.json())
-
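A round-trip sketch over the variable methods above; the variable name is a made-up example:

```python
import asyncio

from prefect.client.orchestration import get_client
from prefect.client.schemas.actions import VariableCreate


async def variable_round_trip() -> None:
    async with get_client() as client:
        await client.create_variable(
            VariableCreate(name="example_var", value="hello")  # hypothetical name
        )
        variable = await client.read_variable_by_name("example_var")
        print(variable.value if variable else "not found")
        await client.delete_variable_by_name("example_var")


asyncio.run(variable_round_trip())
```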
-    async def read_worker_metadata(self) -> dict[str, Any]:
-        """Reads worker metadata stored in Prefect collection registry."""
-        response = await self._client.get("collections/views/aggregate-worker-metadata")
-        response.raise_for_status()
-        return response.json()
-
-    async def increment_concurrency_slots(
-        self,
-        names: list[str],
-        slots: int,
-        mode: str,
-        create_if_missing: Optional[bool] = None,
-    ) -> httpx.Response:
-        return await self._client.post(
-            "/v2/concurrency_limits/increment",
-            json={
-                "names": names,
-                "slots": slots,
-                "mode": mode,
-                "create_if_missing": create_if_missing if create_if_missing else False,
-            },
-        )
-
-    async def release_concurrency_slots(
-        self, names: list[str], slots: int, occupancy_seconds: float
-    ) -> httpx.Response:
-        """
-        Release concurrency slots for the specified limits.
-
-        Args:
-            names (List[str]): A list of limit names for which to release slots.
-            slots (int): The number of concurrency slots to release.
-            occupancy_seconds (float): The duration in seconds that the slots
-                were occupied.
-
-        Returns:
-            httpx.Response: The HTTP response from the server.
-        """
-
-        return await self._client.post(
-            "/v2/concurrency_limits/decrement",
-            json={
-                "names": names,
-                "slots": slots,
-                "occupancy_seconds": occupancy_seconds,
-            },
-        )
-
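The increment and decrement routes are meant to be paired: acquire slots, do the guarded work, then release them with the measured occupancy. A sketch under those assumptions (the limit name is hypothetical):

```python
import asyncio
import time

from prefect.client.orchestration import get_client


async def run_with_slot() -> None:
    async with get_client() as client:
        await client.increment_concurrency_slots(
            names=["db-connections"],  # hypothetical limit name
            slots=1,
            mode="concurrency",  # assumed mode value; "rate_limit" is the other
        )
        start = time.monotonic()
        try:
            ...  # the work guarded by the limit goes here
        finally:
            await client.release_concurrency_slots(
                names=["db-connections"],
                slots=1,
                occupancy_seconds=time.monotonic() - start,
            )


asyncio.run(run_with_slot())
```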
-    async def create_global_concurrency_limit(
-        self, concurrency_limit: GlobalConcurrencyLimitCreate
-    ) -> UUID:
-        response = await self._client.post(
-            "/v2/concurrency_limits/",
-            json=concurrency_limit.model_dump(mode="json", exclude_unset=True),
-        )
-        return UUID(response.json()["id"])
-
-    async def update_global_concurrency_limit(
-        self, name: str, concurrency_limit: GlobalConcurrencyLimitUpdate
-    ) -> httpx.Response:
-        try:
-            response = await self._client.patch(
-                f"/v2/concurrency_limits/{name}",
-                json=concurrency_limit.model_dump(mode="json", exclude_unset=True),
-            )
-            return response
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def delete_global_concurrency_limit_by_name(
-        self, name: str
-    ) -> httpx.Response:
-        try:
-            response = await self._client.delete(f"/v2/concurrency_limits/{name}")
-            return response
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def read_global_concurrency_limit_by_name(
-        self, name: str
-    ) -> GlobalConcurrencyLimitResponse:
-        try:
-            response = await self._client.get(f"/v2/concurrency_limits/{name}")
-            return GlobalConcurrencyLimitResponse.model_validate(response.json())
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    async def upsert_global_concurrency_limit_by_name(
-        self, name: str, limit: int
-    ) -> None:
-        """Creates a global concurrency limit with the given name and limit if one does not already exist.
-
-        If one already exists matching the name, then update its limit if it is different.
-
-        Note: This is not done atomically.
-        """
-        try:
-            existing_limit = await self.read_global_concurrency_limit_by_name(name)
-        except prefect.exceptions.ObjectNotFound:
-            existing_limit = None
-
-        if not existing_limit:
-            await self.create_global_concurrency_limit(
-                GlobalConcurrencyLimitCreate(
-                    name=name,
-                    limit=limit,
-                )
-            )
-        elif existing_limit.limit != limit:
-            await self.update_global_concurrency_limit(
-                name, GlobalConcurrencyLimitUpdate(limit=limit)
-            )
-
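Because the upsert is a read followed by a write, two processes calling it concurrently for the same name can race; a single caller is safe. A minimal usage sketch (hypothetical limit name):

```python
import asyncio

from prefect.client.orchestration import get_client


async def ensure_limit() -> None:
    async with get_client() as client:
        # Creates the limit if missing, otherwise updates it when it differs.
        await client.upsert_global_concurrency_limit_by_name(
            name="db-connections",  # hypothetical limit name
            limit=10,
        )
        existing = await client.read_global_concurrency_limit_by_name("db-connections")
        print(existing.limit)


asyncio.run(ensure_limit())
```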
-    async def read_global_concurrency_limits(
-        self, limit: int = 10, offset: int = 0
-    ) -> list[GlobalConcurrencyLimitResponse]:
-        response = await self._client.post(
-            "/v2/concurrency_limits/filter",
-            json={
-                "limit": limit,
-                "offset": offset,
-            },
-        )
-        return pydantic.TypeAdapter(
-            list[GlobalConcurrencyLimitResponse]
-        ).validate_python(response.json())
-
-    async def create_flow_run_input(
-        self, flow_run_id: UUID, key: str, value: str, sender: Optional[str] = None
-    ) -> None:
-        """
-        Creates a flow run input.
-
-        Args:
-            flow_run_id: The flow run id.
-            key: The input key.
-            value: The input value.
-            sender: The sender of the input.
-        """
-
-        # Initialize the input to ensure that the key is valid.
-        FlowRunInput(flow_run_id=flow_run_id, key=key, value=value)
-
-        response = await self._client.post(
-            f"/flow_runs/{flow_run_id}/input",
-            json={"key": key, "value": value, "sender": sender},
-        )
-        response.raise_for_status()
-
-    async def filter_flow_run_input(
-        self, flow_run_id: UUID, key_prefix: str, limit: int, exclude_keys: set[str]
-    ) -> list[FlowRunInput]:
-        response = await self._client.post(
-            f"/flow_runs/{flow_run_id}/input/filter",
-            json={
-                "prefix": key_prefix,
-                "limit": limit,
-                "exclude_keys": list(exclude_keys),
-            },
-        )
-        response.raise_for_status()
-        return pydantic.TypeAdapter(list[FlowRunInput]).validate_python(response.json())
-
-    async def read_flow_run_input(self, flow_run_id: UUID, key: str) -> str:
-        """
-        Reads a flow run input.
-
-        Args:
-            flow_run_id: The flow run id.
-            key: The input key.
-        """
-        response = await self._client.get(f"/flow_runs/{flow_run_id}/input/{key}")
-        response.raise_for_status()
-        return response.content.decode()
-
-    async def delete_flow_run_input(self, flow_run_id: UUID, key: str) -> None:
-        """
-        Deletes a flow run input.
-
-        Args:
-            flow_run_id: The flow run id.
-            key: The input key.
-        """
-        response = await self._client.delete(f"/flow_runs/{flow_run_id}/input/{key}")
-        response.raise_for_status()
-
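Flow run inputs are stored as plain strings keyed per run, so callers handle their own serialization. A round-trip sketch that assumes an existing flow run id:

```python
import asyncio
from uuid import UUID

from prefect.client.orchestration import get_client


async def input_round_trip(flow_run_id: UUID) -> None:
    async with get_client() as client:
        await client.create_flow_run_input(flow_run_id, key="approval", value="yes")
        print(await client.read_flow_run_input(flow_run_id, key="approval"))
        await client.delete_flow_run_input(flow_run_id, key="approval")


# asyncio.run(input_round_trip(existing_flow_run_id))  # requires a real flow run id
```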
-    async def create_automation(self, automation: AutomationCore) -> UUID:
-        """Creates an automation in Prefect Cloud."""
-        response = await self._client.post(
-            "/automations/",
-            json=automation.model_dump(mode="json"),
-        )
-
-        return UUID(response.json()["id"])
-
-    async def update_automation(
-        self, automation_id: UUID, automation: AutomationCore
-    ) -> None:
-        """Updates an automation in Prefect Cloud."""
-        response = await self._client.put(
-            f"/automations/{automation_id}",
-            json=automation.model_dump(mode="json", exclude_unset=True),
-        )
-        response.raise_for_status()
-
-    async def read_automations(self) -> list[Automation]:
-        response = await self._client.post("/automations/filter")
-        response.raise_for_status()
-        return pydantic.TypeAdapter(list[Automation]).validate_python(response.json())
-
-    async def find_automation(
-        self, id_or_name: Union[str, UUID]
-    ) -> Optional[Automation]:
-        if isinstance(id_or_name, str):
-            name = id_or_name
-            try:
-                id = UUID(id_or_name)
-            except ValueError:
-                id = None
-        else:
-            id = id_or_name
-            name = str(id)
-
-        if id:
-            try:
-                automation = await self.read_automation(id)
-                return automation
-            except prefect.exceptions.HTTPStatusError as e:
-                if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                    raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-
-        automations = await self.read_automations()
-
-        # Look for it by an exact name
-        for automation in automations:
-            if automation.name == name:
-                return automation
-
-        # Look for it by a case-insensitive name
-        for automation in automations:
-            if automation.name.lower() == name.lower():
-                return automation
-
-        return None
-
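The lookup order above is: UUID first, then exact name, then case-insensitive name, with `None` as the fallback. A small sketch (the automation name is a made-up example):

```python
import asyncio

from prefect.client.orchestration import get_client


async def show_automation() -> None:
    async with get_client() as client:
        # Accepts either a UUID string or a human-readable name.
        automation = await client.find_automation("My Automation")  # hypothetical name
        print(automation.id if automation else "no match")


asyncio.run(show_automation())
```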
-    async def read_automation(
-        self, automation_id: Union[UUID, str]
-    ) -> Optional[Automation]:
-        response = await self._client.get(f"/automations/{automation_id}")
-        if response.status_code == 404:
-            return None
-        response.raise_for_status()
-        return Automation.model_validate(response.json())
-
-    async def read_automations_by_name(self, name: str) -> list[Automation]:
-        """
-        Query the Prefect API for an automation by name. Only automations matching the provided name will be returned.
-
-        Args:
-            name: the name of the automation to query
-
-        Returns:
-            a list of Automation model representations of the automations
-        """
-        automation_filter = filters.AutomationFilter(
-            name=filters.AutomationFilterName(any_=[name])
-        )
-
-        response = await self._client.post(
-            "/automations/filter",
-            json={
-                "sort": sorting.AutomationSort.UPDATED_DESC,
-                "automations": automation_filter.model_dump(mode="json")
-                if automation_filter
-                else None,
-            },
-        )
-
-        response.raise_for_status()
-
-        return pydantic.TypeAdapter(list[Automation]).validate_python(response.json())
-
-    async def pause_automation(self, automation_id: UUID) -> None:
-        response = await self._client.patch(
-            f"/automations/{automation_id}", json={"enabled": False}
-        )
-        response.raise_for_status()
-
-    async def resume_automation(self, automation_id: UUID) -> None:
-        response = await self._client.patch(
-            f"/automations/{automation_id}", json={"enabled": True}
-        )
-        response.raise_for_status()
-
-    async def delete_automation(self, automation_id: UUID) -> None:
-        response = await self._client.delete(f"/automations/{automation_id}")
-        if response.status_code == 404:
-            return
-
-        response.raise_for_status()
-
-    async def read_resource_related_automations(
-        self, resource_id: str
-    ) -> list[Automation]:
-        response = await self._client.get(f"/automations/related-to/{resource_id}")
-        response.raise_for_status()
-        return pydantic.TypeAdapter(list[Automation]).validate_python(response.json())
-
-    async def delete_resource_owned_automations(self, resource_id: str) -> None:
-        await self._client.delete(f"/automations/owned-by/{resource_id}")
-
-    async def api_version(self) -> str:
-        res = await self._client.get("/admin/version")
-        return res.json()
-
-    def client_version(self) -> str:
-        return prefect.__version__
-
-    async def raise_for_api_version_mismatch(self) -> None:
-        # Cloud is always compatible as a server
-        if self.server_type == ServerType.CLOUD:
-            return
-
-        try:
-            api_version = await self.api_version()
-        except Exception as e:
-            if "Unauthorized" in str(e):
-                raise e
-            raise RuntimeError(f"Failed to reach API at {self.api_url}") from e
-
-        api_version = version.parse(api_version)
-        client_version = version.parse(self.client_version())
-
-        if api_version.major != client_version.major:
-            raise RuntimeError(
-                f"Found incompatible versions: client: {client_version}, server: {api_version}. "
-                f"Major versions must match."
-            )
-
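The compatibility rule compares only the major components of the two versions. A small illustration with made-up version strings, assuming `version` above refers to `packaging.version`:

```python
from packaging import version

client_version = version.parse("3.1.12")
api_version = version.parse("3.0.4")

# A 3.x client may talk to any 3.y server, but not to a 2.y server.
print(client_version.major == api_version.major)  # True: both major version 3
```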
-    async def update_flow_run_labels(
-        self, flow_run_id: UUID, labels: KeyValueLabelsField
-    ) -> None:
-        """
-        Updates the labels of a flow run.
-        """
-
-        response = await self._client.patch(
-            f"/flow_runs/{flow_run_id}/labels", json=labels
-        )
-        response.raise_for_status()
-
-    async def __aenter__(self) -> Self:
-        """
-        Start the client.
-
-        If the client is already started, this will raise an exception.
-
-        If the client is already closed, this will raise an exception. Use a new client
-        instance instead.
-        """
-        if self._closed:
-            # httpx.AsyncClient does not allow reuse so we will not either.
-            raise RuntimeError(
-                "The client cannot be started again after closing. "
-                "Retrieve a new client with `get_client()` instead."
-            )
-
-        self._context_stack += 1
-
-        if self._started:
-            # allow reentrancy
-            return self
-
-        self._loop = asyncio.get_running_loop()
-        await self._exit_stack.__aenter__()
-
-        # Enter a lifespan context if using an ephemeral application.
-        # See https://github.com/encode/httpx/issues/350
-        if self._ephemeral_app and self.manage_lifespan:
-            self._ephemeral_lifespan = await self._exit_stack.enter_async_context(
-                app_lifespan_context(self._ephemeral_app)
-            )
-
-        if self._ephemeral_app:
-            self.logger.debug(
-                "Using ephemeral application with database at "
-                f"{PREFECT_API_DATABASE_CONNECTION_URL.value()}"
-            )
-        else:
-            self.logger.debug(f"Connecting to API at {self.api_url}")
-
-        # Enter the httpx client's context
-        await self._exit_stack.enter_async_context(self._client)
-
-        self._started = True
-
-        return self
-
-    async def __aexit__(self, *exc_info: Any) -> Optional[bool]:
-        """
-        Shutdown the client.
-        """
-
-        self._context_stack -= 1
-        if self._context_stack > 0:
-            return
-        self._closed = True
-        return await self._exit_stack.__aexit__(*exc_info)
-
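The `_context_stack` counter is what makes the context manager reentrant: only the outermost exit actually closes the client. A sketch of the behavior:

```python
import asyncio

from prefect.client.orchestration import get_client


async def nested_contexts() -> None:
    client = get_client()
    async with client:          # _context_stack -> 1; client starts
        async with client:      # _context_stack -> 2; reentrant no-op
            print(await client.api_version())
        # inner exit decrements to 1, so the client stays open here
        print(await client.api_version())
    # the outermost exit sets _closed; the client cannot be restarted


asyncio.run(nested_contexts())
```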
-    def __enter__(self) -> NoReturn:
-        raise RuntimeError(
-            "The `PrefectClient` must be entered with an async context. Use 'async "
-            "with PrefectClient(...)' not 'with PrefectClient(...)'"
-        )
-
-    def __exit__(self, *_: object) -> NoReturn:
-        assert False, "This should never be called but must be defined for __enter__"
-
-
-class SyncPrefectClient:
-    """
-    A synchronous client for interacting with the [Prefect REST API](/api-ref/rest-api/).
-
-    Args:
-        api: the REST API URL or FastAPI application to connect to
-        api_key: An optional API key for authentication.
-        api_version: The API version this client is compatible with.
-        httpx_settings: An optional dictionary of settings to pass to the underlying
-            `httpx.Client`
-
-    Examples:
-
-        Say hello to a Prefect REST API
-
-        <div class="terminal">
-        ```
-        >>> with get_client(sync_client=True) as client:
-        >>>     response = client.hello()
-        >>>
-        >>> print(response.json())
-        👋
-        ```
-        </div>
-    """
-
-    def __init__(
-        self,
-        api: Union[str, ASGIApp],
-        *,
-        auth_string: Optional[str] = None,
-        api_key: Optional[str] = None,
-        api_version: Optional[str] = None,
-        httpx_settings: Optional[dict[str, Any]] = None,
-        server_type: Optional[ServerType] = None,
-    ) -> None:
-        httpx_settings = httpx_settings.copy() if httpx_settings else {}
-        httpx_settings.setdefault("headers", {})
-
-        if PREFECT_API_TLS_INSECURE_SKIP_VERIFY:
-            # Create an unverified context for insecure connections
-            ctx = ssl.create_default_context()
-            ctx.check_hostname = False
-            ctx.verify_mode = ssl.CERT_NONE
-            httpx_settings.setdefault("verify", ctx)
-        else:
-            cert_file = PREFECT_API_SSL_CERT_FILE.value()
-            if not cert_file:
-                cert_file = certifi.where()
-            # Create a verified context with the certificate file
-            ctx = ssl.create_default_context(cafile=cert_file)
-            httpx_settings.setdefault("verify", ctx)
-
-        if api_version is None:
-            api_version = SERVER_API_VERSION
-        httpx_settings["headers"].setdefault("X-PREFECT-API-VERSION", api_version)
-        if api_key:
-            httpx_settings["headers"].setdefault("Authorization", f"Bearer {api_key}")
-
-        if auth_string:
-            token = base64.b64encode(auth_string.encode("utf-8")).decode("utf-8")
-            httpx_settings["headers"].setdefault("Authorization", f"Basic {token}")
-
-        # Context management
-        self._context_stack: int = 0
-        self._ephemeral_app: Optional[ASGIApp] = None
-        self.manage_lifespan = True
-        self.server_type: ServerType
-
-        self._closed = False
-        self._started = False
-
-        # Connect to an external application
-        if isinstance(api, str):
-            if httpx_settings.get("app"):
-                raise ValueError(
-                    "Invalid httpx settings: `app` cannot be set when providing an "
-                    "api url. `app` is only for use with ephemeral instances. Provide "
-                    "it as the `api` parameter instead."
-                )
-            httpx_settings.setdefault("base_url", api)
-
-            # See https://www.python-httpx.org/advanced/#pool-limit-configuration
-            httpx_settings.setdefault(
-                "limits",
-                httpx.Limits(
-                    # We see instability when allowing the client to open many connections at once.
-                    # Limiting concurrency results in more stable performance.
-                    max_connections=16,
-                    max_keepalive_connections=8,
-                    # The Prefect Cloud LB will keep connections alive for 30s.
-                    # Only allow the client to keep connections alive for 25s.
-                    keepalive_expiry=25,
-                ),
-            )
-
-            # See https://www.python-httpx.org/http2/
-            # Enabling HTTP/2 support on the client does not necessarily mean that your requests
-            # and responses will be transported over HTTP/2, since both the client and the server
-            # need to support HTTP/2. If you connect to a server that only supports HTTP/1.1 the
-            # client will use a standard HTTP/1.1 connection instead.
-            httpx_settings.setdefault("http2", PREFECT_API_ENABLE_HTTP2.value())
-
-            if server_type:
-                self.server_type = server_type
-            else:
-                self.server_type = (
-                    ServerType.CLOUD
-                    if api.startswith(PREFECT_CLOUD_API_URL.value())
-                    else ServerType.SERVER
-                )
-
-        # Connect to an in-process application
-        else:
-            self._ephemeral_app = api
-            self.server_type = ServerType.EPHEMERAL
-
-        # See https://www.python-httpx.org/advanced/#timeout-configuration
-        httpx_settings.setdefault(
-            "timeout",
-            httpx.Timeout(
-                connect=PREFECT_API_REQUEST_TIMEOUT.value(),
-                read=PREFECT_API_REQUEST_TIMEOUT.value(),
-                write=PREFECT_API_REQUEST_TIMEOUT.value(),
-                pool=PREFECT_API_REQUEST_TIMEOUT.value(),
-            ),
-        )
-
-        if not PREFECT_TESTING_UNIT_TEST_MODE:
-            httpx_settings.setdefault("follow_redirects", True)
-
-        enable_csrf_support = (
-            self.server_type != ServerType.CLOUD
-            and PREFECT_CLIENT_CSRF_SUPPORT_ENABLED.value()
-        )
-
-        self._client = PrefectHttpxSyncClient(
-            **httpx_settings, enable_csrf_support=enable_csrf_support
-        )
-
-        # See https://www.python-httpx.org/advanced/#custom-transports
-        #
-        # If we're using an HTTP/S client (not the ephemeral client), adjust the
-        # transport to add retries _after_ it is instantiated. If we alter the transport
-        # before instantiation, the transport will not be aware of proxies unless we
-        # reproduce all of the logic to make it so.
-        #
-        # Only alter the transport to set our default of 3 retries, don't modify any
-        # transport a user may have provided via httpx_settings.
-        #
-        # Making liberal use of getattr and isinstance checks here to avoid any
-        # surprises if the internals of httpx or httpcore change on us
-        if isinstance(api, str) and not httpx_settings.get("transport"):
-            transport_for_url = getattr(self._client, "_transport_for_url", None)
-            if callable(transport_for_url):
-                server_transport = transport_for_url(httpx.URL(api))
-                if isinstance(server_transport, httpx.HTTPTransport):
-                    pool = getattr(server_transport, "_pool", None)
-                    if isinstance(pool, httpcore.ConnectionPool):
-                        setattr(pool, "_retries", 3)
-
-        self.logger: Logger = get_logger("client")
-
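Because the constructor applies every default through `dict.setdefault`, anything the caller passes in `httpx_settings` takes precedence over the limits, timeout, and HTTP/2 defaults above. A minimal sketch, assuming a local server URL:

```python
import httpx

from prefect.client.orchestration import SyncPrefectClient

# The caller-supplied timeout wins over the PREFECT_API_REQUEST_TIMEOUT default.
client = SyncPrefectClient(
    "http://127.0.0.1:4200/api",  # hypothetical local server URL
    httpx_settings={"timeout": httpx.Timeout(10.0)},
)
with client:
    print(client.api_version())
```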
-    @property
-    def api_url(self) -> httpx.URL:
-        """
-        Get the base URL for the API.
-        """
-        return self._client.base_url
-
-    # Context management ----------------------------------------------------------------
-
-    def __enter__(self) -> "SyncPrefectClient":
-        """
-        Start the client.
-
-        If the client is already started, this will raise an exception.
-
-        If the client is already closed, this will raise an exception. Use a new client
-        instance instead.
-        """
-        if self._closed:
-            # httpx.Client does not allow reuse so we will not either.
-            raise RuntimeError(
-                "The client cannot be started again after closing. "
-                "Retrieve a new client with `get_client()` instead."
-            )
-
-        self._context_stack += 1
-
-        if self._started:
-            # allow reentrancy
-            return self
-
-        self._client.__enter__()
-        self._started = True
-
-        return self
-
-    def __exit__(self, *exc_info: Any) -> None:
-        """
-        Shutdown the client.
-        """
-        self._context_stack -= 1
-        if self._context_stack > 0:
-            return
-        self._closed = True
-        self._client.__exit__(*exc_info)
-
-    # API methods ----------------------------------------------------------------------
-
-    def api_healthcheck(self) -> Optional[Exception]:
-        """
-        Attempts to connect to the API and returns the encountered exception if not
-        successful.
-
-        If successful, returns `None`.
-        """
-        try:
-            self._client.get("/health")
-            return None
-        except Exception as exc:
-            return exc
-
-    def hello(self) -> httpx.Response:
-        """
-        Send a GET request to /hello for testing purposes.
-        """
-        return self._client.get("/hello")
-
-    def api_version(self) -> str:
-        res = self._client.get("/admin/version")
-        return res.json()
-
-    def client_version(self) -> str:
-        return prefect.__version__
-
-    def raise_for_api_version_mismatch(self) -> None:
-        # Cloud is always compatible as a server
-        if self.server_type == ServerType.CLOUD:
-            return
-
-        try:
-            api_version = self.api_version()
-        except Exception as e:
-            if "Unauthorized" in str(e):
-                raise e
-            raise RuntimeError(f"Failed to reach API at {self.api_url}") from e
-
-        api_version = version.parse(api_version)
-        client_version = version.parse(self.client_version())
-
-        if api_version.major != client_version.major:
-            raise RuntimeError(
-                f"Found incompatible versions: client: {client_version}, server: {api_version}. "
-                f"Major versions must match."
-            )
-
-    def create_flow(self, flow: "FlowObject[Any, Any]") -> UUID:
-        """
-        Create a flow in the Prefect API.
-
-        Args:
-            flow: a [Flow][prefect.flows.Flow] object
-
-        Raises:
-            httpx.RequestError: if a flow was not created for any reason
-
-        Returns:
-            the ID of the flow in the backend
-        """
-        return self.create_flow_from_name(flow.name)
-
-    def create_flow_from_name(self, flow_name: str) -> UUID:
-        """
-        Create a flow in the Prefect API.
-
-        Args:
-            flow_name: the name of the new flow
-
-        Raises:
-            httpx.RequestError: if a flow was not created for any reason
-
-        Returns:
-            the ID of the flow in the backend
-        """
-        flow_data = FlowCreate(name=flow_name)
-        response = self._client.post("/flows/", json=flow_data.model_dump(mode="json"))
-
-        flow_id = response.json().get("id")
-        if not flow_id:
-            raise httpx.RequestError(f"Malformed response: {response}")
-
-        # Return the id of the created flow
-        return UUID(flow_id)
-
-    def create_flow_run(
-        self,
-        flow: "FlowObject[Any, R]",
-        name: Optional[str] = None,
-        parameters: Optional[dict[str, Any]] = None,
-        context: Optional[dict[str, Any]] = None,
-        tags: Optional[Iterable[str]] = None,
-        parent_task_run_id: Optional[UUID] = None,
-        state: Optional["prefect.states.State[R]"] = None,
-    ) -> FlowRun:
-        """
-        Create a flow run for a flow.
-
-        Args:
-            flow: The flow model to create the flow run for
-            name: An optional name for the flow run
-            parameters: Parameter overrides for this flow run.
-            context: Optional run context data
-            tags: a list of tags to apply to this flow run
-            parent_task_run_id: if a subflow run is being created, the placeholder task
-                run identifier in the parent flow
-            state: The initial state for the run. If not provided, defaults to
-                `Scheduled` for now. Should always be a `Scheduled` type.
-
-        Raises:
-            httpx.RequestError: if the Prefect API does not successfully create a run for any reason
-
-        Returns:
-            The flow run model
-        """
-        parameters = parameters or {}
-        context = context or {}
-
-        if state is None:
-            state = prefect.states.Pending()
-
-        # Retrieve the flow id
-        flow_id = self.create_flow(flow)
-
-        flow_run_create = FlowRunCreate(
-            flow_id=flow_id,
-            flow_version=flow.version,
-            name=name,
-            parameters=parameters,
-            context=context,
-            tags=list(tags or []),
-            parent_task_run_id=parent_task_run_id,
-            state=state.to_state_create(),
-            empirical_policy=FlowRunPolicy(
-                retries=flow.retries,
-                retry_delay=int(flow.retry_delay_seconds or 0),
-            ),
-        )
-
-        flow_run_create_json = flow_run_create.model_dump(mode="json")
-        response = self._client.post("/flow_runs/", json=flow_run_create_json)
-        flow_run = FlowRun.model_validate(response.json())
-
-        # Restore the parameters to the local objects to retain expectations about
-        # Python objects
-        flow_run.parameters = parameters
-
-        return flow_run
-
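A minimal usage sketch for the synchronous `create_flow_run`, with a made-up flow:

```python
from prefect import flow
from prefect.client.orchestration import get_client


@flow
def greet(name: str = "world") -> str:  # hypothetical flow
    return f"hello {name}"


# sync_client=True yields a SyncPrefectClient
with get_client(sync_client=True) as client:
    flow_run = client.create_flow_run(greet, parameters={"name": "prefect"})
    print(flow_run.id, flow_run.state)
```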
-    def update_flow_run(
-        self,
-        flow_run_id: UUID,
-        flow_version: Optional[str] = None,
-        parameters: Optional[dict[str, Any]] = None,
-        name: Optional[str] = None,
-        tags: Optional[Iterable[str]] = None,
-        empirical_policy: Optional[FlowRunPolicy] = None,
-        infrastructure_pid: Optional[str] = None,
-        job_variables: Optional[dict[str, Any]] = None,
-    ) -> httpx.Response:
-        """
-        Update a flow run's details.
-
-        Args:
-            flow_run_id: The identifier for the flow run to update.
-            flow_version: A new version string for the flow run.
-            parameters: A dictionary of parameter values for the flow run. This will not
-                be merged with any existing parameters.
-            name: A new name for the flow run.
-            empirical_policy: A new flow run orchestration policy. This will not be
-                merged with any existing policy.
-            tags: An iterable of new tags for the flow run. These will not be merged with
-                any existing tags.
-            infrastructure_pid: The id of flow run as returned by an
-                infrastructure block.
-
-        Returns:
-            an `httpx.Response` object from the PATCH request
-        """
-        params: dict[str, Any] = {}
-        if flow_version is not None:
-            params["flow_version"] = flow_version
-        if parameters is not None:
-            params["parameters"] = parameters
-        if name is not None:
-            params["name"] = name
-        if tags is not None:
-            params["tags"] = tags
-        if empirical_policy is not None:
-            params["empirical_policy"] = empirical_policy.model_dump(
-                mode="json", exclude_unset=True
-            )
-        if infrastructure_pid:
-            params["infrastructure_pid"] = infrastructure_pid
-        if job_variables is not None:
-            params["job_variables"] = job_variables
-
-        flow_run_data = FlowRunUpdate(**params)
-
-        return self._client.patch(
-            f"/flow_runs/{flow_run_id}",
-            json=flow_run_data.model_dump(mode="json", exclude_unset=True),
-        )
-
-    def read_flow_run(self, flow_run_id: UUID) -> FlowRun:
-        """
-        Query the Prefect API for a flow run by id.
-
-        Args:
-            flow_run_id: the flow run ID of interest
-
-        Returns:
-            a Flow Run model representation of the flow run
-        """
-        try:
-            response = self._client.get(f"/flow_runs/{flow_run_id}")
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == 404:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-        return FlowRun.model_validate(response.json())
-
-    def read_flow_runs(
-        self,
-        *,
-        flow_filter: Optional[FlowFilter] = None,
-        flow_run_filter: Optional[FlowRunFilter] = None,
-        task_run_filter: Optional[TaskRunFilter] = None,
-        deployment_filter: Optional[DeploymentFilter] = None,
-        work_pool_filter: Optional[WorkPoolFilter] = None,
-        work_queue_filter: Optional[WorkQueueFilter] = None,
-        sort: Optional[FlowRunSort] = None,
-        limit: Optional[int] = None,
-        offset: int = 0,
-    ) -> list[FlowRun]:
-        """
-        Query the Prefect API for flow runs. Only flow runs matching all criteria will
-        be returned.
-
-        Args:
-            flow_filter: filter criteria for flows
-            flow_run_filter: filter criteria for flow runs
-            task_run_filter: filter criteria for task runs
-            deployment_filter: filter criteria for deployments
-            work_pool_filter: filter criteria for work pools
-            work_queue_filter: filter criteria for work pool queues
-            sort: sort criteria for the flow runs
-            limit: limit for the flow run query
-            offset: offset for the flow run query
-
-        Returns:
-            a list of Flow Run model representations
-                of the flow runs
-        """
-        body: dict[str, Any] = {
-            "flows": flow_filter.model_dump(mode="json") if flow_filter else None,
-            "flow_runs": (
-                flow_run_filter.model_dump(mode="json", exclude_unset=True)
-                if flow_run_filter
-                else None
-            ),
-            "task_runs": (
-                task_run_filter.model_dump(mode="json") if task_run_filter else None
-            ),
-            "deployments": (
-                deployment_filter.model_dump(mode="json") if deployment_filter else None
-            ),
-            "work_pools": (
-                work_pool_filter.model_dump(mode="json") if work_pool_filter else None
-            ),
-            "work_pool_queues": (
-                work_queue_filter.model_dump(mode="json") if work_queue_filter else None
-            ),
-            "sort": sort,
-            "limit": limit,
-            "offset": offset,
-        }
-
-        response = self._client.post("/flow_runs/filter", json=body)
-        return pydantic.TypeAdapter(list[FlowRun]).validate_python(response.json())
-
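A sketch of composing the nested filter models this method accepts, assuming the filter and sorting classes under `prefect.client.schemas` keep the shapes used above (`any_` lists inside nested filter objects):

```python
from prefect.client.orchestration import get_client
from prefect.client.schemas.filters import (
    FlowRunFilter,
    FlowRunFilterState,
    FlowRunFilterStateType,
)
from prefect.client.schemas.objects import StateType
from prefect.client.schemas.sorting import FlowRunSort

with get_client(sync_client=True) as client:
    # Most recent failed runs first.
    runs = client.read_flow_runs(
        flow_run_filter=FlowRunFilter(
            state=FlowRunFilterState(
                type=FlowRunFilterStateType(any_=[StateType.FAILED])
            )
        ),
        sort=FlowRunSort.START_TIME_DESC,
        limit=20,
    )
    for run in runs:
        print(run.name, run.state_type)
```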
-    def set_flow_run_state(
-        self,
-        flow_run_id: UUID,
-        state: "prefect.states.State[T]",
-        force: bool = False,
-    ) -> OrchestrationResult[T]:
-        """
-        Set the state of a flow run.
-
-        Args:
-            flow_run_id: the id of the flow run
-            state: the state to set
-            force: if True, disregard orchestration logic when setting the state,
-                forcing the Prefect API to accept the state
-
-        Returns:
-            an OrchestrationResult model representation of state orchestration output
-        """
-        state_create = state.to_state_create()
-        state_create.state_details.flow_run_id = flow_run_id
-        state_create.state_details.transition_id = uuid4()
-        try:
-            response = self._client.post(
-                f"/flow_runs/{flow_run_id}/set_state",
-                json=dict(
-                    state=state_create.model_dump(mode="json", serialize_as_any=True),
-                    force=force,
-                ),
-            )
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-        result: OrchestrationResult[T] = OrchestrationResult.model_validate(
-            response.json()
-        )
-        return result
-
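A sketch of forcing a state transition and inspecting the orchestration result; the flow is a made-up placeholder:

```python
import prefect.states
from prefect import flow
from prefect.client.orchestration import get_client


@flow
def noop() -> None:  # hypothetical flow, just to have a run to cancel
    pass


with get_client(sync_client=True) as client:
    run = client.create_flow_run(noop)
    result = client.set_flow_run_state(
        flow_run_id=run.id,
        state=prefect.states.Cancelled(message="shutting down"),
        force=True,  # bypass orchestration rules entirely
    )
    print(result.status)  # SetStateStatus.ACCEPT on success
```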
-    def set_flow_run_name(self, flow_run_id: UUID, name: str) -> httpx.Response:
-        flow_run_data = FlowRunUpdate(name=name)
-        return self._client.patch(
-            f"/flow_runs/{flow_run_id}",
-            json=flow_run_data.model_dump(mode="json", exclude_unset=True),
-        )
-
-    def set_task_run_name(self, task_run_id: UUID, name: str) -> httpx.Response:
-        task_run_data = TaskRunUpdate(name=name)
-        return self._client.patch(
-            f"/task_runs/{task_run_id}",
-            json=task_run_data.model_dump(mode="json", exclude_unset=True),
-        )
-
-    def create_task_run(
-        self,
-        task: "TaskObject[P, R]",
-        flow_run_id: Optional[UUID],
-        dynamic_key: str,
-        id: Optional[UUID] = None,
-        name: Optional[str] = None,
-        extra_tags: Optional[Iterable[str]] = None,
-        state: Optional[prefect.states.State[R]] = None,
-        task_inputs: Optional[
-            dict[
-                str,
-                list[
-                    Union[
-                        TaskRunResult,
-                        Parameter,
-                        Constant,
-                    ]
-                ],
-            ]
-        ] = None,
-    ) -> TaskRun:
-        """
-        Create a task run
-
-        Args:
-            task: The Task to run
-            flow_run_id: The flow run id with which to associate the task run
-            dynamic_key: A key unique to this particular run of a Task within the flow
-            id: An optional ID for the task run. If not provided, one will be generated
-                server-side.
-            name: An optional name for the task run
-            extra_tags: an optional list of extra tags to apply to the task run in
-                addition to `task.tags`
-            state: The initial state for the run. If not provided, defaults to
-                `Pending` for now. Should always be a `Scheduled` type.
-            task_inputs: the set of inputs passed to the task
-
-        Returns:
-            The created task run.
-        """
-        tags = set(task.tags).union(extra_tags or [])
-
-        if state is None:
-            state = prefect.states.Pending()
-
-        retry_delay = task.retry_delay_seconds
-        if isinstance(retry_delay, list):
-            retry_delay = [int(rd) for rd in retry_delay]
-        elif isinstance(retry_delay, float):
-            retry_delay = int(retry_delay)
-
-        task_run_data = TaskRunCreate(
-            id=id,
-            name=name,
-            flow_run_id=flow_run_id,
-            task_key=task.task_key,
-            dynamic_key=dynamic_key,
-            tags=list(tags),
-            task_version=task.version,
-            empirical_policy=TaskRunPolicy(
-                retries=task.retries,
-                retry_delay=retry_delay,
-                retry_jitter_factor=task.retry_jitter_factor,
-            ),
-            state=state.to_state_create(),
-            task_inputs=task_inputs or {},
-        )
-
-        content = task_run_data.model_dump_json(exclude={"id"} if id is None else None)
-
-        response = self._client.post("/task_runs/", content=content)
-        return TaskRun.model_validate(response.json())
-
-    def read_task_run(self, task_run_id: UUID) -> TaskRun:
-        """
-        Query the Prefect API for a task run by id.
-
-        Args:
-            task_run_id: the task run ID of interest
-
-        Returns:
-            a Task Run model representation of the task run
-        """
-        try:
-            response = self._client.get(f"/task_runs/{task_run_id}")
-            return TaskRun.model_validate(response.json())
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-
-    def read_task_runs(
-        self,
-        *,
-        flow_filter: Optional[FlowFilter] = None,
-        flow_run_filter: Optional[FlowRunFilter] = None,
-        task_run_filter: Optional[TaskRunFilter] = None,
-        deployment_filter: Optional[DeploymentFilter] = None,
-        sort: Optional[TaskRunSort] = None,
-        limit: Optional[int] = None,
-        offset: int = 0,
-    ) -> list[TaskRun]:
-        """
-        Query the Prefect API for task runs. Only task runs matching all criteria will
-        be returned.
-
-        Args:
-            flow_filter: filter criteria for flows
-            flow_run_filter: filter criteria for flow runs
-            task_run_filter: filter criteria for task runs
-            deployment_filter: filter criteria for deployments
-            sort: sort criteria for the task runs
-            limit: a limit for the task run query
-            offset: an offset for the task run query
-
-        Returns:
-            a list of Task Run model representations
-                of the task runs
-        """
-        body: dict[str, Any] = {
-            "flows": flow_filter.model_dump(mode="json") if flow_filter else None,
-            "flow_runs": (
-                flow_run_filter.model_dump(mode="json", exclude_unset=True)
-                if flow_run_filter
-                else None
-            ),
-            "task_runs": (
-                task_run_filter.model_dump(mode="json") if task_run_filter else None
-            ),
-            "deployments": (
-                deployment_filter.model_dump(mode="json") if deployment_filter else None
-            ),
-            "sort": sort,
-            "limit": limit,
-            "offset": offset,
-        }
-        response = self._client.post("/task_runs/filter", json=body)
-        return pydantic.TypeAdapter(list[TaskRun]).validate_python(response.json())
-
-    def set_task_run_state(
-        self,
-        task_run_id: UUID,
-        state: prefect.states.State[Any],
-        force: bool = False,
-    ) -> OrchestrationResult[Any]:
-        """
-        Set the state of a task run.
-
-        Args:
-            task_run_id: the id of the task run
-            state: the state to set
-            force: if True, disregard orchestration logic when setting the state,
-                forcing the Prefect API to accept the state
-
-        Returns:
-            an OrchestrationResult model representation of state orchestration output
-        """
-        state_create = state.to_state_create()
-        state_create.state_details.task_run_id = task_run_id
-        response = self._client.post(
-            f"/task_runs/{task_run_id}/set_state",
-            json=dict(state=state_create.model_dump(mode="json"), force=force),
-        )
-        result: OrchestrationResult[Any] = OrchestrationResult.model_validate(
-            response.json()
-        )
-        return result
-
-    def read_task_run_states(self, task_run_id: UUID) -> list[prefect.states.State]:
-        """
-        Query for the states of a task run
-
-        Args:
-            task_run_id: the id of the task run
-
-        Returns:
-            a list of State model representations of the task run states
-        """
-        response = self._client.get(
-            "/task_run_states/", params=dict(task_run_id=str(task_run_id))
-        )
-        return pydantic.TypeAdapter(list[prefect.states.State]).validate_python(
-            response.json()
-        )
-
-
def read_deployment(
|
4309
|
-
self,
|
4310
|
-
deployment_id: UUID,
|
4311
|
-
) -> DeploymentResponse:
|
4312
|
-
"""
|
4313
|
-
Query the Prefect API for a deployment by id.
|
4314
|
-
|
4315
|
-
Args:
|
4316
|
-
deployment_id: the deployment ID of interest
|
4317
|
-
|
4318
|
-
Returns:
|
4319
|
-
a [Deployment model][prefect.client.schemas.objects.Deployment] representation of the deployment
|
4320
|
-
"""
|
4321
|
-
try:
|
4322
|
-
response = self._client.get(f"/deployments/{deployment_id}")
|
4323
|
-
except httpx.HTTPStatusError as e:
|
4324
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
4325
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
4326
|
-
else:
|
4327
|
-
raise
|
4328
|
-
return DeploymentResponse.model_validate(response.json())
|
4329
|
-
|
4330
|
-
def read_deployment_by_name(
|
4331
|
-
self,
|
4332
|
-
name: str,
|
4333
|
-
) -> DeploymentResponse:
|
4334
|
-
"""
|
4335
|
-
Query the Prefect API for a deployment by name.
|
4336
|
-
|
4337
|
-
Args:
|
4338
|
-
name: A deployed flow's name: <FLOW_NAME>/<DEPLOYMENT_NAME>
|
4339
|
-
|
4340
|
-
Raises:
|
4341
|
-
prefect.exceptions.ObjectNotFound: If request returns 404
|
4342
|
-
httpx.RequestError: If request fails
|
4343
|
-
|
4344
|
-
Returns:
|
4345
|
-
a Deployment model representation of the deployment
|
4346
|
-
"""
|
4347
|
-
try:
|
4348
|
-
response = self._client.get(f"/deployments/name/{name}")
|
4349
|
-
except httpx.HTTPStatusError as e:
|
4350
|
-
if e.response.status_code == status.HTTP_404_NOT_FOUND:
|
4351
|
-
raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
|
4352
|
-
else:
|
4353
|
-
raise
|
4354
|
-
|
4355
|
-
return DeploymentResponse.model_validate(response.json())
|
4356
|
-
|
4357
|
-
def create_artifact(
|
4358
|
-
self,
|
4359
|
-
artifact: ArtifactCreate,
|
4360
|
-
) -> Artifact:
|
4361
|
-
"""
|
4362
|
-
Creates an artifact with the provided configuration.
|
4363
|
-
|
4364
|
-
Args:
|
4365
|
-
artifact: Desired configuration for the new artifact.
|
4366
|
-
Returns:
|
4367
|
-
Information about the newly created artifact.
|
4368
|
-
"""
|
4369
|
-
|
4370
|
-
response = self._client.post(
|
4371
|
-
"/artifacts/",
|
4372
|
-
json=artifact.model_dump(mode="json", exclude_unset=True),
|
4373
|
-
)
|
4374
|
-
|
4375
|
-
return Artifact.model_validate(response.json())
-
-    def release_concurrency_slots(
-        self, names: list[str], slots: int, occupancy_seconds: float
-    ) -> httpx.Response:
-        """
-        Release concurrency slots for the specified limits.
-
-        Args:
-            names (List[str]): A list of limit names for which to release slots.
-            slots (int): The number of concurrency slots to release.
-            occupancy_seconds (float): The duration in seconds that the slots
-                were occupied.
-
-        Returns:
-            httpx.Response: The HTTP response from the server.
-        """
-        return self._client.post(
-            "/v2/concurrency_limits/decrement",
-            json={
-                "names": names,
-                "slots": slots,
-                "occupancy_seconds": occupancy_seconds,
-            },
-        )
-
-    def decrement_v1_concurrency_slots(
-        self, names: list[str], occupancy_seconds: float, task_run_id: UUID
-    ) -> httpx.Response:
-        """
-        Release the specified concurrency limits.
-
-        Args:
-            names (List[str]): A list of limit names to decrement.
-            occupancy_seconds (float): The duration in seconds that the slots
-                were held.
-            task_run_id (UUID): The task run ID that incremented the limits.
-
-        Returns:
-            httpx.Response: The HTTP response from the server.
-        """
-        return self._client.post(
-            "/concurrency_limits/decrement",
-            json={
-                "names": names,
-                "occupancy_seconds": occupancy_seconds,
-                "task_run_id": str(task_run_id),
-            },
-        )
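Note the asymmetry between the two decrement calls: the v2 endpoint (`/v2/concurrency_limits/decrement`) is told how many `slots` to release, while the v1 endpoint is keyed by the `task_run_id` that originally acquired the limits. These methods are normally driven by Prefect's concurrency context managers rather than called directly; a direct-call sketch with placeholder limit names and ids:

```python
from uuid import uuid4

from prefect.client.orchestration import get_client

with get_client(sync_client=True) as client:
    # v2 limits: release one slot held for ~12.5 seconds on the "database" limit.
    client.release_concurrency_slots(
        names=["database"], slots=1, occupancy_seconds=12.5
    )
    # v1 (tag-based) limits: the decrement is attributed to a task run
    # (a random placeholder id here, for illustration only).
    client.decrement_v1_concurrency_slots(
        names=["database"], occupancy_seconds=12.5, task_run_id=uuid4()
    )
```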
-
-    def update_flow_run_labels(
-        self, flow_run_id: UUID, labels: KeyValueLabelsField
-    ) -> None:
-        """
-        Updates the labels of a flow run.
-        """
-        response = self._client.patch(
-            f"/flow_runs/{flow_run_id}/labels",
-            json=labels,
-        )
-        response.raise_for_status()
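`update_flow_run_labels` PATCHes the labels endpoint and surfaces HTTP errors via `raise_for_status()` instead of the `ObjectNotFound` translation used elsewhere in this hunk. A sketch with placeholder values:

```python
from uuid import UUID

from prefect.client.orchestration import get_client

flow_run_id = UUID("00000000-0000-0000-0000-000000000000")  # placeholder id

with get_client(sync_client=True) as client:
    # Labels are plain JSON key/value pairs attached to the flow run.
    client.update_flow_run_labels(flow_run_id, {"team": "data-eng", "tier": "gold"})
```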
-
-    def read_block_document_by_name(
-        self,
-        name: str,
-        block_type_slug: str,
-        include_secrets: bool = True,
-    ) -> BlockDocument:
-        """
-        Read the block document with the specified name that corresponds to a
-        specific block type name.
-
-        Args:
-            name: The block document name.
-            block_type_slug: The block type slug.
-            include_secrets (bool): whether to include secret values
-                on the Block, corresponding to Pydantic's `SecretStr` and
-                `SecretBytes` fields. These fields are automatically obfuscated
-                by Pydantic, but users can additionally choose not to receive
-                their values from the API. Note that any business logic on the
-                Block may not work if this is `False`.
-
-        Raises:
-            httpx.RequestError: if the block document was not found for any reason
-
-        Returns:
-            A block document or None.
-        """
-        try:
-            response = self._client.get(
-                f"/block_types/slug/{block_type_slug}/block_documents/name/{name}",
-                params=dict(include_secrets=include_secrets),
-            )
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
-        return BlockDocument.model_validate(response.json())
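Despite the removed docstring's "A block document or None", this method never returns `None`: a 404 is translated to `prefect.exceptions.ObjectNotFound`. A lookup sketch with placeholder slug and document name:

```python
from prefect.client.orchestration import get_client
from prefect.exceptions import ObjectNotFound

with get_client(sync_client=True) as client:
    try:
        doc = client.read_block_document_by_name(
            name="prod-creds",         # placeholder document name
            block_type_slug="secret",  # placeholder block type slug
            include_secrets=False,     # omit SecretStr/SecretBytes values
        )
    except ObjectNotFound:
        doc = None
```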
-
-    def create_variable(self, variable: VariableCreate) -> Variable:
-        """
-        Creates a variable with the provided configuration.
-
-        Args:
-            variable: Desired configuration for the new variable.
-        Returns:
-            Information about the newly created variable.
-        """
-        response = self._client.post(
-            "/variables/",
-            json=variable.model_dump(mode="json", exclude_unset=True),
-        )
-        return Variable(**response.json())
-
-    def update_variable(self, variable: VariableUpdate) -> None:
-        """
-        Updates a variable with the provided configuration.
-
-        Args:
-            variable: Desired configuration for the updated variable.
-        Returns:
-            Information about the updated variable.
-        """
-        self._client.patch(
-            f"/variables/name/{variable.name}",
-            json=variable.model_dump(mode="json", exclude_unset=True),
-        )
-
-    def read_variable_by_name(self, name: str) -> Optional[Variable]:
-        """Reads a variable by name. Returns None if no variable is found."""
-        try:
-            response = self._client.get(f"/variables/name/{name}")
-            return Variable(**response.json())
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == status.HTTP_404_NOT_FOUND:
-                return None
-            else:
-                raise
-
-    def delete_variable_by_name(self, name: str) -> None:
-        """Deletes a variable by name."""
-        try:
-            self._client.delete(f"/variables/name/{name}")
-        except httpx.HTTPStatusError as e:
-            if e.response.status_code == 404:
-                raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
-            else:
-                raise
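The variable helpers form a small CRUD surface with mixed 404 semantics: `read_variable_by_name` is the one lookup in this hunk that returns `None` on 404 instead of raising, `delete_variable_by_name` raises `ObjectNotFound`, and `update_variable` returns `None` even though its removed docstring promises "Information about the updated variable." A sketch of the full cycle with a placeholder variable name:

```python
from prefect.client.orchestration import get_client
from prefect.client.schemas.actions import VariableCreate, VariableUpdate

with get_client(sync_client=True) as client:
    client.create_variable(VariableCreate(name="answer", value=42))
    client.update_variable(VariableUpdate(name="answer", value=43))
    variable = client.read_variable_by_name("answer")
    assert variable is not None and variable.value == 43
    client.delete_variable_by_name("answer")
    assert client.read_variable_by_name("answer") is None  # 404 -> None
```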