prefect-client 3.1.10__py3-none-any.whl → 3.1.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (141)
  1. prefect/_experimental/lineage.py +7 -8
  2. prefect/_experimental/sla/__init__.py +0 -0
  3. prefect/_experimental/sla/client.py +66 -0
  4. prefect/_experimental/sla/objects.py +53 -0
  5. prefect/_internal/_logging.py +15 -3
  6. prefect/_internal/compatibility/async_dispatch.py +22 -16
  7. prefect/_internal/compatibility/deprecated.py +42 -18
  8. prefect/_internal/compatibility/migration.py +2 -2
  9. prefect/_internal/concurrency/inspection.py +12 -14
  10. prefect/_internal/concurrency/primitives.py +2 -2
  11. prefect/_internal/concurrency/services.py +154 -80
  12. prefect/_internal/concurrency/waiters.py +13 -9
  13. prefect/_internal/pydantic/annotations/pendulum.py +7 -7
  14. prefect/_internal/pytz.py +4 -3
  15. prefect/_internal/retries.py +10 -5
  16. prefect/_internal/schemas/bases.py +19 -10
  17. prefect/_internal/schemas/validators.py +227 -388
  18. prefect/_version.py +3 -3
  19. prefect/automations.py +236 -30
  20. prefect/blocks/__init__.py +3 -3
  21. prefect/blocks/abstract.py +53 -30
  22. prefect/blocks/core.py +183 -84
  23. prefect/blocks/notifications.py +133 -73
  24. prefect/blocks/redis.py +13 -9
  25. prefect/blocks/system.py +24 -11
  26. prefect/blocks/webhook.py +7 -5
  27. prefect/cache_policies.py +3 -2
  28. prefect/client/orchestration/__init__.py +1957 -0
  29. prefect/client/orchestration/_artifacts/__init__.py +0 -0
  30. prefect/client/orchestration/_artifacts/client.py +239 -0
  31. prefect/client/orchestration/_automations/__init__.py +0 -0
  32. prefect/client/orchestration/_automations/client.py +329 -0
  33. prefect/client/orchestration/_blocks_documents/__init__.py +0 -0
  34. prefect/client/orchestration/_blocks_documents/client.py +334 -0
  35. prefect/client/orchestration/_blocks_schemas/__init__.py +0 -0
  36. prefect/client/orchestration/_blocks_schemas/client.py +200 -0
  37. prefect/client/orchestration/_blocks_types/__init__.py +0 -0
  38. prefect/client/orchestration/_blocks_types/client.py +380 -0
  39. prefect/client/orchestration/_concurrency_limits/__init__.py +0 -0
  40. prefect/client/orchestration/_concurrency_limits/client.py +762 -0
  41. prefect/client/orchestration/_deployments/__init__.py +0 -0
  42. prefect/client/orchestration/_deployments/client.py +1128 -0
  43. prefect/client/orchestration/_flow_runs/__init__.py +0 -0
  44. prefect/client/orchestration/_flow_runs/client.py +903 -0
  45. prefect/client/orchestration/_flows/__init__.py +0 -0
  46. prefect/client/orchestration/_flows/client.py +343 -0
  47. prefect/client/orchestration/_logs/__init__.py +0 -0
  48. prefect/client/orchestration/_logs/client.py +97 -0
  49. prefect/client/orchestration/_variables/__init__.py +0 -0
  50. prefect/client/orchestration/_variables/client.py +157 -0
  51. prefect/client/orchestration/base.py +46 -0
  52. prefect/client/orchestration/routes.py +145 -0
  53. prefect/client/schemas/__init__.py +68 -28
  54. prefect/client/schemas/actions.py +2 -2
  55. prefect/client/schemas/filters.py +5 -0
  56. prefect/client/schemas/objects.py +8 -15
  57. prefect/client/schemas/schedules.py +22 -10
  58. prefect/concurrency/_asyncio.py +87 -0
  59. prefect/concurrency/{events.py → _events.py} +10 -10
  60. prefect/concurrency/asyncio.py +20 -104
  61. prefect/concurrency/context.py +6 -4
  62. prefect/concurrency/services.py +26 -74
  63. prefect/concurrency/sync.py +23 -44
  64. prefect/concurrency/v1/_asyncio.py +63 -0
  65. prefect/concurrency/v1/{events.py → _events.py} +13 -15
  66. prefect/concurrency/v1/asyncio.py +27 -80
  67. prefect/concurrency/v1/context.py +6 -4
  68. prefect/concurrency/v1/services.py +33 -79
  69. prefect/concurrency/v1/sync.py +18 -37
  70. prefect/context.py +66 -45
  71. prefect/deployments/base.py +10 -144
  72. prefect/deployments/flow_runs.py +12 -2
  73. prefect/deployments/runner.py +53 -4
  74. prefect/deployments/steps/pull.py +13 -0
  75. prefect/engine.py +17 -4
  76. prefect/events/clients.py +7 -1
  77. prefect/events/schemas/events.py +3 -2
  78. prefect/filesystems.py +6 -2
  79. prefect/flow_engine.py +101 -85
  80. prefect/flows.py +10 -1
  81. prefect/input/run_input.py +2 -1
  82. prefect/logging/logging.yml +1 -1
  83. prefect/main.py +1 -3
  84. prefect/results.py +2 -307
  85. prefect/runner/runner.py +4 -2
  86. prefect/runner/storage.py +87 -21
  87. prefect/serializers.py +32 -25
  88. prefect/settings/legacy.py +4 -4
  89. prefect/settings/models/api.py +3 -3
  90. prefect/settings/models/cli.py +3 -3
  91. prefect/settings/models/client.py +5 -3
  92. prefect/settings/models/cloud.py +8 -3
  93. prefect/settings/models/deployments.py +3 -3
  94. prefect/settings/models/experiments.py +4 -7
  95. prefect/settings/models/flows.py +3 -3
  96. prefect/settings/models/internal.py +4 -2
  97. prefect/settings/models/logging.py +4 -3
  98. prefect/settings/models/results.py +3 -3
  99. prefect/settings/models/root.py +3 -2
  100. prefect/settings/models/runner.py +4 -4
  101. prefect/settings/models/server/api.py +3 -3
  102. prefect/settings/models/server/database.py +11 -4
  103. prefect/settings/models/server/deployments.py +6 -2
  104. prefect/settings/models/server/ephemeral.py +4 -2
  105. prefect/settings/models/server/events.py +3 -2
  106. prefect/settings/models/server/flow_run_graph.py +6 -2
  107. prefect/settings/models/server/root.py +3 -3
  108. prefect/settings/models/server/services.py +26 -11
  109. prefect/settings/models/server/tasks.py +6 -3
  110. prefect/settings/models/server/ui.py +3 -3
  111. prefect/settings/models/tasks.py +5 -5
  112. prefect/settings/models/testing.py +3 -3
  113. prefect/settings/models/worker.py +5 -3
  114. prefect/settings/profiles.py +15 -2
  115. prefect/states.py +61 -45
  116. prefect/task_engine.py +54 -75
  117. prefect/task_runners.py +56 -55
  118. prefect/task_worker.py +2 -2
  119. prefect/tasks.py +90 -36
  120. prefect/telemetry/bootstrap.py +10 -9
  121. prefect/telemetry/run_telemetry.py +13 -8
  122. prefect/telemetry/services.py +4 -0
  123. prefect/transactions.py +4 -15
  124. prefect/utilities/_git.py +34 -0
  125. prefect/utilities/asyncutils.py +1 -1
  126. prefect/utilities/engine.py +3 -19
  127. prefect/utilities/generics.py +18 -0
  128. prefect/utilities/templating.py +25 -1
  129. prefect/workers/base.py +6 -3
  130. prefect/workers/process.py +1 -1
  131. {prefect_client-3.1.10.dist-info → prefect_client-3.1.12.dist-info}/METADATA +2 -2
  132. {prefect_client-3.1.10.dist-info → prefect_client-3.1.12.dist-info}/RECORD +135 -109
  133. prefect/client/orchestration.py +0 -4523
  134. prefect/records/__init__.py +0 -1
  135. prefect/records/base.py +0 -235
  136. prefect/records/filesystem.py +0 -213
  137. prefect/records/memory.py +0 -184
  138. prefect/records/result_store.py +0 -70
  139. {prefect_client-3.1.10.dist-info → prefect_client-3.1.12.dist-info}/LICENSE +0 -0
  140. {prefect_client-3.1.10.dist-info → prefect_client-3.1.12.dist-info}/WHEEL +0 -0
  141. {prefect_client-3.1.10.dist-info → prefect_client-3.1.12.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,1957 @@
1
+ import asyncio
2
+ import base64
3
+ import datetime
4
+ import ssl
5
+ import warnings
6
+ from collections.abc import Iterable
7
+ from contextlib import AsyncExitStack
8
+ from logging import Logger
9
+ from typing import TYPE_CHECKING, Any, Literal, NoReturn, Optional, Union, overload
10
+ from uuid import UUID
11
+
12
+ import certifi
13
+ import httpcore
14
+ import httpx
15
+ import pendulum
16
+ import pydantic
17
+ from asgi_lifespan import LifespanManager
18
+ from packaging import version
19
+ from starlette import status
20
+ from typing_extensions import ParamSpec, Self, TypeVar
21
+
22
+ from prefect.client.orchestration._artifacts.client import (
23
+ ArtifactClient,
24
+ ArtifactAsyncClient,
25
+ ArtifactCollectionClient,
26
+ ArtifactCollectionAsyncClient,
27
+ )
28
+
29
+ from prefect.client.orchestration._concurrency_limits.client import (
30
+ ConcurrencyLimitAsyncClient,
31
+ ConcurrencyLimitClient,
32
+ )
33
+
34
+ from prefect.client.orchestration._logs.client import (
35
+ LogClient,
36
+ LogAsyncClient,
37
+ )
38
+ from prefect.client.orchestration._variables.client import (
39
+ VariableClient,
40
+ VariableAsyncClient,
41
+ )
42
+
43
+ from prefect.client.orchestration._deployments.client import (
44
+ DeploymentClient,
45
+ DeploymentAsyncClient,
46
+ )
47
+ from prefect.client.orchestration._automations.client import (
48
+ AutomationClient,
49
+ AutomationAsyncClient,
50
+ )
51
+ from prefect._experimental.sla.client import SlaClient, SlaAsyncClient
52
+
53
+ from prefect.client.orchestration._flows.client import (
54
+ FlowClient,
55
+ FlowAsyncClient,
56
+ )
57
+ from prefect.client.orchestration._flow_runs.client import (
58
+ FlowRunClient,
59
+ FlowRunAsyncClient,
60
+ )
61
+
62
+ from prefect.client.orchestration._blocks_documents.client import (
63
+ BlocksDocumentClient,
64
+ BlocksDocumentAsyncClient,
65
+ )
66
+
67
+ from prefect.client.orchestration._blocks_schemas.client import (
68
+ BlocksSchemaClient,
69
+ BlocksSchemaAsyncClient,
70
+ )
71
+
72
+ from prefect.client.orchestration._blocks_types.client import (
73
+ BlocksTypeClient,
74
+ BlocksTypeAsyncClient,
75
+ )
76
+
77
+ import prefect
78
+ import prefect.exceptions
79
+ import prefect.settings
80
+ import prefect.states
81
+ from prefect.client.constants import SERVER_API_VERSION
82
+ from prefect.client.schemas import FlowRun, OrchestrationResult, TaskRun
83
+ from prefect.client.schemas.actions import (
84
+ FlowRunNotificationPolicyCreate,
85
+ FlowRunNotificationPolicyUpdate,
86
+ TaskRunCreate,
87
+ TaskRunUpdate,
88
+ WorkPoolCreate,
89
+ WorkPoolUpdate,
90
+ WorkQueueCreate,
91
+ WorkQueueUpdate,
92
+ )
93
+ from prefect.client.schemas.filters import (
94
+ DeploymentFilter,
95
+ FlowFilter,
96
+ FlowRunFilter,
97
+ FlowRunNotificationPolicyFilter,
98
+ TaskRunFilter,
99
+ WorkerFilter,
100
+ WorkPoolFilter,
101
+ WorkQueueFilter,
102
+ WorkQueueFilterName,
103
+ )
104
+ from prefect.client.schemas.objects import (
105
+ Constant,
106
+ FlowRunNotificationPolicy,
107
+ Parameter,
108
+ TaskRunPolicy,
109
+ TaskRunResult,
110
+ Worker,
111
+ WorkerMetadata,
112
+ WorkPool,
113
+ WorkQueue,
114
+ WorkQueueStatusDetail,
115
+ )
116
+ from prefect.client.schemas.responses import (
117
+ WorkerFlowRunResponse,
118
+ )
119
+ from prefect.client.schemas.sorting import (
120
+ TaskRunSort,
121
+ )
122
+ from prefect.logging import get_logger
123
+ from prefect.settings import (
124
+ PREFECT_API_AUTH_STRING,
125
+ PREFECT_API_DATABASE_CONNECTION_URL,
126
+ PREFECT_API_ENABLE_HTTP2,
127
+ PREFECT_API_KEY,
128
+ PREFECT_API_REQUEST_TIMEOUT,
129
+ PREFECT_API_SSL_CERT_FILE,
130
+ PREFECT_API_TLS_INSECURE_SKIP_VERIFY,
131
+ PREFECT_API_URL,
132
+ PREFECT_CLIENT_CSRF_SUPPORT_ENABLED,
133
+ PREFECT_CLOUD_API_URL,
134
+ PREFECT_SERVER_ALLOW_EPHEMERAL_MODE,
135
+ PREFECT_TESTING_UNIT_TEST_MODE,
136
+ get_current_settings,
137
+ )
138
+
139
+ if TYPE_CHECKING:
140
+ from prefect.tasks import Task as TaskObject
141
+
142
+ from prefect.client.base import (
143
+ ASGIApp,
144
+ PrefectHttpxAsyncClient,
145
+ PrefectHttpxSyncClient,
146
+ ServerType,
147
+ app_lifespan_context,
148
+ )
149
+
150
+ P = ParamSpec("P")
151
+ R = TypeVar("R", infer_variance=True)
152
+ T = TypeVar("T")
153
+
154
+
155
@overload
def get_client(
    *,
    httpx_settings: Optional[dict[str, Any]] = ...,
    sync_client: Literal[False] = False,
) -> "PrefectClient": ...


@overload
def get_client(
    *, httpx_settings: Optional[dict[str, Any]] = ..., sync_client: Literal[True] = ...
) -> "SyncPrefectClient": ...


def get_client(
    httpx_settings: Optional[dict[str, Any]] = None, sync_client: bool = False
) -> Union["SyncPrefectClient", "PrefectClient"]:
    """
    Retrieve a HTTP client for communicating with the Prefect REST API.

    The client must be context managed; for example:

    ```python
    async with get_client() as client:
        await client.hello()
    ```

    To return a synchronous client, pass sync_client=True:

    ```python
    with get_client(sync_client=True) as client:
        client.hello()
    ```
    """
    import prefect.context

    # Prefer a client already opened by an enclosing client context, but only
    # when its httpx settings match this request (and, for async clients, when
    # it is bound to the current event loop or to no loop at all).
    try:
        running_loop = asyncio.get_running_loop()
    except RuntimeError:
        running_loop = None

    if sync_client:
        ctx = prefect.context.SyncClientContext.get()
        if (
            ctx
            and ctx.client
            and getattr(ctx, "_httpx_settings", None) == httpx_settings
        ):
            return ctx.client
    else:
        ctx = prefect.context.AsyncClientContext.get()
        if (
            ctx
            and ctx.client
            and getattr(ctx, "_httpx_settings", None) == httpx_settings
            and running_loop in (getattr(ctx.client, "_loop", None), None)
        ):
            return ctx.client

    api: str = PREFECT_API_URL.value()
    server_type = None

    if not api:
        if not PREFECT_SERVER_ALLOW_EPHEMERAL_MODE:
            raise ValueError(
                "No Prefect API URL provided. Please set PREFECT_API_URL to the address of a running Prefect server."
            )
        # No API URL configured: fall back to an ephemeral subprocess server.
        from prefect.server.api.server import SubprocessASGIServer

        server = SubprocessASGIServer()
        server.start()
        assert server.server_process is not None, "Server process did not start"

        api = server.api_url
        server_type = ServerType.EPHEMERAL

    client_cls = SyncPrefectClient if sync_client else PrefectClient
    return client_cls(
        api,
        auth_string=PREFECT_API_AUTH_STRING.value(),
        api_key=PREFECT_API_KEY.value(),
        httpx_settings=httpx_settings,
        server_type=server_type,
    )
250
+
251
+
252
+ class PrefectClient(
253
+ ArtifactAsyncClient,
254
+ ArtifactCollectionAsyncClient,
255
+ LogAsyncClient,
256
+ VariableAsyncClient,
257
+ ConcurrencyLimitAsyncClient,
258
+ DeploymentAsyncClient,
259
+ AutomationAsyncClient,
260
+ SlaAsyncClient,
261
+ FlowRunAsyncClient,
262
+ FlowAsyncClient,
263
+ BlocksDocumentAsyncClient,
264
+ BlocksSchemaAsyncClient,
265
+ BlocksTypeAsyncClient,
266
+ ):
267
+ """
268
+ An asynchronous client for interacting with the [Prefect REST API](/api-ref/rest-api/).
269
+
270
+ Args:
271
+ api: the REST API URL or FastAPI application to connect to
272
+ api_key: An optional API key for authentication.
273
+ api_version: The API version this client is compatible with.
274
+ httpx_settings: An optional dictionary of settings to pass to the underlying
275
+ `httpx.AsyncClient`
276
+
277
+ Examples:
278
+
279
+ Say hello to a Prefect REST API
280
+
281
+ <div class="terminal">
282
+ ```
283
+ >>> async with get_client() as client:
284
+ >>> response = await client.hello()
285
+ >>>
286
+ >>> print(response.json())
287
+ 👋
288
+ ```
289
+ </div>
290
+ """
291
+
292
    def __init__(
        self,
        api: Union[str, ASGIApp],
        *,
        auth_string: Optional[str] = None,
        api_key: Optional[str] = None,
        api_version: Optional[str] = None,
        httpx_settings: Optional[dict[str, Any]] = None,
        server_type: Optional[ServerType] = None,
    ) -> None:
        """
        Build the underlying httpx async client from settings and the given
        `api` target (URL string for a remote server, or an ASGI app for an
        ephemeral in-process server).

        Args:
            api: the REST API URL or an ASGI application to connect to
            auth_string: optional `user:pass` string, sent as Basic auth
            api_key: optional API key, sent as Bearer auth
            api_version: API version header value; defaults to SERVER_API_VERSION
            httpx_settings: optional settings forwarded to the `httpx.AsyncClient`;
                values provided here take precedence over the defaults below
            server_type: optional explicit server type; inferred from `api`
                when not provided
        """
        # Copy so caller-provided settings are never mutated; every default
        # below is applied with setdefault so explicit settings win.
        httpx_settings = httpx_settings.copy() if httpx_settings else {}
        httpx_settings.setdefault("headers", {})

        if PREFECT_API_TLS_INSECURE_SKIP_VERIFY:
            # Create an unverified context for insecure connections
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
            httpx_settings.setdefault("verify", ctx)
        else:
            cert_file = PREFECT_API_SSL_CERT_FILE.value()
            if not cert_file:
                cert_file = certifi.where()
            # Create a verified context with the certificate file
            ctx = ssl.create_default_context(cafile=cert_file)
            httpx_settings.setdefault("verify", ctx)

        if api_version is None:
            api_version = SERVER_API_VERSION
        httpx_settings["headers"].setdefault("X-PREFECT-API-VERSION", api_version)
        if api_key:
            httpx_settings["headers"].setdefault("Authorization", f"Bearer {api_key}")

        # NOTE: setdefault means an api_key (set above) takes precedence over
        # auth_string for the Authorization header.
        if auth_string:
            token = base64.b64encode(auth_string.encode("utf-8")).decode("utf-8")
            httpx_settings["headers"].setdefault("Authorization", f"Basic {token}")

        # Context management
        self._context_stack: int = 0
        self._exit_stack = AsyncExitStack()
        self._ephemeral_app: Optional[ASGIApp] = None
        self.manage_lifespan = True
        self.server_type: ServerType

        # Only set if this client started the lifespan of the application
        self._ephemeral_lifespan: Optional[LifespanManager] = None

        self._closed = False
        self._started = False

        # Connect to an external application
        if isinstance(api, str):
            if httpx_settings.get("app"):
                raise ValueError(
                    "Invalid httpx settings: `app` cannot be set when providing an "
                    "api url. `app` is only for use with ephemeral instances. Provide "
                    "it as the `api` parameter instead."
                )
            httpx_settings.setdefault("base_url", api)

            # See https://www.python-httpx.org/advanced/#pool-limit-configuration
            httpx_settings.setdefault(
                "limits",
                httpx.Limits(
                    # We see instability when allowing the client to open many connections at once.
                    # Limiting concurrency results in more stable performance.
                    max_connections=16,
                    max_keepalive_connections=8,
                    # The Prefect Cloud LB will keep connections alive for 30s.
                    # Only allow the client to keep connections alive for 25s.
                    keepalive_expiry=25,
                ),
            )

            # See https://www.python-httpx.org/http2/
            # Enabling HTTP/2 support on the client does not necessarily mean that your requests
            # and responses will be transported over HTTP/2, since both the client and the server
            # need to support HTTP/2. If you connect to a server that only supports HTTP/1.1 the
            # client will use a standard HTTP/1.1 connection instead.
            httpx_settings.setdefault("http2", PREFECT_API_ENABLE_HTTP2.value())

            if server_type:
                self.server_type = server_type
            else:
                self.server_type = (
                    ServerType.CLOUD
                    if api.startswith(PREFECT_CLOUD_API_URL.value())
                    else ServerType.SERVER
                )

        # Connect to an in-process application
        else:
            self._ephemeral_app = api
            self.server_type = ServerType.EPHEMERAL

            # When using an ephemeral server, server-side exceptions can be raised
            # client-side breaking all of our response error code handling. To work
            # around this, we create an ASGI transport with application exceptions
            # disabled instead of using the application directly.
            # refs:
            # - https://github.com/PrefectHQ/prefect/pull/9637
            # - https://github.com/encode/starlette/blob/d3a11205ed35f8e5a58a711db0ff59c86fa7bb31/starlette/middleware/errors.py#L184
            # - https://github.com/tiangolo/fastapi/blob/8cc967a7605d3883bd04ceb5d25cc94ae079612f/fastapi/applications.py#L163-L164
            httpx_settings.setdefault(
                "transport",
                httpx.ASGITransport(
                    app=self._ephemeral_app, raise_app_exceptions=False
                ),
            )
            httpx_settings.setdefault("base_url", "http://ephemeral-prefect/api")

        # See https://www.python-httpx.org/advanced/#timeout-configuration
        httpx_settings.setdefault(
            "timeout",
            httpx.Timeout(
                connect=PREFECT_API_REQUEST_TIMEOUT.value(),
                read=PREFECT_API_REQUEST_TIMEOUT.value(),
                write=PREFECT_API_REQUEST_TIMEOUT.value(),
                pool=PREFECT_API_REQUEST_TIMEOUT.value(),
            ),
        )

        if not PREFECT_TESTING_UNIT_TEST_MODE:
            httpx_settings.setdefault("follow_redirects", True)

        # CSRF support is only relevant for non-Cloud servers.
        enable_csrf_support = (
            self.server_type != ServerType.CLOUD
            and PREFECT_CLIENT_CSRF_SUPPORT_ENABLED.value()
        )

        self._client = PrefectHttpxAsyncClient(
            **httpx_settings, enable_csrf_support=enable_csrf_support
        )
        self._loop = None

        # See https://www.python-httpx.org/advanced/#custom-transports
        #
        # If we're using an HTTP/S client (not the ephemeral client), adjust the
        # transport to add retries _after_ it is instantiated. If we alter the transport
        # before instantiation, the transport will not be aware of proxies unless we
        # reproduce all of the logic to make it so.
        #
        # Only alter the transport to set our default of 3 retries, don't modify any
        # transport a user may have provided via httpx_settings.
        #
        # Making liberal use of getattr and isinstance checks here to avoid any
        # surprises if the internals of httpx or httpcore change on us
        if isinstance(api, str) and not httpx_settings.get("transport"):
            transport_for_url = getattr(self._client, "_transport_for_url", None)
            if callable(transport_for_url):
                server_transport = transport_for_url(httpx.URL(api))
                if isinstance(server_transport, httpx.AsyncHTTPTransport):
                    pool = getattr(server_transport, "_pool", None)
                    if isinstance(pool, httpcore.AsyncConnectionPool):
                        setattr(pool, "_retries", 3)

        self.logger: Logger = get_logger("client")
449
+
450
+ @property
451
+ def api_url(self) -> httpx.URL:
452
+ """
453
+ Get the base URL for the API.
454
+ """
455
+ return self._client.base_url
456
+
457
+ # API methods ----------------------------------------------------------------------
458
+
459
+ async def api_healthcheck(self) -> Optional[Exception]:
460
+ """
461
+ Attempts to connect to the API and returns the encountered exception if not
462
+ successful.
463
+
464
+ If successful, returns `None`.
465
+ """
466
+ try:
467
+ await self._client.get("/health")
468
+ return None
469
+ except Exception as exc:
470
+ return exc
471
+
472
+ async def hello(self) -> httpx.Response:
473
+ """
474
+ Send a GET request to /hello for testing purposes.
475
+ """
476
+ return await self._client.get("/hello")
477
+
478
+ async def create_work_queue(
479
+ self,
480
+ name: str,
481
+ description: Optional[str] = None,
482
+ is_paused: Optional[bool] = None,
483
+ concurrency_limit: Optional[int] = None,
484
+ priority: Optional[int] = None,
485
+ work_pool_name: Optional[str] = None,
486
+ ) -> WorkQueue:
487
+ """
488
+ Create a work queue.
489
+
490
+ Args:
491
+ name: a unique name for the work queue
492
+ description: An optional description for the work queue.
493
+ is_paused: Whether or not the work queue is paused.
494
+ concurrency_limit: An optional concurrency limit for the work queue.
495
+ priority: The queue's priority. Lower values are higher priority (1 is the highest).
496
+ work_pool_name: The name of the work pool to use for this queue.
497
+
498
+ Raises:
499
+ prefect.exceptions.ObjectAlreadyExists: If request returns 409
500
+ httpx.RequestError: If request fails
501
+
502
+ Returns:
503
+ The created work queue
504
+ """
505
+ create_model = WorkQueueCreate(name=name, filter=None)
506
+ if description is not None:
507
+ create_model.description = description
508
+ if is_paused is not None:
509
+ create_model.is_paused = is_paused
510
+ if concurrency_limit is not None:
511
+ create_model.concurrency_limit = concurrency_limit
512
+ if priority is not None:
513
+ create_model.priority = priority
514
+
515
+ data = create_model.model_dump(mode="json")
516
+ try:
517
+ if work_pool_name is not None:
518
+ response = await self._client.post(
519
+ f"/work_pools/{work_pool_name}/queues", json=data
520
+ )
521
+ else:
522
+ response = await self._client.post("/work_queues/", json=data)
523
+ except httpx.HTTPStatusError as e:
524
+ if e.response.status_code == status.HTTP_409_CONFLICT:
525
+ raise prefect.exceptions.ObjectAlreadyExists(http_exc=e) from e
526
+ elif e.response.status_code == status.HTTP_404_NOT_FOUND:
527
+ raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
528
+ else:
529
+ raise
530
+ return WorkQueue.model_validate(response.json())
531
+
532
+ async def read_work_queue_by_name(
533
+ self,
534
+ name: str,
535
+ work_pool_name: Optional[str] = None,
536
+ ) -> WorkQueue:
537
+ """
538
+ Read a work queue by name.
539
+
540
+ Args:
541
+ name (str): a unique name for the work queue
542
+ work_pool_name (str, optional): the name of the work pool
543
+ the queue belongs to.
544
+
545
+ Raises:
546
+ prefect.exceptions.ObjectNotFound: if no work queue is found
547
+ httpx.HTTPStatusError: other status errors
548
+
549
+ Returns:
550
+ WorkQueue: a work queue API object
551
+ """
552
+ try:
553
+ if work_pool_name is not None:
554
+ response = await self._client.get(
555
+ f"/work_pools/{work_pool_name}/queues/{name}"
556
+ )
557
+ else:
558
+ response = await self._client.get(f"/work_queues/name/{name}")
559
+ except httpx.HTTPStatusError as e:
560
+ if e.response.status_code == status.HTTP_404_NOT_FOUND:
561
+ raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
562
+ else:
563
+ raise
564
+
565
+ return WorkQueue.model_validate(response.json())
566
+
567
+ async def update_work_queue(self, id: UUID, **kwargs: Any) -> None:
568
+ """
569
+ Update properties of a work queue.
570
+
571
+ Args:
572
+ id: the ID of the work queue to update
573
+ **kwargs: the fields to update
574
+
575
+ Raises:
576
+ ValueError: if no kwargs are provided
577
+ prefect.exceptions.ObjectNotFound: if request returns 404
578
+ httpx.RequestError: if the request fails
579
+
580
+ """
581
+ if not kwargs:
582
+ raise ValueError("No fields provided to update.")
583
+
584
+ data = WorkQueueUpdate(**kwargs).model_dump(mode="json", exclude_unset=True)
585
+ try:
586
+ await self._client.patch(f"/work_queues/{id}", json=data)
587
+ except httpx.HTTPStatusError as e:
588
+ if e.response.status_code == status.HTTP_404_NOT_FOUND:
589
+ raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
590
+ else:
591
+ raise
592
+
593
+ async def get_runs_in_work_queue(
594
+ self,
595
+ id: UUID,
596
+ limit: int = 10,
597
+ scheduled_before: Optional[datetime.datetime] = None,
598
+ ) -> list[FlowRun]:
599
+ """
600
+ Read flow runs off a work queue.
601
+
602
+ Args:
603
+ id: the id of the work queue to read from
604
+ limit: a limit on the number of runs to return
605
+ scheduled_before: a timestamp; only runs scheduled before this time will be returned.
606
+ Defaults to now.
607
+
608
+ Raises:
609
+ prefect.exceptions.ObjectNotFound: If request returns 404
610
+ httpx.RequestError: If request fails
611
+
612
+ Returns:
613
+ List[FlowRun]: a list of FlowRun objects read from the queue
614
+ """
615
+ if scheduled_before is None:
616
+ scheduled_before = pendulum.now("UTC")
617
+
618
+ try:
619
+ response = await self._client.post(
620
+ f"/work_queues/{id}/get_runs",
621
+ json={
622
+ "limit": limit,
623
+ "scheduled_before": scheduled_before.isoformat(),
624
+ },
625
+ )
626
+ except httpx.HTTPStatusError as e:
627
+ if e.response.status_code == status.HTTP_404_NOT_FOUND:
628
+ raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
629
+ else:
630
+ raise
631
+ return pydantic.TypeAdapter(list[FlowRun]).validate_python(response.json())
632
+
633
+ async def read_work_queue(
634
+ self,
635
+ id: UUID,
636
+ ) -> WorkQueue:
637
+ """
638
+ Read a work queue.
639
+
640
+ Args:
641
+ id: the id of the work queue to load
642
+
643
+ Raises:
644
+ prefect.exceptions.ObjectNotFound: If request returns 404
645
+ httpx.RequestError: If request fails
646
+
647
+ Returns:
648
+ WorkQueue: an instantiated WorkQueue object
649
+ """
650
+ try:
651
+ response = await self._client.get(f"/work_queues/{id}")
652
+ except httpx.HTTPStatusError as e:
653
+ if e.response.status_code == status.HTTP_404_NOT_FOUND:
654
+ raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
655
+ else:
656
+ raise
657
+ return WorkQueue.model_validate(response.json())
658
+
659
+ async def read_work_queue_status(
660
+ self,
661
+ id: UUID,
662
+ ) -> WorkQueueStatusDetail:
663
+ """
664
+ Read a work queue status.
665
+
666
+ Args:
667
+ id: the id of the work queue to load
668
+
669
+ Raises:
670
+ prefect.exceptions.ObjectNotFound: If request returns 404
671
+ httpx.RequestError: If request fails
672
+
673
+ Returns:
674
+ WorkQueueStatus: an instantiated WorkQueueStatus object
675
+ """
676
+ try:
677
+ response = await self._client.get(f"/work_queues/{id}/status")
678
+ except httpx.HTTPStatusError as e:
679
+ if e.response.status_code == status.HTTP_404_NOT_FOUND:
680
+ raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
681
+ else:
682
+ raise
683
+ return WorkQueueStatusDetail.model_validate(response.json())
684
+
685
+ async def match_work_queues(
686
+ self,
687
+ prefixes: list[str],
688
+ work_pool_name: Optional[str] = None,
689
+ ) -> list[WorkQueue]:
690
+ """
691
+ Query the Prefect API for work queues with names with a specific prefix.
692
+
693
+ Args:
694
+ prefixes: a list of strings used to match work queue name prefixes
695
+ work_pool_name: an optional work pool name to scope the query to
696
+
697
+ Returns:
698
+ a list of WorkQueue model representations
699
+ of the work queues
700
+ """
701
+ page_length = 100
702
+ current_page = 0
703
+ work_queues: list[WorkQueue] = []
704
+
705
+ while True:
706
+ new_queues = await self.read_work_queues(
707
+ work_pool_name=work_pool_name,
708
+ offset=current_page * page_length,
709
+ limit=page_length,
710
+ work_queue_filter=WorkQueueFilter(
711
+ name=WorkQueueFilterName(startswith_=prefixes)
712
+ ),
713
+ )
714
+ if not new_queues:
715
+ break
716
+ work_queues += new_queues
717
+ current_page += 1
718
+
719
+ return work_queues
720
+
721
+ async def delete_work_queue_by_id(
722
+ self,
723
+ id: UUID,
724
+ ) -> None:
725
+ """
726
+ Delete a work queue by its ID.
727
+
728
+ Args:
729
+ id: the id of the work queue to delete
730
+
731
+ Raises:
732
+ prefect.exceptions.ObjectNotFound: If request returns 404
733
+ httpx.RequestError: If requests fails
734
+ """
735
+ try:
736
+ await self._client.delete(
737
+ f"/work_queues/{id}",
738
+ )
739
+ except httpx.HTTPStatusError as e:
740
+ if e.response.status_code == status.HTTP_404_NOT_FOUND:
741
+ raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
742
+ else:
743
+ raise
744
+
745
+ async def set_task_run_name(self, task_run_id: UUID, name: str) -> httpx.Response:
746
+ task_run_data = TaskRunUpdate(name=name)
747
+ return await self._client.patch(
748
+ f"/task_runs/{task_run_id}",
749
+ json=task_run_data.model_dump(mode="json", exclude_unset=True),
750
+ )
751
+
752
    async def create_task_run(
        self,
        task: "TaskObject[P, R]",
        flow_run_id: Optional[UUID],
        dynamic_key: str,
        id: Optional[UUID] = None,
        name: Optional[str] = None,
        extra_tags: Optional[Iterable[str]] = None,
        state: Optional[prefect.states.State[R]] = None,
        task_inputs: Optional[
            dict[
                str,
                list[
                    Union[
                        TaskRunResult,
                        Parameter,
                        Constant,
                    ]
                ],
            ]
        ] = None,
    ) -> TaskRun:
        """
        Create a task run

        Args:
            task: The Task to run
            flow_run_id: The flow run id with which to associate the task run
            dynamic_key: A key unique to this particular run of a Task within the flow
            id: An optional ID for the task run. If not provided, one will be generated
                server-side.
            name: An optional name for the task run
            extra_tags: an optional list of extra tags to apply to the task run in
                addition to `task.tags`
            state: The initial state for the run. If not provided, defaults to
                `Pending` for now. Should always be a `Scheduled` type.
            task_inputs: the set of inputs passed to the task

        Returns:
            The created task run.
        """
        # Task tags and caller-supplied extras are merged; duplicates collapse.
        tags = set(task.tags).union(extra_tags or [])

        if state is None:
            state = prefect.states.Pending()

        # Coerce float retry delays (single value or list) to ints before
        # sending them to the API.
        retry_delay = task.retry_delay_seconds
        if isinstance(retry_delay, list):
            retry_delay = [int(rd) for rd in retry_delay]
        elif isinstance(retry_delay, float):
            retry_delay = int(retry_delay)

        task_run_data = TaskRunCreate(
            id=id,
            name=name,
            flow_run_id=flow_run_id,
            task_key=task.task_key,
            dynamic_key=str(dynamic_key),
            tags=list(tags),
            task_version=task.version,
            empirical_policy=TaskRunPolicy(
                retries=task.retries,
                retry_delay=retry_delay,
                retry_jitter_factor=task.retry_jitter_factor,
            ),
            state=state.to_state_create(),
            task_inputs=task_inputs or {},
        )
        # Drop the `id` field entirely when the caller did not supply one so
        # the server generates it.
        content = task_run_data.model_dump_json(exclude={"id"} if id is None else None)

        response = await self._client.post("/task_runs/", content=content)
        return TaskRun.model_validate(response.json())
824
+
825
+ async def read_task_run(self, task_run_id: UUID) -> TaskRun:
826
+ """
827
+ Query the Prefect API for a task run by id.
828
+
829
+ Args:
830
+ task_run_id: the task run ID of interest
831
+
832
+ Returns:
833
+ a Task Run model representation of the task run
834
+ """
835
+ try:
836
+ response = await self._client.get(f"/task_runs/{task_run_id}")
837
+ return TaskRun.model_validate(response.json())
838
+ except httpx.HTTPStatusError as e:
839
+ if e.response.status_code == status.HTTP_404_NOT_FOUND:
840
+ raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
841
+ else:
842
+ raise
843
+
844
+ async def read_task_runs(
845
+ self,
846
+ *,
847
+ flow_filter: Optional[FlowFilter] = None,
848
+ flow_run_filter: Optional[FlowRunFilter] = None,
849
+ task_run_filter: Optional[TaskRunFilter] = None,
850
+ deployment_filter: Optional[DeploymentFilter] = None,
851
+ sort: Optional[TaskRunSort] = None,
852
+ limit: Optional[int] = None,
853
+ offset: int = 0,
854
+ ) -> list[TaskRun]:
855
+ """
856
+ Query the Prefect API for task runs. Only task runs matching all criteria will
857
+ be returned.
858
+
859
+ Args:
860
+ flow_filter: filter criteria for flows
861
+ flow_run_filter: filter criteria for flow runs
862
+ task_run_filter: filter criteria for task runs
863
+ deployment_filter: filter criteria for deployments
864
+ sort: sort criteria for the task runs
865
+ limit: a limit for the task run query
866
+ offset: an offset for the task run query
867
+
868
+ Returns:
869
+ a list of Task Run model representations
870
+ of the task runs
871
+ """
872
+ body: dict[str, Any] = {
873
+ "flows": flow_filter.model_dump(mode="json") if flow_filter else None,
874
+ "flow_runs": (
875
+ flow_run_filter.model_dump(mode="json", exclude_unset=True)
876
+ if flow_run_filter
877
+ else None
878
+ ),
879
+ "task_runs": (
880
+ task_run_filter.model_dump(mode="json") if task_run_filter else None
881
+ ),
882
+ "deployments": (
883
+ deployment_filter.model_dump(mode="json") if deployment_filter else None
884
+ ),
885
+ "sort": sort,
886
+ "limit": limit,
887
+ "offset": offset,
888
+ }
889
+ response = await self._client.post("/task_runs/filter", json=body)
890
+ return pydantic.TypeAdapter(list[TaskRun]).validate_python(response.json())
891
+
892
+ async def delete_task_run(self, task_run_id: UUID) -> None:
893
+ """
894
+ Delete a task run by id.
895
+
896
+ Args:
897
+ task_run_id: the task run ID of interest
898
+ Raises:
899
+ prefect.exceptions.ObjectNotFound: If request returns 404
900
+ httpx.RequestError: If requests fails
901
+ """
902
+ try:
903
+ await self._client.delete(f"/task_runs/{task_run_id}")
904
+ except httpx.HTTPStatusError as e:
905
+ if e.response.status_code == 404:
906
+ raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
907
+ else:
908
+ raise
909
+
910
+ async def set_task_run_state(
911
+ self,
912
+ task_run_id: UUID,
913
+ state: prefect.states.State[T],
914
+ force: bool = False,
915
+ ) -> OrchestrationResult[T]:
916
+ """
917
+ Set the state of a task run.
918
+
919
+ Args:
920
+ task_run_id: the id of the task run
921
+ state: the state to set
922
+ force: if True, disregard orchestration logic when setting the state,
923
+ forcing the Prefect API to accept the state
924
+
925
+ Returns:
926
+ an OrchestrationResult model representation of state orchestration output
927
+ """
928
+ state_create = state.to_state_create()
929
+ state_create.state_details.task_run_id = task_run_id
930
+ response = await self._client.post(
931
+ f"/task_runs/{task_run_id}/set_state",
932
+ json=dict(state=state_create.model_dump(mode="json"), force=force),
933
+ )
934
+ result: OrchestrationResult[T] = OrchestrationResult.model_validate(
935
+ response.json()
936
+ )
937
+ return result
938
+
939
+ async def read_task_run_states(
940
+ self, task_run_id: UUID
941
+ ) -> list[prefect.states.State]:
942
+ """
943
+ Query for the states of a task run
944
+
945
+ Args:
946
+ task_run_id: the id of the task run
947
+
948
+ Returns:
949
+ a list of State model representations of the task run states
950
+ """
951
+ response = await self._client.get(
952
+ "/task_run_states/", params=dict(task_run_id=str(task_run_id))
953
+ )
954
+ return pydantic.TypeAdapter(list[prefect.states.State]).validate_python(
955
+ response.json()
956
+ )
957
+
958
+ async def create_flow_run_notification_policy(
959
+ self,
960
+ block_document_id: UUID,
961
+ is_active: bool = True,
962
+ tags: Optional[list[str]] = None,
963
+ state_names: Optional[list[str]] = None,
964
+ message_template: Optional[str] = None,
965
+ ) -> UUID:
966
+ """
967
+ Create a notification policy for flow runs
968
+
969
+ Args:
970
+ block_document_id: The block document UUID
971
+ is_active: Whether the notification policy is active
972
+ tags: List of flow tags
973
+ state_names: List of state names
974
+ message_template: Notification message template
975
+ """
976
+ if tags is None:
977
+ tags = []
978
+ if state_names is None:
979
+ state_names = []
980
+
981
+ policy = FlowRunNotificationPolicyCreate(
982
+ block_document_id=block_document_id,
983
+ is_active=is_active,
984
+ tags=tags,
985
+ state_names=state_names,
986
+ message_template=message_template,
987
+ )
988
+ response = await self._client.post(
989
+ "/flow_run_notification_policies/",
990
+ json=policy.model_dump(mode="json"),
991
+ )
992
+
993
+ policy_id = response.json().get("id")
994
+ if not policy_id:
995
+ raise httpx.RequestError(f"Malformed response: {response}")
996
+
997
+ return UUID(policy_id)
998
+
999
+ async def delete_flow_run_notification_policy(
1000
+ self,
1001
+ id: UUID,
1002
+ ) -> None:
1003
+ """
1004
+ Delete a flow run notification policy by id.
1005
+
1006
+ Args:
1007
+ id: UUID of the flow run notification policy to delete.
1008
+ Raises:
1009
+ prefect.exceptions.ObjectNotFound: If request returns 404
1010
+ httpx.RequestError: If requests fails
1011
+ """
1012
+ try:
1013
+ await self._client.delete(f"/flow_run_notification_policies/{id}")
1014
+ except httpx.HTTPStatusError as e:
1015
+ if e.response.status_code == status.HTTP_404_NOT_FOUND:
1016
+ raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
1017
+ else:
1018
+ raise
1019
+
1020
+ async def update_flow_run_notification_policy(
1021
+ self,
1022
+ id: UUID,
1023
+ block_document_id: Optional[UUID] = None,
1024
+ is_active: Optional[bool] = None,
1025
+ tags: Optional[list[str]] = None,
1026
+ state_names: Optional[list[str]] = None,
1027
+ message_template: Optional[str] = None,
1028
+ ) -> None:
1029
+ """
1030
+ Update a notification policy for flow runs
1031
+
1032
+ Args:
1033
+ id: UUID of the notification policy
1034
+ block_document_id: The block document UUID
1035
+ is_active: Whether the notification policy is active
1036
+ tags: List of flow tags
1037
+ state_names: List of state names
1038
+ message_template: Notification message template
1039
+ Raises:
1040
+ prefect.exceptions.ObjectNotFound: If request returns 404
1041
+ httpx.RequestError: If requests fails
1042
+ """
1043
+ params: dict[str, Any] = {}
1044
+ if block_document_id is not None:
1045
+ params["block_document_id"] = block_document_id
1046
+ if is_active is not None:
1047
+ params["is_active"] = is_active
1048
+ if tags is not None:
1049
+ params["tags"] = tags
1050
+ if state_names is not None:
1051
+ params["state_names"] = state_names
1052
+ if message_template is not None:
1053
+ params["message_template"] = message_template
1054
+
1055
+ policy = FlowRunNotificationPolicyUpdate(**params)
1056
+
1057
+ try:
1058
+ await self._client.patch(
1059
+ f"/flow_run_notification_policies/{id}",
1060
+ json=policy.model_dump(mode="json", exclude_unset=True),
1061
+ )
1062
+ except httpx.HTTPStatusError as e:
1063
+ if e.response.status_code == status.HTTP_404_NOT_FOUND:
1064
+ raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
1065
+ else:
1066
+ raise
1067
+
1068
+ async def read_flow_run_notification_policies(
1069
+ self,
1070
+ flow_run_notification_policy_filter: FlowRunNotificationPolicyFilter,
1071
+ limit: Optional[int] = None,
1072
+ offset: int = 0,
1073
+ ) -> list[FlowRunNotificationPolicy]:
1074
+ """
1075
+ Query the Prefect API for flow run notification policies. Only policies matching all criteria will
1076
+ be returned.
1077
+
1078
+ Args:
1079
+ flow_run_notification_policy_filter: filter criteria for notification policies
1080
+ limit: a limit for the notification policies query
1081
+ offset: an offset for the notification policies query
1082
+
1083
+ Returns:
1084
+ a list of FlowRunNotificationPolicy model representations
1085
+ of the notification policies
1086
+ """
1087
+ body: dict[str, Any] = {
1088
+ "flow_run_notification_policy_filter": (
1089
+ flow_run_notification_policy_filter.model_dump(mode="json")
1090
+ if flow_run_notification_policy_filter
1091
+ else None
1092
+ ),
1093
+ "limit": limit,
1094
+ "offset": offset,
1095
+ }
1096
+ response = await self._client.post(
1097
+ "/flow_run_notification_policies/filter", json=body
1098
+ )
1099
+ return pydantic.TypeAdapter(list[FlowRunNotificationPolicy]).validate_python(
1100
+ response.json()
1101
+ )
1102
+
1103
    async def send_worker_heartbeat(
        self,
        work_pool_name: str,
        worker_name: str,
        heartbeat_interval_seconds: Optional[float] = None,
        get_worker_id: bool = False,
        worker_metadata: Optional[WorkerMetadata] = None,
    ) -> Optional[UUID]:
        """
        Sends a worker heartbeat for a given work pool.

        Args:
            work_pool_name: The name of the work pool to heartbeat against.
            worker_name: The name of the worker sending the heartbeat.
            heartbeat_interval_seconds: The interval at which the worker sends
                heartbeats, forwarded to the server.
            get_worker_id: Whether to return the worker ID. Note: will return `None`
                if the connected server does not support returning worker IDs, even
                if `get_worker_id` is `True`.
            worker_metadata: Metadata about the worker to send to the server.

        Returns:
            The worker's ID when `get_worker_id` is `True` and the server returned
            one (Prefect Cloud, or test mode); otherwise `None`.
        """
        params: dict[str, Any] = {
            "name": worker_name,
            "heartbeat_interval_seconds": heartbeat_interval_seconds,
        }
        if worker_metadata:
            params["metadata"] = worker_metadata.model_dump(mode="json")
        if get_worker_id:
            params["return_id"] = get_worker_id

        resp = await self._client.post(
            f"/work_pools/{work_pool_name}/workers/heartbeat",
            json=params,
        )

        # Only Cloud (or test mode) returns a worker ID in the response body.
        if (
            (
                self.server_type == ServerType.CLOUD
                or get_current_settings().testing.test_mode
            )
            and get_worker_id
            and resp.status_code == 200
        ):
            return UUID(resp.text)
        else:
            return None
1145
+
1146
    async def read_workers_for_work_pool(
        self,
        work_pool_name: str,
        worker_filter: Optional[WorkerFilter] = None,
        offset: Optional[int] = None,
        limit: Optional[int] = None,
    ) -> list[Worker]:
        """
        Reads workers for a given work pool.

        Args:
            work_pool_name: The name of the work pool for which to get
                member workers.
            worker_filter: Criteria by which to filter workers.
            limit: Limit for the worker query.
            offset: Offset for the worker query.

        Returns:
            A list of Worker model representations of the workers.
        """
        response = await self._client.post(
            f"/work_pools/{work_pool_name}/workers/filter",
            json={
                "workers": (
                    worker_filter.model_dump(mode="json", exclude_unset=True)
                    if worker_filter
                    else None
                ),
                "offset": offset,
                "limit": limit,
            },
        )

        return pydantic.TypeAdapter(list[Worker]).validate_python(response.json())
1177
+
1178
+ async def read_work_pool(self, work_pool_name: str) -> WorkPool:
1179
+ """
1180
+ Reads information for a given work pool
1181
+
1182
+ Args:
1183
+ work_pool_name: The name of the work pool to for which to get
1184
+ information.
1185
+
1186
+ Returns:
1187
+ Information about the requested work pool.
1188
+ """
1189
+ try:
1190
+ response = await self._client.get(f"/work_pools/{work_pool_name}")
1191
+ return WorkPool.model_validate(response.json())
1192
+ except httpx.HTTPStatusError as e:
1193
+ if e.response.status_code == status.HTTP_404_NOT_FOUND:
1194
+ raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
1195
+ else:
1196
+ raise
1197
+
1198
+ async def read_work_pools(
1199
+ self,
1200
+ limit: Optional[int] = None,
1201
+ offset: int = 0,
1202
+ work_pool_filter: Optional[WorkPoolFilter] = None,
1203
+ ) -> list[WorkPool]:
1204
+ """
1205
+ Reads work pools.
1206
+
1207
+ Args:
1208
+ limit: Limit for the work pool query.
1209
+ offset: Offset for the work pool query.
1210
+ work_pool_filter: Criteria by which to filter work pools.
1211
+
1212
+ Returns:
1213
+ A list of work pools.
1214
+ """
1215
+
1216
+ body: dict[str, Any] = {
1217
+ "limit": limit,
1218
+ "offset": offset,
1219
+ "work_pools": (
1220
+ work_pool_filter.model_dump(mode="json") if work_pool_filter else None
1221
+ ),
1222
+ }
1223
+ response = await self._client.post("/work_pools/filter", json=body)
1224
+ return pydantic.TypeAdapter(list[WorkPool]).validate_python(response.json())
1225
+
1226
    async def create_work_pool(
        self,
        work_pool: WorkPoolCreate,
        overwrite: bool = False,
    ) -> WorkPool:
        """
        Creates a work pool with the provided configuration.

        Args:
            work_pool: Desired configuration for the new work pool.
            overwrite: If `True` and a work pool with the same name already
                exists, update it in place instead of raising.

        Returns:
            Information about the newly created work pool.

        Raises:
            prefect.exceptions.ObjectAlreadyExists: If a work pool with the same
                name exists and `overwrite` is `False`.
        """
        try:
            response = await self._client.post(
                "/work_pools/",
                json=work_pool.model_dump(mode="json", exclude_unset=True),
            )
        except httpx.HTTPStatusError as e:
            if e.response.status_code == status.HTTP_409_CONFLICT:
                if overwrite:
                    existing_work_pool = await self.read_work_pool(
                        work_pool_name=work_pool.name
                    )
                    # The pool's type cannot be changed after creation; warn
                    # and keep the existing type.
                    if existing_work_pool.type != work_pool.type:
                        warnings.warn(
                            "Overwriting work pool type is not supported. Ignoring provided type.",
                            category=UserWarning,
                        )
                    await self.update_work_pool(
                        work_pool_name=work_pool.name,
                        work_pool=WorkPoolUpdate.model_validate(
                            work_pool.model_dump(exclude={"name", "type"})
                        ),
                    )
                    # Re-fetch so the returned model reflects the update.
                    response = await self._client.get(f"/work_pools/{work_pool.name}")
                else:
                    raise prefect.exceptions.ObjectAlreadyExists(http_exc=e) from e
            else:
                raise

        return WorkPool.model_validate(response.json())
1269
+
1270
+ async def update_work_pool(
1271
+ self,
1272
+ work_pool_name: str,
1273
+ work_pool: WorkPoolUpdate,
1274
+ ) -> None:
1275
+ """
1276
+ Updates a work pool.
1277
+
1278
+ Args:
1279
+ work_pool_name: Name of the work pool to update.
1280
+ work_pool: Fields to update in the work pool.
1281
+ """
1282
+ try:
1283
+ await self._client.patch(
1284
+ f"/work_pools/{work_pool_name}",
1285
+ json=work_pool.model_dump(mode="json", exclude_unset=True),
1286
+ )
1287
+ except httpx.HTTPStatusError as e:
1288
+ if e.response.status_code == status.HTTP_404_NOT_FOUND:
1289
+ raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
1290
+ else:
1291
+ raise
1292
+
1293
+ async def delete_work_pool(
1294
+ self,
1295
+ work_pool_name: str,
1296
+ ) -> None:
1297
+ """
1298
+ Deletes a work pool.
1299
+
1300
+ Args:
1301
+ work_pool_name: Name of the work pool to delete.
1302
+ """
1303
+ try:
1304
+ await self._client.delete(f"/work_pools/{work_pool_name}")
1305
+ except httpx.HTTPStatusError as e:
1306
+ if e.response.status_code == status.HTTP_404_NOT_FOUND:
1307
+ raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
1308
+ else:
1309
+ raise
1310
+
1311
    async def read_work_queues(
        self,
        work_pool_name: Optional[str] = None,
        work_queue_filter: Optional[WorkQueueFilter] = None,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
    ) -> list[WorkQueue]:
        """
        Retrieves queues for a work pool.

        Args:
            work_pool_name: Name of the work pool for which to get queues.
            work_queue_filter: Criteria by which to filter queues.
            limit: Limit for the queue query.
            offset: Offset for the queue query.

        Returns:
            List of queues for the specified work pool.
        """
        json: dict[str, Any] = {
            "work_queues": (
                work_queue_filter.model_dump(mode="json", exclude_unset=True)
                if work_queue_filter
                else None
            ),
            "limit": limit,
            "offset": offset,
        }

        # With a pool name, query the pool-scoped endpoint (404s if the pool
        # does not exist); otherwise query all work queues.
        if work_pool_name:
            try:
                response = await self._client.post(
                    f"/work_pools/{work_pool_name}/queues/filter",
                    json=json,
                )
            except httpx.HTTPStatusError as e:
                if e.response.status_code == status.HTTP_404_NOT_FOUND:
                    raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
                else:
                    raise
        else:
            response = await self._client.post("/work_queues/filter", json=json)

        return pydantic.TypeAdapter(list[WorkQueue]).validate_python(response.json())
1355
+
1356
+ async def get_scheduled_flow_runs_for_work_pool(
1357
+ self,
1358
+ work_pool_name: str,
1359
+ work_queue_names: Optional[list[str]] = None,
1360
+ scheduled_before: Optional[datetime.datetime] = None,
1361
+ ) -> list[WorkerFlowRunResponse]:
1362
+ """
1363
+ Retrieves scheduled flow runs for the provided set of work pool queues.
1364
+
1365
+ Args:
1366
+ work_pool_name: The name of the work pool that the work pool
1367
+ queues are associated with.
1368
+ work_queue_names: The names of the work pool queues from which
1369
+ to get scheduled flow runs.
1370
+ scheduled_before: Datetime used to filter returned flow runs. Flow runs
1371
+ scheduled for after the given datetime string will not be returned.
1372
+
1373
+ Returns:
1374
+ A list of worker flow run responses containing information about the
1375
+ retrieved flow runs.
1376
+ """
1377
+ body: dict[str, Any] = {}
1378
+ if work_queue_names is not None:
1379
+ body["work_queue_names"] = list(work_queue_names)
1380
+ if scheduled_before:
1381
+ body["scheduled_before"] = str(scheduled_before)
1382
+
1383
+ response = await self._client.post(
1384
+ f"/work_pools/{work_pool_name}/get_scheduled_flow_runs",
1385
+ json=body,
1386
+ )
1387
+ return pydantic.TypeAdapter(list[WorkerFlowRunResponse]).validate_python(
1388
+ response.json()
1389
+ )
1390
+
1391
+ async def read_worker_metadata(self) -> dict[str, Any]:
1392
+ """Reads worker metadata stored in Prefect collection registry."""
1393
+ response = await self._client.get("collections/views/aggregate-worker-metadata")
1394
+ response.raise_for_status()
1395
+ return response.json()
1396
+
1397
+ async def api_version(self) -> str:
1398
+ res = await self._client.get("/admin/version")
1399
+ return res.json()
1400
+
1401
    def client_version(self) -> str:
        """Return the version of the installed Prefect client package."""
        return prefect.__version__
1403
+
1404
+ async def raise_for_api_version_mismatch(self) -> None:
1405
+ # Cloud is always compatible as a server
1406
+ if self.server_type == ServerType.CLOUD:
1407
+ return
1408
+
1409
+ try:
1410
+ api_version = await self.api_version()
1411
+ except Exception as e:
1412
+ if "Unauthorized" in str(e):
1413
+ raise e
1414
+ raise RuntimeError(f"Failed to reach API at {self.api_url}") from e
1415
+
1416
+ api_version = version.parse(api_version)
1417
+ client_version = version.parse(self.client_version())
1418
+
1419
+ if api_version.major != client_version.major:
1420
+ raise RuntimeError(
1421
+ f"Found incompatible versions: client: {client_version}, server: {api_version}. "
1422
+ f"Major versions must match."
1423
+ )
1424
+
1425
    async def __aenter__(self) -> Self:
        """
        Start the client.

        If the client is already started, this will raise an exception.

        If the client is already closed, this will raise an exception. Use a new client
        instance instead.
        """
        if self._closed:
            # httpx.AsyncClient does not allow reuse so we will not either.
            raise RuntimeError(
                "The client cannot be started again after closing. "
                "Retrieve a new client with `get_client()` instead."
            )

        # Reference-count nested `async with` blocks; only the first entry
        # performs startup work.
        self._context_stack += 1

        if self._started:
            # allow reentrancy
            return self

        self._loop = asyncio.get_running_loop()
        await self._exit_stack.__aenter__()

        # Enter a lifespan context if using an ephemeral application.
        # See https://github.com/encode/httpx/issues/350
        if self._ephemeral_app and self.manage_lifespan:
            self._ephemeral_lifespan = await self._exit_stack.enter_async_context(
                app_lifespan_context(self._ephemeral_app)
            )

        if self._ephemeral_app:
            self.logger.debug(
                "Using ephemeral application with database at "
                f"{PREFECT_API_DATABASE_CONNECTION_URL.value()}"
            )
        else:
            self.logger.debug(f"Connecting to API at {self.api_url}")

        # Enter the httpx client's context
        await self._exit_stack.enter_async_context(self._client)

        self._started = True

        return self
1471
+
1472
    async def __aexit__(self, *exc_info: Any) -> Optional[bool]:
        """
        Shutdown the client.

        Exits are reference-counted to match `__aenter__`; teardown only runs
        when the outermost context exits.
        """

        self._context_stack -= 1
        if self._context_stack > 0:
            return
        self._closed = True
        return await self._exit_stack.__aexit__(*exc_info)
1482
+
1483
    def __enter__(self) -> NoReturn:
        # Guard against synchronous use of the async client.
        raise RuntimeError(
            "The `PrefectClient` must be entered with an async context. Use 'async "
            "with PrefectClient(...)' not 'with PrefectClient(...)'"
        )
1488
+
1489
    def __exit__(self, *_: object) -> NoReturn:
        # __enter__ always raises, so this can never run; it exists only
        # because defining __enter__ requires a matching __exit__.
        assert False, "This should never be called but must be defined for __enter__"
1491
+
1492
+
1493
+ class SyncPrefectClient(
1494
+ ArtifactClient,
1495
+ ArtifactCollectionClient,
1496
+ LogClient,
1497
+ VariableClient,
1498
+ ConcurrencyLimitClient,
1499
+ DeploymentClient,
1500
+ AutomationClient,
1501
+ SlaClient,
1502
+ FlowRunClient,
1503
+ FlowClient,
1504
+ BlocksDocumentClient,
1505
+ BlocksSchemaClient,
1506
+ BlocksTypeClient,
1507
+ ):
1508
+ """
1509
+ A synchronous client for interacting with the [Prefect REST API](/api-ref/rest-api/).
1510
+
1511
+ Args:
1512
+ api: the REST API URL or FastAPI application to connect to
1513
+ api_key: An optional API key for authentication.
1514
+ api_version: The API version this client is compatible with.
1515
+ httpx_settings: An optional dictionary of settings to pass to the underlying
1516
+ `httpx.Client`
1517
+
1518
+ Examples:
1519
+
1520
+ Say hello to a Prefect REST API
1521
+
1522
+ <div class="terminal">
1523
+ ```
1524
+ >>> with get_client(sync_client=True) as client:
1525
+ >>> response = client.hello()
1526
+ >>>
1527
+ >>> print(response.json())
1528
+ 👋
1529
+ ```
1530
+ </div>
1531
+ """
1532
+
1533
    def __init__(
        self,
        api: Union[str, ASGIApp],
        *,
        auth_string: Optional[str] = None,
        api_key: Optional[str] = None,
        api_version: Optional[str] = None,
        httpx_settings: Optional[dict[str, Any]] = None,
        server_type: Optional[ServerType] = None,
    ) -> None:
        """
        Configure the synchronous client: TLS context, auth headers, server
        type detection (Cloud / Server / ephemeral), connection limits,
        timeouts, and the underlying `PrefectHttpxSyncClient`.
        """
        # Copy so caller-supplied settings are never mutated.
        httpx_settings = httpx_settings.copy() if httpx_settings else {}
        httpx_settings.setdefault("headers", {})

        if PREFECT_API_TLS_INSECURE_SKIP_VERIFY:
            # Create an unverified context for insecure connections
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
            httpx_settings.setdefault("verify", ctx)
        else:
            cert_file = PREFECT_API_SSL_CERT_FILE.value()
            if not cert_file:
                cert_file = certifi.where()
            # Create a verified context with the certificate file
            ctx = ssl.create_default_context(cafile=cert_file)
            httpx_settings.setdefault("verify", ctx)

        if api_version is None:
            api_version = SERVER_API_VERSION
        httpx_settings["headers"].setdefault("X-PREFECT-API-VERSION", api_version)
        if api_key:
            httpx_settings["headers"].setdefault("Authorization", f"Bearer {api_key}")

        # Basic auth (auth_string) only applies when no API-key Authorization
        # header was already set, because of setdefault above.
        if auth_string:
            token = base64.b64encode(auth_string.encode("utf-8")).decode("utf-8")
            httpx_settings["headers"].setdefault("Authorization", f"Basic {token}")

        # Context management
        self._context_stack: int = 0
        self._ephemeral_app: Optional[ASGIApp] = None
        self.manage_lifespan = True
        self.server_type: ServerType

        self._closed = False
        self._started = False

        # Connect to an external application
        if isinstance(api, str):
            if httpx_settings.get("app"):
                raise ValueError(
                    "Invalid httpx settings: `app` cannot be set when providing an "
                    "api url. `app` is only for use with ephemeral instances. Provide "
                    "it as the `api` parameter instead."
                )
            httpx_settings.setdefault("base_url", api)

            # See https://www.python-httpx.org/advanced/#pool-limit-configuration
            httpx_settings.setdefault(
                "limits",
                httpx.Limits(
                    # We see instability when allowing the client to open many connections at once.
                    # Limiting concurrency results in more stable performance.
                    max_connections=16,
                    max_keepalive_connections=8,
                    # The Prefect Cloud LB will keep connections alive for 30s.
                    # Only allow the client to keep connections alive for 25s.
                    keepalive_expiry=25,
                ),
            )

            # See https://www.python-httpx.org/http2/
            # Enabling HTTP/2 support on the client does not necessarily mean that your requests
            # and responses will be transported over HTTP/2, since both the client and the server
            # need to support HTTP/2. If you connect to a server that only supports HTTP/1.1 the
            # client will use a standard HTTP/1.1 connection instead.
            httpx_settings.setdefault("http2", PREFECT_API_ENABLE_HTTP2.value())

            if server_type:
                self.server_type = server_type
            else:
                self.server_type = (
                    ServerType.CLOUD
                    if api.startswith(PREFECT_CLOUD_API_URL.value())
                    else ServerType.SERVER
                )

        # Connect to an in-process application
        else:
            self._ephemeral_app = api
            self.server_type = ServerType.EPHEMERAL

        # See https://www.python-httpx.org/advanced/#timeout-configuration
        httpx_settings.setdefault(
            "timeout",
            httpx.Timeout(
                connect=PREFECT_API_REQUEST_TIMEOUT.value(),
                read=PREFECT_API_REQUEST_TIMEOUT.value(),
                write=PREFECT_API_REQUEST_TIMEOUT.value(),
                pool=PREFECT_API_REQUEST_TIMEOUT.value(),
            ),
        )

        if not PREFECT_TESTING_UNIT_TEST_MODE:
            httpx_settings.setdefault("follow_redirects", True)

        # CSRF tokens are only needed when talking to a non-Cloud server.
        enable_csrf_support = (
            self.server_type != ServerType.CLOUD
            and PREFECT_CLIENT_CSRF_SUPPORT_ENABLED.value()
        )

        self._client = PrefectHttpxSyncClient(
            **httpx_settings, enable_csrf_support=enable_csrf_support
        )

        # See https://www.python-httpx.org/advanced/#custom-transports
        #
        # If we're using an HTTP/S client (not the ephemeral client), adjust the
        # transport to add retries _after_ it is instantiated. If we alter the transport
        # before instantiation, the transport will not be aware of proxies unless we
        # reproduce all of the logic to make it so.
        #
        # Only alter the transport to set our default of 3 retries, don't modify any
        # transport a user may have provided via httpx_settings.
        #
        # Making liberal use of getattr and isinstance checks here to avoid any
        # surprises if the internals of httpx or httpcore change on us
        if isinstance(api, str) and not httpx_settings.get("transport"):
            transport_for_url = getattr(self._client, "_transport_for_url", None)
            if callable(transport_for_url):
                server_transport = transport_for_url(httpx.URL(api))
                if isinstance(server_transport, httpx.HTTPTransport):
                    pool = getattr(server_transport, "_pool", None)
                    if isinstance(pool, httpcore.ConnectionPool):
                        setattr(pool, "_retries", 3)

        self.logger: Logger = get_logger("client")
1669
+
1670
    @property
    def api_url(self) -> httpx.URL:
        """
        Get the base URL for the API.
        """
        return self._client.base_url
1676
+
1677
+ # Context management ----------------------------------------------------------------
1678
+
1679
    def __enter__(self) -> "SyncPrefectClient":
        """
        Start the client.

        If the client is already started, this will raise an exception.

        If the client is already closed, this will raise an exception. Use a new client
        instance instead.
        """
        if self._closed:
            # httpx.Client does not allow reuse so we will not either.
            raise RuntimeError(
                "The client cannot be started again after closing. "
                "Retrieve a new client with `get_client()` instead."
            )

        # Reference-count nested `with` blocks; only the first entry starts
        # the underlying httpx client.
        self._context_stack += 1

        if self._started:
            # allow reentrancy
            return self

        self._client.__enter__()
        self._started = True

        return self
1705
+
1706
    def __exit__(self, *exc_info: Any) -> None:
        """
        Shutdown the client.

        Teardown only runs when the outermost `with` block exits, matching the
        reference counting in `__enter__`.
        """
        self._context_stack -= 1
        if self._context_stack > 0:
            return
        self._closed = True
        self._client.__exit__(*exc_info)
1715
+
1716
+ # API methods ----------------------------------------------------------------------
1717
+
1718
+ def api_healthcheck(self) -> Optional[Exception]:
1719
+ """
1720
+ Attempts to connect to the API and returns the encountered exception if not
1721
+ successful.
1722
+
1723
+ If successful, returns `None`.
1724
+ """
1725
+ try:
1726
+ self._client.get("/health")
1727
+ return None
1728
+ except Exception as exc:
1729
+ return exc
1730
+
1731
def hello(self) -> "httpx.Response":
    """Issue a GET to `/hello`; useful as a connectivity smoke test."""
    return self._client.get("/hello")
1736
+
1737
def api_version(self) -> str:
    """Return the version string reported by the server's admin endpoint."""
    response = self._client.get("/admin/version")
    return response.json()
1740
+
1741
def client_version(self) -> str:
    """Return the version of the locally installed `prefect` package."""
    return prefect.__version__
1743
+
1744
def raise_for_api_version_mismatch(self) -> None:
    """
    Verify that the server's major API version matches this client's.

    Raises:
        RuntimeError: when the API cannot be reached, or when the major
            versions of client and server differ.
    """
    # Cloud is always compatible as a server
    if self.server_type == ServerType.CLOUD:
        return

    try:
        raw_api_version = self.api_version()
    except Exception as e:
        # Surface auth failures as-is; wrap everything else as a
        # reachability problem.
        if "Unauthorized" in str(e):
            raise e
        raise RuntimeError(f"Failed to reach API at {self.api_url}") from e

    server_version = version.parse(raw_api_version)
    local_version = version.parse(self.client_version())

    if server_version.major != local_version.major:
        raise RuntimeError(
            f"Found incompatible versions: client: {local_version}, server: {server_version}. "
            f"Major versions must match."
        )
1764
+
1765
def set_task_run_name(self, task_run_id: UUID, name: str) -> httpx.Response:
    """
    Update the display name of an existing task run.

    Args:
        task_run_id: the ID of the task run to rename
        name: the new name

    Returns:
        the raw HTTP response from the PATCH request
    """
    update = TaskRunUpdate(name=name)
    return self._client.patch(
        f"/task_runs/{task_run_id}",
        json=update.model_dump(mode="json", exclude_unset=True),
    )
1771
+
1772
def create_task_run(
    self,
    task: "TaskObject[P, R]",
    flow_run_id: Optional[UUID],
    dynamic_key: str,
    id: Optional[UUID] = None,
    name: Optional[str] = None,
    extra_tags: Optional[Iterable[str]] = None,
    state: Optional[prefect.states.State[R]] = None,
    task_inputs: Optional[
        dict[
            str,
            list[
                Union[
                    TaskRunResult,
                    Parameter,
                    Constant,
                ]
            ],
        ]
    ] = None,
) -> TaskRun:
    """
    Create a task run on the server.

    Args:
        task: the Task whose run is being created
        flow_run_id: the flow run to associate the task run with, if any
        dynamic_key: a key that uniquely identifies this run of the Task
            within the flow
        id: an optional pre-generated ID for the task run; when omitted,
            the server assigns one
        name: an optional display name for the task run
        extra_tags: tags applied in addition to `task.tags`
        state: the initial state for the run; defaults to `Pending`.
            Should always be a `Scheduled` type.
        task_inputs: the set of inputs passed to the task

    Returns:
        The created task run.
    """
    if state is None:
        state = prefect.states.Pending()

    # The API expects integer retry delays; coerce a float (or a list of
    # floats) accordingly.
    retry_delay = task.retry_delay_seconds
    if isinstance(retry_delay, list):
        retry_delay = [int(delay) for delay in retry_delay]
    elif isinstance(retry_delay, float):
        retry_delay = int(retry_delay)

    run_create = TaskRunCreate(
        id=id,
        name=name,
        flow_run_id=flow_run_id,
        task_key=task.task_key,
        dynamic_key=dynamic_key,
        tags=list(set(task.tags).union(extra_tags or [])),
        task_version=task.version,
        empirical_policy=TaskRunPolicy(
            retries=task.retries,
            retry_delay=retry_delay,
            retry_jitter_factor=task.retry_jitter_factor,
        ),
        state=state.to_state_create(),
        task_inputs=task_inputs or {},
    )

    # Let the server generate an ID when the caller did not supply one.
    exclude = {"id"} if id is None else None
    response = self._client.post(
        "/task_runs/", content=run_create.model_dump_json(exclude=exclude)
    )
    return TaskRun.model_validate(response.json())
1845
+
1846
def read_task_run(self, task_run_id: UUID) -> TaskRun:
    """
    Fetch a single task run by ID.

    Args:
        task_run_id: the ID of the task run of interest

    Returns:
        a TaskRun model representation of the task run

    Raises:
        prefect.exceptions.ObjectNotFound: if the API returns a 404 for
            the given ID
    """
    try:
        response = self._client.get(f"/task_runs/{task_run_id}")
    except httpx.HTTPStatusError as e:
        if e.response.status_code == status.HTTP_404_NOT_FOUND:
            raise prefect.exceptions.ObjectNotFound(http_exc=e) from e
        raise
    return TaskRun.model_validate(response.json())
1864
+
1865
def read_task_runs(
    self,
    *,
    flow_filter: Optional[FlowFilter] = None,
    flow_run_filter: Optional[FlowRunFilter] = None,
    task_run_filter: Optional[TaskRunFilter] = None,
    deployment_filter: Optional[DeploymentFilter] = None,
    sort: Optional[TaskRunSort] = None,
    limit: Optional[int] = None,
    offset: int = 0,
) -> list[TaskRun]:
    """
    Query the Prefect API for task runs matching all of the given criteria.

    Args:
        flow_filter: filter criteria for flows
        flow_run_filter: filter criteria for flow runs
        task_run_filter: filter criteria for task runs
        deployment_filter: filter criteria for deployments
        sort: sort criteria for the task runs
        limit: a limit for the task run query
        offset: an offset for the task run query

    Returns:
        a list of TaskRun model representations of the matching task runs
    """
    body: dict[str, Any] = {
        "flows": None,
        "flow_runs": None,
        "task_runs": None,
        "deployments": None,
    }
    if flow_filter:
        body["flows"] = flow_filter.model_dump(mode="json")
    if flow_run_filter:
        # Flow run filters intentionally omit unset fields.
        body["flow_runs"] = flow_run_filter.model_dump(
            mode="json", exclude_unset=True
        )
    if task_run_filter:
        body["task_runs"] = task_run_filter.model_dump(mode="json")
    if deployment_filter:
        body["deployments"] = deployment_filter.model_dump(mode="json")
    body.update({"sort": sort, "limit": limit, "offset": offset})

    response = self._client.post("/task_runs/filter", json=body)
    return pydantic.TypeAdapter(list[TaskRun]).validate_python(response.json())
1912
+
1913
def set_task_run_state(
    self,
    task_run_id: UUID,
    state: prefect.states.State[Any],
    force: bool = False,
) -> OrchestrationResult[Any]:
    """
    Propose a new state for a task run.

    Args:
        task_run_id: the ID of the task run to update
        state: the state to propose
        force: when True, disregard orchestration logic and force the
            Prefect API to accept the state

    Returns:
        an OrchestrationResult model representation of the state
        orchestration output
    """
    state_create = state.to_state_create()
    state_create.state_details.task_run_id = task_run_id
    payload = {"state": state_create.model_dump(mode="json"), "force": force}
    response = self._client.post(
        f"/task_runs/{task_run_id}/set_state", json=payload
    )
    return OrchestrationResult.model_validate(response.json())
1941
+
1942
def read_task_run_states(self, task_run_id: UUID) -> list[prefect.states.State]:
    """
    Fetch the state history of a task run.

    Args:
        task_run_id: the ID of the task run of interest

    Returns:
        a list of State model representations of the task run's states
    """
    response = self._client.get(
        "/task_run_states/", params={"task_run_id": str(task_run_id)}
    )
    adapter = pydantic.TypeAdapter(list[prefect.states.State])
    return adapter.validate_python(response.json())